summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJonathan Abrahams <jonathan@mongodb.com>2016-03-09 12:17:50 -0500
committerJonathan Abrahams <jonathan@mongodb.com>2016-03-09 12:18:14 -0500
commit4ae691e8edc87d0e3cfb633bb91c328426be007b (patch)
tree52079a593f54382ca13a2e741633eab1b6271893
parenta025d43f3ce2efc1fb1282a718f5d286fa0a4dc1 (diff)
downloadmongo-4ae691e8edc87d0e3cfb633bb91c328426be007b.tar.gz
SERVER-22468 Format JS code with approved style in jstests/
-rw-r--r--jstests/aggregation/bugs/cond.js93
-rw-r--r--jstests/aggregation/bugs/firstlast.js130
-rw-r--r--jstests/aggregation/bugs/ifnull.js70
-rw-r--r--jstests/aggregation/bugs/lookup_unwind_getmore.js18
-rw-r--r--jstests/aggregation/bugs/match.js217
-rw-r--r--jstests/aggregation/bugs/server10176.js47
-rw-r--r--jstests/aggregation/bugs/server10530.js6
-rw-r--r--jstests/aggregation/bugs/server11118.js74
-rw-r--r--jstests/aggregation/bugs/server11675.js168
-rw-r--r--jstests/aggregation/bugs/server12015.js15
-rw-r--r--jstests/aggregation/bugs/server13715.js10
-rw-r--r--jstests/aggregation/bugs/server14421.js15
-rw-r--r--jstests/aggregation/bugs/server14969.js6
-rw-r--r--jstests/aggregation/bugs/server15810.js2
-rw-r--r--jstests/aggregation/bugs/server17224.js7
-rw-r--r--jstests/aggregation/bugs/server17943.js52
-rw-r--r--jstests/aggregation/bugs/server18198.js27
-rw-r--r--jstests/aggregation/bugs/server18222.js11
-rw-r--r--jstests/aggregation/bugs/server18427.js14
-rw-r--r--jstests/aggregation/bugs/server19095.js387
-rw-r--r--jstests/aggregation/bugs/server20168.js16
-rw-r--r--jstests/aggregation/bugs/server21632.js5
-rw-r--r--jstests/aggregation/bugs/server22093.js13
-rw-r--r--jstests/aggregation/bugs/server3253.js61
-rw-r--r--jstests/aggregation/bugs/server3832.js73
-rw-r--r--jstests/aggregation/bugs/server4588.js17
-rw-r--r--jstests/aggregation/bugs/server4589.js2
-rw-r--r--jstests/aggregation/bugs/server4638.js8
-rw-r--r--jstests/aggregation/bugs/server4656.js32
-rw-r--r--jstests/aggregation/bugs/server4738.js15
-rw-r--r--jstests/aggregation/bugs/server4899.js18
-rw-r--r--jstests/aggregation/bugs/server5012.js22
-rw-r--r--jstests/aggregation/bugs/server5044.js24
-rw-r--r--jstests/aggregation/bugs/server5209.js7
-rw-r--r--jstests/aggregation/bugs/server5782.js16
-rw-r--r--jstests/aggregation/bugs/server5932.js28
-rw-r--r--jstests/aggregation/bugs/server5973.js6
-rw-r--r--jstests/aggregation/bugs/server6045.js13
-rw-r--r--jstests/aggregation/bugs/server6118.js53
-rw-r--r--jstests/aggregation/bugs/server6120.js48
-rw-r--r--jstests/aggregation/bugs/server6121.js93
-rw-r--r--jstests/aggregation/bugs/server6125.js144
-rw-r--r--jstests/aggregation/bugs/server6127.js34
-rw-r--r--jstests/aggregation/bugs/server6131.js62
-rw-r--r--jstests/aggregation/bugs/server6143.js7
-rw-r--r--jstests/aggregation/bugs/server6147.js38
-rw-r--r--jstests/aggregation/bugs/server6165.js110
-rw-r--r--jstests/aggregation/bugs/server6177.js12
-rw-r--r--jstests/aggregation/bugs/server6179.js113
-rw-r--r--jstests/aggregation/bugs/server6181.js10
-rw-r--r--jstests/aggregation/bugs/server6184.js14
-rw-r--r--jstests/aggregation/bugs/server6185.js12
-rw-r--r--jstests/aggregation/bugs/server6186.js30
-rw-r--r--jstests/aggregation/bugs/server6189.js114
-rw-r--r--jstests/aggregation/bugs/server6190.js130
-rw-r--r--jstests/aggregation/bugs/server6192_server6193.js49
-rw-r--r--jstests/aggregation/bugs/server6194.js8
-rw-r--r--jstests/aggregation/bugs/server6195.js51
-rw-r--r--jstests/aggregation/bugs/server6198.js2
-rw-r--r--jstests/aggregation/bugs/server6238.js14
-rw-r--r--jstests/aggregation/bugs/server6239.js2
-rw-r--r--jstests/aggregation/bugs/server6240.js25
-rw-r--r--jstests/aggregation/bugs/server6269.js11
-rw-r--r--jstests/aggregation/bugs/server6275.js16
-rw-r--r--jstests/aggregation/bugs/server6290.js18
-rw-r--r--jstests/aggregation/bugs/server6335.js3
-rw-r--r--jstests/aggregation/bugs/server6361.js16
-rw-r--r--jstests/aggregation/bugs/server6468.js10
-rw-r--r--jstests/aggregation/bugs/server6529.js27
-rw-r--r--jstests/aggregation/bugs/server6530.js4
-rw-r--r--jstests/aggregation/bugs/server6531.js22
-rw-r--r--jstests/aggregation/bugs/server6556.js19
-rw-r--r--jstests/aggregation/bugs/server6570.js16
-rw-r--r--jstests/aggregation/bugs/server6779.js6
-rw-r--r--jstests/aggregation/bugs/server6861.js18
-rw-r--r--jstests/aggregation/bugs/server7768.js13
-rw-r--r--jstests/aggregation/bugs/server7781.js292
-rw-r--r--jstests/aggregation/bugs/server7900.js7
-rw-r--r--jstests/aggregation/bugs/server8141.js8
-rw-r--r--jstests/aggregation/bugs/server8568.js4
-rw-r--r--jstests/aggregation/bugs/server8581.js292
-rw-r--r--jstests/aggregation/bugs/server9289.js3
-rw-r--r--jstests/aggregation/bugs/server9444.js23
-rw-r--r--jstests/aggregation/bugs/server9625.js4
-rw-r--r--jstests/aggregation/bugs/server9840.js107
-rw-r--r--jstests/aggregation/bugs/server9841.js34
-rw-r--r--jstests/aggregation/bugs/strcasecmp.js76
-rw-r--r--jstests/aggregation/bugs/substr.js149
-rw-r--r--jstests/aggregation/bugs/upperlower.js75
-rw-r--r--jstests/aggregation/data/articles.js65
-rw-r--r--jstests/aggregation/disabled/server5369.js6
-rw-r--r--jstests/aggregation/extras/debug.js30
-rw-r--r--jstests/aggregation/extras/limitskip.js68
-rw-r--r--jstests/aggregation/extras/mrabench.js52
-rw-r--r--jstests/aggregation/extras/testutils.js121
-rw-r--r--jstests/aggregation/extras/utils.js253
-rw-r--r--jstests/aggregation/mongos_slaveok.js51
-rw-r--r--jstests/aggregation/testSlave.js8
-rw-r--r--jstests/aggregation/testall.js1893
-rw-r--r--jstests/aggregation/testshard1.js215
-rw-r--r--jstests/aggregation/unwind.js19
-rw-r--r--jstests/auth/access_control_with_unreachable_configs.js22
-rw-r--r--jstests/auth/arbiter.js30
-rw-r--r--jstests/auth/auth1.js103
-rw-r--r--jstests/auth/auth2.js26
-rw-r--r--jstests/auth/auth3.js38
-rw-r--r--jstests/auth/auth_helpers.js24
-rw-r--r--jstests/auth/auth_options.js48
-rw-r--r--jstests/auth/auth_schema_upgrade.js41
-rw-r--r--jstests/auth/authz_modifications_access_control.js422
-rw-r--r--jstests/auth/basic_role_auth.js777
-rw-r--r--jstests/auth/builtin_roles_system_colls.js30
-rw-r--r--jstests/auth/clac_system_colls.js53
-rw-r--r--jstests/auth/commands_builtin_roles.js47
-rw-r--r--jstests/auth/commands_user_defined_roles.js68
-rw-r--r--jstests/auth/copyauth.js176
-rw-r--r--jstests/auth/copyauth2.js25
-rw-r--r--jstests/auth/copyauth_between_shards.js26
-rw-r--r--jstests/auth/db_multiple_login.js18
-rw-r--r--jstests/auth/disable_localhost_bypass.js22
-rw-r--r--jstests/auth/explain_auth.js73
-rw-r--r--jstests/auth/indexSystemUsers.js38
-rw-r--r--jstests/auth/iteration_count_control.js17
-rw-r--r--jstests/auth/js_scope_leak.js80
-rw-r--r--jstests/auth/lib/commands_lib.js4878
-rw-r--r--jstests/auth/localhostAuthBypass.js125
-rw-r--r--jstests/auth/log_user_basic.js451
-rw-r--r--jstests/auth/log_userid_off.js17
-rw-r--r--jstests/auth/logout_reconnect.js36
-rw-r--r--jstests/auth/mergeAuthCollsCommand.js65
-rw-r--r--jstests/auth/mongos_cache_invalidation.js279
-rw-r--r--jstests/auth/mr_auth.js104
-rw-r--r--jstests/auth/profile.js7
-rw-r--r--jstests/auth/profile_access.js33
-rw-r--r--jstests/auth/pseudo_commands.js260
-rw-r--r--jstests/auth/readIndex.js14
-rw-r--r--jstests/auth/rename.js16
-rw-r--r--jstests/auth/renameSystemCollections.js30
-rw-r--r--jstests/auth/repl.js65
-rw-r--r--jstests/auth/repl_auth.js27
-rw-r--r--jstests/auth/resource_pattern_matching.js245
-rw-r--r--jstests/auth/role_management_commands.js468
-rw-r--r--jstests/auth/role_management_commands_edge_cases.js677
-rw-r--r--jstests/auth/secondary_invalidation.js21
-rw-r--r--jstests/auth/server-4892.js83
-rw-r--r--jstests/auth/show_log_auth.js16
-rw-r--r--jstests/auth/system_user_privileges.js11
-rw-r--r--jstests/auth/user_defined_roles.js124
-rw-r--r--jstests/auth/user_defined_roles_on_secondaries.js354
-rw-r--r--jstests/auth/user_management_commands.js345
-rw-r--r--jstests/auth/user_management_commands_edge_cases.js499
-rw-r--r--jstests/auth/user_special_chars.js9
-rw-r--r--jstests/concurrency/fsm_all.js5
-rw-r--r--jstests/concurrency/fsm_all_composed.js4
-rw-r--r--jstests/concurrency/fsm_all_replication.js11
-rw-r--r--jstests/concurrency/fsm_all_sharded_replication.js63
-rw-r--r--jstests/concurrency/fsm_all_sharded_replication_with_balancer.js67
-rw-r--r--jstests/concurrency/fsm_all_simultaneous.js8
-rw-r--r--jstests/concurrency/fsm_background_workloads/background_base.js10
-rw-r--r--jstests/concurrency/fsm_example.js23
-rw-r--r--jstests/concurrency/fsm_example_inheritance.js30
-rw-r--r--jstests/concurrency/fsm_libs/assert.js2
-rw-r--r--jstests/concurrency/fsm_libs/cluster.js77
-rw-r--r--jstests/concurrency/fsm_libs/composer.js22
-rw-r--r--jstests/concurrency/fsm_libs/extend_workload.js11
-rw-r--r--jstests/concurrency/fsm_libs/fsm.js12
-rw-r--r--jstests/concurrency/fsm_libs/parse_config.js92
-rw-r--r--jstests/concurrency/fsm_libs/runner.js238
-rw-r--r--jstests/concurrency/fsm_libs/thread_mgr.js66
-rw-r--r--jstests/concurrency/fsm_libs/worker_thread.js45
-rw-r--r--jstests/concurrency/fsm_selftests.js30
-rw-r--r--jstests/concurrency/fsm_utils/setup_teardown_functions.js17
-rw-r--r--jstests/concurrency/fsm_workload_helpers/drop_utils.js12
-rw-r--r--jstests/concurrency/fsm_workload_helpers/indexed_noindex.js1
-rw-r--r--jstests/concurrency/fsm_workload_helpers/server_types.js7
-rw-r--r--jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js1
-rw-r--r--jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js2
-rw-r--r--jstests/concurrency/fsm_workload_modifiers/make_capped.js10
-rw-r--r--jstests/concurrency/fsm_workloads/agg_base.js22
-rw-r--r--jstests/concurrency/fsm_workloads/agg_group_external.js88
-rw-r--r--jstests/concurrency/fsm_workloads/agg_match.js50
-rw-r--r--jstests/concurrency/fsm_workloads/agg_sort.js50
-rw-r--r--jstests/concurrency/fsm_workloads/agg_sort_external.js81
-rw-r--r--jstests/concurrency/fsm_workloads/auth_create_role.js18
-rw-r--r--jstests/concurrency/fsm_workloads/auth_create_user.js12
-rw-r--r--jstests/concurrency/fsm_workloads/auth_drop_role.js21
-rw-r--r--jstests/concurrency/fsm_workloads/auth_drop_user.js13
-rw-r--r--jstests/concurrency/fsm_workloads/collmod.js19
-rw-r--r--jstests/concurrency/fsm_workloads/collmod_separate_collections.js52
-rw-r--r--jstests/concurrency/fsm_workloads/compact.js43
-rw-r--r--jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js47
-rw-r--r--jstests/concurrency/fsm_workloads/convert_to_capped_collection.js8
-rw-r--r--jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js22
-rw-r--r--jstests/concurrency/fsm_workloads/count.js10
-rw-r--r--jstests/concurrency/fsm_workloads/count_indexed.js49
-rw-r--r--jstests/concurrency/fsm_workloads/count_limit_skip.js83
-rw-r--r--jstests/concurrency/fsm_workloads/create_capped_collection.js25
-rw-r--r--jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js83
-rw-r--r--jstests/concurrency/fsm_workloads/create_collection.js6
-rw-r--r--jstests/concurrency/fsm_workloads/create_index_background.js98
-rw-r--r--jstests/concurrency/fsm_workloads/distinct.js12
-rw-r--r--jstests/concurrency/fsm_workloads/distinct_noindex.js9
-rw-r--r--jstests/concurrency/fsm_workloads/distinct_projection.js26
-rw-r--r--jstests/concurrency/fsm_workloads/drop_collection.js4
-rw-r--r--jstests/concurrency/fsm_workloads/drop_database.js4
-rw-r--r--jstests/concurrency/fsm_workloads/explain.js28
-rw-r--r--jstests/concurrency/fsm_workloads/explain_aggregate.js76
-rw-r--r--jstests/concurrency/fsm_workloads/explain_count.js101
-rw-r--r--jstests/concurrency/fsm_workloads/explain_distinct.js48
-rw-r--r--jstests/concurrency/fsm_workloads/explain_find.js111
-rw-r--r--jstests/concurrency/fsm_workloads/explain_group.js37
-rw-r--r--jstests/concurrency/fsm_workloads/explain_remove.js68
-rw-r--r--jstests/concurrency/fsm_workloads/explain_update.js114
-rw-r--r--jstests/concurrency/fsm_workloads/findAndModify_inc.js19
-rw-r--r--jstests/concurrency/fsm_workloads/findAndModify_remove.js12
-rw-r--r--jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js43
-rw-r--r--jstests/concurrency/fsm_workloads/findAndModify_update.js32
-rw-r--r--jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js22
-rw-r--r--jstests/concurrency/fsm_workloads/findAndModify_update_grow.js43
-rw-r--r--jstests/concurrency/fsm_workloads/findAndModify_update_queue.js104
-rw-r--r--jstests/concurrency/fsm_workloads/findAndModify_upsert.js21
-rw-r--r--jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js15
-rw-r--r--jstests/concurrency/fsm_workloads/group.js12
-rw-r--r--jstests/concurrency/fsm_workloads/group_cond.js46
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_1char.js25
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_2d.js78
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js23
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_base.js16
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js6
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_compound.js48
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_eval.js42
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js13
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js94
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_large.js49
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js24
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_multikey.js30
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js44
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_text.js6
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js44
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_ttl.js20
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js44
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_upsert.js50
-rw-r--r--jstests/concurrency/fsm_workloads/indexed_insert_where.js19
-rw-r--r--jstests/concurrency/fsm_workloads/list_indexes.js9
-rw-r--r--jstests/concurrency/fsm_workloads/map_reduce_drop.js11
-rw-r--r--jstests/concurrency/fsm_workloads/map_reduce_inline.js6
-rw-r--r--jstests/concurrency/fsm_workloads/map_reduce_merge.js77
-rw-r--r--jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js94
-rw-r--r--jstests/concurrency/fsm_workloads/map_reduce_reduce.js86
-rw-r--r--jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js67
-rw-r--r--jstests/concurrency/fsm_workloads/map_reduce_replace.js90
-rw-r--r--jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js72
-rw-r--r--jstests/concurrency/fsm_workloads/plan_cache_drop_database.js12
-rw-r--r--jstests/concurrency/fsm_workloads/reindex.js68
-rw-r--r--jstests/concurrency/fsm_workloads/reindex_background.js36
-rw-r--r--jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js4
-rw-r--r--jstests/concurrency/fsm_workloads/remove_multiple_documents.js18
-rw-r--r--jstests/concurrency/fsm_workloads/remove_single_document.js14
-rw-r--r--jstests/concurrency/fsm_workloads/remove_single_document_eval.js50
-rw-r--r--jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js13
-rw-r--r--jstests/concurrency/fsm_workloads/remove_where.js57
-rw-r--r--jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js6
-rw-r--r--jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js6
-rw-r--r--jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js6
-rw-r--r--jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js10
-rw-r--r--jstests/concurrency/fsm_workloads/rename_collection_chain.js6
-rw-r--r--jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js6
-rw-r--r--jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js6
-rw-r--r--jstests/concurrency/fsm_workloads/rename_collection_droptarget.js10
-rw-r--r--jstests/concurrency/fsm_workloads/server_status.js2
-rw-r--r--jstests/concurrency/fsm_workloads/touch_base.js93
-rw-r--r--jstests/concurrency/fsm_workloads/touch_data.js24
-rw-r--r--jstests/concurrency/fsm_workloads/touch_index.js24
-rw-r--r--jstests/concurrency/fsm_workloads/touch_no_data_no_index.js34
-rw-r--r--jstests/concurrency/fsm_workloads/update_and_bulk_insert.js6
-rw-r--r--jstests/concurrency/fsm_workloads/update_array.js41
-rw-r--r--jstests/concurrency/fsm_workloads/update_array_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/update_check_index.js14
-rw-r--r--jstests/concurrency/fsm_workloads/update_inc.js21
-rw-r--r--jstests/concurrency/fsm_workloads/update_inc_capped.js6
-rw-r--r--jstests/concurrency/fsm_workloads/update_multifield.js38
-rw-r--r--jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js52
-rw-r--r--jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js88
-rw-r--r--jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/update_multifield_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js20
-rw-r--r--jstests/concurrency/fsm_workloads/update_rename.js16
-rw-r--r--jstests/concurrency/fsm_workloads/update_rename_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/update_replace.js24
-rw-r--r--jstests/concurrency/fsm_workloads/update_replace_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/update_simple.js27
-rw-r--r--jstests/concurrency/fsm_workloads/update_simple_eval.js38
-rw-r--r--jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js13
-rw-r--r--jstests/concurrency/fsm_workloads/update_simple_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/update_upsert_multi.js37
-rw-r--r--jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js6
-rw-r--r--jstests/concurrency/fsm_workloads/update_where.js82
-rw-r--r--jstests/concurrency/fsm_workloads/upsert_where.js64
-rw-r--r--jstests/concurrency/fsm_workloads/yield.js50
-rw-r--r--jstests/concurrency/fsm_workloads/yield_and_hashed.js109
-rw-r--r--jstests/concurrency/fsm_workloads/yield_and_sorted.js92
-rw-r--r--jstests/concurrency/fsm_workloads/yield_fetch.js35
-rw-r--r--jstests/concurrency/fsm_workloads/yield_geo_near.js127
-rw-r--r--jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js148
-rw-r--r--jstests/concurrency/fsm_workloads/yield_id_hack.js43
-rw-r--r--jstests/concurrency/fsm_workloads/yield_rooted_or.js78
-rw-r--r--jstests/concurrency/fsm_workloads/yield_sort.js59
-rw-r--r--jstests/concurrency/fsm_workloads/yield_sort_merge.js82
-rw-r--r--jstests/concurrency/fsm_workloads/yield_text.js77
-rw-r--r--jstests/core/all.js70
-rw-r--r--jstests/core/all2.js96
-rw-r--r--jstests/core/all3.js30
-rw-r--r--jstests/core/all4.js34
-rw-r--r--jstests/core/all5.js30
-rw-r--r--jstests/core/and.js84
-rw-r--r--jstests/core/and2.js12
-rw-r--r--jstests/core/and3.js92
-rw-r--r--jstests/core/andor.js148
-rw-r--r--jstests/core/apitest_db.js95
-rw-r--r--jstests/core/apitest_dbcollection.js194
-rw-r--r--jstests/core/apply_ops1.js225
-rw-r--r--jstests/core/apply_ops2.js74
-rw-r--r--jstests/core/apply_ops_dups.js29
-rw-r--r--jstests/core/array1.js16
-rw-r--r--jstests/core/array3.js11
-rw-r--r--jstests/core/array4.js18
-rw-r--r--jstests/core/array_match1.js34
-rw-r--r--jstests/core/array_match2.js18
-rw-r--r--jstests/core/array_match3.js10
-rw-r--r--jstests/core/array_match4.js4
-rw-r--r--jstests/core/arrayfind1.js43
-rw-r--r--jstests/core/arrayfind2.js35
-rw-r--r--jstests/core/arrayfind3.js17
-rw-r--r--jstests/core/arrayfind4.js22
-rw-r--r--jstests/core/arrayfind5.js27
-rw-r--r--jstests/core/arrayfind6.js14
-rw-r--r--jstests/core/arrayfind7.js52
-rw-r--r--jstests/core/arrayfind8.js163
-rw-r--r--jstests/core/arrayfind9.js30
-rw-r--r--jstests/core/arrayfinda.js20
-rw-r--r--jstests/core/arrayfindb.js12
-rw-r--r--jstests/core/auth1.js46
-rw-r--r--jstests/core/auth2.js8
-rw-r--r--jstests/core/auth_copydb.js18
-rw-r--r--jstests/core/autoid.js16
-rw-r--r--jstests/core/bad_index_plugin.js6
-rw-r--r--jstests/core/basic1.js22
-rw-r--r--jstests/core/basic2.js18
-rw-r--r--jstests/core/basic3.js28
-rw-r--r--jstests/core/basic4.js16
-rw-r--r--jstests/core/basic5.js7
-rw-r--r--jstests/core/basic6.js4
-rw-r--r--jstests/core/basic7.js11
-rw-r--r--jstests/core/basic8.js8
-rw-r--r--jstests/core/basic9.js12
-rw-r--r--jstests/core/basica.js18
-rw-r--r--jstests/core/basicb.js5
-rw-r--r--jstests/core/batch_size.js13
-rw-r--r--jstests/core/batch_write_command_delete.js174
-rw-r--r--jstests/core/batch_write_command_insert.js169
-rw-r--r--jstests/core/batch_write_command_update.js222
-rw-r--r--jstests/core/batch_write_command_wc.js6
-rw-r--r--jstests/core/bench_test1.js46
-rw-r--r--jstests/core/bench_test2.js54
-rw-r--r--jstests/core/bench_test3.js32
-rw-r--r--jstests/core/big_object1.js56
-rw-r--r--jstests/core/binData.js16
-rw-r--r--jstests/core/bindata_indexonly.js24
-rw-r--r--jstests/core/bittest.js15
-rw-r--r--jstests/core/bulk_api_ordered.js80
-rw-r--r--jstests/core/bulk_api_unordered.js80
-rw-r--r--jstests/core/bulk_insert.js20
-rw-r--r--jstests/core/bulk_insert_capped.js12
-rw-r--r--jstests/core/bulk_legacy_enforce_gle.js87
-rw-r--r--jstests/core/bypass_doc_validation.js80
-rw-r--r--jstests/core/capped.js13
-rw-r--r--jstests/core/capped1.js9
-rw-r--r--jstests/core/capped5.js49
-rw-r--r--jstests/core/capped6.js33
-rw-r--r--jstests/core/capped9.js29
-rw-r--r--jstests/core/capped_convertToCapped1.js28
-rw-r--r--jstests/core/capped_empty.js24
-rw-r--r--jstests/core/capped_max1.js22
-rw-r--r--jstests/core/capped_update.js15
-rw-r--r--jstests/core/cappeda.js25
-rw-r--r--jstests/core/check_shard_index.js175
-rw-r--r--jstests/core/cleanup_orphaned.js2
-rw-r--r--jstests/core/clone_as_capped_nonexistant.js16
-rw-r--r--jstests/core/collection_info_cache_race.js11
-rw-r--r--jstests/core/collection_truncate.js6
-rw-r--r--jstests/core/collmod.js98
-rw-r--r--jstests/core/compact_keeps_indexes.js22
-rw-r--r--jstests/core/compare_timestamps.js1
-rw-r--r--jstests/core/connection_status.js7
-rw-r--r--jstests/core/connection_string_validation.js80
-rw-r--r--jstests/core/constructors.js214
-rw-r--r--jstests/core/copydb.js5
-rw-r--r--jstests/core/count.js33
-rw-r--r--jstests/core/count10.js56
-rw-r--r--jstests/core/count11.js16
-rw-r--r--jstests/core/count2.js34
-rw-r--r--jstests/core/count3.js25
-rw-r--r--jstests/core/count4.js18
-rw-r--r--jstests/core/count5.js36
-rw-r--r--jstests/core/count6.js78
-rw-r--r--jstests/core/count7.js30
-rw-r--r--jstests/core/count9.js30
-rw-r--r--jstests/core/count_plan_summary.js5
-rw-r--r--jstests/core/counta.js27
-rw-r--r--jstests/core/countb.js12
-rw-r--r--jstests/core/countc.js137
-rw-r--r--jstests/core/coveredIndex1.js56
-rw-r--r--jstests/core/coveredIndex2.js28
-rw-r--r--jstests/core/coveredIndex3.js79
-rw-r--r--jstests/core/coveredIndex4.js35
-rw-r--r--jstests/core/covered_index_compound_1.js55
-rw-r--r--jstests/core/covered_index_negative_1.js64
-rw-r--r--jstests/core/covered_index_simple_1.js59
-rw-r--r--jstests/core/covered_index_simple_2.js46
-rw-r--r--jstests/core/covered_index_simple_3.js62
-rw-r--r--jstests/core/covered_index_simple_id.js42
-rw-r--r--jstests/core/covered_index_sort_1.js42
-rw-r--r--jstests/core/covered_index_sort_2.js17
-rw-r--r--jstests/core/covered_index_sort_3.js18
-rw-r--r--jstests/core/create_collection_fail_cleanup.js9
-rw-r--r--jstests/core/create_indexes.js124
-rw-r--r--jstests/core/crud_api.js724
-rw-r--r--jstests/core/currentop.js34
-rw-r--r--jstests/core/currentop_predicate.js14
-rw-r--r--jstests/core/cursor1.js16
-rw-r--r--jstests/core/cursor2.js18
-rw-r--r--jstests/core/cursor3.js41
-rw-r--r--jstests/core/cursor4.js74
-rw-r--r--jstests/core/cursor5.js58
-rw-r--r--jstests/core/cursor6.js92
-rw-r--r--jstests/core/cursor7.js87
-rw-r--r--jstests/core/cursora.js51
-rw-r--r--jstests/core/cursorb.js11
-rw-r--r--jstests/core/datasize2.js48
-rw-r--r--jstests/core/date1.js18
-rw-r--r--jstests/core/date2.js12
-rw-r--r--jstests/core/date3.js34
-rw-r--r--jstests/core/db.js9
-rw-r--r--jstests/core/dbadmin.js56
-rw-r--r--jstests/core/dbcase.js24
-rw-r--r--jstests/core/dbcase2.js9
-rw-r--r--jstests/core/dbhash.js65
-rw-r--r--jstests/core/dbhash2.js26
-rw-r--r--jstests/core/dbref1.js6
-rw-r--r--jstests/core/dbref2.js16
-rw-r--r--jstests/core/dbref3.js6
-rw-r--r--jstests/core/delx.js35
-rw-r--r--jstests/core/depth_limit.js17
-rw-r--r--jstests/core/distinct1.js57
-rw-r--r--jstests/core/distinct2.js13
-rw-r--r--jstests/core/distinct3.js36
-rw-r--r--jstests/core/distinct4.js31
-rw-r--r--jstests/core/distinct_array1.js101
-rw-r--r--jstests/core/distinct_index1.js81
-rw-r--r--jstests/core/distinct_index2.js38
-rw-r--r--jstests/core/distinct_speed1.js21
-rw-r--r--jstests/core/doc_validation.js10
-rw-r--r--jstests/core/doc_validation_invalid_validators.js57
-rw-r--r--jstests/core/doc_validation_options.js56
-rw-r--r--jstests/core/drop.js11
-rw-r--r--jstests/core/drop2.js58
-rw-r--r--jstests/core/drop3.js18
-rw-r--r--jstests/core/drop_index.js22
-rw-r--r--jstests/core/dropdb.js14
-rw-r--r--jstests/core/dropdb_race.js14
-rw-r--r--jstests/core/elemMatchProjection.js432
-rw-r--r--jstests/core/error2.js27
-rw-r--r--jstests/core/error5.js10
-rw-r--r--jstests/core/eval0.js24
-rw-r--r--jstests/core/eval1.js18
-rw-r--r--jstests/core/eval2.js42
-rw-r--r--jstests/core/eval3.js34
-rw-r--r--jstests/core/eval4.js25
-rw-r--r--jstests/core/eval5.js27
-rw-r--r--jstests/core/eval6.js16
-rw-r--r--jstests/core/eval7.js6
-rw-r--r--jstests/core/eval8.js19
-rw-r--r--jstests/core/eval9.js27
-rw-r--r--jstests/core/eval_mr.js12
-rw-r--r--jstests/core/eval_nolock.js20
-rw-r--r--jstests/core/evala.js7
-rw-r--r--jstests/core/evalb.js31
-rw-r--r--jstests/core/evalc.js15
-rw-r--r--jstests/core/evald.js78
-rw-r--r--jstests/core/evale.js10
-rw-r--r--jstests/core/evalg.js15
-rw-r--r--jstests/core/exists.js83
-rw-r--r--jstests/core/exists2.js19
-rw-r--r--jstests/core/exists3.js22
-rw-r--r--jstests/core/exists4.js53
-rw-r--r--jstests/core/exists5.js44
-rw-r--r--jstests/core/exists6.js32
-rw-r--r--jstests/core/exists7.js16
-rw-r--r--jstests/core/exists8.js98
-rw-r--r--jstests/core/exists9.js40
-rw-r--r--jstests/core/existsa.js100
-rw-r--r--jstests/core/existsb.js50
-rw-r--r--jstests/core/explain1.js30
-rw-r--r--jstests/core/explain2.js18
-rw-r--r--jstests/core/explain3.js16
-rw-r--r--jstests/core/explain4.js13
-rw-r--r--jstests/core/explain5.js28
-rw-r--r--jstests/core/explain6.js35
-rw-r--r--jstests/core/explain_batch_size.js10
-rw-r--r--jstests/core/explain_count.js34
-rw-r--r--jstests/core/explain_delete.js37
-rw-r--r--jstests/core/explain_distinct.js17
-rw-r--r--jstests/core/explain_execution_error.js42
-rw-r--r--jstests/core/explain_find.js19
-rw-r--r--jstests/core/explain_find_and_modify.js273
-rw-r--r--jstests/core/explain_missing_collection.js2
-rw-r--r--jstests/core/explain_missing_database.js2
-rw-r--r--jstests/core/explain_multi_plan.js15
-rw-r--r--jstests/core/explain_shell_helpers.js10
-rw-r--r--jstests/core/explain_upsert.js20
-rw-r--r--jstests/core/filemd5.js13
-rw-r--r--jstests/core/find1.js49
-rw-r--r--jstests/core/find2.js16
-rw-r--r--jstests/core/find3.js8
-rw-r--r--jstests/core/find4.js42
-rw-r--r--jstests/core/find5.js60
-rw-r--r--jstests/core/find6.js44
-rw-r--r--jstests/core/find7.js12
-rw-r--r--jstests/core/find8.js20
-rw-r--r--jstests/core/find9.js24
-rw-r--r--jstests/core/find_and_modify.js62
-rw-r--r--jstests/core/find_and_modify2.js18
-rw-r--r--jstests/core/find_and_modify3.js42
-rw-r--r--jstests/core/find_and_modify4.js50
-rw-r--r--jstests/core/find_and_modify_concurrent_update.js9
-rw-r--r--jstests/core/find_and_modify_empty_coll.js4
-rw-r--r--jstests/core/find_and_modify_empty_update.js4
-rw-r--r--jstests/core/find_and_modify_server6226.js5
-rw-r--r--jstests/core/find_and_modify_server6254.js9
-rw-r--r--jstests/core/find_and_modify_server6582.js17
-rw-r--r--jstests/core/find_and_modify_server6588.js39
-rw-r--r--jstests/core/find_and_modify_server6659.js6
-rw-r--r--jstests/core/find_and_modify_server6865.js286
-rw-r--r--jstests/core/find_and_modify_server6909.js29
-rw-r--r--jstests/core/find_and_modify_server6993.js12
-rw-r--r--jstests/core/find_and_modify_server7660.js17
-rw-r--r--jstests/core/find_and_modify_where.js7
-rw-r--r--jstests/core/find_dedup.js17
-rw-r--r--jstests/core/find_getmore_bsonsize.js5
-rw-r--r--jstests/core/find_getmore_cmd.js21
-rw-r--r--jstests/core/find_size.js4
-rw-r--r--jstests/core/finda.js94
-rw-r--r--jstests/core/fm1.js14
-rw-r--r--jstests/core/fm2.js7
-rw-r--r--jstests/core/fm3.js33
-rw-r--r--jstests/core/fm4.js16
-rw-r--r--jstests/core/fsync.js154
-rw-r--r--jstests/core/fts1.js22
-rw-r--r--jstests/core/fts2.js22
-rw-r--r--jstests/core/fts3.js21
-rw-r--r--jstests/core/fts4.js21
-rw-r--r--jstests/core/fts5.js20
-rw-r--r--jstests/core/fts_blog.js30
-rw-r--r--jstests/core/fts_blogwild.js60
-rw-r--r--jstests/core/fts_casesensitive.js4
-rw-r--r--jstests/core/fts_diacritic_and_caseinsensitive.js2
-rw-r--r--jstests/core/fts_diacritic_and_casesensitive.js94
-rw-r--r--jstests/core/fts_diacriticsensitive.js20
-rw-r--r--jstests/core/fts_explain.js2
-rw-r--r--jstests/core/fts_index.js40
-rw-r--r--jstests/core/fts_index2.js4
-rw-r--r--jstests/core/fts_index_version1.js3
-rw-r--r--jstests/core/fts_index_version2.js5
-rw-r--r--jstests/core/fts_mix.js199
-rw-r--r--jstests/core/fts_partition1.js25
-rw-r--r--jstests/core/fts_partition_no_multikey.js10
-rw-r--r--jstests/core/fts_phrase.js38
-rw-r--r--jstests/core/fts_proj.js24
-rw-r--r--jstests/core/fts_projection.js48
-rw-r--r--jstests/core/fts_querylang.js30
-rw-r--r--jstests/core/fts_score_sort.js12
-rw-r--r--jstests/core/fts_spanish.js29
-rw-r--r--jstests/core/geo1.js39
-rw-r--r--jstests/core/geo10.js15
-rw-r--r--jstests/core/geo2.js44
-rw-r--r--jstests/core/geo3.js111
-rw-r--r--jstests/core/geo5.js23
-rw-r--r--jstests/core/geo6.js29
-rw-r--r--jstests/core/geo7.js22
-rw-r--r--jstests/core/geo9.js36
-rw-r--r--jstests/core/geo_2d_with_geojson_point.js10
-rw-r--r--jstests/core/geo_allowedcomparisons.js54
-rw-r--r--jstests/core/geo_array0.js24
-rw-r--r--jstests/core/geo_array1.js32
-rw-r--r--jstests/core/geo_array2.js280
-rw-r--r--jstests/core/geo_big_polygon.js157
-rw-r--r--jstests/core/geo_big_polygon2.js1116
-rw-r--r--jstests/core/geo_big_polygon3.js286
-rw-r--r--jstests/core/geo_borders.js164
-rw-r--r--jstests/core/geo_box1.js56
-rw-r--r--jstests/core/geo_box1_noindex.js46
-rw-r--r--jstests/core/geo_box2.js18
-rw-r--r--jstests/core/geo_box3.js32
-rw-r--r--jstests/core/geo_center_sphere1.js103
-rw-r--r--jstests/core/geo_center_sphere2.js119
-rw-r--r--jstests/core/geo_circle1.js63
-rw-r--r--jstests/core/geo_circle1_noindex.js38
-rw-r--r--jstests/core/geo_circle2.js39
-rw-r--r--jstests/core/geo_circle2a.js41
-rw-r--r--jstests/core/geo_circle3.js29
-rw-r--r--jstests/core/geo_circle4.js38
-rw-r--r--jstests/core/geo_circle5.js20
-rw-r--r--jstests/core/geo_distinct.js117
-rw-r--r--jstests/core/geo_exactfetch.js4
-rw-r--r--jstests/core/geo_fiddly_box.js53
-rw-r--r--jstests/core/geo_fiddly_box2.js41
-rw-r--r--jstests/core/geo_group.js46
-rw-r--r--jstests/core/geo_haystack1.js68
-rw-r--r--jstests/core/geo_haystack2.js69
-rw-r--r--jstests/core/geo_haystack3.js38
-rw-r--r--jstests/core/geo_invalid_2d_params.js10
-rw-r--r--jstests/core/geo_invalid_polygon.js17
-rw-r--r--jstests/core/geo_mapreduce.js54
-rw-r--r--jstests/core/geo_mapreduce2.js39
-rw-r--r--jstests/core/geo_max.js69
-rw-r--r--jstests/core/geo_mindistance.js188
-rw-r--r--jstests/core/geo_mindistance_boundaries.js124
-rw-r--r--jstests/core/geo_multikey0.js29
-rw-r--r--jstests/core/geo_multikey1.js13
-rw-r--r--jstests/core/geo_multinest0.js60
-rw-r--r--jstests/core/geo_multinest1.js37
-rw-r--r--jstests/core/geo_near_random1.js18
-rw-r--r--jstests/core/geo_near_random2.js19
-rw-r--r--jstests/core/geo_nearwithin.js26
-rw-r--r--jstests/core/geo_oob_sphere.js37
-rw-r--r--jstests/core/geo_operator_crs.js38
-rw-r--r--jstests/core/geo_or.js86
-rw-r--r--jstests/core/geo_poly_edge.js22
-rw-r--r--jstests/core/geo_poly_line.js17
-rw-r--r--jstests/core/geo_polygon1.js81
-rw-r--r--jstests/core/geo_polygon1_noindex.js66
-rw-r--r--jstests/core/geo_polygon2.js178
-rw-r--r--jstests/core/geo_polygon3.js104
-rw-r--r--jstests/core/geo_queryoptimizer.js29
-rw-r--r--jstests/core/geo_regex0.js25
-rw-r--r--jstests/core/geo_s2cursorlimitskip.js30
-rw-r--r--jstests/core/geo_s2dedupnear.js10
-rw-r--r--jstests/core/geo_s2descindex.js47
-rw-r--r--jstests/core/geo_s2disjoint_holes.js40
-rw-r--r--jstests/core/geo_s2dupe_points.js81
-rw-r--r--[-rwxr-xr-x]jstests/core/geo_s2edgecases.js71
-rw-r--r--jstests/core/geo_s2exact.js16
-rw-r--r--jstests/core/geo_s2explain.js54
-rw-r--r--jstests/core/geo_s2holesameasshell.js45
-rw-r--r--[-rwxr-xr-x]jstests/core/geo_s2index.js163
-rw-r--r--[-rwxr-xr-x]jstests/core/geo_s2indexoldformat.js24
-rw-r--r--jstests/core/geo_s2indexversion1.js108
-rw-r--r--jstests/core/geo_s2intersection.js111
-rw-r--r--jstests/core/geo_s2largewithin.js27
-rw-r--r--jstests/core/geo_s2meridian.js46
-rw-r--r--jstests/core/geo_s2multi.js70
-rw-r--r--jstests/core/geo_s2near.js88
-rw-r--r--jstests/core/geo_s2nearComplex.js109
-rw-r--r--jstests/core/geo_s2near_equator_opposite.js14
-rw-r--r--jstests/core/geo_s2nearcorrect.js12
-rw-r--r--jstests/core/geo_s2nearwithin.js51
-rw-r--r--jstests/core/geo_s2nongeoarray.js12
-rw-r--r--[-rwxr-xr-x]jstests/core/geo_s2nonstring.js26
-rw-r--r--jstests/core/geo_s2nopoints.js7
-rw-r--r--jstests/core/geo_s2oddshapes.js94
-rw-r--r--jstests/core/geo_s2ordering.js13
-rw-r--r--jstests/core/geo_s2overlappingpolys.js195
-rw-r--r--[-rwxr-xr-x]jstests/core/geo_s2polywithholes.js71
-rw-r--r--jstests/core/geo_s2selfintersectingpoly.js9
-rw-r--r--jstests/core/geo_s2sparse.js34
-rw-r--r--jstests/core/geo_s2twofields.js48
-rw-r--r--jstests/core/geo_s2validindex.js8
-rw-r--r--jstests/core/geo_s2within.js38
-rw-r--r--jstests/core/geo_small_large.js122
-rw-r--r--jstests/core/geo_sort1.js22
-rw-r--r--jstests/core/geo_uniqueDocs.js38
-rw-r--r--jstests/core/geo_uniqueDocs2.js93
-rw-r--r--jstests/core/geo_update.js38
-rw-r--r--jstests/core/geo_update1.js40
-rw-r--r--jstests/core/geo_update2.js43
-rw-r--r--jstests/core/geo_update_btree.js32
-rw-r--r--jstests/core/geo_update_btree2.js51
-rw-r--r--jstests/core/geo_update_dedup.js35
-rw-r--r--jstests/core/geo_validate.js96
-rw-r--r--jstests/core/geo_withinquery.js18
-rw-r--r--jstests/core/geoa.js12
-rw-r--r--jstests/core/geob.js20
-rw-r--r--jstests/core/geoc.js33
-rw-r--r--jstests/core/geod.js20
-rw-r--r--jstests/core/geoe.js37
-rw-r--r--jstests/core/geof.js18
-rw-r--r--jstests/core/geonear_cmd_input_validation.js67
-rw-r--r--jstests/core/getlog1.js26
-rw-r--r--jstests/core/getlog2.js68
-rw-r--r--jstests/core/getmore_invalidation.js14
-rw-r--r--jstests/core/group1.js185
-rw-r--r--jstests/core/group2.js26
-rw-r--r--jstests/core/group3.js33
-rw-r--r--jstests/core/group4.js63
-rw-r--r--jstests/core/group5.js53
-rw-r--r--jstests/core/group6.js36
-rw-r--r--jstests/core/group7.js26
-rw-r--r--jstests/core/group8.js22
-rw-r--r--jstests/core/group_empty.js9
-rw-r--r--jstests/core/grow_hash_table.js15
-rw-r--r--jstests/core/hashindex1.js131
-rw-r--r--jstests/core/hashtest1.js129
-rw-r--r--jstests/core/hint1.js15
-rw-r--r--jstests/core/hostinfo.js34
-rw-r--r--jstests/core/id1.js22
-rw-r--r--jstests/core/idhack.js95
-rw-r--r--jstests/core/in.js30
-rw-r--r--jstests/core/in2.js49
-rw-r--r--jstests/core/in3.js32
-rw-r--r--jstests/core/in4.js24
-rw-r--r--jstests/core/in5.js70
-rw-r--r--jstests/core/in6.js8
-rw-r--r--jstests/core/in7.js18
-rw-r--r--jstests/core/in8.js20
-rw-r--r--jstests/core/inc-SERVER-7446.js38
-rw-r--r--jstests/core/inc1.js37
-rw-r--r--jstests/core/inc2.js26
-rw-r--r--jstests/core/inc3.js18
-rw-r--r--jstests/core/index1.js32
-rw-r--r--jstests/core/index13.js172
-rw-r--r--jstests/core/index2.js76
-rw-r--r--jstests/core/index3.js10
-rw-r--r--jstests/core/index4.js31
-rw-r--r--jstests/core/index5.js24
-rw-r--r--jstests/core/index6.js6
-rw-r--r--jstests/core/index8.js81
-rw-r--r--jstests/core/index9.js30
-rw-r--r--jstests/core/indexOtherNamespace.js10
-rw-r--r--jstests/core/index_arr1.js20
-rw-r--r--jstests/core/index_arr2.js52
-rw-r--r--jstests/core/index_big1.js31
-rw-r--r--[-rwxr-xr-x]jstests/core/index_bigkeys.js49
-rw-r--r--jstests/core/index_bigkeys_nofail.js42
-rw-r--r--jstests/core/index_bigkeys_update.js16
-rw-r--r--jstests/core/index_bigkeys_validation.js2
-rw-r--r--jstests/core/index_check2.js44
-rw-r--r--jstests/core/index_check3.js87
-rw-r--r--jstests/core/index_check5.js25
-rw-r--r--jstests/core/index_check6.js127
-rw-r--r--jstests/core/index_check7.js15
-rw-r--r--jstests/core/index_create_too_many.js13
-rw-r--r--jstests/core/index_create_with_nul_in_name.js8
-rw-r--r--jstests/core/index_diag.js49
-rw-r--r--jstests/core/index_dropdups_ignore.js10
-rw-r--r--jstests/core/index_elemmatch1.js39
-rw-r--r--jstests/core/index_filter_commands.js96
-rw-r--r--jstests/core/index_many.js26
-rw-r--r--jstests/core/index_many2.js27
-rw-r--r--jstests/core/index_partial_create_drop.js23
-rw-r--r--jstests/core/index_partial_read_ops.js12
-rw-r--r--jstests/core/index_partial_write_ops.js9
-rw-r--r--jstests/core/index_plugins.js28
-rw-r--r--jstests/core/index_sparse1.js66
-rw-r--r--jstests/core/index_sparse2.js33
-rw-r--r--jstests/core/index_stats.js50
-rw-r--r--jstests/core/indexa.js23
-rw-r--r--jstests/core/indexapi.js50
-rw-r--r--jstests/core/indexb.js27
-rw-r--r--jstests/core/indexc.js20
-rw-r--r--jstests/core/indexd.js12
-rw-r--r--jstests/core/indexe.js20
-rw-r--r--jstests/core/indexes_on_indexes.js10
-rw-r--r--jstests/core/indexf.js14
-rw-r--r--jstests/core/indexg.js10
-rw-r--r--jstests/core/indexj.js64
-rw-r--r--jstests/core/indexl.js32
-rw-r--r--jstests/core/indexm.js19
-rw-r--r--jstests/core/indexn.js24
-rw-r--r--jstests/core/indexp.js22
-rw-r--r--jstests/core/indexr.js34
-rw-r--r--jstests/core/indexs.js21
-rw-r--r--jstests/core/indext.js22
-rw-r--r--jstests/core/indexu.js117
-rw-r--r--jstests/core/indexv.js20
-rw-r--r--jstests/core/insert1.js25
-rw-r--r--jstests/core/insert2.js8
-rw-r--r--jstests/core/insert_id_undefined.js4
-rw-r--r--jstests/core/insert_illegal_doc.js2
-rw-r--r--jstests/core/insert_long_index_key.js8
-rw-r--r--jstests/core/invalid_db_name.js11
-rw-r--r--jstests/core/ismaster.js39
-rw-r--r--jstests/core/js1.js22
-rw-r--r--jstests/core/js2.js30
-rw-r--r--jstests/core/js3.js132
-rw-r--r--jstests/core/js4.js81
-rw-r--r--jstests/core/js5.js10
-rw-r--r--jstests/core/js7.js6
-rw-r--r--jstests/core/js8.js37
-rw-r--r--jstests/core/js9.js31
-rw-r--r--jstests/core/json1.js61
-rw-r--r--jstests/core/kill_cursors.js60
-rw-r--r--jstests/core/killop.js49
-rw-r--r--jstests/core/list_collections1.js173
-rw-r--r--jstests/core/list_collections_filter.js18
-rw-r--r--jstests/core/list_indexes.js21
-rw-r--r--jstests/core/list_indexes_invalid.js32
-rw-r--r--jstests/core/loadserverscripts.js36
-rw-r--r--jstests/core/loglong.js24
-rw-r--r--jstests/core/logprocessdetails.js12
-rw-r--r--jstests/core/long_index_rename.js16
-rw-r--r--jstests/core/map1.js34
-rw-r--r--jstests/core/max_doc_size.js54
-rw-r--r--jstests/core/max_time_ms.js365
-rw-r--r--jstests/core/maxscan.js16
-rw-r--r--jstests/core/minmax.js76
-rw-r--r--jstests/core/minmax_edge.js144
-rw-r--r--jstests/core/mod1.js38
-rw-r--r--jstests/core/mr1.js227
-rw-r--r--jstests/core/mr2.js90
-rw-r--r--jstests/core/mr3.js73
-rw-r--r--jstests/core/mr4.js49
-rw-r--r--jstests/core/mr5.js65
-rw-r--r--jstests/core/mr_bigobject.js33
-rw-r--r--jstests/core/mr_bigobject_replace.js27
-rw-r--r--jstests/core/mr_comments.js42
-rw-r--r--jstests/core/mr_errorhandling.js43
-rw-r--r--jstests/core/mr_index.js42
-rw-r--r--jstests/core/mr_index2.js25
-rw-r--r--jstests/core/mr_index3.js107
-rw-r--r--jstests/core/mr_killop.js202
-rw-r--r--jstests/core/mr_merge.js59
-rw-r--r--jstests/core/mr_merge2.js48
-rw-r--r--jstests/core/mr_mutable_properties.js42
-rw-r--r--jstests/core/mr_optim.js30
-rw-r--r--jstests/core/mr_outreduce.js51
-rw-r--r--jstests/core/mr_outreduce2.js29
-rw-r--r--jstests/core/mr_replaceIntoDB.js46
-rw-r--r--jstests/core/mr_sort.js51
-rw-r--r--jstests/core/mr_stored.js82
-rw-r--r--jstests/core/mr_undef.js24
-rw-r--r--jstests/core/multi.js26
-rw-r--r--jstests/core/multi2.js30
-rw-r--r--jstests/core/multikey_geonear.js8
-rw-r--r--jstests/core/ne1.js12
-rw-r--r--jstests/core/ne2.js18
-rw-r--r--jstests/core/ne3.js22
-rw-r--r--jstests/core/nestedarr1.js27
-rw-r--r--jstests/core/nestedobj1.js26
-rw-r--r--jstests/core/nin.js104
-rw-r--r--jstests/core/nin2.js64
-rw-r--r--jstests/core/no_db_created.js11
-rw-r--r--jstests/core/not1.js23
-rw-r--r--jstests/core/not2.js120
-rw-r--r--jstests/core/not3.js8
-rw-r--r--jstests/core/notablescan.js42
-rw-r--r--jstests/core/ns_length.js11
-rw-r--r--jstests/core/null.js26
-rw-r--r--jstests/core/null2.js54
-rw-r--r--jstests/core/null_field_name.js12
-rw-r--r--jstests/core/numberint.js116
-rw-r--r--jstests/core/numberlong.js171
-rw-r--r--jstests/core/numberlong2.js15
-rw-r--r--jstests/core/numberlong3.js24
-rw-r--r--jstests/core/numberlong4.js24
-rw-r--r--jstests/core/objid1.js20
-rw-r--r--jstests/core/objid2.js6
-rw-r--r--jstests/core/objid3.js7
-rw-r--r--jstests/core/objid4.js23
-rw-r--r--jstests/core/objid5.js20
-rw-r--r--jstests/core/objid7.js15
-rw-r--r--jstests/core/opcounters_active.js45
-rw-r--r--jstests/core/opcounters_write_cmd.js74
-rw-r--r--jstests/core/or1.js75
-rw-r--r--jstests/core/or2.js79
-rw-r--r--jstests/core/or3.js86
-rw-r--r--jstests/core/or4.js116
-rw-r--r--jstests/core/or5.js96
-rw-r--r--jstests/core/or7.js40
-rw-r--r--jstests/core/or8.js28
-rw-r--r--jstests/core/or9.js48
-rw-r--r--jstests/core/or_inexact.js120
-rw-r--r--jstests/core/ora.js10
-rw-r--r--jstests/core/orb.js15
-rw-r--r--jstests/core/orc.js52
-rw-r--r--jstests/core/ord.js24
-rw-r--r--jstests/core/ore.js10
-rw-r--r--jstests/core/orf.js20
-rw-r--r--jstests/core/org.js16
-rw-r--r--jstests/core/orh.js14
-rw-r--r--jstests/core/orj.js228
-rw-r--r--jstests/core/ork.js22
-rw-r--r--jstests/core/oro.js23
-rw-r--r--jstests/core/orp.js41
-rw-r--r--jstests/core/plan_cache_clear.js2
-rw-r--r--jstests/core/plan_cache_list_plans.js24
-rw-r--r--jstests/core/plan_cache_list_shapes.js9
-rw-r--r--jstests/core/plan_cache_shell_helpers.js100
-rw-r--r--jstests/core/pop_server_13516.js6
-rw-r--r--jstests/core/profile1.js17
-rw-r--r--jstests/core/profile2.js6
-rw-r--r--jstests/core/profile3.js45
-rw-r--r--jstests/core/profile4.js47
-rw-r--r--jstests/core/profile5.js3
-rw-r--r--jstests/core/profile_no_such_db.js64
-rw-r--r--jstests/core/proj_key1.js15
-rw-r--r--jstests/core/pull.js36
-rw-r--r--jstests/core/pull2.js42
-rw-r--r--jstests/core/pull_or.js21
-rw-r--r--jstests/core/pull_remove1.js18
-rw-r--r--jstests/core/pullall.js34
-rw-r--r--jstests/core/pullall2.js33
-rw-r--r--jstests/core/push.js75
-rw-r--r--jstests/core/push2.js12
-rw-r--r--jstests/core/push_sort.js77
-rw-r--r--jstests/core/pushall.js26
-rw-r--r--jstests/core/query1.js28
-rw-r--r--jstests/core/queryoptimizer3.js40
-rw-r--r--jstests/core/queryoptimizer6.js6
-rw-r--r--jstests/core/queryoptimizera.js60
-rw-r--r--jstests/core/read_after_optime.js20
-rw-r--r--jstests/core/recursion.js24
-rw-r--r--jstests/core/ref.js23
-rw-r--r--jstests/core/ref2.js21
-rw-r--r--jstests/core/ref3.js15
-rw-r--r--jstests/core/ref4.js20
-rw-r--r--jstests/core/regex.js26
-rw-r--r--jstests/core/regex2.js83
-rw-r--r--jstests/core/regex3.js55
-rw-r--r--jstests/core/regex4.js29
-rw-r--r--jstests/core/regex5.js40
-rw-r--r--jstests/core/regex6.js78
-rw-r--r--jstests/core/regex7.js36
-rw-r--r--jstests/core/regex8.js22
-rw-r--r--jstests/core/regex9.js12
-rw-r--r--jstests/core/regex_embed1.js29
-rw-r--r--jstests/core/regex_limit.js15
-rw-r--r--jstests/core/regex_not_id.js6
-rw-r--r--jstests/core/regex_options.js8
-rw-r--r--jstests/core/regex_util.js43
-rw-r--r--jstests/core/regexa.js16
-rw-r--r--jstests/core/regexb.js11
-rw-r--r--jstests/core/regexc.js6
-rw-r--r--jstests/core/remove.js25
-rw-r--r--jstests/core/remove2.js38
-rw-r--r--jstests/core/remove3.js18
-rw-r--r--jstests/core/remove4.js12
-rw-r--r--jstests/core/remove6.js39
-rw-r--r--jstests/core/remove7.js39
-rw-r--r--jstests/core/remove8.js20
-rw-r--r--jstests/core/remove9.js13
-rw-r--r--jstests/core/remove_justone.js18
-rw-r--r--jstests/core/remove_undefined.js57
-rw-r--r--jstests/core/removea.js24
-rw-r--r--jstests/core/removeb.js35
-rw-r--r--jstests/core/removec.js22
-rw-r--r--jstests/core/rename.js67
-rw-r--r--jstests/core/rename2.js16
-rw-r--r--jstests/core/rename3.js24
-rw-r--r--jstests/core/rename4.js187
-rw-r--r--jstests/core/rename5.js46
-rw-r--r--jstests/core/rename6.js24
-rw-r--r--jstests/core/rename7.js62
-rw-r--r--jstests/core/rename8.js5
-rw-r--r--jstests/core/rename_stayTemp.js25
-rw-r--r--jstests/core/repair_database.js17
-rw-r--r--jstests/core/repair_server12955.js8
-rw-r--r--jstests/core/return_key.js22
-rw-r--r--jstests/core/role_management_helpers.js222
-rw-r--r--jstests/core/run_program1.js12
-rw-r--r--jstests/core/server1470.js22
-rw-r--r--jstests/core/server14753.js8
-rw-r--r--jstests/core/server5346.js18
-rw-r--r--jstests/core/server7756.js11
-rw-r--r--jstests/core/server9385.js14
-rw-r--r--jstests/core/server9547.js2
-rw-r--r--jstests/core/set1.js8
-rw-r--r--jstests/core/set2.js21
-rw-r--r--jstests/core/set3.js9
-rw-r--r--jstests/core/set4.js24
-rw-r--r--jstests/core/set5.js12
-rw-r--r--jstests/core/set6.js24
-rw-r--r--jstests/core/set7.js72
-rw-r--r--jstests/core/set_param1.js172
-rw-r--r--jstests/core/shell1.js7
-rw-r--r--jstests/core/shell_writeconcern.js36
-rw-r--r--jstests/core/shellkillop.js75
-rw-r--r--jstests/core/shelltypes.js51
-rw-r--r--jstests/core/show_record_id.js26
-rw-r--r--jstests/core/skip1.js28
-rw-r--r--jstests/core/slice1.js82
-rw-r--r--jstests/core/snapshot_queries.js2
-rw-r--r--jstests/core/sort1.js67
-rw-r--r--jstests/core/sort10.js26
-rw-r--r--jstests/core/sort2.js34
-rw-r--r--jstests/core/sort3.js21
-rw-r--r--jstests/core/sort4.js49
-rw-r--r--jstests/core/sort5.js32
-rw-r--r--jstests/core/sort6.js40
-rw-r--r--jstests/core/sort7.js28
-rw-r--r--jstests/core/sort8.js36
-rw-r--r--jstests/core/sort9.js34
-rw-r--r--jstests/core/sort_numeric.js36
-rw-r--r--jstests/core/sortb.js24
-rw-r--r--jstests/core/sortc.js34
-rw-r--r--jstests/core/sortd.js67
-rw-r--r--jstests/core/sortf.js16
-rw-r--r--jstests/core/sortg.js52
-rw-r--r--jstests/core/sorth.js162
-rw-r--r--jstests/core/sorti.js28
-rw-r--r--jstests/core/sortj.js14
-rw-r--r--jstests/core/sortk.js164
-rw-r--r--jstests/core/sortl.js24
-rw-r--r--jstests/core/splitvector.js261
-rw-r--r--jstests/core/stages_and_hash.js42
-rw-r--r--jstests/core/stages_and_sorted.js57
-rw-r--r--jstests/core/stages_collection_scan.js18
-rw-r--r--jstests/core/stages_delete.js12
-rw-r--r--jstests/core/stages_fetch.js36
-rw-r--r--jstests/core/stages_ixscan.js96
-rw-r--r--jstests/core/stages_limit_skip.js23
-rw-r--r--jstests/core/stages_mergesort.js38
-rw-r--r--jstests/core/stages_or.js36
-rw-r--r--jstests/core/stages_sort.js32
-rw-r--r--jstests/core/stages_text.js6
-rw-r--r--jstests/core/startup_log.js141
-rw-r--r--jstests/core/storageDetailsCommand.js1
-rw-r--r--jstests/core/storefunc.js93
-rw-r--r--jstests/core/string_with_nul_bytes.js4
-rw-r--r--jstests/core/sub1.js13
-rw-r--r--jstests/core/system_profile.js32
-rw-r--r--jstests/core/tailable_skip_limit.js4
-rw-r--r--jstests/core/temp_cleanup.js20
-rw-r--r--jstests/core/test_command_line_test_helpers.js8
-rw-r--r--jstests/core/testminmax.js31
-rw-r--r--jstests/core/top.js56
-rw-r--r--jstests/core/ts1.js39
-rw-r--r--jstests/core/type1.js29
-rw-r--r--jstests/core/type2.js16
-rw-r--r--jstests/core/type3.js52
-rw-r--r--jstests/core/type4.js14
-rw-r--r--jstests/core/type5.js8
-rw-r--r--jstests/core/type6.js12
-rw-r--r--jstests/core/type7.js2
-rw-r--r--jstests/core/type8.js2
-rw-r--r--jstests/core/uniqueness.js57
-rw-r--r--jstests/core/unset.js25
-rw-r--r--jstests/core/unset2.js32
-rw-r--r--jstests/core/update2.js20
-rw-r--r--jstests/core/update3.js30
-rw-r--r--jstests/core/update5.js56
-rw-r--r--jstests/core/update6.js55
-rw-r--r--jstests/core/update7.js167
-rw-r--r--jstests/core/update8.js10
-rw-r--r--jstests/core/update9.js21
-rw-r--r--jstests/core/update_addToSet.js77
-rw-r--r--jstests/core/update_addToSet2.js12
-rw-r--r--jstests/core/update_addToSet3.js19
-rw-r--r--jstests/core/update_arraymatch1.js21
-rw-r--r--jstests/core/update_arraymatch2.js22
-rw-r--r--jstests/core/update_arraymatch3.js19
-rw-r--r--jstests/core/update_arraymatch4.js21
-rw-r--r--jstests/core/update_arraymatch5.js21
-rw-r--r--jstests/core/update_arraymatch6.js10
-rw-r--r--jstests/core/update_arraymatch7.js8
-rw-r--r--jstests/core/update_arraymatch8.js184
-rw-r--r--jstests/core/update_bit_examples.js6
-rw-r--r--jstests/core/update_blank1.js13
-rw-r--r--jstests/core/update_currentdate_examples.js6
-rw-r--r--jstests/core/update_dbref.js20
-rw-r--r--jstests/core/update_find_and_modify_id.js7
-rw-r--r--jstests/core/update_invalid1.js4
-rw-r--r--jstests/core/update_min_max_examples.js56
-rw-r--r--jstests/core/update_mul_examples.js10
-rw-r--r--jstests/core/update_multi3.js27
-rw-r--r--jstests/core/update_multi4.js18
-rw-r--r--jstests/core/update_multi5.js14
-rw-r--r--jstests/core/update_multi6.js9
-rw-r--r--jstests/core/update_replace.js14
-rw-r--r--jstests/core/update_server-12848.js12
-rw-r--r--jstests/core/update_setOnInsert.js32
-rw-r--r--jstests/core/updatea.js63
-rw-r--r--jstests/core/updateb.js10
-rw-r--r--jstests/core/updatec.js12
-rw-r--r--jstests/core/updated.js30
-rw-r--r--jstests/core/updatee.js57
-rw-r--r--jstests/core/updatef.js17
-rw-r--r--jstests/core/updateg.js16
-rw-r--r--jstests/core/updateh.js66
-rw-r--r--jstests/core/updatei.js60
-rw-r--r--jstests/core/updatej.js10
-rw-r--r--jstests/core/updatek.js13
-rw-r--r--jstests/core/updatel.js45
-rw-r--r--jstests/core/updatem.js14
-rw-r--r--jstests/core/upsert_and.js18
-rw-r--r--jstests/core/upsert_fields.js231
-rw-r--r--jstests/core/upsert_shell.js60
-rw-r--r--[-rwxr-xr-x]jstests/core/useindexonobjgtlt.js14
-rw-r--r--jstests/core/user_management_helpers.js171
-rw-r--r--jstests/core/validate_cmd_ns.js11
-rw-r--r--jstests/core/validate_pseudocommand_ns.js20
-rw-r--r--jstests/core/validate_user_documents.js37
-rw-r--r--jstests/core/verify_update_mods.js68
-rw-r--r--jstests/core/where1.js62
-rw-r--r--jstests/core/where2.js12
-rw-r--r--jstests/core/where3.js20
-rw-r--r--jstests/core/where4.js40
-rw-r--r--jstests/core/write_result.js53
-rw-r--r--jstests/decimal/decimal_constructors.js25
-rw-r--r--jstests/decimal/decimal_find_basic.js52
-rw-r--r--jstests/decimal/decimal_find_mixed.js124
-rw-r--r--jstests/decimal/decimal_find_query.js61
-rw-r--r--jstests/decimal/decimal_update.js35
-rw-r--r--jstests/disk/datafile_options.js38
-rw-r--r--jstests/disk/dbNoCreate.js14
-rw-r--r--jstests/disk/directoryperdb.js126
-rw-r--r--jstests/disk/diskfull.js4
-rw-r--r--jstests/disk/filesize.js13
-rw-r--r--jstests/disk/index_options.js36
-rw-r--r--jstests/disk/killall.js27
-rw-r--r--jstests/disk/newcollection.js15
-rw-r--r--jstests/disk/preallocate.js26
-rw-r--r--jstests/disk/preallocate2.js10
-rw-r--r--jstests/disk/preallocate_directoryperdb.js39
-rw-r--r--jstests/disk/quota.js46
-rw-r--r--jstests/disk/quota2.js47
-rw-r--r--jstests/disk/quota3.js27
-rw-r--r--jstests/disk/repair.js46
-rw-r--r--jstests/disk/repair2.js113
-rw-r--r--jstests/disk/repair3.js55
-rw-r--r--jstests/disk/repair4.js40
-rw-r--r--jstests/disk/repair5.js42
-rw-r--r--jstests/disk/too_many_fds.js13
-rw-r--r--[-rwxr-xr-x]jstests/dur/a_quick.js81
-rw-r--r--jstests/dur/checksum.js39
-rw-r--r--jstests/dur/closeall.js65
-rw-r--r--jstests/dur/diskfull.js115
-rw-r--r--jstests/dur/dropdb.js41
-rw-r--r--[-rwxr-xr-x]jstests/dur/dur1.js106
-rw-r--r--[-rwxr-xr-x]jstests/dur/dur1_tool.js110
-rw-r--r--jstests/dur/dur2.js78
-rw-r--r--jstests/dur/indexbg.js6
-rw-r--r--jstests/dur/indexbg2.js25
-rw-r--r--jstests/dur/journaling_options.js149
-rw-r--r--[-rwxr-xr-x]jstests/dur/lsn.js88
-rw-r--r--[-rwxr-xr-x]jstests/dur/manyRestart.js111
-rw-r--r--[-rwxr-xr-x]jstests/dur/oplog.js120
-rw-r--r--jstests/fail_point/fail_point.js39
-rw-r--r--jstests/gle/block2.js49
-rw-r--r--jstests/gle/core/error1.js43
-rw-r--r--jstests/gle/core/error3.js8
-rw-r--r--jstests/gle/core/gle_example.js13
-rw-r--r--jstests/gle/core/gle_shell_server5441.js14
-rw-r--r--jstests/gle/core/remove5.js30
-rw-r--r--jstests/gle/core/update4.js42
-rw-r--r--jstests/gle/create_index_gle.js74
-rw-r--r--jstests/gle/get_last_error.js17
-rw-r--r--jstests/gle/gle_explicit_optime.js47
-rw-r--r--jstests/gle/gle_sharded_wc.js250
-rw-r--r--jstests/gle/gle_sharded_write.js101
-rw-r--r--jstests/gle/opcounters_legacy.js87
-rw-r--r--jstests/gle/sync1.js72
-rw-r--r--jstests/gle/sync4.js22
-rw-r--r--jstests/gle/sync8.js10
-rw-r--r--jstests/gle/updated_existing.js18
-rw-r--r--jstests/httpinterface/httpinterface.js12
-rw-r--r--jstests/httpinterface/network_options.js227
-rw-r--r--jstests/httpinterface/sharding_configdb_on_default_ports.js10
-rw-r--r--jstests/libs/analyze_plan.js13
-rw-r--r--jstests/libs/chunk_manipulation_util.js168
-rw-r--r--jstests/libs/cleanup_orphaned_util.js62
-rw-r--r--jstests/libs/command_line/test_parsed_options.js34
-rw-r--r--jstests/libs/csrs_upgrade_util.js438
-rw-r--r--jstests/libs/election_timing_test.js60
-rw-r--r--jstests/libs/fts.js33
-rw-r--r--jstests/libs/geo_near_random.js69
-rw-r--r--jstests/libs/host_ipaddr.js13
-rw-r--r--jstests/libs/override_methods/find_batch_size.js2
-rw-r--r--jstests/libs/override_methods/implicitly_shard_accessed_collections.js8
-rw-r--r--jstests/libs/override_methods/set_majority_read_and_write_concerns.js32
-rw-r--r--jstests/libs/override_methods/sharding_continuous_config_stepdown.js353
-rw-r--r--jstests/libs/parallelTester.js354
-rw-r--r--jstests/libs/slow_weekly_util.js14
-rw-r--r--jstests/libs/ssl_test.js8
-rw-r--r--jstests/libs/test_background_ops.js459
-rw-r--r--jstests/libs/trace_missing_docs.js112
-rw-r--r--jstests/mmap_v1/capped2.js80
-rw-r--r--jstests/mmap_v1/capped3.js58
-rw-r--r--jstests/mmap_v1/capped7.js78
-rw-r--r--jstests/mmap_v1/capped8.js61
-rw-r--r--jstests/mmap_v1/capped_max.js23
-rw-r--r--jstests/mmap_v1/capped_server13912.js4
-rw-r--r--jstests/mmap_v1/capped_server2639.js21
-rw-r--r--jstests/mmap_v1/capped_server7543.js9
-rw-r--r--jstests/mmap_v1/collmod.js106
-rw-r--r--jstests/mmap_v1/compact.js69
-rw-r--r--jstests/mmap_v1/compactPreservePadding.js6
-rw-r--r--jstests/mmap_v1/datasize.js58
-rw-r--r--jstests/mmap_v1/datasize3.js39
-rw-r--r--jstests/mmap_v1/disk_reuse1.js30
-rw-r--r--jstests/mmap_v1/drop.js23
-rw-r--r--jstests/mmap_v1/dur_big_atomic_update.js23
-rw-r--r--jstests/mmap_v1/dur_remove_old_journals.js8
-rw-r--r--jstests/mmap_v1/extent.js9
-rw-r--r--jstests/mmap_v1/extent2.js24
-rw-r--r--jstests/mmap_v1/index_check1.js26
-rw-r--r--jstests/mmap_v1/indexh.js38
-rw-r--r--jstests/mmap_v1/indexi.js11
-rw-r--r--jstests/mmap_v1/list_collections2.js40
-rw-r--r--jstests/mmap_v1/list_indexes2.js13
-rw-r--r--jstests/mmap_v1/repair_cursor1.js22
-rw-r--r--jstests/mmap_v1/reverse_empty_extent.js13
-rw-r--r--jstests/mmap_v1/stats.js20
-rw-r--r--jstests/mmap_v1/touch1.js14
-rw-r--r--jstests/mmap_v1/update.js26
-rw-r--r--jstests/mmap_v1/use_power_of_2.js42
-rw-r--r--jstests/mmap_v1/use_power_of_2_a.js12
-rw-r--r--jstests/multiVersion/1_test_launching_replset.js39
-rw-r--r--jstests/multiVersion/2_test_launching_cluster.js264
-rw-r--r--jstests/multiVersion/3_upgrade_replset.js83
-rw-r--r--jstests/multiVersion/balancer_multiVersion_detect.js22
-rw-r--r--jstests/multiVersion/downgrade_replset.js12
-rw-r--r--jstests/multiVersion/dumprestore.js20
-rw-r--r--jstests/multiVersion/dumprestore_sharded.js40
-rw-r--r--jstests/multiVersion/geo_2dsphere_v2_to_v3.js25
-rw-r--r--jstests/multiVersion/initialize_from_old_node.js2
-rw-r--r--jstests/multiVersion/initialsync.js11
-rw-r--r--jstests/multiVersion/invalid_key_pattern_upgrade.js41
-rw-r--r--jstests/multiVersion/libs/auth_helpers.js16
-rw-r--r--jstests/multiVersion/libs/data_generators.js237
-rw-r--r--jstests/multiVersion/libs/dumprestore_helpers.js100
-rw-r--r--jstests/multiVersion/libs/multi_cluster.js64
-rw-r--r--jstests/multiVersion/libs/multi_rs.js24
-rw-r--r--jstests/multiVersion/libs/verify_collection_data.js78
-rw-r--r--jstests/multiVersion/libs/verify_versions.js18
-rw-r--r--jstests/multiVersion/migration_between_mixed_version_mongods.js180
-rw-r--r--jstests/multiVersion/minor_version_downgrade_replset.js12
-rw-r--r--jstests/multiVersion/minor_version_tags_new_old_new.js156
-rw-r--r--jstests/multiVersion/minor_version_tags_old_new_old.js156
-rw-r--r--jstests/multiVersion/minor_version_upgrade_replset.js83
-rw-r--r--jstests/multiVersion/mixed_storage_version_replication.js252
-rw-r--r--jstests/multiVersion/mmapv1_overrides_default_storage_engine.js47
-rw-r--r--jstests/multiVersion/partial_index_upgrade.js47
-rw-r--r--jstests/multiVersion/transitioning_to_and_from_WT.js65
-rw-r--r--jstests/multiVersion/upgrade_cluster.js164
-rw-r--r--jstests/multiVersion/wt_index_option_defaults_replset.js35
-rw-r--r--jstests/noPassthrough/awaitdata_getmore_cmd.js31
-rw-r--r--jstests/noPassthrough/backup_restore.js110
-rw-r--r--jstests/noPassthrough/balancer_window.js234
-rw-r--r--jstests/noPassthrough/command_line_parsing.js52
-rw-r--r--jstests/noPassthrough/commands_handle_kill.js44
-rw-r--r--jstests/noPassthrough/count_helper_read_preference.js18
-rw-r--r--jstests/noPassthrough/devnull.js6
-rw-r--r--jstests/noPassthrough/dir_per_db_and_split.js38
-rw-r--r--jstests/noPassthrough/directoryperdb.js38
-rw-r--r--jstests/noPassthrough/exit_logging.js96
-rw-r--r--jstests/noPassthrough/ftdc_setparam.js10
-rw-r--r--jstests/noPassthrough/geo_full.js639
-rw-r--r--jstests/noPassthrough/geo_mnypts_plus_fields.js123
-rw-r--r--jstests/noPassthrough/geo_near_random1.js11
-rw-r--r--jstests/noPassthrough/geo_near_random2.js15
-rw-r--r--jstests/noPassthrough/index_partial_no_explain_cmds.js24
-rw-r--r--jstests/noPassthrough/indexbg1.js105
-rw-r--r--jstests/noPassthrough/indexbg2.js34
-rw-r--r--jstests/noPassthrough/initial_sync_cloner_dups.js194
-rw-r--r--jstests/noPassthrough/javascript_options.js36
-rw-r--r--jstests/noPassthrough/js_protection.js95
-rw-r--r--jstests/noPassthrough/js_protection_roundtrip.js77
-rw-r--r--jstests/noPassthrough/lock_file.js7
-rw-r--r--jstests/noPassthrough/lock_file_fail_to_open.js11
-rw-r--r--jstests/noPassthrough/lock_stats.js26
-rw-r--r--jstests/noPassthrough/logging_options.js89
-rw-r--r--jstests/noPassthrough/minvalid.js11
-rw-r--r--jstests/noPassthrough/minvalid2.js29
-rw-r--r--jstests/noPassthrough/ns1.js57
-rw-r--r--jstests/noPassthrough/parameters.js15
-rw-r--r--jstests/noPassthrough/profile_options.js37
-rw-r--r--jstests/noPassthrough/query_yield1.js134
-rw-r--r--jstests/noPassthrough/query_yield2.js236
-rw-r--r--jstests/noPassthrough/query_yield_reset_timer.js28
-rw-r--r--jstests/noPassthrough/read_committed_lookup.js42
-rw-r--r--jstests/noPassthrough/read_concern_helper.js66
-rw-r--r--jstests/noPassthrough/read_majority.js354
-rw-r--r--jstests/noPassthrough/read_only_command_line.js5
-rw-r--r--jstests/noPassthrough/refresh_syncclusterconn.js8
-rw-r--r--jstests/noPassthrough/repair2.js27
-rw-r--r--jstests/noPassthrough/repl_write_threads_start_param.js6
-rw-r--r--jstests/noPassthrough/server22767.js7
-rw-r--r--jstests/noPassthrough/server_status.js20
-rw-r--r--jstests/noPassthrough/shard_does_not_hang_on_bad_config_server.js42
-rw-r--r--jstests/noPassthrough/split_collections_and_indexes.js32
-rw-r--r--jstests/noPassthrough/stepdown_query.js6
-rw-r--r--jstests/noPassthrough/sync_write.js6
-rw-r--r--jstests/noPassthrough/ttl_capped.js15
-rw-r--r--jstests/noPassthrough/ttl_partial_index.js16
-rw-r--r--jstests/noPassthrough/update_server-5552.js28
-rw-r--r--jstests/noPassthrough/update_yield1.js7
-rw-r--r--jstests/noPassthrough/write_local.js32
-rw-r--r--jstests/noPassthrough/wt_index_option_defaults.js69
-rw-r--r--jstests/noPassthrough/wt_nojournal_fsync.js22
-rw-r--r--jstests/noPassthrough/wt_nojournal_repl.js33
-rw-r--r--jstests/noPassthrough/wt_nojournal_skip_recovery.js5
-rw-r--r--jstests/noPassthrough/wt_nojournal_toggle.js19
-rw-r--r--jstests/noPassthroughWithMongod/apply_ops_errors.js55
-rw-r--r--jstests/noPassthroughWithMongod/background.js28
-rw-r--r--jstests/noPassthroughWithMongod/bench_test_crud_commands.js51
-rw-r--r--jstests/noPassthroughWithMongod/benchrun_substitution.js74
-rw-r--r--jstests/noPassthroughWithMongod/btreedel.js8
-rw-r--r--jstests/noPassthroughWithMongod/bulk_api_limits.js48
-rw-r--r--jstests/noPassthroughWithMongod/capped4.js32
-rw-r--r--jstests/noPassthroughWithMongod/capped_truncate.js33
-rw-r--r--jstests/noPassthroughWithMongod/clonecollection.js56
-rw-r--r--jstests/noPassthroughWithMongod/connections_opened.js71
-rw-r--r--jstests/noPassthroughWithMongod/create_indexes_shell_helper.js37
-rw-r--r--jstests/noPassthroughWithMongod/cursor8.js20
-rw-r--r--jstests/noPassthroughWithMongod/default_read_pref.js17
-rw-r--r--jstests/noPassthroughWithMongod/dup_bgindex.js12
-rw-r--r--jstests/noPassthroughWithMongod/explain1.js9
-rw-r--r--jstests/noPassthroughWithMongod/explain2.js14
-rw-r--r--jstests/noPassthroughWithMongod/explain3.js9
-rw-r--r--jstests/noPassthroughWithMongod/external_sort_text_agg.js11
-rw-r--r--jstests/noPassthroughWithMongod/find_and_modify_server16469.js19
-rw-r--r--jstests/noPassthroughWithMongod/fsync2.js51
-rw-r--r--jstests/noPassthroughWithMongod/ftdc_params.js14
-rw-r--r--jstests/noPassthroughWithMongod/geo_axis_aligned.js124
-rw-r--r--jstests/noPassthroughWithMongod/geo_mnypts.js48
-rw-r--r--jstests/noPassthroughWithMongod/geo_near_random1.js3
-rw-r--r--jstests/noPassthroughWithMongod/geo_near_random2.js10
-rw-r--r--jstests/noPassthroughWithMongod/geo_polygon.js78
-rw-r--r--jstests/noPassthroughWithMongod/getmore_error.js12
-rw-r--r--jstests/noPassthroughWithMongod/huge_multikey_index.js8
-rw-r--r--jstests/noPassthroughWithMongod/index_check10.js104
-rw-r--r--jstests/noPassthroughWithMongod/index_check9.js218
-rw-r--r--jstests/noPassthroughWithMongod/index_hammer1.js45
-rw-r--r--jstests/noPassthroughWithMongod/index_killop.js58
-rw-r--r--jstests/noPassthroughWithMongod/index_multi.js100
-rw-r--r--jstests/noPassthroughWithMongod/index_no_retry.js63
-rw-r--r--jstests/noPassthroughWithMongod/index_retry.js61
-rw-r--r--jstests/noPassthroughWithMongod/indexbg_drop.js53
-rw-r--r--jstests/noPassthroughWithMongod/indexbg_interrupts.js48
-rw-r--r--jstests/noPassthroughWithMongod/indexbg_restart_secondary.js49
-rw-r--r--jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js52
-rw-r--r--jstests/noPassthroughWithMongod/insertMulti.js30
-rw-r--r--jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js113
-rw-r--r--jstests/noPassthroughWithMongod/log_component_helpers.js11
-rw-r--r--jstests/noPassthroughWithMongod/logop_rollback.js7
-rw-r--r--jstests/noPassthroughWithMongod/logpath.js51
-rw-r--r--jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js19
-rw-r--r--[-rwxr-xr-x]jstests/noPassthroughWithMongod/moveprimary-replset.js105
-rw-r--r--jstests/noPassthroughWithMongod/mr_noscripting.js21
-rw-r--r--jstests/noPassthroughWithMongod/mr_writeconflict.js64
-rw-r--r--jstests/noPassthroughWithMongod/newcollection2.js12
-rw-r--r--jstests/noPassthroughWithMongod/no_balance_collection.js80
-rw-r--r--jstests/noPassthroughWithMongod/parallel_collection_scan.js20
-rw-r--r--jstests/noPassthroughWithMongod/query_oplogreplay.js12
-rw-r--r--jstests/noPassthroughWithMongod/reconfigwt.js7
-rw-r--r--jstests/noPassthroughWithMongod/recstore.js16
-rw-r--r--jstests/noPassthroughWithMongod/remove9.js11
-rw-r--r--jstests/noPassthroughWithMongod/replReads.js120
-rw-r--r--jstests/noPassthroughWithMongod/replica_set_shard_version.js44
-rw-r--r--jstests/noPassthroughWithMongod/rpc_protocols.js62
-rw-r--r--jstests/noPassthroughWithMongod/server7428.js16
-rw-r--r--jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js68
-rw-r--r--jstests/noPassthroughWithMongod/sharding_rs_arb1.js27
-rw-r--r--jstests/noPassthroughWithMongod/shelllimit.js8
-rw-r--r--jstests/noPassthroughWithMongod/temp_namespace.js45
-rw-r--r--jstests/noPassthroughWithMongod/testing_only_commands.js23
-rw-r--r--jstests/noPassthroughWithMongod/ttl1.js64
-rw-r--r--jstests/noPassthroughWithMongod/ttl_repl.js59
-rw-r--r--jstests/noPassthroughWithMongod/ttl_repl_maintenance.js12
-rw-r--r--jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js34
-rw-r--r--jstests/noPassthroughWithMongod/ttl_sharded.js73
-rw-r--r--jstests/noPassthroughWithMongod/unix_socket1.js19
-rw-r--r--jstests/noPassthroughWithMongod/validate_command.js11
-rw-r--r--jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js24
-rw-r--r--jstests/parallel/allops.js40
-rw-r--r--jstests/parallel/basic.js16
-rw-r--r--jstests/parallel/basicPlus.js35
-rw-r--r--jstests/parallel/checkMultiThread.js18
-rw-r--r--jstests/parallel/del.js81
-rw-r--r--jstests/parallel/insert.js20
-rw-r--r--jstests/parallel/manyclients.js32
-rw-r--r--jstests/parallel/repl.js68
-rw-r--r--jstests/parallel/shellfork.js50
-rw-r--r--jstests/parallel/update_serializability1.js20
-rw-r--r--jstests/parallel/update_serializability2.js21
-rw-r--r--[-rwxr-xr-x]jstests/perf/compact_speed_test.js33
-rw-r--r--jstests/perf/find1.js89
-rw-r--r--jstests/perf/geo_near1.js7
-rw-r--r--jstests/perf/index1.js24
-rw-r--r--jstests/perf/mr_bench.js77
-rw-r--r--jstests/perf/remove1.js52
-rw-r--r--jstests/perf/v8_mapreduce.js35
-rw-r--r--jstests/readonly/aggregate.js145
-rw-r--r--jstests/readonly/count.js8
-rw-r--r--jstests/readonly/find.js7
-rw-r--r--jstests/readonly/geo.js39
-rw-r--r--jstests/readonly/lib/read_only_test.js11
-rw-r--r--jstests/readonly/server_status.js3
-rw-r--r--jstests/repl/basic1.js204
-rw-r--r--jstests/repl/batch_write_command_wc_repl.js27
-rw-r--r--jstests/repl/block1.js22
-rw-r--r--jstests/repl/block2.js30
-rw-r--r--jstests/repl/master1.js50
-rw-r--r--jstests/repl/mod_move.js42
-rw-r--r--jstests/repl/repair.js14
-rw-r--r--jstests/repl/repl1.js96
-rw-r--r--jstests/repl/repl10.js52
-rw-r--r--jstests/repl/repl12.js46
-rw-r--r--jstests/repl/repl13.js64
-rw-r--r--jstests/repl/repl14.js87
-rw-r--r--jstests/repl/repl15.js67
-rw-r--r--jstests/repl/repl16.js72
-rw-r--r--jstests/repl/repl17.js49
-rw-r--r--jstests/repl/repl2.js59
-rw-r--r--jstests/repl/repl3.js31
-rw-r--r--jstests/repl/repl4.js58
-rw-r--r--jstests/repl/repl5.js38
-rw-r--r--jstests/repl/repl6.js114
-rw-r--r--jstests/repl/repl7.js65
-rw-r--r--jstests/repl/repl8.js65
-rw-r--r--jstests/repl/repl9.js71
-rw-r--r--jstests/repl/repl_sync_only_db_with_special_chars.js24
-rw-r--r--jstests/repl/snapshot1.js64
-rw-r--r--jstests/replsets/apply_batch_only_goes_forward.js34
-rw-r--r--jstests/replsets/apply_ops_lastop.js114
-rw-r--r--jstests/replsets/apply_ops_wc.js79
-rw-r--r--jstests/replsets/auth1.js131
-rw-r--r--jstests/replsets/auth2.js36
-rw-r--r--jstests/replsets/auth3.js27
-rw-r--r--jstests/replsets/auth_no_pri.js46
-rw-r--r--jstests/replsets/await_replication_timeout.js76
-rw-r--r--jstests/replsets/background_index.js9
-rw-r--r--jstests/replsets/batch_write_command_wc.js83
-rw-r--r--jstests/replsets/buildindexes.js112
-rw-r--r--jstests/replsets/bulk_api_wc.js66
-rw-r--r--jstests/replsets/capped_id.js106
-rw-r--r--jstests/replsets/capped_insert_order.js2
-rw-r--r--jstests/replsets/chaining_removal.js56
-rw-r--r--jstests/replsets/cloneDb.js187
-rw-r--r--jstests/replsets/config_server_checks.js290
-rw-r--r--jstests/replsets/copydb.js12
-rw-r--r--jstests/replsets/disallow_adding_initialized_node1.js28
-rw-r--r--jstests/replsets/disallow_adding_initialized_node2.js24
-rw-r--r--jstests/replsets/drain.js58
-rw-r--r--jstests/replsets/drop_oplog.js16
-rw-r--r--jstests/replsets/election_id.js177
-rw-r--r--jstests/replsets/election_not_blocked.js28
-rw-r--r--jstests/replsets/explain_slaveok.js109
-rw-r--r--jstests/replsets/find_and_modify_wc.js30
-rw-r--r--jstests/replsets/fsync_lock_read_secondaries.js83
-rw-r--r--jstests/replsets/get_replication_info_helper.js14
-rw-r--r--jstests/replsets/get_status.js8
-rw-r--r--jstests/replsets/groupAndMapReduce.js70
-rw-r--r--jstests/replsets/index_delete.js54
-rw-r--r--jstests/replsets/index_restart_secondary.js49
-rw-r--r--jstests/replsets/initial_sync1.js35
-rw-r--r--jstests/replsets/initial_sync2.js199
-rw-r--r--jstests/replsets/initial_sync3.js9
-rw-r--r--jstests/replsets/initial_sync4.js52
-rw-r--r--jstests/replsets/initial_sync_unsupported_auth_schema.js19
-rw-r--r--jstests/replsets/initiate.js9
-rw-r--r--jstests/replsets/initiate_prohibits_w0.js6
-rw-r--r--jstests/replsets/initiate_without_replset_name_at_startup.js157
-rw-r--r--jstests/replsets/ismaster1.js340
-rw-r--r--jstests/replsets/last_op_visible.js85
-rw-r--r--jstests/replsets/lastop.js62
-rw-r--r--jstests/replsets/localhostAuthBypass.js127
-rw-r--r--jstests/replsets/maintenance.js51
-rw-r--r--jstests/replsets/maintenance2.js17
-rw-r--r--jstests/replsets/maintenance_non-blocking.js4
-rw-r--r--jstests/replsets/maxSyncSourceLagSecs.js42
-rw-r--r--jstests/replsets/no_chaining.js53
-rw-r--r--jstests/replsets/oplog_format.js165
-rw-r--r--jstests/replsets/oplog_note_cmd.js2
-rw-r--r--jstests/replsets/oplog_term.js88
-rw-r--r--jstests/replsets/oplog_truncated_on_recovery.js60
-rw-r--r--jstests/replsets/optime.js26
-rw-r--r--jstests/replsets/pipelineout.js24
-rw-r--r--jstests/replsets/plan_cache_slaveok.js60
-rw-r--r--jstests/replsets/priority_takeover_cascading_priorities.js22
-rw-r--r--jstests/replsets/priority_takeover_one_node_higher_priority.js15
-rw-r--r--jstests/replsets/priority_takeover_two_nodes_equal_priority.js79
-rw-r--r--jstests/replsets/protocol_version_upgrade_downgrade.js146
-rw-r--r--jstests/replsets/read_after_optime.js172
-rw-r--r--jstests/replsets/read_committed.js105
-rw-r--r--jstests/replsets/read_committed_no_snapshots.js117
-rw-r--r--jstests/replsets/read_committed_on_secondary.js168
-rw-r--r--jstests/replsets/read_majority_two_arbs.js98
-rw-r--r--jstests/replsets/reconfig.js15
-rw-r--r--jstests/replsets/reconfig_prohibits_w0.js10
-rw-r--r--jstests/replsets/reconfig_tags.js23
-rw-r--r--jstests/replsets/reconfig_without_increased_queues.js13
-rw-r--r--jstests/replsets/reindex_secondary.js9
-rw-r--r--jstests/replsets/remove1.js70
-rw-r--r--jstests/replsets/repl_options.js31
-rw-r--r--jstests/replsets/replset1.js77
-rw-r--r--jstests/replsets/replset2.js105
-rw-r--r--jstests/replsets/replset3.js31
-rw-r--r--jstests/replsets/replset4.js27
-rw-r--r--jstests/replsets/replset5.js15
-rw-r--r--jstests/replsets/replset6.js69
-rw-r--r--jstests/replsets/replset7.js36
-rw-r--r--jstests/replsets/replset8.js44
-rw-r--r--jstests/replsets/replset9.js47
-rw-r--r--jstests/replsets/replsetadd_profile.js8
-rw-r--r--jstests/replsets/replsetarb2.js18
-rw-r--r--jstests/replsets/replsetfreeze.js79
-rw-r--r--jstests/replsets/replsethostnametrim.js10
-rw-r--r--jstests/replsets/replsetprio1.js31
-rw-r--r--jstests/replsets/replsetrestart1.js18
-rw-r--r--jstests/replsets/restore_term.js90
-rw-r--r--[-rwxr-xr-x]jstests/replsets/resync.js50
-rw-r--r--jstests/replsets/resync_with_write_load.js75
-rw-r--r--jstests/replsets/rollback.js71
-rw-r--r--jstests/replsets/rollback2.js84
-rw-r--r--[-rwxr-xr-x]jstests/replsets/rollback3.js106
-rw-r--r--jstests/replsets/rollback5.js69
-rw-r--r--jstests/replsets/rollback_auth.js146
-rw-r--r--jstests/replsets/rollback_cmd_unrollbackable.js35
-rw-r--r--jstests/replsets/rollback_collMod_PowerOf2Sizes.js138
-rw-r--r--jstests/replsets/rollback_collMod_fatal.js31
-rw-r--r--jstests/replsets/rollback_different_h.js34
-rw-r--r--jstests/replsets/rollback_dropdb.js29
-rw-r--r--jstests/replsets/rollback_empty_ns.js32
-rw-r--r--jstests/replsets/rollback_empty_o.js32
-rw-r--r--jstests/replsets/rollback_empty_o2.js32
-rw-r--r--jstests/replsets/rollback_fake_cmd.js35
-rw-r--r--jstests/replsets/rollback_index.js40
-rw-r--r--jstests/replsets/rollback_too_new.js28
-rw-r--r--jstests/replsets/rslib.js390
-rw-r--r--jstests/replsets/server8070.js87
-rw-r--r--jstests/replsets/server_status_metrics.js32
-rw-r--r--jstests/replsets/server_status_repl.js6
-rw-r--r--jstests/replsets/single_server_majority.js2
-rw-r--r--jstests/replsets/sized_zero_capped.js16
-rw-r--r--jstests/replsets/slavedelay1.js177
-rw-r--r--jstests/replsets/slavedelay3.js12
-rw-r--r--jstests/replsets/stepdown.js68
-rw-r--r--jstests/replsets/stepdown3.js101
-rw-r--r--jstests/replsets/stepdown_catch_up_opt.js40
-rw-r--r--jstests/replsets/stepdown_kill_other_ops.js118
-rw-r--r--jstests/replsets/stepdown_killop.js168
-rw-r--r--jstests/replsets/stepdown_long_wait_time.js47
-rw-r--r--jstests/replsets/stepdown_wrt_electable.js13
-rw-r--r--jstests/replsets/sync2.js32
-rw-r--r--jstests/replsets/sync_passive.js28
-rw-r--r--jstests/replsets/system_profile.js4
-rw-r--r--jstests/replsets/tags.js144
-rw-r--r--jstests/replsets/tags2.js34
-rw-r--r--jstests/replsets/tags_with_reconfig.js45
-rw-r--r--jstests/replsets/temp_namespace.js94
-rw-r--r--jstests/replsets/test_command.js140
-rw-r--r--jstests/replsets/toostale.js80
-rw-r--r--[-rwxr-xr-x]jstests/replsets/two_initsync.js42
-rw-r--r--jstests/replsets/two_nodes_priority_take_over.js109
-rw-r--r--jstests/replsets/zero_vote_arbiter.js15
-rw-r--r--jstests/sharding/SERVER-7379.js32
-rw-r--r--jstests/sharding/add_invalid_shard.js58
-rw-r--r--jstests/sharding/addshard1.js108
-rw-r--r--jstests/sharding/addshard2.js250
-rw-r--r--jstests/sharding/addshard4.js77
-rw-r--r--jstests/sharding/addshard5.js73
-rw-r--r--jstests/sharding/all_config_hosts_down.js63
-rw-r--r--jstests/sharding/all_config_servers_blackholed_from_mongos.js57
-rw-r--r--jstests/sharding/array_shard_key.js99
-rw-r--r--jstests/sharding/auth.js606
-rw-r--r--jstests/sharding/auth2.js19
-rw-r--r--jstests/sharding/authCommands.js576
-rw-r--r--jstests/sharding/authConnectionHook.js44
-rw-r--r--jstests/sharding/auth_add_shard.js138
-rw-r--r--jstests/sharding/auth_copydb.js57
-rw-r--r--jstests/sharding/auth_repl.js11
-rw-r--r--jstests/sharding/auth_slaveok_routing.js68
-rw-r--r--jstests/sharding/authmr.js219
-rw-r--r--jstests/sharding/authwhere.js135
-rw-r--r--jstests/sharding/auto1.js113
-rw-r--r--jstests/sharding/auto2.js255
-rw-r--r--jstests/sharding/auto_rebalance.js92
-rw-r--r--jstests/sharding/autodiscover_config_rs_from_secondary.js72
-rw-r--r--jstests/sharding/autosplit_heuristics.js110
-rw-r--r--jstests/sharding/balance_repl.js110
-rw-r--r--jstests/sharding/balance_tags1.js48
-rw-r--r--jstests/sharding/balance_tags2.js46
-rw-r--r--jstests/sharding/basic_drop_coll.js61
-rw-r--r--jstests/sharding/basic_sharding_params.js45
-rw-r--r--jstests/sharding/basic_split.js127
-rw-r--r--jstests/sharding/batch_write_command_sharded.js476
-rw-r--r--jstests/sharding/bouncing_count.js75
-rw-r--r--jstests/sharding/bulk_insert.js490
-rw-r--r--jstests/sharding/bulk_shard_insert.js118
-rw-r--r--jstests/sharding/cleanup_orphaned.js4
-rw-r--r--jstests/sharding/cleanup_orphaned_auth.js79
-rw-r--r--jstests/sharding/cleanup_orphaned_basic.js244
-rw-r--r--jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js292
-rw-r--r--jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js225
-rw-r--r--jstests/sharding/cleanup_orphaned_cmd_hashed.js131
-rw-r--r--jstests/sharding/cleanup_orphaned_cmd_prereload.js118
-rw-r--r--jstests/sharding/coll_epoch_test0.js50
-rw-r--r--jstests/sharding/coll_epoch_test1.js108
-rw-r--r--jstests/sharding/coll_epoch_test2.js109
-rw-r--r--jstests/sharding/conf_server_write_concern.js66
-rw-r--r--jstests/sharding/config_rs_change.js4
-rw-r--r--jstests/sharding/config_rs_no_primary.js94
-rw-r--r--jstests/sharding/conn_pool_stats.js2
-rw-r--r--jstests/sharding/copydb_from_mongos.js30
-rw-r--r--jstests/sharding/count1.js346
-rw-r--r--jstests/sharding/count2.js76
-rw-r--r--jstests/sharding/count_config_servers.js88
-rw-r--r--jstests/sharding/count_slaveok.js134
-rw-r--r--jstests/sharding/covered_shard_key_indexes.js140
-rw-r--r--jstests/sharding/create_idx_empty_primary.js39
-rw-r--r--jstests/sharding/cursor1.js111
-rw-r--r--jstests/sharding/cursor_cleanup.js24
-rw-r--r--jstests/sharding/delete_during_migrate.js32
-rw-r--r--jstests/sharding/diffservers1.js31
-rw-r--r--jstests/sharding/disable_autosplit.js42
-rw-r--r--jstests/sharding/drop_configdb.js46
-rw-r--r--jstests/sharding/drop_sharded_db.js101
-rw-r--r--jstests/sharding/dump_coll_metadata.js78
-rw-r--r--jstests/sharding/empty_cluster_init.js29
-rw-r--r--jstests/sharding/empty_doc_results.js35
-rw-r--r--jstests/sharding/enable_sharding_basic.js68
-rw-r--r--jstests/sharding/error_propagation.js12
-rw-r--r--jstests/sharding/exact_shard_key_target.js70
-rw-r--r--jstests/sharding/explain_cmd.js67
-rw-r--r--jstests/sharding/explain_find_and_modify_sharded.js41
-rw-r--r--jstests/sharding/explain_read_pref.js79
-rw-r--r--jstests/sharding/fair_balancer_round.js21
-rw-r--r--jstests/sharding/features1.js378
-rw-r--r--jstests/sharding/features2.js348
-rw-r--r--jstests/sharding/features3.js256
-rw-r--r--jstests/sharding/find_and_modify_after_multi_write.js161
-rw-r--r--jstests/sharding/find_getmore_cmd.js14
-rw-r--r--jstests/sharding/findandmodify1.js92
-rw-r--r--jstests/sharding/findandmodify2.js83
-rw-r--r--jstests/sharding/forget_mr_temp_ns.js47
-rw-r--r--jstests/sharding/fts_score_sort_sharded.js46
-rw-r--r--jstests/sharding/geo_near_random1.js76
-rw-r--r--jstests/sharding/geo_near_random2.js95
-rw-r--r--jstests/sharding/geo_shardedgeonear.js38
-rw-r--r--jstests/sharding/group_slaveok.js95
-rw-r--r--jstests/sharding/hash_basic.js56
-rw-r--r--jstests/sharding/hash_shard1.js56
-rw-r--r--jstests/sharding/hash_shard_non_empty.js15
-rw-r--r--jstests/sharding/hash_shard_num_chunks.js51
-rw-r--r--jstests/sharding/hash_shard_unique_compound.js18
-rw-r--r--jstests/sharding/hash_single_shard.js16
-rw-r--r--jstests/sharding/hash_skey_split.js33
-rw-r--r--jstests/sharding/idhack_sharded.js4
-rw-r--r--jstests/sharding/implicit_db_creation.js52
-rw-r--r--jstests/sharding/in_memory_sort_limit.js86
-rw-r--r--jstests/sharding/index1.js734
-rw-r--r--jstests/sharding/inserts_consistent.js64
-rw-r--r--jstests/sharding/ismaster.js37
-rw-r--r--jstests/sharding/jumbo1.js74
-rw-r--r--jstests/sharding/key_many.js361
-rw-r--r--jstests/sharding/key_string.js114
-rw-r--r--jstests/sharding/lagged_config_secondary.js51
-rw-r--r--jstests/sharding/large_chunk.js93
-rw-r--r--jstests/sharding/large_skip_one_shard.js33
-rw-r--r--jstests/sharding/limit_push.js104
-rw-r--r--jstests/sharding/listDatabases.js36
-rw-r--r--jstests/sharding/listshards.js110
-rw-r--r--jstests/sharding/localhostAuthBypass.js175
-rw-r--r--jstests/sharding/major_version_check.js69
-rw-r--r--jstests/sharding/mapReduce_inSharded.js37
-rw-r--r--jstests/sharding/mapReduce_inSharded_outSharded.js38
-rw-r--r--jstests/sharding/mapReduce_nonSharded.js37
-rw-r--r--jstests/sharding/mapReduce_outSharded.js38
-rw-r--r--jstests/sharding/map_reduce_validation.js58
-rw-r--r--jstests/sharding/max_time_ms_sharded.js439
-rw-r--r--jstests/sharding/merge_chunks_basic.js76
-rw-r--r--jstests/sharding/merge_chunks_test.js150
-rw-r--r--jstests/sharding/merge_chunks_test_with_md_ops.js74
-rw-r--r--jstests/sharding/migrateBig.js116
-rw-r--r--jstests/sharding/migrateBig_balancer.js94
-rw-r--r--jstests/sharding/migrate_overwrite_id.js42
-rw-r--r--jstests/sharding/migration_failure.js74
-rw-r--r--jstests/sharding/migration_ignore_interrupts.js620
-rw-r--r--jstests/sharding/migration_sets_fromMigrate_flag.js291
-rw-r--r--jstests/sharding/migration_with_source_ops.js250
-rw-r--r--jstests/sharding/min_optime_recovery.js124
-rw-r--r--jstests/sharding/missing_key.js31
-rw-r--r--jstests/sharding/mongos_no_detect_sharding.js50
-rw-r--r--jstests/sharding/mongos_no_replica_set_refresh.js134
-rw-r--r--jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js270
-rw-r--r--jstests/sharding/mongos_rs_shard_failure_tolerance.js728
-rw-r--r--jstests/sharding/mongos_shard_failure_tolerance.js171
-rw-r--r--jstests/sharding/mongos_validate_backoff.js86
-rw-r--r--jstests/sharding/mongos_validate_writes.js80
-rw-r--r--jstests/sharding/movePrimary1.js98
-rw-r--r--jstests/sharding/move_chunk_basic.js119
-rw-r--r--jstests/sharding/move_chunk_missing_idx.js33
-rw-r--r--jstests/sharding/move_primary_basic.js74
-rw-r--r--jstests/sharding/move_stale_mongos.js5
-rw-r--r--jstests/sharding/movechunk_include.js34
-rw-r--r--jstests/sharding/movechunk_with_default_paranoia.js9
-rw-r--r--jstests/sharding/movechunk_with_moveParanoia.js14
-rw-r--r--jstests/sharding/movechunk_with_noMoveParanoia.js14
-rw-r--r--jstests/sharding/moveprimary_ignore_sharded.js97
-rw-r--r--jstests/sharding/mrShardedOutput.js91
-rw-r--r--jstests/sharding/mrShardedOutputAuth.js155
-rw-r--r--jstests/sharding/mr_and_agg_versioning.js93
-rw-r--r--jstests/sharding/mr_noscripting.js24
-rw-r--r--jstests/sharding/mr_shard_version.js151
-rw-r--r--jstests/sharding/multi_coll_drop.js54
-rw-r--r--jstests/sharding/multi_mongos2.js97
-rw-r--r--jstests/sharding/multi_mongos2a.js43
-rw-r--r--jstests/sharding/multi_write_target.js98
-rw-r--r--jstests/sharding/names.js70
-rw-r--r--jstests/sharding/noUpdateButN1inAnotherCollection.js44
-rw-r--r--jstests/sharding/no_empty_reset.js59
-rw-r--r--jstests/sharding/parallel.js94
-rw-r--r--jstests/sharding/pending_chunk.js155
-rw-r--r--jstests/sharding/prefix_shard_key.js201
-rw-r--r--jstests/sharding/presplit.js75
-rw-r--r--jstests/sharding/printShardingStatus.js422
-rw-r--r--jstests/sharding/query_after_multi_write.js127
-rw-r--r--jstests/sharding/query_config.js168
-rw-r--r--jstests/sharding/query_sharded.js20
-rw-r--r--jstests/sharding/read_after_optime.js25
-rw-r--r--jstests/sharding/read_does_not_create_namespaces.js14
-rw-r--r--[-rwxr-xr-x]jstests/sharding/read_pref.js108
-rw-r--r--jstests/sharding/read_pref_cmd.js181
-rw-r--r--jstests/sharding/read_pref_multi_mongos_stale_config.js29
-rw-r--r--jstests/sharding/recovering_slaveok.js168
-rw-r--r--jstests/sharding/regex_targeting.js294
-rw-r--r--jstests/sharding/remove1.js44
-rw-r--r--jstests/sharding/remove2.js177
-rw-r--r--jstests/sharding/remove3.js86
-rw-r--r--jstests/sharding/rename.js83
-rw-r--r--jstests/sharding/rename_across_mongos.js38
-rw-r--r--jstests/sharding/repl_monitor_refresh.js112
-rw-r--r--jstests/sharding/replmonitor_bad_seed.js21
-rw-r--r--jstests/sharding/return_partial_shards_down.js35
-rw-r--r--jstests/sharding/rs_stepdown_and_pooling.js207
-rw-r--r--jstests/sharding/secondary_query_routing.js48
-rw-r--r--jstests/sharding/server_status.js87
-rw-r--r--jstests/sharding/shard1.js59
-rw-r--r--jstests/sharding/shard2.js282
-rw-r--r--jstests/sharding/shard3.js372
-rw-r--r--jstests/sharding/shard4.js78
-rw-r--r--jstests/sharding/shard5.js78
-rw-r--r--jstests/sharding/shard6.js114
-rw-r--r--jstests/sharding/shard7.js75
-rw-r--r--jstests/sharding/shard_collection_basic.js228
-rw-r--r--jstests/sharding/shard_existing.js56
-rw-r--r--jstests/sharding/shard_insert_getlasterror_w2.js55
-rw-r--r--jstests/sharding/shard_key_immutable.js549
-rw-r--r--jstests/sharding/shard_keycount.js60
-rw-r--r--jstests/sharding/shard_kill_and_pooling.js162
-rw-r--r--jstests/sharding/shard_targeting.js88
-rw-r--r--jstests/sharding/shard_with_special_db_names.js40
-rw-r--r--jstests/sharding/sharded_limit_batchsize.js219
-rw-r--r--jstests/sharding/sharded_profile.js36
-rw-r--r--jstests/sharding/sharding_balance1.js125
-rw-r--r--jstests/sharding/sharding_balance2.js101
-rw-r--r--jstests/sharding/sharding_balance3.js127
-rw-r--r--jstests/sharding/sharding_balance4.js244
-rw-r--r--jstests/sharding/sharding_migrate_cursor1.js114
-rw-r--r--jstests/sharding/sharding_multiple_ns_rs.js74
-rw-r--r--jstests/sharding/sharding_options.js113
-rw-r--r--jstests/sharding/sharding_rs1.js81
-rw-r--r--jstests/sharding/sharding_rs2.js377
-rw-r--r--jstests/sharding/sharding_state_after_stepdown.js323
-rw-r--r--jstests/sharding/sharding_system_namespaces.js39
-rw-r--r--jstests/sharding/sort1.js190
-rw-r--r--jstests/sharding/split_chunk.js195
-rw-r--r--jstests/sharding/split_large_key.js107
-rw-r--r--jstests/sharding/split_with_force.js62
-rw-r--r--jstests/sharding/split_with_force_small.js64
-rw-r--r--jstests/sharding/ssv_config_check.js108
-rw-r--r--jstests/sharding/stale_mongos_updates_and_removes.js87
-rw-r--r--jstests/sharding/stale_version_write.js30
-rw-r--r--jstests/sharding/startup_with_all_configs_down.js103
-rw-r--r--jstests/sharding/stats.js357
-rw-r--r--jstests/sharding/tag_auto_split.js86
-rw-r--r--jstests/sharding/tag_range.js68
-rw-r--r--jstests/sharding/test_stacked_migration_cleanup.js95
-rw-r--r--jstests/sharding/top_chunk_autosplit.js314
-rw-r--r--jstests/sharding/trace_missing_docs_test.js60
-rw-r--r--jstests/sharding/unowned_doc_filtering.js67
-rw-r--r--jstests/sharding/update_immutable_fields.js102
-rw-r--r--jstests/sharding/update_sharded.js192
-rw-r--r--jstests/sharding/upsert_sharded.js195
-rw-r--r--jstests/sharding/user_flags_sharded.js97
-rw-r--r--jstests/sharding/version1.js162
-rw-r--r--jstests/sharding/version2.js91
-rw-r--r--jstests/sharding/write_cmd_auto_split.js209
-rw-r--r--jstests/sharding/write_commands_sharding_state.js109
-rw-r--r--jstests/sharding/zbigMapReduce.js121
-rw-r--r--jstests/sharding/zero_shard_version.js333
-rw-r--r--jstests/slow1/election_timing.js209
-rw-r--r--jstests/slow1/large_role_chain.js10
-rw-r--r--jstests/slow1/memory.js47
-rw-r--r--jstests/slow1/replsets_priority1.js86
-rw-r--r--jstests/slow1/sharding_multiple_collections.js105
-rw-r--r--jstests/slow2/32bit.js72
-rw-r--r--jstests/slow2/conc_update.js47
-rw-r--r--jstests/slow2/cursor_timeout.js63
-rw-r--r--jstests/slow2/mr_during_migrate.js109
-rw-r--r--jstests/slow2/remove_during_mr.js5
-rw-r--r--jstests/slow2/replsets_killop.js55
-rw-r--r--jstests/ssl/disable_x509.js42
-rw-r--r--jstests/ssl/initial_sync1_x509.js49
-rw-r--r--jstests/ssl/libs/ssl_helpers.js61
-rw-r--r--jstests/ssl/mixed_mode_sharded.js2
-rw-r--r--jstests/ssl/set_parameter_ssl.js15
-rw-r--r--jstests/ssl/sharding_with_x509.js66
-rw-r--r--jstests/ssl/ssl_cert_password.js137
-rw-r--r--jstests/ssl/ssl_crl.js12
-rw-r--r--jstests/ssl/ssl_crl_revoked.js25
-rw-r--r--jstests/ssl/ssl_fips.js26
-rw-r--r--jstests/ssl/ssl_hostname_validation.js105
-rw-r--r--jstests/ssl/ssl_invalid_server_cert.js16
-rw-r--r--jstests/ssl/ssl_options.js21
-rw-r--r--jstests/ssl/ssl_weak.js45
-rw-r--r--jstests/ssl/ssl_without_ca.js49
-rw-r--r--jstests/ssl/upgrade_to_ssl.js24
-rw-r--r--jstests/ssl/upgrade_to_x509_ssl.js54
-rw-r--r--jstests/ssl/x509_client.js94
-rw-r--r--jstests/sslSpecial/set_parameter_nossl.js6
-rw-r--r--jstests/sslSpecial/ssl_mixedmode.js13
-rw-r--r--jstests/sslSpecial/upgrade_to_ssl_nossl.js16
-rw-r--r--jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js83
-rw-r--r--jstests/tool/command_line_quotes.js10
-rw-r--r--jstests/tool/csv1.js61
-rw-r--r--jstests/tool/csvexport1.js69
-rw-r--r--jstests/tool/csvexport2.js39
-rw-r--r--jstests/tool/csvimport1.js75
-rw-r--r--jstests/tool/dumpauth.js31
-rw-r--r--jstests/tool/dumpfilename1.js13
-rw-r--r--jstests/tool/dumprestore1.js34
-rw-r--r--jstests/tool/dumprestore10.js10
-rw-r--r--jstests/tool/dumprestore3.js14
-rw-r--r--jstests/tool/dumprestore4.js29
-rw-r--r--jstests/tool/dumprestore6.js30
-rw-r--r--jstests/tool/dumprestore7.js36
-rw-r--r--jstests/tool/dumprestore8.js109
-rw-r--r--jstests/tool/dumprestore9.js138
-rw-r--r--jstests/tool/dumprestoreWithNoOptions.js99
-rw-r--r--jstests/tool/dumprestore_auth.js139
-rw-r--r--jstests/tool/dumprestore_auth2.js216
-rw-r--r--jstests/tool/dumprestore_auth3.js91
-rw-r--r--jstests/tool/dumprestore_excludecollections.js101
-rw-r--r--jstests/tool/dumpsecondary.js32
-rw-r--r--jstests/tool/exportimport1.js59
-rw-r--r--jstests/tool/exportimport3.js30
-rw-r--r--jstests/tool/exportimport4.js43
-rw-r--r--jstests/tool/exportimport5.js69
-rw-r--r--jstests/tool/exportimport6.js27
-rw-r--r--jstests/tool/exportimport_bigarray.js35
-rw-r--r--jstests/tool/exportimport_date.js20
-rw-r--r--jstests/tool/exportimport_minkey_maxkey.js20
-rw-r--r--jstests/tool/files1.js16
-rw-r--r--jstests/tool/gridfs.js17
-rw-r--r--jstests/tool/oplog1.js19
-rw-r--r--jstests/tool/oplog_all_ops.js46
-rw-r--r--jstests/tool/restorewithauth.js66
-rw-r--r--jstests/tool/stat1.js30
-rw-r--r--jstests/tool/tool1.js61
-rw-r--r--jstests/tool/tool_replset.js43
-rw-r--r--jstests/tool/tsv1.js63
1870 files changed, 60854 insertions, 59167 deletions
diff --git a/jstests/aggregation/bugs/cond.js b/jstests/aggregation/bugs/cond.js
index e71f49e25a5..2b4fa8ff16e 100644
--- a/jstests/aggregation/bugs/cond.js
+++ b/jstests/aggregation/bugs/cond.js
@@ -5,72 +5,79 @@ load('jstests/aggregation/extras/utils.js');
t = db.jstests_aggregation_cond;
t.drop();
-t.save( {} );
+t.save({});
-function assertError( expectedErrorCode, condSpec ) {
+function assertError(expectedErrorCode, condSpec) {
assertErrorCode(t, {$project: {a: {$cond: condSpec}}}, expectedErrorCode);
}
-function assertResult( expectedResult, arg ) {
- assert.eq( expectedResult,
- t.aggregate( { $project:{ a:{ $cond:arg } } } ).toArray()[0].a );
+function assertResult(expectedResult, arg) {
+ assert.eq(expectedResult, t.aggregate({$project: {a: {$cond: arg}}}).toArray()[0].a);
}
// Wrong number of args.
-assertError( 16020, [] );
-assertError( 16020, [1] );
-assertError( 16020, [false] );
-assertError( 16020, [1,1] );
-assertError( 16020, [1,1,null,1] );
-assertError( 16020, [1,1,1,undefined] );
+assertError(16020, []);
+assertError(16020, [1]);
+assertError(16020, [false]);
+assertError(16020, [1, 1]);
+assertError(16020, [1, 1, null, 1]);
+assertError(16020, [1, 1, 1, undefined]);
// Bad object cases
-assertError( 17080, {"else":1, then:1} );
-assertError( 17081, {"if":1, "else":1} );
-assertError( 17082, {"if":1, then:1} );
-assertError( 17083, {asdf:1, then:1} );
+assertError(17080, {"else": 1, then: 1});
+assertError(17081, {"if": 1, "else": 1});
+assertError(17082, {"if": 1, then: 1});
+assertError(17083, {asdf: 1, then: 1});
// Literal expressions.
-assertResult( 1, [true, 1, 2] );
-assertResult( 2, [false, 1, 2] );
+assertResult(1, [true, 1, 2]);
+assertResult(2, [false, 1, 2]);
// Order independence for object case
-assertResult(1, {"if":true, "then":1, "else":2});
-assertResult(1, {"if":true, "else":2, "then":1});
-assertResult(1, {"then":1, "if":true, "else":2});
-assertResult(1, {"then":1, "else":2, "if":true});
-assertResult(1, {"else":2, "then":1, "if":true});
-assertResult(1, {"else":2, "if":true, "then":1});
+assertResult(1, {"if": true, "then": 1, "else": 2});
+assertResult(1, {"if": true, "else": 2, "then": 1});
+assertResult(1, {"then": 1, "if": true, "else": 2});
+assertResult(1, {"then": 1, "else": 2, "if": true});
+assertResult(1, {"else": 2, "then": 1, "if": true});
+assertResult(1, {"else": 2, "if": true, "then": 1});
// Computed expressions.
-assertResult( 1, [{ $and:[] }, { $add:[ 1 ] }, { $add:[ 1, 1 ] }] );
-assertResult( 2, [{ $or:[] }, { $add:[ 1 ] }, { $add:[ 1, 1 ] }] );
+assertResult(1, [{$and: []}, {$add: [1]}, {$add: [1, 1]}]);
+assertResult(2, [{$or: []}, {$add: [1]}, {$add: [1, 1]}]);
t.drop();
-t.save( { t:true, f:false, x:'foo', y:'bar' } );
+t.save({t: true, f: false, x: 'foo', y: 'bar'});
// Field path expressions.
-assertResult( 'foo', ['$t', '$x', '$y'] );
-assertResult( 'bar', ['$f', '$x', '$y'] );
+assertResult('foo', ['$t', '$x', '$y']);
+assertResult('bar', ['$f', '$x', '$y']);
t.drop();
-t.save( {} );
+t.save({});
// Coerce to bool.
-assertResult( 'a', [1, 'a', 'b'] );
-assertResult( 'a', ['', 'a', 'b'] );
-assertResult( 'b', [0, 'a', 'b'] );
+assertResult('a', [1, 'a', 'b']);
+assertResult('a', ['', 'a', 'b']);
+assertResult('b', [0, 'a', 'b']);
// Nested.
t.drop();
-t.save( { noonSense:'am', mealCombined:'no' } );
-t.save( { noonSense:'am', mealCombined:'yes' } );
-t.save( { noonSense:'pm', mealCombined:'yes' } );
-t.save( { noonSense:'pm', mealCombined:'no' } );
-assert.eq( [ 'breakfast', 'brunch', 'linner', 'dinner' ],
- t.aggregate( { $project:{ a:{ $cond:[ { $eq:[ '$noonSense', 'am' ] },
- { $cond:[ { $eq:[ '$mealCombined', 'yes' ] },
- 'brunch', 'breakfast' ] },
- { $cond:[ { $eq:[ '$mealCombined', 'yes' ] },
- 'linner', 'dinner' ] } ] } } } )
- .map( function( x ) { return x.a; } ) );
+t.save({noonSense: 'am', mealCombined: 'no'});
+t.save({noonSense: 'am', mealCombined: 'yes'});
+t.save({noonSense: 'pm', mealCombined: 'yes'});
+t.save({noonSense: 'pm', mealCombined: 'no'});
+assert.eq(['breakfast', 'brunch', 'linner', 'dinner'],
+ t.aggregate({
+ $project: {
+ a: {
+ $cond: [
+ {$eq: ['$noonSense', 'am']},
+ {$cond: [{$eq: ['$mealCombined', 'yes']}, 'brunch', 'breakfast']},
+ {$cond: [{$eq: ['$mealCombined', 'yes']}, 'linner', 'dinner']}
+ ]
+ }
+ }
+ })
+ .map(function(x) {
+ return x.a;
+ }));
diff --git a/jstests/aggregation/bugs/firstlast.js b/jstests/aggregation/bugs/firstlast.js
index 5b3a92be2b4..ca9e963f6ca 100644
--- a/jstests/aggregation/bugs/firstlast.js
+++ b/jstests/aggregation/bugs/firstlast.js
@@ -5,106 +5,116 @@ t = db.jstests_aggregation_firstlast;
t.drop();
/** Check expected $first and $last result values. */
-function assertFirstLast( expectedFirst, expectedLast, pipeline, expression ) {
+function assertFirstLast(expectedFirst, expectedLast, pipeline, expression) {
pipeline = pipeline || [];
expression = expression || '$b';
- pipeline.push( { $group:{ _id:'$a',
- first:{ $first:expression },
- last:{ $last:expression } } } );
- result = t.aggregate( pipeline ).toArray();
- for( var i = 0; i < result.length; ++i ) {
- if ( result[ i ]._id == 1 ) {
+ pipeline.push({$group: {_id: '$a', first: {$first: expression}, last: {$last: expression}}});
+ result = t.aggregate(pipeline).toArray();
+ for (var i = 0; i < result.length; ++i) {
+ if (result[i]._id == 1) {
// Check results for group _id 1.
- assert.eq( expectedFirst, result[ i ].first );
- assert.eq( expectedLast, result[ i ].last );
+ assert.eq(expectedFirst, result[i].first);
+ assert.eq(expectedLast, result[i].last);
return;
}
}
- assert( false, "Expected group _id '1' missing." );
+ assert(false, "Expected group _id '1' missing.");
}
// One document.
-t.save( { a:1, b:1 } );
-assertFirstLast( 1, 1 );
+t.save({a: 1, b: 1});
+assertFirstLast(1, 1);
// Two documents.
-t.save( { a:1, b:2 } );
-assertFirstLast( 1, 2 );
+t.save({a: 1, b: 2});
+assertFirstLast(1, 2);
// Three documents.
-t.save( { a:1, b:3 } );
-assertFirstLast( 1, 3 );
+t.save({a: 1, b: 3});
+assertFirstLast(1, 3);
// Another 'a' key value does not affect outcome.
t.drop();
-t.save( { a:3, b:0 } );
-t.save( { a:1, b:1 } );
-t.save( { a:1, b:2 } );
-t.save( { a:1, b:3 } );
-t.save( { a:2, b:0 } );
-assertFirstLast( 1, 3 );
+t.save({a: 3, b: 0});
+t.save({a: 1, b: 1});
+t.save({a: 1, b: 2});
+t.save({a: 1, b: 3});
+t.save({a: 2, b: 0});
+assertFirstLast(1, 3);
// Additional pipeline stages do not affect outcome if order is maintained.
-assertFirstLast( 1, 3, [ { $project:{ x:'$a', y:'$b' } }, { $project:{ a:'$x', b:'$y' } } ] );
+assertFirstLast(1, 3, [{$project: {x: '$a', y: '$b'}}, {$project: {a: '$x', b: '$y'}}]);
// Additional pipeline stages affect outcome if order is modified.
-assertFirstLast( 3, 1, [ { $sort:{ b:-1 } } ] );
+assertFirstLast(3, 1, [{$sort: {b: -1}}]);
// Skip and limit affect the results seen.
t.drop();
-t.save( { a:1, b:1 } );
-t.save( { a:1, b:2 } );
-t.save( { a:1, b:3 } );
-assertFirstLast( 1, 2, [ { $limit:2 } ] );
-assertFirstLast( 2, 3, [ { $skip:1 }, { $limit:2 } ] );
-assertFirstLast( 2, 2, [ { $skip:1 }, { $limit:1 } ] );
+t.save({a: 1, b: 1});
+t.save({a: 1, b: 2});
+t.save({a: 1, b: 3});
+assertFirstLast(1, 2, [{$limit: 2}]);
+assertFirstLast(2, 3, [{$skip: 1}, {$limit: 2}]);
+assertFirstLast(2, 2, [{$skip: 1}, {$limit: 1}]);
// Mixed type values.
-t.save( { a:1, b:'foo' } );
-assertFirstLast( 1, 'foo' );
+t.save({a: 1, b: 'foo'});
+assertFirstLast(1, 'foo');
t.drop();
-t.save( { a:1, b:'bar' } );
-t.save( { a:1, b:true } );
-assertFirstLast( 'bar', true );
+t.save({a: 1, b: 'bar'});
+t.save({a: 1, b: true});
+assertFirstLast('bar', true);
// Value null.
t.drop();
-t.save( { a:1, b:null } );
-t.save( { a:1, b:2 } );
-assertFirstLast( null, 2 );
+t.save({a: 1, b: null});
+t.save({a: 1, b: 2});
+assertFirstLast(null, 2);
t.drop();
-t.save( { a:1, b:2 } );
-t.save( { a:1, b:null } );
-assertFirstLast( 2, null );
+t.save({a: 1, b: 2});
+t.save({a: 1, b: null});
+assertFirstLast(2, null);
t.drop();
-t.save( { a:1, b:null } );
-t.save( { a:1, b:null } );
-assertFirstLast( null, null );
+t.save({a: 1, b: null});
+t.save({a: 1, b: null});
+assertFirstLast(null, null);
// Value missing.
t.drop();
-t.save( { a:1 } );
-t.save( { a:1, b:2 } );
-assertFirstLast( undefined, 2 );
+t.save({a: 1});
+t.save({a: 1, b: 2});
+assertFirstLast(undefined, 2);
t.drop();
-t.save( { a:1, b:2 } );
-t.save( { a:1 } );
-assertFirstLast( 2, undefined );
+t.save({a: 1, b: 2});
+t.save({a: 1});
+assertFirstLast(2, undefined);
t.drop();
-t.save( { a:1 } );
-t.save( { a:1 } );
-assertFirstLast( undefined, undefined );
+t.save({a: 1});
+t.save({a: 1});
+assertFirstLast(undefined, undefined);
// Dotted field.
t.drop();
-t.save( { a:1, b:[ { c:1 }, { c:2 } ] } );
-t.save( { a:1, b:[ { c:6 }, {} ] } );
-assertFirstLast( [ 1, 2 ], [ 6 ], [], '$b.c' );
+t.save({a: 1, b: [{c: 1}, {c: 2}]});
+t.save({a: 1, b: [{c: 6}, {}]});
+assertFirstLast([1, 2], [6], [], '$b.c');
// Computed expressions.
t.drop();
-t.save( { a:1, b:1 } );
-t.save( { a:1, b:2 } );
-assertFirstLast( 1, 0, [], { $mod:[ '$b', 2 ] } );
-assertFirstLast( 0, 1, [], { $mod:[ { $add:[ '$b', 1 ] }, 2 ] } );
+t.save({a: 1, b: 1});
+t.save({a: 1, b: 2});
+assertFirstLast(1,
+ 0,
+ [],
+ {
+$mod:
+ ['$b', 2]
+ });
+assertFirstLast(0,
+ 1,
+ [],
+ {
+$mod:
+ [{$add: ['$b', 1]}, 2]
+ });
diff --git a/jstests/aggregation/bugs/ifnull.js b/jstests/aggregation/bugs/ifnull.js
index a8f0ccf4c66..8967ffe7ab7 100644
--- a/jstests/aggregation/bugs/ifnull.js
+++ b/jstests/aggregation/bugs/ifnull.js
@@ -5,58 +5,58 @@ load('jstests/aggregation/extras/utils.js');
t = db.jstests_aggregation_ifnull;
t.drop();
-t.save( {} );
+t.save({});
-function assertError( expectedErrorCode, ifNullSpec ) {
+function assertError(expectedErrorCode, ifNullSpec) {
assertErrorCode(t, {$project: {a: {$ifNull: ifNullSpec}}}, expectedErrorCode);
}
-function assertResult( expectedResult, arg0, arg1 ) {
- var res = t.aggregate( { $project:{ a:{ $ifNull:[ arg0, arg1 ] } } } ).toArray()[0];
- assert.eq( expectedResult, res.a );
+function assertResult(expectedResult, arg0, arg1) {
+ var res = t.aggregate({$project: {a: {$ifNull: [arg0, arg1]}}}).toArray()[0];
+ assert.eq(expectedResult, res.a);
}
// Wrong number of args.
-assertError( 16020, [] );
-assertError( 16020, [1] );
-assertError( 16020, [null] );
-assertError( 16020, [1,1,1] );
-assertError( 16020, [1,1,null] );
-assertError( 16020, [1,1,undefined] );
+assertError(16020, []);
+assertError(16020, [1]);
+assertError(16020, [null]);
+assertError(16020, [1, 1, 1]);
+assertError(16020, [1, 1, null]);
+assertError(16020, [1, 1, undefined]);
// First arg non null.
-assertResult( 1, 1, 2 );
-assertResult( 2, 2, 1 );
-assertResult( false, false, 1 );
-assertResult( '', '', 1 );
-assertResult( [], [], 1 );
-assertResult( {}, {}, 1 );
-assertResult( 1, 1, null );
-assertResult( 2, 2, undefined );
+assertResult(1, 1, 2);
+assertResult(2, 2, 1);
+assertResult(false, false, 1);
+assertResult('', '', 1);
+assertResult([], [], 1);
+assertResult({}, {}, 1);
+assertResult(1, 1, null);
+assertResult(2, 2, undefined);
// First arg null.
-assertResult( 2, null, 2 );
-assertResult( 1, null, 1 );
-assertResult( null, null, null );
-assertResult( undefined, null, undefined );
+assertResult(2, null, 2);
+assertResult(1, null, 1);
+assertResult(null, null, null);
+assertResult(undefined, null, undefined);
// First arg undefined.
-assertResult( 2, undefined, 2 );
-assertResult( 1, undefined, 1 );
-assertResult( null, undefined, null );
-assertResult( undefined, undefined, undefined );
+assertResult(2, undefined, 2);
+assertResult(1, undefined, 1);
+assertResult(null, undefined, null);
+assertResult(undefined, undefined, undefined);
// Computed expression.
-assertResult( 3, { $add:[ 1, 2 ] }, 5 );
-assertResult( 20, '$missingField', { $multiply:[ 4, 5 ] } );
+assertResult(3, {$add: [1, 2]}, 5);
+assertResult(20, '$missingField', {$multiply: [4, 5]});
// Divide/mod by 0.
-assertError(16608 , [{$divide: [1, 0]}, 0]);
-assertError(16610 , [{$mod: [1, 0]}, 0]);
+assertError(16608, [{$divide: [1, 0]}, 0]);
+assertError(16610, [{$mod: [1, 0]}, 0]);
// Nested.
t.drop();
-t.save( { d:'foo' } );
-assertResult( 'foo', '$a', { $ifNull:[ '$b', { $ifNull:[ '$c', '$d' ] } ] } );
-t.update( {}, { $set:{ b:'bar' } } );
-assertResult( 'bar', '$a', { $ifNull:[ '$b', { $ifNull:[ '$c', '$d' ] } ] } );
+t.save({d: 'foo'});
+assertResult('foo', '$a', {$ifNull: ['$b', {$ifNull: ['$c', '$d']}]});
+t.update({}, {$set: {b: 'bar'}});
+assertResult('bar', '$a', {$ifNull: ['$b', {$ifNull: ['$c', '$d']}]});
diff --git a/jstests/aggregation/bugs/lookup_unwind_getmore.js b/jstests/aggregation/bugs/lookup_unwind_getmore.js
index e15e4155136..6c8d886b78f 100644
--- a/jstests/aggregation/bugs/lookup_unwind_getmore.js
+++ b/jstests/aggregation/bugs/lookup_unwind_getmore.js
@@ -31,17 +31,17 @@
aggregate: 'source',
pipeline: [
{
- $lookup: {
- from: 'dest',
- localField: 'local',
- foreignField: 'foreign',
- as: 'matches',
- }
+ $lookup: {
+ from: 'dest',
+ localField: 'local',
+ foreignField: 'foreign',
+ as: 'matches',
+ }
},
{
- $unwind: {
- path: '$matches',
- },
+ $unwind: {
+ path: '$matches',
+ },
},
],
cursor: {
diff --git a/jstests/aggregation/bugs/match.js b/jstests/aggregation/bugs/match.js
index 70fb81e9520..fbc467812d7 100644
--- a/jstests/aggregation/bugs/match.js
+++ b/jstests/aggregation/bugs/match.js
@@ -6,11 +6,16 @@ load('jstests/aggregation/extras/utils.js');
t = db.jstests_aggregation_match;
t.drop();
-identityProjection = { _id:'$_id', a:'$a' };
+identityProjection = {
+ _id: '$_id',
+ a: '$a'
+};
/** Assert that an aggregation generated the expected error. */
-function assertError( expectedCode, matchSpec ) {
- matchStage = { $match:matchSpec };
+function assertError(expectedCode, matchSpec) {
+ matchStage = {
+ $match: matchSpec
+ };
// Check where matching is folded in to DocumentSourceCursor.
assertErrorCode(t, [matchStage], expectedCode);
// Check where matching is not folded in to DocumentSourceCursor.
@@ -18,166 +23,174 @@ function assertError( expectedCode, matchSpec ) {
}
/** Assert that the contents of two arrays are equal, ignoring element ordering. */
-function assertEqualResultsUnordered( one, two ) {
- oneStr = one.map( function( x ) { return tojson( x ); } );
- twoStr = two.map( function( x ) { return tojson( x ); } );
+function assertEqualResultsUnordered(one, two) {
+ oneStr = one.map(function(x) {
+ return tojson(x);
+ });
+ twoStr = two.map(function(x) {
+ return tojson(x);
+ });
oneStr.sort();
twoStr.sort();
- assert.eq( oneStr, twoStr );
+ assert.eq(oneStr, twoStr);
}
/** Assert that an aggregation result is as expected. */
-function assertResults( expectedResults, matchSpec ) {
- findResults = t.find( matchSpec ).toArray();
- if ( expectedResults ) {
- assertEqualResultsUnordered( expectedResults, findResults );
+function assertResults(expectedResults, matchSpec) {
+ findResults = t.find(matchSpec).toArray();
+ if (expectedResults) {
+ assertEqualResultsUnordered(expectedResults, findResults);
}
- matchStage = { $match:matchSpec };
+ matchStage = {
+ $match: matchSpec
+ };
// Check where matching is folded in to DocumentSourceCursor.
- assertEqualResultsUnordered( findResults, t.aggregate( matchStage ).toArray() );
+ assertEqualResultsUnordered(findResults, t.aggregate(matchStage).toArray());
// Check where matching is not folded in to DocumentSourceCursor.
- assertEqualResultsUnordered( findResults,
- t.aggregate( { $project:identityProjection },
- matchStage ).toArray() );
+ assertEqualResultsUnordered(findResults,
+ t.aggregate({$project: identityProjection}, matchStage).toArray());
}
// Invalid matcher syntax.
-assertError( 2, { a:{ $mod:[ 0 /* invalid */, 0 ] } } );
+assertError(2, {a: {$mod: [0 /* invalid */, 0]}});
// $where not allowed.
-assertError( 16395, { $where:'true' } );
+assertError(16395, {$where: 'true'});
// Geo not allowed.
-assertError( 16424, { $match:{ a:{ $near:[ 0, 0 ] } } } );
+assertError(16424, {$match: {a: {$near: [0, 0]}}});
// Update modifier not allowed.
-if ( 0 ) { // SERVER-6650
-assertError( 0, { a:1, $inc:{ b:1 } } );
+if (0) { // SERVER-6650
+ assertError(0, {a: 1, $inc: {b: 1}});
}
// Aggregation expression not allowed.
-if ( 0 ) { // SERVER-6650
-assertError( 0, { a:1, b:{ $gt:{ $add:[ 1, 1 ] } } } );
+if (0) { // SERVER-6650
+ assertError(0, {a: 1, b: {$gt: {$add: [1, 1]}}});
}
-function checkMatchResults( indexed ) {
-
+function checkMatchResults(indexed) {
// No results.
t.remove({});
- assertResults( [], {} );
+ assertResults([], {});
- t.save( { _id:0, a:1 } );
- t.save( { _id:1, a:2 } );
- t.save( { _id:2, a:3 } );
+ t.save({_id: 0, a: 1});
+ t.save({_id: 1, a: 2});
+ t.save({_id: 2, a: 3});
// Empty query.
- assertResults( [ { _id:0, a:1 }, { _id:1, a:2 }, { _id:2, a:3 } ], {} );
+ assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}], {});
// Simple queries.
- assertResults( [ { _id:0, a:1 } ], { a:1 } );
- assertResults( [ { _id:1, a:2 } ], { a:2 } );
- assertResults( [ { _id:1, a:2 }, { _id:2, a:3 } ], { a:{ $gt:1 } } );
- assertResults( [ { _id:0, a:1 }, { _id:1, a:2 } ], { a:{ $lte:2 } } );
- assertResults( [ { _id:0, a:1 }, { _id:2, a:3 } ], { a:{ $in:[ 1, 3 ] } } );
+ assertResults([{_id: 0, a: 1}], {a: 1});
+ assertResults([{_id: 1, a: 2}], {a: 2});
+ assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}], {a: {$gt: 1}});
+ assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}], {a: {$lte: 2}});
+ assertResults([{_id: 0, a: 1}, {_id: 2, a: 3}], {a: {$in: [1, 3]}});
// Regular expression.
t.remove({});
- t.save( { _id:0, a:'x' } );
- t.save( { _id:1, a:'yx' } );
- assertResults( [ { _id:0, a:'x' } ], { a:/^x/ } );
- assertResults( [ { _id:0, a:'x' }, { _id:1, a:'yx' } ], { a:/x/ } );
+ t.save({_id: 0, a: 'x'});
+ t.save({_id: 1, a: 'yx'});
+ assertResults([{_id: 0, a: 'x'}], {a: /^x/});
+ assertResults([{_id: 0, a: 'x'}, {_id: 1, a: 'yx'}], {a: /x/});
// Dotted field.
t.remove({});
- t.save( { _id:0, a:{ b:4 } } );
- t.save( { _id:1, a:2 } );
- assertResults( [ { _id:0, a:{ b:4 } } ], { 'a.b':4 } );
+ t.save({_id: 0, a: {b: 4}});
+ t.save({_id: 1, a: 2});
+ assertResults([{_id: 0, a: {b: 4}}], {'a.b': 4});
// Value within an array.
t.remove({});
- t.save( { _id:0, a:[ 1, 2, 3 ] } );
- t.save( { _id:1, a:[ 2, 2, 3 ] } );
- t.save( { _id:2, a:[ 2, 2, 2 ] } );
- assertResults( [ { _id:0, a:[ 1, 2, 3 ] }, { _id:1, a:[ 2, 2, 3 ] } ], { a:3 } );
+ t.save({_id: 0, a: [1, 2, 3]});
+ t.save({_id: 1, a: [2, 2, 3]});
+ t.save({_id: 2, a: [2, 2, 2]});
+ assertResults([{_id: 0, a: [1, 2, 3]}, {_id: 1, a: [2, 2, 3]}], {a: 3});
// Missing, null, $exists matching.
t.remove({});
- t.save( { _id:0 } );
- t.save( { _id:1, a:null } );
- if ( 0 ) { // SERVER-6571
- t.save( { _id:2, a:undefined } );
+ t.save({_id: 0});
+ t.save({_id: 1, a: null});
+ if (0) { // SERVER-6571
+ t.save({_id: 2, a: undefined});
}
- t.save( { _id:3, a:0 } );
- assertResults( [ { _id:0 }, { _id:1, a:null } ], { a:null } );
- assertResults( null, { a:{ $exists:true } } );
- assertResults( null, { a:{ $exists:false } } );
+ t.save({_id: 3, a: 0});
+ assertResults([{_id: 0}, {_id: 1, a: null}], {a: null});
+ assertResults(null, {a: {$exists: true}});
+ assertResults(null, {a: {$exists: false}});
// $elemMatch
t.remove({});
- t.save( { _id:0, a:[ 1, 2 ] } );
- t.save( { _id:1, a:[ 1, 2, 3 ] } );
- assertResults( [ { _id:1, a:[ 1, 2, 3 ] } ], { a:{ $elemMatch:{ $gt:1, $mod:[ 2, 1 ] } } } );
+ t.save({_id: 0, a: [1, 2]});
+ t.save({_id: 1, a: [1, 2, 3]});
+ assertResults([{_id: 1, a: [1, 2, 3]}], {a: {$elemMatch: {$gt: 1, $mod: [2, 1]}}});
t.remove({});
- t.save( { _id:0, a:[ { b:1 }, { c:2 } ] } );
- t.save( { _id:1, a:[ { b:1, c:2 } ] } );
- assertResults( [ { _id:1, a:[ { b:1, c:2 } ] } ], { a:{ $elemMatch:{ b:1, c:2 } } } );
+ t.save({_id: 0, a: [{b: 1}, {c: 2}]});
+ t.save({_id: 1, a: [{b: 1, c: 2}]});
+ assertResults([{_id: 1, a: [{b: 1, c: 2}]}], {a: {$elemMatch: {b: 1, c: 2}}});
// $size
t.remove({});
- t.save( {} );
- t.save( { a:null } );
- t.save( { a:[] } );
- t.save( { a:[ 1 ] } );
- t.save( { a:[ 1, 2 ] } );
- assertResults( null, { a:{ $size:0 } } );
- assertResults( null, { a:{ $size:1 } } );
- assertResults( null, { a:{ $size:2 } } );
+ t.save({});
+ t.save({a: null});
+ t.save({a: []});
+ t.save({a: [1]});
+ t.save({a: [1, 2]});
+ assertResults(null, {a: {$size: 0}});
+ assertResults(null, {a: {$size: 1}});
+ assertResults(null, {a: {$size: 2}});
// $type
t.remove({});
- t.save( {} );
- t.save( { a:null } );
- if ( 0 ) { // SERVER-6571
- t.save( { a:undefined } );
+ t.save({});
+ t.save({a: null});
+ if (0) { // SERVER-6571
+ t.save({a: undefined});
}
- t.save( { a:NumberInt( 1 ) } );
- t.save( { a:NumberLong( 2 ) } );
- t.save( { a:66.6 } );
- t.save( { a:'abc' } );
- t.save( { a:/xyz/ } );
- t.save( { a:{ q:1 } } );
- t.save( { a:true } );
- t.save( { a:new Date() } );
- t.save( { a:new ObjectId() } );
- for( type = 1; type <= 18; ++type ) {
- assertResults( null, { a:{ $type:type } } );
+ t.save({a: NumberInt(1)});
+ t.save({a: NumberLong(2)});
+ t.save({a: 66.6});
+ t.save({a: 'abc'});
+ t.save({a: /xyz/});
+ t.save({a: {q: 1}});
+ t.save({a: true});
+ t.save({a: new Date()});
+ t.save({a: new ObjectId()});
+ for (type = 1; type <= 18; ++type) {
+ assertResults(null, {a: {$type: type}});
}
// $atomic does not affect results.
t.remove({});
- t.save( { _id:0, a:1 } );
- t.save( { _id:1, a:2 } );
- t.save( { _id:2, a:3 } );
- assertResults( [ { _id:0, a:1 } ], { a:1, $atomic:true } );
- assertResults( [ { _id:1, a:2 } ], { a:2, $atomic:true } );
- assertResults( [ { _id:1, a:2 }, { _id:2, a:3 } ], { a:{ $gt:1 }, $atomic:true } );
- assertResults( [ { _id:0, a:1 }, { _id:1, a:2 } ], { a:{ $lte:2 }, $atomic:true } );
- assertResults( [ { _id:0, a:1 }, { _id:2, a:3 } ], { a:{ $in:[ 1, 3 ] }, $atomic:true } );
+ t.save({_id: 0, a: 1});
+ t.save({_id: 1, a: 2});
+ t.save({_id: 2, a: 3});
+ assertResults([{_id: 0, a: 1}], {a: 1, $atomic: true});
+ assertResults([{_id: 1, a: 2}], {a: 2, $atomic: true});
+ assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}], {a: {$gt: 1}, $atomic: true});
+ assertResults([{_id: 0, a: 1}, {_id: 1, a: 2}], {a: {$lte: 2}, $atomic: true});
+ assertResults([{_id: 0, a: 1}, {_id: 2, a: 3}], {a: {$in: [1, 3]}, $atomic: true});
// $and
- assertResults( [ { _id:1, a:2 } ], { $and:[ { a:2 }, { _id:1 } ] } );
- assertResults( [], { $and:[ { a:1 }, { _id:1 } ] } );
- assertResults( [ { _id:1, a:2 }, { _id:2, a:3 } ],
- { $and:[ { $or:[ { _id:1 }, { a:3 } ] }, { $or:[ { _id:2 }, { a:2 } ] } ] } );
+ assertResults([{_id: 1, a: 2}], {$and: [{a: 2}, {_id: 1}]});
+ assertResults([],
+ {
+ $and:
+ [{a: 1}, {_id: 1}]
+ });
+ assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}],
+ {$and: [{$or: [{_id: 1}, {a: 3}]}, {$or: [{_id: 2}, {a: 2}]}]});
// $or
- assertResults( [ { _id:0, a:1 }, { _id:2, a:3 } ], { $or:[ { _id:0 }, { a:3 } ] } );
+ assertResults([{_id: 0, a: 1}, {_id: 2, a: 3}], {$or: [{_id: 0}, {a: 3}]});
}
-checkMatchResults( false );
-t.ensureIndex( { a:1 } );
-checkMatchResults( true );
-t.ensureIndex( { 'a.b':1 } );
-t.ensureIndex( { 'a.c':1 } );
-checkMatchResults( true );
+checkMatchResults(false);
+t.ensureIndex({a: 1});
+checkMatchResults(true);
+t.ensureIndex({'a.b': 1});
+t.ensureIndex({'a.c': 1});
+checkMatchResults(true);
diff --git a/jstests/aggregation/bugs/server10176.js b/jstests/aggregation/bugs/server10176.js
index 50d02ce3a8e..5a56585265d 100644
--- a/jstests/aggregation/bugs/server10176.js
+++ b/jstests/aggregation/bugs/server10176.js
@@ -32,32 +32,33 @@ load('jstests/aggregation/extras/utils.js');
// valid use of $abs: numbers become positive, null/undefined/nonexistent become null
- var results = coll.aggregate([{$project: {a: {$abs: "$a" }}}]).toArray();
- assert.eq(results, [
- {_id: 0, a: 5},
- {_id: 1, a: 5},
- {_id: 2, a: 5.5},
- {_id: 3, a: 5.5},
- {_id: 4, a: 5},
- {_id: 5, a: 5},
- {_id: 6, a: NumberLong("5")},
- {_id: 7, a: NumberLong("5")},
- {_id: 8, a: 0},
- {_id: 9, a: 0},
- {_id: 10, a: 0},
- {_id: 11, a: NumberLong(Math.pow(2, 31))},
- {_id: 12, a: Math.pow(2, 31)},
- {_id: 13, a: NumberLong("1152921504606846977")},
- {_id: 14, a: NumberLong("1152921504606846977")},
- {_id: 15, a: null},
- {_id: 16, a: null},
- {_id: 17, a: NaN},
- {_id: 18, a: null},
- ]);
+ var results = coll.aggregate([{$project: {a: {$abs: "$a"}}}]).toArray();
+ assert.eq(results,
+ [
+ {_id: 0, a: 5},
+ {_id: 1, a: 5},
+ {_id: 2, a: 5.5},
+ {_id: 3, a: 5.5},
+ {_id: 4, a: 5},
+ {_id: 5, a: 5},
+ {_id: 6, a: NumberLong("5")},
+ {_id: 7, a: NumberLong("5")},
+ {_id: 8, a: 0},
+ {_id: 9, a: 0},
+ {_id: 10, a: 0},
+ {_id: 11, a: NumberLong(Math.pow(2, 31))},
+ {_id: 12, a: Math.pow(2, 31)},
+ {_id: 13, a: NumberLong("1152921504606846977")},
+ {_id: 14, a: NumberLong("1152921504606846977")},
+ {_id: 15, a: null},
+ {_id: 16, a: null},
+ {_id: 17, a: NaN},
+ {_id: 18, a: null},
+ ]);
// Invalid
// using $abs on string
- assertErrorCode(coll, [{$project: {a: {$abs: "string"}}}], 28765);
+ assertErrorCode(coll, [{$project: {a: {$abs: "string"}}}], 28765);
// using $abs on LLONG_MIN (-2 ^ 63)
assertErrorCode(coll, [{$project: {a: {$abs: NumberLong("-9223372036854775808")}}}], 28680);
diff --git a/jstests/aggregation/bugs/server10530.js b/jstests/aggregation/bugs/server10530.js
index eebfd60f15b..9d361dc16a7 100644
--- a/jstests/aggregation/bugs/server10530.js
+++ b/jstests/aggregation/bugs/server10530.js
@@ -3,9 +3,9 @@
var t = db.server10530;
t.drop();
-t.insert({big: Array(1024*1024).toString()});
-t.insert({big: Array(16*1024*1024 - 1024).toString()});
-t.insert({big: Array(1024*1024).toString()});
+t.insert({big: Array(1024 * 1024).toString()});
+t.insert({big: Array(16 * 1024 * 1024 - 1024).toString()});
+t.insert({big: Array(1024 * 1024).toString()});
assert.eq(t.aggregate().itcount(), 3);
diff --git a/jstests/aggregation/bugs/server11118.js b/jstests/aggregation/bugs/server11118.js
index 1ec99024a82..da4e9862bad 100644
--- a/jstests/aggregation/bugs/server11118.js
+++ b/jstests/aggregation/bugs/server11118.js
@@ -9,15 +9,9 @@ function testFormat(date, formatStr, expectedStr) {
db.dates.drop();
db.dates.insert({date: date});
- var res = db.dates.aggregate([{$project: {
- _id: 0,
- formatted: {
- $dateToString: {
- format: formatStr,
- date: "$date"
- }
- }
- }}]).toArray();
+ var res = db.dates.aggregate([{
+ $project: {_id: 0, formatted: {$dateToString: {format: formatStr, date: "$date"}}}
+ }]).toArray();
assert.eq(res[0].formatted, expectedStr);
}
@@ -27,21 +21,16 @@ function testFormatError(formatObj, errCode) {
db.dates.drop();
db.dates.insert({tm: ISODate()});
- assertErrorCode(db.dates, {$project: {
- _id: 0,
- formatted: {
- $dateToString: formatObj
- }}}, errCode);
+ assertErrorCode(db.dates, {$project: {_id: 0, formatted: {$dateToString: formatObj}}}, errCode);
}
// Used to verify that only date values are accepted for date parameter
function testDateValueError(dateVal, errCode) {
- db.dates.drop();
- db.dates.insert({date: dateVal});
+ db.dates.drop();
+ db.dates.insert({date: dateVal});
- assertErrorCode(db.dates, { $project:
- { formatted: { $dateToString : { format: "%Y", date: "$date" }} }
- }, errCode);
+ assertErrorCode(
+ db.dates, {$project: {formatted: {$dateToString: {format: "%Y", date: "$date"}}}}, errCode);
}
var now = ISODate();
@@ -50,17 +39,16 @@ var now = ISODate();
testFormat(now,
"%%-%Y-%m-%d-%H-%M-%S-%L",
[
- "%",
- now.getUTCFullYear().zeroPad(4),
- (now.getUTCMonth() + 1).zeroPad(2),
- now.getUTCDate().zeroPad(2),
- now.getUTCHours().zeroPad(2),
- now.getUTCMinutes().zeroPad(2),
- now.getUTCSeconds().zeroPad(2),
- now.getUTCMilliseconds().zeroPad(3)
+ "%",
+ now.getUTCFullYear().zeroPad(4),
+ (now.getUTCMonth() + 1).zeroPad(2),
+ now.getUTCDate().zeroPad(2),
+ now.getUTCHours().zeroPad(2),
+ now.getUTCMinutes().zeroPad(2),
+ now.getUTCSeconds().zeroPad(2),
+ now.getUTCMilliseconds().zeroPad(3)
].join("-"));
-
// Padding tests
var padme = ISODate("2001-02-03T04:05:06.007Z");
@@ -77,16 +65,16 @@ testFormat(padme, "%L", padme.getUTCMilliseconds().zeroPad(3));
testFormat(now,
"%d%d***%d***%d**%d*%d",
[
- now.getUTCDate().zeroPad(2),
- now.getUTCDate().zeroPad(2),
- "***",
- now.getUTCDate().zeroPad(2),
- "***",
- now.getUTCDate().zeroPad(2),
- "**",
- now.getUTCDate().zeroPad(2),
- "*",
- now.getUTCDate().zeroPad(2)
+ now.getUTCDate().zeroPad(2),
+ now.getUTCDate().zeroPad(2),
+ "***",
+ now.getUTCDate().zeroPad(2),
+ "***",
+ now.getUTCDate().zeroPad(2),
+ "**",
+ now.getUTCDate().zeroPad(2),
+ "*",
+ now.getUTCDate().zeroPad(2)
].join(""));
// JS doesn't have equivalents of these format specifiers
@@ -105,17 +93,17 @@ testFormatError({format: "%Y", date: "$date", extra: "whyamIhere"}, 18534);
testFormatError(["%Y", "$date"], 18629);
// Use invalid modifier at middle of string
-testFormatError({format:"%Y-%q", date: "$date"}, 18536);
+testFormatError({format: "%Y-%q", date: "$date"}, 18536);
// Odd number of percent signs at end
-testFormatError({format: "%U-%w-%j-%%%", date:"$date"}, 18535);
+testFormatError({format: "%U-%w-%j-%%%", date: "$date"}, 18535);
// Odd number of percent signs at middle
// will get interpreted as an invalid modifier since it will try to use '%A'
-testFormatError({format: "AAAAA%%%AAAAAA", date:"$date"}, 18536);
+testFormatError({format: "AAAAA%%%AAAAAA", date: "$date"}, 18536);
// Format parameter not a string
-testFormatError({format: {iamalion: "roar"}, date:"$date"}, 18533);
+testFormatError({format: {iamalion: "roar"}, date: "$date"}, 18533);
///
/// Additional Tests
@@ -126,7 +114,7 @@ var date = ISODate("1999-08-29");
testFormat(date, "%%d", "%d");
-//A very long string of "%"s
+// A very long string of "%"s
var longstr = Array(1000).join("%%");
var halfstr = Array(1000).join("%");
testFormat(date, longstr, halfstr);
diff --git a/jstests/aggregation/bugs/server11675.js b/jstests/aggregation/bugs/server11675.js
index 513714f98d7..709120c27ca 100644
--- a/jstests/aggregation/bugs/server11675.js
+++ b/jstests/aggregation/bugs/server11675.js
@@ -5,8 +5,8 @@ var server11675 = function() {
var t = db.server11675;
t.drop();
- if (typeof(RUNNING_IN_SHARDED_AGG_TEST) != 'undefined') { // see end of testshard1.js
- db.adminCommand( { shardcollection : t.getFullName(), key : { "_id" : 1 } } );
+ if (typeof(RUNNING_IN_SHARDED_AGG_TEST) != 'undefined') { // see end of testshard1.js
+ db.adminCommand({shardcollection: t.getFullName(), key: {"_id": 1}});
}
t.insert({_id: 1, text: "apple", words: 1});
@@ -22,7 +22,7 @@ var server11675 = function() {
var pipeline = [{$match: query.query}];
if ('project' in query) {
- cursor = t.find(query.query, query.project); // no way to add to constructed cursor
+ cursor = t.find(query.query, query.project); // no way to add to constructed cursor
pipeline.push({$project: query.project});
}
@@ -46,89 +46,99 @@ var server11675 = function() {
assert.docEq(aggRes, findRes);
};
- assertSameAsFind({query: {}}); // sanity check
- assertSameAsFind({query: {$text:{$search:"apple"}}});
- assertSameAsFind({query: {$and:[{$text:{$search:"apple"}}, {_id:1}]}});
- assertSameAsFind({query: {$text:{$search:"apple"}}
- ,project: {_id:1, score: {$meta: "textScore"}}
- });
- assertSameAsFind({query: {$text:{$search:"apple banana"}}
- ,project: {_id:1, score: {$meta: "textScore"}}
- });
- assertSameAsFind({query: {$text:{$search:"apple banana"}}
- ,project: {_id:1, score: {$meta: "textScore"}}
- ,sort: {score: {$meta: "textScore"}}
- });
- assertSameAsFind({query: {$text:{$search:"apple banana"}}
- ,project: {_id:1, score: {$meta: "textScore"}}
- ,sort: {score: {$meta: "textScore"}}
- ,limit: 1
- });
- assertSameAsFind({query: {$text:{$search:"apple banana"}}
- ,project: {_id:1, score: {$meta: "textScore"}}
- ,sort: {score: {$meta: "textScore"}}
- ,skip: 1
- });
- assertSameAsFind({query: {$text:{$search:"apple banana"}}
- ,project: {_id:1, score: {$meta: "textScore"}}
- ,sort: {score: {$meta: "textScore"}}
- ,skip: 1
- ,limit: 1
- });
+ assertSameAsFind({query: {}}); // sanity check
+ assertSameAsFind({query: {$text: {$search: "apple"}}});
+ assertSameAsFind({query: {$and: [{$text: {$search: "apple"}}, {_id: 1}]}});
+ assertSameAsFind(
+ {query: {$text: {$search: "apple"}}, project: {_id: 1, score: {$meta: "textScore"}}});
+ assertSameAsFind({
+ query: {$text: {$search: "apple banana"}},
+ project: {_id: 1, score: {$meta: "textScore"}}
+ });
+ assertSameAsFind({
+ query: {$text: {$search: "apple banana"}},
+ project: {_id: 1, score: {$meta: "textScore"}},
+ sort: {score: {$meta: "textScore"}}
+ });
+ assertSameAsFind({
+ query: {$text: {$search: "apple banana"}},
+ project: {_id: 1, score: {$meta: "textScore"}},
+ sort: {score: {$meta: "textScore"}},
+ limit: 1
+ });
+ assertSameAsFind({
+ query: {$text: {$search: "apple banana"}},
+ project: {_id: 1, score: {$meta: "textScore"}},
+ sort: {score: {$meta: "textScore"}},
+ skip: 1
+ });
+ assertSameAsFind({
+ query: {$text: {$search: "apple banana"}},
+ project: {_id: 1, score: {$meta: "textScore"}},
+ sort: {score: {$meta: "textScore"}},
+ skip: 1,
+ limit: 1
+ });
// sharded find requires projecting the score to sort, but sharded agg does not.
var findRes = t.find({$text: {$search: "apple banana"}}, {textScore: {$meta: 'textScore'}})
- .sort({textScore: {$meta: 'textScore'}})
- .map(function(obj) {
- delete obj.textScore; // remove it to match agg output
- return obj;
- });
- var res = t.aggregate([{$match: {$text: {$search: 'apple banana'}}}
- ,{$sort: {textScore: {$meta: 'textScore'}}}
- ]).toArray();
+ .sort({textScore: {$meta: 'textScore'}})
+ .map(function(obj) {
+ delete obj.textScore; // remove it to match agg output
+ return obj;
+ });
+ var res = t.aggregate([
+ {$match: {$text: {$search: 'apple banana'}}},
+ {$sort: {textScore: {$meta: 'textScore'}}}
+ ]).toArray();
assert.eq(res, findRes);
// Make sure {$meta: 'textScore'} can be used as a sub-expression
- var res = t.aggregate([{$match: {_id:1, $text: {$search: 'apple'}}}
- ,{$project: {words: 1
- ,score: {$meta: 'textScore'}
- ,wordsTimesScore: {$multiply: ['$words', {$meta:'textScore'}]}
- }}
- ]).toArray();
+ var res = t.aggregate([
+ {$match: {_id: 1, $text: {$search: 'apple'}}},
+ {
+ $project: {
+ words: 1,
+ score: {$meta: 'textScore'},
+ wordsTimesScore: {$multiply: ['$words', {$meta: 'textScore'}]}
+ }
+ }
+ ]).toArray();
assert.eq(res[0].wordsTimesScore, res[0].words * res[0].score, tojson(res));
// And can be used in $group
- var res = t.aggregate([{$match: {_id: 1, $text: {$search: 'apple banana'}}}
- ,{$group: {_id: {$meta: 'textScore'}
- ,score: {$first: {$meta: 'textScore'}}
- }}
- ]).toArray();
+ var res = t.aggregate([
+ {$match: {_id: 1, $text: {$search: 'apple banana'}}},
+ {$group: {_id: {$meta: 'textScore'}, score: {$first: {$meta: 'textScore'}}}}
+ ]).toArray();
assert.eq(res[0]._id, res[0].score, tojson(res));
// Make sure metadata crosses shard -> merger boundary
- var res = t.aggregate([{$match: {_id:1, $text: {$search: 'apple'}}}
- ,{$project: {scoreOnShard: {$meta: 'textScore'} }}
- ,{$limit:1} // force a split. later stages run on merger
- ,{$project: {scoreOnShard:1
- ,scoreOnMerger: {$meta: 'textScore'} }}
- ]).toArray();
+ var res = t.aggregate([
+ {$match: {_id: 1, $text: {$search: 'apple'}}},
+ {$project: {scoreOnShard: {$meta: 'textScore'}}},
+ {$limit: 1} // force a split. later stages run on merger
+ ,
+ {$project: {scoreOnShard: 1, scoreOnMerger: {$meta: 'textScore'}}}
+ ]).toArray();
assert.eq(res[0].scoreOnMerger, res[0].scoreOnShard);
- var score = res[0].scoreOnMerger; // save for later tests
+ var score = res[0].scoreOnMerger; // save for later tests
// Make sure metadata crosses shard -> merger boundary even if not used on shard
- var res = t.aggregate([{$match: {_id:1, $text: {$search: 'apple'}}}
- ,{$limit:1} // force a split. later stages run on merger
- ,{$project: {scoreOnShard:1
- ,scoreOnMerger: {$meta: 'textScore'} }}
- ]).toArray();
+ var res = t.aggregate([
+ {$match: {_id: 1, $text: {$search: 'apple'}}},
+ {$limit: 1} // force a split. later stages run on merger
+ ,
+ {$project: {scoreOnShard: 1, scoreOnMerger: {$meta: 'textScore'}}}
+ ]).toArray();
assert.eq(res[0].scoreOnMerger, score);
// Make sure metadata works if first $project doesn't use it.
- var res = t.aggregate([{$match: {_id:1, $text: {$search: 'apple'}}}
- ,{$project: {_id:1}}
- ,{$project: {_id:1
- ,score: {$meta: 'textScore'} }}
- ]).toArray();
+ var res = t.aggregate([
+ {$match: {_id: 1, $text: {$search: 'apple'}}},
+ {$project: {_id: 1}},
+ {$project: {_id: 1, score: {$meta: 'textScore'}}}
+ ]).toArray();
assert.eq(res[0].score, score);
// Make sure the metadata is 'missing()' when it doesn't exist because it was never created
@@ -136,24 +146,26 @@ var server11675 = function() {
assert(!("score" in res[0]));
// Make sure the metadata is 'missing()' when it doesn't exist because the document changed
- var res = t.aggregate([{$match: {_id: 1, $text: {$search: 'apple banana'}}},
- {$group: {_id: 1, score: {$first: {$meta: 'textScore'}}}},
- {$project: {_id: 1, scoreAgain: {$meta: 'textScore'}}},
- ]).toArray();
+ var res = t.aggregate([
+ {$match: {_id: 1, $text: {$search: 'apple banana'}}},
+ {$group: {_id: 1, score: {$first: {$meta: 'textScore'}}}},
+ {$project: {_id: 1, scoreAgain: {$meta: 'textScore'}}},
+ ]).toArray();
assert(!("scoreAgain" in res[0]));
// Make sure metadata works after a $unwind
t.insert({_id: 5, text: 'mango', words: [1, 2, 3]});
- var res = t.aggregate([{$match: {$text: {$search: 'mango'}}},
- {$project: {score: {$meta: "textScore"}, _id:1, words: 1}},
- {$unwind: '$words'},
- {$project: {scoreAgain: {$meta: "textScore"}, score: 1}}
- ]).toArray();
+ var res = t.aggregate([
+ {$match: {$text: {$search: 'mango'}}},
+ {$project: {score: {$meta: "textScore"}, _id: 1, words: 1}},
+ {$unwind: '$words'},
+ {$project: {scoreAgain: {$meta: "textScore"}, score: 1}}
+ ]).toArray();
assert.eq(res[0].scoreAgain, res[0].score);
// Error checking
// $match, but wrong position
- assertErrorCode(t, [{$sort: {text: 1}} ,{$match: {$text: {$search: 'apple banana'}}}], 17313);
+ assertErrorCode(t, [{$sort: {text: 1}}, {$match: {$text: {$search: 'apple banana'}}}], 17313);
// wrong $stage, but correct position
assertErrorCode(t, [{$project: {searchValue: {$text: {$search: 'apple banana'}}}}], 15999);
diff --git a/jstests/aggregation/bugs/server12015.js b/jstests/aggregation/bugs/server12015.js
index a9a8d6ab859..af4ee75f92d 100644
--- a/jstests/aggregation/bugs/server12015.js
+++ b/jstests/aggregation/bugs/server12015.js
@@ -8,11 +8,14 @@
load("jstests/aggregation/extras/utils.js"); // For orderedArrayEq.
-(function (){
+(function() {
"use strict";
var coll = db.server12015;
coll.drop();
- var indexSpec = {a: 1, b: 1};
+ var indexSpec = {
+ a: 1,
+ b: 1
+ };
assert.writeOK(coll.insert({_id: 0, a: 0, b: 0}));
assert.writeOK(coll.insert({_id: 1, a: 0, b: 1}));
@@ -52,8 +55,8 @@ load("jstests/aggregation/extras/utils.js"); // For orderedArrayEq.
// Non-blocking $sort, uncovered $project.
assertResultsMatch([{$sort: {a: -1, b: -1}}, {$project: {_id: 1, a: 1, b: 1}}]);
assertResultsMatch([{$sort: {a: 1, b: 1}}, {$project: {_id: 1, a: 1, b: 1}}]);
- assertResultsMatch([{$sort: {a: 1, b: 1}},
- {$group: {_id: "$_id", arr: {$push: "$a"}, sum: {$sum: "$b"}}}]);
+ assertResultsMatch(
+ [{$sort: {a: 1, b: 1}}, {$group: {_id: "$_id", arr: {$push: "$a"}, sum: {$sum: "$b"}}}]);
// Non-blocking $sort, covered $project.
assertResultsMatch([{$sort: {a: -1, b: -1}}, {$project: {_id: 0, a: 1, b: 1}}]);
@@ -62,8 +65,8 @@ load("jstests/aggregation/extras/utils.js"); // For orderedArrayEq.
// Blocking $sort, uncovered $project.
assertResultsMatch([{$sort: {b: 1, a: -1}}, {$project: {_id: 1, a: 1, b: 1}}]);
- assertResultsMatch([{$sort: {b: 1, a: -1}},
- {$group: {_id: "$_id", arr: {$push: "$a"}, sum: {$sum: "$b"}}}]);
+ assertResultsMatch(
+ [{$sort: {b: 1, a: -1}}, {$group: {_id: "$_id", arr: {$push: "$a"}, sum: {$sum: "$b"}}}]);
// Blocking $sort, covered $project.
assertResultsMatch([{$sort: {b: 1, a: -1}}, {$project: {_id: 0, a: 1, b: 1}}]);
diff --git a/jstests/aggregation/bugs/server13715.js b/jstests/aggregation/bugs/server13715.js
index 608e94d37a4..27482cc1c46 100644
--- a/jstests/aggregation/bugs/server13715.js
+++ b/jstests/aggregation/bugs/server13715.js
@@ -7,19 +7,13 @@ t.drop();
t.save({_id: 0, name: "red", value: 2});
t.save({_id: 1, name: "blue", value: 1});
-var cursor = t.aggregate([
- {$match: {$or: [{name: "red"}, {name: "blue"}]}},
- {$sort: {value: 1}}
-]);
+var cursor = t.aggregate([{$match: {$or: [{name: "red"}, {name: "blue"}]}}, {$sort: {value: 1}}]);
assert.eq(1, cursor.next()["_id"]);
assert.eq(0, cursor.next()["_id"]);
// Repeat the test with an index.
t.ensureIndex({name: 1});
-cursor = t.aggregate([
- {$match: {$or: [{name: "red"}, {name: "blue"}]}},
- {$sort: {value: 1}}
-]);
+cursor = t.aggregate([{$match: {$or: [{name: "red"}, {name: "blue"}]}}, {$sort: {value: 1}}]);
assert.eq(1, cursor.next()["_id"]);
assert.eq(0, cursor.next()["_id"]);
diff --git a/jstests/aggregation/bugs/server14421.js b/jstests/aggregation/bugs/server14421.js
index b5e800ec999..3201e20a81a 100644
--- a/jstests/aggregation/bugs/server14421.js
+++ b/jstests/aggregation/bugs/server14421.js
@@ -1,11 +1,12 @@
// SERVER-14421 minDistance for $geoNear aggregation operator
-(function () {
+(function() {
'use strict';
var coll = db.mindistance;
coll.drop();
- assert.writeOK(coll.insert([{_id: 0, loc: {type: "Point", coordinates: [0,0]}},
- {_id: 1, loc: {type: "Point", coordinates: [0,0.01]}}
- ]));
+ assert.writeOK(coll.insert([
+ {_id: 0, loc: {type: "Point", coordinates: [0, 0]}},
+ {_id: 1, loc: {type: "Point", coordinates: [0, 0.01]}}
+ ]));
var response = coll.createIndex({loc: "2dsphere"});
assert.eq(response.ok, 1, "Could not create 2dsphere index");
var results = coll.aggregate([{
@@ -13,7 +14,7 @@
minDistance: 10000,
spherical: true,
distanceField: "distance",
- near: {type: "Point", coordinates: [0,0]}
+ near: {type: "Point", coordinates: [0, 0]}
}
}]);
assert.eq(results.itcount(), 0);
@@ -22,7 +23,7 @@
minDistance: 1,
spherical: true,
distanceField: "distance",
- near: {type: "Point", coordinates: [0,0]}
+ near: {type: "Point", coordinates: [0, 0]}
}
}]);
assert.eq(results.itcount(), 1);
@@ -31,7 +32,7 @@
minDistance: 0,
spherical: true,
distanceField: "distance",
- near: {type: "Point", coordinates: [0,0]}
+ near: {type: "Point", coordinates: [0, 0]}
}
}]);
assert.eq(results.itcount(), 2);
diff --git a/jstests/aggregation/bugs/server14969.js b/jstests/aggregation/bugs/server14969.js
index 4d169586d11..629e54505fc 100644
--- a/jstests/aggregation/bugs/server14969.js
+++ b/jstests/aggregation/bugs/server14969.js
@@ -4,7 +4,7 @@ var docsPerBatch = 3;
coll.drop();
// Initialize collection with eight 1M documents, and index on field "a".
-var longString = new Array(1024*1024).join('x');
+var longString = new Array(1024 * 1024).join('x');
for (var i = 0; i < 100; ++i) {
assert.writeOK(coll.insert({a: 1, bigField: longString}));
}
@@ -18,15 +18,15 @@ for (var i = 0; i < docsPerBatch; ++i) {
}
// Drop index "a".
-assert.commandWorked(coll.dropIndex({a:1}));
+assert.commandWorked(coll.dropIndex({a: 1}));
// Issue a getmore against agg cursor. Note that it is not defined whether the server continues to
// generate further results for the cursor.
try {
cursor.hasNext();
cursor.next();
+} catch (e) {
}
-catch (e) {}
// Verify that the server hasn't crashed.
assert.commandWorked(db.adminCommand({ping: 1}));
diff --git a/jstests/aggregation/bugs/server15810.js b/jstests/aggregation/bugs/server15810.js
index d1c903334a2..88ef3b7ca38 100644
--- a/jstests/aggregation/bugs/server15810.js
+++ b/jstests/aggregation/bugs/server15810.js
@@ -1,4 +1,4 @@
// SERVER-15810: Server crash when running a poorly formed command
var res = db.runCommand({aggregate: 1, pipeline: []});
-assert.commandFailed(res); // command must fail
+assert.commandFailed(res); // command must fail
// TODO(geert): assert(!('code' in res)); // but must not cause massert
diff --git a/jstests/aggregation/bugs/server17224.js b/jstests/aggregation/bugs/server17224.js
index 33042abab53..888c99b808c 100644
--- a/jstests/aggregation/bugs/server17224.js
+++ b/jstests/aggregation/bugs/server17224.js
@@ -15,9 +15,10 @@
t.insert({a: new Array(1024 * 1024 - 1105).join('a')});
// do not use cursor form, since it has a different workaroud for this issue.
- assert.commandFailed(
- db.runCommand({aggregate: t.getName(),
- pipeline: [{$match: {}}, {$group: {_id: null, arr: {$push: {a: '$a'}}}}]}));
+ assert.commandFailed(db.runCommand({
+ aggregate: t.getName(),
+ pipeline: [{$match: {}}, {$group: {_id: null, arr: {$push: {a: '$a'}}}}]
+ }));
// Make sure the server is still up.
assert.commandWorked(db.runCommand('ping'));
diff --git a/jstests/aggregation/bugs/server17943.js b/jstests/aggregation/bugs/server17943.js
index e23d1639a66..10dbac2c37a 100644
--- a/jstests/aggregation/bugs/server17943.js
+++ b/jstests/aggregation/bugs/server17943.js
@@ -9,8 +9,8 @@ load('jstests/aggregation/extras/utils.js');
var coll = db.agg_filter_expr;
coll.drop();
- assert.writeOK(coll.insert({_id: 0, a: [1,2,3,4,5]}));
- assert.writeOK(coll.insert({_id: 1, a: [2,4]}));
+ assert.writeOK(coll.insert({_id: 0, a: [1, 2, 3, 4, 5]}));
+ assert.writeOK(coll.insert({_id: 1, a: [2, 4]}));
assert.writeOK(coll.insert({_id: 2, a: []}));
assert.writeOK(coll.insert({_id: 3, a: [1]}));
assert.writeOK(coll.insert({_id: 4, a: null}));
@@ -18,9 +18,13 @@ load('jstests/aggregation/extras/utils.js');
assert.writeOK(coll.insert({_id: 6}));
// Create filter to only accept odd numbers.
- filterDoc = {input: '$a', as: 'x', cond: {$eq: [1, {$mod: ['$$x', 2]}]}};
+ filterDoc = {
+ input: '$a',
+ as: 'x',
+ cond: {$eq: [1, {$mod: ['$$x', 2]}]}
+ };
var expectedResults = [
- {_id: 0, b: [1,3,5]},
+ {_id: 0, b: [1, 3, 5]},
{_id: 1, b: []},
{_id: 2, b: []},
{_id: 3, b: [1]},
@@ -41,31 +45,57 @@ load('jstests/aggregation/extras/utils.js');
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28646);
// Extra field(s).
- filterDoc = {input: '$a', as: 'x', cond: true, extra: 1};
+ filterDoc = {
+ input: '$a',
+ as: 'x',
+ cond: true,
+ extra: 1
+ };
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28647);
// Missing 'input'.
- filterDoc = {as: 'x', cond: true};
+ filterDoc = {
+ as: 'x',
+ cond: true
+ };
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28648);
// Missing 'as'.
- filterDoc = {input: '$a', cond: true};
+ filterDoc = {
+ input: '$a',
+ cond: true
+ };
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28649);
// Missing 'cond'.
- filterDoc = {input: '$a', as: 'x'};
+ filterDoc = {
+ input: '$a',
+ as: 'x'
+ };
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28650);
// 'as' is not a valid variable name.
- filterDoc = {input: '$a', as: '$x', cond: true};
+ filterDoc = {
+ input: '$a',
+ as: '$x',
+ cond: true
+ };
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 16867);
// 'input' is not an array.
- filterDoc = {input: 'string', as: 'x', cond: true};
+ filterDoc = {
+ input: 'string',
+ as: 'x',
+ cond: true
+ };
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28651);
coll.drop();
assert.writeOK(coll.insert({a: 'string'}));
- filterDoc = {input: '$a', as: 'x', cond: true};
+ filterDoc = {
+ input: '$a',
+ as: 'x',
+ cond: true
+ };
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28651);
}());
diff --git a/jstests/aggregation/bugs/server18198.js b/jstests/aggregation/bugs/server18198.js
index 6d8d827e416..39cb37074e5 100644
--- a/jstests/aggregation/bugs/server18198.js
+++ b/jstests/aggregation/bugs/server18198.js
@@ -11,32 +11,41 @@
var commandsRan = [];
// hook in our patched mongo
var mockMongo = {
- getSlaveOk: function() { return true; },
+ getSlaveOk: function() {
+ return true;
+ },
runCommand: function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {ok: 1.0};
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
+ return {
+ ok: 1.0
+ };
+ },
+ getReadPref: function() {
+ return {
+ mode: "secondaryPreferred"
+ };
},
- getReadPref: function() { return {mode: "secondaryPreferred"}; },
- getReadPrefMode: function() { return "secondaryPreferred"; }
+ getReadPrefMode: function() {
+ return "secondaryPreferred";
+ }
};
db._mongo = mockMongo;
// this query should not get a read pref
- t.aggregate([{$sort: {"x" : 1}}, {$out: "foo"}]);
+ t.aggregate([{$sort: {"x": 1}}, {$out: "foo"}]);
assert.eq(commandsRan.length, 1);
// check that it doesn't have a read preference
assert(!commandsRan[0].cmd.hasOwnProperty("$readPreference"));
commandsRan = [];
- t.aggregate([{$sort: {"x" : 1}}]);
+ t.aggregate([{$sort: {"x": 1}}]);
// check another command was run
assert.eq(commandsRan.length, 1);
// check that it has a read preference
assert(commandsRan[0].cmd.hasOwnProperty("$readPreference"));
- }
- finally {
+ } finally {
db._mongo = mongo;
}
})();
diff --git a/jstests/aggregation/bugs/server18222.js b/jstests/aggregation/bugs/server18222.js
index cd1266a30c4..1a46ff349c8 100644
--- a/jstests/aggregation/bugs/server18222.js
+++ b/jstests/aggregation/bugs/server18222.js
@@ -1,5 +1,5 @@
// SERVER-18222: Add $isArray aggregation expression.
-(function (){
+(function() {
'use strict';
var coll = db.is_array_expr;
coll.drop();
@@ -20,13 +20,8 @@
assert.writeOK(coll.insert({_id: 10, x: ['0']}));
// Project field is_array to represent whether the field x was an array.
- var results = coll.aggregate([{$sort: {_id: 1}},
- {
- $project: {
- isArray: {$isArray: '$x'}
- }
- },
- ]).toArray();
+ var results =
+ coll.aggregate([{$sort: {_id: 1}}, {$project: {isArray: {$isArray: '$x'}}}, ]).toArray();
var expectedResults = [
{_id: 0, isArray: false},
{_id: 1, isArray: false},
diff --git a/jstests/aggregation/bugs/server18427.js b/jstests/aggregation/bugs/server18427.js
index 2fc9f4b70ad..35fcef8a4ac 100644
--- a/jstests/aggregation/bugs/server18427.js
+++ b/jstests/aggregation/bugs/server18427.js
@@ -64,15 +64,15 @@ load('jstests/aggregation/extras/utils.js');
// $pow -- if either input is a double return a double.
testOp({$pow: [10, 2]}, 100);
- testOp({$pow: [1/2, -1]}, 2);
+ testOp({$pow: [1 / 2, -1]}, 2);
testOp({$pow: [-2, 2]}, 4);
testOp({$pow: [NumberInt("2"), 2]}, 4);
testOp({$pow: [-2, NumberInt("2")]}, 4);
// If exponent is negative and base not -1, 0, or 1, return a double.
- testOp({$pow: [NumberLong("2"), NumberLong("-1")]}, 1/2);
- testOp({$pow: [NumberInt("4"), NumberInt("-1")]}, 1/4);
- testOp({$pow: [NumberInt("4"), NumberLong("-1")]}, 1/4);
+ testOp({$pow: [NumberLong("2"), NumberLong("-1")]}, 1 / 2);
+ testOp({$pow: [NumberInt("4"), NumberInt("-1")]}, 1 / 4);
+ testOp({$pow: [NumberInt("4"), NumberLong("-1")]}, 1 / 4);
testOp({$pow: [NumberInt("1"), NumberLong("-2")]}, NumberLong("1"));
testOp({$pow: [NumberInt("-1"), NumberLong("-2")]}, NumberLong("1"));
@@ -92,15 +92,15 @@ load('jstests/aggregation/extras/utils.js');
testOp({$pow: [NumberInt("4"), NumberInt("2")]}, 16);
// $exp always returns doubles, since e is a double.
- testOp({$exp: [NumberInt("-1")]}, 1/Math.E);
+ testOp({$exp: [NumberInt("-1")]}, 1 / Math.E);
testOp({$exp: [NumberLong("1")]}, Math.E);
// Null input results in null.
testOp({$pow: [null, 2]}, null);
- testOp({$pow: [1/2, null]}, null);
+ testOp({$pow: [1 / 2, null]}, null);
testOp({$exp: [null]}, null);
// NaN input results in NaN.
testOp({$pow: [NaN, 2]}, NaN);
- testOp({$pow: [1/2, NaN]}, NaN);
+ testOp({$pow: [1 / 2, NaN]}, NaN);
testOp({$exp: [NaN]}, NaN);
// Invalid inputs - non-numeric/non-null types, or 0 to a negative exponent.
diff --git a/jstests/aggregation/bugs/server19095.js b/jstests/aggregation/bugs/server19095.js
index 3728c06c06b..7d023ebc271 100644
--- a/jstests/aggregation/bugs/server19095.js
+++ b/jstests/aggregation/bugs/server19095.js
@@ -8,23 +8,23 @@ load("jstests/aggregation/extras/utils.js");
// Used by testPipeline to sort result documents. All _ids must be primitives.
function compareId(a, b) {
- if (a._id < b._id) {
- return -1;
- }
- if (a._id > b._id) {
- return 1;
- }
- return 0;
+ if (a._id < b._id) {
+ return -1;
+ }
+ if (a._id > b._id) {
+ return 1;
+ }
+ return 0;
}
// Helper for testing that pipeline returns correct set of results.
function testPipeline(pipeline, expectedResult, collection) {
assert.eq(collection.aggregate(pipeline).toArray().sort(compareId),
- expectedResult.sort(compareId));
+ expectedResult.sort(compareId));
}
function runTest(coll, from) {
- var db = null; // Using the db variable is banned in this function.
+ var db = null; // Using the db variable is banned in this function.
assert.writeOK(coll.insert({_id: 0, a: 1}));
assert.writeOK(coll.insert({_id: 1, a: null}));
@@ -45,14 +45,9 @@ load("jstests/aggregation/extras/utils.js");
{_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
{_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
];
- testPipeline([{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: "from",
- as: "same"
- }
- }], expectedResults, coll);
+ testPipeline([{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}],
+ expectedResults,
+ coll);
// If localField is nonexistent, it is treated as if it is null.
expectedResults = [
@@ -61,13 +56,10 @@ load("jstests/aggregation/extras/utils.js");
{_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
];
testPipeline([{
- $lookup: {
- localField: "nonexistent",
- foreignField: "b",
- from: "from",
- as: "same"
- }
- }], expectedResults, coll);
+ $lookup: {localField: "nonexistent", foreignField: "b", from: "from", as: "same"}
+ }],
+ expectedResults,
+ coll);
// If foreignField is nonexistent, it is treated as if it is null.
expectedResults = [
@@ -76,36 +68,24 @@ load("jstests/aggregation/extras/utils.js");
{_id: 2, "same": [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]}
];
testPipeline([{
- $lookup: {
- localField: "a",
- foreignField: "nonexistent",
- from: "from",
- as: "same"
- }
- }], expectedResults, coll);
+ $lookup: {localField: "a", foreignField: "nonexistent", from: "from", as: "same"}
+ }],
+ expectedResults,
+ coll);
// If there are no matches or the from coll doesn't exist, the result is an empty array.
- expectedResults = [
- {_id: 0, a: 1, "same": []},
- {_id: 1, a: null, "same": []},
- {_id: 2, "same": []}
- ];
+ expectedResults =
+ [{_id: 0, a: 1, "same": []}, {_id: 1, a: null, "same": []}, {_id: 2, "same": []}];
testPipeline([{
- $lookup: {
- localField: "_id",
- foreignField: "nonexistent",
- from: "from",
- as: "same"
- }
- }], expectedResults, coll);
+ $lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}
+ }],
+ expectedResults,
+ coll);
testPipeline([{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: "nonexistent",
- as: "same"
- }
- }], expectedResults, coll);
+ $lookup: {localField: "a", foreignField: "b", from: "nonexistent", as: "same"}
+ }],
+ expectedResults,
+ coll);
// If field name specified by "as" already exists, it is overwritten.
expectedResults = [
@@ -113,42 +93,26 @@ load("jstests/aggregation/extras/utils.js");
{_id: 1, "a": [{_id: 1, b: null}, {_id: 2}]},
{_id: 2, "a": [{_id: 1, b: null}, {_id: 2}]}
];
- testPipeline([{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: "from",
- as: "a"
- }
- }], expectedResults, coll);
-
+ testPipeline([{$lookup: {localField: "a", foreignField: "b", from: "from", as: "a"}}],
+ expectedResults,
+ coll);
// Running multiple $lookups in the same pipeline is allowed.
expectedResults = [
- {_id: 0, a: 1, "c": [{_id:0, b:1}], "d": [{_id:0, b:1}]},
- {_id: 1, a: null, "c": [{_id:1, b:null}, {_id:2}], "d": [{_id:1, b:null}, {_id:2}]},
- {_id: 2, "c": [{_id:1, b:null}, {_id:2}], "d": [{_id:1, b:null}, {_id:2}]}
+ {_id: 0, a: 1, "c": [{_id: 0, b: 1}], "d": [{_id: 0, b: 1}]},
+ {
+ _id: 1,
+ a: null, "c": [{_id: 1, b: null}, {_id: 2}], "d": [{_id: 1, b: null}, {_id: 2}]
+ },
+ {_id: 2, "c": [{_id: 1, b: null}, {_id: 2}], "d": [{_id: 1, b: null}, {_id: 2}]}
];
- testPipeline([{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: "from",
- as: "c"
- }
- }, {
- $project: {
- "a": 1,
- "c": 1
- }
- }, {
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: "from",
- as: "d"
- }
- }], expectedResults, coll);
+ testPipeline([
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "c"}},
+ {$project: {"a": 1, "c": 1}},
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "d"}}
+ ],
+ expectedResults,
+ coll);
//
// Coalescing with $unwind.
@@ -162,16 +126,12 @@ load("jstests/aggregation/extras/utils.js");
{_id: 2, same: {_id: 1, b: null}},
{_id: 2, same: {_id: 2}}
];
- testPipeline([{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: "from",
- as: "same"
- }
- }, {
- $unwind: {path: "$same"}
- }], expectedResults, coll);
+ testPipeline([
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same"}}
+ ],
+ expectedResults,
+ coll);
// An $unwind on the "as" field, with includeArrayIndex.
expectedResults = [
@@ -181,72 +141,39 @@ load("jstests/aggregation/extras/utils.js");
{_id: 2, same: {_id: 1, b: null}, index: NumberLong(0)},
{_id: 2, same: {_id: 2}, index: NumberLong(1)},
];
- testPipeline([{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: "from",
- as: "same"
- }
- }, {
- $unwind: {
- path: "$same",
- includeArrayIndex: "index"
- }
- }], expectedResults, coll);
+ testPipeline([
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same", includeArrayIndex: "index"}}
+ ],
+ expectedResults,
+ coll);
// Normal $unwind with no matching documents.
expectedResults = [];
- testPipeline([{
- $lookup: {
- localField: "_id",
- foreignField: "nonexistent",
- from: "from",
- as: "same"
- }
- }, {
- $unwind: {path: "$same"}
- }], expectedResults, coll);
+ testPipeline([
+ {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
+ {$unwind: {path: "$same"}}
+ ],
+ expectedResults,
+ coll);
// $unwind with preserveNullAndEmptyArray with no matching documents.
- expectedResults = [
- {_id: 0, a: 1},
- {_id: 1, a: null},
- {_id: 2},
- ];
- testPipeline([{
- $lookup: {
- localField: "_id",
- foreignField: "nonexistent",
- from: "from",
- as: "same"
- }
- }, {
- $unwind: {
- path: "$same",
- preserveNullAndEmptyArrays: true
- }
- }], expectedResults, coll);
+ expectedResults = [{_id: 0, a: 1}, {_id: 1, a: null}, {_id: 2}, ];
+ testPipeline([
+ {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
+ {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
+ ],
+ expectedResults,
+ coll);
// $unwind with preserveNullAndEmptyArray, some with matching documents, some without.
- expectedResults = [
- {_id: 0, a: 1},
- {_id: 1, a: null, same: {_id: 0, b: 1}},
- {_id: 2},
- ];
- testPipeline([{
- $lookup: {
- localField: "_id",
- foreignField: "b",
- from: "from",
- as: "same"
- }
- }, {
- $unwind: {
- path: "$same",
- preserveNullAndEmptyArrays: true
- }
- }], expectedResults, coll);
+ expectedResults = [{_id: 0, a: 1}, {_id: 1, a: null, same: {_id: 0, b: 1}}, {_id: 2}, ];
+ testPipeline([
+ {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
+ ],
+ expectedResults,
+ coll);
// $unwind with preserveNullAndEmptyArray and includeArrayIndex, some with matching
// documents, some without.
@@ -255,20 +182,15 @@ load("jstests/aggregation/extras/utils.js");
{_id: 1, a: null, same: {_id: 0, b: 1}, index: NumberLong(0)},
{_id: 2, index: null},
];
- testPipeline([{
- $lookup: {
- localField: "_id",
- foreignField: "b",
- from: "from",
- as: "same"
+ testPipeline([
+ {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
+ {
+ $unwind:
+ {path: "$same", preserveNullAndEmptyArrays: true, includeArrayIndex: "index"}
}
- }, {
- $unwind: {
- path: "$same",
- preserveNullAndEmptyArrays: true,
- includeArrayIndex: "index"
- }
- }], expectedResults, coll);
+ ],
+ expectedResults,
+ coll);
//
// Dependencies.
@@ -281,18 +203,12 @@ load("jstests/aggregation/extras/utils.js");
{_id: 1, "same": [{_id: 1, b: null}, {_id: 2}]},
{_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
];
- testPipeline([{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: "from",
- as: "same"
- }
- }, {
- $project: {
- "same": 1
- }
- }], expectedResults, coll);
+ testPipeline([
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
+ {$project: {"same": 1}}
+ ],
+ expectedResults,
+ coll);
//
// Dotted field paths.
@@ -312,14 +228,7 @@ load("jstests/aggregation/extras/utils.js");
assert.writeOK(from.insert({_id: 4, b: {c: 2}}));
// Once without a dotted field.
- var pipeline = [{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: "from",
- as: "same"
- }
- }];
+ var pipeline = [{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}];
expectedResults = [
{_id: 0, a: 1, "same": [{_id: 0, b: 1}]},
{_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
@@ -329,14 +238,7 @@ load("jstests/aggregation/extras/utils.js");
testPipeline(pipeline, expectedResults, coll);
// Look up a dotted field.
- pipeline = [{
- $lookup: {
- localField: "a.c",
- foreignField: "b.c",
- from: "from",
- as: "same"
- }
- }];
+ pipeline = [{$lookup: {localField: "a.c", foreignField: "b.c", from: "from", as: "same"}}];
// All but the last document in 'coll' have a nullish value for 'a.c'.
expectedResults = [
{_id: 0, a: 1, same: [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
@@ -354,25 +256,33 @@ load("jstests/aggregation/extras/utils.js");
from.drop();
assert.writeOK(from.insert({_id: 0, target: 1}));
- pipeline = [{
- $lookup: {
- localField: "a.b",
- foreignField: "target",
- from: "from",
- as: "same.documents",
- }
- }, {
- // Expected input to $unwind:
- // {_id: 0, a: {b: 1}, same: {documents: [{_id: 0, target: 1}]}}
- // {_id: 1, same: {documents: []}}
- $unwind: {
- path: "$same.documents",
- preserveNullAndEmptyArrays: true,
- includeArrayIndex: "c.d.e",
+ pipeline = [
+ {
+ $lookup: {
+ localField: "a.b",
+ foreignField: "target",
+ from: "from",
+ as: "same.documents",
+ }
+ },
+ {
+ // Expected input to $unwind:
+ // {_id: 0, a: {b: 1}, same: {documents: [{_id: 0, target: 1}]}}
+ // {_id: 1, same: {documents: []}}
+ $unwind: {
+ path: "$same.documents",
+ preserveNullAndEmptyArrays: true,
+ includeArrayIndex: "c.d.e",
+ }
}
- }];
+ ];
expectedResults = [
- {_id: 0, a: {b: 1}, same: {documents: {_id: 0, target: 1}}, c: {d: {e: NumberLong(0)}}},
+ {
+ _id: 0,
+ a: {b: 1},
+ same: {documents: {_id: 0, target: 1}},
+ c: {d: {e: NumberLong(0)}}
+ },
{_id: 1, same: {}, c: {d: {e: null}}},
];
testPipeline(pipeline, expectedResults, coll);
@@ -390,16 +300,16 @@ load("jstests/aggregation/extras/utils.js");
assert.writeOK(from.insert({_id: 1, b: "string that matches /a regex/"}));
pipeline = [
- {$lookup: {
- localField: "a",
- foreignField: "b",
- from: "from",
- as: "b",
- }},
- ];
- expectedResults = [
- {_id: 0, a: /a regex/, b: [{_id: 0, b: /a regex/}]}
+ {
+ $lookup: {
+ localField: "a",
+ foreignField: "b",
+ from: "from",
+ as: "b",
+ }
+ },
];
+ expectedResults = [{_id: 0, a: /a regex/, b: [{_id: 0, b: /a regex/}]}];
testPipeline(pipeline, expectedResults, coll);
//
@@ -407,20 +317,21 @@ load("jstests/aggregation/extras/utils.js");
//
// All four fields must be specified.
- assertErrorCode(coll, [{$lookup: {foreignField:"b", from:"from", as:"same"}}], 4572);
- assertErrorCode(coll, [{$lookup: {localField:"a", from:"from", as:"same"}}], 4572);
- assertErrorCode(coll, [{$lookup: {localField:"a", foreignField:"b", as:"same"}}], 4572);
- assertErrorCode(coll, [{$lookup: {localField:"a", foreignField:"b", from:"from"}}], 4572);
+ assertErrorCode(coll, [{$lookup: {foreignField: "b", from: "from", as: "same"}}], 4572);
+ assertErrorCode(coll, [{$lookup: {localField: "a", from: "from", as: "same"}}], 4572);
+ assertErrorCode(coll, [{$lookup: {localField: "a", foreignField: "b", as: "same"}}], 4572);
+ assertErrorCode(
+ coll, [{$lookup: {localField: "a", foreignField: "b", from: "from"}}], 4572);
// All four field's values must be strings.
- assertErrorCode(coll, [{$lookup: {localField:1, foreignField:"b", from:"from", as:"as"}}]
- , 4570);
- assertErrorCode(coll, [{$lookup: {localField:"a", foreignField:1, from:"from", as:"as"}}]
- , 4570);
- assertErrorCode(coll, [{$lookup: {localField:"a", foreignField:"b", from:1, as:"as"}}]
- , 4570);
- assertErrorCode(coll, [{$lookup: {localField:"a", foreignField: "b", from:"from", as:1}}]
- , 4570);
+ assertErrorCode(
+ coll, [{$lookup: {localField: 1, foreignField: "b", from: "from", as: "as"}}], 4570);
+ assertErrorCode(
+ coll, [{$lookup: {localField: "a", foreignField: 1, from: "from", as: "as"}}], 4570);
+ assertErrorCode(
+ coll, [{$lookup: {localField: "a", foreignField: "b", from: 1, as: "as"}}], 4570);
+ assertErrorCode(
+ coll, [{$lookup: {localField: "a", foreignField: "b", from: "from", as: 1}}], 4570);
// $lookup's field must be an object.
assertErrorCode(coll, [{$lookup: "string"}], 4569);
@@ -433,21 +344,17 @@ load("jstests/aggregation/extras/utils.js");
// Run tests in a sharded environment.
var sharded = new ShardingTest({shards: 2, mongos: 1});
- assert(sharded.adminCommand({enableSharding : "test"}));
+ assert(sharded.adminCommand({enableSharding: "test"}));
sharded.getDB('test').lookUp.drop();
sharded.getDB('test').from.drop();
assert(sharded.adminCommand({shardCollection: "test.lookUp", key: {_id: 'hashed'}}));
runTest(sharded.getDB('test').lookUp, sharded.getDB('test').from);
// An error is thrown if the from collection is sharded.
- assert(sharded.adminCommand({ shardCollection:"test.from", key: {_id: 1}}));
- assertErrorCode(sharded.getDB('test').lookUp, [{
- $lookup: {
- localField: "a",
- foreignField: "b",
- from: "from",
- as: "same"
- }
- }], 28769);
+ assert(sharded.adminCommand({shardCollection: "test.from", key: {_id: 1}}));
+ assertErrorCode(
+ sharded.getDB('test').lookUp,
+ [{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}],
+ 28769);
sharded.stop();
}());
diff --git a/jstests/aggregation/bugs/server20168.js b/jstests/aggregation/bugs/server20168.js
index efe15a9a243..2ff8c6e53cd 100644
--- a/jstests/aggregation/bugs/server20168.js
+++ b/jstests/aggregation/bugs/server20168.js
@@ -19,16 +19,14 @@
assert.writeOK(coll.insert(inputDoc));
// If preserveNullAndEmptyArrays is passed, we should get an output document.
- var preservedResults = coll.aggregate(
- [{$unwind: {path: unwindPath, preserveNullAndEmptyArrays: true}}]
- ).toArray();
+ var preservedResults =
+ coll.aggregate([{$unwind: {path: unwindPath, preserveNullAndEmptyArrays: true}}])
+ .toArray();
assert.eq(1, preservedResults.length, "$unwind returned the wrong number of results");
- assert.eq(
- preservedResults[0],
- outputDoc,
- "Unexpected result for an $unwind with preserveNullAndEmptyArrays " +
- "(input was " + tojson(inputDoc) + ")"
- );
+ assert.eq(preservedResults[0],
+ outputDoc,
+ "Unexpected result for an $unwind with preserveNullAndEmptyArrays " +
+ "(input was " + tojson(inputDoc) + ")");
// If not, we should get no outputs.
var defaultResults = coll.aggregate([{$unwind: {path: unwindPath}}]).toArray();
diff --git a/jstests/aggregation/bugs/server21632.js b/jstests/aggregation/bugs/server21632.js
index b040b4191e6..2148beac282 100644
--- a/jstests/aggregation/bugs/server21632.js
+++ b/jstests/aggregation/bugs/server21632.js
@@ -24,7 +24,10 @@
// If there is only one document, we should get that document.
var paddingStr = "abcdefghijklmnopqrstuvwxyz";
- var firstDoc = {_id: 0, paddingStr: paddingStr};
+ var firstDoc = {
+ _id: 0,
+ paddingStr: paddingStr
+ };
assert.writeOK(coll.insert(firstDoc));
assert.eq([firstDoc], coll.aggregate([{$sample: {size: 1}}]).toArray());
assert.eq([firstDoc], coll.aggregate([{$sample: {size: 10}}]).toArray());
diff --git a/jstests/aggregation/bugs/server22093.js b/jstests/aggregation/bugs/server22093.js
index a3bc05de53a..aca39a4e789 100644
--- a/jstests/aggregation/bugs/server22093.js
+++ b/jstests/aggregation/bugs/server22093.js
@@ -22,14 +22,17 @@ load('jstests/libs/analyze_plan.js');
assert.eq(simpleGroup.length, 1);
assert.eq(simpleGroup[0]["count"], 15);
- var explained = coll.explain().aggregate([{$match: {foo: {$gt: 0}}},
- {$group: {_id: null, count: {$sum: 1}}}]);
+ var explained =
+ coll.explain()
+ .aggregate([{$match: {foo: {$gt: 0}}}, {$group: {_id: null, count: {$sum: 1}}}]);
assert(planHasStage(explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
- explained = coll.explain().aggregate([{$match: {foo: {$gt: 0}}},
- {$project: {_id: 0, a: {$literal: null}}},
- {$group: {_id: null, count: {$sum: 1}}}]);
+ explained = coll.explain().aggregate([
+ {$match: {foo: {$gt: 0}}},
+ {$project: {_id: 0, a: {$literal: null}}},
+ {$group: {_id: null, count: {$sum: 1}}}
+ ]);
assert(planHasStage(explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
}());
diff --git a/jstests/aggregation/bugs/server3253.js b/jstests/aggregation/bugs/server3253.js
index 602e3c4d839..fe47bba565e 100644
--- a/jstests/aggregation/bugs/server3253.js
+++ b/jstests/aggregation/bugs/server3253.js
@@ -7,7 +7,7 @@ var output = db.server3253_out;
var cappedOutput = db.server3253_out_capped;
input.drop();
-inputDoesntExist.drop(); // never created
+inputDoesntExist.drop(); // never created
output.drop();
function collectionExists(coll) {
@@ -16,9 +16,12 @@ function collectionExists(coll) {
function getOutputIndexes() {
return output.getIndexes().sort(function(a, b) {
- if (a.name < b.name) { return -1; }
- else { return 1; }
- });
+ if (a.name < b.name) {
+ return -1;
+ } else {
+ return 1;
+ }
+ });
}
function test(input, pipeline, expected) {
@@ -27,10 +30,10 @@ function test(input, pipeline, expected) {
var cursor = input.aggregate(pipeline);
- assert.eq(cursor.itcount(), 0); // empty cursor returned
- assert.eq(output.find().toArray(), expected); // correct results
+ assert.eq(cursor.itcount(), 0); // empty cursor returned
+ assert.eq(output.find().toArray(), expected); // correct results
var outputIndexes = getOutputIndexes();
- assert.eq(outputIndexes.length, indexes.length); // number of indexes maintained
+ assert.eq(outputIndexes.length, indexes.length); // number of indexes maintained
for (var i = 0; i < outputIndexes.length; i++) {
assert.docEq(outputIndexes[i], indexes[i]);
}
@@ -39,52 +42,52 @@ function test(input, pipeline, expected) {
}
function listCollections(name) {
- var collectionInfosCursor = db.runCommand("listCollections", {filter: { name: name}});
+ var collectionInfosCursor = db.runCommand("listCollections", {filter: {name: name}});
return new DBCommandCursor(db.getMongo(), collectionInfosCursor).toArray();
}
-input.insert({_id:1});
-input.insert({_id:2});
-input.insert({_id:3});
+input.insert({_id: 1});
+input.insert({_id: 2});
+input.insert({_id: 3});
// insert into output so that the index exists and test() does not fail the first time around
-output.insert({_id:1});
+output.insert({_id: 1});
// ensure there are no tmp agg_out collections before we begin
assert.eq([], listCollections(/tmp\.agg_out/));
// basic test
test(input,
- [{$project: {a: {$add: ['$_id', '$_id']}}}],
- [{_id:1, a:2},{_id:2, a:4},{_id:3, a:6}]);
+ [{$project: {a: {$add: ['$_id', '$_id']}}}],
+ [{_id: 1, a: 2}, {_id: 2, a: 4}, {_id: 3, a: 6}]);
// test with indexes
assert.eq(output.getIndexes().length, 1);
-output.ensureIndex({a:1});
+output.ensureIndex({a: 1});
assert.eq(output.getIndexes().length, 2);
test(input,
- [{$project: {a: {$multiply: ['$_id', '$_id']}}}],
- [{_id:1, a:1},{_id:2, a:4},{_id:3, a:9}]);
+ [{$project: {a: {$multiply: ['$_id', '$_id']}}}],
+ [{_id: 1, a: 1}, {_id: 2, a: 4}, {_id: 3, a: 9}]);
// test with empty result set and make sure old result is gone, but indexes remain
-test(input,
- [{$match: {_id: 11}}],
- []);
+test(input, [{$match: {_id: 11}}], []);
assert.eq(output.getIndexes().length, 2);
// test with geo index
-output.ensureIndex({b:"2d"});
+output.ensureIndex({b: "2d"});
assert.eq(output.getIndexes().length, 3);
-test(input,
- [{$project: {b: "$_id"}}],
- [{_id:1, b:1}, {_id:2, b:2}, {_id:3, b:3}]);
+test(input, [{$project: {b: "$_id"}}], [{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3, b: 3}]);
// test with full text index
-output.ensureIndex({c:"text"});
+output.ensureIndex({c: "text"});
assert.eq(output.getIndexes().length, 4);
test(input,
- [{$project: {c: {$concat: ["hello there ", "_id"]}}}],
- [{_id:1, c:"hello there _id"}, {_id:2, c:"hello there _id"}, {_id:3, c:"hello there _id"}]);
+ [{$project: {c: {$concat: ["hello there ", "_id"]}}}],
+ [
+ {_id: 1, c: "hello there _id"},
+ {_id: 2, c: "hello there _id"},
+ {_id: 3, c: "hello there _id"}
+ ]);
// test with capped collection
cappedOutput.drop();
@@ -92,9 +95,7 @@ db.createCollection(cappedOutput.getName(), {capped: true, size: 2});
assertErrorCode(input, {$out: cappedOutput.getName()}, 17152);
// ensure everything works even if input doesn't exist.
-test(inputDoesntExist,
- [],
- []);
+test(inputDoesntExist, [], []);
// ensure we cant do dangerous things to system collections
var outputInSystem = db.system.server3253_out;
diff --git a/jstests/aggregation/bugs/server3832.js b/jstests/aggregation/bugs/server3832.js
index c5bdf27e36e..b2c59ccd244 100644
--- a/jstests/aggregation/bugs/server3832.js
+++ b/jstests/aggregation/bugs/server3832.js
@@ -1,89 +1,56 @@
var s3832 = db.c;
s3832.drop();
-s3832.save({_id: 1, a:"foo", b:"bar"});
-s3832.save({_id: 2, a:"feh", b:"baz"});
-s3832.save({_id: 3, a:"fee", b:"fum"});
-
-var a1 = s3832.aggregate( { $match : { b : "baz" } } );
-
-var a1result = [
- {
- "_id" : 2,
- "a" : "feh",
- "b" : "baz"
- }
-];
+s3832.save({_id: 1, a: "foo", b: "bar"});
+s3832.save({_id: 2, a: "feh", b: "baz"});
+s3832.save({_id: 3, a: "fee", b: "fum"});
-assert.eq(a1.toArray(), a1result, 's3832.a1 failed');
+var a1 = s3832.aggregate({$match: {b: "baz"}});
+
+var a1result = [{"_id": 2, "a": "feh", "b": "baz"}];
+assert.eq(a1.toArray(), a1result, 's3832.a1 failed');
-var a2 = s3832.aggregate( { $sort : { a : 1 } } );
+var a2 = s3832.aggregate({$sort: {a: 1}});
var a2result = [
- {
- "_id" : 3,
- "a" : "fee",
- "b" : "fum"
- },
- {
- "_id" : 2,
- "a" : "feh",
- "b" : "baz"
- },
- {
- "_id" : 1,
- "a" : "foo",
- "b" : "bar"
- }
+ {"_id": 3, "a": "fee", "b": "fum"},
+ {"_id": 2, "a": "feh", "b": "baz"},
+ {"_id": 1, "a": "foo", "b": "bar"}
];
assert.eq(a2.toArray(), a2result, 's3832.a2 failed');
-
-var a3 = s3832.aggregate(
- { $match : { b : "baz" } },
- { $sort : { a : 1 } } );
+var a3 = s3832.aggregate({$match: {b: "baz"}}, {$sort: {a: 1}});
assert.eq(a3.toArray(), a1result, 's3832.a3 failed');
+db.s3832.ensureIndex({b: 1}, {name: "s3832_b"});
-db.s3832.ensureIndex({ b : 1 }, { name : "s3832_b" });
-
-
-var a4 = s3832.aggregate({ $match : { b : "baz" } });
+var a4 = s3832.aggregate({$match: {b: "baz"}});
assert.eq(a4.toArray(), a1result, 's3832.a4 failed');
-
-var a5 = s3832.aggregate({ $sort : { a : 1 } });
+var a5 = s3832.aggregate({$sort: {a: 1}});
assert.eq(a5.toArray(), a2result, 's3832.a5 failed');
-
-var a6 = s3832.aggregate(
- { $match : { b : "baz" } },
- { $sort : { a : 1 } } );
+var a6 = s3832.aggregate({$match: {b: "baz"}}, {$sort: {a: 1}});
assert.eq(a6.toArray(), a1result, 's3832.a6 failed');
-
var dropb = db.s3832.dropIndex("s3832_b");
-db.s3832.ensureIndex({ a : 1 }, { name : "s3832_a" });
+db.s3832.ensureIndex({a: 1}, {name: "s3832_a"});
-var a7 = s3832.aggregate({ $match : { b : "baz" } });
+var a7 = s3832.aggregate({$match: {b: "baz"}});
assert.eq(a7.toArray(), a1result, 's3832.a7 failed');
-
-var a8 = s3832.aggregate({ $sort : { a : 1 } });
+var a8 = s3832.aggregate({$sort: {a: 1}});
assert.eq(a8.toArray(), a2result, 's3832.a8 failed');
-
-var a9 = s3832.aggregate(
- { $match : { b : "baz" } },
- { $sort : { a : 1 } } );
+var a9 = s3832.aggregate({$match: {b: "baz"}}, {$sort: {a: 1}});
assert.eq(a9.toArray(), a1result, 's3832.a9 failed');
diff --git a/jstests/aggregation/bugs/server4588.js b/jstests/aggregation/bugs/server4588.js
index 95b4ba86273..ba49c78e9ea 100644
--- a/jstests/aggregation/bugs/server4588.js
+++ b/jstests/aggregation/bugs/server4588.js
@@ -13,18 +13,11 @@
// Without includeArrayIndex.
var actualResults = coll.aggregate([{$unwind: {path: "$x"}}]).toArray();
- var expectedResults = [
- {_id: 3, x: 1},
- {_id: 3, x: 2},
- {_id: 3, x: 3},
- {_id: 4, x: 5},
- ];
+ var expectedResults = [{_id: 3, x: 1}, {_id: 3, x: 2}, {_id: 3, x: 3}, {_id: 4, x: 5}, ];
assert.eq(expectedResults, actualResults, "Incorrect results for normal $unwind");
// With includeArrayIndex, index inserted into a new field.
- actualResults = coll.aggregate([
- {$unwind: {path: "$x", includeArrayIndex: "index"}}
- ]).toArray();
+ actualResults = coll.aggregate([{$unwind: {path: "$x", includeArrayIndex: "index"}}]).toArray();
expectedResults = [
{_id: 3, x: 1, index: NumberLong(0)},
{_id: 3, x: 2, index: NumberLong(1)},
@@ -35,9 +28,9 @@
// With both includeArrayIndex and preserveNullAndEmptyArrays.
// TODO: update this test when SERVER-20168 is resolved.
- actualResults = coll.aggregate([
- {$unwind: {path: "$x", includeArrayIndex: "index", preserveNullAndEmptyArrays: true}}
- ]).toArray();
+ actualResults = coll.aggregate([{
+ $unwind: {path: "$x", includeArrayIndex: "index", preserveNullAndEmptyArrays: true}
+ }]).toArray();
expectedResults = [
{_id: 0, index: null},
{_id: 1, x: null, index: null},
diff --git a/jstests/aggregation/bugs/server4589.js b/jstests/aggregation/bugs/server4589.js
index 11e8cad7978..c71a7d802f8 100644
--- a/jstests/aggregation/bugs/server4589.js
+++ b/jstests/aggregation/bugs/server4589.js
@@ -9,7 +9,7 @@ load('jstests/aggregation/extras/utils.js');
var coll = db.agg_array_elem_at_expr;
coll.drop();
- assert.writeOK(coll.insert({a: [1,2,3,4,5]}));
+ assert.writeOK(coll.insert({a: [1, 2, 3, 4, 5]}));
// Normal indexing.
var pipeline = [{$project: {_id: 0, x: {$arrayElemAt: ['$a', 2]}}}];
diff --git a/jstests/aggregation/bugs/server4638.js b/jstests/aggregation/bugs/server4638.js
index 7d0f4b67e34..4934da94a34 100644
--- a/jstests/aggregation/bugs/server4638.js
+++ b/jstests/aggregation/bugs/server4638.js
@@ -4,15 +4,13 @@
t = db.server4638;
t.drop();
-t.insert( { _id : 0 , x : 0 , undef: undefined } );
+t.insert({_id: 0, x: 0, undef: undefined});
// Make sure having an undefined doesn't break pipelines not using the field
-res = t.aggregate( { $project : { x : 1 } } ).toArray();
+res = t.aggregate({$project: {x: 1}}).toArray();
assert.eq(res[0].x, 0);
-
// Make sure having an undefined doesn't break pipelines that do use the field
-res = t.aggregate( { $project : { undef : 1 } } ).toArray();
+res = t.aggregate({$project: {undef: 1}}).toArray();
assert.eq(res[0].undef, undefined);
assert.eq(typeof(res[0].undef), "undefined");
-
diff --git a/jstests/aggregation/bugs/server4656.js b/jstests/aggregation/bugs/server4656.js
index cbb07f81ea5..185f74bec54 100644
--- a/jstests/aggregation/bugs/server4656.js
+++ b/jstests/aggregation/bugs/server4656.js
@@ -17,20 +17,20 @@ function generateRandom() {
}
for (var i = 0; i < NUM_OBJS; i++) {
- c.insert({inc: i, dec: NUM_OBJS-i, rnd: generateRandom()});
+ c.insert({inc: i, dec: NUM_OBJS - i, rnd: generateRandom()});
}
-var inc_sorted = c.aggregate({$sort: {inc:1}}).toArray();
-var dec_sorted = c.aggregate({$sort: {dec:1}}).toArray();
-var rnd_sorted = c.aggregate({$sort: {rnd:1}}).toArray();
+var inc_sorted = c.aggregate({$sort: {inc: 1}}).toArray();
+var dec_sorted = c.aggregate({$sort: {dec: 1}}).toArray();
+var rnd_sorted = c.aggregate({$sort: {rnd: 1}}).toArray();
function test(limit, direction) {
try {
- var res_inc = c.aggregate({$sort: {inc:direction}}, {$limit:limit}).toArray();
- var res_dec = c.aggregate({$sort: {dec:direction}}, {$limit:limit}).toArray();
- var res_rnd = c.aggregate({$sort: {rnd:direction}}, {$limit:limit}).toArray();
+ var res_inc = c.aggregate({$sort: {inc: direction}}, {$limit: limit}).toArray();
+ var res_dec = c.aggregate({$sort: {dec: direction}}, {$limit: limit}).toArray();
+ var res_rnd = c.aggregate({$sort: {rnd: direction}}, {$limit: limit}).toArray();
- var expectedLength = Math.min(limit, NUM_OBJS) ;
+ var expectedLength = Math.min(limit, NUM_OBJS);
assert.eq(res_inc.length, expectedLength);
assert.eq(res_dec.length, expectedLength);
@@ -42,28 +42,26 @@ function test(limit, direction) {
assert.eq(res_dec[i], dec_sorted[i]);
assert.eq(res_rnd[i], rnd_sorted[i]);
}
- }
- else {
+ } else {
for (var i = 0; i < expectedLength; i++) {
assert.eq(res_inc[i], inc_sorted[NUM_OBJS - 1 - i]);
assert.eq(res_dec[i], dec_sorted[NUM_OBJS - 1 - i]);
assert.eq(res_rnd[i], rnd_sorted[NUM_OBJS - 1 - i]);
}
}
- }
- catch (e) {
+ } catch (e) {
print("failed with limit=" + limit + " direction= " + direction);
throw e;
}
}
-test(1, 1);
+test(1, 1);
test(1, -1);
-test(10, 1);
+test(10, 1);
test(10, -1);
-test(50, 1);
+test(50, 1);
test(50, -1);
-test(NUM_OBJS, 1);
+test(NUM_OBJS, 1);
test(NUM_OBJS, -1);
-test(NUM_OBJS + 10, 1);
+test(NUM_OBJS + 10, 1);
test(NUM_OBJS + 10, -1);
diff --git a/jstests/aggregation/bugs/server4738.js b/jstests/aggregation/bugs/server4738.js
index 7807daa020c..7a482ab0042 100644
--- a/jstests/aggregation/bugs/server4738.js
+++ b/jstests/aggregation/bugs/server4738.js
@@ -3,12 +3,13 @@ c = db.blah;
c.drop();
c.save({key: 4, v: 3, x: 2});
-var r = c.aggregate(
- { "$project" : {
- "_id" : 0,
- "key" : NumberLong(1),
- "v" : 1, /* javascript: really a double */
- "x" : NumberInt(1)
- }});
+var r = c.aggregate({
+ "$project": {
+ "_id": 0,
+ "key": NumberLong(1),
+ "v": 1, /* javascript: really a double */
+ "x": NumberInt(1)
+ }
+});
assert.eq(r.toArray(), [{key: 4, v: 3, x: 2}], "support204 failed");
diff --git a/jstests/aggregation/bugs/server4899.js b/jstests/aggregation/bugs/server4899.js
index 5c877ee90dd..b90ed984c2a 100644
--- a/jstests/aggregation/bugs/server4899.js
+++ b/jstests/aggregation/bugs/server4899.js
@@ -3,18 +3,14 @@ load('jstests/aggregation/extras/utils.js');
c = db.server4899;
c.drop();
-c.save({arr:[]});
-c.save({arr:[1]});
-c.save({arr:["asdf", "asdfasdf"]});
-c.save({arr:[1, "asdf", 1234, 4.3, {key:23}]});
-c.save({arr:[3, [31, 31, 13, 13]]});
+c.save({arr: []});
+c.save({arr: [1]});
+c.save({arr: ["asdf", "asdfasdf"]});
+c.save({arr: [1, "asdf", 1234, 4.3, {key: 23}]});
+c.save({arr: [3, [31, 31, 13, 13]]});
result = c.aggregate({$project: {_id: 0, length: {$size: "$arr"}}});
-assert.eq(result.toArray(), [{length:0},
- {length:1},
- {length:2},
- {length:5},
- {length:2}]);
+assert.eq(result.toArray(), [{length: 0}, {length: 1}, {length: 2}, {length: 5}, {length: 2}]);
-c.save({arr:231});
+c.save({arr: 231});
assertErrorCode(c, {$project: {_id: 0, length: {$size: "$arr"}}}, 17124);
diff --git a/jstests/aggregation/bugs/server5012.js b/jstests/aggregation/bugs/server5012.js
index 6e2bbd1ecb6..64f55369dc4 100644
--- a/jstests/aggregation/bugs/server5012.js
+++ b/jstests/aggregation/bugs/server5012.js
@@ -5,26 +5,8 @@ var article = db.article;
load('jstests/aggregation/data/articles.js');
// original crash from ticket
-var r3 = article.aggregate(
- { $project: {
- author: 1,
- _id: 0
- }},
- { $project: {
- Writer: "$author"
- }}
-);
+var r3 = article.aggregate({$project: {author: 1, _id: 0}}, {$project: {Writer: "$author"}});
-var r3result = [
- {
- "Writer" : "bob"
- },
- {
- "Writer" : "dave"
- },
- {
- "Writer" : "jane"
- }
-];
+var r3result = [{"Writer": "bob"}, {"Writer": "dave"}, {"Writer": "jane"}];
assert.eq(r3.toArray(), r3result, 's5012 failed');
diff --git a/jstests/aggregation/bugs/server5044.js b/jstests/aggregation/bugs/server5044.js
index 3a784afac01..945f31c302c 100644
--- a/jstests/aggregation/bugs/server5044.js
+++ b/jstests/aggregation/bugs/server5044.js
@@ -4,27 +4,28 @@ var t = db.server5044;
function test(data, popExpected, sampExpected) {
t.drop();
- assert.writeOK(t.insert({})); // need one document to ensure we get output
+ assert.writeOK(t.insert({})); // need one document to ensure we get output
- for (var i=0; i < data.length; i++)
+ for (var i = 0; i < data.length; i++)
assert.writeOK(t.insert({num: data[i]}));
- var res = t.aggregate({$group: {_id: 1,
- pop: {$stdDevPop: '$num'},
- samp: {$stdDevSamp: '$num'},
- }}).next();
+ var res = t.aggregate({
+ $group: {
+ _id: 1,
+ pop: {$stdDevPop: '$num'},
+ samp: {$stdDevSamp: '$num'},
+ }
+ }).next();
if (popExpected === null) {
assert.isnull(res.pop);
- }
- else {
+ } else {
assert.close(res.pop, popExpected, '', 10 /*decimal places*/);
}
if (sampExpected === null) {
assert.isnull(res.samp);
- }
- else {
+ } else {
assert.close(res.samp, sampExpected, '', 10 /*decimal places*/);
}
}
@@ -38,11 +39,10 @@ test([1, 'a'], 0, null);
test([1, 'a', 1], 0, 0);
test([1, 2], .5, Math.sqrt(.5));
-test([1, 2, 3], Math.sqrt(2/3), 1);
+test([1, 2, 3], Math.sqrt(2 / 3), 1);
// test from http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Example
test([4, 7, 13, 16], Math.sqrt(22.5), Math.sqrt(30));
test([1e8 + 4, 1e8 + 7, 1e8 + 13, 1e8 + 16], Math.sqrt(22.5), Math.sqrt(30));
test([1e9 + 4, 1e9 + 7, 1e9 + 13, 1e9 + 16], Math.sqrt(22.5), Math.sqrt(30));
test([1e10 + 4, 1e10 + 7, 1e10 + 13, 1e10 + 16], Math.sqrt(22.5), Math.sqrt(30));
-
diff --git a/jstests/aggregation/bugs/server5209.js b/jstests/aggregation/bugs/server5209.js
index d0890c2b47a..8c0f017bbc7 100644
--- a/jstests/aggregation/bugs/server5209.js
+++ b/jstests/aggregation/bugs/server5209.js
@@ -9,12 +9,7 @@ db.aggtype.insert({key: NumberInt(42), value: 11});
db.aggtype.insert({key: NumberLong(42), value: 13});
db.aggtype.insert({key: 42, value: 6});
-var at = db.aggtype.aggregate(
- {$group: {
- _id: "$key",
- s: {$sum: "$value"}
- }}
-).toArray();
+var at = db.aggtype.aggregate({$group: {_id: "$key", s: {$sum: "$value"}}}).toArray();
assert(at[0].s == 30, 'server5209 failed');
assert(at[1].s == 30, 'server5209 failed');
diff --git a/jstests/aggregation/bugs/server5782.js b/jstests/aggregation/bugs/server5782.js
index d4ac2eeb6f1..47bb7dd8826 100644
--- a/jstests/aggregation/bugs/server5782.js
+++ b/jstests/aggregation/bugs/server5782.js
@@ -4,15 +4,15 @@ db.server5782.drop();
db.server5782.save({string: "foo"});
// check that without $literal we end up comparing a field with itself and the result is true
-var result = db.runCommand({aggregate: "server5782",
- pipeline:[{$project:
- {stringis$string: {$eq:["$string", '$string']}}
- }]});
+var result = db.runCommand({
+ aggregate: "server5782",
+ pipeline: [{$project: {stringis$string: {$eq: ["$string", '$string']}}}]
+});
assert.eq(result.result[0].stringis$string, true);
// check that with $literal we end up comparing a field with '$string' and the result is true
-var result = db.runCommand({aggregate: "server5782",
- pipeline:[{$project:
- {stringis$string: {$eq:["$string", {$literal:'$string'}]}}
- }]});
+var result = db.runCommand({
+ aggregate: "server5782",
+ pipeline: [{$project: {stringis$string: {$eq: ["$string", {$literal: '$string'}]}}}]
+});
assert.eq(result.result[0].stringis$string, false);
diff --git a/jstests/aggregation/bugs/server5932.js b/jstests/aggregation/bugs/server5932.js
index 9308cbe6b72..942cab3b0d4 100644
--- a/jstests/aggregation/bugs/server5932.js
+++ b/jstests/aggregation/bugs/server5932.js
@@ -44,7 +44,7 @@ var bigArray = [];
for (var i = 0; i < 1000; i++)
bigArray.push(i);
-var bigStr = Array(1001).toString(); // 1000 bytes of ','
+var bigStr = Array(1001).toString(); // 1000 bytes of ','
for (var i = 0; i < 100; i++)
t.insert({_id: i, bigArray: bigArray, bigStr: bigStr});
@@ -54,35 +54,39 @@ for (var i = 0; i < 100; i++)
//
// successfully handles results > 16MB (bigArray.length * bytes in bigStr * t.count() == 100MB)
-var cursor = aggCursor([{$unwind:'$bigArray'}]); // default settings
+var cursor = aggCursor([{$unwind: '$bigArray'}]); // default settings
assert.eq(cursor.itcount(), bigArray.length * t.count());
-var cursor = aggCursor([{$unwind:'$bigArray'}], 0); // empty first batch
+var cursor = aggCursor([{$unwind: '$bigArray'}], 0); // empty first batch
assert.eq(cursor.itcount(), bigArray.length * t.count());
-var cursor = aggCursor([{$unwind:'$bigArray'}], 5, 5); // many small batches
+var cursor = aggCursor([{$unwind: '$bigArray'}], 5, 5); // many small batches
assert.eq(cursor.itcount(), bigArray.length * t.count());
// empty result set results in cursor.id == 0 unless batchSize is 0;
-var res = t.runCommand(buildAggCmd([{$match: {noSuchField: {$exists:true}}}]));
+var res = t.runCommand(buildAggCmd([{$match: {noSuchField: {$exists: true}}}]));
assert.eq(res.cursor.firstBatch, []);
assert.eq(res.cursor.id, 0);
-var res = t.runCommand(buildAggCmd([{$match: {noSuchField: {$exists:true}}}], 0));
+var res = t.runCommand(buildAggCmd([{$match: {noSuchField: {$exists: true}}}], 0));
assert.eq(res.cursor.firstBatch, []);
assert.neq(res.cursor.id, 0);
assert.eq(makeCursor(res).itcount(), 0);
// parse errors are caught before first batch, regardless of size
-var res = t.runCommand(buildAggCmd([{$noSuchStage:1}], 0));
+var res = t.runCommand(buildAggCmd([{$noSuchStage: 1}], 0));
assert.commandFailed(res);
// data dependent errors can get ok:1 but fail in getMore if they don't fail in first batch
-var res = t.runCommand(buildAggCmd([{$project:{cantAddString: {$add:[1, '$bigStr']}}}], 1));
+var res = t.runCommand(buildAggCmd([{$project: {cantAddString: {$add: [1, '$bigStr']}}}], 1));
assert.commandFailed(res);
-var res = t.runCommand(buildAggCmd([{$project:{cantAddString: {$add:[1, '$bigStr']}}}], 0));
+var res = t.runCommand(buildAggCmd([{$project: {cantAddString: {$add: [1, '$bigStr']}}}], 0));
assert.commandWorked(res);
-assert.throws(function() { makeCursor(res).itcount(); });
+assert.throws(function() {
+ makeCursor(res).itcount();
+});
// error if collection dropped after first batch
-var cursor = aggCursor([{$unwind:'$bigArray'}], 0);
+var cursor = aggCursor([{$unwind: '$bigArray'}], 0);
t.drop();
-assert.throws(function() { cursor.itcount(); });
+assert.throws(function() {
+ cursor.itcount();
+});
// DON'T ADD NEW TEST TO THIS FILE AFTER THIS ONE (unless you reseed the data)
diff --git a/jstests/aggregation/bugs/server5973.js b/jstests/aggregation/bugs/server5973.js
index 9ad4549ef80..d1889341f21 100644
--- a/jstests/aggregation/bugs/server5973.js
+++ b/jstests/aggregation/bugs/server5973.js
@@ -5,9 +5,9 @@
db = db.getSiblingDB('aggdb');
db.test.drop();
-db.test.insert({d:ISODate('1950-01-01')});
-db.test.insert({d:ISODate('1980-01-01')});
+db.test.insert({d: ISODate('1950-01-01')});
+db.test.insert({d: ISODate('1980-01-01')});
-var out = db.test.aggregate({$sort:{d:1}}).toArray();
+var out = db.test.aggregate({$sort: {d: 1}}).toArray();
assert.lt(out[0].d, out[1].d);
diff --git a/jstests/aggregation/bugs/server6045.js b/jstests/aggregation/bugs/server6045.js
index 1f3d3c04895..852a8bdc093 100644
--- a/jstests/aggregation/bugs/server6045.js
+++ b/jstests/aggregation/bugs/server6045.js
@@ -29,15 +29,8 @@ db.agg.insert({key: "yarn", value: 42});
// As pipeline
assertErrorCode(db.agg, [{}], 16435);
// Start of pipeline
-assertErrorCode(db.agg, [{$project: {value: 1}}
- ,{}
- ], 16435);
+assertErrorCode(db.agg, [{$project: {value: 1}}, {}], 16435);
// End of pipeline
-assertErrorCode(db.agg, [{}
- ,{$project: {value: 1}}
- ], 16435);
+assertErrorCode(db.agg, [{}, {$project: {value: 1}}], 16435);
// Middle of pipeline
-assertErrorCode(db.agg, [{$project: {value: 1}}
- ,{}
- ,{$project: {value: 1}}
- ], 16435);
+assertErrorCode(db.agg, [{$project: {value: 1}}, {}, {$project: {value: 1}}], 16435);
diff --git a/jstests/aggregation/bugs/server6118.js b/jstests/aggregation/bugs/server6118.js
index 7edfda4821f..898e5927b63 100644
--- a/jstests/aggregation/bugs/server6118.js
+++ b/jstests/aggregation/bugs/server6118.js
@@ -1,41 +1,40 @@
// SERVER-6118: support for sharded sorts
(function() {
-var s = new ShardingTest({ name: "aggregation_sort1", shards: 2, mongos: 1 });
-s.stopBalancer();
+ var s = new ShardingTest({name: "aggregation_sort1", shards: 2, mongos: 1});
+ s.stopBalancer();
-s.adminCommand({ enablesharding:"test" });
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({ shardcollection: "test.data", key:{ _id: 1 } });
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
-var d = s.getDB( "test" );
+ var d = s.getDB("test");
-// Insert _id values 0 - 99
-var N = 100;
+ // Insert _id values 0 - 99
+ var N = 100;
-var bulkOp = d.data.initializeOrderedBulkOp();
-for(var i = 0; i < N; ++i) {
- bulkOp.insert({ _id: i });
-}
-bulkOp.execute();
+ var bulkOp = d.data.initializeOrderedBulkOp();
+ for (var i = 0; i < N; ++i) {
+ bulkOp.insert({_id: i});
+ }
+ bulkOp.execute();
-// Split the data into 3 chunks
-s.adminCommand( { split:"test.data", middle:{ _id:33 } } );
-s.adminCommand( { split:"test.data", middle:{ _id:66 } } );
+ // Split the data into 3 chunks
+ s.adminCommand({split: "test.data", middle: {_id: 33}});
+ s.adminCommand({split: "test.data", middle: {_id: 66}});
-// Migrate the middle chunk to another shard
-s.adminCommand({ movechunk: "test.data",
- find: { _id: 50 },
- to: s.getOther(s.getPrimaryShard("test")).name });
+ // Migrate the middle chunk to another shard
+ s.adminCommand(
+ {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getPrimaryShard("test")).name});
-// Check that the results are in order.
-var result = d.data.aggregate({ $sort: { _id: 1 } }).toArray();
-printjson(result);
+ // Check that the results are in order.
+ var result = d.data.aggregate({$sort: {_id: 1}}).toArray();
+ printjson(result);
-for(var i = 0; i < N; ++i) {
- assert.eq(i, result[i]._id);
-}
+ for (var i = 0; i < N; ++i) {
+ assert.eq(i, result[i]._id);
+ }
-s.stop();
+ s.stop();
})();
diff --git a/jstests/aggregation/bugs/server6120.js b/jstests/aggregation/bugs/server6120.js
index cd2cf7bc25c..c66b296a5a7 100644
--- a/jstests/aggregation/bugs/server6120.js
+++ b/jstests/aggregation/bugs/server6120.js
@@ -3,41 +3,41 @@
t = db.jstests_aggregation_server6120;
t.drop();
-t.save( {object: {a:1}} );
+t.save({object: {a: 1}});
-function coerceToBool( value ) {
- return t.aggregate( { $project:{ boolValue:{ $and:[ value ] } } } ).toArray()[ 0 ].boolValue;
+function coerceToBool(value) {
+ return t.aggregate({$project: {boolValue: {$and: [value]}}}).toArray()[0].boolValue;
}
-function assertBoolValue( expectedBool, value ) {
- assert.eq( expectedBool, coerceToBool( value ) );
+function assertBoolValue(expectedBool, value) {
+ assert.eq(expectedBool, coerceToBool(value));
}
// Bool type.
-assertBoolValue( false, false );
-assertBoolValue( true, true );
+assertBoolValue(false, false);
+assertBoolValue(true, true);
// Numeric types.
-assertBoolValue( false, NumberLong( 0 ) );
-assertBoolValue( true, NumberLong( 1 ) );
-assertBoolValue( false, NumberInt( 0 ) );
-assertBoolValue( true, NumberInt( 1 ) );
-assertBoolValue( false, 0.0 );
-assertBoolValue( true, 1.0 );
+assertBoolValue(false, NumberLong(0));
+assertBoolValue(true, NumberLong(1));
+assertBoolValue(false, NumberInt(0));
+assertBoolValue(true, NumberInt(1));
+assertBoolValue(false, 0.0);
+assertBoolValue(true, 1.0);
// Always false types.
-assertBoolValue( false, null );
+assertBoolValue(false, null);
// Always true types.
-assertBoolValue( true, '' );
-assertBoolValue( true, 'a' );
-assertBoolValue( true, "$object" );
-assertBoolValue( true, [] );
-assertBoolValue( true, [ 1 ] );
-assertBoolValue( true, new ObjectId() );
-assertBoolValue( true, new Date() );
-assertBoolValue( true, /a/ );
-assertBoolValue( true, new Timestamp() );
+assertBoolValue(true, '');
+assertBoolValue(true, 'a');
+assertBoolValue(true, "$object");
+assertBoolValue(true, []);
+assertBoolValue(true, [1]);
+assertBoolValue(true, new ObjectId());
+assertBoolValue(true, new Date());
+assertBoolValue(true, /a/);
+assertBoolValue(true, new Timestamp());
// Missing field.
-assertBoolValue( false, '$missingField' );
+assertBoolValue(false, '$missingField');
diff --git a/jstests/aggregation/bugs/server6121.js b/jstests/aggregation/bugs/server6121.js
index e0051fe8430..97d5a4d72c9 100644
--- a/jstests/aggregation/bugs/server6121.js
+++ b/jstests/aggregation/bugs/server6121.js
@@ -19,69 +19,58 @@ load('jstests/aggregation/extras/utils.js');
// Clear db
db.s6121.drop();
// Populate db
-db.s6121.save({date:new Timestamp(1341337661, 1)});
-db.s6121.save({date:new Date(1341337661000)});
+db.s6121.save({date: new Timestamp(1341337661, 1)});
+db.s6121.save({date: new Date(1341337661000)});
// Aggregate checking various combinations of the constant and the field
-var s6121 = db.s6121.aggregate(
- {$project: {
- _id: 0,
- dayOfMonth: {$dayOfMonth: '$date'},
- dayOfWeek: {$dayOfWeek: '$date'},
- dayOfYear: {$dayOfYear: '$date'},
- hour: {$hour: '$date'},
- minute: {$minute: '$date'},
- month: {$month: '$date'},
- second: {$second: '$date'},
- week: {$week: '$date'},
- year: {$year: '$date'}
- }}
-).toArray();
+var s6121 = db.s6121.aggregate({
+ $project: {
+ _id: 0,
+ dayOfMonth: {$dayOfMonth: '$date'},
+ dayOfWeek: {$dayOfWeek: '$date'},
+ dayOfYear: {$dayOfYear: '$date'},
+ hour: {$hour: '$date'},
+ minute: {$minute: '$date'},
+ month: {$month: '$date'},
+ second: {$second: '$date'},
+ week: {$week: '$date'},
+ year: {$year: '$date'}
+ }
+}).toArray();
// Assert the two entries are equal
assert.eq(s6121[0], s6121[1], 's6121 failed');
-
// Clear db for timestamp to date compare test
// For historical reasons the compare the same if they are the same 64-bit representation.
// That means that the Timestamp has an "inc" that is the same as the Date has millis.
db.s6121.drop();
-db.s6121.save({time:new Timestamp( 0, 1234), date:new Date(1234)});
-db.s6121.save({time:new Timestamp( 1, 1234), date:new Date(1234)});
+db.s6121.save({time: new Timestamp(0, 1234), date: new Date(1234)});
+db.s6121.save({time: new Timestamp(1, 1234), date: new Date(1234)});
printjson(db.s6121.find().toArray());
-var s6121 = db.s6121.aggregate(
- {$project: {
- _id: 0,
- // comparison is different code path based on order (same as in bson)
- ts_date: {$eq: ['$time', '$date']},
- date_ts: {$eq: ['$date', '$time']}
- }}
-);
-assert.eq(s6121.toArray(), [{ts_date: false, date_ts: false}
- ,{ts_date: false, date_ts: false}]);
-
+var s6121 = db.s6121.aggregate({
+ $project: {
+ _id: 0,
+ // comparison is different code path based on order (same as in bson)
+ ts_date: {$eq: ['$time', '$date']},
+ date_ts: {$eq: ['$date', '$time']}
+ }
+});
+assert.eq(s6121.toArray(), [{ts_date: false, date_ts: false}, {ts_date: false, date_ts: false}]);
// Clear db for timestamp comparison tests
db.s6121.drop();
-db.s6121.save({time:new Timestamp(1341337661, 1), time2:new Timestamp(1341337661, 2)});
-var s6121 = db.s6121.aggregate(
- {$project: {
- _id: 0,
- cmp: {$cmp: ['$time', '$time2']},
- eq: {$eq: ['$time', '$time2']},
- gt: {$gt: ['$time', '$time2']},
- gte: {$gte: ['$time', '$time2']},
- lt: {$lt: ['$time', '$time2']},
- lte: {$lte: ['$time', '$time2']},
- ne: {$ne: ['$time', '$time2']}
- }}
-);
-var s6121result = [{
- cmp: -1,
- eq: false,
- gt: false,
- gte: false,
- lt: true,
- lte: true,
- ne: true
-}];
+db.s6121.save({time: new Timestamp(1341337661, 1), time2: new Timestamp(1341337661, 2)});
+var s6121 = db.s6121.aggregate({
+ $project: {
+ _id: 0,
+ cmp: {$cmp: ['$time', '$time2']},
+ eq: {$eq: ['$time', '$time2']},
+ gt: {$gt: ['$time', '$time2']},
+ gte: {$gte: ['$time', '$time2']},
+ lt: {$lt: ['$time', '$time2']},
+ lte: {$lte: ['$time', '$time2']},
+ ne: {$ne: ['$time', '$time2']}
+ }
+});
+var s6121result = [{cmp: -1, eq: false, gt: false, gte: false, lt: true, lte: true, ne: true}];
// Assert the results are as expected
assert.eq(s6121.toArray(), s6121result, 's6121 failed comparing two timestamps');
diff --git a/jstests/aggregation/bugs/server6125.js b/jstests/aggregation/bugs/server6125.js
index 3e8625382c5..746c191d8fe 100644
--- a/jstests/aggregation/bugs/server6125.js
+++ b/jstests/aggregation/bugs/server6125.js
@@ -1,91 +1,97 @@
//
-//testing $sort aggregation pipeline for heterogeneity (SERVER-6125)
-//method:
+// testing $sort aggregation pipeline for heterogeneity (SERVER-6125)
+// method:
// Create an array with all the different types. (Array is created with correct sort order)
// Randomise it (to prevent $sort returning types in same order).
-// Save the array members to the db.
-// aggregate($sort)
+// Save the array members to the db.
+// aggregate($sort)
// iterate through the array ensuring the _ids are in the correct order
-
-//to make results array nested (problem 2)
-function nestArray( nstArray ) {
- for( x = 0; x < nstArray.length; x++ ) {
- nstArray[x].a = { b : nstArray[x].a };
+
+// to make results array nested (problem 2)
+function nestArray(nstArray) {
+ for (x = 0; x < nstArray.length; x++) {
+ nstArray[x].a = {
+ b: nstArray[x].a
+ };
}
}
-
-//sort and run the tests
-function runSort( chkDoc, nest, problem ){
+
+// sort and run the tests
+function runSort(chkDoc, nest, problem) {
var chkArray = setupArray();
- if( nest ){ nestArray( chkArray ); }
+ if (nest) {
+ nestArray(chkArray);
+ }
Array.shuffle(chkArray);
var t = db.s6125;
t.drop();
- t.insert( chkArray );
-
- runAsserts( t.aggregate( { $sort : chkDoc } ).toArray(), problem );
+ t.insert(chkArray);
+
+ runAsserts(t.aggregate({$sort: chkDoc}).toArray(), problem);
}
-
-//actually run the tests
-function runAsserts( chkArray, problem ) {
- //check the _id at [0] to determine which way around this has been sorted
- //then check for gt / lt. Done rather than neq to preclude a < b > c issues
- if( chkArray[ 0 ]._id == 0 ) {
- for( var x=0; x<chkArray.length-1; x++ ) {
- assert.lt( chkArray[x]._id, chkArray[x + 1]._id );
+
+// actually run the tests
+function runAsserts(chkArray, problem) {
+ // check the _id at [0] to determine which way around this has been sorted
+ // then check for gt / lt. Done rather than neq to preclude a < b > c issues
+ if (chkArray[0]._id == 0) {
+ for (var x = 0; x < chkArray.length - 1; x++) {
+ assert.lt(chkArray[x]._id, chkArray[x + 1]._id);
}
- }
- else if( chkArray[ chkArray.length - 1 ]._id == 0 ) {
- for( var x=0; x<chkArray.length-1; x++ ) {
- assert.gt( chkArray[x]._id, chkArray[x + 1]._id );
+ } else if (chkArray[chkArray.length - 1]._id == 0) {
+ for (var x = 0; x < chkArray.length - 1; x++) {
+ assert.gt(chkArray[x]._id, chkArray[x + 1]._id);
}
- }
- else {
- assert.eq( true, chkArray[0]._id == 0 || chkArray[chkArray.length-1]._id == 0 );
+ } else {
+ assert.eq(true, chkArray[0]._id == 0 || chkArray[chkArray.length - 1]._id == 0);
}
}
-
-//set up data
-function setupArray(){
- return [
- { _id : 0, a : MinKey, ty : "MinKey" },
- { _id : 1, a : null, ty : "null" },
- { _id : 2, a : 1, ty : "Number" },
- { _id : 3, a : NumberLong(2), ty : "NumberLong"},
- { _id : 4, a : "3", ty : "String" },
- //Symbol not implemented in JS
- { _id : 5, a : {}, ty : "Object" },
- { _id : 6, a : new DBRef( "test.s6125", ObjectId("0102030405060708090A0B0C") ), ty : "DBRef" },
- { _id : 7, a : [ ], ty : "Empty Array" },
- { _id : 8, a : [ 1 , 2 , "a" , "B" ], ty : "Array" },
- { _id : 9, a : BinData(0, "77+9"), ty : "BinData" },
- { _id : 10, a : new ObjectId("0102030405060708090A0B0C"), ty : "ObjectId" },
- { _id : 11, a : true, ty : "Boolean" },
- { _id : 12, a : new Date( 2 ), ty : "Date" },
- { _id : 13, a : new Timestamp( 1/1000 , 1 ), ty : "Timestamp" },
- { _id : 14, a : /regex/, ty : "RegExp" },
- { _id : 15, a : new DBPointer("test.s6125",new ObjectId("0102030405060708090A0B0C")), ty : "DBPointer" },
- { _id : 16, a : function(){}, ty : "Code" },
- //Code with Scope not implemented in JS
- { _id : 17, a : MaxKey, ty : "MaxKey"}
+
+// set up data
+function setupArray() {
+ return [
+ {_id: 0, a: MinKey, ty: "MinKey"},
+ {_id: 1, a: null, ty: "null"},
+ {_id: 2, a: 1, ty: "Number"},
+ {_id: 3, a: NumberLong(2), ty: "NumberLong"},
+ {_id: 4, a: "3", ty: "String"},
+ // Symbol not implemented in JS
+ {_id: 5, a: {}, ty: "Object"},
+ {_id: 6, a: new DBRef("test.s6125", ObjectId("0102030405060708090A0B0C")), ty: "DBRef"},
+ {_id: 7, a: [], ty: "Empty Array"},
+ {_id: 8, a: [1, 2, "a", "B"], ty: "Array"},
+ {_id: 9, a: BinData(0, "77+9"), ty: "BinData"},
+ {_id: 10, a: new ObjectId("0102030405060708090A0B0C"), ty: "ObjectId"},
+ {_id: 11, a: true, ty: "Boolean"},
+ {_id: 12, a: new Date(2), ty: "Date"},
+ {_id: 13, a: new Timestamp(1 / 1000, 1), ty: "Timestamp"},
+ {_id: 14, a: /regex/, ty: "RegExp"},
+ {
+ _id: 15,
+ a: new DBPointer("test.s6125", new ObjectId("0102030405060708090A0B0C")),
+ ty: "DBPointer"
+ },
+ {_id: 16, a: function() {}, ty: "Code"},
+ // Code with Scope not implemented in JS
+ {_id: 17, a: MaxKey, ty: "MaxKey"}
];
}
//***
-//Begin testing for SERVER-6125
+// Begin testing for SERVER-6125
//***
Random.setRandomSeed();
-//problem 1, does aggregate $sort work with all types
-runSort( { a : 1 }, false, "p1" );
-
-//problem 2, does aggregate $sort work with all types nested
-runSort( { "a" : 1 }, true, "p2a" );
-runSort( { "a.b" : 1 }, true, "p2b" );
-
-//problem 3, check reverse order sort
-runSort( { a : -1 }, false, "p3" );
-
-//problem 4, reverse order sort with nested array
-runSort( { "a" : -1 }, true, "p4a" );
-runSort( { "a.b" : -1 }, true, "p4b" );
+// problem 1, does aggregate $sort work with all types
+runSort({a: 1}, false, "p1");
+
+// problem 2, does aggregate $sort work with all types nested
+runSort({"a": 1}, true, "p2a");
+runSort({"a.b": 1}, true, "p2b");
+
+// problem 3, check reverse order sort
+runSort({a: -1}, false, "p3");
+
+// problem 4, reverse order sort with nested array
+runSort({"a": -1}, true, "p4a");
+runSort({"a.b": -1}, true, "p4b");
diff --git a/jstests/aggregation/bugs/server6127.js b/jstests/aggregation/bugs/server6127.js
index d353d53ec0a..f217e9a8d93 100644
--- a/jstests/aggregation/bugs/server6127.js
+++ b/jstests/aggregation/bugs/server6127.js
@@ -16,19 +16,12 @@
db.s6127.drop();
// Populate db
-db.s6127.save({a:1});
-db.s6127.save({foo:2});
-db.s6127.save({foo:{bar:3}});
+db.s6127.save({a: 1});
+db.s6127.save({foo: 2});
+db.s6127.save({foo: {bar: 3}});
// Aggregate checking the field foo and the path foo.bar
-var s6127 = db.s6127.aggregate(
- { $project : {
- _id : 0,
- 'foo.bar' : 1,
- field : "$foo",
- path : "$foo.bar"
- }}
-);
+var s6127 = db.s6127.aggregate({$project: {_id: 0, 'foo.bar': 1, field: "$foo", path: "$foo.bar"}});
/*
* The first document should contain nothing as neither field exists, the second document should
@@ -37,20 +30,13 @@ var s6127 = db.s6127.aggregate(
* a field bar
*/
var s6127result = [
+ {},
+ {field: 2},
{
- },
- {
- field : 2
- },
- {
- foo : {
- bar : 3
- },
- field : {
- bar : 3
- },
- path : 3
-
+ foo: {bar: 3},
+ field: {bar: 3},
+ path: 3
+
}
];
diff --git a/jstests/aggregation/bugs/server6131.js b/jstests/aggregation/bugs/server6131.js
index bb2fe28e408..602894ab721 100644
--- a/jstests/aggregation/bugs/server6131.js
+++ b/jstests/aggregation/bugs/server6131.js
@@ -3,54 +3,74 @@
t = db.jstests_aggregation_server6131;
t.drop();
-function assertAggregationResults( expected, aggregation ) {
+function assertAggregationResults(expected, aggregation) {
assert.eq(expected, t.aggregate(aggregation).toArray());
}
t.drop();
// An empty array document is dropped.
-t.save( { _id:0, a:1, b:[], c:2 } );
-assertAggregationResults( [], { $unwind:'$b' } );
+t.save({_id: 0, a: 1, b: [], c: 2});
+assertAggregationResults([],
+ {
+$unwind:
+ '$b'
+ });
// Values from a nonempty array in another document are unwound.
-t.save( { _id:1, b:[ 4, 5 ] } );
-assertAggregationResults( [ { _id:1, b:4 },
- { _id:1, b:5 } ],
- { $unwind:'$b' } );
+t.save({_id: 1, b: [4, 5]});
+assertAggregationResults([{_id: 1, b: 4}, {_id: 1, b: 5}], {$unwind: '$b'});
// Another empty array document is dropped.
-t.save( { _id:2, b:[] } );
-assertAggregationResults( [ { _id:1, b:4 },
- { _id:1, b:5 } ],
- { $unwind:'$b' } );
+t.save({_id: 2, b: []});
+assertAggregationResults([{_id: 1, b: 4}, {_id: 1, b: 5}], {$unwind: '$b'});
t.drop();
// A nested empty array document is dropped.
-t.save( { _id:0, a:1, b:{ x:10, y:[], z:20 }, c:2 } );
-assertAggregationResults( [], { $unwind:'$b.y' } );
+t.save({_id: 0, a: 1, b: {x: 10, y: [], z: 20}, c: 2});
+assertAggregationResults([],
+ {
+$unwind:
+ '$b.y'
+ });
t.drop();
// A null value document is dropped.
-t.save( { _id:0, a:1, b:null, c:2 } );
-assertAggregationResults( [], { $unwind:'$b' } );
+t.save({_id: 0, a: 1, b: null, c: 2});
+assertAggregationResults([],
+ {
+$unwind:
+ '$b'
+ });
t.drop();
// A missing value causes the document to be dropped.
-t.save( { _id:0, a:1, c:2 } );
-assertAggregationResults( [], { $unwind:'$b' } );
+t.save({_id: 0, a: 1, c: 2});
+assertAggregationResults([],
+ {
+$unwind:
+ '$b'
+ });
t.drop();
// A missing value in an existing nested object causes the document to be dropped.
-t.save( { _id:0, a:1, b:{ d:4 }, c:2 } );
-assertAggregationResults( [], { $unwind:'$b.y' } );
+t.save({_id: 0, a: 1, b: {d: 4}, c: 2});
+assertAggregationResults([],
+ {
+$unwind:
+ '$b.y'
+ });
t.drop();
// A missing value in a missing nested object causes the document to be dropped.
-t.save( { _id:0, a:1, b:10, c:2 } );
-assertAggregationResults( [], { $unwind:'$b.y' } );
+t.save({_id: 0, a: 1, b: 10, c: 2});
+assertAggregationResults([],
+ {
+$unwind:
+ '$b.y'
+ });
diff --git a/jstests/aggregation/bugs/server6143.js b/jstests/aggregation/bugs/server6143.js
index 05e0b2a9de5..375616f00d7 100644
--- a/jstests/aggregation/bugs/server6143.js
+++ b/jstests/aggregation/bugs/server6143.js
@@ -18,10 +18,7 @@ load('jstests/aggregation/extras/utils.js');
db.s6143.drop();
// Populate db
-db.s6143.save({a:null});
+db.s6143.save({a: null});
// Aggregate using a date expression on a null value, assert error
-assertErrorCode(db.s6143,
- { $project : {dateConvert : {$dayOfWeek:["$a"]}}},
- 16006);
-
+assertErrorCode(db.s6143, {$project: {dateConvert: {$dayOfWeek: ["$a"]}}}, 16006);
diff --git a/jstests/aggregation/bugs/server6147.js b/jstests/aggregation/bugs/server6147.js
index 86f58e1c061..b376afa3c75 100644
--- a/jstests/aggregation/bugs/server6147.js
+++ b/jstests/aggregation/bugs/server6147.js
@@ -16,19 +16,19 @@
db.s6147.drop();
// Populate db
-db.s6147.save({a:1});
-db.s6147.save({a:2});
+db.s6147.save({a: 1});
+db.s6147.save({a: 2});
// Aggregate checking various combinations of the constant and the field
-var s6147 = db.s6147.aggregate(
- { $project : {
- _id : 0,
- constantAndField : { $ne: [1, "$a"] },
- fieldAndConstant : { $ne: ["$a", 1] },
- constantAndConstant : { $ne: [1, 1] },
- fieldAndField : { $ne: ["$a", "$a"] }
- }}
-);
+var s6147 = db.s6147.aggregate({
+ $project: {
+ _id: 0,
+ constantAndField: {$ne: [1, "$a"]},
+ fieldAndConstant: {$ne: ["$a", 1]},
+ constantAndConstant: {$ne: [1, 1]},
+ fieldAndField: {$ne: ["$a", "$a"]}
+ }
+});
/*
* In both documents the constantAndConstant and fieldAndField should be false since they compare
@@ -38,16 +38,16 @@ var s6147 = db.s6147.aggregate(
*/
var s6147result = [
{
- constantAndField : false,
- fieldAndConstant : false,
- constantAndConstant : false,
- fieldAndField : false
+ constantAndField: false,
+ fieldAndConstant: false,
+ constantAndConstant: false,
+ fieldAndField: false
},
{
- constantAndField : true,
- fieldAndConstant : true,
- constantAndConstant : false,
- fieldAndField : false
+ constantAndField: true,
+ fieldAndConstant: true,
+ constantAndConstant: false,
+ fieldAndField: false
}
];
diff --git a/jstests/aggregation/bugs/server6165.js b/jstests/aggregation/bugs/server6165.js
index c2ad5fc0fe6..7aeba059431 100644
--- a/jstests/aggregation/bugs/server6165.js
+++ b/jstests/aggregation/bugs/server6165.js
@@ -16,63 +16,65 @@ db.s6165.drop();
db.s6165.save({});
// Aggregate checking various combinations of number types
-// The $match portion ensures they are of the correct type as the shell turns
+// The $match portion ensures they are of the correct type as the shell turns
// the ints back to doubles at the end so we can not check types with asserts
-var s6165 = db.s6165.aggregate(
- { $project: {
- _id: 0,
- dub_dub: {$mod: [138.5, 3.0]},
- dub_int: {$mod: [138.5, NumberLong(3)]},
- dub_long: {$mod: [138.5, NumberInt(3)]},
- int_dub: {$mod: [NumberInt(8), 3.25]},
- int_dubint: {$mod: [NumberInt(8), 3.0]},
- int_int: {$mod: [NumberInt(8), NumberInt(3)]},
- int_long: {$mod: [NumberInt(8), NumberLong(3)]},
- long_dub: {$mod: [NumberLong(8), 3.25]},
- long_dubint: {$mod: [NumberLong(8), 3.0]},
- long_dublong: {$mod: [NumberLong(500000000000), 450000000000.0]},
- long_int: {$mod: [NumberLong(8), NumberInt(3)]},
- long_long: {$mod: [NumberLong(8), NumberLong(3)]},
- verylong_verylong: {$mod: [NumberLong(800000000000), NumberLong(300000000000)]}
- }},
- { $match: {
- // 1 is NumberDouble
- dub_dub: {$type: 1},
- dub_int: {$type: 1},
- dub_long: {$type: 1},
- int_dub: {$type: 1},
- // 16 is NumberInt
- int_dubint: {$type: 16},
- int_int: {$type: 16},
- // 18 is NumberLong
- int_long: {$type: 18},
- long_dub: {$type: 1},
- long_dubint: {$type: 18},
- long_dublong: {$type: 1},
- long_int: {$type: 18},
- long_long: {$type: 18},
- verylong_verylong: {$type: 18}
- }}
-);
+var s6165 =
+ db.s6165.aggregate(
+ {
+ $project: {
+ _id: 0,
+ dub_dub: {$mod: [138.5, 3.0]},
+ dub_int: {$mod: [138.5, NumberLong(3)]},
+ dub_long: {$mod: [138.5, NumberInt(3)]},
+ int_dub: {$mod: [NumberInt(8), 3.25]},
+ int_dubint: {$mod: [NumberInt(8), 3.0]},
+ int_int: {$mod: [NumberInt(8), NumberInt(3)]},
+ int_long: {$mod: [NumberInt(8), NumberLong(3)]},
+ long_dub: {$mod: [NumberLong(8), 3.25]},
+ long_dubint: {$mod: [NumberLong(8), 3.0]},
+ long_dublong: {$mod: [NumberLong(500000000000), 450000000000.0]},
+ long_int: {$mod: [NumberLong(8), NumberInt(3)]},
+ long_long: {$mod: [NumberLong(8), NumberLong(3)]},
+ verylong_verylong: {$mod: [NumberLong(800000000000), NumberLong(300000000000)]}
+ }
+ },
+ {
+ $match: {
+ // 1 is NumberDouble
+ dub_dub: {$type: 1},
+ dub_int: {$type: 1},
+ dub_long: {$type: 1},
+ int_dub: {$type: 1},
+ // 16 is NumberInt
+ int_dubint: {$type: 16},
+ int_int: {$type: 16},
+ // 18 is NumberLong
+ int_long: {$type: 18},
+ long_dub: {$type: 1},
+ long_dubint: {$type: 18},
+ long_dublong: {$type: 1},
+ long_int: {$type: 18},
+ long_long: {$type: 18},
+ verylong_verylong: {$type: 18}
+ }
+ });
// Correct answers (it is mainly the types that are important here)
-var s6165result = [
- {
- dub_dub: 0.5,
- dub_int: 0.5,
- dub_long: 0.5,
- int_dub: 1.5,
- int_dubint: 2,
- int_int: 2,
- int_long: NumberLong(2),
- long_dub: 1.5,
- long_dubint: NumberLong(2),
- long_dublong: 50000000000,
- long_int: NumberLong(2),
- long_long: NumberLong(2),
- verylong_verylong: NumberLong(200000000000)
- }
-];
+var s6165result = [{
+ dub_dub: 0.5,
+ dub_int: 0.5,
+ dub_long: 0.5,
+ int_dub: 1.5,
+ int_dubint: 2,
+ int_int: 2,
+ int_long: NumberLong(2),
+ long_dub: 1.5,
+ long_dubint: NumberLong(2),
+ long_dublong: 50000000000,
+ long_int: NumberLong(2),
+ long_long: NumberLong(2),
+ verylong_verylong: NumberLong(200000000000)
+}];
// Assert
assert.eq(s6165.toArray(), s6165result, 's6165 failed');
diff --git a/jstests/aggregation/bugs/server6177.js b/jstests/aggregation/bugs/server6177.js
index 2bdb3f44214..53339c57d22 100644
--- a/jstests/aggregation/bugs/server6177.js
+++ b/jstests/aggregation/bugs/server6177.js
@@ -6,14 +6,12 @@ load('jstests/aggregation/extras/utils.js');
var c = db.c;
c.drop();
-c.save( {} );
+c.save({});
// These currently give different errors
-assertErrorCode(c, { $project:{ 'x':{ $add:[ 1 ] }, 'x.b':1 } }, 16401);
-assertErrorCode(c, { $project:{ 'x.b': 1, 'x':{ $add:[ 1 ] }} }, 16400);
+assertErrorCode(c, {$project: {'x': {$add: [1]}, 'x.b': 1}}, 16401);
+assertErrorCode(c, {$project: {'x.b': 1, 'x': {$add: [1]}}}, 16400);
// These both give the same error however
-assertErrorCode(c, { $project:{'x':{'b':1}, 'x.b': 1} }, 16400);
-assertErrorCode(c, { $project:{'x.b': 1, 'x':{'b':1}} }, 16400);
-
-
+assertErrorCode(c, {$project: {'x': {'b': 1}, 'x.b': 1}}, 16400);
+assertErrorCode(c, {$project: {'x.b': 1, 'x': {'b': 1}}}, 16400);
diff --git a/jstests/aggregation/bugs/server6179.js b/jstests/aggregation/bugs/server6179.js
index 4eba802e14e..20158af7fb7 100644
--- a/jstests/aggregation/bugs/server6179.js
+++ b/jstests/aggregation/bugs/server6179.js
@@ -1,82 +1,51 @@
// SERVER-6179: support for two $groups in sharded agg
(function() {
-var s = new ShardingTest({ name: "aggregation_multiple_group", shards: 2, mongos: 1 });
-s.stopBalancer();
+ var s = new ShardingTest({name: "aggregation_multiple_group", shards: 2, mongos: 1});
+ s.stopBalancer();
-s.adminCommand({ enablesharding:"test" });
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({ shardcollection: "test.data", key:{ _id: 1 } });
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
-var d = s.getDB( "test" );
+ var d = s.getDB("test");
-// Insert _id values 0 - 99
-var N = 100;
+ // Insert _id values 0 - 99
+ var N = 100;
-var bulkOp = d.data.initializeOrderedBulkOp();
-for(var i = 0; i < N; ++i) {
- bulkOp.insert({ _id: i, i: i%10 });
-}
-bulkOp.execute();
-
-// Split the data into 3 chunks
-s.adminCommand( { split:"test.data", middle:{ _id:33 } } );
-s.adminCommand( { split:"test.data", middle:{ _id:66 } } );
-
-// Migrate the middle chunk to another shard
-s.adminCommand({ movechunk: "test.data",
- find: { _id: 50 },
- to: s.getOther(s.getPrimaryShard("test")).name });
-
-// Check that we get results rather than an error
-var result = d.data.aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
- {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
- {$sort: {_id: 1}}).toArray();
-expected = [
- {
- "_id" : 0,
- "avg_id" : 45
- },
- {
- "_id" : 1,
- "avg_id" : 46
- },
- {
- "_id" : 2,
- "avg_id" : 47
- },
- {
- "_id" : 3,
- "avg_id" : 48
- },
- {
- "_id" : 4,
- "avg_id" : 49
- },
- {
- "_id" : 5,
- "avg_id" : 50
- },
- {
- "_id" : 6,
- "avg_id" : 51
- },
- {
- "_id" : 7,
- "avg_id" : 52
- },
- {
- "_id" : 8,
- "avg_id" : 53
- },
- {
- "_id" : 9,
- "avg_id" : 54
+ var bulkOp = d.data.initializeOrderedBulkOp();
+ for (var i = 0; i < N; ++i) {
+ bulkOp.insert({_id: i, i: i % 10});
}
-];
-
-assert.eq(result, expected);
-
-s.stop();
+ bulkOp.execute();
+
+ // Split the data into 3 chunks
+ s.adminCommand({split: "test.data", middle: {_id: 33}});
+ s.adminCommand({split: "test.data", middle: {_id: 66}});
+
+ // Migrate the middle chunk to another shard
+ s.adminCommand(
+ {movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getPrimaryShard("test")).name});
+
+ // Check that we get results rather than an error
+ var result = d.data.aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
+ {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
+ {$sort: {_id: 1}}).toArray();
+ expected = [
+ {"_id": 0, "avg_id": 45},
+ {"_id": 1, "avg_id": 46},
+ {"_id": 2, "avg_id": 47},
+ {"_id": 3, "avg_id": 48},
+ {"_id": 4, "avg_id": 49},
+ {"_id": 5, "avg_id": 50},
+ {"_id": 6, "avg_id": 51},
+ {"_id": 7, "avg_id": 52},
+ {"_id": 8, "avg_id": 53},
+ {"_id": 9, "avg_id": 54}
+ ];
+
+ assert.eq(result, expected);
+
+ s.stop();
})();
diff --git a/jstests/aggregation/bugs/server6181.js b/jstests/aggregation/bugs/server6181.js
index 68fc7998435..d48a5dbfe02 100644
--- a/jstests/aggregation/bugs/server6181.js
+++ b/jstests/aggregation/bugs/server6181.js
@@ -3,10 +3,10 @@
c = db.c;
c.drop();
-c.save( { a:2 } );
+c.save({a: 2});
-res = c.aggregate( { $project:{ _id:'$a' } } );
-assert.eq(res.toArray(), [{_id:2}]);
+res = c.aggregate({$project: {_id: '$a'}});
+assert.eq(res.toArray(), [{_id: 2}]);
-res = c.aggregate( { $project:{ _id:{$add: [1, '$a']} } } );
-assert.eq(res.toArray(), [{_id:3}]);
+res = c.aggregate({$project: {_id: {$add: [1, '$a']}}});
+assert.eq(res.toArray(), [{_id: 3}]);
diff --git a/jstests/aggregation/bugs/server6184.js b/jstests/aggregation/bugs/server6184.js
index fd2e1b397aa..ae0f5ae947c 100644
--- a/jstests/aggregation/bugs/server6184.js
+++ b/jstests/aggregation/bugs/server6184.js
@@ -3,18 +3,20 @@
c = db.c;
c.drop();
-c.save( { a:'missing', b:{ c:'bar', a: 'baz', z:'not there' } } );
+c.save({a: 'missing', b: {c: 'bar', a: 'baz', z: 'not there'}});
function test(projection) {
res = c.aggregate({$project: projection});
assert.eq(res.toArray()[0], {b: {c: 'bar', a: 'baz'}});
}
-test({_id:0, b: {a:1}, 'b.c': 1});
-test({_id:0, 'b.c': 1, b: {a:1}});
+test({_id: 0, b: {a: 1}, 'b.c': 1});
+test({_id: 0, 'b.c': 1, b: {a: 1}});
// Synthetic fields should be in the order they appear in the $project
-one = {$add:[1]};
-res = c.aggregate({$project: {_id:0, 'A.Z':one, A:{Y:one, A:one}, 'A.B': one}});
-assert.eq(res.toArray()[0], {A: {Z:1, Y:1, A:1, B:1}});
+one = {
+ $add: [1]
+};
+res = c.aggregate({$project: {_id: 0, 'A.Z': one, A: {Y: one, A: one}, 'A.B': one}});
+assert.eq(res.toArray()[0], {A: {Z: 1, Y: 1, A: 1, B: 1}});
diff --git a/jstests/aggregation/bugs/server6185.js b/jstests/aggregation/bugs/server6185.js
index ab7c821d001..e1b19ad2c1f 100644
--- a/jstests/aggregation/bugs/server6185.js
+++ b/jstests/aggregation/bugs/server6185.js
@@ -2,11 +2,11 @@
c = db.c;
c.drop();
-c.save({a:[1]});
-c.save({a:{c:1}});
-c.save({a:[{c:1},{b:1,c:1},{c:1}]});
-c.save({a:1});
-c.save({b:1});
+c.save({a: [1]});
+c.save({a: {c: 1}});
+c.save({a: [{c: 1}, {b: 1, c: 1}, {c: 1}]});
+c.save({a: 1});
+c.save({b: 1});
// assert the aggregation and the query produce the same thing
-assert.eq(c.aggregate({$project:{'a.b':1}}).toArray(), c.find({}, {'a.b':1}).toArray());
+assert.eq(c.aggregate({$project: {'a.b': 1}}).toArray(), c.find({}, {'a.b': 1}).toArray());
diff --git a/jstests/aggregation/bugs/server6186.js b/jstests/aggregation/bugs/server6186.js
index 1478641dd21..fb1ea25f34c 100644
--- a/jstests/aggregation/bugs/server6186.js
+++ b/jstests/aggregation/bugs/server6186.js
@@ -3,37 +3,37 @@
t = db.jstests_aggregation_server6186;
t.drop();
-t.save( {} );
+t.save({});
-function substr( string, pos, n ) {
- return t.aggregate( { $project:{ a:{ $substr:[ string, pos, n ] } } } ).toArray()[ 0 ].a;
+function substr(string, pos, n) {
+ return t.aggregate({$project: {a: {$substr: [string, pos, n]}}}).toArray()[0].a;
}
-function expectedSubstr( string, pos, n ) {
- if ( pos < 0 ) {
+function expectedSubstr(string, pos, n) {
+ if (pos < 0) {
// A negative value is interpreted as a large unsigned int, and is expected to be out of
// bounds.
return "";
}
- if ( n < 0 ) {
+ if (n < 0) {
// A negative value is interpreted as a large unsigned int, expected to exceed the length
// of the string. Passing the string length is functionally equivalent.
n = string.length;
}
- return string.substring( pos, pos + n );
+ return string.substring(pos, pos + n);
}
-function assertSubstr( string, pos, n ) {
- assert.eq( expectedSubstr( string, pos, n ), substr( string, pos, n ) );
+function assertSubstr(string, pos, n) {
+ assert.eq(expectedSubstr(string, pos, n), substr(string, pos, n));
}
-function checkVariousSubstrings( string ) {
- for( pos = -2; pos < 5; ++pos ) {
- for( n = -2; n < 7; ++n ) {
- assertSubstr( string, pos, n );
+function checkVariousSubstrings(string) {
+ for (pos = -2; pos < 5; ++pos) {
+ for (n = -2; n < 7; ++n) {
+ assertSubstr(string, pos, n);
}
}
}
-checkVariousSubstrings( "abc" );
-checkVariousSubstrings( "" );
+checkVariousSubstrings("abc");
+checkVariousSubstrings("");
diff --git a/jstests/aggregation/bugs/server6189.js b/jstests/aggregation/bugs/server6189.js
index ff888f8725e..72f3e002a5a 100644
--- a/jstests/aggregation/bugs/server6189.js
+++ b/jstests/aggregation/bugs/server6189.js
@@ -5,34 +5,42 @@ function test(date, testSynthetics) {
print("testing " + date);
c.drop();
- c.save( {date: date} );
+ c.save({date: date});
var ISOfmt = (date.getUTCMilliseconds() == 0) ? 'ISODate("%Y-%m-%dT%H:%M:%SZ")'
: 'ISODate("%Y-%m-%dT%H:%M:%S.%LZ")';
// Can't use aggregate helper or assertErrorCode because we need to handle multiple error types
- var res = c.runCommand('aggregate', {pipeline: [
- {$project: { _id: 0
- , year:{ $year: '$date' }
- , month:{ $month: '$date' }
- , dayOfMonth:{ $dayOfMonth: '$date' }
- , hour:{ $hour: '$date' }
- , minute:{ $minute: '$date' }
- , second:{ $second: '$date' }
-
- // server-6666
- , millisecond:{ $millisecond: '$date' }
-
- // server-9289
- , millisecondPlusTen:{ $millisecond: {$add: ['$date', 10]}}
-
- // $substr will call coerceToString
- , string: {$substr: ['$date', 0,1000]}
-
- // server-11118
- , format: {$dateToString: { format: ISOfmt
- , date: '$date'}}
- }}]});
+ var res = c.runCommand('aggregate',
+ {
+ pipeline: [{
+ $project: {
+ _id: 0,
+ year: {$year: '$date'},
+ month: {$month: '$date'},
+ dayOfMonth: {$dayOfMonth: '$date'},
+ hour: {$hour: '$date'},
+ minute: {$minute: '$date'},
+ second: {$second: '$date'}
+
+ // server-6666
+ ,
+ millisecond: {$millisecond: '$date'}
+
+ // server-9289
+ ,
+ millisecondPlusTen: {$millisecond: {$add: ['$date', 10]}}
+
+ // $substr will call coerceToString
+ ,
+ string: {$substr: ['$date', 0, 1000]}
+
+ // server-11118
+ ,
+ format: {$dateToString: {format: ISOfmt, date: '$date'}}
+ }
+ }]
+ });
if (date.valueOf() < 0 && _isWindows() && res.code == 16422) {
// some versions of windows (but not all) fail with dates before 1970
@@ -40,52 +48,52 @@ function test(date, testSynthetics) {
return;
}
- if (date.valueOf()/1000 < -2*1024*1024*1024 && res.code == 16421) {
+ if (date.valueOf() / 1000 < -2 * 1024 * 1024 * 1024 && res.code == 16421) {
// we correctly detected that we are outside of the range of a 32-bit time_t
print("skipping test of " + date.tojson() + " because it is outside of time_t range");
return;
}
assert.commandWorked(res);
- assert.eq(res.result[0], { year: date.getUTCFullYear()
- , month: date.getUTCMonth() + 1 // jan == 1
- , dayOfMonth: date.getUTCDate()
- , hour: date.getUTCHours()
- , minute: date.getUTCMinutes()
- , second: date.getUTCSeconds()
- , millisecond: date.getUTCMilliseconds()
- , millisecondPlusTen: ((date.getUTCMilliseconds() + 10) % 1000)
- , string: date.tojson().slice(9,28)
- , format: date.tojson()
- } );
+ assert.eq(res.result[0],
+ {
+ year: date.getUTCFullYear(),
+ month: date.getUTCMonth() + 1 // jan == 1
+ ,
+ dayOfMonth: date.getUTCDate(),
+ hour: date.getUTCHours(),
+ minute: date.getUTCMinutes(),
+ second: date.getUTCSeconds(),
+ millisecond: date.getUTCMilliseconds(),
+ millisecondPlusTen: ((date.getUTCMilliseconds() + 10) % 1000),
+ string: date.tojson().slice(9, 28),
+ format: date.tojson()
+ });
if (testSynthetics) {
// Tests with this set all have the same value for these fields
- res = c.aggregate( { $project:{ _id: 0
- , week:{ $week: '$date' }
- , dayOfWeek:{ $dayOfWeek: '$date' }
- , dayOfYear:{ $dayOfYear: '$date' }
- , format: { $dateToString: { format: '%U-%w-%j'
- , date: '$date' } }
- } } );
-
- assert.eq(res.toArray()[0], { week: 0
- , dayOfWeek: 7
- , dayOfYear: 2
- , format: '00-7-002'
- } );
+ res = c.aggregate({
+ $project: {
+ _id: 0,
+ week: {$week: '$date'},
+ dayOfWeek: {$dayOfWeek: '$date'},
+ dayOfYear: {$dayOfYear: '$date'},
+ format: {$dateToString: {format: '%U-%w-%j', date: '$date'}}
+ }
+ });
+
+ assert.eq(res.toArray()[0], {week: 0, dayOfWeek: 7, dayOfYear: 2, format: '00-7-002'});
}
}
-
// Basic test
test(ISODate('1960-01-02 03:04:05.006Z'), true);
// Testing special rounding rules for seconds
-test(ISODate('1960-01-02 03:04:04.999Z'), false); // second = 4
-test(ISODate('1960-01-02 03:04:05.000Z'), true); // second = 5
-test(ISODate('1960-01-02 03:04:05.001Z'), true); // second = 5
-test(ISODate('1960-01-02 03:04:05.999Z'), true); // second = 5
+test(ISODate('1960-01-02 03:04:04.999Z'), false); // second = 4
+test(ISODate('1960-01-02 03:04:05.000Z'), true); // second = 5
+test(ISODate('1960-01-02 03:04:05.001Z'), true); // second = 5
+test(ISODate('1960-01-02 03:04:05.999Z'), true); // second = 5
// Test date before 1900 (negative tm_year values from gmtime)
test(ISODate('1860-01-02 03:04:05.006Z'), false);
diff --git a/jstests/aggregation/bugs/server6190.js b/jstests/aggregation/bugs/server6190.js
index caefaeb1b3e..d32a652e74b 100644
--- a/jstests/aggregation/bugs/server6190.js
+++ b/jstests/aggregation/bugs/server6190.js
@@ -5,131 +5,132 @@ load('jstests/aggregation/extras/utils.js');
t = db.jstests_aggregation_server6190;
t.drop();
-t.save( {} );
+t.save({});
-function week( date ) {
- return t.aggregate( { $project:{ a:{ $week:date } } },
- { $match:{ a:{ $type:16 /* Int type expected */ } } } ).toArray()[ 0 ].a;
+function week(date) {
+ return t.aggregate({$project: {a: {$week: date}}},
+ {$match: {a: {$type: 16 /* Int type expected */}}})
+ .toArray()[0]
+ .a;
}
-function assertWeek( expectedWeek, date ) {
- assert.eq( expectedWeek, week( date ) );
+function assertWeek(expectedWeek, date) {
+ assert.eq(expectedWeek, week(date));
}
// Sun Jan 1 1984
-assertWeek( 1, new Date(Date.UTC( 1984, 0, 1 )) );
+assertWeek(1, new Date(Date.UTC(1984, 0, 1)));
// Mon Jan 2 1984
-assertWeek( 1, new Date(Date.UTC( 1984, 0, 2 )) );
+assertWeek(1, new Date(Date.UTC(1984, 0, 2)));
// Sat Jan 7 1984
-assertWeek( 1, new Date(Date.UTC( 1984, 0, 7 )) );
+assertWeek(1, new Date(Date.UTC(1984, 0, 7)));
// Sun Jan 8 1984
-assertWeek( 2, new Date(Date.UTC( 1984, 0, 8 )) );
+assertWeek(2, new Date(Date.UTC(1984, 0, 8)));
// Sat Feb 18 1984
-assertWeek( 7, new Date(Date.UTC( 1984, 1, 18 )) );
+assertWeek(7, new Date(Date.UTC(1984, 1, 18)));
// Sun Feb 19 1984
-assertWeek( 8, new Date(Date.UTC( 1984, 1, 19 )) );
+assertWeek(8, new Date(Date.UTC(1984, 1, 19)));
// Mon Jan 1 2007
-assertWeek( 0, new Date(Date.UTC( 2007, 0, 1 )) );
+assertWeek(0, new Date(Date.UTC(2007, 0, 1)));
// Tue Jan 2 2007
-assertWeek( 0, new Date(Date.UTC( 2007, 0, 2 )) );
+assertWeek(0, new Date(Date.UTC(2007, 0, 2)));
// Sat Jan 6 2007
-assertWeek( 0, new Date(Date.UTC( 2007, 0, 6 )) );
+assertWeek(0, new Date(Date.UTC(2007, 0, 6)));
// Sun Jan 7 2007
-assertWeek( 1, new Date(Date.UTC( 2007, 0, 7 )) );
+assertWeek(1, new Date(Date.UTC(2007, 0, 7)));
// Mon Jan 8 2007
-assertWeek( 1, new Date(Date.UTC( 2007, 0, 8 )) );
+assertWeek(1, new Date(Date.UTC(2007, 0, 8)));
// Sat Jan 13 2007
-assertWeek( 1, new Date(Date.UTC( 2007, 0, 13 )) );
+assertWeek(1, new Date(Date.UTC(2007, 0, 13)));
// Sun Jan 14 2007
-assertWeek( 2, new Date(Date.UTC( 2007, 0, 14 )) );
+assertWeek(2, new Date(Date.UTC(2007, 0, 14)));
// Sat Mar 3 2007
-assertWeek( 8, new Date(Date.UTC( 2007, 2, 3 )) );
+assertWeek(8, new Date(Date.UTC(2007, 2, 3)));
// Sun Mar 4 2007
-assertWeek( 9, new Date(Date.UTC( 2007, 2, 4 )) );
+assertWeek(9, new Date(Date.UTC(2007, 2, 4)));
// Tue Jan 1 2008
-assertWeek( 0, new Date(Date.UTC( 2008, 0, 1 )) );
+assertWeek(0, new Date(Date.UTC(2008, 0, 1)));
// Sat Jan 5 2008
-assertWeek( 0, new Date(Date.UTC( 2008, 0, 5 )) );
+assertWeek(0, new Date(Date.UTC(2008, 0, 5)));
// Sun Jan 6 2008
-assertWeek( 1, new Date(Date.UTC( 2008, 0, 6 )) );
+assertWeek(1, new Date(Date.UTC(2008, 0, 6)));
// Sat Apr 26 2008
-assertWeek( 16, new Date(Date.UTC( 2008, 3, 26 )) );
+assertWeek(16, new Date(Date.UTC(2008, 3, 26)));
// Sun Apr 27 2008
-assertWeek( 17, new Date(Date.UTC( 2008, 3, 27 )) );
+assertWeek(17, new Date(Date.UTC(2008, 3, 27)));
// Wed Jan 1 2003
-assertWeek( 0, new Date(Date.UTC( 2003, 0, 1 )) );
+assertWeek(0, new Date(Date.UTC(2003, 0, 1)));
// Sat Jan 4 2003
-assertWeek( 0, new Date(Date.UTC( 2003, 0, 4 )) );
+assertWeek(0, new Date(Date.UTC(2003, 0, 4)));
// Sun Jan 5 2003
-assertWeek( 1, new Date(Date.UTC( 2003, 0, 5 )) );
+assertWeek(1, new Date(Date.UTC(2003, 0, 5)));
// Sat Dec 27 2003
-assertWeek( 51, new Date(Date.UTC( 2003, 11, 27 )) );
+assertWeek(51, new Date(Date.UTC(2003, 11, 27)));
// Sat Dec 28 2003
-assertWeek( 52, new Date(Date.UTC( 2003, 11, 28 )) );
+assertWeek(52, new Date(Date.UTC(2003, 11, 28)));
// Thu Jan 1 2009
-assertWeek( 0, new Date(Date.UTC( 2009, 0, 1 )) );
+assertWeek(0, new Date(Date.UTC(2009, 0, 1)));
// Sat Jan 3 2009
-assertWeek( 0, new Date(Date.UTC( 2009, 0, 3 )) );
+assertWeek(0, new Date(Date.UTC(2009, 0, 3)));
// Sun Jan 4 2008
-assertWeek( 1, new Date(Date.UTC( 2009, 0, 4 )) );
+assertWeek(1, new Date(Date.UTC(2009, 0, 4)));
// Sat Oct 31 2009
-assertWeek( 43, new Date(Date.UTC( 2009, 9, 31 )) );
+assertWeek(43, new Date(Date.UTC(2009, 9, 31)));
// Sun Nov 1 2008
-assertWeek( 44, new Date(Date.UTC( 2009, 10, 1 )) );
+assertWeek(44, new Date(Date.UTC(2009, 10, 1)));
// Fri Jan 1 2010
-assertWeek( 0, new Date(Date.UTC( 2010, 0, 1 )) );
+assertWeek(0, new Date(Date.UTC(2010, 0, 1)));
// Sat Jan 2 2010
-assertWeek( 0, new Date(Date.UTC( 2010, 0, 2 )) );
+assertWeek(0, new Date(Date.UTC(2010, 0, 2)));
// Sun Jan 3 2010
-assertWeek( 1, new Date(Date.UTC( 2010, 0, 3 )) );
+assertWeek(1, new Date(Date.UTC(2010, 0, 3)));
// Sat Sept 18 2010
-assertWeek( 37, new Date(Date.UTC( 2010, 8, 18 )) );
+assertWeek(37, new Date(Date.UTC(2010, 8, 18)));
// Sun Sept 19 2010
-assertWeek( 38, new Date(Date.UTC( 2010, 8, 19 )) );
+assertWeek(38, new Date(Date.UTC(2010, 8, 19)));
// Sat Jan 1 2011
-assertWeek( 0, new Date(Date.UTC( 2011, 0, 1 )) );
+assertWeek(0, new Date(Date.UTC(2011, 0, 1)));
// Sun Jan 2 2011
-assertWeek( 1, new Date(Date.UTC( 2011, 0, 2 )) );
+assertWeek(1, new Date(Date.UTC(2011, 0, 2)));
// Sat Aug 20 2011
-assertWeek( 33, new Date(Date.UTC( 2011, 7, 20 )) );
+assertWeek(33, new Date(Date.UTC(2011, 7, 20)));
// Sun Aug 21 2011
-assertWeek( 34, new Date(Date.UTC( 2011, 7, 21 )) );
-
+assertWeek(34, new Date(Date.UTC(2011, 7, 21)));
// Leap year tests.
// Sat Feb 27 2016
-assertWeek( 8, new Date(Date.UTC( 2016, 1, 27 )) );
+assertWeek(8, new Date(Date.UTC(2016, 1, 27)));
// Sun Feb 28 2016
-assertWeek( 9, new Date(Date.UTC( 2016, 1, 28 )) );
+assertWeek(9, new Date(Date.UTC(2016, 1, 28)));
// Mon Feb 29 2016
-assertWeek( 9, new Date(Date.UTC( 2016, 1, 29 )) );
+assertWeek(9, new Date(Date.UTC(2016, 1, 29)));
// Tue Mar 1 2016
-assertWeek( 9, new Date(Date.UTC( 2016, 2, 1 )) );
+assertWeek(9, new Date(Date.UTC(2016, 2, 1)));
// Sat Feb 28 2032
-assertWeek( 8, new Date(Date.UTC( 2032, 1, 28 )) );
+assertWeek(8, new Date(Date.UTC(2032, 1, 28)));
// Sun Feb 29 2032
-assertWeek( 9, new Date(Date.UTC( 2032, 1, 29 )) );
+assertWeek(9, new Date(Date.UTC(2032, 1, 29)));
// Mon Mar 1 2032
-assertWeek( 9, new Date(Date.UTC( 2032, 2, 1 )) );
+assertWeek(9, new Date(Date.UTC(2032, 2, 1)));
// Fri Feb 28 2020
-assertWeek( 8, new Date(Date.UTC( 2020, 1, 28 )) );
+assertWeek(8, new Date(Date.UTC(2020, 1, 28)));
// Sat Feb 29 2020
-assertWeek( 8, new Date(Date.UTC( 2020, 1, 29 )) );
+assertWeek(8, new Date(Date.UTC(2020, 1, 29)));
// Sun Mar 1 2020
-assertWeek( 9, new Date(Date.UTC( 2020, 2, 1 )) );
+assertWeek(9, new Date(Date.UTC(2020, 2, 1)));
// Timestamp argument.
-assertWeek( 1, new Timestamp( new Date(Date.UTC( 1984, 0, 1 )).getTime() / 1000, 0 ) );
-assertWeek( 1, new Timestamp( new Date(Date.UTC( 1984, 0, 1 )).getTime() / 1000, 1000000000 ) );
+assertWeek(1, new Timestamp(new Date(Date.UTC(1984, 0, 1)).getTime() / 1000, 0));
+assertWeek(1, new Timestamp(new Date(Date.UTC(1984, 0, 1)).getTime() / 1000, 1000000000));
// Numeric argument not allowed.
assertErrorCode(t, {$project: {a: {$week: 5}}}, 16006);
@@ -138,15 +139,16 @@ assertErrorCode(t, {$project: {a: {$week: 5}}}, 16006);
assertErrorCode(t, {$project: {a: {$week: 'foo'}}}, 16006);
// Array argument format.
-assertWeek( 8, [ new Date(Date.UTC( 2016, 1, 27 )) ] );
+assertWeek(8, [new Date(Date.UTC(2016, 1, 27))]);
// Wrong number of arguments.
assertErrorCode(t, {$project: {a: {$week: []}}}, 16020);
-assertErrorCode(t, {$project: {a: {$week: [new Date(Date.UTC(2020, 1, 28)),
- new Date(Date.UTC(2020, 1, 29))]}}},
- 16020);
+assertErrorCode(
+ t,
+ {$project: {a: {$week: [new Date(Date.UTC(2020, 1, 28)), new Date(Date.UTC(2020, 1, 29))]}}},
+ 16020);
// From a field path expression.
t.remove({});
-t.save( { a:new Date(Date.UTC( 2020, 2, 1 )) } );
-assertWeek( 9, '$a' );
+t.save({a: new Date(Date.UTC(2020, 2, 1))});
+assertWeek(9, '$a');
diff --git a/jstests/aggregation/bugs/server6192_server6193.js b/jstests/aggregation/bugs/server6192_server6193.js
index 370c55b5a1f..f453a1e7060 100644
--- a/jstests/aggregation/bugs/server6192_server6193.js
+++ b/jstests/aggregation/bugs/server6192_server6193.js
@@ -3,13 +3,10 @@
var t = db.jstests_aggregation_server6192;
t.drop();
-t.save( {x: true} );
+t.save({x: true});
function assertOptimized(pipeline, v) {
- var explained = t.runCommand("aggregate", {
- pipeline: pipeline,
- explain: true
- });
+ var explained = t.runCommand("aggregate", {pipeline: pipeline, explain: true});
printjson({input: pipeline, output: explained});
@@ -20,10 +17,7 @@ function assertOptimized(pipeline, v) {
}
function assertNotOptimized(pipeline) {
- var explained = t.runCommand("aggregate", {
- pipeline: pipeline,
- explain: true
- });
+ var explained = t.runCommand("aggregate", {pipeline: pipeline, explain: true});
printjson({input: pipeline, output: explained});
@@ -34,28 +28,27 @@ function assertNotOptimized(pipeline) {
}
// short-circuiting for $and
-assertOptimized([ {$project: {a: {$and: [0, '$x']}}} ], false);
-assertOptimized([ {$project: {a: {$and: [0, 1, '$x']}}} ], false);
-assertOptimized([ {$project: {a: {$and: [0, 1, '', '$x']}}} ], false);
+assertOptimized([{$project: {a: {$and: [0, '$x']}}}], false);
+assertOptimized([{$project: {a: {$and: [0, 1, '$x']}}}], false);
+assertOptimized([{$project: {a: {$and: [0, 1, '', '$x']}}}], false);
-assertOptimized([ {$project: {a: {$and: [1, 0, '$x']}}} ], false);
-assertOptimized([ {$project: {a: {$and: [1, '', 0, '$x']}}} ], false);
-assertOptimized([ {$project: {a: {$and: [1, 1, 0, 1]}}} ], false);
+assertOptimized([{$project: {a: {$and: [1, 0, '$x']}}}], false);
+assertOptimized([{$project: {a: {$and: [1, '', 0, '$x']}}}], false);
+assertOptimized([{$project: {a: {$and: [1, 1, 0, 1]}}}], false);
// short-circuiting for $or
-assertOptimized([ {$project: {a: {$or: [1, '$x']}}} ], true);
-assertOptimized([ {$project: {a: {$or: [1, 0, '$x']}}} ], true);
-assertOptimized([ {$project: {a: {$or: [1, '', '$x']}}} ], true);
+assertOptimized([{$project: {a: {$or: [1, '$x']}}}], true);
+assertOptimized([{$project: {a: {$or: [1, 0, '$x']}}}], true);
+assertOptimized([{$project: {a: {$or: [1, '', '$x']}}}], true);
-assertOptimized([ {$project: {a: {$or: [0, 1, '$x']}}} ], true);
-assertOptimized([ {$project: {a: {$or: ['', 0, 1, '$x']}}} ], true);
-assertOptimized([ {$project: {a: {$or: [0, 0, 0, 1]}}} ], true);
+assertOptimized([{$project: {a: {$or: [0, 1, '$x']}}}], true);
+assertOptimized([{$project: {a: {$or: ['', 0, 1, '$x']}}}], true);
+assertOptimized([{$project: {a: {$or: [0, 0, 0, 1]}}}], true);
// examples that should not short-circuit
-assertNotOptimized([ {$project: {a: {$and: [1, '$x']}}} ]);
-assertNotOptimized([ {$project: {a: {$or: [0, '$x']}}} ]);
-assertNotOptimized([ {$project: {a: {$and: ['$x', '$x']}}} ]);
-assertNotOptimized([ {$project: {a: {$or: ['$x', '$x']}}} ]);
-assertNotOptimized([ {$project: {a: {$and: ['$x']}}} ]);
-assertNotOptimized([ {$project: {a: {$or: ['$x']}}} ]);
-
+assertNotOptimized([{$project: {a: {$and: [1, '$x']}}}]);
+assertNotOptimized([{$project: {a: {$or: [0, '$x']}}}]);
+assertNotOptimized([{$project: {a: {$and: ['$x', '$x']}}}]);
+assertNotOptimized([{$project: {a: {$or: ['$x', '$x']}}}]);
+assertNotOptimized([{$project: {a: {$and: ['$x']}}}]);
+assertNotOptimized([{$project: {a: {$or: ['$x']}}}]);
diff --git a/jstests/aggregation/bugs/server6194.js b/jstests/aggregation/bugs/server6194.js
index c0c72decd82..53c23f60c1f 100644
--- a/jstests/aggregation/bugs/server6194.js
+++ b/jstests/aggregation/bugs/server6194.js
@@ -2,8 +2,10 @@
c = db.c;
c.drop();
-c.save( { x:'3' } );
+c.save({x: '3'});
-project = { $project:{ a:{ $concat:[ '1', { $concat:[ 'foo', '$x', 'bar' ] }, '2' ] } } };
+project = {
+ $project: {a: {$concat: ['1', {$concat: ['foo', '$x', 'bar']}, '2']}}
+};
-assert.eq( '1foo3bar2', c.aggregate( project ).toArray()[ 0 ].a );
+assert.eq('1foo3bar2', c.aggregate(project).toArray()[0].a);
diff --git a/jstests/aggregation/bugs/server6195.js b/jstests/aggregation/bugs/server6195.js
index 72e0054b4fb..cca80a14ad5 100644
--- a/jstests/aggregation/bugs/server6195.js
+++ b/jstests/aggregation/bugs/server6195.js
@@ -4,32 +4,35 @@ load('jstests/aggregation/extras/utils.js');
c = db.s6570;
c.drop();
-c.save({v:"$", w:".", x:"foo", y:"bar"});
+c.save({v: "$", w: ".", x: "foo", y: "bar"});
-assert.eq(c.aggregate({$project:{str:{$concat:["X", "$x", "Y", "$y"]}}}).toArray()[0].str, "XfooYbar");
-assert.eq(c.aggregate({$project:{str:{$concat:["$v", "X", "$w", "Y"]}}}).toArray()[0].str, "$X.Y");
-assert.eq(c.aggregate({$project:{str:{$concat:["$w", "X", "$v", "Y"]}}}).toArray()[0].str, ".X$Y");
+assert.eq(c.aggregate({$project: {str: {$concat: ["X", "$x", "Y", "$y"]}}}).toArray()[0].str,
+ "XfooYbar");
+assert.eq(c.aggregate({$project: {str: {$concat: ["$v", "X", "$w", "Y"]}}}).toArray()[0].str,
+ "$X.Y");
+assert.eq(c.aggregate({$project: {str: {$concat: ["$w", "X", "$v", "Y"]}}}).toArray()[0].str,
+ ".X$Y");
// Nullish (both with and without other strings)
-assert.isnull(c.aggregate({$project:{str:{$concat: ["$missing"] }}}).toArray()[0].str);
-assert.isnull(c.aggregate({$project:{str:{$concat: [null] }}}).toArray()[0].str);
-assert.isnull(c.aggregate({$project:{str:{$concat: [undefined] }}}).toArray()[0].str);
-assert.isnull(c.aggregate({$project:{str:{$concat: ["$x", "$missing", "$y"] }}}).toArray()[0].str);
-assert.isnull(c.aggregate({$project:{str:{$concat: ["$x", null, "$y"] }}}).toArray()[0].str);
-assert.isnull(c.aggregate({$project:{str:{$concat: ["$x", undefined, "$y"] }}}).toArray()[0].str);
+assert.isnull(c.aggregate({$project: {str: {$concat: ["$missing"]}}}).toArray()[0].str);
+assert.isnull(c.aggregate({$project: {str: {$concat: [null]}}}).toArray()[0].str);
+assert.isnull(c.aggregate({$project: {str: {$concat: [undefined]}}}).toArray()[0].str);
+assert.isnull(c.aggregate({$project: {str: {$concat: ["$x", "$missing", "$y"]}}}).toArray()[0].str);
+assert.isnull(c.aggregate({$project: {str: {$concat: ["$x", null, "$y"]}}}).toArray()[0].str);
+assert.isnull(c.aggregate({$project: {str: {$concat: ["$x", undefined, "$y"]}}}).toArray()[0].str);
// assert fail for all other types
-assertErrorCode(c, {$project:{str:{$concat: [MinKey]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [1]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [NumberInt(1)]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [NumberLong(1)]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [true]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [function(){}]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [{}]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [[]]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [new Timestamp(0,0)]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [new Date(0)]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [new BinData(0,"")]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [/asdf/]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [MaxKey]}}}, 16702);
-assertErrorCode(c, {$project:{str:{$concat: [new ObjectId()]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [MinKey]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [1]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [NumberInt(1)]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [NumberLong(1)]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [true]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [function(){}]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [{}]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [[]]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [new Timestamp(0, 0)]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [new Date(0)]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [new BinData(0, "")]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [/asdf/]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [MaxKey]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [new ObjectId()]}}}, 16702);
diff --git a/jstests/aggregation/bugs/server6198.js b/jstests/aggregation/bugs/server6198.js
index 5371c1fc221..2d38853f72e 100644
--- a/jstests/aggregation/bugs/server6198.js
+++ b/jstests/aggregation/bugs/server6198.js
@@ -3,4 +3,4 @@ load('jstests/aggregation/extras/utils.js');
db.server6198.drop();
-assertErrorCode(db.server6198, {$group:{_id:null, "bar.baz": {$addToSet: "$foo"}}}, 16414);
+assertErrorCode(db.server6198, {$group: {_id: null, "bar.baz": {$addToSet: "$foo"}}}, 16414);
diff --git a/jstests/aggregation/bugs/server6238.js b/jstests/aggregation/bugs/server6238.js
index 0d1589f09f0..cd014219f47 100644
--- a/jstests/aggregation/bugs/server6238.js
+++ b/jstests/aggregation/bugs/server6238.js
@@ -4,12 +4,12 @@ load('jstests/aggregation/extras/utils.js');
c = db.c;
c.drop();
-c.insert({a:1});
+c.insert({a: 1});
// assert that we get the proper error in both $project and $group
-assertErrorCode(c, {$project:{$a:"$a"}}, 16404);
-assertErrorCode(c, {$project:{a:{$b: "$a"}}}, 15999);
-assertErrorCode(c, {$project:{a:{"$b": "$a"}}}, 15999);
-assertErrorCode(c, {$project:{'a.$b':"$a"}}, 16410);
-assertErrorCode(c, {$group:{_id: "$_id", $a:"$a"}}, 15950);
-assertErrorCode(c, {$group:{_id: {$a:"$a"}}}, 15999);
+assertErrorCode(c, {$project: {$a: "$a"}}, 16404);
+assertErrorCode(c, {$project: {a: {$b: "$a"}}}, 15999);
+assertErrorCode(c, {$project: {a: {"$b": "$a"}}}, 15999);
+assertErrorCode(c, {$project: {'a.$b': "$a"}}, 16410);
+assertErrorCode(c, {$group: {_id: "$_id", $a: "$a"}}, 15950);
+assertErrorCode(c, {$group: {_id: {$a: "$a"}}}, 15999);
diff --git a/jstests/aggregation/bugs/server6239.js b/jstests/aggregation/bugs/server6239.js
index d0e3be12816..f9cf35c1ea2 100644
--- a/jstests/aggregation/bugs/server6239.js
+++ b/jstests/aggregation/bugs/server6239.js
@@ -10,7 +10,7 @@ var num = 54312;
db.s6239.drop();
// Populate db
-db.s6239.save({date:new Date(millis), num: num});
+db.s6239.save({date: new Date(millis), num: num});
function test(expression, expected) {
var res = db.s6239.aggregate({$project: {out: expression}});
diff --git a/jstests/aggregation/bugs/server6240.js b/jstests/aggregation/bugs/server6240.js
index 0a13780761a..e3c59e0c649 100644
--- a/jstests/aggregation/bugs/server6240.js
+++ b/jstests/aggregation/bugs/server6240.js
@@ -20,33 +20,20 @@ load('jstests/aggregation/extras/utils.js');
db.s6240.drop();
// Populate db
-db.s6240.save({date:new Date()});
+db.s6240.save({date: new Date()});
// Aggregate using a date value in various math operations
// Add
-assertErrorCode(db.s6240,
- {$project: {add: {$add: ["$date", "$date"]}}},
- 16612);
-
+assertErrorCode(db.s6240, {$project: {add: {$add: ["$date", "$date"]}}}, 16612);
// Divide
-assertErrorCode(db.s6240,
- {$project: {divide: {$divide: ["$date", 2]}}},
- 16609);
+assertErrorCode(db.s6240, {$project: {divide: {$divide: ["$date", 2]}}}, 16609);
// Mod
-assertErrorCode(db.s6240,
- {$project: {mod: {$mod: ["$date", 2]}}},
- 16611);
-
+assertErrorCode(db.s6240, {$project: {mod: {$mod: ["$date", 2]}}}, 16611);
// Multiply
-assertErrorCode(db.s6240,
- {$project: {multiply: {$multiply: ["$date", 2]}}},
- 16555);
-
+assertErrorCode(db.s6240, {$project: {multiply: {$multiply: ["$date", 2]}}}, 16555);
// Subtract
-assertErrorCode(db.s6240,
- {$project: {subtract: {$subtract: [2, "$date"]}}},
- 16556);
+assertErrorCode(db.s6240, {$project: {subtract: {$subtract: [2, "$date"]}}}, 16556);
diff --git a/jstests/aggregation/bugs/server6269.js b/jstests/aggregation/bugs/server6269.js
index 181eaa6835f..c92245f6198 100644
--- a/jstests/aggregation/bugs/server6269.js
+++ b/jstests/aggregation/bugs/server6269.js
@@ -3,12 +3,11 @@
c = db.jstests_aggregation_server6269;
c.drop();
-c.save( { _id:0, a:[ 1, 2, 3 ] } );
+c.save({_id: 0, a: [1, 2, 3]});
// The unwound a:1 document is skipped, but the remainder are returned.
-assert.eq( [ { _id:0, a:2 }, { _id:0, a:3 } ],
- c.aggregate( { $unwind:'$a' }, { $skip:1 } ).toArray() );
+assert.eq([{_id: 0, a: 2}, {_id: 0, a: 3}], c.aggregate({$unwind: '$a'}, {$skip: 1}).toArray());
// Test with two documents.
-c.save( { _id:1, a:[ 4, 5, 6 ] } );
-assert.eq( [ { _id:0, a:3 }, { _id:1, a:4 }, { _id:1, a:5 }, { _id:1, a:6 } ],
- c.aggregate( { $unwind:'$a' }, { $skip:2 } ).toArray() );
+c.save({_id: 1, a: [4, 5, 6]});
+assert.eq([{_id: 0, a: 3}, {_id: 1, a: 4}, {_id: 1, a: 5}, {_id: 1, a: 6}],
+ c.aggregate({$unwind: '$a'}, {$skip: 2}).toArray());
diff --git a/jstests/aggregation/bugs/server6275.js b/jstests/aggregation/bugs/server6275.js
index a021f7a4af8..39feeb2552e 100644
--- a/jstests/aggregation/bugs/server6275.js
+++ b/jstests/aggregation/bugs/server6275.js
@@ -1,14 +1,14 @@
// confirm that undefined no longer counts as 0 in $avg
c = db.c;
c.drop();
-c.save({a:1});
-c.save({a:4});
-c.save({b:1});
-assert.eq(c.aggregate({$group:{_id: null, avg:{$avg:"$a"}}}).toArray()[0].avg, 2.5);
+c.save({a: 1});
+c.save({a: 4});
+c.save({b: 1});
+assert.eq(c.aggregate({$group: {_id: null, avg: {$avg: "$a"}}}).toArray()[0].avg, 2.5);
// again ensuring numberLongs work properly
c.drop();
-c.save({a:NumberLong(1)});
-c.save({a:NumberLong(4)});
-c.save({b:NumberLong(1)});
-assert.eq(c.aggregate({$group:{_id: null, avg:{$avg:"$a"}}}).toArray()[0].avg, 2.5);
+c.save({a: NumberLong(1)});
+c.save({a: NumberLong(4)});
+c.save({b: NumberLong(1)});
+assert.eq(c.aggregate({$group: {_id: null, avg: {$avg: "$a"}}}).toArray()[0].avg, 2.5);
diff --git a/jstests/aggregation/bugs/server6290.js b/jstests/aggregation/bugs/server6290.js
index c27cdc454bd..a9bbd65db77 100644
--- a/jstests/aggregation/bugs/server6290.js
+++ b/jstests/aggregation/bugs/server6290.js
@@ -6,22 +6,22 @@ load('jstests/aggregation/extras/utils.js');
var t = db.jstests_aggregation_server6290;
t.drop();
-t.save( {} );
+t.save({});
// code 15999: invalid operator
var error = 15999;
// $isoDate is an invalid operator.
-assertErrorCode(t, {$project:{ a:{ $isoDate:[ { year:1 } ] } } }, error);
+assertErrorCode(t, {$project: {a: {$isoDate: [{year: 1}]}}}, error);
// $date is an invalid operator.
-assertErrorCode(t, { $project:{ a:{ $date:[ { year:1 } ] } } }, error);
+assertErrorCode(t, {$project: {a: {$date: [{year: 1}]}}}, error);
// Alternative operands.
-assertErrorCode(t, { $project:{ a:{ $isoDate:[] } } }, error);
-assertErrorCode(t, { $project:{ a:{ $date:[] } } }, error);
-assertErrorCode(t, { $project:{ a:{ $isoDate:'foo' } } }, error);
-assertErrorCode(t, { $project:{ a:{ $date:'foo' } } }, error);
+assertErrorCode(t, {$project: {a: {$isoDate: []}}}, error);
+assertErrorCode(t, {$project: {a: {$date: []}}}, error);
+assertErrorCode(t, {$project: {a: {$isoDate: 'foo'}}}, error);
+assertErrorCode(t, {$project: {a: {$date: 'foo'}}}, error);
// Test with $group.
-assertErrorCode(t, { $group:{ _id:0, a:{ $first:{ $isoDate:[ { year:1 } ] } } } }, error);
-assertErrorCode(t, { $group:{ _id:0, a:{ $first:{ $date:[ { year:1 } ] } } } }, error);
+assertErrorCode(t, {$group: {_id: 0, a: {$first: {$isoDate: [{year: 1}]}}}}, error);
+assertErrorCode(t, {$group: {_id: 0, a: {$first: {$date: [{year: 1}]}}}}, error);
diff --git a/jstests/aggregation/bugs/server6335.js b/jstests/aggregation/bugs/server6335.js
index b4569f1b6b1..a26568280ce 100644
--- a/jstests/aggregation/bugs/server6335.js
+++ b/jstests/aggregation/bugs/server6335.js
@@ -4,5 +4,4 @@
load('jstests/aggregation/extras/utils.js');
assertErrorCode(db.foo, {$match: {$where: "return true"}}, 16395);
-assertErrorCode(db.foo, {$match: {$and:[{$where: "return true"}]}}, 16395);
-
+assertErrorCode(db.foo, {$match: {$and: [{$where: "return true"}]}}, 16395);
diff --git a/jstests/aggregation/bugs/server6361.js b/jstests/aggregation/bugs/server6361.js
index 86eddd2b4e8..873c08f43da 100644
--- a/jstests/aggregation/bugs/server6361.js
+++ b/jstests/aggregation/bugs/server6361.js
@@ -6,22 +6,22 @@ load('jstests/aggregation/extras/utils.js');
c = db.c;
c.drop();
-c.insert({a:2, nested: {_id:2, other:2}});
-assertErrorCode(c, {$project: {a:0}}, 16406);
+c.insert({a: 2, nested: {_id: 2, other: 2}});
+assertErrorCode(c, {$project: {a: 0}}, 16406);
// excluding top-level _id is still allowed
-res = c.aggregate({$project: {_id:0, a:1}});
-assert.eq(res.toArray()[0], {a:2});
+res = c.aggregate({$project: {_id: 0, a: 1}});
+assert.eq(res.toArray()[0], {a: 2});
// excluding nested _id is not
-assertErrorCode(c, {$project: {'nested._id':0}}, 16406);
+assertErrorCode(c, {$project: {'nested._id': 0}}, 16406);
// nested _id is not automatically included
-res = c.aggregate({$project: {_id:0, 'nested.other':1}});
-assert.eq(res.toArray()[0], {nested: {other:2}});
+res = c.aggregate({$project: {_id: 0, 'nested.other': 1}});
+assert.eq(res.toArray()[0], {nested: {other: 2}});
// not including anything is an error
assertErrorCode(c, {$project: {}}, 16403);
// even if you exclude _id
-assertErrorCode(c, {$project: {'_id':0}}, 16403);
+assertErrorCode(c, {$project: {'_id': 0}}, 16403);
diff --git a/jstests/aggregation/bugs/server6468.js b/jstests/aggregation/bugs/server6468.js
index aea9586973b..09515c746fa 100644
--- a/jstests/aggregation/bugs/server6468.js
+++ b/jstests/aggregation/bugs/server6468.js
@@ -2,7 +2,7 @@
c = db.c;
c.drop();
-c.save( { a:'foo', b:{ c:'bar', z:'not there' } } );
+c.save({a: 'foo', b: {c: 'bar', z: 'not there'}});
function test(projection) {
res = c.aggregate({$project: projection});
@@ -10,7 +10,7 @@ function test(projection) {
}
// These should all mean the same thing
-test({_id:0, 'b.c':1});
-test({_id:0, 'b.c':'$b.c'});
-test({_id:0, b: {c:1}});
-test({_id:0, b: {c:'$b.c'}});
+test({_id: 0, 'b.c': 1});
+test({_id: 0, 'b.c': '$b.c'});
+test({_id: 0, b: {c: 1}});
+test({_id: 0, b: {c: '$b.c'}});
diff --git a/jstests/aggregation/bugs/server6529.js b/jstests/aggregation/bugs/server6529.js
index a016f9123d1..1bc4119c547 100644
--- a/jstests/aggregation/bugs/server6529.js
+++ b/jstests/aggregation/bugs/server6529.js
@@ -4,18 +4,25 @@ load('jstests/aggregation/extras/utils.js');
c = db.s6529;
c.drop();
-c.save({a:{b:{c:{d:{e:{f:{g:19}}}}}}});
+c.save({a: {b: {c: {d: {e: {f: {g: 19}}}}}}});
// bad project
-assertErrorCode(c, {$project:{foo:{$add:[{b:1}]}}}, 16420);
+assertErrorCode(c, {$project: {foo: {$add: [{b: 1}]}}}, 16420);
// $group shouldnt allow numeric inclusions
-assertErrorCode(c, {$group:{_id: {a:1}}}, 17390);
+assertErrorCode(c, {$group: {_id: {a: 1}}}, 17390);
// but any amount of nesting in a project should work
-assert.eq(c.aggregate({$project:{_id:0, a:{b:{c:{d:{e:{f:{g:1}}}}}}}}).toArray(), [{a:{b:{c:{d:{e:{f:{g:19}}}}}}}]);
-assert.eq(c.aggregate({$project:{_id:0, a:{b:{c:{d:{e:{f:1}}}}}}}).toArray(), [{a:{b:{c:{d:{e:{f:{g:19}}}}}}}]);
-assert.eq(c.aggregate({$project:{_id:0, a:{b:{c:{d:{e:1}}}}}}).toArray(), [{a:{b:{c:{d:{e:{f:{g:19}}}}}}}]);
-assert.eq(c.aggregate({$project:{_id:0, a:{b:{c:{d:1}}}}}).toArray(), [{a:{b:{c:{d:{e:{f:{g:19}}}}}}}]);
-assert.eq(c.aggregate({$project:{_id:0, a:{b:{c:1}}}}).toArray(), [{a:{b:{c:{d:{e:{f:{g:19}}}}}}}]);
-assert.eq(c.aggregate({$project:{_id:0, a:{b:1}}}).toArray(), [{a:{b:{c:{d:{e:{f:{g:19}}}}}}}]);
-assert.eq(c.aggregate({$project:{_id:0, a:1}}).toArray(), [{a:{b:{c:{d:{e:{f:{g:19}}}}}}}]);
+assert.eq(c.aggregate({$project: {_id: 0, a: {b: {c: {d: {e: {f: {g: 1}}}}}}}}).toArray(),
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+assert.eq(c.aggregate({$project: {_id: 0, a: {b: {c: {d: {e: {f: 1}}}}}}}).toArray(),
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+assert.eq(c.aggregate({$project: {_id: 0, a: {b: {c: {d: {e: 1}}}}}}).toArray(),
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+assert.eq(c.aggregate({$project: {_id: 0, a: {b: {c: {d: 1}}}}}).toArray(),
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+assert.eq(c.aggregate({$project: {_id: 0, a: {b: {c: 1}}}}).toArray(),
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+assert.eq(c.aggregate({$project: {_id: 0, a: {b: 1}}}).toArray(),
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+assert.eq(c.aggregate({$project: {_id: 0, a: 1}}).toArray(),
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
diff --git a/jstests/aggregation/bugs/server6530.js b/jstests/aggregation/bugs/server6530.js
index eb69516255f..de2ec9c0912 100644
--- a/jstests/aggregation/bugs/server6530.js
+++ b/jstests/aggregation/bugs/server6530.js
@@ -1,5 +1,5 @@
// server-6530: disallow $near queries in $match operations
load('jstests/aggregation/extras/utils.js');
-assertErrorCode(db.foo, {$match: {$near: [0,0]}}, 16424);
-assertErrorCode(db.foo, {$match: {$nearSphere: [2,2]}}, 16426);
+assertErrorCode(db.foo, {$match: {$near: [0, 0]}}, 16424);
+assertErrorCode(db.foo, {$match: {$nearSphere: [2, 2]}}, 16426);
diff --git a/jstests/aggregation/bugs/server6531.js b/jstests/aggregation/bugs/server6531.js
index 6cd74c104be..7d117ce6905 100644
--- a/jstests/aggregation/bugs/server6531.js
+++ b/jstests/aggregation/bugs/server6531.js
@@ -3,16 +3,20 @@
c = db.s6531;
c.drop();
-for (var x=0; x < 10; x++) {
- for (var y=0; y < 10; y++) {
- c.insert({loc: [x,y]});
+for (var x = 0; x < 10; x++) {
+ for (var y = 0; y < 10; y++) {
+ c.insert({loc: [x, y]});
}
}
function test(variant) {
- query = {loc: {$within: {$center: [[5,5], 3]}}};
- sort = {_id: 1};
- aggOut = c.aggregate({$match:query}, {$sort: sort});
+ query = {
+ loc: {$within: {$center: [[5, 5], 3]}}
+ };
+ sort = {
+ _id: 1
+ };
+ aggOut = c.aggregate({$match: query}, {$sort: sort});
cursor = c.find(query).sort(sort);
assert.eq(aggOut.toArray(), cursor.toArray());
@@ -20,9 +24,9 @@ function test(variant) {
test("no index");
-c.ensureIndex({loc:"2d"});
+c.ensureIndex({loc: "2d"});
test("2d index");
-c.dropIndex({loc:"2d"});
-c.ensureIndex({loc:"2dsphere"});
+c.dropIndex({loc: "2d"});
+c.ensureIndex({loc: "2dsphere"});
test("2dsphere index");
diff --git a/jstests/aggregation/bugs/server6556.js b/jstests/aggregation/bugs/server6556.js
index 65a78e714b9..721b9fd5a98 100644
--- a/jstests/aggregation/bugs/server6556.js
+++ b/jstests/aggregation/bugs/server6556.js
@@ -3,17 +3,22 @@
c = db.s6556;
c.drop();
-c.save({foo:"as\0df"});
+c.save({foo: "as\0df"});
// compare the whole string, they should match
-assert.eq(c.aggregate({$project: {_id: 0, matches: {$eq:["as\0df", "$foo"]}}}).toArray(), [{matches:true}]);
+assert.eq(c.aggregate({$project: {_id: 0, matches: {$eq: ["as\0df", "$foo"]}}}).toArray(),
+ [{matches: true}]);
// compare with the substring containing only the up to the null, they should not match
-assert.eq(c.aggregate({$project: {_id: 0, matches: {$eq:["as\0df", {$substr:["$foo",0,3]}]}}}).toArray(), [{matches:false}]);
+assert.eq(c.aggregate({$project: {_id: 0, matches: {$eq: ["as\0df", {$substr: ["$foo", 0, 3]}]}}})
+ .toArray(),
+ [{matches: false}]);
// partial the other way shouldnt work either
-assert.eq(c.aggregate({$project: {_id: 0, matches: {$eq:["as", "$foo"]}}}).toArray(), [{matches:false}]);
+assert.eq(c.aggregate({$project: {_id: 0, matches: {$eq: ["as", "$foo"]}}}).toArray(),
+ [{matches: false}]);
// neither should one that differs after the null
-assert.eq(c.aggregate({$project: {_id: 0, matches: {$eq:["as\0de", "$foo"]}}}).toArray(), [{matches:false}]);
+assert.eq(c.aggregate({$project: {_id: 0, matches: {$eq: ["as\0de", "$foo"]}}}).toArray(),
+ [{matches: false}]);
// should assert on fieldpaths with a null
-assert.throws( function() {
- c.aggregate({$project: {_id: 0, matches: {$eq:["as\0df", "$f\0oo"]}}});
+assert.throws(function() {
+ c.aggregate({$project: {_id: 0, matches: {$eq: ["as\0df", "$f\0oo"]}}});
});
diff --git a/jstests/aggregation/bugs/server6570.js b/jstests/aggregation/bugs/server6570.js
index aef1e75346d..bb58cf0ce84 100644
--- a/jstests/aggregation/bugs/server6570.js
+++ b/jstests/aggregation/bugs/server6570.js
@@ -3,12 +3,12 @@ load('jstests/aggregation/extras/utils.js');
c = db.s6570;
c.drop();
-c.save({x:17, y:"foo"});
+c.save({x: 17, y: "foo"});
-assertErrorCode(c, {$project:{string_fields : { $add:[3, "$y", 4, "$y"] }}}, 16554);
-assertErrorCode(c, {$project:{number_fields : { $add:["a", "$x", "b", "$x"] }}}, 16554);
-assertErrorCode(c, {$project:{all_strings : { $add:["c", "$y", "d", "$y"] }}}, 16554);
-assertErrorCode(c, {$project:{potpourri_1 : { $add:[5, "$y", "e", "$x"] }}}, 16554);
-assertErrorCode(c, {$project:{potpourri_2 : { $add:[6, "$x", "f", "$y"] }}}, 16554);
-assertErrorCode(c, {$project:{potpourri_3 : { $add:["g", "$y", 7, "$x"] }}}, 16554);
-assertErrorCode(c, {$project:{potpourri_4 : { $add:["h", "$x", 8, "$y"] }}}, 16554);
+assertErrorCode(c, {$project: {string_fields: {$add: [3, "$y", 4, "$y"]}}}, 16554);
+assertErrorCode(c, {$project: {number_fields: {$add: ["a", "$x", "b", "$x"]}}}, 16554);
+assertErrorCode(c, {$project: {all_strings: {$add: ["c", "$y", "d", "$y"]}}}, 16554);
+assertErrorCode(c, {$project: {potpourri_1: {$add: [5, "$y", "e", "$x"]}}}, 16554);
+assertErrorCode(c, {$project: {potpourri_2: {$add: [6, "$x", "f", "$y"]}}}, 16554);
+assertErrorCode(c, {$project: {potpourri_3: {$add: ["g", "$y", 7, "$x"]}}}, 16554);
+assertErrorCode(c, {$project: {potpourri_4: {$add: ["h", "$x", 8, "$y"]}}}, 16554);
diff --git a/jstests/aggregation/bugs/server6779.js b/jstests/aggregation/bugs/server6779.js
index e9e4cc25f92..e3b8aaeca08 100644
--- a/jstests/aggregation/bugs/server6779.js
+++ b/jstests/aggregation/bugs/server6779.js
@@ -4,14 +4,14 @@ function test(op, val) {
t = db.server6779;
t.drop();
- t.insert({a:true});
- t.insert({a:false});
+ t.insert({a: true});
+ t.insert({a: false});
obj = {};
obj[op] = ['$a', val];
result = t.aggregate({$project: {_id: 0, bool: obj}});
- assert.eq(result.toArray(), [{bool:true}, {bool:false}]);
+ assert.eq(result.toArray(), [{bool: true}, {bool: false}]);
}
test('$and', true);
test('$or', false);
diff --git a/jstests/aggregation/bugs/server6861.js b/jstests/aggregation/bugs/server6861.js
index 4cce3effc6e..28d19445241 100644
--- a/jstests/aggregation/bugs/server6861.js
+++ b/jstests/aggregation/bugs/server6861.js
@@ -5,23 +5,23 @@ load('jstests/aggregation/extras/utils.js');
t = db.jstests_server6861;
t.drop();
-t.save( { a:1 } );
+t.save({a: 1});
-function assertCode( code, expression ) {
+function assertCode(code, expression) {
assertErrorCode(t, expression, code);
}
-function assertResult( result, expression ) {
- assert.eq( result, t.aggregate( expression ).toArray() );
+function assertResult(result, expression) {
+ assert.eq(result, t.aggregate(expression).toArray());
}
// Correct number of fields.
-assertResult( [ { a:1 } ], { $project:{ _id:0, a:1 } } );
+assertResult([{a: 1}], {$project: {_id: 0, a: 1}});
// Incorrect number of fields.
-assertCode( 16435, {} );
-assertCode( 16435, { $project:{ _id:0, a:1 }, $group:{ _id:0 } } );
-assertCode( 16435, { $project:{ _id:0, a:1 }, $group:{ _id:0 }, $sort:{ a:1 } } );
+assertCode(16435, {});
+assertCode(16435, {$project: {_id: 0, a: 1}, $group: {_id: 0}});
+assertCode(16435, {$project: {_id: 0, a: 1}, $group: {_id: 0}, $sort: {a: 1}});
// Invalid stage specification.
-assertCode( 16436, { $noSuchStage:{ a:1 } } );
+assertCode(16436, {$noSuchStage: {a: 1}});
diff --git a/jstests/aggregation/bugs/server7768.js b/jstests/aggregation/bugs/server7768.js
index a6c96d8f912..a820dd7526e 100644
--- a/jstests/aggregation/bugs/server7768.js
+++ b/jstests/aggregation/bugs/server7768.js
@@ -1,12 +1,13 @@
// SEVER-7768 aggregate cmd shouldn't fail when $readPreference is specified
collection = 'server7768';
db[collection].drop();
-db[collection].insert({foo:1});
+db[collection].insert({foo: 1});
// Can't use aggregate helper here because we need to add $readPreference flag
-res = db.runCommand({ 'aggregate': collection
- , 'pipeline': [{'$project': {'_id': false, 'foo': true}}]
- , $readPreference: {'mode': 'primary'}
- });
+res = db.runCommand({
+ 'aggregate': collection,
+ 'pipeline': [{'$project': {'_id': false, 'foo': true}}],
+ $readPreference: {'mode': 'primary'}
+});
assert.commandWorked(res);
-assert.eq(res.result, [{foo:1}]);
+assert.eq(res.result, [{foo: 1}]);
diff --git a/jstests/aggregation/bugs/server7781.js b/jstests/aggregation/bugs/server7781.js
index 4d2c3c1a3fa..230a8a64c9f 100644
--- a/jstests/aggregation/bugs/server7781.js
+++ b/jstests/aggregation/bugs/server7781.js
@@ -1,149 +1,169 @@
// SERVER-7781 $geoNear pipeline stage
(function() {
-load('jstests/libs/geo_near_random.js');
-load('jstests/aggregation/extras/utils.js');
-
-var coll = 'server7781';
-
-db[coll].drop();
-db[coll].insert({loc:[0,0]});
-
-// $geoNear is only allowed as the first stage in a pipeline, nowhere else.
-assertErrorCode(db[coll],
- [{$match: {x:1}}, {$geoNear:{near: [1,1], spherical: true, distanceField: 'dis'}}],
- 28837);
-
-function checkOutput(cmdOut, aggOut, expectedNum) {
- assert.commandWorked(cmdOut, "geoNear command");
-
- // the output arrays are accessed differently
- cmdOut = cmdOut.results;
- aggOut = aggOut.toArray();
-
- assert.eq(cmdOut.length, expectedNum);
- assert.eq(aggOut.length, expectedNum);
-
- var allSame = true;
- var massaged; // massage geoNear command output to match output from agg pipeline
- for (var i=0; i < cmdOut.length; i++) {
- massaged = {};
- Object.extend(massaged, cmdOut[i].obj, /*deep=*/true);
- massaged.stats = {'dis': cmdOut[i].dis,
- 'loc': cmdOut[i].loc};
-
- if (!friendlyEqual(massaged, aggOut[i])) {
- allSame = false; // don't bail yet since we want to print all differences
- print("Difference detected at index " + i + " of " + expectedNum);
- print("from geoNear command:" + tojson(massaged));
- print("from aggregate command:" + tojson(aggOut[i]));
- }
- }
+ load('jstests/libs/geo_near_random.js');
+ load('jstests/aggregation/extras/utils.js');
- assert(allSame);
-}
-
-// We use this to generate points. Using a single global to avoid reseting RNG in each pass.
-var pointMaker = new GeoNearRandomTest(coll);
+ var coll = 'server7781';
-function test(db, sharded, indexType) {
db[coll].drop();
-
- if (sharded) { // sharded setup
- var shards = [];
- var config = db.getSiblingDB("config");
- config.shards.find().forEach(function(shard) { shards.push(shard._id); });
-
- db.adminCommand({shardCollection: db[coll].getFullName(), key: {rand:1}});
- for (var i=1; i < 10; i++) {
- // split at 0.1, 0.2, ... 0.9
- db.adminCommand({split: db[coll].getFullName(), middle: {rand: i/10}});
- db.adminCommand({moveChunk: db[coll].getFullName(), find: {rand: i/10},
- to: shards[i%shards.length]});
+ db[coll].insert({loc: [0, 0]});
+
+ // $geoNear is only allowed as the first stage in a pipeline, nowhere else.
+ assertErrorCode(
+ db[coll],
+ [{$match: {x: 1}}, {$geoNear: {near: [1, 1], spherical: true, distanceField: 'dis'}}],
+ 28837);
+
+ function checkOutput(cmdOut, aggOut, expectedNum) {
+ assert.commandWorked(cmdOut, "geoNear command");
+
+ // the output arrays are accessed differently
+ cmdOut = cmdOut.results;
+ aggOut = aggOut.toArray();
+
+ assert.eq(cmdOut.length, expectedNum);
+ assert.eq(aggOut.length, expectedNum);
+
+ var allSame = true;
+ var massaged; // massage geoNear command output to match output from agg pipeline
+ for (var i = 0; i < cmdOut.length; i++) {
+ massaged = {};
+ Object.extend(massaged, cmdOut[i].obj, /*deep=*/true);
+ massaged.stats = {
+ 'dis': cmdOut[i].dis,
+ 'loc': cmdOut[i].loc
+ };
+
+ if (!friendlyEqual(massaged, aggOut[i])) {
+ allSame = false; // don't bail yet since we want to print all differences
+ print("Difference detected at index " + i + " of " + expectedNum);
+ print("from geoNear command:" + tojson(massaged));
+ print("from aggregate command:" + tojson(aggOut[i]));
+ }
}
- assert.eq(config.chunks.count({'ns': db[coll].getFullName()}), 10);
+ assert(allSame);
}
- // insert points
- var numPts = 10*1000;
- var bulk = db[coll].initializeUnorderedBulkOp();
- for (var i=0; i < numPts; i++) {
- bulk.insert({ rand: Math.random(), loc: pointMaker.mkPt() });
+    // We use this to generate points. Using a single global to avoid resetting RNG in each pass.
+ var pointMaker = new GeoNearRandomTest(coll);
+
+ function test(db, sharded, indexType) {
+ db[coll].drop();
+
+ if (sharded) { // sharded setup
+ var shards = [];
+ var config = db.getSiblingDB("config");
+ config.shards.find().forEach(function(shard) {
+ shards.push(shard._id);
+ });
+
+ db.adminCommand({shardCollection: db[coll].getFullName(), key: {rand: 1}});
+ for (var i = 1; i < 10; i++) {
+ // split at 0.1, 0.2, ... 0.9
+ db.adminCommand({split: db[coll].getFullName(), middle: {rand: i / 10}});
+ db.adminCommand({
+ moveChunk: db[coll].getFullName(),
+ find: {rand: i / 10},
+ to: shards[i % shards.length]
+ });
+ }
+
+ assert.eq(config.chunks.count({'ns': db[coll].getFullName()}), 10);
+ }
+
+ // insert points
+ var numPts = 10 * 1000;
+ var bulk = db[coll].initializeUnorderedBulkOp();
+ for (var i = 0; i < numPts; i++) {
+ bulk.insert({rand: Math.random(), loc: pointMaker.mkPt()});
+ }
+ assert.writeOK(bulk.execute());
+
+ assert.eq(db[coll].count(), numPts);
+
+ db[coll].ensureIndex({loc: indexType});
+
+ // test with defaults
+ var queryPoint = pointMaker.mkPt(0.25); // stick to center of map
+ geoCmd = {
+ geoNear: coll,
+ near: queryPoint,
+ includeLocs: true,
+ spherical: true
+ };
+ aggCmd = {
+ $geoNear: {
+ near: queryPoint,
+ includeLocs: 'stats.loc',
+ distanceField: 'stats.dis',
+ spherical: true
+ }
+ };
+ checkOutput(db.runCommand(geoCmd), db[coll].aggregate(aggCmd), 100);
+
+ // test with num
+ queryPoint = pointMaker.mkPt(0.25);
+ geoCmd.num = 75;
+ geoCmd.near = queryPoint;
+ aggCmd.$geoNear.num = 75;
+ aggCmd.$geoNear.near = queryPoint;
+ checkOutput(db.runCommand(geoCmd), db[coll].aggregate(aggCmd), 75);
+
+ // test with limit instead of num (they mean the same thing, but want to test both)
+ queryPoint = pointMaker.mkPt(0.25);
+ geoCmd.near = queryPoint;
+ delete geoCmd.num;
+ geoCmd.limit = 70;
+ aggCmd.$geoNear.near = queryPoint;
+ delete aggCmd.$geoNear.num;
+ aggCmd.$geoNear.limit = 70;
+ checkOutput(db.runCommand(geoCmd), db[coll].aggregate(aggCmd), 70);
+
+ // test spherical
+ queryPoint = pointMaker.mkPt(0.25);
+ geoCmd.spherical = true;
+ geoCmd.near = queryPoint;
+ aggCmd.$geoNear.spherical = true;
+ aggCmd.$geoNear.near = queryPoint;
+ checkOutput(db.runCommand(geoCmd), db[coll].aggregate(aggCmd), 70);
+
+ // test $geoNear + $limit coalescing
+ queryPoint = pointMaker.mkPt(0.25);
+ geoCmd.num = 40;
+ geoCmd.near = queryPoint;
+ aggCmd.$geoNear.near = queryPoint;
+ aggArr = [aggCmd, {$limit: 50}, {$limit: 60}, {$limit: 40}];
+ checkOutput(db.runCommand(geoCmd), db[coll].aggregate(aggArr), 40);
+
+ // Test $geoNear with an initial batchSize of 0. Regression test for SERVER-20935.
+ queryPoint = pointMaker.mkPt(0.25);
+ geoCmd.spherical = true;
+ geoCmd.near = queryPoint;
+ geoCmd.limit = 70;
+ delete geoCmd.num;
+ aggCmd.$geoNear.spherical = true;
+ aggCmd.$geoNear.near = queryPoint;
+ aggCmd.$geoNear.limit = 70;
+ delete aggCmd.$geoNear.num;
+ var cmdRes = db[coll].runCommand("aggregate", {pipeline: [aggCmd], cursor: {batchSize: 0}});
+ assert.commandWorked(cmdRes);
+ var cmdCursor = new DBCommandCursor(db[coll].getMongo(), cmdRes, 0);
+ checkOutput(db.runCommand(geoCmd), cmdCursor, 70);
}
- assert.writeOK(bulk.execute());
-
- assert.eq(db[coll].count(), numPts);
-
- db[coll].ensureIndex({loc: indexType});
-
- // test with defaults
- var queryPoint = pointMaker.mkPt(0.25); // stick to center of map
- geoCmd = {geoNear: coll, near: queryPoint, includeLocs: true, spherical: true};
- aggCmd = {$geoNear: {near: queryPoint, includeLocs: 'stats.loc', distanceField: 'stats.dis', spherical: true}};
- checkOutput(db.runCommand(geoCmd), db[coll].aggregate(aggCmd), 100);
-
- // test with num
- queryPoint = pointMaker.mkPt(0.25);
- geoCmd.num = 75;
- geoCmd.near = queryPoint;
- aggCmd.$geoNear.num = 75;
- aggCmd.$geoNear.near = queryPoint;
- checkOutput(db.runCommand(geoCmd), db[coll].aggregate(aggCmd), 75);
-
- // test with limit instead of num (they mean the same thing, but want to test both)
- queryPoint = pointMaker.mkPt(0.25);
- geoCmd.near = queryPoint;
- delete geoCmd.num;
- geoCmd.limit = 70;
- aggCmd.$geoNear.near = queryPoint;
- delete aggCmd.$geoNear.num;
- aggCmd.$geoNear.limit = 70;
- checkOutput(db.runCommand(geoCmd), db[coll].aggregate(aggCmd), 70);
-
- // test spherical
- queryPoint = pointMaker.mkPt(0.25);
- geoCmd.spherical = true;
- geoCmd.near = queryPoint;
- aggCmd.$geoNear.spherical = true;
- aggCmd.$geoNear.near = queryPoint;
- checkOutput(db.runCommand(geoCmd), db[coll].aggregate(aggCmd), 70);
-
- // test $geoNear + $limit coalescing
- queryPoint = pointMaker.mkPt(0.25);
- geoCmd.num = 40;
- geoCmd.near = queryPoint;
- aggCmd.$geoNear.near = queryPoint;
- aggArr = [aggCmd, {$limit: 50}, {$limit:60}, {$limit:40}];
- checkOutput(db.runCommand(geoCmd), db[coll].aggregate(aggArr), 40);
-
- // Test $geoNear with an initial batchSize of 0. Regression test for SERVER-20935.
- queryPoint = pointMaker.mkPt(0.25);
- geoCmd.spherical = true;
- geoCmd.near = queryPoint;
- geoCmd.limit = 70;
- delete geoCmd.num;
- aggCmd.$geoNear.spherical = true;
- aggCmd.$geoNear.near = queryPoint;
- aggCmd.$geoNear.limit = 70;
- delete aggCmd.$geoNear.num;
- var cmdRes = db[coll].runCommand("aggregate", {pipeline: [aggCmd], cursor: {batchSize: 0}});
- assert.commandWorked(cmdRes);
- var cmdCursor = new DBCommandCursor(db[coll].getMongo(), cmdRes, 0);
- checkOutput(db.runCommand(geoCmd), cmdCursor, 70);
-}
-
-test(db, false, '2d');
-test(db, false, '2dsphere');
-
-var sharded = new ShardingTest({shards: 3, mongos: 1});
-sharded.stopBalancer();
-sharded.adminCommand( { enablesharding : "test" } );
-sharded.ensurePrimaryShard('test', 'shard0001');
-
-test(sharded.getDB('test'), true, '2d');
-test(sharded.getDB('test'), true, '2dsphere');
-
-sharded.stop();
+
+ test(db, false, '2d');
+ test(db, false, '2dsphere');
+
+ var sharded = new ShardingTest({shards: 3, mongos: 1});
+ sharded.stopBalancer();
+ sharded.adminCommand({enablesharding: "test"});
+ sharded.ensurePrimaryShard('test', 'shard0001');
+
+ test(sharded.getDB('test'), true, '2d');
+ test(sharded.getDB('test'), true, '2dsphere');
+
+ sharded.stop();
})();
diff --git a/jstests/aggregation/bugs/server7900.js b/jstests/aggregation/bugs/server7900.js
index c05c1538e53..20bf085c7a1 100644
--- a/jstests/aggregation/bugs/server7900.js
+++ b/jstests/aggregation/bugs/server7900.js
@@ -3,9 +3,8 @@
c = db.s7900;
c.drop();
-for (var i=0; i < 5; i++)
- c.insert({_id:i});
+for (var i = 0; i < 5; i++)
+ c.insert({_id: i});
-res = c.aggregate({$sort: {_id: -1}}, {$limit: 2}); // uses index for sort
+res = c.aggregate({$sort: {_id: -1}}, {$limit: 2}); // uses index for sort
assert.eq(res.toArray(), [{_id: 4}, {_id: 3}]);
-
diff --git a/jstests/aggregation/bugs/server8141.js b/jstests/aggregation/bugs/server8141.js
index d450ec14227..9777737517b 100644
--- a/jstests/aggregation/bugs/server8141.js
+++ b/jstests/aggregation/bugs/server8141.js
@@ -34,8 +34,12 @@
assert.eq(coll.aggregate(pipeline).toArray(), [{d: ['foo']}]);
// Nested arrays.
- pipeline = [{$project: {_id: 0, d: {$setIntersection: [[[1, 'foo', 'bar']],
- [[1, {$toLower: 'FoO'}, '$b']]]}}}];
+ pipeline = [{
+ $project: {
+ _id: 0,
+ d: {$setIntersection: [[[1, 'foo', 'bar']], [[1, {$toLower: 'FoO'}, '$b']]]}
+ }
+ }];
assert.eq(coll.aggregate(pipeline).toArray(), [{d: [[1, 'foo', 'bar']]}]);
coll.drop();
diff --git a/jstests/aggregation/bugs/server8568.js b/jstests/aggregation/bugs/server8568.js
index 7490b286e38..ae9a9ad8202 100644
--- a/jstests/aggregation/bugs/server8568.js
+++ b/jstests/aggregation/bugs/server8568.js
@@ -17,8 +17,8 @@ load('jstests/aggregation/extras/utils.js');
// Helper for testing that op results in error with code errorCode.
function testError(op, errorCode) {
- var pipeline = [{$project: {_id: 0, result: op}}];
- assertErrorCode(coll, pipeline, errorCode);
+ var pipeline = [{$project: {_id: 0, result: op}}];
+ assertErrorCode(coll, pipeline, errorCode);
}
// Valid input: Numeric arg >= 0, null, or NaN.
diff --git a/jstests/aggregation/bugs/server8581.js b/jstests/aggregation/bugs/server8581.js
index aa2158c0ced..54b97be3d08 100644
--- a/jstests/aggregation/bugs/server8581.js
+++ b/jstests/aggregation/bugs/server8581.js
@@ -5,154 +5,123 @@ t = db.jstests_aggregation_redact;
t.drop();
// this document will always be present but its content will change
-t.save({ _id: 1,
- level: 1,
- // b will present on level 3, 4, and 5
- b: { level: 3,
- c: 5, // always included when b is included
- // the contents of d test that if we cannot see a document then we cannot see its
- // array-nested subdocument even if we have permissions to see the subdocument.
- // it also tests arrays containing documents we cannot see
- d: [ {level: 1, e: 4},
- {f: 6},
- {level: 5, g: 9},
- "NOT AN OBJECT!!11!", // always included when b is included
- [2, 3, 4, {level: 1, r: 11}, {level: 5, s: 99}]
- // nested array should always be included once b is
- // but the second object should only show up at level 5
- ]
- },
- // the contents of h test that in order to see a subdocument (j) we must be able to see all
- // parent documents (h and i) even if we have permissions to see the subdocument
- h: { level: 2,
- i: { level: 4,
- j: { level: 1,
- k: 8
- }
- }
- },
- // l checks that we get an empty document when we can see a document but none of its fields
- l: {
- m: { level: 3,
- n: 12
- }
- },
- // o checks that we get an empty array when we can see a array but none of its entries
- o: [{ level: 5,
- p: 19
- }],
- // q is a basic field check and should always be included
- q: 14
- });
+t.save({
+ _id: 1,
+ level: 1,
+    // b will be present on level 3, 4, and 5
+ b: {
+ level: 3,
+ c: 5, // always included when b is included
+ // the contents of d test that if we cannot see a document then we cannot see its
+ // array-nested subdocument even if we have permissions to see the subdocument.
+ // it also tests arrays containing documents we cannot see
+ d: [
+ {level: 1, e: 4},
+ {f: 6},
+ {level: 5, g: 9},
+ "NOT AN OBJECT!!11!", // always included when b is included
+ [2, 3, 4, {level: 1, r: 11}, {level: 5, s: 99}]
+ // nested array should always be included once b is
+ // but the second object should only show up at level 5
+ ]
+ },
+ // the contents of h test that in order to see a subdocument (j) we must be able to see all
+ // parent documents (h and i) even if we have permissions to see the subdocument
+ h: {level: 2, i: {level: 4, j: {level: 1, k: 8}}},
+ // l checks that we get an empty document when we can see a document but none of its fields
+ l: {m: {level: 3, n: 12}},
+    // o checks that we get an empty array when we can see an array but none of its entries
+ o: [{level: 5, p: 19}],
+ // q is a basic field check and should always be included
+ q: 14
+});
// this document will sometimes be missing
-t.save({ _id: 2,
- level: 4,
- });
-
-a1 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 1]}, "$$DESCEND", "$$PRUNE"]}});
-a2 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 2]}, "$$DESCEND", "$$PRUNE"]}});
-a3 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 3]}, "$$DESCEND", "$$PRUNE"]}});
-a4 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 4]}, "$$DESCEND", "$$PRUNE"]}});
-a5 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 5]}, "$$DESCEND", "$$PRUNE"]}});
-
-a1result = [{ _id: 1,
- level: 1,
- l: {},
- o: [],
- q: 14
- }];
-
-a2result = [{ _id: 1,
- level: 1,
- h: { level: 2,
- },
- l: {},
- o: [],
- q: 14
- }];
-
-a3result = [{ _id: 1,
- level: 1,
- b: { level: 3,
- c: 5,
- d: [ {level: 1, e: 4},
- {f: 6},
- "NOT AN OBJECT!!11!",
- [2, 3, 4, {level: 1, r: 11}]
- ]
- },
- h: { level: 2,
- },
- l: {
- m: { level: 3,
- n: 12
- }
- },
- o: [],
- q: 14
- }];
-
-a4result = [{ _id: 1,
- level: 1,
- b: { level: 3,
- c: 5,
- d: [ {level: 1, e: 4},
- {f: 6},
- "NOT AN OBJECT!!11!",
- [2, 3, 4, {level: 1, r: 11}]
- ]
- },
- h: { level: 2,
- i: { level: 4,
- j: { level: 1,
- k: 8
- }
- }
- },
- l: {
- m: { level: 3,
- n: 12
- }
- },
- o: [],
- q: 14
- },
- { _id: 2,
- level: 4,
- }];
-
-a5result = [{ _id: 1,
- level: 1,
- b: { level: 3,
- c: 5,
- d: [ {level: 1, e: 4},
- {f: 6},
- {level: 5, g: 9},
- "NOT AN OBJECT!!11!",
- [2, 3, 4, {level: 1, r: 11}, {level: 5, s: 99}]
- ]
- },
- h: { level: 2,
- i: { level: 4,
- j: { level: 1,
- k: 8
- }
- }
- },
- l: {
- m: { level: 3,
- n: 12
- }
- },
- o: [{ level: 5,
- p: 19
- }],
- q: 14
- },
- { _id: 2,
- level: 4,
- }];
+t.save({
+ _id: 2,
+ level: 4,
+});
+
+a1 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 1]}, "$$DESCEND", "$$PRUNE"]}});
+a2 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 2]}, "$$DESCEND", "$$PRUNE"]}});
+a3 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 3]}, "$$DESCEND", "$$PRUNE"]}});
+a4 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 4]}, "$$DESCEND", "$$PRUNE"]}});
+a5 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 5]}, "$$DESCEND", "$$PRUNE"]}});
+
+a1result = [{_id: 1, level: 1, l: {}, o: [], q: 14}];
+
+a2result = [{
+ _id: 1,
+ level: 1,
+ h: {
+ level: 2,
+ },
+ l: {},
+ o: [],
+ q: 14
+}];
+
+a3result = [{
+ _id: 1,
+ level: 1,
+ b: {
+ level: 3,
+ c: 5,
+ d: [{level: 1, e: 4}, {f: 6}, "NOT AN OBJECT!!11!", [2, 3, 4, {level: 1, r: 11}]]
+ },
+ h: {
+ level: 2,
+ },
+ l: {m: {level: 3, n: 12}},
+ o: [],
+ q: 14
+}];
+
+a4result = [
+ {
+ _id: 1,
+ level: 1,
+ b: {
+ level: 3,
+ c: 5,
+ d: [{level: 1, e: 4}, {f: 6}, "NOT AN OBJECT!!11!", [2, 3, 4, {level: 1, r: 11}]]
+ },
+ h: {level: 2, i: {level: 4, j: {level: 1, k: 8}}},
+ l: {m: {level: 3, n: 12}},
+ o: [],
+ q: 14
+ },
+ {
+ _id: 2,
+ level: 4,
+ }
+];
+
+a5result = [
+ {
+ _id: 1,
+ level: 1,
+ b: {
+ level: 3,
+ c: 5,
+ d: [
+ {level: 1, e: 4},
+ {f: 6},
+ {level: 5, g: 9},
+ "NOT AN OBJECT!!11!",
+ [2, 3, 4, {level: 1, r: 11}, {level: 5, s: 99}]
+ ]
+ },
+ h: {level: 2, i: {level: 4, j: {level: 1, k: 8}}},
+ l: {m: {level: 3, n: 12}},
+ o: [{level: 5, p: 19}],
+ q: 14
+ },
+ {
+ _id: 2,
+ level: 4,
+ }
+];
assert.eq(a1.toArray(), a1result);
assert.eq(a2.toArray(), a2result);
@@ -168,35 +137,16 @@ assert.eq(t.aggregate({$redact: "$$DESCEND"}).toArray(), t.find().toArray());
// test $$KEEP
t.drop();
// entire document should be present at 2 and beyond
-t.save({ _id: 1,
- level: 2,
- b: { level: 3,
- c: 2
- },
- d: { level: 1,
- e: 8
- },
- f: 9
- });
-
-b1 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 1]}, "$$KEEP", "$$PRUNE"]}});
-b2 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 2]}, "$$KEEP", "$$PRUNE"]}});
-b3 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 3]}, "$$KEEP", "$$PRUNE"]}});
+t.save({_id: 1, level: 2, b: {level: 3, c: 2}, d: {level: 1, e: 8}, f: 9});
+
+b1 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 1]}, "$$KEEP", "$$PRUNE"]}});
+b2 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 2]}, "$$KEEP", "$$PRUNE"]}});
+b3 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 3]}, "$$KEEP", "$$PRUNE"]}});
b1result = [];
-b23result = [{ _id: 1,
- level: 2,
- b: { level: 3,
- c: 2
- },
- d: { level: 1,
- e: 8
- },
- f: 9
- }];
+b23result = [{_id: 1, level: 2, b: {level: 3, c: 2}, d: {level: 1, e: 8}, f: 9}];
assert.eq(b1.toArray(), b1result);
assert.eq(b2.toArray(), b23result);
assert.eq(b3.toArray(), b23result);
-
diff --git a/jstests/aggregation/bugs/server9289.js b/jstests/aggregation/bugs/server9289.js
index 934e5862314..06afc36d5e7 100644
--- a/jstests/aggregation/bugs/server9289.js
+++ b/jstests/aggregation/bugs/server9289.js
@@ -6,4 +6,5 @@ t.drop();
t.insert({date: ISODate('2013-08-14T21:41:43Z')});
// This would result in a parse error on older servers
-assert.eq(t.aggregate({$project: {year: {$year: {$add:['$date',1000]}}}}).toArray()[0].year, 2013);
+assert.eq(t.aggregate({$project: {year: {$year: {$add: ['$date', 1000]}}}}).toArray()[0].year,
+ 2013);
diff --git a/jstests/aggregation/bugs/server9444.js b/jstests/aggregation/bugs/server9444.js
index d8ab7781681..ad5f4b03ca6 100644
--- a/jstests/aggregation/bugs/server9444.js
+++ b/jstests/aggregation/bugs/server9444.js
@@ -3,19 +3,19 @@
var t = db.server9444;
t.drop();
-var sharded = (typeof(RUNNING_IN_SHARDED_AGG_TEST) != 'undefined'); // see end of testshard1.js
+var sharded = (typeof(RUNNING_IN_SHARDED_AGG_TEST) != 'undefined'); // see end of testshard1.js
if (sharded) {
- db.adminCommand( { shardcollection : t.getFullName(), key : { "_id" : 'hashed' } } );
+ db.adminCommand({shardcollection: t.getFullName(), key: {"_id": 'hashed'}});
}
var memoryLimitMB = sharded ? 200 : 100;
function loadData() {
- var bigStr = Array(1024*1024 + 1).toString(); // 1MB of ','
+ var bigStr = Array(1024 * 1024 + 1).toString(); // 1MB of ','
for (var i = 0; i < memoryLimitMB + 1; i++)
t.insert({_id: i, bigStr: i + bigStr, random: Math.random()});
- assert.gt(t.stats().size, memoryLimitMB * 1024*1024);
+ assert.gt(t.stats().size, memoryLimitMB * 1024 * 1024);
}
loadData();
@@ -37,7 +37,7 @@ function test(pipeline, outOfMemoryCode) {
// ensure we work when allowDiskUse === true
var res = t.aggregate(pipeline, {allowDiskUse: true});
- assert.eq(res.itcount(), t.count()); // all tests output one doc per input doc
+ assert.eq(res.itcount(), t.count()); // all tests output one doc per input doc
}
var groupCode = 16945;
@@ -48,16 +48,16 @@ test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], groupCode);
// sorting with _id would use index which doesn't require extsort
test([{$sort: {random: 1}}], sortCode);
-test([{$sort: {bigStr: 1}}], sortCode); // big key and value
+test([{$sort: {bigStr: 1}}], sortCode); // big key and value
// make sure sort + large limit won't crash the server (SERVER-10136)
-test([{$sort: {bigStr: 1}}, {$limit:1000*1000*1000}], sortLimitCode);
+test([{$sort: {bigStr: 1}}, {$limit: 1000 * 1000 * 1000}], sortLimitCode);
// test combining two extSorts in both same and different orders
-test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {_id:1}}], groupCode);
-test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {_id:-1}}], groupCode);
-test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {random:1}}], groupCode);
-test([{$sort: {random:1}}, {$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], sortCode);
+test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {_id: 1}}], groupCode);
+test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {_id: -1}}], groupCode);
+test([{$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}, {$sort: {random: 1}}], groupCode);
+test([{$sort: {random: 1}}, {$group: {_id: '$_id', bigStr: {$first: '$bigStr'}}}], sortCode);
var origDB = db;
if (sharded) {
@@ -74,4 +74,3 @@ if (sharded) {
sh.startBalancer();
db = origDB;
}
-
diff --git a/jstests/aggregation/bugs/server9625.js b/jstests/aggregation/bugs/server9625.js
index 009f55b7ef0..4a525aba518 100644
--- a/jstests/aggregation/bugs/server9625.js
+++ b/jstests/aggregation/bugs/server9625.js
@@ -55,8 +55,8 @@ load('jstests/aggregation/extras/utils.js');
// These expressions are associative and commutative so inner expression can be combined with
// outer.
- testOp({$sum: ["$a", 2, 3, {$sum : [4, 5]}]}, 15);
- testOp({$min: ["$a", 2, 3, {$min : [4, 5]}]}, 1);
+ testOp({$sum: ["$a", 2, 3, {$sum: [4, 5]}]}, 15);
+ testOp({$min: ["$a", 2, 3, {$min: [4, 5]}]}, 1);
testOp({$max: ["$a", 2, 3, {$max: [4, 5]}]}, 5);
// These expressions are not associative and commutative so inner expression cannot be combined
diff --git a/jstests/aggregation/bugs/server9840.js b/jstests/aggregation/bugs/server9840.js
index 61c95ab9cd0..130066d8af4 100644
--- a/jstests/aggregation/bugs/server9840.js
+++ b/jstests/aggregation/bugs/server9840.js
@@ -6,15 +6,15 @@ t.drop();
function test(expression, expected) {
t.drop();
- t.insert({zero:0, one:1, two:2, three:3, nested: {four: 4}});
+ t.insert({zero: 0, one: 1, two: 2, three: 3, nested: {four: 4}});
// Test in projection:
- var result = t.aggregate({$project:{_id:0, res: expression}}).toArray();
- assert.eq(result, [{res:expected}]);
+ var result = t.aggregate({$project: {_id: 0, res: expression}}).toArray();
+ assert.eq(result, [{res: expected}]);
// Test in group:
- var result = t.aggregate({$group:{_id: 0, res: {$sum: expression}}}).toArray();
- assert.eq(result, [{_id: 0, res:expected}]);
+ var result = t.aggregate({$group: {_id: 0, res: {$sum: expression}}}).toArray();
+ assert.eq(result, [{_id: 0, res: expected}]);
}
// basics
@@ -29,55 +29,76 @@ test({$add: ['$$CURRENT.two', '$$ROOT.nested.four']}, 6);
// $let simple
test({$let: {vars: {a: 10}, in: '$$a'}}, 10);
test({$let: {vars: {a: '$zero'}, in: '$$a'}}, 0);
-test({$let: {vars: {a: {$add:['$one', '$two']},
- b: 10},
- in: {$multiply:['$$a', '$$b']}}},
- 30);
+test({$let: {vars: {a: {$add: ['$one', '$two']}, b: 10}, in: {$multiply: ['$$a', '$$b']}}}, 30);
// $let changing CURRENT
-test({$let: {vars: {CURRENT: '$$ROOT.nested'},
- in: {$multiply:['$four', '$$ROOT.two']}}},
- 8);
-test({$let: {vars: {CURRENT: '$$CURRENT.nested'}, // using original value of CURRENT
- in: {$multiply:['$four', '$$ROOT.two']}}},
- 8);
-test({$let: {vars: {CURRENT: '$nested'}, // same as last
- in: {$multiply:['$four', '$$ROOT.two']}}},
- 8);
-test({$let: {vars: {CURRENT: {$const:{ten: 10}}}, // "artificial" object
- in: {$multiply:['$ten', '$$ROOT.two']}}},
- 20);
-test({$let: {vars: {CURRENT: '$three'}, // sets current to the number 3 (not an object)
- in: {$multiply:['$$CURRENT', '$$ROOT.two']}}},
- 6);
+test({$let: {vars: {CURRENT: '$$ROOT.nested'}, in: {$multiply: ['$four', '$$ROOT.two']}}}, 8);
+test(
+ {
+ $let: {
+ vars: {CURRENT: '$$CURRENT.nested'}, // using original value of CURRENT
+ in: {$multiply: ['$four', '$$ROOT.two']}
+ }
+ },
+ 8);
+test(
+ {
+ $let: {
+ vars: {CURRENT: '$nested'}, // same as last
+ in: {$multiply: ['$four', '$$ROOT.two']}
+ }
+ },
+ 8);
+test(
+ {
+ $let: {
+ vars: {CURRENT: {$const: {ten: 10}}}, // "artificial" object
+ in: {$multiply: ['$ten', '$$ROOT.two']}
+ }
+ },
+ 20);
+test(
+ {
+ $let: {
+ vars: {CURRENT: '$three'}, // sets current to the number 3 (not an object)
+ in: {$multiply: ['$$CURRENT', '$$ROOT.two']}
+ }
+ },
+ 6);
// swapping with $let (ensures there is no ordering dependency in vars)
-test({$let: {vars: {x: 6, y: 10},
- in: {$let: {vars: {x: '$$y', y: '$$x'}, // now {x:10, y:6}
- in: {$subtract: ['$$x', '$$y']}}}}}, // not commutative!
- 4); // 10-6 not 6-10 or 6-6
-
+test(
+ {
+ $let: {
+ vars: {x: 6, y: 10},
+ in: {
+ $let: {
+ vars: {x: '$$y', y: '$$x'}, // now {x:10, y:6}
+ in: {$subtract: ['$$x', '$$y']}
+ }
+ }
+ }
+ }, // not commutative!
+ 4); // 10-6 not 6-10 or 6-6
// unicode is allowed
-test({$let: {vars: {'日本語': 10}, in: '$$日本語'}}, 10); // Japanese for "Japanese language"
+test({$let: {vars: {'日本語': 10}, in: '$$日本語'}}, 10); // Japanese for "Japanese language"
// Can use ROOT and CURRENT directly with no subfield (SERVER-5916)
t.drop();
t.insert({_id: 'obj'});
-assert.eq(t.aggregate({$project: {_id:0, obj: '$$ROOT'}}).toArray(),
- [{obj: {_id: 'obj'}}]);
-assert.eq(t.aggregate({$project: {_id:0, obj: '$$CURRENT'}}).toArray(),
- [{obj: {_id: 'obj'}}]);
-assert.eq(t.aggregate({$group: {_id:0, objs: {$push: '$$ROOT'}}}).toArray(),
- [{_id: 0, objs: [{_id: 'obj'}]}]);
-assert.eq(t.aggregate({$group: {_id:0, objs: {$push: '$$CURRENT'}}}).toArray(),
- [{_id: 0, objs: [{_id: 'obj'}]}]);
+assert.eq(t.aggregate({$project: {_id: 0, obj: '$$ROOT'}}).toArray(), [{obj: {_id: 'obj'}}]);
+assert.eq(t.aggregate({$project: {_id: 0, obj: '$$CURRENT'}}).toArray(), [{obj: {_id: 'obj'}}]);
+assert.eq(t.aggregate({$group: {_id: 0, objs: {$push: '$$ROOT'}}}).toArray(),
+ [{_id: 0, objs: [{_id: 'obj'}]}]);
+assert.eq(t.aggregate({$group: {_id: 0, objs: {$push: '$$CURRENT'}}}).toArray(),
+ [{_id: 0, objs: [{_id: 'obj'}]}]);
// check name validity checks
-assertErrorCode(t, {$project: {a: {$let:{vars: {ROOT: 1}, in: '$$ROOT'}}}}, 16867);
-assertErrorCode(t, {$project: {a: {$let:{vars: {FOO: 1}, in: '$$FOO'}}}}, 16867);
-assertErrorCode(t, {$project: {a: {$let:{vars: {_underbar: 1}, in: '$$FOO'}}}}, 16867);
-assertErrorCode(t, {$project: {a: {$let:{vars: {'a.b': 1}, in: '$$FOO'}}}}, 16868);
-assertErrorCode(t, {$project: {a: {$let:{vars: {'a b': 1}, in: '$$FOO'}}}}, 16868);
+assertErrorCode(t, {$project: {a: {$let: {vars: {ROOT: 1}, in: '$$ROOT'}}}}, 16867);
+assertErrorCode(t, {$project: {a: {$let: {vars: {FOO: 1}, in: '$$FOO'}}}}, 16867);
+assertErrorCode(t, {$project: {a: {$let: {vars: {_underbar: 1}, in: '$$FOO'}}}}, 16867);
+assertErrorCode(t, {$project: {a: {$let: {vars: {'a.b': 1}, in: '$$FOO'}}}}, 16868);
+assertErrorCode(t, {$project: {a: {$let: {vars: {'a b': 1}, in: '$$FOO'}}}}, 16868);
assertErrorCode(t, {$project: {a: '$$_underbar'}}, 16870);
assertErrorCode(t, {$project: {a: '$$with spaces'}}, 16871);
diff --git a/jstests/aggregation/bugs/server9841.js b/jstests/aggregation/bugs/server9841.js
index 46f06021d0c..5bf9b32db93 100644
--- a/jstests/aggregation/bugs/server9841.js
+++ b/jstests/aggregation/bugs/server9841.js
@@ -4,7 +4,7 @@ load('jstests/aggregation/extras/utils.js');
var t = db.server9841;
t.drop();
t.insert({
- simple: [1,2,3,4],
+ simple: [1, 2, 3, 4],
nested: [{a: 1}, {a: 2}],
mixed: [{a: 1}, {}, {a: 2}, {a: null}],
notArray: 1,
@@ -12,29 +12,31 @@ t.insert({
});
function test(expression, expected) {
- var result = t.aggregate({$project:{_id:0, res: expression}}).toArray();
- assert.eq(result, [{res:expected}]);
+ var result = t.aggregate({$project: {_id: 0, res: expression}}).toArray();
+ assert.eq(result, [{res: expected}]);
}
-test({$map: {input: "$simple", as: "var", in: '$$var'}}, [1,2,3,4]);
-test({$map: {input: "$simple", as: "var", in: {$add:[10, '$$var']}}}, [11,12,13,14]);
+test({$map: {input: "$simple", as: "var", in: '$$var'}}, [1, 2, 3, 4]);
+test({$map: {input: "$simple", as: "var", in: {$add: [10, '$$var']}}}, [11, 12, 13, 14]);
-test({$map: {input: "$nested", as: "var", in: '$$var.a'}}, [1,2]);
-test({$map: {input: "$nested", as: "CURRENT", in: '$a'}}, [1,2]);
+test({$map: {input: "$nested", as: "var", in: '$$var.a'}}, [1, 2]);
+test({$map: {input: "$nested", as: "CURRENT", in: '$a'}}, [1, 2]);
+
+test({$map: {input: "$mixed", as: "var", in: '$$var.a'}},
+ [1, null, 2, null]); // missing becomes null
-test({$map: {input: "$mixed", as: "var", in: '$$var.a'}}, [1,null,2,null]); // missing becomes null
-
test({$map: {input: "$null", as: "var", in: '$$var'}}, null);
// can't set ROOT
-assertErrorCode(t, {$project:{a:{$map:{input: "$simple", as: "ROOT", in: '$$ROOT'}}}}, 16867);
+assertErrorCode(t, {$project: {a: {$map: {input: "$simple", as: "ROOT", in: '$$ROOT'}}}}, 16867);
// error on non-array
-assertErrorCode(t, {$project:{a:{$map:{input: "$notArray", as: "var", in: '$$var'}}}}, 16883);
+assertErrorCode(t, {$project: {a: {$map: {input: "$notArray", as: "var", in: '$$var'}}}}, 16883);
// parse errors (missing or extra fields)
-assertErrorCode(t, {$project:{a:{$map:{x:1, input: "$simple", as: "var", in: '$$var'}}}}, 16879);
-assertErrorCode(t, {$project:{a:{$map:{as: "var", in: '$$var'}}}}, 16880);
-assertErrorCode(t, {$project:{a:{$map:{input: "$simple", in: '$$var'}}}}, 16881);
-assertErrorCode(t, {$project:{a:{$map:{input: "$simple", as: "var"}}}}, 16882);
-
+assertErrorCode(t,
+ {$project: {a: {$map: {x: 1, input: "$simple", as: "var", in: '$$var'}}}},
+ 16879);
+assertErrorCode(t, {$project: {a: {$map: {as: "var", in: '$$var'}}}}, 16880);
+assertErrorCode(t, {$project: {a: {$map: {input: "$simple", in: '$$var'}}}}, 16881);
+assertErrorCode(t, {$project: {a: {$map: {input: "$simple", as: "var"}}}}, 16882);
diff --git a/jstests/aggregation/bugs/strcasecmp.js b/jstests/aggregation/bugs/strcasecmp.js
index c0f6d9a9256..71a5f2996c9 100644
--- a/jstests/aggregation/bugs/strcasecmp.js
+++ b/jstests/aggregation/bugs/strcasecmp.js
@@ -3,64 +3,64 @@
t = db.jstests_aggregation_strcasecmp;
t.drop();
-t.save( {} );
+t.save({});
-function cmp( a, b ) {
- return t.aggregate( { $project:{ a:{ $cmp:[ a, b ] } } } ).toArray()[ 0 ].a;
+function cmp(a, b) {
+ return t.aggregate({$project: {a: {$cmp: [a, b]}}}).toArray()[0].a;
}
-function strcasecmp( a, b ) {
- return t.aggregate( { $project:{ a:{ $strcasecmp:[ a, b ] } } } ).toArray()[ 0 ].a;
+function strcasecmp(a, b) {
+ return t.aggregate({$project: {a: {$strcasecmp: [a, b]}}}).toArray()[0].a;
}
-function assertException( args ) {
- assert.commandFailed(t.runCommand('aggregate',
- {pipeline: [{$project: {a: {$strcasecmp: args}}}]}));
+function assertException(args) {
+ assert.commandFailed(
+ t.runCommand('aggregate', {pipeline: [{$project: {a: {$strcasecmp: args}}}]}));
}
-function assertStrcasecmp( expected, a, b ) {
- assert.eq( expected, strcasecmp( a, b ) );
- assert.eq( -expected, strcasecmp( b, a ) );
+function assertStrcasecmp(expected, a, b) {
+ assert.eq(expected, strcasecmp(a, b));
+ assert.eq(-expected, strcasecmp(b, a));
}
-function assertBoth( expectedStrcasecmp, expectedCmp, a, b ) {
- assertStrcasecmp( expectedStrcasecmp, a, b );
- assert.eq( expectedCmp, cmp( a, b ) );
- assert.eq( -expectedCmp, cmp( b, a ) );
+function assertBoth(expectedStrcasecmp, expectedCmp, a, b) {
+ assertStrcasecmp(expectedStrcasecmp, a, b);
+ assert.eq(expectedCmp, cmp(a, b));
+ assert.eq(-expectedCmp, cmp(b, a));
}
// Wrong number of arguments.
-assertException( [] );
-assertException( [ 'a' ] );
-assertException( [ 'a', 'b', 'c' ] );
+assertException([]);
+assertException(['a']);
+assertException(['a', 'b', 'c']);
// Basic tests.
-assertBoth( 0, 0, '', '' );
-assertBoth( -1, -1, '', 'a' );
-assertBoth( 0, -1, 'A', 'a' );
-assertBoth( 1, -1, 'Ab', 'a' );
-assertBoth( 0, -1, 'Ab', 'aB' );
-assertBoth( 1, -1, 'Bb', 'aB' );
-assertBoth( -1, -1, 'Bb', 'cB' );
-assertBoth( 1, -1, 'aB', 'aa' );
-assertBoth( -1, -1, 'aB', 'ac' );
+assertBoth(0, 0, '', '');
+assertBoth(-1, -1, '', 'a');
+assertBoth(0, -1, 'A', 'a');
+assertBoth(1, -1, 'Ab', 'a');
+assertBoth(0, -1, 'Ab', 'aB');
+assertBoth(1, -1, 'Bb', 'aB');
+assertBoth(-1, -1, 'Bb', 'cB');
+assertBoth(1, -1, 'aB', 'aa');
+assertBoth(-1, -1, 'aB', 'ac');
// With non alphabet characters.
-assertBoth( 0, -1, 'A$_b1C?', 'a$_b1C?' );
-assertBoth( 1, -1, 'ABC01234', 'abc0123' );
+assertBoth(0, -1, 'A$_b1C?', 'a$_b1C?');
+assertBoth(1, -1, 'ABC01234', 'abc0123');
// String coercion.
-assertStrcasecmp( 0, '1', 1 );
-assertStrcasecmp( 0, '1.23', 1.23 );
-assertStrcasecmp( 0, '1970-01-01T00:00:00', new Date( 0 ) );
-assertStrcasecmp( 0, '1970-01-01t00:00:00', new Date( 0 ) );
-assertException( [ 'abc', /abc/ ] );
+assertStrcasecmp(0, '1', 1);
+assertStrcasecmp(0, '1.23', 1.23);
+assertStrcasecmp(0, '1970-01-01T00:00:00', new Date(0));
+assertStrcasecmp(0, '1970-01-01t00:00:00', new Date(0));
+assertException(['abc', /abc/]);
// Extended characters.
-assertBoth( 0, -1, '\u0080D\u20ac', '\u0080d\u20ac' );
-assertBoth( 1, 1, 'ó', 'Ó' ); // Not treated as equal currently.
+assertBoth(0, -1, '\u0080D\u20ac', '\u0080d\u20ac');
+assertBoth(1, 1, 'ó', 'Ó'); // Not treated as equal currently.
// String from field path.
t.drop();
-t.save( { x:'abc' } );
-assertBoth( 0, 1, '$x', 'ABC' );
+t.save({x: 'abc'});
+assertBoth(0, 1, '$x', 'ABC');
diff --git a/jstests/aggregation/bugs/substr.js b/jstests/aggregation/bugs/substr.js
index 0d4eb72e691..4aee531dd67 100644
--- a/jstests/aggregation/bugs/substr.js
+++ b/jstests/aggregation/bugs/substr.js
@@ -3,115 +3,118 @@
t = db.jstests_aggregation_substr;
t.drop();
-t.save( {} );
+t.save({});
-function assertSubstring( expected, str, offset, len ) {
- assert.eq( expected,
- t.aggregate( { $project:{ a:{ $substr:[ str, offset, len ] } } } ).toArray()[ 0 ].a );
+function assertSubstring(expected, str, offset, len) {
+ assert.eq(expected, t.aggregate({$project: {a: {$substr: [str, offset, len]}}}).toArray()[0].a);
}
-function assertArgsException( args ) {
+function assertArgsException(args) {
assert.commandFailed(t.runCommand('aggregate', {pipeline: [{$substr: args}]}));
}
-function assertException( str, offset, len ) {
+function assertException(str, offset, len) {
assertArgsException([str, offset, len]);
}
// Wrong number of arguments.
-assertArgsException( [] );
-assertArgsException( [ 'foo' ] );
-assertArgsException( [ 'foo', 1 ] );
-assertArgsException( [ 'foo', 1, 1, 1 ] );
+assertArgsException([]);
+assertArgsException(['foo']);
+assertArgsException(['foo', 1]);
+assertArgsException(['foo', 1, 1, 1]);
// Basic offset / length checks.
-assertSubstring( 'abcd', 'abcd', 0, 4 );
-assertSubstring( 'abcd', 'abcd', 0, 5 );
-assertSubstring( '', 'abcd', -1 /* unsigned */, 4 );
-assertSubstring( 'a', 'abcd', 0, 1 );
-assertSubstring( 'ab', 'abcd', 0, 2 );
-assertSubstring( 'b', 'abcd', 1, 1 );
-assertSubstring( 'd', 'abcd', 3, 1 );
-assertSubstring( '', 'abcd', 4, 1 );
-assertSubstring( '', 'abcd', 3, 0 );
-assertSubstring( 'cd', 'abcd', 2, -1 /* unsigned */ );
+assertSubstring('abcd', 'abcd', 0, 4);
+assertSubstring('abcd', 'abcd', 0, 5);
+assertSubstring('', 'abcd', -1 /* unsigned */, 4);
+assertSubstring('a', 'abcd', 0, 1);
+assertSubstring('ab', 'abcd', 0, 2);
+assertSubstring('b', 'abcd', 1, 1);
+assertSubstring('d', 'abcd', 3, 1);
+assertSubstring('', 'abcd', 4, 1);
+assertSubstring('', 'abcd', 3, 0);
+assertSubstring('cd', 'abcd', 2, -1 /* unsigned */);
// See server6186.js for additional offset / length checks.
// Additional numeric types for offset / length.
-assertSubstring( 'bc', 'abcd', 1, 2 );
-assertSubstring( 'bc', 'abcd', 1.0, 2.0 );
-assertSubstring( 'bc', 'abcd', NumberInt( 1 ), NumberInt( 2 ) );
-assertSubstring( 'bc', 'abcd', NumberLong( 1 ), NumberLong( 2 ) );
-assertSubstring( 'bc', 'abcd', NumberInt( 1 ), NumberLong( 2 ) );
-assertSubstring( 'bc', 'abcd', NumberLong( 1 ), NumberInt( 2 ) );
+assertSubstring('bc', 'abcd', 1, 2);
+assertSubstring('bc', 'abcd', 1.0, 2.0);
+assertSubstring('bc', 'abcd', NumberInt(1), NumberInt(2));
+assertSubstring('bc', 'abcd', NumberLong(1), NumberLong(2));
+assertSubstring('bc', 'abcd', NumberInt(1), NumberLong(2));
+assertSubstring('bc', 'abcd', NumberLong(1), NumberInt(2));
// Integer component is used.
-assertSubstring( 'bc', 'abcd', 1.2, 2.2 );
-assertSubstring( 'bc', 'abcd', 1.9, 2.9 );
+assertSubstring('bc', 'abcd', 1.2, 2.2);
+assertSubstring('bc', 'abcd', 1.9, 2.9);
// Non numeric types for offset / length.
-assertException( 'abcd', false, 2 );
-assertException( 'abcd', 1, true );
-assertException( 'abcd', 'q', 2 );
-assertException( 'abcd', 1, 'r' );
-assertException( 'abcd', null, 3 );
-assertException( 'abcd', 1, undefined );
+assertException('abcd', false, 2);
+assertException('abcd', 1, true);
+assertException('abcd', 'q', 2);
+assertException('abcd', 1, 'r');
+assertException('abcd', null, 3);
+assertException('abcd', 1, undefined);
// String coercion.
-assertSubstring( '123', 123, 0, 3 );
-assertSubstring( '2', 123, 1, 1 );
-assertSubstring( '1970', new Date( 0 ), 0, 4 );
-assertSubstring( '', null, 0, 4 );
-assertException( /abc/, 0, 4 );
+assertSubstring('123', 123, 0, 3);
+assertSubstring('2', 123, 1, 1);
+assertSubstring('1970', new Date(0), 0, 4);
+assertSubstring('', null, 0, 4);
+assertException(/abc/, 0, 4);
// Field path like string.
-assertSubstring( '$a', 'a$a', 1, 2 );
+assertSubstring('$a', 'a$a', 1, 2);
// Multi byte utf-8.
-assertSubstring( '\u0080', '\u0080', 0, 2 );
+assertSubstring('\u0080', '\u0080', 0, 2);
-assertException( '\u0080', 0, 1 );
-assertException( '\u0080', 1, 1 );
+assertException('\u0080', 0, 1);
+assertException('\u0080', 1, 1);
-assertSubstring( '\u0080', '\u0080\u20ac', 0, 2 );
-assertSubstring( '\u20ac', '\u0080\u20ac', 2, 3 );
+assertSubstring('\u0080', '\u0080\u20ac', 0, 2);
+assertSubstring('\u20ac', '\u0080\u20ac', 2, 3);
-assertException( '\u0080\u20ac', 1, 3 );
-assertException( '\u0080\u20ac', 1, 4 );
-assertException( '\u0080\u20ac', 0, 3 );
+assertException('\u0080\u20ac', 1, 3);
+assertException('\u0080\u20ac', 1, 4);
+assertException('\u0080\u20ac', 0, 3);
-assertSubstring( '\u0044\u20ac', '\u0080\u0044\u20ac', 2, 4 );
-assertSubstring( '\u0044', '\u0080\u0044\u20ac', 2, 1 );
+assertSubstring('\u0044\u20ac', '\u0080\u0044\u20ac', 2, 4);
+assertSubstring('\u0044', '\u0080\u0044\u20ac', 2, 1);
// The four byte utf-8 character 𝌆 (have to represent in surrogate halves).
-assertSubstring( '\uD834\uDF06', '\uD834\uDF06', 0, 4 );
+assertSubstring('\uD834\uDF06', '\uD834\uDF06', 0, 4);
-assertException( '\uD834\uDF06', '\uD834\uDF06', 1, 4 );
-assertException( '\uD834\uDF06', '\uD834\uDF06', 0, 3 );
+assertException('\uD834\uDF06', '\uD834\uDF06', 1, 4);
+assertException('\uD834\uDF06', '\uD834\uDF06', 0, 3);
// Operands from document.
t.drop();
-t.save( { x:'a', y:'abc', z:'abcde', a:0, b:1, c:2, d:3, e:4, f:5 } );
-assertSubstring( 'a', '$x', '$a', '$b' );
-assertSubstring( 'a', '$x', '$a', '$f' );
-assertSubstring( 'b', '$y', '$b', '$b' );
-assertSubstring( 'b', '$z', '$b', '$b' );
-assertSubstring( 'bcd', '$z', '$b', '$d' );
-assertSubstring( 'cde', '$z', '$c', '$f' );
-assertSubstring( 'c', '$y', '$c', '$f' );
+t.save({x: 'a', y: 'abc', z: 'abcde', a: 0, b: 1, c: 2, d: 3, e: 4, f: 5});
+assertSubstring('a', '$x', '$a', '$b');
+assertSubstring('a', '$x', '$a', '$f');
+assertSubstring('b', '$y', '$b', '$b');
+assertSubstring('b', '$z', '$b', '$b');
+assertSubstring('bcd', '$z', '$b', '$d');
+assertSubstring('cde', '$z', '$c', '$f');
+assertSubstring('c', '$y', '$c', '$f');
// Computed operands.
-assertSubstring( 'cde', '$z', { $add:[ '$b', '$b' ] }, { $add:[ '$c', '$d' ] } );
-assertSubstring( 'cde', '$z', { $add:[ '$b', 1 ] }, { $add:[ 2, '$d' ] } );
+assertSubstring('cde', '$z', {$add: ['$b', '$b']}, {$add: ['$c', '$d']});
+assertSubstring('cde', '$z', {$add: ['$b', 1]}, {$add: [2, '$d']});
// Nested.
-assert.eq( 'e',
- t.aggregate( { $project:{ a:
- { $substr:
- [ { $substr:
- [ { $substr:
- [ { $substr:[ 'abcdefghij', 1, 6 ]
- }, 2, 5 ]
- }, 0, 3 ]
- }, 1, 1 ]
- } } } ).toArray()[ 0 ].a );
+assert.eq('e',
+ t.aggregate({
+ $project: {
+ a: {
+ $substr: [
+ {$substr: [{$substr: [{$substr: ['abcdefghij', 1, 6]}, 2, 5]}, 0, 3]},
+ 1,
+ 1
+ ]
+ }
+ }
+ })
+ .toArray()[0]
+ .a);
diff --git a/jstests/aggregation/bugs/upperlower.js b/jstests/aggregation/bugs/upperlower.js
index a97c2ec4aa3..60bcba8db20 100644
--- a/jstests/aggregation/bugs/upperlower.js
+++ b/jstests/aggregation/bugs/upperlower.js
@@ -3,63 +3,62 @@
t = db.jstests_aggregation_upperlower;
t.drop();
-t.save( {} );
+t.save({});
-function assertResult( expectedUpper, expectedLower, string ) {
- result = t.aggregate( { $project:{ upper:{ $toUpper:string },
- lower:{ $toLower:string } } } ).toArray()[ 0 ];
- assert.eq( expectedUpper, result.upper );
- assert.eq( expectedLower, result.lower );
+function assertResult(expectedUpper, expectedLower, string) {
+ result = t.aggregate({$project: {upper: {$toUpper: string}, lower: {$toLower: string}}})
+ .toArray()[0];
+ assert.eq(expectedUpper, result.upper);
+ assert.eq(expectedLower, result.lower);
}
-function assertException( string ) {
- assert.commandFailed(t.runCommand('aggregate',
- {pipeline: [{$project: {upper: {$toUpper: string}}}]}));
- assert.commandFailed(t.runCommand('aggregate',
- {pipeline: [{$project: {lower: {$toLower: string}}}]}));
+function assertException(string) {
+ assert.commandFailed(
+ t.runCommand('aggregate', {pipeline: [{$project: {upper: {$toUpper: string}}}]}));
+ assert.commandFailed(
+ t.runCommand('aggregate', {pipeline: [{$project: {lower: {$toLower: string}}}]}));
}
// Wrong number of arguments.
-assertException( [] );
-assertException( [ 'a', 'b' ] );
+assertException([]);
+assertException(['a', 'b']);
// Upper and lower case conversion.
-assertResult( '', '', '' );
-assertResult( '', '', [ '' ] );
-assertResult( 'AB', 'ab', 'aB' );
-assertResult( 'AB', 'ab', [ 'Ab' ] );
-assertResult( 'ABZ', 'abz', 'aBz' );
+assertResult('', '', '');
+assertResult('', '', ['']);
+assertResult('AB', 'ab', 'aB');
+assertResult('AB', 'ab', ['Ab']);
+assertResult('ABZ', 'abz', 'aBz');
// With non alphabet characters.
-assertResult( '1', '1', '1' );
-assertResult( '1^A-A_$%.', '1^a-a_$%.', '1^a-A_$%.' );
-assertResult( '1290B', '1290b', '1290b' );
-assertResult( '0XFF0B', '0xff0b', '0XfF0b' );
+assertResult('1', '1', '1');
+assertResult('1^A-A_$%.', '1^a-a_$%.', '1^a-A_$%.');
+assertResult('1290B', '1290b', '1290b');
+assertResult('0XFF0B', '0xff0b', '0XfF0b');
// Type coercion.
-assertResult( '555.5', '555.5', 555.5 );
-assertResult( '1970-01-01T00:00:00', '1970-01-01t00:00:00', new Date( 0 ) );
-assertResult( '', '', null );
-assertException( /abc/ );
+assertResult('555.5', '555.5', 555.5);
+assertResult('1970-01-01T00:00:00', '1970-01-01t00:00:00', new Date(0));
+assertResult('', '', null);
+assertException(/abc/);
// Nested.
spec = 'aBcDeFg';
-for( i = 0; i < 10; ++i ) {
- assertResult( 'ABCDEFG', 'abcdefg', spec );
- if ( i % 2 == 0 ) {
- spec = [ { $toUpper:spec } ];
- }
- else {
- spec = [ { $toLower:spec } ];
+for (i = 0; i < 10; ++i) {
+ assertResult('ABCDEFG', 'abcdefg', spec);
+ if (i % 2 == 0) {
+ spec = [{$toUpper: spec}];
+ } else {
+ spec = [{$toLower: spec}];
}
}
// Utf8.
-assertResult( '\u0080D\u20ac', '\u0080d\u20ac', '\u0080\u0044\u20ac' );
-assertResult( 'ó', 'ó', 'ó' ); // Not handled.
-assertResult( 'Ó', 'Ó', 'Ó' ); // Not handled.
+assertResult('\u0080D\u20ac', '\u0080d\u20ac', '\u0080\u0044\u20ac');
+assertResult('ó', 'ó', 'ó'); // Not handled.
+assertResult('Ó', 'Ó', 'Ó'); // Not handled.
// Value from field path.
t.drop();
-t.save( { string:'-_aB' } );
-assertResult( '-_AB', '-_ab', '$string' );
+t.save({string: '-_aB'});
+assertResult('-_AB', '-_ab', '$string');
diff --git a/jstests/aggregation/data/articles.js b/jstests/aggregation/data/articles.js
index 23c78332a77..0f55563f7be 100644
--- a/jstests/aggregation/data/articles.js
+++ b/jstests/aggregation/data/articles.js
@@ -4,44 +4,41 @@
db = db.getSiblingDB("aggdb");
db.article.drop();
-db.article.save( {
- _id : 1,
- title : "this is my title" ,
- author : "bob" ,
- posted : new Date(1079895594000) ,
- pageViews : 5 ,
- tags : [ "fun" , "good" , "fun" ] ,
- comments : [
- { author :"joe" , text : "this is cool" } ,
- { author :"sam" , text : "this is bad" }
- ],
- other : { foo : 5 }
+db.article.save({
+ _id: 1,
+ title: "this is my title",
+ author: "bob",
+ posted: new Date(1079895594000),
+ pageViews: 5,
+ tags: ["fun", "good", "fun"],
+ comments: [{author: "joe", text: "this is cool"}, {author: "sam", text: "this is bad"}],
+ other: {foo: 5}
});
-db.article.save( {
- _id : 2,
- title : "this is your title" ,
- author : "dave" ,
- posted : new Date(1912392670000) ,
- pageViews : 7 ,
- tags : [ "fun" , "nasty" ] ,
- comments : [
- { author :"barbara" , text : "this is interesting" } ,
- { author :"jenny" , text : "i like to play pinball", votes: 10 }
+db.article.save({
+ _id: 2,
+ title: "this is your title",
+ author: "dave",
+ posted: new Date(1912392670000),
+ pageViews: 7,
+ tags: ["fun", "nasty"],
+ comments: [
+ {author: "barbara", text: "this is interesting"},
+ {author: "jenny", text: "i like to play pinball", votes: 10}
],
- other : { bar : 14 }
+ other: {bar: 14}
});
-db.article.save( {
- _id : 3,
- title : "this is some other title" ,
- author : "jane" ,
- posted : new Date(978239834000) ,
- pageViews : 6 ,
- tags : [ "nasty" , "filthy" ] ,
- comments : [
- { author :"will" , text : "i don't like the color" } ,
- { author :"jenny" , text : "can i get that in green?" }
+db.article.save({
+ _id: 3,
+ title: "this is some other title",
+ author: "jane",
+ posted: new Date(978239834000),
+ pageViews: 6,
+ tags: ["nasty", "filthy"],
+ comments: [
+ {author: "will", text: "i don't like the color"},
+ {author: "jenny", text: "can i get that in green?"}
],
- other : { bar : 14 }
+ other: {bar: 14}
});
diff --git a/jstests/aggregation/disabled/server5369.js b/jstests/aggregation/disabled/server5369.js
index 754917ff8b4..6d8e030d717 100644
--- a/jstests/aggregation/disabled/server5369.js
+++ b/jstests/aggregation/disabled/server5369.js
@@ -6,8 +6,8 @@ db = db.getSiblingDB('aggdb');
// empty and populate
db.test.drop();
-db.test.save({a:1,b:2});
+db.test.save({a: 1, b: 2});
// agg with exclusion than ensure fields are only the two we expect
-var f = db.test.aggregate({$project:{a:0}});
-assert.eq(["_id","b"], Object.keySet(f.toArray()[0]), "server5369 failed");
+var f = db.test.aggregate({$project: {a: 0}});
+assert.eq(["_id", "b"], Object.keySet(f.toArray()[0]), "server5369 failed");
diff --git a/jstests/aggregation/extras/debug.js b/jstests/aggregation/extras/debug.js
index 24402c1ca69..f2a101976de 100644
--- a/jstests/aggregation/extras/debug.js
+++ b/jstests/aggregation/extras/debug.js
@@ -1,5 +1,6 @@
function assert(b, m) {
- if (!b) alert(m);
+ if (!b)
+ alert(m);
}
function ObjectId(id) {
@@ -7,28 +8,7 @@ function ObjectId(id) {
}
var t1result = [
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066d"),
- "pageViews" : 5,
- "tags" : [
- "fun",
- "good"
- ]
- },
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066e"),
- "pageViews" : 7,
- "tags" : [
- "fun",
- "nasty"
- ]
- },
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066f"),
- "pageViews" : 6,
- "tags" : [
- "nasty",
- "filthy"
- ]
- }
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066d"), "pageViews": 5, "tags": ["fun", "good"]},
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066e"), "pageViews": 7, "tags": ["fun", "nasty"]},
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066f"), "pageViews": 6, "tags": ["nasty", "filthy"]}
];
diff --git a/jstests/aggregation/extras/limitskip.js b/jstests/aggregation/extras/limitskip.js
index 9d04d595a3d..3644282b3c8 100644
--- a/jstests/aggregation/extras/limitskip.js
+++ b/jstests/aggregation/extras/limitskip.js
@@ -2,77 +2,63 @@
var coll = "numbers";
db[coll].drop();
-for (i=0; i<100; i++) {
- db[coll].save({_id : i, mod : [i%2, i%3, i%5]});
+for (i = 0; i < 100; i++) {
+ db[coll].save({_id: i, mod: [i % 2, i % 3, i % 5]});
}
print("-----LIMIT-----");
print("normal limit");
-var doc = db.runCommand({ aggregate : coll, pipeline : [{ $limit : 2}]});
+var doc = db.runCommand({aggregate: coll, pipeline: [{$limit: 2}]});
assert.eq(doc.result.length, 2, tojson(doc));
print("limit larger than result size");
-doc = db.runCommand({ aggregate : coll, pipeline : [{ $limit : 200}]});
+doc = db.runCommand({aggregate: coll, pipeline: [{$limit: 200}]});
assert.eq(doc.result.length, 100, tojson(doc));
-
print("limit on sort");
-doc = db.runCommand({ aggregate : coll, pipeline : [{$sort : {_id : -1}}, {$limit : 3}]});
+doc = db.runCommand({aggregate: coll, pipeline: [{$sort: {_id: -1}}, {$limit: 3}]});
r = doc.result;
assert.eq(doc.result.length, 3);
-for (var i=0; i<r; i++) {
+for (var i = 0; i < r; i++) {
assert.eq(100 - r[i]._id, i, tojson(doc));
}
-print("TODO: invalid limit"); // once assert has been replaced with uassert
-
+print("TODO: invalid limit"); // once assert has been replaced with uassert
print("-----SKIP------");
print("normal skip");
-doc = db.runCommand({ aggregate : coll, pipeline : [{ $skip : 95}]});
+doc = db.runCommand({aggregate: coll, pipeline: [{$skip: 95}]});
assert.eq(doc.result.length, 5, tojson(doc));
print("skip larger than result size");
-doc = db.runCommand({ aggregate : coll, pipeline : [{ $skip : 102}]});
+doc = db.runCommand({aggregate: coll, pipeline: [{$skip: 102}]});
assert.eq(doc.result.length, 0, tojson(doc));
-
print("check skip results");
-doc = db.runCommand({ aggregate : coll, pipeline : [{ $sort : {_id : 1}}, {$skip : 6}, {$limit : 3}]});
+doc = db.runCommand({aggregate: coll, pipeline: [{$sort: {_id: 1}}, {$skip: 6}, {$limit: 3}]});
assert.eq(doc.result.length, 3, tojson(doc));
-for (var i=0; i<3; i++) {
- assert.eq(i+6, doc.result[i]._id, tojson(doc));
+for (var i = 0; i < 3; i++) {
+ assert.eq(i + 6, doc.result[i]._id, tojson(doc));
}
-print("TODO: invalid skip"); // once assert has been replaced with uassert
-
+print("TODO: invalid skip"); // once assert has been replaced with uassert
print("on virtual collection");
-doc = db.runCommand({ aggregate : coll, pipeline : [
- {
- $unwind : "$mod"
- },
- {
- $project : { m : "$mod" }
- },
- {
- $sort : {
- m : 1,
- _id : -1
- }
- },
- {
- $skip : 150
- },
- {
- $limit : 5
- }
-]});
+doc = db.runCommand({
+ aggregate: coll,
+ pipeline: [
+ {$unwind: "$mod"},
+ {$project: {m: "$mod"}},
+ {$sort: {m: 1, _id: -1}},
+ {$skip: 150},
+ {$limit: 5}
+ ]
+});
assert.eq(doc.result.length, 5);
-for (var i=0; i<5; i++) {
+for (var i = 0; i < 5; i++) {
assert.eq(1, doc.result[i].m, tojson(doc));
}
assert.eq(doc.result[0]._id, 55, tojson(doc));
@@ -81,15 +67,13 @@ assert.eq(doc.result[2]._id, 52, tojson(doc));
assert.eq(doc.result[3]._id, 51, tojson(doc));
assert.eq(doc.result[4]._id, 51, tojson(doc));
-
print("size 0 collection");
db[coll].drop();
-doc = db.runCommand({ aggregate : coll, pipeline : [{$skip : 6}]});
+doc = db.runCommand({aggregate: coll, pipeline: [{$skip: 6}]});
assert.eq(doc.ok, 1);
assert.eq(doc.result.length, 0);
-doc = db.runCommand({ aggregate : coll, pipeline : [{$limit : 3}]});
+doc = db.runCommand({aggregate: coll, pipeline: [{$limit: 3}]});
assert.eq(doc.ok, 1);
assert.eq(doc.result.length, 0);
-
diff --git a/jstests/aggregation/extras/mrabench.js b/jstests/aggregation/extras/mrabench.js
index ca0a6ade0fe..fe731ecd28b 100644
--- a/jstests/aggregation/extras/mrabench.js
+++ b/jstests/aggregation/extras/mrabench.js
@@ -4,15 +4,18 @@
One way to do this is to dump and restore it using mongodump and mongorestore
*/
-db = db.getSiblingDB( "mongousage" );
+db = db.getSiblingDB("mongousage");
function rollupMap() {
- emit( this._id.t , { total : this.value , unique : 1 } );
+ emit(this._id.t, {total: this.value, unique: 1});
}
function rollupReduce(key, values) {
- var res = { total : 0 , unique : 0 };
- for ( var i=0; i<values.length; i++ ){
+ var res = {
+ total: 0,
+ unique: 0
+ };
+ for (var i = 0; i < values.length; i++) {
res.total += values[i].total;
res.unique += values[i].unique;
}
@@ -20,42 +23,33 @@ function rollupReduce(key, values) {
}
function mrrollups() {
+ res = db.gen.monthly.ip.mapReduce(rollupMap, rollupReduce, {out: "gen.monthly"});
+ res.find().sort({_id: -1}).forEach(printjsononeline);
- res = db.gen.monthly.ip.mapReduce( rollupMap , rollupReduce ,
- { out : "gen.monthly" } );
- res.find().sort( { _id : -1 } ).forEach( printjsononeline );
-
- res = db.gen.weekly.ip.mapReduce( rollupMap , rollupReduce ,
- { out : "gen.weekly" } );
- res.find().sort( { _id : -1 } ).forEach( printjsononeline );
+ res = db.gen.weekly.ip.mapReduce(rollupMap, rollupReduce, {out: "gen.weekly"});
+ res.find().sort({_id: -1}).forEach(printjsononeline);
}
function rollupMonthlyMR() {
- resMonthlyMR = db.gen.monthly.ip.mapReduce( rollupMap , rollupReduce ,
- { out: { inline : 1 }} );
+ resMonthlyMR = db.gen.monthly.ip.mapReduce(rollupMap, rollupReduce, {out: {inline: 1}});
}
function rollupWeeklyMR() {
- resWeeklyMR = db.gen.weekly.ip.mapReduce( rollupMap , rollupReduce ,
- { out : {inline : 1 }} );
+ resWeeklyMR = db.gen.weekly.ip.mapReduce(rollupMap, rollupReduce, {out: {inline: 1}});
}
function rollupMonthlyA() {
- resMonthlyA = db.runCommand( { aggregate: "gen.monthly.ip", pipeline : [
- { $group : {
- _id : { month: "_id.t" },
- total : { $sum : "$value" },
- unique : { $sum : 1 }
- }}
- ]});
+ resMonthlyA = db.runCommand({
+ aggregate: "gen.monthly.ip",
+ pipeline:
+ [{$group: {_id: {month: "_id.t"}, total: {$sum: "$value"}, unique: {$sum: 1}}}]
+ });
}
function rollupWeeklyA() {
- resWeeklyA = db.runCommand( { aggregate: "gen.weekly.ip", pipeline : [
- { $group : {
- _id : { month: "_id.t" },
- total : { $sum : "$value" },
- unique : { $sum : 1 }
- }}
- ]});
+ resWeeklyA = db.runCommand({
+ aggregate: "gen.weekly.ip",
+ pipeline:
+ [{$group: {_id: {month: "_id.t"}, total: {$sum: "$value"}, unique: {$sum: 1}}}]
+ });
}
diff --git a/jstests/aggregation/extras/testutils.js b/jstests/aggregation/extras/testutils.js
index 83f7571cc21..bb753921906 100644
--- a/jstests/aggregation/extras/testutils.js
+++ b/jstests/aggregation/extras/testutils.js
@@ -4,137 +4,44 @@
var verbose = false;
var t1result = [
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066d"),
- "pageViews" : 5,
- "tags" : [
- "fun",
- "good"
- ]
- },
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066e"),
- "pageViews" : 7,
- "tags" : [
- "fun",
- "nasty"
- ]
- },
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066f"),
- "pageViews" : 6,
- "tags" : [
- "nasty",
- "filthy"
- ]
- }
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066d"), "pageViews": 5, "tags": ["fun", "good"]},
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066e"), "pageViews": 7, "tags": ["fun", "nasty"]},
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066f"), "pageViews": 6, "tags": ["nasty", "filthy"]}
];
assert(arrayEq(t1result, t1result, verbose), 't0a failed');
assert(resultsEq(t1result, t1result, verbose), 't0b failed');
var t1resultr = [
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066d"),
- "pageViews" : 5,
- "tags" : [
- "fun",
- "good"
- ]
- },
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066f"),
- "pageViews" : 6,
- "tags" : [
- "nasty",
- "filthy"
- ]
- },
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066e"),
- "pageViews" : 7,
- "tags" : [
- "fun",
- "nasty"
- ]
- },
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066d"), "pageViews": 5, "tags": ["fun", "good"]},
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066f"), "pageViews": 6, "tags": ["nasty", "filthy"]},
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066e"), "pageViews": 7, "tags": ["fun", "nasty"]},
];
assert(resultsEq(t1resultr, t1result, verbose), 'tr1 failed');
assert(resultsEq(t1result, t1resultr, verbose), 'tr2 failed');
var t1resultf1 = [
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066e"),
- "pageViews" : 7,
- "tags" : [
- "fun",
- "nasty"
- ]
- },
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066f"),
- "pageViews" : 6,
- "tags" : [
- "nasty",
- "filthy"
- ]
- }
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066e"), "pageViews": 7, "tags": ["fun", "nasty"]},
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066f"), "pageViews": 6, "tags": ["nasty", "filthy"]}
];
assert(!resultsEq(t1result, t1resultf1, verbose), 't1a failed');
assert(!resultsEq(t1resultf1, t1result, verbose), 't1b failed');
var t1resultf2 = [
- {
- "pageViews" : 5,
- "tags" : [
- "fun",
- "good"
- ]
- },
- {
- "pageViews" : 7,
- "tags" : [
- "fun",
- "nasty"
- ]
- },
- {
- "pageViews" : 6,
- "tags" : [
- "nasty",
- "filthy"
- ]
- }
+ {"pageViews": 5, "tags": ["fun", "good"]},
+ {"pageViews": 7, "tags": ["fun", "nasty"]},
+ {"pageViews": 6, "tags": ["nasty", "filthy"]}
];
assert(!resultsEq(t1result, t1resultf2, verbose), 't2a failed');
assert(!resultsEq(t1resultf2, t1result, verbose), 't2b failed');
var t1resultf3 = [
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066d"),
- "pageViews" : 5,
- "tags" : [
- "fun",
- ]
- },
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066e"),
- "pageViews" : 7,
- "tags" : [
- "fun",
- "nasty"
- ]
- },
- {
- "_id" : ObjectId("4dc07fedd8420ab8d0d4066f"),
- "pageViews" : 6,
- "tags" : [
- "filthy"
- ]
- }
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066d"), "pageViews": 5, "tags": ["fun", ]},
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066e"), "pageViews": 7, "tags": ["fun", "nasty"]},
+ {"_id": ObjectId("4dc07fedd8420ab8d0d4066f"), "pageViews": 6, "tags": ["filthy"]}
];
assert(!resultsEq(t1result, t1resultf3, verbose), 't3a failed');
diff --git a/jstests/aggregation/extras/utils.js b/jstests/aggregation/extras/utils.js
index 60081dd56b9..ab91e71b8be 100644
--- a/jstests/aggregation/extras/utils.js
+++ b/jstests/aggregation/extras/utils.js
@@ -4,37 +4,36 @@
function anyEq(al, ar, v) {
if (typeof(v) == 'undefined')
- v = false;
+ v = false;
if (al instanceof Array) {
- if (!(ar instanceof Array)) {
- if (v)
- print('anyEq: ar is not an array ' + ar);
- return false;
- }
-
- if (!arrayEq(al, ar, v)) {
- if (v)
- print('anyEq: arrayEq(al, ar): false; al=' + al + ' ar=' + ar);
- return false;
- }
+ if (!(ar instanceof Array)) {
+ if (v)
+ print('anyEq: ar is not an array ' + ar);
+ return false;
+ }
+
+ if (!arrayEq(al, ar, v)) {
+ if (v)
+ print('anyEq: arrayEq(al, ar): false; al=' + al + ' ar=' + ar);
+ return false;
+ }
} else if (al instanceof Object) {
- if (!(ar instanceof Object)) {
- if (v)
- print('anyEq: ar is not an object ' + ar);
- return false;
- }
-
- if (!documentEq(al, ar, v)) {
- if (v)
- print('anyEq: documentEq(al, ar): false; al=' + al + ' ar=' + ar);
- return false;
- }
- }
- else if (al != ar) {
- if (v)
- print('anyEq: (al != ar): false; al=' + al + ' ar=' + ar);
- return false;
+ if (!(ar instanceof Object)) {
+ if (v)
+ print('anyEq: ar is not an object ' + ar);
+ return false;
+ }
+
+ if (!documentEq(al, ar, v)) {
+ if (v)
+ print('anyEq: documentEq(al, ar): false; al=' + al + ' ar=' + ar);
+ return false;
+ }
+ } else if (al != ar) {
+ if (v)
+ print('anyEq: (al != ar): false; al=' + al + ' ar=' + ar);
+ return false;
}
/* if we got here, they matched */
@@ -48,54 +47,53 @@ function anyEq(al, ar, v) {
*/
function documentEq(dl, dr, v) {
if (typeof(v) == 'undefined')
- v = false;
+ v = false;
/* make sure these are both objects */
if (!(dl instanceof Object)) {
- if (v)
- print('documentEq: dl is not an object ' + dl);
- return false;
+ if (v)
+ print('documentEq: dl is not an object ' + dl);
+ return false;
}
if (!(dr instanceof Object)) {
- if (v)
- print('documentEq: dr is not an object ' + dr);
- return false;
+ if (v)
+ print('documentEq: dr is not an object ' + dr);
+ return false;
}
/* start by checking for all of dl's properties in dr */
- for(var propertyName in dl) {
- /* skip inherited properties */
- if (!dl.hasOwnProperty(propertyName))
- continue;
-
- /* the documents aren't equal if they don't both have the property */
- if (!dr.hasOwnProperty(propertyName)) {
- if (v)
- print('documentEq: dr doesn\'t have property ' + propertyName);
- return false;
- }
-
- /* if the property is the _id, they don't have to be equal */
- if (propertyName == '_id')
- continue;
-
- if (!anyEq(dl[propertyName], dr[propertyName], v)) {
- return false;
- }
+ for (var propertyName in dl) {
+ /* skip inherited properties */
+ if (!dl.hasOwnProperty(propertyName))
+ continue;
+
+ /* the documents aren't equal if they don't both have the property */
+ if (!dr.hasOwnProperty(propertyName)) {
+ if (v)
+ print('documentEq: dr doesn\'t have property ' + propertyName);
+ return false;
+ }
+
+ /* if the property is the _id, they don't have to be equal */
+ if (propertyName == '_id')
+ continue;
+
+ if (!anyEq(dl[propertyName], dr[propertyName], v)) {
+ return false;
+ }
}
/* now make sure that dr doesn't have any extras that dl doesn't have */
- for(var propertyName in dr) {
- if (!dr.hasOwnProperty(propertyName))
- continue;
-
- /* if dl doesn't have this complain; if it does, we compared it above */
- if (!dl.hasOwnProperty(propertyName)) {
- if (v)
- print('documentEq: dl is missing property ' +
- propertyName);
- return false;
- }
+ for (var propertyName in dr) {
+ if (!dr.hasOwnProperty(propertyName))
+ continue;
+
+ /* if dl doesn't have this complain; if it does, we compared it above */
+ if (!dl.hasOwnProperty(propertyName)) {
+ if (v)
+ print('documentEq: dl is missing property ' + propertyName);
+ return false;
+ }
}
/* if we got here, the two documents are an exact match */
@@ -104,35 +102,34 @@ function documentEq(dl, dr, v) {
function arrayEq(al, ar, v) {
if (typeof(v) == 'undefined')
- v = false;
+ v = false;
/* check that these are both arrays */
if (!(al instanceof Array)) {
- if (v)
- print('arrayEq: al is not an array: ' + al);
- return false;
+ if (v)
+ print('arrayEq: al is not an array: ' + al);
+ return false;
}
if (!(ar instanceof Array)) {
- if (v)
- print('arrayEq: ar is not an array: ' + ar);
- return false;
+ if (v)
+ print('arrayEq: ar is not an array: ' + ar);
+ return false;
}
if (al.length != ar.length) {
- if (v)
- print('arrayEq: array lengths do not match: ' + al +
- ', ' + ar);
- return false;
+ if (v)
+ print('arrayEq: array lengths do not match: ' + al + ', ' + ar);
+ return false;
}
var i = 0;
var j = 0;
- while ( i < al.length ) {
- if (anyEq(al[i], ar[j], v) ) {
+ while (i < al.length) {
+ if (anyEq(al[i], ar[j], v)) {
j = 0;
i++;
- } else if ( j < ar.length ) {
+ } else if (j < ar.length) {
j++;
} else {
return false;
@@ -150,8 +147,8 @@ function arrayShallowCopy(a) {
assert(a instanceof Array, 'arrayShallowCopy: argument is not an array');
var c = [];
- for(var i = 0; i < a.length; ++i)
- c.push(a[i]);
+ for (var i = 0; i < a.length; ++i)
+ c.push(a[i]);
return c;
}
@@ -165,46 +162,45 @@ function arrayShallowCopy(a) {
*/
function resultsEq(rl, rr, v) {
if (typeof(v) == 'undefined')
- v = false;
+ v = false;
/* make clones of the arguments so that we don't damage them */
rl = arrayShallowCopy(rl);
rr = arrayShallowCopy(rr);
if (rl.length != rr.length) {
- if (v)
- print('resultsEq: array lengths do not match ' +
- rl + ', ' + rr);
- return false;
+ if (v)
+ print('resultsEq: array lengths do not match ' + rl + ', ' + rr);
+ return false;
}
- for(var i = 0; i < rl.length; ++i) {
- var foundIt = false;
-
- /* find a match in the other array */
- for(var j = 0; j < rr.length; ++j) {
- if (!anyEq(rl[i], rr[j], v))
- continue;
-
- /*
- Because we made the copies above, we can edit these out of the
- arrays so we don't check on them anymore.
-
- For the inner loop, we're going to be skipping out, so we don't
- need to be too careful.
- */
- rr.splice(j, 1);
- foundIt = true;
- break;
- /* TODO */
- }
-
- if (!foundIt) {
- /* if we got here, we didn't find this item */
- if (v)
- print('resultsEq: search target missing index: ' + i);
- return false;
- }
+ for (var i = 0; i < rl.length; ++i) {
+ var foundIt = false;
+
+ /* find a match in the other array */
+ for (var j = 0; j < rr.length; ++j) {
+ if (!anyEq(rl[i], rr[j], v))
+ continue;
+
+ /*
+ Because we made the copies above, we can edit these out of the
+ arrays so we don't check on them anymore.
+
+ For the inner loop, we're going to be skipping out, so we don't
+ need to be too careful.
+ */
+ rr.splice(j, 1);
+ foundIt = true;
+ break;
+ /* TODO */
+ }
+
+ if (!foundIt) {
+ /* if we got here, we didn't find this item */
+ if (v)
+ print('resultsEq: search target missing index: ' + i);
+ return false;
+ }
}
/* if we got here, everything matched */
@@ -214,16 +210,15 @@ function resultsEq(rl, rr, v) {
function orderedArrayEq(al, ar, v) {
if (al.length != ar.length) {
- if (v)
- print('orderedArrayEq: array lengths do not match ' +
- al + ', ' + ar);
- return false;
+ if (v)
+ print('orderedArrayEq: array lengths do not match ' + al + ', ' + ar);
+ return false;
}
/* check the elements in the array */
- for(var i = 0; i < al.length; ++i) {
- if (!anyEq(al[i], ar[i], v))
- return false;
+ for (var i = 0; i < al.length; ++i) {
+ if (!anyEq(al[i], ar[i], v))
+ return false;
}
/* if we got here, everything matched */
@@ -236,7 +231,6 @@ function orderedArrayEq(al, ar, v) {
*/
function assertErrorCode(coll, pipe, code, errmsg) {
-
if (!Array.isArray(pipe)) {
pipe = [pipe];
}
@@ -251,21 +245,26 @@ function assertErrorCode(coll, pipe, code, errmsg) {
assert.eq(res.code, code);
// Test with cursors
- var cmd = {pipeline: pipe};
+ var cmd = {
+ pipeline: pipe
+ };
// cmd.cursor = {};
- cmd.cursor = {batchSize: 0};
+ cmd.cursor = {
+ batchSize: 0
+ };
var cursorRes = coll.runCommand("aggregate", cmd);
if (cursorRes.ok) {
- var followupBatchSize = 0; // default
+ var followupBatchSize = 0; // default
var cursor = new DBCommandCursor(coll.getMongo(), cursorRes, followupBatchSize);
- var error = assert.throws(function(){cursor.itcount();}, [], "expected error: " + code);
+ var error = assert.throws(function() {
+ cursor.itcount();
+ }, [], "expected error: " + code);
if (!error.message.search(code)) {
assert(false, "expected error: " + code + " got: " + error);
}
- }
- else {
+ } else {
assert.eq(cursorRes.code, code);
}
}
diff --git a/jstests/aggregation/mongos_slaveok.js b/jstests/aggregation/mongos_slaveok.js
index 5a1b7203d48..91a0533d59e 100644
--- a/jstests/aggregation/mongos_slaveok.js
+++ b/jstests/aggregation/mongos_slaveok.js
@@ -4,40 +4,43 @@
*/
(function() {
-var NODES = 2;
+ var NODES = 2;
-var doTest = function(st, doSharded) {
- var testDB = st.s.getDB('test');
+ var doTest = function(st, doSharded) {
+ var testDB = st.s.getDB('test');
- if (doSharded) {
- testDB.adminCommand({ enableSharding: 'test' });
- testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
- }
+ if (doSharded) {
+ testDB.adminCommand({enableSharding: 'test'});
+ testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+ }
- testDB.user.insert({ x: 10 }, { writeConcern: { w: NODES }});
- testDB.setSlaveOk(true);
+ testDB.user.insert({x: 10}, {writeConcern: {w: NODES}});
+ testDB.setSlaveOk(true);
- var secNode = st.rs0.getSecondary();
- secNode.getDB('test').setProfilingLevel(2);
+ var secNode = st.rs0.getSecondary();
+ secNode.getDB('test').setProfilingLevel(2);
- // wait for mongos to recognize that the slave is up
- ReplSetTest.awaitRSClientHosts(st.s, secNode, {ok: true });
+ // wait for mongos to recognize that the slave is up
+ ReplSetTest.awaitRSClientHosts(st.s, secNode, {ok: true});
- var res = testDB.runCommand({ aggregate: 'user', pipeline: [{ $project: { x: 1 }}]});
- assert(res.ok, 'aggregate command failed: ' + tojson(res));
+ var res = testDB.runCommand({aggregate: 'user', pipeline: [{$project: {x: 1}}]});
+ assert(res.ok, 'aggregate command failed: ' + tojson(res));
- var profileQuery = { op: 'command', ns: 'test.user', 'command.aggregate': 'user' };
- var profileDoc = secNode.getDB('test').system.profile.findOne(profileQuery);
+ var profileQuery = {
+ op: 'command',
+ ns: 'test.user', 'command.aggregate': 'user'
+ };
+ var profileDoc = secNode.getDB('test').system.profile.findOne(profileQuery);
- assert(profileDoc != null);
- testDB.dropDatabase();
-};
+ assert(profileDoc != null);
+ testDB.dropDatabase();
+ };
-var st = new ShardingTest({ shards: { rs0: { oplogSize: 10, nodes: NODES } } });
+ var st = new ShardingTest({shards: {rs0: {oplogSize: 10, nodes: NODES}}});
-doTest(st, false);
-doTest(st, true);
+ doTest(st, false);
+ doTest(st, true);
-st.stop();
+ st.stop();
})();
diff --git a/jstests/aggregation/testSlave.js b/jstests/aggregation/testSlave.js
index 40cf1ed17a1..21a798a1ad2 100644
--- a/jstests/aggregation/testSlave.js
+++ b/jstests/aggregation/testSlave.js
@@ -1,5 +1,5 @@
// This test just make sure that aggregation is possible on a secondary node.
-var replTest = new ReplSetTest( {name: 'aggTestSlave', nodes: 2} );
+var replTest = new ReplSetTest({name: 'aggTestSlave', nodes: 2});
var nodes = replTest.startSet();
replTest.initiate();
replTest.awaitReplication();
@@ -7,12 +7,14 @@ replTest.awaitReplication();
var primary = replTest.getPrimary().getDB('test');
var secondary = replTest.getSecondary().getDB('test');
-var options = { writeConcern: { w: 2 }};
+var options = {
+ writeConcern: {w: 2}
+};
primary.agg.insert({}, options);
primary.agg.insert({}, options);
primary.agg.insert({}, options);
var res = secondary.agg.aggregate({$group: {_id: null, count: {$sum: 1}}});
-assert.eq(res.toArray(), [{_id:null, count: 3}]);
+assert.eq(res.toArray(), [{_id: null, count: 3}]);
replTest.stopSet();
diff --git a/jstests/aggregation/testall.js b/jstests/aggregation/testall.js
index 57a6b6147e6..2f17d955566 100644
--- a/jstests/aggregation/testall.js
+++ b/jstests/aggregation/testall.js
@@ -8,202 +8,105 @@ load('jstests/aggregation/data/articles.js');
// make sure we're using the right db; this is the same as "use mydb;" in shell
db = db.getSiblingDB("aggdb");
-
// just passing through fields
-var p1 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- tags : 1,
- pageViews : 1
- }}
-]});
+var p1 = db.runCommand({aggregate: "article", pipeline: [{$project: {tags: 1, pageViews: 1}}]});
var p1result = [
- {
- "_id" : 1,
- "pageViews" : 5,
- "tags" : [
- "fun",
- "good",
- "fun"
- ]
- },
- {
- "_id" : 2,
- "pageViews" : 7,
- "tags" : [
- "fun",
- "nasty"
- ]
- },
- {
- "_id" : 3,
- "pageViews" : 6,
- "tags" : [
- "nasty",
- "filthy"
- ]
- }
+ {"_id": 1, "pageViews": 5, "tags": ["fun", "good", "fun"]},
+ {"_id": 2, "pageViews": 7, "tags": ["fun", "nasty"]},
+ {"_id": 3, "pageViews": 6, "tags": ["nasty", "filthy"]}
];
assert.docEq(p1.result, p1result, 'p1 failed');
-
// a simple array unwinding
-var u1 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $unwind : "$tags" }
-]});
+var u1 = db.runCommand({aggregate: "article", pipeline: [{$unwind: "$tags"}]});
var u1result = [
{
- "_id" : 1,
- "title" : "this is my title",
- "author" : "bob",
- "posted" : ISODate("2004-03-21T18:59:54Z"),
- "pageViews" : 5,
- "tags" : "fun",
- "comments" : [
- {
- "author" : "joe",
- "text" : "this is cool"
- },
- {
- "author" : "sam",
- "text" : "this is bad"
- }
- ],
- "other" : {
- "foo" : 5
- }
- },
- {
- "_id" : 1,
- "title" : "this is my title",
- "author" : "bob",
- "posted" : ISODate("2004-03-21T18:59:54Z"),
- "pageViews" : 5,
- "tags" : "good",
- "comments" : [
- {
- "author" : "joe",
- "text" : "this is cool"
- },
- {
- "author" : "sam",
- "text" : "this is bad"
- }
- ],
- "other" : {
- "foo" : 5
- }
- },
- {
- "_id" : 1,
- "title" : "this is my title",
- "author" : "bob",
- "posted" : ISODate("2004-03-21T18:59:54Z"),
- "pageViews" : 5,
- "tags" : "fun",
- "comments" : [
- {
- "author" : "joe",
- "text" : "this is cool"
- },
- {
- "author" : "sam",
- "text" : "this is bad"
- }
- ],
- "other" : {
- "foo" : 5
- }
- },
- {
- "_id" : 2,
- "title" : "this is your title",
- "author" : "dave",
- "posted" : ISODate("2030-08-08T04:11:10Z"),
- "pageViews" : 7,
- "tags" : "fun",
- "comments" : [
- {
- "author" : "barbara",
- "text" : "this is interesting"
- },
- {
- "author" : "jenny",
- "text" : "i like to play pinball",
- "votes" : 10
- }
- ],
- "other" : {
- "bar" : 14
- }
- },
- {
- "_id" : 2,
- "title" : "this is your title",
- "author" : "dave",
- "posted" : ISODate("2030-08-08T04:11:10Z"),
- "pageViews" : 7,
- "tags" : "nasty",
- "comments" : [
- {
- "author" : "barbara",
- "text" : "this is interesting"
- },
- {
- "author" : "jenny",
- "text" : "i like to play pinball",
- "votes" : 10
- }
- ],
- "other" : {
- "bar" : 14
- }
- },
- {
- "_id" : 3,
- "title" : "this is some other title",
- "author" : "jane",
- "posted" : ISODate("2000-12-31T05:17:14Z"),
- "pageViews" : 6,
- "tags" : "nasty",
- "comments" : [
- {
- "author" : "will",
- "text" : "i don't like the color"
- },
- {
- "author" : "jenny",
- "text" : "can i get that in green?"
- }
- ],
- "other" : {
- "bar" : 14
- }
- },
- {
- "_id" : 3,
- "title" : "this is some other title",
- "author" : "jane",
- "posted" : ISODate("2000-12-31T05:17:14Z"),
- "pageViews" : 6,
- "tags" : "filthy",
- "comments" : [
- {
- "author" : "will",
- "text" : "i don't like the color"
- },
- {
- "author" : "jenny",
- "text" : "can i get that in green?"
- }
- ],
- "other" : {
- "bar" : 14
- }
+ "_id": 1,
+ "title": "this is my title",
+ "author": "bob",
+ "posted": ISODate("2004-03-21T18:59:54Z"),
+ "pageViews": 5,
+ "tags": "fun",
+ "comments":
+ [{"author": "joe", "text": "this is cool"}, {"author": "sam", "text": "this is bad"}],
+ "other": {"foo": 5}
+ },
+ {
+ "_id": 1,
+ "title": "this is my title",
+ "author": "bob",
+ "posted": ISODate("2004-03-21T18:59:54Z"),
+ "pageViews": 5,
+ "tags": "good",
+ "comments":
+ [{"author": "joe", "text": "this is cool"}, {"author": "sam", "text": "this is bad"}],
+ "other": {"foo": 5}
+ },
+ {
+ "_id": 1,
+ "title": "this is my title",
+ "author": "bob",
+ "posted": ISODate("2004-03-21T18:59:54Z"),
+ "pageViews": 5,
+ "tags": "fun",
+ "comments":
+ [{"author": "joe", "text": "this is cool"}, {"author": "sam", "text": "this is bad"}],
+ "other": {"foo": 5}
+ },
+ {
+ "_id": 2,
+ "title": "this is your title",
+ "author": "dave",
+ "posted": ISODate("2030-08-08T04:11:10Z"),
+ "pageViews": 7,
+ "tags": "fun",
+ "comments": [
+ {"author": "barbara", "text": "this is interesting"},
+ {"author": "jenny", "text": "i like to play pinball", "votes": 10}
+ ],
+ "other": {"bar": 14}
+ },
+ {
+ "_id": 2,
+ "title": "this is your title",
+ "author": "dave",
+ "posted": ISODate("2030-08-08T04:11:10Z"),
+ "pageViews": 7,
+ "tags": "nasty",
+ "comments": [
+ {"author": "barbara", "text": "this is interesting"},
+ {"author": "jenny", "text": "i like to play pinball", "votes": 10}
+ ],
+ "other": {"bar": 14}
+ },
+ {
+ "_id": 3,
+ "title": "this is some other title",
+ "author": "jane",
+ "posted": ISODate("2000-12-31T05:17:14Z"),
+ "pageViews": 6,
+ "tags": "nasty",
+ "comments": [
+ {"author": "will", "text": "i don't like the color"},
+ {"author": "jenny", "text": "can i get that in green?"}
+ ],
+ "other": {"bar": 14}
+ },
+ {
+ "_id": 3,
+ "title": "this is some other title",
+ "author": "jane",
+ "posted": ISODate("2000-12-31T05:17:14Z"),
+ "pageViews": 6,
+ "tags": "filthy",
+ "comments": [
+ {"author": "will", "text": "i don't like the color"},
+ {"author": "jenny", "text": "can i get that in green?"}
+ ],
+ "other": {"bar": 14}
}
];
@@ -211,1264 +114,672 @@ assert.docEq(u1.result, u1result, 'u1 failed');
// unwind an array at the end of a dotted path
db.ut.drop();
-db.ut.save({_id: 4, a:1, b:{e:7, f:[4, 3, 2, 1]}, c:12, d:17});
-var u2 = db.runCommand(
-{ aggregate : "ut", pipeline : [
- { $unwind : "$b.f" }
-]});
+db.ut.save({_id: 4, a: 1, b: {e: 7, f: [4, 3, 2, 1]}, c: 12, d: 17});
+var u2 = db.runCommand({aggregate: "ut", pipeline: [{$unwind: "$b.f"}]});
var u2result = [
- {
- "_id" : 4,
- "a" : 1,
- "b" : {
- "e" : 7,
- "f" : 4
- },
- "c" : 12,
- "d" : 17
- },
- {
- "_id" : 4,
- "a" : 1,
- "b" : {
- "e" : 7,
- "f" : 3
- },
- "c" : 12,
- "d" : 17
- },
- {
- "_id" : 4,
- "a" : 1,
- "b" : {
- "e" : 7,
- "f" : 2
- },
- "c" : 12,
- "d" : 17
- },
- {
- "_id" : 4,
- "a" : 1,
- "b" : {
- "e" : 7,
- "f" : 1
- },
- "c" : 12,
- "d" : 17
- }
+ {"_id": 4, "a": 1, "b": {"e": 7, "f": 4}, "c": 12, "d": 17},
+ {"_id": 4, "a": 1, "b": {"e": 7, "f": 3}, "c": 12, "d": 17},
+ {"_id": 4, "a": 1, "b": {"e": 7, "f": 2}, "c": 12, "d": 17},
+ {"_id": 4, "a": 1, "b": {"e": 7, "f": 1}, "c": 12, "d": 17}
];
assert.docEq(u2.result, u2result, 'u2 failed');
-
// combining a projection with unwinding an array
-var p2 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- author : 1,
- tags : 1,
- pageViews : 1
- }},
- { $unwind : "$tags" }
-]});
+var p2 = db.runCommand({
+ aggregate: "article",
+ pipeline: [{$project: {author: 1, tags: 1, pageViews: 1}}, {$unwind: "$tags"}]
+});
var p2result = [
- {
- "_id" : 1,
- "author" : "bob",
- "pageViews" : 5,
- "tags" : "fun"
- },
- {
- "_id" : 1,
- "author" : "bob",
- "pageViews" : 5,
- "tags" : "good"
- },
- {
- "_id" : 1,
- "author" : "bob",
- "pageViews" : 5,
- "tags" : "fun"
- },
- {
- "_id" : 2,
- "author" : "dave",
- "pageViews" : 7,
- "tags" : "fun"
- },
- {
- "_id" : 2,
- "author" : "dave",
- "pageViews" : 7,
- "tags" : "nasty"
- },
- {
- "_id" : 3,
- "author" : "jane",
- "pageViews" : 6,
- "tags" : "nasty"
- },
- {
- "_id" : 3,
- "author" : "jane",
- "pageViews" : 6,
- "tags" : "filthy"
- }
+ {"_id": 1, "author": "bob", "pageViews": 5, "tags": "fun"},
+ {"_id": 1, "author": "bob", "pageViews": 5, "tags": "good"},
+ {"_id": 1, "author": "bob", "pageViews": 5, "tags": "fun"},
+ {"_id": 2, "author": "dave", "pageViews": 7, "tags": "fun"},
+ {"_id": 2, "author": "dave", "pageViews": 7, "tags": "nasty"},
+ {"_id": 3, "author": "jane", "pageViews": 6, "tags": "nasty"},
+ {"_id": 3, "author": "jane", "pageViews": 6, "tags": "filthy"}
];
assert.docEq(p2.result, p2result, 'p2 failed');
-
// pulling values out of subdocuments
-var p3 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- otherfoo : "$other.foo",
- otherbar : "$other.bar"
- }}
-]});
-
-var p3result = [
- {
- "_id" : 1,
- "otherfoo" : 5
- },
- {
- "_id" : 2,
- "otherbar" : 14
- },
- {
- "_id" : 3,
- "otherbar" : 14
- }
-];
+var p3 = db.runCommand({
+ aggregate: "article",
+ pipeline: [{$project: {otherfoo: "$other.foo", otherbar: "$other.bar"}}]
+});
-assert.docEq(p3.result, p3result, 'p3 failed');
+var p3result = [{"_id": 1, "otherfoo": 5}, {"_id": 2, "otherbar": 14}, {"_id": 3, "otherbar": 14}];
+assert.docEq(p3.result, p3result, 'p3 failed');
// projection includes a computed value
-var p4 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- author : 1,
- daveWroteIt : { $eq:["$author", "dave"] }
- }}
-]});
+var p4 = db.runCommand({
+ aggregate: "article",
+ pipeline: [{$project: {author: 1, daveWroteIt: {$eq: ["$author", "dave"]}}}]
+});
var p4result = [
- {
- "_id" : 1,
- "author" : "bob",
- "daveWroteIt" : false
- },
- {
- "_id" : 2,
- "author" : "dave",
- "daveWroteIt" : true
- },
- {
- "_id" : 3,
- "author" : "jane",
- "daveWroteIt" : false
- }
+ {"_id": 1, "author": "bob", "daveWroteIt": false},
+ {"_id": 2, "author": "dave", "daveWroteIt": true},
+ {"_id": 3, "author": "jane", "daveWroteIt": false}
];
assert.docEq(p4.result, p4result, 'p4 failed');
-
// projection includes a virtual (fabricated) document
-var p5 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- author : 1,
- pageViews : 1,
- tags : 1
- }},
- { $unwind : "$tags" },
- { $project : {
- author : 1,
- subDocument : { foo : "$pageViews", bar : "$tags" }
- }}
-]});
+var p5 = db.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {author: 1, pageViews: 1, tags: 1}},
+ {$unwind: "$tags"},
+ {$project: {author: 1, subDocument: {foo: "$pageViews", bar: "$tags"}}}
+ ]
+});
var p5result = [
- {
- "_id" : 1,
- "author" : "bob",
- "subDocument" : {
- "foo" : 5,
- "bar" : "fun"
- }
- },
- {
- "_id" : 1,
- "author" : "bob",
- "subDocument" : {
- "foo" : 5,
- "bar" : "good"
- }
- },
- {
- "_id" : 1,
- "author" : "bob",
- "subDocument" : {
- "foo" : 5,
- "bar" : "fun"
- }
- },
- {
- "_id" : 2,
- "author" : "dave",
- "subDocument" : {
- "foo" : 7,
- "bar" : "fun"
- }
- },
- {
- "_id" : 2,
- "author" : "dave",
- "subDocument" : {
- "foo" : 7,
- "bar" : "nasty"
- }
- },
- {
- "_id" : 3,
- "author" : "jane",
- "subDocument" : {
- "foo" : 6,
- "bar" : "nasty"
- }
- },
- {
- "_id" : 3,
- "author" : "jane",
- "subDocument" : {
- "foo" : 6,
- "bar" : "filthy"
- }
- }
+ {"_id": 1, "author": "bob", "subDocument": {"foo": 5, "bar": "fun"}},
+ {"_id": 1, "author": "bob", "subDocument": {"foo": 5, "bar": "good"}},
+ {"_id": 1, "author": "bob", "subDocument": {"foo": 5, "bar": "fun"}},
+ {"_id": 2, "author": "dave", "subDocument": {"foo": 7, "bar": "fun"}},
+ {"_id": 2, "author": "dave", "subDocument": {"foo": 7, "bar": "nasty"}},
+ {"_id": 3, "author": "jane", "subDocument": {"foo": 6, "bar": "nasty"}},
+ {"_id": 3, "author": "jane", "subDocument": {"foo": 6, "bar": "filthy"}}
];
assert.docEq(p5.result, p5result, 'p5 failed');
-
// multi-step aggregate
// nested expressions in computed fields
-var p6 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- author : 1,
- tags : 1,
- pageViews : 1
- }},
- { $unwind : "$tags" },
- { $project : {
- author : 1,
- tag : "$tags",
- pageViews : 1,
- daveWroteIt : { $eq:["$author", "dave"] },
- weLikeIt : { $or:[ { $eq:["$author", "dave"] },
- { $eq:["$tags", "good"] } ] }
- }}
-]});
+var p6 = db.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {author: 1, tags: 1, pageViews: 1}},
+ {$unwind: "$tags"},
+ {
+ $project: {
+ author: 1,
+ tag: "$tags",
+ pageViews: 1,
+ daveWroteIt: {$eq: ["$author", "dave"]},
+ weLikeIt: {$or: [{$eq: ["$author", "dave"]}, {$eq: ["$tags", "good"]}]}
+ }
+ }
+ ]
+});
var p6result = [
{
- "_id" : 1,
- "author" : "bob",
- "pageViews" : 5,
- "tag" : "fun",
- "daveWroteIt" : false,
- "weLikeIt" : false
+ "_id": 1,
+ "author": "bob",
+ "pageViews": 5,
+ "tag": "fun",
+ "daveWroteIt": false,
+ "weLikeIt": false
},
{
- "_id" : 1,
- "author" : "bob",
- "pageViews" : 5,
- "tag" : "good",
- "daveWroteIt" : false,
- "weLikeIt" : true
+ "_id": 1,
+ "author": "bob",
+ "pageViews": 5,
+ "tag": "good",
+ "daveWroteIt": false,
+ "weLikeIt": true
},
{
- "_id" : 1,
- "author" : "bob",
- "pageViews" : 5,
- "tag" : "fun",
- "daveWroteIt" : false,
- "weLikeIt" : false
+ "_id": 1,
+ "author": "bob",
+ "pageViews": 5,
+ "tag": "fun",
+ "daveWroteIt": false,
+ "weLikeIt": false
},
{
- "_id" : 2,
- "author" : "dave",
- "pageViews" : 7,
- "tag" : "fun",
- "daveWroteIt" : true,
- "weLikeIt" : true
+ "_id": 2,
+ "author": "dave",
+ "pageViews": 7,
+ "tag": "fun",
+ "daveWroteIt": true,
+ "weLikeIt": true
},
{
- "_id" : 2,
- "author" : "dave",
- "pageViews" : 7,
- "tag" : "nasty",
- "daveWroteIt" : true,
- "weLikeIt" : true
+ "_id": 2,
+ "author": "dave",
+ "pageViews": 7,
+ "tag": "nasty",
+ "daveWroteIt": true,
+ "weLikeIt": true
},
{
- "_id" : 3,
- "author" : "jane",
- "pageViews" : 6,
- "tag" : "nasty",
- "daveWroteIt" : false,
- "weLikeIt" : false
+ "_id": 3,
+ "author": "jane",
+ "pageViews": 6,
+ "tag": "nasty",
+ "daveWroteIt": false,
+ "weLikeIt": false
},
{
- "_id" : 3,
- "author" : "jane",
- "pageViews" : 6,
- "tag" : "filthy",
- "daveWroteIt" : false,
- "weLikeIt" : false
+ "_id": 3,
+ "author": "jane",
+ "pageViews": 6,
+ "tag": "filthy",
+ "daveWroteIt": false,
+ "weLikeIt": false
}
];
assert.docEq(p6.result, p6result, 'p6 failed');
-
// slightly more complex computed expression; $ifNull
-var p7 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- theSum : { $add:["$pageViews",
- { $ifNull:["$other.foo",
- "$other.bar"] } ] }
- }}
-]});
-
-var p7result = [
- {
- "_id" : 1,
- "theSum" : 10
- },
- {
- "_id" : 2,
- "theSum" : 21
- },
- {
- "_id" : 3,
- "theSum" : 20
- }
-];
+var p7 = db.runCommand({
+ aggregate: "article",
+ pipeline:
+ [{$project: {theSum: {$add: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}}}]
+});
-assert.docEq(p7.result, p7result, 'p7 failed');
+var p7result = [{"_id": 1, "theSum": 10}, {"_id": 2, "theSum": 21}, {"_id": 3, "theSum": 20}];
+assert.docEq(p7.result, p7result, 'p7 failed');
// dotted path inclusion; _id exclusion
-var p8 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- _id : 0,
- author : 1,
- tags : 1,
- "comments.author" : 1
- }},
- { $unwind : "$tags" }
-]});
+var p8 = db.runCommand({
+ aggregate: "article",
+ pipeline:
+ [{$project: {_id: 0, author: 1, tags: 1, "comments.author": 1}}, {$unwind: "$tags"}]
+});
var p8result = [
- {
- "author" : "bob",
- "tags" : "fun",
- "comments" : [
- {
- "author" : "joe"
- },
- {
- "author" : "sam"
- }
- ]
- },
- {
- "author" : "bob",
- "tags" : "good",
- "comments" : [
- {
- "author" : "joe"
- },
- {
- "author" : "sam"
- }
- ]
- },
- {
- "author" : "bob",
- "tags" : "fun",
- "comments" : [
- {
- "author" : "joe"
- },
- {
- "author" : "sam"
- }
- ]
- },
- {
- "author" : "dave",
- "tags" : "fun",
- "comments" : [
- {
- "author" : "barbara"
- },
- {
- "author" : "jenny"
- }
- ]
- },
- {
- "author" : "dave",
- "tags" : "nasty",
- "comments" : [
- {
- "author" : "barbara"
- },
- {
- "author" : "jenny"
- }
- ]
- },
- {
- "author" : "jane",
- "tags" : "nasty",
- "comments" : [
- {
- "author" : "will"
- },
- {
- "author" : "jenny"
- }
- ]
- },
- {
- "author" : "jane",
- "tags" : "filthy",
- "comments" : [
- {
- "author" : "will"
- },
- {
- "author" : "jenny"
- }
- ]
- }
+ {"author": "bob", "tags": "fun", "comments": [{"author": "joe"}, {"author": "sam"}]},
+ {"author": "bob", "tags": "good", "comments": [{"author": "joe"}, {"author": "sam"}]},
+ {"author": "bob", "tags": "fun", "comments": [{"author": "joe"}, {"author": "sam"}]},
+ {"author": "dave", "tags": "fun", "comments": [{"author": "barbara"}, {"author": "jenny"}]},
+ {"author": "dave", "tags": "nasty", "comments": [{"author": "barbara"}, {"author": "jenny"}]},
+ {"author": "jane", "tags": "nasty", "comments": [{"author": "will"}, {"author": "jenny"}]},
+ {"author": "jane", "tags": "filthy", "comments": [{"author": "will"}, {"author": "jenny"}]}
];
assert.docEq(p8.result, p8result, 'p8 failed');
-
// collapse a dotted path with an intervening array
-var p9 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- _id : 0,
- author : 1,
- commentsAuthor : "$comments.author"
- }}
-]});
+var p9 = db.runCommand({
+ aggregate: "article",
+ pipeline: [{$project: {_id: 0, author: 1, commentsAuthor: "$comments.author"}}]
+});
var p9result = [
- {
- "author" : "bob",
- "commentsAuthor" : [
- "joe",
- "sam"
- ]
- },
- {
- "author" : "dave",
- "commentsAuthor" : [
- "barbara",
- "jenny"
- ]
- },
- {
- "author" : "jane",
- "commentsAuthor" : [
- "will",
- "jenny"
- ]
- }
+ {"author": "bob", "commentsAuthor": ["joe", "sam"]},
+ {"author": "dave", "commentsAuthor": ["barbara", "jenny"]},
+ {"author": "jane", "commentsAuthor": ["will", "jenny"]}
];
assert.docEq(p9.result, p9result, 'p9 failed');
-
// simple sort
-var p10 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $sort : { title : 1 }
- }
-]});
+var p10 = db.runCommand({aggregate: "article", pipeline: [{$sort: {title: 1}}]});
var p10result = [
{
- "_id" : 1,
- "title" : "this is my title",
- "author" : "bob",
- "posted" : ISODate("2004-03-21T18:59:54Z"),
- "pageViews" : 5,
- "tags" : [
- "fun",
- "good",
- "fun"
- ],
- "comments" : [
- {
- "author" : "joe",
- "text" : "this is cool"
- },
- {
- "author" : "sam",
- "text" : "this is bad"
- }
- ],
- "other" : {
- "foo" : 5
- }
- },
- {
- "_id" : 3,
- "title" : "this is some other title",
- "author" : "jane",
- "posted" : ISODate("2000-12-31T05:17:14Z"),
- "pageViews" : 6,
- "tags" : [
- "nasty",
- "filthy"
- ],
- "comments" : [
- {
- "author" : "will",
- "text" : "i don't like the color"
- },
- {
- "author" : "jenny",
- "text" : "can i get that in green?"
- }
- ],
- "other" : {
- "bar" : 14
- }
- },
- {
- "_id" : 2,
- "title" : "this is your title",
- "author" : "dave",
- "posted" : ISODate("2030-08-08T04:11:10Z"),
- "pageViews" : 7,
- "tags" : [
- "fun",
- "nasty"
- ],
- "comments" : [
- {
- "author" : "barbara",
- "text" : "this is interesting"
- },
- {
- "author" : "jenny",
- "text" : "i like to play pinball",
- "votes" : 10
- }
- ],
- "other" : {
- "bar" : 14
- }
+ "_id": 1,
+ "title": "this is my title",
+ "author": "bob",
+ "posted": ISODate("2004-03-21T18:59:54Z"),
+ "pageViews": 5,
+ "tags": ["fun", "good", "fun"],
+ "comments":
+ [{"author": "joe", "text": "this is cool"}, {"author": "sam", "text": "this is bad"}],
+ "other": {"foo": 5}
+ },
+ {
+ "_id": 3,
+ "title": "this is some other title",
+ "author": "jane",
+ "posted": ISODate("2000-12-31T05:17:14Z"),
+ "pageViews": 6,
+ "tags": ["nasty", "filthy"],
+ "comments": [
+ {"author": "will", "text": "i don't like the color"},
+ {"author": "jenny", "text": "can i get that in green?"}
+ ],
+ "other": {"bar": 14}
+ },
+ {
+ "_id": 2,
+ "title": "this is your title",
+ "author": "dave",
+ "posted": ISODate("2030-08-08T04:11:10Z"),
+ "pageViews": 7,
+ "tags": ["fun", "nasty"],
+ "comments": [
+ {"author": "barbara", "text": "this is interesting"},
+ {"author": "jenny", "text": "i like to play pinball", "votes": 10}
+ ],
+ "other": {"bar": 14}
}
];
assert.docEq(p10.result, p10result, 'p10 failed');
-
// unwind on nested array
db.p11.drop();
-db.p11.save( {
- _id : 5,
- name : 'MongoDB',
- items : {
- authors : ['jay', 'vivek', 'bjornar'],
- dbg : [17, 42]
- },
- favorites : ['pickles', 'ice cream', 'kettle chips']
+db.p11.save({
+ _id: 5,
+ name: 'MongoDB',
+ items: {authors: ['jay', 'vivek', 'bjornar'], dbg: [17, 42]},
+ favorites: ['pickles', 'ice cream', 'kettle chips']
});
-var p11 = db.runCommand(
-{ aggregate : "p11", pipeline : [
- { $unwind : "$items.authors" },
- { $project : {
- name : 1,
- author : "$items.authors"
- }},
-]});
+var p11 = db.runCommand({
+ aggregate: "p11",
+ pipeline: [{$unwind: "$items.authors"}, {$project: {name: 1, author: "$items.authors"}}, ]
+});
p11result = [
- {
- "_id" : 5,
- "name" : "MongoDB",
- "author" : "jay"
- },
- {
- "_id" : 5,
- "name" : "MongoDB",
- "author" : "vivek"
- },
- {
- "_id" : 5,
- "name" : "MongoDB",
- "author" : "bjornar"
- }
+ {"_id": 5, "name": "MongoDB", "author": "jay"},
+ {"_id": 5, "name": "MongoDB", "author": "vivek"},
+ {"_id": 5, "name": "MongoDB", "author": "bjornar"}
];
assert.docEq(p11.result, p11result, 'p11 failed');
-
// multiply test
-var p12 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- theProduct : { $multiply:["$pageViews",
- { $ifNull:["$other.foo",
- "$other.bar"] } ] }
- }}
-]});
-
-var p12result = [
- {
- "_id" : 1,
- "theProduct" : 25
- },
- {
- "_id" : 2,
- "theProduct" : 98
- },
- {
- "_id" : 3,
- "theProduct" : 84
- }
-];
+var p12 = db.runCommand({
+ aggregate: "article",
+ pipeline: [{
+ $project:
+ {theProduct: {$multiply: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}}
+ }]
+});
-assert.docEq(p12.result, p12result, 'p12 failed');
+var p12result =
+ [{"_id": 1, "theProduct": 25}, {"_id": 2, "theProduct": 98}, {"_id": 3, "theProduct": 84}];
+assert.docEq(p12.result, p12result, 'p12 failed');
// subtraction test
-var p13 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- theDifference : { $subtract:["$pageViews",
- { $ifNull:["$other.foo",
- "$other.bar"] } ] }
- }}
-]});
+var p13 = db.runCommand({
+ aggregate: "article",
+ pipeline: [{
+ $project: {
+ theDifference:
+ {$subtract: ["$pageViews", {$ifNull: ["$other.foo", "$other.bar"]}]}
+ }
+ }]
+});
var p13result = [
- {
- "_id" : 1,
- "theDifference" : 0
- },
- {
- "_id" : 2,
- "theDifference" : -7
- },
- {
- "_id" : 3,
- "theDifference" : -8
- }
+ {"_id": 1, "theDifference": 0},
+ {"_id": 2, "theDifference": -7},
+ {"_id": 3, "theDifference": -8}
];
assert.docEq(p13.result, p13result, 'p13 failed');
-
// mod test
-var p14 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- theRemainder : { $mod:[
- { $ifNull:["$other.foo",
- "$other.bar"] },
- "$pageViews", ] }
- }}
-]});
-
-var p14result = [
- {
- "_id" : 1,
- "theRemainder" : 0
- },
- {
- "_id" : 2,
- "theRemainder" : 0
- },
- {
- "_id" : 3,
- "theRemainder" : 2
- }
-];
+var p14 = db.runCommand({
+ aggregate: "article",
+ pipeline: [{
+ $project:
+ {theRemainder: {$mod: [{$ifNull: ["$other.foo", "$other.bar"]}, "$pageViews", ]}}
+ }]
+});
-assert.docEq(p14.result, p14result, 'p14 failed');
+var p14result =
+ [{"_id": 1, "theRemainder": 0}, {"_id": 2, "theRemainder": 0}, {"_id": 3, "theRemainder": 2}];
+assert.docEq(p14.result, p14result, 'p14 failed');
// toUpper test
var p15 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- author : {$toUpper: "$author"},
- pageViews : 1
- }}
-]});
+ {aggregate: "article", pipeline: [{$project: {author: {$toUpper: "$author"}, pageViews: 1}}]});
var p15result = [
- {
- "_id" : 1,
- "author" : "BOB",
- "pageViews" : 5
- },
- {
- "_id" : 2,
- "author" : "DAVE",
- "pageViews" : 7
- },
- {
- "_id" : 3,
- "author" : "JANE",
- "pageViews" : 6
- }
+ {"_id": 1, "author": "BOB", "pageViews": 5},
+ {"_id": 2, "author": "DAVE", "pageViews": 7},
+ {"_id": 3, "author": "JANE", "pageViews": 6}
];
assert.docEq(p15.result, p15result, 'p15 failed');
-
// toLower test
-var p16 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- author : {$toUpper: "$author"},
- pageViews : 1
- }},
- { $project : {
- author : {$toLower: "$author"},
- pageViews : 1
- }}
-]});
+var p16 = db.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {author: {$toUpper: "$author"}, pageViews: 1}},
+ {$project: {author: {$toLower: "$author"}, pageViews: 1}}
+ ]
+});
var p16result = [
{
- "_id" : 1,
- "author" : "bob",
- "pageViews" : 5,
+ "_id": 1,
+ "author": "bob",
+ "pageViews": 5,
},
{
- "_id" : 2,
- "author" : "dave",
- "pageViews" : 7,
+ "_id": 2,
+ "author": "dave",
+ "pageViews": 7,
},
{
- "_id" : 3,
- "author" : "jane",
- "pageViews" : 6,
+ "_id": 3,
+ "author": "jane",
+ "pageViews": 6,
}
];
assert.docEq(p16.result, p16result, 'p16 failed');
-
// substr test
-var p17 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- author : {$substr: ["$author", 1, 2]},
- }}
-]});
-
-var p17result = [
- {
- "_id" : 1,
- "author" : "ob"
- },
- {
- "_id" : 2,
- "author" : "av"
- },
- {
- "_id" : 3,
- "author" : "an"
- }
-];
+var p17 = db.runCommand({
+ aggregate: "article",
+ pipeline: [{
+ $project: {
+ author: {$substr: ["$author", 1, 2]},
+ }
+ }]
+});
-assert.docEq(p17.result, p17result, 'p17 failed');
+var p17result =
+ [{"_id": 1, "author": "ob"}, {"_id": 2, "author": "av"}, {"_id": 3, "author": "an"}];
+assert.docEq(p17.result, p17result, 'p17 failed');
// strcasecmp test
-var p18 = db.runCommand(
-{aggregate : "article", pipeline : [
- { $project : {
- tags : 1,
- thisisalametest : {$strcasecmp: ["foo","bar"]},
- thisisalamepass : {$strcasecmp: ["foo","foo"]}
- }}
-]});
+var p18 = db.runCommand({
+ aggregate: "article",
+ pipeline: [{
+ $project: {
+ tags: 1,
+ thisisalametest: {$strcasecmp: ["foo", "bar"]},
+ thisisalamepass: {$strcasecmp: ["foo", "foo"]}
+ }
+ }]
+});
var p18result = [
- {
- "_id" : 1,
- "tags" : [
- "fun",
- "good",
- "fun"
- ],
- "thisisalametest" : 1,
- "thisisalamepass" : 0
- },
- {
- "_id" : 2,
- "tags" : [
- "fun",
- "nasty"
- ],
- "thisisalametest" : 1,
- "thisisalamepass" : 0
- },
- {
- "_id" : 3,
- "tags" : [
- "nasty",
- "filthy"
- ],
- "thisisalametest" : 1,
- "thisisalamepass" : 0
- }
+ {"_id": 1, "tags": ["fun", "good", "fun"], "thisisalametest": 1, "thisisalamepass": 0},
+ {"_id": 2, "tags": ["fun", "nasty"], "thisisalametest": 1, "thisisalamepass": 0},
+ {"_id": 3, "tags": ["nasty", "filthy"], "thisisalametest": 1, "thisisalamepass": 0}
];
assert.docEq(p18.result, p18result, 'p18 failed');
-
// date tests
-var p19 = db.runCommand({aggregate : "article", pipeline : [
- { $project : {
- authors: 1,
- posted: 1,
- seconds: {$second: "$posted"},
- minutes: {$minute: "$posted"},
- hour: {$hour: "$posted"},
- dayOfYear: {$dayOfYear: "$posted"},
- dayOfMonth: {$dayOfMonth: "$posted"},
- dayOfWeek: {$dayOfWeek: "$posted"},
- month: {$month: "$posted"},
- week: {$week: "$posted"},
- year: {$year: "$posted"}
- }}
-]});
+var p19 = db.runCommand({
+ aggregate: "article",
+ pipeline: [{
+ $project: {
+ authors: 1,
+ posted: 1,
+ seconds: {$second: "$posted"},
+ minutes: {$minute: "$posted"},
+ hour: {$hour: "$posted"},
+ dayOfYear: {$dayOfYear: "$posted"},
+ dayOfMonth: {$dayOfMonth: "$posted"},
+ dayOfWeek: {$dayOfWeek: "$posted"},
+ month: {$month: "$posted"},
+ week: {$week: "$posted"},
+ year: {$year: "$posted"}
+ }
+ }]
+});
var p19result = [
{
- "_id" : 1,
- "posted" : ISODate("2004-03-21T18:59:54Z"),
- "seconds" : 54,
- "minutes" : 59,
- "hour" : 18,
- "dayOfYear" : 81,
- "dayOfMonth" : 21,
- "dayOfWeek" : 1,
- "month" : 3,
- "week" : 12,
- "year" : 2004,
- },
- {
- "_id" : 2,
- "posted" : ISODate("2030-08-08T04:11:10Z"),
- "seconds" : 10,
- "minutes" : 11,
- "hour" : 4,
- "dayOfYear" : 220,
- "dayOfMonth" : 8,
- "dayOfWeek" : 5,
- "month" : 8,
- "week" : 31,
- "year" : 2030,
- },
- {
- "_id" : 3,
- "posted" : ISODate("2000-12-31T05:17:14Z"),
- "seconds" : 14,
- "minutes" : 17,
- "hour" : 5,
- "dayOfYear" : 366,
- "dayOfMonth" : 31,
- "dayOfWeek" : 1,
- "month" : 12,
- "week" : 53,
- "year" : 2000,
+ "_id": 1,
+ "posted": ISODate("2004-03-21T18:59:54Z"),
+ "seconds": 54,
+ "minutes": 59,
+ "hour": 18,
+ "dayOfYear": 81,
+ "dayOfMonth": 21,
+ "dayOfWeek": 1,
+ "month": 3,
+ "week": 12,
+ "year": 2004,
+ },
+ {
+ "_id": 2,
+ "posted": ISODate("2030-08-08T04:11:10Z"),
+ "seconds": 10,
+ "minutes": 11,
+ "hour": 4,
+ "dayOfYear": 220,
+ "dayOfMonth": 8,
+ "dayOfWeek": 5,
+ "month": 8,
+ "week": 31,
+ "year": 2030,
+ },
+ {
+ "_id": 3,
+ "posted": ISODate("2000-12-31T05:17:14Z"),
+ "seconds": 14,
+ "minutes": 17,
+ "hour": 5,
+ "dayOfYear": 366,
+ "dayOfMonth": 31,
+ "dayOfWeek": 1,
+ "month": 12,
+ "week": 53,
+ "year": 2000,
}
];
assert.docEq(p19.result, p19result, 'p19 failed');
-
db.vartype.drop();
-db.vartype.save({ x : 17, y : "foo"});
+db.vartype.save({x: 17, y: "foo"});
// ternary conditional operator
-var p21 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- _id : 0,
- author : 1,
- pageViews : { $cond : [ {$eq:["$author", "dave"]},
- {$add:["$pageViews", 1000]}, "$pageViews" ]
+var p21 = db.runCommand({
+ aggregate: "article",
+ pipeline: [{
+ $project: {
+ _id: 0,
+ author: 1,
+ pageViews: {
+ $cond:
+ [{$eq: ["$author", "dave"]}, {$add: ["$pageViews", 1000]}, "$pageViews"]
+ }
}
- }}
-]});
+ }]
+});
var p21result = [
- {
- "author" : "bob",
- "pageViews" : 5
- },
- {
- "author" : "dave",
- "pageViews" : 1007
- },
- {
- "author" : "jane",
- "pageViews" : 6
- }
+ {"author": "bob", "pageViews": 5},
+ {"author": "dave", "pageViews": 1007},
+ {"author": "jane", "pageViews": 6}
];
assert.docEq(p21.result, p21result, 'p21 failed');
-
// simple matching
-var m1 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $match : { author : "dave" } }
-]});
-
-var m1result = [
- {
- "_id" : 2,
- "title" : "this is your title",
- "author" : "dave",
- "posted" : ISODate("2030-08-08T04:11:10Z"),
- "pageViews" : 7,
- "tags" : [
- "fun",
- "nasty"
- ],
- "comments" : [
- {
- "author" : "barbara",
- "text" : "this is interesting"
- },
- {
- "author" : "jenny",
- "text" : "i like to play pinball",
- "votes" : 10
- }
- ],
- "other" : {
- "bar" : 14
- }
- }
-];
+var m1 = db.runCommand({aggregate: "article", pipeline: [{$match: {author: "dave"}}]});
+
+var m1result = [{
+ "_id": 2,
+ "title": "this is your title",
+ "author": "dave",
+ "posted": ISODate("2030-08-08T04:11:10Z"),
+ "pageViews": 7,
+ "tags": ["fun", "nasty"],
+ "comments": [
+ {"author": "barbara", "text": "this is interesting"},
+ {"author": "jenny", "text": "i like to play pinball", "votes": 10}
+ ],
+ "other": {"bar": 14}
+}];
assert.docEq(m1.result, m1result, 'm1 failed');
-
// combining matching with a projection
-var m2 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- title : 1,
- author : 1,
- pageViews : 1,
- tags : 1,
- comments : 1
- }},
- { $unwind : "$tags" },
- { $match : { tags : "nasty" } }
-]});
+var m2 = db.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {title: 1, author: 1, pageViews: 1, tags: 1, comments: 1}},
+ {$unwind: "$tags"},
+ {$match: {tags: "nasty"}}
+ ]
+});
var m2result = [
{
- "_id" : 2,
- "title" : "this is your title",
- "author" : "dave",
- "pageViews" : 7,
- "tags" : "nasty",
- "comments" : [
- {
- "author" : "barbara",
- "text" : "this is interesting"
- },
- {
- "author" : "jenny",
- "text" : "i like to play pinball",
- "votes" : 10
- }
- ]
- },
- {
- "_id" : 3,
- "title" : "this is some other title",
- "author" : "jane",
- "pageViews" : 6,
- "tags" : "nasty",
- "comments" : [
- {
- "author" : "will",
- "text" : "i don't like the color"
- },
- {
- "author" : "jenny",
- "text" : "can i get that in green?"
- }
- ]
+ "_id": 2,
+ "title": "this is your title",
+ "author": "dave",
+ "pageViews": 7,
+ "tags": "nasty",
+ "comments": [
+ {"author": "barbara", "text": "this is interesting"},
+ {"author": "jenny", "text": "i like to play pinball", "votes": 10}
+ ]
+ },
+ {
+ "_id": 3,
+ "title": "this is some other title",
+ "author": "jane",
+ "pageViews": 6,
+ "tags": "nasty",
+ "comments": [
+ {"author": "will", "text": "i don't like the color"},
+ {"author": "jenny", "text": "can i get that in green?"}
+ ]
}
];
assert.docEq(m2.result, m2result, 'm2 failed');
-
// group by tag, _id is a field reference
-var g1 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- author : 1,
- tags : 1,
- pageViews : 1
- }},
- { $unwind : "$tags" },
- { $group : {
- _id : "$tags",
- docsByTag : { $sum : 1 },
- viewsByTag : { $sum : "$pageViews" }
- }},
- {$sort: {'_id': 1}}
-]});
+var g1 = db.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {author: 1, tags: 1, pageViews: 1}},
+ {$unwind: "$tags"},
+ {$group: {_id: "$tags", docsByTag: {$sum: 1}, viewsByTag: {$sum: "$pageViews"}}},
+ {$sort: {'_id': 1}}
+ ]
+});
var g1result = [
- {
- "_id" : "filthy",
- "docsByTag" : 1,
- "viewsByTag" : 6
- },
- {
- "_id" : "fun",
- "docsByTag" : 3,
- "viewsByTag" : 17
- },
- {
- "_id" : "good",
- "docsByTag" : 1,
- "viewsByTag" : 5
- },
- {
- "_id" : "nasty",
- "docsByTag" : 2,
- "viewsByTag" : 13
- },
+ {"_id": "filthy", "docsByTag": 1, "viewsByTag": 6},
+ {"_id": "fun", "docsByTag": 3, "viewsByTag": 17},
+ {"_id": "good", "docsByTag": 1, "viewsByTag": 5},
+ {"_id": "nasty", "docsByTag": 2, "viewsByTag": 13},
];
assert.docEq(g1.result, g1result, 'g1 failed');
-
// $max, and averaging in a final projection; _id is structured
-var g2 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- author : 1,
- tags : 1,
- pageViews : 1
- }},
- { $unwind : "$tags" },
- { $group : {
- _id: { tags : "$tags" },
- docsByTag : { $sum : 1 },
- viewsByTag : { $sum : "$pageViews" },
- mostViewsByTag : { $max : "$pageViews" },
- }},
- { $project : {
- _id: false,
- tag : "$_id.tags",
- mostViewsByTag : 1,
- docsByTag : 1,
- viewsByTag : 1,
- avgByTag : { $divide:["$viewsByTag", "$docsByTag"] }
- }},
- {$sort: {'docsByTag': 1, 'viewsByTag': 1}}
-]});
+var g2 = db.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {author: 1, tags: 1, pageViews: 1}},
+ {$unwind: "$tags"},
+ {
+ $group: {
+ _id: {tags: "$tags"},
+ docsByTag: {$sum: 1},
+ viewsByTag: {$sum: "$pageViews"},
+ mostViewsByTag: {$max: "$pageViews"},
+ }
+ },
+ {
+ $project: {
+ _id: false,
+ tag: "$_id.tags",
+ mostViewsByTag: 1,
+ docsByTag: 1,
+ viewsByTag: 1,
+ avgByTag: {$divide: ["$viewsByTag", "$docsByTag"]}
+ }
+ },
+ {$sort: {'docsByTag': 1, 'viewsByTag': 1}}
+ ]
+});
var g2result = [
- {
- "docsByTag" : 1,
- "viewsByTag" : 5,
- "mostViewsByTag" : 5,
- "tag" : "good",
- "avgByTag" : 5
- },
- {
- "docsByTag" : 1,
- "viewsByTag" : 6,
- "mostViewsByTag" : 6,
- "tag" : "filthy",
- "avgByTag" : 6
- },
- {
- "docsByTag" : 2,
- "viewsByTag" : 13,
- "mostViewsByTag" : 7,
- "tag" : "nasty",
- "avgByTag" : 6.5
- },
- {
- "docsByTag" : 3,
- "viewsByTag" : 17,
- "mostViewsByTag" : 7,
- "tag" : "fun",
- "avgByTag" : 5.666666666666667
+ {"docsByTag": 1, "viewsByTag": 5, "mostViewsByTag": 5, "tag": "good", "avgByTag": 5},
+ {"docsByTag": 1, "viewsByTag": 6, "mostViewsByTag": 6, "tag": "filthy", "avgByTag": 6},
+ {"docsByTag": 2, "viewsByTag": 13, "mostViewsByTag": 7, "tag": "nasty", "avgByTag": 6.5},
+ {
+ "docsByTag": 3,
+ "viewsByTag": 17,
+ "mostViewsByTag": 7,
+ "tag": "fun",
+ "avgByTag": 5.666666666666667
}
];
assert.docEq(g2.result, g2result, 'g2 failed');
-
// $push as an accumulator; can pivot data
-var g3 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- author : 1,
- tags : 1,
- }},
- { $unwind : "$tags" },
- { $group : {
- _id : { tags : "$tags" },
- authors : { $push : "$author" }
- }},
- {$sort: {'_id': 1}}
-]});
+var g3 = db.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {
+ $project: {
+ author: 1,
+ tags: 1,
+ }
+ },
+ {$unwind: "$tags"},
+ {$group: {_id: {tags: "$tags"}, authors: {$push: "$author"}}},
+ {$sort: {'_id': 1}}
+ ]
+});
var g3result = [
- {
- "_id" : {
- "tags" : "filthy"
- },
- "authors" : [
- "jane"
- ]
- },
- {
- "_id" : {
- "tags" : "fun"
- },
- "authors" : [
- "bob",
- "bob",
- "dave"
- ]
- },
- {
- "_id" : {
- "tags" : "good"
- },
- "authors" : [
- "bob"
- ]
- },
- {
- "_id" : {
- "tags" : "nasty"
- },
- "authors" : [
- "dave",
- "jane"
- ]
- }
+ {"_id": {"tags": "filthy"}, "authors": ["jane"]},
+ {"_id": {"tags": "fun"}, "authors": ["bob", "bob", "dave"]},
+ {"_id": {"tags": "good"}, "authors": ["bob"]},
+ {"_id": {"tags": "nasty"}, "authors": ["dave", "jane"]}
];
assert.docEq(g3.result, g3result, 'g3 failed');
-
// $avg, and averaging in a final projection
-var g4 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- author : 1,
- tags : 1,
- pageViews : 1
- }},
- { $unwind : "$tags" },
- { $group : {
- _id: { tags : "$tags" },
- docsByTag : { $sum : 1 },
- viewsByTag : { $sum : "$pageViews" },
- avgByTag : { $avg : "$pageViews" },
- }},
- {$sort: {'_id': 1}}
-]});
+var g4 = db.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$project: {author: 1, tags: 1, pageViews: 1}},
+ {$unwind: "$tags"},
+ {
+ $group: {
+ _id: {tags: "$tags"},
+ docsByTag: {$sum: 1},
+ viewsByTag: {$sum: "$pageViews"},
+ avgByTag: {$avg: "$pageViews"},
+ }
+ },
+ {$sort: {'_id': 1}}
+ ]
+});
var g4result = [
- {
- "_id" : {
- "tags" : "filthy"
- },
- "docsByTag" : 1,
- "viewsByTag" : 6,
- "avgByTag" : 6
- },
- {
- "_id" : {
- "tags" : "fun"
- },
- "docsByTag" : 3,
- "viewsByTag" : 17,
- "avgByTag" : 5.666666666666667
- },
- {
- "_id" : {
- "tags" : "good"
- },
- "docsByTag" : 1,
- "viewsByTag" : 5,
- "avgByTag" : 5
- },
- {
- "_id" : {
- "tags" : "nasty"
- },
- "docsByTag" : 2,
- "viewsByTag" : 13,
- "avgByTag" : 6.5
- }
+ {"_id": {"tags": "filthy"}, "docsByTag": 1, "viewsByTag": 6, "avgByTag": 6},
+ {"_id": {"tags": "fun"}, "docsByTag": 3, "viewsByTag": 17, "avgByTag": 5.666666666666667},
+ {"_id": {"tags": "good"}, "docsByTag": 1, "viewsByTag": 5, "avgByTag": 5},
+ {"_id": {"tags": "nasty"}, "docsByTag": 2, "viewsByTag": 13, "avgByTag": 6.5}
];
assert.docEq(g4.result, g4result, 'g4 failed');
-
// $addToSet as an accumulator; can pivot data
-var g5 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $project : {
- author : 1,
- tags : 1,
- }},
- { $unwind : "$tags" },
- { $group : {
- _id : { tags : "$tags" },
- authors : { $addToSet : "$author" }
- }},
- {$sort: {'_id': 1}}
-]});
+var g5 = db.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {
+ $project: {
+ author: 1,
+ tags: 1,
+ }
+ },
+ {$unwind: "$tags"},
+ {$group: {_id: {tags: "$tags"}, authors: {$addToSet: "$author"}}},
+ {$sort: {'_id': 1}}
+ ]
+});
// $addToSet doesn't guarantee order so we shouldn't test for it.
g5.result.forEach(function(obj) {
@@ -1476,73 +787,43 @@ g5.result.forEach(function(obj) {
});
var g5result = [
- {
- "_id" : {
- "tags" : "filthy"
- },
- "authors" : [
- "jane"
- ]
- },
- {
- "_id" : {
- "tags" : "fun"
- },
- "authors" : [
- "bob",
- "dave",
- ]
- },
- {
- "_id" : {
- "tags" : "good"
- },
- "authors" : [
- "bob"
- ]
- },
- {
- "_id" : {
- "tags" : "nasty"
- },
- "authors" : [
- "dave",
- "jane",
- ]
- }
+ {"_id": {"tags": "filthy"}, "authors": ["jane"]},
+ {"_id": {"tags": "fun"}, "authors": ["bob", "dave", ]},
+ {"_id": {"tags": "good"}, "authors": ["bob"]},
+ {"_id": {"tags": "nasty"}, "authors": ["dave", "jane", ]}
];
assert.docEq(g5.result, g5result, 'g5 failed');
-
// $first and $last accumulators, constant _id
-var g6 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $sort : { author : -1 } },
- { $group : {
- _id : "authors", /* constant string, *not* a field reference */
- firstAuthor : { $last : "$author" }, /* note reverse sort above */
- lastAuthor : { $first : "$author" }, /* note reverse sort above */
- count : { $sum : 1 }
- }}
-]});
-
-var g6result = [
- {
- "_id" : "authors",
- firstAuthor : "bob",
- lastAuthor : "jane",
- count : 3
- }
-];
+var g6 = db.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$sort: {author: -1}},
+ {
+ $group: {
+ _id: "authors", /* constant string, *not* a field reference */
+ firstAuthor: {$last: "$author"}, /* note reverse sort above */
+ lastAuthor: {$first: "$author"}, /* note reverse sort above */
+ count: {$sum: 1}
+ }
+ }
+ ]
+});
+
+var g6result = [{"_id": "authors", firstAuthor: "bob", lastAuthor: "jane", count: 3}];
// Test unwind on an unused field
-var g7 = db.runCommand(
-{ aggregate : "article", pipeline : [
- { $unwind : '$tags' },
- { $group : {
- _id : "tag_count", /* constant string, *not* a field reference */
- count : { $sum : 1 }
- }}
-]});
+var g7 = db.runCommand({
+ aggregate: "article",
+ pipeline: [
+ {$unwind: '$tags'},
+ {
+ $group: {
+ _id: "tag_count", /* constant string, *not* a field reference */
+ count: {$sum: 1}
+ }
+ }
+ ]
+});
assert.eq(g7.result[0].count, 7);
diff --git a/jstests/aggregation/testshard1.js b/jstests/aggregation/testshard1.js
index ef9f91bae70..0d773351f1d 100644
--- a/jstests/aggregation/testshard1.js
+++ b/jstests/aggregation/testshard1.js
@@ -1,11 +1,11 @@
load('jstests/aggregation/extras/utils.js');
-load('jstests/libs/analyze_plan.js'); // For planHasStage.
+load('jstests/libs/analyze_plan.js'); // For planHasStage.
// Use this for aggregations that only have arrays or results of specified order.
// It will check that cursors return the same results as non-cursors.
function aggregateOrdered(coll, pipeline) {
var cursor = coll.aggregate(pipeline).toArray();
- var noCursor = coll.runCommand('aggregate', {pipeline:pipeline}).result;
+ var noCursor = coll.runCommand('aggregate', {pipeline: pipeline}).result;
assert.eq(cursor, noCursor);
return cursor;
}
@@ -17,25 +17,21 @@ function aggregateNoOrder(coll, pipeline) {
}
jsTestLog("Creating sharded cluster");
-var shardedAggTest = new ShardingTest({
- shards: 2,
- mongos: 1,
- other: { chunkSize: 1, enableBalancer: true }
- });
+var shardedAggTest =
+ new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
jsTestLog("Setting up sharded cluster");
-shardedAggTest.adminCommand( { enablesharding : "aggShard" } );
-db = shardedAggTest.getDB( "aggShard" );
-assert.commandWorked(db.adminCommand({setParameter: 1, logComponentVerbosity: { network: 0 }}));
+shardedAggTest.adminCommand({enablesharding: "aggShard"});
+db = shardedAggTest.getDB("aggShard");
+assert.commandWorked(db.adminCommand({setParameter: 1, logComponentVerbosity: {network: 0}}));
shardedAggTest.ensurePrimaryShard('aggShard', 'shard0000');
/* make sure its cleaned up */
db.ts1.drop();
db.literal.drop();
-shardedAggTest.adminCommand( { shardcollection : "aggShard.ts1", key : { "_id" : 1 } } );
-shardedAggTest.adminCommand( { shardcollection : "aggShard.literal", key : { "_id" : 1 } } );
-
+shardedAggTest.adminCommand({shardcollection: "aggShard.ts1", key: {"_id": 1}});
+shardedAggTest.adminCommand({shardcollection: "aggShard.literal", key: {"_id": 1}});
/*
Test combining results in mongos for operations that sub-aggregate on shards.
@@ -75,11 +71,14 @@ var strings = [
jsTestLog("Bulk inserting data");
var nItems = 200000;
var bulk = db.ts1.initializeUnorderedBulkOp();
-for(i = 1; i <= nItems; ++i) {
- bulk.insert(
- {_id: i,
- counter: ++count, number: strings[i % 20], random: Math.random(),
- filler: "0123456789012345678901234567890123456789"});
+for (i = 1; i <= nItems; ++i) {
+ bulk.insert({
+ _id: i,
+ counter: ++count,
+ number: strings[i % 20],
+ random: Math.random(),
+ filler: "0123456789012345678901234567890123456789"
+ });
}
assert.writeOK(bulk.execute());
@@ -88,78 +87,64 @@ assert.writeOK(bulk.execute());
var config = db.getMongo().getDB("config");
var shards = config.shards.find().toArray();
-jsTest.log( "Tracing all exceptions in mongod..." );
-for ( var i = 0; i < shards.length; i++ ) {
- var shardConn = new Mongo( shards[i].host );
- printjson(shardConn.getDB( "admin" ).runCommand({ setParameter : 1, traceExceptions : true }));
+jsTest.log("Tracing all exceptions in mongod...");
+for (var i = 0; i < shards.length; i++) {
+ var shardConn = new Mongo(shards[i].host);
+ printjson(shardConn.getDB("admin").runCommand({setParameter: 1, traceExceptions: true}));
}
jsTestLog('a project and group in shards, result combined in mongos');
-var a1 = aggregateNoOrder(db.ts1, [
- { $project: {
- cMod10: {$mod:["$counter", 10]},
- number: 1,
- counter: 1
- }},
- { $group: {
- _id: "$cMod10",
- numberSet: {$addToSet: "$number"},
- avgCounter: {$avg: "$cMod10"}
- }},
- { $sort: {_id:1} }
-]);
-
-for(i = 0 ; i < 10; ++i) {
- assert.eq(a1[i].avgCounter, a1[i]._id,
- 'agg sharded test avgCounter failed');
- assert.eq(a1[i].numberSet.length, 2,
- 'agg sharded test numberSet length failed');
+var a1 = aggregateNoOrder(
+ db.ts1,
+ [
+ {$project: {cMod10: {$mod: ["$counter", 10]}, number: 1, counter: 1}},
+ {$group: {_id: "$cMod10", numberSet: {$addToSet: "$number"}, avgCounter: {$avg: "$cMod10"}}},
+ {$sort: {_id: 1}}
+ ]);
+
+for (i = 0; i < 10; ++i) {
+ assert.eq(a1[i].avgCounter, a1[i]._id, 'agg sharded test avgCounter failed');
+ assert.eq(a1[i].numberSet.length, 2, 'agg sharded test numberSet length failed');
}
jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
-var a2 = aggregateOrdered(db.ts1 , [
- { $group: {
- _id: "all",
- total: {$sum: "$counter"}
- }}
-]);
+var a2 = aggregateOrdered(db.ts1, [{$group: {_id: "all", total: {$sum: "$counter"}}}]);
jsTestLog('sum of an arithmetic progression S(n) = (n/2)(a(1) + a(n));');
-assert.eq(a2[0].total, (nItems/2)*(1 + nItems),
- 'agg sharded test counter sum failed');
+assert.eq(a2[0].total, (nItems / 2) * (1 + nItems), 'agg sharded test counter sum failed');
jsTestLog('A group combining all documents into one, averaging a null field.');
assert.eq(aggregateOrdered(db.ts1, [{$group: {_id: null, avg: {$avg: "$missing"}}}]),
- [{_id: null, avg: null}]);
+ [{_id: null, avg: null}]);
jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
-var a3 = aggregateOrdered(db.ts1, [
- { $group: {
- _id: "$number",
- total: {$sum: 1}
- }},
- { $sort: {_id:1} }
-]);
-
-for(i = 0 ; i < strings.length; ++i) {
- assert.eq(a3[i].total, nItems/strings.length,
- 'agg sharded test sum numbers failed');
+var a3 =
+ aggregateOrdered(db.ts1, [{$group: {_id: "$number", total: {$sum: 1}}}, {$sort: {_id: 1}}]);
+
+for (i = 0; i < strings.length; ++i) {
+ assert.eq(a3[i].total, nItems / strings.length, 'agg sharded test sum numbers failed');
}
jsTestLog('a match takes place in the shards; just returning the results from mongos');
-var a4 = aggregateNoOrder(db.ts1, [
- { $match: {$or:[{counter:55}, {counter:1111},
- {counter: 2222}, {counter: 33333},
- {counter: 99999}, {counter: 55555}]}
- }
-]);
+var a4 = aggregateNoOrder(db.ts1,
+ [{
+ $match: {
+ $or: [
+ {counter: 55},
+ {counter: 1111},
+ {counter: 2222},
+ {counter: 33333},
+ {counter: 99999},
+ {counter: 55555}
+ ]
+ }
+ }]);
assert.eq(a4.length, 6, tojson(a4));
-for(i = 0; i < 6; ++i) {
+for (i = 0; i < 6; ++i) {
c = a4[i].counter;
- printjson({c:c});
- assert((c == 55) || (c == 1111) || (c == 2222) ||
- (c == 33333) || (c == 99999) || (c == 55555),
+ printjson({c: c});
+ assert((c == 55) || (c == 1111) || (c == 2222) || (c == 33333) || (c == 99999) || (c == 55555),
'agg sharded test simple match failed');
}
@@ -167,54 +152,53 @@ function testSkipLimit(ops, expectedCount) {
jsTestLog('testSkipLimit(' + tojson(ops) + ', ' + expectedCount + ')');
if (expectedCount > 10) {
// make shard -> mongos intermediate results less than 16MB
- ops.unshift({$project: {_id:1}});
+ ops.unshift({$project: {_id: 1}});
}
- ops.push({$group: {_id:1, count: {$sum: 1}}});
+ ops.push({$group: {_id: 1, count: {$sum: 1}}});
var out = aggregateOrdered(db.ts1, ops);
assert.eq(out[0].count, expectedCount);
}
-testSkipLimit([], nItems); // control
-testSkipLimit([{$skip:10}], nItems - 10);
-testSkipLimit([{$limit:10}], 10);
-testSkipLimit([{$skip:5}, {$limit:10}], 10);
-testSkipLimit([{$limit:10}, {$skip:5}], 10 - 5);
-testSkipLimit([{$skip:5}, {$skip: 3}, {$limit:10}], 10);
-testSkipLimit([{$skip:5}, {$limit:10}, {$skip: 3}], 10 - 3);
-testSkipLimit([{$limit:10}, {$skip:5}, {$skip: 3}], 10 - 3 - 5);
+testSkipLimit([], nItems); // control
+testSkipLimit([{$skip: 10}], nItems - 10);
+testSkipLimit([{$limit: 10}], 10);
+testSkipLimit([{$skip: 5}, {$limit: 10}], 10);
+testSkipLimit([{$limit: 10}, {$skip: 5}], 10 - 5);
+testSkipLimit([{$skip: 5}, {$skip: 3}, {$limit: 10}], 10);
+testSkipLimit([{$skip: 5}, {$limit: 10}, {$skip: 3}], 10 - 3);
+testSkipLimit([{$limit: 10}, {$skip: 5}, {$skip: 3}], 10 - 3 - 5);
// test sort + limit (using random to pull from both shards)
function testSortLimit(limit, direction) {
jsTestLog('testSortLimit(' + limit + ', ' + direction + ')');
- shardedAggTest.stopBalancer(); // TODO: remove after fixing SERVER-9622
- var from_cursor = db.ts1.find({},{random:1, _id:0})
- .sort({random: direction})
- .limit(limit)
- .toArray();
- shardedAggTest.startBalancer(); // TODO: remove after fixing SERVER-9622
- var from_agg = aggregateOrdered(db.ts1, [{$project: {random:1, _id:0}}
- ,{$sort: {random: direction}}
- ,{$limit: limit}
- ]);
+ shardedAggTest.stopBalancer(); // TODO: remove after fixing SERVER-9622
+ var from_cursor =
+ db.ts1.find({}, {random: 1, _id: 0}).sort({random: direction}).limit(limit).toArray();
+ shardedAggTest.startBalancer(); // TODO: remove after fixing SERVER-9622
+ var from_agg = aggregateOrdered(
+ db.ts1, [{$project: {random: 1, _id: 0}}, {$sort: {random: direction}}, {$limit: limit}]);
assert.eq(from_cursor, from_agg);
}
-testSortLimit(1, 1);
+testSortLimit(1, 1);
testSortLimit(1, -1);
-testSortLimit(10, 1);
+testSortLimit(10, 1);
testSortLimit(10, -1);
-testSortLimit(100, 1);
+testSortLimit(100, 1);
testSortLimit(100, -1);
function testAvgStdDev() {
jsTestLog('testing $avg and $stdDevPop in sharded $group');
// Note: not using aggregateOrdered since it requires exact results. $stdDevPop can vary
// slightly between runs if a migration occurs. This is why we use assert.close below.
- var res = db.ts1.aggregate([{$group: {_id: null,
- avg: {$avg: '$counter'},
- stdDevPop: {$stdDevPop: '$counter'},
- }}]).toArray();
+ var res = db.ts1.aggregate([{
+ $group: {
+ _id: null,
+ avg: {$avg: '$counter'},
+ stdDevPop: {$stdDevPop: '$counter'},
+ }
+ }]).toArray();
// http://en.wikipedia.org/wiki/Arithmetic_progression#Sum
var avg = (1 + nItems) / 2;
assert.close(res[0].avg, avg, '', 10 /*decimal places*/);
@@ -237,25 +221,28 @@ testSample();
jsTestLog('test $out by copying source collection verbatim to output');
var outCollection = db.ts1_out;
var res = aggregateOrdered(db.ts1, [{$out: outCollection.getName()}]);
-shardedAggTest.stopBalancer(); // TODO: remove after fixing SERVER-9622
+shardedAggTest.stopBalancer(); // TODO: remove after fixing SERVER-9622
assert.eq(db.ts1.find().itcount(), outCollection.find().itcount());
-assert.eq(db.ts1.find().sort({_id:1}).toArray(),
- outCollection.find().sort({_id:1}).toArray());
-shardedAggTest.startBalancer(); // TODO: remove after fixing SERVER-9622
+assert.eq(db.ts1.find().sort({_id: 1}).toArray(), outCollection.find().sort({_id: 1}).toArray());
+shardedAggTest.startBalancer(); // TODO: remove after fixing SERVER-9622
// Make sure we error out if $out collection is sharded
assertErrorCode(outCollection, [{$out: db.ts1.getName()}], 17017);
-db.literal.save({dollar:false});
+db.literal.save({dollar: false});
-result = aggregateOrdered(db.literal,
- [{$project:{_id:0, cost:{$cond:['$dollar', {$literal:'$1.00'}, {$literal:'$.99'}]}}}]);
+result = aggregateOrdered(
+ db.literal,
+ [{
+ $project:
+ {_id: 0, cost: {$cond: ['$dollar', {$literal: '$1.00'}, {$literal: '$.99'}]}}
+ }]);
-assert.eq([{cost:'$.99'}], result);
+assert.eq([{cost: '$.99'}], result);
jsTestLog("Do a basic sharded explain. This just makes sure that it doesn't error and has " +
"the right fields.");
-var res = db.ts1.aggregate([{$project: {a: 1}}], {explain:true});
+var res = db.ts1.aggregate([{$project: {a: 1}}], {explain: true});
assert.commandWorked(res);
printjson(res);
assert("splitPipeline" in res);
@@ -297,9 +284,11 @@ for (var shardName in res.shards) {
// Range query.
var range = 500;
var targetStart = Math.floor((nItems - range) * Math.random());
- pipeline = [{$match: {_id: {$gte: targetStart, $lt: targetStart + range}}},
- {$project: {_id: 1}},
- {$sort: {_id: 1}}];
+ pipeline = [
+ {$match: {_id: {$gte: targetStart, $lt: targetStart + range}}},
+ {$project: {_id: 1}},
+ {$sort: {_id: 1}}
+ ];
expectedDocs = [];
for (var i = targetStart; i < targetStart + range; i++) {
expectedDocs.push({_id: i});
@@ -315,11 +304,11 @@ for (var shardName in res.shards) {
// Call sub-tests designed to work sharded and unsharded.
// They check for this variable to know to shard their collections.
-RUNNING_IN_SHARDED_AGG_TEST = true; // global
+RUNNING_IN_SHARDED_AGG_TEST = true; // global
jsTestLog('running jstests/aggregation/bugs/server9444.js');
-load("jstests/aggregation/bugs/server9444.js"); // external sort
+load("jstests/aggregation/bugs/server9444.js"); // external sort
jsTestLog('running jstests/aggregation/bugs/server11675.js');
-load("jstests/aggregation/bugs/server11675.js"); // text support
+load("jstests/aggregation/bugs/server11675.js"); // text support
jsTestLog('shut everything down');
shardedAggTest.stop();
diff --git a/jstests/aggregation/unwind.js b/jstests/aggregation/unwind.js
index d4c540436ae..ba6ffa44a86 100644
--- a/jstests/aggregation/unwind.js
+++ b/jstests/aggregation/unwind.js
@@ -3,13 +3,16 @@
t = db.agg_unwind;
t.drop();
-t.insert( {_id : 1 } );
-t.insert( {_id : 2, x : null } );
-t.insert( {_id : 3, x : [] } );
-t.insert( {_id : 4, x : [1, 2] } );
-t.insert( {_id : 5, x : [3] } );
-t.insert( {_id : 6, x : 4 } );
+t.insert({_id: 1});
+t.insert({_id: 2, x: null});
+t.insert({_id: 3, x: []});
+t.insert({_id: 4, x: [1, 2]});
+t.insert({_id: 5, x: [3]});
+t.insert({_id: 6, x: 4});
-var res = t.aggregate( [ { $unwind : "$x" }, { $sort : { _id : 1 } } ] ).toArray();
+var res = t.aggregate([{$unwind: "$x"}, {$sort: {_id: 1}}]).toArray();
assert.eq(4, res.length);
-assert.eq([1, 2, 3, 4],res.map(function(z){ return z.x; }));
+assert.eq([1, 2, 3, 4],
+ res.map(function(z) {
+ return z.x;
+ }));
diff --git a/jstests/auth/access_control_with_unreachable_configs.js b/jstests/auth/access_control_with_unreachable_configs.js
index 23a38502301..6c833d5c844 100644
--- a/jstests/auth/access_control_with_unreachable_configs.js
+++ b/jstests/auth/access_control_with_unreachable_configs.js
@@ -3,14 +3,18 @@
// are user documents stored in the configuration information, it must assume that
// there are.
-var dopts = { smallfiles: "", nopreallocj: ""};
-var st = new ShardingTest(
- { shards: 1,
- mongos: 1,
- config: 1,
- keyFile: 'jstests/libs/key1',
- useHostname: false, // Needed when relying on the localhost exception
- other: { shardOptions: dopts, configOptions: dopts, mongosOptions: { verbose: 1 } } } );
+var dopts = {
+ smallfiles: "",
+ nopreallocj: ""
+};
+var st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ keyFile: 'jstests/libs/key1',
+ useHostname: false, // Needed when relying on the localhost exception
+ other: {shardOptions: dopts, configOptions: dopts, mongosOptions: {verbose: 1}}
+});
var mongos = st.s;
var config = st.config0;
var authzErrorCode = 13;
@@ -35,7 +39,7 @@ assert.commandWorked(db.adminCommand('serverStatus'));
jsTest.log('repeat without config server');
// shut down only config server
-MongoRunner.stopMongod(config.port, /*signal*/15);
+MongoRunner.stopMongod(config.port, /*signal*/ 15);
// open a new connection to mongos (unauthorized)
var conn2 = new Mongo(mongos.host);
diff --git a/jstests/auth/arbiter.js b/jstests/auth/arbiter.js
index 9e7f048e1df..75b7b67a9ad 100644
--- a/jstests/auth/arbiter.js
+++ b/jstests/auth/arbiter.js
@@ -3,24 +3,32 @@
var name = "arbiter_localhost_test";
var key = "jstests/libs/key1";
-var replTest = new ReplSetTest({ name: name, nodes: 3, keyFile: key });
+var replTest = new ReplSetTest({name: name, nodes: 3, keyFile: key});
var nodes = replTest.nodeList();
replTest.startSet();
-replTest.initiate({_id: name,
- members: [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true }
- ],
- });
+replTest.initiate({
+ _id: name,
+ members: [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ],
+});
var primaryAdmin = replTest.nodes[0].getDB("admin");
var arbiterAdmin = replTest.nodes[2].getDB("admin");
-var cmd0 = { getCmdLineOpts: 1 };
-var cmd1 = { getParameter: 1, logLevel: 1 };
-var cmd2 = { serverStatus: 1 };
+var cmd0 = {
+ getCmdLineOpts: 1
+};
+var cmd1 = {
+ getParameter: 1,
+ logLevel: 1
+};
+var cmd2 = {
+ serverStatus: 1
+};
assert.commandFailedWithCode(primaryAdmin.runCommand(cmd0), 13);
assert.commandFailedWithCode(primaryAdmin.runCommand(cmd1), 13);
diff --git a/jstests/auth/auth1.js b/jstests/auth/auth1.js
index 560d54828fc..c90765ffb02 100644
--- a/jstests/auth/auth1.js
+++ b/jstests/auth/auth1.js
@@ -5,42 +5,42 @@ function setupTest() {
print("START auth1.js");
baseName = "jstests_auth_auth1";
- m = MongoRunner.runMongod({auth: "",
- nohttpinterface: "",
- bind_ip: "127.0.0.1",
- useHostname: false});
+ m = MongoRunner.runMongod(
+ {auth: "", nohttpinterface: "", bind_ip: "127.0.0.1", useHostname: false});
return m;
}
function runTest(m) {
// these are used by read-only user
- db = m.getDB( "test" );
+ db = m.getDB("test");
mro = new Mongo(m.host);
- dbRO = mro.getDB( "test" );
- tRO = dbRO[ baseName ];
+ dbRO = mro.getDB("test");
+ tRO = dbRO[baseName];
db.getSisterDB("admin").createUser({user: "root", pwd: "root", roles: ["root"]});
db.getSisterDB("admin").auth("root", "root");
- t = db[ baseName ];
+ t = db[baseName];
t.drop();
db.dropAllUsers();
db.logout();
- db.getSisterDB( "admin" ).createUser({user: "super", pwd: "super", roles: ["__system"] });
+ db.getSisterDB("admin").createUser({user: "super", pwd: "super", roles: ["__system"]});
db.getSisterDB("admin").auth("super", "super");
- db.createUser({user: "eliot" , pwd: "eliot", roles: jsTest.basicUserRoles });
- db.createUser({user: "guest" , pwd: "guest", roles: jsTest.readOnlyUserRoles});
+ db.createUser({user: "eliot", pwd: "eliot", roles: jsTest.basicUserRoles});
+ db.createUser({user: "guest", pwd: "guest", roles: jsTest.readOnlyUserRoles});
db.getSisterDB("admin").logout();
- assert.throws( function() { t.findOne(); }, [], "read without login" );
+ assert.throws(function() {
+ t.findOne();
+ }, [], "read without login");
print("make sure we can't run certain commands w/out auth");
var codeUnauthorized = 13;
- var rslt = db.runCommand({eval : "function() { return 1; }"});
+ var rslt = db.runCommand({eval: "function() { return 1; }"});
assert.eq(rslt.code, codeUnauthorized, tojson(rslt));
- var rslt = db.runCommand({getLog : "global"});
+ var rslt = db.runCommand({getLog: "global"});
assert.eq(rslt.code, codeUnauthorized, tojson(rslt));
assert(!db.auth("eliot", "eliot2"), "auth succeeded with wrong password");
@@ -50,51 +50,68 @@ function runTest(m) {
assert(!db.auth("eliot", "eliot"), "auth succeeded with wrong password");
assert(db.auth("eliot", "eliot2"), "auth failed");
- for( i = 0; i < 1000; ++i ) {
- t.save( {i:i} );
+ for (i = 0; i < 1000; ++i) {
+ t.save({i: i});
}
- assert.eq( 1000, t.count() , "A1" );
- assert.eq( 1000, t.find().toArray().length , "A2" );
+ assert.eq(1000, t.count(), "A1");
+ assert.eq(1000, t.find().toArray().length, "A2");
- db.setProfilingLevel( 2 );
+ db.setProfilingLevel(2);
t.count();
- db.setProfilingLevel( 0 );
- assert.lt( 0 , db.system.profile.find( { user : "eliot@test" } ).count() , "AP1" );
-
- var p = { key : { i : true } ,
- reduce : function(obj,prev) { prev.count++; },
- initial: { count: 0 }
+ db.setProfilingLevel(0);
+ assert.lt(0, db.system.profile.find({user: "eliot@test"}).count(), "AP1");
+
+ var p = {
+ key: {i: true},
+ reduce: function(obj, prev) {
+ prev.count++;
+ },
+ initial: {count: 0}
};
- assert.eq( 1000, t.group( p ).length , "A5" );
+ assert.eq(1000, t.group(p).length, "A5");
- assert( dbRO.auth( "guest", "guest" ), "auth failed 2" );
+ assert(dbRO.auth("guest", "guest"), "auth failed 2");
- assert.eq( 1000, tRO.count() , "B1" );
- assert.eq( 1000, tRO.find().toArray().length , "B2" ); // make sure we have a getMore in play
- assert.commandWorked( dbRO.runCommand( {ismaster:1} ) , "B3" );
+ assert.eq(1000, tRO.count(), "B1");
+ assert.eq(1000, tRO.find().toArray().length, "B2"); // make sure we have a getMore in play
+ assert.commandWorked(dbRO.runCommand({ismaster: 1}), "B3");
assert.writeError(tRO.save({}));
- assert.eq( 1000, tRO.count() , "B6" );
-
- assert.eq( 1000, tRO.group( p ).length , "C1" );
+ assert.eq(1000, tRO.count(), "B6");
- var p = { key : { i : true } ,
- reduce : function(obj,prev) { db.jstests_auth_auth1.save( {i:10000} ); prev.count++; },
- initial: { count: 0 }
- };
+ assert.eq(1000, tRO.group(p).length, "C1");
+ var p = {
+ key: {i: true},
+ reduce: function(obj, prev) {
+ db.jstests_auth_auth1.save({i: 10000});
+ prev.count++;
+ },
+ initial: {count: 0}
+ };
- assert.throws( function() { return t.group( p ); }, null , "write reduce didn't fail" );
- assert.eq( 1000, dbRO.jstests_auth_auth1.count() , "C3" );
-
+ assert.throws(function() {
+ return t.group(p);
+ }, null, "write reduce didn't fail");
+ assert.eq(1000, dbRO.jstests_auth_auth1.count(), "C3");
db.getSiblingDB('admin').auth('super', 'super');
- assert.eq( 1000, db.eval( function() { return db[ "jstests_auth_auth1" ].count(); } ) , "D1" );
- db.eval( function() { db[ "jstests_auth_auth1" ].save( {i:1000} ); } );
- assert.eq( 1001, db.eval( function() { return db[ "jstests_auth_auth1" ].count(); } ) , "D2" );
+ assert.eq(1000,
+ db.eval(function() {
+ return db["jstests_auth_auth1"].count();
+ }),
+ "D1");
+ db.eval(function() {
+ db["jstests_auth_auth1"].save({i: 1000});
+ });
+ assert.eq(1001,
+ db.eval(function() {
+ return db["jstests_auth_auth1"].count();
+ }),
+ "D2");
print("SUCCESS auth1.js");
}
diff --git a/jstests/auth/auth2.js b/jstests/auth/auth2.js
index 52d1fac084b..f09d87d7275 100644
--- a/jstests/auth/auth2.js
+++ b/jstests/auth/auth2.js
@@ -1,24 +1,32 @@
// test read/write permissions
m = MongoRunner.runMongod({auth: "", bind_ip: "127.0.0.1", nojournal: "", smallfiles: ""});
-db = m.getDB( "admin" );
+db = m.getDB("admin");
// These statements throw because the localhost exception does not allow
// these operations: it only allows the creation of the first admin user
// and necessary setup operations.
-assert.throws( function(){ db.users.count(); } );
-assert.throws( function() { db.shutdownServer(); } );
+assert.throws(function() {
+ db.users.count();
+});
+assert.throws(function() {
+ db.shutdownServer();
+});
-db.createUser( { user: "eliot", pwd: "eliot", roles: [ "root" ] } );
+db.createUser({user: "eliot", pwd: "eliot", roles: ["root"]});
// These statements throw because we have a user but have not authenticated
// as that user.
-assert.throws( function(){ db.users.count(); } );
-assert.throws( function() { db.shutdownServer(); } );
+assert.throws(function() {
+ db.users.count();
+});
+assert.throws(function() {
+ db.shutdownServer();
+});
-db.auth( "eliot", "eliot" );
+db.auth("eliot", "eliot");
-users = db.getCollection( "system.users" );
-assert.eq( 1, users.count() );
+users = db.getCollection("system.users");
+assert.eq(1, users.count());
db.shutdownServer();
diff --git a/jstests/auth/auth3.js b/jstests/auth/auth3.js
index 9ec028aca21..dd87ea2448a 100644
--- a/jstests/auth/auth3.js
+++ b/jstests/auth/auth3.js
@@ -1,32 +1,32 @@
(function() {
-'use strict';
+ 'use strict';
-var conn = MongoRunner.runMongod({ auth: "" });
+ var conn = MongoRunner.runMongod({auth: ""});
-var admin = conn.getDB("admin");
-var errorCodeUnauthorized = 13;
+ var admin = conn.getDB("admin");
+ var errorCodeUnauthorized = 13;
-admin.createUser({user:"foo",pwd: "bar", roles: jsTest.adminUserRoles});
+ admin.createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
-print("make sure curop, killop, and unlock fail");
+ print("make sure curop, killop, and unlock fail");
-var x = admin.$cmd.sys.inprog.findOne();
-assert(!("inprog" in x), tojson(x));
-assert.eq(x.code, errorCodeUnauthorized, tojson(x));
+ var x = admin.$cmd.sys.inprog.findOne();
+ assert(!("inprog" in x), tojson(x));
+ assert.eq(x.code, errorCodeUnauthorized, tojson(x));
-x = admin.killOp(123);
-assert(!("info" in x), tojson(x));
-assert.eq(x.code, errorCodeUnauthorized, tojson(x));
+ x = admin.killOp(123);
+ assert(!("info" in x), tojson(x));
+ assert.eq(x.code, errorCodeUnauthorized, tojson(x));
-x = admin.fsyncUnlock();
-assert(x.errmsg != "not locked", tojson(x));
-assert.eq(x.code, errorCodeUnauthorized, tojson(x));
+ x = admin.fsyncUnlock();
+ assert(x.errmsg != "not locked", tojson(x));
+ assert.eq(x.code, errorCodeUnauthorized, tojson(x));
-conn.getDB("admin").auth("foo","bar");
+ conn.getDB("admin").auth("foo", "bar");
-assert("inprog" in admin.currentOp());
-assert("info" in admin.killOp(123));
-assert.eq(admin.fsyncUnlock().errmsg, "not locked");
+ assert("inprog" in admin.currentOp());
+ assert("info" in admin.killOp(123));
+ assert.eq(admin.fsyncUnlock().errmsg, "not locked");
})();
diff --git a/jstests/auth/auth_helpers.js b/jstests/auth/auth_helpers.js
index f35f2f579af..94131821784 100644
--- a/jstests/auth/auth_helpers.js
+++ b/jstests/auth/auth_helpers.js
@@ -3,33 +3,33 @@
// This test requires users to persist across a restart.
// @tags: [requires_persistence]
-var conn = MongoRunner.runMongod({ smallfiles: ""});
+var conn = MongoRunner.runMongod({smallfiles: ""});
var mechanisms, hasCR, hasCramMd5;
var admin = conn.getDB('admin');
// In order to test MONGODB-CR we need to "reset" the authSchemaVersion to
// 26Final "3" or else the user won't get MONGODB-CR credentials.
-admin.system.version.save({ "_id" : "authSchema", "currentVersion" : 3 });
-admin.createUser({user:'andy', pwd: 'a', roles: jsTest.adminUserRoles});
+admin.system.version.save({"_id": "authSchema", "currentVersion": 3});
+admin.createUser({user: 'andy', pwd: 'a', roles: jsTest.adminUserRoles});
admin.auth({user: 'andy', pwd: 'a'});
// Attempt to start with CRAM-MD5 enabled
// If this fails the build only supports default auth mechanisms
MongoRunner.stopMongod(conn);
-var restartedConn = MongoRunner.runMongod({
- auth: "",
- restart: conn,
- setParameter: "authenticationMechanisms=SCRAM-SHA-1,MONGODB-CR,CRAM-MD5"});
+var restartedConn = MongoRunner.runMongod({
+ auth: "",
+ restart: conn,
+ setParameter: "authenticationMechanisms=SCRAM-SHA-1,MONGODB-CR,CRAM-MD5"
+});
if (restartedConn != null) {
- mechanisms = [ "SCRAM-SHA-1", "MONGODB-CR", "CRAM-MD5" ];
+ mechanisms = ["SCRAM-SHA-1", "MONGODB-CR", "CRAM-MD5"];
hasCR = true;
hasCramMd5 = true;
print("test info: Enabling non-default authentication mechanisms.");
-}
-else {
- restartedConn = MongoRunner.runMongod({ restart: conn });
- mechanisms = [ "SCRAM-SHA-1", "MONGODB-CR" ];
+} else {
+ restartedConn = MongoRunner.runMongod({restart: conn});
+ mechanisms = ["SCRAM-SHA-1", "MONGODB-CR"];
hasCR = true;
hasCramMd5 = false;
print("test info: Using only default password authentication mechanisms.");
diff --git a/jstests/auth/auth_options.js b/jstests/auth/auth_options.js
index 1a38821fa03..d2f89d12a0f 100644
--- a/jstests/auth/auth_options.js
+++ b/jstests/auth/auth_options.js
@@ -4,63 +4,49 @@ load('jstests/libs/command_line/test_parsed_options.js');
jsTest.log("Testing \"auth\" command line option");
var expectedResult = {
- "parsed" : {
- "security" : {
- "authorization" : "enabled"
- }
- }
+ "parsed": {"security": {"authorization": "enabled"}}
};
-testGetCmdLineOptsMongod({ auth : "" }, expectedResult);
+testGetCmdLineOptsMongod({auth: ""}, expectedResult);
jsTest.log("Testing \"noauth\" command line option");
expectedResult = {
- "parsed" : {
- "security" : {
- "authorization" : "disabled"
- }
- }
+ "parsed": {"security": {"authorization": "disabled"}}
};
-testGetCmdLineOptsMongod({ noauth : "" }, expectedResult);
+testGetCmdLineOptsMongod({noauth: ""}, expectedResult);
jsTest.log("Testing \"security.authorization\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/enable_auth.json",
- "security" : {
- "authorization" : "enabled"
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/enable_auth.json",
+ "security": {"authorization": "enabled"}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/enable_auth.json" }, expectedResult);
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_auth.json"}, expectedResult);
jsTest.log("Testing with no explicit object check setting");
expectedResult = {
- "parsed" : { }
+ "parsed": {}
};
testGetCmdLineOptsMongod({}, expectedResult);
// Test that we preserve switches explicitly set to false in config files. See SERVER-13439.
jsTest.log("Testing explicitly disabled \"auth\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_auth.ini",
- "security" : {
- "authorization" : "disabled"
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_auth.ini",
+ "security": {"authorization": "disabled"}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_auth.ini" }, expectedResult);
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_auth.ini"}, expectedResult);
jsTest.log("Testing explicitly disabled \"noauth\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_noauth.ini",
- "security" : {
- "authorization" : "enabled"
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_noauth.ini",
+ "security": {"authorization": "enabled"}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_noauth.ini" }, expectedResult);
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_noauth.ini"}, expectedResult);
print(baseName + " succeeded.");
diff --git a/jstests/auth/auth_schema_upgrade.js b/jstests/auth/auth_schema_upgrade.js
index 95dcaad70eb..d80fbe6bbad 100644
--- a/jstests/auth/auth_schema_upgrade.js
+++ b/jstests/auth/auth_schema_upgrade.js
@@ -1,21 +1,17 @@
// Standalone test of authSchemaUpgrade
load('./jstests/multiVersion/libs/auth_helpers.js');
-var setupCRUsers = function(conn){
+var setupCRUsers = function(conn) {
jsTest.log("setting up legacy users");
var adminDB = conn.getDB('admin');
- adminDB.system.version.update({_id:"authSchema"},{"currentVersion":3},{upsert:true});
+ adminDB.system.version.update({_id: "authSchema"}, {"currentVersion": 3}, {upsert: true});
- adminDB.createUser({user: 'user1', pwd: 'pass',
- roles: jsTest.adminUserRoles});
- assert(adminDB.auth({mechanism: 'MONGODB-CR',
- user: 'user1', pwd: 'pass'}));
+ adminDB.createUser({user: 'user1', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(adminDB.auth({mechanism: 'MONGODB-CR', user: 'user1', pwd: 'pass'}));
- adminDB.createUser({user: 'user2', pwd: 'pass',
- roles: jsTest.adminUserRoles});
- assert(adminDB.auth({mechanism: 'MONGODB-CR',
- user: 'user2', pwd: 'pass'}));
+ adminDB.createUser({user: 'user2', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(adminDB.auth({mechanism: 'MONGODB-CR', user: 'user2', pwd: 'pass'}));
// Add $external no-op user to verify that it does not affect
// authSchemaUpgrade SERVER-18475
@@ -29,8 +25,7 @@ var setupCRUsers = function(conn){
verifyUserDoc(adminDB, 'user2', true, false);
verifyUserDoc(adminDB.getSiblingDB('$external'), "evil", false, false, true);
- adminDB.updateUser('user1', {pwd: 'newpass',
- roles: jsTest.adminUserRoles});
+ adminDB.updateUser('user1', {pwd: 'newpass', roles: jsTest.adminUserRoles});
verifyAuth(adminDB, 'user1', 'newpass', true, true);
verifyUserDoc(adminDB, 'user1', true, false);
@@ -47,7 +42,7 @@ var verifySchemaUpgrade = function(adminDB) {
verifyAuth(adminDB, 'user2', 'pass', false, true);
};
-var runAndVerifySchemaUpgrade = function(conn){
+var runAndVerifySchemaUpgrade = function(conn) {
jsTest.log("run authSchemaUpgrade");
var adminDB = conn.getDB('admin');
@@ -64,7 +59,7 @@ var testAuthSchemaUpgrade = function(conn) {
var testUpgradeShards = function(mongos, shard) {
setupCRUsers(shard);
- assert.commandWorked(mongos.adminCommand({"authSchemaUpgrade":1,"upgradeShards":1}));
+ assert.commandWorked(mongos.adminCommand({"authSchemaUpgrade": 1, "upgradeShards": 1}));
verifySchemaUpgrade(shard.getDB('admin'));
};
@@ -74,13 +69,17 @@ testAuthSchemaUpgrade(conn);
MongoRunner.stopMongod(conn);
jsTest.log('Test authSchemUpgrade sharded');
-var dopts = { smallfiles: "", nopreallocj: ""};
-var st = new ShardingTest(
- { shards: 1,
- mongos: 1,
- config: 1,
- useHostname: false, // Needed when relying on the localhost exception
- other: { shardOptions: dopts, configOptions: dopts, mongosOptions: { verbose: 1 } } } );
+var dopts = {
+ smallfiles: "",
+ nopreallocj: ""
+};
+var st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ config: 1,
+ useHostname: false, // Needed when relying on the localhost exception
+ other: {shardOptions: dopts, configOptions: dopts, mongosOptions: {verbose: 1}}
+});
testAuthSchemaUpgrade(st.s);
testUpgradeShards(st.s, st.shard0);
st.stop();
diff --git a/jstests/auth/authz_modifications_access_control.js b/jstests/auth/authz_modifications_access_control.js
index 69e9b2b2c6e..bb294796a7f 100644
--- a/jstests/auth/authz_modifications_access_control.js
+++ b/jstests/auth/authz_modifications_access_control.js
@@ -5,19 +5,17 @@
function runTest(conn) {
var authzErrorCode = 13;
- conn.getDB('admin').createUser({user: 'userAdmin',
- pwd: 'pwd',
- roles: ['userAdminAnyDatabase']});
+ conn.getDB('admin')
+ .createUser({user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
var userAdminConn = new Mongo(conn.host);
userAdminConn.getDB('admin').auth('userAdmin', 'pwd');
var testUserAdmin = userAdminConn.getDB('test');
var adminUserAdmin = userAdminConn.getDB('admin');
- testUserAdmin.createRole({role: 'testRole', roles:[], privileges:[]});
- adminUserAdmin.createRole({role: 'adminRole', roles:[], privileges:[]});
- testUserAdmin.createUser({user: 'spencer',
- pwd: 'pwd',
- roles: ['testRole', {role: 'adminRole', db: 'admin'}]});
+ testUserAdmin.createRole({role: 'testRole', roles: [], privileges: []});
+ adminUserAdmin.createRole({role: 'adminRole', roles: [], privileges: []});
+ testUserAdmin.createUser(
+ {user: 'spencer', pwd: 'pwd', roles: ['testRole', {role: 'adminRole', db: 'admin'}]});
adminUserAdmin.createUser({user: 'otherUser', pwd: 'pwd', roles: []});
var db = conn.getDB('test');
@@ -32,43 +30,43 @@ function runTest(conn) {
// while "db" and "admindb" will be used for the actual permission checks that are being tested.
(function testCreateUser() {
- jsTestLog("Testing user creation");
+ jsTestLog("Testing user creation");
- var res = db.runCommand({createUser: 'andy', pwd: 'pwd', roles: []});
- assert.commandFailedWithCode(res, authzErrorCode);
+ var res = db.runCommand({createUser: 'andy', pwd: 'pwd', roles: []});
+ assert.commandFailedWithCode(res, authzErrorCode);
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole', [{resource: {db: 'test', collection: ''}, actions: ['createUser']}]);
- testUserAdmin.grantPrivilegesToRole('testRole', [{resource: {db: 'test', collection: ''},
- actions: ['createUser']}]);
+ assert.commandWorked(db.runCommand({createUser: 'andy', pwd: 'pwd', roles: []}));
-
- assert.commandWorked(db.runCommand({createUser: 'andy', pwd: 'pwd', roles: []}));
-
- res = admindb.runCommand({createUser: 'andy', pwd: 'pwd', roles: []});
- assert.commandFailedWithCode(res, authzErrorCode);
- })();
+ res = admindb.runCommand({createUser: 'andy', pwd: 'pwd', roles: []});
+ assert.commandFailedWithCode(res, authzErrorCode);
+ })();
(function testCreateRole() {
- jsTestLog("Testing role creation");
-
- var res = db.runCommand({createRole: 'testRole2', roles: [], privileges: []});
- assert.commandFailedWithCode(res, authzErrorCode);
+ jsTestLog("Testing role creation");
+ var res = db.runCommand({createRole: 'testRole2', roles: [], privileges: []});
+ assert.commandFailedWithCode(res, authzErrorCode);
- testUserAdmin.grantPrivilegesToRole('testRole', [{resource: {db: 'test', collection: ''},
- actions: ['createRole']}]);
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole', [{resource: {db: 'test', collection: ''}, actions: ['createRole']}]);
+ assert.commandWorked(db.runCommand({createRole: 'testRole2', roles: [], privileges: []}));
- assert.commandWorked(db.runCommand({createRole: 'testRole2', roles: [], privileges: []}));
-
- res = admindb.runCommand({createRole: 'testRole2', roles: [], privileges: []});
- assert.commandFailedWithCode(res, authzErrorCode);
- })();
+ res = admindb.runCommand({createRole: 'testRole2', roles: [], privileges: []});
+ assert.commandFailedWithCode(res, authzErrorCode);
+ })();
- (function () {
+ (function() {
jsTestLog("Testing role creation, of user-defined roles with same name as built-in roles");
- var cmdObj = {createRole: "readWrite", roles: [], privileges: []};
+ var cmdObj = {
+ createRole: "readWrite",
+ roles: [],
+ privileges: []
+ };
var res = adminUserAdmin.runCommand(cmdObj);
assert.commandFailed(res, tojson(cmdObj));
@@ -79,242 +77,232 @@ function runTest(conn) {
})();
(function testViewUser() {
- jsTestLog("Testing viewing user information");
-
- var res = db.runCommand({usersInfo: 'andy'});
- assert.commandFailedWithCode(res, authzErrorCode);
+ jsTestLog("Testing viewing user information");
+ var res = db.runCommand({usersInfo: 'andy'});
+ assert.commandFailedWithCode(res, authzErrorCode);
- testUserAdmin.grantPrivilegesToRole('testRole', [{resource: {db: 'test', collection: ''},
- actions: ['viewUser']}]);
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole', [{resource: {db: 'test', collection: ''}, actions: ['viewUser']}]);
+ assert.commandWorked(db.runCommand({usersInfo: 'andy'}));
- assert.commandWorked(db.runCommand({usersInfo: 'andy'}));
-
- res = admindb.runCommand({usersInfo: 'andy'});
- assert.commandFailedWithCode(res, authzErrorCode);
- })();
+ res = admindb.runCommand({usersInfo: 'andy'});
+ assert.commandFailedWithCode(res, authzErrorCode);
+ })();
(function testViewRole() {
- jsTestLog("Testing viewing role information");
-
- var res = db.runCommand({rolesInfo: 'testRole2'});
- assert.commandFailedWithCode(res, authzErrorCode);
+ jsTestLog("Testing viewing role information");
+ var res = db.runCommand({rolesInfo: 'testRole2'});
+ assert.commandFailedWithCode(res, authzErrorCode);
- testUserAdmin.grantPrivilegesToRole('testRole', [{resource: {db: 'test', collection: ''},
- actions: ['viewRole']}]);
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole', [{resource: {db: 'test', collection: ''}, actions: ['viewRole']}]);
+ assert.commandWorked(db.runCommand({rolesInfo: 'testRole2'}));
- assert.commandWorked(db.runCommand({rolesInfo: 'testRole2'}));
-
- res = admindb.runCommand({rolesInfo: 'testRole2'});
- assert.commandFailedWithCode(res, authzErrorCode);
- })();
+ res = admindb.runCommand({rolesInfo: 'testRole2'});
+ assert.commandFailedWithCode(res, authzErrorCode);
+ })();
(function testDropUser() {
- jsTestLog("Testing dropping user");
-
- var res = db.runCommand({dropUser: 'andy'});
- assert.commandFailedWithCode(res, authzErrorCode);
+ jsTestLog("Testing dropping user");
+ var res = db.runCommand({dropUser: 'andy'});
+ assert.commandFailedWithCode(res, authzErrorCode);
- testUserAdmin.grantPrivilegesToRole('testRole', [{resource: {db: 'test', collection: ''},
- actions: ['dropUser']}]);
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole', [{resource: {db: 'test', collection: ''}, actions: ['dropUser']}]);
+ assert.commandWorked(db.runCommand({dropUser: 'andy'}));
- assert.commandWorked(db.runCommand({dropUser: 'andy'}));
-
- res = admindb.runCommand({dropUser: 'andy'});
- assert.commandFailedWithCode(res, authzErrorCode);
- })();
+ res = admindb.runCommand({dropUser: 'andy'});
+ assert.commandFailedWithCode(res, authzErrorCode);
+ })();
(function testDropRole() {
- jsTestLog("Testing dropping role");
-
- var res = db.runCommand({dropRole: 'testRole2'});
- assert.commandFailedWithCode(res, authzErrorCode);
+ jsTestLog("Testing dropping role");
+ var res = db.runCommand({dropRole: 'testRole2'});
+ assert.commandFailedWithCode(res, authzErrorCode);
- testUserAdmin.grantPrivilegesToRole('testRole', [{resource: {db: 'test', collection: ''},
- actions: ['dropRole']}]);
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole', [{resource: {db: 'test', collection: ''}, actions: ['dropRole']}]);
+ assert.commandWorked(db.runCommand({dropRole: 'testRole2'}));
- assert.commandWorked(db.runCommand({dropRole: 'testRole2'}));
-
- res = admindb.runCommand({dropRole: 'testRole2'});
- assert.commandFailedWithCode(res, authzErrorCode);
- })();
+ res = admindb.runCommand({dropRole: 'testRole2'});
+ assert.commandFailedWithCode(res, authzErrorCode);
+ })();
(function testGrantRole() {
- jsTestLog("Testing granting roles");
-
- var res = db.runCommand({createUser: 'andy', pwd: 'pwd', roles: ['read']});
- assert.commandFailedWithCode(res, authzErrorCode);
+ jsTestLog("Testing granting roles");
- res = db.runCommand({grantRolesToUser: 'spencer', roles: ['read']});
- assert.commandFailedWithCode(res, authzErrorCode);
+ var res = db.runCommand({createUser: 'andy', pwd: 'pwd', roles: ['read']});
+ assert.commandFailedWithCode(res, authzErrorCode);
- res = db.runCommand({grantRolesToRole: 'testRole', roles: ['read']});
- assert.commandFailedWithCode(res, authzErrorCode);
+ res = db.runCommand({grantRolesToUser: 'spencer', roles: ['read']});
+ assert.commandFailedWithCode(res, authzErrorCode);
- res = admindb.runCommand({grantRolesToUser: 'otherUser',
- roles: [{role: 'read', db: 'test'}]});
- assert.commandFailedWithCode(res, authzErrorCode);
+ res = db.runCommand({grantRolesToRole: 'testRole', roles: ['read']});
+ assert.commandFailedWithCode(res, authzErrorCode);
+ res = admindb.runCommand(
+ {grantRolesToUser: 'otherUser', roles: [{role: 'read', db: 'test'}]});
+ assert.commandFailedWithCode(res, authzErrorCode);
- testUserAdmin.grantPrivilegesToRole('testRole', [{resource: {db: 'test', collection: ''},
- actions: ['grantRole']}]);
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole', [{resource: {db: 'test', collection: ''}, actions: ['grantRole']}]);
+ assert.commandWorked(db.runCommand({createUser: 'andy', pwd: 'pwd', roles: ['read']}));
+ assert.commandWorked(db.runCommand({grantRolesToUser: 'spencer', roles: ['read']}));
+ assert.commandWorked(db.runCommand({grantRolesToRole: 'testRole', roles: ['read']}));
- assert.commandWorked(db.runCommand({createUser: 'andy', pwd: 'pwd', roles: ['read']}));
- assert.commandWorked(db.runCommand({grantRolesToUser: 'spencer', roles: ['read']}));
- assert.commandWorked(db.runCommand({grantRolesToRole: 'testRole', roles: ['read']}));
+ // Granting roles from other dbs should fail
+ res = db.runCommand({grantRolesToUser: 'spencer', roles: [{role: 'read', db: 'other'}]});
+ assert.commandFailedWithCode(res, authzErrorCode);
- // Granting roles from other dbs should fail
- res = db.runCommand({grantRolesToUser: 'spencer', roles: [{role: 'read', db: 'other'}]});
- assert.commandFailedWithCode(res, authzErrorCode);
-
- // Granting roles from this db to users in another db, however, should work
- res = admindb.runCommand({grantRolesToUser: 'otherUser',
- roles: [{role: 'read', db: 'test'}]});
- assert.commandWorked(res);
- })();
+ // Granting roles from this db to users in another db, however, should work
+ res = admindb.runCommand(
+ {grantRolesToUser: 'otherUser', roles: [{role: 'read', db: 'test'}]});
+ assert.commandWorked(res);
+ })();
(function testRevokeRole() {
- jsTestLog("Testing revoking roles");
-
- var res = db.runCommand({revokeRolesFromUser: 'spencer', roles: ['read']});
- assert.commandFailedWithCode(res, authzErrorCode);
+ jsTestLog("Testing revoking roles");
- res = db.runCommand({revokeRolesFromRole: 'testRole', roles: ['read']});
- assert.commandFailedWithCode(res, authzErrorCode);
+ var res = db.runCommand({revokeRolesFromUser: 'spencer', roles: ['read']});
+ assert.commandFailedWithCode(res, authzErrorCode);
- res = admindb.runCommand({revokeRolesFromUser: 'otherUser',
- roles: [{role: 'read', db: 'test'}]});
- assert.commandFailedWithCode(res, authzErrorCode);
+ res = db.runCommand({revokeRolesFromRole: 'testRole', roles: ['read']});
+ assert.commandFailedWithCode(res, authzErrorCode);
+ res = admindb.runCommand(
+ {revokeRolesFromUser: 'otherUser', roles: [{role: 'read', db: 'test'}]});
+ assert.commandFailedWithCode(res, authzErrorCode);
- testUserAdmin.grantPrivilegesToRole('testRole', [{resource: {db: 'test', collection: ''},
- actions: ['revokeRole']}]);
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole', [{resource: {db: 'test', collection: ''}, actions: ['revokeRole']}]);
+ assert.commandWorked(db.runCommand({revokeRolesFromUser: 'spencer', roles: ['read']}));
+ assert.commandWorked(db.runCommand({revokeRolesFromRole: 'testRole', roles: ['read']}));
- assert.commandWorked(db.runCommand({revokeRolesFromUser: 'spencer', roles: ['read']}));
- assert.commandWorked(db.runCommand({revokeRolesFromRole: 'testRole', roles: ['read']}));
+ // Revoking roles from other dbs should fail
+ res = db.runCommand({revokeRolesFromUser: 'spencer', roles: [{role: 'read', db: 'other'}]});
+ assert.commandFailedWithCode(res, authzErrorCode);
- // Revoking roles from other dbs should fail
- res = db.runCommand({revokeRolesFromUser: 'spencer',
- roles: [{role: 'read', db: 'other'}]});
- assert.commandFailedWithCode(res, authzErrorCode);
-
- // Revoking roles from this db from users in another db, however, should work
- res = admindb.runCommand({revokeRolesFromUser: 'otherUser',
- roles: [{role: 'read', db: 'test'}]});
- assert.commandWorked(res);
- })();
+ // Revoking roles from this db from users in another db, however, should work
+ res = admindb.runCommand(
+ {revokeRolesFromUser: 'otherUser', roles: [{role: 'read', db: 'test'}]});
+ assert.commandWorked(res);
+ })();
(function testGrantPrivileges() {
- jsTestLog("Testing granting privileges");
-
- testUserAdmin.revokePrivilegesFromRole('testRole',
- [{resource: {db: 'test', collection: ''},
- actions: ['grantRole']}]);
-
-
- var res = db.runCommand({createRole: 'testRole2',
- roles: [],
- privileges: [{resource: {db: 'test', collection: ''},
- actions: ['find']}]});
- assert.commandFailedWithCode(res, authzErrorCode);
-
- res = db.runCommand({grantPrivilegesToRole: 'testRole',
- privileges: [{resource: {db: 'test', collection: ''},
- actions: ['find']}]});
- assert.commandFailedWithCode(res, authzErrorCode);
-
- res = admindb.runCommand({grantPrivilegesToRole: 'adminRole',
- privileges: [{resource: {db: 'test', collection: ''},
- actions: ['find']}]});
- assert.commandFailedWithCode(res, authzErrorCode);
-
-
- testUserAdmin.grantPrivilegesToRole('testRole', [{resource: {db: 'test', collection: ''},
- actions: ['grantRole']}]);
-
-
- res = db.runCommand({createRole: 'testRole2',
- roles: [],
- privileges: [{resource: {db: 'test', collection: ''},
- actions: ['find']}]});
- assert.commandWorked(res);
-
- res = db.runCommand({grantPrivilegesToRole: 'testRole',
- privileges: [{resource: {db: 'test', collection: ''},
- actions: ['find']}]});
- assert.commandWorked(res);
-
- // Granting privileges from other dbs should fail
- res = db.runCommand({grantPrivilegesToRole: 'testRole',
- privileges: [{resource: {db: 'other', collection: ''},
- actions: ['find']}]});
- assert.commandFailedWithCode(res, authzErrorCode);
-
- // Granting privileges from this db to users in another db, however, should work
- res = admindb.runCommand({grantPrivilegesToRole: 'adminRole',
- privileges: [{resource: {db: 'test', collection: ''},
- actions: ['find']}]});
- assert.commandWorked(res);
- })();
+ jsTestLog("Testing granting privileges");
+
+ testUserAdmin.revokePrivilegesFromRole(
+ 'testRole', [{resource: {db: 'test', collection: ''}, actions: ['grantRole']}]);
+
+ var res = db.runCommand({
+ createRole: 'testRole2',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['find']}]
+ });
+ assert.commandFailedWithCode(res, authzErrorCode);
+
+ res = db.runCommand({
+ grantPrivilegesToRole: 'testRole',
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['find']}]
+ });
+ assert.commandFailedWithCode(res, authzErrorCode);
+
+ res = admindb.runCommand({
+ grantPrivilegesToRole: 'adminRole',
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['find']}]
+ });
+ assert.commandFailedWithCode(res, authzErrorCode);
+
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole', [{resource: {db: 'test', collection: ''}, actions: ['grantRole']}]);
+
+ res = db.runCommand({
+ createRole: 'testRole2',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['find']}]
+ });
+ assert.commandWorked(res);
+
+ res = db.runCommand({
+ grantPrivilegesToRole: 'testRole',
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['find']}]
+ });
+ assert.commandWorked(res);
+
+ // Granting privileges from other dbs should fail
+ res = db.runCommand({
+ grantPrivilegesToRole: 'testRole',
+ privileges: [{resource: {db: 'other', collection: ''}, actions: ['find']}]
+ });
+ assert.commandFailedWithCode(res, authzErrorCode);
+
+ // Granting privileges from this db to users in another db, however, should work
+ res = admindb.runCommand({
+ grantPrivilegesToRole: 'adminRole',
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['find']}]
+ });
+ assert.commandWorked(res);
+ })();
(function testRevokePrivileges() {
- jsTestLog("Testing revoking privileges");
-
- testUserAdmin.revokePrivilegesFromRole('testRole',
- [{resource: {db: 'test', collection: ''},
- actions: ['revokeRole']}]);
-
-
- var res = db.runCommand({revokePrivilegesFromRole: 'testRole',
- privileges: [{resource: {db: 'test', collection: ''},
- actions: ['find']}]});
- assert.commandFailedWithCode(res, authzErrorCode);
-
- res = admindb.runCommand({revokePrivilegesFromRole: 'adminRole',
- privileges: [{resource: {db: 'test', collection: ''},
- actions: ['find']}]});
- assert.commandFailedWithCode(res, authzErrorCode);
-
-
- testUserAdmin.grantPrivilegesToRole('testRole', [{resource: {db: 'test', collection: ''},
- actions: ['revokeRole']}]);
-
-
- res = db.runCommand({revokePrivilegesFromRole: 'testRole',
- privileges: [{resource: {db: 'test', collection: ''},
- actions: ['find']}]});
- assert.commandWorked(res);
-
- // Revoking privileges from other dbs should fail
- res = db.runCommand({revokePrivilegesFromRole: 'testRole',
- privileges: [{resource: {db: 'other', collection: ''},
- actions: ['find']}]});
- assert.commandFailedWithCode(res, authzErrorCode);
-
- // Granting privileges from this db to users in another db, however, should work
- res = admindb.runCommand({revokePrivilegesFromRole: 'adminRole',
- privileges: [{resource: {db: 'test', collection: ''},
- actions: ['find']}]});
- assert.commandWorked(res);
- })();
+ jsTestLog("Testing revoking privileges");
+
+ testUserAdmin.revokePrivilegesFromRole(
+ 'testRole', [{resource: {db: 'test', collection: ''}, actions: ['revokeRole']}]);
+
+ var res = db.runCommand({
+ revokePrivilegesFromRole: 'testRole',
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['find']}]
+ });
+ assert.commandFailedWithCode(res, authzErrorCode);
+
+ res = admindb.runCommand({
+ revokePrivilegesFromRole: 'adminRole',
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['find']}]
+ });
+ assert.commandFailedWithCode(res, authzErrorCode);
+
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole', [{resource: {db: 'test', collection: ''}, actions: ['revokeRole']}]);
+
+ res = db.runCommand({
+ revokePrivilegesFromRole: 'testRole',
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['find']}]
+ });
+ assert.commandWorked(res);
+
+ // Revoking privileges from other dbs should fail
+ res = db.runCommand({
+ revokePrivilegesFromRole: 'testRole',
+ privileges: [{resource: {db: 'other', collection: ''}, actions: ['find']}]
+ });
+ assert.commandFailedWithCode(res, authzErrorCode);
+
+ // Granting privileges from this db to users in another db, however, should work
+ res = admindb.runCommand({
+ revokePrivilegesFromRole: 'adminRole',
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['find']}]
+ });
+ assert.commandWorked(res);
+ })();
}
-
jsTest.log('Test standalone');
-var conn = MongoRunner.runMongod({ auth: '' });
+var conn = MongoRunner.runMongod({auth: ''});
runTest(conn);
MongoRunner.stopMongod(conn.port);
jsTest.log('Test sharding');
-var st = new ShardingTest({ shards: 2, config: 3, keyFile: 'jstests/libs/key1' });
+var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
runTest(st.s);
st.stop();
diff --git a/jstests/auth/basic_role_auth.js b/jstests/auth/basic_role_auth.js
index 8b50f70671b..f44a331fa95 100644
--- a/jstests/auth/basic_role_auth.js
+++ b/jstests/auth/basic_role_auth.js
@@ -13,86 +13,59 @@
*/
var AUTH_INFO = {
admin: {
- root: {
- pwd: 'root',
- roles: [ 'root' ]
- },
- cluster: {
- pwd: 'cluster',
- roles: [ 'clusterAdmin' ]
- },
- anone: {
- pwd: 'none',
- roles: []
- },
- aro: {
- pwd: 'ro',
- roles: [ 'read' ]
- },
- arw: {
- pwd: 'rw',
- roles: [ 'readWrite' ]
- },
- aadmin: {
- pwd: 'admin',
- roles: [ 'dbAdmin' ]
- },
- auadmin: {
- pwd: 'uadmin',
- roles: [ 'userAdmin' ]
- },
- any_ro: {
- pwd: 'ro',
- roles: [ 'readAnyDatabase' ]
- },
- any_rw: {
- pwd: 'rw',
- roles: [ 'readWriteAnyDatabase' ]
- },
- any_admin: {
- pwd: 'admin',
- roles: [ 'dbAdminAnyDatabase' ]
- },
- any_uadmin: {
- pwd: 'uadmin',
- roles: [ 'userAdminAnyDatabase' ]
- }
+ root: {pwd: 'root', roles: ['root']},
+ cluster: {pwd: 'cluster', roles: ['clusterAdmin']},
+ anone: {pwd: 'none', roles: []},
+ aro: {pwd: 'ro', roles: ['read']},
+ arw: {pwd: 'rw', roles: ['readWrite']},
+ aadmin: {pwd: 'admin', roles: ['dbAdmin']},
+ auadmin: {pwd: 'uadmin', roles: ['userAdmin']},
+ any_ro: {pwd: 'ro', roles: ['readAnyDatabase']},
+ any_rw: {pwd: 'rw', roles: ['readWriteAnyDatabase']},
+ any_admin: {pwd: 'admin', roles: ['dbAdminAnyDatabase']},
+ any_uadmin: {pwd: 'uadmin', roles: ['userAdminAnyDatabase']}
},
test: {
- none: {
- pwd: 'none',
- roles: []
- },
- ro: {
- pwd: 'ro',
- roles: [ 'read' ]
- },
- rw: {
- pwd: 'rw',
- roles: [ 'readWrite' ]
- },
- roadmin: {
- pwd: 'roadmin',
- roles: [ 'read', 'dbAdmin' ]
- },
- admin: {
- pwd: 'admin',
- roles: [ 'dbAdmin' ]
- },
- uadmin: {
- pwd: 'uadmin',
- roles: [ 'userAdmin' ]
- }
+ none: {pwd: 'none', roles: []},
+ ro: {pwd: 'ro', roles: ['read']},
+ rw: {pwd: 'rw', roles: ['readWrite']},
+ roadmin: {pwd: 'roadmin', roles: ['read', 'dbAdmin']},
+ admin: {pwd: 'admin', roles: ['dbAdmin']},
+ uadmin: {pwd: 'uadmin', roles: ['userAdmin']}
}
};
// Constants that lists the privileges of a given role.
-var READ_PERM = { query: 1, index_r: 1, killCursor: 1 };
-var READ_WRITE_PERM = { insert: 1, update: 1, remove: 1, query: 1,
- index_r: 1, index_w: 1, killCursor: 1 };
-var ADMIN_PERM = { index_r: 1, index_w: 1, profile_r: 1 };
-var UADMIN_PERM = { user_r: 1, user_w: 1 };
-var CLUSTER_PERM = { killOp: 1, currentOp: 1, fsync_unlock: 1, killCursor: 1, profile_r: 1 };
+var READ_PERM = {
+ query: 1,
+ index_r: 1,
+ killCursor: 1
+};
+var READ_WRITE_PERM = {
+ insert: 1,
+ update: 1,
+ remove: 1,
+ query: 1,
+ index_r: 1,
+ index_w: 1,
+ killCursor: 1
+};
+var ADMIN_PERM = {
+ index_r: 1,
+ index_w: 1,
+ profile_r: 1
+};
+var UADMIN_PERM = {
+ user_r: 1,
+ user_w: 1
+};
+var CLUSTER_PERM = {
+ killOp: 1,
+ currentOp: 1,
+ fsync_unlock: 1,
+ killCursor: 1,
+ profile_r: 1
+};
/**
* Checks whether an error occurs after running an operation.
@@ -111,10 +84,9 @@ var checkErr = function(shouldPass, opFunc) {
success = false;
}
- assert(success == shouldPass, 'expected shouldPass: ' + shouldPass +
- ', got: ' + success +
- ', op: ' + tojson(opFunc) +
- ', exception: ' + tojson(exception));
+ assert(success == shouldPass,
+ 'expected shouldPass: ' + shouldPass + ', got: ' + success + ', op: ' + tojson(opFunc) +
+ ', exception: ' + tojson(exception));
};
/**
@@ -129,76 +101,91 @@ var checkErr = function(shouldPass, opFunc) {
* fsync_unlock.
*/
var testOps = function(db, allowedActions) {
- checkErr(allowedActions.hasOwnProperty('insert'), function() {
- var res = db.user.insert({ y: 1 });
- if (res.hasWriteError()) throw Error("insert failed: " + tojson(res.getRawResponse()));
- });
-
- checkErr(allowedActions.hasOwnProperty('update'), function() {
- var res = db.user.update({ y: 1 }, { z: 3 });
- if (res.hasWriteError()) throw Error("update failed: " + tojson(res.getRawResponse()));
- });
-
- checkErr(allowedActions.hasOwnProperty('remove'), function() {
- var res = db.user.remove({ y: 1 });
- if (res.hasWriteError()) throw Error("remove failed: " + tojson(res.getRawResponse()));
- });
-
- checkErr(allowedActions.hasOwnProperty('query'), function() {
- db.user.findOne({ y: 1 });
- });
-
- checkErr(allowedActions.hasOwnProperty('killOp'), function() {
- var errorCodeUnauthorized = 13;
- var res = db.killOp(1);
-
- if (res.code == errorCodeUnauthorized) {
- throw Error("unauthorized killOp");
- }
- });
-
- checkErr(allowedActions.hasOwnProperty('currentOp'), function() {
- var errorCodeUnauthorized = 13;
- var res = db.currentOp();
-
- if (res.code == errorCodeUnauthorized) {
- throw Error("unauthorized currentOp");
- }
- });
-
- checkErr(allowedActions.hasOwnProperty('index_r'), function() {
- db.system.indexes.findOne();
- });
-
- checkErr(allowedActions.hasOwnProperty('index_w'), function() {
- var res = db.user.ensureIndex({ x: 1 });
- if (res.code == 13) { // Unauthorized
- throw Error("unauthorized currentOp");
- }
- });
-
- checkErr(allowedActions.hasOwnProperty('profile_r'), function() {
- db.system.profile.findOne();
- });
-
- checkErr(allowedActions.hasOwnProperty('profile_w'), function() {
- var res = db.system.profile.insert({ x: 1 });
- if (res.hasWriteError()) {
- throw Error("profile insert failed: " + tojson(res.getRawResponse()));
- }
- });
-
- checkErr(allowedActions.hasOwnProperty('user_r'), function() {
- var result = db.runCommand({usersInfo: 1});
- if (!result.ok) {
- throw new Error(tojson(result));
- }
- });
-
- checkErr(allowedActions.hasOwnProperty('user_w'), function() {
- db.createUser({user:'a', pwd: 'a', roles: jsTest.basicUserRoles});
- assert(db.dropUser('a'));
- });
+ checkErr(allowedActions.hasOwnProperty('insert'),
+ function() {
+ var res = db.user.insert({y: 1});
+ if (res.hasWriteError())
+ throw Error("insert failed: " + tojson(res.getRawResponse()));
+ });
+
+ checkErr(allowedActions.hasOwnProperty('update'),
+ function() {
+ var res = db.user.update({y: 1}, {z: 3});
+ if (res.hasWriteError())
+ throw Error("update failed: " + tojson(res.getRawResponse()));
+ });
+
+ checkErr(allowedActions.hasOwnProperty('remove'),
+ function() {
+ var res = db.user.remove({y: 1});
+ if (res.hasWriteError())
+ throw Error("remove failed: " + tojson(res.getRawResponse()));
+ });
+
+ checkErr(allowedActions.hasOwnProperty('query'),
+ function() {
+ db.user.findOne({y: 1});
+ });
+
+ checkErr(allowedActions.hasOwnProperty('killOp'),
+ function() {
+ var errorCodeUnauthorized = 13;
+ var res = db.killOp(1);
+
+ if (res.code == errorCodeUnauthorized) {
+ throw Error("unauthorized killOp");
+ }
+ });
+
+ checkErr(allowedActions.hasOwnProperty('currentOp'),
+ function() {
+ var errorCodeUnauthorized = 13;
+ var res = db.currentOp();
+
+ if (res.code == errorCodeUnauthorized) {
+ throw Error("unauthorized currentOp");
+ }
+ });
+
+ checkErr(allowedActions.hasOwnProperty('index_r'),
+ function() {
+ db.system.indexes.findOne();
+ });
+
+ checkErr(allowedActions.hasOwnProperty('index_w'),
+ function() {
+ var res = db.user.ensureIndex({x: 1});
+ if (res.code == 13) { // Unauthorized
+ throw Error("unauthorized currentOp");
+ }
+ });
+
+ checkErr(allowedActions.hasOwnProperty('profile_r'),
+ function() {
+ db.system.profile.findOne();
+ });
+
+ checkErr(allowedActions.hasOwnProperty('profile_w'),
+ function() {
+ var res = db.system.profile.insert({x: 1});
+ if (res.hasWriteError()) {
+ throw Error("profile insert failed: " + tojson(res.getRawResponse()));
+ }
+ });
+
+ checkErr(allowedActions.hasOwnProperty('user_r'),
+ function() {
+ var result = db.runCommand({usersInfo: 1});
+ if (!result.ok) {
+ throw new Error(tojson(result));
+ }
+ });
+
+ checkErr(allowedActions.hasOwnProperty('user_w'),
+ function() {
+ db.createUser({user: 'a', pwd: 'a', roles: jsTest.basicUserRoles});
+ assert(db.dropUser('a'));
+ });
// Test for kill cursor
(function() {
@@ -208,8 +195,7 @@ var testOps = function(db, allowedActions) {
if (db2 == 'admin') {
assert.eq(1, db2.auth('aro', AUTH_INFO.admin.aro.pwd));
- }
- else {
+ } else {
assert.eq(1, db2.auth('ro', AUTH_INFO.test.ro.pwd));
}
@@ -218,33 +204,35 @@ var testOps = function(db, allowedActions) {
db.killCursor(cursor.id());
// Send a synchronous message to make sure that kill cursor was processed
// before proceeding.
- db.runCommand({ whatsmyuri: 1 });
-
- checkErr(!allowedActions.hasOwnProperty('killCursor'), function() {
- while (cursor.hasNext()) {
- var next = cursor.next();
-
- // This is a failure in mongos case. Standalone case will fail
- // when next() was called.
- if (next.code == 16336) {
- // could not find cursor in cache for id
- throw next.$err;
- }
- }
- });
- }); // TODO: enable test after SERVER-5813 is fixed.
-
- var isMongos = db.runCommand({ isdbgrid: 1 }).isdbgrid;
+ db.runCommand({whatsmyuri: 1});
+
+ checkErr(!allowedActions.hasOwnProperty('killCursor'),
+ function() {
+ while (cursor.hasNext()) {
+ var next = cursor.next();
+
+ // This is a failure in mongos case. Standalone case will fail
+ // when next() was called.
+ if (next.code == 16336) {
+ // could not find cursor in cache for id
+ throw next.$err;
+ }
+ }
+ });
+ }); // TODO: enable test after SERVER-5813 is fixed.
+
+ var isMongos = db.runCommand({isdbgrid: 1}).isdbgrid;
// Note: fsyncUnlock is not supported in mongos.
- if (!isMongos){
- checkErr(allowedActions.hasOwnProperty('fsync_unlock'), function() {
- var res = db.fsyncUnlock();
- var errorCodeUnauthorized = 13;
-
- if (res.code == errorCodeUnauthorized) {
- throw Error("unauthorized unauthorized fsyncUnlock");
- }
- });
+ if (!isMongos) {
+ checkErr(allowedActions.hasOwnProperty('fsync_unlock'),
+ function() {
+ var res = db.fsyncUnlock();
+ var errorCodeUnauthorized = 13;
+
+ if (res.code == errorCodeUnauthorized) {
+ throw Error("unauthorized unauthorized fsyncUnlock");
+ }
+ });
}
};
@@ -256,217 +244,217 @@ var testOps = function(db, allowedActions) {
// object.
// }
var TESTS = [
-{
- name: 'Test multiple user login separate connection',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
-
- var conn2 = new Mongo(conn.host);
- var testDB2 = conn2.getDB('test');
- assert.eq(1, testDB2.auth('uadmin', AUTH_INFO.test.uadmin.pwd));
-
- testOps(testDB, READ_PERM);
- testOps(testDB2, UADMIN_PERM);
- }
-},
-{
- name: 'Test user with no role',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('none', AUTH_INFO.test.none.pwd));
-
- testOps(testDB, {});
- }
-},
-{
- name: 'Test read only user',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
-
- testOps(testDB, READ_PERM);
- }
-},
-{
- name: 'Test read/write user',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
-
- testOps(testDB, READ_WRITE_PERM);
- }
-},
-{
- name: 'Test read + dbAdmin user',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('roadmin', AUTH_INFO.test.roadmin.pwd));
-
- var combinedPerm = Object.extend({}, READ_PERM);
- combinedPerm = Object.extend(combinedPerm, ADMIN_PERM);
- testOps(testDB, combinedPerm);
- }
-},
-{
- name: 'Test dbAdmin user',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('admin', AUTH_INFO.test.admin.pwd));
-
- testOps(testDB, ADMIN_PERM);
- }
-},
-{
- name: 'Test userAdmin user',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('uadmin', AUTH_INFO.test.uadmin.pwd));
-
- testOps(testDB, UADMIN_PERM);
- }
-},
-{
- name: 'Test cluster user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('cluster', AUTH_INFO.admin.cluster.pwd));
-
- testOps(conn.getDB('test'), CLUSTER_PERM);
- }
-},
-{
- name: 'Test admin user with no role',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('anone', AUTH_INFO.admin.anone.pwd));
-
- testOps(adminDB, {});
- testOps(conn.getDB('test'), {});
- }
-},
-{
- name: 'Test read only admin user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('aro', AUTH_INFO.admin.aro.pwd));
-
- testOps(adminDB, READ_PERM);
- testOps(conn.getDB('test'), {});
- }
-},
-{
- name: 'Test read/write admin user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('arw', AUTH_INFO.admin.arw.pwd));
-
- testOps(adminDB, READ_WRITE_PERM);
- testOps(conn.getDB('test'), {});
- }
-},
-{
- name: 'Test dbAdmin admin user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('aadmin', AUTH_INFO.admin.aadmin.pwd));
-
- testOps(adminDB, ADMIN_PERM);
- testOps(conn.getDB('test'), {});
- }
-},
-{
- name: 'Test userAdmin admin user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('auadmin', AUTH_INFO.admin.auadmin.pwd));
-
- testOps(adminDB, UADMIN_PERM);
- testOps(conn.getDB('test'), {});
- }
-},
-{
- name: 'Test read only any db user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('any_ro', AUTH_INFO.admin.any_ro.pwd));
-
- testOps(adminDB, READ_PERM);
- testOps(conn.getDB('test'), READ_PERM);
- }
-},
-{
- name: 'Test read/write any db user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('any_rw', AUTH_INFO.admin.any_rw.pwd));
-
- testOps(adminDB, READ_WRITE_PERM);
- testOps(conn.getDB('test'), READ_WRITE_PERM);
- }
-},
-{
- name: 'Test dbAdmin any db user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('any_admin', AUTH_INFO.admin.any_admin.pwd));
-
- testOps(adminDB, ADMIN_PERM);
- testOps(conn.getDB('test'), ADMIN_PERM);
- }
-},
-{
- name: 'Test userAdmin any db user',
- test: function(conn) {
- var adminDB = conn.getDB('admin');
- assert.eq(1, adminDB.auth('any_uadmin', AUTH_INFO.admin.any_uadmin.pwd));
-
- testOps(adminDB, UADMIN_PERM);
- testOps(conn.getDB('test'), UADMIN_PERM);
- }
-},
-
-{
- name: 'Test change role',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
-
- var newConn = new Mongo(conn.host);
- assert.eq(1, newConn.getDB('admin').auth('any_uadmin', AUTH_INFO.admin.any_uadmin.pwd));
- newConn.getDB('test').updateUser('rw', {roles: ['read']});
- var origSpec = newConn.getDB("test").getUser("rw");
-
- // role change should affect users already authenticated.
- testOps(testDB, READ_PERM);
-
- // role change should affect active connections.
- testDB.runCommand({ logout: 1 });
- assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
- testOps(testDB, READ_PERM);
-
- // role change should also affect new connections.
- var newConn3 = new Mongo(conn.host);
- var testDB3 = newConn3.getDB('test');
- assert.eq(1, testDB3.auth('rw', AUTH_INFO.test.rw.pwd));
- testOps(testDB3, READ_PERM);
-
- newConn.getDB('test').updateUser('rw', {roles: origSpec.roles});
- }
-},
+ {
+ name: 'Test multiple user login separate connection',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
+
+ var conn2 = new Mongo(conn.host);
+ var testDB2 = conn2.getDB('test');
+ assert.eq(1, testDB2.auth('uadmin', AUTH_INFO.test.uadmin.pwd));
+
+ testOps(testDB, READ_PERM);
+ testOps(testDB2, UADMIN_PERM);
+ }
+ },
+ {
+ name: 'Test user with no role',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('none', AUTH_INFO.test.none.pwd));
+
+ testOps(testDB, {});
+ }
+ },
+ {
+ name: 'Test read only user',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
+
+ testOps(testDB, READ_PERM);
+ }
+ },
+ {
+ name: 'Test read/write user',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
+
+ testOps(testDB, READ_WRITE_PERM);
+ }
+ },
+ {
+ name: 'Test read + dbAdmin user',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('roadmin', AUTH_INFO.test.roadmin.pwd));
+
+ var combinedPerm = Object.extend({}, READ_PERM);
+ combinedPerm = Object.extend(combinedPerm, ADMIN_PERM);
+ testOps(testDB, combinedPerm);
+ }
+ },
+ {
+ name: 'Test dbAdmin user',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('admin', AUTH_INFO.test.admin.pwd));
+
+ testOps(testDB, ADMIN_PERM);
+ }
+ },
+ {
+ name: 'Test userAdmin user',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('uadmin', AUTH_INFO.test.uadmin.pwd));
+
+ testOps(testDB, UADMIN_PERM);
+ }
+ },
+ {
+ name: 'Test cluster user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('cluster', AUTH_INFO.admin.cluster.pwd));
+
+ testOps(conn.getDB('test'), CLUSTER_PERM);
+ }
+ },
+ {
+ name: 'Test admin user with no role',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('anone', AUTH_INFO.admin.anone.pwd));
+
+ testOps(adminDB, {});
+ testOps(conn.getDB('test'), {});
+ }
+ },
+ {
+ name: 'Test read only admin user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('aro', AUTH_INFO.admin.aro.pwd));
+
+ testOps(adminDB, READ_PERM);
+ testOps(conn.getDB('test'), {});
+ }
+ },
+ {
+ name: 'Test read/write admin user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('arw', AUTH_INFO.admin.arw.pwd));
+
+ testOps(adminDB, READ_WRITE_PERM);
+ testOps(conn.getDB('test'), {});
+ }
+ },
+ {
+ name: 'Test dbAdmin admin user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('aadmin', AUTH_INFO.admin.aadmin.pwd));
+
+ testOps(adminDB, ADMIN_PERM);
+ testOps(conn.getDB('test'), {});
+ }
+ },
+ {
+ name: 'Test userAdmin admin user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('auadmin', AUTH_INFO.admin.auadmin.pwd));
+
+ testOps(adminDB, UADMIN_PERM);
+ testOps(conn.getDB('test'), {});
+ }
+ },
+ {
+ name: 'Test read only any db user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('any_ro', AUTH_INFO.admin.any_ro.pwd));
+
+ testOps(adminDB, READ_PERM);
+ testOps(conn.getDB('test'), READ_PERM);
+ }
+ },
+ {
+ name: 'Test read/write any db user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('any_rw', AUTH_INFO.admin.any_rw.pwd));
+
+ testOps(adminDB, READ_WRITE_PERM);
+ testOps(conn.getDB('test'), READ_WRITE_PERM);
+ }
+ },
+ {
+ name: 'Test dbAdmin any db user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('any_admin', AUTH_INFO.admin.any_admin.pwd));
+
+ testOps(adminDB, ADMIN_PERM);
+ testOps(conn.getDB('test'), ADMIN_PERM);
+ }
+ },
+ {
+ name: 'Test userAdmin any db user',
+ test: function(conn) {
+ var adminDB = conn.getDB('admin');
+ assert.eq(1, adminDB.auth('any_uadmin', AUTH_INFO.admin.any_uadmin.pwd));
+
+ testOps(adminDB, UADMIN_PERM);
+ testOps(conn.getDB('test'), UADMIN_PERM);
+ }
+ },
-{
- name: 'Test override user',
- test: function(conn) {
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
- assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
- testOps(testDB, READ_PERM);
+ {
+ name: 'Test change role',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
+
+ var newConn = new Mongo(conn.host);
+ assert.eq(1, newConn.getDB('admin').auth('any_uadmin', AUTH_INFO.admin.any_uadmin.pwd));
+ newConn.getDB('test').updateUser('rw', {roles: ['read']});
+ var origSpec = newConn.getDB("test").getUser("rw");
+
+ // role change should affect users already authenticated.
+ testOps(testDB, READ_PERM);
+
+ // role change should affect active connections.
+ testDB.runCommand({logout: 1});
+ assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
+ testOps(testDB, READ_PERM);
+
+ // role change should also affect new connections.
+ var newConn3 = new Mongo(conn.host);
+ var testDB3 = newConn3.getDB('test');
+ assert.eq(1, testDB3.auth('rw', AUTH_INFO.test.rw.pwd));
+ testOps(testDB3, READ_PERM);
+
+ newConn.getDB('test').updateUser('rw', {roles: origSpec.roles});
+ }
+ },
- testDB.runCommand({ logout: 1 });
- testOps(testDB, {});
+ {
+ name: 'Test override user',
+ test: function(conn) {
+ var testDB = conn.getDB('test');
+ assert.eq(1, testDB.auth('rw', AUTH_INFO.test.rw.pwd));
+ assert.eq(1, testDB.auth('ro', AUTH_INFO.test.ro.pwd));
+ testOps(testDB, READ_PERM);
+
+ testDB.runCommand({logout: 1});
+ testOps(testDB, {});
+ }
}
-}
];
/**
@@ -480,13 +468,13 @@ var runTests = function(conn) {
var testDB = conn.getDB('test');
var adminDB = conn.getDB('admin');
- adminDB.createUser({ user: 'root', pwd: AUTH_INFO.admin.root.pwd,
- roles: AUTH_INFO.admin.root.roles });
+ adminDB.createUser(
+ {user: 'root', pwd: AUTH_INFO.admin.root.pwd, roles: AUTH_INFO.admin.root.roles});
adminDB.auth('root', AUTH_INFO.admin.root.pwd);
for (var x = 0; x < 10; x++) {
- testDB.kill_cursor.insert({ x: x });
- adminDB.kill_cursor.insert({ x: x });
+ testDB.kill_cursor.insert({x: x});
+ adminDB.kill_cursor.insert({x: x});
}
for (var dbName in AUTH_INFO) {
@@ -499,12 +487,11 @@ var runTests = function(conn) {
}
var info = dbObj[userName];
- conn.getDB(dbName).createUser({ user: userName,
- pwd: info.pwd, roles: info.roles });
+ conn.getDB(dbName).createUser({user: userName, pwd: info.pwd, roles: info.roles});
}
}
- adminDB.runCommand({ logout: 1 });
+ adminDB.runCommand({logout: 1});
};
var teardown = function() {
@@ -533,17 +520,19 @@ var runTests = function(conn) {
if (failures.length > 0) {
var list = '';
- failures.forEach(function(test) { list += (test + '\n'); });
+ failures.forEach(function(test) {
+ list += (test + '\n');
+ });
throw Error('Tests failed:\n' + list);
}
};
-var conn = MongoRunner.runMongod({ auth: '' });
+var conn = MongoRunner.runMongod({auth: ''});
runTests(conn);
MongoRunner.stopMongod(conn.port);
jsTest.log('Test sharding');
-var st = new ShardingTest({ shards: 1, keyFile: 'jstests/libs/key1' });
+var st = new ShardingTest({shards: 1, keyFile: 'jstests/libs/key1'});
runTests(st.s);
st.stop();
diff --git a/jstests/auth/builtin_roles_system_colls.js b/jstests/auth/builtin_roles_system_colls.js
index 0ce2d98664d..a6403311291 100644
--- a/jstests/auth/builtin_roles_system_colls.js
+++ b/jstests/auth/builtin_roles_system_colls.js
@@ -9,38 +9,42 @@
// the users collection easier if you have a lot of users, etc.
function testUserAdminAnyDatabaseSystemCollIndexing(adminDB) {
adminDB.auth("root", "pwd");
- adminDB.createUser({ user: "king", pwd: "pwd", roles: ["userAdminAnyDatabase"] });
+ adminDB.createUser({user: "king", pwd: "pwd", roles: ["userAdminAnyDatabase"]});
adminDB.logout();
adminDB.auth("king", "pwd");
- assert.commandWorked(adminDB.system.users.createIndex({ db: 1 }));
- assert.commandWorked(adminDB.system.roles.createIndex({ db: 1 }));
- assert.commandWorked(adminDB.system.users.dropIndex({ db: 1 }));
- assert.commandWorked(adminDB.system.roles.dropIndex({ db: 1 }));
+ assert.commandWorked(adminDB.system.users.createIndex({db: 1}));
+ assert.commandWorked(adminDB.system.roles.createIndex({db: 1}));
+ assert.commandWorked(adminDB.system.users.dropIndex({db: 1}));
+ assert.commandWorked(adminDB.system.roles.dropIndex({db: 1}));
adminDB.logout();
}
-
// SERVER-14701: the backup role should be able to run the
// collstats command on all resouces, including system resources.
function testBackupSystemCollStats(adminDB) {
adminDB.auth("root", "pwd");
- adminDB.createUser({ user: "backup-agent", pwd: "pwd", roles: ["backup"] });
- adminDB.system.js.save({ _id: "testFunction", value: function (x){ return x; }});
+ adminDB.createUser({user: "backup-agent", pwd: "pwd", roles: ["backup"]});
+ adminDB.system.js.save({
+ _id: "testFunction",
+ value: function(x) {
+ return x;
+ }
+ });
adminDB.logout();
adminDB.auth("backup-agent", "pwd");
- assert.commandWorked(adminDB.runCommand({ collstats: "system.users" }));
- assert.commandWorked(adminDB.runCommand({ collstats: "system.roles" }));
- assert.commandWorked(adminDB.runCommand({ collstats: "system.js" }));
+ assert.commandWorked(adminDB.runCommand({collstats: "system.users"}));
+ assert.commandWorked(adminDB.runCommand({collstats: "system.roles"}));
+ assert.commandWorked(adminDB.runCommand({collstats: "system.js"}));
adminDB.logout();
}
// ************************************************************
-var conn = MongoRunner.runMongod({ auth: "" });
+var conn = MongoRunner.runMongod({auth: ""});
var adminDB = conn.getDB("admin");
-adminDB.createUser({ user: "root", pwd: "pwd", roles: ["root"] });
+adminDB.createUser({user: "root", pwd: "pwd", roles: ["root"]});
testUserAdminAnyDatabaseSystemCollIndexing(adminDB);
testBackupSystemCollStats(adminDB);
diff --git a/jstests/auth/clac_system_colls.js b/jstests/auth/clac_system_colls.js
index 3f1782cfbb9..527f6c29e5d 100644
--- a/jstests/auth/clac_system_colls.js
+++ b/jstests/auth/clac_system_colls.js
@@ -9,53 +9,60 @@ function runTest(admindb) {
admindb.createUser({user: "admin", pwd: "pwd", roles: ["userAdminAnyDatabase"]});
assert.eq(1, admindb.auth("admin", "pwd"));
- var sysCollections = ["system.indexes", "system.js", "system.namespaces",
- "system.profile", "system.roles", "system.users"];
+ var sysCollections = [
+ "system.indexes",
+ "system.js",
+ "system.namespaces",
+ "system.profile",
+ "system.roles",
+ "system.users"
+ ];
var sysPrivs = new Array();
for (var i in sysCollections) {
- sysPrivs.push({resource: {db:admindb.getName(), collection:sysCollections[i]},
- actions:['find']});
+ sysPrivs.push(
+ {resource: {db: admindb.getName(), collection: sysCollections[i]}, actions: ['find']});
}
- var findPriv = {resource: {db:admindb.getName(), collection:""},
- actions:['find']};
+ var findPriv = {
+ resource: {db: admindb.getName(), collection: ""},
+ actions: ['find']
+ };
- admindb.createRole({role:"FindInDB",
- roles:[],
- privileges:[findPriv]});
- admindb.createRole({role:"FindOnSysRes",
- roles:[],
- privileges:sysPrivs});
+ admindb.createRole({role: "FindInDB", roles: [], privileges: [findPriv]});
+ admindb.createRole({role: "FindOnSysRes", roles: [], privileges: sysPrivs});
-
- admindb.createUser({user:"sysUser", pwd:"pwd", roles:["FindOnSysRes"]});
- admindb.createUser({user:"user", pwd:"pwd", roles:["FindInDB"]});
+ admindb.createUser({user: "sysUser", pwd: "pwd", roles: ["FindOnSysRes"]});
+ admindb.createUser({user: "user", pwd: "pwd", roles: ["FindInDB"]});
// Verify the find on all collections exludes system collections
- assert.eq(1,admindb.auth("user", "pwd"));
+ assert.eq(1, admindb.auth("user", "pwd"));
- assert.doesNotThrow(function() {admindb.foo.findOne();});
+ assert.doesNotThrow(function() {
+ admindb.foo.findOne();
+ });
for (var i in sysCollections) {
- assert.commandFailed(admindb.runCommand({count:sysCollections[i]}));
+ assert.commandFailed(admindb.runCommand({count: sysCollections[i]}));
}
// Verify that find on system collections gives find permissions
- assert.eq(1,admindb.auth("sysUser", "pwd"));
+ assert.eq(1, admindb.auth("sysUser", "pwd"));
- assert.throws(function() {admindb.foo.findOne();});
+ assert.throws(function() {
+ admindb.foo.findOne();
+ });
for (var i in sysCollections) {
- assert.commandWorked(admindb.runCommand({count:sysCollections[i]}));
+ assert.commandWorked(admindb.runCommand({count: sysCollections[i]}));
}
admindb.logout();
}
jsTest.log('Test standalone');
-var conn = MongoRunner.runMongod({ auth:'' });
+var conn = MongoRunner.runMongod({auth: ''});
runTest(conn.getDB("admin"));
MongoRunner.stopMongod(conn.port);
jsTest.log('Test sharding');
-var st = new ShardingTest({ shards:2, config:3, keyFile:'jstests/libs/key1' });
+var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
runTest(st.s.getDB("admin"));
st.stop();
diff --git a/jstests/auth/commands_builtin_roles.js b/jstests/auth/commands_builtin_roles.js
index a089278da6c..1d5f5f59756 100644
--- a/jstests/auth/commands_builtin_roles.js
+++ b/jstests/auth/commands_builtin_roles.js
@@ -52,30 +52,23 @@ function testProperAuthorization(conn, t, testcase, r) {
if (testcase.roles[r.role]) {
if (res.ok == 0 && res.code == authErrCode) {
- out = "expected authorization success" +
- " but received " + tojson(res) +
- " on db " + testcase.runOnDb +
- " with role " + r.key;
- }
- else if (res.ok == 0 && !testcase.expectFail && res.code != commandNotSupportedCode) {
+ out = "expected authorization success" + " but received " + tojson(res) + " on db " +
+ testcase.runOnDb + " with role " + r.key;
+ } else if (res.ok == 0 && !testcase.expectFail && res.code != commandNotSupportedCode) {
// don't error if the test failed with code commandNotSupported since
// some storage engines (e.g wiredTiger) don't support some commands (e.g. touch)
- out = "command failed with " + tojson(res) +
- " on db " + testcase.runOnDb +
- " with role " + r.key;
+ out = "command failed with " + tojson(res) + " on db " + testcase.runOnDb +
+ " with role " + r.key;
}
// test can provide a function that will run if
// the command completed successfully
else if (testcase.onSuccess) {
testcase.onSuccess(res);
}
- }
- else {
+ } else {
if (res.ok == 1 || (res.ok == 0 && res.code != authErrCode)) {
- out = "expected authorization failure" +
- " but received result " + tojson(res) +
- " on db " + testcase.runOnDb +
- " with role " + r.key;
+ out = "expected authorization failure" + " but received result " + tojson(res) +
+ " on db " + testcase.runOnDb + " with role " + r.key;
}
}
@@ -105,11 +98,7 @@ function runOneTest(conn, t) {
function createUsers(conn) {
var adminDb = conn.getDB(adminDbName);
- adminDb.createUser({
- user: "admin",
- pwd: "password",
- roles: ["__system"]
- });
+ adminDb.createUser({user: "admin", pwd: "password", roles: ["__system"]});
assert(adminDb.auth("admin", "password"));
for (var i = 0; i < roles.length; i++) {
@@ -138,15 +127,16 @@ function checkForNonExistentRoles() {
break;
}
}
- assert(roleExists, "Role " + role + " found in test: " + test.testname +
- ", but doesn't exist in roles array");
+ assert(roleExists,
+ "Role " + role + " found in test: " + test.testname +
+ ", but doesn't exist in roles array");
}
}
}
-}
+}
var opts = {
- auth:"",
+ auth: "",
enableExperimentalStorageDetailsCmd: ""
};
var impls = {
@@ -162,12 +152,7 @@ authCommandsLib.runTests(conn, impls);
MongoRunner.stopMongod(conn);
// run all tests sharded
-conn = new ShardingTest({
- shards: 2,
- mongos: 1,
- keyFile: "jstests/libs/key1",
- other: { shardOptions: opts }
-});
+conn = new ShardingTest(
+ {shards: 2, mongos: 1, keyFile: "jstests/libs/key1", other: {shardOptions: opts}});
authCommandsLib.runTests(conn, impls);
conn.stop();
-
diff --git a/jstests/auth/commands_user_defined_roles.js b/jstests/auth/commands_user_defined_roles.js
index 1d567df7462..8a7402f033d 100644
--- a/jstests/auth/commands_user_defined_roles.js
+++ b/jstests/auth/commands_user_defined_roles.js
@@ -23,10 +23,8 @@ function testProperAuthorization(conn, t, testcase) {
authCommandsLib.setup(conn, t, runOnDb);
adminDb.auth("admin", "password");
- assert.commandWorked(adminDb.runCommand({
- updateRole: testRole,
- privileges: testcase.privileges
- }));
+ assert.commandWorked(
+ adminDb.runCommand({updateRole: testRole, privileges: testcase.privileges}));
adminDb.logout();
assert(adminDb.auth(testUser, "password"));
@@ -36,15 +34,11 @@ function testProperAuthorization(conn, t, testcase) {
if (!testcase.expectFail && res.ok != 1 && res.code != commandNotSupportedCode) {
// don't error if the test failed with code commandNotSupported since
// some storage engines (e.g wiredTiger) don't support some commands (e.g. touch)
- out = "command failed with " + tojson(res) +
- " on db " + testcase.runOnDb +
- " with privileges " + tojson(testcase.privileges);
- }
- else if (testcase.expectFail && res.code == authErrCode) {
- out = "expected authorization success" +
- " but received " + tojson(res) +
- " on db " + testcase.runOnDb +
- " with privileges " + tojson(testcase.privileges);
+ out = "command failed with " + tojson(res) + " on db " + testcase.runOnDb +
+ " with privileges " + tojson(testcase.privileges);
+ } else if (testcase.expectFail && res.code == authErrCode) {
+ out = "expected authorization success" + " but received " + tojson(res) + " on db " +
+ testcase.runOnDb + " with privileges " + tojson(testcase.privileges);
}
firstDb.logout();
@@ -62,10 +56,7 @@ function testInsufficientPrivileges(conn, t, testcase, privileges) {
authCommandsLib.setup(conn, t, runOnDb);
adminDb.auth("admin", "password");
- assert.commandWorked(adminDb.runCommand({
- updateRole: testRole,
- privileges: privileges
- }));
+ assert.commandWorked(adminDb.runCommand({updateRole: testRole, privileges: privileges}));
adminDb.logout();
assert(adminDb.auth(testUser, "password"));
@@ -73,9 +64,8 @@ function testInsufficientPrivileges(conn, t, testcase, privileges) {
var res = runOnDb.runCommand(t.command);
if (res.ok == 1 || res.code != authErrCode) {
- out = "expected authorization failure " +
- " but received " + tojson(res) +
- " with privileges " + tojson(privileges);
+ out = "expected authorization failure " + " but received " + tojson(res) +
+ " with privileges " + tojson(privileges);
}
firstDb.logout();
@@ -105,15 +95,17 @@ function runOneTest(conn, t) {
continue;
}
- if ((privileges.length == 1 && privileges[0].actions.length > 1)
- || privileges.length > 1) {
+ if ((privileges.length == 1 && privileges[0].actions.length > 1) || privileges.length > 1) {
for (var j = 0; j < privileges.length; j++) {
var p = privileges[j];
var resource = p.resource;
var actions = p.actions;
for (var k = 0; k < actions.length; k++) {
- var privDoc = { resource: resource, actions: [actions[k]] };
+ var privDoc = {
+ resource: resource,
+ actions: [actions[k]]
+ };
msg = testInsufficientPrivileges(conn, t, testcase, [privDoc]);
if (msg) {
failures.push(t.testname + ": " + msg);
@@ -154,30 +146,19 @@ function runOneTest(conn, t) {
function createUsers(conn) {
var adminDb = conn.getDB(adminDbName);
var firstDb = conn.getDB(firstDbName);
- adminDb.createUser({
- user: "admin",
- pwd: "password",
- roles: ["__system"]
- });
+ adminDb.createUser({user: "admin", pwd: "password", roles: ["__system"]});
assert(adminDb.auth("admin", "password"));
- assert.commandWorked(adminDb.runCommand({
- createRole: testRole,
- privileges: [ ],
- roles: [ ]
- }));
- assert.commandWorked(adminDb.runCommand({
- createUser: testUser,
- pwd: "password",
- roles: [ { role: testRole, db: adminDbName } ]
- }));
+ assert.commandWorked(adminDb.runCommand({createRole: testRole, privileges: [], roles: []}));
+ assert.commandWorked(adminDb.runCommand(
+ {createUser: testUser, pwd: "password", roles: [{role: testRole, db: adminDbName}]}));
adminDb.logout();
}
var opts = {
- auth:"",
+ auth: "",
enableExperimentalStorageDetailsCmd: ""
};
var impls = {
@@ -191,12 +172,7 @@ authCommandsLib.runTests(conn, impls);
MongoRunner.stopMongod(conn);
// run all tests sharded
-conn = new ShardingTest({
- shards: 2,
- mongos: 1,
- keyFile: "jstests/libs/key1",
- other: { shardOptions: opts }
-});
+conn = new ShardingTest(
+ {shards: 2, mongos: 1, keyFile: "jstests/libs/key1", other: {shardOptions: opts}});
authCommandsLib.runTests(conn, impls);
conn.stop();
-
diff --git a/jstests/auth/copyauth.js b/jstests/auth/copyauth.js
index 4fb74101ad5..a8083df400e 100644
--- a/jstests/auth/copyauth.js
+++ b/jstests/auth/copyauth.js
@@ -1,8 +1,8 @@
// Test copyDatabase command with various combinations of authed/unauthed and single node/replica
// set source and dest.
-TestData.authMechanism = "SCRAM-SHA-1"; // SERVER-11428
-DB.prototype._defaultAuthenticationMechanism = "SCRAM-SHA-1"; // SERVER-11428
+TestData.authMechanism = "SCRAM-SHA-1"; // SERVER-11428
+DB.prototype._defaultAuthenticationMechanism = "SCRAM-SHA-1"; // SERVER-11428
var baseName = "jstests_clone_copyauth";
@@ -27,13 +27,12 @@ var baseName = "jstests_clone_copyauth";
* stop() - stop and cleanup whatever nodes the helper spawned when it was created.
*/
function ClusterSpawnHelper(clusterType, startWithAuth) {
-
if (clusterType === "sharded") {
var shardingTestConfig = {
- name : baseName + "_source",
- mongos : 1,
- shards : 1,
- config : 1
+ name: baseName + "_source",
+ mongos: 1,
+ shards: 1,
+ config: 1
};
if (startWithAuth) {
shardingTestConfig.auth = "";
@@ -42,9 +41,12 @@ function ClusterSpawnHelper(clusterType, startWithAuth) {
var shardingTest = new ShardingTest(shardingTestConfig);
this.conn = shardingTest.s;
this.connString = this.conn.host;
- }
- else if (clusterType === "repl") {
- var replSetTestConfig = { name: baseName + "_source", nodes: 3, nodeOptions: {} };
+ } else if (clusterType === "repl") {
+ var replSetTestConfig = {
+ name: baseName + "_source",
+ nodes: 3,
+ nodeOptions: {}
+ };
if (startWithAuth) {
replSetTestConfig.nodeOptions.auth = "";
replSetTestConfig.nodeOptions.keyFile = "jstests/libs/key1";
@@ -55,15 +57,15 @@ function ClusterSpawnHelper(clusterType, startWithAuth) {
if (startWithAuth) {
authutil.asCluster(replSetTest.nodes,
replSetTestConfig.nodeOptions.keyFile,
- function() { replSetTest.awaitReplication(); });
- }
- else {
+ function() {
+ replSetTest.awaitReplication();
+ });
+ } else {
replSetTest.awaitReplication();
}
this.conn = replSetTest.getPrimary();
this.connString = replSetTest.getURL();
- }
- else {
+ } else {
var singleNodeConfig = {};
if (startWithAuth) {
singleNodeConfig.auth = "";
@@ -72,14 +74,12 @@ function ClusterSpawnHelper(clusterType, startWithAuth) {
this.connString = this.conn.host;
}
- this.stop = function () {
+ this.stop = function() {
if (clusterType === "sharded") {
shardingTest.stop();
- }
- else if (clusterType === "repl") {
+ } else if (clusterType === "repl") {
replSetTest.stopSet();
- }
- else {
+ } else {
MongoRunner.stopMongod(this.conn.port);
}
};
@@ -101,16 +101,9 @@ function ClusterSpawnHelper(clusterType, startWithAuth) {
* }
*/
function copydbBetweenClustersTest(configObj) {
-
-
-
// First sanity check the arguments in our configObj
- var requiredKeys = [
- 'sourceClusterType',
- 'isSourceUsingAuth',
- 'targetClusterType',
- 'isTargetUsingAuth'
- ];
+ var requiredKeys =
+ ['sourceClusterType', 'isSourceUsingAuth', 'targetClusterType', 'isTargetUsingAuth'];
var i;
for (i = 0; i < requiredKeys.length; i++) {
@@ -118,118 +111,111 @@ function copydbBetweenClustersTest(configObj) {
"Missing required key: " + requiredKeys[i] + " in config object");
}
-
-
// 1. Get a connection to the source database, insert data and setup auth if applicable
source = new ClusterSpawnHelper(configObj.sourceClusterType, configObj.isSourceUsingAuth);
if (configObj.isSourceUsingAuth) {
// Create a super user so we can create a regular user and not be locked out afterwards
- source.conn.getDB("admin").createUser({ user: "sourceSuperUser", pwd: "sourceSuperUser",
- roles: [ "root" ] });
+ source.conn.getDB("admin")
+ .createUser({user: "sourceSuperUser", pwd: "sourceSuperUser", roles: ["root"]});
source.conn.getDB("admin").auth("sourceSuperUser", "sourceSuperUser");
- source.conn.getDB(baseName)[baseName].save({i:1});
+ source.conn.getDB(baseName)[baseName].save({i: 1});
assert.eq(1, source.conn.getDB(baseName)[baseName].count());
assert.eq(1, source.conn.getDB(baseName)[baseName].findOne().i);
// Insert a document and create a regular user that we will use for the target
// authenticating with the source
- source.conn.getDB(baseName).createUser({ user: "foo", pwd: "bar",
- roles: [ "dbOwner" ] });
+ source.conn.getDB(baseName).createUser({user: "foo", pwd: "bar", roles: ["dbOwner"]});
source.conn.getDB("admin").logout();
- assert.throws(function() { source.conn.getDB(baseName)[baseName].findOne(); });
+ assert.throws(function() {
+ source.conn.getDB(baseName)[baseName].findOne();
+ });
} else {
- source.conn.getDB(baseName)[baseName].save({i:1});
+ source.conn.getDB(baseName)[baseName].save({i: 1});
assert.eq(1, source.conn.getDB(baseName)[baseName].count());
assert.eq(1, source.conn.getDB(baseName)[baseName].findOne().i);
}
-
-
// 2. Get a connection to the target database, and set up auth if necessary
target = new ClusterSpawnHelper(configObj.targetClusterType, configObj.isTargetUsingAuth);
if (configObj.isTargetUsingAuth) {
- target.conn.getDB("admin").createUser({ user: "targetSuperUser", pwd: "targetSuperUser",
- roles: [ "root" ] });
- assert.throws(function() { target.conn.getDB(baseName)[baseName].findOne(); });
+ target.conn.getDB("admin")
+ .createUser({user: "targetSuperUser", pwd: "targetSuperUser", roles: ["root"]});
+ assert.throws(function() {
+ target.conn.getDB(baseName)[baseName].findOne();
+ });
target.conn.getDB("admin").auth("targetSuperUser", "targetSuperUser");
}
-
-
// 3. Run the copydb command
target.conn.getDB(baseName).dropDatabase();
assert.eq(0, target.conn.getDB(baseName)[baseName].count());
if (configObj.isSourceUsingAuth) {
// We only need to pass username and password if the target has to send authentication
// information to the source cluster
- assert.commandWorked(target.conn.getDB(baseName).copyDatabase(baseName, baseName,
- source.connString,
- "foo", "bar"));
- }
- else {
+ assert.commandWorked(target.conn.getDB(baseName).copyDatabase(
+ baseName, baseName, source.connString, "foo", "bar"));
+ } else {
// We are copying from a cluster with no auth
- assert.commandWorked(target.conn.getDB(baseName).copyDatabase(baseName, baseName,
- source.connString));
+ assert.commandWorked(
+ target.conn.getDB(baseName).copyDatabase(baseName, baseName, source.connString));
}
assert.eq(1, target.conn.getDB(baseName)[baseName].count());
assert.eq(1, target.conn.getDB(baseName)[baseName].findOne().i);
-
-
// 4. Do any necessary cleanup
source.stop();
target.stop();
}
(function() {
-"use strict";
-
-var sourceClusterTypeValues = [ "single", "repl", "sharded" ];
-var isSourceUsingAuthValues = [ true, false ];
-var targetClusterTypeValues = [ "single", "repl", "sharded" ];
-var isTargetUsingAuthValues = [ true, false ];
-for (var i = 0; i < sourceClusterTypeValues.length; i++) {
- for (var j = 0; j < isSourceUsingAuthValues.length; j++) {
- for (var k = 0; k < targetClusterTypeValues.length; k++) {
- for (var l = 0; l < isTargetUsingAuthValues.length; l++) {
- if (sourceClusterTypeValues[i] === "sharded" &&
- targetClusterTypeValues[k] === "sharded") {
- // SERVER-13112
- continue;
- }
- if (sourceClusterTypeValues[i] === "repl" &&
- targetClusterTypeValues[k] === "repl") {
- // SERVER-13077
- continue;
+ "use strict";
+
+ var sourceClusterTypeValues = ["single", "repl", "sharded"];
+ var isSourceUsingAuthValues = [true, false];
+ var targetClusterTypeValues = ["single", "repl", "sharded"];
+ var isTargetUsingAuthValues = [true, false];
+ for (var i = 0; i < sourceClusterTypeValues.length; i++) {
+ for (var j = 0; j < isSourceUsingAuthValues.length; j++) {
+ for (var k = 0; k < targetClusterTypeValues.length; k++) {
+ for (var l = 0; l < isTargetUsingAuthValues.length; l++) {
+ if (sourceClusterTypeValues[i] === "sharded" &&
+ targetClusterTypeValues[k] === "sharded") {
+ // SERVER-13112
+ continue;
+ }
+ if (sourceClusterTypeValues[i] === "repl" &&
+ targetClusterTypeValues[k] === "repl") {
+ // SERVER-13077
+ continue;
+ }
+ if (isSourceUsingAuthValues[j] === true &&
+ targetClusterTypeValues[k] === "sharded") {
+ // SERVER-6427
+ continue;
+ }
+ if (sourceClusterTypeValues[i] === "repl" &&
+ isSourceUsingAuthValues[j] === false &&
+ targetClusterTypeValues[k] === "sharded" &&
+ isTargetUsingAuthValues[l] == true) {
+ // SERVER-18103
+ continue;
+ }
+ var testCase = {
+ 'sourceClusterType': sourceClusterTypeValues[i],
+ 'isSourceUsingAuth': isSourceUsingAuthValues[j],
+ 'targetClusterType': targetClusterTypeValues[k],
+ 'isTargetUsingAuth': isTargetUsingAuthValues[l]
+ };
+ print("Running copydb with auth test:");
+ printjson(testCase);
+ copydbBetweenClustersTest(testCase);
}
- if (isSourceUsingAuthValues[j] === true &&
- targetClusterTypeValues[k] === "sharded") {
- // SERVER-6427
- continue;
- }
- if (sourceClusterTypeValues[i] === "repl" &&
- isSourceUsingAuthValues[j] === false &&
- targetClusterTypeValues[k] === "sharded" &&
- isTargetUsingAuthValues[l] == true) {
- // SERVER-18103
- continue;
- }
- var testCase = {
- 'sourceClusterType' : sourceClusterTypeValues[i],
- 'isSourceUsingAuth' : isSourceUsingAuthValues[j],
- 'targetClusterType' : targetClusterTypeValues[k],
- 'isTargetUsingAuth' : isTargetUsingAuthValues[l]
- };
- print("Running copydb with auth test:");
- printjson(testCase);
- copydbBetweenClustersTest(testCase);
}
}
}
-}
}());
print(baseName + " success!");
diff --git a/jstests/auth/copyauth2.js b/jstests/auth/copyauth2.js
index 852ba40e641..f47bfcba8ad 100644
--- a/jstests/auth/copyauth2.js
+++ b/jstests/auth/copyauth2.js
@@ -1,26 +1,25 @@
// Basic test that copydb works with auth enabled when copying within the same cluster
function runTest(a, b) {
- a.createUser({user: "chevy", pwd: "chase", roles: ["read", {role:'readWrite', db: b._name}]});
- a.foo.insert({a:1});
- b.getSiblingDB( "admin").logout();
+ a.createUser({user: "chevy", pwd: "chase", roles: ["read", {role: 'readWrite', db: b._name}]});
+ a.foo.insert({a: 1});
+ b.getSiblingDB("admin").logout();
a.auth("chevy", "chase");
- assert.eq( 1 , a.foo.count() , "A" );
- assert.eq( 0 , b.foo.count() , "B" );
+ assert.eq(1, a.foo.count(), "A");
+ assert.eq(0, b.foo.count(), "B");
- a.copyDatabase(a._name , b._name);
- assert.eq( 1 , a.foo.count() , "C" );
- assert.eq( 1 , b.foo.count() , "D" );
+ a.copyDatabase(a._name, b._name);
+ assert.eq(1, a.foo.count(), "C");
+ assert.eq(1, b.foo.count(), "D");
}
-
// run all tests standalone
-var conn = MongoRunner.runMongod({auth:""});
-var a = conn.getDB( "copydb2-test-a" );
-var b = conn.getDB( "copydb2-test-b" );
-var adminDB = conn.getDB( "admin" );
+var conn = MongoRunner.runMongod({auth: ""});
+var a = conn.getDB("copydb2-test-a");
+var b = conn.getDB("copydb2-test-b");
+var adminDB = conn.getDB("admin");
adminDB.createUser({user: "root", pwd: "root", roles: ["root"]});
adminDB.auth("root", "root");
runTest(a, b);
diff --git a/jstests/auth/copyauth_between_shards.js b/jstests/auth/copyauth_between_shards.js
index cd95c9e1508..219d38c7f0f 100644
--- a/jstests/auth/copyauth_between_shards.js
+++ b/jstests/auth/copyauth_between_shards.js
@@ -4,9 +4,11 @@
var baseName = "jstests_clone_copyauth_between_shards";
function copydbWithinShardedCluster(useReplSets, passCredentials, useAuth) {
- var clusterConfig = {shards : 1,
- mongos : 1,
- config : 1 };
+ var clusterConfig = {
+ shards: 1,
+ mongos: 1,
+ config: 1
+ };
if (useAuth) {
clusterConfig.auth = "";
@@ -25,7 +27,9 @@ function copydbWithinShardedCluster(useReplSets, passCredentials, useAuth) {
if (useAuth) {
mongos.getDB("admin").createUser({user: "super", pwd: "super", roles: ["root"]});
- assert.throws(function() { mongos.getDB("test1")["test1"].findOne(); });
+ assert.throws(function() {
+ mongos.getDB("test1")["test1"].findOne();
+ });
mongos.getDB("admin").auth("super", "super");
}
@@ -36,9 +40,9 @@ function copydbWithinShardedCluster(useReplSets, passCredentials, useAuth) {
// The copyDatabase command acts differently depending on whether we pass username and password
if (passCredentials) {
- var result = mongos.getDB('admin').copyDatabase('test1', 'test2', undefined, "super", "super");
- }
- else {
+ var result =
+ mongos.getDB('admin').copyDatabase('test1', 'test2', undefined, "super", "super");
+ } else {
var result = mongos.getDB('admin').copyDatabase('test1', 'test2');
}
printjson(result);
@@ -49,10 +53,10 @@ function copydbWithinShardedCluster(useReplSets, passCredentials, useAuth) {
}
// SERVER-13080
-//copydbWithinShardedCluster(true, true, true);
-//copydbWithinShardedCluster(false, true, true);
-//copydbWithinShardedCluster(true, false, true);
-//copydbWithinShardedCluster(false, false, true);
+// copydbWithinShardedCluster(true, true, true);
+// copydbWithinShardedCluster(false, true, true);
+// copydbWithinShardedCluster(true, false, true);
+// copydbWithinShardedCluster(false, false, true);
copydbWithinShardedCluster(true, false, false);
copydbWithinShardedCluster(false, false, false);
diff --git a/jstests/auth/db_multiple_login.js b/jstests/auth/db_multiple_login.js
index ed48bfb4e98..4a16efdf0f6 100644
--- a/jstests/auth/db_multiple_login.js
+++ b/jstests/auth/db_multiple_login.js
@@ -3,26 +3,28 @@
// authentication.
//
// Regression test for SERVER-8144.
-var conn = MongoRunner.runMongod({ auth: "", smallfiles: "" });
+var conn = MongoRunner.runMongod({auth: "", smallfiles: ""});
var admin = conn.getDB("admin");
var test = conn.getDB("test");
-admin.createUser({user:'admin', pwd: 'a', roles: jsTest.adminUserRoles});
+admin.createUser({user: 'admin', pwd: 'a', roles: jsTest.adminUserRoles});
assert(admin.auth('admin', 'a'));
-test.createUser({user: 'reader', pwd: 'a', roles: [ "read" ]});
-test.createUser({user: 'writer', pwd: 'a', roles: [ "readWrite" ]});
+test.createUser({user: 'reader', pwd: 'a', roles: ["read"]});
+test.createUser({user: 'writer', pwd: 'a', roles: ["readWrite"]});
admin.logout();
// Nothing logged in, can neither read nor write.
-assert.writeError(test.docs.insert({ value: 0 }));
-assert.throws(function() { test.foo.findOne(); });
+assert.writeError(test.docs.insert({value: 0}));
+assert.throws(function() {
+ test.foo.findOne();
+});
// Writer logged in, can read and write.
test.auth('writer', 'a');
-assert.writeOK(test.docs.insert({ value: 1 }));
+assert.writeOK(test.docs.insert({value: 1}));
test.foo.findOne();
// Reader logged in, replacing writer, can only read.
test.auth('reader', 'a');
-assert.writeError(test.docs.insert({ value: 2 }));
+assert.writeError(test.docs.insert({value: 2}));
test.foo.findOne();
diff --git a/jstests/auth/disable_localhost_bypass.js b/jstests/auth/disable_localhost_bypass.js
index 9d29f5ae9fe..a33d582cf15 100644
--- a/jstests/auth/disable_localhost_bypass.js
+++ b/jstests/auth/disable_localhost_bypass.js
@@ -1,24 +1,26 @@
-var conn1 = MongoRunner.runMongod({ auth: "",
- smallfiles: "",
- setParameter: "enableLocalhostAuthBypass=true"});
-var conn2 = MongoRunner.runMongod({ auth: "",
- smallfiles: "",
- setParameter: "enableLocalhostAuthBypass=false"});
+var conn1 = MongoRunner.runMongod(
+ {auth: "", smallfiles: "", setParameter: "enableLocalhostAuthBypass=true"});
+var conn2 = MongoRunner.runMongod(
+ {auth: "", smallfiles: "", setParameter: "enableLocalhostAuthBypass=false"});
// Should fail because of localhost exception narrowed (SERVER-12621).
-assert.writeError(conn1.getDB("test").foo.insert({a:1}));
-assert.throws(function() { conn2.getDB("test").foo.findOne(); });
+assert.writeError(conn1.getDB("test").foo.insert({a: 1}));
+assert.throws(function() {
+ conn2.getDB("test").foo.findOne();
+});
// Should succeed due to localhost exception.
conn1.getDB("admin").createUser({user: "root", pwd: "pass", roles: ["root"]});
conn1.getDB("admin").auth("root", "pass");
-conn1.getDB("test").foo.insert({a:1});
+conn1.getDB("test").foo.insert({a: 1});
conn1.getDB("admin").dropAllUsers();
conn1.getDB("admin").logout();
-assert.throws(function() { conn2.getDB("test").foo.findOne(); });
+assert.throws(function() {
+ conn2.getDB("test").foo.findOne();
+});
// Should fail since localhost exception is disabled
assert.throws(function() {
diff --git a/jstests/auth/explain_auth.js b/jstests/auth/explain_auth.js
index a718ddf8653..70570324260 100644
--- a/jstests/auth/explain_auth.js
+++ b/jstests/auth/explain_auth.js
@@ -34,8 +34,7 @@ function testExplainAuth(authSpec) {
function assertCmdResult(result, expectSuccess) {
if (expectSuccess) {
assert.commandWorked(result);
- }
- else {
+ } else {
assert.commandFailedWithCode(result, 13);
}
}
@@ -49,58 +48,36 @@ function testExplainAuth(authSpec) {
assertCmdResult(cmdResult, authSpec.count);
// .group()
- cmdResult = db.runCommand({
- explain: {
- group: {
- ns: coll.getName(),
- key: "a",
- $reduce: function() { },
- initial: { }
- }
- }
- });
+ cmdResult = db.runCommand(
+ {explain: {group: {ns: coll.getName(), key: "a", $reduce: function() {}, initial: {}}}});
assertCmdResult(cmdResult, authSpec.group);
// .remove()
- cmdResult = db.runCommand({
- explain: {
- delete: coll.getName(),
- deletes: [ {q: {a: 1}, limit: 1} ]
- }
- });
+ cmdResult =
+ db.runCommand({explain: {delete: coll.getName(), deletes: [{q: {a: 1}, limit: 1}]}});
assertCmdResult(cmdResult, authSpec.remove);
// .update()
- cmdResult = db.runCommand({
- explain: {
- update: coll.getName(),
- updates: [ {q: {a: 1}, u: {$set: {b: 1}}} ]
- }
- });
+ cmdResult = db.runCommand(
+ {explain: {update: coll.getName(), updates: [{q: {a: 1}, u: {$set: {b: 1}}}]}});
assertCmdResult(cmdResult, authSpec.update);
}
// Create some user-defined roles which we will grant to the users below.
db.createRole({
role: "findOnly",
- privileges: [
- {resource: {db: db.getName(), collection: coll.getName()}, actions: ["find"]}
- ],
- roles: [ ]
+ privileges: [{resource: {db: db.getName(), collection: coll.getName()}, actions: ["find"]}],
+ roles: []
});
db.createRole({
role: "updateOnly",
- privileges: [
- {resource: {db: db.getName(), collection: coll.getName()}, actions: ["update"]}
- ],
- roles: [ ]
+ privileges: [{resource: {db: db.getName(), collection: coll.getName()}, actions: ["update"]}],
+ roles: []
});
db.createRole({
role: "removeOnly",
- privileges: [
- {resource: {db: db.getName(), collection: coll.getName()}, actions: ["remove"]}
- ],
- roles: [ ]
+ privileges: [{resource: {db: db.getName(), collection: coll.getName()}, actions: ["remove"]}],
+ roles: []
});
// Create three users:
@@ -116,31 +93,13 @@ admin.logout();
// The "find" action allows explain of read operations.
db.auth("findOnly", "pwd");
-testExplainAuth({
- find: true,
- count: true,
- group: true,
- remove: false,
- update: false
-});
+testExplainAuth({find: true, count: true, group: true, remove: false, update: false});
db.logout();
db.auth("updateOnly", "pwd");
-testExplainAuth({
- find: false,
- count: false,
- group: false,
- remove: false,
- update: true
-});
+testExplainAuth({find: false, count: false, group: false, remove: false, update: true});
db.logout();
db.auth("removeOnly", "pwd");
-testExplainAuth({
- find: false,
- count: false,
- group: false,
- remove: true,
- update: false
-});
+testExplainAuth({find: false, count: false, group: false, remove: true, update: false});
db.logout();
diff --git a/jstests/auth/indexSystemUsers.js b/jstests/auth/indexSystemUsers.js
index 056013f18cf..ef7187dfe46 100644
--- a/jstests/auth/indexSystemUsers.js
+++ b/jstests/auth/indexSystemUsers.js
@@ -1,34 +1,38 @@
// SERVER-8802: Test that you can't build indexes on system.users and use that to drop users with
// dropDups.
-var conn = MongoRunner.runMongod({auth : ""});
+var conn = MongoRunner.runMongod({auth: ""});
var adminDB = conn.getDB("admin");
var testDB = conn.getDB("test");
-adminDB.createUser({user:'admin', pwd:'x', roles:['userAdminAnyDatabase']});
-adminDB.auth('admin','x');
-adminDB.createUser({user:'mallory', pwd:'x', roles:['readWriteAnyDatabase']});
-testDB.createUser({user:'user', pwd:'x', roles:['read']});
+adminDB.createUser({user: 'admin', pwd: 'x', roles: ['userAdminAnyDatabase']});
+adminDB.auth('admin', 'x');
+adminDB.createUser({user: 'mallory', pwd: 'x', roles: ['readWriteAnyDatabase']});
+testDB.createUser({user: 'user', pwd: 'x', roles: ['read']});
assert.eq(3, adminDB.system.users.count());
adminDB.logout();
adminDB.auth('mallory', 'x');
-var res = adminDB.system.users.createIndex({ haxx: 1 }, { unique: true, dropDups: true });
+var res = adminDB.system.users.createIndex({haxx: 1}, {unique: true, dropDups: true});
assert(!res.ok);
-assert.eq(13, res.code); // unauthorized
-assert.writeError(adminDB.exploit.system.indexes.insert({ ns: "admin.system.users",
- key: { haxx: 1.0 },
- name: "haxx_1",
- unique: true,
- dropDups: true }));
+assert.eq(13, res.code); // unauthorized
+assert.writeError(adminDB.exploit.system.indexes.insert(
+ {ns: "admin.system.users", key: {haxx: 1.0}, name: "haxx_1", unique: true, dropDups: true}));
// Make sure that no indexes were built.
-var collectionInfosCursor = adminDB.runCommand("listCollections", { filter:
- {$and : [{name : /^admin\.system\.users\.\$/},
- {name : {$ne : "admin.system.users.$_id_"}},
- {name : {$ne : "admin.system.users.$user_1_db_1"}} ]}});
+var collectionInfosCursor =
+ adminDB.runCommand("listCollections",
+ {
+ filter: {
+ $and: [
+ {name: /^admin\.system\.users\.\$/},
+ {name: {$ne: "admin.system.users.$_id_"}},
+ {name: {$ne: "admin.system.users.$user_1_db_1"}}
+ ]
+ }
+ });
assert.eq([], new DBCommandCursor(adminDB.getMongo(), collectionInfosCursor).toArray());
adminDB.logout();
-adminDB.auth('admin','x');
+adminDB.auth('admin', 'x');
// Make sure that no users were actually dropped
assert.eq(3, adminDB.system.users.count());
diff --git a/jstests/auth/iteration_count_control.js b/jstests/auth/iteration_count_control.js
index b0a790c43c6..0d710298ff4 100644
--- a/jstests/auth/iteration_count_control.js
+++ b/jstests/auth/iteration_count_control.js
@@ -3,32 +3,27 @@ load('./jstests/multiVersion/libs/auth_helpers.js');
var conn = MongoRunner.runMongod({auth: ''});
-var testIterationCountControl = function(){
+var testIterationCountControl = function() {
var adminDB = conn.getDB('admin');
- adminDB.createUser({user: 'user1', pwd: 'pass',
- roles: jsTest.adminUserRoles});
+ adminDB.createUser({user: 'user1', pwd: 'pass', roles: jsTest.adminUserRoles});
assert(adminDB.auth({user: 'user1', pwd: 'pass'}));
var userDoc = getUserDoc(adminDB, 'user1');
assert.eq(10000, userDoc.credentials['SCRAM-SHA-1'].iterationCount);
// Changing iterationCount should not affect existing users.
- assert.commandWorked(adminDB.runCommand({setParameter: 1,
- scramIterationCount: 5000}));
+ assert.commandWorked(adminDB.runCommand({setParameter: 1, scramIterationCount: 5000}));
userDoc = getUserDoc(adminDB, 'user1');
assert.eq(10000, userDoc.credentials['SCRAM-SHA-1'].iterationCount);
// But it should take effect when the user's password is changed.
- adminDB.updateUser('user1', {pwd: 'pass',
- roles: jsTest.adminUserRoles});
+ adminDB.updateUser('user1', {pwd: 'pass', roles: jsTest.adminUserRoles});
userDoc = getUserDoc(adminDB, 'user1');
assert.eq(5000, userDoc.credentials['SCRAM-SHA-1'].iterationCount);
// Test invalid values for iterationCount. 5000 is the minimum value.
- assert.commandFailed(adminDB.runCommand({setParameter: 1,
- scramIterationCount: 4999}));
- assert.commandFailed(adminDB.runCommand({setParameter: 1,
- scramIterationCount: -5000}));
+ assert.commandFailed(adminDB.runCommand({setParameter: 1, scramIterationCount: 4999}));
+ assert.commandFailed(adminDB.runCommand({setParameter: 1, scramIterationCount: -5000}));
};
testIterationCountControl();
diff --git a/jstests/auth/js_scope_leak.js b/jstests/auth/js_scope_leak.js
index 57e0b112937..f1d5d192876 100644
--- a/jstests/auth/js_scope_leak.js
+++ b/jstests/auth/js_scope_leak.js
@@ -7,23 +7,24 @@
//
// These transitions are tested for dbEval, $where, MapReduce and $group
-var conn = MongoRunner.runMongod({ smallfiles: ""});
+var conn = MongoRunner.runMongod({smallfiles: ""});
var test = conn.getDB("test");
// insert a single document and add two test users
-test.foo.insert({a:1});
+test.foo.insert({a: 1});
assert.eq(1, test.foo.findOne().a);
-test.createUser({user:'a', pwd: 'a', roles: jsTest.basicUserRoles});
-test.createUser({user:'b', pwd: 'b', roles: jsTest.basicUserRoles});
+test.createUser({user: 'a', pwd: 'a', roles: jsTest.basicUserRoles});
+test.createUser({user: 'b', pwd: 'b', roles: jsTest.basicUserRoles});
function missingOrEquals(string) {
- return 'function() { '
- + 'var global = function(){return this;}.call();'
- // Uncomment the next line when debugging.
- // + 'print(global.hasOwnProperty("someGlobal") ? someGlobal : "MISSING" );'
- + 'return !global.hasOwnProperty("someGlobal")'
- + ' || someGlobal == unescape("' + escape(string) + '");'
- +'}()';
+ return 'function() { ' +
+ 'var global = function(){return this;}.call();'
+ // Uncomment the next line when debugging.
+ // + 'print(global.hasOwnProperty("someGlobal") ? someGlobal : "MISSING" );'
+ +
+ 'return !global.hasOwnProperty("someGlobal")' +
+ ' || someGlobal == unescape("' + escape(string) + '");' +
+ '}()';
}
function testDbEval() {
@@ -50,20 +51,19 @@ testDbEval();
// test $where
function testWhere() {
// set the global variable 'someGlobal' before authenticating
- test.foo.findOne({$where:'someGlobal = "noUsers";'});
+ test.foo.findOne({$where: 'someGlobal = "noUsers";'});
// test new user auth causes scope to be cleared
test.auth('a', 'a');
- assert.eq(1,
- test.foo.count({$where: 'return ' + missingOrEquals('a')}),
- "$where: Auth user 'a");
+ assert.eq(
+ 1, test.foo.count({$where: 'return ' + missingOrEquals('a')}), "$where: Auth user 'a");
// test auth as another user causes scope to be cleared
- test.foo.findOne({$where:'someGlobal = "a";'});
+ test.foo.findOne({$where: 'someGlobal = "a";'});
test.auth('b', 'b');
assert(test.foo.count({$where: 'return ' + missingOrEquals('a&b')}), "$where: Auth user 'b'");
// test user logout causes scope to be cleared
- test.foo.findOne({$where:'someGlobal = "a&b";'});
+ test.foo.findOne({$where: 'someGlobal = "a&b";'});
test.logout();
assert(test.foo.count({$where: 'return ' + missingOrEquals('noUsers')}), "$where: log out");
}
@@ -71,14 +71,18 @@ testWhere();
testWhere();
function testMapReduce() {
- var mapSet = function(string) { return Function('someGlobal = "' + string + '"'); };
- var mapGet = function(string) { return Function('assert(' + missingOrEquals(string) +')'); };
- var reduce = function(k, v) { };
+ var mapSet = function(string) {
+ return Function('someGlobal = "' + string + '"');
+ };
+ var mapGet = function(string) {
+ return Function('assert(' + missingOrEquals(string) + ')');
+ };
+ var reduce = function(k, v) {};
var setGlobalInMap = function(string) {
- test.foo.mapReduce(mapSet(string), reduce, {out:{inline:1}});
+ test.foo.mapReduce(mapSet(string), reduce, {out: {inline: 1}});
};
var getGlobalFromMap = function(string) {
- test.foo.mapReduce(mapGet(string), reduce, {out:{inline:1}});
+ test.foo.mapReduce(mapGet(string), reduce, {out: {inline: 1}});
};
// set the global variable 'someGlobal' before authenticating
@@ -86,33 +90,41 @@ function testMapReduce() {
// test new user auth causes scope to be cleared
test.auth('a', 'a');
- assert.doesNotThrow(function() { getGlobalFromMap('a'); }, [], "M/R: Auth user 'a'");
+ assert.doesNotThrow(function() {
+ getGlobalFromMap('a');
+ }, [], "M/R: Auth user 'a'");
// test auth as another user causes scope to be cleared
setGlobalInMap('a');
test.auth('b', 'b');
- assert.doesNotThrow(function() { getGlobalFromMap('a&b'); }, [], "M/R: Auth user 'b'");
+ assert.doesNotThrow(function() {
+ getGlobalFromMap('a&b');
+ }, [], "M/R: Auth user 'b'");
// test user logout causes scope to be cleared
setGlobalInMap('a&b');
test.logout();
- assert.doesNotThrow(function() { getGlobalFromMap('noUsers'); }, [], "M/R: Log out");
+ assert.doesNotThrow(function() {
+ getGlobalFromMap('noUsers');
+ }, [], "M/R: Log out");
}
testMapReduce();
testMapReduce();
function testGroup() {
var setGlobalInGroup = function(string) {
- return test.foo.group({key: 'a',
- reduce: Function('doc1', 'agg',
- 'someGlobal = "' + string + '"'),
- initial:{}});
+ return test.foo.group({
+ key: 'a',
+ reduce: Function('doc1', 'agg', 'someGlobal = "' + string + '"'),
+ initial: {}
+ });
};
var getGlobalFromGroup = function(string) {
- return test.foo.group({key: 'a',
- reduce: Function('doc1', 'agg',
- 'assert(' + missingOrEquals(string) +')'),
- initial:{}});
+ return test.foo.group({
+ key: 'a',
+ reduce: Function('doc1', 'agg', 'assert(' + missingOrEquals(string) + ')'),
+ initial: {}
+ });
};
// set the global variable 'someGlobal' before authenticating
@@ -134,5 +146,3 @@ function testGroup() {
}
testGroup();
testGroup();
-
-
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index 69339a7655a..bab46779cb6 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -197,2577 +197,2345 @@ var roles_all = {
var authCommandsLib = {
-
-
/************* TEST CASES ****************/
tests: [
{
- testname: "addShard",
- command: {addShard: "x"},
- skipStandalone: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {cluster: true}, actions: ["addShard"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- // Test that clusterManager role has permission to run addTagRange
- testname: "addTagRange",
- command: { // addTagRange is not a "real command"; it updates config.tags
- update: "tags",
- updates: [ {
- q: {_id: { ns : "test.x" , min : 1 }},
- u: {_id: { ns : "test.x" , min : 1 },
- ns : "test.x"}
- } ] },
- skipStandalone: true,
- testcases: [
- {
- runOnDb: "config",
- roles: Object.extend({readWriteAnyDatabase: 1}, roles_clusterManager)
- }
- ]
- },
- {
- testname: "applyOps",
- command: {applyOps: "x"},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {__system: 1},
- privileges: [
- { resource: {anyResource: true}, actions: ["anyAction"] }
- ],
- expectFail: true
- },
- {
- runOnDb: firstDbName,
- roles: {__system: 1},
- privileges: [
- { resource: {anyResource: true}, actions: ["anyAction"] }
- ],
- expectFail: true
- }
- ]
- },
- {
- testname: "aggregate_readonly",
- command: {aggregate: "foo", pipeline: []},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: "foo"}, actions: ["find"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: "foo"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "aggregate_explain",
- command: {aggregate: "foo", explain: true, pipeline: [ {$match: {bar: 1}} ] },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: "foo"}, actions: ["find"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: "foo"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "aggregate_write",
- command: {aggregate: "foo", pipeline: [ {$out: "foo_out"} ] },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: { readWrite: 1,
- readWriteAnyDatabase: 1,
- dbOwner: 1,
- root: 1,
- __system: 1},
- privileges: [
- { resource: {db: firstDbName, collection: "foo"}, actions: ["find"] },
- { resource: {db: firstDbName, collection: "foo_out"}, actions: ["insert"] },
- { resource: {db: firstDbName, collection: "foo_out"}, actions: ["remove"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
- privileges: [
- { resource: {db: secondDbName, collection: "foo"}, actions: ["find"] },
- { resource: {db: secondDbName, collection: "foo_out"}, actions: ["insert"] },
- { resource: {db: secondDbName, collection: "foo_out"}, actions: ["remove"] }
- ]
- }
- ]
- },
- {
- testname: "aggregate_indexStats",
- command: {aggregate: "foo", pipeline: [{$indexStats: {}}]},
- setup: function (db) {
- db.createCollection("foo");
- },
- teardown: function (db) {
- db.foo.drop();
- },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: {
- clusterMonitor: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- {resource: {anyResource: true}, actions: ["indexStats"]}
- ]
- }
- ]
- },
- {
- testname: "appendOplogNote",
- command: {appendOplogNote: 1, data: {a: 1}},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {
- backup: 1,
- clusterManager: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {cluster: true}, actions: ["appendOplogNote"] }
- ],
- expectFail: true, // because no replication enabled
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "authSchemaUpgrade",
- command: {authSchemaUpgrade: 1},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {
- userAdminAnyDatabase: 1,
- root: 1,
- __system: 1
+ testname: "addShard",
+ command: {addShard: "x"},
+ skipStandalone: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {cluster: true}, actions: ["addShard"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ // Test that clusterManager role has permission to run addTagRange
+ testname: "addTagRange",
+ command: {
+ // addTagRange is not a "real command"; it updates config.tags
+ update: "tags",
+ updates: [{
+ q: {_id: {ns: "test.x", min: 1}},
+ u: {_id: {ns: "test.x", min: 1}, ns: "test.x"}
+ }]
+ },
+ skipStandalone: true,
+ testcases: [{
+ runOnDb: "config",
+ roles: Object.extend({readWriteAnyDatabase: 1}, roles_clusterManager)
+ }]
+ },
+ {
+ testname: "applyOps",
+ command: {applyOps: "x"},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {anyResource: true}, actions: ["anyAction"]}],
+ expectFail: true
+ },
+ {
+ runOnDb: firstDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {anyResource: true}, actions: ["anyAction"]}],
+ expectFail: true
+ }
+ ]
+ },
+ {
+ testname: "aggregate_readonly",
+ command: {aggregate: "foo", pipeline: []},
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges: [{resource: {db: firstDbName, collection: "foo"}, actions: ["find"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "foo"}, actions: ["find"]}]
+ }
+ ]
+ },
+ {
+ testname: "aggregate_explain",
+ command: {aggregate: "foo", explain: true, pipeline: [{$match: {bar: 1}}]},
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges: [{resource: {db: firstDbName, collection: "foo"}, actions: ["find"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "foo"}, actions: ["find"]}]
+ }
+ ]
+ },
+ {
+ testname: "aggregate_write",
+ command: {aggregate: "foo", pipeline: [{$out: "foo_out"}]},
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {readWrite: 1, readWriteAnyDatabase: 1, dbOwner: 1, root: 1, __system: 1},
+ privileges: [
+ {resource: {db: firstDbName, collection: "foo"}, actions: ["find"]},
+ {resource: {db: firstDbName, collection: "foo_out"}, actions: ["insert"]},
+ {resource: {db: firstDbName, collection: "foo_out"}, actions: ["remove"]}
+ ]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
+ privileges: [
+ {resource: {db: secondDbName, collection: "foo"}, actions: ["find"]},
+ {resource: {db: secondDbName, collection: "foo_out"}, actions: ["insert"]},
+ {resource: {db: secondDbName, collection: "foo_out"}, actions: ["remove"]}
+ ]
+ }
+ ]
+ },
+ {
+ testname: "aggregate_indexStats",
+ command: {aggregate: "foo", pipeline: [{$indexStats: {}}]},
+ setup: function(db) {
+ db.createCollection("foo");
+ },
+ teardown: function(db) {
+ db.foo.drop();
+ },
+ testcases: [{
+ runOnDb: firstDbName,
+ roles: {clusterMonitor: 1, clusterAdmin: 1, root: 1, __system: 1},
+ privileges: [{resource: {anyResource: true}, actions: ["indexStats"]}]
+ }]
+ },
+ {
+ testname: "appendOplogNote",
+ command: {appendOplogNote: 1, data: {a: 1}},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {backup: 1, clusterManager: 1, clusterAdmin: 1, root: 1, __system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["appendOplogNote"]}],
+ expectFail: true, // because no replication enabled
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "authSchemaUpgrade",
+ command: {authSchemaUpgrade: 1},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {userAdminAnyDatabase: 1, root: 1, __system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["authSchemaUpgrade"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "buildInfo",
+ command: {buildInfo: 1},
+ testcases: [
+ {runOnDb: firstDbName, roles: roles_all, privileges: []},
+ {runOnDb: secondDbName, roles: roles_all, privileges: []}
+ ]
+ },
+ {
+ testname: "checkShardingIndex_firstDb",
+ command: {checkShardingIndex: firstDbName + ".x", keyPattern: {_id: 1}},
+ skipSharded: true,
+ testcases: [{
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges: [{resource: {db: firstDbName, collection: "x"}, actions: ["find"]}]
+ }]
+ },
+ {
+ testname: "checkShardingIndex_secondDb",
+ command: {checkShardingIndex: secondDbName + ".x", keyPattern: {_id: 1}},
+ skipSharded: true,
+ testcases: [{
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges: [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
+ }]
+ },
+ {
+ testname: "cleanupOrphaned",
+ command: {cleanupOrphaned: firstDbName + ".x"},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {cluster: true}, actions: ["cleanupOrphaned"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "cloneCollection_1",
+ command: {cloneCollection: firstDbName + ".x"},
+ skipSharded: true,
+ testcases: [{
+ runOnDb: firstDbName,
+ roles: roles_write,
+ privileges: [{
+ resource: {db: firstDbName, collection: "x"},
+ actions: ["insert", "createIndex"]
+ }],
+ expectFail: true
+ }]
+ },
+ {
+ testname: "cloneCollection_2",
+ command: {cloneCollection: secondDbName + ".x"},
+ skipSharded: true,
+ testcases: [{
+ runOnDb: secondDbName,
+ roles: {readWriteAnyDatabase: 1, restore: 1, root: 1, __system: 1},
+ privileges: [{
+ resource: {db: secondDbName, collection: "x"},
+ actions: ["insert", "createIndex"]
+ }],
+ expectFail: true
+ }]
+ },
+ {
+ testname: "cloneCollectionAsCapped",
+ command: {cloneCollectionAsCapped: "x", toCollection: "y", size: 1000},
+ skipSharded: true,
+ setup: function(db) {
+ db.x.save({});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ db.y.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {readWrite: 1, readWriteAnyDatabase: 1, dbOwner: 1, root: 1, __system: 1},
+ privileges: [
+ {
+ resource: {db: firstDbName, collection: "y"},
+ actions: ["insert", "createIndex", "convertToCapped"]
},
- privileges: [
- { resource: {cluster: true}, actions: ["authSchemaUpgrade"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "buildInfo",
- command: {buildInfo: 1},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_all,
- privileges: [ ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_all,
- privileges: [ ]
- }
- ]
- },
- {
- testname: "checkShardingIndex_firstDb",
- command: {checkShardingIndex: firstDbName + ".x", keyPattern: {_id: 1} },
- skipSharded: true,
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "checkShardingIndex_secondDb",
- command: {checkShardingIndex: secondDbName + ".x", keyPattern: {_id: 1} },
- skipSharded: true,
- testcases: [
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "cleanupOrphaned",
- command: {cleanupOrphaned: firstDbName + ".x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {cluster: true}, actions: ["cleanupOrphaned"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "cloneCollection_1",
- command: {cloneCollection: firstDbName + ".x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_write,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["insert", "createIndex"] }
- ],
- expectFail: true
- }
- ]
- },
- {
- testname: "cloneCollection_2",
- command: {cloneCollection: secondDbName + ".x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: secondDbName,
- roles: {readWriteAnyDatabase: 1, restore: 1, root: 1, __system: 1},
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["insert", "createIndex"] }
- ],
- expectFail: true
- }
- ]
- },
- {
- testname: "cloneCollectionAsCapped",
- command: {cloneCollectionAsCapped: "x", toCollection: "y", size: 1000},
- skipSharded: true,
- setup: function (db) { db.x.save( {} ); },
- teardown: function (db) {
- db.x.drop();
- db.y.drop();
- },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: { readWrite: 1,
- readWriteAnyDatabase: 1,
- dbOwner: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: firstDbName, collection: "y"}, actions: ["insert", "createIndex", "convertToCapped"] },
- { resource: {db: firstDbName, collection: "x"}, actions: ["find"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
- privileges: [
- { resource: {db: secondDbName, collection: "y"}, actions: ["insert", "createIndex", "convertToCapped"] },
- { resource: {db: secondDbName, collection: "x"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "collMod",
- command: {collMod: "foo", usePowerOf2Sizes: true},
- setup: function (db) { db.foo.save( {} ); },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: Object.extend({restore: 1}, roles_dbAdmin),
- privileges: [
- { resource: {db: firstDbName, collection: "foo"}, actions: ["collMod"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: Object.extend({restore:1}, roles_dbAdminAny),
- privileges: [
- { resource: {db: secondDbName, collection: "foo"}, actions: ["collMod"] }
- ]
- }
- ]
- },
- {
- testname: "collStats",
- command: {collStats: "bar", scale: 1},
- setup: function (db) { db.bar.save( {} ); },
- teardown: function (db) { db.dropDatabase(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: {
- read: 1,
- readAnyDatabase: 1,
- readWrite: 1,
- readWriteAnyDatabase: 1,
- dbAdmin: 1,
- dbAdminAnyDatabase: 1,
- dbOwner: 1,
- clusterMonitor: 1,
- clusterAdmin: 1,
- backup: 1,
- root: 1,
- __system: 1
+ {resource: {db: firstDbName, collection: "x"}, actions: ["find"]}
+ ]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
+ privileges: [
+ {
+ resource: {db: secondDbName, collection: "y"},
+ actions: ["insert", "createIndex", "convertToCapped"]
},
- privileges: [
- { resource: {db: firstDbName, collection: "bar"}, actions: ["collStats"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: {
- readAnyDatabase: 1,
- readWriteAnyDatabase: 1,
- dbAdminAnyDatabase: 1,
- clusterMonitor: 1,
- clusterAdmin: 1,
- backup: 1,
- root: 1,
- __system: 1
+ {resource: {db: secondDbName, collection: "x"}, actions: ["find"]}
+ ]
+ }
+ ]
+ },
+ {
+ testname: "collMod",
+ command: {collMod: "foo", usePowerOf2Sizes: true},
+ setup: function(db) {
+ db.foo.save({});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: Object.extend({restore: 1}, roles_dbAdmin),
+ privileges:
+ [{resource: {db: firstDbName, collection: "foo"}, actions: ["collMod"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: Object.extend({restore: 1}, roles_dbAdminAny),
+ privileges:
+ [{resource: {db: secondDbName, collection: "foo"}, actions: ["collMod"]}]
+ }
+ ]
+ },
+ {
+ testname: "collStats",
+ command: {collStats: "bar", scale: 1},
+ setup: function(db) {
+ db.bar.save({});
+ },
+ teardown: function(db) {
+ db.dropDatabase();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {
+ read: 1,
+ readAnyDatabase: 1,
+ readWrite: 1,
+ readWriteAnyDatabase: 1,
+ dbAdmin: 1,
+ dbAdminAnyDatabase: 1,
+ dbOwner: 1,
+ clusterMonitor: 1,
+ clusterAdmin: 1,
+ backup: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges:
+ [{resource: {db: firstDbName, collection: "bar"}, actions: ["collStats"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: {
+ readAnyDatabase: 1,
+ readWriteAnyDatabase: 1,
+ dbAdminAnyDatabase: 1,
+ clusterMonitor: 1,
+ clusterAdmin: 1,
+ backup: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges:
+ [{resource: {db: secondDbName, collection: "bar"}, actions: ["collStats"]}]
+ }
+ ]
+ },
+ {
+ testname: "compact",
+ command: {compact: "foo"},
+ skipSharded: true,
+ setup: function(db) {
+ db.foo.save({});
+ },
+ teardown: function(db) {
+ db.dropDatabase();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_dbAdmin,
+ privileges:
+ [{resource: {db: firstDbName, collection: "foo"}, actions: ["compact"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_dbAdminAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "foo"}, actions: ["compact"]}]
+ }
+ ]
+ },
+ {
+ testname: "connectionStatus",
+ command: {connectionStatus: 1},
+ testcases: [
+ {runOnDb: firstDbName, roles: roles_all, privileges: []},
+ {runOnDb: secondDbName, roles: roles_all, privileges: []}
+ ]
+ },
+ {
+ testname: "connPoolStats",
+ command: {connPoolStats: 1},
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["connPoolStats"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["connPoolStats"]}]
+ }
+ ]
+ },
+ {
+ testname: "connPoolSync",
+ command: {connPoolSync: 1},
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_hostManager,
+ privileges: [{resource: {cluster: true}, actions: ["connPoolSync"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_hostManager,
+ privileges: [{resource: {cluster: true}, actions: ["connPoolSync"]}]
+ }
+ ]
+ },
+ {
+ testname: "convertToCapped",
+ command: {convertToCapped: "toCapped", size: 1000},
+ setup: function(db) {
+ db.toCapped.save({});
+ },
+ teardown: function(db) {
+ db.toCapped.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_writeDbAdmin,
+ privileges: [{
+ resource: {db: firstDbName, collection: "toCapped"},
+ actions: ["convertToCapped"]
+ }]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_writeDbAdminAny,
+ privileges: [{
+ resource: {db: secondDbName, collection: "toCapped"},
+ actions: ["convertToCapped"]
+ }]
+ }
+ ]
+ },
+ {
+ testname: "copydb",
+ command: {copydb: 1, fromdb: firstDbName, todb: secondDbName},
+ skipSharded: true, // Does not work sharded due to SERVER-13080
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
+ privileges: [
+ {resource: {db: firstDbName, collection: ""}, actions: ["find"]},
+ {resource: {db: firstDbName, collection: "system.js"}, actions: ["find"]},
+ {
+ resource: {db: secondDbName, collection: ""},
+ actions: ["insert", "createIndex"]
},
- privileges: [
- { resource: {db: secondDbName, collection: "bar"}, actions: ["collStats"] }
- ]
- }
- ]
- },
- {
- testname: "compact",
- command: {compact: "foo"},
- skipSharded: true,
- setup: function (db) { db.foo.save( {} ); },
- teardown: function (db) { db.dropDatabase(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_dbAdmin,
- privileges: [
- { resource: {db: firstDbName, collection: "foo"}, actions: ["compact"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_dbAdminAny,
- privileges: [
- { resource: {db: secondDbName, collection: "foo"}, actions: ["compact"] }
- ]
- }
- ]
- },
- {
- testname: "connectionStatus",
- command: {connectionStatus: 1},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_all,
- privileges: [ ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_all,
- privileges: [ ]
- }
- ]
- },
- {
- testname: "connPoolStats",
- command: {connPoolStats: 1},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["connPoolStats"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["connPoolStats"] }
- ]
- }
- ]
- },
- {
- testname: "connPoolSync",
- command: {connPoolSync: 1},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_hostManager,
- privileges: [
- { resource: {cluster: true}, actions: ["connPoolSync"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_hostManager,
- privileges: [
- { resource: {cluster: true}, actions: ["connPoolSync"] }
- ]
- }
- ]
- },
- {
- testname: "convertToCapped",
- command: {convertToCapped: "toCapped", size: 1000},
- setup: function (db) { db.toCapped.save( {} ); },
- teardown: function (db) { db.toCapped.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_writeDbAdmin,
- privileges: [
- { resource: {db: firstDbName, collection: "toCapped"}, actions:["convertToCapped"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_writeDbAdminAny,
- privileges: [
- { resource: {db: secondDbName, collection: "toCapped"}, actions:["convertToCapped"] }
- ]
- }
- ]
- },
- {
- testname: "copydb",
- command: {copydb: 1, fromdb: firstDbName, todb: secondDbName},
- skipSharded: true, // Does not work sharded due to SERVER-13080
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
- privileges: [
- { resource: {db: firstDbName, collection: ""}, actions:["find"] },
- { resource: {db: firstDbName, collection: "system.js"}, actions:["find"] },
- { resource: {db: secondDbName, collection: ""},
- actions:["insert", "createIndex"] },
- { resource: {db: secondDbName, collection: "system.js"}, actions:["insert"] },
- ]
- },
- ]
- },
- {
- testname: "count",
- command: {count: "x"},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["find"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "create",
- command: {create: "x"},
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: Object.extend({restore:1}, roles_writeDbAdmin),
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["createCollection"] }
- ]
- },
- {
- runOnDb: firstDbName,
- roles: Object.extend({restore:1}, roles_writeDbAdmin),
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["insert"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: Object.extend({restore:1}, roles_writeDbAdminAny),
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["createCollection"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: Object.extend({restore:1}, roles_writeDbAdminAny),
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["insert"] }
- ]
- }
- ]
- },
- {
- testname: "create_capped",
- command: {create: "x", capped: true, size: 1000},
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_writeDbAdmin,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["createCollection", "convertToCapped"] }
- ]
- },
- {
- runOnDb: firstDbName,
- roles: roles_writeDbAdmin,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["insert", "convertToCapped"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_writeDbAdminAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["createCollection", "convertToCapped"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_writeDbAdminAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["insert", "convertToCapped"] }
- ]
- }
- ]
- },
- {
- testname: "createIndexes",
- command: {createIndexes: "x", indexes: [{ns: firstDbName + ".x", key: {a:1}, name: "a_1"}] },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: Object.extend({readWrite: 1,
- readWriteAnyDatabase: 1,
- dbAdmin: 1,
- dbAdminAnyDatabase: 1,
- dbOwner: 1,
- restore: 1,
- root: 1,
- __system: 1}),
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["createIndex"] }
- ]
- }
- ]
- },
- {
- testname: "currentOp",
- command: {currentOp: 1, $all: true},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["inprog"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "currentOpCtx",
- command: {currentOpCtx: 1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["inprog"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["inprog"] }
- ]
- }
- ]
- },
- {
- testname: "dataSize_1",
- command: {dataSize: firstDbName + ".x"},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "dataSize_2",
- command: {dataSize: secondDbName + ".x"},
- testcases: [
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "dbHash",
- command: {dbHash: 1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: firstDbName,
- roles: { read: 1,
- readAnyDatabase: 1,
- readWrite: 1,
- readWriteAnyDatabase: 1,
- dbOwner: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: firstDbName, collection: ""}, actions: ["dbHash"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: { readAnyDatabase: 1,
- readWriteAnyDatabase: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: secondDbName, collection: ""}, actions: ["dbHash"] }
- ]
- }
- ]
- },
- {
- testname: "dbStats",
- command: {dbStats: 1, scale: 1024},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: {
- read: 1,
- readAnyDatabase: 1,
- readWrite: 1,
- readWriteAnyDatabase: 1,
- dbAdmin: 1,
- dbAdminAnyDatabase: 1,
- dbOwner: 1,
- clusterMonitor: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: firstDbName, collection: ""}, actions: ["dbStats"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: {
- readAnyDatabase: 1,
- readWriteAnyDatabase: 1,
- dbAdminAnyDatabase: 1,
- clusterMonitor: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: secondDbName, collection: ""}, actions: ["dbStats"] }
- ]
- }
- ]
- },
- {
- testname: "diagLogging",
- command: {diagLogging: 1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_hostManager,
- privileges: [
- { resource: {cluster: true}, actions: ["diagLogging"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "distinct",
- command: {distinct: "coll", key: "a", query: {}},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: "coll"}, actions: ["find"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: "coll"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "drop",
- command: {drop: "x"},
- setup: function (db) { db.x.save({}); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: Object.extend({restore: 1}, roles_writeDbAdmin),
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["dropCollection"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: Object.extend({restore: 1}, roles_writeDbAdminAny),
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["dropCollection"] }
- ]
- }
- ]
- },
- {
- testname: "dropDatabase",
- command: {dropDatabase: 1},
- setup: function (db) { db.x.save({}); },
- teardown: function (db) { db.x.save({}); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: {
- dbAdmin: 1,
- dbAdminAnyDatabase: 1,
- dbOwner: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: firstDbName, collection: ""}, actions: ["dropDatabase"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: {
- dbAdminAnyDatabase: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: secondDbName, collection: ""}, actions: ["dropDatabase"] }
- ]
- }
- ]
- },
- {
- testname: "dropIndexes",
- command: {dropIndexes: "x", index: "*"},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_writeDbAdmin,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["dropIndex"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_writeDbAdminAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["dropIndex"] }
- ]
- }
- ]
- },
- {
- testname: "enableSharding",
- command: {enableSharding: "x"},
- skipStandalone: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: Object.extend({enableSharding:1}, roles_clusterManager),
- privileges: [
- { resource: {db: "x", collection: ""}, actions: ["enableSharding"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "eval",
- command: {$eval: function () { print("noop"); } },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: {__system: 1},
- privileges: [
- { resource: {anyResource: true}, actions: ["anyAction"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: {__system: 1},
- privileges: [
- { resource: {anyResource: true}, actions: ["anyAction"] }
- ]
- }
- ]
- },
- {
- testname: "features",
- command: {features: 1},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_all,
- privilegesRequired: [ ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_all,
- privilegesRequired: [ ]
- }
- ]
- },
- {
- testname: "filemd5",
- command: {filemd5: 1, root: "fs"},
- setup: function (db) {
- db.fs.chunks.drop();
- db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "test")});
- db.fs.chunks.ensureIndex({files_id: 1, n: 1});
- },
- teardown: function (db) {
- db.fs.chunks.drop();
- },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: ""}, actions: ["find"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: ""}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "find",
- command: {find: "foo"},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: "foo"}, actions: ["find"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: "foo"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "findWithTerm",
- command: {find: "foo", limit: -1, term: NumberLong(1)},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: {__system: 1},
- privileges: [
- { resource: {db: firstDbName, collection: "foo"}, actions: ["find"] },
- { resource: {cluster: true}, actions: ["internal"] }
- ],
- expectFail: true // because of invalid limit
- },
- ]
- },
- {
- testname: "findAndModify",
- command: {findAndModify: "x", query: {_id: "abc"}, update: {$inc: {n: 1}}},
- setup: function (db) {
- db.x.drop();
- db.x.save( {_id: "abc", n: 0} );
- },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: { readWrite: 1,
- readWriteAnyDatabase: 1,
- dbOwner: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["find", "update"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["find", "update"] }
- ]
- }
- ]
- },
- {
- testname: "flushRouterConfig",
- command: {flushRouterConfig: 1},
- skipStandalone: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: Object.extend({clusterManager: 1}, roles_hostManager),
- privileges: [
- { resource: {cluster: true}, actions: ["flushRouterConfig"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "fsync",
- command: {fsync: 1},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_hostManager,
- privileges: [
- { resource: {cluster: true}, actions: ["fsync"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "fsyncUnlock",
- command: {fsyncUnlock: 1},
- skipSharded: true, // TODO: remove when fsyncUnlock is implemented in mongos
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_hostManager,
- privileges: [
- { resource: {cluster: true}, actions: ["unlock"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "geoNear",
- command: {geoNear: "x", near: [50, 50], num: 1},
- setup: function (db) {
- db.x.drop();
- db.x.save({loc: [50, 50]});
- db.x.ensureIndex({loc: "2d"});
- },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["find"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "geoSearch",
- command: {geoSearch: "x", near: [50, 50], maxDistance: 6, limit: 1, search: {} },
- skipSharded: true,
- setup: function (db) {
- db.x.drop();
- db.x.save({loc: {long: 50, lat: 50}});
- db.x.ensureIndex({loc: "geoHaystack", type: 1}, {bucketSize: 1});
- },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["find"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "getCmdLineOpts",
- command: {getCmdLineOpts: 1},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["getCmdLineOpts"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "getLastError",
- command: {getLastError: 1},
- testcases: [
- { runOnDb: firstDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: secondDbName, roles: roles_all, privileges: [ ] }
- ]
- },
- {
- testname: "getLog",
- command: {getLog: "*"},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["getLog"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "getMore",
- command: {getMore: NumberLong("1"), collection: "foo"},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: "foo"}, actions: ["find"] }
- ],
- expectFail: true
- },
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: "foo"}, actions: ["find"] }
- ],
- expectFail: true
- }
- ]
- },
- {
- testname: "getMoreWithTerm",
- command: {getMore: NumberLong("1"), collection: "foo", term: NumberLong(1)},
- testcases: [
- {
- runOnDb: firstDbName,
- roles: {__system: 1},
- privileges: [
- { resource: {db: firstDbName, collection: "foo"}, actions: ["find"] },
- { resource: {cluster: true}, actions: ["internal"] }
- ],
- expectFail: true
- }
- ]
- },
- {
- testname: "getnonce",
- command: {getnonce: 1},
- testcases: [
- { runOnDb: firstDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: secondDbName, roles: roles_all, privileges: [ ] }
- ]
- },
- {
- testname: "getParameter",
- command: {getParameter: 1, quiet: 1},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: { backup: 1,
- restore: 1,
- clusterMonitor: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {cluster: true}, actions: ["getParameter"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "getPrevError",
- command: {getPrevError: 1},
- skipSharded: true,
- testcases: [
- { runOnDb: firstDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: secondDbName, roles: roles_all, privileges: [ ] }
- ]
- },
- {
- testname: "getShardMap",
- command: {getShardMap: "x"},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["getShardMap"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "getShardVersion",
- command: {getShardVersion: "test.foo"},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {db: "test", collection: 'foo'}, actions: ["getShardVersion"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "group",
- command: {
- group: {
- ns: "x",
- key: {groupby: 1},
- initial: {total: 0},
- $reduce: function (curr, result) {
- result.total += curr.n;
+ {resource: {db: secondDbName, collection: "system.js"}, actions: ["insert"]},
+ ]
+ },
+ ]
+ },
+ {
+ testname: "count",
+ command: {count: "x"},
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges: [{resource: {db: firstDbName, collection: "x"}, actions: ["find"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
+ }
+ ]
+ },
+ {
+ testname: "create",
+ command: {create: "x"},
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: Object.extend({restore: 1}, roles_writeDbAdmin),
+ privileges: [{
+ resource: {db: firstDbName, collection: "x"},
+ actions: ["createCollection"]
+ }]
+ },
+ {
+ runOnDb: firstDbName,
+ roles: Object.extend({restore: 1}, roles_writeDbAdmin),
+ privileges: [{resource: {db: firstDbName, collection: "x"}, actions: ["insert"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: Object.extend({restore: 1}, roles_writeDbAdminAny),
+ privileges: [{
+ resource: {db: secondDbName, collection: "x"},
+ actions: ["createCollection"]
+ }]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: Object.extend({restore: 1}, roles_writeDbAdminAny),
+ privileges:
+ [{resource: {db: secondDbName, collection: "x"}, actions: ["insert"]}]
+ }
+ ]
+ },
+ {
+ testname: "create_capped",
+ command: {create: "x", capped: true, size: 1000},
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_writeDbAdmin,
+ privileges: [{
+ resource: {db: firstDbName, collection: "x"},
+ actions: ["createCollection", "convertToCapped"]
+ }]
+ },
+ {
+ runOnDb: firstDbName,
+ roles: roles_writeDbAdmin,
+ privileges: [{
+ resource: {db: firstDbName, collection: "x"},
+ actions: ["insert", "convertToCapped"]
+ }]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_writeDbAdminAny,
+ privileges: [{
+ resource: {db: secondDbName, collection: "x"},
+ actions: ["createCollection", "convertToCapped"]
+ }]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_writeDbAdminAny,
+ privileges: [{
+ resource: {db: secondDbName, collection: "x"},
+ actions: ["insert", "convertToCapped"]
+ }]
+ }
+ ]
+ },
+ {
+ testname: "createIndexes",
+ command:
+ {createIndexes: "x", indexes: [{ns: firstDbName + ".x", key: {a: 1}, name: "a_1"}]},
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [{
+ runOnDb: firstDbName,
+ roles: Object.extend({
+ readWrite: 1,
+ readWriteAnyDatabase: 1,
+ dbAdmin: 1,
+ dbAdminAnyDatabase: 1,
+ dbOwner: 1,
+ restore: 1,
+ root: 1,
+ __system: 1
+ }),
+ privileges:
+ [{resource: {db: firstDbName, collection: "x"}, actions: ["createIndex"]}]
+ }]
+ },
+ {
+ testname: "currentOp",
+ command: {currentOp: 1, $all: true},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["inprog"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "currentOpCtx",
+ command: {currentOpCtx: 1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["inprog"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["inprog"]}]
+ }
+ ]
+ },
+ {
+ testname: "dataSize_1",
+ command: {dataSize: firstDbName + ".x"},
+ testcases: [{
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges: [{resource: {db: firstDbName, collection: "x"}, actions: ["find"]}]
+ }]
+ },
+ {
+ testname: "dataSize_2",
+ command: {dataSize: secondDbName + ".x"},
+ testcases: [{
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges: [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
+ }]
+ },
+ {
+ testname: "dbHash",
+ command: {dbHash: 1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {
+ read: 1,
+ readAnyDatabase: 1,
+ readWrite: 1,
+ readWriteAnyDatabase: 1,
+ dbOwner: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges: [{resource: {db: firstDbName, collection: ""}, actions: ["dbHash"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: {readAnyDatabase: 1, readWriteAnyDatabase: 1, root: 1, __system: 1},
+ privileges:
+ [{resource: {db: secondDbName, collection: ""}, actions: ["dbHash"]}]
+ }
+ ]
+ },
+ {
+ testname: "dbStats",
+ command: {dbStats: 1, scale: 1024},
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {
+ read: 1,
+ readAnyDatabase: 1,
+ readWrite: 1,
+ readWriteAnyDatabase: 1,
+ dbAdmin: 1,
+ dbAdminAnyDatabase: 1,
+ dbOwner: 1,
+ clusterMonitor: 1,
+ clusterAdmin: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges: [{resource: {db: firstDbName, collection: ""}, actions: ["dbStats"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: {
+ readAnyDatabase: 1,
+ readWriteAnyDatabase: 1,
+ dbAdminAnyDatabase: 1,
+ clusterMonitor: 1,
+ clusterAdmin: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges:
+ [{resource: {db: secondDbName, collection: ""}, actions: ["dbStats"]}]
+ }
+ ]
+ },
+ {
+ testname: "diagLogging",
+ command: {diagLogging: 1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_hostManager,
+ privileges: [{resource: {cluster: true}, actions: ["diagLogging"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "distinct",
+ command: {distinct: "coll", key: "a", query: {}},
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges:
+ [{resource: {db: firstDbName, collection: "coll"}, actions: ["find"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "coll"}, actions: ["find"]}]
+ }
+ ]
+ },
+ {
+ testname: "drop",
+ command: {drop: "x"},
+ setup: function(db) {
+ db.x.save({});
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: Object.extend({restore: 1}, roles_writeDbAdmin),
+ privileges:
+ [{resource: {db: firstDbName, collection: "x"}, actions: ["dropCollection"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: Object.extend({restore: 1}, roles_writeDbAdminAny),
+ privileges: [{
+ resource: {db: secondDbName, collection: "x"},
+ actions: ["dropCollection"]
+ }]
+ }
+ ]
+ },
+ {
+ testname: "dropDatabase",
+ command: {dropDatabase: 1},
+ setup: function(db) {
+ db.x.save({});
+ },
+ teardown: function(db) {
+ db.x.save({});
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {
+ dbAdmin: 1,
+ dbAdminAnyDatabase: 1,
+ dbOwner: 1,
+ clusterAdmin: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges:
+ [{resource: {db: firstDbName, collection: ""}, actions: ["dropDatabase"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: {dbAdminAnyDatabase: 1, clusterAdmin: 1, root: 1, __system: 1},
+ privileges:
+ [{resource: {db: secondDbName, collection: ""}, actions: ["dropDatabase"]}]
+ }
+ ]
+ },
+ {
+ testname: "dropIndexes",
+ command: {dropIndexes: "x", index: "*"},
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_writeDbAdmin,
+ privileges:
+ [{resource: {db: firstDbName, collection: "x"}, actions: ["dropIndex"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_writeDbAdminAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "x"}, actions: ["dropIndex"]}]
+ }
+ ]
+ },
+ {
+ testname: "enableSharding",
+ command: {enableSharding: "x"},
+ skipStandalone: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: Object.extend({enableSharding: 1}, roles_clusterManager),
+ privileges: [{resource: {db: "x", collection: ""}, actions: ["enableSharding"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "eval",
+ command: {
+ $eval: function() {
+ print("noop");
+ }
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {anyResource: true}, actions: ["anyAction"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {anyResource: true}, actions: ["anyAction"]}]
+ }
+ ]
+ },
+ {
+ testname: "features",
+ command: {features: 1},
+ testcases: [
+ {runOnDb: firstDbName, roles: roles_all, privilegesRequired: []},
+ {runOnDb: secondDbName, roles: roles_all, privilegesRequired: []}
+ ]
+ },
+ {
+ testname: "filemd5",
+ command: {filemd5: 1, root: "fs"},
+ setup: function(db) {
+ db.fs.chunks.drop();
+ db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "test")});
+ db.fs.chunks.ensureIndex({files_id: 1, n: 1});
+ },
+ teardown: function(db) {
+ db.fs.chunks.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges: [{resource: {db: firstDbName, collection: ""}, actions: ["find"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges: [{resource: {db: secondDbName, collection: ""}, actions: ["find"]}]
+ }
+ ]
+ },
+ {
+ testname: "find",
+ command: {find: "foo"},
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges: [{resource: {db: firstDbName, collection: "foo"}, actions: ["find"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "foo"}, actions: ["find"]}]
+ }
+ ]
+ },
+ {
+ testname: "findWithTerm",
+ command: {find: "foo", limit: -1, term: NumberLong(1)},
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {__system: 1},
+ privileges: [
+ {resource: {db: firstDbName, collection: "foo"}, actions: ["find"]},
+ {resource: {cluster: true}, actions: ["internal"]}
+ ],
+ expectFail: true // because of invalid limit
+ },
+ ]
+ },
+ {
+ testname: "findAndModify",
+ command: {findAndModify: "x", query: {_id: "abc"}, update: {$inc: {n: 1}}},
+ setup: function(db) {
+ db.x.drop();
+ db.x.save({_id: "abc", n: 0});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {readWrite: 1, readWriteAnyDatabase: 1, dbOwner: 1, root: 1, __system: 1},
+ privileges:
+ [{resource: {db: firstDbName, collection: "x"}, actions: ["find", "update"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
+ privileges: [{
+ resource: {db: secondDbName, collection: "x"},
+ actions: ["find", "update"]
+ }]
+ }
+ ]
+ },
+ {
+ testname: "flushRouterConfig",
+ command: {flushRouterConfig: 1},
+ skipStandalone: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: Object.extend({clusterManager: 1}, roles_hostManager),
+ privileges: [{resource: {cluster: true}, actions: ["flushRouterConfig"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "fsync",
+ command: {fsync: 1},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_hostManager,
+ privileges: [{resource: {cluster: true}, actions: ["fsync"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "fsyncUnlock",
+ command: {fsyncUnlock: 1},
+ skipSharded: true, // TODO: remove when fsyncUnlock is implemented in mongos
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_hostManager,
+ privileges: [{resource: {cluster: true}, actions: ["unlock"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "geoNear",
+ command: {geoNear: "x", near: [50, 50], num: 1},
+ setup: function(db) {
+ db.x.drop();
+ db.x.save({loc: [50, 50]});
+ db.x.ensureIndex({loc: "2d"});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges: [{resource: {db: firstDbName, collection: "x"}, actions: ["find"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
+ }
+ ]
+ },
+ {
+ testname: "geoSearch",
+ command: {geoSearch: "x", near: [50, 50], maxDistance: 6, limit: 1, search: {}},
+ skipSharded: true,
+ setup: function(db) {
+ db.x.drop();
+ db.x.save({loc: {long: 50, lat: 50}});
+ db.x.ensureIndex({loc: "geoHaystack", type: 1}, {bucketSize: 1});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges: [{resource: {db: firstDbName, collection: "x"}, actions: ["find"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
+ }
+ ]
+ },
+ {
+ testname: "getCmdLineOpts",
+ command: {getCmdLineOpts: 1},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["getCmdLineOpts"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "getLastError",
+ command: {getLastError: 1},
+ testcases: [
+ {runOnDb: firstDbName, roles: roles_all, privileges: []},
+ {runOnDb: secondDbName, roles: roles_all, privileges: []}
+ ]
+ },
+ {
+ testname: "getLog",
+ command: {getLog: "*"},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["getLog"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "getMore",
+ command: {getMore: NumberLong("1"), collection: "foo"},
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges: [{resource: {db: firstDbName, collection: "foo"}, actions: ["find"]}],
+ expectFail: true
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges: [{resource: {db: secondDbName, collection: "foo"}, actions: ["find"]}],
+ expectFail: true
+ }
+ ]
+ },
+ {
+ testname: "getMoreWithTerm",
+ command: {getMore: NumberLong("1"), collection: "foo", term: NumberLong(1)},
+ testcases: [{
+ runOnDb: firstDbName,
+ roles: {__system: 1},
+ privileges: [
+ {resource: {db: firstDbName, collection: "foo"}, actions: ["find"]},
+ {resource: {cluster: true}, actions: ["internal"]}
+ ],
+ expectFail: true
+ }]
+ },
+ {
+ testname: "getnonce",
+ command: {getnonce: 1},
+ testcases: [
+ {runOnDb: firstDbName, roles: roles_all, privileges: []},
+ {runOnDb: secondDbName, roles: roles_all, privileges: []}
+ ]
+ },
+ {
+ testname: "getParameter",
+ command: {getParameter: 1, quiet: 1},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {
+ backup: 1,
+ restore: 1,
+ clusterMonitor: 1,
+ clusterAdmin: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges: [{resource: {cluster: true}, actions: ["getParameter"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "getPrevError",
+ command: {getPrevError: 1},
+ skipSharded: true,
+ testcases: [
+ {runOnDb: firstDbName, roles: roles_all, privileges: []},
+ {runOnDb: secondDbName, roles: roles_all, privileges: []}
+ ]
+ },
+ {
+ testname: "getShardMap",
+ command: {getShardMap: "x"},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["getShardMap"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "getShardVersion",
+ command: {getShardVersion: "test.foo"},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_monitoring,
+ privileges:
+ [{resource: {db: "test", collection: 'foo'}, actions: ["getShardVersion"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "group",
+ command: {
+ group: {
+ ns: "x",
+ key: {groupby: 1},
+ initial: {total: 0},
+ $reduce: function(curr, result) {
+ result.total += curr.n;
+ }
+ }
+ },
+ setup: function(db) {
+ db.x.insert({groupby: 1, n: 5});
+ db.x.insert({groupby: 1, n: 6});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges: [{resource: {db: firstDbName, collection: "x"}, actions: ["find"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
+ }
+ ]
+ },
+ {
+ testname: "hostInfo",
+ command: {hostInfo: 1},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["hostInfo"]}]
+ },
+ {
+ runOnDb: firstDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["hostInfo"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["hostInfo"]}]
+ }
+ ]
+ },
+ {
+ testname: "isMaster",
+ command: {isMaster: 1},
+ testcases: [
+ {runOnDb: adminDbName, roles: roles_all, privileges: []},
+ {runOnDb: firstDbName, roles: roles_all, privileges: []},
+ {runOnDb: secondDbName, roles: roles_all, privileges: []}
+ ]
+ },
+ {
+ testname: "killCursors",
+ command: {killCursors: "foo", cursors: [NumberLong("123")]},
+ skipSharded: true, // TODO enable when killCursors command is implemented on mongos
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {
+ read: 1,
+ readAnyDatabase: 1,
+ readWrite: 1,
+ readWriteAnyDatabase: 1,
+ dbOwner: 1,
+ hostManager: 1,
+ clusterAdmin: 1,
+ backup: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges:
+ [{resource: {db: firstDbName, collection: "foo"}, actions: ["killCursors"]}],
+ expectFail: true
+ },
+ {
+ runOnDb: secondDbName,
+ roles: {
+ readAnyDatabase: 1,
+ readWriteAnyDatabase: 1,
+ hostManager: 1,
+ clusterAdmin: 1,
+ backup: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges:
+ [{resource: {db: secondDbName, collection: "foo"}, actions: ["killCursors"]}],
+ expectFail: true
+ }
+ ]
+ },
+ {
+ testname: "killOp", // standalone version
+ command: {killOp: 1, op: 123},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_hostManager,
+ privileges: [{resource: {cluster: true}, actions: ["killop"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "killOp", // sharded version
+ command: {killOp: 1, op: "shard1:123"},
+ skipStandalone: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_hostManager,
+ privileges: [{resource: {cluster: true}, actions: ["killop"]}],
+ expectFail: true // we won't be able to find the shardId
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "listCommands",
+ command: {listCommands: 1},
+ testcases: [
+ {runOnDb: adminDbName, roles: roles_all, privileges: []},
+ {runOnDb: firstDbName, roles: roles_all, privileges: []},
+ {runOnDb: secondDbName, roles: roles_all, privileges: []}
+ ]
+ },
+ {
+ testname: "listDatabases",
+ command: {listDatabases: 1},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {
+ readAnyDatabase: 1,
+ readWriteAnyDatabase: 1,
+ dbAdminAnyDatabase: 1,
+ userAdminAnyDatabase: 1,
+ clusterMonitor: 1,
+ clusterAdmin: 1,
+ backup: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges: [{resource: {cluster: true}, actions: ["listDatabases"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "listCollections",
+ command: {listCollections: 1},
+ setup: function(db) {
+ db.x.insert({_id: 5});
+ db.y.insert({_id: 6});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ db.y.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {
+ read: 1,
+ readAnyDatabase: 1,
+ readWrite: 1,
+ readWriteAnyDatabase: 1,
+ clusterAdmin: 1,
+ clusterMonitor: 1,
+ clusterManager: 1,
+ dbAdmin: 1,
+ dbAdminAnyDatabase: 1,
+ dbOwner: 1,
+ backup: 1,
+ restore: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges:
+ [{resource: {db: firstDbName, collection: ""}, actions: ["listCollections"]}]
+ },
+ // Test legacy (pre 3.0) way of authorizing listCollections.
+ {
+ runOnDb: firstDbName,
+ privileges: [{
+ resource: {db: firstDbName, collection: "system.namespaces"},
+ actions: ["find"]
+ }]
+ }
+ ]
+ },
+ {
+ testname: "listIndexes",
+ command: {listIndexes: "x"},
+ setup: function(db) {
+ db.x.insert({_id: 5});
+ db.x.insert({_id: 6});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {
+ read: 1,
+ readAnyDatabase: 1,
+ readWrite: 1,
+ readWriteAnyDatabase: 1,
+ dbAdmin: 1,
+ dbAdminAnyDatabase: 1,
+ dbOwner: 1,
+ backup: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges:
+ [{resource: {db: firstDbName, collection: ""}, actions: ["listIndexes"]}]
+ },
+ // Test legacy (pre 3.0) way of authorizing listIndexes.
+ {
+ runOnDb: firstDbName,
+ privileges: [{
+ resource: {db: firstDbName, collection: "system.indexes"},
+ actions: ["find"]
+ }]
+ }
+ ]
+ },
+ {
+ testname: "listShards",
+ command: {listShards: 1},
+ skipStandalone: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: Object.extend({clusterManager: 1}, roles_monitoring),
+ privileges: [{resource: {cluster: true}, actions: ["listShards"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "logRotate",
+ command: {logRotate: 1},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_hostManager,
+ privileges: [{resource: {cluster: true}, actions: ["logRotate"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "mapReduce_readonly",
+ command: {
+ mapreduce: "x",
+ map: function() {
+ emit(this.groupby, this.n);
+ },
+ reduce: function(id, emits) {
+ return Array.sum(emits);
+ },
+ out: {inline: 1}
+ },
+ setup: function(db) {
+ db.x.insert({groupby: 1, n: 5});
+ db.x.insert({groupby: 1, n: 6});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_read,
+ privileges: [{resource: {db: firstDbName, collection: "x"}, actions: ["find"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_readAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
+ }
+ ]
+ },
+ {
+ testname: "mapReduce_write",
+ command: {
+ mapreduce: "x",
+ map: function() {
+ emit(this.groupby, this.n);
+ },
+ reduce: function(id, emits) {
+ return Array.sum(emits);
+ },
+ out: "mr_out"
+ },
+ setup: function(db) {
+ db.x.insert({groupby: 1, n: 5});
+ db.x.insert({groupby: 1, n: 6});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {readWrite: 1, readWriteAnyDatabase: 1, dbOwner: 1, root: 1, __system: 1},
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["find"]},
+ {
+ resource: {db: firstDbName, collection: "mr_out"},
+ actions: ["insert", "remove"]
}
- }
- },
- setup: function (db) {
- db.x.insert({groupby: 1, n: 5});
- db.x.insert({groupby: 1, n: 6});
- },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["find"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "hostInfo",
- command: {hostInfo: 1},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["hostInfo"] }
- ]
- },
- {
- runOnDb: firstDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["hostInfo"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["hostInfo"] }
- ]
- }
- ]
- },
- {
- testname: "isMaster",
- command: {isMaster: 1},
- testcases: [
- { runOnDb: adminDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: firstDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: secondDbName, roles: roles_all, privileges: [ ] }
- ]
- },
- {
- testname: "killCursors",
- command: {killCursors: "foo", cursors: [NumberLong("123")]},
- skipSharded: true, // TODO enable when killCursors command is implemented on mongos
- testcases: [
- {
- runOnDb: firstDbName,
- roles: {
- read: 1,
- readAnyDatabase: 1,
- readWrite: 1,
- readWriteAnyDatabase: 1,
- dbOwner: 1,
- hostManager: 1,
- clusterAdmin: 1,
- backup: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: firstDbName, collection: "foo"}, actions: ["killCursors"] }
- ],
- expectFail: true
- },
- {
- runOnDb: secondDbName,
- roles: {
- readAnyDatabase: 1,
- readWriteAnyDatabase: 1,
- hostManager: 1,
- clusterAdmin: 1,
- backup: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: secondDbName, collection: "foo"}, actions: ["killCursors"] }
- ],
- expectFail: true
- }
- ]
- },
- {
- testname: "killOp", // standalone version
- command: {killOp: 1, op: 123},
- skipSharded: true,
- testcases : [
- {
- runOnDb: adminDbName,
- roles: roles_hostManager,
- privileges: [
- { resource: {cluster: true}, actions: ["killop"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "killOp", // sharded version
- command: {killOp: 1, op: "shard1:123"},
- skipStandalone: true,
- testcases : [
- {
- runOnDb: adminDbName,
- roles: roles_hostManager,
- privileges: [
- { resource: {cluster: true}, actions: ["killop"] }
- ],
- expectFail: true // we won't be able to find the shardId
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "listCommands",
- command: {listCommands: 1},
- testcases: [
- { runOnDb: adminDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: firstDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: secondDbName, roles: roles_all, privileges: [ ] }
- ]
- },
- {
- testname: "listDatabases",
- command: {listDatabases: 1},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {
- readAnyDatabase: 1,
- readWriteAnyDatabase: 1,
- dbAdminAnyDatabase: 1,
- userAdminAnyDatabase: 1,
- clusterMonitor: 1,
- clusterAdmin: 1,
- backup: 1,
- root: 1,
- __system: 1
+ ]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
+ privileges: [
+ {resource: {db: secondDbName, collection: "x"}, actions: ["find"]},
+ {
+ resource: {db: secondDbName, collection: "mr_out"},
+ actions: ["insert", "remove"]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ testname: "s_mergeChunks",
+ command: {mergeChunks: "test.x", bounds: [{i: 0}, {i: 5}]},
+ skipStandalone: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {db: "test", collection: "x"}, actions: ["splitChunk"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "d_mergeChunks",
+ command: {mergeChunks: "test.x", bounds: [{i: 0}, {i: 5}]},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["internal"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "s_moveChunk",
+ command: {moveChunk: "test.x"},
+ skipStandalone: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {db: "test", collection: "x"}, actions: ["moveChunk"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "d_moveChunk",
+ command: {moveChunk: "test.x"},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["internal"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "movePrimary",
+ command: {movePrimary: "x"},
+ skipStandalone: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {db: "x", collection: ""}, actions: ["moveChunk"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "netstat",
+ command: {netstat: "x"},
+ skipStandalone: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["netstat"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "planCacheIndexFilter",
+ command: {planCacheClearFilters: "x"},
+ skipSharded: true,
+ setup: function(db) {
+ db.x.save({});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_dbAdmin,
+ privileges: [{
+ resource: {db: firstDbName, collection: "x"},
+ actions: ["planCacheIndexFilter"]
+ }],
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_dbAdminAny,
+ privileges: [{
+ resource: {db: secondDbName, collection: "x"},
+ actions: ["planCacheIndexFilter"]
+ }],
+ },
+ ]
+ },
+ {
+ testname: "planCacheRead",
+ command: {planCacheListQueryShapes: "x"},
+ skipSharded: true,
+ setup: function(db) {
+ db.x.save({});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_readDbAdmin,
+ privileges:
+ [{resource: {db: firstDbName, collection: "x"}, actions: ["planCacheRead"]}],
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_readDbAdminAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "x"}, actions: ["planCacheRead"]}],
+ },
+ ]
+ },
+ {
+ testname: "planCacheWrite",
+ command: {planCacheClear: "x"},
+ skipSharded: true,
+ setup: function(db) {
+ db.x.save({});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_dbAdmin,
+ privileges:
+ [{resource: {db: firstDbName, collection: "x"}, actions: ["planCacheWrite"]}],
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_dbAdminAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "x"}, actions: ["planCacheWrite"]}],
+ },
+ ]
+ },
+ {
+ testname: "ping",
+ command: {ping: 1},
+ testcases: [
+ {runOnDb: adminDbName, roles: roles_all, privileges: []},
+ {runOnDb: firstDbName, roles: roles_all, privileges: []},
+ {runOnDb: secondDbName, roles: roles_all, privileges: []}
+ ]
+ },
+ {
+ testname: "profile",
+ command: {profile: 0},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_dbAdmin,
+ privileges:
+ [{resource: {db: firstDbName, collection: ""}, actions: ["enableProfiler"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_dbAdminAny,
+ privileges: [{
+ resource: {db: secondDbName, collection: ""},
+ actions: ["enableProfiler"]
+ }]
+ }
+ ]
+ },
+ {
+ testname: "profileGetLevel",
+ command: {profile: -1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: {
+ backup: 1,
+ dbAdmin: 1,
+ dbAdminAnyDatabase: 1,
+ dbOwner: 1,
+ clusterMonitor: 1,
+ clusterAdmin: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges: [{
+ resource: {db: firstDbName, collection: "system.profile"},
+ actions: ["find"]
+ }]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: {
+ backup: 1,
+ dbAdminAnyDatabase: 1,
+ clusterMonitor: 1,
+ clusterAdmin: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges: [{
+ resource: {db: secondDbName, collection: "system.profile"},
+ actions: ["find"]
+ }]
+ }
+ ]
+ },
+ {
+ testname: "renameCollection_sameDb",
+ command:
+ {renameCollection: firstDbName + ".x", to: firstDbName + ".y", dropTarget: true},
+ setup: function(db) {
+ db.getSisterDB(firstDbName).x.save({});
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ db.getSisterDB(firstDbName).y.drop();
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_writeDbAdmin,
+ privileges: [
+ {
+ resource: {db: firstDbName, collection: ""},
+ actions: ["renameCollectionSameDB"]
},
- privileges: [
- { resource: {cluster: true}, actions: ["listDatabases"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "listCollections",
- command: {listCollections: 1},
- setup: function (db) {
- db.x.insert({_id: 5});
- db.y.insert({_id: 6});
- },
- teardown: function (db) {
- db.x.drop();
- db.y.drop();
- },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: {
- read: 1,
- readAnyDatabase: 1,
- readWrite: 1,
- readWriteAnyDatabase: 1,
- clusterAdmin: 1,
- clusterMonitor: 1,
- clusterManager: 1,
- dbAdmin: 1,
- dbAdminAnyDatabase: 1,
- dbOwner: 1,
- backup: 1,
- restore: 1,
- root: 1,
- __system: 1
+ {resource: {db: firstDbName, collection: "y"}, actions: ["dropCollection"]}
+ ]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ // Make sure that you cannot use renameCollectionSameDB to rename from a collection you
+ // don't have read access on to one that you do.
+ testname: "renameCollection_sameDb_failure",
+ command: {renameCollection: firstDbName + ".x", to: firstDbName + ".y"},
+ setup: function(db) {
+ db.getSisterDB(firstDbName).x.save({});
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ db.getSisterDB(firstDbName).y.drop();
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ privileges: [
+ {
+ resource: {db: firstDbName, collection: ""},
+ actions: ["renameCollectionSameDB"]
},
- privileges: [
- {
- resource: {db: firstDbName, collection: ""},
- actions: ["listCollections"]
- }
- ]
- },
- // Test legacy (pre 3.0) way of authorizing listCollections.
- {
- runOnDb: firstDbName,
- privileges: [
- {
- resource: {db: firstDbName, collection: "system.namespaces"},
- actions: ["find"]
- }
- ]
- }
- ]
- },
- {
- testname: "listIndexes",
- command: {listIndexes: "x"},
- setup: function (db) {
- db.x.insert({_id: 5});
- db.x.insert({_id: 6});
- },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: {
- read: 1,
- readAnyDatabase: 1,
- readWrite: 1,
- readWriteAnyDatabase: 1,
- dbAdmin: 1,
- dbAdminAnyDatabase: 1,
- dbOwner: 1,
- backup: 1,
- root: 1,
- __system: 1
+ {resource: {db: firstDbName, collection: "y"}, actions: ["find"]}
+ ],
+ expectAuthzFailure: true
+ },
+ ]
+ },
+ {
+ testname: "renameCollection_twoDbs",
+ command: {renameCollection: firstDbName + ".x", to: secondDbName + ".y"},
+ setup: function(db) {
+ db.getSisterDB(firstDbName).x.save({});
+ db.getSisterDB(adminDbName).runCommand({movePrimary: firstDbName, to: shard0name});
+ db.getSisterDB(adminDbName).runCommand({movePrimary: secondDbName, to: shard0name});
+ },
+ teardown: function(db) {
+ db.getSisterDB(firstDbName).x.drop();
+ db.getSisterDB(secondDbName).y.drop();
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
+ privileges: [
+ {
+ resource: {db: firstDbName, collection: "x"},
+ actions: ["find", "dropCollection"]
},
- privileges: [
- {
- resource: {db: firstDbName, collection: ""},
- actions: ["listIndexes"]
- }
- ]
- },
- // Test legacy (pre 3.0) way of authorizing listIndexes.
- {
- runOnDb: firstDbName,
- privileges: [
- {
- resource: {db: firstDbName, collection: "system.indexes"},
- actions: ["find"]
- }
- ]
- }
- ]
- },
- {
- testname: "listShards",
- command: {listShards: 1},
- skipStandalone: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: Object.extend({clusterManager: 1}, roles_monitoring),
- privileges: [
- { resource: {cluster: true}, actions: ["listShards"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "logRotate",
- command: {logRotate: 1},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_hostManager,
- privileges: [
- { resource: {cluster: true}, actions: ["logRotate"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "mapReduce_readonly",
- command: {
- mapreduce: "x",
- map: function () { emit(this.groupby, this.n); },
- reduce: function (id,emits) { return Array.sum(emits); },
- out: {inline: 1}
- },
- setup: function (db) {
- db.x.insert({groupby: 1, n: 5});
- db.x.insert({groupby: 1, n: 6});
- },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_read,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["find"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_readAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["find"] }
- ]
- }
- ]
- },
- {
- testname: "mapReduce_write",
- command: {
- mapreduce: "x",
- map: function () { emit(this.groupby, this.n); },
- reduce: function (id,emits) { return Array.sum(emits); },
- out: "mr_out"
- },
- setup: function (db) {
- db.x.insert({groupby: 1, n: 5});
- db.x.insert({groupby: 1, n: 6});
- },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: { readWrite: 1,
- readWriteAnyDatabase: 1,
- dbOwner: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["find"] },
- { resource: {db: firstDbName, collection: "mr_out"}, actions: ["insert", "remove"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["find"] },
- { resource: {db: secondDbName, collection: "mr_out"}, actions: ["insert", "remove"] }
- ]
- }
- ]
- },
- {
- testname: "s_mergeChunks",
- command: {mergeChunks: "test.x", bounds: [{i : 0}, {i : 5}]},
- skipStandalone: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {db: "test", collection: "x"}, actions: ["splitChunk"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "d_mergeChunks",
- command: {mergeChunks: "test.x", bounds: [{i : 0}, {i : 5}]},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: { __system: 1 },
- privileges: [
- { resource: {cluster: true}, actions: ["internal"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "s_moveChunk",
- command: {moveChunk: "test.x"},
- skipStandalone: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {db: "test", collection: "x"}, actions: ["moveChunk"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "d_moveChunk",
- command: {moveChunk: "test.x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: { __system: 1 },
- privileges: [
- { resource: {cluster: true}, actions: ["internal"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "movePrimary",
- command: {movePrimary: "x"},
- skipStandalone: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {db: "x", collection: ""}, actions: ["moveChunk"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "netstat",
- command: {netstat: "x"},
- skipStandalone: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["netstat"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "planCacheIndexFilter",
- command: {planCacheClearFilters: "x"},
- skipSharded: true,
- setup: function (db) { db.x.save( {} ); },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_dbAdmin,
- privileges: [
+ {
+ resource: {db: secondDbName, collection: "y"},
+ actions: ["insert", "createIndex"]
+ }
+ ]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "reIndex",
+ command: {reIndex: "x"},
+ setup: function(db) {
+ db.x.save({});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_dbAdmin,
+ privileges:
+ [{resource: {db: firstDbName, collection: "x"}, actions: ["reIndex"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_dbAdminAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "x"}, actions: ["reIndex"]}]
+ }
+ ]
+ },
+ {
+ testname: "removeShard",
+ command: {removeShard: "x"},
+ skipStandalone: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {cluster: true}, actions: ["removeShard"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "repairDatabase",
+ command: {repairDatabase: 1},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles:
+ {dbAdminAnyDatabase: 1, hostManager: 1, clusterAdmin: 1, root: 1, __system: 1},
+ privileges:
+ [{resource: {db: adminDbName, collection: ""}, actions: ["repairDatabase"]}]
+ },
+ {
+ runOnDb: firstDbName,
+ roles: {
+ dbAdmin: 1,
+ dbAdminAnyDatabase: 1,
+ hostManager: 1,
+ clusterAdmin: 1,
+ dbOwner: 1,
+ root: 1,
+ __system: 1
+ },
+ privileges:
+ [{resource: {db: firstDbName, collection: ""}, actions: ["repairDatabase"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles:
+ {dbAdminAnyDatabase: 1, hostManager: 1, clusterAdmin: 1, root: 1, __system: 1},
+ privileges: [{
+ resource: {db: secondDbName, collection: ""},
+ actions: ["repairDatabase"]
+ }]
+ }
+ ]
+ },
+ {
+ testname: "replSetElect",
+ command: {replSetElect: 1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["internal"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "replSetFreeze",
+ command: {replSetFreeze: "x"},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {cluster: true}, actions: ["replSetStateChange"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "replSetFresh",
+ command: {replSetFresh: "x"},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["internal"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "replSetGetRBID",
+ command: {replSetGetRBID: 1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["internal"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "replSetGetStatus",
+ command: {replSetGetStatus: 1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles:
+ {clusterMonitor: 1, clusterManager: 1, clusterAdmin: 1, root: 1, __system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["replSetGetStatus"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "replSetGetConfig",
+ command: {replSetGetConfig: 1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles:
+ {clusterMonitor: 1, clusterManager: 1, clusterAdmin: 1, root: 1, __system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["replSetGetConfig"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "replSetHeartbeat",
+ command: {replSetHeartbeat: 1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["internal"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "replSetInitiate",
+ command: {replSetInitiate: "x"},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {cluster: true}, actions: ["replSetConfigure"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "replSetMaintenance",
+ command: {replSetMaintenance: "x"},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {cluster: true}, actions: ["replSetStateChange"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "replSetReconfig",
+ command: {replSetReconfig: "x"},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {cluster: true}, actions: ["replSetConfigure"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "replSetStepDown",
+ command: {replSetStepDown: "x"},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {cluster: true}, actions: ["replSetStateChange"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "replSetSyncFrom",
+ command: {replSetSyncFrom: "x"},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {cluster: true}, actions: ["replSetStateChange"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "resetError",
+ command: {resetError: 1},
+ testcases: [
+ {runOnDb: adminDbName, roles: roles_all, privileges: []},
+ {runOnDb: firstDbName, roles: roles_all, privileges: []},
+ {runOnDb: secondDbName, roles: roles_all, privileges: []}
+ ]
+ },
+ {
+ testname: "resync",
+ command: {resync: 1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {hostManager: 1, clusterManager: 1, clusterAdmin: 1, root: 1, __system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["resync"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "serverStatus",
+ command: {serverStatus: 1},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["serverStatus"]}]
+ },
+ {
+ runOnDb: firstDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["serverStatus"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["serverStatus"]}]
+ }
+ ]
+ },
+ {
+ testname: "setParameter",
+ command: {setParameter: 1, quiet: 1},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_hostManager,
+ privileges: [{resource: {cluster: true}, actions: ["setParameter"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "setShardVersion",
+ command: {setShardVersion: 1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["internal"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "shardCollection",
+ command: {shardCollection: "test.x"},
+ skipStandalone: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: Object.extend({enableSharding: 1}, roles_clusterManager),
+ privileges:
+ [{resource: {db: "test", collection: "x"}, actions: ["enableSharding"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "shardingState",
+ command: {shardingState: 1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["shardingState"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "shutdown",
+ command: {shutdown: 1},
+ testcases: [{runOnDb: firstDbName, roles: {}}, {runOnDb: secondDbName, roles: {}}]
+ },
+ {
+ testname: "split",
+ command: {split: "test.x"},
+ skipStandalone: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {db: "test", collection: "x"}, actions: ["splitChunk"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "splitChunk",
+ command: {splitChunk: "test.x"},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["internal"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "splitVector",
+ command: {splitVector: "test.x"},
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {db: "test", collection: "x"}, actions: ["splitVector"]}],
+ expectFail: true
+ },
+ {
+ runOnDb: firstDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {db: "test", collection: "x"}, actions: ["splitVector"]}],
+ expectFail: true
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_clusterManager,
+ privileges: [{resource: {db: "test", collection: "x"}, actions: ["splitVector"]}],
+ expectFail: true
+ }
+ ]
+ },
+ /* temporarily removed see SERVER-13555
+ {
+ testname: "storageDetails",
+ command: {storageDetails: "x", analyze: "diskStorage"},
+ skipSharded: true,
+ setup: function (db) { db.x.save( {} ); },
+ teardown: function (db) { db.x.drop(); },
+ testcases: [
{
- resource: {db: firstDbName, collection: "x"},
- actions: ["planCacheIndexFilter"]
- }
- ],
- },
- {
- runOnDb: secondDbName,
- roles: roles_dbAdminAny,
- privileges: [
+ runOnDb: firstDbName,
+ roles: roles_dbAdmin,
+ privileges: [
+ { resource: {db: firstDbName, collection: "x"}, actions:
+ ["storageDetails"] }
+ ]
+ },
{
- resource: {db: secondDbName, collection: "x"},
- actions: ["planCacheIndexFilter"]
+ runOnDb: secondDbName,
+ roles: roles_dbAdminAny,
+ privileges: [
+ { resource: {db: secondDbName, collection: "x"}, actions:
+ ["storageDetails"] }
+ ]
}
- ],
- },
- ]
- },
- {
- testname: "planCacheRead",
- command: {planCacheListQueryShapes: "x"},
- skipSharded: true,
- setup: function (db) { db.x.save( {} ); },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_readDbAdmin,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["planCacheRead"] }
- ],
- },
- {
- runOnDb: secondDbName,
- roles: roles_readDbAdminAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["planCacheRead"] }
- ],
- },
- ]
- },
- {
- testname: "planCacheWrite",
- command: {planCacheClear: "x"},
- skipSharded: true,
- setup: function (db) { db.x.save( {} ); },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_dbAdmin,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["planCacheWrite"] }
- ],
- },
- {
- runOnDb: secondDbName,
- roles: roles_dbAdminAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["planCacheWrite"] }
- ],
- },
- ]
- },
- {
- testname: "ping",
- command: {ping: 1},
- testcases: [
- { runOnDb: adminDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: firstDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: secondDbName, roles: roles_all, privileges: [ ] }
- ]
- },
- {
- testname: "profile",
- command: {profile: 0},
- skipSharded: true,
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_dbAdmin,
- privileges: [
- { resource: {db: firstDbName, collection: ""}, actions: ["enableProfiler"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_dbAdminAny,
- privileges: [
- { resource: {db: secondDbName, collection: ""}, actions: ["enableProfiler"] }
- ]
- }
- ]
- },
- {
- testname: "profileGetLevel",
- command: {profile: -1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: firstDbName,
- roles: {
- backup: 1,
- dbAdmin: 1,
- dbAdminAnyDatabase: 1,
- dbOwner: 1,
- clusterMonitor: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: firstDbName, collection: "system.profile"},
- actions: ["find"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: {
- backup: 1,
- dbAdminAnyDatabase: 1,
- clusterMonitor: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: secondDbName, collection: "system.profile"},
- actions: ["find"] }
]
- }
- ]
- },
- {
- testname: "renameCollection_sameDb",
- command: {renameCollection: firstDbName + ".x",
- to: firstDbName + ".y",
- dropTarget: true},
- setup: function (db) { db.getSisterDB(firstDbName).x.save( {} ); },
- teardown: function (db) {
- db.getSisterDB(firstDbName).x.drop();
- db.getSisterDB(firstDbName).y.drop();
- },
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_writeDbAdmin,
- privileges: [
- { resource: {db: firstDbName, collection: ""},
- actions: ["renameCollectionSameDB"] },
- { resource: {db: firstDbName, collection: "y"},
- actions: ["dropCollection"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- // Make sure that you cannot use renameCollectionSameDB to rename from a collection you
- // don't have read access on to one that you do.
- testname: "renameCollection_sameDb_failure",
- command: {renameCollection: firstDbName + ".x", to: firstDbName + ".y"},
- setup: function (db) { db.getSisterDB(firstDbName).x.save( {} ); },
- teardown: function (db) {
- db.getSisterDB(firstDbName).x.drop();
- db.getSisterDB(firstDbName).y.drop();
- },
- testcases: [
- {
- runOnDb: adminDbName,
- privileges: [
- { resource: {db: firstDbName, collection: ""},
- actions: ["renameCollectionSameDB"] },
- { resource: {db: firstDbName, collection: "y"}, actions: ["find"] }
- ],
- expectAuthzFailure: true
- },
- ]
- },
- {
- testname: "renameCollection_twoDbs",
- command: {renameCollection: firstDbName + ".x", to: secondDbName + ".y"},
- setup: function (db) {
- db.getSisterDB(firstDbName).x.save( {} );
- db.getSisterDB(adminDbName).runCommand({movePrimary: firstDbName, to: shard0name});
- db.getSisterDB(adminDbName).runCommand({movePrimary: secondDbName, to: shard0name});
- },
- teardown: function (db) {
- db.getSisterDB(firstDbName).x.drop();
- db.getSisterDB(secondDbName).y.drop();
- },
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {readWriteAnyDatabase: 1, root: 1, __system: 1},
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["find", "dropCollection"] },
- { resource: {db: secondDbName, collection: "y"}, actions: ["insert", "createIndex"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "reIndex",
- command: {reIndex: "x"},
- setup: function (db) { db.x.save( {} ); },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_dbAdmin,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["reIndex"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_dbAdminAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["reIndex"] }
- ]
- }
- ]
- },
- {
- testname: "removeShard",
- command: {removeShard: "x"},
- skipStandalone: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {cluster: true}, actions: ["removeShard"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "repairDatabase",
- command: {repairDatabase: 1},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {
- dbAdminAnyDatabase: 1,
- hostManager: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: adminDbName, collection: ""}, actions: ["repairDatabase"] }
- ]
- },
- {
- runOnDb: firstDbName,
- roles: {
- dbAdmin: 1,
- dbAdminAnyDatabase: 1,
- hostManager: 1,
- clusterAdmin: 1,
- dbOwner: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: firstDbName, collection: ""}, actions: ["repairDatabase"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: {
- dbAdminAnyDatabase: 1,
- hostManager: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {db: secondDbName, collection: ""}, actions: ["repairDatabase"] }
- ]
- }
- ]
- },
- {
- testname: "replSetElect",
- command: {replSetElect: 1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {__system: 1},
- privileges: [
- { resource: {cluster: true}, actions: ["internal"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "replSetFreeze",
- command: {replSetFreeze: "x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {cluster: true}, actions: ["replSetStateChange"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "replSetFresh",
- command: {replSetFresh: "x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {__system: 1},
- privileges: [
- { resource: {cluster: true}, actions: ["internal"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "replSetGetRBID",
- command: {replSetGetRBID: 1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {__system: 1},
- privileges: [
- { resource: {cluster: true}, actions: ["internal"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "replSetGetStatus",
- command: {replSetGetStatus: 1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {
- clusterMonitor: 1,
- clusterManager: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {cluster: true}, actions: ["replSetGetStatus"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "replSetGetConfig",
- command: {replSetGetConfig: 1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {
- clusterMonitor: 1,
- clusterManager: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {cluster: true}, actions: ["replSetGetConfig"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "replSetHeartbeat",
- command: {replSetHeartbeat: 1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {__system: 1},
- privileges: [
- { resource: {cluster: true}, actions: ["internal"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "replSetInitiate",
- command: {replSetInitiate: "x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {cluster: true}, actions: ["replSetConfigure"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "replSetMaintenance",
- command: {replSetMaintenance: "x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {cluster: true}, actions: ["replSetStateChange"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "replSetReconfig",
- command: {replSetReconfig: "x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {cluster: true}, actions: ["replSetConfigure"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "replSetStepDown",
- command: {replSetStepDown: "x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {cluster: true}, actions: ["replSetStateChange"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "replSetSyncFrom",
- command: {replSetSyncFrom: "x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {cluster: true}, actions: ["replSetStateChange"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "resetError",
- command: {resetError: 1},
- testcases: [
- { runOnDb: adminDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: firstDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: secondDbName, roles: roles_all, privileges: [ ] }
- ]
- },
- {
- testname: "resync",
- command: {resync: 1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {
- hostManager: 1,
- clusterManager: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
- },
- privileges: [
- { resource: {cluster: true}, actions: ["resync"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "serverStatus",
- command: {serverStatus: 1},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["serverStatus"] }
- ]
- },
- {
- runOnDb: firstDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["serverStatus"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["serverStatus"] }
- ]
- }
- ]
- },
- {
- testname: "setParameter",
- command: {setParameter: 1, quiet: 1},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_hostManager,
- privileges: [
- { resource: {cluster: true}, actions: ["setParameter"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "setShardVersion",
- command: {setShardVersion: 1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {__system: 1},
- privileges: [
- { resource: {cluster: true}, actions: ["internal"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "shardCollection",
- command: {shardCollection: "test.x"},
- skipStandalone: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: Object.extend({enableSharding:1}, roles_clusterManager),
- privileges: [
- { resource: {db: "test", collection: "x"}, actions: ["enableSharding"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "shardingState",
- command: {shardingState: 1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["shardingState"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "shutdown",
- command: {shutdown: 1},
- testcases: [
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "split",
- command: {split: "test.x"},
- skipStandalone: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {db: "test", collection: "x"}, actions: ["splitChunk"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "splitChunk",
- command: {splitChunk: "test.x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: { __system: 1 },
- privileges: [
- { resource: {cluster: true}, actions: ["internal"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "splitVector",
- command: {splitVector: "test.x"},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {db: "test", collection: "x"}, actions: ["splitVector"] }
- ],
- expectFail: true
- },
- {
- runOnDb: firstDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {db: "test", collection: "x"}, actions: ["splitVector"] }
- ],
- expectFail: true
- },
- {
- runOnDb: secondDbName,
- roles: roles_clusterManager,
- privileges: [
- { resource: {db: "test", collection: "x"}, actions: ["splitVector"] }
- ],
- expectFail: true
- }
- ]
- },
-/* temporarily removed see SERVER-13555
- {
- testname: "storageDetails",
- command: {storageDetails: "x", analyze: "diskStorage"},
- skipSharded: true,
- setup: function (db) { db.x.save( {} ); },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_dbAdmin,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["storageDetails"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_dbAdminAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["storageDetails"] }
- ]
- }
- ]
- }, */
- {
- testname: "top",
- command: {top: 1},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_monitoring,
- privileges: [
- { resource: {cluster: true}, actions: ["top"] }
- ]
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "touch",
- command: {touch: "x", data: true, index: false},
- skipSharded: true,
- setup: function (db) { db.x.save( {} ); },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: adminDbName,
- roles: roles_hostManager,
- privileges: [
- { resource: {cluster: true}, actions: ["touch"] }
- ]
- },
- {
- runOnDb: firstDbName,
- roles: roles_hostManager,
- privileges: [
- { resource: {cluster: true}, actions: ["touch"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_hostManager,
- privileges: [
- { resource: {cluster: true}, actions: ["touch"] }
- ]
- }
- ]
- },
- {
- testname: "unsetSharding",
- command: {unsetSharding: "x"},
- skipSharded: true,
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {__system: 1},
- privileges: [
- { resource: {cluster: true}, actions: ["internal"] }
- ],
- expectFail: true
- },
- { runOnDb: firstDbName, roles: {} },
- { runOnDb: secondDbName, roles: {} }
- ]
- },
- {
- testname: "validate",
- command: {validate: "x"},
- setup: function (db) { db.x.save( {} ); },
- teardown: function (db) { db.x.drop(); },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_dbAdmin,
- privileges: [
- { resource: {db: firstDbName, collection: "x"}, actions: ["validate"] }
- ]
- },
- {
- runOnDb: secondDbName,
- roles: roles_dbAdminAny,
- privileges: [
- { resource: {db: secondDbName, collection: "x"}, actions: ["validate"] }
- ]
- }
- ]
- },
- {
- // Test that the root role has the privilege to validate any system.* collection
- testname: "validate_system",
- command: {validate: "system.users"},
- testcases: [
- {
- runOnDb: adminDbName,
- roles: {
- root: 1,
- __system: 1
- }
- }
- ]
- },
- {
- testname: "whatsmyuri",
- command: {whatsmyuri: 1},
- testcases: [
- { runOnDb: adminDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: firstDbName, roles: roles_all, privileges: [ ] },
- { runOnDb: secondDbName, roles: roles_all, privileges: [ ] }
- ]
+ }, */
+ {
+ testname: "top",
+ command: {top: 1},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_monitoring,
+ privileges: [{resource: {cluster: true}, actions: ["top"]}]
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "touch",
+ command: {touch: "x", data: true, index: false},
+ skipSharded: true,
+ setup: function(db) {
+ db.x.save({});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: roles_hostManager,
+ privileges: [{resource: {cluster: true}, actions: ["touch"]}]
+ },
+ {
+ runOnDb: firstDbName,
+ roles: roles_hostManager,
+ privileges: [{resource: {cluster: true}, actions: ["touch"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_hostManager,
+ privileges: [{resource: {cluster: true}, actions: ["touch"]}]
+ }
+ ]
+ },
+ {
+ testname: "unsetSharding",
+ command: {unsetSharding: "x"},
+ skipSharded: true,
+ testcases: [
+ {
+ runOnDb: adminDbName,
+ roles: {__system: 1},
+ privileges: [{resource: {cluster: true}, actions: ["internal"]}],
+ expectFail: true
+ },
+ {runOnDb: firstDbName, roles: {}},
+ {runOnDb: secondDbName, roles: {}}
+ ]
+ },
+ {
+ testname: "validate",
+ command: {validate: "x"},
+ setup: function(db) {
+ db.x.save({});
+ },
+ teardown: function(db) {
+ db.x.drop();
+ },
+ testcases: [
+ {
+ runOnDb: firstDbName,
+ roles: roles_dbAdmin,
+ privileges:
+ [{resource: {db: firstDbName, collection: "x"}, actions: ["validate"]}]
+ },
+ {
+ runOnDb: secondDbName,
+ roles: roles_dbAdminAny,
+ privileges:
+ [{resource: {db: secondDbName, collection: "x"}, actions: ["validate"]}]
+ }
+ ]
+ },
+ {
+ // Test that the root role has the privilege to validate any system.* collection
+ testname: "validate_system",
+ command: {validate: "system.users"},
+ testcases: [{runOnDb: adminDbName, roles: {root: 1, __system: 1}}]
+ },
+ {
+ testname: "whatsmyuri",
+ command: {whatsmyuri: 1},
+ testcases: [
+ {runOnDb: adminDbName, roles: roles_all, privileges: []},
+ {runOnDb: firstDbName, roles: roles_all, privileges: []},
+ {runOnDb: secondDbName, roles: roles_all, privileges: []}
+ ]
}
],
-
/************* SHARED TEST LOGIC ****************/
/**
@@ -2844,7 +2612,9 @@ var authCommandsLib = {
failures = failures.concat(res);
}
- failures.forEach(function(i) { jsTest.log(i); });
+ failures.forEach(function(i) {
+ jsTest.log(i);
+ });
assert.eq(0, failures.length);
}
diff --git a/jstests/auth/localhostAuthBypass.js b/jstests/auth/localhostAuthBypass.js
index 1b07456fbe3..fdaf4386794 100644
--- a/jstests/auth/localhostAuthBypass.js
+++ b/jstests/auth/localhostAuthBypass.js
@@ -1,7 +1,7 @@
-//SERVER-6591: Localhost authentication exception doesn't work right on sharded cluster
+// SERVER-6591: Localhost authentication exception doesn't work right on sharded cluster
//
-//This test is to ensure that localhost authentication works correctly against a standalone
-//mongod whether it is hosted with "localhost" or a hostname.
+// This test is to ensure that localhost authentication works correctly against a standalone
+// mongod whether it is hosted with "localhost" or a hostname.
var baseName = "auth_server-6591";
var dbpath = MongoRunner.dataPath + baseName;
@@ -12,67 +12,81 @@ load("jstests/libs/host_ipaddr.js");
var createUser = function(mongo) {
print("============ adding a user.");
- mongo.getDB("admin").createUser(
- { user:username, pwd: password, roles: jsTest.adminUserRoles });
+ mongo.getDB("admin").createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
};
var assertCannotRunCommands = function(mongo) {
print("============ ensuring that commands cannot be run.");
var test = mongo.getDB("test");
- assert.throws( function() { test.system.users.findOne(); });
+ assert.throws(function() {
+ test.system.users.findOne();
+ });
- assert.writeError(test.foo.save({ _id: 0 }));
+ assert.writeError(test.foo.save({_id: 0}));
- assert.throws( function() { test.foo.findOne({_id:0}); });
+ assert.throws(function() {
+ test.foo.findOne({_id: 0});
+ });
- assert.writeError(test.foo.update({ _id: 0 }, { $set: { x: 20 }}));
- assert.writeError(test.foo.remove({ _id: 0 }));
+ assert.writeError(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeError(test.foo.remove({_id: 0}));
- assert.throws(function() {
+ assert.throws(function() {
test.foo.mapReduce(
- function() { emit(1, 1); },
- function(id, count) { return Array.sum(count); },
- { out: "other" });
+ function() {
+ emit(1, 1);
+ },
+ function(id, count) {
+ return Array.sum(count);
+ },
+ {out: "other"});
});
// Additional commands not permitted
// Create non-admin user
- assert.throws(function() { mongo.getDB("test").createUser(
- { user: username, pwd: password, roles: ['readWrite'] }); });
+ assert.throws(function() {
+ mongo.getDB("test").createUser({user: username, pwd: password, roles: ['readWrite']});
+ });
// DB operations
var authorizeErrorCode = 13;
- assert.commandFailedWithCode(mongo.getDB("test").copyDatabase("admin", "admin2"),
- authorizeErrorCode, "copyDatabase");
+ assert.commandFailedWithCode(
+ mongo.getDB("test").copyDatabase("admin", "admin2"), authorizeErrorCode, "copyDatabase");
// Create collection
- assert.commandFailedWithCode(mongo.getDB("test").createCollection(
- "log", { capped: true, size: 5242880, max: 5000 } ),
- authorizeErrorCode, "createCollection");
+ assert.commandFailedWithCode(
+ mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
+ authorizeErrorCode,
+ "createCollection");
// Set/Get system parameters
- var params = [{ param: "journalCommitInterval", val: 200 },
- { param: "logLevel", val: 2 },
- { param: "logUserIds", val: 1 },
- { param: "notablescan", val: 1 },
- { param: "quiet", val: 1 },
- { param: "replApplyBatchSize", val: 10 },
- { param: "replIndexPrefetch", val: "none" },
- { param: "syncdelay", val: 30 },
- { param: "traceExceptions", val: true },
- { param: "sslMode", val: "preferSSL" },
- { param: "clusterAuthMode", val: "sendX509" },
- { param: "userCacheInvalidationIntervalSecs", val: 300 }
- ];
+ var params = [
+ {param: "journalCommitInterval", val: 200},
+ {param: "logLevel", val: 2},
+ {param: "logUserIds", val: 1},
+ {param: "notablescan", val: 1},
+ {param: "quiet", val: 1},
+ {param: "replApplyBatchSize", val: 10},
+ {param: "replIndexPrefetch", val: "none"},
+ {param: "syncdelay", val: 30},
+ {param: "traceExceptions", val: true},
+ {param: "sslMode", val: "preferSSL"},
+ {param: "clusterAuthMode", val: "sendX509"},
+ {param: "userCacheInvalidationIntervalSecs", val: 300}
+ ];
params.forEach(function(p) {
- var cmd = { setParameter: 1 };
+ var cmd = {
+ setParameter: 1
+ };
cmd[p.param] = p.val;
- assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
- authorizeErrorCode, "setParameter: "+p.param);
+ assert.commandFailedWithCode(
+ mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "setParameter: " + p.param);
});
params.forEach(function(p) {
- var cmd = { getParameter: 1 };
+ var cmd = {
+ getParameter: 1
+ };
cmd[p.param] = 1;
- assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
- authorizeErrorCode, "getParameter: "+p.param);
+ assert.commandFailedWithCode(
+ mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "getParameter: " + p.param);
});
};
@@ -83,15 +97,18 @@ var assertCanRunCommands = function(mongo) {
// will throw on failure
test.system.users.findOne();
- assert.writeOK(test.foo.save({ _id: 0 }));
- assert.writeOK(test.foo.update({ _id: 0 }, { $set: { x: 20 }}));
- assert.writeOK(test.foo.remove({ _id: 0 }));
+ assert.writeOK(test.foo.save({_id: 0}));
+ assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeOK(test.foo.remove({_id: 0}));
test.foo.mapReduce(
- function() { emit(1, 1); },
- function(id, count) { return Array.sum(count); },
- { out: "other" }
- );
+ function() {
+ emit(1, 1);
+ },
+ function(id, count) {
+ return Array.sum(count);
+ },
+ {out: "other"});
};
var authenticate = function(mongo) {
@@ -101,7 +118,7 @@ var authenticate = function(mongo) {
var shutdown = function(conn) {
print("============ shutting down.");
- MongoRunner.stopMongod(conn.port, /*signal*/false, { auth: { user: username, pwd: password}});
+ MongoRunner.stopMongod(conn.port, /*signal*/ false, {auth: {user: username, pwd: password}});
};
var runTest = function(useHostName) {
@@ -143,10 +160,14 @@ var runNonlocalTest = function(host) {
var mongo = new Mongo(host + ":" + conn.port);
assertCannotRunCommands(mongo);
- assert.throws(function() { mongo.getDB("admin").createUser
- ({ user:username, pwd: password, roles: jsTest.adminUserRoles }); });
- assert.throws(function() { mongo.getDB("$external").createUser
- ({ user:username, pwd: password, roles: jsTest.adminUserRoles }); });
+ assert.throws(function() {
+ mongo.getDB("admin")
+ .createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
+ });
+ assert.throws(function() {
+ mongo.getDB("$external")
+ .createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
+ });
shutdown(conn);
};
diff --git a/jstests/auth/log_user_basic.js b/jstests/auth/log_user_basic.js
index 82702fc4756..06e74ea3109 100644
--- a/jstests/auth/log_user_basic.js
+++ b/jstests/auth/log_user_basic.js
@@ -4,258 +4,263 @@
// TODO(schwerin) Re-enable this test after resolving corresponding TODO in mongo/util/log.cpp.
if (0) {
+ /**
+ * Extracts information from a mongod/mongos log entry.
+ *
+ * @param line {string} a single line of log.
+ *
+ * @return {Object} format:
+ *
+ * {
+ * id: <string>, // thread id of the log line.
+ * // list of users logged in. Can be empty.
+ * users: <Object> // map of db name to user name
+ * }
+ */
+ var parseLog = function(line) {
+ var THREAD_ID_PATTERN = / [012]?\d:\d\d:\d\d\.\d\d\d \[(.+)\] /;
+ var ID_USER_PATTERN = new RegExp(THREAD_ID_PATTERN.source + 'user:([^ ]*) ');
+ var res = THREAD_ID_PATTERN.exec(line);
+
+ if (res == null) {
+ return null;
+ }
+
+ var logInfo = {
+ id: res[1],
+ users: {}
+ };
-/**
- * Extracts information from a mongod/mongos log entry.
- *
- * @param line {string} a single line of log.
- *
- * @return {Object} format:
- *
- * {
- * id: <string>, // thread id of the log line.
- * // list of users logged in. Can be empty.
- * users: <Object> // map of db name to user name
- * }
- */
-var parseLog = function(line) {
- var THREAD_ID_PATTERN = / [012]?\d:\d\d:\d\d\.\d\d\d \[(.+)\] /;
- var ID_USER_PATTERN = new RegExp(THREAD_ID_PATTERN.source + 'user:([^ ]*) ');
- var res = THREAD_ID_PATTERN.exec(line);
-
- if (res == null) {
- return null;
- }
-
- var logInfo = { id: res[1], users: {} };
+ var userLog = null;
+ res = ID_USER_PATTERN.exec(line);
- var userLog = null;
- res = ID_USER_PATTERN.exec(line);
+ if (res != null) {
+ userLog = res[2];
+ // should not have trailing commas
+ assert.neq(',', userLog[userLog.length - 1], 'Bad user log list format: ' + line);
- if (res != null) {
- userLog = res[2];
- // should not have trailing commas
- assert.neq(',', userLog[userLog.length - 1], 'Bad user log list format: ' + line);
+ userLog.split(',').forEach(function(userData) {
+ var userAndDB = userData.split('@');
+ assert.eq(2,
+ userAndDB.length,
+ 'Bad user db pair format: ' + userData + ' from line: ' + line);
+ logInfo.users[userAndDB[1]] = userAndDB[0];
+ });
+ }
- userLog.split(',').forEach(function(userData) {
- var userAndDB = userData.split('@');
- assert.eq(2, userAndDB.length, 'Bad user db pair format: ' + userData +
- ' from line: ' + line);
- logInfo.users[userAndDB[1]] = userAndDB[0];
- });
- }
+ return logInfo;
+ };
- return logInfo;
-};
+ /**
+ * Performs a series of test on user id logging.
+ *
+ * @param conn1 {Mongo} the connection object to use for logging in users.
+ * @param conn2 {Mongo} another connection object different from conn1.
+ */
+ var doTest = function(conn1, conn2) {
+ var connInfo1 = {
+ id: null, // thread id of this connection
+ mongo: conn1, // connection object
+ users: {} // contains authenticated users represented as a map of db to user names.
+ };
-/**
- * Performs a series of test on user id logging.
- *
- * @param conn1 {Mongo} the connection object to use for logging in users.
- * @param conn2 {Mongo} another connection object different from conn1.
- */
-var doTest = function(conn1, conn2) {
- var connInfo1 = {
- id: null, // thread id of this connection
- mongo: conn1, // connection object
- users: {} // contains authenticated users represented as a map of db to user names.
- };
+ var connInfo2 = {
+ id: null,
+ mongo: conn2,
+ users: {}
+ };
- var connInfo2 = {
- id: null, mongo: conn2, users: {}
- };
+ var conn1Auth =
+ [{user: 'foo', pwd: 'bar', db: 'test'}, {user: 'chun', pwd: 'li', db: 'sf'}];
- var conn1Auth = [
- { user: 'foo', pwd: 'bar', db: 'test' },
- { user: 'chun', pwd: 'li', db: 'sf' }
- ];
-
- var conn2Auth = [
- { user: 'root', pwd: 'ugat', db: 'admin' },
- { user: 'elbow', pwd: 'freeze', db: 'bboy' }
- ];
-
- var loginUser = function(connInfo, connAuth) {
- var db = connInfo.mongo.getDB(connAuth.db);
- db.createUser({user: connAuth.user, pwd: connAuth.pwd, roles: jsTest.adminUserRoles});
- db.auth(connAuth.user, connAuth.pwd);
- connInfo.users[connAuth.db] = connAuth.user;
- };
+ var conn2Auth =
+ [{user: 'root', pwd: 'ugat', db: 'admin'}, {user: 'elbow', pwd: 'freeze', db: 'bboy'}];
- var logoutUser = function(connInfo, connAuth) {
- var db = connInfo.mongo.getDB(connAuth.db);
- db.runCommand({ logout: 1 });
- delete connInfo.users[connAuth.db];
- };
+ var loginUser = function(connInfo, connAuth) {
+ var db = connInfo.mongo.getDB(connAuth.db);
+ db.createUser({user: connAuth.user, pwd: connAuth.pwd, roles: jsTest.adminUserRoles});
+ db.auth(connAuth.user, connAuth.pwd);
+ connInfo.users[connAuth.db] = connAuth.user;
+ };
- /**
- * Performs a couple of test to make sure that the format of the log is correct.
- * Also checks that whether the right users show up in the logs.
- *
- * @param log {Array.<string>} list of log lines to check.
- * @param connInfo {Object}
- */
- var checkLogs = function(log, connInfo) {
- var foundOne = false;
+ var logoutUser = function(connInfo, connAuth) {
+ var db = connInfo.mongo.getDB(connAuth.db);
+ db.runCommand({logout: 1});
+ delete connInfo.users[connAuth.db];
+ };
/**
- * @return true if the logInfo contains the same users as connIfo.
+ * Performs a couple of test to make sure that the format of the log is correct.
+ * Also checks that whether the right users show up in the logs.
+ *
+ * @param log {Array.<string>} list of log lines to check.
+ * @param connInfo {Object}
*/
- var checkUsers = function(logInfo) {
- for (var db in logInfo.users) {
- if (logInfo.users.hasOwnProperty(db) &&
- logInfo.users[db] != connInfo.users[db]) {
- return false;
+ var checkLogs = function(log, connInfo) {
+ var foundOne = false;
+
+ /**
+ * @return true if the logInfo contains the same users as connIfo.
+ */
+ var checkUsers = function(logInfo) {
+ for (var db in logInfo.users) {
+ if (logInfo.users.hasOwnProperty(db) &&
+ logInfo.users[db] != connInfo.users[db]) {
+ return false;
+ }
}
- }
- for (db in connInfo.users) {
- if (connInfo.users.hasOwnProperty(db) &&
- logInfo.users[db] != connInfo.users[db]) {
- return false;
+ for (db in connInfo.users) {
+ if (connInfo.users.hasOwnProperty(db) &&
+ logInfo.users[db] != connInfo.users[db]) {
+ return false;
+ }
}
- }
- return true;
- };
+ return true;
+ };
- var hasUser = function(logInfo) {
- for (var db in logInfo.users) {
- if (logInfo.users.hasOwnProperty(db)) {
- return true;
+ var hasUser = function(logInfo) {
+ for (var db in logInfo.users) {
+ if (logInfo.users.hasOwnProperty(db)) {
+ return true;
+ }
}
- }
- return false;
- };
+ return false;
+ };
- log.forEach(function(line) {
- var logInfo = parseLog(line);
+ log.forEach(function(line) {
+ var logInfo = parseLog(line);
- if (logInfo == null) return;
- if (connInfo.id == null) {
- if (checkUsers(logInfo)) {
- connInfo.id = logInfo.id;
+ if (logInfo == null)
+ return;
+ if (connInfo.id == null) {
+ if (checkUsers(logInfo)) {
+ connInfo.id = logInfo.id;
+ foundOne = true;
+ }
+
+ return;
+ }
+
+ if (logInfo.id == connInfo.id) {
foundOne = true;
+ assert(checkUsers(logInfo),
+ 'logged users does not match [' + tojson(connInfo.users) + '], log: ' +
+ line);
+ } else if (hasUser(logInfo)) {
+ assert(!checkUsers(logInfo), 'Unexpected user log on another thread: ' + line);
}
+ });
- return;
- }
+ assert(foundOne, 'User log not found in: ' + tojson(log));
+ };
- if (logInfo.id == connInfo.id) {
- foundOne = true;
- assert(checkUsers(logInfo), 'logged users does not match [' +
- tojson(connInfo.users) + '], log: ' + line);
+ var testDB1 = connInfo1.mongo.getDB('test');
+ var testDB2 = connInfo2.mongo.getDB('test');
+
+ // Note: The succeeding tests should not be re-ordered.
+ (function() {
+ jsTest.log('Test single user on 1 connection.');
+ loginUser(connInfo1, conn1Auth[0]);
+ testDB1.runCommand({dbStats: 1});
+ var log = testDB1.adminCommand({getLog: 'global'});
+ checkLogs(log.log, connInfo1);
+ })();
+
+ (function() {
+ jsTest.log('Test multiple conn with 1 user each');
+ loginUser(connInfo2, conn2Auth[0]);
+ testDB2.runCommand({dbStats: 1});
+ var log = testDB1.adminCommand({getLog: 'global'});
+ checkLogs(log.log, connInfo2);
+ })();
+
+ (function() {
+ jsTest.log('Test multiple conn with 1 multiple user');
+ loginUser(connInfo1, conn1Auth[1]);
+ var log = testDB1.adminCommand({getLog: 'global'});
+ var lastLogLine = log.log.pop(); // Used for trimming out logs before this point.
+ testDB1.runCommand({dbStats: 1});
+ log = testDB1.adminCommand({getLog: 'global'});
+
+ // Remove old log entries.
+ while (log.log.shift() != lastLogLine) {
}
- else if(hasUser(logInfo)) {
-
- assert(!checkUsers(logInfo), 'Unexpected user log on another thread: ' + line);
+ assert(log.log.length > 0);
+ checkLogs(log.log, connInfo1);
+ })();
+
+ (function() {
+ jsTest.log('Test multiple conn with multiple users each');
+ loginUser(connInfo2, conn2Auth[1]);
+ var log = testDB2.adminCommand({getLog: 'global'});
+ var lastLogLine = log.log.pop(); // Used for trimming out logs before this point.
+ testDB1.runCommand({dbStats: 1});
+ log = testDB2.adminCommand({getLog: 'global'});
+
+ // Remove old log entries.
+ while (log.log.shift() != lastLogLine) {
}
- });
-
- assert(foundOne, 'User log not found in: ' + tojson(log));
+ assert(log.log.length > 0);
+ checkLogs(log.log, connInfo2);
+ })();
+
+ (function() {
+ // Case for logout older user first.
+ jsTest.log('Test log line will not show foo');
+ logoutUser(connInfo1, conn1Auth[0]);
+ var log = testDB1.adminCommand({getLog: 'global'});
+ var lastLogLine = log.log.pop(); // Used for trimming out logs before this point.
+ testDB1.runCommand({dbStats: 1});
+ log = testDB1.adminCommand({getLog: 'global'});
+
+ // Remove old log entries.
+ while (log.log.shift() != lastLogLine) {
+ }
+ assert(log.log.length > 0);
+ checkLogs(log.log, connInfo1);
+ })();
+
+ (function() {
+ jsTest.log('Test that log for conn1 will not show \'user:\'');
+ logoutUser(connInfo1, conn1Auth[1]);
+ var log = testDB1.adminCommand({getLog: 'global'});
+ var lastLogLine = log.log.pop(); // Used for trimming out logs before this point.
+ testDB1.runCommand({dbStats: 1});
+ log = testDB1.adminCommand({getLog: 'global'});
+
+ // Remove old log entries.
+ while (log.log.shift() != lastLogLine) {
+ }
+ assert(log.log.length > 0);
+ checkLogs(log.log, connInfo1);
+ })();
+
+ (function() {
+ // Case for logout newer user first.
+ jsTest.log('Test log line will not show elbow');
+ logoutUser(connInfo2, conn2Auth[1]);
+ var log = testDB2.adminCommand({getLog: 'global'});
+ var lastLogLine = log.log.pop(); // Used for trimming out logs before this point.
+ testDB1.runCommand({dbStats: 1});
+ log = testDB2.adminCommand({getLog: 'global'});
+
+ // Remove old log entries.
+ while (log.log.shift() != lastLogLine) {
+ }
+ assert(log.log.length > 0);
+ checkLogs(log.log, connInfo2);
+ })();
};
- var testDB1 = connInfo1.mongo.getDB('test');
- var testDB2 = connInfo2.mongo.getDB('test');
-
- // Note: The succeeding tests should not be re-ordered.
- (function() {
- jsTest.log('Test single user on 1 connection.');
- loginUser(connInfo1, conn1Auth[0]);
- testDB1.runCommand({ dbStats: 1 });
- var log = testDB1.adminCommand({ getLog: 'global' });
- checkLogs(log.log, connInfo1);
- })();
-
- (function() {
- jsTest.log('Test multiple conn with 1 user each');
- loginUser(connInfo2, conn2Auth[0]);
- testDB2.runCommand({ dbStats: 1 });
- var log = testDB1.adminCommand({ getLog: 'global' });
- checkLogs(log.log, connInfo2);
- })();
-
- (function(){
- jsTest.log('Test multiple conn with 1 multiple user');
- loginUser(connInfo1, conn1Auth[1]);
- var log = testDB1.adminCommand({ getLog: 'global' });
- var lastLogLine = log.log.pop(); // Used for trimming out logs before this point.
- testDB1.runCommand({ dbStats: 1 });
- log = testDB1.adminCommand({ getLog: 'global' });
-
- // Remove old log entries.
- while (log.log.shift() != lastLogLine) { }
- assert(log.log.length > 0);
- checkLogs(log.log, connInfo1);
- })();
-
- (function(){
- jsTest.log('Test multiple conn with multiple users each');
- loginUser(connInfo2, conn2Auth[1]);
- var log = testDB2.adminCommand({ getLog: 'global' });
- var lastLogLine = log.log.pop(); // Used for trimming out logs before this point.
- testDB1.runCommand({ dbStats: 1 });
- log = testDB2.adminCommand({ getLog: 'global' });
-
- // Remove old log entries.
- while (log.log.shift() != lastLogLine) { }
- assert(log.log.length > 0);
- checkLogs(log.log, connInfo2);
- })();
-
- (function(){
- // Case for logout older user first.
- jsTest.log('Test log line will not show foo');
- logoutUser(connInfo1, conn1Auth[0]);
- var log = testDB1.adminCommand({ getLog: 'global' });
- var lastLogLine = log.log.pop(); // Used for trimming out logs before this point.
- testDB1.runCommand({ dbStats: 1 });
- log = testDB1.adminCommand({ getLog: 'global' });
-
- // Remove old log entries.
- while (log.log.shift() != lastLogLine) { }
- assert(log.log.length > 0);
- checkLogs(log.log, connInfo1);
- })();
-
- (function(){
- jsTest.log('Test that log for conn1 will not show \'user:\'');
- logoutUser(connInfo1, conn1Auth[1]);
- var log = testDB1.adminCommand({ getLog: 'global' });
- var lastLogLine = log.log.pop(); // Used for trimming out logs before this point.
- testDB1.runCommand({ dbStats: 1 });
- log = testDB1.adminCommand({ getLog: 'global' });
-
- // Remove old log entries.
- while (log.log.shift() != lastLogLine) { }
- assert(log.log.length > 0);
- checkLogs(log.log, connInfo1);
- })();
-
- (function(){
- // Case for logout newer user first.
- jsTest.log('Test log line will not show elbow');
- logoutUser(connInfo2, conn2Auth[1]);
- var log = testDB2.adminCommand({ getLog: 'global' });
- var lastLogLine = log.log.pop(); // Used for trimming out logs before this point.
- testDB1.runCommand({ dbStats: 1 });
- log = testDB2.adminCommand({ getLog: 'global' });
-
- // Remove old log entries.
- while (log.log.shift() != lastLogLine) { }
- assert(log.log.length > 0);
- checkLogs(log.log, connInfo2);
- })();
-};
-
-var mongo = MongoRunner.runMongod({ verbose: 5, setParameter: 'logUserIds=1' });
-doTest(mongo, new Mongo(mongo.host));
-MongoRunner.stopMongod(mongo.port);
-
-var st = new ShardingTest({ shards: 1, verbose: 5,
- other: { mongosOptions: { setParameter: 'logUserIds=1' }}});
-doTest(st.s, new Mongo(st.s.host));
-st.stop();
+ var mongo = MongoRunner.runMongod({verbose: 5, setParameter: 'logUserIds=1'});
+ doTest(mongo, new Mongo(mongo.host));
+ MongoRunner.stopMongod(mongo.port);
+ var st = new ShardingTest(
+ {shards: 1, verbose: 5, other: {mongosOptions: {setParameter: 'logUserIds=1'}}});
+ doTest(st.s, new Mongo(st.s.host));
+ st.stop();
}
diff --git a/jstests/auth/log_userid_off.js b/jstests/auth/log_userid_off.js
index abebda28709..62d0af74c02 100644
--- a/jstests/auth/log_userid_off.js
+++ b/jstests/auth/log_userid_off.js
@@ -3,7 +3,7 @@
*
* @param mongo {Mongo} connection object.
*/
-var doTest = function (mongo, callSetParam) {
+var doTest = function(mongo, callSetParam) {
var TEST_USER = 'foo';
var TEST_PWD = 'bar';
var testDB = mongo.getDB('test');
@@ -11,30 +11,29 @@ var doTest = function (mongo, callSetParam) {
testDB.createUser({user: TEST_USER, pwd: TEST_PWD, roles: jsTest.basicUserRoles});
testDB.auth(TEST_USER, TEST_PWD);
- testDB.runCommand({ dbStats: 1 });
+ testDB.runCommand({dbStats: 1});
- var log = testDB.adminCommand({ getLog: 'global' });
+ var log = testDB.adminCommand({getLog: 'global'});
log.log.forEach(function(line) {
assert.eq(-1, line.indexOf('user: foo@'), 'user logged: ' + line);
});
// logUserIds should not be settable
- var res = testDB.runCommand({ setParameter: 1, logUserIds: 1 });
+ var res = testDB.runCommand({setParameter: 1, logUserIds: 1});
assert(!res.ok);
- testDB.runCommand({ dbStats: 1 });
+ testDB.runCommand({dbStats: 1});
- log = testDB.adminCommand({ getLog: 'global' });
+ log = testDB.adminCommand({getLog: 'global'});
log.log.forEach(function(line) {
assert.eq(-1, line.indexOf('user: foo@'), 'user logged: ' + line);
});
};
-var mongo = MongoRunner.runMongod({ verbose: 5 });
+var mongo = MongoRunner.runMongod({verbose: 5});
doTest(mongo);
MongoRunner.stopMongod(mongo.port);
-var st = new ShardingTest({ shards: 1, verbose: 5 });
+var st = new ShardingTest({shards: 1, verbose: 5});
doTest(st.s);
st.stop();
-
diff --git a/jstests/auth/logout_reconnect.js b/jstests/auth/logout_reconnect.js
index dc59d408544..fa5d8a8fcdb 100644
--- a/jstests/auth/logout_reconnect.js
+++ b/jstests/auth/logout_reconnect.js
@@ -4,46 +4,40 @@
* It is a regression test for SERVER-8798.
*/
-var conn = MongoRunner.runMongod({
- auth : "",
- remember : true
-});
+var conn = MongoRunner.runMongod({auth: "", remember: true});
// create user with rw permissions and login
var testDB = conn.getDB('test');
var adminDB = conn.getDB('admin');
-adminDB.createUser({user:'admin', pwd:'admin', roles:['userAdminAnyDatabase']});
-adminDB.auth('admin','admin');
-testDB.createUser({user:'rwuser', pwd:'rwuser', roles:['readWrite']});
+adminDB.createUser({user: 'admin', pwd: 'admin', roles: ['userAdminAnyDatabase']});
+adminDB.auth('admin', 'admin');
+testDB.createUser({user: 'rwuser', pwd: 'rwuser', roles: ['readWrite']});
adminDB.logout();
testDB.auth('rwuser', 'rwuser');
// verify that the rwuser can read and write
-testDB.foo.insert({a:1});
-assert.eq(1, testDB.foo.find({a:1}).count(), "failed to read");
+testDB.foo.insert({a: 1});
+assert.eq(1, testDB.foo.find({a: 1}).count(), "failed to read");
// assert that the user cannot read unauthenticated
testDB.logout();
-assert.throws(function(){ testDB.foo.findOne(); },
- [],
- "user should not be able to read after logging out");
+assert.throws(function() {
+ testDB.foo.findOne();
+}, [], "user should not be able to read after logging out");
MongoRunner.stopMongod(conn);
-conn = MongoRunner.runMongod({
- restart : conn,
- noCleanData : true
-});
+conn = MongoRunner.runMongod({restart: conn, noCleanData: true});
// expect to fail on first attempt since the socket is no longer valid
-try{
+try {
val = testDB.foo.findOne();
+} catch (err) {
}
-catch(err){}
// assert that credentials were not autosubmitted on reconnect
-assert.throws(function(){ testDB.foo.findOne(); },
- [],
- "user should not be able to read after logging out");
+assert.throws(function() {
+ testDB.foo.findOne();
+}, [], "user should not be able to read after logging out");
MongoRunner.stopMongod(conn);
diff --git a/jstests/auth/mergeAuthCollsCommand.js b/jstests/auth/mergeAuthCollsCommand.js
index 612b3e62d32..c89a7ff460d 100644
--- a/jstests/auth/mergeAuthCollsCommand.js
+++ b/jstests/auth/mergeAuthCollsCommand.js
@@ -3,15 +3,14 @@
*/
function assertUsersAndRolesHaveRole(admin, role) {
- admin.system.users.find().forEach( function(doc) {
- assert.eq(1, doc.roles.length);
- assert.eq(role, doc.roles[0].role);
- });
- admin.system.roles.find().forEach( function(doc) {
- assert.eq(1, doc.roles.length);
- assert.eq(role, doc.roles[0].role);
- });
-
+ admin.system.users.find().forEach(function(doc) {
+ assert.eq(1, doc.roles.length);
+ assert.eq(role, doc.roles[0].role);
+ });
+ admin.system.roles.find().forEach(function(doc) {
+ assert.eq(1, doc.roles.length);
+ assert.eq(role, doc.roles[0].role);
+ });
}
function runTest(conn) {
var db = conn.getDB('test');
@@ -25,8 +24,12 @@ function runTest(conn) {
// Move the newly created users/roles to the temp collections to be used later by
// _mergeAuthzCollections
- admin.system.users.find().forEach(function (doc) { admin.tempusers.insert(doc); });
- admin.system.roles.find().forEach(function (doc) { admin.temproles.insert(doc); });
+ admin.system.users.find().forEach(function(doc) {
+ admin.tempusers.insert(doc);
+ });
+ admin.system.roles.find().forEach(function(doc) {
+ admin.temproles.insert(doc);
+ });
admin.system.users.remove({});
admin.system.roles.remove({});
@@ -44,11 +47,13 @@ function runTest(conn) {
assertUsersAndRolesHaveRole(admin, "readWrite");
jsTestLog("Overriding existing system.users and system.roles collections");
- assert.commandWorked(admin.runCommand({_mergeAuthzCollections: 1,
- tempUsersCollection: 'admin.tempusers',
- tempRolesCollection: 'admin.temproles',
- db: "",
- drop: true}));
+ assert.commandWorked(admin.runCommand({
+ _mergeAuthzCollections: 1,
+ tempUsersCollection: 'admin.tempusers',
+ tempRolesCollection: 'admin.temproles',
+ db: "",
+ drop: true
+ }));
assert.eq(2, admin.system.users.count());
assert.eq(2, admin.system.roles.count());
@@ -68,18 +73,18 @@ function runTest(conn) {
assertUsersAndRolesHaveRole(admin, "read");
jsTestLog("Adding users/roles from temp collections to the existing users/roles");
- assert.commandWorked(admin.runCommand({_mergeAuthzCollections: 1,
- tempUsersCollection: 'admin.tempusers',
- tempRolesCollection: 'admin.temproles',
- db: "",
- drop: false}));
-
+ assert.commandWorked(admin.runCommand({
+ _mergeAuthzCollections: 1,
+ tempUsersCollection: 'admin.tempusers',
+ tempRolesCollection: 'admin.temproles',
+ db: "",
+ drop: false
+ }));
assert.eq(4, admin.system.users.count());
assert.eq(4, admin.system.roles.count());
assertUsersAndRolesHaveRole(admin, "read");
-
jsTestLog("Make sure adding duplicate users/roles fails to change anything if 'drop' is false");
admin.system.users.remove({});
@@ -96,11 +101,13 @@ function runTest(conn) {
assertUsersAndRolesHaveRole(admin, "readWrite");
// This should succeed but have no effect as every user/role it tries to restore already exists
- assert.commandWorked(admin.runCommand({_mergeAuthzCollections: 1,
- tempUsersCollection: 'admin.tempusers',
- tempRolesCollection: 'admin.temproles',
- db: "",
- drop: false}));
+ assert.commandWorked(admin.runCommand({
+ _mergeAuthzCollections: 1,
+ tempUsersCollection: 'admin.tempusers',
+ tempRolesCollection: 'admin.temproles',
+ db: "",
+ drop: false
+ }));
assert.eq(2, admin.system.users.count());
assert.eq(2, admin.system.roles.count());
@@ -113,6 +120,6 @@ runTest(conn);
MongoRunner.stopMongod(conn.port);
jsTest.log('Test sharding');
-var st = new ShardingTest({ shards: 2, config: 3 });
+var st = new ShardingTest({shards: 2, config: 3});
runTest(st.s);
st.stop();
diff --git a/jstests/auth/mongos_cache_invalidation.js b/jstests/auth/mongos_cache_invalidation.js
index 1f8a03bac56..de2fce4ddfb 100644
--- a/jstests/auth/mongos_cache_invalidation.js
+++ b/jstests/auth/mongos_cache_invalidation.js
@@ -4,17 +4,21 @@
*/
var authzErrorCode = 13;
-var hasAuthzError = function (result) {
+var hasAuthzError = function(result) {
assert(result.hasWriteError());
assert.eq(authzErrorCode, result.getWriteError().code);
};
-var st = new ShardingTest({ shards: 2,
- config: 3,
- mongos: [{},
- {setParameter: "userCacheInvalidationIntervalSecs=5"},
- {setParameter: "userCacheInvalidationIntervalSecs=600"}],
- keyFile: 'jstests/libs/key1' });
+var st = new ShardingTest({
+ shards: 2,
+ config: 3,
+ mongos: [
+ {},
+ {setParameter: "userCacheInvalidationIntervalSecs=5"},
+ {setParameter: "userCacheInvalidationIntervalSecs=600"}
+ ],
+ keyFile: 'jstests/libs/key1'
+});
st.s1.getDB('admin').createUser({user: 'root', pwd: 'pwd', roles: ['root']});
st.s1.getDB('admin').auth('root', 'pwd');
@@ -28,21 +32,22 @@ assert.commandFailed(res, "Setting the invalidation interval to an disallowed va
res = st.s1.getDB('admin').runCommand({getParameter: 1, userCacheInvalidationIntervalSecs: 1});
assert.eq(5, res.userCacheInvalidationIntervalSecs);
-st.s1.getDB('test').foo.insert({a:1}); // initial data
-st.s1.getDB('test').bar.insert({a:1}); // initial data
+st.s1.getDB('test').foo.insert({a: 1}); // initial data
+st.s1.getDB('test').bar.insert({a: 1}); // initial data
st.s1.getDB('admin').createUser({user: 'admin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
st.s1.getDB('admin').logout();
st.s0.getDB('admin').auth('admin', 'pwd');
-st.s0.getDB('admin').createRole({role: 'myRole',
- roles: [],
- privileges: [{resource: {cluster: true},
- actions: ['invalidateUserCache', 'setParameter']}]});
-st.s0.getDB('test').createUser({user: 'spencer',
- pwd: 'pwd',
- roles: ['read',
- {role: 'myRole', db: 'admin'},
- {role: 'userAdminAnyDatabase', db: 'admin'}]});
+st.s0.getDB('admin').createRole({
+ role: 'myRole',
+ roles: [],
+ privileges: [{resource: {cluster: true}, actions: ['invalidateUserCache', 'setParameter']}]
+});
+st.s0.getDB('test').createUser({
+ user: 'spencer',
+ pwd: 'pwd',
+ roles: ['read', {role: 'myRole', db: 'admin'}, {role: 'userAdminAnyDatabase', db: 'admin'}]
+});
st.s0.getDB('admin').logout();
var db1 = st.s0.getDB('test');
@@ -61,163 +66,157 @@ db3.auth('spencer', 'pwd');
*/
(function testChangingInvalidationInterval() {
- jsTestLog("Test that changing the invalidation interval takes effect immediately");
+ jsTestLog("Test that changing the invalidation interval takes effect immediately");
- assert.commandFailedWithCode(db3.bar.runCommand("drop"), authzErrorCode);
- assert.eq(1, db3.bar.findOne().a);
+ assert.commandFailedWithCode(db3.bar.runCommand("drop"), authzErrorCode);
+ assert.eq(1, db3.bar.findOne().a);
- db1.getSiblingDB('admin').grantPrivilegesToRole("myRole",
- [{resource: {db: 'test', collection: ''},
- actions: ['dropCollection']}]);
+ db1.getSiblingDB('admin').grantPrivilegesToRole(
+ "myRole", [{resource: {db: 'test', collection: ''}, actions: ['dropCollection']}]);
- // At first db3 should still think we're unauthorized because it hasn't invalidated it's cache.
- assert.commandFailedWithCode(db3.bar.runCommand('drop'), authzErrorCode);
- // Changing the value of the invalidation interval should make it invalidate its cache quickly.
- assert.commandWorked(db3.adminCommand({setParameter: 1,
- userCacheInvalidationIntervalSecs: 1}));
- sleep(2000);
- assert.commandWorked(db3.bar.runCommand('drop'));
- assert.eq(0, db3.bar.count());
+ // At first db3 should still think we're unauthorized because it hasn't invalidated its cache.
+ assert.commandFailedWithCode(db3.bar.runCommand('drop'), authzErrorCode);
+ // Changing the value of the invalidation interval should make it invalidate its cache quickly.
+ assert.commandWorked(db3.adminCommand({setParameter: 1, userCacheInvalidationIntervalSecs: 1}));
+ sleep(2000);
+ assert.commandWorked(db3.bar.runCommand('drop'));
+ assert.eq(0, db3.bar.count());
- // Set the invalidation interval back for the rest of the tests
- db3.adminCommand({setParameter: 1, userCacheInvalidationIntervalSecs: 600});
- })();
+ // Set the invalidation interval back for the rest of the tests
+ db3.adminCommand({setParameter: 1, userCacheInvalidationIntervalSecs: 600});
+})();
(function testGrantingPrivileges() {
- jsTestLog("Testing propagation of granting privileges");
-
- hasAuthzError(db1.foo.update({}, { $inc: { a: 1 }}));
- hasAuthzError(db2.foo.update({}, { $inc: { a: 1 }}));
- hasAuthzError(db3.foo.update({}, { $inc: { a: 1 }}));
-
- assert.eq(1, db1.foo.findOne().a);
- assert.eq(1, db2.foo.findOne().a);
- assert.eq(1, db3.foo.findOne().a);
-
- db1.getSiblingDB('admin').grantPrivilegesToRole("myRole",
- [{resource: {db: 'test', collection: ''},
- actions: ['update']}]);
-
- // s0/db1 should update its cache instantly
- assert.writeOK(db1.foo.update({}, { $inc: { a: 1 }}));
- assert.eq(2, db1.foo.findOne().a);
-
- // s1/db2 should update its cache in 10 seconds.
- assert.soon(function() {
- var res = db2.foo.update({}, { $inc: { a: 1 }});
- if (res.hasWriteError()) {
- return false;
- }
- return db2.foo.findOne().a == 3;
- },
- "Mongos did not update its user cache after 10 seconds",
- 10 * 1000);
-
- // We manually invalidate the cache on s2/db3.
- db3.adminCommand("invalidateUserCache");
- assert.writeOK(db3.foo.update({}, { $inc: { a: 1 }}));
- assert.eq(4, db3.foo.findOne().a);
-
- })();
+ jsTestLog("Testing propagation of granting privileges");
+
+ hasAuthzError(db1.foo.update({}, {$inc: {a: 1}}));
+ hasAuthzError(db2.foo.update({}, {$inc: {a: 1}}));
+ hasAuthzError(db3.foo.update({}, {$inc: {a: 1}}));
+
+ assert.eq(1, db1.foo.findOne().a);
+ assert.eq(1, db2.foo.findOne().a);
+ assert.eq(1, db3.foo.findOne().a);
+
+ db1.getSiblingDB('admin').grantPrivilegesToRole(
+ "myRole", [{resource: {db: 'test', collection: ''}, actions: ['update']}]);
+
+ // s0/db1 should update its cache instantly
+ assert.writeOK(db1.foo.update({}, {$inc: {a: 1}}));
+ assert.eq(2, db1.foo.findOne().a);
+
+ // s1/db2 should update its cache in 10 seconds.
+ assert.soon(function() {
+ var res = db2.foo.update({}, {$inc: {a: 1}});
+ if (res.hasWriteError()) {
+ return false;
+ }
+ return db2.foo.findOne().a == 3;
+ }, "Mongos did not update its user cache after 10 seconds", 10 * 1000);
+
+ // We manually invalidate the cache on s2/db3.
+ db3.adminCommand("invalidateUserCache");
+ assert.writeOK(db3.foo.update({}, {$inc: {a: 1}}));
+ assert.eq(4, db3.foo.findOne().a);
+
+})();
(function testRevokingPrivileges() {
- jsTestLog("Testing propagation of revoking privileges");
+ jsTestLog("Testing propagation of revoking privileges");
- db1.getSiblingDB('admin').revokePrivilegesFromRole("myRole",
- [{resource: {db: 'test', collection: ''},
- actions: ['update']}]);
+ db1.getSiblingDB('admin').revokePrivilegesFromRole(
+ "myRole", [{resource: {db: 'test', collection: ''}, actions: ['update']}]);
- // s0/db1 should update its cache instantly
- hasAuthzError(db1.foo.update({}, { $inc: { a: 1 }}));
+ // s0/db1 should update its cache instantly
+ hasAuthzError(db1.foo.update({}, {$inc: {a: 1}}));
- jsTest.log("Beginning wait for s1/db2 cache update.");
- // s1/db2 should update its cache in 10 seconds.
- assert.soon(function() {
- var res = db2.foo.update({}, { $inc: { a: 1 }});
- return res.hasWriteError() && res.getWriteError().code == authzErrorCode;
- },
- "Mongos did not update its user cache after 10 seconds",
- 10 * 1000);
+ jsTest.log("Beginning wait for s1/db2 cache update.");
+ // s1/db2 should update its cache in 10 seconds.
+ assert.soon(function() {
+ var res = db2.foo.update({}, {$inc: {a: 1}});
+ return res.hasWriteError() && res.getWriteError().code == authzErrorCode;
+ }, "Mongos did not update its user cache after 10 seconds", 10 * 1000);
- // We manually invalidate the cache on s1/db3.
- db3.adminCommand("invalidateUserCache");
- hasAuthzError(db3.foo.update({}, { $inc: { a: 1 }}));
- })();
+ // We manually invalidate the cache on s1/db3.
+ db3.adminCommand("invalidateUserCache");
+ hasAuthzError(db3.foo.update({}, {$inc: {a: 1}}));
+})();
(function testModifyingUser() {
- jsTestLog("Testing propagation modifications to a user, rather than to a role");
+ jsTestLog("Testing propagation modifications to a user, rather than to a role");
- hasAuthzError(db1.foo.update({}, { $inc: { a: 1 }}));
- hasAuthzError(db2.foo.update({}, { $inc: { a: 1 }}));
- hasAuthzError(db3.foo.update({}, { $inc: { a: 1}}));
+ hasAuthzError(db1.foo.update({}, {$inc: {a: 1}}));
+ hasAuthzError(db2.foo.update({}, {$inc: {a: 1}}));
+ hasAuthzError(db3.foo.update({}, {$inc: {a: 1}}));
- db1.getSiblingDB('test').grantRolesToUser("spencer", ['readWrite']);
+ db1.getSiblingDB('test').grantRolesToUser("spencer", ['readWrite']);
- // s0/db1 should update its cache instantly
- assert.writeOK(db1.foo.update({}, { $inc: { a: 1 }}));
+ // s0/db1 should update its cache instantly
+ assert.writeOK(db1.foo.update({}, {$inc: {a: 1}}));
- // s1/db2 should update its cache in 5 seconds.
- assert.soon(function() {
- return !db2.foo.update({}, { $inc: { a: 1 }}).hasWriteError();
- },
- "Mongos did not update its user cache after 5 seconds",
- 6 * 1000); // Give an extra 1 second to avoid races
+ // s1/db2 should update its cache in 5 seconds.
+ assert.soon(
+ function() {
+ return !db2.foo.update({}, {$inc: {a: 1}}).hasWriteError();
+ },
+ "Mongos did not update its user cache after 5 seconds",
+ 6 * 1000); // Give an extra 1 second to avoid races
- // We manually invalidate the cache on s1/db3.
- db3.adminCommand("invalidateUserCache");
- assert.writeOK(db3.foo.update({}, { $inc: { a: 1 }}));
- })();
+ // We manually invalidate the cache on s1/db3.
+ db3.adminCommand("invalidateUserCache");
+ assert.writeOK(db3.foo.update({}, {$inc: {a: 1}}));
+})();
(function testConcurrentUserModification() {
- jsTestLog("Testing having 2 mongoses modify the same user at the same time"); // SERVER-13850
+ jsTestLog("Testing having 2 mongoses modify the same user at the same time"); // SERVER-13850
- assert.writeOK(db1.foo.update({}, { $inc: { a: 1 }}));
- assert.writeOK(db3.foo.update({}, { $inc: { a: 1}}));
+ assert.writeOK(db1.foo.update({}, {$inc: {a: 1}}));
+ assert.writeOK(db3.foo.update({}, {$inc: {a: 1}}));
- db1.getSiblingDB('test').revokeRolesFromUser("spencer", ['readWrite']);
+ db1.getSiblingDB('test').revokeRolesFromUser("spencer", ['readWrite']);
- // At this point db3 still thinks "spencer" has readWrite. Use it to add a different role
- // and make sure it doesn't add back readWrite
- hasAuthzError(db1.foo.update({}, { $inc: { a: 1 }}));
- assert.writeOK(db3.foo.update({}, { $inc: { a: 1}}));
+ // At this point db3 still thinks "spencer" has readWrite. Use it to add a different role
+ // and make sure it doesn't add back readWrite
+ hasAuthzError(db1.foo.update({}, {$inc: {a: 1}}));
+ assert.writeOK(db3.foo.update({}, {$inc: {a: 1}}));
- db3.getSiblingDB('test').grantRolesToUser("spencer", ['dbAdmin']);
+ db3.getSiblingDB('test').grantRolesToUser("spencer", ['dbAdmin']);
- hasAuthzError(db1.foo.update({}, { $inc: { a: 1 }}));
- // modifying "spencer" should force db3 to update its cache entry for "spencer"
- hasAuthzError(db3.foo.update({}, { $inc: { a: 1 }}));
+ hasAuthzError(db1.foo.update({}, {$inc: {a: 1}}));
+ // modifying "spencer" should force db3 to update its cache entry for "spencer"
+ hasAuthzError(db3.foo.update({}, {$inc: {a: 1}}));
- // Make sure nothing changes from invalidating the cache
- db1.adminCommand('invalidateUserCache');
- db3.adminCommand('invalidateUserCache');
- hasAuthzError(db1.foo.update({}, { $inc: { a: 1 }}));
- hasAuthzError(db3.foo.update({}, { $inc: { a: 1 }}));
- })();
+ // Make sure nothing changes from invalidating the cache
+ db1.adminCommand('invalidateUserCache');
+ db3.adminCommand('invalidateUserCache');
+ hasAuthzError(db1.foo.update({}, {$inc: {a: 1}}));
+ hasAuthzError(db3.foo.update({}, {$inc: {a: 1}}));
+})();
(function testDroppingUser() {
- jsTestLog("Testing propagation of dropping users");
+ jsTestLog("Testing propagation of dropping users");
- assert.commandWorked(db1.foo.runCommand("collStats"));
- assert.commandWorked(db2.foo.runCommand("collStats"));
- assert.commandWorked(db3.foo.runCommand("collStats"));
+ assert.commandWorked(db1.foo.runCommand("collStats"));
+ assert.commandWorked(db2.foo.runCommand("collStats"));
+ assert.commandWorked(db3.foo.runCommand("collStats"));
- db1.dropUser('spencer');
+ db1.dropUser('spencer');
- // s0/db1 should update its cache instantly
- assert.commandFailedWithCode(db1.foo.runCommand("collStats"), authzErrorCode);
+ // s0/db1 should update its cache instantly
+ assert.commandFailedWithCode(db1.foo.runCommand("collStats"), authzErrorCode);
- // s1/db2 should update its cache in 5 seconds.
- assert.soon(function() {
- return db2.foo.runCommand("collStats").code == authzErrorCode;
- },
- "Mongos did not update its user cache after 5 seconds",
- 6 * 1000); // Give an extra 1 second to avoid races
+ // s1/db2 should update its cache in 5 seconds.
+ assert.soon(
+ function() {
+ return db2.foo.runCommand("collStats").code == authzErrorCode;
+ },
+ "Mongos did not update its user cache after 5 seconds",
+ 6 * 1000); // Give an extra 1 second to avoid races
- // We manually invalidate the cache on s2/db3.
- db3.adminCommand("invalidateUserCache");
- assert.commandFailedWithCode(db3.foo.runCommand("collStats"), authzErrorCode);
+ // We manually invalidate the cache on s2/db3.
+ db3.adminCommand("invalidateUserCache");
+ assert.commandFailedWithCode(db3.foo.runCommand("collStats"), authzErrorCode);
- })();
+})();
st.stop();
diff --git a/jstests/auth/mr_auth.js b/jstests/auth/mr_auth.js
index 421c5d21bf7..1adc66ea7fa 100644
--- a/jstests/auth/mr_auth.js
+++ b/jstests/auth/mr_auth.js
@@ -1,4 +1,5 @@
-// MapReduce executed by a read-only user when --auth enabled should only be able to use inline mode. Other modes require writing to an output collection which is not allowed. SERVER-3345
+// MapReduce executed by a read-only user when --auth enabled should only be able to use inline
+// mode. Other modes require writing to an output collection which is not allowed. SERVER-3345
//
// This test requires users to persist across a restart.
// @tags: [requires_persistence]
@@ -7,75 +8,98 @@ baseName = "jstests_mr_auth";
dbName = "test";
out = baseName + "_out";
-map = function(){ emit( this.x, this.y );};
-red = function( k, vs ){ var s=0; for (var i=0; i<vs.length; i++) s+=vs[i]; return s;};
-red2 = function( k, vs ){ return 42;};
+map = function() {
+ emit(this.x, this.y);
+};
+red = function(k, vs) {
+ var s = 0;
+ for (var i = 0; i < vs.length; i++)
+ s += vs[i];
+ return s;
+};
+red2 = function(k, vs) {
+ return 42;
+};
// make sure writing is allowed when started without --auth enabled
dbms = MongoRunner.runMongod({bind_ip: "127.0.0.1"});
-var d = dbms.getDB( dbName );
-var t = d[ baseName ];
+var d = dbms.getDB(dbName);
+var t = d[baseName];
-for( var i = 0; i < 1000; i++) t.insert( {_id:i, x:i%10, y:i%100} );
-assert.eq( 1000, t.count(), "inserts failed" );
+for (var i = 0; i < 1000; i++)
+ t.insert({_id: i, x: i % 10, y: i % 100});
+assert.eq(1000, t.count(), "inserts failed");
d.dropAllUsers();
-d.getSisterDB( "admin" ).createUser({user: "admin", pwd: "admin", roles: jsTest.adminUserRoles });
-d.getSisterDB( "admin" ).auth('admin', 'admin');
-d.createUser({user: "write" , pwd: "write", roles: jsTest.basicUserRoles});
-d.createUser({user: "read" , pwd: "read", roles: jsTest.readOnlyUserRoles});
-d.getSisterDB( "admin" ).logout();
+d.getSisterDB("admin").createUser({user: "admin", pwd: "admin", roles: jsTest.adminUserRoles});
+d.getSisterDB("admin").auth('admin', 'admin');
+d.createUser({user: "write", pwd: "write", roles: jsTest.basicUserRoles});
+d.createUser({user: "read", pwd: "read", roles: jsTest.readOnlyUserRoles});
+d.getSisterDB("admin").logout();
-t.mapReduce( map, red, {out: { inline: 1 }} );
+t.mapReduce(map, red, {out: {inline: 1}});
-t.mapReduce( map, red, {out: { replace: out }} );
-t.mapReduce( map, red, {out: { reduce: out }} );
-t.mapReduce( map, red, {out: { merge: out }} );
+t.mapReduce(map, red, {out: {replace: out}});
+t.mapReduce(map, red, {out: {reduce: out}});
+t.mapReduce(map, red, {out: {merge: out}});
-d[ out ].drop();
+d[out].drop();
MongoRunner.stopMongod(dbms);
+// In --auth mode, read-only user should not be able to write to existing or temporary collection,
+// thus only can execute inline mode
-// In --auth mode, read-only user should not be able to write to existing or temporary collection, thus only can execute inline mode
+dbms = MongoRunner.runMongod(
+ {restart: true, cleanData: false, dbpath: dbms.dbpath, auth: "", bind_ip: "127.0.0.1"});
+d = dbms.getDB(dbName);
+t = d[baseName];
-dbms = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: dbms.dbpath, auth: "", bind_ip: "127.0.0.1"});
-d = dbms.getDB( dbName );
-t = d[ baseName ];
+assert.throws(function() {
+ t.findOne();
+}, [], "read without login");
-assert.throws( function() { t.findOne(); }, [], "read without login" );
+assert.throws(function() {
+ t.mapReduce(map, red, {out: {inline: 1}});
+}, [], "m/r without login");
-assert.throws( function(){ t.mapReduce( map, red, {out: { inline: 1 }} ); }, [], "m/r without login" );
-
-
-d.auth( "read", "read" );
+d.auth("read", "read");
t.findOne();
-t.mapReduce( map, red, {out: { inline: 1 }} );
+t.mapReduce(map, red, {out: {inline: 1}});
-assert.throws( function(){ t.mapReduce( map, red2, {out: { replace: out }} ); }, [], "read-only user shouldn't be able to output m/r to a collection" );
-assert.throws( function(){ t.mapReduce( map, red2, {out: { reduce: out }} ); }, [], "read-only user shouldn't be able to output m/r to a collection" );
-assert.throws( function(){ t.mapReduce( map, red2, {out: { merge: out }} ); }, [], "read-only user shouldn't be able to output m/r to a collection" );
+assert.throws(function() {
+ t.mapReduce(map, red2, {out: {replace: out}});
+}, [], "read-only user shouldn't be able to output m/r to a collection");
+assert.throws(function() {
+ t.mapReduce(map, red2, {out: {reduce: out}});
+}, [], "read-only user shouldn't be able to output m/r to a collection");
+assert.throws(function() {
+ t.mapReduce(map, red2, {out: {merge: out}});
+}, [], "read-only user shouldn't be able to output m/r to a collection");
-assert.eq (0, d[ out ].count(), "output collection should be empty");
+assert.eq(0, d[out].count(), "output collection should be empty");
d.logout();
-assert.throws( function(){ t.mapReduce( map, red, {out: { replace: out }} ); }, [], "m/r without login" );
-
+assert.throws(function() {
+ t.mapReduce(map, red, {out: {replace: out}});
+}, [], "m/r without login");
-d.auth( "write", "write" );
+d.auth("write", "write");
-t.mapReduce( map, red, {out: { inline: 1 }} );
+t.mapReduce(map, red, {out: {inline: 1}});
-t.mapReduce( map, red, {out: { replace: out }} );
-t.mapReduce( map, red, {out: { reduce: out }} );
-t.mapReduce( map, red, {out: { merge: out }} );
+t.mapReduce(map, red, {out: {replace: out}});
+t.mapReduce(map, red, {out: {reduce: out}});
+t.mapReduce(map, red, {out: {merge: out}});
// make sure it fails if output to a diff db
-assert.throws(function() { t.mapReduce( map, red, {out: { replace: out, db: "admin" }} ); });
+assert.throws(function() {
+ t.mapReduce(map, red, {out: {replace: out, db: "admin"}});
+});
MongoRunner.stopMongod(dbms);
diff --git a/jstests/auth/profile.js b/jstests/auth/profile.js
index f24a303229f..41a8d86f8a9 100644
--- a/jstests/auth/profile.js
+++ b/jstests/auth/profile.js
@@ -3,12 +3,11 @@ var conn = MongoRunner.runMongod();
var db1 = conn.getDB("profile-a");
var db2 = db1.getSisterDB("profile-b");
var username = "user";
-db1.createUser({user:username, pwd: "password", roles: jsTest.basicUserRoles});
-db2.createUser({user:username, pwd: "password", roles: jsTest.basicUserRoles});
-
+db1.createUser({user: username, pwd: "password", roles: jsTest.basicUserRoles});
+db2.createUser({user: username, pwd: "password", roles: jsTest.basicUserRoles});
function lastOp(db) {
- return db.system.profile.find().sort( { $natural:-1 } ).next();
+ return db.system.profile.find().sort({$natural: -1}).next();
}
function principalName(user, db) {
diff --git a/jstests/auth/profile_access.js b/jstests/auth/profile_access.js
index 504b89d7931..e7090ef3b0d 100644
--- a/jstests/auth/profile_access.js
+++ b/jstests/auth/profile_access.js
@@ -1,33 +1,40 @@
-var conn = MongoRunner.runMongod({auth : ""});
+var conn = MongoRunner.runMongod({auth: ""});
var adminDb = conn.getDB("admin");
var testDb = conn.getDB("testdb");
-adminDb.createUser({user:'admin',
- pwd:'password',
- roles:['userAdminAnyDatabase','dbAdminAnyDatabase', 'readWriteAnyDatabase']});
+adminDb.createUser({
+ user: 'admin',
+ pwd: 'password',
+ roles: ['userAdminAnyDatabase', 'dbAdminAnyDatabase', 'readWriteAnyDatabase']
+});
-adminDb.auth('admin','password');
-testDb.createUser({user:'readUser',pwd:'password',roles:['read']});
-testDb.createUser({user:'dbAdminUser',pwd:'password',roles:['dbAdmin']});
-testDb.createUser({user:'dbAdminAnyDBUser',pwd:'password',roles:[{role: 'dbAdminAnyDatabase',
- db: 'admin'}]});
+adminDb.auth('admin', 'password');
+testDb.createUser({user: 'readUser', pwd: 'password', roles: ['read']});
+testDb.createUser({user: 'dbAdminUser', pwd: 'password', roles: ['dbAdmin']});
+testDb.createUser({
+ user: 'dbAdminAnyDBUser',
+ pwd: 'password',
+ roles: [{role: 'dbAdminAnyDatabase', db: 'admin'}]
+});
testDb.setProfilingLevel(2);
testDb.foo.findOne();
adminDb.logout();
-testDb.auth('readUser','password');
-assert.throws(function() { testDb.system.profile.findOne(); });
+testDb.auth('readUser', 'password');
+assert.throws(function() {
+ testDb.system.profile.findOne();
+});
testDb.logout();
// SERVER-14355
-testDb.auth('dbAdminUser','password');
+testDb.auth('dbAdminUser', 'password');
testDb.setProfilingLevel(0);
testDb.system.profile.drop();
assert.commandWorked(testDb.createCollection("system.profile", {capped: true, size: 1024}));
testDb.logout();
// SERVER-16944
-testDb.auth('dbAdminAnyDBUser','password');
+testDb.auth('dbAdminAnyDBUser', 'password');
testDb.setProfilingLevel(0);
testDb.system.profile.drop();
assert.commandWorked(testDb.createCollection("system.profile", {capped: true, size: 1024})); \ No newline at end of file
diff --git a/jstests/auth/pseudo_commands.js b/jstests/auth/pseudo_commands.js
index 629b1fde0d3..510f8c71e53 100644
--- a/jstests/auth/pseudo_commands.js
+++ b/jstests/auth/pseudo_commands.js
@@ -49,144 +49,156 @@ function runTest(conn) {
return (res.ok == 1 && res.isdbgrid == 1);
}
-
(function testInprog() {
- jsTestLog("Testing inprog");
-
- var roles = {read: false,
- readAnyDatabase: false,
- readWrite: false,
- readWriteAnyDatabase: false,
- dbAdmin: false,
- dbAdminAnyDatabase: false,
- dbOwner: false,
- clusterMonitor: true,
- clusterManager: false,
- hostManager: false,
- clusterAdmin: true,
- root: true,
- __system: true
- };
-
- var privilege = { resource: {cluster: true}, actions: ['inprog'] };
-
- var testFunc = function(shouldPass) {
- var passed = true;
- try {
- var res = db.currentOp();
- passed = res.ok && !res.hasOwnProperty("errmsg");
- } catch (e) {
- passed = false;
- }
-
- assert.eq(shouldPass, passed);
- if (shouldPass) {
- assert.gte(res.inprog.length, 0);
- }
- };
-
- testProperAuthorization(testFunc, roles, privilege);
- })();
+ jsTestLog("Testing inprog");
+
+ var roles = {
+ read: false,
+ readAnyDatabase: false,
+ readWrite: false,
+ readWriteAnyDatabase: false,
+ dbAdmin: false,
+ dbAdminAnyDatabase: false,
+ dbOwner: false,
+ clusterMonitor: true,
+ clusterManager: false,
+ hostManager: false,
+ clusterAdmin: true,
+ root: true,
+ __system: true
+ };
+
+ var privilege = {
+ resource: {cluster: true},
+ actions: ['inprog']
+ };
+
+ var testFunc = function(shouldPass) {
+ var passed = true;
+ try {
+ var res = db.currentOp();
+ passed = res.ok && !res.hasOwnProperty("errmsg");
+ } catch (e) {
+ passed = false;
+ }
+
+ assert.eq(shouldPass, passed);
+ if (shouldPass) {
+ assert.gte(res.inprog.length, 0);
+ }
+ };
+
+ testProperAuthorization(testFunc, roles, privilege);
+ })();
(function testKillop() {
- jsTestLog("Testing killOp");
-
- var roles = {read: false,
- readAnyDatabase: false,
- readWrite: false,
- readWriteAnyDatabase: false,
- dbAdmin: false,
- dbAdminAnyDatabase: false,
- dbOwner: false,
- clusterMonitor: false,
- clusterManager: false,
- hostManager: true,
- clusterAdmin: true,
- root: true,
- __system: true
- };
-
- var privilege = { resource: {cluster: true}, actions: ['killop'] };
-
- var testFunc = function(shouldPass) {
- var passed = true;
- try {
- var opid;
- if (isMongos(db)) { // opid format different between mongos and mongod
- opid = "shard0000:1234";
- } else {
- opid = 1234;
- }
- var res = db.killOp(opid);
- printjson(res);
- passed = res.ok && !res.errmsg && !res.err && !res['$err'];
- } catch (e) {
- passed = false;
- }
- assert.eq(shouldPass, passed);
- };
-
- testProperAuthorization(testFunc, roles, privilege);
+ jsTestLog("Testing killOp");
+
+ var roles = {
+ read: false,
+ readAnyDatabase: false,
+ readWrite: false,
+ readWriteAnyDatabase: false,
+ dbAdmin: false,
+ dbAdminAnyDatabase: false,
+ dbOwner: false,
+ clusterMonitor: false,
+ clusterManager: false,
+ hostManager: true,
+ clusterAdmin: true,
+ root: true,
+ __system: true
+ };
+
+ var privilege = {
+ resource: {cluster: true},
+ actions: ['killop']
+ };
+
+ var testFunc = function(shouldPass) {
+ var passed = true;
+ try {
+ var opid;
+ if (isMongos(db)) { // opid format different between mongos and mongod
+ opid = "shard0000:1234";
+ } else {
+ opid = 1234;
+ }
+ var res = db.killOp(opid);
+ printjson(res);
+ passed = res.ok && !res.errmsg && !res.err && !res['$err'];
+ } catch (e) {
+ passed = false;
+ }
+ assert.eq(shouldPass, passed);
+ };
+
+ testProperAuthorization(testFunc, roles, privilege);
})();
(function testUnlock() {
- if (isMongos(db)) {
- return; // unlock doesn't work on mongos
- }
-
- jsTestLog("Testing unlock");
-
- var roles = {read: false,
- readAnyDatabase: false,
- readWrite: false,
- readWriteAnyDatabase: false,
- dbAdmin: false,
- dbAdminAnyDatabase: false,
- dbOwner: false,
- clusterMonitor: false,
- clusterManager: false,
- hostManager: true,
- clusterAdmin: true,
- root: true,
- __system: true
- };
-
- var privilege = { resource: {cluster: true}, actions: ['unlock'] };
-
- var testFunc = function(shouldPass) {
- var passed = true;
- try {
- var ret = admin.fsyncLock(); // must be locked first
- // If the storage engine doesnt support fsync lock, we can't proceed
- if (!ret.ok) {
+ if (isMongos(db)) {
+ return; // unlock doesn't work on mongos
+ }
+
+ jsTestLog("Testing unlock");
+
+ var roles = {
+ read: false,
+ readAnyDatabase: false,
+ readWrite: false,
+ readWriteAnyDatabase: false,
+ dbAdmin: false,
+ dbAdminAnyDatabase: false,
+ dbOwner: false,
+ clusterMonitor: false,
+ clusterManager: false,
+ hostManager: true,
+ clusterAdmin: true,
+ root: true,
+ __system: true
+ };
+
+ var privilege = {
+ resource: {cluster: true},
+ actions: ['unlock']
+ };
+
+ var testFunc = function(shouldPass) {
+ var passed = true;
+ try {
+ var ret = admin.fsyncLock(); // must be locked first
+                // If the storage engine doesn't support fsync lock, we can't proceed
+ if (!ret.ok) {
assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
- assert(shouldPass); // If we get to the storage engine, we better be authorized.
+ assert(
+ shouldPass); // If we get to the storage engine, we better be authorized.
return;
- }
- var res = db.fsyncUnlock();
- printjson(res);
- passed = res.ok && !res.errmsg && !res.err && !res['$err'];
- passed = passed || false; // convert undefined to false
- } catch (e) {
- passed = false;
- }
- if (!passed) {
- admin.fsyncUnlock();
- }
-
- assert.eq(shouldPass, passed);
- };
-
- testProperAuthorization(testFunc, roles, privilege);
- })();
+ }
+ var res = db.fsyncUnlock();
+ printjson(res);
+ passed = res.ok && !res.errmsg && !res.err && !res['$err'];
+ passed = passed || false; // convert undefined to false
+ } catch (e) {
+ passed = false;
+ }
+ if (!passed) {
+ admin.fsyncUnlock();
+ }
+
+ assert.eq(shouldPass, passed);
+ };
+
+ testProperAuthorization(testFunc, roles, privilege);
+ })();
}
jsTest.log('Test standalone');
-var conn = MongoRunner.runMongod({ auth: '' });
+var conn = MongoRunner.runMongod({auth: ''});
runTest(conn);
MongoRunner.stopMongod(conn.port);
jsTest.log('Test sharding');
-var st = new ShardingTest({ shards: 2, config: 3, keyFile: 'jstests/libs/key1' });
+var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
runTest(st.s);
st.stop();
diff --git a/jstests/auth/readIndex.js b/jstests/auth/readIndex.js
index 433a2e7eb1d..deda8717a3b 100644
--- a/jstests/auth/readIndex.js
+++ b/jstests/auth/readIndex.js
@@ -1,21 +1,21 @@
// SERVER-8625: Test that dbAdmins can view index definitions.
-var conn = MongoRunner.runMongod({auth : ""});
+var conn = MongoRunner.runMongod({auth: ""});
var adminDB = conn.getDB("admin");
var testDB = conn.getDB("testdb");
var indexName = 'idx_a';
-adminDB.createUser({user:'root', pwd:'password', roles:['root']});
+adminDB.createUser({user: 'root', pwd: 'password', roles: ['root']});
adminDB.auth('root', 'password');
-testDB.foo.insert({a:1});
-testDB.createUser({user:'dbAdmin', pwd:'password', roles:['dbAdmin']});
+testDB.foo.insert({a: 1});
+testDB.createUser({user: 'dbAdmin', pwd: 'password', roles: ['dbAdmin']});
adminDB.logout();
testDB.auth('dbAdmin', 'password');
-testDB.foo.ensureIndex({a:1}, {name:indexName});
-assert.eq(2, testDB.foo.getIndexes().length); // index on 'a' plus default _id index
+testDB.foo.ensureIndex({a: 1}, {name: indexName});
+assert.eq(2, testDB.foo.getIndexes().length); // index on 'a' plus default _id index
var indexList = testDB.foo.getIndexes().filter(function(idx) {
return idx.name === indexName;
});
assert.eq(1, indexList.length, tojson(indexList));
-assert.docEq(indexList[0].key, {a:1}, tojson(indexList));
+assert.docEq(indexList[0].key, {a: 1}, tojson(indexList));
diff --git a/jstests/auth/rename.js b/jstests/auth/rename.js
index a83d2e75caa..bc1d8a40923 100644
--- a/jstests/auth/rename.js
+++ b/jstests/auth/rename.js
@@ -4,10 +4,10 @@ var m = MongoRunner.runMongod({auth: ""});
var db1 = m.getDB("foo");
var db2 = m.getDB("bar");
-var admin = m.getDB( 'admin' );
+var admin = m.getDB('admin');
// Setup initial data
-admin.createUser({user:'admin', pwd: 'password', roles: jsTest.adminUserRoles});
+admin.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
admin.auth('admin', 'password');
db1.createUser({user: "foo", pwd: "bar", roles: jsTest.basicUserRoles});
@@ -20,18 +20,22 @@ assert.eq(db1.a.count(), 1);
admin.logout();
// can't run same db w/o auth
-assert.commandFailed( admin.runCommand({renameCollection:db1.a.getFullName(), to: db1.b.getFullName()}) );
+assert.commandFailed(
+ admin.runCommand({renameCollection: db1.a.getFullName(), to: db1.b.getFullName()}));
// can run same db with auth
db1.auth('foo', 'bar');
-assert.commandWorked( admin.runCommand({renameCollection:db1.a.getFullName(), to: db1.b.getFullName()}) );
+assert.commandWorked(
+ admin.runCommand({renameCollection: db1.a.getFullName(), to: db1.b.getFullName()}));
// can't run diff db w/o auth
-assert.commandFailed( admin.runCommand({renameCollection:db1.b.getFullName(), to: db2.a.getFullName()}) );
+assert.commandFailed(
+ admin.runCommand({renameCollection: db1.b.getFullName(), to: db2.a.getFullName()}));
// can run diff db with auth
db2.auth('bar', 'foo');
-assert.commandWorked( admin.runCommand({renameCollection:db1.b.getFullName(), to: db2.a.getFullName()}) );
+assert.commandWorked(
+ admin.runCommand({renameCollection: db1.b.getFullName(), to: db2.a.getFullName()}));
// test post conditions
assert.eq(db1.a.count(), 0);
diff --git a/jstests/auth/renameSystemCollections.js b/jstests/auth/renameSystemCollections.js
index 7e4c7b821e3..dffee963499 100644
--- a/jstests/auth/renameSystemCollections.js
+++ b/jstests/auth/renameSystemCollections.js
@@ -1,5 +1,5 @@
// SERVER-8623: Test that renameCollection can't be used to bypass auth checks on system namespaces
-var conn = MongoRunner.runMongod({auth : ""});
+var conn = MongoRunner.runMongod({auth: ""});
var adminDB = conn.getDB("admin");
var testDB = conn.getDB("testdb");
@@ -7,38 +7,38 @@ var testDB2 = conn.getDB("testdb2");
var CodeUnauthorized = 13;
-var backdoorUserDoc = { user: 'backdoor', db: 'admin', pwd: 'hashed', roles: ['root'] };
+var backdoorUserDoc = {
+ user: 'backdoor',
+ db: 'admin',
+ pwd: 'hashed',
+ roles: ['root']
+};
-adminDB.createUser({user:'userAdmin',
- pwd:'password',
- roles:['userAdminAnyDatabase']});
+adminDB.createUser({user: 'userAdmin', pwd: 'password', roles: ['userAdminAnyDatabase']});
adminDB.auth('userAdmin', 'password');
-adminDB.createUser({user:'readWriteAdmin',
- pwd:'password',
- roles:['readWriteAnyDatabase']});
-adminDB.createUser({user:'readWriteAndUserAdmin',
- pwd:'password',
- roles:['readWriteAnyDatabase', 'userAdminAnyDatabase']});
+adminDB.createUser({user: 'readWriteAdmin', pwd: 'password', roles: ['readWriteAnyDatabase']});
+adminDB.createUser({
+ user: 'readWriteAndUserAdmin',
+ pwd: 'password',
+ roles: ['readWriteAnyDatabase', 'userAdminAnyDatabase']
+});
adminDB.createUser({user: 'root', pwd: 'password', roles: ['root']});
adminDB.createUser({user: 'rootier', pwd: 'password', roles: ['__system']});
adminDB.logout();
-
jsTestLog("Test that a readWrite user can't rename system.profile to something they can read");
adminDB.auth('readWriteAdmin', 'password');
res = adminDB.system.profile.renameCollection("profile");
assert.eq(0, res.ok);
assert.eq(CodeUnauthorized, res.code);
-
jsTestLog("Test that a readWrite user can't rename system.users to something they can read");
var res = adminDB.system.users.renameCollection("users");
assert.eq(0, res.ok);
assert.eq(CodeUnauthorized, res.code);
assert.eq(0, adminDB.users.count());
-
jsTestLog("Test that a readWrite user can't use renameCollection to override system.users");
adminDB.users.insert(backdoorUserDoc);
res = adminDB.users.renameCollection("system.users", true);
@@ -70,7 +70,7 @@ assert.eq(0, res.ok);
assert.eq(CodeUnauthorized, res.code);
assert.eq(null, adminDB.system.users.findOne({user: backdoorUserDoc.user}));
-assert.neq(null, adminDB.system.users.findOne({user:'userAdmin'}));
+assert.neq(null, adminDB.system.users.findOne({user: 'userAdmin'}));
adminDB.auth('rootier', 'password');
diff --git a/jstests/auth/repl.js b/jstests/auth/repl.js
index 1ed13ad1400..17ab3c2db20 100644
--- a/jstests/auth/repl.js
+++ b/jstests/auth/repl.js
@@ -3,7 +3,10 @@
var baseName = "jstests_auth_repl";
var rsName = baseName + "_rs";
var rtName = baseName + "_rt";
-var mongoOptions = {auth: null, keyFile: "jstests/libs/key1"};
+var mongoOptions = {
+ auth: null,
+ keyFile: "jstests/libs/key1"
+};
var authErrCode = 13;
var AuthReplTest = function(spec) {
@@ -73,20 +76,15 @@ var AuthReplTest = function(spec) {
var updateRole = function() {
var res = adminPri.runCommand({
updateRole: testRole,
- privileges: [
- { resource: {cluster: true}, actions: ["listDatabases"] }
- ],
+ privileges: [{resource: {cluster: true}, actions: ["listDatabases"]}],
writeConcern: {w: 2, wtimeout: 15000}
});
assert.commandWorked(res);
};
var updateUser = function() {
- var res = adminPri.runCommand({
- updateUser: testUser,
- roles: [testRole2],
- writeConcern: {w: 2, wtimeout: 15000}
- });
+ var res = adminPri.runCommand(
+ {updateUser: testUser, roles: [testRole2], writeConcern: {w: 2, wtimeout: 15000}});
assert.commandWorked(res);
};
@@ -108,11 +106,10 @@ var AuthReplTest = function(spec) {
* Remove test users and roles
*/
var cleanup = function() {
- var res = adminPri.runCommand({dropUser: testUser,
- writeConcern: {w: 2, wtimeout: 15000}});
+ var res = adminPri.runCommand({dropUser: testUser, writeConcern: {w: 2, wtimeout: 15000}});
assert.commandWorked(res);
- res = adminPri.runCommand({dropAllRolesFromDatabase: 1,
- writeConcern: {w: 2, wtimeout: 15000}});
+ res = adminPri.runCommand(
+ {dropAllRolesFromDatabase: 1, writeConcern: {w: 2, wtimeout: 15000}});
assert.commandWorked(res);
};
@@ -137,10 +134,8 @@ var AuthReplTest = function(spec) {
for (var i = 0; i < roles.length; i++) {
var res = adminPri.runCommand({
createRole: roles[i],
- privileges: [
- { resource: {cluster: true}, actions: [ actions[i] ] }
- ],
- roles: [ ],
+ privileges: [{resource: {cluster: true}, actions: [actions[i]]}],
+ roles: [],
writeConcern: {w: numNodes, wtimeout: 15000}
});
assert.commandWorked(res);
@@ -168,11 +163,11 @@ var AuthReplTest = function(spec) {
updateRole();
confirmPrivilegeAfterUpdate();
confirmRolesInfo("listDatabases");
-
+
updateUser();
confirmPrivilegeAfterUpdate();
confirmUsersInfo(testRole2);
-
+
cleanup();
};
@@ -183,31 +178,33 @@ jsTest.log("1 test replica sets");
var rs = new ReplSetTest({name: rsName, nodes: 2});
var nodes = rs.startSet(mongoOptions);
rs.initiate();
-authutil.asCluster(nodes, "jstests/libs/key1", function() { rs.awaitReplication(); });
+authutil.asCluster(nodes,
+ "jstests/libs/key1",
+ function() {
+ rs.awaitReplication();
+ });
var primary = rs.getPrimary();
var secondary = rs.getSecondary();
-var authReplTest = AuthReplTest({
- primaryConn: primary,
- secondaryConn: secondary
-});
+var authReplTest = AuthReplTest({primaryConn: primary, secondaryConn: secondary});
authReplTest.createUserAndRoles(2);
authReplTest.testAll();
rs.stopSet();
-jsTest.log("2 test initial sync");
+jsTest.log("2 test initial sync");
rs = new ReplSetTest({name: rsName, nodes: 1, nodeOptions: mongoOptions});
nodes = rs.startSet();
rs.initiate();
-authutil.asCluster(nodes, "jstests/libs/key1", function() { rs.awaitReplication(); });
+authutil.asCluster(nodes,
+ "jstests/libs/key1",
+ function() {
+ rs.awaitReplication();
+ });
primary = rs.getPrimary();
-var authReplTest = AuthReplTest({
- primaryConn: primary,
- secondaryConn: null
-});
+var authReplTest = AuthReplTest({primaryConn: primary, secondaryConn: null});
authReplTest.createUserAndRoles(1);
// Add a secondary and wait for initial sync
@@ -239,14 +236,10 @@ masterDB.createUser({user: "root", pwd: "pass", roles: ["root"]});
masterDB.auth("root", "pass");
// ensure that master/slave replication is up and running
-masterDB.foo.save({}, { writeConcern: { w: 2, wtimeout: 15000 }});
+masterDB.foo.save({}, {writeConcern: {w: 2, wtimeout: 15000}});
masterDB.foo.drop();
-authReplTest = AuthReplTest({
- primaryConn: master,
- secondaryConn: slave
-});
+authReplTest = AuthReplTest({primaryConn: master, secondaryConn: slave});
authReplTest.createUserAndRoles(2);
authReplTest.testAll();
rt.stop();
-
diff --git a/jstests/auth/repl_auth.js b/jstests/auth/repl_auth.js
index a5bde8167c9..bafaafd8f7b 100644
--- a/jstests/auth/repl_auth.js
+++ b/jstests/auth/repl_auth.js
@@ -4,8 +4,8 @@
*/
var NUM_NODES = 3;
-var rsTest = new ReplSetTest({ nodes: NUM_NODES });
-rsTest.startSet({ oplogSize: 10, keyFile: 'jstests/libs/key1' });
+var rsTest = new ReplSetTest({nodes: NUM_NODES});
+rsTest.startSet({oplogSize: 10, keyFile: 'jstests/libs/key1'});
rsTest.initiate();
rsTest.awaitSecondaryNodes();
@@ -13,14 +13,14 @@ var setupConn = rsTest.getPrimary();
var admin = setupConn.getDB('admin');
// Setup initial data
-admin.createUser({ user:'admin', pwd: 'password', roles: jsTest.adminUserRoles });
+admin.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
admin.auth('admin', 'password');
-setupConn.getDB('foo').createUser({ user: 'foo', pwd: 'foopwd', roles: jsTest.basicUserRoles },
- { w: NUM_NODES });
+setupConn.getDB('foo')
+ .createUser({user: 'foo', pwd: 'foopwd', roles: jsTest.basicUserRoles}, {w: NUM_NODES});
setupConn.getDB('foo').logout();
-setupConn.getDB('bar').createUser({ user: 'bar', pwd: 'barpwd', roles: jsTest.basicUserRoles },
- { w: NUM_NODES });
+setupConn.getDB('bar')
+ .createUser({user: 'bar', pwd: 'barpwd', roles: jsTest.basicUserRoles}, {w: NUM_NODES});
setupConn.getDB('bar').logout();
var replConn0 = new Mongo(rsTest.getURL());
@@ -33,16 +33,16 @@ var barDB1 = replConn1.getDB('bar');
fooDB0.auth('foo', 'foopwd');
barDB1.auth('bar', 'barpwd');
-assert.writeOK(fooDB0.user.insert({ x: 1 }, { writeConcern: { w: NUM_NODES }}));
-assert.writeError(barDB0.user.insert({ x: 1 }, { writeConcern: { w: NUM_NODES }}));
+assert.writeOK(fooDB0.user.insert({x: 1}, {writeConcern: {w: NUM_NODES}}));
+assert.writeError(barDB0.user.insert({x: 1}, {writeConcern: {w: NUM_NODES}}));
-assert.writeError(fooDB1.user.insert({ x: 2 }, { writeConcern: { w: NUM_NODES }}));
-assert.writeOK(barDB1.user.insert({ x: 2 }, { writeConcern: { w: NUM_NODES }}));
+assert.writeError(fooDB1.user.insert({x: 2}, {writeConcern: {w: NUM_NODES}}));
+assert.writeOK(barDB1.user.insert({x: 2}, {writeConcern: {w: NUM_NODES}}));
// Make sure replica set connection in the shell is ready.
-_awaitRSHostViaRSMonitor(rsTest.getPrimary().name, { ok: true, ismaster: true }, rsTest.name);
+_awaitRSHostViaRSMonitor(rsTest.getPrimary().name, {ok: true, ismaster: true}, rsTest.name);
rsTest.getSecondaries().forEach(function(sec) {
- _awaitRSHostViaRSMonitor(sec.name, { ok: true, secondary: true }, rsTest.name);
+ _awaitRSHostViaRSMonitor(sec.name, {ok: true, secondary: true}, rsTest.name);
});
// Note: secondary nodes are selected randomly and connections will only be returned to the
@@ -65,4 +65,3 @@ for (var x = 0; x < 20; x++) {
}
rsTest.stopSet();
-
diff --git a/jstests/auth/resource_pattern_matching.js b/jstests/auth/resource_pattern_matching.js
index 7acad4f5f1f..5a4da66871c 100644
--- a/jstests/auth/resource_pattern_matching.js
+++ b/jstests/auth/resource_pattern_matching.js
@@ -17,17 +17,9 @@ function setup_users(granter) {
admindb.auth("admin", "admin");
- printjson(admindb.runCommand({
- createRole: "test_role",
- privileges: [],
- roles: []
- }));
-
- printjson(admindb.runCommand({
- createUser: "test_user",
- pwd: "password",
- roles: [ "test_role" ]
- }));
+ printjson(admindb.runCommand({createRole: "test_role", privileges: [], roles: []}));
+
+ printjson(admindb.runCommand({createUser: "test_user", pwd: "password", roles: ["test_role"]}));
}
function setup_dbs_and_cols(db) {
@@ -54,7 +46,7 @@ function grant_privileges(granter, privileges) {
var result = admindb.runCommand({
grantPrivilegesToRole: "test_role",
privileges: privileges,
- writeConcern: { w: 'majority'}
+ writeConcern: {w: 'majority'}
});
admindb.logout();
@@ -70,7 +62,7 @@ function revoke_privileges(granter, privileges) {
var result = admindb.runCommand({
revokePrivilegesFromRole: "test_role",
privileges: privileges,
- writeConcern: { w: 'majority'}
+ writeConcern: {w: 'majority'}
});
admindb.logout();
@@ -107,118 +99,135 @@ function run_test(name, granter, verifier, privileges, collections) {
}
function run_test_bad_resource(name, granter, resource) {
- print("\n=== testing resource fail " + name + "() ===\n");
- var admindb = granter.getSiblingDB("admin");
- assert.commandFailed(
- grant_privileges(granter, [{ resource: resource, actions: [ "find" ] }])
- );
+ print("\n=== testing resource fail " + name + "() ===\n");
+ var admindb = granter.getSiblingDB("admin");
+ assert.commandFailed(grant_privileges(granter, [{resource: resource, actions: ["find"]}]));
}
function should_insert(testdb, testcol) {
- assert.doesNotThrow(function() { testcol.insert({ a : "b" }); });
+ assert.doesNotThrow(function() {
+ testcol.insert({a: "b"});
+ });
}
function should_fail_insert(testdb, testcol) {
- assert.throws(function() { testcol.insert({ a : "b" }); });
+ assert.throws(function() {
+ testcol.insert({a: "b"});
+ });
}
function should_find(testdb, testcol) {
- assert.doesNotThrow(function() { testcol.findOne(); });
+ assert.doesNotThrow(function() {
+ testcol.findOne();
+ });
}
function should_fail_find(testdb, testcol) {
- assert.throws(function() { testcol.findOne(); });
+ assert.throws(function() {
+ testcol.findOne();
+ });
}
function run_tests(granter, verifier) {
setup_users(granter);
setup_dbs_and_cols(granter);
- run_test("specific", granter, verifier,
- [ { resource: { db: "a", collection: "a" }, actions: [ "find" ]} ],
- {
- "a.a" : should_find,
- "a.b" : should_fail_find,
- "b.a" : should_fail_find,
- "b.b" : should_fail_find
- }
- );
-
- run_test("glob_collection", granter, verifier,
- [ { resource: { db: "a", collection: "" }, actions: [ "find" ]} ],
- {
- "a.a" : should_find,
- "a.b" : should_find,
- "b.a" : should_fail_find,
- "b.b" : should_fail_find
- }
- );
-
- run_test("glob_database", granter, verifier,
- [ { resource: { db: "", collection: "a" }, actions: [ "find" ]} ],
- {
- "a.a" : should_find,
- "a.b" : should_fail_find,
- "b.a" : should_find,
- "b.b" : should_fail_find
- }
- );
-
- run_test("glob_all", granter, verifier,
- [ { resource: { db: "", collection: "" }, actions: [ "find" ]} ],
- {
- "a.a" : should_find,
- "a.b" : should_find,
- "b.a" : should_find,
- "b.b" : should_find
- }
- );
-
- run_test("any_resource", granter, verifier,
- [ { resource: { anyResource: true }, actions: [ "find" ]} ],
- {
- "a.a" : should_find,
- "a.b" : should_find,
- "b.a" : should_find,
- "b.b" : should_find,
- "c.a" : should_find
- }
- );
-
- run_test("no_global_access", granter, verifier,
- [ { resource: { db: "$", collection: "cmd" }, actions: [ "find" ]} ],
- {
- "a.a" : function (testdb, testcol) {
- var r = testdb.stats();
-
- if (r["ok"]) throw ("db.$.cmd shouldn't give a.stats()");
- }
- }
- );
-
- run_test_bad_resource("empty_resource", granter, { });
- run_test_bad_resource("users_collection_any_db", granter, { collection: "users" });
- run_test_bad_resource("bad_key", granter, { myResource: "users" });
- run_test_bad_resource("extra_key", granter, { db: "test", collection: "users", cluster: true });
- run_test_bad_resource("bad_value_type", granter, { cluster: "false" });
- run_test_bad_resource("bad_collection", granter, { db: "test", collection: "$$$$" });
-
- run_test("mixed_find_write", granter, verifier,
- [
- { resource: { db: "a", collection: "a" }, actions: [ "find" ]},
- { resource: { db: "", collection: "" }, actions: [ "insert" ]}
- ],
- {
- "a.a" : function(testdb, testcol) { should_insert(testdb, testcol);
- should_find(testdb, testcol); },
- "a.b" : function(testdb, testcol) { should_insert(testdb, testcol);
- should_fail_find(testdb, testcol); },
- "b.a" : function(testdb, testcol) { should_insert(testdb, testcol);
- should_fail_find(testdb, testcol); },
- "b.b" : function(testdb, testcol) { should_insert(testdb, testcol);
- should_fail_find(testdb, testcol); },
- }
- );
+ run_test("specific",
+ granter,
+ verifier,
+ [{resource: {db: "a", collection: "a"}, actions: ["find"]}],
+ {
+ "a.a": should_find,
+ "a.b": should_fail_find,
+ "b.a": should_fail_find,
+ "b.b": should_fail_find
+ });
+
+ run_test("glob_collection",
+ granter,
+ verifier,
+ [{resource: {db: "a", collection: ""}, actions: ["find"]}],
+ {
+ "a.a": should_find,
+ "a.b": should_find,
+ "b.a": should_fail_find,
+ "b.b": should_fail_find
+ });
+
+ run_test("glob_database",
+ granter,
+ verifier,
+ [{resource: {db: "", collection: "a"}, actions: ["find"]}],
+ {
+ "a.a": should_find,
+ "a.b": should_fail_find,
+ "b.a": should_find,
+ "b.b": should_fail_find
+ });
+
+ run_test("glob_all",
+ granter,
+ verifier,
+ [{resource: {db: "", collection: ""}, actions: ["find"]}],
+ {"a.a": should_find, "a.b": should_find, "b.a": should_find, "b.b": should_find});
+
+ run_test("any_resource",
+ granter,
+ verifier,
+ [{resource: {anyResource: true}, actions: ["find"]}],
+ {
+ "a.a": should_find,
+ "a.b": should_find,
+ "b.a": should_find,
+ "b.b": should_find,
+ "c.a": should_find
+ });
+
+ run_test("no_global_access",
+ granter,
+ verifier,
+ [{resource: {db: "$", collection: "cmd"}, actions: ["find"]}],
+ {
+ "a.a": function(testdb, testcol) {
+ var r = testdb.stats();
+
+ if (r["ok"])
+ throw("db.$.cmd shouldn't give a.stats()");
+ }
+ });
+
+ run_test_bad_resource("empty_resource", granter, {});
+ run_test_bad_resource("users_collection_any_db", granter, {collection: "users"});
+ run_test_bad_resource("bad_key", granter, {myResource: "users"});
+ run_test_bad_resource("extra_key", granter, {db: "test", collection: "users", cluster: true});
+ run_test_bad_resource("bad_value_type", granter, {cluster: "false"});
+ run_test_bad_resource("bad_collection", granter, {db: "test", collection: "$$$$"});
+
+ run_test("mixed_find_write",
+ granter,
+ verifier,
+ [
+ {resource: {db: "a", collection: "a"}, actions: ["find"]},
+ {resource: {db: "", collection: ""}, actions: ["insert"]}
+ ],
+ {
+ "a.a": function(testdb, testcol) {
+ should_insert(testdb, testcol);
+ should_find(testdb, testcol);
+ },
+ "a.b": function(testdb, testcol) {
+ should_insert(testdb, testcol);
+ should_fail_find(testdb, testcol);
+ },
+ "b.a": function(testdb, testcol) {
+ should_insert(testdb, testcol);
+ should_fail_find(testdb, testcol);
+ },
+ "b.b": function(testdb, testcol) {
+ should_insert(testdb, testcol);
+ should_fail_find(testdb, testcol);
+ },
+ });
}
var keyfile = "jstests/libs/key1";
@@ -231,9 +240,9 @@ print('--- done standalone node test ---');
print('--- replica set test ---');
var rst = new ReplSetTest({
- name: 'testset',
- nodes: 2,
- nodeOptions: { 'auth': null, 'httpinterface': null, 'keyFile': keyfile }
+ name: 'testset',
+ nodes: 2,
+ nodeOptions: {'auth': null, 'httpinterface': null, 'keyFile': keyfile}
});
rst.startSet();
@@ -247,14 +256,14 @@ print('--- done with the rs tests ---');
print('--- sharding test ---');
var st = new ShardingTest({
- mongos: 2,
- shard: 1,
- keyFile: keyfile,
- other: {
- mongosOptions: { 'auth': null, 'httpinterface': null },
- configOptions: { 'auth': null, 'httpinterface': null },
- shardOptions: { 'auth': null, 'httpinterface': null }
- }
+ mongos: 2,
+ shard: 1,
+ keyFile: keyfile,
+ other: {
+ mongosOptions: {'auth': null, 'httpinterface': null},
+ configOptions: {'auth': null, 'httpinterface': null},
+ shardOptions: {'auth': null, 'httpinterface': null}
+ }
});
run_tests(st.s0.getDB('admin'), st.s1.getDB('admin'));
st.stop();
diff --git a/jstests/auth/role_management_commands.js b/jstests/auth/role_management_commands.js
index 54b7b91b8c8..9847818e3b1 100644
--- a/jstests/auth/role_management_commands.js
+++ b/jstests/auth/role_management_commands.js
@@ -15,11 +15,10 @@ function runTest(conn) {
var adminUserAdmin = userAdminConn.getDB('admin');
adminUserAdmin.createUser({user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
adminUserAdmin.auth('userAdmin', 'pwd');
- testUserAdmin.createUser({user: 'testUser', pwd: 'pwd', roles:[]});
+ testUserAdmin.createUser({user: 'testUser', pwd: 'pwd', roles: []});
var db = conn.getDB('test');
assert(db.auth('testUser', 'pwd'));
-
// At this point there are 3 databases handles in use. - "testUserAdmin" and "adminUserAdmin"
// are handles to the "test" and "admin" dbs respectively. They are on the same connection,
// which has been auth'd as a user with the 'userAdminAnyDatabase' role. This will be used
@@ -29,248 +28,289 @@ function runTest(conn) {
// the roles assigned to "testUser".
(function testCreateRole() {
- jsTestLog("Testing createRole");
-
- testUserAdmin.createRole({role: "testRole1", roles: ['read'], privileges: []});
- testUserAdmin.createRole({role: "testRole2",
- roles: [],
- privileges: [{resource: {db: 'test', collection: 'foo'},
- actions: ['insert']}]});
- testUserAdmin.createRole({role: "testRole3",
- roles: ['testRole1', {role: 'testRole2', db: 'test'}],
- privileges: []});
- testUserAdmin.createRole({role: "testRole4", roles: [], privileges: []});
- adminUserAdmin.createRole({role: "adminRole",
- roles: [],
- privileges: [{resource: {cluster: true},
- actions: ['connPoolSync']}]});
-
- testUserAdmin.updateUser('testUser', {roles: [{role: 'adminRole', db: 'admin'}]});
- assert.throws(function() {db.foo.findOne();});
- hasAuthzError(db.foo.insert({ a: 1 }));
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.commandWorked(db.adminCommand('connPoolSync'));
-
- testUserAdmin.updateUser('testUser', {roles: ['testRole1']});
- assert.doesNotThrow(function() {db.foo.findOne();});
- assert.eq(0, db.foo.count());
- hasAuthzError(db.foo.insert({ a: 1 }));
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
-
- testUserAdmin.updateUser('testUser', {roles: ['testRole2']});
- assert.throws(function() {db.foo.findOne();});
- assert.writeOK(db.foo.insert({ a: 1 }));
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
-
- testUserAdmin.updateUser('testUser', {roles: ['testRole3']});
- assert.doesNotThrow(function() {db.foo.findOne();});
- assert.eq(1, db.foo.count());
- assert.writeOK(db.foo.insert({ a: 1 }));
- assert.eq(2, db.foo.count());
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.eq(1, db.foo.findOne().a);
- assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
-
- testUserAdmin.updateUser('testUser', {roles: [{role: 'testRole4', db: 'test'}]});
- assert.throws(function() {db.foo.findOne();});
- hasAuthzError(db.foo.insert({ a: 1 }));
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
- })();
+ jsTestLog("Testing createRole");
+
+ testUserAdmin.createRole({role: "testRole1", roles: ['read'], privileges: []});
+ testUserAdmin.createRole({
+ role: "testRole2",
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: 'foo'}, actions: ['insert']}]
+ });
+ testUserAdmin.createRole({
+ role: "testRole3",
+ roles: ['testRole1', {role: 'testRole2', db: 'test'}],
+ privileges: []
+ });
+ testUserAdmin.createRole({role: "testRole4", roles: [], privileges: []});
+ adminUserAdmin.createRole({
+ role: "adminRole",
+ roles: [],
+ privileges: [{resource: {cluster: true}, actions: ['connPoolSync']}]
+ });
+
+ testUserAdmin.updateUser('testUser', {roles: [{role: 'adminRole', db: 'admin'}]});
+ assert.throws(function() {
+ db.foo.findOne();
+ });
+ hasAuthzError(db.foo.insert({a: 1}));
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.commandWorked(db.adminCommand('connPoolSync'));
+
+ testUserAdmin.updateUser('testUser', {roles: ['testRole1']});
+ assert.doesNotThrow(function() {
+ db.foo.findOne();
+ });
+ assert.eq(0, db.foo.count());
+ hasAuthzError(db.foo.insert({a: 1}));
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
+
+ testUserAdmin.updateUser('testUser', {roles: ['testRole2']});
+ assert.throws(function() {
+ db.foo.findOne();
+ });
+ assert.writeOK(db.foo.insert({a: 1}));
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
+
+ testUserAdmin.updateUser('testUser', {roles: ['testRole3']});
+ assert.doesNotThrow(function() {
+ db.foo.findOne();
+ });
+ assert.eq(1, db.foo.count());
+ assert.writeOK(db.foo.insert({a: 1}));
+ assert.eq(2, db.foo.count());
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.eq(1, db.foo.findOne().a);
+ assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
+
+ testUserAdmin.updateUser('testUser', {roles: [{role: 'testRole4', db: 'test'}]});
+ assert.throws(function() {
+ db.foo.findOne();
+ });
+ hasAuthzError(db.foo.insert({a: 1}));
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
+ })();
(function testUpdateRole() {
- jsTestLog("Testing updateRole");
-
- testUserAdmin.updateRole('testRole4',
- {roles: [{role: 'testRole2', db: 'test'}, "testRole2"]});
- assert.throws(function() {db.foo.findOne();});
- assert.writeOK(db.foo.insert({ a: 1 }));
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
-
- testUserAdmin.updateRole('testRole4',
- {privileges: [{resource: {db: 'test', collection: ''},
- actions: ['find']}]});
- assert.doesNotThrow(function() {db.foo.findOne();});
- assert.eq(3, db.foo.count());
- assert.writeOK(db.foo.insert({ a: 1 }));
- assert.eq(4, db.foo.count());
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.eq(1, db.foo.findOne().a);
- assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
-
- testUserAdmin.updateRole('testRole4', {roles: []});
- assert.doesNotThrow(function() {db.foo.findOne();});
- assert.eq(4, db.foo.count());
- hasAuthzError(db.foo.insert({ a: 1 }));
- assert.eq(4, db.foo.count());
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.eq(1, db.foo.findOne().a);
- assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
-
- testUserAdmin.updateUser('testUser', {roles: [{role: 'adminRole', db: 'admin'}]});
- adminUserAdmin.updateRole('adminRole', {roles: [{role: 'read', db: 'test'}]});
- assert.doesNotThrow(function() {db.foo.findOne();});
- assert.eq(4, db.foo.count());
- hasAuthzError(db.foo.insert({ a: 1 }));
- assert.eq(4, db.foo.count());
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.eq(1, db.foo.findOne().a);
- assert.commandWorked(db.adminCommand('connPoolSync'));
- })();
+ jsTestLog("Testing updateRole");
+
+ testUserAdmin.updateRole('testRole4',
+ {roles: [{role: 'testRole2', db: 'test'}, "testRole2"]});
+ assert.throws(function() {
+ db.foo.findOne();
+ });
+ assert.writeOK(db.foo.insert({a: 1}));
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
+
+ testUserAdmin.updateRole(
+ 'testRole4',
+ {privileges: [{resource: {db: 'test', collection: ''}, actions: ['find']}]});
+ assert.doesNotThrow(function() {
+ db.foo.findOne();
+ });
+ assert.eq(3, db.foo.count());
+ assert.writeOK(db.foo.insert({a: 1}));
+ assert.eq(4, db.foo.count());
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.eq(1, db.foo.findOne().a);
+ assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
+
+ testUserAdmin.updateRole('testRole4', {roles: []});
+ assert.doesNotThrow(function() {
+ db.foo.findOne();
+ });
+ assert.eq(4, db.foo.count());
+ hasAuthzError(db.foo.insert({a: 1}));
+ assert.eq(4, db.foo.count());
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.eq(1, db.foo.findOne().a);
+ assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
+
+ testUserAdmin.updateUser('testUser', {roles: [{role: 'adminRole', db: 'admin'}]});
+ adminUserAdmin.updateRole('adminRole', {roles: [{role: 'read', db: 'test'}]});
+ assert.doesNotThrow(function() {
+ db.foo.findOne();
+ });
+ assert.eq(4, db.foo.count());
+ hasAuthzError(db.foo.insert({a: 1}));
+ assert.eq(4, db.foo.count());
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.eq(1, db.foo.findOne().a);
+ assert.commandWorked(db.adminCommand('connPoolSync'));
+ })();
(function testGrantRolesToRole() {
- jsTestLog("Testing grantRolesToRole");
-
- assert.commandFailedWithCode(db.adminCommand('serverStatus'), authzErrorCode);
-
- adminUserAdmin.grantRolesToRole("adminRole",
- ['clusterMonitor',
- {role: 'read', db: 'test'},
- {role: 'testRole2', db: 'test'}]);
- assert.doesNotThrow(function() {db.foo.findOne();});
- assert.eq(4, db.foo.count());
- assert.writeOK(db.foo.insert({ a: 1 }));
- assert.eq(5, db.foo.count());
- hasAuthzError(db.foo.update({}, {$inc: {a:1}}, false, true));
- assert.eq(1, db.foo.findOne().a);
- assert.commandWorked(db.adminCommand('connPoolSync'));
- assert.commandWorked(db.adminCommand('serverStatus'));
- })();
+ jsTestLog("Testing grantRolesToRole");
+
+ assert.commandFailedWithCode(db.adminCommand('serverStatus'), authzErrorCode);
+
+ adminUserAdmin.grantRolesToRole(
+ "adminRole",
+ ['clusterMonitor', {role: 'read', db: 'test'}, {role: 'testRole2', db: 'test'}]);
+ assert.doesNotThrow(function() {
+ db.foo.findOne();
+ });
+ assert.eq(4, db.foo.count());
+ assert.writeOK(db.foo.insert({a: 1}));
+ assert.eq(5, db.foo.count());
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.eq(1, db.foo.findOne().a);
+ assert.commandWorked(db.adminCommand('connPoolSync'));
+ assert.commandWorked(db.adminCommand('serverStatus'));
+ })();
(function testRevokeRolesFromRole() {
- jsTestLog("Testing revokeRolesFromRole");
-
- adminUserAdmin.revokeRolesFromRole("adminRole",
- ['clusterMonitor',
- {role: 'read', db: 'test'},
- {role: 'testRole2', db: 'test'}]);
- assert.throws(function() {db.foo.findOne();});
- hasAuthzError(db.foo.insert({ a: 1 }));
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.commandWorked(db.adminCommand('connPoolSync'));
- assert.commandFailedWithCode(db.adminCommand('serverStatus'), authzErrorCode);
- })();
+ jsTestLog("Testing revokeRolesFromRole");
+
+ adminUserAdmin.revokeRolesFromRole(
+ "adminRole",
+ ['clusterMonitor', {role: 'read', db: 'test'}, {role: 'testRole2', db: 'test'}]);
+ assert.throws(function() {
+ db.foo.findOne();
+ });
+ hasAuthzError(db.foo.insert({a: 1}));
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.commandWorked(db.adminCommand('connPoolSync'));
+ assert.commandFailedWithCode(db.adminCommand('serverStatus'), authzErrorCode);
+ })();
(function testGrantPrivilegesToRole() {
- jsTestLog("Testing grantPrivilegesToRole");
-
- adminUserAdmin.grantPrivilegesToRole('adminRole',
- [{resource: {cluster: true},
- actions: ['serverStatus']},
- {resource: {db:"", collection: ""},
- actions: ['find']}]);
- assert.doesNotThrow(function() {db.foo.findOne();});
- hasAuthzError(db.foo.insert({ a: 1 }));
- assert.eq(5, db.foo.count());
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.eq(1, db.foo.findOne().a);
- assert.commandWorked(db.adminCommand('connPoolSync'));
- assert.commandWorked(db.adminCommand('serverStatus'));
-
- testUserAdmin.updateUser('testUser', {roles: ['testRole2']});
- testUserAdmin.grantPrivilegesToRole('testRole2',
- [{resource: {db: 'test', collection: ''},
- actions: ['insert', 'update']},
- {resource: {db: 'test', collection: 'foo'},
- actions: ['find']}]);
- assert.doesNotThrow(function() {db.foo.findOne();});
- assert.writeOK(db.foo.insert({ a: 1 }));
- assert.eq(6, db.foo.count());
- assert.writeOK(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.eq(2, db.foo.findOne().a);
- assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
- assert.commandFailedWithCode(db.adminCommand('serverStatus'), authzErrorCode);
- })();
+ jsTestLog("Testing grantPrivilegesToRole");
+
+ adminUserAdmin.grantPrivilegesToRole(
+ 'adminRole',
+ [
+ {resource: {cluster: true}, actions: ['serverStatus']},
+ {resource: {db: "", collection: ""}, actions: ['find']}
+ ]);
+ assert.doesNotThrow(function() {
+ db.foo.findOne();
+ });
+ hasAuthzError(db.foo.insert({a: 1}));
+ assert.eq(5, db.foo.count());
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.eq(1, db.foo.findOne().a);
+ assert.commandWorked(db.adminCommand('connPoolSync'));
+ assert.commandWorked(db.adminCommand('serverStatus'));
+
+ testUserAdmin.updateUser('testUser', {roles: ['testRole2']});
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole2',
+ [
+ {resource: {db: 'test', collection: ''}, actions: ['insert', 'update']},
+ {resource: {db: 'test', collection: 'foo'}, actions: ['find']}
+ ]);
+ assert.doesNotThrow(function() {
+ db.foo.findOne();
+ });
+ assert.writeOK(db.foo.insert({a: 1}));
+ assert.eq(6, db.foo.count());
+ assert.writeOK(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.eq(2, db.foo.findOne().a);
+ assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
+ assert.commandFailedWithCode(db.adminCommand('serverStatus'), authzErrorCode);
+ })();
(function testRevokePrivilegesFromRole() {
- jsTestLog("Testing revokePrivilegesFromRole");
-
- testUserAdmin.revokePrivilegesFromRole('testRole2',
- [{resource: {db: 'test', collection: ''},
- actions: ['insert', 'update', 'find']}]);
- assert.doesNotThrow(function() {db.foo.findOne();});
- assert.writeOK(db.foo.insert({ a: 1 }));
- assert.eq(7, db.foo.count());
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}, false, true));
- assert.eq(2, db.foo.findOne().a);
- assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
- assert.commandFailedWithCode(db.adminCommand('serverStatus'), authzErrorCode);
- })();
+ jsTestLog("Testing revokePrivilegesFromRole");
+
+ testUserAdmin.revokePrivilegesFromRole(
+ 'testRole2',
+ [{resource: {db: 'test', collection: ''}, actions: ['insert', 'update', 'find']}]);
+ assert.doesNotThrow(function() {
+ db.foo.findOne();
+ });
+ assert.writeOK(db.foo.insert({a: 1}));
+ assert.eq(7, db.foo.count());
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}, false, true));
+ assert.eq(2, db.foo.findOne().a);
+ assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
+ assert.commandFailedWithCode(db.adminCommand('serverStatus'), authzErrorCode);
+ })();
(function testRolesInfo() {
- jsTestLog("Testing rolesInfo");
-
- var res = testUserAdmin.runCommand({rolesInfo: 'testRole1'});
- assert.eq(1, res.roles.length);
- assert.eq("testRole1", res.roles[0].role);
- assert.eq("test", res.roles[0].db);
- assert.eq(1, res.roles[0].roles.length);
- assert.eq("read", res.roles[0].roles[0].role);
-
- res = testUserAdmin.runCommand({rolesInfo: {role: 'testRole1', db: 'test'}});
- assert.eq(1, res.roles.length);
- assert.eq("testRole1", res.roles[0].role);
- assert.eq("test", res.roles[0].db);
- assert.eq(1, res.roles[0].roles.length);
- assert.eq("read", res.roles[0].roles[0].role);
-
- res = testUserAdmin.runCommand({rolesInfo: ['testRole1', {role: 'adminRole', db: 'admin'}]});
- assert.eq(2, res.roles.length);
- assert.eq("testRole1", res.roles[0].role);
- assert.eq("test", res.roles[0].db);
- assert.eq(1, res.roles[0].roles.length);
- assert.eq("read", res.roles[0].roles[0].role);
- assert.eq("adminRole", res.roles[1].role);
- assert.eq("admin", res.roles[1].db);
- assert.eq(0, res.roles[1].roles.length);
-
- res = testUserAdmin.runCommand({rolesInfo: 1});
- assert.eq(4, res.roles.length);
-
- res = testUserAdmin.runCommand({rolesInfo: 1, showBuiltinRoles: 1});
- assert.eq(10, res.roles.length);
- })();
+ jsTestLog("Testing rolesInfo");
+
+ var res = testUserAdmin.runCommand({rolesInfo: 'testRole1'});
+ assert.eq(1, res.roles.length);
+ assert.eq("testRole1", res.roles[0].role);
+ assert.eq("test", res.roles[0].db);
+ assert.eq(1, res.roles[0].roles.length);
+ assert.eq("read", res.roles[0].roles[0].role);
+
+ res = testUserAdmin.runCommand({rolesInfo: {role: 'testRole1', db: 'test'}});
+ assert.eq(1, res.roles.length);
+ assert.eq("testRole1", res.roles[0].role);
+ assert.eq("test", res.roles[0].db);
+ assert.eq(1, res.roles[0].roles.length);
+ assert.eq("read", res.roles[0].roles[0].role);
+
+ res =
+ testUserAdmin.runCommand({rolesInfo: ['testRole1', {role: 'adminRole', db: 'admin'}]});
+ assert.eq(2, res.roles.length);
+ assert.eq("testRole1", res.roles[0].role);
+ assert.eq("test", res.roles[0].db);
+ assert.eq(1, res.roles[0].roles.length);
+ assert.eq("read", res.roles[0].roles[0].role);
+ assert.eq("adminRole", res.roles[1].role);
+ assert.eq("admin", res.roles[1].db);
+ assert.eq(0, res.roles[1].roles.length);
+
+ res = testUserAdmin.runCommand({rolesInfo: 1});
+ assert.eq(4, res.roles.length);
+
+ res = testUserAdmin.runCommand({rolesInfo: 1, showBuiltinRoles: 1});
+ assert.eq(10, res.roles.length);
+ })();
(function testDropRole() {
- jsTestLog("Testing dropRole");
+ jsTestLog("Testing dropRole");
- testUserAdmin.grantRolesToUser('testUser', ['testRole4']);
+ testUserAdmin.grantRolesToUser('testUser', ['testRole4']);
- assert.doesNotThrow(function() {db.foo.findOne();});
- assert.writeOK(db.foo.insert({ a: 1 }));
- assert.eq(8, db.foo.count());
+ assert.doesNotThrow(function() {
+ db.foo.findOne();
+ });
+ assert.writeOK(db.foo.insert({a: 1}));
+ assert.eq(8, db.foo.count());
- assert.commandWorked(testUserAdmin.runCommand({dropRole: 'testRole2'}));
+ assert.commandWorked(testUserAdmin.runCommand({dropRole: 'testRole2'}));
- assert.doesNotThrow(function() {db.foo.findOne();});
- hasAuthzError(db.foo.insert({ a: 1 }));
- assert.eq(8, db.foo.count());
+ assert.doesNotThrow(function() {
+ db.foo.findOne();
+ });
+ hasAuthzError(db.foo.insert({a: 1}));
+ assert.eq(8, db.foo.count());
- assert.eq(3, testUserAdmin.getRoles().length);
- })();
+ assert.eq(3, testUserAdmin.getRoles().length);
+ })();
(function testDropAllRolesFromDatabase() {
- jsTestLog("Testing dropAllRolesFromDatabase");
+ jsTestLog("Testing dropAllRolesFromDatabase");
- assert.doesNotThrow(function() {db.foo.findOne();});
- assert.eq(3, testUserAdmin.getRoles().length);
+ assert.doesNotThrow(function() {
+ db.foo.findOne();
+ });
+ assert.eq(3, testUserAdmin.getRoles().length);
- assert.commandWorked(testUserAdmin.runCommand({dropAllRolesFromDatabase: 1}));
+ assert.commandWorked(testUserAdmin.runCommand({dropAllRolesFromDatabase: 1}));
- assert.throws(function() {db.foo.findOne();});
- assert.eq(0, testUserAdmin.getRoles().length);
- })();
+ assert.throws(function() {
+ db.foo.findOne();
+ });
+ assert.eq(0, testUserAdmin.getRoles().length);
+ })();
}
jsTest.log('Test standalone');
-var conn = MongoRunner.runMongod({ auth: '', useHostname: false });
+var conn = MongoRunner.runMongod({auth: '', useHostname: false});
runTest(conn);
MongoRunner.stopMongod(conn.port);
jsTest.log('Test sharding');
-var st = new ShardingTest({ shards: 2, config: 3, keyFile: 'jstests/libs/key1', useHostname: false });
+var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1', useHostname: false});
runTest(st.s);
st.stop();
diff --git a/jstests/auth/role_management_commands_edge_cases.js b/jstests/auth/role_management_commands_edge_cases.js
index 0415c9b20dc..122221ce234 100644
--- a/jstests/auth/role_management_commands_edge_cases.js
+++ b/jstests/auth/role_management_commands_edge_cases.js
@@ -10,376 +10,381 @@ function runTest(conn) {
admin.auth('userAdmin', 'pwd');
(function testCreateRole() {
- jsTestLog("Testing createRole");
-
- // Role with no privs
- db.createRole({role: "role1", roles: [], privileges: []});
-
- // Role with duplicate other roles
- db.createRole({role: "role2",
- roles: ['read', 'read', 'role1', 'role1'],
- privileges: []});
- assert.eq(2, db.getRole("role2").roles.length);
-
- // Role with duplicate privileges
- db.createRole({role: "role3",
- roles: ['role2'],
- privileges: [{resource: {db: 'test', collection: ''},
- actions:['find']},
- {resource: {db: 'test', collection: ''},
- actions:['find']}]});
- assert.eq(1, db.getRole("role3", {showPrivileges: true}).privileges.length);
-
- // Try to create role that already exists.
- assert.throws(function() {
- db.createRole({role: 'role2', roles:[], privileges:[]});
- });
-
-
- // Try to create role with no name
- assert.throws(function() {
- db.createRole({role: '', roles:[], privileges:[]});
- });
-
- // Try to create a role the wrong types for the arguments
- assert.throws(function() {
- db.createRole({role: 1, roles:[], privileges:[]});
- });
- assert.throws(function() {
- db.createRole({role: ["role4", "role5"],
- roles:[],
- privileges:[]});
- });
- assert.throws(function() {
- db.createRole({role: 'role6', roles:1, privileges:[]});
- });
- assert.throws(function() {
- db.createRole({role: 'role7', roles:[], privileges:1});
- });
-
- // Try to create a role with an invalid privilege
- assert.throws(function() {
- db.createRole({role: 'role8',
- roles:[],
- privileges:[{resource: {},
- actions:['find']}]});
- });
- assert.throws(function() {
- db.createRole({role: 'role9',
- roles:[],
- privileges:[{resource: {db:"test", collection:"foo"},
- actions:[]}]});
- });
- assert.throws(function() {
- db.createRole({role: 'role10',
- roles:[],
- privileges:[{resource: {db:"test"},
- actions:['find']}]});
- });
- assert.throws(function() {
- db.createRole({role: 'role11',
- roles:[],
- privileges:[{resource: {collection:"foo"},
- actions:['find']}]});
- });
- assert.throws(function() {
- db.createRole({role: 'role12',
- roles:[],
- privileges:[{resource: {anyResource: false},
- actions:['find']}]});
- });
- assert.throws(function() {
- db.createRole({role: 'role13',
- roles:[],
- privileges:[{resource: {db:"test",
- collection:"foo",
- cluster:true},
- actions:['find']}]});
- });
- assert.throws(function() {
- db.createRole({role: 'role14',
- roles:[],
- privileges:[{resource: {cluster: false},
- actions:['find']}]});
- });
- assert.throws(function() {
- db.createRole({role: 'role15',
- roles:[],
- privileges:[{resource: {db:"test", collection:"$cmd"},
- actions:['find']}]});
- });
- assert.throws(function() {
- db.createRole({role: 'role16',
- roles:[],
- privileges:[{resource: {db:"test", collection:"foo"},
- actions:['fakeAction']}]});
- });
-
- // Try to create role containing itself in its roles array
- assert.throws(function() {
- db.createRole({role: 'role17',
- roles:['role10'],
- privileges:[]});
- });
-
-
- assert.eq(3, db.getRoles().length);
- })();
+ jsTestLog("Testing createRole");
+
+ // Role with no privs
+ db.createRole({role: "role1", roles: [], privileges: []});
+
+ // Role with duplicate other roles
+ db.createRole({role: "role2", roles: ['read', 'read', 'role1', 'role1'], privileges: []});
+ assert.eq(2, db.getRole("role2").roles.length);
+
+ // Role with duplicate privileges
+ db.createRole({
+ role: "role3",
+ roles: ['role2'],
+ privileges: [
+ {resource: {db: 'test', collection: ''}, actions: ['find']},
+ {resource: {db: 'test', collection: ''}, actions: ['find']}
+ ]
+ });
+ assert.eq(1, db.getRole("role3", {showPrivileges: true}).privileges.length);
+
+ // Try to create role that already exists.
+ assert.throws(function() {
+ db.createRole({role: 'role2', roles: [], privileges: []});
+ });
+
+ // Try to create role with no name
+ assert.throws(function() {
+ db.createRole({role: '', roles: [], privileges: []});
+ });
+
+ // Try to create a role the wrong types for the arguments
+ assert.throws(function() {
+ db.createRole({role: 1, roles: [], privileges: []});
+ });
+ assert.throws(function() {
+ db.createRole({role: ["role4", "role5"], roles: [], privileges: []});
+ });
+ assert.throws(function() {
+ db.createRole({role: 'role6', roles: 1, privileges: []});
+ });
+ assert.throws(function() {
+ db.createRole({role: 'role7', roles: [], privileges: 1});
+ });
+
+ // Try to create a role with an invalid privilege
+ assert.throws(function() {
+ db.createRole(
+ {role: 'role8', roles: [], privileges: [{resource: {}, actions: ['find']}]});
+ });
+ assert.throws(function() {
+ db.createRole({
+ role: 'role9',
+ roles: [],
+ privileges: [{resource: {db: "test", collection: "foo"}, actions: []}]
+ });
+ });
+ assert.throws(function() {
+ db.createRole({
+ role: 'role10',
+ roles: [],
+ privileges: [{resource: {db: "test"}, actions: ['find']}]
+ });
+ });
+ assert.throws(function() {
+ db.createRole({
+ role: 'role11',
+ roles: [],
+ privileges: [{resource: {collection: "foo"}, actions: ['find']}]
+ });
+ });
+ assert.throws(function() {
+ db.createRole({
+ role: 'role12',
+ roles: [],
+ privileges: [{resource: {anyResource: false}, actions: ['find']}]
+ });
+ });
+ assert.throws(function() {
+ db.createRole({
+ role: 'role13',
+ roles: [],
+ privileges: [{
+ resource: {db: "test", collection: "foo", cluster: true},
+ actions: ['find']
+ }]
+ });
+ });
+ assert.throws(function() {
+ db.createRole({
+ role: 'role14',
+ roles: [],
+ privileges: [{resource: {cluster: false}, actions: ['find']}]
+ });
+ });
+ assert.throws(function() {
+ db.createRole({
+ role: 'role15',
+ roles: [],
+ privileges: [{resource: {db: "test", collection: "$cmd"}, actions: ['find']}]
+ });
+ });
+ assert.throws(function() {
+ db.createRole({
+ role: 'role16',
+ roles: [],
+ privileges:
+ [{resource: {db: "test", collection: "foo"}, actions: ['fakeAction']}]
+ });
+ });
+
+ // Try to create role containing itself in its roles array
+ assert.throws(function() {
+ db.createRole({role: 'role17', roles: ['role10'], privileges: []});
+ });
+
+ assert.eq(3, db.getRoles().length);
+ })();
(function testUpdateRole() {
- jsTestLog("Testing updateRole");
-
- // Try to update role that doesn't exist
- assert.throws(function() {
- db.updateRole("fakeRole", {roles: []});
- });
-
- // Try to update role to have a role that doesn't exist
- assert.throws(function() {
- db.updateRole("role1", {roles: ['fakeRole']});
- });
-
- // Try to update a built-in role
- assert.throws(function() {
- db.updateRole("read", {roles: ['readWrite']});
- });
-
- // Try to create a cycle in the role graph
- assert.throws(function() {
- db.updateRole("role1", {roles: ['role1']});
- });
- assert.eq(0, db.getRole('role1').roles.length);
-
- assert.throws(function() {
- db.updateRole("role1", {roles: ['role2']});
- });
- assert.eq(0, db.getRole('role1').roles.length);
-
- assert.throws(function() {
- db.updateRole("role1", {roles: ['role3']});
- });
- assert.eq(0, db.getRole('role1').roles.length);
- })();
+ jsTestLog("Testing updateRole");
+
+ // Try to update role that doesn't exist
+ assert.throws(function() {
+ db.updateRole("fakeRole", {roles: []});
+ });
+
+ // Try to update role to have a role that doesn't exist
+ assert.throws(function() {
+ db.updateRole("role1", {roles: ['fakeRole']});
+ });
+
+ // Try to update a built-in role
+ assert.throws(function() {
+ db.updateRole("read", {roles: ['readWrite']});
+ });
+
+ // Try to create a cycle in the role graph
+ assert.throws(function() {
+ db.updateRole("role1", {roles: ['role1']});
+ });
+ assert.eq(0, db.getRole('role1').roles.length);
+
+ assert.throws(function() {
+ db.updateRole("role1", {roles: ['role2']});
+ });
+ assert.eq(0, db.getRole('role1').roles.length);
+
+ assert.throws(function() {
+ db.updateRole("role1", {roles: ['role3']});
+ });
+ assert.eq(0, db.getRole('role1').roles.length);
+ })();
(function testGrantRolesToRole() {
- jsTestLog("Testing grantRolesToRole");
-
- // Grant role1 to role2 even though role2 already has role1
- db.grantRolesToRole("role2", ['role1']);
- assert.eq(2, db.getRole('role2').roles.length);
-
- // Try to grant a role that doesn't exist
- assert.throws(function() {
- db.grantRolesToRole("role1", ['fakeRole']);
- });
-
- // Try to grant *to* a role that doesn't exist
- assert.throws(function() {
- db.grantRolesToRole("fakeRole", ['role1']);
- });
-
- // Must specify at least 1 role
- assert.throws(function() {
- db.grantRolesToRole("role1", []);
- });
-
- // Try to grant to a built-in role
- assert.throws(function() {
- db.grantRolesToRole("read", ['role1']);
- });
-
- // Try to create a cycle in the role graph
- assert.throws(function() {
- db.grantRolesToRole("role1", ['role1']);
- });
- assert.eq(0, db.getRole('role1').roles.length);
-
- assert.throws(function() {
- db.grantRolesToRole("role1", ['role2']);
- });
- assert.eq(0, db.getRole('role1').roles.length);
-
- assert.throws(function() {
- db.grantRolesToRole("role1", ['role3']);
- });
- assert.eq(0, db.getRole('role1').roles.length);
- })();
+ jsTestLog("Testing grantRolesToRole");
+
+ // Grant role1 to role2 even though role2 already has role1
+ db.grantRolesToRole("role2", ['role1']);
+ assert.eq(2, db.getRole('role2').roles.length);
+
+ // Try to grant a role that doesn't exist
+ assert.throws(function() {
+ db.grantRolesToRole("role1", ['fakeRole']);
+ });
+
+ // Try to grant *to* a role that doesn't exist
+ assert.throws(function() {
+ db.grantRolesToRole("fakeRole", ['role1']);
+ });
+
+ // Must specify at least 1 role
+ assert.throws(function() {
+ db.grantRolesToRole("role1", []);
+ });
+
+ // Try to grant to a built-in role
+ assert.throws(function() {
+ db.grantRolesToRole("read", ['role1']);
+ });
+
+ // Try to create a cycle in the role graph
+ assert.throws(function() {
+ db.grantRolesToRole("role1", ['role1']);
+ });
+ assert.eq(0, db.getRole('role1').roles.length);
+
+ assert.throws(function() {
+ db.grantRolesToRole("role1", ['role2']);
+ });
+ assert.eq(0, db.getRole('role1').roles.length);
+
+ assert.throws(function() {
+ db.grantRolesToRole("role1", ['role3']);
+ });
+ assert.eq(0, db.getRole('role1').roles.length);
+ })();
(function testRevokeRolesFromRole() {
- jsTestLog("Testing revokeRolesFromRole");
+ jsTestLog("Testing revokeRolesFromRole");
- // Try to revoke a role that doesn't exist
- // Should not error but should do nothing.
- assert.doesNotThrow(function() {
- db.revokeRolesFromRole("role2", ['fakeRole']);
- });
+ // Try to revoke a role that doesn't exist
+ // Should not error but should do nothing.
+ assert.doesNotThrow(function() {
+ db.revokeRolesFromRole("role2", ['fakeRole']);
+ });
- // Try to revoke role3 from role2 even though role2 does not contain role3.
- // Should not error but should do nothing.
- assert.doesNotThrow(function() {
- db.revokeRolesFromRole("role2", ['role3']);
- });
- assert.eq(2, db.getRole("role2").roles.length);
+ // Try to revoke role3 from role2 even though role2 does not contain role3.
+ // Should not error but should do nothing.
+ assert.doesNotThrow(function() {
+ db.revokeRolesFromRole("role2", ['role3']);
+ });
+ assert.eq(2, db.getRole("role2").roles.length);
- // Must revoke something
- assert.throws(function() {
- db.revokeRolesFromRole("role2", []);
- });
+ // Must revoke something
+ assert.throws(function() {
+ db.revokeRolesFromRole("role2", []);
+ });
- // Try to remove from built-in role
- assert.throws(function() {
- db.revokeRolesFromRole("readWrite", ['read']);
- });
+ // Try to remove from built-in role
+ assert.throws(function() {
+ db.revokeRolesFromRole("readWrite", ['read']);
+ });
- })();
+ })();
(function testGrantPrivilegesToRole() {
- jsTestLog("Testing grantPrivilegesToRole");
-
- // Must grant something
- assert.throws(function() {
- db.grantPrivilegesToRole("role1", []);
- });
-
- var basicPriv = {resource: {db: 'test', collection:""}, actions: ['find']};
-
- // Invalid first argument
- assert.throws(function() {
- db.grantPrivilegesToRole(["role1", "role2"], [basicPriv]);
- });
-
- // Try to grant to role that doesn't exist
- assert.throws(function() {
- db.grantPrivilegesToRole("fakeRole", [basicPriv]);
- });
-
- // Test with invalid privileges
- var badPrivs = [];
- badPrivs.push("find");
- badPrivs.push({resource: {db: 'test', collection:""}, actions: ['fakeAction']});
- badPrivs.push({resource: {db: ['test'], collection:""}, actions: ['find']});
- badPrivs.push({resource: {db: 'test', collection:""}});
- badPrivs.push({actions: ['find']});
- badPrivs.push({resource: {db: 'test', collection:""}, actions: []});
- badPrivs.push({resource: {db: 'test'}, actions: ['find']});
- badPrivs.push({resource: {collection: 'test'}, actions: ['find']});
- badPrivs.push({resource: {}, actions: ['find']});
- badPrivs.push({resource: {db: 'test', collection:"", cluster:true }, actions: ['find']});
-
- for (var i = 0; i < badPrivs.length; i++) {
- assert.throws(function() {
- db.grantPrivilegesToRole("role1", [badPrivs[i]]);
- });
- }
-
- assert.eq(0, db.getRole('role1', {showPrivileges: true}).privileges.length);
- })();
+ jsTestLog("Testing grantPrivilegesToRole");
+
+ // Must grant something
+ assert.throws(function() {
+ db.grantPrivilegesToRole("role1", []);
+ });
+
+ var basicPriv = {
+ resource: {db: 'test', collection: ""},
+ actions: ['find']
+ };
+
+ // Invalid first argument
+ assert.throws(function() {
+ db.grantPrivilegesToRole(["role1", "role2"], [basicPriv]);
+ });
+
+ // Try to grant to role that doesn't exist
+ assert.throws(function() {
+ db.grantPrivilegesToRole("fakeRole", [basicPriv]);
+ });
+
+ // Test with invalid privileges
+ var badPrivs = [];
+ badPrivs.push("find");
+ badPrivs.push({resource: {db: 'test', collection: ""}, actions: ['fakeAction']});
+ badPrivs.push({resource: {db: ['test'], collection: ""}, actions: ['find']});
+ badPrivs.push({resource: {db: 'test', collection: ""}});
+ badPrivs.push({actions: ['find']});
+ badPrivs.push({resource: {db: 'test', collection: ""}, actions: []});
+ badPrivs.push({resource: {db: 'test'}, actions: ['find']});
+ badPrivs.push({resource: {collection: 'test'}, actions: ['find']});
+ badPrivs.push({resource: {}, actions: ['find']});
+ badPrivs.push({resource: {db: 'test', collection: "", cluster: true}, actions: ['find']});
+
+ for (var i = 0; i < badPrivs.length; i++) {
+ assert.throws(function() {
+ db.grantPrivilegesToRole("role1", [badPrivs[i]]);
+ });
+ }
+
+ assert.eq(0, db.getRole('role1', {showPrivileges: true}).privileges.length);
+ })();
(function testRevokePrivilegesFromRole() {
- jsTestLog("Testing revokePrivilegesFromRole");
-
- // Try to revoke a privilege the role doesn't have
- // Should not error but should do nothing.
- assert.doesNotThrow(function() {
- db.revokePrivilegesFromRole("role3",
- [{resource: {db: "test",
- collection: "foobar"},
- actions: ["insert"]}]);
- });
- assert.eq(0, db.getRole("role2", {showPrivileges: true}).privileges.length);
-
- // Must revoke something
- assert.throws(function() {
- db.revokePrivilegesFromRole("role3", []);
- });
-
- // Try to remove from built-in role
- assert.throws(function() {
- db.revokePrivilegesFromRole("readWrite", [{resource: {db: 'test',
- collection: ''},
- actions: ['find']}]);
- });
-
- var basicPriv = {resource: {db: 'test', collection:""}, actions: ['find']};
-
- // Invalid first argument
- assert.throws(function() {
- db.revokePrivilegesFromRole(["role3", "role2"], [basicPriv]);
- });
-
- // Try to revoke from role that doesn't exist
- assert.throws(function() {
- db.revokePrivilegesToRole("fakeRole", [basicPriv]);
- });
-
- // Test with invalid privileges
- var badPrivs = [];
- badPrivs.push("find");
- badPrivs.push({resource: {db: 'test', collection:""}, actions: ['fakeAction']});
- badPrivs.push({resource: {db: ['test'], collection:""}, actions: ['find']});
- badPrivs.push({resource: {db: 'test', collection:""}});
- badPrivs.push({actions: ['find']});
- badPrivs.push({resource: {db: 'test', collection:""}, actions: []});
- badPrivs.push({resource: {db: 'test'}, actions: ['find']});
- badPrivs.push({resource: {collection: 'test'}, actions: ['find']});
- badPrivs.push({resource: {}, actions: ['find']});
- badPrivs.push({resource: {db: 'test', collection:"", cluster:true }, actions: ['find']});
-
- for (var i = 0; i < badPrivs.length; i++) {
- assert.throws(function() {
- db.revokePrivilegesFromRole("role3", [badPrivs[i]]);
- });
- }
-
- assert.eq(1, db.getRole('role3', {showPrivileges: true}).privileges.length);
- })();
+ jsTestLog("Testing revokePrivilegesFromRole");
+
+ // Try to revoke a privilege the role doesn't have
+ // Should not error but should do nothing.
+ assert.doesNotThrow(function() {
+ db.revokePrivilegesFromRole(
+ "role3", [{resource: {db: "test", collection: "foobar"}, actions: ["insert"]}]);
+ });
+ assert.eq(0, db.getRole("role2", {showPrivileges: true}).privileges.length);
+
+ // Must revoke something
+ assert.throws(function() {
+ db.revokePrivilegesFromRole("role3", []);
+ });
+
+ // Try to remove from built-in role
+ assert.throws(function() {
+ db.revokePrivilegesFromRole(
+ "readWrite", [{resource: {db: 'test', collection: ''}, actions: ['find']}]);
+ });
+
+ var basicPriv = {
+ resource: {db: 'test', collection: ""},
+ actions: ['find']
+ };
+
+ // Invalid first argument
+ assert.throws(function() {
+ db.revokePrivilegesFromRole(["role3", "role2"], [basicPriv]);
+ });
+
+ // Try to revoke from role that doesn't exist
+ assert.throws(function() {
+        db.revokePrivilegesFromRole("fakeRole", [basicPriv]);
+ });
+
+ // Test with invalid privileges
+ var badPrivs = [];
+ badPrivs.push("find");
+ badPrivs.push({resource: {db: 'test', collection: ""}, actions: ['fakeAction']});
+ badPrivs.push({resource: {db: ['test'], collection: ""}, actions: ['find']});
+ badPrivs.push({resource: {db: 'test', collection: ""}});
+ badPrivs.push({actions: ['find']});
+ badPrivs.push({resource: {db: 'test', collection: ""}, actions: []});
+ badPrivs.push({resource: {db: 'test'}, actions: ['find']});
+ badPrivs.push({resource: {collection: 'test'}, actions: ['find']});
+ badPrivs.push({resource: {}, actions: ['find']});
+ badPrivs.push({resource: {db: 'test', collection: "", cluster: true}, actions: ['find']});
+
+ for (var i = 0; i < badPrivs.length; i++) {
+ assert.throws(function() {
+ db.revokePrivilegesFromRole("role3", [badPrivs[i]]);
+ });
+ }
+
+ assert.eq(1, db.getRole('role3', {showPrivileges: true}).privileges.length);
+ })();
(function testRolesInfo() {
- jsTestLog("Testing rolesInfo");
+ jsTestLog("Testing rolesInfo");
- // Try to get role that does not exist
- assert.eq(null, db.getRole('fakeRole'));
+ // Try to get role that does not exist
+ assert.eq(null, db.getRole('fakeRole'));
- // Pass wrong type for role name
- assert.throws(function() {
- db.getRole(5);
- });
+ // Pass wrong type for role name
+ assert.throws(function() {
+ db.getRole(5);
+ });
- assert.throws(function() {
- db.getRole([]);
- });
+ assert.throws(function() {
+ db.getRole([]);
+ });
- assert.throws(function() {
- db.getRole(['role1', 'role2']);
- });
- })();
+ assert.throws(function() {
+ db.getRole(['role1', 'role2']);
+ });
+ })();
(function testDropRole() {
- jsTestLog("Testing dropRole");
+ jsTestLog("Testing dropRole");
- // Try to drop a role that doesn't exist
- // Should not error but should do nothing
- assert.doesNotThrow(function() {
- db.dropRole('fakeRole');
- });
+ // Try to drop a role that doesn't exist
+ // Should not error but should do nothing
+ assert.doesNotThrow(function() {
+ db.dropRole('fakeRole');
+ });
- // Try to drop a built-in role
- assert.throws(function() {
- db.dropRole('read');
- });
+ // Try to drop a built-in role
+ assert.throws(function() {
+ db.dropRole('read');
+ });
- assert.eq(3, db.getRoles().length);
- })();
+ assert.eq(3, db.getRoles().length);
+ })();
// dropAllRolesFromDatabase ignores its arguments, so there's nothing to test for it.
}
jsTest.log('Test standalone');
-var conn = MongoRunner.runMongod({ auth: '' });
+var conn = MongoRunner.runMongod({auth: ''});
runTest(conn);
MongoRunner.stopMongod(conn.port);
jsTest.log('Test sharding');
-var st = new ShardingTest({ shards: 2, config: 3, keyFile: 'jstests/libs/key1' });
+var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
runTest(st.s);
st.stop();
diff --git a/jstests/auth/secondary_invalidation.js b/jstests/auth/secondary_invalidation.js
index 4809c3c30fc..3752a3aa678 100644
--- a/jstests/auth/secondary_invalidation.js
+++ b/jstests/auth/secondary_invalidation.js
@@ -1,11 +1,11 @@
/**
- * Test that user modifications on replica set primaries
+ * Test that user modifications on replica set primaries
* will invalidate cached user credentials on secondaries
*/
var NUM_NODES = 3;
-var rsTest = new ReplSetTest({ nodes: NUM_NODES });
-rsTest.startSet({ oplogSize: 10, keyFile: 'jstests/libs/key1' });
+var rsTest = new ReplSetTest({nodes: NUM_NODES});
+rsTest.startSet({oplogSize: 10, keyFile: 'jstests/libs/key1'});
rsTest.initiate();
rsTest.awaitSecondaryNodes();
@@ -14,17 +14,20 @@ var secondary = rsTest.getSecondary();
var admin = primary.getDB('admin');
// Setup initial data
-admin.createUser({ user:'admin', pwd: 'password', roles: jsTest.adminUserRoles });
+admin.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
admin.auth('admin', 'password');
-primary.getDB('foo').createUser({ user: 'foo', pwd: 'foopwd', roles: [] },
- { w: NUM_NODES });
+primary.getDB('foo').createUser({user: 'foo', pwd: 'foopwd', roles: []}, {w: NUM_NODES});
secondaryFoo = secondary.getDB('foo');
secondaryFoo.auth('foo', 'foopwd');
-assert.throws(function (){secondaryFoo.col.findOne();}, [], "Secondary read worked without permissions");
+assert.throws(function() {
+ secondaryFoo.col.findOne();
+}, [], "Secondary read worked without permissions");
-primary.getDB('foo').updateUser('foo', {roles: jsTest.basicUserRoles}, { w: NUM_NODES });
-assert.doesNotThrow(function (){secondaryFoo.col.findOne();}, [], "Secondary read did not work with permissions");
+primary.getDB('foo').updateUser('foo', {roles: jsTest.basicUserRoles}, {w: NUM_NODES});
+assert.doesNotThrow(function() {
+ secondaryFoo.col.findOne();
+}, [], "Secondary read did not work with permissions");
rsTest.stopSet();
diff --git a/jstests/auth/server-4892.js b/jstests/auth/server-4892.js
index 16e4b8b6f64..ef0c95c868e 100644
--- a/jstests/auth/server-4892.js
+++ b/jstests/auth/server-4892.js
@@ -26,9 +26,9 @@ function withMongod(extraMongodArgs, operation) {
var mongod = MongoRunner.runMongod(Object.merge(mongodCommonArgs, extraMongodArgs));
try {
- operation( mongod );
+ operation(mongod);
} finally {
- MongoRunner.stopMongod( mongod.port );
+ MongoRunner.stopMongod(mongod.port);
}
}
@@ -37,50 +37,53 @@ function withMongod(extraMongodArgs, operation) {
* cursors on the server.
*/
function expectNumLiveCursors(mongod, expectedNumLiveCursors) {
- var conn = new Mongo( mongod.host );
- var db = mongod.getDB( 'admin' );
- db.auth( 'admin', 'admin' );
+ var conn = new Mongo(mongod.host);
+ var db = mongod.getDB('admin');
+ db.auth('admin', 'admin');
var actualNumLiveCursors = db.serverStatus().metrics.cursor.open.total;
- assert( actualNumLiveCursors == expectedNumLiveCursors,
- "actual num live cursors (" + actualNumLiveCursors + ") != exptected ("
- + expectedNumLiveCursors + ")");
+ assert(actualNumLiveCursors == expectedNumLiveCursors,
+           "actual num live cursors (" + actualNumLiveCursors + ") != expected (" +
+ expectedNumLiveCursors + ")");
}
-withMongod({noauth: ""}, function setupTest(mongod) {
- var admin, somedb, conn;
- conn = new Mongo( mongod.host );
- admin = conn.getDB( 'admin' );
- somedb = conn.getDB( 'somedb' );
- admin.createUser({user: 'admin', pwd: 'admin', roles: jsTest.adminUserRoles});
- admin.auth('admin', 'admin');
- somedb.createUser({user: 'frim', pwd: 'fram', roles: jsTest.basicUserRoles});
- somedb.data.drop();
- for (var i = 0; i < 10; ++i) {
- assert.writeOK(somedb.data.insert( { val: i } ));
- }
- admin.logout();
-} );
-
-withMongod({auth: ""}, function runTest(mongod) {
- var conn = new Mongo( mongod.host );
- var somedb = conn.getDB( 'somedb' );
- somedb.auth('frim', 'fram');
+withMongod({noauth: ""},
+ function setupTest(mongod) {
+ var admin, somedb, conn;
+ conn = new Mongo(mongod.host);
+ admin = conn.getDB('admin');
+ somedb = conn.getDB('somedb');
+ admin.createUser({user: 'admin', pwd: 'admin', roles: jsTest.adminUserRoles});
+ admin.auth('admin', 'admin');
+ somedb.createUser({user: 'frim', pwd: 'fram', roles: jsTest.basicUserRoles});
+ somedb.data.drop();
+ for (var i = 0; i < 10; ++i) {
+ assert.writeOK(somedb.data.insert({val: i}));
+ }
+ admin.logout();
+ });
- expectNumLiveCursors( mongod, 0 );
+withMongod({auth: ""},
+ function runTest(mongod) {
+ var conn = new Mongo(mongod.host);
+ var somedb = conn.getDB('somedb');
+ somedb.auth('frim', 'fram');
- var cursor = somedb.data.find({}, {'_id': 1}).batchSize(1);
- cursor.next();
- expectNumLiveCursors( mongod, 1 );
+ expectNumLiveCursors(mongod, 0);
- cursor = null;
- // NOTE(schwerin): We assume that after setting cursor = null, there are no remaining references
- // to the cursor, and that gc() will deterministically garbage collect it.
- gc();
+ var cursor = somedb.data.find({}, {'_id': 1}).batchSize(1);
+ cursor.next();
+ expectNumLiveCursors(mongod, 1);
- // NOTE(schwerin): dbKillCursors gets piggybacked on subsequent messages on the connection, so we
- // have to force a message to the server.
- somedb.data.findOne();
+ cursor = null;
+ // NOTE(schwerin): We assume that after setting cursor = null, there are no remaining
+ // references
+ // to the cursor, and that gc() will deterministically garbage collect it.
+ gc();
- expectNumLiveCursors( mongod, 0 );
-});
+ // NOTE(schwerin): dbKillCursors gets piggybacked on subsequent messages on the
+ // connection, so we
+ // have to force a message to the server.
+ somedb.data.findOne();
+ expectNumLiveCursors(mongod, 0);
+ });
diff --git a/jstests/auth/show_log_auth.js b/jstests/auth/show_log_auth.js
index be7d17cd7bc..b318e0536ad 100644
--- a/jstests/auth/show_log_auth.js
+++ b/jstests/auth/show_log_auth.js
@@ -3,21 +3,23 @@
var baseName = "jstests_show_log_auth";
var m = MongoRunner.runMongod({auth: "", bind_ip: "127.0.0.1", nojournal: "", smallfiles: ""});
-var db = m.getDB( "admin" );
+var db = m.getDB("admin");
-db.createUser({user: "admin" , pwd: "pass", roles: jsTest.adminUserRoles});
+db.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles});
function assertStartsWith(s, prefix) {
assert.eq(s.substr(0, prefix.length), prefix);
}
-assertStartsWith( print.captureAllOutput( function () {
+assertStartsWith(print.captureAllOutput(function() {
shellHelper.show('logs');
-} ).output[0], 'Error while trying to show logs');
+}).output[0],
+ 'Error while trying to show logs');
-assertStartsWith( print.captureAllOutput( function () {
+assertStartsWith(print.captureAllOutput(function() {
shellHelper.show('log ' + baseName);
-} ).output[0], 'Error while trying to show ' + baseName + ' log');
+}).output[0],
+ 'Error while trying to show ' + baseName + ' log');
-db.auth( "admin" , "pass" );
+db.auth("admin", "pass");
db.shutdownServer();
diff --git a/jstests/auth/system_user_privileges.js b/jstests/auth/system_user_privileges.js
index 1a1c1151be8..2e49a9d5d9a 100644
--- a/jstests/auth/system_user_privileges.js
+++ b/jstests/auth/system_user_privileges.js
@@ -17,16 +17,17 @@
// Runs the "count" command on a database in a way that returns the result document, for easier
// inspection of the errmsg.
function runCountCommand(conn, dbName, collectionName) {
- return conn.getDB(dbName).runCommand({ count: collectionName });
+ return conn.getDB(dbName).runCommand({count: collectionName});
}
// Asserts that on the given "conn", "dbName"."collectionName".count() fails as unauthorized.
function assertCountUnauthorized(conn, dbName, collectionName) {
- assert.eq(runCountCommand(conn, dbName, collectionName).code, 13,
+ assert.eq(runCountCommand(conn, dbName, collectionName).code,
+ 13,
"On " + dbName + "." + collectionName);
}
- var conn = MongoRunner.runMongod({ smallfiles: "", auth: "" });
+ var conn = MongoRunner.runMongod({smallfiles: "", auth: ""});
var admin = conn.getDB('admin');
var test = conn.getDB('test');
@@ -35,7 +36,7 @@
//
// Preliminary set up.
//
- admin.createUser({user:'admin', pwd: 'a', roles: jsTest.adminUserRoles});
+ admin.createUser({user: 'admin', pwd: 'a', roles: jsTest.adminUserRoles});
admin.auth('admin', 'a');
//
@@ -57,7 +58,6 @@
test.foo.insert({_id: 2});
local.foo.insert({_id: 3});
-
admin.logout();
assertCountUnauthorized(conn, "admin", "foo");
assertCountUnauthorized(conn, "local", "foo");
@@ -99,4 +99,3 @@
assertCountUnauthorized(conn, "test", "foo");
})();
-
diff --git a/jstests/auth/user_defined_roles.js b/jstests/auth/user_defined_roles.js
index 25eb885366c..0190ad9385e 100644
--- a/jstests/auth/user_defined_roles.js
+++ b/jstests/auth/user_defined_roles.js
@@ -12,21 +12,19 @@ function runTest(conn) {
conn.getDB('admin').createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
conn.getDB('admin').auth('admin', 'pwd');
- conn.getDB('admin').createUser({user: 'userAdmin',
- pwd: 'pwd',
- roles: ['userAdminAnyDatabase']});
+ conn.getDB('admin')
+ .createUser({user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
conn.getDB('admin').logout();
var userAdminConn = new Mongo(conn.host);
var adminUserAdmin = userAdminConn.getDB('admin');
adminUserAdmin.auth('userAdmin', 'pwd');
- adminUserAdmin.createRole({role: 'adminRole', privileges:[], roles:[]});
+ adminUserAdmin.createRole({role: 'adminRole', privileges: [], roles: []});
var testUserAdmin = userAdminConn.getDB('test');
- testUserAdmin.createRole({role: 'testRole1', privileges:[], roles:[]});
- testUserAdmin.createRole({role: 'testRole2', privileges:[], roles:['testRole1']});
- testUserAdmin.createUser({user: 'testUser',
- pwd: 'pwd',
- roles: ['testRole2', {role: 'adminRole', db: 'admin'}]});
+ testUserAdmin.createRole({role: 'testRole1', privileges: [], roles: []});
+ testUserAdmin.createRole({role: 'testRole2', privileges: [], roles: ['testRole1']});
+ testUserAdmin.createUser(
+ {user: 'testUser', pwd: 'pwd', roles: ['testRole2', {role: 'adminRole', db: 'admin'}]});
var testDB = conn.getDB('test');
assert(testDB.auth('testUser', 'pwd'));
@@ -39,66 +37,74 @@ function runTest(conn) {
// auth'd as 'testUser@test' - this is the connection that will be used to test how privilege
// enforcement works.
-
// test CRUD
- hasAuthzError(testDB.foo.insert({ a: 1 }));
- assert.throws(function() { testDB.foo.findOne();});
-
- testUserAdmin.grantPrivilegesToRole('testRole1', [{resource: {db: 'test', collection: ''},
- actions:['find']}]);
-
- hasAuthzError(testDB.foo.insert({ a: 1 }));
- assert.doesNotThrow(function() { testDB.foo.findOne();});
+ hasAuthzError(testDB.foo.insert({a: 1}));
+ assert.throws(function() {
+ testDB.foo.findOne();
+ });
+
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole1', [{resource: {db: 'test', collection: ''}, actions: ['find']}]);
+
+ hasAuthzError(testDB.foo.insert({a: 1}));
+ assert.doesNotThrow(function() {
+ testDB.foo.findOne();
+ });
assert.eq(0, testDB.foo.count());
assert.eq(0, testDB.foo.find().itcount());
- testUserAdmin.grantPrivilegesToRole('testRole1', [{resource: {db: 'test', collection: 'foo'},
- actions:['insert']}]);
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole1', [{resource: {db: 'test', collection: 'foo'}, actions: ['insert']}]);
- assert.writeOK(testDB.foo.insert({ a: 1 }));
+ assert.writeOK(testDB.foo.insert({a: 1}));
assert.eq(1, testDB.foo.findOne().a);
assert.eq(1, testDB.foo.count());
assert.eq(1, testDB.foo.find().itcount());
- hasAuthzError(testDB.foo.update({ a: 1 }, { $inc: { a: 1 }}));
+ hasAuthzError(testDB.foo.update({a: 1}, {$inc: {a: 1}}));
assert.eq(1, testDB.foo.findOne().a);
- hasAuthzError(testDB.bar.insert({ a: 1 }));
+ hasAuthzError(testDB.bar.insert({a: 1}));
assert.eq(0, testDB.bar.count());
- adminUserAdmin.grantPrivilegesToRole('adminRole', [{resource: {db: '', collection: 'foo'},
- actions:['update']}]);
- assert.writeOK(testDB.foo.update({ a: 1 }, { $inc: { a: 1 }}));
+ adminUserAdmin.grantPrivilegesToRole(
+ 'adminRole', [{resource: {db: '', collection: 'foo'}, actions: ['update']}]);
+ assert.writeOK(testDB.foo.update({a: 1}, {$inc: {a: 1}}));
assert.eq(2, testDB.foo.findOne().a);
- assert.writeOK(testDB.foo.update({ b: 1 }, { $inc: { b: 1 }}, true)); // upsert
+ assert.writeOK(testDB.foo.update({b: 1}, {$inc: {b: 1}}, true)); // upsert
assert.eq(2, testDB.foo.count());
assert.eq(2, testDB.foo.findOne({b: {$exists: true}}).b);
- hasAuthzError(testDB.foo.remove({ b: 2 }));
+ hasAuthzError(testDB.foo.remove({b: 2}));
assert.eq(2, testDB.foo.count());
- adminUserAdmin.grantPrivilegesToRole('adminRole', [{resource: {db: '', collection: ''},
- actions:['remove']}]);
- assert.writeOK(testDB.foo.remove({ b: 2 }));
+ adminUserAdmin.grantPrivilegesToRole(
+ 'adminRole', [{resource: {db: '', collection: ''}, actions: ['remove']}]);
+ assert.writeOK(testDB.foo.remove({b: 2}));
assert.eq(1, testDB.foo.count());
-
// Test revoking privileges
- testUserAdmin.revokePrivilegesFromRole('testRole1', [{resource: {db: 'test', collection: 'foo'},
- actions:['insert']}]);
- hasAuthzError(testDB.foo.insert({ a: 1 }));
+ testUserAdmin.revokePrivilegesFromRole(
+ 'testRole1', [{resource: {db: 'test', collection: 'foo'}, actions: ['insert']}]);
+ hasAuthzError(testDB.foo.insert({a: 1}));
assert.eq(1, testDB.foo.count());
- assert.writeOK(testDB.foo.update({ a: 2 }, { $inc: { a: 1 }}));
+ assert.writeOK(testDB.foo.update({a: 2}, {$inc: {a: 1}}));
assert.eq(3, testDB.foo.findOne({a: {$exists: true}}).a);
- hasAuthzError(testDB.foo.update({ c: 1 }, { $inc: { c: 1 }}, true)); // upsert should fail
+ hasAuthzError(testDB.foo.update({c: 1}, {$inc: {c: 1}}, true)); // upsert should fail
assert.eq(1, testDB.foo.count());
-
// Test changeOwnPassword/changeOwnCustomData
- assert.throws(function() { testDB.changeUserPassword('testUser', 'password'); });
- assert.throws(function() { testDB.updateUser('testUser', {customData: {zipCode: 10036}});});
+ assert.throws(function() {
+ testDB.changeUserPassword('testUser', 'password');
+ });
+ assert.throws(function() {
+ testDB.updateUser('testUser', {customData: {zipCode: 10036}});
+ });
assert.eq(null, testDB.getUser('testUser').customData);
- testUserAdmin.grantPrivilegesToRole('testRole1', [{resource: {db: 'test', collection: ''},
- actions:['changeOwnPassword',
- 'changeOwnCustomData']}]);
+ testUserAdmin.grantPrivilegesToRole(
+ 'testRole1',
+ [{
+ resource: {db: 'test', collection: ''},
+ actions: ['changeOwnPassword', 'changeOwnCustomData']
+ }]);
testDB.changeUserPassword('testUser', 'password');
assert(!testDB.auth('testUser', 'pwd'));
assert(testDB.auth('testUser', 'password'));
@@ -106,36 +112,42 @@ function runTest(conn) {
assert.eq(10036, testDB.getUser('testUser').customData.zipCode);
testUserAdmin.revokeRolesFromRole('testRole2', ['testRole1']);
- assert.throws(function() { testDB.changeUserPassword('testUser', 'pwd'); });
- assert.throws(function() { testDB.foo.findOne();});
- assert.throws(function() { testDB.updateUser('testUser', {customData: {zipCode: 10028}});});
+ assert.throws(function() {
+ testDB.changeUserPassword('testUser', 'pwd');
+ });
+ assert.throws(function() {
+ testDB.foo.findOne();
+ });
+ assert.throws(function() {
+ testDB.updateUser('testUser', {customData: {zipCode: 10028}});
+ });
assert.eq(10036, testDB.getUser('testUser').customData.zipCode);
// Test changeAnyPassword/changeAnyCustomData
- testUserAdmin.grantPrivilegesToRole('testRole2', [{resource: {db: 'test', collection: ''},
- actions: ['changePassword',
- 'changeCustomData']}]);
+ testUserAdmin.grantPrivilegesToRole('testRole2',
+ [{
+ resource: {db: 'test', collection: ''},
+ actions: ['changePassword', 'changeCustomData']
+ }]);
testDB.changeUserPassword('testUser', 'pwd');
assert(!testDB.auth('testUser', 'password'));
assert(testDB.auth('testUser', 'pwd'));
testDB.updateUser('testUser', {customData: {zipCode: 10028}});
assert.eq(10028, testDB.getUser('testUser').customData.zipCode);
-
// Test privileges on the cluster resource
- assert.commandFailed(testDB.runCommand({serverStatus:1}));
- adminUserAdmin.grantPrivilegesToRole('adminRole', [{resource: {cluster: true},
- actions:['serverStatus']}]);
+ assert.commandFailed(testDB.runCommand({serverStatus: 1}));
+ adminUserAdmin.grantPrivilegesToRole(
+ 'adminRole', [{resource: {cluster: true}, actions: ['serverStatus']}]);
assert.commandWorked(testDB.serverStatus());
-
}
jsTest.log('Test standalone');
-var conn = MongoRunner.runMongod({ auth: '' });
+var conn = MongoRunner.runMongod({auth: ''});
runTest(conn);
MongoRunner.stopMongod(conn.port);
jsTest.log('Test sharding');
-var st = new ShardingTest({ shards: 2, config: 3, keyFile: 'jstests/libs/key1' });
+var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
runTest(st.s);
st.stop();
diff --git a/jstests/auth/user_defined_roles_on_secondaries.js b/jstests/auth/user_defined_roles_on_secondaries.js
index 4ca2d14f651..959b76a3cae 100644
--- a/jstests/auth/user_defined_roles_on_secondaries.js
+++ b/jstests/auth/user_defined_roles_on_secondaries.js
@@ -36,193 +36,177 @@
(function() {
-var name = 'user_defined_roles_on_secondaries';
-var m0, m1;
-
-function assertListContainsRole(list, role, msg) {
- var i;
- for (i = 0; i < list.length; ++i) {
- if (list[i].role == role.role && list[i].db == role.db)
- return;
+ var name = 'user_defined_roles_on_secondaries';
+ var m0, m1;
+
+ function assertListContainsRole(list, role, msg) {
+ var i;
+ for (i = 0; i < list.length; ++i) {
+ if (list[i].role == role.role && list[i].db == role.db)
+ return;
+ }
+        doassert("Could not find value " + tojson(role) + " in " + tojson(list) +
+                 (msg ? ": " + msg : ""));
}
- doassert("Could not find value " + tojson(val) + " in " + tojson(list)
- (msg ? ": " + msg : ""));
-}
-
-//
-// Create a 1-node replicaset and add two roles, inheriting the built-in read role on db1.
-//
-// read
-// / \
+
+ //
+ // Create a 1-node replicaset and add two roles, inheriting the built-in read role on db1.
+ //
+ // read
+ // / \
// r1 r2
-//
-var rstest = new ReplSetTest({
- name: name,
- nodes: 1,
- nodeOptions: {}
-});
-
-rstest.startSet();
-rstest.initiate();
-
-m0 = rstest.nodes[0];
-
-m0.getDB("db1").createRole({
- role: "r1",
- roles: [ "read" ],
- privileges: [
- { resource: { db: "db1", collection: "system.users" }, actions: [ "find" ] }
- ]
-});
-
-m0.getDB("db1").createRole({
- role: "r2",
- roles: [ "read" ],
- privileges: [
- { resource: { db: "db1", collection: "log" }, actions: [ "insert" ] }
- ]
-});
-
-//
-// Add a second node to the set, and add a third role, dependent on the first two.
-//
-// read
-// / \
+ //
+ var rstest = new ReplSetTest({name: name, nodes: 1, nodeOptions: {}});
+
+ rstest.startSet();
+ rstest.initiate();
+
+ m0 = rstest.nodes[0];
+
+ m0.getDB("db1").createRole({
+ role: "r1",
+ roles: ["read"],
+ privileges: [{resource: {db: "db1", collection: "system.users"}, actions: ["find"]}]
+ });
+
+ m0.getDB("db1").createRole({
+ role: "r2",
+ roles: ["read"],
+ privileges: [{resource: {db: "db1", collection: "log"}, actions: ["insert"]}]
+ });
+
+ //
+ // Add a second node to the set, and add a third role, dependent on the first two.
+ //
+ // read
+ // / \
// r1 r2
-// \ /
-// r3
-//
-rstest.add();
-rstest.reInitiate();
-
-rstest.getPrimary().getDB("db1").createRole({
- role: "r3",
- roles: [ "r1", "r2" ],
- privileges: [
- { resource: { db: "db1", collection: "log" }, actions: [ "update" ] }
- ]
-}, { w: 2 });
-
-// Verify that both members of the set see the same role graph.
-rstest.nodes.forEach(function (node) {
- var role = node.getDB("db1").getRole("r3");
- assert.eq(2, role.roles.length, node);
- assertListContainsRole(role.roles, {role: "r1", db: "db1"}, node);
- assertListContainsRole(role.roles, {role: "r2", db: "db1"}, node);
- assert.eq(3, role.inheritedRoles.length, node);
- assertListContainsRole(role.inheritedRoles, {role: "r1", db: "db1"}, node);
- assertListContainsRole(role.inheritedRoles, {role: "r2", db: "db1"}, node);
- assertListContainsRole(role.inheritedRoles, {role: "read", db: "db1"}, node);
-});
-
-// Verify that updating roles propagates.
-rstest.getPrimary().getDB("db1").revokeRolesFromRole("r1", [ "read" ], { w: 2 });
-rstest.getPrimary().getDB("db1").grantRolesToRole("r1", [ "dbAdmin" ], { w: 2 });
-rstest.nodes.forEach(function (node) {
- var role = node.getDB("db1").getRole("r1");
- assert.eq(1, role.roles.length, node);
- assertListContainsRole(role.roles, { role: "dbAdmin", db: "db1" });
-});
-
-// Verify that dropping roles propagates.
-rstest.getPrimary().getDB("db1").dropRole("r2", { w: 2});
-rstest.nodes.forEach(function (node) {
- assert.eq(null, node.getDB("db1").getRole("r2"));
- var role = node.getDB("db1").getRole("r3");
- assert.eq(1, role.roles.length, node);
- assertListContainsRole(role.roles, {role: "r1", db: "db1"}, node);
- assert.eq(2, role.inheritedRoles.length, node);
- assertListContainsRole(role.inheritedRoles, {role: "r1", db: "db1"}, node);
- assertListContainsRole(role.inheritedRoles, {role: "dbAdmin", db: "db1"}, node);
-});
-
-// Verify that dropping the admin database propagates.
-assert.commandWorked(rstest.getPrimary().getDB("admin").dropDatabase());
-assert.commandWorked(rstest.getPrimary().getDB("admin").getLastErrorObj(2));
-rstest.nodes.forEach(function (node) {
- var roles = node.getDB("db1").getRoles();
- assert.eq(0, roles.length, node);
-});
-
-// Verify that applyOps commands propagate.
-// NOTE: This section of the test depends on the oplog and roles schemas.
-assert.commandWorked(rstest.getPrimary().getDB("admin").runCommand({ applyOps: [
- {
- op: "c",
- ns: "admin.$cmd",
- o: { create: "system.roles" }
- },
- {
- op: "i",
- ns: "admin.system.roles",
- o: {
- _id: "db1.s1",
- role: "s1",
- db: "db1",
- roles: [ { role: "read", db: "db1" } ],
- privileges: [ { resource: { db: "db1", collection: "system.users" },
- actions: [ "find" ] } ] }
- },
- {
- op: "i",
- ns: "admin.system.roles",
- o: {
- _id: "db1.s2",
- role: "s2",
- db: "db1",
- roles: [ { role: "read", db: "db1" } ],
- privileges: [ { resource: { db: "db1", collection: "log" },
- actions: [ "insert" ] } ] }
- },
- {
- op: "c",
- ns: "admin.$cmd",
- o: { dropDatabase: 1 }
- },
- {
- op: "c",
- ns: "admin.$cmd",
- o: { create: "system.roles" }
- },
- {
- op: "i",
- ns: "admin.system.roles",
- o: {
- _id: "db1.t1",
- role: "t1",
- db: "db1",
- roles: [ { role: "read", db: "db1" } ],
- privileges: [ { resource: { db: "db1", collection: "system.users" },
- actions: [ "find" ] } ] }
- },
- {
- op: "i",
- ns: "admin.system.roles",
- o: {
- _id: "db1.t2",
- role: "t2",
- db: "db1",
- roles: [ ],
- privileges: [ { resource: { db: "db1", collection: "log" },
- actions: [ "insert" ] } ] }
- },
- {
- op: "u",
- ns: "admin.system.roles",
- o: { $set: { roles: [ { role: "readWrite", db: "db1" } ] } },
- o2: { _id: "db1.t2" }
- }
-] }));
-
-assert.commandWorked(rstest.getPrimary().getDB("admin").getLastErrorObj(2));
-rstest.nodes.forEach(function (node) {
- var role = node.getDB("db1").getRole("t1");
- assert.eq(1, role.roles.length, node);
- assertListContainsRole(role.roles, {role: "read", db: "db1"}, node);
-
- var role = node.getDB("db1").getRole("t2");
- assert.eq(1, role.roles.length, node);
- assertListContainsRole(role.roles, {role: "readWrite", db: "db1"}, node);
-});
+ // \ /
+ // r3
+ //
+ rstest.add();
+ rstest.reInitiate();
+
+ rstest.getPrimary().getDB("db1").createRole(
+ {
+ role: "r3",
+ roles: ["r1", "r2"],
+ privileges: [{resource: {db: "db1", collection: "log"}, actions: ["update"]}]
+ },
+ {w: 2});
+
+ // Verify that both members of the set see the same role graph.
+ rstest.nodes.forEach(function(node) {
+ var role = node.getDB("db1").getRole("r3");
+ assert.eq(2, role.roles.length, node);
+ assertListContainsRole(role.roles, {role: "r1", db: "db1"}, node);
+ assertListContainsRole(role.roles, {role: "r2", db: "db1"}, node);
+ assert.eq(3, role.inheritedRoles.length, node);
+ assertListContainsRole(role.inheritedRoles, {role: "r1", db: "db1"}, node);
+ assertListContainsRole(role.inheritedRoles, {role: "r2", db: "db1"}, node);
+ assertListContainsRole(role.inheritedRoles, {role: "read", db: "db1"}, node);
+ });
+
+ // Verify that updating roles propagates.
+ rstest.getPrimary().getDB("db1").revokeRolesFromRole("r1", ["read"], {w: 2});
+ rstest.getPrimary().getDB("db1").grantRolesToRole("r1", ["dbAdmin"], {w: 2});
+ rstest.nodes.forEach(function(node) {
+ var role = node.getDB("db1").getRole("r1");
+ assert.eq(1, role.roles.length, node);
+ assertListContainsRole(role.roles, {role: "dbAdmin", db: "db1"});
+ });
+
+ // Verify that dropping roles propagates.
+ rstest.getPrimary().getDB("db1").dropRole("r2", {w: 2});
+ rstest.nodes.forEach(function(node) {
+ assert.eq(null, node.getDB("db1").getRole("r2"));
+ var role = node.getDB("db1").getRole("r3");
+ assert.eq(1, role.roles.length, node);
+ assertListContainsRole(role.roles, {role: "r1", db: "db1"}, node);
+ assert.eq(2, role.inheritedRoles.length, node);
+ assertListContainsRole(role.inheritedRoles, {role: "r1", db: "db1"}, node);
+ assertListContainsRole(role.inheritedRoles, {role: "dbAdmin", db: "db1"}, node);
+ });
+
+ // Verify that dropping the admin database propagates.
+ assert.commandWorked(rstest.getPrimary().getDB("admin").dropDatabase());
+ assert.commandWorked(rstest.getPrimary().getDB("admin").getLastErrorObj(2));
+ rstest.nodes.forEach(function(node) {
+ var roles = node.getDB("db1").getRoles();
+ assert.eq(0, roles.length, node);
+ });
+
+ // Verify that applyOps commands propagate.
+ // NOTE: This section of the test depends on the oplog and roles schemas.
+ assert.commandWorked(rstest.getPrimary().getDB("admin").runCommand({
+ applyOps: [
+ {op: "c", ns: "admin.$cmd", o: {create: "system.roles"}},
+ {
+ op: "i",
+ ns: "admin.system.roles",
+ o: {
+ _id: "db1.s1",
+ role: "s1",
+ db: "db1",
+ roles: [{role: "read", db: "db1"}],
+ privileges:
+ [{resource: {db: "db1", collection: "system.users"}, actions: ["find"]}]
+ }
+ },
+ {
+ op: "i",
+ ns: "admin.system.roles",
+ o: {
+ _id: "db1.s2",
+ role: "s2",
+ db: "db1",
+ roles: [{role: "read", db: "db1"}],
+ privileges: [{resource: {db: "db1", collection: "log"}, actions: ["insert"]}]
+ }
+ },
+ {op: "c", ns: "admin.$cmd", o: {dropDatabase: 1}},
+ {op: "c", ns: "admin.$cmd", o: {create: "system.roles"}},
+ {
+ op: "i",
+ ns: "admin.system.roles",
+ o: {
+ _id: "db1.t1",
+ role: "t1",
+ db: "db1",
+ roles: [{role: "read", db: "db1"}],
+ privileges:
+ [{resource: {db: "db1", collection: "system.users"}, actions: ["find"]}]
+ }
+ },
+ {
+ op: "i",
+ ns: "admin.system.roles",
+ o: {
+ _id: "db1.t2",
+ role: "t2",
+ db: "db1",
+ roles: [],
+ privileges: [{resource: {db: "db1", collection: "log"}, actions: ["insert"]}]
+ }
+ },
+ {
+ op: "u",
+ ns: "admin.system.roles",
+ o: {$set: {roles: [{role: "readWrite", db: "db1"}]}},
+ o2: {_id: "db1.t2"}
+ }
+ ]
+ }));
+
+ assert.commandWorked(rstest.getPrimary().getDB("admin").getLastErrorObj(2));
+ rstest.nodes.forEach(function(node) {
+ var role = node.getDB("db1").getRole("t1");
+ assert.eq(1, role.roles.length, node);
+ assertListContainsRole(role.roles, {role: "read", db: "db1"}, node);
+
+ var role = node.getDB("db1").getRole("t2");
+ assert.eq(1, role.roles.length, node);
+ assertListContainsRole(role.roles, {role: "readWrite", db: "db1"}, node);
+ });
}());
diff --git a/jstests/auth/user_management_commands.js b/jstests/auth/user_management_commands.js
index 1a777a00e6b..e835aa4b348 100644
--- a/jstests/auth/user_management_commands.js
+++ b/jstests/auth/user_management_commands.js
@@ -12,23 +12,27 @@ function runTest(conn) {
conn.getDB('admin').createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
conn.getDB('admin').auth('admin', 'pwd');
- conn.getDB('admin').createUser({user: 'userAdmin',
- pwd: 'pwd',
- roles: ['userAdminAnyDatabase'],
- customData: {userAdmin: true}});
+ conn.getDB('admin').createUser({
+ user: 'userAdmin',
+ pwd: 'pwd',
+ roles: ['userAdminAnyDatabase'],
+ customData: {userAdmin: true}
+ });
conn.getDB('admin').logout();
var userAdminConn = new Mongo(conn.host);
userAdminConn.getDB('admin').auth('userAdmin', 'pwd');
var testUserAdmin = userAdminConn.getDB('test');
- testUserAdmin.createRole({role: 'testRole',
- roles:[],
- privileges:[{resource: {db: 'test', collection: ''},
- actions: ['viewRole']}],});
- userAdminConn.getDB('admin').createRole({role: 'adminRole',
- roles:[],
- privileges:[{resource: {cluster: true},
- actions: ['connPoolSync']}]});
+ testUserAdmin.createRole({
+ role: 'testRole',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['viewRole']}],
+ });
+ userAdminConn.getDB('admin').createRole({
+ role: 'adminRole',
+ roles: [],
+ privileges: [{resource: {cluster: true}, actions: ['connPoolSync']}]
+ });
var db = conn.getDB('test');
@@ -40,182 +44,207 @@ function runTest(conn) {
// various users and test that their access control is correct.
(function testCreateUser() {
- jsTestLog("Testing createUser");
-
- testUserAdmin.createUser({user: "spencer",
- pwd: "pwd",
- customData: {zipCode: 10028},
- roles: ['readWrite',
- 'testRole',
- {role: 'adminRole', db: 'admin'}]});
- testUserAdmin.createUser({user: "andy", pwd: "pwd", roles: []});
-
- var user = testUserAdmin.getUser('spencer');
- assert.eq(10028, user.customData.zipCode);
- assert(db.auth('spencer', 'pwd'));
- assert.writeOK(db.foo.insert({ a: 1 }));
- assert.eq(1, db.foo.findOne().a);
- assert.doesNotThrow(function() {db.getRole('testRole');});
- assert.commandWorked(db.adminCommand('connPoolSync'));
-
- db.logout();
- assert(db.auth('andy', 'pwd'));
- hasAuthzError(db.foo.insert({ a: 1 }));
- assert.throws(function() { db.foo.findOne();});
- assert.throws(function() {db.getRole('testRole');});
- })();
+ jsTestLog("Testing createUser");
+
+ testUserAdmin.createUser({
+ user: "spencer",
+ pwd: "pwd",
+ customData: {zipCode: 10028},
+ roles: ['readWrite', 'testRole', {role: 'adminRole', db: 'admin'}]
+ });
+ testUserAdmin.createUser({user: "andy", pwd: "pwd", roles: []});
+
+ var user = testUserAdmin.getUser('spencer');
+ assert.eq(10028, user.customData.zipCode);
+ assert(db.auth('spencer', 'pwd'));
+ assert.writeOK(db.foo.insert({a: 1}));
+ assert.eq(1, db.foo.findOne().a);
+ assert.doesNotThrow(function() {
+ db.getRole('testRole');
+ });
+ assert.commandWorked(db.adminCommand('connPoolSync'));
+
+ db.logout();
+ assert(db.auth('andy', 'pwd'));
+ hasAuthzError(db.foo.insert({a: 1}));
+ assert.throws(function() {
+ db.foo.findOne();
+ });
+ assert.throws(function() {
+ db.getRole('testRole');
+ });
+ })();
(function testUpdateUser() {
- jsTestLog("Testing updateUser");
-
- testUserAdmin.updateUser('spencer', {pwd: 'password', customData: {}});
- var user = testUserAdmin.getUser('spencer');
- assert.eq(null, user.customData.zipCode);
- assert(!db.auth('spencer', 'pwd'));
- assert(db.auth('spencer', 'password'));
-
- testUserAdmin.updateUser('spencer', {customData: {zipCode: 10036},
- roles: ["read", "testRole"]});
- var user = testUserAdmin.getUser('spencer');
- assert.eq(10036, user.customData.zipCode);
- hasAuthzError(db.foo.insert({ a: 1 }));
- assert.eq(1, db.foo.findOne().a);
- assert.eq(1, db.foo.count());
- assert.doesNotThrow(function() {db.getRole('testRole');});
- assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
-
- testUserAdmin.updateUser('spencer', {roles: ["readWrite",
- {role: 'adminRole', db:'admin'}]});
- assert.writeOK(db.foo.update({}, { $inc: { a: 1 }}));
- assert.eq(2, db.foo.findOne().a);
- assert.eq(1, db.foo.count());
- assert.throws(function() {db.getRole('testRole');});
- assert.commandWorked(db.adminCommand('connPoolSync'));
- })();
+ jsTestLog("Testing updateUser");
+
+ testUserAdmin.updateUser('spencer', {pwd: 'password', customData: {}});
+ var user = testUserAdmin.getUser('spencer');
+ assert.eq(null, user.customData.zipCode);
+ assert(!db.auth('spencer', 'pwd'));
+ assert(db.auth('spencer', 'password'));
+
+ testUserAdmin.updateUser('spencer',
+ {customData: {zipCode: 10036}, roles: ["read", "testRole"]});
+ var user = testUserAdmin.getUser('spencer');
+ assert.eq(10036, user.customData.zipCode);
+ hasAuthzError(db.foo.insert({a: 1}));
+ assert.eq(1, db.foo.findOne().a);
+ assert.eq(1, db.foo.count());
+ assert.doesNotThrow(function() {
+ db.getRole('testRole');
+ });
+ assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
+
+ testUserAdmin.updateUser('spencer',
+ {roles: ["readWrite", {role: 'adminRole', db: 'admin'}]});
+ assert.writeOK(db.foo.update({}, {$inc: {a: 1}}));
+ assert.eq(2, db.foo.findOne().a);
+ assert.eq(1, db.foo.count());
+ assert.throws(function() {
+ db.getRole('testRole');
+ });
+ assert.commandWorked(db.adminCommand('connPoolSync'));
+ })();
(function testGrantRolesToUser() {
- jsTestLog("Testing grantRolesToUser");
+ jsTestLog("Testing grantRolesToUser");
- assert.commandFailedWithCode(db.runCommand({collMod: 'foo', usePowerOf2Sizes: true}),
- authzErrorCode);
+ assert.commandFailedWithCode(db.runCommand({collMod: 'foo', usePowerOf2Sizes: true}),
+ authzErrorCode);
- testUserAdmin.grantRolesToUser('spencer',
- ['readWrite',
+ testUserAdmin.grantRolesToUser('spencer',
+ [
+ 'readWrite',
'dbAdmin',
{role: 'readWrite', db: 'test'},
{role: 'testRole', db: 'test'},
- 'readWrite']);
-
- assert.commandWorked(db.runCommand({collMod: 'foo', usePowerOf2Sizes: true}));
- assert.writeOK(db.foo.update({}, { $inc: { a: 1 }}));
- assert.eq(3, db.foo.findOne().a);
- assert.eq(1, db.foo.count());
- assert.doesNotThrow(function() {db.getRole('testRole');});
- assert.commandWorked(db.adminCommand('connPoolSync'));
- })();
+ 'readWrite'
+ ]);
+
+ assert.commandWorked(db.runCommand({collMod: 'foo', usePowerOf2Sizes: true}));
+ assert.writeOK(db.foo.update({}, {$inc: {a: 1}}));
+ assert.eq(3, db.foo.findOne().a);
+ assert.eq(1, db.foo.count());
+ assert.doesNotThrow(function() {
+ db.getRole('testRole');
+ });
+ assert.commandWorked(db.adminCommand('connPoolSync'));
+ })();
(function testRevokeRolesFromUser() {
- jsTestLog("Testing revokeRolesFromUser");
-
- testUserAdmin.revokeRolesFromUser('spencer',
- ['readWrite',
- {role: 'dbAdmin', db: 'test2'}, // role user doesnt have
- "testRole"]);
-
- assert.commandWorked(db.runCommand({collMod: 'foo', usePowerOf2Sizes: true}));
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}));
- assert.throws(function() { db.foo.findOne();});
- assert.throws(function() {db.getRole('testRole');});
- assert.commandWorked(db.adminCommand('connPoolSync'));
-
-
- testUserAdmin.revokeRolesFromUser('spencer', [{role: 'adminRole', db: 'admin'}]);
-
- hasAuthzError(db.foo.update({}, { $inc: { a: 1 }}));
- assert.throws(function() { db.foo.findOne();});
- assert.throws(function() {db.getRole('testRole');});
- assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
-
- })();
+ jsTestLog("Testing revokeRolesFromUser");
+
+ testUserAdmin.revokeRolesFromUser(
+ 'spencer',
+ [
+ 'readWrite',
+ {role: 'dbAdmin', db: 'test2'}, // role user doesnt have
+ "testRole"
+ ]);
+
+ assert.commandWorked(db.runCommand({collMod: 'foo', usePowerOf2Sizes: true}));
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}));
+ assert.throws(function() {
+ db.foo.findOne();
+ });
+ assert.throws(function() {
+ db.getRole('testRole');
+ });
+ assert.commandWorked(db.adminCommand('connPoolSync'));
+
+ testUserAdmin.revokeRolesFromUser('spencer', [{role: 'adminRole', db: 'admin'}]);
+
+ hasAuthzError(db.foo.update({}, {$inc: {a: 1}}));
+ assert.throws(function() {
+ db.foo.findOne();
+ });
+ assert.throws(function() {
+ db.getRole('testRole');
+ });
+ assert.commandFailedWithCode(db.adminCommand('connPoolSync'), authzErrorCode);
+
+ })();
(function testUsersInfo() {
- jsTestLog("Testing usersInfo");
-
- var res = testUserAdmin.runCommand({usersInfo: 'spencer'});
- printjson(res);
- assert.eq(1, res.users.length);
- assert.eq(10036, res.users[0].customData.zipCode);
-
- res = testUserAdmin.runCommand({usersInfo: {user: 'spencer', db: 'test'}});
- assert.eq(1, res.users.length);
- assert.eq(10036, res.users[0].customData.zipCode);
-
- res = testUserAdmin.runCommand({usersInfo: ['spencer', {user: 'userAdmin', db: 'admin'}]});
- printjson(res);
- assert.eq(2, res.users.length);
- if (res.users[0].user == "spencer") {
- assert.eq(10036, res.users[0].customData.zipCode);
- assert(res.users[1].customData.userAdmin);
- } else if (res.users[0].user == "userAdmin") {
- assert.eq(10036, res.users[1].customData.zipCode);
- assert(res.users[0].customData.userAdmin);
- } else {
- doassert("Expected user names returned by usersInfo to be either 'userAdmin' or 'spencer', "
- + "but got: " + res.users[0].user);
- }
-
-
- res = testUserAdmin.runCommand({usersInfo: 1});
- assert.eq(2, res.users.length);
- if (res.users[0].user == "spencer") {
- assert.eq("andy", res.users[1].user);
- assert.eq(10036, res.users[0].customData.zipCode);
- assert(!res.users[1].customData);
- } else if (res.users[0].user == "andy") {
- assert.eq("spencer", res.users[1].user);
- assert(!res.users[0].customData);
- assert.eq(10036, res.users[1].customData.zipCode);
- } else {
- doassert("Expected user names returned by usersInfo to be either 'andy' or 'spencer', "
- + "but got: " + res.users[0].user);
- }
-
- })();
+ jsTestLog("Testing usersInfo");
+
+ var res = testUserAdmin.runCommand({usersInfo: 'spencer'});
+ printjson(res);
+ assert.eq(1, res.users.length);
+ assert.eq(10036, res.users[0].customData.zipCode);
+
+ res = testUserAdmin.runCommand({usersInfo: {user: 'spencer', db: 'test'}});
+ assert.eq(1, res.users.length);
+ assert.eq(10036, res.users[0].customData.zipCode);
+
+ res = testUserAdmin.runCommand({usersInfo: ['spencer', {user: 'userAdmin', db: 'admin'}]});
+ printjson(res);
+ assert.eq(2, res.users.length);
+ if (res.users[0].user == "spencer") {
+ assert.eq(10036, res.users[0].customData.zipCode);
+ assert(res.users[1].customData.userAdmin);
+ } else if (res.users[0].user == "userAdmin") {
+ assert.eq(10036, res.users[1].customData.zipCode);
+ assert(res.users[0].customData.userAdmin);
+ } else {
+ doassert(
+ "Expected user names returned by usersInfo to be either 'userAdmin' or 'spencer', " +
+ "but got: " + res.users[0].user);
+ }
+
+ res = testUserAdmin.runCommand({usersInfo: 1});
+ assert.eq(2, res.users.length);
+ if (res.users[0].user == "spencer") {
+ assert.eq("andy", res.users[1].user);
+ assert.eq(10036, res.users[0].customData.zipCode);
+ assert(!res.users[1].customData);
+ } else if (res.users[0].user == "andy") {
+ assert.eq("spencer", res.users[1].user);
+ assert(!res.users[0].customData);
+ assert.eq(10036, res.users[1].customData.zipCode);
+ } else {
+ doassert(
+ "Expected user names returned by usersInfo to be either 'andy' or 'spencer', " +
+ "but got: " + res.users[0].user);
+ }
+
+ })();
(function testDropUser() {
- jsTestLog("Testing dropUser");
+ jsTestLog("Testing dropUser");
- assert(db.auth('spencer', 'password'));
- assert(db.auth('andy', 'pwd'));
+ assert(db.auth('spencer', 'password'));
+ assert(db.auth('andy', 'pwd'));
- assert.commandWorked(testUserAdmin.runCommand({dropUser: 'spencer'}));
+ assert.commandWorked(testUserAdmin.runCommand({dropUser: 'spencer'}));
- assert(!db.auth('spencer', 'password'));
- assert(db.auth('andy', 'pwd'));
+ assert(!db.auth('spencer', 'password'));
+ assert(db.auth('andy', 'pwd'));
- assert.eq(1, testUserAdmin.getUsers().length);
- })();
+ assert.eq(1, testUserAdmin.getUsers().length);
+ })();
(function testDropAllUsersFromDatabase() {
- jsTestLog("Testing dropAllUsersFromDatabase");
+ jsTestLog("Testing dropAllUsersFromDatabase");
- assert.eq(1, testUserAdmin.getUsers().length);
- assert(db.auth('andy', 'pwd'));
+ assert.eq(1, testUserAdmin.getUsers().length);
+ assert(db.auth('andy', 'pwd'));
- assert.commandWorked(testUserAdmin.runCommand({dropAllUsersFromDatabase: 1}));
+ assert.commandWorked(testUserAdmin.runCommand({dropAllUsersFromDatabase: 1}));
- assert(!db.auth('andy', 'pwd'));
- assert.eq(0, testUserAdmin.getUsers().length);
- })();
+ assert(!db.auth('andy', 'pwd'));
+ assert.eq(0, testUserAdmin.getUsers().length);
+ })();
}
jsTest.log('Test standalone');
-var conn = MongoRunner.runMongod({ auth: '' });
-conn.getDB('admin').runCommand({setParameter:1, newCollectionsUsePowerOf2Sizes: false});
+var conn = MongoRunner.runMongod({auth: ''});
+conn.getDB('admin').runCommand({setParameter: 1, newCollectionsUsePowerOf2Sizes: false});
runTest(conn);
MongoRunner.stopMongod(conn.port);
jsTest.log('Test sharding');
-var st = new ShardingTest({ shards: 2, config: 3, keyFile: 'jstests/libs/key1' });
+var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
runTest(st.s);
st.stop();
diff --git a/jstests/auth/user_management_commands_edge_cases.js b/jstests/auth/user_management_commands_edge_cases.js
index b8f11505c58..ab42714fb9a 100644
--- a/jstests/auth/user_management_commands_edge_cases.js
+++ b/jstests/auth/user_management_commands_edge_cases.js
@@ -4,289 +4,284 @@
*/
function runTest(conn) {
-
var db = conn.getDB('test');
var admin = conn.getDB('admin');
admin.createUser({user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
admin.auth('userAdmin', 'pwd');
(function testCreateUser() {
- jsTestLog("Testing createUser");
-
- db.createUser({user: 'user1', pwd: 'pwd', roles: []});
-
- // Try to create duplicate user
- assert.throws(function() {
- db.createUser({user: 'user1', pwd: 'pwd', roles: ['read']});
- });
- assert.eq(0, db.getUser('user1').roles.length);
-
- // Try to create user with role that doesn't exist
- assert.throws(function() {
- db.createUser({user: 'user2', pwd: 'pwd', roles: ['fakeRole']});
- });
-
- // Try to create user with invalid arguments
- assert.throws(function() {
- db.createUser({user: '', pwd: 'pwd', roles: ['read']});
- });
- assert.throws(function() {
- db.createUser({user: ['user2'], pwd: 'pwd', roles: ['read']});
- });
- assert.throws(function() {
- db.createUser({user: 'user2', pwd: '', roles: ['read']});
- });
- assert.throws(function() {
- db.createUser({user: 'user2', pwd: ['pwd'], roles: ['read']});
- });
- assert.throws(function() {
- db.createUser({user: 'user2', pwd: 'pwd', roles: ['']});
- });
- assert.throws(function() {
- db.createUser({user: 'user2', pwd: 'pwd', roles: [{}]});
- });
- assert.throws(function() {
- db.createUser({user: 'user2', pwd: 'pwd', roles: [1]});
- });
- assert.throws(function() {
- db.createUser({user: 'user2', pwd: 'pwd', roles: [{role: 'read'}]});
- });
- assert.throws(function() {
- db.createUser({user: 'user2', pwd: 'pwd', roles: [{db: 'test'}]});
- });
- assert.throws(function() {
- db.createUser({user: 'user2', pwd: 'pwd', roles: [{role: 'read',
- db:''}]});
- });
- assert.throws(function() {
- db.createUser({user: 'user2', pwd: 'pwd', roles: [{role: '',
- db: 'test'}]});
- });
- assert.throws(function() {
- db.createUser({user: 'null\u0000char', pwd: 'pwd', roles: []});
- });
- assert.throws(function() {
- db.createUser({user: 'null\0char', pwd: 'pwd', roles: []});
- });
- // Regression test for SERVER-17125
- assert.throws(function() {
- db.getSiblingDB('$external').createUser({user: '', roles: []});
- });
-
- assert.eq(1, db.getUsers().length);
- })();
+ jsTestLog("Testing createUser");
+
+ db.createUser({user: 'user1', pwd: 'pwd', roles: []});
+
+ // Try to create duplicate user
+ assert.throws(function() {
+ db.createUser({user: 'user1', pwd: 'pwd', roles: ['read']});
+ });
+ assert.eq(0, db.getUser('user1').roles.length);
+
+ // Try to create user with role that doesn't exist
+ assert.throws(function() {
+ db.createUser({user: 'user2', pwd: 'pwd', roles: ['fakeRole']});
+ });
+
+ // Try to create user with invalid arguments
+ assert.throws(function() {
+ db.createUser({user: '', pwd: 'pwd', roles: ['read']});
+ });
+ assert.throws(function() {
+ db.createUser({user: ['user2'], pwd: 'pwd', roles: ['read']});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'user2', pwd: '', roles: ['read']});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'user2', pwd: ['pwd'], roles: ['read']});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'user2', pwd: 'pwd', roles: ['']});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'user2', pwd: 'pwd', roles: [{}]});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'user2', pwd: 'pwd', roles: [1]});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'user2', pwd: 'pwd', roles: [{role: 'read'}]});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'user2', pwd: 'pwd', roles: [{db: 'test'}]});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'user2', pwd: 'pwd', roles: [{role: 'read', db: ''}]});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'user2', pwd: 'pwd', roles: [{role: '', db: 'test'}]});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'null\u0000char', pwd: 'pwd', roles: []});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'null\0char', pwd: 'pwd', roles: []});
+ });
+ // Regression test for SERVER-17125
+ assert.throws(function() {
+ db.getSiblingDB('$external').createUser({user: '', roles: []});
+ });
+
+ assert.eq(1, db.getUsers().length);
+ })();
(function testUpdateUser() {
- jsTestLog("Testing updateUser");
-
- // Must update something
- assert.throws(function() {
- db.updateUser('user1', {});
- });
-
- // Try to grant role that doesn't exist
- assert.throws(function() {
- db.updateUser('user1', {roles: ['fakeRole']});
- });
-
- // Try to update user that doesn't exist
- assert.throws(function() {
- db.updateUser('fakeUser', {roles: ['read']});
- });
-
- // Try to update user with invalid password
- assert.throws(function() {
- db.updateUser('user1', {pwd: ''});
- });
- assert.throws(function() {
- db.updateUser('user1', {pwd: 5});
- });
- assert.throws(function() {
- db.updateUser('user1', {pwd: ['a']});
- });
-
-
- // Try to update user with invalid customData
- assert.throws(function() {
- db.updateUser('user1', {customData: 1});
- });
- assert.throws(function() {
- db.updateUser('user1', {customData: ""});
- });
-
-
- // Try to update with invalid "roles" argument
- assert.throws(function() {
- db.updateUser('user1', {roles: 'read'});
- });
- assert.throws(function() {
- db.updateUser('user1', {roles: ['']});
- });
- assert.throws(function() {
- db.updateUser('user1', {roles: [{}]});
- });
- assert.throws(function() {
- db.updateUser('user1', {roles: [1]});
- });
- assert.throws(function() {
- db.updateUser('user1', {roles: [{role: 'read'}]});
- });
- assert.throws(function() {
- db.updateUser('user1', {roles: [{db: 'test'}]});
- });
- assert.throws(function() {
- db.updateUser('user1', {roles: [{role: '', db:'test'}]});
- });
- assert.throws(function() {
- db.updateUser('user1', {roles: [{role: 'read', db: ''}]});
- });
-
- assert.eq(0, db.getUser('user1').roles.length);
- })();
+ jsTestLog("Testing updateUser");
+
+ // Must update something
+ assert.throws(function() {
+ db.updateUser('user1', {});
+ });
+
+ // Try to grant role that doesn't exist
+ assert.throws(function() {
+ db.updateUser('user1', {roles: ['fakeRole']});
+ });
+
+ // Try to update user that doesn't exist
+ assert.throws(function() {
+ db.updateUser('fakeUser', {roles: ['read']});
+ });
+
+ // Try to update user with invalid password
+ assert.throws(function() {
+ db.updateUser('user1', {pwd: ''});
+ });
+ assert.throws(function() {
+ db.updateUser('user1', {pwd: 5});
+ });
+ assert.throws(function() {
+ db.updateUser('user1', {pwd: ['a']});
+ });
+
+ // Try to update user with invalid customData
+ assert.throws(function() {
+ db.updateUser('user1', {customData: 1});
+ });
+ assert.throws(function() {
+ db.updateUser('user1', {customData: ""});
+ });
+
+ // Try to update with invalid "roles" argument
+ assert.throws(function() {
+ db.updateUser('user1', {roles: 'read'});
+ });
+ assert.throws(function() {
+ db.updateUser('user1', {roles: ['']});
+ });
+ assert.throws(function() {
+ db.updateUser('user1', {roles: [{}]});
+ });
+ assert.throws(function() {
+ db.updateUser('user1', {roles: [1]});
+ });
+ assert.throws(function() {
+ db.updateUser('user1', {roles: [{role: 'read'}]});
+ });
+ assert.throws(function() {
+ db.updateUser('user1', {roles: [{db: 'test'}]});
+ });
+ assert.throws(function() {
+ db.updateUser('user1', {roles: [{role: '', db: 'test'}]});
+ });
+ assert.throws(function() {
+ db.updateUser('user1', {roles: [{role: 'read', db: ''}]});
+ });
+
+ assert.eq(0, db.getUser('user1').roles.length);
+ })();
(function testGrantRolesToUser() {
- jsTestLog("Testing grantRolesToUser");
-
- // Try to grant role that doesn't exist
- assert.throws(function() {
- db.grantRolesToUser('user1', {roles: ['fakeRole']});
- });
-
- // Try to grant to user that doesn't exist
- assert.throws(function() {
- db.grantRolesToUser('fakeUser', {roles: ['read']});
- });
-
- // Must grant something
- assert.throws(function() {
- db.grantRolesToUser('user1', []);
- });
-
- // Try to grant with invalid arguments
- assert.throws(function() {
- db.grantRolesToUser('user1', 1);
- });
- assert.throws(function() {
- db.grantRolesToUser('user1', [{}]);
- });
- assert.throws(function() {
- db.grantRolesToUser('user1', [1]);
- });
- assert.throws(function() {
- db.grantRolesToUser('user1', 'read');
- });
- assert.throws(function() {
- db.grantRolesToUser('user1', [{role: 'read'}]);
- });
- assert.throws(function() {
- db.grantRolesToUser('user1', [{db: 'test'}]);
- });
- assert.throws(function() {
- db.grantRolesToUser('user1', [{role: 'read', db: ''}]);
- });
- assert.throws(function() {
- db.grantRolesToUser('user1', [{role: '', db: 'test'}]);
- });
-
- assert.eq(0, db.getUser('user1').roles.length);
- assert.eq(null, db.getUser('user1').customData);
- // Make sure password didn't change
- assert(new Mongo(db.getMongo().host).getDB(db.getName()).auth('user1', 'pwd'));
- })();
+ jsTestLog("Testing grantRolesToUser");
+
+ // Try to grant role that doesn't exist
+ assert.throws(function() {
+ db.grantRolesToUser('user1', {roles: ['fakeRole']});
+ });
+
+ // Try to grant to user that doesn't exist
+ assert.throws(function() {
+ db.grantRolesToUser('fakeUser', {roles: ['read']});
+ });
+
+ // Must grant something
+ assert.throws(function() {
+ db.grantRolesToUser('user1', []);
+ });
+
+ // Try to grant with invalid arguments
+ assert.throws(function() {
+ db.grantRolesToUser('user1', 1);
+ });
+ assert.throws(function() {
+ db.grantRolesToUser('user1', [{}]);
+ });
+ assert.throws(function() {
+ db.grantRolesToUser('user1', [1]);
+ });
+ assert.throws(function() {
+ db.grantRolesToUser('user1', 'read');
+ });
+ assert.throws(function() {
+ db.grantRolesToUser('user1', [{role: 'read'}]);
+ });
+ assert.throws(function() {
+ db.grantRolesToUser('user1', [{db: 'test'}]);
+ });
+ assert.throws(function() {
+ db.grantRolesToUser('user1', [{role: 'read', db: ''}]);
+ });
+ assert.throws(function() {
+ db.grantRolesToUser('user1', [{role: '', db: 'test'}]);
+ });
+
+ assert.eq(0, db.getUser('user1').roles.length);
+ assert.eq(null, db.getUser('user1').customData);
+ // Make sure password didn't change
+ assert(new Mongo(db.getMongo().host).getDB(db.getName()).auth('user1', 'pwd'));
+ })();
(function testRevokeRolesFromUser() {
- jsTestLog("Testing revokeRolesFromUser");
-
- // Revoking a role the user doesn't have should succeed but do nothing
- db.revokeRolesFromUser('user1', ['read']);
-
- // Try to revoke role that doesn't exist
- assert.throws(function() {
- db.revokeRolesFromUser('user1', {roles: ['fakeRole']});
- });
-
- // Try to revoke from user that doesn't exist
- assert.throws(function() {
- db.revokeRolesFromUser('fakeUser', {roles: ['read']});
- });
-
- // Must revoke something
- assert.throws(function() {
- db.revokeRolesFromUser('user1', []);
- });
-
- // Try to revoke with invalid arguments
- assert.throws(function() {
- db.revokeRolesFromUser('user1', 1);
- });
- assert.throws(function() {
- db.revokeRolesFromUser('user1', [{}]);
- });
- assert.throws(function() {
- db.revokeRolesFromUser('user1', [1]);
- });
- assert.throws(function() {
- db.revokeRolesFromUser('user1', 'read');
- });
- assert.throws(function() {
- db.revokeRolesFromUser('user1', [{role: 'read'}]);
- });
- assert.throws(function() {
- db.revokeRolesFromUser('user1', [{db: 'test'}]);
- });
- assert.throws(function() {
- db.revokeRolesFromUser('user1', [{role: 'read', db: ''}]);
- });
- assert.throws(function() {
- db.revokeRolesFromUser('user1', [{role: '', db: 'test'}]);
- });
-
- assert.eq(0, db.getUser('user1').roles.length);
- })();
+ jsTestLog("Testing revokeRolesFromUser");
+
+ // Revoking a role the user doesn't have should succeed but do nothing
+ db.revokeRolesFromUser('user1', ['read']);
+
+ // Try to revoke role that doesn't exist
+ assert.throws(function() {
+ db.revokeRolesFromUser('user1', {roles: ['fakeRole']});
+ });
+
+ // Try to revoke from user that doesn't exist
+ assert.throws(function() {
+ db.revokeRolesFromUser('fakeUser', {roles: ['read']});
+ });
+
+ // Must revoke something
+ assert.throws(function() {
+ db.revokeRolesFromUser('user1', []);
+ });
+
+ // Try to revoke with invalid arguments
+ assert.throws(function() {
+ db.revokeRolesFromUser('user1', 1);
+ });
+ assert.throws(function() {
+ db.revokeRolesFromUser('user1', [{}]);
+ });
+ assert.throws(function() {
+ db.revokeRolesFromUser('user1', [1]);
+ });
+ assert.throws(function() {
+ db.revokeRolesFromUser('user1', 'read');
+ });
+ assert.throws(function() {
+ db.revokeRolesFromUser('user1', [{role: 'read'}]);
+ });
+ assert.throws(function() {
+ db.revokeRolesFromUser('user1', [{db: 'test'}]);
+ });
+ assert.throws(function() {
+ db.revokeRolesFromUser('user1', [{role: 'read', db: ''}]);
+ });
+ assert.throws(function() {
+ db.revokeRolesFromUser('user1', [{role: '', db: 'test'}]);
+ });
+
+ assert.eq(0, db.getUser('user1').roles.length);
+ })();
(function testUsersInfo() {
- jsTestLog("Testing usersInfo");
+ jsTestLog("Testing usersInfo");
- // Try to get user that does not exist
- assert.eq(null, db.getUser('fakeUser'));
+ // Try to get user that does not exist
+ assert.eq(null, db.getUser('fakeUser'));
- // Pass wrong type for user name
- assert.throws(function() {
- db.getUser(5);
- });
+ // Pass wrong type for user name
+ assert.throws(function() {
+ db.getUser(5);
+ });
- assert.throws(function() {
- db.getUser([]);
- });
+ assert.throws(function() {
+ db.getUser([]);
+ });
- assert.throws(function() {
- db.getUser(['user1']);
- });
+ assert.throws(function() {
+ db.getUser(['user1']);
+ });
- })();
+ })();
(function testDropUser() {
- jsTestLog("Testing dropUser");
+ jsTestLog("Testing dropUser");
- // Try to drop a user that doesn't exist
- // Should not error but should do nothing
- assert.doesNotThrow(function() {
- db.dropUser('fakeUser');
- });
+ // Try to drop a user that doesn't exist
+ // Should not error but should do nothing
+ assert.doesNotThrow(function() {
+ db.dropUser('fakeUser');
+ });
- assert.eq(1, db.getUsers().length);
- })();
+ assert.eq(1, db.getUsers().length);
+ })();
// dropAllUsersFromDatabase ignores its arguments, so there's nothing to test for it.
}
jsTest.log('Test standalone');
-var conn = MongoRunner.runMongod({ auth: '' });
-conn.getDB('admin').runCommand({setParameter:1, newCollectionsUsePowerOf2Sizes: false});
+var conn = MongoRunner.runMongod({auth: ''});
+conn.getDB('admin').runCommand({setParameter: 1, newCollectionsUsePowerOf2Sizes: false});
runTest(conn);
MongoRunner.stopMongod(conn.port);
jsTest.log('Test sharding');
-var st = new ShardingTest({ shards: 2, config: 3, keyFile: 'jstests/libs/key1' });
+var st = new ShardingTest({shards: 2, config: 3, keyFile: 'jstests/libs/key1'});
runTest(st.s);
st.stop();
diff --git a/jstests/auth/user_special_chars.js b/jstests/auth/user_special_chars.js
index 72fd06dff53..e28e63dd153 100644
--- a/jstests/auth/user_special_chars.js
+++ b/jstests/auth/user_special_chars.js
@@ -1,13 +1,12 @@
// Test creating and authenticating a user with special characters.
var conn = MongoRunner.runMongod({auth: ''});
-var testUserSpecialCharacters = function(){
+var testUserSpecialCharacters = function() {
// Create a user with special characters, make sure it can auth.
var adminDB = conn.getDB('admin');
- adminDB.createUser({user: '~`!@#$%^&*()-_+={}[]||;:",.//><', pwd: 'pass',
- roles: jsTest.adminUserRoles});
- assert(adminDB.auth({user: '~`!@#$%^&*()-_+={}[]||;:",.//><',
- pwd: 'pass'}));
+ adminDB.createUser(
+ {user: '~`!@#$%^&*()-_+={}[]||;:",.//><', pwd: 'pass', roles: jsTest.adminUserRoles});
+ assert(adminDB.auth({user: '~`!@#$%^&*()-_+={}[]||;:",.//><', pwd: 'pass'}));
};
testUserSpecialCharacters();
diff --git a/jstests/concurrency/fsm_all.js b/jstests/concurrency/fsm_all.js
index c174342f6de..35031becb89 100644
--- a/jstests/concurrency/fsm_all.js
+++ b/jstests/concurrency/fsm_all.js
@@ -4,8 +4,9 @@ load('jstests/concurrency/fsm_libs/runner.js');
var dir = 'jstests/concurrency/fsm_workloads';
-var blacklist = [
-].map(function(file) { return dir + '/' + file; });
+var blacklist = [].map(function(file) {
+ return dir + '/' + file;
+});
runWorkloadsSerially(ls(dir).filter(function(file) {
return !Array.contains(blacklist, file);
diff --git a/jstests/concurrency/fsm_all_composed.js b/jstests/concurrency/fsm_all_composed.js
index d1786b96a01..159ff0919c9 100644
--- a/jstests/concurrency/fsm_all_composed.js
+++ b/jstests/concurrency/fsm_all_composed.js
@@ -16,7 +16,9 @@ var blacklist = [
// is slow and the composer doesn't honor iteration counts:
'remove_single_document_eval_nolock.js',
'update_simple_eval_nolock.js',
-].map(function(file) { return dir + '/' + file; });
+].map(function(file) {
+ return dir + '/' + file;
+});
// SERVER-16196 re-enable executing workloads
// runCompositionOfWorkloads(ls(dir).filter(function(file) {
diff --git a/jstests/concurrency/fsm_all_replication.js b/jstests/concurrency/fsm_all_replication.js
index e59fe0a2c16..88b01bc2231 100644
--- a/jstests/concurrency/fsm_all_replication.js
+++ b/jstests/concurrency/fsm_all_replication.js
@@ -6,10 +6,13 @@ var dir = 'jstests/concurrency/fsm_workloads';
var blacklist = [
// Disabled due to MongoDB restrictions and/or workload restrictions
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
-].map(function(file) { return dir + '/' + file; });
+ 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
+ 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
+].map(function(file) {
+ return dir + '/' + file;
+});
runWorkloadsSerially(ls(dir).filter(function(file) {
return !Array.contains(blacklist, file);
-}), { replication: true });
+}),
+ {replication: true});
diff --git a/jstests/concurrency/fsm_all_sharded_replication.js b/jstests/concurrency/fsm_all_sharded_replication.js
index e77d750edbc..b925a868b50 100644
--- a/jstests/concurrency/fsm_all_sharded_replication.js
+++ b/jstests/concurrency/fsm_all_sharded_replication.js
@@ -6,10 +6,10 @@ var dir = 'jstests/concurrency/fsm_workloads';
var blacklist = [
// Disabled due to known bugs
- 'distinct.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_noindex.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_projection.js', // SERVER-13116 distinct isn't sharding aware
- 'drop_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
+ 'distinct.js', // SERVER-13116 distinct isn't sharding aware
+ 'distinct_noindex.js', // SERVER-13116 distinct isn't sharding aware
+ 'distinct_projection.js', // SERVER-13116 distinct isn't sharding aware
+ 'drop_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
// Disabled due to SERVER-3645, '.count() can be wrong on sharded collections'.
// This bug is problematic for these workloads because they assert on count() values:
@@ -39,23 +39,23 @@ var blacklist = [
'auth_drop_role.js',
'auth_drop_user.js',
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'compact.js', // compact can only be run against a standalone mongod
- 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
- 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
- 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
- 'findAndModify_remove_queue.js', // remove cannot be {} for findAndModify
- 'findAndModify_update_collscan.js', // findAndModify requires a shard key
- 'findAndModify_update_queue.js', // findAndModify requires a shard key
- 'group.js', // the group command cannot be issued against a sharded cluster
- 'group_cond.js', // the group command cannot be issued against a sharded cluster
- 'indexed_insert_eval.js', // eval doesn't work with sharded collections
- 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
- 'plan_cache_drop_database.js', // cannot ensureIndex after dropDatabase without sharding first
- 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
- 'remove_single_document_eval.js', // eval doesn't work with sharded collections
- 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
+ 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
+ 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
+ 'compact.js', // compact can only be run against a standalone mongod
+ 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
+ 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
+ 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
+ 'findAndModify_remove_queue.js', // remove cannot be {} for findAndModify
+ 'findAndModify_update_collscan.js', // findAndModify requires a shard key
+ 'findAndModify_update_queue.js', // findAndModify requires a shard key
+ 'group.js', // the group command cannot be issued against a sharded cluster
+ 'group_cond.js', // the group command cannot be issued against a sharded cluster
+ 'indexed_insert_eval.js', // eval doesn't work with sharded collections
+ 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
+ 'plan_cache_drop_database.js', // cannot ensureIndex after dropDatabase without sharding first
+ 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
+ 'remove_single_document_eval.js', // eval doesn't work with sharded collections
+ 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
// The rename_* workloads are disabled since renameCollection doesn't work with sharded
// collections
@@ -68,15 +68,18 @@ var blacklist = [
'rename_collection_dbname_droptarget.js',
'rename_collection_droptarget.js',
- 'update_simple_eval.js', // eval doesn't work with sharded collections
- 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
- 'update_upsert_multi.js', // our update queries lack shard keys
- 'update_upsert_multi_noindex.js', // our update queries lack shard keys
- 'upsert_where.js', // cannot use upsert command with $where with sharded collections
- 'yield_and_hashed.js', // stagedebug can only be run against a standalone mongod
- 'yield_and_sorted.js', // stagedebug can only be run against a standalone mongod
-].map(function(file) { return dir + '/' + file; });
+ 'update_simple_eval.js', // eval doesn't work with sharded collections
+ 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
+ 'update_upsert_multi.js', // our update queries lack shard keys
+ 'update_upsert_multi_noindex.js', // our update queries lack shard keys
+ 'upsert_where.js', // cannot use upsert command with $where with sharded collections
+ 'yield_and_hashed.js', // stagedebug can only be run against a standalone mongod
+ 'yield_and_sorted.js', // stagedebug can only be run against a standalone mongod
+].map(function(file) {
+ return dir + '/' + file;
+});
runWorkloadsSerially(ls(dir).filter(function(file) {
return !Array.contains(blacklist, file);
-}), { sharded: true, replication: true });
+}),
+ {sharded: true, replication: true});
diff --git a/jstests/concurrency/fsm_all_sharded_replication_with_balancer.js b/jstests/concurrency/fsm_all_sharded_replication_with_balancer.js
index d934927b86b..20f4fca18ba 100644
--- a/jstests/concurrency/fsm_all_sharded_replication_with_balancer.js
+++ b/jstests/concurrency/fsm_all_sharded_replication_with_balancer.js
@@ -6,11 +6,11 @@ var dir = 'jstests/concurrency/fsm_workloads';
var blacklist = [
// Disabled due to known bugs
- 'distinct.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_noindex.js', // SERVER-13116 distinct isn't sharding aware
- 'distinct_projection.js', // SERVER-13116 distinct isn't sharding aware
- 'drop_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
- 'remove_where.js', // SERVER-14669 Multi-removes that use $where miscount removed documents
+ 'distinct.js', // SERVER-13116 distinct isn't sharding aware
+ 'distinct_noindex.js', // SERVER-13116 distinct isn't sharding aware
+ 'distinct_projection.js', // SERVER-13116 distinct isn't sharding aware
+ 'drop_database.js', // SERVER-17397 Drops of sharded namespaces may not fully succeed
+ 'remove_where.js', // SERVER-14669 Multi-removes that use $where miscount removed documents
// Disabled due to SERVER-3645, '.count() can be wrong on sharded collections'.
// This bug is problematic for these workloads because they assert on count() values:
@@ -44,23 +44,23 @@ var blacklist = [
'auth_drop_role.js',
'auth_drop_user.js',
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'compact.js', // compact can only be run against a standalone mongod
- 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
- 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
- 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
- 'findAndModify_remove_queue.js', // remove cannot be {} for findAndModify
- 'findAndModify_update_collscan.js', // findAndModify requires a shard key
- 'findAndModify_update_queue.js', // findAndModify requires a shard key
- 'group.js', // the group command cannot be issued against a sharded cluster
- 'group_cond.js', // the group command cannot be issued against a sharded cluster
- 'indexed_insert_eval.js', // eval doesn't work with sharded collections
- 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
- 'plan_cache_drop_database.js', // cannot ensureIndex after dropDatabase without sharding first
- 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
- 'remove_single_document_eval.js', // eval doesn't work with sharded collections
- 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
+ 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
+ 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
+ 'compact.js', // compact can only be run against a standalone mongod
+ 'compact_simultaneous_padding_bytes.js', // compact can only be run against a mongod
+ 'convert_to_capped_collection.js', // convertToCapped can't be run on mongos processes
+ 'convert_to_capped_collection_index.js', // convertToCapped can't be run on mongos processes
+ 'findAndModify_remove_queue.js', // remove cannot be {} for findAndModify
+ 'findAndModify_update_collscan.js', // findAndModify requires a shard key
+ 'findAndModify_update_queue.js', // findAndModify requires a shard key
+ 'group.js', // the group command cannot be issued against a sharded cluster
+ 'group_cond.js', // the group command cannot be issued against a sharded cluster
+ 'indexed_insert_eval.js', // eval doesn't work with sharded collections
+ 'indexed_insert_eval_nolock.js', // eval doesn't work with sharded collections
+ 'plan_cache_drop_database.js', // cannot ensureIndex after dropDatabase without sharding first
+ 'remove_single_document.js', // our .remove(query, {justOne: true}) calls lack shard keys
+ 'remove_single_document_eval.js', // eval doesn't work with sharded collections
+ 'remove_single_document_eval_nolock.js', // eval doesn't work with sharded collections
// The rename_* workloads are disabled since renameCollection doesn't work with sharded
// collections
@@ -73,17 +73,18 @@ var blacklist = [
'rename_collection_dbname_droptarget.js',
'rename_collection_droptarget.js',
- 'update_simple_eval.js', // eval doesn't work with sharded collections
- 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
- 'update_upsert_multi.js', // our update queries lack shard keys
- 'update_upsert_multi_noindex.js', // our update queries lack shard keys
- 'upsert_where.js', // cannot use upsert command with $where with sharded collections
- 'yield_and_hashed.js', // stagedebug can only be run against a standalone mongod
- 'yield_and_sorted.js', // stagedebug can only be run against a standalone mongod
-].map(function(file) { return dir + '/' + file; });
+ 'update_simple_eval.js', // eval doesn't work with sharded collections
+ 'update_simple_eval_nolock.js', // eval doesn't work with sharded collections
+ 'update_upsert_multi.js', // our update queries lack shard keys
+ 'update_upsert_multi_noindex.js', // our update queries lack shard keys
+ 'upsert_where.js', // cannot use upsert command with $where with sharded collections
+ 'yield_and_hashed.js', // stagedebug can only be run against a standalone mongod
+ 'yield_and_sorted.js', // stagedebug can only be run against a standalone mongod
+].map(function(file) {
+ return dir + '/' + file;
+});
runWorkloadsSerially(ls(dir).filter(function(file) {
return !Array.contains(blacklist, file);
-}), { sharded: true,
- replication: true,
- enableBalancer: true });
+}),
+ {sharded: true, replication: true, enableBalancer: true});
diff --git a/jstests/concurrency/fsm_all_simultaneous.js b/jstests/concurrency/fsm_all_simultaneous.js
index e50e48656f1..4eada69c5c0 100644
--- a/jstests/concurrency/fsm_all_simultaneous.js
+++ b/jstests/concurrency/fsm_all_simultaneous.js
@@ -12,9 +12,11 @@ var blacklist = [
'list_indexes.js',
'update_inc_capped.js',
- 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
- 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
-].map(function(file) { return dir + '/' + file; });
+ 'agg_group_external.js', // uses >100MB of data, which can overwhelm test hosts
+ 'agg_sort_external.js', // uses >100MB of data, which can overwhelm test hosts
+].map(function(file) {
+ return dir + '/' + file;
+});
runWorkloadsInParallel(ls(dir).filter(function(file) {
return !Array.contains(blacklist, file);
diff --git a/jstests/concurrency/fsm_background_workloads/background_base.js b/jstests/concurrency/fsm_background_workloads/background_base.js
index 715cb822bd7..febc3a5d0dd 100644
--- a/jstests/concurrency/fsm_background_workloads/background_base.js
+++ b/jstests/concurrency/fsm_background_workloads/background_base.js
@@ -1,6 +1,6 @@
'use strict';
-load('jstests/concurrency/fsm_libs/errors.js'); // for IterationEnd
+load('jstests/concurrency/fsm_libs/errors.js'); // for IterationEnd
/**
* background_base.js
@@ -24,16 +24,16 @@ var $config = (function() {
checkForTermination: function checkForTermination(db, collName) {
var coll = db.getSiblingDB('config').fsm_background;
- var numDocs = coll.find({ terminate: true }).itcount();
+ var numDocs = coll.find({terminate: true}).itcount();
if (numDocs >= 1) {
- throw new IterationEnd('Background workload was instructed to terminate');
+ throw new IterationEnd('Background workload was instructed to terminate');
}
}
};
var transitions = {
- wait: { checkForTermination: 1 },
- checkForTermination: { wait: 1 }
+ wait: {checkForTermination: 1},
+ checkForTermination: {wait: 1}
};
var teardown = function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_example.js b/jstests/concurrency/fsm_example.js
index a112195e1a4..79cc891609c 100644
--- a/jstests/concurrency/fsm_example.js
+++ b/jstests/concurrency/fsm_example.js
@@ -20,11 +20,11 @@ var $config = (function() {
},
scanGT: function scanGT(db, collName) {
- db[collName].find({ _id: { $gt: this.start } }).itcount();
+ db[collName].find({_id: {$gt: this.start}}).itcount();
},
scanLTE: function scanLTE(db, collName) {
- db[collName].find({ _id: { $lte: this.start } }).itcount();
+ db[collName].find({_id: {$lte: this.start}}).itcount();
},
};
@@ -39,9 +39,9 @@ var $config = (function() {
//
// All state functions should appear as keys within 'transitions'.
var transitions = {
- init: { scanGT: 0.5, scanLTE: 0.5 },
- scanGT: { scanGT: 0.8, scanLTE: 0.2 },
- scanLTE: { scanGT: 0.2, scanLTE: 0.8 }
+ init: {scanGT: 0.5, scanLTE: 0.5},
+ scanGT: {scanGT: 0.8, scanLTE: 0.2},
+ scanLTE: {scanGT: 0.2, scanLTE: 0.8}
};
// 'setup' is run once by the parent thread after the cluster has
@@ -52,7 +52,7 @@ var $config = (function() {
// Workloads should NOT drop the collection db[collName], as
// doing so is handled by runner.js before 'setup' is called.
for (var i = 0; i < 1000; ++i) {
- db[collName].insert({ _id: i });
+ db[collName].insert({_id: i});
}
cluster.executeOnMongodNodes(function(db) {
@@ -68,17 +68,18 @@ var $config = (function() {
// is destroyed, but after the worker threads have been reaped.
// The 'this' argument is bound as '$config.data'. 'cluster' is provided
// to allow execution against all mongos and mongod nodes.
- function teardown(db, collName, cluster) {}
+ function teardown(db, collName, cluster) {
+ }
return {
threadCount: 5,
iterations: 10,
- startState: 'init', // optional, default 'init'
+ startState: 'init', // optional, default 'init'
states: states,
transitions: transitions,
- setup: setup, // optional, default empty function
- teardown: teardown, // optional, default empty function
- data: data // optional, default empty object
+ setup: setup, // optional, default empty function
+ teardown: teardown, // optional, default empty function
+ data: data // optional, default empty object
};
})();
diff --git a/jstests/concurrency/fsm_example_inheritance.js b/jstests/concurrency/fsm_example_inheritance.js
index 820758f0c6a..702f0208dda 100644
--- a/jstests/concurrency/fsm_example_inheritance.js
+++ b/jstests/concurrency/fsm_example_inheritance.js
@@ -1,20 +1,24 @@
'use strict';
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_example.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_example.js'); // for $config
// extendWorkload takes a $config object and a callback, and returns an extended $config object.
-var $config = extendWorkload($config, function($config, $super) {
- // In the callback, $super is the base workload definition we're extending,
- // and $config is the extended workload definition we're creating.
+var $config = extendWorkload($config,
+ function($config, $super) {
+ // In the callback, $super is the base workload definition we're
+ // extending,
+ // and $config is the extended workload definition we're creating.
- // You can replace any properties on $config, including methods you want to override.
- $config.setup = function(db, collName, cluster) {
- // Overridden methods should usually call the corresponding method on $super.
- $super.setup.apply(this, arguments);
+ // You can replace any properties on $config, including methods you
+ // want to override.
+ $config.setup = function(db, collName, cluster) {
+ // Overridden methods should usually call the corresponding
+ // method on $super.
+ $super.setup.apply(this, arguments);
- db[collName].ensureIndex({ exampleIndexedField: 1 });
- };
+ db[collName].ensureIndex({exampleIndexedField: 1});
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_libs/assert.js b/jstests/concurrency/fsm_libs/assert.js
index d6de79bf7dc..1c3dfa55408 100644
--- a/jstests/concurrency/fsm_libs/assert.js
+++ b/jstests/concurrency/fsm_libs/assert.js
@@ -57,7 +57,7 @@ var assertWithLevel = function(level) {
var doassertSaved = doassert;
try {
doassert = quietlyDoAssert;
- fn.apply(assert, args); // functions typically get called on 'assert'
+ fn.apply(assert, args); // functions typically get called on 'assert'
} finally {
doassert = doassertSaved;
}
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 1735b5285b5..5a350c9836d 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -22,9 +22,10 @@ var Cluster = function(options) {
];
Object.keys(options).forEach(function(option) {
- assert.contains(option, allowedKeys,
- 'invalid option: ' + tojson(option) +
- '; valid options are: ' + tojson(allowedKeys));
+ assert.contains(option,
+ allowedKeys,
+ 'invalid option: ' + tojson(option) + '; valid options are: ' +
+ tojson(allowedKeys));
});
options.enableBalancer = options.enableBalancer || false;
@@ -83,10 +84,10 @@ var Cluster = function(options) {
assert(options.teardownFunctions.mongos.every(f => (typeof f === 'function')),
'Expected teardownFunctions.mongos to be an array of functions');
- assert(!options.masterSlave || !options.replication, "Both 'masterSlave' and " +
- "'replication' cannot be true");
- assert(!options.masterSlave || !options.sharded, "Both 'masterSlave' and 'sharded' cannot" +
- "be true");
+ assert(!options.masterSlave || !options.replication,
+ "Both 'masterSlave' and " + "'replication' cannot be true");
+ assert(!options.masterSlave || !options.sharded,
+ "Both 'masterSlave' and 'sharded' cannot" + "be true");
}
var conn;
@@ -123,7 +124,7 @@ var Cluster = function(options) {
shards: 2,
mongos: 2,
verbose: verbosityLevel,
- other: { enableBalancer: options.enableBalancer }
+ other: {enableBalancer: options.enableBalancer}
};
// TODO: allow 'options' to specify an 'rs' config
@@ -144,7 +145,7 @@ var Cluster = function(options) {
st = new ShardingTest(shardConfig);
- conn = st.s; // mongos
+ conn = st.s; // mongos
this.teardown = function teardown() {
options.teardownFunctions.mongod.forEach(this.executeOnMongodNodes);
@@ -185,7 +186,7 @@ var Cluster = function(options) {
nodes: replSetNodes,
// Increase the oplog size (in MB) to prevent rollover during write-heavy workloads
oplogSize: 1024,
- nodeOptions: { verbose: verbosityLevel }
+ nodeOptions: {verbose: verbosityLevel}
};
var rst = new ReplSetTest(replSetConfig);
@@ -215,8 +216,8 @@ var Cluster = function(options) {
var slave = rt.start(false);
conn = master;
- master.adminCommand({ setParameter: 1, logLevel: verbosityLevel });
- slave.adminCommand({ setParameter: 1, logLevel: verbosityLevel });
+ master.adminCommand({setParameter: 1, logLevel: verbosityLevel});
+ slave.adminCommand({setParameter: 1, logLevel: verbosityLevel});
this.teardown = function teardown() {
options.teardownFunctions.mongod.forEach(this.executeOnMongodNodes);
@@ -226,9 +227,9 @@ var Cluster = function(options) {
_conns.mongod = [master, slave];
- } else { // standalone server
+ } else { // standalone server
conn = db.getMongo();
- db.adminCommand({ setParameter: 1, logLevel: verbosityLevel });
+ db.adminCommand({setParameter: 1, logLevel: verbosityLevel});
_conns.mongod = [conn];
}
@@ -242,10 +243,9 @@ var Cluster = function(options) {
}
};
-
this._addReplicaSetConns = function _addReplicaSetConns(rsTest) {
_conns.mongod.push(rsTest.getPrimary());
- rsTest.getSecondaries().forEach(function (secondaryConn) {
+ rsTest.getSecondaries().forEach(function(secondaryConn) {
_conns.mongod.push(secondaryConn);
});
};
@@ -406,7 +406,7 @@ var Cluster = function(options) {
// Use liveNodes.master instead of getPrimary() to avoid the detection of a new primary.
var primary = rst.liveNodes.master;
- var res = primary.adminCommand({ listDatabases: 1 });
+ var res = primary.adminCommand({listDatabases: 1});
assert.commandWorked(res);
res.databases.forEach(dbInfo => {
@@ -425,20 +425,23 @@ var Cluster = function(options) {
var primaryNumCollections = Object.keys(primaryDBHash.collections).length;
var secondaryNumCollections = Object.keys(secondaryDBHash.collections).length;
- assert.eq(primaryNumCollections, secondaryNumCollections,
+ assert.eq(primaryNumCollections,
+ secondaryNumCollections,
phase + ', the primary and secondary have a different number of' +
- ' collections: ' + tojson(dbHashes));
+ ' collections: ' + tojson(dbHashes));
// Only compare the dbhashes of non-capped collections because capped collections
// are not necessarily truncated at the same points across replica set members.
- var collNames = Object.keys(primaryDBHash.collections).filter(collName =>
- !primary.getDB(dbName)[collName].isCapped());
+ var collNames =
+ Object.keys(primaryDBHash.collections)
+ .filter(collName => !primary.getDB(dbName)[collName].isCapped());
collNames.forEach(collName => {
assert.eq(primaryDBHash.collections[collName],
secondaryDBHash.collections[collName],
phase + ', the primary and secondary have a different hash for the' +
- ' collection ' + dbName + '.' + collName + ': ' + tojson(dbHashes));
+ ' collection ' + dbName + '.' + collName + ': ' +
+ tojson(dbHashes));
});
if (collNames.length === primaryNumCollections) {
@@ -448,15 +451,14 @@ var Cluster = function(options) {
assert.eq(primaryDBHash.md5,
secondaryDBHash.md5,
phase + ', the primary and secondary have a different hash for the ' +
- dbName + ' database: ' + tojson(dbHashes));
+ dbName + ' database: ' + tojson(dbHashes));
}
});
});
};
- this.checkReplicationConsistency = function checkReplicationConsistency(dbBlacklist,
- phase,
- ttlIndexExists) {
+ this.checkReplicationConsistency = function checkReplicationConsistency(
+ dbBlacklist, phase, ttlIndexExists) {
assert(initialized, 'cluster must be initialized first');
if (!this.isReplication()) {
@@ -477,7 +479,7 @@ var Cluster = function(options) {
if (shouldCheckDBHashes && ttlIndexExists) {
// Lock the primary to prevent the TTL monitor from deleting expired documents in
// the background while we are getting the dbhashes of the replica set members.
- assert.commandWorked(primary.adminCommand({ fsync: 1, lock: 1 }),
+ assert.commandWorked(primary.adminCommand({fsync: 1, lock: 1}),
phase + ', failed to lock the primary');
}
@@ -486,14 +488,13 @@ var Cluster = function(options) {
try {
// Get the latest optime from the primary.
- var replSetStatus = primary.adminCommand({ replSetGetStatus: 1 });
- assert.commandWorked(replSetStatus,
- phase + ', error getting replication status');
+ var replSetStatus = primary.adminCommand({replSetGetStatus: 1});
+ assert.commandWorked(replSetStatus, phase + ', error getting replication status');
var primaryInfo = replSetStatus.members.find(memberInfo => memberInfo.self);
assert(primaryInfo !== undefined,
phase + ', failed to find self in replication status: ' +
- tojson(replSetStatus));
+ tojson(replSetStatus));
// Wait for all previous workload operations to complete. We use the "getLastError"
// command rather than a replicated write because the primary is currently
@@ -516,12 +517,12 @@ var Cluster = function(options) {
} finally {
if (shouldCheckDBHashes && ttlIndexExists) {
// Allow writes on the primary.
- res = primary.adminCommand({ fsyncUnlock: 1 });
+ res = primary.adminCommand({fsyncUnlock: 1});
// Returning early would suppress the exception rethrown in the catch block.
if (!res.ok) {
msg = phase + ', failed to unlock the primary, which may cause this' +
- ' test to hang: ' + tojson(res);
+ ' test to hang: ' + tojson(res);
if (activeException) {
jsTest.log(msg);
} else {
@@ -533,7 +534,7 @@ var Cluster = function(options) {
var totalTime = Date.now() - startTime;
jsTest.log('Finished consistency checks of replica set with ' + primary.host +
- ' as primary in ' + totalTime + ' ms, ' + phase);
+ ' as primary in ' + totalTime + ' ms, ' + phase);
});
};
@@ -546,11 +547,11 @@ var Cluster = function(options) {
// We record the contents of the 'lockpings' and 'locks' collections to make it easier to
// debug issues with distributed locks in the sharded cluster.
- data.lockpings = configDB.lockpings.find({ ping: { $gte: clusterStartTime } }).toArray();
+ data.lockpings = configDB.lockpings.find({ping: {$gte: clusterStartTime}}).toArray();
// We suppress some fields from the result set to reduce the amount of data recorded.
- data.locks = configDB.locks.find({ when: { $gte: clusterStartTime } },
- { process: 0, ts: 0 }).toArray();
+ data.locks =
+ configDB.locks.find({when: {$gte: clusterStartTime}}, {process: 0, ts: 0}).toArray();
return data;
};
@@ -561,7 +562,7 @@ var Cluster = function(options) {
var data = {};
st._configServers.forEach(config =>
- (data[config.host] = this.recordConfigServerData(config)));
+ (data[config.host] = this.recordConfigServerData(config)));
return data;
};
diff --git a/jstests/concurrency/fsm_libs/composer.js b/jstests/concurrency/fsm_libs/composer.js
index d0552f45fd0..495648fb01a 100644
--- a/jstests/concurrency/fsm_libs/composer.js
+++ b/jstests/concurrency/fsm_libs/composer.js
@@ -6,8 +6,8 @@ var composer = (function() {
// TODO: what if a workload depends on iterations?
var iterations = 100;
- assert.eq(AssertLevel.ALWAYS, globalAssertLevel,
- 'global assertion level is not set as ALWAYS');
+ assert.eq(
+ AssertLevel.ALWAYS, globalAssertLevel, 'global assertion level is not set as ALWAYS');
var currentWorkload = getRandomElem(workloads, Random.rand());
var currentState = configs[currentWorkload].startState;
@@ -18,8 +18,7 @@ var composer = (function() {
var args = configs[workload];
if (!first) {
assert.eq(myDB, args.db, 'expected all workloads to use same database');
- assert.eq(collName, args.collName,
- 'expected all workloads to use same collection');
+ assert.eq(collName, args.collName, 'expected all workloads to use same collection');
}
myDB = args.db;
collName = args.collName;
@@ -38,8 +37,8 @@ var composer = (function() {
// Transition to another valid state of the current workload,
// with probability '1 - mixProb'
if (Random.rand() >= mixProb) {
- var nextState = fsm._getWeightedRandomChoice(args.transitions[currentState],
- Random.rand());
+ var nextState =
+ fsm._getWeightedRandomChoice(args.transitions[currentState], Random.rand());
currentState = nextState;
continue;
}
@@ -52,11 +51,12 @@ var composer = (function() {
}
var args = configs[workload];
- Object.keys(args.states).forEach(function(state) {
- if (state !== args.startState) {
- otherStates.push({ workload: workload, state: state });
- }
- });
+ Object.keys(args.states)
+ .forEach(function(state) {
+ if (state !== args.startState) {
+ otherStates.push({workload: workload, state: state});
+ }
+ });
});
var next = getRandomElem(otherStates, Random.rand());
diff --git a/jstests/concurrency/fsm_libs/extend_workload.js b/jstests/concurrency/fsm_libs/extend_workload.js
index c31e1a4cdec..2b6fe47ed4e 100644
--- a/jstests/concurrency/fsm_libs/extend_workload.js
+++ b/jstests/concurrency/fsm_libs/extend_workload.js
@@ -1,6 +1,6 @@
'use strict';
-load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig
+load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig
/** extendWorkload usage:
*
@@ -13,11 +13,12 @@ load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig
* });
*/
function extendWorkload($config, callback) {
- assert.eq(2, arguments.length,
+ assert.eq(2,
+ arguments.length,
'extendWorkload must be called with 2 arguments: $config and callback');
- assert.eq('function', typeof callback,
- '2nd argument to extendWorkload must be a callback');
- assert.eq(2, callback.length,
+ assert.eq('function', typeof callback, '2nd argument to extendWorkload must be a callback');
+ assert.eq(2,
+ callback.length,
'2nd argument to extendWorkload must take 2 arguments: $config and $super');
var parsedSuperConfig = parseConfig($config);
var childConfig = Object.extend({}, parsedSuperConfig, true);
diff --git a/jstests/concurrency/fsm_libs/fsm.js b/jstests/concurrency/fsm_libs/fsm.js
index be9473c2684..0a3e4a45bf4 100644
--- a/jstests/concurrency/fsm_libs/fsm.js
+++ b/jstests/concurrency/fsm_libs/fsm.js
@@ -31,10 +31,8 @@ var fsm = (function() {
var shardNames = Object.keys(args.cluster.shards);
-
- shardNames.forEach(name =>
- (connCache.shards[name] = args.cluster.shards[name].map(connStr =>
- new Mongo(connStr))));
+ shardNames.forEach(name => (connCache.shards[name] = args.cluster.shards[name].map(
+ connStr => new Mongo(connStr))));
}
for (var i = 0; i < args.iterations; ++i) {
@@ -67,7 +65,9 @@ var fsm = (function() {
// weights = [ 0.25, 0.5, 0.25 ]
// => accumulated = [ 0.25, 0.75, 1 ]
- var weights = states.map(function(k) { return doc[k]; });
+ var weights = states.map(function(k) {
+ return doc[k];
+ });
var accumulated = [];
var sum = weights.reduce(function(a, b, i) {
@@ -76,7 +76,7 @@ var fsm = (function() {
}, 0);
// Scale the random value by the sum of the weights
- randVal *= sum; // ~ U[0, sum)
+ randVal *= sum; // ~ U[0, sum)
// Find the state corresponding to randVal
for (var i = 0; i < accumulated.length; ++i) {
diff --git a/jstests/concurrency/fsm_libs/parse_config.js b/jstests/concurrency/fsm_libs/parse_config.js
index 1b53f42841c..b569f660c8a 100644
--- a/jstests/concurrency/fsm_libs/parse_config.js
+++ b/jstests/concurrency/fsm_libs/parse_config.js
@@ -4,7 +4,7 @@
// Normalized means all optional parameters are set to their default values,
// and any parameters that need to be coerced have been coerced.
function parseConfig(config) {
- config = Object.extend({}, config, true); // defensive deep copy
+ config = Object.extend({}, config, true); // defensive deep copy
var allowedKeys = [
'data',
@@ -19,72 +19,76 @@ function parseConfig(config) {
];
Object.keys(config).forEach(function(key) {
- assert.contains(key, allowedKeys,
- 'invalid config parameter: ' + key +
- '; valid parameters are: ' + tojson(allowedKeys));
+ assert.contains(
+ key,
+ allowedKeys,
+ 'invalid config parameter: ' + key + '; valid parameters are: ' + tojson(allowedKeys));
});
- assert(Number.isInteger(config.threadCount),
- 'expected number of threads to be an integer');
- assert.gt(config.threadCount, 0,
- 'expected number of threads to be positive');
+ assert(Number.isInteger(config.threadCount), 'expected number of threads to be an integer');
+ assert.gt(config.threadCount, 0, 'expected number of threads to be positive');
- assert(Number.isInteger(config.iterations),
- 'expected number of iterations to be an integer');
- assert.gt(config.iterations, 0,
- 'expected number of iterations to be positive');
+ assert(Number.isInteger(config.iterations), 'expected number of iterations to be an integer');
+ assert.gt(config.iterations, 0, 'expected number of iterations to be positive');
config.startState = config.startState || 'init';
assert.eq('string', typeof config.startState);
assert.eq('object', typeof config.states);
assert.gt(Object.keys(config.states).length, 0);
- Object.keys(config.states).forEach(function(k) {
- assert.eq('function', typeof config.states[k],
- 'config.states.' + k + ' is not a function');
- if (config.passConnectionCache) {
- assert.eq(3, config.states[k].length,
- 'if passConnectionCache is true, state functions should ' +
- 'accept 3 parameters: db, collName, and connCache');
- } else {
- assert.eq(2, config.states[k].length,
- 'if passConnectionCache is false, state functions should ' +
- 'accept 2 parameters: db and collName');
- }
- });
+ Object.keys(config.states)
+ .forEach(function(k) {
+ assert.eq(
+ 'function', typeof config.states[k], 'config.states.' + k + ' is not a function');
+ if (config.passConnectionCache) {
+ assert.eq(3,
+ config.states[k].length,
+ 'if passConnectionCache is true, state functions should ' +
+ 'accept 3 parameters: db, collName, and connCache');
+ } else {
+ assert.eq(2,
+ config.states[k].length,
+ 'if passConnectionCache is false, state functions should ' +
+ 'accept 2 parameters: db and collName');
+ }
+ });
// assert all states mentioned in config.transitions are present in config.states
assert.eq('object', typeof config.transitions);
assert.gt(Object.keys(config.transitions).length, 0);
- Object.keys(config.transitions).forEach(function(fromState) {
- assert(config.states.hasOwnProperty(fromState),
- 'config.transitions contains a state not in config.states: ' + fromState);
+ Object.keys(config.transitions)
+ .forEach(function(fromState) {
+ assert(config.states.hasOwnProperty(fromState),
+ 'config.transitions contains a state not in config.states: ' + fromState);
- assert.gt(Object.keys(config.transitions[fromState]).length, 0);
- Object.keys(config.transitions[fromState]).forEach(function(toState) {
- assert(config.states.hasOwnProperty(toState),
- 'config.transitions.' + fromState +
- ' contains a state not in config.states: ' + toState);
- assert.eq('number', typeof config.transitions[fromState][toState],
- 'transitions.' + fromState + '.' + toState + ' should be a number');
- assert(!isNaN(config.transitions[fromState][toState]),
- 'transitions.' + fromState + '.' + toState + ' cannot be NaN');
+ assert.gt(Object.keys(config.transitions[fromState]).length, 0);
+ Object.keys(config.transitions[fromState])
+ .forEach(function(toState) {
+ assert(config.states.hasOwnProperty(toState),
+ 'config.transitions.' + fromState +
+ ' contains a state not in config.states: ' + toState);
+ assert.eq('number',
+ typeof config.transitions[fromState][toState],
+ 'transitions.' + fromState + '.' + toState + ' should be a number');
+ assert(!isNaN(config.transitions[fromState][toState]),
+ 'transitions.' + fromState + '.' + toState + ' cannot be NaN');
+ });
});
- });
- config.setup = config.setup || function(){};
+ config.setup = config.setup || function() {};
assert.eq('function', typeof config.setup);
- config.teardown = config.teardown || function(){};
+ config.teardown = config.teardown || function() {};
assert.eq('function', typeof config.teardown);
config.data = config.data || {};
assert.eq('object', typeof config.data);
- assert.eq(false, config.data.hasOwnProperty('tid'),
- 'data object cannot redefine "tid"');
- assert.eq(false, config.data.hasOwnProperty('iterations'),
+ assert.eq(false, config.data.hasOwnProperty('tid'), 'data object cannot redefine "tid"');
+ assert.eq(false,
+ config.data.hasOwnProperty('iterations'),
'data object cannot redefine "iterations"');
- assert.eq(false, config.data.hasOwnProperty('threadCount'),
+ assert.eq(false,
+ config.data.hasOwnProperty('threadCount'),
'data object cannot redefine "threadCount"');
config.passConnectionCache = config.passConnectionCache || false;
diff --git a/jstests/concurrency/fsm_libs/runner.js b/jstests/concurrency/fsm_libs/runner.js
index 78f7f9b74ee..e580a3891e0 100644
--- a/jstests/concurrency/fsm_libs/runner.js
+++ b/jstests/concurrency/fsm_libs/runner.js
@@ -2,24 +2,22 @@
load('jstests/concurrency/fsm_libs/assert.js');
load('jstests/concurrency/fsm_libs/cluster.js');
-load('jstests/concurrency/fsm_libs/errors.js'); // for IterationEnd
+load('jstests/concurrency/fsm_libs/errors.js'); // for IterationEnd
load('jstests/concurrency/fsm_libs/parse_config.js');
load('jstests/concurrency/fsm_libs/thread_mgr.js');
-load('jstests/concurrency/fsm_utils/name_utils.js'); // for uniqueCollName and uniqueDBName
+load('jstests/concurrency/fsm_utils/name_utils.js'); // for uniqueCollName and uniqueDBName
load('jstests/concurrency/fsm_utils/setup_teardown_functions.js');
var runner = (function() {
function validateExecutionMode(mode) {
- var allowedKeys = [
- 'composed',
- 'parallel'
- ];
+ var allowedKeys = ['composed', 'parallel'];
Object.keys(mode).forEach(function(option) {
- assert.contains(option, allowedKeys,
- 'invalid option: ' + tojson(option) +
- '; valid options are: ' + tojson(allowedKeys));
+ assert.contains(option,
+ allowedKeys,
+ 'invalid option: ' + tojson(option) + '; valid options are: ' +
+ tojson(allowedKeys));
});
mode.composed = mode.composed || false;
@@ -35,12 +33,8 @@ var runner = (function() {
}
function validateExecutionOptions(mode, options) {
- var allowedKeys = [
- 'backgroundWorkloads',
- 'dbNamePrefix',
- 'iterationMultiplier',
- 'threadMultiplier'
- ];
+ var allowedKeys =
+ ['backgroundWorkloads', 'dbNamePrefix', 'iterationMultiplier', 'threadMultiplier'];
if (mode.parallel || mode.composed) {
allowedKeys.push('numSubsets');
@@ -52,14 +46,14 @@ var runner = (function() {
}
Object.keys(options).forEach(function(option) {
- assert.contains(option, allowedKeys,
- 'invalid option: ' + tojson(option) +
- '; valid options are: ' + tojson(allowedKeys));
+ assert.contains(option,
+ allowedKeys,
+ 'invalid option: ' + tojson(option) + '; valid options are: ' +
+ tojson(allowedKeys));
});
if (typeof options.subsetSize !== 'undefined') {
- assert(Number.isInteger(options.subsetSize),
- 'expected subset size to be an integer');
+ assert(Number.isInteger(options.subsetSize), 'expected subset size to be an integer');
assert.gt(options.subsetSize, 1);
}
@@ -86,35 +80,35 @@ var runner = (function() {
'expected backgroundWorkloads to be an array');
if (typeof options.dbNamePrefix !== 'undefined') {
- assert.eq('string', typeof options.dbNamePrefix,
- 'expected dbNamePrefix to be a string');
+ assert.eq(
+ 'string', typeof options.dbNamePrefix, 'expected dbNamePrefix to be a string');
}
options.iterationMultiplier = options.iterationMultiplier || 1;
assert(Number.isInteger(options.iterationMultiplier),
'expected iterationMultiplier to be an integer');
- assert.gte(options.iterationMultiplier, 1,
+ assert.gte(options.iterationMultiplier,
+ 1,
'expected iterationMultiplier to be greater than or equal to 1');
options.threadMultiplier = options.threadMultiplier || 1;
assert(Number.isInteger(options.threadMultiplier),
'expected threadMultiplier to be an integer');
- assert.gte(options.threadMultiplier, 1,
+ assert.gte(options.threadMultiplier,
+ 1,
'expected threadMultiplier to be greater than or equal to 1');
return options;
}
function validateCleanupOptions(options) {
- var allowedKeys = [
- 'dropDatabaseBlacklist',
- 'keepExistingDatabases'
- ];
+ var allowedKeys = ['dropDatabaseBlacklist', 'keepExistingDatabases'];
Object.keys(options).forEach(function(option) {
- assert.contains(option, allowedKeys,
- 'invalid option: ' + tojson(option) +
- '; valid options are: ' + tojson(allowedKeys));
+ assert.contains(option,
+ allowedKeys,
+ 'invalid option: ' + tojson(option) + '; valid options are: ' +
+ tojson(allowedKeys));
});
if (typeof options.dropDatabaseBlacklist !== 'undefined') {
@@ -123,7 +117,8 @@ var runner = (function() {
}
if (typeof options.keepExistingDatabases !== 'undefined') {
- assert.eq('boolean', typeof options.keepExistingDatabases,
+ assert.eq('boolean',
+ typeof options.keepExistingDatabases,
'expected keepExistingDatabases to be a boolean');
}
@@ -139,9 +134,9 @@ var runner = (function() {
* executed simultaneously, followed by workloads #2 and #3 together.
*/
function scheduleWorkloads(workloads, executionMode, executionOptions) {
- if (!executionMode.composed && !executionMode.parallel) { // serial execution
+ if (!executionMode.composed && !executionMode.parallel) { // serial execution
return Array.shuffle(workloads).map(function(workload) {
- return [workload]; // run each workload by itself
+ return [workload]; // run each workload by itself
});
}
@@ -159,7 +154,7 @@ var runner = (function() {
numSubsets = Math.ceil(2.5 * workloads.length / subsetSize);
}
- workloads = workloads.slice(0); // copy
+ workloads = workloads.slice(0); // copy
workloads = Array.shuffle(workloads);
var start = 0;
@@ -196,9 +191,8 @@ var runner = (function() {
workloads.forEach(function(workload) {
// Workloads cannot have a shardKey if sameCollection is specified
- if (clusterOptions.sameCollection &&
- cluster.isSharded() &&
- context[workload].config.data.shardKey) {
+ if (clusterOptions.sameCollection && cluster.isSharded() &&
+ context[workload].config.data.shardKey) {
throw new Error('cannot specify a shardKey with sameCollection option');
}
if (firstWorkload || !clusterOptions.sameCollection) {
@@ -211,7 +205,9 @@ var runner = (function() {
myDB[collName].drop();
if (cluster.isSharded()) {
- var shardKey = context[workload].config.data.shardKey || { _id: 'hashed' };
+ var shardKey = context[workload].config.data.shardKey || {
+ _id: 'hashed'
+ };
// TODO: allow workload config data to specify split
cluster.shardCollection(myDB[collName], shardKey, false);
}
@@ -288,9 +284,11 @@ var runner = (function() {
});
return uniqueStackTraces.map(function(value, i) {
- return { value: value,
- freq: associatedTids[i].size,
- tids: Array.from(associatedTids[i]) };
+ return {
+ value: value,
+ freq: associatedTids[i].size,
+ tids: Array.from(associatedTids[i])
+ };
});
}
@@ -314,25 +312,25 @@ var runner = (function() {
// Special case message when threads all have the same trace
if (numUniqueTraces === 1) {
return pluralize('thread', stackTraces.length) + ' threw\n\n' +
- indent(uniqueTraces[0].value, 8);
+ indent(uniqueTraces[0].value, 8);
}
var summary = pluralize('exception', stackTraces.length) + ' were thrown, ' +
- numUniqueTraces + ' of which were unique:\n\n';
+ numUniqueTraces + ' of which were unique:\n\n';
- return summary + uniqueTraces.map(function(obj) {
- var line = pluralize('thread', obj.freq) +
- ' with tids ' + JSON.stringify(obj.tids) +
- ' threw\n';
- return indent(line + obj.value, 8);
- }).join('\n\n');
+ return summary +
+ uniqueTraces.map(function(obj) {
+ var line = pluralize('thread', obj.freq) + ' with tids ' +
+ JSON.stringify(obj.tids) + ' threw\n';
+ return indent(line + obj.value, 8);
+ }).join('\n\n');
}
if (workerErrs.length > 0) {
var err = new Error(prepareMsg(workerErrs) + '\n');
// Avoid having any stack traces omitted from the logs
- var maxLogLine = 10 * 1024; // 10KB
+ var maxLogLine = 10 * 1024; // 10KB
// Check if the combined length of the error message and the stack traces
// exceeds the maximum line-length the shell will log.
@@ -366,20 +364,16 @@ var runner = (function() {
// This property must be enumerable because of SERVER-21338, which prevents
// objects with non-enumerable properties from being serialized properly in
// ScopedThreads.
- Object.defineProperty(config.data, 'iterations', {
- enumerable: true,
- value: config.iterations
- });
+ Object.defineProperty(
+ config.data, 'iterations', {enumerable: true, value: config.iterations});
}
function setThreadCount(config) {
// This property must be enumerable because of SERVER-21338, which prevents
// objects with non-enumerable properties from being serialized properly in
// ScopedThreads.
- Object.defineProperty(config.data, 'threadCount', {
- enumerable: true,
- value: config.threadCount
- });
+ Object.defineProperty(
+ config.data, 'threadCount', {enumerable: true, value: config.threadCount});
}
function useDropDistLockFailPoint(cluster, clusterOptions) {
@@ -396,9 +390,11 @@ var runner = (function() {
function loadWorkloadContext(workloads, context, executionOptions, applyMultipliers) {
workloads.forEach(function(workload) {
- load(workload); // for $config
+ load(workload); // for $config
assert.neq('undefined', typeof $config, '$config was not defined by ' + workload);
- context[workload] = { config: parseConfig($config) };
+ context[workload] = {
+ config: parseConfig($config)
+ };
if (applyMultipliers) {
context[workload].config.iterations *= executionOptions.iterationMultiplier;
context[workload].config.threadCount *= executionOptions.threadMultiplier;
@@ -422,8 +418,8 @@ var runner = (function() {
jsTest.log('End of schedule');
}
- function cleanupWorkload(workload, context, cluster, errors, header, dbHashBlacklist,
- ttlIndexExists) {
+ function cleanupWorkload(
+ workload, context, cluster, errors, header, dbHashBlacklist, ttlIndexExists) {
// Returns true if the workload's teardown succeeds and false if the workload's
// teardown fails.
@@ -433,8 +429,8 @@ var runner = (function() {
var phase = 'before workload ' + workload + ' teardown';
cluster.checkReplicationConsistency(dbHashBlacklist, phase, ttlIndexExists);
} catch (e) {
- errors.push(new WorkloadFailure(e.toString(), e.stack, 'main',
- header + ' checking consistency on secondaries'));
+ errors.push(new WorkloadFailure(
+ e.toString(), e.stack, 'main', header + ' checking consistency on secondaries'));
return false;
}
@@ -472,12 +468,20 @@ var runner = (function() {
}
}
- function runWorkloadGroup(threadMgr, workloads, context, cluster, clusterOptions, executionMode,
- executionOptions, errors, maxAllowedThreads, dbHashBlacklist,
+ function runWorkloadGroup(threadMgr,
+ workloads,
+ context,
+ cluster,
+ clusterOptions,
+ executionMode,
+ executionOptions,
+ errors,
+ maxAllowedThreads,
+ dbHashBlacklist,
configServerData) {
var cleanup = [];
var teardownFailed = false;
- var startTime = Date.now(); // Initialize in case setupWorkload fails below.
+ var startTime = Date.now(); // Initialize in case setupWorkload fails below.
var totalTime;
var ttlIndexExists;
@@ -512,9 +516,9 @@ var runner = (function() {
} finally {
// Threads must be joined before destruction, so do this
// even in the presence of exceptions.
- errors.push(...threadMgr.joinAll().map(e =>
- new WorkloadFailure(e.err, e.stack, e.tid,
- 'Foreground ' + e.workloads.join(' '))));
+ errors.push(... threadMgr.joinAll().map(
+ e => new WorkloadFailure(
+ e.err, e.stack, e.tid, 'Foreground ' + e.workloads.join(' '))));
}
} finally {
// Checking that the data is consistent across the primary and secondaries requires
@@ -522,19 +526,22 @@ var runner = (function() {
// primary due to the TTL monitor. If none of the workloads actually created any TTL
// indexes (and we dropped the data of any previous workloads), then don't expend any
// additional effort in trying to handle that case.
- ttlIndexExists = workloads.some(workload =>
- context[workload].config.data.ttlIndexExists);
+ ttlIndexExists =
+ workloads.some(workload => context[workload].config.data.ttlIndexExists);
// Call each foreground workload's teardown function. After all teardowns have completed
// check if any of them failed.
- var cleanupResults = cleanup.map(workload =>
- cleanupWorkload(workload, context, cluster, errors,
- 'Foreground', dbHashBlacklist, ttlIndexExists));
+ var cleanupResults = cleanup.map(workload => cleanupWorkload(workload,
+ context,
+ cluster,
+ errors,
+ 'Foreground',
+ dbHashBlacklist,
+ ttlIndexExists));
teardownFailed = cleanupResults.some(success => (success === false));
totalTime = Date.now() - startTime;
- jsTest.log('Workload(s) completed in ' + totalTime + ' ms: ' +
- workloads.join(' '));
+ jsTest.log('Workload(s) completed in ' + totalTime + ' ms: ' + workloads.join(' '));
recordConfigServerData(cluster, workloads, configServerData, errors);
}
@@ -552,25 +559,21 @@ var runner = (function() {
ttlIndexExists = false;
// Ensure that all operations replicated correctly to the secondaries.
- cluster.checkReplicationConsistency(dbHashBlacklist,
- 'after workload-group teardown and data clean-up',
- ttlIndexExists);
+ cluster.checkReplicationConsistency(
+ dbHashBlacklist, 'after workload-group teardown and data clean-up', ttlIndexExists);
}
- function runWorkloads(workloads,
- clusterOptions,
- executionMode,
- executionOptions,
- cleanupOptions) {
+ function runWorkloads(
+ workloads, clusterOptions, executionMode, executionOptions, cleanupOptions) {
assert.gt(workloads.length, 0, 'need at least one workload to run');
executionMode = validateExecutionMode(executionMode);
- Object.freeze(executionMode); // immutable after validation (and normalization)
+ Object.freeze(executionMode); // immutable after validation (and normalization)
validateExecutionOptions(executionMode, executionOptions);
- Object.freeze(executionOptions); // immutable after validation (and normalization)
+ Object.freeze(executionOptions); // immutable after validation (and normalization)
- Object.freeze(cleanupOptions); // immutable prior to validation
+ Object.freeze(cleanupOptions); // immutable prior to validation
validateCleanupOptions(cleanupOptions);
if (executionMode.composed) {
@@ -599,8 +602,7 @@ var runner = (function() {
var bgContext = {};
var bgWorkloads = executionOptions.backgroundWorkloads;
- loadWorkloadContext(bgWorkloads, bgContext, executionOptions,
- false /* applyMultipliers */);
+ loadWorkloadContext(bgWorkloads, bgContext, executionOptions, false /* applyMultipliers */);
var bgThreadMgr = new ThreadManager(clusterOptions);
var cluster = new Cluster(clusterOptions);
@@ -619,8 +621,8 @@ var runner = (function() {
var dbHashBlacklist = ['local'];
if (cleanupOptions.dropDatabaseBlacklist) {
- dbBlacklist.push(...cleanupOptions.dropDatabaseBlacklist);
- dbHashBlacklist.push(...cleanupOptions.dropDatabaseBlacklist);
+ dbBlacklist.push(... cleanupOptions.dropDatabaseBlacklist);
+ dbHashBlacklist.push(... cleanupOptions.dropDatabaseBlacklist);
}
if (!cleanupOptions.keepExistingDatabases) {
dropAllDatabases(cluster.getDB('test'), dbBlacklist);
@@ -661,7 +663,7 @@ var runner = (function() {
schedule.forEach(function(workloads) {
// Check if any background workloads have failed.
- if (bgThreadMgr.checkForErrors()){
+ if (bgThreadMgr.checkForErrors()) {
var msg = 'Background workload failed before all foreground workloads ran';
throw new IterationEnd(msg);
}
@@ -677,16 +679,24 @@ var runner = (function() {
});
// Run the next group of workloads in the schedule.
- runWorkloadGroup(threadMgr, workloads, groupContext, cluster, clusterOptions,
- executionMode, executionOptions, errors, maxAllowedThreads,
- dbHashBlacklist, configServerData);
+ runWorkloadGroup(threadMgr,
+ workloads,
+ groupContext,
+ cluster,
+ clusterOptions,
+ executionMode,
+ executionOptions,
+ errors,
+ maxAllowedThreads,
+ dbHashBlacklist,
+ configServerData);
});
} finally {
// Set a flag so background threads know to terminate.
bgThreadMgr.markAllForTermination();
- errors.push(...bgThreadMgr.joinAll().map(e =>
- new WorkloadFailure(e.err, e.stack, e.tid,
- 'Background ' + e.workloads.join(' '))));
+ errors.push(... bgThreadMgr.joinAll().map(
+ e => new WorkloadFailure(
+ e.err, e.stack, e.tid, 'Background ' + e.workloads.join(' '))));
}
} finally {
try {
@@ -695,13 +705,17 @@ var runner = (function() {
// primary due to the TTL monitor. If none of the workloads actually created any TTL
// indexes (and we dropped the data of any previous workloads), then don't expend
// any additional effort in trying to handle that case.
- var ttlIndexExists = bgWorkloads.some(bgWorkload =>
- bgContext[bgWorkload].config.data.ttlIndexExists);
+ var ttlIndexExists = bgWorkloads.some(
+ bgWorkload => bgContext[bgWorkload].config.data.ttlIndexExists);
// Call each background workload's teardown function.
- bgCleanup.forEach(bgWorkload => cleanupWorkload(bgWorkload, bgContext, cluster,
- errors, 'Background',
- dbHashBlacklist, ttlIndexExists));
+ bgCleanup.forEach(bgWorkload => cleanupWorkload(bgWorkload,
+ bgContext,
+ cluster,
+ errors,
+ 'Background',
+ dbHashBlacklist,
+ ttlIndexExists));
// TODO: Call cleanupWorkloadData() on background workloads here if no background
// workload teardown functions fail.
@@ -736,11 +750,8 @@ var runner = (function() {
executionOptions = executionOptions || {};
cleanupOptions = cleanupOptions || {};
- runWorkloads(workloads,
- clusterOptions,
- { parallel: true },
- executionOptions,
- cleanupOptions);
+ runWorkloads(
+ workloads, clusterOptions, {parallel: true}, executionOptions, cleanupOptions);
},
composed: function composed(workloads, clusterOptions, executionOptions, cleanupOptions) {
@@ -748,11 +759,8 @@ var runner = (function() {
executionOptions = executionOptions || {};
cleanupOptions = cleanupOptions || {};
- runWorkloads(workloads,
- clusterOptions,
- { composed: true },
- executionOptions,
- cleanupOptions);
+ runWorkloads(
+ workloads, clusterOptions, {composed: true}, executionOptions, cleanupOptions);
}
};
diff --git a/jstests/concurrency/fsm_libs/thread_mgr.js b/jstests/concurrency/fsm_libs/thread_mgr.js
index 6952719c173..283da7ab7ee 100644
--- a/jstests/concurrency/fsm_libs/thread_mgr.js
+++ b/jstests/concurrency/fsm_libs/thread_mgr.js
@@ -1,13 +1,13 @@
'use strict';
-load('jstests/libs/parallelTester.js'); // for ScopedThread and CountDownLatch
-load('jstests/concurrency/fsm_libs/worker_thread.js'); // for workerThread
+load('jstests/libs/parallelTester.js'); // for ScopedThread and CountDownLatch
+load('jstests/concurrency/fsm_libs/worker_thread.js'); // for workerThread
/**
* Helper for spawning and joining worker threads.
*/
-var ThreadManager = function(clusterOptions, executionMode = { composed: false }) {
+var ThreadManager = function(clusterOptions, executionMode = {composed: false}) {
if (!(this instanceof ThreadManager)) {
return new ThreadManager(clusterOptions, executionMode);
}
@@ -28,12 +28,11 @@ var ThreadManager = function(clusterOptions, executionMode = { composed: false }
};
if (executionMode.composed) {
- return new ScopedThread(guardedThreadFn, workerThread.composed,
- workloads, args, options);
+ return new ScopedThread(
+ guardedThreadFn, workerThread.composed, workloads, args, options);
}
- return new ScopedThread(guardedThreadFn, workerThread.fsm,
- workloads, args, options);
+ return new ScopedThread(guardedThreadFn, workerThread.fsm, workloads, args, options);
}
var latch;
@@ -46,10 +45,11 @@ var ThreadManager = function(clusterOptions, executionMode = { composed: false }
var _workloads, _context;
this.init = function init(workloads, context, maxAllowedThreads) {
- assert.eq('number', typeof maxAllowedThreads,
- 'the maximum allowed threads must be a number');
+ assert.eq(
+ 'number', typeof maxAllowedThreads, 'the maximum allowed threads must be a number');
assert.gt(maxAllowedThreads, 0, 'the maximum allowed threads must be positive');
- assert.eq(maxAllowedThreads, Math.floor(maxAllowedThreads),
+ assert.eq(maxAllowedThreads,
+ Math.floor(maxAllowedThreads),
'the maximum allowed threads must be an integer');
function computeNumThreads() {
@@ -71,7 +71,7 @@ var ThreadManager = function(clusterOptions, executionMode = { composed: false }
var config = context[workload].config;
var threadCount = config.threadCount;
threadCount = Math.floor(factor * threadCount);
- threadCount = Math.max(1, threadCount); // ensure workload is executed
+ threadCount = Math.max(1, threadCount); // ensure workload is executed
config.threadCount = threadCount;
});
}
@@ -82,8 +82,8 @@ var ThreadManager = function(clusterOptions, executionMode = { composed: false }
errorLatch = new CountDownLatch(numThreads);
var plural = numThreads === 1 ? '' : 's';
- print('Using ' + numThreads + ' thread' + plural +
- ' (requested ' + requestedNumThreads + ')');
+ print('Using ' + numThreads + ' thread' + plural + ' (requested ' + requestedNumThreads +
+ ')');
_workloads = workloads;
_context = context;
@@ -101,9 +101,9 @@ var ThreadManager = function(clusterOptions, executionMode = { composed: false }
_workloads.forEach(function(workload) {
var config = _context[workload].config;
workloadData[workload] = config.data;
- var workloads = [workload]; // worker thread only needs to load 'workload'
+ var workloads = [workload]; // worker thread only needs to load 'workload'
if (executionMode.composed) {
- workloads = _workloads; // worker thread needs to load all workloads
+ workloads = _workloads; // worker thread needs to load all workloads
}
for (var i = 0; i < config.threadCount; ++i) {
@@ -116,7 +116,7 @@ var ThreadManager = function(clusterOptions, executionMode = { composed: false }
collName: _context[workload].collName,
cluster: cluster.getSerializedCluster(),
clusterOptions: clusterOptions,
- seed: Random.randInt(1e13), // contains range of Date.getTime()
+ seed: Random.randInt(1e13), // contains range of Date.getTime()
globalAssertLevel: globalAssertLevel,
errorLatch: errorLatch
};
@@ -197,7 +197,7 @@ var ThreadManager = function(clusterOptions, executionMode = { composed: false }
// 'config' database for a document specifying { terminate: true }. If such a
// document is found the background thread terminates.
var coll = _context[_workloads[0]].db.getSiblingDB('config').fsm_background;
- assert.writeOK(coll.update({ terminate: true }, { terminate: true }, { upsert: true }));
+ assert.writeOK(coll.update({terminate: true}, {terminate: true}, {upsert: true}));
};
};
@@ -208,21 +208,25 @@ var ThreadManager = function(clusterOptions, executionMode = { composed: false }
*/
workerThread.fsm = function(workloads, args, options) {
- load('jstests/concurrency/fsm_libs/worker_thread.js'); // for workerThread.main
- load('jstests/concurrency/fsm_libs/fsm.js'); // for fsm.run
-
- return workerThread.main(workloads, args, function(configs) {
- var workloads = Object.keys(configs);
- assert.eq(1, workloads.length);
- fsm.run(configs[workloads[0]]);
- });
+ load('jstests/concurrency/fsm_libs/worker_thread.js'); // for workerThread.main
+ load('jstests/concurrency/fsm_libs/fsm.js'); // for fsm.run
+
+ return workerThread.main(workloads,
+ args,
+ function(configs) {
+ var workloads = Object.keys(configs);
+ assert.eq(1, workloads.length);
+ fsm.run(configs[workloads[0]]);
+ });
};
workerThread.composed = function(workloads, args, options) {
- load('jstests/concurrency/fsm_libs/worker_thread.js'); // for workerThread.main
- load('jstests/concurrency/fsm_libs/composer.js'); // for composer.run
-
- return workerThread.main(workloads, args, function(configs) {
- composer.run(workloads, configs, options);
- });
+ load('jstests/concurrency/fsm_libs/worker_thread.js'); // for workerThread.main
+ load('jstests/concurrency/fsm_libs/composer.js'); // for composer.run
+
+ return workerThread.main(workloads,
+ args,
+ function(configs) {
+ composer.run(workloads, configs, options);
+ });
};
diff --git a/jstests/concurrency/fsm_libs/worker_thread.js b/jstests/concurrency/fsm_libs/worker_thread.js
index 53c923c26ec..64c7750a6f4 100644
--- a/jstests/concurrency/fsm_libs/worker_thread.js
+++ b/jstests/concurrency/fsm_libs/worker_thread.js
@@ -1,8 +1,8 @@
'use strict';
load('jstests/concurrency/fsm_libs/assert.js');
-load('jstests/concurrency/fsm_libs/cluster.js'); // for Cluster.isStandalone
-load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig
+load('jstests/concurrency/fsm_libs/cluster.js'); // for Cluster.isStandalone
+load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig
var workerThread = (function() {
@@ -40,8 +40,8 @@ var workerThread = (function() {
}
workloads.forEach(function(workload) {
- load(workload); // for $config
- var config = parseConfig($config); // to normalize
+ load(workload); // for $config
+ var config = parseConfig($config); // to normalize
// Copy any modifications that were made to $config.data
// during the setup function of the workload (see caveat
@@ -62,20 +62,21 @@ var workerThread = (function() {
// configurable, enumerable, and writable. To prevent workloads from changing
// the iterations and threadCount properties in their state functions, we redefine
// them here as non-configurable, non-enumerable, and non-writable.
- Object.defineProperties(data, {
- 'iterations': {
- configurable: false,
- enumerable: false,
- writable: false,
- value: data.iterations
- },
- 'threadCount': {
- configurable: false,
- enumerable: false,
- writable: false,
- value: data.threadCount
- }
- });
+ Object.defineProperties(data,
+ {
+ 'iterations': {
+ configurable: false,
+ enumerable: false,
+ writable: false,
+ value: data.iterations
+ },
+ 'threadCount': {
+ configurable: false,
+ enumerable: false,
+ writable: false,
+ value: data.threadCount
+ }
+ });
data.tid = args.tid;
configs[workload] = {
@@ -98,12 +99,14 @@ var workerThread = (function() {
// an exception. Nothing prior to (and including) args.latch.countDown()
// should be wrapped in a try/catch statement.
try {
- args.latch.await(); // wait for all threads to start
+ args.latch.await(); // wait for all threads to start
Random.setRandomSeed(args.seed);
run(configs);
- return { ok: 1 };
- } catch(e) {
+ return {
+ ok: 1
+ };
+ } catch (e) {
args.errorLatch.countDown();
return {
ok: 0,
diff --git a/jstests/concurrency/fsm_selftests.js b/jstests/concurrency/fsm_selftests.js
index 7314a9d63be..686a6d286e7 100644
--- a/jstests/concurrency/fsm_selftests.js
+++ b/jstests/concurrency/fsm_selftests.js
@@ -17,19 +17,25 @@ load('jstests/concurrency/fsm_libs/fsm.js');
// NOTE: getWeightedRandomChoice calls assert internally, so it will print stack traces
// when assert.throws executes
- assert.throws(function() { getWeightedRandomChoice(doc, -1); }, [],
- 'should reject negative values');
- assert.throws(function() { getWeightedRandomChoice(doc, 1); }, [],
- 'should reject values == 1');
- assert.throws(function() { getWeightedRandomChoice(doc, 2); }, [],
- 'should reject values > 1');
+ assert.throws(function() {
+ getWeightedRandomChoice(doc, -1);
+ }, [], 'should reject negative values');
+ assert.throws(function() {
+ getWeightedRandomChoice(doc, 1);
+ }, [], 'should reject values == 1');
+ assert.throws(function() {
+ getWeightedRandomChoice(doc, 2);
+ }, [], 'should reject values > 1');
- assert.throws(function() { getWeightedRandomChoice({}, 0.0); }, [],
- 'cannot choose from zero states');
- assert.throws(function() { getWeightedRandomChoice({}, 0.5); }, [],
- 'cannot choose from zero states');
- assert.throws(function() { getWeightedRandomChoice({}, 0.99); }, [],
- 'cannot choose from zero states');
+ assert.throws(function() {
+ getWeightedRandomChoice({}, 0.0);
+ }, [], 'cannot choose from zero states');
+ assert.throws(function() {
+ getWeightedRandomChoice({}, 0.5);
+ }, [], 'cannot choose from zero states');
+ assert.throws(function() {
+ getWeightedRandomChoice({}, 0.99);
+ }, [], 'cannot choose from zero states');
assert.eq('a', getWeightedRandomChoice(doc, 0.00), '0');
assert.eq('a', getWeightedRandomChoice(doc, 0.24), '1');
diff --git a/jstests/concurrency/fsm_utils/setup_teardown_functions.js b/jstests/concurrency/fsm_utils/setup_teardown_functions.js
index 295ab25676c..790bcc93856 100644
--- a/jstests/concurrency/fsm_utils/setup_teardown_functions.js
+++ b/jstests/concurrency/fsm_utils/setup_teardown_functions.js
@@ -8,19 +8,17 @@
*/
var increaseDropDistLockTimeout = function increaseDropDistLockTimeout(db) {
- var waitTimeSecs = 10 * 60; // 10 minutes
+ var waitTimeSecs = 10 * 60; // 10 minutes
assert.commandWorked(db.runCommand({
configureFailPoint: 'setDropCollDistLockWait',
mode: 'alwaysOn',
- data: { waitForSecs: waitTimeSecs }
+ data: {waitForSecs: waitTimeSecs}
}));
};
var resetDropDistLockTimeout = function resetDropDistLockTimeout(db) {
- assert.commandWorked(db.runCommand({
- configureFailPoint: 'setDropCollDistLockWait',
- mode: 'off'
- }));
+ assert.commandWorked(
+ db.runCommand({configureFailPoint: 'setDropCollDistLockWait', mode: 'off'}));
};
var setYieldAllLocksFailPoint = function setYieldAllLocksFailPoint(db) {
@@ -28,13 +26,10 @@ var setYieldAllLocksFailPoint = function setYieldAllLocksFailPoint(db) {
assert.commandWorked(db.runCommand({
configureFailPoint: 'setYieldAllLocksWait',
mode: 'alwaysOn',
- data: { waitForMillis: waitTimeMillis }
+ data: {waitForMillis: waitTimeMillis}
}));
};
var resetYieldAllLocksFailPoint = function resetYieldAllLocksFailPoint(db) {
- assert.commandWorked(db.runCommand({
- configureFailPoint: 'setYieldAllLocksWait',
- mode: 'off'
- }));
+ assert.commandWorked(db.runCommand({configureFailPoint: 'setYieldAllLocksWait', mode: 'off'}));
};
diff --git a/jstests/concurrency/fsm_workload_helpers/drop_utils.js b/jstests/concurrency/fsm_workload_helpers/drop_utils.js
index e7f373d7067..21f9c5548cd 100644
--- a/jstests/concurrency/fsm_workload_helpers/drop_utils.js
+++ b/jstests/concurrency/fsm_workload_helpers/drop_utils.js
@@ -8,11 +8,13 @@
function dropCollections(db, pattern) {
assert(pattern instanceof RegExp, 'expected pattern to be a regular expression');
- db.getCollectionInfos().filter(function(collInfo) {
- return pattern.test(collInfo.name);
- }).forEach(function(collInfo) {
- assertAlways(db[collInfo.name].drop());
- });
+ db.getCollectionInfos()
+ .filter(function(collInfo) {
+ return pattern.test(collInfo.name);
+ })
+ .forEach(function(collInfo) {
+ assertAlways(db[collInfo.name].drop());
+ });
}
function dropDatabases(db, pattern) {
diff --git a/jstests/concurrency/fsm_workload_helpers/indexed_noindex.js b/jstests/concurrency/fsm_workload_helpers/indexed_noindex.js
index 47f22e0daa3..9ae4c5dea85 100644
--- a/jstests/concurrency/fsm_workload_helpers/indexed_noindex.js
+++ b/jstests/concurrency/fsm_workload_helpers/indexed_noindex.js
@@ -5,7 +5,6 @@
* $config.data.getIndexSpec(), at the end of the workload setup.
*/
function indexedNoindex($config, $super) {
-
$config.setup = function(db, collName, cluster) {
$super.setup.apply(this, arguments);
diff --git a/jstests/concurrency/fsm_workload_helpers/server_types.js b/jstests/concurrency/fsm_workload_helpers/server_types.js
index e5f9b648c25..3a5a6930bce 100644
--- a/jstests/concurrency/fsm_workload_helpers/server_types.js
+++ b/jstests/concurrency/fsm_workload_helpers/server_types.js
@@ -28,10 +28,9 @@ function getStorageEngineName(db) {
var status = db.serverStatus();
assert.commandWorked(status);
- assert(isMongod(db),
- 'no storage engine is reported when connected to mongos');
- assert.neq('undefined', typeof status.storageEngine,
- 'missing storage engine info in server status');
+ assert(isMongod(db), 'no storage engine is reported when connected to mongos');
+ assert.neq(
+ 'undefined', typeof status.storageEngine, 'missing storage engine info in server status');
return status.storageEngine.name;
}
diff --git a/jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js b/jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js
index b4d61410c4a..9332ef4f7c6 100644
--- a/jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js
+++ b/jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js
@@ -9,7 +9,6 @@
*/
function dropAllIndexes($config, $super) {
-
$config.setup = function setup(db, collName, cluster) {
var oldIndexes = db[collName].getIndexes().map(function(ixSpec) {
return ixSpec.name;
diff --git a/jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js b/jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js
index 1a3f5ef03dd..6a92bd9b5bb 100644
--- a/jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js
+++ b/jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js
@@ -7,7 +7,6 @@
* $config.data.getIndexSpec(), at the end of the workload setup.
*/
function indexedNoindex($config, $super) {
-
$config.setup = function(db, collName, cluster) {
$super.setup.apply(this, arguments);
@@ -16,7 +15,6 @@ function indexedNoindex($config, $super) {
this.indexExists = false;
};
-
// Remove the shard key for the no index tests
delete $config.data.shardKey;
diff --git a/jstests/concurrency/fsm_workload_modifiers/make_capped.js b/jstests/concurrency/fsm_workload_modifiers/make_capped.js
index abe6ebc8264..00f68964531 100644
--- a/jstests/concurrency/fsm_workload_modifiers/make_capped.js
+++ b/jstests/concurrency/fsm_workload_modifiers/make_capped.js
@@ -14,14 +14,14 @@
*/
function makeCapped($config, $super) {
-
$config.setup = function setup(db, collName, cluster) {
assertWhenOwnColl(function() {
db[collName].drop();
- assertAlways.commandWorked(db.createCollection(collName, {
- capped: true,
- size: 16384 // bytes
- }));
+ assertAlways.commandWorked(db.createCollection(collName,
+ {
+ capped: true,
+ size: 16384 // bytes
+ }));
});
$super.setup.apply(this, arguments);
diff --git a/jstests/concurrency/fsm_workloads/agg_base.js b/jstests/concurrency/fsm_workloads/agg_base.js
index 3ce16aaea31..846e6900215 100644
--- a/jstests/concurrency/fsm_workloads/agg_base.js
+++ b/jstests/concurrency/fsm_workloads/agg_base.js
@@ -31,8 +31,8 @@ var $config = (function() {
// overhead
doc.padding = "";
var paddingLength = size - Object.bsonsize(doc);
- assertAlways.lte(0, paddingLength,
- 'document is already bigger than ' + size + ' bytes: ' + tojson(doc));
+ assertAlways.lte(
+ 0, paddingLength, 'document is already bigger than ' + size + ' bytes: ' + tojson(doc));
doc.padding = getStringOfLength(paddingLength);
assertAlways.eq(size, Object.bsonsize(doc));
return doc;
@@ -46,7 +46,7 @@ var $config = (function() {
};
var transitions = {
- query: { query: 1 }
+ query: {query: 1}
};
function setup(db, collName, cluster) {
@@ -55,18 +55,20 @@ var $config = (function() {
for (var i = 0; i < this.numDocs; ++i) {
// note: padDoc caches the large string after allocating it once, so it's ok to call it
// in this loop
- bulk.insert(padDoc({
- flag: i % 2 ? true : false,
- rand: Random.rand(),
- randInt: Random.randInt(this.numDocs)
- }, this.docSize));
+ bulk.insert(padDoc(
+ {
+ flag: i % 2 ? true : false,
+ rand: Random.rand(),
+ randInt: Random.randInt(this.numDocs)
+ },
+ this.docSize));
}
var res = bulk.execute();
assertWhenOwnColl.writeOK(res);
assertWhenOwnColl.eq(this.numDocs, res.nInserted);
assertWhenOwnColl.eq(this.numDocs, db[collName].find().itcount());
- assertWhenOwnColl.eq(this.numDocs / 2, db[collName].find({ flag: false }).itcount());
- assertWhenOwnColl.eq(this.numDocs / 2, db[collName].find({ flag: true }).itcount());
+ assertWhenOwnColl.eq(this.numDocs / 2, db[collName].find({flag: false}).itcount());
+ assertWhenOwnColl.eq(this.numDocs / 2, db[collName].find({flag: true}).itcount());
}
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/agg_group_external.js b/jstests/concurrency/fsm_workloads/agg_group_external.js
index 3c3cf973434..38c47d79f13 100644
--- a/jstests/concurrency/fsm_workloads/agg_group_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_group_external.js
@@ -8,47 +8,47 @@
* The data passed to the $group is greater than 100MB, which should force
* disk to be used.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-
-var $config = extendWorkload($config, function($config, $super) {
-
- // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
- $config.data.numDocs = 24 * 1000;
- var MB = 1024 * 1024; // bytes
- assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize);
-
- // assume no other workload will manipulate collections with this prefix
- $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
- return collName + '_out_agg_group_external_';
- };
-
- $config.states.query = function query(db, collName) {
- var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
- var cursor = db[collName].aggregate([
- { $group: { _id: '$randInt', count: { $sum: 1 } } },
- { $out: otherCollName }
- ], {
- allowDiskUse: true
- });
- assertAlways.eq(0, cursor.itcount());
- assertWhenOwnColl(function() {
- // sum the .count fields in the output coll
- var sum = db[otherCollName].aggregate([
- { $group: { _id: null, totalCount: { $sum: '$count' } } }
- ]).toArray()[0].totalCount;
- assertWhenOwnColl.eq(this.numDocs, sum);
- }.bind(this));
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- $super.teardown.apply(this, arguments);
-
- // drop all collections with this workload's assumed-to-be-unique prefix
- // NOTE: assumes the prefix contains no special regex chars
- dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
+ $config.data.numDocs = 24 * 1000;
+ var MB = 1024 * 1024; // bytes
+ assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize);
+
+ // assume no other workload will manipulate collections with this prefix
+ $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
+ return collName + '_out_agg_group_external_';
+ };
+
+ $config.states.query = function query(db, collName) {
+ var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
+ var cursor = db[collName].aggregate(
+ [{$group: {_id: '$randInt', count: {$sum: 1}}}, {$out: otherCollName}],
+ {allowDiskUse: true});
+ assertAlways.eq(0, cursor.itcount());
+ assertWhenOwnColl(function() {
+ // sum the .count fields in the output coll
+ var sum = db[otherCollName]
+ .aggregate([{$group: {_id: null, totalCount: {$sum: '$count'}}}])
+ .toArray()[0]
+ .totalCount;
+ assertWhenOwnColl.eq(this.numDocs, sum);
+ }.bind(this));
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ $super.teardown.apply(this, arguments);
+
+ // drop all collections with this workload's assumed-to-be-unique prefix
+ // NOTE: assumes the prefix contains no special regex chars
+ dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/agg_match.js b/jstests/concurrency/fsm_workloads/agg_match.js
index 00e23a24c03..d93c4cdddd5 100644
--- a/jstests/concurrency/fsm_workloads/agg_match.js
+++ b/jstests/concurrency/fsm_workloads/agg_match.js
@@ -5,34 +5,34 @@
*
* Runs an aggregation with a $match that returns half the documents.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.data.getOutCollName = function getOutCollName(collName) {
- return collName + '_out_agg_match';
- };
+ $config.data.getOutCollName = function getOutCollName(collName) {
+ return collName + '_out_agg_match';
+ };
- $config.states.query = function query(db, collName) {
- // note that all threads output to the same collection
- var otherCollName = this.getOutCollName(collName);
- var cursor = db[collName].aggregate([
- { $match: { flag: true } },
- { $out: otherCollName }
- ]);
- assertAlways.eq(0, cursor.itcount(), 'cursor returned by $out should always be empty');
- // NOTE: This relies on the fast-path for .count() with no query being isolated.
- // NOTE: There's a bug, SERVER-3645, where .count() is wrong on sharded collections, so we
- // blacklisted this test for sharded clusters.
- assertWhenOwnColl.eq(db[collName].count() / 2, db[otherCollName].count());
- };
+ $config.states.query = function query(db, collName) {
+ // note that all threads output to the same collection
+ var otherCollName = this.getOutCollName(collName);
+ var cursor = db[collName].aggregate([{$match: {flag: true}}, {$out: otherCollName}]);
+ assertAlways.eq(0, cursor.itcount(), 'cursor returned by $out should always be empty');
+ // NOTE: This relies on the fast-path for .count() with no query being isolated.
+ // NOTE: There's a bug, SERVER-3645, where .count() is wrong on sharded collections, so
+ // we
+ // blacklisted this test for sharded clusters.
+ assertWhenOwnColl.eq(db[collName].count() / 2, db[otherCollName].count());
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- $super.teardown.apply(this, arguments);
+ $config.teardown = function teardown(db, collName, cluster) {
+ $super.teardown.apply(this, arguments);
- assertWhenOwnColl(db[this.getOutCollName(collName)].drop());
- };
+ assertWhenOwnColl(db[this.getOutCollName(collName)].drop());
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/agg_sort.js b/jstests/concurrency/fsm_workloads/agg_sort.js
index 2f312e0adda..03de9a1aeea 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort.js
@@ -6,34 +6,34 @@
* Runs an aggregation with a $match that returns half the documents followed
* by a $sort on a field containing a random float.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
- return collName + '_out_agg_sort_';
- };
+ $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
+ return collName + '_out_agg_sort_';
+ };
- $config.states.query = function query(db, collName) {
- var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
- var cursor = db[collName].aggregate([
- { $match: { flag: true } },
- { $sort: { rand: 1 } },
- { $out: otherCollName }
- ]);
- assertAlways.eq(0, cursor.itcount());
- assertWhenOwnColl.eq(db[collName].find().itcount() / 2, db[otherCollName].find().itcount());
- };
+ $config.states.query = function query(db, collName) {
+ var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
+ var cursor = db[collName].aggregate(
+ [{$match: {flag: true}}, {$sort: {rand: 1}}, {$out: otherCollName}]);
+ assertAlways.eq(0, cursor.itcount());
+ assertWhenOwnColl.eq(db[collName].find().itcount() / 2,
+ db[otherCollName].find().itcount());
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- $super.teardown.apply(this, arguments);
+ $config.teardown = function teardown(db, collName, cluster) {
+ $super.teardown.apply(this, arguments);
- // drop all collections with this workload's assumed-to-be-unique prefix
- // NOTE: assumes the prefix contains no special regex chars
- dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
- };
+ // drop all collections with this workload's assumed-to-be-unique prefix
+ // NOTE: assumes the prefix contains no special regex chars
+ dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/agg_sort_external.js b/jstests/concurrency/fsm_workloads/agg_sort_external.js
index 161f7592d08..c2bda97e8cd 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort_external.js
@@ -8,43 +8,44 @@
*
* The data returned by the $match is greater than 100MB, which should force an external sort.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-
-var $config = extendWorkload($config, function($config, $super) {
-
- // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
- $config.data.numDocs = 24 * 1000;
- var MB = 1024 * 1024; // bytes
- // assert that *half* the docs exceed the in-memory limit, because the $match stage will only
- // pass half the docs in the collection on to the $sort stage.
- assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize / 2);
-
- $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
- return collName + '_out_agg_sort_external_';
- };
-
- $config.states.query = function query(db, collName) {
- var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
- var cursor = db[collName].aggregate([
- { $match: { flag: true } },
- { $sort: { rand: 1 } },
- { $out: otherCollName }
- ], {
- allowDiskUse: true
- });
- assertAlways.eq(0, cursor.itcount());
- assertWhenOwnColl.eq(db[collName].find().itcount() / 2, db[otherCollName].find().itcount());
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- $super.teardown.apply(this, arguments);
-
- // drop all collections with this workload's assumed-to-be-unique prefix
- // NOTE: assumes the prefix contains no special regex chars
- dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
+ $config.data.numDocs = 24 * 1000;
+ var MB = 1024 * 1024; // bytes
+ // assert that *half* the docs exceed the in-memory limit, because the $match stage will
+ // only
+ // pass half the docs in the collection on to the $sort stage.
+ assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize / 2);
+
+ $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
+ return collName + '_out_agg_sort_external_';
+ };
+
+ $config.states.query = function query(db, collName) {
+ var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
+ var cursor =
+ db[collName]
+ .aggregate([{$match: {flag: true}}, {$sort: {rand: 1}}, {$out: otherCollName}],
+ {allowDiskUse: true});
+ assertAlways.eq(0, cursor.itcount());
+ assertWhenOwnColl.eq(db[collName].find().itcount() / 2,
+ db[otherCollName].find().itcount());
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ $super.teardown.apply(this, arguments);
+
+ // drop all collections with this workload's assumed-to-be-unique prefix
+ // NOTE: assumes the prefix contains no special regex chars
+ dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/auth_create_role.js b/jstests/concurrency/fsm_workloads/auth_create_role.js
index 6ad1573cb5a..8b8d3933c2d 100644
--- a/jstests/concurrency/fsm_workloads/auth_create_role.js
+++ b/jstests/concurrency/fsm_workloads/auth_create_role.js
@@ -5,7 +5,7 @@
*
* Repeatedly creates new roles on a database.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
var $config = (function() {
@@ -29,15 +29,9 @@ var $config = (function() {
var roleName = uniqueRoleName(this.prefix, this.tid, this.num++);
db.createRole({
role: roleName,
- privileges: [
- {
- resource: { db: db.getName(), collection: collName },
- actions: ['update']
- }
- ],
- roles: [
- { role: 'read', db: db.getName() }
- ]
+ privileges:
+ [{resource: {db: db.getName(), collection: collName}, actions: ['update']}],
+ roles: [{role: 'read', db: db.getName()}]
});
// Verify the newly created role exists, as well as all previously created roles
@@ -58,8 +52,8 @@ var $config = (function() {
})();
var transitions = {
- init: { createRole: 1 },
- createRole: { createRole: 1 }
+ init: {createRole: 1},
+ createRole: {createRole: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/auth_create_user.js b/jstests/concurrency/fsm_workloads/auth_create_user.js
index 7fe71f006fb..e49c63bc68e 100644
--- a/jstests/concurrency/fsm_workloads/auth_create_user.js
+++ b/jstests/concurrency/fsm_workloads/auth_create_user.js
@@ -5,7 +5,7 @@
*
* Repeatedly creates new users on a database.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropUsers
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropUsers
var $config = (function() {
@@ -27,11 +27,7 @@ var $config = (function() {
function createUser(db, collName) {
var username = uniqueUsername(this.prefix, this.tid, this.num++);
- db.createUser({
- user: username,
- pwd: 'password',
- roles: ['readWrite', 'dbAdmin']
- });
+ db.createUser({user: username, pwd: 'password', roles: ['readWrite', 'dbAdmin']});
// Verify the newly created user exists, as well as all previously created users
for (var i = 0; i < this.num; ++i) {
@@ -51,8 +47,8 @@ var $config = (function() {
})();
var transitions = {
- init: { createUser: 1 },
- createUser: { createUser: 1 }
+ init: {createUser: 1},
+ createUser: {createUser: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/auth_drop_role.js b/jstests/concurrency/fsm_workloads/auth_drop_role.js
index 262de710fa2..d41066dbc63 100644
--- a/jstests/concurrency/fsm_workloads/auth_drop_role.js
+++ b/jstests/concurrency/fsm_workloads/auth_drop_role.js
@@ -6,7 +6,7 @@
* Repeatedly creates a new role on a database, and subsequently
* drops it from the database.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles
var $config = (function() {
@@ -30,15 +30,9 @@ var $config = (function() {
var roleName = uniqueRoleName(this.prefix, this.tid, this.num++);
db.createRole({
role: roleName,
- privileges: [
- {
- resource: { db: db.getName(), collection: collName },
- actions: ['remove']
- }
- ],
- roles: [
- { role: 'read', db: db.getName() }
- ]
+ privileges:
+ [{resource: {db: db.getName(), collection: collName}, actions: ['remove']}],
+ roles: [{role: 'read', db: db.getName()}]
});
var res = db.getRole(roleName);
@@ -47,8 +41,7 @@ var $config = (function() {
assertAlways(!res.isBuiltin, 'role should be user-defined');
assertAlways(db.dropRole(roleName));
- assertAlways.isnull(db.getRole(roleName),
- "role '" + roleName + "' should not exist");
+ assertAlways.isnull(db.getRole(roleName), "role '" + roleName + "' should not exist");
}
return {
@@ -59,8 +52,8 @@ var $config = (function() {
})();
var transitions = {
- init: { createAndDropRole: 1 },
- createAndDropRole: { createAndDropRole: 1 }
+ init: {createAndDropRole: 1},
+ createAndDropRole: {createAndDropRole: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/auth_drop_user.js b/jstests/concurrency/fsm_workloads/auth_drop_user.js
index 96f41eb4160..65cb8e41da2 100644
--- a/jstests/concurrency/fsm_workloads/auth_drop_user.js
+++ b/jstests/concurrency/fsm_workloads/auth_drop_user.js
@@ -26,11 +26,7 @@ var $config = (function() {
function createAndDropUser(db, collName) {
var username = uniqueUsername(this.prefix, this.tid, this.num++);
- db.createUser({
- user: username,
- pwd: 'password',
- roles: ['readWrite', 'dbAdmin']
- });
+ db.createUser({user: username, pwd: 'password', roles: ['readWrite', 'dbAdmin']});
var res = db.getUser(username);
assertAlways(res !== null, "user '" + username + "' should exist");
@@ -38,8 +34,7 @@ var $config = (function() {
assertAlways.eq(db.getName(), res.db);
assertAlways(db.dropUser(username));
- assertAlways.isnull(db.getUser(username),
- "user '" + username + "' should not exist");
+ assertAlways.isnull(db.getUser(username), "user '" + username + "' should not exist");
}
return {
@@ -50,8 +45,8 @@ var $config = (function() {
})();
var transitions = {
- init: { createAndDropUser: 1 },
- createAndDropUser: { createAndDropUser: 1 }
+ init: {createAndDropUser: 1},
+ createAndDropUser: {createAndDropUser: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/collmod.js b/jstests/concurrency/fsm_workloads/collmod.js
index efed90ef9d1..7b803cd3284 100644
--- a/jstests/concurrency/fsm_workloads/collmod.js
+++ b/jstests/concurrency/fsm_workloads/collmod.js
@@ -14,7 +14,7 @@ var $config = (function() {
var data = {
numDocs: 1000,
- maxTTL: 5000, // max time to live
+ maxTTL: 5000, // max time to live
ttlIndexExists: true
};
@@ -22,12 +22,10 @@ var $config = (function() {
function collMod(db, collName) {
var newTTL = Random.randInt(this.maxTTL);
- var res = db.runCommand({ collMod: this.threadCollName,
- index: {
- keyPattern: { createdAt: 1 },
- expireAfterSeconds: newTTL
- }
- });
+ var res = db.runCommand({
+ collMod: this.threadCollName,
+ index: {keyPattern: {createdAt: 1}, expireAfterSeconds: newTTL}
+ });
assertAlways.commandWorked(res);
// only assert if new expireAfterSeconds differs from old one
if (res.hasOwnProperty('expireAfterSeconds_new')) {
@@ -42,7 +40,7 @@ var $config = (function() {
})();
var transitions = {
- collMod: { collMod: 1 }
+ collMod: {collMod: 1}
};
function setup(db, collName, cluster) {
@@ -50,7 +48,7 @@ var $config = (function() {
this.threadCollName = this.threadCollName || collName;
var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
- bulk.insert({ createdAt: new Date() });
+ bulk.insert({createdAt: new Date()});
}
var res = bulk.execute();
@@ -58,8 +56,7 @@ var $config = (function() {
assertAlways.eq(this.numDocs, res.nInserted);
// create TTL index
- res = db[this.threadCollName].ensureIndex({ createdAt: 1 },
- { expireAfterSeconds: 3600 });
+ res = db[this.threadCollName].ensureIndex({createdAt: 1}, {expireAfterSeconds: 3600});
assertAlways.commandWorked(res);
}
diff --git a/jstests/concurrency/fsm_workloads/collmod_separate_collections.js b/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
index 05976a3ffce..5f9490dbaba 100644
--- a/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
+++ b/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
@@ -9,34 +9,36 @@
*
* Each thread updates a TTL index on a separate collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/collmod.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/collmod.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.prefix = 'collmod_separate_collections';
- $config.data.shardKey = { createdAt: 1 };
+var $config = extendWorkload($config,
+ function($config, $super) {
+ $config.data.prefix = 'collmod_separate_collections';
+ $config.data.shardKey = {
+ createdAt: 1
+ };
- $config.states.init = function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
- $super.setup.call(this, db, this.threadCollName);
- };
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+ $super.setup.call(this, db, this.threadCollName);
+ };
- $config.transitions = Object.extend({
- init: { collMod: 1 }
- }, $super.transitions);
+ $config.transitions =
+ Object.extend({init: {collMod: 1}}, $super.transitions);
- $config.setup = function setup(db, collName, cluster) {
- // no-op: since the init state is used to setup
- // the separate collections on a per-thread basis.
- };
+ $config.setup = function setup(db, collName, cluster) {
+ // no-op: since the init state is used to setup
+ // the separate collections on a per-thread basis.
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + this.prefix + '_\\d+$');
- dropCollections(db, pattern);
- $super.teardown.apply(this, arguments);
- };
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ $super.teardown.apply(this, arguments);
+ };
- $config.startState = 'init';
- return $config;
-});
+ $config.startState = 'init';
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/compact.js b/jstests/concurrency/fsm_workloads/compact.js
index 855cd6e73fa..b80e46c0d65 100644
--- a/jstests/concurrency/fsm_workloads/compact.js
+++ b/jstests/concurrency/fsm_workloads/compact.js
@@ -8,25 +8,21 @@
* for each thread.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral
var $config = (function() {
var data = {
nDocumentsToInsert: 1000,
- nIndexes: 3 + 1, // The number of indexes created in createIndexes + 1 for { _id: 1 }
- prefix: 'compact' // Use filename for prefix because filename is assumed unique
+ nIndexes: 3 + 1, // The number of indexes created in createIndexes + 1 for { _id: 1 }
+ prefix: 'compact' // Use filename for prefix because filename is assumed unique
};
var states = (function() {
function insertDocuments(db, collName) {
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.nDocumentsToInsert; ++i) {
- bulk.insert({
- a: Random.randInt(10),
- b: Random.randInt(10),
- c: Random.randInt(10)
- });
+ bulk.insert({a: Random.randInt(10), b: Random.randInt(10), c: Random.randInt(10)});
}
var res = bulk.execute();
assertAlways.writeOK(res);
@@ -35,20 +31,20 @@ var $config = (function() {
function createIndexes(db, collName) {
// The number of indexes created here is also stored in data.nIndexes
- var aResult = db[collName].ensureIndex({ a: 1 });
+ var aResult = db[collName].ensureIndex({a: 1});
assertAlways.commandWorked(aResult);
- var bResult = db[collName].ensureIndex({ b: 1 });
+ var bResult = db[collName].ensureIndex({b: 1});
assertAlways.commandWorked(bResult);
- var cResult = db[collName].ensureIndex({ c: 1 });
+ var cResult = db[collName].ensureIndex({c: 1});
assertAlways.commandWorked(cResult);
}
// This method is independent of collectionSetup to allow it to be overridden in
// workloads that extend this one
function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
+ this.threadCollName = this.prefix + '_' + this.tid;
}
function collectionSetup(db, collName) {
@@ -57,11 +53,8 @@ var $config = (function() {
}
function compact(db, collName) {
- var res = db.runCommand({
- compact: this.threadCollName,
- paddingFactor: 1.0,
- force: true
- });
+ var res =
+ db.runCommand({compact: this.threadCollName, paddingFactor: 1.0, force: true});
if (!isEphemeral(db)) {
assertAlways.commandWorked(res);
} else {
@@ -71,8 +64,10 @@ var $config = (function() {
function query(db, collName) {
var count = db[this.threadCollName].find().itcount();
- assertWhenOwnColl.eq(count, this.nDocumentsToInsert, 'number of documents in ' +
- 'collection should not change following a compact');
+ assertWhenOwnColl.eq(count,
+ this.nDocumentsToInsert,
+ 'number of documents in ' +
+ 'collection should not change following a compact');
var indexesCount = db[this.threadCollName].getIndexes().length;
assertWhenOwnColl.eq(indexesCount, this.nIndexes);
}
@@ -86,10 +81,10 @@ var $config = (function() {
})();
var transitions = {
- init: { collectionSetup: 1 },
- collectionSetup: { compact: 0.5, query: 0.5 },
- compact: { compact: 0.5, query: 0.5 },
- query: { compact: 0.5, query: 0.5 }
+ init: {collectionSetup: 1},
+ collectionSetup: {compact: 0.5, query: 0.5},
+ compact: {compact: 0.5, query: 0.5},
+ query: {compact: 0.5, query: 0.5}
};
var teardown = function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js b/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
index dc8d9881f69..22eef359b87 100644
--- a/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
+++ b/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
@@ -8,31 +8,30 @@
* for all threads. Uses paddingBytes as a parameter for compact.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/compact.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/compact.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral
-var $config = extendWorkload($config, function($config, $super) {
- $config.states.init = function init(db, collName) {
- this.threadCollName = collName;
- };
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = collName;
+ };
- $config.states.compact = function compact(db, collName) {
- var res = db.runCommand({
- compact: this.threadCollName,
- paddingBytes: 1024 * 5,
- force: true
- });
- if (!isEphemeral(db)) {
- assertAlways.commandWorked(res);
- } else {
- assertAlways.commandFailedWithCode(res, ErrorCodes.CommandNotSupported);
- }
- };
+ $config.states.compact = function compact(db, collName) {
+ var res =
+ db.runCommand({compact: this.threadCollName, paddingBytes: 1024 * 5, force: true});
+ if (!isEphemeral(db)) {
+ assertAlways.commandWorked(res);
+ } else {
+ assertAlways.commandFailedWithCode(res, ErrorCodes.CommandNotSupported);
+ }
+ };
- // no-op the query state because querying while compacting can result in closed cursors
- // as per SERVER-3964, as well as inaccurate counts, leaving nothing to assert.
- $config.states.query = function query(db, collName) { };
+ // no-op the query state because querying while compacting can result in closed cursors
+ // as per SERVER-3964, as well as inaccurate counts, leaving nothing to assert.
+ $config.states.query = function query(db, collName) {};
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
index 92000e0e164..79b9934077b 100644
--- a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
@@ -32,7 +32,7 @@ var $config = (function() {
var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
for (var i = 0; i < (this.tid + 1) * 200; i++) {
- bulk.insert({ i: i, rand: Random.rand() });
+ bulk.insert({i: i, rand: Random.rand()});
}
var res = bulk.execute();
@@ -58,7 +58,7 @@ var $config = (function() {
var indexKeys = db[this.threadCollName].getIndexKeys();
assertWhenOwnDB.eq(1, indexKeys.length);
assertWhenOwnDB(function() {
- assertWhenOwnDB.docEq({ _id: 1 }, indexKeys[0]);
+ assertWhenOwnDB.docEq({_id: 1}, indexKeys[0]);
});
}
@@ -69,8 +69,8 @@ var $config = (function() {
})();
var transitions = {
- init: { convertToCapped: 1 },
- convertToCapped: { convertToCapped: 1 }
+ init: {convertToCapped: 1},
+ convertToCapped: {convertToCapped: 1}
};
function setup(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
index dd6716c750d..2eaa8e261b2 100644
--- a/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
+++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
@@ -13,15 +13,17 @@
* but that only the _id index remains after (re-)converting
* to a capped collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/convert_to_capped_collection.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/convert_to_capped_collection.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.states.convertToCapped = function convertToCapped(db, collName) {
- assertWhenOwnDB.commandWorked(db[this.threadCollName].ensureIndex({ i: 1, rand: 1 }));
- assertWhenOwnDB.eq(2, db[this.threadCollName].getIndexes().length);
- $super.states.convertToCapped.apply(this, arguments);
- };
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.states.convertToCapped = function convertToCapped(db, collName) {
+ assertWhenOwnDB.commandWorked(db[this.threadCollName].ensureIndex({i: 1, rand: 1}));
+ assertWhenOwnDB.eq(2, db[this.threadCollName].getIndexes().length);
+ $super.states.convertToCapped.apply(this, arguments);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/count.js b/jstests/concurrency/fsm_workloads/count.js
index 626ac49a4c8..61a4c93d3ab 100644
--- a/jstests/concurrency/fsm_workloads/count.js
+++ b/jstests/concurrency/fsm_workloads/count.js
@@ -23,7 +23,7 @@ var $config = (function() {
return this.modulus * this.countPerNum;
},
getCount: function getCount(db, predicate) {
- var query = Object.extend({ tid: this.tid }, predicate);
+ var query = Object.extend({tid: this.tid}, predicate);
return db[this.threadCollName].count(query);
}
};
@@ -39,7 +39,7 @@ var $config = (function() {
var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
for (var i = 0; i < this.getNumDocs(); ++i) {
- bulk.insert({ i: i % this.modulus, tid: this.tid });
+ bulk.insert({i: i % this.modulus, tid: this.tid});
}
var res = bulk.execute();
assertAlways.writeOK(res);
@@ -50,7 +50,7 @@ var $config = (function() {
assertWhenOwnColl.eq(this.getCount(db), this.getNumDocs());
var num = Random.randInt(this.modulus);
- assertWhenOwnColl.eq(this.getCount(db, { i: num }), this.countPerNum);
+ assertWhenOwnColl.eq(this.getCount(db, {i: num}), this.countPerNum);
}
return {
@@ -61,8 +61,8 @@ var $config = (function() {
})();
var transitions = {
- init: { count: 1 },
- count: { count: 1 }
+ init: {count: 1},
+ count: {count: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/count_indexed.js b/jstests/concurrency/fsm_workloads/count_indexed.js
index dc49593f46f..d7a49c6fb40 100644
--- a/jstests/concurrency/fsm_workloads/count_indexed.js
+++ b/jstests/concurrency/fsm_workloads/count_indexed.js
@@ -10,30 +10,35 @@
* and then inserts 'modulus * countPerNum' documents. [250, 1000]
* Each thread inserts docs into a unique collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/count.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/count.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.prefix = 'count_fsm';
- $config.data.shardKey = { tid: 1, i: 1 };
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.data.prefix = 'count_fsm';
+ $config.data.shardKey = {
+ tid: 1,
+ i: 1
+ };
- $config.data.getCount = function getCount(db, predicate) {
- var query = Object.extend({ tid: this.tid }, predicate);
- return db[this.threadCollName].find(query).hint({ tid: 1, i: 1 }).count();
- };
+ $config.data.getCount = function getCount(db, predicate) {
+ var query = Object.extend({tid: this.tid}, predicate);
+ return db[this.threadCollName].find(query).hint({tid: 1, i: 1}).count();
+ };
- $config.states.init = function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
- $super.states.init.apply(this, arguments);
- assertAlways.commandWorked(db[this.threadCollName].ensureIndex({ tid: 1, i: 1 }));
- };
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+ $super.states.init.apply(this, arguments);
+ assertAlways.commandWorked(db[this.threadCollName].ensureIndex({tid: 1, i: 1}));
+ };
- $config.teardown = function teardown(db, collName) {
- var pattern = new RegExp('^' + this.prefix + '_\\d+$');
- dropCollections(db, pattern);
- $super.teardown.apply(this, arguments);
- };
+ $config.teardown = function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ $super.teardown.apply(this, arguments);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/count_limit_skip.js b/jstests/concurrency/fsm_workloads/count_limit_skip.js
index b770e542382..999fc941f8b 100644
--- a/jstests/concurrency/fsm_workloads/count_limit_skip.js
+++ b/jstests/concurrency/fsm_workloads/count_limit_skip.js
@@ -10,43 +10,46 @@
* and then inserts 'modulus * countPerNum' documents. [250, 1000]
* Each thread inserts docs into a unique collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/count.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.prefix = 'count_fsm_q_l_s';
-
- $config.data.getCount = function getCount(db, predicate) {
- var query = Object.extend({ tid: this.tid }, predicate);
- return db[this.threadCollName].find(query)
- .skip(this.countPerNum - 1)
- .limit(10).count(true);
- };
-
- $config.states.init = function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
-
- $super.states.init.apply(this, arguments);
- };
-
- $config.states.count = function count(db, collName) {
- assertWhenOwnColl.eq(this.getCount(db),
- // having done 'skip(this.countPerNum - 1).limit(10)'
- 10);
-
- var num = Random.randInt(this.modulus);
- assertWhenOwnColl.eq(this.getCount(db, { i: num }),
- // having done 'skip(this.countPerNum - 1).limit(10)'
- 1);
- };
-
- $config.teardown = function teardown(db, collName) {
- var pattern = new RegExp('^' + this.prefix + '_\\d+$');
- dropCollections(db, pattern);
- $super.teardown.apply(this, arguments);
- };
-
- return $config;
-});
-
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/count.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.data.prefix = 'count_fsm_q_l_s';
+
+ $config.data.getCount = function getCount(db, predicate) {
+ var query = Object.extend({tid: this.tid}, predicate);
+ return db[this.threadCollName]
+ .find(query)
+ .skip(this.countPerNum - 1)
+ .limit(10)
+ .count(true);
+ };
+
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+
+ $super.states.init.apply(this, arguments);
+ };
+
+ $config.states.count = function count(db, collName) {
+ assertWhenOwnColl.eq(this.getCount(db),
+ // having done 'skip(this.countPerNum - 1).limit(10)'
+ 10);
+
+ var num = Random.randInt(this.modulus);
+ assertWhenOwnColl.eq(this.getCount(db, {i: num}),
+ // having done 'skip(this.countPerNum - 1).limit(10)'
+ 1);
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ $super.teardown.apply(this, arguments);
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection.js b/jstests/concurrency/fsm_workloads/create_capped_collection.js
index 90f2426b221..43cf7fe2b54 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection.js
@@ -6,15 +6,18 @@
* Repeatedly creates a capped collection. Also verifies that truncation
* occurs once the collection reaches a certain size.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
// Returns a document of the form { _id: ObjectId(...), field: '...' }
// with specified BSON size.
function makeDocWithSize(targetSize) {
- var doc = { _id: new ObjectId(), field: '' };
+ var doc = {
+ _id: new ObjectId(),
+ field: ''
+ };
var size = Object.bsonsize(doc);
assertAlways.gte(targetSize, size);
@@ -42,9 +45,11 @@ var $config = (function() {
// Returns an array containing the _id fields of all the documents
// in the collection, sorted according to their insertion order.
function getObjectIds(db, collName) {
- return db[collName].find({}, { _id: 1 }).map(function(doc) {
- return doc._id;
- });
+ return db[collName]
+ .find({}, {_id: 1})
+ .map(function(doc) {
+ return doc._id;
+ });
}
var data = {
@@ -67,7 +72,7 @@ var $config = (function() {
// Truncation in MMAPv1 has well defined behavior.
if (isMongod(db) && isMMAPv1(db)) {
ids.push(this.insert(db, myCollName, largeDocSize));
-
+
// Insert a large document and verify that a truncation has occurred.
// There should be 1 document in the collection and it should always be
// the most recently inserted document.
@@ -124,7 +129,7 @@ var $config = (function() {
var options = {
capped: true,
- size: 8192 // multiple of 256; larger than 4096 default
+ size: 8192 // multiple of 256; larger than 4096 default
};
function uniqueCollectionName(prefix, tid, num) {
@@ -151,8 +156,8 @@ var $config = (function() {
})();
var transitions = {
- init: { create: 1 },
- create: { create: 1 }
+ init: {create: 1},
+ create: {create: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
index ebdc1d55723..53bc9554904 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
@@ -7,57 +7,60 @@
* occurs once the collection reaches a certain size or contains a
* certain number of documents.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/create_capped_collection.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/create_capped_collection.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- // Use the workload name as a prefix for the collection name,
- // since the workload name is assumed to be unique.
- $config.data.prefix = 'create_capped_collection_maxdocs';
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ $config.data.prefix = 'create_capped_collection_maxdocs';
- var options = {
- capped: true,
- size: 8192, // multiple of 256; larger than 4096 default
- max: 3
- };
+ var options = {
+ capped: true,
+ size: 8192, // multiple of 256; larger than 4096 default
+ max: 3
+ };
- function uniqueCollectionName(prefix, tid, num) {
- return prefix + tid + '_' + num;
- }
-
- // TODO: how to avoid having too many files open?
- function create(db, collName) {
- var myCollName = uniqueCollectionName(this.prefix, this.tid, this.num++);
- assertAlways.commandWorked(db.createCollection(myCollName, options));
+ function uniqueCollectionName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
- // Define a small document to be an eighth the size of the capped collection.
- var smallDocSize = Math.floor(options.size / 8) - 1;
+ // TODO: how to avoid having too many files open?
+ function create(db, collName) {
+ var myCollName = uniqueCollectionName(this.prefix, this.tid, this.num++);
+ assertAlways.commandWorked(db.createCollection(myCollName, options));
- // Verify size functionality still works as we expect
- this.verifySizeTruncation(db, myCollName, options);
+ // Define a small document to be an eighth the size of the capped collection.
+ var smallDocSize = Math.floor(options.size / 8) - 1;
- // Insert multiple small documents and verify that at least one truncation has occurred.
- // There should never be more than 3 documents in the collection, regardless of the storage
- // engine. They should always be the most recently inserted documents.
+ // Verify size functionality still works as we expect
+ this.verifySizeTruncation(db, myCollName, options);
- var ids = [];
- var count;
+ // Insert multiple small documents and verify that at least one truncation has occurred.
+ // There should never be more than 3 documents in the collection, regardless of the
+ // storage
+ // engine. They should always be the most recently inserted documents.
- ids.push(this.insert(db, myCollName, smallDocSize));
- ids.push(this.insert(db, myCollName, smallDocSize));
+ var ids = [];
+ var count;
- for (var i = 0; i < 50; i++) {
ids.push(this.insert(db, myCollName, smallDocSize));
- count = db[myCollName].find().itcount();
- assertWhenOwnDB.eq(3, count, 'expected truncation to occur due to number of docs');
- assertWhenOwnDB.eq(ids.slice(ids.length - count),
- this.getObjectIds(db, myCollName),
- 'expected truncation to remove the oldest documents');
+ ids.push(this.insert(db, myCollName, smallDocSize));
+
+ for (var i = 0; i < 50; i++) {
+ ids.push(this.insert(db, myCollName, smallDocSize));
+ count = db[myCollName].find().itcount();
+ assertWhenOwnDB.eq(3, count, 'expected truncation to occur due to number of docs');
+ assertWhenOwnDB.eq(ids.slice(ids.length - count),
+ this.getObjectIds(db, myCollName),
+ 'expected truncation to remove the oldest documents');
+ }
}
- }
- $config.states.create = create;
+ $config.states.create = create;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/create_collection.js b/jstests/concurrency/fsm_workloads/create_collection.js
index fa2a13fb45d..fdc6d8af9fd 100644
--- a/jstests/concurrency/fsm_workloads/create_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_collection.js
@@ -5,7 +5,7 @@
*
* Repeatedly creates a collection.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
@@ -40,8 +40,8 @@ var $config = (function() {
})();
var transitions = {
- init: { create: 1 },
- create: { create: 1 }
+ init: {create: 1},
+ create: {create: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/create_index_background.js b/jstests/concurrency/fsm_workloads/create_index_background.js
index 4dcb1e9ec7b..046709ebdd3 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background.js
@@ -8,7 +8,7 @@
* index has completed and the test no longer needs to execute more transitions.
* The first thread (tid = 0) will be the one that creates the background index.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
var $config = (function() {
@@ -21,7 +21,7 @@ var $config = (function() {
getHighestX: function getHighestX(coll, tid) {
// Find highest value of x.
var highest = 0;
- var cursor = coll.find({ tid: tid }).sort({ x: -1 }).limit(-1);
+ var cursor = coll.find({tid: tid}).sort({x: -1}).limit(-1);
assertWhenOwnColl(function() {
highest = cursor.next().x;
});
@@ -35,7 +35,7 @@ var $config = (function() {
// Add thread-specific documents
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.nDocumentsToSeed; ++i) {
- bulk.insert({ x: i, tid: this.tid });
+ bulk.insert({x: i, tid: this.tid});
}
var res = bulk.execute();
assertAlways.writeOK(res);
@@ -47,9 +47,9 @@ var $config = (function() {
// Before creating the background index make sure insert or update
// CRUD operations are active.
assertWhenOwnColl.soon(function() {
- return coll.find({ crud: { $exists: true } }).itcount() > 0;
+ return coll.find({crud: {$exists: true}}).itcount() > 0;
}, 'No documents with "crud" field have been inserted or updated', 60 * 1000);
- res = coll.ensureIndex({ x: 1 }, { background: true });
+ res = coll.ensureIndex({x: 1}, {background: true});
assertAlways.commandWorked(res, tojson(res));
}
}
@@ -58,15 +58,15 @@ var $config = (function() {
// Insert documents with an increasing value of index x.
var coll = db[collName];
var res;
- var count = coll.find({ tid: this.tid }).itcount();
+ var count = coll.find({tid: this.tid}).itcount();
var highest = this.getHighestX(coll, this.tid);
for (var i = 0; i < this.nDocumentsToCreate; ++i) {
- res = coll.insert({ x: i + highest + 1, tid: this.tid, crud: 1 });
+ res = coll.insert({x: i + highest + 1, tid: this.tid, crud: 1});
assertAlways.writeOK(res);
assertAlways.eq(res.nInserted, 1, tojson(res));
}
- assertWhenOwnColl.eq(coll.find({ tid: this.tid }).itcount(),
+ assertWhenOwnColl.eq(coll.find({tid: this.tid}).itcount(),
this.nDocumentsToCreate + count,
'createDocs itcount mismatch');
}
@@ -75,21 +75,19 @@ var $config = (function() {
// Read random documents from the collection on index x.
var coll = db[collName];
var res;
- var count = coll.find({ tid: this.tid }).itcount();
- assertWhenOwnColl.gte(count,
- this.nDocumentsToRead,
- 'readDocs not enough documents for tid ' + this.tid);
+ var count = coll.find({tid: this.tid}).itcount();
+ assertWhenOwnColl.gte(
+ count, this.nDocumentsToRead, 'readDocs not enough documents for tid ' + this.tid);
var highest = this.getHighestX(coll, this.tid);
for (var i = 0; i < this.nDocumentsToRead; ++i) {
// Do randomized reads on index x. A document is not guaranteed
// to match the randomized 'x' predicate.
- res = coll.find({ x: Random.randInt(highest), tid: this.tid }).itcount();
- assertWhenOwnColl.contains(res, [ 0, 1 ], tojson(res));
+ res = coll.find({x: Random.randInt(highest), tid: this.tid}).itcount();
+ assertWhenOwnColl.contains(res, [0, 1], tojson(res));
}
- assertWhenOwnColl.eq(coll.find({ tid: this.tid }).itcount(),
- count,
- 'readDocs itcount mismatch');
+ assertWhenOwnColl.eq(
+ coll.find({tid: this.tid}).itcount(), count, 'readDocs itcount mismatch');
}
function updateDocs(db, collName) {
@@ -98,7 +96,7 @@ var $config = (function() {
if (!isMongos(db)) {
var coll = db[collName];
var res;
- var count = coll.find({ tid: this.tid }).itcount();
+ var count = coll.find({tid: this.tid}).itcount();
assertWhenOwnColl.gte(count,
this.nDocumentsToUpdate,
'updateDocs not enough documents for tid ' + this.tid);
@@ -107,18 +105,17 @@ var $config = (function() {
for (var i = 0; i < this.nDocumentsToUpdate; ++i) {
// Do randomized updates on index x. A document is not guaranteed
// to match the randomized 'x' predicate.
- res = coll.update({ x: Random.randInt(highest), tid: this.tid },
- { $inc: { crud: 1 } });
+ res = coll.update({x: Random.randInt(highest), tid: this.tid},
+ {$inc: {crud: 1}});
assertAlways.writeOK(res);
if (db.getMongo().writeMode() === 'commands') {
- assertWhenOwnColl.contains(res.nModified, [ 0, 1 ], tojson(res));
+ assertWhenOwnColl.contains(res.nModified, [0, 1], tojson(res));
}
- assertWhenOwnColl.contains(res.nMatched, [ 0, 1 ], tojson(res));
+ assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
assertWhenOwnColl.eq(res.nUpserted, 0, tojson(res));
}
- assertWhenOwnColl.eq(coll.find({ tid: this.tid }).itcount(),
- count,
- 'updateDocs itcount mismatch');
+ assertWhenOwnColl.eq(
+ coll.find({tid: this.tid}).itcount(), count, 'updateDocs itcount mismatch');
}
}
@@ -126,7 +123,7 @@ var $config = (function() {
// Remove random documents from the collection on index x.
var coll = db[collName];
var res;
- var count = coll.find({ tid: this.tid }).itcount();
+ var count = coll.find({tid: this.tid}).itcount();
// Set the maximum number of documents we can delete to ensure that there
// are documents to read or update after deleteDocs completes.
@@ -145,12 +142,12 @@ var $config = (function() {
for (var i = 0; i < nDeleteDocs; ++i) {
// Do randomized deletes on index x. A document is not guaranteed
// to match the randomized 'x' predicate.
- res = coll.remove({ x: Random.randInt(highest), tid: this.tid });
+ res = coll.remove({x: Random.randInt(highest), tid: this.tid});
assertAlways.writeOK(res);
- assertWhenOwnColl.contains(res.nRemoved, [ 0, 1 ], tojson(res));
+ assertWhenOwnColl.contains(res.nRemoved, [0, 1], tojson(res));
nActualDeletes += res.nRemoved;
}
- assertWhenOwnColl.eq(coll.find({ tid: this.tid }).itcount(),
+ assertWhenOwnColl.eq(coll.find({tid: this.tid}).itcount(),
count - nActualDeletes,
'deleteDocs itcount mismatch');
}
@@ -166,16 +163,11 @@ var $config = (function() {
})();
var transitions = {
- init:
- { createDocs: 1 },
- createDocs:
- { createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25 },
- readDocs:
- { createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25 },
- updateDocs:
- { createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25 },
- deleteDocs:
- { createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25 },
+ init: {createDocs: 1},
+ createDocs: {createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25},
+ readDocs: {createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25},
+ updateDocs: {createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25},
+ deleteDocs: {createDocs: 0.25, readDocs: 0.25, updateDocs: 0.25, deleteDocs: 0.25},
};
var internalQueryExecYieldIterations;
@@ -185,12 +177,12 @@ var $config = (function() {
var nSetupDocs = this.nDocumentsToSeed * 200;
var coll = db[collName];
- var res = coll.ensureIndex({ tid: 1 });
+ var res = coll.ensureIndex({tid: 1});
assertAlways.commandWorked(res, tojson(res));
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < nSetupDocs; ++i) {
- bulk.insert({ x: i });
+ bulk.insert({x: i});
}
res = bulk.execute();
assertAlways.writeOK(res);
@@ -199,11 +191,11 @@ var $config = (function() {
// Increase the following parameters to reduce the number of yields.
cluster.executeOnMongodNodes(function(db) {
var res;
- res = db.adminCommand({ setParameter: 1, internalQueryExecYieldIterations: 100000 });
+ res = db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 100000});
assertAlways.commandWorked(res);
internalQueryExecYieldIterations = res.was;
- res = db.adminCommand({ setParameter: 1, internalQueryExecYieldPeriodMS: 10000 });
+ res = db.adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 10000});
assertAlways.commandWorked(res);
internalQueryExecYieldPeriodMS = res.was;
});
@@ -211,18 +203,14 @@ var $config = (function() {
function teardown(db, collName, cluster) {
cluster.executeOnMongodNodes(function(db) {
- assertAlways.commandWorked(
- db.adminCommand({
- setParameter: 1,
- internalQueryExecYieldIterations: internalQueryExecYieldIterations
- })
- );
- assertAlways.commandWorked(
- db.adminCommand({
- setParameter: 1,
- internalQueryExecYieldPeriodMS: internalQueryExecYieldPeriodMS
- })
- );
+ assertAlways.commandWorked(db.adminCommand({
+ setParameter: 1,
+ internalQueryExecYieldIterations: internalQueryExecYieldIterations
+ }));
+ assertAlways.commandWorked(db.adminCommand({
+ setParameter: 1,
+ internalQueryExecYieldPeriodMS: internalQueryExecYieldPeriodMS
+ }));
});
}
diff --git a/jstests/concurrency/fsm_workloads/distinct.js b/jstests/concurrency/fsm_workloads/distinct.js
index b8598984f80..c76b5e972f5 100644
--- a/jstests/concurrency/fsm_workloads/distinct.js
+++ b/jstests/concurrency/fsm_workloads/distinct.js
@@ -7,14 +7,14 @@
* The indexed field contains unique values.
* Each thread operates on a separate collection.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
var data = {
numDocs: 1000,
prefix: 'distinct_fsm',
- shardKey: { i: 1 }
+ shardKey: {i: 1}
};
var states = (function() {
@@ -23,12 +23,12 @@ var $config = (function() {
this.threadCollName = this.prefix + '_' + this.tid;
var bulk = db[this.threadCollName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
- bulk.insert({ i: i });
+ bulk.insert({i: i});
}
var res = bulk.execute();
assertAlways.writeOK(res);
assertAlways.eq(this.numDocs, res.nInserted);
- assertAlways.commandWorked(db[this.threadCollName].ensureIndex({ i: 1 }));
+ assertAlways.commandWorked(db[this.threadCollName].ensureIndex({i: 1}));
}
function distinct(db, collName) {
@@ -43,8 +43,8 @@ var $config = (function() {
})();
var transitions = {
- init: { distinct: 1 },
- distinct: { distinct: 1 }
+ init: {distinct: 1},
+ distinct: {distinct: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/distinct_noindex.js b/jstests/concurrency/fsm_workloads/distinct_noindex.js
index 6a38830f9d6..b55d1e58d3b 100644
--- a/jstests/concurrency/fsm_workloads/distinct_noindex.js
+++ b/jstests/concurrency/fsm_workloads/distinct_noindex.js
@@ -24,7 +24,7 @@ var $config = (function() {
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
- bulk.insert({ i: i % this.modulus, tid: this.tid });
+ bulk.insert({i: i % this.modulus, tid: this.tid});
}
var res = bulk.execute();
assertAlways.writeOK(res);
@@ -32,8 +32,7 @@ var $config = (function() {
}
function distinct(db, collName) {
- assertWhenOwnColl.eq(this.modulus,
- db[collName].distinct('i', { tid: this.tid }).length);
+ assertWhenOwnColl.eq(this.modulus, db[collName].distinct('i', {tid: this.tid}).length);
}
return {
@@ -44,8 +43,8 @@ var $config = (function() {
})();
var transitions = {
- init: { distinct: 1 },
- distinct: { distinct: 1 }
+ init: {distinct: 1},
+ distinct: {distinct: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/distinct_projection.js b/jstests/concurrency/fsm_workloads/distinct_projection.js
index d934c329f20..cf8d5ab9501 100644
--- a/jstests/concurrency/fsm_workloads/distinct_projection.js
+++ b/jstests/concurrency/fsm_workloads/distinct_projection.js
@@ -7,17 +7,21 @@
* The indexed field contains unique values.
* Each thread operates on a separate collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/distinct.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/distinct.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.prefix = 'distinct_projection_fsm';
+var $config = extendWorkload($config,
+ function($config, $super) {
+ $config.data.prefix = 'distinct_projection_fsm';
- $config.states.distinct = function distinct(db, collName) {
- var query = { i: { $lt: this.numDocs / 2 } };
- assertWhenOwnColl.eq(this.numDocs / 2,
- db[this.threadCollName].distinct('i', query).length);
- };
+ $config.states.distinct = function distinct(db, collName) {
+ var query = {
+ i: {$lt: this.numDocs / 2}
+ };
+ assertWhenOwnColl.eq(
+ this.numDocs / 2,
+ db[this.threadCollName].distinct('i', query).length);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/drop_collection.js b/jstests/concurrency/fsm_workloads/drop_collection.js
index 1f92541e9fe..64a60ef8e79 100644
--- a/jstests/concurrency/fsm_workloads/drop_collection.js
+++ b/jstests/concurrency/fsm_workloads/drop_collection.js
@@ -38,8 +38,8 @@ var $config = (function() {
})();
var transitions = {
- init: { createAndDrop: 1 },
- createAndDrop: { createAndDrop: 1 }
+ init: {createAndDrop: 1},
+ createAndDrop: {createAndDrop: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/drop_database.js b/jstests/concurrency/fsm_workloads/drop_database.js
index fee8ab3d1a3..9a6b9e0fb80 100644
--- a/jstests/concurrency/fsm_workloads/drop_database.js
+++ b/jstests/concurrency/fsm_workloads/drop_database.js
@@ -25,8 +25,8 @@ var $config = (function() {
};
var transitions = {
- init: { createAndDrop: 1 },
- createAndDrop: { createAndDrop: 1 }
+ init: {createAndDrop: 1},
+ createAndDrop: {createAndDrop: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/explain.js b/jstests/concurrency/fsm_workloads/explain.js
index e5199bcc377..983218a7fbf 100644
--- a/jstests/concurrency/fsm_workloads/explain.js
+++ b/jstests/concurrency/fsm_workloads/explain.js
@@ -6,14 +6,14 @@
* Runs explain() on a collection.
*
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod
var $config = (function() {
var data = {
collNotExist: 'donotexist__',
nInserted: 0,
- shardKey: { j: 1 },
+ shardKey: {j: 1},
assignEqualProbsToTransitions: function assignEqualProbsToTransitions(statesMap) {
var states = Object.keys(statesMap);
assertAlways.gt(states.length, 0);
@@ -27,28 +27,22 @@ var $config = (function() {
};
function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({ j: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({j: 1}));
}
var states = (function() {
function insert(db, collName) {
- db[collName].insert({
- i: this.nInserted,
- j: 2 * this.nInserted
- });
+ db[collName].insert({i: this.nInserted, j: 2 * this.nInserted});
this.nInserted++;
}
function explain(db, collName) {
// test the three verbosity levels:
// 'queryPlanner', 'executionStats', and 'allPlansExecution'
- ['queryPlanner', 'executionStats', 'allPlansExecution'].forEach(
- function(verbosity) {
- assertAlways.commandWorked(db[collName]
- .find({ j: this.nInserted / 2 })
- .explain(verbosity));
- }.bind(this)
- );
+ ['queryPlanner', 'executionStats', 'allPlansExecution'].forEach(function(verbosity) {
+ assertAlways.commandWorked(
+ db[collName].find({j: this.nInserted / 2}).explain(verbosity));
+ }.bind(this));
}
function explainNonExistentNS(db, collName) {
@@ -76,9 +70,9 @@ var $config = (function() {
})();
var transitions = {
- insert: { insert: 0.1, explain: 0.8, explainNonExistentNS: 0.1 },
- explain: { insert: 0.7, explain: 0.2, explainNonExistentNS: 0.1 },
- explainNonExistentNS: { insert: 0.4, explain: 0.5, explainNonExistentNS: 0.1 }
+ insert: {insert: 0.1, explain: 0.8, explainNonExistentNS: 0.1},
+ explain: {insert: 0.7, explain: 0.2, explainNonExistentNS: 0.1},
+ explainNonExistentNS: {insert: 0.4, explain: 0.5, explainNonExistentNS: 0.1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/explain_aggregate.js b/jstests/concurrency/fsm_workloads/explain_aggregate.js
index 02a00923c0d..82542be4cc4 100644
--- a/jstests/concurrency/fsm_workloads/explain_aggregate.js
+++ b/jstests/concurrency/fsm_workloads/explain_aggregate.js
@@ -6,40 +6,46 @@
* Runs explain() and aggregate() on a collection.
*
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
-
- function assertCursorStages(num, obj) {
- assertAlways(obj.stages, tojson(obj));
- assertAlways.eq(num, obj.stages.length, tojson(obj.stages));
- assertAlways(obj.stages[0].$cursor, tojson(obj.stages[0]));
- assertAlways(obj.stages[0].$cursor.hasOwnProperty('queryPlanner'),
- tojson(obj.stages[0].$cursor));
- }
-
- $config.states = Object.extend({
- explainMatch: function explainMatch(db, collName) {
- var res = db[collName].explain().aggregate([{ $match: { i: this.nInserted / 2 } }]);
- assertAlways.commandWorked(res);
-
- // stages reported: $cursor
- assertCursorStages(1, res);
- },
- explainMatchProject: function explainMatchProject(db, collName) {
- var res = db[collName].explain().aggregate([{ $match: { i: this.nInserted / 3 } },
- { $project: { i: 1 } }]);
- assertAlways.commandWorked(res);
-
- // stages reported: $cursor, $project
- assertCursorStages(2, res);
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ function assertCursorStages(num, obj) {
+ assertAlways(obj.stages, tojson(obj));
+ assertAlways.eq(num, obj.stages.length, tojson(obj.stages));
+ assertAlways(obj.stages[0].$cursor, tojson(obj.stages[0]));
+ assertAlways(obj.stages[0].$cursor.hasOwnProperty('queryPlanner'),
+ tojson(obj.stages[0].$cursor));
}
- }, $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
-
- return $config;
-});
+ $config.states = Object.extend(
+ {
+ explainMatch: function explainMatch(db, collName) {
+ var res = db[collName].explain().aggregate([{$match: {i: this.nInserted / 2}}]);
+ assertAlways.commandWorked(res);
+
+ // stages reported: $cursor
+ assertCursorStages(1, res);
+ },
+ explainMatchProject: function explainMatchProject(db, collName) {
+ var res =
+ db[collName]
+ .explain()
+ .aggregate([{$match: {i: this.nInserted / 3}}, {$project: {i: 1}}]);
+ assertAlways.commandWorked(res);
+
+ // stages reported: $cursor, $project
+ assertCursorStages(2, res);
+ }
+ },
+ $super.states);
+
+ $config.transitions =
+ Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/explain_count.js b/jstests/concurrency/fsm_workloads/explain_count.js
index 0b44073195d..05cfcc5ba87 100644
--- a/jstests/concurrency/fsm_workloads/explain_count.js
+++ b/jstests/concurrency/fsm_workloads/explain_count.js
@@ -5,55 +5,64 @@
*
* Runs explain() and count() on a collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
-load('jstests/libs/analyze_plan.js'); // for planHasStage
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
+load('jstests/libs/analyze_plan.js'); // for planHasStage
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- function assertNCounted(num, obj, db) {
- var stage = obj.executionStats.executionStages;
- // get sharded stage(s) if counting on mongos
- if (isMongos(db)) {
- stage = stage.shards[0].executionStages;
+ function assertNCounted(num, obj, db) {
+ var stage = obj.executionStats.executionStages;
+ // get sharded stage(s) if counting on mongos
+ if (isMongos(db)) {
+ stage = stage.shards[0].executionStages;
+ }
+ assertWhenOwnColl.eq(num, stage.nCounted);
}
- assertWhenOwnColl.eq(num, stage.nCounted);
- }
- $config.states = Object.extend({
- explainBasicCount: function explainBasicCount(db, collName) {
- var res = db[collName].explain().count();
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
- },
- explainCountHint: function explainCountHint(db, collName) {
- assertWhenOwnColl(function() {
- var res = db[collName].explain()
- .find({ i: this.nInserted / 2 })
- .hint({ i: 1 }).count();
- assertWhenOwnColl.commandWorked(res);
- assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
- assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT_SCAN'));
- });
- },
- explainCountNoSkipLimit: function explainCountNoSkipLimit(db, collName) {
- var res = db[collName].explain('executionStats')
- .find({ i: this.nInserted }).skip(1).count(false);
- assertAlways.commandWorked(res);
- assertNCounted(1, res, db);
- },
- explainCountSkipLimit: function explainCountSkipLimit(db, collName) {
- var res = db[collName].explain('executionStats')
- .find({ i: this.nInserted }).skip(1).count(true);
- assertAlways.commandWorked(res);
- assertNCounted(0, res, db);
- }
- }, $super.states);
+ $config.states = Object.extend(
+ {
+ explainBasicCount: function explainBasicCount(db, collName) {
+ var res = db[collName].explain().count();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
+ },
+ explainCountHint: function explainCountHint(db, collName) {
+ assertWhenOwnColl(function() {
+ var res =
+ db[collName].explain().find({i: this.nInserted / 2}).hint({i: 1}).count();
+ assertWhenOwnColl.commandWorked(res);
+ assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
+ assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT_SCAN'));
+ });
+ },
+ explainCountNoSkipLimit: function explainCountNoSkipLimit(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .find({i: this.nInserted})
+ .skip(1)
+ .count(false);
+ assertAlways.commandWorked(res);
+ assertNCounted(1, res, db);
+ },
+ explainCountSkipLimit: function explainCountSkipLimit(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .find({i: this.nInserted})
+ .skip(1)
+ .count(true);
+ assertAlways.commandWorked(res);
+ assertNCounted(0, res, db);
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
+ $config.transitions =
+ Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/explain_distinct.js b/jstests/concurrency/fsm_workloads/explain_distinct.js
index 65588909917..b772ac3ac25 100644
--- a/jstests/concurrency/fsm_workloads/explain_distinct.js
+++ b/jstests/concurrency/fsm_workloads/explain_distinct.js
@@ -5,28 +5,32 @@
*
* Runs explain() and distinct() on a collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-load('jstests/libs/analyze_plan.js'); // for planHasStage
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/libs/analyze_plan.js'); // for planHasStage
-var $config = extendWorkload($config, function($config, $super) {
- $config.states = Object.extend({
- explainBasicDistinct: function (db, collName) {
- var res = db[collName].explain().distinct('i');
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COLLSCAN'));
- },
- explainDistinctIndex: function (db, collName) {
- var res = db[collName].explain().distinct('_id');
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'PROJECTION'));
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'DISTINCT_SCAN'));
- }
- }, $super.states);
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.states = Object.extend(
+ {
+ explainBasicDistinct: function(db, collName) {
+ var res = db[collName].explain().distinct('i');
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COLLSCAN'));
+ },
+ explainDistinctIndex: function(db, collName) {
+ var res = db[collName].explain().distinct('_id');
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'PROJECTION'));
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'DISTINCT_SCAN'));
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
+ $config.transitions =
+ Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/explain_find.js b/jstests/concurrency/fsm_workloads/explain_find.js
index acb189d24be..0712c94f483 100644
--- a/jstests/concurrency/fsm_workloads/explain_find.js
+++ b/jstests/concurrency/fsm_workloads/explain_find.js
@@ -6,61 +6,66 @@
* Runs explain() and find() on a collection.
*
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-load('jstests/libs/analyze_plan.js'); // for planHasStage and isIxscan
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/libs/analyze_plan.js'); // for planHasStage and isIxscan
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.states = Object.extend({
- explainLimit: function explainLimit(db, collName) {
- var res = db[collName].find().limit(3).explain();
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'LIMIT'));
- },
- explainBatchSize: function explainBatchSize(db, collName) {
- var res = db[collName].find().batchSize(3).explain();
- assertAlways.commandWorked(res);
- },
- explainAddOption: function explainAddOption(db, collName) {
- var res = db[collName].explain().find().addOption(DBQuery.Option.exhaust).finish();
- assertAlways.commandWorked(res);
- },
- explainSkip: function explainSkip(db, collName) {
- var res = db[collName].explain().find().skip(3).finish();
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SKIP'));
- },
- explainSort: function explainSort(db, collName) {
- var res = db[collName].find().sort({ i: -1 }).explain();
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SORT'));
- },
- explainHint: function explainHint(db, collName) {
- assertWhenOwnColl(function() {
- var res = db[collName].find().hint({ j: 1 }).explain();
- assertWhenOwnColl.commandWorked(res);
- assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
- });
- },
- explainMaxTimeMS: function explainMaxTimeMS(db, collName) {
- var res = db[collName].find().maxTimeMS(2000).explain();
- assertAlways.commandWorked(res);
- },
- explainSnapshot: function explainSnapshot(db, collName) {
- var res = db[collName].find().snapshot().explain();
- assertAlways.commandWorked(res);
- assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
- }
- }, $super.states);
+ $config.states = Object.extend(
+ {
+ explainLimit: function explainLimit(db, collName) {
+ var res = db[collName].find().limit(3).explain();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'LIMIT'));
+ },
+ explainBatchSize: function explainBatchSize(db, collName) {
+ var res = db[collName].find().batchSize(3).explain();
+ assertAlways.commandWorked(res);
+ },
+ explainAddOption: function explainAddOption(db, collName) {
+ var res =
+ db[collName].explain().find().addOption(DBQuery.Option.exhaust).finish();
+ assertAlways.commandWorked(res);
+ },
+ explainSkip: function explainSkip(db, collName) {
+ var res = db[collName].explain().find().skip(3).finish();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SKIP'));
+ },
+ explainSort: function explainSort(db, collName) {
+ var res = db[collName].find().sort({i: -1}).explain();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SORT'));
+ },
+ explainHint: function explainHint(db, collName) {
+ assertWhenOwnColl(function() {
+ var res = db[collName].find().hint({j: 1}).explain();
+ assertWhenOwnColl.commandWorked(res);
+ assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
+ });
+ },
+ explainMaxTimeMS: function explainMaxTimeMS(db, collName) {
+ var res = db[collName].find().maxTimeMS(2000).explain();
+ assertAlways.commandWorked(res);
+ },
+ explainSnapshot: function explainSnapshot(db, collName) {
+ var res = db[collName].find().snapshot().explain();
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
+ $config.transitions =
+ Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
- // doubling number of iterations so there is a higher chance we will
- // transition to each of the 8 new states at least once
- $config.iterations = $super.iterations * 2;
+ // doubling number of iterations so there is a higher chance we will
+ // transition to each of the 8 new states at least once
+ $config.iterations = $super.iterations * 2;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/explain_group.js b/jstests/concurrency/fsm_workloads/explain_group.js
index d99a60d7c42..007c703c648 100644
--- a/jstests/concurrency/fsm_workloads/explain_group.js
+++ b/jstests/concurrency/fsm_workloads/explain_group.js
@@ -6,24 +6,27 @@
* Runs explain() and group() on a collection.
*
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-load('jstests/libs/analyze_plan.js'); // for planHasStage
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/libs/analyze_plan.js'); // for planHasStage
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.states = Object.extend({
- explainBasicGroup: function explainBasicGroup(db, collName) {
- var res = db[collName].explain().group(
- { key: { i: 1 }, initial: {}, reduce: function() {} }
- );
- assertAlways.commandWorked(res);
- }
- }, $super.states);
+ $config.states = Object.extend(
+ {
+ explainBasicGroup: function explainBasicGroup(db, collName) {
+ var res = db[collName].explain().group(
+ {key: {i: 1}, initial: {}, reduce: function() {}});
+ assertAlways.commandWorked(res);
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
+ $config.transitions = Object.extend(
+ {explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/explain_remove.js b/jstests/concurrency/fsm_workloads/explain_remove.js
index 02620a92bea..37b451994d9 100644
--- a/jstests/concurrency/fsm_workloads/explain_remove.js
+++ b/jstests/concurrency/fsm_workloads/explain_remove.js
@@ -5,39 +5,45 @@
*
* Runs explain() and remove() on a collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.states = Object.extend({
- explainSingleRemove: function explainSingleRemove(db, collName) {
- var res = db[collName].explain('executionStats')
- .remove({ i: this.nInserted }, /* justOne */ true);
- assertAlways.commandWorked(res);
- assertWhenOwnColl(function() {
- assertWhenOwnColl.eq(1, res.executionStats.totalDocsExamined);
+ $config.states = Object.extend(
+ {
+ explainSingleRemove: function explainSingleRemove(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .remove({i: this.nInserted}, /* justOne */ true);
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.eq(1, res.executionStats.totalDocsExamined);
- // the document should not have been deleted.
- assertWhenOwnColl.eq(1, db[collName].find({i: this.nInserted}).itcount());
- }.bind(this));
- },
- explainMultiRemove: function explainMultiRemove(db, collName) {
- var res = db[collName].explain('executionStats')
- .remove({i: {$lte: this.nInserted / 2}});
- assertAlways.commandWorked(res);
- assertWhenOwnColl(function() {
- assertWhenOwnColl.eq(this.nInserted / 2 + 1,
- explain.executionStats.totalDocsExamined);
- // no documents should have been deleted
- assertWhenOwnColl.eq(this.nInserted, db[collName].itcount());
- }.bind(this));
- }
- }, $super.states);
+ // the document should not have been deleted.
+ assertWhenOwnColl.eq(1, db[collName].find({i: this.nInserted}).itcount());
+ }.bind(this));
+ },
+ explainMultiRemove: function explainMultiRemove(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .remove({i: {$lte: this.nInserted / 2}});
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.eq(this.nInserted / 2 + 1,
+ explain.executionStats.totalDocsExamined);
+ // no documents should have been deleted
+ assertWhenOwnColl.eq(this.nInserted, db[collName].itcount());
+ }.bind(this));
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
+ $config.transitions =
+ Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/explain_update.js b/jstests/concurrency/fsm_workloads/explain_update.js
index f72f06babb5..89876439bc2 100644
--- a/jstests/concurrency/fsm_workloads/explain_update.js
+++ b/jstests/concurrency/fsm_workloads/explain_update.js
@@ -5,63 +5,73 @@
*
* Runs explain() and update() on a collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.states = Object.extend({
- explainBasicUpdate: function explainBasicUpdate(db, collName) {
- var res = db[collName].explain('executionStats').update({i: this.nInserted},
- {$set: {j: 49}});
- assertAlways.commandWorked(res);
- assertWhenOwnColl(function() {
- assertWhenOwnColl.eq(1, explain.executionStats.totalDocsExamined);
+ $config.states = Object.extend(
+ {
+ explainBasicUpdate: function explainBasicUpdate(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .update({i: this.nInserted}, {$set: {j: 49}});
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.eq(1, explain.executionStats.totalDocsExamined);
- // document should not have been updated.
- var doc = db[collName].findOne({ i: this.nInserted });
- assertWhenOwnColl.eq(2 * this.nInserted, doc.j);
- }.bind(this));
- },
- explainUpdateUpsert: function explainUpdateUpsert(db, collName) {
- var res = db[collName].explain('executionStats').update({i: 2 * this.nInserted + 1},
- {$set: {j: 81}},
- /* upsert */ true);
- assertAlways.commandWorked(res);
- var stage = res.executionStats.executionStages;
+ // document should not have been updated.
+ var doc = db[collName].findOne({i: this.nInserted});
+ assertWhenOwnColl.eq(2 * this.nInserted, doc.j);
+ }.bind(this));
+ },
+ explainUpdateUpsert: function explainUpdateUpsert(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .update({i: 2 * this.nInserted + 1},
+ {$set: {j: 81}},
+ /* upsert */ true);
+ assertAlways.commandWorked(res);
+ var stage = res.executionStats.executionStages;
- // if explaining a write command through mongos
- if (isMongos(db)) {
- stage = stage.shards[0].executionStages;
- }
- assertAlways.eq(stage.stage, 'UPDATE');
- assertWhenOwnColl(stage.wouldInsert);
+ // if explaining a write command through mongos
+ if (isMongos(db)) {
+ stage = stage.shards[0].executionStages;
+ }
+ assertAlways.eq(stage.stage, 'UPDATE');
+ assertWhenOwnColl(stage.wouldInsert);
- // make sure that the insert didn't actually happen.
- assertWhenOwnColl.eq(this.nInserted, db[collName].find().itcount());
- },
- explainUpdateMulti: function explainUpdateMulti(db, collName) {
- var res = db[collName].explain('executionStats').update({i: {$lte: 2}}, {$set: {b: 3}},
- /* upsert */ false,
- /* multi */ true);
- assertAlways.commandWorked(res);
- var stage = res.executionStats.executionStages;
+ // make sure that the insert didn't actually happen.
+ assertWhenOwnColl.eq(this.nInserted, db[collName].find().itcount());
+ },
+ explainUpdateMulti: function explainUpdateMulti(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .update({i: {$lte: 2}},
+ {$set: {b: 3}},
+ /* upsert */ false,
+ /* multi */ true);
+ assertAlways.commandWorked(res);
+ var stage = res.executionStats.executionStages;
- // if explaining a write command through mongos
- if (isMongos(db)) {
- stage = stage.shards[0].executionStages;
- }
- assertAlways.eq(stage.stage, 'UPDATE');
- assertWhenOwnColl(!stage.wouldInsert);
- assertWhenOwnColl.eq(3, stage.nMatched);
- assertWhenOwnColl.eq(3, stage.nWouldModify);
- }
- }, $super.states);
+ // if explaining a write command through mongos
+ if (isMongos(db)) {
+ stage = stage.shards[0].executionStages;
+ }
+ assertAlways.eq(stage.stage, 'UPDATE');
+ assertWhenOwnColl(!stage.wouldInsert);
+ assertWhenOwnColl.eq(3, stage.nMatched);
+ assertWhenOwnColl.eq(3, stage.nWouldModify);
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend({
- explain: $config.data.assignEqualProbsToTransitions($config.states)
- }, $super.transitions);
+ $config.transitions =
+ Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
+ $super.transitions);
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_inc.js b/jstests/concurrency/fsm_workloads/findAndModify_inc.js
index 2c14791e8d9..cf0a50284ff 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_inc.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_inc.js
@@ -22,14 +22,13 @@ var $config = (function() {
},
update: function update(db, collName) {
- var updateDoc = { $inc: {} };
+ var updateDoc = {
+ $inc: {}
+ };
updateDoc.$inc[this.fieldName] = 1;
- var res = db.runCommand({
- findAndModify: collName,
- query: { _id: 'findAndModify_inc' },
- update: updateDoc
- });
+ var res = db.runCommand(
+ {findAndModify: collName, query: {_id: 'findAndModify_inc'}, update: updateDoc});
assertAlways.commandWorked(res);
// If the document was invalidated during a yield, then we wouldn't have modified it.
@@ -64,13 +63,13 @@ var $config = (function() {
};
var transitions = {
- init: { update: 1 },
- update: { find: 1 },
- find: { update: 1 }
+ init: {update: 1},
+ update: {find: 1},
+ find: {update: 1}
};
function setup(db, collName, cluster) {
- db[collName].insert({ _id: 'findAndModify_inc' });
+ db[collName].insert({_id: 'findAndModify_inc'});
}
return {
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove.js b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
index ea89c971ff2..b33e67b2e01 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
@@ -9,7 +9,7 @@
var $config = (function() {
var data = {
- shardKey: { tid: 1 }
+ shardKey: {tid: 1}
};
var states = (function() {
@@ -19,14 +19,14 @@ var $config = (function() {
}
function insertAndRemove(db, collName) {
- var res = db[collName].insert({ tid: this.tid, value: this.iter });
+ var res = db[collName].insert({tid: this.tid, value: this.iter});
assertAlways.writeOK(res);
assertAlways.eq(1, res.nInserted);
res = db.runCommand({
findandmodify: db[collName].getName(),
- query: { tid: this.tid },
- sort: { iter: -1 },
+ query: {tid: this.tid},
+ sort: {iter: -1},
remove: true
});
assertAlways.commandWorked(res);
@@ -50,8 +50,8 @@ var $config = (function() {
})();
var transitions = {
- init: { insertAndRemove: 1 },
- insertAndRemove: { insertAndRemove: 1 }
+ init: {insertAndRemove: 1},
+ insertAndRemove: {insertAndRemove: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
index 9df0e8c8636..c08fc5775aa 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
@@ -14,17 +14,22 @@ load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMong
var $config = (function() {
- var data = {
+ var data = {
// Use the workload name as the database name, since the workload name is assumed to be
// unique.
uniqueDBName: 'findAndModify_remove_queue',
newDocForInsert: function newDocForInsert(i) {
- return { _id: i, rand: Random.rand() };
+ return {
+ _id: i,
+ rand: Random.rand()
+ };
},
getIndexSpec: function getIndexSpec() {
- return { rand: 1 };
+ return {
+ rand: 1
+ };
},
opName: 'removed',
@@ -33,10 +38,12 @@ var $config = (function() {
// Use a separate database to avoid conflicts with other FSM workloads.
var ownedDB = db.getSiblingDB(db.getName() + this.uniqueDBName);
- var updateDoc = { $push: {} };
+ var updateDoc = {
+ $push: {}
+ };
updateDoc.$push[this.opName] = id;
- var res = ownedDB[collName].update({ _id: this.tid }, updateDoc, { upsert: true });
+ var res = ownedDB[collName].update({_id: this.tid}, updateDoc, {upsert: true});
assertAlways.writeOK(res);
assertAlways.contains(res.nMatched, [0, 1], tojson(res));
@@ -45,8 +52,7 @@ var $config = (function() {
assertAlways.eq(0, res.nModified, tojson(res));
}
assertAlways.eq(1, res.nUpserted, tojson(res));
- }
- else {
+ } else {
if (ownedDB.getMongo().writeMode() === 'commands') {
assertAlways.eq(1, res.nModified, tojson(res));
}
@@ -61,7 +67,7 @@ var $config = (function() {
var res = db.runCommand({
findAndModify: db[collName].getName(),
query: {},
- sort: { rand: -1 },
+ sort: {rand: -1},
remove: true
});
assertAlways.commandWorked(res);
@@ -86,7 +92,7 @@ var $config = (function() {
})();
var transitions = {
- remove: { remove: 1 }
+ remove: {remove: 1}
};
function setup(db, collName, cluster) {
@@ -98,8 +104,10 @@ var $config = (function() {
var doc = this.newDocForInsert(i);
// Require that documents inserted by this workload use _id values that can be compared
// using the default JS comparator.
- assertAlways.neq(typeof doc._id, 'object', 'default comparator of' +
- ' Array.prototype.sort() is not well-ordered for JS objects');
+ assertAlways.neq(typeof doc._id,
+ 'object',
+ 'default comparator of' +
+ ' Array.prototype.sort() is not well-ordered for JS objects');
bulk.insert(doc);
}
var res = bulk.execute();
@@ -152,9 +160,8 @@ var $config = (function() {
break;
}
- var msg = 'threads ' + tojson(smallest.indices) +
- ' claim to have ' + opName +
- ' a document with _id = ' + tojson(smallest.value);
+ var msg = 'threads ' + tojson(smallest.indices) + ' claim to have ' + opName +
+ ' a document with _id = ' + tojson(smallest.value);
assertWhenOwnColl.eq(1, smallest.indices.length, msg);
indices[smallest.indices[0]]++;
@@ -176,8 +183,7 @@ var $config = (function() {
smallestValueIsSet = true;
smallestValue = value;
smallestIndices = [i];
- }
- else if (value === smallestValue) {
+ } else if (value === smallestValue) {
smallestIndices.push(i);
}
}
@@ -185,7 +191,10 @@ var $config = (function() {
if (!smallestValueIsSet) {
return null;
}
- return { value: smallestValue, indices: smallestIndices };
+ return {
+ value: smallestValue,
+ indices: smallestIndices
+ };
}
}
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update.js b/jstests/concurrency/fsm_workloads/findAndModify_update.js
index 8d6c8b9b2c7..c794c755ed9 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update.js
@@ -11,14 +11,18 @@
var $config = (function() {
var data = {
- numDocsPerThread: 3, // >1 for 'sort' to be meaningful
- shardKey: { tid: 1 }
+ numDocsPerThread: 3, // >1 for 'sort' to be meaningful
+ shardKey: {tid: 1}
};
var states = (function() {
function makeDoc(tid) {
- return { _id: new ObjectId(), tid: tid, value: 0 };
+ return {
+ _id: new ObjectId(),
+ tid: tid,
+ value: 0
+ };
}
function init(db, collName) {
@@ -34,10 +38,9 @@ var $config = (function() {
var res = db.runCommand({
findandmodify: db[collName].getName(),
- query: { tid: this.tid },
- sort: { value: 1 },
- update: { $max: { value: updatedValue } },
- new: true
+ query: {tid: this.tid},
+ sort: {value: 1},
+ update: {$max: {value: updatedValue}}, new: true
});
assertAlways.commandWorked(res);
@@ -55,10 +58,9 @@ var $config = (function() {
var res = db.runCommand({
findandmodify: db[collName].getName(),
- query: { tid: this.tid },
- sort: { value: -1 },
- update: { $min: { value: updatedValue } },
- new: true
+ query: {tid: this.tid},
+ sort: {value: -1},
+ update: {$min: {value: updatedValue}}, new: true
});
assertAlways.commandWorked(res);
@@ -80,13 +82,13 @@ var $config = (function() {
})();
var transitions = {
- init: { findAndModifyAscending: 0.5, findAndModifyDescending: 0.5 },
- findAndModifyAscending: { findAndModifyAscending: 0.5, findAndModifyDescending: 0.5 },
- findAndModifyDescending: { findAndModifyAscending: 0.5, findAndModifyDescending: 0.5 }
+ init: {findAndModifyAscending: 0.5, findAndModifyDescending: 0.5},
+ findAndModifyAscending: {findAndModifyAscending: 0.5, findAndModifyDescending: 0.5},
+ findAndModifyDescending: {findAndModifyAscending: 0.5, findAndModifyDescending: 0.5}
};
function setup(db, collName, cluster) {
- var res = db[collName].ensureIndex({ tid: 1, value: 1 });
+ var res = db[collName].ensureIndex({tid: 1, value: 1});
assertAlways.commandWorked(res);
}
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
index abaf073288f..ed874f1bd81 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
@@ -10,17 +10,19 @@
*
* Attempts to force a collection scan by not creating an index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_update.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/findAndModify_update.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- // Do not create the { tid: 1, value: 1 } index so that a collection
- // scan is performed for the query and sort operations.
- $config.setup = function setup(db, collName, cluster) { };
+ // Do not create the { tid: 1, value: 1 } index so that a
+ // collection
+ // scan is performed for the query and sort operations.
+ $config.setup = function setup(db, collName, cluster) {};
- // Remove the shardKey so that a collection scan is performed
- delete $config.data.shardKey;
+ // Remove the shardKey so that a collection scan is performed
+ delete $config.data.shardKey;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
index b1b0c0add53..277b2882700 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
@@ -8,12 +8,12 @@
* a document move by growing the size of the inserted document using
* the $set and $mul update operators.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
var data = {
- shardKey: { tid: 1 },
+ shardKey: {tid: 1},
};
var states = (function() {
@@ -30,7 +30,11 @@ var $config = (function() {
function makeDoc(tid) {
// Use 32-bit integer for representing 'length' property
// to ensure $mul does integer multiplication
- var doc = { _id: new ObjectId(), tid: tid, length: new NumberInt(1) };
+ var doc = {
+ _id: new ObjectId(),
+ tid: tid,
+ length: new NumberInt(1)
+ };
doc[uniqueFieldName] = makeStringOfLength(doc.length);
return doc;
}
@@ -53,11 +57,12 @@ var $config = (function() {
}
// Get the DiskLoc of the document before its potential move
- var before = db[collName].find({ tid: this.tid })
- .showDiskLoc()
- .sort({ length: 1 }) // fetch document of smallest size
- .limit(1)
- .next();
+ var before = db[collName]
+ .find({tid: this.tid})
+ .showDiskLoc()
+ .sort({length: 1}) // fetch document of smallest size
+ .limit(1)
+ .next();
// Increase the length of the 'findAndModify_update_grow' string
// to double the size of the overall document
@@ -65,15 +70,17 @@ var $config = (function() {
var updatedLength = factor * this.length;
var updatedValue = makeStringOfLength(updatedLength);
- var update = { $set: {}, $mul: { length: factor } };
+ var update = {
+ $set: {},
+ $mul: {length: factor}
+ };
update.$set[uniqueFieldName] = updatedValue;
var res = db.runCommand({
findandmodify: db[collName].getName(),
- query: { tid: this.tid },
- sort: { length: 1 }, // fetch document of smallest size
- update: update,
- new: true
+ query: {tid: this.tid},
+ sort: {length: 1}, // fetch document of smallest size
+ update: update, new: true
});
assertAlways.commandWorked(res);
@@ -92,14 +99,14 @@ var $config = (function() {
this.bsonsize = Object.bsonsize(doc);
// Get the DiskLoc of the document after its potential move
- var after = db[collName].find({ _id: before._id }).showDiskLoc().next();
+ var after = db[collName].find({_id: before._id}).showDiskLoc().next();
if (isMongod(db) && isMMAPv1(db)) {
// Since the document has at least doubled in size, and the default
// allocation strategy of mmapv1 is to use power of two sizes, the
// document will have always moved
- assertWhenOwnColl.neq(before.$recordId, after.$recordId,
- 'document should have moved');
+ assertWhenOwnColl.neq(
+ before.$recordId, after.$recordId, 'document should have moved');
}
}
@@ -111,8 +118,8 @@ var $config = (function() {
})();
var transitions = {
- insert: { findAndModify: 1 },
- findAndModify: { findAndModify: 1 }
+ insert: {findAndModify: 1},
+ findAndModify: {findAndModify: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
index 0ca53015b68..8ed1a148afa 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
@@ -11,64 +11,76 @@
* This workload was designed to reproduce an issue similar to SERVER-18304 for update operations
* using the findAndModify command where the old version of the document is returned.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- // Use the workload name as the database name, since the workload name is assumed to be unique.
- $config.data.uniqueDBName = 'findAndModify_update_queue';
+ // Use the workload name as the database name, since the workload name is assumed to be
+ // unique.
+ $config.data.uniqueDBName = 'findAndModify_update_queue';
- $config.data.newDocForInsert = function newDocForInsert(i) {
- return { _id: i, rand: Random.rand(), counter: 0 };
- };
+ $config.data.newDocForInsert = function newDocForInsert(i) {
+ return {
+ _id: i,
+ rand: Random.rand(),
+ counter: 0
+ };
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- return { counter: 1, rand: -1 };
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ return {
+ counter: 1,
+ rand: -1
+ };
+ };
- $config.data.opName = 'updated';
+ $config.data.opName = 'updated';
- var states = (function() {
+ var states = (function() {
- function update(db, collName) {
- // Update the counter field to avoid matching the same document again.
- var res = db.runCommand({
- findAndModify: db[collName].getName(),
- query: { counter: 0 },
- sort: { rand: -1 },
- update: { $inc: { counter: 1 } },
- new: false
- });
- assertAlways.commandWorked(res);
+ function update(db, collName) {
+ // Update the counter field to avoid matching the same document again.
+ var res = db.runCommand({
+ findAndModify: db[collName].getName(),
+ query: {counter: 0},
+ sort: {rand: -1},
+ update: {$inc: {counter: 1}}, new: false
+ });
+ assertAlways.commandWorked(res);
- var doc = res.value;
- if (isMongod(db) && !isMMAPv1(db)) {
- // MMAPv1 does not automatically retry if there was a conflict, so it is expected
- // that it may return null in the case of a conflict. All other storage engines
- // should automatically retry the operation, and thus should never return null.
- assertWhenOwnColl.neq(
- doc, null, 'findAndModify should have found and updated a matching document');
- }
- if (doc !== null) {
- this.saveDocId(db, collName, doc._id);
+ var doc = res.value;
+ if (isMongod(db) && !isMMAPv1(db)) {
+ // MMAPv1 does not automatically retry if there was a conflict, so it is
+ // expected
+ // that it may return null in the case of a conflict. All other storage engines
+ // should automatically retry the operation, and thus should never return null.
+ assertWhenOwnColl.neq(
+ doc,
+ null,
+ 'findAndModify should have found and updated a matching document');
+ }
+ if (doc !== null) {
+ this.saveDocId(db, collName, doc._id);
+ }
}
- }
- return {
- update: update
- };
+ return {
+ update: update
+ };
- })();
+ })();
- var transitions = {
- update: { update: 1 }
- };
+ var transitions = {
+ update: {update: 1}
+ };
- $config.startState = 'update';
- $config.states = states;
- $config.transitions = transitions;
+ $config.startState = 'update';
+ $config.states = states;
+ $config.transitions = transitions;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
index a1073106ab0..499e8324cae 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
@@ -12,7 +12,7 @@ var $config = (function() {
var data = {
sort: false,
- shardKey: { tid: 1 }
+ shardKey: {tid: 1}
};
var states = (function() {
@@ -41,13 +41,15 @@ var $config = (function() {
var updatedValue = this.iter++;
// Use a query specification that does not match any existing documents
- var query = { _id: new ObjectId(), tid: this.tid };
+ var query = {
+ _id: new ObjectId(),
+ tid: this.tid
+ };
var cmdObj = {
findandmodify: db[collName].getName(),
query: query,
- update: { $setOnInsert: { values: [updatedValue] } },
- new: true,
+ update: {$setOnInsert: {values: [updatedValue]}}, new: true,
upsert: true
};
@@ -74,9 +76,8 @@ var $config = (function() {
var cmdObj = {
findandmodify: db[collName].getName(),
- query: { tid: this.tid },
- update: { $push: { values: updatedValue } },
- new: true,
+ query: {tid: this.tid},
+ update: {$push: {values: updatedValue}}, new: true,
upsert: false
};
@@ -111,9 +112,9 @@ var $config = (function() {
})();
var transitions = {
- init: { upsert: 0.1, update: 0.9 },
- upsert: { upsert: 0.1, update: 0.9 },
- update: { upsert: 0.1, update: 0.9 }
+ init: {upsert: 0.1, update: 0.9},
+ upsert: {upsert: 0.1, update: 0.9},
+ update: {upsert: 0.1, update: 0.9}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
index 200de213235..e9cca5d6d8f 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
@@ -10,12 +10,15 @@
*
* Forces 'sort' to perform a collection scan by using $natural.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/findAndModify_upsert.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/findAndModify_upsert.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- $config.data.sort = { $natural: 1 };
+ $config.data.sort = {
+ $natural: 1
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/group.js b/jstests/concurrency/fsm_workloads/group.js
index 34bde848e00..3ccc909e0c9 100644
--- a/jstests/concurrency/fsm_workloads/group.js
+++ b/jstests/concurrency/fsm_workloads/group.js
@@ -18,10 +18,12 @@ var $config = (function() {
return {
group: {
ns: collName,
- initial: { bucketCount: 0, bucketSum: 0},
+ initial: {bucketCount: 0, bucketSum: 0},
$keyf: function $keyf(doc) {
// place doc.rand into appropriate bucket
- return { bucket: Math.floor(doc.rand * 10) + 1 };
+ return {
+ bucket: Math.floor(doc.rand * 10) + 1
+ };
},
$reduce: function $reduce(curr, result) {
result.bucketCount++;
@@ -41,7 +43,7 @@ var $config = (function() {
}, 0);
}
- var data = {
+ var data = {
numDocs: 1000,
generateGroupCmdObj: generateGroupCmdObj,
sumBucketCount: sumBucketCount
@@ -68,13 +70,13 @@ var $config = (function() {
})();
var transitions = {
- group: { group: 1 }
+ group: {group: 1}
};
function setup(db, collName, cluster) {
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
- bulk.insert({ rand: Random.rand() });
+ bulk.insert({rand: Random.rand()});
}
var res = bulk.execute();
assertAlways.writeOK(res);
diff --git a/jstests/concurrency/fsm_workloads/group_cond.js b/jstests/concurrency/fsm_workloads/group_cond.js
index 7344b781d31..226b9a9afad 100644
--- a/jstests/concurrency/fsm_workloads/group_cond.js
+++ b/jstests/concurrency/fsm_workloads/group_cond.js
@@ -13,28 +13,32 @@
*
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/group.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/group.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
- assertAlways.commandWorked(db[collName].ensureIndex({ rand: 1 }));
- };
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
+ assertAlways.commandWorked(db[collName].ensureIndex({rand: 1}));
+ };
- $config.states.group = function group(db, collName) {
- var cmdObj = this.generateGroupCmdObj(collName);
- cmdObj.group.cond = { rand: { $gte: 0.5 } };
- var res = db.runCommand(cmdObj);
- assertWhenOwnColl.commandWorked(res);
+ $config.states.group = function group(db, collName) {
+ var cmdObj = this.generateGroupCmdObj(collName);
+ cmdObj.group.cond = {
+ rand: {$gte: 0.5}
+ };
+ var res = db.runCommand(cmdObj);
+ assertWhenOwnColl.commandWorked(res);
- assertWhenOwnColl.lte(res.count, this.numDocs);
- assertWhenOwnColl.lte(res.keys, 5);
- assertWhenOwnColl(function() {
- assertWhenOwnColl.lte(res.retval.length, 5);
- assertWhenOwnColl.eq(this.sumBucketCount(res.retval), res.count);
- }.bind(this));
- };
+ assertWhenOwnColl.lte(res.count, this.numDocs);
+ assertWhenOwnColl.lte(res.keys, 5);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.lte(res.retval.length, 5);
+ assertWhenOwnColl.eq(this.sumBucketCount(res.retval), res.count);
+ }.bind(this));
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_1char.js b/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
index 6df68323dd1..3d90da7470a 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
@@ -7,20 +7,21 @@
* documents appear in both a collection scan and an index scan. The indexed
* value is a 1-character string based on the thread's id.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_1char';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ $config.data.indexedField = 'indexed_insert_1char';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.indexedValue = String.fromCharCode(33 + this.tid);
- };
+ this.indexedValue = String.fromCharCode(33 + this.tid);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js
index 2a66590fb31..cdbba38b172 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js
@@ -5,8 +5,8 @@
*
* Executes the indexed_insert_1char.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_1char.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_1char.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_2d.js b/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
index 3192aa185cc..c8abb257745 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
@@ -7,49 +7,51 @@
* appear in both a collection scan and an index scan. The indexed value is a
* legacy coordinate pair, indexed with a 2d index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_2d';
- // Remove the shard key for 2d indexes, as they are not supported
- delete $config.data.shardKey;
+ $config.data.indexedField = 'indexed_insert_2d';
+ // Remove the shard key for 2d indexes, as they are not supported
+ delete $config.data.shardKey;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- assertAlways.lt(this.tid, 1 << 16); // assume tid is a 16 bit nonnegative int
- // split the tid into the odd bits and the even bits
- // for example:
- // tid: 57 = 00111001
- // even: 0 1 0 1 = 5
- // odd: 0 1 1 0 = 6
- // This lets us turn every tid into a unique pair of numbers within the range [0, 255].
- // The pairs are then normalized to have valid longitude and latitude values.
- var oddBits = 0;
- var evenBits = 0;
- for (var i = 0; i < 16; ++i) {
- if (this.tid & 1 << i) {
- if (i % 2 === 0) {
- // i is even
- evenBits |= 1 << (i / 2);
- } else {
- // i is odd
- oddBits |= 1 << (i / 2);
+ assertAlways.lt(this.tid, 1 << 16); // assume tid is a 16 bit nonnegative int
+ // split the tid into the odd bits and the even bits
+ // for example:
+ // tid: 57 = 00111001
+ // even: 0 1 0 1 = 5
+ // odd: 0 1 1 0 = 6
+ // This lets us turn every tid into a unique pair of numbers within the range [0, 255].
+ // The pairs are then normalized to have valid longitude and latitude values.
+ var oddBits = 0;
+ var evenBits = 0;
+ for (var i = 0; i < 16; ++i) {
+ if (this.tid & 1 << i) {
+ if (i % 2 === 0) {
+ // i is even
+ evenBits |= 1 << (i / 2);
+ } else {
+ // i is odd
+ oddBits |= 1 << (i / 2);
+ }
}
}
- }
- assertAlways.lt(oddBits, 256);
- assertAlways.lt(evenBits, 256);
- this.indexedValue = [(evenBits - 128) / 2, (oddBits - 128) / 2];
- };
+ assertAlways.lt(oddBits, 256);
+ assertAlways.lt(evenBits, 256);
+ this.indexedValue = [(evenBits - 128) / 2, (oddBits - 128) / 2];
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- var ixSpec = {};
- ixSpec[this.indexedField] = '2d';
- return ixSpec;
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ var ixSpec = {};
+ ixSpec[this.indexedField] = '2d';
+ return ixSpec;
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js b/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
index 0271c223049..6c8fd86c104 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
@@ -7,18 +7,19 @@
* appear in both a collection scan and an index scan. The indexed value is a
* legacy coordinate pair, indexed with a 2dsphere index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_2d.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_2d.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_2dsphere';
+ $config.data.indexedField = 'indexed_insert_2dsphere';
- $config.data.getIndexSpec = function getIndexSpec() {
- var ixSpec = {};
- ixSpec[this.indexedField] = '2dsphere';
- return ixSpec;
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ var ixSpec = {};
+ ixSpec[this.indexedField] = '2dsphere';
+ return ixSpec;
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base.js b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
index 07e5e287518..59dcab4f0a0 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
@@ -18,7 +18,7 @@ var $config = (function() {
}
var order = ixSpec[field];
- if (order !== 1 && order !== -1) { // e.g. '2d' or '2dsphere'
+ if (order !== 1 && order !== -1) { // e.g. '2d' or '2dsphere'
order = 1;
}
@@ -43,14 +43,16 @@ var $config = (function() {
find: function find(db, collName) {
// collection scan
- var count = db[collName].find(this.getDoc()).sort({ $natural: 1 }).itcount();
+ var count = db[collName].find(this.getDoc()).sort({$natural: 1}).itcount();
assertWhenOwnColl.eq(count, this.nInserted);
// Use hint() to force an index scan, but only when an appropriate index exists.
// We can only use hint() when the index exists and we know that the collection
// is not being potentially modified by other workloads.
var ownColl = false;
- assertWhenOwnColl(function() { ownColl = true; });
+ assertWhenOwnColl(function() {
+ ownColl = true;
+ });
if (this.indexExists && ownColl) {
count = db[collName].find(this.getDoc()).hint(this.getIndexSpec()).itcount();
assertWhenOwnColl.eq(count, this.nInserted);
@@ -68,9 +70,9 @@ var $config = (function() {
};
var transitions = {
- init: { insert: 1 },
- insert: { find: 1 },
- find: { insert: 1 }
+ init: {insert: 1},
+ insert: {find: 1},
+ find: {insert: 1}
};
function setup(db, collName, cluster) {
@@ -96,7 +98,7 @@ var $config = (function() {
return doc;
},
indexedField: 'x',
- shardKey: { x: 1 },
+ shardKey: {x: 1},
docsPerInsert: 1
},
setup: setup
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js b/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js
index a18fd00d9e0..bdffdce6c8c 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js
@@ -5,9 +5,9 @@
*
* Executes the indexed_insert_base.js workload on a capped collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped
var $config = extendWorkload($config, makeCapped);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js
index e58ff22a5b3..aaa3b2e0e07 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js
@@ -5,8 +5,8 @@
*
* Executes the indexed_insert_base.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_compound.js b/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
index e57c7a62c1f..fe9641502b0 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
@@ -7,32 +7,34 @@
* appear in both a collection scan and an index scan. The collection is indexed
* with a compound index on three different fields.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
- };
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+ };
- $config.data.getDoc = function getDoc() {
- return {
- indexed_insert_compound_x: this.tid & 0x0f, // lowest 4 bits
- indexed_insert_compound_y: this.tid >> 4, // high bits
- indexed_insert_compound_z: String.fromCharCode(33 + this.tid)
- };
- };
+ $config.data.getDoc = function getDoc() {
+ return {
+ indexed_insert_compound_x: this.tid & 0x0f, // lowest 4 bits
+ indexed_insert_compound_y: this.tid >> 4, // high bits
+ indexed_insert_compound_z: String.fromCharCode(33 + this.tid)
+ };
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- return {
- indexed_insert_compound_x: 1,
- indexed_insert_compound_y: 1,
- indexed_insert_compound_z: 1
- };
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ return {
+ indexed_insert_compound_x: 1,
+ indexed_insert_compound_y: 1,
+ indexed_insert_compound_z: 1
+ };
+ };
- $config.data.shardKey = $config.data.getIndexSpec();
+ $config.data.shardKey = $config.data.getIndexSpec();
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_eval.js b/jstests/concurrency/fsm_workloads/indexed_insert_eval.js
index 1d877f6d88b..a7a4797efef 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_eval.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_eval.js
@@ -7,27 +7,29 @@
* Asserts that all documents appear in both a collection scan and an index
* scan. The indexed value is the thread id.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.nolock = false;
+ $config.data.nolock = false;
- $config.states.insert = function insert(db, collName) {
- var evalResult = db.runCommand({
- eval: function(collName, doc) {
- var insertResult = db[collName].insert(doc);
- return tojson(insertResult);
- },
- args: [collName, this.getDoc()],
- nolock: this.nolock
- });
- assertAlways.commandWorked(evalResult);
- var insertResult = JSON.parse(evalResult.retval);
- assertAlways.eq(1, insertResult.nInserted, tojson(insertResult));
- this.nInserted += this.docsPerInsert;
- };
+ $config.states.insert = function insert(db, collName) {
+ var evalResult = db.runCommand({
+ eval: function(collName, doc) {
+ var insertResult = db[collName].insert(doc);
+ return tojson(insertResult);
+ },
+ args: [collName, this.getDoc()],
+ nolock: this.nolock
+ });
+ assertAlways.commandWorked(evalResult);
+ var insertResult = JSON.parse(evalResult.retval);
+ assertAlways.eq(1, insertResult.nInserted, tojson(insertResult));
+ this.nInserted += this.docsPerInsert;
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js b/jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
index d7a28961711..d1d2727c0d7 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
@@ -7,12 +7,13 @@
* with the option { nolock: true }. Asserts that all documents appear in both a
* collection scan and an index scan. The indexed value is the thread id.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_eval.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_eval.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- $config.data.nolock = true;
+ $config.data.nolock = true;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
index 11fee4a4061..c34b986bb7b 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
@@ -7,49 +7,51 @@
* documents appear in both a collection scan and an index scan. The indexed
* value is a different BSON type, depending on the thread's id.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
-
- $config.data.indexedField = 'indexed_insert_heterogeneous';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
-
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
-
- // prefix str with zeroes to make it have length len
- function pad(len, str) {
- var padding = new Array(len + 1).join('0');
- return (padding + str).slice(-len);
- }
-
- function makeOID(tid) {
- var str = pad(24, tid.toString(16));
- return new ObjectId(str);
- }
-
- function makeDate(tid) {
- var d = new ISODate('2000-01-01T00:00:00.000Z');
- // setSeconds(n) where n >= 60 will just cause the minutes, hours, etc to increase,
- // so this produces a unique date for each tid
- d.setSeconds(tid);
- return d;
- }
-
- var choices = [
- this.tid, // int
- this.tid.toString(), // string
- this.tid * 0.0001, // float
- { tid: this.tid }, // subdocument
- makeOID(this.tid), // objectid
- makeDate(this.tid), // date
- new Function('', 'return ' + this.tid + ';') // function
- ];
-
- this.indexedValue = choices[this.tid % choices.length];
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+
+var $config = extendWorkload($config,
+ function($config, $super) {
+
+ $config.data.indexedField = 'indexed_insert_heterogeneous';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ // prefix str with zeroes to make it have length len
+ function pad(len, str) {
+ var padding = new Array(len + 1).join('0');
+ return (padding + str).slice(-len);
+ }
+
+ function makeOID(tid) {
+ var str = pad(24, tid.toString(16));
+ return new ObjectId(str);
+ }
+
+ function makeDate(tid) {
+ var d = new ISODate('2000-01-01T00:00:00.000Z');
+ // setSeconds(n) where n >= 60 will just cause the minutes,
+ // hours, etc to increase,
+ // so this produces a unique date for each tid
+ d.setSeconds(tid);
+ return d;
+ }
+
+ var choices = [
+ this.tid, // int
+ this.tid.toString(), // string
+ this.tid * 0.0001, // float
+ {tid: this.tid}, // subdocument
+ makeOID(this.tid), // objectid
+ makeDate(this.tid), // date
+ new Function('', 'return ' + this.tid + ';') // function
+ ];
+
+ this.indexedValue = choices[this.tid % choices.length];
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js
index 99a3e080ef5..56aac8ff2ca 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js
@@ -5,8 +5,8 @@
*
* Executes the indexed_insert_heterogeneous.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_large.js b/jstests/concurrency/fsm_workloads/indexed_insert_large.js
index b997dda4978..50317368aa6 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_large.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_large.js
@@ -8,36 +8,41 @@
* value is a string large enough to make the whole index key be 1K, which is
* the maximum.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_large';
+ $config.data.indexedField = 'indexed_insert_large';
- // Remove the shard key, since it cannot be greater than 512 bytes
- delete $config.data.shardKey;
+ // Remove the shard key, since it cannot be greater than 512 bytes
+ delete $config.data.shardKey;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- // "The total size of an index entry, which can include structural overhead depending on the
- // BSON type, must be less than 1024 bytes."
- // http://docs.mongodb.org/manual/reference/limits/
- var maxIndexedSize = 1023;
+ // "The total size of an index entry, which can include structural overhead depending on
+ // the
+ // BSON type, must be less than 1024 bytes."
+ // http://docs.mongodb.org/manual/reference/limits/
+ var maxIndexedSize = 1023;
- var bsonOverhead = Object.bsonsize({ '': '' });
+ var bsonOverhead = Object.bsonsize({'': ''});
- var bigstr = new Array(maxIndexedSize + 1).join('x');
+ var bigstr = new Array(maxIndexedSize + 1).join('x');
- // prefix the big string with tid to make it unique,
- // then trim it down so that it plus bson overhead is maxIndexedSize
+ // prefix the big string with tid to make it unique,
+ // then trim it down so that it plus bson overhead is maxIndexedSize
- this.indexedValue = (this.tid + bigstr).slice(0, maxIndexedSize - bsonOverhead);
+ this.indexedValue = (this.tid + bigstr).slice(0, maxIndexedSize - bsonOverhead);
- assertAlways.eq(maxIndexedSize, Object.bsonsize({ '': this.indexedValue }),
- 'buggy test: the inserted docs will not have the expected index-key size');
- };
+ assertAlways.eq(
+ maxIndexedSize,
+ Object.bsonsize({'': this.indexedValue}),
+ 'buggy test: the inserted docs will not have the expected index-key size');
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js
index 893c1484a71..98c75cab734 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js
@@ -5,8 +5,8 @@
*
* Executes the indexed_insert_large.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_large.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_large.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
index ea92ebd5d0c..47867362aac 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
@@ -7,17 +7,19 @@
* documents appear in both a collection scan and an index scan. The indexed
* field name is a long string.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- // TODO: make this field name even longer?
- var length = 100;
- var prefix = 'indexed_insert_long_fieldname_';
- $config.data.indexedField = prefix + new Array(length - prefix.length + 1).join('x');
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ // TODO: make this field name even longer?
+ var length = 100;
+ var prefix = 'indexed_insert_long_fieldname_';
+ $config.data.indexedField =
+ prefix + new Array(length - prefix.length + 1).join('x');
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js
index 74ab2e12786..4466d57efd0 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js
@@ -5,8 +5,8 @@
*
* Executes the indexed_insert_long_fieldname.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js b/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
index 5e80c0ae2cd..bff99ae85c7 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
@@ -7,22 +7,24 @@
* documents appear in both a collection scan and an index scan. The indexed
* value is an array of numbers.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_multikey';
- // Remove the shard key, since it cannot be a multikey index
- delete $config.data.shardKey;
+ $config.data.indexedField = 'indexed_insert_multikey';
+ // Remove the shard key, since it cannot be a multikey index
+ delete $config.data.shardKey;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.indexedValue = [0,1,2,3,4,5,6,7,8,9].map(function(n) {
- return this.tid * 10 + n;
- }.bind(this));
- };
+ this.indexedValue = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].map(function(n) {
+ return this.tid * 10 + n;
+ }.bind(this));
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js
index 50fd8b8d4e9..9f8e491d2da 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js
@@ -5,8 +5,8 @@
*
* Executes the indexed_insert_multikey.js workload after dropping its index.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_multikey.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_multikey.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex
var $config = extendWorkload($config, indexedNoindex);
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
index 1355d2158c9..17ffec0bb40 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
@@ -8,31 +8,33 @@
*
* Uses an ordered, bulk operation to perform the inserts.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_ordered_bulk';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ $config.data.indexedField = 'indexed_insert_ordered_bulk';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- $config.states.insert = function insert(db, collName) {
- var doc = {};
- doc[this.indexedField] = this.indexedValue;
+ $config.states.insert = function insert(db, collName) {
+ var doc = {};
+ doc[this.indexedField] = this.indexedValue;
- var bulk = db[collName].initializeOrderedBulkOp();
- for (var i = 0; i < this.docsPerInsert; ++i) {
- bulk.insert(doc);
- }
- var res = bulk.execute();
- assertAlways.writeOK(res);
- assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
+ var bulk = db[collName].initializeOrderedBulkOp();
+ for (var i = 0; i < this.docsPerInsert; ++i) {
+ bulk.insert(doc);
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
- this.nInserted += this.docsPerInsert;
- };
+ this.nInserted += this.docsPerInsert;
+ };
- $config.data.docsPerInsert = 15;
+ $config.data.docsPerInsert = 15;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text.js b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
index b73373b1090..ab38d07098f 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_text.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
@@ -28,13 +28,13 @@ var $config = (function() {
if (Array.isArray(snippet)) {
snippet = snippet.join(' ');
}
- assertWhenOwnColl.gt(db[collName].find({ $text: { $search: snippet } }).itcount(), 0);
+ assertWhenOwnColl.gt(db[collName].find({$text: {$search: snippet}}).itcount(), 0);
}
};
var transitions = {
- init: { insert: 1 },
- insert: { insert: 1 }
+ init: {insert: 1},
+ insert: {insert: 1}
};
function setup(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js b/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
index c372e3d5f51..bacaff869e4 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
@@ -5,29 +5,33 @@
*
* like indexed_insert_text.js but the indexed value is an array of strings
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_text.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_text.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
- };
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+ };
- $config.data.getRandomTextSnippet = function getRandomTextSnippet() {
- var len = Random.randInt(5) + 1; // ensure we always add some text, not just empty array
- var textArr = [];
- for (var i = 0; i < len; ++i) {
- textArr.push($super.data.getRandomTextSnippet.call(this, arguments));
- }
- return textArr;
- };
+ $config.data.getRandomTextSnippet = function getRandomTextSnippet() {
+ var len = Random.randInt(5) +
+ 1; // ensure we always add some text, not just empty array
+ var textArr = [];
+ for (var i = 0; i < len; ++i) {
+ textArr.push($super.data.getRandomTextSnippet.call(this, arguments));
+ }
+ return textArr;
+ };
- // SERVER-21291: Reduce the thread count to alleviate PV1 failovers on Windows DEBUG hosts.
- $config.threadCount = 5;
+ // SERVER-21291: Reduce the thread count to alleviate PV1 failovers on
+ // Windows DEBUG hosts.
+ $config.threadCount = 5;
- // Remove the shard key, since it cannot be a multikey index
- delete $config.data.shardKey;
+ // Remove the shard key, since it cannot be a multikey index
+ delete $config.data.shardKey;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
index fbdbb9c523e..90aa6d3baf7 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
@@ -12,27 +12,26 @@ var $config = (function() {
var states = {
init: function init(db, collName) {
- var res = db[collName].insert({ indexed_insert_ttl: new ISODate(), first: true });
+ var res = db[collName].insert({indexed_insert_ttl: new ISODate(), first: true});
assertAlways.writeOK(res);
assertWhenOwnColl.eq(1, res.nInserted, tojson(res));
},
insert: function insert(db, collName) {
- var res = db[collName].insert({ indexed_insert_ttl: new ISODate() });
+ var res = db[collName].insert({indexed_insert_ttl: new ISODate()});
assertAlways.writeOK(res);
assertWhenOwnColl.eq(1, res.nInserted, tojson(res));
}
};
var transitions = {
- init: { insert: 1 },
- insert: { insert: 1 }
+ init: {insert: 1},
+ insert: {insert: 1}
};
function setup(db, collName, cluster) {
- var res = db[collName].ensureIndex(
- { indexed_insert_ttl: 1 },
- { expireAfterSeconds: this.ttlSeconds });
+ var res = db[collName].ensureIndex({indexed_insert_ttl: 1},
+ {expireAfterSeconds: this.ttlSeconds});
assertAlways.commandWorked(res);
}
@@ -48,7 +47,7 @@ var $config = (function() {
assertWhenOwnColl.soon(function checkTTLCount() {
// All initial documents should be removed by the end of the workload.
- var count = db[collName].find({ first: true }).itcount();
+ var count = db[collName].find({first: true}).itcount();
return count === 0;
}, 'Expected oldest documents with TTL fields to be removed', timeoutMS);
}
@@ -59,10 +58,7 @@ var $config = (function() {
states: states,
transitions: transitions,
setup: setup,
- data: {
- ttlSeconds: 5,
- ttlIndexExists: true
- },
+ data: {ttlSeconds: 5, ttlIndexExists: true},
teardown: teardown
};
})();
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
index caf278d6066..f1d00d7cf64 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
@@ -8,31 +8,33 @@
*
* Uses an unordered, bulk operation to perform the inserts.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_unordered_bulk';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ $config.data.indexedField = 'indexed_insert_unordered_bulk';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- $config.states.insert = function insert(db, collName) {
- var doc = {};
- doc[this.indexedField] = this.indexedValue;
+ $config.states.insert = function insert(db, collName) {
+ var doc = {};
+ doc[this.indexedField] = this.indexedValue;
- var bulk = db[collName].initializeUnorderedBulkOp();
- for (var i = 0; i < this.docsPerInsert; ++i) {
- bulk.insert(doc);
- }
- var res = bulk.execute();
- assertAlways.writeOK(res);
- assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.docsPerInsert; ++i) {
+ bulk.insert(doc);
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
- this.nInserted += this.docsPerInsert;
- };
+ this.nInserted += this.docsPerInsert;
+ };
- $config.data.docsPerInsert = 15;
+ $config.data.docsPerInsert = 15;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js b/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
index 1f2fd0adedc..a3d0bd2c8cd 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
@@ -10,35 +10,37 @@
* Instead of inserting via coll.insert(), this workload inserts using an
* upsert.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.data.indexedField = 'indexed_insert_upsert';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ $config.data.indexedField = 'indexed_insert_upsert';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.counter = 0;
- };
+ this.counter = 0;
+ };
- $config.states.insert = function insert(db, collName) {
- var doc = this.getDoc();
- doc.counter = this.counter++; // ensure doc is unique to guarantee an upsert occurs
- doc._id = new ObjectId(); // _id is required for shard targeting
+ $config.states.insert = function insert(db, collName) {
+ var doc = this.getDoc();
+ doc.counter = this.counter++; // ensure doc is unique to guarantee an upsert occurs
+ doc._id = new ObjectId(); // _id is required for shard targeting
- var res = db[collName].update(doc, { $inc: { unused: 0 } }, { upsert: true });
- assertAlways.eq(0, res.nMatched, tojson(res));
- assertAlways.eq(1, res.nUpserted, tojson(res));
- if (db.getMongo().writeMode() === 'commands') {
- assertAlways.eq(0, res.nModified, tojson(res));
- }
+ var res = db[collName].update(doc, {$inc: {unused: 0}}, {upsert: true});
+ assertAlways.eq(0, res.nMatched, tojson(res));
+ assertAlways.eq(1, res.nUpserted, tojson(res));
+ if (db.getMongo().writeMode() === 'commands') {
+ assertAlways.eq(0, res.nModified, tojson(res));
+ }
- this.nInserted += this.docsPerInsert;
- };
+ this.nInserted += this.docsPerInsert;
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_where.js b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
index 055ad1b574c..14408c26f69 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_where.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
@@ -15,9 +15,11 @@ var $config = (function() {
documentsToInsert: 100,
insertedDocuments: 0,
generateDocumentToInsert: function generateDocumentToInsert() {
- return { tid: this.tid };
+ return {
+ tid: this.tid
+ };
},
- shardKey: { tid: 1 }
+ shardKey: {tid: 1}
};
var states = {
@@ -33,20 +35,21 @@ var $config = (function() {
},
query: function query(db, collName) {
- var count = db[collName].find({ $where: 'this.tid === ' + this.tid }).itcount();
- assertWhenOwnColl.eq(count, this.insertedDocuments,
+ var count = db[collName].find({$where: 'this.tid === ' + this.tid}).itcount();
+ assertWhenOwnColl.eq(count,
+ this.insertedDocuments,
'$where query should return the number of documents this ' +
- 'thread inserted');
+ 'thread inserted');
}
};
var transitions = {
- insert: { insert: 0.2, query: 0.8 },
- query: { insert: 0.8, query: 0.2 }
+ insert: {insert: 0.2, query: 0.8},
+ query: {insert: 0.8, query: 0.2}
};
var setup = function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({ tid: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({tid: 1}));
};
return {
diff --git a/jstests/concurrency/fsm_workloads/list_indexes.js b/jstests/concurrency/fsm_workloads/list_indexes.js
index 6ab3a8c28b9..6bcdb8ba96c 100644
--- a/jstests/concurrency/fsm_workloads/list_indexes.js
+++ b/jstests/concurrency/fsm_workloads/list_indexes.js
@@ -21,9 +21,8 @@ var $config = (function() {
// List indexes, using a batchSize of 2 to ensure getmores happen.
function listIndices(db, collName) {
- var cursor = new DBCommandCursor(db.getMongo(),
- db.runCommand({listIndexes: collName,
- cursor: {batchSize: 2}}));
+ var cursor = new DBCommandCursor(
+ db.getMongo(), db.runCommand({listIndexes: collName, cursor: {batchSize: 2}}));
assertWhenOwnColl.gte(cursor.itcount(), 0);
}
@@ -34,8 +33,8 @@ var $config = (function() {
})();
var transitions = {
- modifyIndices: { listIndices: 0.75, modifyIndices: 0.25 },
- listIndices: { listIndices: 0.25, modifyIndices: 0.75 }
+ modifyIndices: {listIndices: 0.75, modifyIndices: 0.25},
+ listIndices: {listIndices: 0.25, modifyIndices: 0.75}
};
function setup(db, collName) {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_drop.js b/jstests/concurrency/fsm_workloads/map_reduce_drop.js
index 9ebdbab9ae3..ef03805dffd 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_drop.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_drop.js
@@ -60,7 +60,7 @@ var $config = (function() {
// iterations and threads in this workload.
var bulk = mapReduceDB[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.numDocs; ++i) {
- bulk.insert({ key: Random.randInt(10000) });
+ bulk.insert({key: Random.randInt(10000)});
}
var res = bulk.execute();
assertAlways.writeOK(res);
@@ -74,8 +74,7 @@ var $config = (function() {
try {
mapReduceDB[collName].mapReduce(this.mapper, this.reducer, options);
- }
- catch (e) {
+ } catch (e) {
// Ignore all mapReduce exceptions. This workload is only concerned
// with verifying server availability.
}
@@ -90,9 +89,9 @@ var $config = (function() {
})();
var transitions = {
- dropColl: { mapReduce: 1 },
- dropDB: { mapReduce: 1 },
- mapReduce: { mapReduce: 0.7, dropDB: 0.05, dropColl: 0.25 }
+ dropColl: {mapReduce: 1},
+ dropDB: {mapReduce: 1},
+ mapReduce: {mapReduce: 0.7, dropDB: 0.05, dropColl: 0.25}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_inline.js b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
index 278d9e95f25..1633ce0cc19 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_inline.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
@@ -55,7 +55,7 @@ var $config = (function() {
function mapReduce(db, collName) {
var options = {
finalize: this.finalizer,
- out: { inline: 1 }
+ out: {inline: 1}
};
var res = db[collName].mapReduce(this.mapper, this.reducer, options);
@@ -70,8 +70,8 @@ var $config = (function() {
})();
var transitions = {
- init: { mapReduce: 1 },
- mapReduce: { mapReduce: 1 }
+ init: {mapReduce: 1},
+ mapReduce: {mapReduce: 1}
};
function makeDoc(keyLimit, valueLimit) {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge.js b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
index 7f83a924bce..fd892dc72d9 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_merge.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
@@ -13,53 +13,52 @@
*
* Writes the results of each thread to the same collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- // Use the workload name as the database name,
- // since the workload name is assumed to be unique.
- var uniqueDBName = 'map_reduce_merge';
+ // Use the workload name as the database name,
+ // since the workload name is assumed to be unique.
+ var uniqueDBName = 'map_reduce_merge';
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.outDBName = db.getName() + uniqueDBName;
- };
+ this.outDBName = db.getName() + uniqueDBName;
+ };
- $config.states.mapReduce = function mapReduce(db, collName) {
- var outDB = db.getSiblingDB(this.outDBName);
- var fullName = outDB[collName].getFullName();
- assertAlways(outDB[collName].exists() !== null,
- "output collection '" + fullName + "' should exist");
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var outDB = db.getSiblingDB(this.outDBName);
+ var fullName = outDB[collName].getFullName();
+ assertAlways(outDB[collName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
- // Have all threads combine their results into the same collection
- var options = {
- finalize: this.finalizer,
- out: {
- merge: collName,
- db: this.outDBName
- }
- };
+ // Have all threads combine their results into the same collection
+ var options = {
+ finalize: this.finalizer,
+ out: {merge: collName, db: this.outDBName}
+ };
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
- var outDB = db.getSiblingDB(db.getName() + uniqueDBName);
- assertAlways.commandWorked(outDB.createCollection(collName));
- };
+ var outDB = db.getSiblingDB(db.getName() + uniqueDBName);
+ assertAlways.commandWorked(outDB.createCollection(collName));
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- var outDB = db.getSiblingDB(db.getName() + uniqueDBName);
- var res = outDB.dropDatabase();
- assertAlways.commandWorked(res);
- assertAlways.eq(db.getName() + uniqueDBName, res.dropped);
- };
+ $config.teardown = function teardown(db, collName, cluster) {
+ var outDB = db.getSiblingDB(db.getName() + uniqueDBName);
+ var res = outDB.dropDatabase();
+ assertAlways.commandWorked(res);
+ assertAlways.eq(db.getName() + uniqueDBName, res.dropped);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js b/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
index fd8d2c1136c..49897e20548 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
@@ -13,51 +13,49 @@
*
* Specifies nonAtomic=true.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
-
-var $config = extendWorkload($config, function($config, $super) {
-
- // Use the workload name as a prefix for the database name,
- // since the workload name is assumed to be unique.
- var prefix = 'map_reduce_merge_nonatomic';
-
- function uniqueDBName(prefix, tid) {
- return prefix + tid;
- }
-
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
-
- this.outDBName = db.getName() + uniqueDBName(prefix, this.tid);
- var outDB = db.getSiblingDB(this.outDBName);
- assertAlways.commandWorked(outDB.createCollection(collName));
- };
-
- $config.states.mapReduce = function mapReduce(db, collName) {
- var outDB = db.getSiblingDB(this.outDBName);
- var fullName = outDB[collName].getFullName();
- assertAlways(outDB[collName].exists() !== null,
- "output collection '" + fullName + "' should exist");
-
- var options = {
- finalize: this.finalizer,
- out: {
- merge: collName,
- db: this.outDBName,
- nonAtomic: true
- }
- };
-
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + db.getName() + prefix + '\\d+$');
- dropDatabases(db, pattern);
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+
+ // Use the workload name as a prefix for the database name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_merge_nonatomic';
+
+ function uniqueDBName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outDBName = db.getName() + uniqueDBName(prefix, this.tid);
+ var outDB = db.getSiblingDB(this.outDBName);
+ assertAlways.commandWorked(outDB.createCollection(collName));
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var outDB = db.getSiblingDB(this.outDBName);
+ var fullName = outDB[collName].getFullName();
+ assertAlways(outDB[collName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: {merge: collName, db: this.outDBName, nonAtomic: true}
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + db.getName() + prefix + '\\d+$');
+ dropDatabases(db, pattern);
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
index 53b1246f4c6..7f6ff6d535f 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
@@ -11,45 +11,47 @@
* Uses the "reduce" action to combine the results with the contents
* of the output collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-
-var $config = extendWorkload($config, function($config, $super) {
-
- // Use the workload name as a prefix for the collection name,
- // since the workload name is assumed to be unique.
- var prefix = 'map_reduce_reduce';
-
- function uniqueCollectionName(prefix, tid) {
- return prefix + tid;
- }
-
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
-
- this.outCollName = uniqueCollectionName(prefix, this.tid);
- assertAlways.commandWorked(db.createCollection(this.outCollName));
- };
-
- $config.states.mapReduce = function mapReduce(db, collName) {
- var fullName = db[this.outCollName].getFullName();
- assertAlways(db[this.outCollName].exists() !== null,
- "output collection '" + fullName + "' should exist");
-
- var options = {
- finalize: this.finalizer,
- out: { reduce: this.outCollName }
- };
-
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + prefix + '\\d+$');
- dropCollections(db, pattern);
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_reduce';
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outCollName = uniqueCollectionName(prefix, this.tid);
+ assertAlways.commandWorked(db.createCollection(this.outCollName));
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var fullName = db[this.outCollName].getFullName();
+ assertAlways(db[this.outCollName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: {reduce: this.outCollName}
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropCollections(db, pattern);
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
index cb0eeb1948a..b566f9db39f 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
@@ -14,48 +14,47 @@
* Specifies nonAtomic=true and writes the results of each thread to
* the same collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- // Use the workload name as the collection name,
- // since the workload name is assumed to be unique.
- var uniqueCollectionName = 'map_reduce_reduce_nonatomic';
+ // Use the workload name as the collection name,
+ // since the workload name is assumed to be unique.
+ var uniqueCollectionName = 'map_reduce_reduce_nonatomic';
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.outCollName = uniqueCollectionName;
- };
+ this.outCollName = uniqueCollectionName;
+ };
- $config.states.mapReduce = function mapReduce(db, collName) {
- var fullName = db[this.outCollName].getFullName();
- assertAlways(db[this.outCollName].exists() !== null,
- "output collection '" + fullName + "' should exist");
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var fullName = db[this.outCollName].getFullName();
+ assertAlways(db[this.outCollName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
- // Have all threads combine their results into the same collection
- var options = {
- finalize: this.finalizer,
- out: {
- reduce: this.outCollName,
- nonAtomic: true
- }
- };
+ // Have all threads combine their results into the same collection
+ var options = {
+ finalize: this.finalizer,
+ out: {reduce: this.outCollName, nonAtomic: true}
+ };
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
- assertAlways.commandWorked(db.createCollection(uniqueCollectionName));
- };
+ assertAlways.commandWorked(db.createCollection(uniqueCollectionName));
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- assertAlways(db[uniqueCollectionName].drop());
- };
+ $config.teardown = function teardown(db, collName, cluster) {
+ assertAlways(db[uniqueCollectionName].drop());
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace.js b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
index 186caf5a41e..4f22bd225b2 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
@@ -11,47 +11,49 @@
* Uses the "replace" action to overwrite the entire contents of the
* collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-
-var $config = extendWorkload($config, function($config, $super) {
-
- // Use the workload name as a prefix for the collection name,
- // since the workload name is assumed to be unique.
- var prefix = 'map_reduce_replace';
-
- function uniqueCollectionName(prefix, tid) {
- return prefix + tid;
- }
-
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
-
- this.outCollName = uniqueCollectionName(prefix, this.tid);
- assertAlways.commandWorked(db.createCollection(this.outCollName));
- };
-
- $config.states.mapReduce = function mapReduce(db, collName) {
- var fullName = db[this.outCollName].getFullName();
- assertAlways(db[this.outCollName].exists() !== null,
- "output collection '" + fullName + "' should exist");
-
- var options = {
- finalize: this.finalizer,
- out: { replace: this.outCollName },
- query: { key: { $exists: true }, value: { $exists: true } },
- sort: { _id: -1 } // sort key must be an existing index
- };
-
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + prefix + '\\d+$');
- dropCollections(db, pattern);
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_replace';
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outCollName = uniqueCollectionName(prefix, this.tid);
+ assertAlways.commandWorked(db.createCollection(this.outCollName));
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var fullName = db[this.outCollName].getFullName();
+ assertAlways(db[this.outCollName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: {replace: this.outCollName},
+ query: {key: {$exists: true}, value: {$exists: true}},
+ sort: {_id: -1} // sort key must be an existing index
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropCollections(db, pattern);
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
index 1cd6e18fbef..3ee8af21409 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
@@ -10,41 +10,43 @@
* Uses the "replace" action to write the results to a nonexistent
* output collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-
-var $config = extendWorkload($config, function($config, $super) {
-
- // Use the workload name as a prefix for the collection name,
- // since the workload name is assumed to be unique.
- var prefix = 'map_reduce_replace_nonexistent';
-
- function uniqueCollectionName(prefix, tid) {
- return prefix + tid;
- }
-
- $config.states.mapReduce = function mapReduce(db, collName) {
- var outCollName = uniqueCollectionName(prefix, this.tid);
- var fullName = db[outCollName].getFullName();
- assertAlways.isnull(db[outCollName].exists(),
- "output collection '" + fullName + "' should not exist");
-
- var options = {
- finalize: this.finalizer,
- out: { replace: outCollName },
- query: { key: { $exists: true }, value: { $exists: true } }
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_replace_nonexistent';
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var outCollName = uniqueCollectionName(prefix, this.tid);
+ var fullName = db[outCollName].getFullName();
+ assertAlways.isnull(db[outCollName].exists(),
+ "output collection '" + fullName + "' should not exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: {replace: outCollName},
+ query: {key: {$exists: true}, value: {$exists: true}}
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ assertAlways(db[outCollName].drop());
};
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- assertAlways(db[outCollName].drop());
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + prefix + '\\d+$');
- dropCollections(db, pattern);
- };
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropCollections(db, pattern);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
index f75b7e3c77f..abd1312b7c3 100644
--- a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
+++ b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
@@ -23,7 +23,7 @@ var $config = (function() {
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; ++i) {
- bulk.insert({ a: 1, b: Random.rand() });
+ bulk.insert({a: 1, b: Random.rand()});
}
var res = bulk.execute();
assertAlways.writeOK(res);
@@ -31,8 +31,8 @@ var $config = (function() {
// Create two indexes to force plan caching: The {a: 1} index is
// cached by the query planner because we query on a single value
// of 'a' and a range of 'b' values.
- assertAlways.commandWorked(coll.ensureIndex({ a: 1 }));
- assertAlways.commandWorked(coll.ensureIndex({ b: 1 }));
+ assertAlways.commandWorked(coll.ensureIndex({a: 1}));
+ assertAlways.commandWorked(coll.ensureIndex({b: 1}));
}
var states = (function() {
@@ -41,7 +41,7 @@ var $config = (function() {
var coll = db.getSiblingDB(this.dbName)[collName];
var cmdObj = {
- query: { a: 1, b: { $gt: Random.rand() } },
+ query: {a: 1, b: {$gt: Random.rand()}},
limit: Random.randInt(10)
};
@@ -69,8 +69,8 @@ var $config = (function() {
})();
var transitions = {
- count: { count: 0.95, dropDB: 0.05 },
- dropDB: { count: 0.95, dropDB: 0.05 }
+ count: {count: 0.95, dropDB: 0.05},
+ dropDB: {count: 0.95, dropDB: 0.05}
};
function setup(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/reindex.js b/jstests/concurrency/fsm_workloads/reindex.js
index 51aad94c016..7d71e4ea7be 100644
--- a/jstests/concurrency/fsm_workloads/reindex.js
+++ b/jstests/concurrency/fsm_workloads/reindex.js
@@ -7,24 +7,24 @@
* against the collection. Operates on a separate collection for each thread.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
var data = {
- nIndexes: 3 + 1, // 3 created and 1 for _id
+ nIndexes: 3 + 1, // 3 created and 1 for _id
nDocumentsToInsert: 1000,
- maxInteger: 100, // Used for document values. Must be a factor of nDocumentsToInsert
- prefix: 'reindex' // Use filename for prefix because filename is assumed unique
+ maxInteger: 100, // Used for document values. Must be a factor of nDocumentsToInsert
+ prefix: 'reindex' // Use filename for prefix because filename is assumed unique
};
var states = (function() {
function insertDocuments(db, collName) {
var bulk = db[collName].initializeUnorderedBulkOp();
for (var i = 0; i < this.nDocumentsToInsert; ++i) {
- bulk.insert({
- text: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do' +
- ' eiusmod tempor incididunt ut labore et dolore magna aliqua.',
- geo: { type: 'Point', coordinates: [(i % 50) - 25, (i % 50) - 25] },
+ bulk.insert({
+ text: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do' +
+ ' eiusmod tempor incididunt ut labore et dolore magna aliqua.',
+ geo: {type: 'Point', coordinates: [(i % 50) - 25, (i % 50) - 25]},
integer: i % this.maxInteger
});
}
@@ -35,43 +35,51 @@ var $config = (function() {
function createIndexes(db, collName) {
// The number of indexes created here is also stored in data.nIndexes
- var textResult = db[this.threadCollName].ensureIndex({ text: 'text' });
+ var textResult = db[this.threadCollName].ensureIndex({text: 'text'});
assertAlways.commandWorked(textResult);
- var geoResult = db[this.threadCollName].ensureIndex({ geo: '2dsphere' });
+ var geoResult = db[this.threadCollName].ensureIndex({geo: '2dsphere'});
assertAlways.commandWorked(geoResult);
- var integerResult = db[this.threadCollName].ensureIndex({ integer: 1 });
+ var integerResult = db[this.threadCollName].ensureIndex({integer: 1});
assertAlways.commandWorked(integerResult);
}
function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
+ this.threadCollName = this.prefix + '_' + this.tid;
insertDocuments.call(this, db, this.threadCollName);
}
function query(db, collName) {
var coll = db[this.threadCollName];
var nInsertedDocuments = this.nDocumentsToInsert;
- var count = coll.find({ integer: Random.randInt(this.maxInteger) }).itcount();
- assertWhenOwnColl.eq(nInsertedDocuments / this.maxInteger, count, 'number of ' +
- 'documents returned by integer query should match the number ' +
- 'inserted');
-
- var coords = [[ [-26, -26], [-26, 26], [26, 26], [26, -26], [-26, -26] ]];
- var geoQuery = { geo: { $geoWithin: { $geometry: { type: 'Polygon',
- coordinates: coords}}}};
+ var count = coll.find({integer: Random.randInt(this.maxInteger)}).itcount();
+ assertWhenOwnColl.eq(
+ nInsertedDocuments / this.maxInteger,
+ count,
+ 'number of ' +
+ 'documents returned by integer query should match the number ' +
+ 'inserted');
+
+ var coords = [[[-26, -26], [-26, 26], [26, 26], [26, -26], [-26, -26]]];
+ var geoQuery = {
+ geo: {$geoWithin: {$geometry: {type: 'Polygon', coordinates: coords}}}
+ };
// We can only perform a geo query when we own the collection and are sure a geo index
// is present. The same is true of text queries.
assertWhenOwnColl(function() {
count = coll.find(geoQuery).itcount();
- assertWhenOwnColl.eq(count, nInsertedDocuments, 'number of documents returned by' +
- ' geospatial query should match number inserted');
-
- count = coll.find({ $text: { $search: 'ipsum' } }).itcount();
- assertWhenOwnColl.eq(count, nInsertedDocuments, 'number of documents returned by' +
- ' text query should match number inserted');
+ assertWhenOwnColl.eq(count,
+ nInsertedDocuments,
+ 'number of documents returned by' +
+ ' geospatial query should match number inserted');
+
+ count = coll.find({$text: {$search: 'ipsum'}}).itcount();
+ assertWhenOwnColl.eq(count,
+ nInsertedDocuments,
+ 'number of documents returned by' +
+ ' text query should match number inserted');
});
var indexCount = db[this.threadCollName].getIndexes().length;
@@ -92,10 +100,10 @@ var $config = (function() {
})();
var transitions = {
- init: { createIndexes: 1 },
- createIndexes: { reIndex: 0.5, query: 0.5 },
- reIndex: { reIndex: 0.5, query: 0.5 },
- query: { reIndex: 0.5, query: 0.5 }
+ init: {createIndexes: 1},
+ createIndexes: {reIndex: 0.5, query: 0.5},
+ reIndex: {reIndex: 0.5, query: 0.5},
+ query: {reIndex: 0.5, query: 0.5}
};
var teardown = function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/reindex_background.js b/jstests/concurrency/fsm_workloads/reindex_background.js
index fe4d00bb9e5..7a5c25679f1 100644
--- a/jstests/concurrency/fsm_workloads/reindex_background.js
+++ b/jstests/concurrency/fsm_workloads/reindex_background.js
@@ -5,29 +5,31 @@
*
* Bulk inserts 1000 documents and builds indexes in background, then alternates between reindexing
* and querying against the collection. Operates on a separate collection for each thread. Note
- * that because indexes are initially built in the background, reindexing is also done in the
+ * that because indexes are initially built in the background, reindexing is also done in the
* background.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/reindex.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/reindex.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.prefix = 'reindex_background';
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+ $config.data.prefix = 'reindex_background';
- $config.states.createIndexes = function createIndexes(db, collName) {
- var coll = db[this.threadCollName];
+ $config.states.createIndexes = function createIndexes(db, collName) {
+ var coll = db[this.threadCollName];
- // The number of indexes created here is also stored in data.nIndexes
- var textResult = coll.ensureIndex({ text: 'text' }, { background: true });
- assertAlways.commandWorked(textResult);
+ // The number of indexes created here is also stored in data.nIndexes
+ var textResult = coll.ensureIndex({text: 'text'}, {background: true});
+ assertAlways.commandWorked(textResult);
- var geoResult = coll.ensureIndex({ geo: '2dsphere' }, { background: true });
- assertAlways.commandWorked(geoResult);
+ var geoResult = coll.ensureIndex({geo: '2dsphere'}, {background: true});
+ assertAlways.commandWorked(geoResult);
- var integerResult = coll.ensureIndex({ integer: 1 }, {background: true });
- assertAlways.commandWorked(integerResult);
- };
+ var integerResult = coll.ensureIndex({integer: 1}, {background: true});
+ assertAlways.commandWorked(integerResult);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
index 74139f07117..a57e61a44b0 100644
--- a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
+++ b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
@@ -27,8 +27,8 @@ var $config = (function() {
};
var transitions = {
- insert: { insert: 0.5, remove: 0.5 },
- remove: { insert: 0.5, remove: 0.5 }
+ insert: {insert: 0.5, remove: 0.5},
+ remove: {insert: 0.5, remove: 0.5}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
index c04cb198f50..d809b0be8e3 100644
--- a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
+++ b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
@@ -13,7 +13,7 @@ var $config = (function() {
init: function init(db, collName) {
this.numDocs = 200;
for (var i = 0; i < this.numDocs; ++i) {
- db[collName].insert({ tid: this.tid, rand: Random.rand() });
+ db[collName].insert({tid: this.tid, rand: Random.rand()});
}
},
@@ -22,28 +22,22 @@ var $config = (function() {
var low = Random.rand();
var high = low + 0.05 * Random.rand();
- var res = db[collName].remove({
- tid: this.tid,
- rand: { $gte: low, $lte: high }
- });
+ var res = db[collName].remove({tid: this.tid, rand: {$gte: low, $lte: high}});
assertAlways.gte(res.nRemoved, 0);
assertAlways.lte(res.nRemoved, this.numDocs);
this.numDocs -= res.nRemoved;
},
count: function count(db, collName) {
- var numDocs = db[collName].find({ tid: this.tid }).itcount();
+ var numDocs = db[collName].find({tid: this.tid}).itcount();
assertWhenOwnColl.eq(this.numDocs, numDocs);
}
};
var transitions = {
- init: { count: 1 },
- count: { remove: 1 },
- remove: {
- remove: 0.825,
- count: 0.125
- }
+ init: {count: 1},
+ count: {remove: 1},
+ remove: {remove: 0.825, count: 0.125}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document.js b/jstests/concurrency/fsm_workloads/remove_single_document.js
index 75442919f2d..5f83e0f57f8 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document.js
@@ -10,19 +10,13 @@ var $config = (function() {
var states = {
remove: function remove(db, collName) {
// try removing a random document
- var res = this.doRemove(db,
- collName,
- { rand: { $gte: Random.rand() } },
- { justOne: true });
+ var res = this.doRemove(db, collName, {rand: {$gte: Random.rand()}}, {justOne: true});
assertAlways.lte(res.nRemoved, 1);
if (res.nRemoved === 0) {
// The above remove() can fail to remove a document when the random value
// in the query is greater than any of the random values in the collection.
// When that situation occurs, just remove an arbitrary document instead.
- res = this.doRemove(db,
- collName,
- {},
- { justOne: true });
+ res = this.doRemove(db, collName, {}, {justOne: true});
assertAlways.lte(res.nRemoved, 1);
}
this.assertResult(res);
@@ -30,14 +24,14 @@ var $config = (function() {
};
var transitions = {
- remove: { remove: 1 }
+ remove: {remove: 1}
};
function setup(db, collName, cluster) {
// insert enough documents so that each thread can remove exactly one per iteration
var num = this.threadCount * this.iterations;
for (var i = 0; i < num; ++i) {
- db[collName].insert({ i: i, rand: Random.rand() });
+ db[collName].insert({i: i, rand: Random.rand()});
}
assertWhenOwnColl.eq(db[collName].find().itcount(), num);
}
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document_eval.js b/jstests/concurrency/fsm_workloads/remove_single_document_eval.js
index ee6411c9c74..97dca4e242f 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document_eval.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document_eval.js
@@ -5,33 +5,35 @@
*
* Runs remove_single_document using the eval command.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/remove_single_document.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/remove_single_document.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.doRemove = function doRemove(db, collName, query, options) {
- var evalResult = db.runCommand({
- eval: function(f, collName, query, options) {
- return tojson(f(db, collName, query, options));
- },
- args: [$super.data.doRemove, collName, query, options],
- nolock: this.nolock
- });
- assertAlways.commandWorked(evalResult);
- var res = JSON.parse(evalResult.retval);
- return res;
- };
+ $config.data.doRemove = function doRemove(db, collName, query, options) {
+ var evalResult = db.runCommand({
+ eval: function(f, collName, query, options) {
+ return tojson(f(db, collName, query, options));
+ },
+ args: [$super.data.doRemove, collName, query, options],
+ nolock: this.nolock
+ });
+ assertAlways.commandWorked(evalResult);
+ var res = JSON.parse(evalResult.retval);
+ return res;
+ };
- $config.data.assertResult = function assertResult(res) {
- assertWhenOwnColl.eq(1, res.nRemoved, tojson(res));
- };
+ $config.data.assertResult = function assertResult(res) {
+ assertWhenOwnColl.eq(1, res.nRemoved, tojson(res));
+ };
- $config.data.nolock = false;
+ $config.data.nolock = false;
- // scale down threadCount and iterations because eval takes a global lock
- $config.threadCount = 5;
- $config.iterations = 10;
+ // scale down threadCount and iterations because eval takes a global lock
+ $config.threadCount = 5;
+ $config.iterations = 10;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js b/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
index c5aba00523e..1663f808fdb 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
@@ -5,12 +5,13 @@
*
* Runs remove_single_document_eval with the eval option { nolock: true }.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/remove_single_document_eval.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/remove_single_document_eval.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- $config.data.nolock = true;
+ $config.data.nolock = true;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/remove_where.js b/jstests/concurrency/fsm_workloads/remove_where.js
index ecfbe722db8..0ef4f3d9931 100644
--- a/jstests/concurrency/fsm_workloads/remove_where.js
+++ b/jstests/concurrency/fsm_workloads/remove_where.js
@@ -8,35 +8,40 @@
* counts.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.randomBound = 10;
- $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
- return { tid: this.tid, x: Random.randInt(this.randomBound) };
- };
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.data.randomBound = 10;
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return {
+ tid: this.tid,
+ x: Random.randInt(this.randomBound)
+ };
+ };
- $config.states.remove = function remove(db, collName) {
- var res = db[collName].remove({
- // Server-side JS does not support Random.randInt, so use Math.floor/random instead
- $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
+ $config.states.remove = function remove(db, collName) {
+ var res = db[collName].remove({
+ // Server-side JS does not support Random.randInt, so use Math.floor/random instead
+ $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
'&& this.tid === ' + this.tid
- });
- assertWhenOwnColl.gte(res.nRemoved, 0);
- assertWhenOwnColl.lte(res.nRemoved, this.insertedDocuments);
- this.insertedDocuments -= res.nRemoved;
- };
+ });
+ assertWhenOwnColl.gte(res.nRemoved, 0);
+ assertWhenOwnColl.lte(res.nRemoved, this.insertedDocuments);
+ this.insertedDocuments -= res.nRemoved;
+ };
- $config.transitions = {
- insert: { insert: 0.2, remove: 0.4, query: 0.4 },
- remove: { insert: 0.4, remove: 0.2, query: 0.4 },
- query: { insert: 0.4, remove: 0.4, query: 0.2 }
- };
+ $config.transitions = {
+ insert: {insert: 0.2, remove: 0.4, query: 0.4},
+ remove: {insert: 0.4, remove: 0.2, query: 0.4},
+ query: {insert: 0.4, remove: 0.4, query: 0.2}
+ };
- $config.setup = function setup(db, collName, cluster) {
- /* no-op to prevent index from being created */
- };
+ $config.setup = function setup(db, collName, cluster) {
+ /* no-op to prevent index from being created */
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
index 7b77eec792b..b02642cb4c2 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
@@ -7,7 +7,7 @@
* command against it. The previous "to" namespace is used as the next "from"
* namespace.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
@@ -52,8 +52,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
index f9a02412aef..d69bb975d62 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
@@ -7,7 +7,7 @@
* command against it, specifying a different database name in the namespace.
* The previous "to" namespace is used as the next "from" namespace.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
var $config = (function() {
@@ -65,8 +65,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
index d995ec143b8..06930a0457c 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
@@ -7,7 +7,7 @@
* command against it, specifying a different database name in the namespace.
* Inserts documents into the "to" namespace and specifies dropTarget=true.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
var $config = (function() {
@@ -88,8 +88,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
index b656b004373..11621a0318b 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
@@ -7,7 +7,7 @@
* command against it. Inserts documents into the "to" namespace and specifies
* dropTarget=true.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
@@ -59,8 +59,8 @@ var $config = (function() {
// Verify that 'fromCollCount' documents exist in the "to" collection
// after the rename occurs
- var res = db[this.fromCollName].renameCollection(this.toCollName,
- true /* dropTarget */);
+ var res =
+ db[this.fromCollName].renameCollection(this.toCollName, true /* dropTarget */);
assertWhenOwnDB.commandWorked(res);
assertWhenOwnDB(db[this.toCollName].isCapped());
assertWhenOwnDB.eq(fromCollCount, db[this.toCollName].find().itcount());
@@ -80,8 +80,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_chain.js
index 0514fe6d075..81c0313e217 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_chain.js
@@ -7,7 +7,7 @@
* command against it. The previous "to" namespace is used as the next "from"
* namespace.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
@@ -44,8 +44,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
index 505c77d1d5e..d11dfd19d22 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
@@ -7,7 +7,7 @@
* command against it, specifying a different database name in the namespace.
* The previous "to" namespace is used as the next "from" namespace.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
var $config = (function() {
@@ -57,8 +57,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
index cdc135ba069..453d5a27379 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
@@ -7,7 +7,7 @@
* command against it, specifying a different database name in the namespace.
* Inserts documents into the "to" namespace and specifies dropTarget=true.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
var $config = (function() {
@@ -80,8 +80,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
index 161720d019e..bb2651258a5 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
@@ -7,7 +7,7 @@
* command against it. Inserts documents into the "to" namespace and specifies
* dropTarget=true.
*/
-load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
+load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
var $config = (function() {
@@ -52,8 +52,8 @@ var $config = (function() {
// Verify that 'fromCollCount' documents exist in the "to" collection
// after the rename occurs
- var res = db[this.fromCollName].renameCollection(this.toCollName,
- true /* dropTarget */);
+ var res =
+ db[this.fromCollName].renameCollection(this.toCollName, true /* dropTarget */);
assertWhenOwnDB.commandWorked(res);
assertWhenOwnDB.eq(fromCollCount, db[this.toCollName].find().itcount());
assertWhenOwnDB.eq(0, db[this.fromCollName].find().itcount());
@@ -72,8 +72,8 @@ var $config = (function() {
})();
var transitions = {
- init: { rename: 1 },
- rename: { rename: 1 }
+ init: {rename: 1},
+ rename: {rename: 1}
};
function teardown(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/server_status.js b/jstests/concurrency/fsm_workloads/server_status.js
index 70de8395f49..dbde1420b99 100644
--- a/jstests/concurrency/fsm_workloads/server_status.js
+++ b/jstests/concurrency/fsm_workloads/server_status.js
@@ -24,7 +24,7 @@ var $config = (function() {
};
var transitions = {
- status: { status: 1 }
+ status: {status: 1}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/touch_base.js b/jstests/concurrency/fsm_workloads/touch_base.js
index df2d0851cd5..6e2cce202ed 100644
--- a/jstests/concurrency/fsm_workloads/touch_base.js
+++ b/jstests/concurrency/fsm_workloads/touch_base.js
@@ -4,49 +4,60 @@
* touch_base.js
*
* Bulk inserts documents in batches of 100, uses the touch command on "data" and "index",
- * and queries to verify the number of documents inserted by the thread.
+ * and queries to verify the number of documents inserted by the thread.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
// For isMongod, isMMAPv1, and isEphemeral.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
- return { tid: this.tid, x: Random.randInt(10) };
- };
-
- $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
- return { touch: collName, data: true, index: true };
- };
-
- $config.states.touch = function touch(db, collName) {
- var res = db.runCommand(this.generateTouchCmdObj(collName));
- if (isMongod(db) && (isMMAPv1(db) || isEphemeral(db))) {
- assertAlways.commandWorked(res);
- } else {
- // SERVER-16850 and SERVER-16797
- assertAlways.commandFailed(res);
- }
- };
-
- $config.states.query = function query(db, collName) {
- var count = db[collName].find( { tid: this.tid } ).itcount();
- assertWhenOwnColl.eq(count, this.insertedDocuments,
- 'collection scan should return the number of documents this thread' +
- ' inserted');
- };
-
- $config.transitions = {
- insert: { insert: 0.2, touch: 0.4, query: 0.4 },
- touch: { insert: 0.4, touch: 0.2, query: 0.4 },
- query: { insert: 0.4, touch: 0.4, query: 0.2 }
- };
-
- $config.setup = function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({ x: 1 }));
- };
-
- return $config;
-});
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return {
+ tid: this.tid,
+ x: Random.randInt(10)
+ };
+ };
+
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return {
+ touch: collName,
+ data: true,
+ index: true
+ };
+ };
+
+ $config.states.touch = function touch(db, collName) {
+ var res = db.runCommand(this.generateTouchCmdObj(collName));
+ if (isMongod(db) && (isMMAPv1(db) || isEphemeral(db))) {
+ assertAlways.commandWorked(res);
+ } else {
+ // SERVER-16850 and SERVER-16797
+ assertAlways.commandFailed(res);
+ }
+ };
+
+ $config.states.query = function query(db, collName) {
+ var count = db[collName].find({tid: this.tid}).itcount();
+ assertWhenOwnColl.eq(
+ count,
+ this.insertedDocuments,
+ 'collection scan should return the number of documents this thread' +
+ ' inserted');
+ };
+
+ $config.transitions = {
+ insert: {insert: 0.2, touch: 0.4, query: 0.4},
+ touch: {insert: 0.4, touch: 0.2, query: 0.4},
+ query: {insert: 0.4, touch: 0.4, query: 0.2}
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ assertAlways.commandWorked(db[collName].ensureIndex({x: 1}));
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/touch_data.js b/jstests/concurrency/fsm_workloads/touch_data.js
index 08130dfcf2e..dc3b7cecef0 100644
--- a/jstests/concurrency/fsm_workloads/touch_data.js
+++ b/jstests/concurrency/fsm_workloads/touch_data.js
@@ -4,16 +4,22 @@
* touch_data.js
*
* Bulk inserts documents in batches of 100, uses touch on "data" but not "index",
- * and queries to verify the number of documents inserted by the thread.
+ * and queries to verify the number of documents inserted by the thread.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
- return { touch: collName, data: true, index: false };
- };
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return {
+ touch: collName,
+ data: true,
+ index: false
+ };
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/touch_index.js b/jstests/concurrency/fsm_workloads/touch_index.js
index a1cfa6db2ba..cc0b6fcf48d 100644
--- a/jstests/concurrency/fsm_workloads/touch_index.js
+++ b/jstests/concurrency/fsm_workloads/touch_index.js
@@ -4,16 +4,22 @@
* touch_index.js
*
* Bulk inserts documents in batches of 100, uses touch on "index" but not "data",
- * and queries to verify the number of documents inserted by the thread.
+ * and queries to verify the number of documents inserted by the thread.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
- return { touch: collName, data: false, index: true };
- };
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return {
+ touch: collName,
+ data: false,
+ index: true
+ };
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js b/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js
index 18cf0329b02..25ce50fc5ac 100644
--- a/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js
+++ b/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js
@@ -4,22 +4,28 @@
* touch_no_data_no_index.js
*
* Bulk inserts documents in batches of 100, uses touch as a no-op,
- * and queries to verify the number of documents inserted by the thread.
+ * and queries to verify the number of documents inserted by the thread.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
- return { touch: collName, data: false, index: false };
- };
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return {
+ touch: collName,
+ data: false,
+ index: false
+ };
+ };
- $config.states.touch = function touch(db, collName) {
- var res = db.runCommand(this.generateTouchCmdObj(collName));
- // The command always fails because "index" and "data" are both false
- assertAlways.commandFailed(res);
- };
+ $config.states.touch = function touch(db, collName) {
+ var res = db.runCommand(this.generateTouchCmdObj(collName));
+ // The command always fails because "index" and "data" are both false
+ assertAlways.commandFailed(res);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
index ab2671eb5b1..5d59ff2b0ef 100644
--- a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
+++ b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
@@ -22,7 +22,7 @@ var $config = (function() {
},
update: function update(db, collName) {
- var res = db[collName].update({}, { $inc: { n: 1 } }, { multi: true });
+ var res = db[collName].update({}, {$inc: {n: 1}}, {multi: true});
assertAlways.lte(0, res.nMatched, tojson(res));
if (db.getMongo().writeMode() === 'commands') {
assertAlways.eq(res.nMatched, res.nModified, tojson(res));
@@ -32,8 +32,8 @@ var $config = (function() {
};
var transitions = {
- insert: { insert: 0.2, update: 0.8 },
- update: { insert: 0.2, update: 0.8 }
+ insert: {insert: 0.2, update: 0.8},
+ update: {insert: 0.2, update: 0.8}
};
return {
diff --git a/jstests/concurrency/fsm_workloads/update_array.js b/jstests/concurrency/fsm_workloads/update_array.js
index b2681fb792a..e275d290911 100644
--- a/jstests/concurrency/fsm_workloads/update_array.js
+++ b/jstests/concurrency/fsm_workloads/update_array.js
@@ -9,7 +9,7 @@
* though other threads in the workload may be modifying the array between the
* update and the find, because thread ids are unique.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
@@ -26,8 +26,7 @@ var $config = (function() {
if (db.getMongo().writeMode() === 'commands') {
assertWhenOwnColl.contains(res.nModified, nModifiedPossibilities, tojson(res));
}
- }
- else {
+ } else {
// Zero matches are possible for MMAP v1 because the update will skip a document
// that was invalidated during a yield.
assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
@@ -38,13 +37,13 @@ var $config = (function() {
}
function doPush(db, collName, docIndex, value) {
- var res = db[collName].update({ _id: docIndex }, { $push: { arr: value } });
+ var res = db[collName].update({_id: docIndex}, {$push: {arr: value}});
// assert the update reported success
assertUpdateSuccess(db, res, [1]);
// find the doc and make sure it was updated
- var doc = db[collName].findOne({ _id: docIndex });
+ var doc = db[collName].findOne({_id: docIndex});
assertWhenOwnColl(function() {
assertWhenOwnColl.neq(null, doc);
assertWhenOwnColl(doc.hasOwnProperty('arr'),
@@ -54,21 +53,22 @@ var $config = (function() {
// anything. The $push operator always modifies the matched document, so if we
// matched something, then we must have updated it.
if (res.nMatched > 0) {
- assertWhenOwnColl.contains(value, doc.arr,
+ assertWhenOwnColl.contains(value,
+ doc.arr,
"doc.arr doesn't contain value (" + value +
- ') after $push: ' + tojson(doc.arr));
+ ') after $push: ' + tojson(doc.arr));
}
});
}
function doPull(db, collName, docIndex, value) {
- var res = db[collName].update({ _id: docIndex }, { $pull: { arr: value } });
+ var res = db[collName].update({_id: docIndex}, {$pull: {arr: value}});
// assert the update reported success
assertUpdateSuccess(db, res, [0, 1]);
// find the doc and make sure it was updated
- var doc = db[collName].findOne({ _id: docIndex });
+ var doc = db[collName].findOne({_id: docIndex});
assertWhenOwnColl(function() {
assertWhenOwnColl.neq(null, doc);
@@ -77,9 +77,10 @@ var $config = (function() {
// removed all occurrences of 'value' from the array (meaning that there should be
// none left).
if (res.nMatched > 0) {
- assertWhenOwnColl.eq(-1, doc.arr.indexOf(value),
+ assertWhenOwnColl.eq(-1,
+ doc.arr.indexOf(value),
'doc.arr contains removed value (' + value +
- ') after $pull: ' + tojson(doc.arr));
+ ') after $pull: ' + tojson(doc.arr));
}
});
}
@@ -103,21 +104,15 @@ var $config = (function() {
})();
var transitions = {
- push: {
- push: 0.8,
- pull: 0.2
- },
- pull: {
- push: 0.8,
- pull: 0.2
- }
+ push: {push: 0.8, pull: 0.2},
+ pull: {push: 0.8, pull: 0.2}
};
function setup(db, collName, cluster) {
// index on 'arr', the field being updated
- assertAlways.commandWorked(db[collName].ensureIndex({ arr: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({arr: 1}));
for (var i = 0; i < this.numDocs; ++i) {
- var res = db[collName].insert({ _id: i, arr: [] });
+ var res = db[collName].insert({_id: i, arr: []});
assertWhenOwnColl.writeOK(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
@@ -129,9 +124,7 @@ var $config = (function() {
startState: 'push',
states: states,
transitions: transitions,
- data: {
- numDocs: 10
- },
+ data: {numDocs: 10},
setup: setup
};
diff --git a/jstests/concurrency/fsm_workloads/update_array_noindex.js b/jstests/concurrency/fsm_workloads/update_array_noindex.js
index cd1b4c27129..2e99c5a709b 100644
--- a/jstests/concurrency/fsm_workloads/update_array_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_array_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_array.js workload after dropping all non-_id indexes on
* the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_array.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_array.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_check_index.js b/jstests/concurrency/fsm_workloads/update_check_index.js
index a7b71b98848..3e099d6b2a5 100644
--- a/jstests/concurrency/fsm_workloads/update_check_index.js
+++ b/jstests/concurrency/fsm_workloads/update_check_index.js
@@ -21,7 +21,7 @@ var $config = (function() {
})();
var transitions = {
- multiUpdate: { multiUpdate: 1.0 }
+ multiUpdate: {multiUpdate: 1.0}
};
function setup(db, collName, cluster) {
@@ -41,16 +41,16 @@ var $config = (function() {
assertWhenOwnColl(function() {
var numIndexKeys = db[collName].find({}, {_id: 0, a: 1}).hint({a: 1}).itcount();
var numDocs = db[collName].find().itcount();
- assertWhenOwnColl.eq(numIndexKeys, numDocs,
- 'index {a: 1} has wrong number of index keys');
+ assertWhenOwnColl.eq(
+ numIndexKeys, numDocs, 'index {a: 1} has wrong number of index keys');
numIndexKeys = db[collName].find({}, {_id: 0, b: 1}).hint({b: 1}).itcount();
- assertWhenOwnColl.eq(numIndexKeys, numDocs,
- 'index {b: 1} has wrong number of index keys');
+ assertWhenOwnColl.eq(
+ numIndexKeys, numDocs, 'index {b: 1} has wrong number of index keys');
numIndexKeys = db[collName].find({}, {_id: 0, c: 1}).hint({c: 1}).itcount();
- assertWhenOwnColl.eq(numIndexKeys, numDocs,
- 'index {c: 1} has wrong number of index keys');
+ assertWhenOwnColl.eq(
+ numIndexKeys, numDocs, 'index {c: 1} has wrong number of index keys');
});
}
diff --git a/jstests/concurrency/fsm_workloads/update_inc.js b/jstests/concurrency/fsm_workloads/update_inc.js
index adc1c536fdd..bd4c832e96f 100644
--- a/jstests/concurrency/fsm_workloads/update_inc.js
+++ b/jstests/concurrency/fsm_workloads/update_inc.js
@@ -8,7 +8,7 @@
* field. Asserts that the field has the correct value based on the number
* of increments performed.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
@@ -25,10 +25,12 @@ var $config = (function() {
},
update: function update(db, collName) {
- var updateDoc = { $inc: {} };
+ var updateDoc = {
+ $inc: {}
+ };
updateDoc.$inc[this.fieldName] = 1;
- var res = db[collName].update({ _id: this.id }, updateDoc);
+ var res = db[collName].update({_id: this.id}, updateDoc);
assertAlways.eq(0, res.nUpserted, tojson(res));
if (isMongod(db) && !isMMAPv1(db)) {
@@ -38,8 +40,7 @@ var $config = (function() {
if (db.getMongo().writeMode() === 'commands') {
assertWhenOwnColl.eq(res.nModified, 1, tojson(res));
}
- }
- else {
+ } else {
// Zero matches are possible for MMAP v1 because the update will skip a document
// that was invalidated during a yield.
assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
@@ -70,13 +71,15 @@ var $config = (function() {
};
var transitions = {
- init: { update: 1 },
- update: { find: 1 },
- find: { update: 1 }
+ init: {update: 1},
+ update: {find: 1},
+ find: {update: 1}
};
function setup(db, collName, cluster) {
- var doc = { _id: this.id };
+ var doc = {
+ _id: this.id
+ };
// Pre-populate the fields we need to avoid size change for capped collections.
for (var i = 0; i < this.threadCount; ++i) {
diff --git a/jstests/concurrency/fsm_workloads/update_inc_capped.js b/jstests/concurrency/fsm_workloads/update_inc_capped.js
index 34c8fbc72b5..19588195f07 100644
--- a/jstests/concurrency/fsm_workloads/update_inc_capped.js
+++ b/jstests/concurrency/fsm_workloads/update_inc_capped.js
@@ -5,8 +5,8 @@
*
* Executes the update_inc.js workload on a capped collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_inc.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_inc.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped
var $config = extendWorkload($config, makeCapped);
diff --git a/jstests/concurrency/fsm_workloads/update_multifield.js b/jstests/concurrency/fsm_workloads/update_multifield.js
index f04f347d262..af520797ac8 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield.js
@@ -7,7 +7,7 @@
* The collection has an index for each field, and a compound index for all fields.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
@@ -34,9 +34,15 @@ var $config = (function() {
var push = Random.rand() > 0.2;
var updateDoc = {};
- updateDoc[set ? '$set' : '$unset'] = { x: x };
- updateDoc[push ? '$push' : '$pull'] = { y: y };
- updateDoc.$inc = { z: z };
+ updateDoc[set ? '$set' : '$unset'] = {
+ x: x
+ };
+ updateDoc[push ? '$push' : '$pull'] = {
+ y: y
+ };
+ updateDoc.$inc = {
+ z: z
+ };
return updateDoc;
}
@@ -47,32 +53,29 @@ var $config = (function() {
var updateDoc = makeRandomUpdateDoc();
// apply this update
- var query = makeQuery({
- multi: this.multi,
- isolated: this.isolated,
- numDocs: this.numDocs
- });
- var res = db[collName].update(query, updateDoc, { multi: this.multi });
+ var query =
+ makeQuery({multi: this.multi, isolated: this.isolated, numDocs: this.numDocs});
+ var res = db[collName].update(query, updateDoc, {multi: this.multi});
this.assertResult(res, db, collName, query);
}
};
var transitions = {
- update: { update: 1 }
+ update: {update: 1}
};
function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({ x: 1 }));
- assertAlways.commandWorked(db[collName].ensureIndex({ y: 1 }));
- assertAlways.commandWorked(db[collName].ensureIndex({ z: 1 }));
- assertAlways.commandWorked(db[collName].ensureIndex({ x: 1, y: 1, z: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({x: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({y: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({z: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({x: 1, y: 1, z: 1}));
// numDocs should be much less than threadCount, to make more threads use the same docs.
this.numDocs = Math.floor(this.threadCount / 3);
assertAlways.gt(this.numDocs, 0, 'numDocs should be a positive number');
for (var i = 0; i < this.numDocs; ++i) {
- var res = db[collName].insert({ _id: i });
+ var res = db[collName].insert({_id: i});
assertWhenOwnColl.writeOK(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
@@ -95,8 +98,7 @@ var $config = (function() {
if (db.getMongo().writeMode() === 'commands') {
assertWhenOwnColl.eq(res.nModified, 1, tojson(res));
}
- }
- else {
+ } else {
// Zero matches are possible for MMAP v1 because the update will skip a document
// that was invalidated during a yield.
assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js
index 8c3f6704231..a8debf271e7 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js
@@ -6,33 +6,35 @@
* Does updates that affect multiple fields on multiple documents, using $isolated.
* The collection has an index for each field, and a multikey index for all fields.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.multi = true;
- $config.data.isolated = true;
+ $config.data.multi = true;
+ $config.data.isolated = true;
- $config.data.assertResult = function assertResult(res, db, collName, query) {
- assertAlways.eq(0, res.nUpserted, tojson(res));
- // documents can't move during an update, because we use $isolated
- assertWhenOwnColl.eq(this.numDocs, res.nMatched, tojson(res));
- if (db.getMongo().writeMode() === 'commands') {
- assertWhenOwnColl.eq(this.numDocs, res.nModified, tojson(res));
- }
+ $config.data.assertResult = function assertResult(res, db, collName, query) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+ // documents can't move during an update, because we use $isolated
+ assertWhenOwnColl.eq(this.numDocs, res.nMatched, tojson(res));
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(this.numDocs, res.nModified, tojson(res));
+ }
- // every thread only increments z, and z starts at 0,
- // so z should always be strictly greater than 0 after an update,
- // even if other threads modify the doc.
- var docs = db[collName].find().toArray();
- assertWhenOwnColl(function() {
- docs.forEach(function(doc) {
- assertWhenOwnColl.eq('number', typeof doc.z);
- assertWhenOwnColl.gt(doc.z, 0);
- });
- });
- };
+ // every thread only increments z, and z starts at 0,
+ // so z should always be strictly greater than 0 after an update,
+ // even if other threads modify the doc.
+ var docs = db[collName].find().toArray();
+ assertWhenOwnColl(function() {
+ docs.forEach(function(doc) {
+ assertWhenOwnColl.eq('number', typeof doc.z);
+ assertWhenOwnColl.gt(doc.z, 0);
+ });
+ });
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js
index 6ac6aeabc89..2f9aeded5ad 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_multifield_isolated_multiupdate.js workload after
* dropping all non-_id indexes on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
index 8d9f3d875cc..46532c8db47 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
@@ -6,46 +6,48 @@
* Does updates that affect multiple fields on multiple documents.
* The collection has an index for each field, and a multikey index for all fields.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
-
-var $config = extendWorkload($config, function($config, $super) {
-
- $config.data.multi = true;
-
- $config.data.assertResult = function(res, db, collName, query) {
- assertAlways.eq(0, res.nUpserted, tojson(res));
-
- if (isMongod(db)) {
- if (isMMAPv1(db)) {
- // If an update triggers a document to move forward, then
- // that document can be matched multiple times. If an update
- // triggers a document to move backwards, then that document
- // can be missed by other threads.
- assertAlways.gte(res.nMatched, 0, tojson(res));
- } else { // non-mmapv1 storage engine
- // TODO: Can we assert exact equality with WiredTiger?
- // What about for other storage engines?
- assertWhenOwnColl.lte(this.numDocs, res.nMatched, tojson(res));
- }
- } else { // mongos
- // In a mixed cluster, it is unknown what underlying storage engine
- // the update operations will be executed against. Thus, we can only
- // make the weakest of all assertions above.
- assertAlways.gte(res.nMatched, 0, tojson(res));
- }
-
- if (db.getMongo().writeMode() === 'commands') {
- assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res));
- }
-
- var docs = db[collName].find().toArray();
- docs.forEach(function(doc) {
- assertWhenOwnColl.eq('number', typeof doc.z);
- assertWhenOwnColl.gt(doc.z, 0);
- });
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+
+ $config.data.multi = true;
+
+ $config.data.assertResult = function(res, db, collName, query) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+
+ if (isMongod(db)) {
+ if (isMMAPv1(db)) {
+ // If an update triggers a document to move forward, then
+ // that document can be matched multiple times. If an update
+ // triggers a document to move backwards, then that document
+ // can be missed by other threads.
+ assertAlways.gte(res.nMatched, 0, tojson(res));
+ } else { // non-mmapv1 storage engine
+ // TODO: Can we assert exact equality with WiredTiger?
+ // What about for other storage engines?
+ assertWhenOwnColl.lte(this.numDocs, res.nMatched, tojson(res));
+ }
+ } else { // mongos
+ // In a mixed cluster, it is unknown what underlying storage engine
+ // the update operations will be executed against. Thus, we can only
+ // make the weakest of all assertions above.
+ assertAlways.gte(res.nMatched, 0, tojson(res));
+ }
+
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res));
+ }
+
+ var docs = db[collName].find().toArray();
+ docs.forEach(function(doc) {
+ assertWhenOwnColl.eq('number', typeof doc.z);
+ assertWhenOwnColl.gt(doc.z, 0);
+ });
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js
index fe12f2e33fb..f2739e329dd 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_multifield_multiupdate.js workload after dropping all
* non-_id indexes on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_noindex.js
index 0be46a25f6a..22b230d7c9e 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_multifield.js workload after dropping all non-_id indexes
* on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
index 93911a4a44f..a799d5dfe43 100644
--- a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
+++ b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
@@ -10,7 +10,7 @@
*
* Uses an ordered, bulk operation to perform the updates.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMMAPv1 and isMongod
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMMAPv1 and isMongod
var $config = (function() {
@@ -20,12 +20,14 @@ var $config = (function() {
},
update: function update(db, collName) {
- var updateDoc = { $inc: {} };
+ var updateDoc = {
+ $inc: {}
+ };
updateDoc.$inc[this.fieldName] = 1;
var bulk = db[collName].initializeOrderedBulkOp();
for (var i = 0; i < this.docCount; ++i) {
- bulk.find({ _id: i }).update(updateDoc);
+ bulk.find({_id: i}).update(updateDoc);
}
var result = bulk.execute();
// TODO: this actually does assume that there are no unique indexes.
@@ -64,15 +66,15 @@ var $config = (function() {
};
var transitions = {
- init: { update: 1 },
- update: { find: 1 },
- find: { update: 1 }
+ init: {update: 1},
+ update: {find: 1},
+ find: {update: 1}
};
function setup(db, collName, cluster) {
this.count = 0;
for (var i = 0; i < this.docCount; ++i) {
- db[collName].insert({ _id: i });
+ db[collName].insert({_id: i});
}
}
@@ -82,9 +84,7 @@ var $config = (function() {
states: states,
transitions: transitions,
setup: setup,
- data: {
- docCount: 15
- }
+ data: {docCount: 15}
};
})();
diff --git a/jstests/concurrency/fsm_workloads/update_rename.js b/jstests/concurrency/fsm_workloads/update_rename.js
index c74a657f312..b163b44f690 100644
--- a/jstests/concurrency/fsm_workloads/update_rename.js
+++ b/jstests/concurrency/fsm_workloads/update_rename.js
@@ -17,17 +17,23 @@ var $config = (function() {
var states = {
update: function update(db, collName) {
var from = choose(fieldNames);
- var to = choose(fieldNames.filter(function(n) { return n !== from; }));
- var updater = { $rename: {} };
+ var to = choose(fieldNames.filter(function(n) {
+ return n !== from;
+ }));
+ var updater = {
+ $rename: {}
+ };
updater.$rename[from] = to;
var query = {};
- query[from] = { $exists: 1 };
+ query[from] = {
+ $exists: 1
+ };
var res = db[collName].update(query, updater);
assertAlways.eq(0, res.nUpserted, tojson(res));
- assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
+ assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
if (db.getMongo().writeMode() === 'commands') {
assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res));
}
@@ -35,7 +41,7 @@ var $config = (function() {
};
var transitions = {
- update: { update: 1 }
+ update: {update: 1}
};
function setup(db, collName, cluster) {
diff --git a/jstests/concurrency/fsm_workloads/update_rename_noindex.js b/jstests/concurrency/fsm_workloads/update_rename_noindex.js
index bbf19227865..0bcb0cd9145 100644
--- a/jstests/concurrency/fsm_workloads/update_rename_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_rename_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_rename.js workload after dropping all non-_id indexes on
* the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_rename.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_rename.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_replace.js b/jstests/concurrency/fsm_workloads/update_replace.js
index bc183d01fbb..b9d4cf75380 100644
--- a/jstests/concurrency/fsm_workloads/update_replace.js
+++ b/jstests/concurrency/fsm_workloads/update_replace.js
@@ -6,7 +6,7 @@
* Does updates that replace an entire document.
* The collection has indexes on some but not all fields.
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
@@ -34,11 +34,7 @@ var $config = (function() {
// returns an update doc
function getRandomUpdateDoc() {
- var choices = [
- {},
- { x: 1, y: 1, z: 1 },
- { a: 1, b: 1, c: 1 }
- ];
+ var choices = [{}, {x: 1, y: 1, z: 1}, {a: 1, b: 1, c: 1}];
return choices[Random.randInt(choices.length)];
}
@@ -51,30 +47,30 @@ var $config = (function() {
var updateDoc = getRandomUpdateDoc();
// apply the update
- var res = db[collName].update({ _id: docIndex }, updateDoc);
+ var res = db[collName].update({_id: docIndex}, updateDoc);
assertResult(db, res);
}
};
var transitions = {
- update: { update: 1 }
+ update: {update: 1}
};
function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({ a: 1 }));
- assertAlways.commandWorked(db[collName].ensureIndex({ b: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({a: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({b: 1}));
// no index on c
- assertAlways.commandWorked(db[collName].ensureIndex({ x: 1 }));
- assertAlways.commandWorked(db[collName].ensureIndex({ y: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({x: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({y: 1}));
// no index on z
// numDocs should be much less than threadCount, to make more threads use the same docs.
- this.numDocs = Math.floor(this.threadCount / 3);
+ this.numDocs = Math.floor(this.threadCount / 3);
assertAlways.gt(this.numDocs, 0, 'numDocs should be a positive number');
for (var i = 0; i < this.numDocs; ++i) {
- var res = db[collName].insert({ _id: i });
+ var res = db[collName].insert({_id: i});
assertWhenOwnColl.writeOK(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
diff --git a/jstests/concurrency/fsm_workloads/update_replace_noindex.js b/jstests/concurrency/fsm_workloads/update_replace_noindex.js
index 590326a8edc..a10323fb455 100644
--- a/jstests/concurrency/fsm_workloads/update_replace_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_replace_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_replace.js workload after dropping all non-_id indexes
* on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_replace.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_replace.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_simple.js b/jstests/concurrency/fsm_workloads/update_simple.js
index 7e3c00de390..ae694ace309 100644
--- a/jstests/concurrency/fsm_workloads/update_simple.js
+++ b/jstests/concurrency/fsm_workloads/update_simple.js
@@ -8,7 +8,7 @@
* - whether to $set or $unset its field
* - what value to $set the field to
*/
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod and isMMAPv1
var $config = (function() {
@@ -23,19 +23,13 @@ var $config = (function() {
};
var transitions = {
- set: {
- set: 0.5,
- unset: 0.5
- },
- unset: {
- set: 0.5,
- unset: 0.5
- }
+ set: {set: 0.5, unset: 0.5},
+ unset: {set: 0.5, unset: 0.5}
};
function setup(db, collName, cluster) {
// index on 'value', the field being updated
- assertAlways.commandWorked(db[collName].ensureIndex({ value: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({value: 1}));
// numDocs should be much less than threadCount, to make more threads use the same docs.
this.numDocs = Math.floor(this.threadCount / 5);
@@ -44,7 +38,7 @@ var $config = (function() {
for (var i = 0; i < this.numDocs; ++i) {
// make sure the inserted docs have a 'value' field, so they won't need
// to grow when this workload runs against a capped collection
- var res = db[collName].insert({ _id: i, value: 0 });
+ var res = db[collName].insert({_id: i, value: 0});
assertWhenOwnColl.writeOK(res);
assertWhenOwnColl.eq(1, res.nInserted);
}
@@ -65,8 +59,7 @@ var $config = (function() {
// For non-mmap storage engines we can have a strong assertion that exactly one
// doc will be modified.
assertWhenOwnColl.eq(res.nMatched, 1, tojson(res));
- }
- else {
+ } else {
// Zero matches are possible for MMAP v1 because the update will skip a document
// that was invalidated during a yield.
assertWhenOwnColl.contains(res.nMatched, [0, 1], tojson(res));
@@ -85,9 +78,13 @@ var $config = (function() {
var value = Random.randInt(5);
var updater = {};
- updater[set ? '$set' : '$unset'] = { value: value };
+ updater[set ? '$set' : '$unset'] = {
+ value: value
+ };
- var query = { _id: docIndex };
+ var query = {
+ _id: docIndex
+ };
var res = this.doUpdate(db, collName, query, updater);
this.assertResult(db, res);
},
diff --git a/jstests/concurrency/fsm_workloads/update_simple_eval.js b/jstests/concurrency/fsm_workloads/update_simple_eval.js
index b0f0897a3eb..cf2b10f897a 100644
--- a/jstests/concurrency/fsm_workloads/update_simple_eval.js
+++ b/jstests/concurrency/fsm_workloads/update_simple_eval.js
@@ -9,25 +9,27 @@
* - what value to $set the field to
* and then applies the update using db.runCommand({ eval: ... })
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
- $config.data.doUpdate = function doUpdate(db, collName, query, updater) {
- var evalResult = db.runCommand({
- eval: function(f, collName, query, updater) {
- return tojson(f(db, collName, query, updater));
- },
- args: [$super.data.doUpdate, collName, query, updater],
- nolock: this.nolock
- });
- assertAlways.commandWorked(evalResult);
- var res = JSON.parse(evalResult.retval);
- return res;
- };
+ $config.data.doUpdate = function doUpdate(db, collName, query, updater) {
+ var evalResult = db.runCommand({
+ eval: function(f, collName, query, updater) {
+ return tojson(f(db, collName, query, updater));
+ },
+ args: [$super.data.doUpdate, collName, query, updater],
+ nolock: this.nolock
+ });
+ assertAlways.commandWorked(evalResult);
+ var res = JSON.parse(evalResult.retval);
+ return res;
+ };
- $config.data.nolock = false;
+ $config.data.nolock = false;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js b/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
index 0d89e509751..87e24965a7a 100644
--- a/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
+++ b/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
@@ -5,12 +5,13 @@
*
* Runs update_simple_eval with the eval option { nolock: true }.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_simple_eval.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_simple_eval.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- $config.data.nolock = true;
+ $config.data.nolock = true;
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/update_simple_noindex.js b/jstests/concurrency/fsm_workloads/update_simple_noindex.js
index b39c71f4266..65bad2855ab 100644
--- a/jstests/concurrency/fsm_workloads/update_simple_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_simple_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_simple.js workload after dropping all non-_id indexes on
* the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi.js b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
index 63aed616bc3..96d83cb5115 100644
--- a/jstests/concurrency/fsm_workloads/update_upsert_multi.js
+++ b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
@@ -16,11 +16,10 @@ var $config = (function() {
var query, update, options;
var res = db[collName].update(
// The counter ensures that the query will not match any existing document.
- query = { tid: this.tid, i: this.counter++ },
- update = { $inc: { n: 1 } },
- options = { multi: true, upsert: true }
- );
- var debugDoc = tojson({ query: query, update: update, options: options, result: res });
+ query = {tid: this.tid, i: this.counter++},
+ update = {$inc: {n: 1}},
+ options = {multi: true, upsert: true});
+ var debugDoc = tojson({query: query, update: update, options: options, result: res});
assertWhenOwnColl.eq(1, res.nUpserted, debugDoc);
assertWhenOwnColl.eq(0, res.nMatched, debugDoc);
if (db.getMongo().writeMode() === 'commands') {
@@ -32,10 +31,9 @@ var $config = (function() {
var res = db[collName].update(
// This query will match an existing document, since the 'insert' state
// always runs first.
- { tid: this.tid },
- { $inc: { n: 1 } },
- { multi: true, upsert: true }
- );
+ {tid: this.tid},
+ {$inc: {n: 1}},
+ {multi: true, upsert: true});
assertWhenOwnColl.eq(0, res.nUpserted, tojson(res));
assertWhenOwnColl.lte(1, res.nMatched, tojson(res));
@@ -53,21 +51,24 @@ var $config = (function() {
// because docs with lower i are newer, so they have had fewer
// opportunities to have n incremented.)
var prevN = Infinity;
- db[collName].find({ tid: this.tid }).sort({ i: 1 }).forEach(function(doc) {
- assertWhenOwnColl.gte(prevN, doc.n);
- prevN = doc.n;
- });
+ db[collName]
+ .find({tid: this.tid})
+ .sort({i: 1})
+ .forEach(function(doc) {
+ assertWhenOwnColl.gte(prevN, doc.n);
+ prevN = doc.n;
+ });
}
};
var transitions = {
- insert: { update: 0.875, assertConsistency: 0.125 },
- update: { insert: 0.875, assertConsistency: 0.125 },
- assertConsistency: { insert: 0.5, update: 0.5 }
+ insert: {update: 0.875, assertConsistency: 0.125},
+ update: {insert: 0.875, assertConsistency: 0.125},
+ assertConsistency: {insert: 0.5, update: 0.5}
};
function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({ tid: 1, i: 1 }));
+ assertAlways.commandWorked(db[collName].ensureIndex({tid: 1, i: 1}));
}
return {
@@ -76,7 +77,7 @@ var $config = (function() {
states: states,
startState: 'insert',
transitions: transitions,
- data: { counter: 0 },
+ data: {counter: 0},
setup: setup
};
diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js b/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
index a463c6ba17d..14b6c02d61f 100644
--- a/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
+++ b/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js
@@ -6,8 +6,8 @@
* Executes the update_upsert_multi.js workload after dropping all non-_id
* indexes on the collection.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/update_upsert_multi.js'); // for $config
-load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/update_upsert_multi.js'); // for $config
+load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes
var $config = extendWorkload($config, dropAllIndexes);
diff --git a/jstests/concurrency/fsm_workloads/update_where.js b/jstests/concurrency/fsm_workloads/update_where.js
index befc8cde972..ac0bb893160 100644
--- a/jstests/concurrency/fsm_workloads/update_where.js
+++ b/jstests/concurrency/fsm_workloads/update_where.js
@@ -3,44 +3,50 @@
/**
* update_where.js
*
- * Bulk inserts documents in batches of 100, randomly selects ~1/10th of documents inserted by the
+ * Bulk inserts documents in batches of 100, randomly selects ~1/10th of documents inserted by the
* thread and updates them. Also queries by the thread that created the documents to verify counts.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.randomBound = 10;
- $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
- return { tid: this.tid, x: Random.randInt(this.randomBound) };
- };
-
- $config.states.update = function update(db, collName) {
- var res = db[collName].update(
- // Server-side JS does not support Random.randInt, so use Math.floor/random instead
- { $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
- '&& this.tid === ' + this.tid },
- { $set: { x: Random.randInt(this.randomBound) } },
- { multi: true }
- );
- assertAlways.writeOK(res);
-
- if (db.getMongo().writeMode() === 'commands') {
- assertWhenOwnColl.gte(res.nModified, 0);
- assertWhenOwnColl.lte(res.nModified, this.insertedDocuments);
- }
- };
-
- $config.transitions = {
- insert: { insert: 0.2, update: 0.4, query: 0.4 },
- update: { insert: 0.4, update: 0.2, query: 0.4 },
- query: { insert: 0.4, update: 0.4, query: 0.2 }
- };
-
- $config.setup = function setup(db, collName, cluster) {
- /* no-op to prevent index from being created */
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.data.randomBound = 10;
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return {
+ tid: this.tid,
+ x: Random.randInt(this.randomBound)
+ };
+ };
+
+ $config.states.update = function update(db, collName) {
+ var res = db[collName].update(
+ // Server-side JS does not support Random.randInt, so use Math.floor/random instead
+ {
+ $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
+ '&& this.tid === ' + this.tid
+ },
+ {$set: {x: Random.randInt(this.randomBound)}},
+ {multi: true});
+ assertAlways.writeOK(res);
+
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.gte(res.nModified, 0);
+ assertWhenOwnColl.lte(res.nModified, this.insertedDocuments);
+ }
+ };
+
+ $config.transitions = {
+ insert: {insert: 0.2, update: 0.4, query: 0.4},
+ update: {insert: 0.4, update: 0.2, query: 0.4},
+ query: {insert: 0.4, update: 0.4, query: 0.2}
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ /* no-op to prevent index from being created */
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/upsert_where.js b/jstests/concurrency/fsm_workloads/upsert_where.js
index e89aa56d184..35430ccfa2f 100644
--- a/jstests/concurrency/fsm_workloads/upsert_where.js
+++ b/jstests/concurrency/fsm_workloads/upsert_where.js
@@ -6,38 +6,42 @@
* Bulk inserts documents in batches of 100, randomly selects a document that doesn't exist and
* updates it, and queries by the thread that created the documents to verify counts. */
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
- $config.data.randomBound = 10;
- $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
- return { tid: this.tid, x: Random.randInt(this.randomBound)};
- };
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+ $config.data.randomBound = 10;
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return {
+ tid: this.tid,
+ x: Random.randInt(this.randomBound)
+ };
+ };
- $config.states.upsert = function upsert(db, collName) {
- var res = db[collName].update(
- { $where: 'this.x === ' + this.randomBound + ' && this.tid === ' + this.tid },
- { $set: { x: Random.randInt(this.randomBound), tid: this.tid } },
- { upsert: true }
- );
- assertWhenOwnColl.eq(res.nUpserted, 1);
- var upsertedDocument = db[collName].findOne({ _id: res.getUpsertedId()._id });
- assertWhenOwnColl(function() {
- assertWhenOwnColl.eq(upsertedDocument.tid, this.tid);
- }.bind(this));
- this.insertedDocuments += res.nUpserted;
- };
+ $config.states.upsert = function upsert(db, collName) {
+ var res = db[collName].update(
+ {$where: 'this.x === ' + this.randomBound + ' && this.tid === ' + this.tid},
+ {$set: {x: Random.randInt(this.randomBound), tid: this.tid}},
+ {upsert: true});
+ assertWhenOwnColl.eq(res.nUpserted, 1);
+ var upsertedDocument = db[collName].findOne({_id: res.getUpsertedId()._id});
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.eq(upsertedDocument.tid, this.tid);
+ }.bind(this));
+ this.insertedDocuments += res.nUpserted;
+ };
- $config.transitions = {
- insert: { insert: 0.2, upsert: 0.4, query: 0.4 },
- upsert: { insert: 0.4, upsert: 0.2, query: 0.4 },
- query: { insert: 0.4, upsert: 0.4, query: 0.2 }
- };
+ $config.transitions = {
+ insert: {insert: 0.2, upsert: 0.4, query: 0.4},
+ upsert: {insert: 0.4, upsert: 0.2, query: 0.4},
+ query: {insert: 0.4, upsert: 0.4, query: 0.2}
+ };
- $config.setup = function setup(db, collName, cluster) {
- /* no-op to prevent index from being created */
- };
+ $config.setup = function setup(db, collName, cluster) {
+ /* no-op to prevent index from being created */
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield.js b/jstests/concurrency/fsm_workloads/yield.js
index 369db4a0c85..0ef6aa9b1a0 100644
--- a/jstests/concurrency/fsm_workloads/yield.js
+++ b/jstests/concurrency/fsm_workloads/yield.js
@@ -1,6 +1,6 @@
'use strict';
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod
+load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod
/**
* yield.js
@@ -31,9 +31,9 @@ var $config = (function() {
doc = cursor.next();
assertAlways(verifier(doc, prevDoc),
'Verifier failed!\nQuery: ' + tojson(cursor._query) + '\n' +
- 'Query plan: ' + tojson(cursor.explain()) + '\n' +
- 'Previous doc: ' + tojson(prevDoc) + '\n' +
- 'This doc: ' + tojson(doc));
+ 'Query plan: ' + tojson(cursor.explain()) + '\n' +
+ 'Previous doc: ' + tojson(prevDoc) + '\n' +
+ 'This doc: ' + tojson(doc));
}
assertAlways.eq(cursor.itcount(), 0);
},
@@ -44,7 +44,9 @@ var $config = (function() {
*/
genUpdateDoc: function genUpdateDoc() {
var newVal = Random.randInt(this.nDocs);
- return { $set: { a: newVal } };
+ return {
+ $set: {a: newVal}
+ };
}
};
@@ -54,7 +56,7 @@ var $config = (function() {
*/
update: function update(db, collName) {
var id = Random.randInt(this.nDocs);
- var randDoc = db[collName].findOne({ _id: id });
+ var randDoc = db[collName].findOne({_id: id});
if (randDoc === null) {
return;
}
@@ -68,9 +70,9 @@ var $config = (function() {
*/
remove: function remove(db, collName) {
var id = Random.randInt(this.nDocs);
- var doc = db[collName].findOne({ _id: id });
+ var doc = db[collName].findOne({_id: id});
if (doc !== null) {
- var res = db[collName].remove({ _id: id });
+ var res = db[collName].remove({_id: id});
assertAlways.writeOK(res);
if (res.nRemoved > 0) {
assertAlways.writeOK(db[collName].insert(doc));
@@ -84,8 +86,7 @@ var $config = (function() {
*/
query: function collScan(db, collName) {
var nMatches = 100;
- var cursor = db[collName].find({ a: { $lt: nMatches } })
- .batchSize(this.batchSize);
+ var cursor = db[collName].find({a: {$lt: nMatches}}).batchSize(this.batchSize);
var collScanVerifier = function collScanVerifier(doc, prevDoc) {
return doc.a < nMatches;
};
@@ -110,9 +111,9 @@ var $config = (function() {
*
*/
var transitions = {
- update: { update: 0.334, remove: 0.333, query: 0.333 },
- remove: { update: 0.333, remove: 0.334, query: 0.333 },
- query: { update: 0.333, remove: 0.333, query: 0.334 }
+ update: {update: 0.334, remove: 0.333, query: 0.333},
+ remove: {update: 0.333, remove: 0.334, query: 0.333},
+ query: {update: 0.333, remove: 0.333, query: 0.334}
};
/*
@@ -126,18 +127,15 @@ var $config = (function() {
cluster.executeOnMongodNodes(function enableFailPoint(db) {
assertAlways.commandWorked(
- db.adminCommand({ configureFailPoint: 'recordNeedsFetchFail', mode: 'alwaysOn' })
- );
+ db.adminCommand({configureFailPoint: 'recordNeedsFetchFail', mode: 'alwaysOn'}));
});
// Lower the following parameters to force even more yields.
cluster.executeOnMongodNodes(function lowerYieldParams(db) {
assertAlways.commandWorked(
- db.adminCommand({ setParameter: 1, internalQueryExecYieldIterations: 5 })
- );
+ db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 5}));
assertAlways.commandWorked(
- db.adminCommand({ setParameter: 1, internalQueryExecYieldPeriodMS: 1 })
- );
+ db.adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 1}));
});
// Set up some data to query.
var N = this.nDocs;
@@ -145,9 +143,8 @@ var $config = (function() {
for (var i = 0; i < N; i++) {
// Give each doc some word of text
var word = this.words[i % this.words.length];
- bulk.find({ _id: i }).upsert().updateOne(
- { $set: { a: i, b: N - i, c: i, d: N - i, yield_text: word } }
- );
+ bulk.find({_id: i}).upsert().updateOne(
+ {$set: {a: i, b: N - i, c: i, d: N - i, yield_text: word}});
}
assertAlways.writeOK(bulk.execute());
}
@@ -158,16 +155,13 @@ var $config = (function() {
function teardown(db, collName, cluster) {
cluster.executeOnMongodNodes(function disableFailPoint(db) {
assertAlways.commandWorked(
- db.adminCommand({ configureFailPoint: 'recordNeedsFetchFail', mode: 'off' })
- );
+ db.adminCommand({configureFailPoint: 'recordNeedsFetchFail', mode: 'off'}));
});
cluster.executeOnMongodNodes(function resetYieldParams(db) {
assertAlways.commandWorked(
- db.adminCommand({ setParameter: 1, internalQueryExecYieldIterations: 128 })
- );
+ db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 128}));
assertAlways.commandWorked(
- db.adminCommand({ setParameter: 1, internalQueryExecYieldPeriodMS: 10 })
- );
+ db.adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 10}));
});
}
diff --git a/jstests/concurrency/fsm_workloads/yield_and_hashed.js b/jstests/concurrency/fsm_workloads/yield_and_hashed.js
index aa53a354209..d0eef4c8d4f 100644
--- a/jstests/concurrency/fsm_workloads/yield_and_hashed.js
+++ b/jstests/concurrency/fsm_workloads/yield_and_hashed.js
@@ -6,45 +6,70 @@
* Intersperse queries which use the AND_HASH stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
-
- /*
- * Issue a query that will use the AND_HASH stage. This is a little tricky, so use
- * stagedebug to force it to happen. Unfortunately this means it can't be batched.
- */
- $config.states.query = function andHash(db, collName) {
- var nMatches = 100;
- assertAlways.lte(nMatches, this.nDocs);
- // Construct the query plan: two ixscans under an andHashed.
- // Scan c <= nMatches
- var ixscan1 = { ixscan: { args: { name: 'stages_and_hashed', keyPattern: { c: 1 },
- startKey: { '': nMatches }, endKey: {},
- endKeyInclusive: true, direction: -1 } } };
-
- // Scan d >= this.nDocs - nMatches
- var ixscan2 = { ixscan: { args: { name: 'stages_and_hashed', keyPattern: { d: 1 },
- startKey: { '': this.nDocs - nMatches }, endKey: {},
- endKeyInclusive: true, direction: 1 } } };
-
- var andix1ix2 = { andHash: { args: { nodes: [ixscan1, ixscan2] } } };
-
- // On non-MMAP storage engines, index intersection plans will always re-filter
- // the docs to make sure we don't get any spurious matches.
- var fetch = { fetch: { filter: { c: { $lte: nMatches },
- d: { $gte: (this.nDocs - nMatches) } },
- args: { node: andix1ix2 } } };
-
- var res = db.runCommand({ stageDebug: { plan: fetch, collection: collName } });
- assertAlways.commandWorked(res);
- for (var i = 0; i < res.results.length; i++) {
- var result = res.results[i];
- assertAlways.lte(result.c, nMatches);
- assertAlways.gte(result.d, this.nDocs - nMatches);
- }
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ /*
+ * Issue a query that will use the AND_HASH stage. This is a little tricky, so use
+ * stagedebug to force it to happen. Unfortunately this means it can't be batched.
+ */
+ $config.states.query = function andHash(db, collName) {
+ var nMatches = 100;
+ assertAlways.lte(nMatches, this.nDocs);
+ // Construct the query plan: two ixscans under an andHashed.
+ // Scan c <= nMatches
+ var ixscan1 = {
+ ixscan: {
+ args: {
+ name: 'stages_and_hashed',
+ keyPattern: {c: 1},
+ startKey: {'': nMatches},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: -1
+ }
+ }
+ };
+
+ // Scan d >= this.nDocs - nMatches
+ var ixscan2 = {
+ ixscan: {
+ args: {
+ name: 'stages_and_hashed',
+ keyPattern: {d: 1},
+ startKey: {'': this.nDocs - nMatches},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: 1
+ }
+ }
+ };
+
+ var andix1ix2 = {
+ andHash: {args: {nodes: [ixscan1, ixscan2]}}
+ };
+
+ // On non-MMAP storage engines, index intersection plans will always re-filter
+ // the docs to make sure we don't get any spurious matches.
+ var fetch = {
+ fetch: {
+ filter: {c: {$lte: nMatches}, d: {$gte: (this.nDocs - nMatches)}},
+ args: {node: andix1ix2}
+ }
+ };
+
+ var res = db.runCommand({stageDebug: {plan: fetch, collection: collName}});
+ assertAlways.commandWorked(res);
+ for (var i = 0; i < res.results.length; i++) {
+ var result = res.results[i];
+ assertAlways.lte(result.c, nMatches);
+ assertAlways.gte(result.d, this.nDocs - nMatches);
+ }
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_and_sorted.js b/jstests/concurrency/fsm_workloads/yield_and_sorted.js
index 11ef5f9b089..42bd94b4acd 100644
--- a/jstests/concurrency/fsm_workloads/yield_and_sorted.js
+++ b/jstests/concurrency/fsm_workloads/yield_and_sorted.js
@@ -6,43 +6,65 @@
* Intersperse queries which use the AND_SORTED stage with updates and deletes of documents they
* may match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- /*
- * Issue a query that will use the AND_SORTED stage. This is a little tricky, so use
- * stagedebug to force it to happen. Unfortunately this means it can't be batched.
- */
- $config.states.query = function andSorted(db, collName) {
- // Not very many docs returned in this, so loop to increase chances of yielding in the
- // middle.
- for (var i = 0; i < 100; i++) {
- // Construct the query plan: two ixscans under an andSorted.
- // Scan a == 0
- var ixscan1 = { ixscan: { args: { name: 'stages_and_sorted', keyPattern: { c: 1 },
- startKey: { '': 0 }, endKey: { '': 0 },
- endKeyInclusive: false, direction: 1 } } };
- // Scan b == this.nDocs
- var ixscan2 = { ixscan: { args: { name: 'stages_and_sorted', keyPattern: { d: 1 },
- startKey: { '': this.nDocs },
- endKey: { '': this.nDocs },
- endKeyInclusive: false, direction: -1 } } };
+ /*
+ * Issue a query that will use the AND_SORTED stage. This is a little tricky, so use
+ * stagedebug to force it to happen. Unfortunately this means it can't be batched.
+ */
+ $config.states.query = function andSorted(db, collName) {
+ // Not very many docs returned in this, so loop to increase chances of yielding in the
+ // middle.
+ for (var i = 0; i < 100; i++) {
+ // Construct the query plan: two ixscans under an andSorted.
+ // Scan a == 0
+ var ixscan1 = {
+ ixscan: {
+ args: {
+ name: 'stages_and_sorted',
+ keyPattern: {c: 1},
+ startKey: {'': 0},
+ endKey: {'': 0},
+ endKeyInclusive: false,
+ direction: 1
+ }
+ }
+ };
+ // Scan b == this.nDocs
+ var ixscan2 = {
+ ixscan: {
+ args: {
+ name: 'stages_and_sorted',
+ keyPattern: {d: 1},
+ startKey: {'': this.nDocs},
+ endKey: {'': this.nDocs},
+ endKeyInclusive: false,
+ direction: -1
+ }
+ }
+ };
- // Intersect the two
- var andix1ix2 = { andSorted: { args: { nodes: [ixscan1, ixscan2] } } };
- var res = db.runCommand({ stageDebug: { collection: collName, plan: andix1ix2 } });
- assertAlways.commandWorked(res);
- for (var j = 0; j < res.results.length; j++) {
- var result = res.results[j];
- // These should always be true, since they're just verifying that the results match
- // the query predicate.
- assertAlways.eq(result.c, 0);
- assertAlways.eq(result.d, this.nDocs);
+ // Intersect the two
+ var andix1ix2 = {
+ andSorted: {args: {nodes: [ixscan1, ixscan2]}}
+ };
+ var res = db.runCommand({stageDebug: {collection: collName, plan: andix1ix2}});
+ assertAlways.commandWorked(res);
+ for (var j = 0; j < res.results.length; j++) {
+ var result = res.results[j];
+ // These should always be true, since they're just verifying that the results
+ // match
+ // the query predicate.
+ assertAlways.eq(result.c, 0);
+ assertAlways.eq(result.d, this.nDocs);
+ }
}
- }
- };
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_fetch.js b/jstests/concurrency/fsm_workloads/yield_fetch.js
index ddec0529e50..0e1073f774a 100644
--- a/jstests/concurrency/fsm_workloads/yield_fetch.js
+++ b/jstests/concurrency/fsm_workloads/yield_fetch.js
@@ -6,26 +6,27 @@
* Intersperse queries which use the FETCH stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- /*
- * Issue a query that will use the FETCH stage.
- */
- $config.states.query = function fetch(db, collName) {
- var nMatches = 100;
+ /*
+ * Issue a query that will use the FETCH stage.
+ */
+ $config.states.query = function fetch(db, collName) {
+ var nMatches = 100;
- var cursor = db[collName].find({ c: { $lt: nMatches } })
- .batchSize(this.batchSize);
+ var cursor = db[collName].find({c: {$lt: nMatches}}).batchSize(this.batchSize);
- var verifier = function fetchVerifier(doc, prevDoc) {
- return doc.c < nMatches;
- };
+ var verifier = function fetchVerifier(doc, prevDoc) {
+ return doc.c < nMatches;
+ };
- this.advanceCursor(cursor, verifier);
- };
+ this.advanceCursor(cursor, verifier);
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near.js b/jstests/concurrency/fsm_workloads/yield_geo_near.js
index b75c77d77a1..324c384636e 100644
--- a/jstests/concurrency/fsm_workloads/yield_geo_near.js
+++ b/jstests/concurrency/fsm_workloads/yield_geo_near.js
@@ -5,72 +5,83 @@
*
* Intersperse geo $near queries with updates and deletes of documents they may match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- /*
- * Use geo $near query to find points near the origin. Note this should be done using the
- * geoNear command, rather than a $near query, as the $near query doesn't work in a sharded
- * environment. Unfortunately this means we cannot batch the request.
- */
- $config.states.query = function geoNear(db, collName) {
- // This distance gets about 80 docs around the origin. There is one doc inserted
- // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
- var maxDistance = 5;
+ /*
+ * Use geo $near query to find points near the origin. Note this should be done using the
+ * geoNear command, rather than a $near query, as the $near query doesn't work in a sharded
+ * environment. Unfortunately this means we cannot batch the request.
+ */
+ $config.states.query = function geoNear(db, collName) {
+ // This distance gets about 80 docs around the origin. There is one doc inserted
+ // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
+ var maxDistance = 5;
- var res = db.runCommand({ geoNear: collName, near: [0, 0], maxDistance: maxDistance });
- assertWhenOwnColl.commandWorked(res); // Could fail if more than 1 2d index.
- assertWhenOwnColl(function verifyResults() {
- var results = res.results;
- var prevDoc = { dis: 0 }; // distance should never be less than 0
- for (var i = 0; i < results.length; i++) {
- var doc = results[i];
- assertAlways.lte(NumberInt(doc.dis), maxDistance); // satisfies query
- assertAlways.lte(prevDoc.dis, doc.dis); // returned in the correct order
- prevDoc = doc;
- }
- });
- };
+ var res = db.runCommand({geoNear: collName, near: [0, 0], maxDistance: maxDistance});
+ assertWhenOwnColl.commandWorked(res); // Could fail if more than 1 2d index.
+ assertWhenOwnColl(function verifyResults() {
+ var results = res.results;
+ var prevDoc = {
+ dis: 0
+ }; // distance should never be less than 0
+ for (var i = 0; i < results.length; i++) {
+ var doc = results[i];
+ assertAlways.lte(NumberInt(doc.dis), maxDistance); // satisfies query
+ assertAlways.lte(prevDoc.dis, doc.dis); // returned in the correct order
+ prevDoc = doc;
+ }
+ });
+ };
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var P = Math.floor(Math.sqrt(this.nDocs));
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var P = Math.floor(Math.sqrt(this.nDocs));
- // Move the point to another location within the PxP grid.
- var newX = Random.randInt(P) - P/2;
- var newY = Random.randInt(P) - P/2;
- return { $set: { geo: [newX, newY] } };
- };
+ // Move the point to another location within the PxP grid.
+ var newX = Random.randInt(P) - P / 2;
+ var newY = Random.randInt(P) - P / 2;
+ return {
+ $set: {geo: [newX, newY]}
+ };
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- return { geo: '2d' };
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ return {
+ geo: '2d'
+ };
+ };
- $config.data.getReplaceSpec = function getReplaceSpec(i, coords) {
- return { _id: i, geo: coords };
- };
+ $config.data.getReplaceSpec = function getReplaceSpec(i, coords) {
+ return {
+ _id: i,
+ geo: coords
+ };
+ };
- /*
- * Insert some docs in geo form and make a 2d index.
- */
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ /*
+ * Insert some docs in geo form and make a 2d index.
+ */
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
- var P = Math.floor(Math.sqrt(this.nDocs));
- var i = 0;
- // Set up some points to query (in a PxP grid around 0,0).
- var bulk = db[collName].initializeUnorderedBulkOp();
- for (var x = 0; x < P; x++) {
- for (var y = 0; y < P; y++) {
- var coords = [x - P/2, y - P/2];
- bulk.find({ _id: i }).upsert().replaceOne(this.getReplaceSpec(i, coords));
- i++;
+ var P = Math.floor(Math.sqrt(this.nDocs));
+ var i = 0;
+ // Set up some points to query (in a PxP grid around 0,0).
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var x = 0; x < P; x++) {
+ for (var y = 0; y < P; y++) {
+ var coords = [x - P / 2, y - P / 2];
+ bulk.find({_id: i}).upsert().replaceOne(this.getReplaceSpec(i, coords));
+ i++;
+ }
}
- }
- assertAlways.writeOK(bulk.execute());
- assertAlways.commandWorked(db[collName].ensureIndex(this.getIndexSpec()));
- };
+ assertAlways.writeOK(bulk.execute());
+ assertAlways.commandWorked(db[collName].ensureIndex(this.getIndexSpec()));
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
index 6e9d70177b7..e2a63f8c546 100644
--- a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
+++ b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
@@ -5,79 +5,95 @@
*
* Intersperse geo $near queries with updates of non-geo fields to test deduplication.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_geo_near.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield_geo_near.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- $config.states.remove = function remove(db, collName) {
- var id = Random.randInt(this.nDocs);
- var doc = db[collName].findOne({ _id: id });
- if (doc !== null) {
- var res = db[collName].remove({ _id: id });
- assertAlways.writeOK(res);
- if (res.nRemoved > 0) {
- // Re-insert the document with the same '_id', but an incremented 'timesInserted' to
- // distinguish it from the deleted document.
- doc.timesInserted++;
- assertAlways.writeOK(db[collName].insert(doc));
+ $config.states.remove = function remove(db, collName) {
+ var id = Random.randInt(this.nDocs);
+ var doc = db[collName].findOne({_id: id});
+ if (doc !== null) {
+ var res = db[collName].remove({_id: id});
+ assertAlways.writeOK(res);
+ if (res.nRemoved > 0) {
+ // Re-insert the document with the same '_id', but an incremented
+ // 'timesInserted' to
+ // distinguish it from the deleted document.
+ doc.timesInserted++;
+ assertAlways.writeOK(db[collName].insert(doc));
+ }
}
- }
- };
+ };
- /*
- * Use geo $nearSphere query to find points near the origin. Note this should be done using the
- * geoNear command, rather than a $nearSphere query, as the $nearSphere query doesn't work in a
- * sharded environment. Unfortunately this means we cannot batch the request.
- *
- * Only points are covered in this test as there is no guarantee that geometries indexed in
- * multiple cells will be deduplicated correctly with interspersed updates. If multiple index
- * cells for the same geometry occur in the same search interval, an update may cause geoNear
- * to return the same document multiple times.
- */
- $config.states.query = function geoNear(db, collName) {
- // This distance gets about 80 docs around the origin. There is one doc inserted
- // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
- var maxDistance = 5;
+ /*
+ * Use geo $nearSphere query to find points near the origin. Note this should be done using
+ *the
+ * geoNear command, rather than a $nearSphere query, as the $nearSphere query doesn't work
+ *in a
+ * sharded environment. Unfortunately this means we cannot batch the request.
+ *
+ * Only points are covered in this test as there is no guarantee that geometries indexed in
+ * multiple cells will be deduplicated correctly with interspersed updates. If multiple
+ *index
+ * cells for the same geometry occur in the same search interval, an update may cause
+ *geoNear
+ * to return the same document multiple times.
+ */
+ $config.states.query = function geoNear(db, collName) {
+ // This distance gets about 80 docs around the origin. There is one doc inserted
+ // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
+ var maxDistance = 5;
- var res = db.runCommand({
- geoNear: collName,
- near: [0, 0],
- maxDistance: maxDistance,
- spherical: true
- });
- assertWhenOwnColl.commandWorked(res);
- assertWhenOwnColl(function verifyResults() {
- var results = res.results;
- var seenObjs = [];
- for (var i = 0; i < results.length; i++) {
- var doc = results[i].obj;
+ var res = db.runCommand(
+ {geoNear: collName, near: [0, 0], maxDistance: maxDistance, spherical: true});
+ assertWhenOwnColl.commandWorked(res);
+ assertWhenOwnColl(function verifyResults() {
+ var results = res.results;
+ var seenObjs = [];
+ for (var i = 0; i < results.length; i++) {
+ var doc = results[i].obj;
- // The pair (_id, timesInserted) is the smallest set of attributes that uniquely
- // identifies a document.
- var objToSearchFor = { _id: doc._id, timesInserted: doc.timesInserted };
- var found = seenObjs.some(function(obj) {
- return bsonWoCompare(obj, objToSearchFor) === 0;
- });
- assertWhenOwnColl(!found, 'geoNear command returned the document ' + tojson(doc) +
- ' multiple times: ' + tojson(seenObjs));
- seenObjs.push(objToSearchFor);
- }
- });
- };
+ // The pair (_id, timesInserted) is the smallest set of attributes that uniquely
+ // identifies a document.
+ var objToSearchFor = {
+ _id: doc._id,
+ timesInserted: doc.timesInserted
+ };
+ var found = seenObjs.some(function(obj) {
+ return bsonWoCompare(obj, objToSearchFor) === 0;
+ });
+ assertWhenOwnColl(!found,
+ 'geoNear command returned the document ' + tojson(doc) +
+ ' multiple times: ' + tojson(seenObjs));
+ seenObjs.push(objToSearchFor);
+ }
+ });
+ };
- $config.data.genUpdateDoc = function genUpdateDoc() {
- // Attempts to perform an in-place update to trigger an invalidation on MMAP v1.
- return { $inc: { timesUpdated: 1 } };
- };
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ // Attempts to perform an in-place update to trigger an invalidation on MMAP v1.
+ return {
+ $inc: {timesUpdated: 1}
+ };
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- return { geo: '2dsphere' };
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ return {
+ geo: '2dsphere'
+ };
+ };
- $config.data.getReplaceSpec = function getReplaceSpec(i, coords) {
- return { _id: i, geo: coords, timesUpdated: 0, timesInserted: 0 };
- };
+ $config.data.getReplaceSpec = function getReplaceSpec(i, coords) {
+ return {
+ _id: i,
+ geo: coords,
+ timesUpdated: 0,
+ timesInserted: 0
+ };
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_id_hack.js b/jstests/concurrency/fsm_workloads/yield_id_hack.js
index ef43bbed49b..81a5acbb0fd 100644
--- a/jstests/concurrency/fsm_workloads/yield_id_hack.js
+++ b/jstests/concurrency/fsm_workloads/yield_id_hack.js
@@ -6,26 +6,29 @@
* Intersperse queries which use the ID_HACK stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload($config,
+ function($config, $super) {
- /*
- * Issue a query that will use the ID_HACK stage. This cannot be batched, so issue a
- * number of them to increase the chances of yielding between getting the key and looking
- * up its value.
- */
- $config.states.query = function idHack(db, collName) {
- var nQueries = 100;
- for (var i = 0; i < nQueries; i++) {
- assertAlways.lte(db[collName].find({ _id: i }).itcount(), 1);
- var res = db[collName].findOne({ _id: i });
- if (res !== null) {
- assertAlways.eq(i, res._id);
- }
- }
- };
+ /*
+ * Issue a query that will use the ID_HACK stage. This cannot be
+ * batched, so issue a
+ * number of them to increase the chances of yielding between
+ * getting the key and looking
+ * up its value.
+ */
+ $config.states.query = function idHack(db, collName) {
+ var nQueries = 100;
+ for (var i = 0; i < nQueries; i++) {
+ assertAlways.lte(db[collName].find({_id: i}).itcount(), 1);
+ var res = db[collName].findOne({_id: i});
+ if (res !== null) {
+ assertAlways.eq(i, res._id);
+ }
+ }
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_rooted_or.js b/jstests/concurrency/fsm_workloads/yield_rooted_or.js
index ee742067ff0..4f8415b4fb0 100644
--- a/jstests/concurrency/fsm_workloads/yield_rooted_or.js
+++ b/jstests/concurrency/fsm_workloads/yield_rooted_or.js
@@ -7,40 +7,44 @@
* match.
* Other workloads that need an index on c and d can inherit from this.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
-
- /*
- * Issue a query with an or stage as the root.
- */
- $config.states.query = function rootedOr(db, collName) {
- var nMatches = 100;
-
- var cursor = db[collName].find({ $or: [ { c: { $lte: nMatches / 2 } },
- { d: { $lte: nMatches / 2 } } ] })
- .batchSize(this.batchSize);
-
- var verifier = function rootedOrVerifier(doc, prevDoc) {
- return (doc.c <= nMatches / 2 || doc.d <= nMatches / 2);
- };
-
- this.advanceCursor(cursor, verifier);
- };
-
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var newC = Random.randInt(this.nDocs);
- var newD = Random.randInt(this.nDocs);
- return { $set: { c: newC, d: newD } };
- };
-
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
-
- assertAlways.commandWorked(db[collName].ensureIndex({ c: 1 }));
- assertAlways.commandWorked(db[collName].ensureIndex({ d: 1 }));
- };
-
- return $config;
-});
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+
+var $config =
+ extendWorkload($config,
+ function($config, $super) {
+
+ /*
+ * Issue a query with an or stage as the root.
+ */
+ $config.states.query = function rootedOr(db, collName) {
+ var nMatches = 100;
+
+ var cursor = db[collName].find({
+ $or: [{c: {$lte: nMatches / 2}}, {d: {$lte: nMatches / 2}}]
+ }).batchSize(this.batchSize);
+
+ var verifier = function rootedOrVerifier(doc, prevDoc) {
+ return (doc.c <= nMatches / 2 || doc.d <= nMatches / 2);
+ };
+
+ this.advanceCursor(cursor, verifier);
+ };
+
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var newC = Random.randInt(this.nDocs);
+ var newD = Random.randInt(this.nDocs);
+ return {
+ $set: {c: newC, d: newD}
+ };
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
+
+ assertAlways.commandWorked(db[collName].ensureIndex({c: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({d: 1}));
+ };
+
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_sort.js b/jstests/concurrency/fsm_workloads/yield_sort.js
index b3aaed620ff..628314fd36b 100644
--- a/jstests/concurrency/fsm_workloads/yield_sort.js
+++ b/jstests/concurrency/fsm_workloads/yield_sort.js
@@ -6,37 +6,40 @@
* Intersperse queries which use the SORT stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield_sort_merge.js'); // for $config
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield_sort_merge.js'); // for $config
-var $config = extendWorkload($config, function($config, $super) {
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
- /*
- * Execute a query that will use the SORT stage.
- */
- $config.states.query = function sort(db, collName) {
- var nMatches = 100;
- // Sort on c, since it's not an indexed field.
- var cursor = db[collName].find({ a: { $lt: nMatches } })
- .sort({ c: -1 })
- .batchSize(this.batchSize);
+ /*
+ * Execute a query that will use the SORT stage.
+ */
+ $config.states.query = function sort(db, collName) {
+ var nMatches = 100;
+ // Sort on c, since it's not an indexed field.
+ var cursor =
+ db[collName].find({a: {$lt: nMatches}}).sort({c: -1}).batchSize(this.batchSize);
- var verifier = function sortVerifier(doc, prevDoc) {
- var correctOrder = true;
- if (prevDoc !== null) {
- correctOrder = (doc.c <= prevDoc.c);
- }
- return doc.a < nMatches && correctOrder;
- };
+ var verifier = function sortVerifier(doc, prevDoc) {
+ var correctOrder = true;
+ if (prevDoc !== null) {
+ correctOrder = (doc.c <= prevDoc.c);
+ }
+ return doc.a < nMatches && correctOrder;
+ };
- this.advanceCursor(cursor, verifier);
- };
+ this.advanceCursor(cursor, verifier);
+ };
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var newA = Random.randInt(this.nDocs);
- var newC = Random.randInt(this.nDocs);
- return { $set: { a: newA, c: newC } };
- };
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var newA = Random.randInt(this.nDocs);
+ var newC = Random.randInt(this.nDocs);
+ return {
+ $set: {a: newA, c: newC}
+ };
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_sort_merge.js b/jstests/concurrency/fsm_workloads/yield_sort_merge.js
index cea2a974090..ee63b0d8298 100644
--- a/jstests/concurrency/fsm_workloads/yield_sort_merge.js
+++ b/jstests/concurrency/fsm_workloads/yield_sort_merge.js
@@ -7,50 +7,52 @@
* may match.
* Other workloads that need an index { a: 1, b: 1 } can extend this
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
-
- /*
- * Execute a query that will use the SORT_MERGE stage.
- */
- $config.states.query = function sortMerge(db, collName) {
- var nMatches = 50; // Don't push this too high, or SORT_MERGE stage won't be selected.
-
- // Build an array [0, nMatches).
- var matches = [];
- for (var i = 0; i < nMatches; i++) {
- matches.push(i);
- }
-
- var cursor = db[collName].find({ a: { $in: matches } })
- .sort({ b: -1 })
- .batchSize(this.batchSize);
-
- var verifier = function sortMergeVerifier(doc, prevDoc) {
- var correctOrder = true;
- if (prevDoc !== null) {
- correctOrder = (doc.b <= prevDoc.b);
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ /*
+ * Execute a query that will use the SORT_MERGE stage.
+ */
+ $config.states.query = function sortMerge(db, collName) {
+ var nMatches = 50; // Don't push this too high, or SORT_MERGE stage won't be selected.
+
+ // Build an array [0, nMatches).
+ var matches = [];
+ for (var i = 0; i < nMatches; i++) {
+ matches.push(i);
}
- return doc.a < nMatches && correctOrder;
- };
- this.advanceCursor(cursor, verifier);
- };
+ var cursor =
+ db[collName].find({a: {$in: matches}}).sort({b: -1}).batchSize(this.batchSize);
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var newA = Random.randInt(this.nDocs);
- var newB = Random.randInt(this.nDocs);
- return { $set: { a: newA, b: newB } };
- };
+ var verifier = function sortMergeVerifier(doc, prevDoc) {
+ var correctOrder = true;
+ if (prevDoc !== null) {
+ correctOrder = (doc.b <= prevDoc.b);
+ }
+ return doc.a < nMatches && correctOrder;
+ };
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ this.advanceCursor(cursor, verifier);
+ };
- assertAlways.commandWorked(db[collName].ensureIndex({ a: 1, b: 1 }));
- };
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var newA = Random.randInt(this.nDocs);
+ var newB = Random.randInt(this.nDocs);
+ return {
+ $set: {a: newA, b: newB}
+ };
+ };
- return $config;
-});
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
+
+ assertAlways.commandWorked(db[collName].ensureIndex({a: 1, b: 1}));
+ };
+ return $config;
+ });
diff --git a/jstests/concurrency/fsm_workloads/yield_text.js b/jstests/concurrency/fsm_workloads/yield_text.js
index 33f16e85de9..67d7c618319 100644
--- a/jstests/concurrency/fsm_workloads/yield_text.js
+++ b/jstests/concurrency/fsm_workloads/yield_text.js
@@ -6,43 +6,50 @@
* Intersperse queries which use the TEXT stage with updates and deletes of documents they may
* match.
*/
-load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
-load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-
-var $config = extendWorkload($config, function($config, $super) {
-
- /*
- * Pick a random word and search for it using full text search.
- */
- $config.states.query = function text(db, collName) {
- var word = this.words[Random.randInt(this.words.length)];
-
- var cursor = db[collName].find({ $text: { $search: word },
- yield_text: { $exists: true } })
- .batchSize(this.batchSize);
-
- var verifier = function textVerifier(doc, prevDoc) {
- return doc.yield_text.indexOf(word) !== -1;
+load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
+load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
+
+var $config = extendWorkload(
+ $config,
+ function($config, $super) {
+
+ /*
+ * Pick a random word and search for it using full text search.
+ */
+ $config.states.query = function text(db, collName) {
+ var word = this.words[Random.randInt(this.words.length)];
+
+ var cursor = db[collName].find({
+ $text: {$search: word},
+ yield_text: {$exists: true}
+ }).batchSize(this.batchSize);
+
+ var verifier = function textVerifier(doc, prevDoc) {
+ return doc.yield_text.indexOf(word) !== -1;
+ };
+
+              // If we don't have the right text index, or
+              // someone drops our text index, this assertion
+              // is either pointless or won't work. So only
+              // verify the results when we know no one else
+              // is messing with our indices.
+ assertWhenOwnColl(function verifyTextResults() {
+ this.advanceCursor(cursor, verifier);
+ }.bind(this));
};
- // If we don't have the right text index, or someone drops our text index, this assertion
- // is either pointless or won't work. So only verify the results when we know no one else
- // is messing with our indices.
- assertWhenOwnColl(function verifyTextResults() {
- this.advanceCursor(cursor, verifier);
- }.bind(this));
- };
-
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var newWord = this.words[Random.randInt(this.words.length)];
- return { $set: { yield_text: newWord } };
- };
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var newWord = this.words[Random.randInt(this.words.length)];
+ return {
+ $set: {yield_text: newWord}
+ };
+ };
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
- assertWhenOwnColl.commandWorked(db[collName].ensureIndex({ yield_text: 'text' }));
- };
+ assertWhenOwnColl.commandWorked(db[collName].ensureIndex({yield_text: 'text'}));
+ };
- return $config;
-});
+ return $config;
+ });
diff --git a/jstests/core/all.js b/jstests/core/all.js
index d4ddd6c6ee4..221cf1daeda 100644
--- a/jstests/core/all.js
+++ b/jstests/core/all.js
@@ -2,46 +2,44 @@ t = db.jstests_all;
t.drop();
doTest = function() {
-
- t.save( { a:[ 1,2,3 ] } );
- t.save( { a:[ 1,2,4 ] } );
- t.save( { a:[ 1,8,5 ] } );
- t.save( { a:[ 1,8,6 ] } );
- t.save( { a:[ 1,9,7 ] } );
- t.save( { a : [] } );
- t.save( {} );
-
- assert.eq( 5, t.find( { a: { $all: [ 1 ] } } ).count() );
- assert.eq( 2, t.find( { a: { $all: [ 1, 2 ] } } ).count() );
- assert.eq( 2, t.find( { a: { $all: [ 1, 8 ] } } ).count() );
- assert.eq( 1, t.find( { a: { $all: [ 1, 3 ] } } ).count() );
- assert.eq( 2, t.find( { a: { $all: [ 2 ] } } ).count() );
- assert.eq( 1, t.find( { a: { $all: [ 2, 3 ] } } ).count() );
- assert.eq( 2, t.find( { a: { $all: [ 2, 1 ] } } ).count() );
-
- t.save( { a: [ 2, 2 ] } );
- assert.eq( 3, t.find( { a: { $all: [ 2, 2 ] } } ).count() );
-
- t.save( { a: [ [ 2 ] ] } );
- assert.eq( 3, t.find( { a: { $all: [ 2 ] } } ).count() );
-
- t.save( { a: [ { b: [ 10, 11 ] }, 11 ] } );
- assert.eq( 1, t.find( { 'a.b': { $all: [ 10 ] } } ).count() );
- assert.eq( 1, t.find( { a: { $all: [ 11 ] } } ).count() );
-
- t.save( { a: { b: [ 20, 30 ] } } );
- assert.eq( 1, t.find( { 'a.b': { $all: [ 20 ] } } ).count() );
- assert.eq( 1, t.find( { 'a.b': { $all: [ 20, 30 ] } } ).count() );
-
-
- assert.eq( 5 , t.find( { a : { $all : [1] } } ).count() , "E1" );
- assert.eq( 0 , t.find( { a : { $all : [19] } } ).count() , "E2" );
- assert.eq( 0 , t.find( { a : { $all : [] } } ).count() , "E3" );
+ t.save({a: [1, 2, 3]});
+ t.save({a: [1, 2, 4]});
+ t.save({a: [1, 8, 5]});
+ t.save({a: [1, 8, 6]});
+ t.save({a: [1, 9, 7]});
+ t.save({a: []});
+ t.save({});
+
+ assert.eq(5, t.find({a: {$all: [1]}}).count());
+ assert.eq(2, t.find({a: {$all: [1, 2]}}).count());
+ assert.eq(2, t.find({a: {$all: [1, 8]}}).count());
+ assert.eq(1, t.find({a: {$all: [1, 3]}}).count());
+ assert.eq(2, t.find({a: {$all: [2]}}).count());
+ assert.eq(1, t.find({a: {$all: [2, 3]}}).count());
+ assert.eq(2, t.find({a: {$all: [2, 1]}}).count());
+
+ t.save({a: [2, 2]});
+ assert.eq(3, t.find({a: {$all: [2, 2]}}).count());
+
+ t.save({a: [[2]]});
+ assert.eq(3, t.find({a: {$all: [2]}}).count());
+
+ t.save({a: [{b: [10, 11]}, 11]});
+ assert.eq(1, t.find({'a.b': {$all: [10]}}).count());
+ assert.eq(1, t.find({a: {$all: [11]}}).count());
+
+ t.save({a: {b: [20, 30]}});
+ assert.eq(1, t.find({'a.b': {$all: [20]}}).count());
+ assert.eq(1, t.find({'a.b': {$all: [20, 30]}}).count());
+
+ assert.eq(5, t.find({a: {$all: [1]}}).count(), "E1");
+ assert.eq(0, t.find({a: {$all: [19]}}).count(), "E2");
+ assert.eq(0, t.find({a: {$all: []}}).count(), "E3");
};
doTest();
t.drop();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
doTest();
diff --git a/jstests/core/all2.js b/jstests/core/all2.js
index 6beb346775d..4842460c4db 100644
--- a/jstests/core/all2.js
+++ b/jstests/core/all2.js
@@ -2,85 +2,83 @@
t = db.all2;
t.drop();
-t.save( { a : [ { x : 1 } , { x : 2 } ] } );
-t.save( { a : [ { x : 2 } , { x : 3 } ] } );
-t.save( { a : [ { x : 3 } , { x : 4 } ] } );
+t.save({a: [{x: 1}, {x: 2}]});
+t.save({a: [{x: 2}, {x: 3}]});
+t.save({a: [{x: 3}, {x: 4}]});
state = "no index";
-function check( n , q , e ){
- assert.eq( n , t.find( q ).count() , tojson( q ) + " " + e + " count " + state );
- assert.eq( n , t.find( q ).itcount() , tojson( q ) + " " + e + " itcount" + state );
+function check(n, q, e) {
+ assert.eq(n, t.find(q).count(), tojson(q) + " " + e + " count " + state);
+ assert.eq(n, t.find(q).itcount(), tojson(q) + " " + e + " itcount" + state);
}
-check( 1 , { "a.x" : { $in : [ 1 ] } } , "A" );
-check( 2 , { "a.x" : { $in : [ 2 ] } } , "B" );
+check(1, {"a.x": {$in: [1]}}, "A");
+check(2, {"a.x": {$in: [2]}}, "B");
-check( 2 , { "a.x" : { $in : [ 1 , 2 ] } } , "C" );
-check( 3 , { "a.x" : { $in : [ 2 , 3 ] } } , "D" );
-check( 3 , { "a.x" : { $in : [ 1 , 3 ] } } , "E" );
+check(2, {"a.x": {$in: [1, 2]}}, "C");
+check(3, {"a.x": {$in: [2, 3]}}, "D");
+check(3, {"a.x": {$in: [1, 3]}}, "E");
-check( 1 , { "a.x" : { $all : [ 1 , 2 ] } } , "F" );
-check( 1 , { "a.x" : { $all : [ 2 , 3 ] } } , "G" );
-check( 0 , { "a.x" : { $all : [ 1 , 3 ] } } , "H" );
+check(1, {"a.x": {$all: [1, 2]}}, "F");
+check(1, {"a.x": {$all: [2, 3]}}, "G");
+check(0, {"a.x": {$all: [1, 3]}}, "H");
-t.ensureIndex( { "a.x" : 1 } );
+t.ensureIndex({"a.x": 1});
state = "index";
-check( 1 , { "a.x" : { $in : [ 1 ] } } , "A" );
-check( 2 , { "a.x" : { $in : [ 2 ] } } , "B" );
+check(1, {"a.x": {$in: [1]}}, "A");
+check(2, {"a.x": {$in: [2]}}, "B");
-check( 2 , { "a.x" : { $in : [ 1 , 2 ] } } , "C" );
-check( 3 , { "a.x" : { $in : [ 2 , 3 ] } } , "D" );
-check( 3 , { "a.x" : { $in : [ 1 , 3 ] } } , "E" );
+check(2, {"a.x": {$in: [1, 2]}}, "C");
+check(3, {"a.x": {$in: [2, 3]}}, "D");
+check(3, {"a.x": {$in: [1, 3]}}, "E");
-check( 1 , { "a.x" : { $all : [ 1 , 2 ] } } , "F" );
-check( 1 , { "a.x" : { $all : [ 2 , 3 ] } } , "G" );
-check( 0 , { "a.x" : { $all : [ 1 , 3 ] } } , "H" );
+check(1, {"a.x": {$all: [1, 2]}}, "F");
+check(1, {"a.x": {$all: [2, 3]}}, "G");
+check(0, {"a.x": {$all: [1, 3]}}, "H");
// --- more
t.drop();
-t.save( { a : [ 1 , 2 ] } );
-t.save( { a : [ 2 , 3 ] } );
-t.save( { a : [ 3 , 4 ] } );
+t.save({a: [1, 2]});
+t.save({a: [2, 3]});
+t.save({a: [3, 4]});
state = "more no index";
-check( 1 , { "a" : { $in : [ 1 ] } } , "A" );
-check( 2 , { "a" : { $in : [ 2 ] } } , "B" );
+check(1, {"a": {$in: [1]}}, "A");
+check(2, {"a": {$in: [2]}}, "B");
-check( 2 , { "a" : { $in : [ 1 , 2 ] } } , "C" );
-check( 3 , { "a" : { $in : [ 2 , 3 ] } } , "D" );
-check( 3 , { "a" : { $in : [ 1 , 3 ] } } , "E" );
+check(2, {"a": {$in: [1, 2]}}, "C");
+check(3, {"a": {$in: [2, 3]}}, "D");
+check(3, {"a": {$in: [1, 3]}}, "E");
-check( 1 , { "a" : { $all : [ 1 , 2 ] } } , "F" );
-check( 1 , { "a" : { $all : [ 2 , 3 ] } } , "G" );
-check( 0 , { "a" : { $all : [ 1 , 3 ] } } , "H" );
+check(1, {"a": {$all: [1, 2]}}, "F");
+check(1, {"a": {$all: [2, 3]}}, "G");
+check(0, {"a": {$all: [1, 3]}}, "H");
-t.ensureIndex( { "a" : 1 } );
+t.ensureIndex({"a": 1});
state = "more index";
-check( 1 , { "a" : { $in : [ 1 ] } } , "A" );
-check( 2 , { "a" : { $in : [ 2 ] } } , "B" );
+check(1, {"a": {$in: [1]}}, "A");
+check(2, {"a": {$in: [2]}}, "B");
-check( 2 , { "a" : { $in : [ 1 , 2 ] } } , "C" );
-check( 3 , { "a" : { $in : [ 2 , 3 ] } } , "D" );
-check( 3 , { "a" : { $in : [ 1 , 3 ] } } , "E" );
-
-check( 1 , { "a" : { $all : [ 1 , 2 ] } } , "F" );
-check( 1 , { "a" : { $all : [ 2 , 3 ] } } , "G" );
-check( 0 , { "a" : { $all : [ 1 , 3 ] } } , "H" );
+check(2, {"a": {$in: [1, 2]}}, "C");
+check(3, {"a": {$in: [2, 3]}}, "D");
+check(3, {"a": {$in: [1, 3]}}, "E");
+check(1, {"a": {$all: [1, 2]}}, "F");
+check(1, {"a": {$all: [2, 3]}}, "G");
+check(0, {"a": {$all: [1, 3]}}, "H");
// more 2
state = "more 2";
t.drop();
-t.save( { name : [ "harry","jack","tom" ] } );
-check( 0 , { name : { $all : ["harry","john"] } } , "A" );
-t.ensureIndex( { name : 1 } );
-check( 0 , { name : { $all : ["harry","john"] } } , "B" );
-
+t.save({name: ["harry", "jack", "tom"]});
+check(0, {name: {$all: ["harry", "john"]}}, "A");
+t.ensureIndex({name: 1});
+check(0, {name: {$all: ["harry", "john"]}}, "B");
diff --git a/jstests/core/all3.js b/jstests/core/all3.js
index b7a05321bbf..ae1a9460089 100644
--- a/jstests/core/all3.js
+++ b/jstests/core/all3.js
@@ -5,24 +5,24 @@ t.drop();
t.save({});
-assert.eq( 1, t.count( {foo:{$in:[null]}} ) );
-assert.eq( 1, t.count( {foo:{$all:[null]}} ) );
-assert.eq( 0, t.count( {foo:{$not:{$all:[null]}}} ) );
-assert.eq( 0, t.count( {foo:{$not:{$in:[null]}}} ) );
+assert.eq(1, t.count({foo: {$in: [null]}}));
+assert.eq(1, t.count({foo: {$all: [null]}}));
+assert.eq(0, t.count({foo: {$not: {$all: [null]}}}));
+assert.eq(0, t.count({foo: {$not: {$in: [null]}}}));
t.remove({});
-t.save({foo:1});
-assert.eq( 0, t.count( {foo:{$in:[null]}} ) );
-assert.eq( 0, t.count( {foo:{$all:[null]}} ) );
-assert.eq( 1, t.count( {foo:{$not:{$in:[null]}}} ) );
-assert.eq( 1, t.count( {foo:{$not:{$all:[null]}}} ) );
+t.save({foo: 1});
+assert.eq(0, t.count({foo: {$in: [null]}}));
+assert.eq(0, t.count({foo: {$all: [null]}}));
+assert.eq(1, t.count({foo: {$not: {$in: [null]}}}));
+assert.eq(1, t.count({foo: {$not: {$all: [null]}}}));
t.remove({});
-t.save( {foo:[0,1]} );
-assert.eq( 1, t.count( {foo:{$in:[[0,1]]}} ) );
-assert.eq( 1, t.count( {foo:{$all:[[0,1]]}} ) );
+t.save({foo: [0, 1]});
+assert.eq(1, t.count({foo: {$in: [[0, 1]]}}));
+assert.eq(1, t.count({foo: {$all: [[0, 1]]}}));
t.remove({});
-t.save( {foo:[]} );
-assert.eq( 1, t.count( {foo:{$in:[[]]}} ) );
-assert.eq( 1, t.count( {foo:{$all:[[]]}} ) );
+t.save({foo: []});
+assert.eq(1, t.count({foo: {$in: [[]]}}));
+assert.eq(1, t.count({foo: {$all: [[]]}}));
diff --git a/jstests/core/all4.js b/jstests/core/all4.js
index 109795754bc..eb979289496 100644
--- a/jstests/core/all4.js
+++ b/jstests/core/all4.js
@@ -3,26 +3,26 @@
t = db.jstests_all4;
t.drop();
-function checkQuery( query, val ) {
- assert.eq( val, t.count(query) );
- assert.eq( val, t.find(query).itcount() );
+function checkQuery(query, val) {
+ assert.eq(val, t.count(query));
+ assert.eq(val, t.find(query).itcount());
}
-checkQuery( {a:{$all:[]}}, 0 );
-checkQuery( {a:{$all:[1]}}, 0 );
-checkQuery( {a:{$all:[{$elemMatch:{b:1}}]}}, 0 );
+checkQuery({a: {$all: []}}, 0);
+checkQuery({a: {$all: [1]}}, 0);
+checkQuery({a: {$all: [{$elemMatch: {b: 1}}]}}, 0);
t.save({});
-checkQuery( {a:{$all:[]}}, 0 );
-checkQuery( {a:{$all:[1]}}, 0 );
-checkQuery( {a:{$all:[{$elemMatch:{b:1}}]}}, 0 );
+checkQuery({a: {$all: []}}, 0);
+checkQuery({a: {$all: [1]}}, 0);
+checkQuery({a: {$all: [{$elemMatch: {b: 1}}]}}, 0);
-t.save({a:1});
-checkQuery( {a:{$all:[]}}, 0 );
-checkQuery( {a:{$all:[1]}}, 1 );
-checkQuery( {a:{$all:[{$elemMatch:{b:1}}]}}, 0 );
+t.save({a: 1});
+checkQuery({a: {$all: []}}, 0);
+checkQuery({a: {$all: [1]}}, 1);
+checkQuery({a: {$all: [{$elemMatch: {b: 1}}]}}, 0);
-t.save({a:[{b:1}]});
-checkQuery( {a:{$all:[]}}, 0 );
-checkQuery( {a:{$all:[1]}}, 1 );
-checkQuery( {a:{$all:[{$elemMatch:{b:1}}]}}, 1 );
+t.save({a: [{b: 1}]});
+checkQuery({a: {$all: []}}, 0);
+checkQuery({a: {$all: [1]}}, 1);
+checkQuery({a: {$all: [{$elemMatch: {b: 1}}]}}, 1);
diff --git a/jstests/core/all5.js b/jstests/core/all5.js
index a5d9e312292..a5faaa1767f 100644
--- a/jstests/core/all5.js
+++ b/jstests/core/all5.js
@@ -3,26 +3,26 @@
t = db.jstests_all5;
t.drop();
-function checkMatch( doc ) {
+function checkMatch(doc) {
t.drop();
- t.save( doc );
- assert.eq( 1, t.count( {a:{$elemMatch:{b:null}}} ) );
- assert.eq( 1, t.count( {a:{$all:[{$elemMatch:{b:null}}]}} ) );
+ t.save(doc);
+ assert.eq(1, t.count({a: {$elemMatch: {b: null}}}));
+ assert.eq(1, t.count({a: {$all: [{$elemMatch: {b: null}}]}}));
}
-function checkNoMatch( doc ) {
+function checkNoMatch(doc) {
t.drop();
- t.save( doc );
- assert.eq( 0, t.count( {a:{$all:[{$elemMatch:{b:null}}]}} ) );
+ t.save(doc);
+ assert.eq(0, t.count({a: {$all: [{$elemMatch: {b: null}}]}}));
}
-checkNoMatch( {} );
-checkNoMatch( {a:1} );
+checkNoMatch({});
+checkNoMatch({a: 1});
-checkNoMatch( {a:[]} );
-checkNoMatch( {a:[1]} );
+checkNoMatch({a: []});
+checkNoMatch({a: [1]});
-checkMatch( {a:[{}]} );
-checkMatch( {a:[{c:1}]} );
-checkMatch( {a:[{b:null}]} );
-checkNoMatch( {a:[{b:1}]}, 0 );
+checkMatch({a: [{}]});
+checkMatch({a: [{c: 1}]});
+checkMatch({a: [{b: null}]});
+checkNoMatch({a: [{b: 1}]}, 0);
diff --git a/jstests/core/and.js b/jstests/core/and.js
index ea2fec4554e..a29d95e84e8 100644
--- a/jstests/core/and.js
+++ b/jstests/core/and.js
@@ -3,72 +3,78 @@
t = db.jstests_and;
t.drop();
-t.save( {a:[1,2]} );
-t.save( {a:'foo'} );
+t.save({a: [1, 2]});
+t.save({a: 'foo'});
function check() {
// $and must be an array
- assert.throws( function() { t.find( {$and:4} ).toArray(); } );
+ assert.throws(function() {
+ t.find({$and: 4}).toArray();
+ });
// $and array must not be empty
- assert.throws( function() { t.find( {$and:[]} ).toArray(); } );
+ assert.throws(function() {
+ t.find({$and: []}).toArray();
+ });
// $and elements must be objects
- assert.throws( function() { t.find( {$and:[4]} ).toArray(); } );
+ assert.throws(function() {
+ t.find({$and: [4]}).toArray();
+ });
// Check equality matching
- assert.eq( 1, t.count( {$and:[{a:1}]} ) );
- assert.eq( 1, t.count( {$and:[{a:1},{a:2}]} ) );
- assert.eq( 0, t.count( {$and:[{a:1},{a:3}]} ) );
- assert.eq( 0, t.count( {$and:[{a:1},{a:2},{a:3}]} ) );
- assert.eq( 1, t.count( {$and:[{a:'foo'}]} ) );
- assert.eq( 0, t.count( {$and:[{a:'foo'},{a:'g'}]} ) );
+ assert.eq(1, t.count({$and: [{a: 1}]}));
+ assert.eq(1, t.count({$and: [{a: 1}, {a: 2}]}));
+ assert.eq(0, t.count({$and: [{a: 1}, {a: 3}]}));
+ assert.eq(0, t.count({$and: [{a: 1}, {a: 2}, {a: 3}]}));
+ assert.eq(1, t.count({$and: [{a: 'foo'}]}));
+ assert.eq(0, t.count({$and: [{a: 'foo'}, {a: 'g'}]}));
// Check $and with other fields
- assert.eq( 1, t.count( {a:2,$and:[{a:1}]} ) );
- assert.eq( 0, t.count( {a:0,$and:[{a:1}]} ) );
- assert.eq( 0, t.count( {a:2,$and:[{a:0}]} ) );
- assert.eq( 1, t.count( {a:1,$and:[{a:1}]} ) );
+ assert.eq(1, t.count({a: 2, $and: [{a: 1}]}));
+ assert.eq(0, t.count({a: 0, $and: [{a: 1}]}));
+ assert.eq(0, t.count({a: 2, $and: [{a: 0}]}));
+ assert.eq(1, t.count({a: 1, $and: [{a: 1}]}));
// Check recursive $and
- assert.eq( 1, t.count( {a:2,$and:[{$and:[{a:1}]}]} ) );
- assert.eq( 0, t.count( {a:0,$and:[{$and:[{a:1}]}]} ) );
- assert.eq( 0, t.count( {a:2,$and:[{$and:[{a:0}]}]} ) );
- assert.eq( 1, t.count( {a:1,$and:[{$and:[{a:1}]}]} ) );
+ assert.eq(1, t.count({a: 2, $and: [{$and: [{a: 1}]}]}));
+ assert.eq(0, t.count({a: 0, $and: [{$and: [{a: 1}]}]}));
+ assert.eq(0, t.count({a: 2, $and: [{$and: [{a: 0}]}]}));
+ assert.eq(1, t.count({a: 1, $and: [{$and: [{a: 1}]}]}));
- assert.eq( 1, t.count( {$and:[{a:2},{$and:[{a:1}]}]} ) );
- assert.eq( 0, t.count( {$and:[{a:0},{$and:[{a:1}]}]} ) );
- assert.eq( 0, t.count( {$and:[{a:2},{$and:[{a:0}]}]} ) );
- assert.eq( 1, t.count( {$and:[{a:1},{$and:[{a:1}]}]} ) );
+ assert.eq(1, t.count({$and: [{a: 2}, {$and: [{a: 1}]}]}));
+ assert.eq(0, t.count({$and: [{a: 0}, {$and: [{a: 1}]}]}));
+ assert.eq(0, t.count({$and: [{a: 2}, {$and: [{a: 0}]}]}));
+ assert.eq(1, t.count({$and: [{a: 1}, {$and: [{a: 1}]}]}));
// Some of these cases were more important with an alternative $and syntax
// that was rejected, but they're still valid checks.
// Check simple regex
- assert.eq( 1, t.count( {$and:[{a:/foo/}]} ) );
+ assert.eq(1, t.count({$and: [{a: /foo/}]}));
// Check multiple regexes
- assert.eq( 1, t.count( {$and:[{a:/foo/},{a:/^f/},{a:/o/}]} ) );
- assert.eq( 0, t.count( {$and:[{a:/foo/},{a:/^g/}]} ) );
- assert.eq( 1, t.count( {$and:[{a:/^f/},{a:'foo'}]} ) );
+ assert.eq(1, t.count({$and: [{a: /foo/}, {a: /^f/}, {a: /o/}]}));
+ assert.eq(0, t.count({$and: [{a: /foo/}, {a: /^g/}]}));
+ assert.eq(1, t.count({$and: [{a: /^f/}, {a: 'foo'}]}));
// Check regex flags
- assert.eq( 0, t.count( {$and:[{a:/^F/},{a:'foo'}]} ) );
- assert.eq( 1, t.count( {$and:[{a:/^F/i},{a:'foo'}]} ) );
+ assert.eq(0, t.count({$and: [{a: /^F/}, {a: 'foo'}]}));
+ assert.eq(1, t.count({$and: [{a: /^F/i}, {a: 'foo'}]}));
// Check operator
- assert.eq( 1, t.count( {$and:[{a:{$gt:0}}]} ) );
+ assert.eq(1, t.count({$and: [{a: {$gt: 0}}]}));
// Check where
- assert.eq( 1, t.count( {a:'foo',$where:'this.a=="foo"'} ) );
- assert.eq( 1, t.count( {$and:[{a:'foo'}],$where:'this.a=="foo"'} ) );
- assert.eq( 1, t.count( {$and:[{a:'foo'}],$where:'this.a=="foo"'} ) );
+ assert.eq(1, t.count({a: 'foo', $where: 'this.a=="foo"'}));
+ assert.eq(1, t.count({$and: [{a: 'foo'}], $where: 'this.a=="foo"'}));
+ assert.eq(1, t.count({$and: [{a: 'foo'}], $where: 'this.a=="foo"'}));
// Nested where ok
- assert.eq( 1, t.count({$and:[{$where:'this.a=="foo"'}]}) );
- assert.eq( 1, t.count({$and:[{a:'foo'},{$where:'this.a=="foo"'}]}) );
- assert.eq( 1, t.count({$and:[{$where:'this.a=="foo"'}],$where:'this.a=="foo"'}) );
+ assert.eq(1, t.count({$and: [{$where: 'this.a=="foo"'}]}));
+ assert.eq(1, t.count({$and: [{a: 'foo'}, {$where: 'this.a=="foo"'}]}));
+ assert.eq(1, t.count({$and: [{$where: 'this.a=="foo"'}], $where: 'this.a=="foo"'}));
}
check();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
check();
-assert.eq( 1, t.find({a:1,$and:[{a:2}]}).itcount() );
-assert.eq( 1, t.find({$and:[{a:1},{a:2}]}).itcount() );
+assert.eq(1, t.find({a: 1, $and: [{a: 2}]}).itcount());
+assert.eq(1, t.find({$and: [{a: 1}, {a: 2}]}).itcount());
diff --git a/jstests/core/and2.js b/jstests/core/and2.js
index 6e7214eb7f3..f3b014c13ad 100644
--- a/jstests/core/and2.js
+++ b/jstests/core/and2.js
@@ -3,11 +3,11 @@
t = db.jstests_and2;
t.drop();
-t.save( {a:[1,2]} );
-t.update( {a:1}, {$set:{'a.$':5}} );
-assert.eq( [5,2], t.findOne().a );
+t.save({a: [1, 2]});
+t.update({a: 1}, {$set: {'a.$': 5}});
+assert.eq([5, 2], t.findOne().a);
t.drop();
-t.save( {a:[1,2]} );
-t.update( {$and:[{a:1}]}, {$set:{'a.$':5}} );
-assert.eq( [5,2], t.findOne().a );
+t.save({a: [1, 2]});
+t.update({$and: [{a: 1}]}, {$set: {'a.$': 5}});
+assert.eq([5, 2], t.findOne().a);
diff --git a/jstests/core/and3.js b/jstests/core/and3.js
index a0a779937b1..4f6d6bd28fd 100644
--- a/jstests/core/and3.js
+++ b/jstests/core/and3.js
@@ -3,53 +3,53 @@
t = db.jstests_and3;
t.drop();
-t.save( {a:1} );
-t.save( {a:'foo'} );
+t.save({a: 1});
+t.save({a: 'foo'});
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
-function checkScanMatch( query, docsExamined, n ) {
- var e = t.find( query ).hint( {a:1} ).explain( "executionStats" );
- assert.eq( docsExamined, e.executionStats.totalDocsExamined );
- assert.eq( n, e.executionStats.nReturned );
+function checkScanMatch(query, docsExamined, n) {
+ var e = t.find(query).hint({a: 1}).explain("executionStats");
+ assert.eq(docsExamined, e.executionStats.totalDocsExamined);
+ assert.eq(n, e.executionStats.nReturned);
}
-checkScanMatch( {a:/o/}, 1, 1 );
-checkScanMatch( {a:/a/}, 0, 0 );
-checkScanMatch( {a:{$not:/o/}}, 2, 1 );
-checkScanMatch( {a:{$not:/a/}}, 2, 2 );
-
-checkScanMatch( {$and:[{a:/o/}]}, 1, 1 );
-checkScanMatch( {$and:[{a:/a/}]}, 0, 0 );
-checkScanMatch( {$and:[{a:{$not:/o/}}]}, 2, 1 );
-checkScanMatch( {$and:[{a:{$not:/a/}}]}, 2, 2 );
-checkScanMatch( {$and:[{a:/o/},{a:{$not:/o/}}]}, 1, 0 );
-checkScanMatch( {$and:[{a:/o/},{a:{$not:/a/}}]}, 1, 1 );
-checkScanMatch( {$or:[{a:/o/}]}, 1, 1 );
-checkScanMatch( {$or:[{a:/a/}]}, 0, 0 );
-checkScanMatch( {$nor:[{a:/o/}]}, 2, 1 );
-checkScanMatch( {$nor:[{a:/a/}]}, 2, 2 );
-
-checkScanMatch( {$and:[{$and:[{a:/o/}]}]}, 1, 1 );
-checkScanMatch( {$and:[{$and:[{a:/a/}]}]}, 0, 0 );
-checkScanMatch( {$and:[{$and:[{a:{$not:/o/}}]}]}, 2, 1 );
-checkScanMatch( {$and:[{$and:[{a:{$not:/a/}}]}]}, 2, 2 );
-checkScanMatch( {$and:[{$or:[{a:/o/}]}]}, 1, 1 );
-checkScanMatch( {$and:[{$or:[{a:/a/}]}]}, 0, 0 );
-checkScanMatch( {$or:[{a:{$not:/o/}}]}, 2, 1 );
-checkScanMatch( {$and:[{$or:[{a:{$not:/o/}}]}]}, 2, 1 );
-checkScanMatch( {$and:[{$or:[{a:{$not:/a/}}]}]}, 2, 2 );
-checkScanMatch( {$and:[{$nor:[{a:/o/}]}]}, 2, 1 );
-checkScanMatch( {$and:[{$nor:[{a:/a/}]}]}, 2, 2 );
-
-checkScanMatch( {$where:'this.a==1'}, 2, 1 );
-checkScanMatch( {$and:[{$where:'this.a==1'}]}, 2, 1 );
-
-checkScanMatch( {a:1,$where:'this.a==1'}, 1, 1 );
-checkScanMatch( {a:1,$and:[{$where:'this.a==1'}]}, 1, 1 );
-checkScanMatch( {$and:[{a:1},{$where:'this.a==1'}]}, 1, 1 );
-checkScanMatch( {$and:[{a:1,$where:'this.a==1'}]}, 1, 1 );
-checkScanMatch( {a:1,$and:[{a:1},{a:1,$where:'this.a==1'}]}, 1, 1 );
-
-assert.eq( 0, t.find({a:1,$and:[{a:2}]}).itcount() );
-assert.eq( 0, t.find({$and:[{a:1},{a:2}]}).itcount() );
+checkScanMatch({a: /o/}, 1, 1);
+checkScanMatch({a: /a/}, 0, 0);
+checkScanMatch({a: {$not: /o/}}, 2, 1);
+checkScanMatch({a: {$not: /a/}}, 2, 2);
+
+checkScanMatch({$and: [{a: /o/}]}, 1, 1);
+checkScanMatch({$and: [{a: /a/}]}, 0, 0);
+checkScanMatch({$and: [{a: {$not: /o/}}]}, 2, 1);
+checkScanMatch({$and: [{a: {$not: /a/}}]}, 2, 2);
+checkScanMatch({$and: [{a: /o/}, {a: {$not: /o/}}]}, 1, 0);
+checkScanMatch({$and: [{a: /o/}, {a: {$not: /a/}}]}, 1, 1);
+checkScanMatch({$or: [{a: /o/}]}, 1, 1);
+checkScanMatch({$or: [{a: /a/}]}, 0, 0);
+checkScanMatch({$nor: [{a: /o/}]}, 2, 1);
+checkScanMatch({$nor: [{a: /a/}]}, 2, 2);
+
+checkScanMatch({$and: [{$and: [{a: /o/}]}]}, 1, 1);
+checkScanMatch({$and: [{$and: [{a: /a/}]}]}, 0, 0);
+checkScanMatch({$and: [{$and: [{a: {$not: /o/}}]}]}, 2, 1);
+checkScanMatch({$and: [{$and: [{a: {$not: /a/}}]}]}, 2, 2);
+checkScanMatch({$and: [{$or: [{a: /o/}]}]}, 1, 1);
+checkScanMatch({$and: [{$or: [{a: /a/}]}]}, 0, 0);
+checkScanMatch({$or: [{a: {$not: /o/}}]}, 2, 1);
+checkScanMatch({$and: [{$or: [{a: {$not: /o/}}]}]}, 2, 1);
+checkScanMatch({$and: [{$or: [{a: {$not: /a/}}]}]}, 2, 2);
+checkScanMatch({$and: [{$nor: [{a: /o/}]}]}, 2, 1);
+checkScanMatch({$and: [{$nor: [{a: /a/}]}]}, 2, 2);
+
+checkScanMatch({$where: 'this.a==1'}, 2, 1);
+checkScanMatch({$and: [{$where: 'this.a==1'}]}, 2, 1);
+
+checkScanMatch({a: 1, $where: 'this.a==1'}, 1, 1);
+checkScanMatch({a: 1, $and: [{$where: 'this.a==1'}]}, 1, 1);
+checkScanMatch({$and: [{a: 1}, {$where: 'this.a==1'}]}, 1, 1);
+checkScanMatch({$and: [{a: 1, $where: 'this.a==1'}]}, 1, 1);
+checkScanMatch({a: 1, $and: [{a: 1}, {a: 1, $where: 'this.a==1'}]}, 1, 1);
+
+assert.eq(0, t.find({a: 1, $and: [{a: 2}]}).itcount());
+assert.eq(0, t.find({$and: [{a: 1}, {a: 2}]}).itcount());
diff --git a/jstests/core/andor.js b/jstests/core/andor.js
index 73327acde9b..c574ab261a4 100644
--- a/jstests/core/andor.js
+++ b/jstests/core/andor.js
@@ -4,96 +4,96 @@ t = db.jstests_andor;
t.drop();
// not ok
-function ok( q ) {
- assert.eq( 1, t.find( q ).itcount() );
+function ok(q) {
+ assert.eq(1, t.find(q).itcount());
}
-t.save( {a:1} );
+t.save({a: 1});
test = function() {
-
- ok( {a:1} );
-
- ok( {$and:[{a:1}]} );
- ok( {$or:[{a:1}]} );
-
- ok( {$and:[{$and:[{a:1}]}]} );
- ok( {$or:[{$or:[{a:1}]}]} );
-
- ok( {$and:[{$or:[{a:1}]}]} );
- ok( {$or:[{$and:[{a:1}]}]} );
-
- ok( {$and:[{$and:[{$or:[{a:1}]}]}]} );
- ok( {$and:[{$or:[{$and:[{a:1}]}]}]} );
- ok( {$or:[{$and:[{$and:[{a:1}]}]}]} );
-
- ok( {$or:[{$and:[{$or:[{a:1}]}]}]} );
-
+
+ ok({a: 1});
+
+ ok({$and: [{a: 1}]});
+ ok({$or: [{a: 1}]});
+
+ ok({$and: [{$and: [{a: 1}]}]});
+ ok({$or: [{$or: [{a: 1}]}]});
+
+ ok({$and: [{$or: [{a: 1}]}]});
+ ok({$or: [{$and: [{a: 1}]}]});
+
+ ok({$and: [{$and: [{$or: [{a: 1}]}]}]});
+ ok({$and: [{$or: [{$and: [{a: 1}]}]}]});
+ ok({$or: [{$and: [{$and: [{a: 1}]}]}]});
+
+ ok({$or: [{$and: [{$or: [{a: 1}]}]}]});
+
// now test $nor
-
- ok( {$and:[{a:1}]} );
- ok( {$nor:[{a:2}]} );
-
- ok( {$and:[{$and:[{a:1}]}]} );
- ok( {$nor:[{$nor:[{a:1}]}]} );
-
- ok( {$and:[{$nor:[{a:2}]}]} );
- ok( {$nor:[{$and:[{a:2}]}]} );
-
- ok( {$and:[{$and:[{$nor:[{a:2}]}]}]} );
- ok( {$and:[{$nor:[{$and:[{a:2}]}]}]} );
- ok( {$nor:[{$and:[{$and:[{a:2}]}]}]} );
-
- ok( {$nor:[{$and:[{$nor:[{a:1}]}]}]} );
-
+
+ ok({$and: [{a: 1}]});
+ ok({$nor: [{a: 2}]});
+
+ ok({$and: [{$and: [{a: 1}]}]});
+ ok({$nor: [{$nor: [{a: 1}]}]});
+
+ ok({$and: [{$nor: [{a: 2}]}]});
+ ok({$nor: [{$and: [{a: 2}]}]});
+
+ ok({$and: [{$and: [{$nor: [{a: 2}]}]}]});
+ ok({$and: [{$nor: [{$and: [{a: 2}]}]}]});
+ ok({$nor: [{$and: [{$and: [{a: 2}]}]}]});
+
+ ok({$nor: [{$and: [{$nor: [{a: 1}]}]}]});
+
};
test();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
test();
// Test an inequality base match.
test = function() {
-
- ok( {a:{$ne:2}} );
-
- ok( {$and:[{a:{$ne:2}}]} );
- ok( {$or:[{a:{$ne:2}}]} );
-
- ok( {$and:[{$and:[{a:{$ne:2}}]}]} );
- ok( {$or:[{$or:[{a:{$ne:2}}]}]} );
-
- ok( {$and:[{$or:[{a:{$ne:2}}]}]} );
- ok( {$or:[{$and:[{a:{$ne:2}}]}]} );
-
- ok( {$and:[{$and:[{$or:[{a:{$ne:2}}]}]}]} );
- ok( {$and:[{$or:[{$and:[{a:{$ne:2}}]}]}]} );
- ok( {$or:[{$and:[{$and:[{a:{$ne:2}}]}]}]} );
-
- ok( {$or:[{$and:[{$or:[{a:{$ne:2}}]}]}]} );
-
+
+ ok({a: {$ne: 2}});
+
+ ok({$and: [{a: {$ne: 2}}]});
+ ok({$or: [{a: {$ne: 2}}]});
+
+ ok({$and: [{$and: [{a: {$ne: 2}}]}]});
+ ok({$or: [{$or: [{a: {$ne: 2}}]}]});
+
+ ok({$and: [{$or: [{a: {$ne: 2}}]}]});
+ ok({$or: [{$and: [{a: {$ne: 2}}]}]});
+
+ ok({$and: [{$and: [{$or: [{a: {$ne: 2}}]}]}]});
+ ok({$and: [{$or: [{$and: [{a: {$ne: 2}}]}]}]});
+ ok({$or: [{$and: [{$and: [{a: {$ne: 2}}]}]}]});
+
+ ok({$or: [{$and: [{$or: [{a: {$ne: 2}}]}]}]});
+
// now test $nor
-
- ok( {$and:[{a:{$ne:2}}]} );
- ok( {$nor:[{a:{$ne:1}}]} );
-
- ok( {$and:[{$and:[{a:{$ne:2}}]}]} );
- ok( {$nor:[{$nor:[{a:{$ne:2}}]}]} );
-
- ok( {$and:[{$nor:[{a:{$ne:1}}]}]} );
- ok( {$nor:[{$and:[{a:{$ne:1}}]}]} );
-
- ok( {$and:[{$and:[{$nor:[{a:{$ne:1}}]}]}]} );
- ok( {$and:[{$nor:[{$and:[{a:{$ne:1}}]}]}]} );
- ok( {$nor:[{$and:[{$and:[{a:{$ne:1}}]}]}]} );
-
- ok( {$nor:[{$and:[{$nor:[{a:{$ne:2}}]}]}]} );
-
+
+ ok({$and: [{a: {$ne: 2}}]});
+ ok({$nor: [{a: {$ne: 1}}]});
+
+ ok({$and: [{$and: [{a: {$ne: 2}}]}]});
+ ok({$nor: [{$nor: [{a: {$ne: 2}}]}]});
+
+ ok({$and: [{$nor: [{a: {$ne: 1}}]}]});
+ ok({$nor: [{$and: [{a: {$ne: 1}}]}]});
+
+ ok({$and: [{$and: [{$nor: [{a: {$ne: 1}}]}]}]});
+ ok({$and: [{$nor: [{$and: [{a: {$ne: 1}}]}]}]});
+ ok({$nor: [{$and: [{$and: [{a: {$ne: 1}}]}]}]});
+
+ ok({$nor: [{$and: [{$nor: [{a: {$ne: 2}}]}]}]});
+
};
t.drop();
-t.save( {a:1} );
+t.save({a: 1});
test();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
test();
diff --git a/jstests/core/apitest_db.js b/jstests/core/apitest_db.js
index f2271649016..688baa8dd4f 100644
--- a/jstests/core/apitest_db.js
+++ b/jstests/core/apitest_db.js
@@ -2,40 +2,44 @@
* Tests for the db object enhancement
*/
-assert( "test" == db, "wrong database currently not test" );
+assert("test" == db, "wrong database currently not test");
-dd = function( x ){
- //print( x );
+dd = function(x) {
+ // print( x );
};
-dd( "a" );
+dd("a");
-
-dd( "b" );
+dd("b");
/*
* be sure the public collection API is complete
*/
-assert(db.createCollection , "createCollection" );
-assert(db.getProfilingLevel , "getProfilingLevel" );
-assert(db.setProfilingLevel , "setProfilingLevel" );
-assert(db.dbEval , "dbEval" );
-assert(db.group , "group" );
+assert(db.createCollection, "createCollection");
+assert(db.getProfilingLevel, "getProfilingLevel");
+assert(db.setProfilingLevel, "setProfilingLevel");
+assert(db.dbEval, "dbEval");
+assert(db.group, "group");
-dd( "c" );
+dd("c");
/*
* test createCollection
*/
-db.getCollection( "test" ).drop();
-db.getCollectionNames().forEach( function(x) { assert(x != "test"); });
+db.getCollection("test").drop();
+db.getCollectionNames().forEach(function(x) {
+ assert(x != "test");
+});
-dd( "d" );
+dd("d");
db.createCollection("test");
var found = false;
-db.getCollectionNames().forEach( function(x) { if (x == "test") found = true; });
+db.getCollectionNames().forEach(function(x) {
+ if (x == "test")
+ found = true;
+});
assert(found, "found test.test in collection infos");
// storageEngine in collection options must:
@@ -62,37 +66,41 @@ assert.commandWorked(db.createCollection('test', {storageEngine: validStorageEng
var collectionInfos = db.getCollectionInfos({name: 'test'});
assert.eq(1, collectionInfos.length, "'test' collection not created");
assert.eq('test', collectionInfos[0].name, "'test' collection not created");
-assert.docEq(validStorageEngineOptions, collectionInfos[0].options.storageEngine,
+assert.docEq(validStorageEngineOptions,
+ collectionInfos[0].options.storageEngine,
'storage engine options not found in listCommands result');
// The indexOptionDefaults must be a document that contains only a storageEngine field.
db.idxOptions.drop();
assert.commandFailed(db.createCollection('idxOptions', {indexOptionDefaults: 'not a document'}));
-assert.commandFailed(db.createCollection('idxOptions', {
- indexOptionDefaults: {unknownOption: true}
-}), 'created a collection with an unknown option to indexOptionDefaults');
+assert.commandFailed(db.createCollection('idxOptions',
+ {indexOptionDefaults: {unknownOption: true}}),
+ 'created a collection with an unknown option to indexOptionDefaults');
assert.commandWorked(db.createCollection('idxOptions', {indexOptionDefaults: {}}),
'should have been able to specify an empty object for indexOptionDefaults');
assert(db.idxOptions.drop());
-assert.commandWorked(db.createCollection('idxOptions', {
- indexOptionDefaults: {storageEngine: {}}
-}), 'should have been able to configure zero storage engines');
+assert.commandWorked(db.createCollection('idxOptions', {indexOptionDefaults: {storageEngine: {}}}),
+ 'should have been able to configure zero storage engines');
assert(db.idxOptions.drop());
// The storageEngine subdocument must configure only registered storage engines.
-assert.commandFailed(db.createCollection('idxOptions', {
- indexOptionDefaults: {storageEngine: {unknownStorageEngine: {}}}
-}), 'configured an unregistered storage engine');
+assert.commandFailed(
+ db.createCollection('idxOptions',
+ {indexOptionDefaults: {storageEngine: {unknownStorageEngine: {}}}}),
+ 'configured an unregistered storage engine');
// The storageEngine subdocument must contain valid storage engine options.
-assert.commandFailed(db.createCollection('idxOptions', {
- indexOptionDefaults: {storageEngine: invalidStorageEngineOptions}
-}), 'configured a storage engine with invalid options');
+assert.commandFailed(
+ db.createCollection('idxOptions',
+ {indexOptionDefaults: {storageEngine: invalidStorageEngineOptions}}),
+ 'configured a storage engine with invalid options');
// Tests that a non-active storage engine can be configured so long as it is registered.
if (db.serverBuildInfo().bits === 64) {
// wiredTiger is not a registered storage engine on 32-bit systems.
- var indexOptions = {storageEngine: {}};
+ var indexOptions = {
+ storageEngine: {}
+ };
if (storageEngineName === 'wiredTiger') {
indexOptions.storageEngine.mmapv1 = {};
} else {
@@ -104,9 +112,8 @@ if (db.serverBuildInfo().bits === 64) {
}
// Tests that the indexOptionDefaults are retrievable from the collection options.
-assert.commandWorked(db.createCollection('idxOptions', {
- indexOptionDefaults: {storageEngine: validStorageEngineOptions}
-}));
+assert.commandWorked(db.createCollection(
+ 'idxOptions', {indexOptionDefaults: {storageEngine: validStorageEngineOptions}}));
var collectionInfos = db.getCollectionInfos({name: 'idxOptions'});
assert.eq(1, collectionInfos.length, "'idxOptions' collection not created");
@@ -114,12 +121,12 @@ assert.docEq({storageEngine: validStorageEngineOptions},
collectionInfos[0].options.indexOptionDefaults,
'indexOptionDefaults were not applied: ' + tojson(collectionInfos));
-dd( "e" );
+dd("e");
/*
* profile level
- */
-
+ */
+
db.setProfilingLevel(0);
assert(db.getProfilingLevel() == 0, "prof level 0");
@@ -132,22 +139,18 @@ assert(db.getProfilingLevel() == 2, "p2");
db.setProfilingLevel(0);
assert(db.getProfilingLevel() == 0, "prof level 0");
-dd( "f" );
+dd("f");
asserted = false;
try {
db.setProfilingLevel(10);
assert(false);
-}
-catch (e) {
+} catch (e) {
asserted = true;
assert(e.dbSetProfilingException);
}
-assert( asserted, "should have asserted" );
-
-dd( "g" );
-
-
+assert(asserted, "should have asserted");
-assert.eq( "foo" , db.getSisterDB( "foo" ).getName() );
-assert.eq( "foo" , db.getSiblingDB( "foo" ).getName() );
+dd("g");
+assert.eq("foo", db.getSisterDB("foo").getName());
+assert.eq("foo", db.getSiblingDB("foo").getName());
diff --git a/jstests/core/apitest_dbcollection.js b/jstests/core/apitest_dbcollection.js
index d60778363b8..d542ad6d7e1 100644
--- a/jstests/core/apitest_dbcollection.js
+++ b/jstests/core/apitest_dbcollection.js
@@ -2,47 +2,45 @@
* Tests for the db collection
*/
-
-
/*
* test drop
*/
-db.getCollection( "test_db" ).drop();
-assert.eq(0, db.getCollection( "test_db" ).find().length(), "1");
+db.getCollection("test_db").drop();
+assert.eq(0, db.getCollection("test_db").find().length(), "1");
-db.getCollection( "test_db" ).save({a:1});
-assert.eq(1, db.getCollection( "test_db" ).find().length(), "2");
+db.getCollection("test_db").save({a: 1});
+assert.eq(1, db.getCollection("test_db").find().length(), "2");
-db.getCollection( "test_db" ).drop();
-assert.eq(0, db.getCollection( "test_db" ).find().length(), "3");
+db.getCollection("test_db").drop();
+assert.eq(0, db.getCollection("test_db").find().length(), "3");
/*
* test count
*/
-assert.eq(0, db.getCollection( "test_db" ).count(), "4");
-db.getCollection( "test_db" ).save({a:1});
-assert.eq(1, db.getCollection( "test_db" ).count(), "5");
+assert.eq(0, db.getCollection("test_db").count(), "4");
+db.getCollection("test_db").save({a: 1});
+assert.eq(1, db.getCollection("test_db").count(), "5");
for (i = 0; i < 100; i++) {
- db.getCollection( "test_db" ).save({a:1});
+ db.getCollection("test_db").save({a: 1});
}
-assert.eq(101, db.getCollection( "test_db" ).count(), "6");
-db.getCollection( "test_db" ).drop();
-assert.eq(0, db.getCollection( "test_db" ).count(), "7");
+assert.eq(101, db.getCollection("test_db").count(), "6");
+db.getCollection("test_db").drop();
+assert.eq(0, db.getCollection("test_db").count(), "7");
- /*
- * test validate
- */
+/*
+ * test validate
+ */
-db.getCollection( "test_db" ).drop();
-assert.eq(0, db.getCollection( "test_db" ).count(), "8");
+db.getCollection("test_db").drop();
+assert.eq(0, db.getCollection("test_db").count(), "8");
for (i = 0; i < 100; i++) {
- db.getCollection( "test_db" ).save({a:1});
+ db.getCollection("test_db").save({a: 1});
}
(function() {
- var validateResult = assert.commandWorked(db.getCollection( "test_db" ).validate());
+ var validateResult = assert.commandWorked(db.getCollection("test_db").validate());
// Extract validation results from mongos output if running in a sharded context.
if (jsTest.isMongos(db.getMongo())) {
// Sample mongos format:
@@ -73,7 +71,8 @@ for (i = 0; i < 100; i++) {
validateResult = result;
}
- assert.eq('test.test_db', validateResult.ns,
+ assert.eq('test.test_db',
+ validateResult.ns,
'incorrect namespace in db.collection.validate() result: ' + tojson(validateResult));
assert(validateResult.valid, 'collection validation failed');
assert.eq(100, validateResult.nrecords, 11);
@@ -83,82 +82,83 @@ for (i = 0; i < 100; i++) {
* test deleteIndex, deleteIndexes
*/
-db.getCollection( "test_db" ).drop();
-assert.eq(0, db.getCollection( "test_db" ).count(), "12");
-db.getCollection( "test_db" ).dropIndexes();
-assert.eq(0, db.getCollection( "test_db" ).getIndexes().length, "13");
+db.getCollection("test_db").drop();
+assert.eq(0, db.getCollection("test_db").count(), "12");
+db.getCollection("test_db").dropIndexes();
+assert.eq(0, db.getCollection("test_db").getIndexes().length, "13");
-db.getCollection( "test_db" ).save({a:10});
-assert.eq(1, db.getCollection( "test_db" ).getIndexes().length, "14");
+db.getCollection("test_db").save({a: 10});
+assert.eq(1, db.getCollection("test_db").getIndexes().length, "14");
-db.getCollection( "test_db" ).ensureIndex({a:1});
-db.getCollection( "test_db" ).save({a:10});
+db.getCollection("test_db").ensureIndex({a: 1});
+db.getCollection("test_db").save({a: 10});
-print( tojson( db.getCollection( "test_db" ).getIndexes() ) );
-assert.eq(2, db.getCollection( "test_db" ).getIndexes().length, "15");
+print(tojson(db.getCollection("test_db").getIndexes()));
+assert.eq(2, db.getCollection("test_db").getIndexes().length, "15");
-db.getCollection( "test_db" ).dropIndex({a:1});
-assert.eq(1, db.getCollection( "test_db" ).getIndexes().length, "16");
+db.getCollection("test_db").dropIndex({a: 1});
+assert.eq(1, db.getCollection("test_db").getIndexes().length, "16");
-db.getCollection( "test_db" ).save({a:10});
-db.getCollection( "test_db" ).ensureIndex({a:1});
-db.getCollection( "test_db" ).save({a:10});
+db.getCollection("test_db").save({a: 10});
+db.getCollection("test_db").ensureIndex({a: 1});
+db.getCollection("test_db").save({a: 10});
-assert.eq(2, db.getCollection( "test_db" ).getIndexes().length, "17");
+assert.eq(2, db.getCollection("test_db").getIndexes().length, "17");
-db.getCollection( "test_db" ).dropIndex("a_1");
-assert.eq(1, db.getCollection( "test_db" ).getIndexes().length, "18");
+db.getCollection("test_db").dropIndex("a_1");
+assert.eq(1, db.getCollection("test_db").getIndexes().length, "18");
-db.getCollection( "test_db" ).save({a:10, b:11});
-db.getCollection( "test_db" ).ensureIndex({a:1});
-db.getCollection( "test_db" ).ensureIndex({b:1});
-db.getCollection( "test_db" ).save({a:10, b:12});
+db.getCollection("test_db").save({a: 10, b: 11});
+db.getCollection("test_db").ensureIndex({a: 1});
+db.getCollection("test_db").ensureIndex({b: 1});
+db.getCollection("test_db").save({a: 10, b: 12});
-assert.eq(3, db.getCollection( "test_db" ).getIndexes().length, "19");
+assert.eq(3, db.getCollection("test_db").getIndexes().length, "19");
-db.getCollection( "test_db" ).dropIndex({b:1});
-assert.eq(2, db.getCollection( "test_db" ).getIndexes().length, "20");
-db.getCollection( "test_db" ).dropIndex({a:1});
-assert.eq(1, db.getCollection( "test_db" ).getIndexes().length, "21");
+db.getCollection("test_db").dropIndex({b: 1});
+assert.eq(2, db.getCollection("test_db").getIndexes().length, "20");
+db.getCollection("test_db").dropIndex({a: 1});
+assert.eq(1, db.getCollection("test_db").getIndexes().length, "21");
-db.getCollection( "test_db" ).save({a:10, b:11});
-db.getCollection( "test_db" ).ensureIndex({a:1});
-db.getCollection( "test_db" ).ensureIndex({b:1});
-db.getCollection( "test_db" ).save({a:10, b:12});
+db.getCollection("test_db").save({a: 10, b: 11});
+db.getCollection("test_db").ensureIndex({a: 1});
+db.getCollection("test_db").ensureIndex({b: 1});
+db.getCollection("test_db").save({a: 10, b: 12});
-assert.eq(3, db.getCollection( "test_db" ).getIndexes().length, "22");
+assert.eq(3, db.getCollection("test_db").getIndexes().length, "22");
-db.getCollection( "test_db" ).dropIndexes();
-assert.eq(1, db.getCollection( "test_db" ).getIndexes().length, "23");
+db.getCollection("test_db").dropIndexes();
+assert.eq(1, db.getCollection("test_db").getIndexes().length, "23");
-db.getCollection( "test_db" ).find();
+db.getCollection("test_db").find();
-db.getCollection( "test_db" ).drop();
-assert.eq(0, db.getCollection( "test_db" ).getIndexes().length, "24");
+db.getCollection("test_db").drop();
+assert.eq(0, db.getCollection("test_db").getIndexes().length, "24");
/*
* stats()
*/
- (function() {
+(function() {
var t = db.apttest_dbcollection;
// Non-existent collection.
t.drop();
- assert.commandFailed(t.stats(),
- 'db.collection.stats() should fail on non-existent collection');
+ assert.commandFailed(t.stats(), 'db.collection.stats() should fail on non-existent collection');
// scale - passed to stats() as sole numerical argument or part of an options object.
t.drop();
- assert.commandWorked(db.createCollection(t.getName(), {capped: true, size: 10*1024*1024}));
- var collectionStats = assert.commandWorked(t.stats(1024*1024));
- assert.eq(10, collectionStats.maxSize,
+ assert.commandWorked(db.createCollection(t.getName(), {capped: true, size: 10 * 1024 * 1024}));
+ var collectionStats = assert.commandWorked(t.stats(1024 * 1024));
+ assert.eq(10,
+ collectionStats.maxSize,
'db.collection.stats(scale) - capped collection size scaled incorrectly: ' +
- tojson(collectionStats));
- var collectionStats = assert.commandWorked(t.stats({scale: 1024*1024}));
- assert.eq(10, collectionStats.maxSize,
+ tojson(collectionStats));
+ var collectionStats = assert.commandWorked(t.stats({scale: 1024 * 1024}));
+ assert.eq(10,
+ collectionStats.maxSize,
'db.collection.stats({scale: N}) - capped collection size scaled incorrectly: ' +
- tojson(collectionStats));
+ tojson(collectionStats));
// indexDetails - If true, includes 'indexDetails' field in results. Default: false.
t.drop();
@@ -167,47 +167,54 @@ assert.eq(0, db.getCollection( "test_db" ).getIndexes().length, "24");
collectionStats = assert.commandWorked(t.stats());
assert(!collectionStats.hasOwnProperty('indexDetails'),
'unexpected indexDetails found in db.collection.stats() result: ' +
- tojson(collectionStats));
+ tojson(collectionStats));
collectionStats = assert.commandWorked(t.stats({indexDetails: false}));
assert(!collectionStats.hasOwnProperty('indexDetails'),
'unexpected indexDetails found in db.collection.stats({indexDetails: true}) result: ' +
- tojson(collectionStats));
+ tojson(collectionStats));
collectionStats = assert.commandWorked(t.stats({indexDetails: true}));
assert(collectionStats.hasOwnProperty('indexDetails'),
'indexDetails missing from db.collection.stats({indexDetails: true}) result: ' +
- tojson(collectionStats));
+ tojson(collectionStats));
// Returns index name.
function getIndexName(indexKey) {
var indexes = t.getIndexes().filter(function(doc) {
return friendlyEqual(doc.key, indexKey);
});
- assert.eq(1, indexes.length, tojson(indexKey) + ' not found in getIndexes() result: ' +
- tojson(t.getIndexes()));
+ assert.eq(
+ 1,
+ indexes.length,
+ tojson(indexKey) + ' not found in getIndexes() result: ' + tojson(t.getIndexes()));
return indexes[0].name;
}
function checkIndexDetails(options, indexName) {
var collectionStats = assert.commandWorked(t.stats(options));
assert(collectionStats.hasOwnProperty('indexDetails'),
- 'indexDetails missing from ' + 'db.collection.stats(' + tojson(options) +
- ') result: ' + tojson(collectionStats));
+ 'indexDetails missing from ' +
+ 'db.collection.stats(' + tojson(options) + ') result: ' +
+ tojson(collectionStats));
// Currently, indexDetails is only supported with WiredTiger.
var storageEngine = jsTest.options().storageEngine;
if (storageEngine && storageEngine !== 'wiredTiger') {
return;
}
- assert.eq(1, Object.keys(collectionStats.indexDetails).length,
+ assert.eq(1,
+ Object.keys(collectionStats.indexDetails).length,
'indexDetails must have exactly one entry');
assert(collectionStats.indexDetails[indexName],
indexName + ' missing from indexDetails: ' + tojson(collectionStats.indexDetails));
- assert.neq(0, Object.keys(collectionStats.indexDetails[indexName]).length,
+ assert.neq(0,
+ Object.keys(collectionStats.indexDetails[indexName]).length,
indexName + ' exists in indexDetails but contains no information: ' +
- tojson(collectionStats));
+ tojson(collectionStats));
}
// indexDetailsKey - show indexDetails results for this index key only.
- var indexKey = {a: 1};
+ var indexKey = {
+ a: 1
+ };
var indexName = getIndexName(indexKey);
checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
@@ -218,12 +225,13 @@ assert.eq(0, db.getCollection( "test_db" ).getIndexes().length, "24");
var error = assert.throws(function() {
t.stats({indexDetails: true, indexDetailsKey: indexKey, indexDetailsName: indexName});
}, null, 'indexDetailsKey and indexDetailsName cannot be used at the same time');
- assert.eq(Error, error.constructor,
+ assert.eq(Error,
+ error.constructor,
'db.collection.stats() failed when both indexDetailsKey and indexDetailsName ' +
- 'are used but with incorrect error type');
+ 'are used but with incorrect error type');
t.drop();
- }());
+}());
/*
* test db.collection.totalSize()
@@ -236,19 +244,24 @@ assert.eq(0, db.getCollection( "test_db" ).getIndexes().length, "24");
t.drop();
var failedStats = assert.commandFailed(t.stats());
assert.eq(failedStats.storageSize, t.storageSize());
- assert.eq(undefined, t.storageSize(),
+ assert.eq(undefined,
+ t.storageSize(),
'db.collection.storageSize() on empty collection should return undefined');
assert.eq(failedStats.totalIndexSize, t.totalIndexSize());
- assert.eq(undefined, t.totalIndexSize(),
+ assert.eq(undefined,
+ t.totalIndexSize(),
'db.collection.totalIndexSize() on empty collection should return undefined');
- assert.eq(undefined, t.totalSize(),
+ assert.eq(undefined,
+ t.totalSize(),
'db.collection.totalSize() on empty collection should return undefined');
t.save({a: 1});
var stats = assert.commandWorked(t.stats());
- assert.neq(undefined, t.storageSize(),
+ assert.neq(undefined,
+ t.storageSize(),
'db.collection.storageSize() cannot be undefined on a non-empty collection');
- assert.neq(undefined, t.totalIndexSize(),
+ assert.neq(undefined,
+ t.totalIndexSize(),
'db.collection.totalIndexSize() cannot be undefined on a non-empty collection');
if (db.isMaster().msg !== 'isdbgrid' && db.serverStatus().storageEngine.name === 'mmapv1') {
@@ -256,7 +269,8 @@ assert.eq(0, db.getCollection( "test_db" ).getIndexes().length, "24");
// collection.
assert.eq(stats.storageSize, t.storageSize());
assert.eq(stats.totalIndexSize, t.totalIndexSize());
- assert.eq(t.storageSize() + t.totalIndexSize(), t.totalSize(),
+ assert.eq(t.storageSize() + t.totalIndexSize(),
+ t.totalSize(),
'incorrect db.collection.totalSize() on a non-empty collection');
}
diff --git a/jstests/core/apply_ops1.js b/jstests/core/apply_ops1.js
index 6129c202d59..8a19caa9f23 100644
--- a/jstests/core/apply_ops1.js
+++ b/jstests/core/apply_ops1.js
@@ -57,9 +57,8 @@
// Empty 'ns' field value in operation type other than 'n'.
assert.commandFailed(
- db.adminCommand({applyOps: [{op: 'c', ns: ''}]}),
- 'applyOps should fail on non-"n" operation type with empty "ns" field value'
- );
+ db.adminCommand({applyOps: [{op: 'c', ns: ''}]}),
+ 'applyOps should fail on non-"n" operation type with empty "ns" field value');
// Missing 'o' field value in an operation of type 'i' on 'system.indexes' collection.
assert.commandFailedWithCode(
@@ -75,76 +74,100 @@
// Missing 'ns' field in index spec.
assert.commandFailedWithCode(
- db.adminCommand({applyOps: [{op: 'i', ns: db.getName() + '.system.indexes', o: {
- key: {a: 1},
- name: 'a_1',
- }}]}),
+ db.adminCommand({
+ applyOps: [{
+ op: 'i',
+ ns: db.getName() + '.system.indexes',
+ o: {
+ key: {a: 1},
+ name: 'a_1',
+ }
+ }]
+ }),
ErrorCodes.NoSuchKey,
'applyOps should fail on system.indexes insert operation with missing index namespace');
// Non-string 'ns' field in index spec.
assert.commandFailedWithCode(
- db.adminCommand({applyOps: [{op: 'i', ns: db.getName() + '.system.indexes', o: {
- ns: 12345,
- key: {a: 1},
- name: 'a_1',
- }}]}),
+ db.adminCommand({
+ applyOps: [{
+ op: 'i',
+ ns: db.getName() + '.system.indexes',
+ o: {
+ ns: 12345,
+ key: {a: 1},
+ name: 'a_1',
+ }
+ }]
+ }),
ErrorCodes.TypeMismatch,
'applyOps should fail on system.indexes insert operation with non-string index namespace');
// Invalid 'ns' field in index spec.
assert.commandFailedWithCode(
- db.adminCommand({applyOps: [{op: 'i', ns: db.getName() + '.system.indexes', o: {
- ns: 'invalid_namespace',
- key: {a: 1},
- name: 'a_1',
- }}]}),
+ db.adminCommand({
+ applyOps: [{
+ op: 'i',
+ ns: db.getName() + '.system.indexes',
+ o: {
+ ns: 'invalid_namespace',
+ key: {a: 1},
+ name: 'a_1',
+ }
+ }]
+ }),
ErrorCodes.InvalidNamespace,
'applyOps should fail on system.indexes insert operation with invalid index namespace');
// Inconsistent database name in index spec namespace.
assert.commandFailedWithCode(
- db.adminCommand({applyOps: [{op: 'i', ns: db.getName() + '.system.indexes', o: {
- ns: 'baddbprefix' + t.getFullName(),
- key: {a: 1},
- name: 'a_1',
- }}]}),
+ db.adminCommand({
+ applyOps: [{
+ op: 'i',
+ ns: db.getName() + '.system.indexes',
+ o: {
+ ns: 'baddbprefix' + t.getFullName(),
+ key: {a: 1},
+ name: 'a_1',
+ }
+ }]
+ }),
ErrorCodes.InvalidNamespace,
'applyOps should fail on system.indexes insert operation with index namespace containing ' +
- 'inconsistent database name');
+ 'inconsistent database name');
// Valid 'ns' field value in unknown operation type 'x'.
assert.commandFailed(
- db.adminCommand({applyOps: [{op: 'x', ns: t.getFullName()}]}),
- 'applyOps should fail on unknown operation type "x" with valid "ns" value'
- );
-
- assert.eq(0, t.find().count() , "Non-zero amount of documents in collection to start");
- assert.commandFailed(db.adminCommand(
- {applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}
- ),
+ db.adminCommand({applyOps: [{op: 'x', ns: t.getFullName()}]}),
+ 'applyOps should fail on unknown operation type "x" with valid "ns" value');
+
+ assert.eq(0, t.find().count(), "Non-zero amount of documents in collection to start");
+ assert.commandFailed(
+ db.adminCommand({applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}),
"Applying an insert operation on a non-existent collection should fail");
assert.commandWorked(db.createCollection(t.getName()));
- var a = db.adminCommand(
- {applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}
- );
- assert.eq(1, t.find().count() , "Valid insert failed");
+ var a = db.adminCommand({applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]});
+ assert.eq(1, t.find().count(), "Valid insert failed");
assert.eq(true, a.results[0], "Bad result value for valid insert");
- a = assert.commandWorked(db.adminCommand(
- {applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}
- ));
- assert.eq(1, t.find().count() , "Duplicate insert failed");
+ a = assert.commandWorked(
+ db.adminCommand({applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 17}}]}));
+ assert.eq(1, t.find().count(), "Duplicate insert failed");
assert.eq(true, a.results[0], "Bad result value for duplicate insert");
- var o = {_id: 5, x: 17};
- assert.eq(o , t.findOne() , "Mismatching document inserted.");
+ var o = {
+ _id: 5,
+ x: 17
+ };
+ assert.eq(o, t.findOne(), "Mismatching document inserted.");
- var res = db.runCommand({applyOps: [
- {op: "u", ns: t.getFullName(), o2: { _id : 5 }, o: {$inc: {x: 1}}},
- {op: "u", ns: t.getFullName(), o2: { _id : 5 }, o: {$inc: {x: 1}}}
- ]});
+ var res = db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x: 1}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x: 1}}}
+ ]
+ });
o.x++;
o.x++;
@@ -154,14 +177,14 @@
assert.eq(true, res.results[0], "Bad result value for valid update");
assert.eq(true, res.results[1], "Bad result value for valid update");
- //preCondition fully matches
- res = db.runCommand({applyOps:
- [
- {op: "u", ns: t.getFullName(), o2: {_id : 5}, o: {$inc: {x :1}}},
- {op: "u", ns: t.getFullName(), o2: {_id : 5}, o: {$inc: {x :1}}}
- ],
- preCondition: [{ns : t.getFullName(), q: {_id: 5}, res:{x: 19}}]
- });
+ // preCondition fully matches
+ res = db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x: 1}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x: 1}}}
+ ],
+ preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}]
+ });
o.x++;
o.x++;
@@ -171,68 +194,78 @@
assert.eq(true, res.results[0], "Bad result value for valid update");
assert.eq(true, res.results[1], "Bad result value for valid update");
- //preCondition doesn't match ns
- res = db.runCommand({applyOps:
- [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x: 1}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x: 1}}}
- ],
- preCondition: [{ns: "foo.otherName", q: {_id: 5}, res: {x: 21}}]
- });
+ // preCondition doesn't match ns
+ res = db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x: 1}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x: 1}}}
+ ],
+ preCondition: [{ns: "foo.otherName", q: {_id: 5}, res: {x: 21}}]
+ });
assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied");
- //preCondition doesn't match query
- res = db.runCommand({applyOps:
- [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x : 1}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x : 1}}}
- ],
- preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}]
- });
+ // preCondition doesn't match query
+ res = db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x: 1}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x: 1}}}
+ ],
+ preCondition: [{ns: t.getFullName(), q: {_id: 5}, res: {x: 19}}]
+ });
assert.eq(o, t.findOne(), "preCondition didn't match, but ops were still applied");
- res = db.runCommand({applyOps:
- [
- {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x : 1}}},
- {op: "u", ns: t.getFullName(), o2: {_id: 6}, o: {$inc: {x : 1}}}
- ]
- });
+ res = db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 5}, o: {$inc: {x: 1}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 6}, o: {$inc: {x: 1}}}
+ ]
+ });
assert.eq(true, res.results[0], "Valid update failed");
assert.eq(true, res.results[1], "Valid update failed");
// Foreground index build.
res = assert.commandWorked(db.adminCommand({
- applyOps: [{"op": "i", "ns": db.getName() + ".system.indexes", "o": {
- ns: t.getFullName(),
- key: {a: 1},
- name: "a_1",
- }
- }]}));
+ applyOps: [{
+ "op": "i",
+ "ns": db.getName() + ".system.indexes",
+ "o": {
+ ns: t.getFullName(),
+ key: {a: 1},
+ name: "a_1",
+ }
+ }]
+ }));
assert.eq(1, res.applied, "Incorrect number of operations applied");
assert.eq(true, res.results[0], "Foreground index creation failed");
res = t.getIndexes();
- assert.eq(
- 1,
- res.filter(function(element, index, array) {return element.name == 'a_1';}).length,
- 'Foreground index not found in listIndexes result: ' + tojson(res));
+ assert.eq(1,
+ res.filter(function(element, index, array) {
+ return element.name == 'a_1';
+ }).length,
+ 'Foreground index not found in listIndexes result: ' + tojson(res));
// Background indexes are created in the foreground when processed by applyOps.
res = assert.commandWorked(db.adminCommand({
- applyOps: [{"op": "i", "ns": db.getName() + ".system.indexes", "o": {
- ns: t.getFullName(),
- key: {b: 1},
- name: "b_1",
- background: true,
- }
- }]}));
+ applyOps: [{
+ "op": "i",
+ "ns": db.getName() + ".system.indexes",
+ "o": {
+ ns: t.getFullName(),
+ key: {b: 1},
+ name: "b_1",
+ background: true,
+ }
+ }]
+ }));
assert.eq(1, res.applied, "Incorrect number of operations applied");
assert.eq(true, res.results[0], "Background index creation failed");
res = t.getIndexes();
- assert.eq(
- 1,
- res.filter(function(element, index, array) {return element.name == 'b_1';}).length,
- 'Background index not found in listIndexes result: ' + tojson(res));
+ assert.eq(1,
+ res.filter(function(element, index, array) {
+ return element.name == 'b_1';
+ }).length,
+ 'Background index not found in listIndexes result: ' + tojson(res));
})();
diff --git a/jstests/core/apply_ops2.js b/jstests/core/apply_ops2.js
index 1a5923c3465..bf804214846 100644
--- a/jstests/core/apply_ops2.js
+++ b/jstests/core/apply_ops2.js
@@ -1,70 +1,56 @@
-//Test applyops upsert flag SERVER-7452
+// Test applyops upsert flag SERVER-7452
var t = db.apply_ops2;
t.drop();
assert.eq(0, t.find().count(), "test collection not empty");
-t.insert({_id:1, x:"init"});
+t.insert({_id: 1, x: "init"});
-//alwaysUpsert = true
+// alwaysUpsert = true
print("Testing applyOps with alwaysUpsert = true");
-var res = db.runCommand({ applyOps: [
- {
- op: "u",
- ns: t.getFullName(),
- o2 : { _id: 1 },
- o: { $set: { x: "upsert=true existing" }}
- },
- {
- op: "u",
- ns: t.getFullName(),
- o2: { _id: 2 },
- o: { $set : { x: "upsert=true non-existing" }}
- }], alwaysUpsert: true });
+var res = db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 1}, o: {$set: {x: "upsert=true existing"}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 2}, o: {$set: {x: "upsert=true non-existing"}}}
+ ],
+ alwaysUpsert: true
+});
assert.eq(true, res.results[0], "upsert = true, existing doc update failed");
assert.eq(true, res.results[1], "upsert = true, nonexisting doc not upserted");
assert.eq(2, t.find().count(), "2 docs expected after upsert");
-//alwaysUpsert = false
+// alwaysUpsert = false
print("Testing applyOps with alwaysUpsert = false");
-res = db.runCommand({ applyOps: [
- {
- op: "u",
- ns: t.getFullName(),
- o2: { _id: 1 },
- o: { $set : { x: "upsert=false existing" }}
- },
- {
- op: "u",
- ns: t.getFullName(),
- o2: { _id: 3 },
- o: { $set: { x: "upsert=false non-existing" }}
- }], alwaysUpsert: false });
+res = db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 1}, o: {$set: {x: "upsert=false existing"}}},
+ {op: "u", ns: t.getFullName(), o2: {_id: 3}, o: {$set: {x: "upsert=false non-existing"}}}
+ ],
+ alwaysUpsert: false
+});
assert.eq(true, res.results[0], "upsert = false, existing doc update failed");
assert.eq(false, res.results[1], "upsert = false, nonexisting doc upserted");
assert.eq(2, t.find().count(), "2 docs expected after upsert failure");
-//alwaysUpsert not specified, should default to true
+// alwaysUpsert not specified, should default to true
print("Testing applyOps with default alwaysUpsert");
-res = db.runCommand({ applyOps: [
- {
- op: "u",
- ns: t.getFullName(),
- o2: { _id: 1 },
- o: { $set: { x: "upsert=default existing" }}
- },
- {
- op: "u",
- ns: t.getFullName(),
- o2: { _id: 4 },
- o: { $set: { x: "upsert=defaults non-existing" }}
- }]});
+res = db.runCommand({
+ applyOps: [
+ {op: "u", ns: t.getFullName(), o2: {_id: 1}, o: {$set: {x: "upsert=default existing"}}},
+ {
+ op: "u",
+ ns: t.getFullName(),
+ o2: {_id: 4},
+ o: {$set: {x: "upsert=defaults non-existing"}}
+ }
+ ]
+});
assert.eq(true, res.results[0], "default upsert, existing doc update failed");
assert.eq(true, res.results[1], "default upsert, nonexisting doc not upserted");
diff --git a/jstests/core/apply_ops_dups.js b/jstests/core/apply_ops_dups.js
index 9d8dfb8dc0f..bdca02a605c 100644
--- a/jstests/core/apply_ops_dups.js
+++ b/jstests/core/apply_ops_dups.js
@@ -4,23 +4,28 @@
t.drop();
// Check that duplicate _id fields don't cause an error
- assert.writeOK(t.insert({_id:0, x:1}));
- assert.commandWorked(t.createIndex({x:1}, {unique:true}));
- var a = assert.commandWorked(db.adminCommand(
- {applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: -1}},
- {"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 0}}]}
- ));
+ assert.writeOK(t.insert({_id: 0, x: 1}));
+ assert.commandWorked(t.createIndex({x: 1}, {unique: true}));
+ var a = assert.commandWorked(db.adminCommand({
+ applyOps: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: -1}},
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 5, x: 0}}
+ ]
+ }));
printjson(a);
printjson(t.find().toArray());
- assert.eq(2, t.find().count() , "Invalid insert worked");
+ assert.eq(2, t.find().count(), "Invalid insert worked");
assert.eq(true, a.results[0], "Valid insert was rejected");
assert.eq(true, a.results[1], "Insert should have not failed (but should be ignored");
printjson(t.find().toArray());
// Check that dups on non-id cause errors
- var a = assert.commandFailedWithCode(db.adminCommand(
- {applyOps: [{"op": "i", "ns": t.getFullName(), "o": {_id: 1, x: 0}},
- {"op": "i", "ns": t.getFullName(), "o": {_id: 2, x: 1}}]}
- ), 11000 /*DuplicateKey*/);
- assert.eq(2, t.find().count() , "Invalid insert worked");
+ var a = assert.commandFailedWithCode(db.adminCommand({
+ applyOps: [
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 1, x: 0}},
+ {"op": "i", "ns": t.getFullName(), "o": {_id: 2, x: 1}}
+ ]
+ }),
+ 11000 /*DuplicateKey*/);
+ assert.eq(2, t.find().count(), "Invalid insert worked");
})();
diff --git a/jstests/core/array1.js b/jstests/core/array1.js
index 3a27feb028a..8d6be81ca1f 100644
--- a/jstests/core/array1.js
+++ b/jstests/core/array1.js
@@ -1,14 +1,16 @@
t = db.array1;
t.drop();
-x = { a : [ 1 , 2 ] };
+x = {
+ a: [1, 2]
+};
-t.save( { a : [ [1,2] ] } );
-assert.eq( 1 , t.find( x ).count() , "A" );
+t.save({a: [[1, 2]]});
+assert.eq(1, t.find(x).count(), "A");
-t.save( x );
+t.save(x);
delete x._id;
-assert.eq( 2 , t.find( x ).count() , "B" );
+assert.eq(2, t.find(x).count(), "B");
-t.ensureIndex( { a : 1 } );
-assert.eq( 2 , t.find( x ).count() , "C" ); // TODO SERVER-146
+t.ensureIndex({a: 1});
+assert.eq(2, t.find(x).count(), "C"); // TODO SERVER-146
diff --git a/jstests/core/array3.js b/jstests/core/array3.js
index 8b024bd3a0c..42acdfb6d3e 100644
--- a/jstests/core/array3.js
+++ b/jstests/core/array3.js
@@ -1,8 +1,7 @@
-assert.eq( 5 , Array.sum( [ 1 , 4 ] ), "A" );
-assert.eq( 2.5 , Array.avg( [ 1 , 4 ] ), "B" );
-
-arr = [ 2 , 4 , 4 , 4 , 5 , 5 , 7 , 9 ];
-assert.eq( 5 , Array.avg( arr ) , "C" );
-assert.eq( 2 , Array.stdDev( arr ) , "D" );
+assert.eq(5, Array.sum([1, 4]), "A");
+assert.eq(2.5, Array.avg([1, 4]), "B");
+arr = [2, 4, 4, 4, 5, 5, 7, 9];
+assert.eq(5, Array.avg(arr), "C");
+assert.eq(2, Array.stdDev(arr), "D");
diff --git a/jstests/core/array4.js b/jstests/core/array4.js
index 1053e160f11..c6fe1599880 100644
--- a/jstests/core/array4.js
+++ b/jstests/core/array4.js
@@ -3,9 +3,11 @@ t = db.array4;
t.drop();
t.insert({"a": ["1", "2", "3"]});
-t.insert({"a" : ["2", "1"]});
+t.insert({"a": ["2", "1"]});
-var x = {'a.0' : /1/};
+var x = {
+ 'a.0': /1/
+};
assert.eq(t.count(x), 1);
@@ -14,17 +16,19 @@ assert.eq(t.findOne(x).a[1], 2);
t.drop();
-t.insert({"a" : {"0" : "1"}});
-t.insert({"a" : ["2", "1"]});
+t.insert({"a": {"0": "1"}});
+t.insert({"a": ["2", "1"]});
assert.eq(t.count(x), 1);
assert.eq(t.findOne(x).a[0], 1);
t.drop();
-t.insert({"a" : ["0", "1", "2", "3", "4", "5", "6", "1", "1", "1", "2", "3", "2", "1"]});
-t.insert({"a" : ["2", "1"]});
+t.insert({"a": ["0", "1", "2", "3", "4", "5", "6", "1", "1", "1", "2", "3", "2", "1"]});
+t.insert({"a": ["2", "1"]});
-x = {"a.12" : /2/};
+x = {
+ "a.12": /2/
+};
assert.eq(t.count(x), 1);
assert.eq(t.findOne(x).a[0], 0);
diff --git a/jstests/core/array_match1.js b/jstests/core/array_match1.js
index 194ebcb85c3..9923677b8df 100644
--- a/jstests/core/array_match1.js
+++ b/jstests/core/array_match1.js
@@ -2,30 +2,30 @@
t = db.array_match1;
t.drop();
-t.insert( { _id : 1 , a : [ 5 , 5 ] } );
-t.insert( { _id : 2 , a : [ 6 , 6 ] } );
-t.insert( { _id : 3 , a : [ 5 , 5 ] } );
+t.insert({_id: 1, a: [5, 5]});
+t.insert({_id: 2, a: [6, 6]});
+t.insert({_id: 3, a: [5, 5]});
-function test( f , m ){
+function test(f, m) {
var q = {};
- q[f] = [5,5];
- assert.eq( 2 , t.find( q ).itcount() , m + "1" );
+ q[f] = [5, 5];
+ assert.eq(2, t.find(q).itcount(), m + "1");
- q[f] = [6,6];
- assert.eq( 1 , t.find( q ).itcount() , m + "2" );
+ q[f] = [6, 6];
+ assert.eq(1, t.find(q).itcount(), m + "2");
}
-test( "a" , "A" );
-t.ensureIndex( { a : 1 } );
-test( "a" , "B" );
+test("a", "A");
+t.ensureIndex({a: 1});
+test("a", "B");
t.drop();
-t.insert( { _id : 1 , a : { b : [ 5 , 5 ] } } );
-t.insert( { _id : 2 , a : { b : [ 6 , 6 ] } } );
-t.insert( { _id : 3 , a : { b : [ 5 , 5 ] } } );
+t.insert({_id: 1, a: {b: [5, 5]}});
+t.insert({_id: 2, a: {b: [6, 6]}});
+t.insert({_id: 3, a: {b: [5, 5]}});
-test( "a.b" , "C" );
-t.ensureIndex( { a : 1 } );
-test( "a.b" , "D" );
+test("a.b", "C");
+t.ensureIndex({a: 1});
+test("a.b", "D");
diff --git a/jstests/core/array_match2.js b/jstests/core/array_match2.js
index d254b0a3fdd..44cbfe33941 100644
--- a/jstests/core/array_match2.js
+++ b/jstests/core/array_match2.js
@@ -2,19 +2,19 @@
t = db.jstests_array_match2;
t.drop();
-t.save( {a:[{1:4},5]} );
+t.save({a: [{1: 4}, 5]});
// When the array index is the last field, both of these match types work.
-assert.eq( 1, t.count( {'a.1':4} ) );
-assert.eq( 1, t.count( {'a.1':5} ) );
+assert.eq(1, t.count({'a.1': 4}));
+assert.eq(1, t.count({'a.1': 5}));
t.remove({});
// When the array index is not the last field, only one of the match types works.
-t.save( {a:[{1:{foo:4}},{foo:5}]} );
-assert.eq( 1, t.count( {'a.1.foo':4} ) );
-assert.eq( 1, t.count( {'a.1.foo':5} ) );
+t.save({a: [{1: {foo: 4}}, {foo: 5}]});
+assert.eq(1, t.count({'a.1.foo': 4}));
+assert.eq(1, t.count({'a.1.foo': 5}));
// Same issue with the $exists operator
t.remove({});
-t.save( {a:[{1:{foo:4}},{}]} );
-assert.eq( 1, t.count( {'a.1':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.1.foo':{$exists:true}} ) );
+t.save({a: [{1: {foo: 4}}, {}]});
+assert.eq(1, t.count({'a.1': {$exists: true}}));
+assert.eq(1, t.count({'a.1.foo': {$exists: true}}));
diff --git a/jstests/core/array_match3.js b/jstests/core/array_match3.js
index c8653430770..837341afc8a 100644
--- a/jstests/core/array_match3.js
+++ b/jstests/core/array_match3.js
@@ -4,10 +4,10 @@ t = db.jstests_array_match3;
t.drop();
// Test matching numericallly referenced array element.
-t.save( {a:{'0':5}} );
-t.save( {a:[5]} );
-assert.eq( 2, t.count( {'a.0':5} ) );
+t.save({a: {'0': 5}});
+t.save({a: [5]});
+assert.eq(2, t.count({'a.0': 5}));
// Test with index.
-t.ensureIndex( {'a.0':1} );
-assert.eq( 2, t.count( {'a.0':5} ) );
+t.ensureIndex({'a.0': 1});
+assert.eq(2, t.count({'a.0': 5}));
diff --git a/jstests/core/array_match4.js b/jstests/core/array_match4.js
index b4cdec5143a..4956fc1d8b2 100644
--- a/jstests/core/array_match4.js
+++ b/jstests/core/array_match4.js
@@ -3,7 +3,9 @@ var t = db.array_match4;
t.drop();
t.save({a: [1, 2]});
-var query_gte = {a: {$gte: [1, 2]}};
+var query_gte = {
+ a: {$gte: [1, 2]}
+};
//
// without index
diff --git a/jstests/core/arrayfind1.js b/jstests/core/arrayfind1.js
index 5a9f2227806..bd8d47b845e 100644
--- a/jstests/core/arrayfind1.js
+++ b/jstests/core/arrayfind1.js
@@ -2,32 +2,31 @@
t = db.arrayfind1;
t.drop();
-t.save( { a : [ { x : 1 } ] } );
-t.save( { a : [ { x : 1 , y : 2 , z : 1 } ] } );
-t.save( { a : [ { x : 1 , y : 1 , z : 3 } ] } );
+t.save({a: [{x: 1}]});
+t.save({a: [{x: 1, y: 2, z: 1}]});
+t.save({a: [{x: 1, y: 1, z: 3}]});
-function test( exptected , q , name ){
- assert.eq( exptected , t.find( q ).itcount() , name + " " + tojson( q ) + " itcount" );
- assert.eq( exptected , t.find( q ).count() , name + " " + tojson( q ) + " count" );
+function test(exptected, q, name) {
+ assert.eq(exptected, t.find(q).itcount(), name + " " + tojson(q) + " itcount");
+ assert.eq(exptected, t.find(q).count(), name + " " + tojson(q) + " count");
}
-test( 3 , {} , "A1" );
-test( 1 , { "a.y" : 2 } , "A2" );
-test( 1 , { "a" : { x : 1 } } , "A3" );
-test( 3 , { "a" : { $elemMatch : { x : 1 } } } , "A4" ); // SERVER-377
+test(3, {}, "A1");
+test(1, {"a.y": 2}, "A2");
+test(1, {"a": {x: 1}}, "A3");
+test(3, {"a": {$elemMatch: {x: 1}}}, "A4"); // SERVER-377
+t.save({a: [{x: 2}]});
+t.save({a: [{x: 3}]});
+t.save({a: [{x: 4}]});
-t.save( { a : [ { x : 2 } ] } );
-t.save( { a : [ { x : 3 } ] } );
-t.save( { a : [ { x : 4 } ] } );
+assert.eq(1, t.find({a: {$elemMatch: {x: 2}}}).count(), "B1");
+assert.eq(2, t.find({a: {$elemMatch: {x: {$gt: 2}}}}).count(), "B2");
-assert.eq( 1 , t.find( { a : { $elemMatch : { x : 2 } } } ).count() , "B1" );
-assert.eq( 2 , t.find( { a : { $elemMatch : { x : { $gt : 2 } } } } ).count() , "B2" );
+t.ensureIndex({"a.x": 1});
+assert.eq(1, t.find({a: {$elemMatch: {x: 2}}}).count(), "D1");
+assert.eq(3, t.find({"a.x": 1}).count(), "D2.1");
+assert.eq(3, t.find({"a.x": {$gt: 1}}).count(), "D2.2");
+assert.eq(2, t.find({a: {$elemMatch: {x: {$gt: 2}}}}).count(), "D3");
-t.ensureIndex( { "a.x" : 1 } );
-assert.eq( 1 , t.find( { a : { $elemMatch : { x : 2 } } } ).count() , "D1" );
-assert.eq( 3, t.find( { "a.x" : 1 } ).count() , "D2.1" );
-assert.eq( 3, t.find( { "a.x" : { $gt : 1 } } ).count() , "D2.2" );
-assert.eq( 2 , t.find( { a : { $elemMatch : { x : { $gt : 2 } } } } ).count() , "D3" );
-
-assert.eq( 2 , t.find( { a : { $ne:2, $elemMatch : { x : { $gt : 2 } } } } ).count() , "E1" );
+assert.eq(2, t.find({a: {$ne: 2, $elemMatch: {x: {$gt: 2}}}}).count(), "E1");
diff --git a/jstests/core/arrayfind2.js b/jstests/core/arrayfind2.js
index b292b13bf52..60eaa27f0d7 100644
--- a/jstests/core/arrayfind2.js
+++ b/jstests/core/arrayfind2.js
@@ -2,22 +2,27 @@
t = db.arrayfind2;
t.drop();
-function go( prefix ){
- assert.eq( 3 , t.count() , prefix + " A1" );
- assert.eq( 3 , t.find( { a : { $elemMatch : { x : { $gt : 4 } } } } ).count() , prefix + " A2" );
- assert.eq( 1 , t.find( { a : { $elemMatch : { x : { $lt : 2 } } } } ).count() , prefix + " A3" );
- assert.eq( 1 , t.find( { a : { $all : [ { $elemMatch : { x : { $lt : 4 } } } ,
- { $elemMatch : { x : { $gt : 5 } } } ] } } ).count() , prefix + " A4" );
-
- assert.throws( function() { return t.findOne( { a : { $all : [ 1, { $elemMatch : { x : 3 } } ] } } ); } );
- assert.throws( function() { return t.findOne( { a : { $all : [ /a/, { $elemMatch : { x : 3 } } ] } } ); } );
+function go(prefix) {
+ assert.eq(3, t.count(), prefix + " A1");
+ assert.eq(3, t.find({a: {$elemMatch: {x: {$gt: 4}}}}).count(), prefix + " A2");
+ assert.eq(1, t.find({a: {$elemMatch: {x: {$lt: 2}}}}).count(), prefix + " A3");
+ assert.eq(
+ 1,
+ t.find({a: {$all: [{$elemMatch: {x: {$lt: 4}}}, {$elemMatch: {x: {$gt: 5}}}]}}).count(),
+ prefix + " A4");
+ assert.throws(function() {
+ return t.findOne({a: {$all: [1, {$elemMatch: {x: 3}}]}});
+ });
+ assert.throws(function() {
+ return t.findOne({a: {$all: [/a/, {$elemMatch: {x: 3}}]}});
+ });
}
-t.save( { a : [ { x : 1 } , { x : 5 } ] } );
-t.save( { a : [ { x : 3 } , { x : 5 } ] } );
-t.save( { a : [ { x : 3 } , { x : 6 } ] } );
+t.save({a: [{x: 1}, {x: 5}]});
+t.save({a: [{x: 3}, {x: 5}]});
+t.save({a: [{x: 3}, {x: 6}]});
-go( "no index" );
-t.ensureIndex( { a : 1 } );
-go( "index(a)" );
+go("no index");
+t.ensureIndex({a: 1});
+go("index(a)");
diff --git a/jstests/core/arrayfind3.js b/jstests/core/arrayfind3.js
index 395b428ac1f..07fbc3670d5 100644
--- a/jstests/core/arrayfind3.js
+++ b/jstests/core/arrayfind3.js
@@ -2,15 +2,14 @@
t = db.arrayfind3;
t.drop();
-t.save({a:[1,2]});
-t.save({a:[1, 2, 6]});
-t.save({a:[1, 4, 6]});
+t.save({a: [1, 2]});
+t.save({a: [1, 2, 6]});
+t.save({a: [1, 4, 6]});
+assert.eq(2, t.find({a: {$gte: 3, $lte: 5}}).itcount(), "A1");
+assert.eq(1, t.find({a: {$elemMatch: {$gte: 3, $lte: 5}}}).itcount(), "A2");
-assert.eq( 2 , t.find( {a:{$gte:3, $lte: 5}} ).itcount() , "A1" );
-assert.eq( 1 , t.find( {a:{$elemMatch:{$gte:3, $lte: 5}}} ).itcount() , "A2" );
+t.ensureIndex({a: 1});
-t.ensureIndex( { a : 1 } );
-
-assert.eq( 2 , t.find( {a:{$gte:3, $lte: 5}} ).itcount() , "B1" );
-assert.eq( 1 , t.find( {a:{$elemMatch:{$gte:3, $lte: 5}}} ).itcount() , "B2" );
+assert.eq(2, t.find({a: {$gte: 3, $lte: 5}}).itcount(), "B1");
+assert.eq(1, t.find({a: {$elemMatch: {$gte: 3, $lte: 5}}}).itcount(), "B2");
diff --git a/jstests/core/arrayfind4.js b/jstests/core/arrayfind4.js
index 17b02c8886b..a43a914b930 100644
--- a/jstests/core/arrayfind4.js
+++ b/jstests/core/arrayfind4.js
@@ -3,20 +3,20 @@
t = db.jstests_arrayfind4;
t.drop();
-t.save( {a:[]} );
-t.ensureIndex( {a:1} );
+t.save({a: []});
+t.ensureIndex({a: 1});
-assert.eq( 1, t.find( {a:[]} ).hint( {$natural:1} ).itcount() );
-assert.eq( 1, t.find( {a:[]} ).hint( {a:1} ).itcount() );
+assert.eq(1, t.find({a: []}).hint({$natural: 1}).itcount());
+assert.eq(1, t.find({a: []}).hint({a: 1}).itcount());
-assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {$natural:1} ).itcount() );
-assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {a:1} ).itcount() );
+assert.eq(1, t.find({a: {$in: [[]]}}).hint({$natural: 1}).itcount());
+assert.eq(1, t.find({a: {$in: [[]]}}).hint({a: 1}).itcount());
t.remove({});
-t.save( {a:[[]]} );
+t.save({a: [[]]});
-assert.eq( 1, t.find( {a:[]} ).hint( {$natural:1} ).itcount() );
-assert.eq( 1, t.find( {a:[]} ).hint( {a:1} ).itcount() );
+assert.eq(1, t.find({a: []}).hint({$natural: 1}).itcount());
+assert.eq(1, t.find({a: []}).hint({a: 1}).itcount());
-assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {$natural:1} ).itcount() );
-assert.eq( 1, t.find( {a:{$in:[[]]}} ).hint( {a:1} ).itcount() );
+assert.eq(1, t.find({a: {$in: [[]]}}).hint({$natural: 1}).itcount());
+assert.eq(1, t.find({a: {$in: [[]]}}).hint({a: 1}).itcount());
diff --git a/jstests/core/arrayfind5.js b/jstests/core/arrayfind5.js
index 9ff6e2b8a5f..3fba886b83a 100644
--- a/jstests/core/arrayfind5.js
+++ b/jstests/core/arrayfind5.js
@@ -3,21 +3,22 @@
t = db.jstests_arrayfind5;
t.drop();
-function check( nullElemMatch ) {
- assert.eq( 1, t.find( {'a.b':1} ).itcount() );
- assert.eq( 1, t.find( {a:{$elemMatch:{b:1}}} ).itcount() );
- assert.eq( nullElemMatch ? 1 : 0 , t.find( {'a.b':null} ).itcount() );
- assert.eq( nullElemMatch ? 1 : 0, t.find( {a:{$elemMatch:{b:null}}} ).itcount() ); // see SERVER-3377
+function check(nullElemMatch) {
+ assert.eq(1, t.find({'a.b': 1}).itcount());
+ assert.eq(1, t.find({a: {$elemMatch: {b: 1}}}).itcount());
+ assert.eq(nullElemMatch ? 1 : 0, t.find({'a.b': null}).itcount());
+ assert.eq(nullElemMatch ? 1 : 0,
+ t.find({a: {$elemMatch: {b: null}}}).itcount()); // see SERVER-3377
}
-t.save( {a:[{},{b:1}]} );
-check( true );
-t.ensureIndex( {'a.b':1} );
-check( true );
+t.save({a: [{}, {b: 1}]});
+check(true);
+t.ensureIndex({'a.b': 1});
+check(true);
t.drop();
-t.save( {a:[5,{b:1}]} );
-check( false );
-t.ensureIndex( {'a.b':1} );
-check( false );
+t.save({a: [5, {b: 1}]});
+check(false);
+t.ensureIndex({'a.b': 1});
+check(false);
diff --git a/jstests/core/arrayfind6.js b/jstests/core/arrayfind6.js
index 9b54d5b2c07..f01271b6673 100644
--- a/jstests/core/arrayfind6.js
+++ b/jstests/core/arrayfind6.js
@@ -3,19 +3,19 @@
t = db.jstests_arrayfind6;
t.drop();
-t.save( { a:[ { b:1, c:2 } ] } );
+t.save({a: [{b: 1, c: 2}]});
function checkElemMatchMatches() {
- assert.eq( 1, t.count( { a:{ $elemMatch:{ b:1, c:2 } } } ) );
- assert.eq( 0, t.count( { a:{ $not:{ $elemMatch:{ b:1, c:2 } } } } ) );
- assert.eq( 1, t.count( { a:{ $not:{ $elemMatch:{ b:1, c:3 } } } } ) );
- assert.eq( 1, t.count( { a:{ $not:{ $elemMatch:{ b:{ $ne:1 }, c:3 } } } } ) );
+ assert.eq(1, t.count({a: {$elemMatch: {b: 1, c: 2}}}));
+ assert.eq(0, t.count({a: {$not: {$elemMatch: {b: 1, c: 2}}}}));
+ assert.eq(1, t.count({a: {$not: {$elemMatch: {b: 1, c: 3}}}}));
+ assert.eq(1, t.count({a: {$not: {$elemMatch: {b: {$ne: 1}, c: 3}}}}));
// Index bounds must be determined for $not:$elemMatch, not $not:$ne. In this case if index
// bounds are determined for $not:$ne, the a.b index will be constrained to the interval [2,2]
// and the saved document will not be matched as it should.
- assert.eq( 1, t.count( { a:{ $not:{ $elemMatch:{ b:{ $ne:2 }, c:3 } } } } ) );
+ assert.eq(1, t.count({a: {$not: {$elemMatch: {b: {$ne: 2}, c: 3}}}}));
}
checkElemMatchMatches();
-t.ensureIndex( { 'a.b':1 } );
+t.ensureIndex({'a.b': 1});
checkElemMatchMatches();
diff --git a/jstests/core/arrayfind7.js b/jstests/core/arrayfind7.js
index 7c44de1dc1d..f0dc2e2caa8 100644
--- a/jstests/core/arrayfind7.js
+++ b/jstests/core/arrayfind7.js
@@ -3,50 +3,50 @@
t = db.jstests_arrayfind7;
t.drop();
-t.save( { a:[ { b:[ { c:1, d:2 } ] } ] } );
+t.save({a: [{b: [{c: 1, d: 2}]}]});
function checkElemMatchMatches() {
- assert.eq( 1, t.count( { a:{ $elemMatch:{ b:{ $elemMatch:{ c:1, d:2 } } } } } ) );
+ assert.eq(1, t.count({a: {$elemMatch: {b: {$elemMatch: {c: 1, d: 2}}}}}));
}
// The document is matched using nested $elemMatch expressions, with and without an index.
checkElemMatchMatches();
-t.ensureIndex( { 'a.b.c':1 } );
+t.ensureIndex({'a.b.c': 1});
checkElemMatchMatches();
-function checkElemMatch( index, document, query ) {
+function checkElemMatch(index, document, query) {
// The document is matched without an index, and with single and multi key indexes.
t.drop();
- t.save( document );
- assert.eq( 1, t.count( query ) );
- t.ensureIndex( index );
- assert.eq( 1, t.count( query ) );
- t.save( { a:{ b:{ c:[ 10, 11 ] } } } ); // Make the index multikey.
- assert.eq( 1, t.count( query ) );
+ t.save(document);
+ assert.eq(1, t.count(query));
+ t.ensureIndex(index);
+ assert.eq(1, t.count(query));
+ t.save({a: {b: {c: [10, 11]}}}); // Make the index multikey.
+ assert.eq(1, t.count(query));
}
// Two constraints within a nested $elemMatch expression.
-checkElemMatch( { 'a.b.c':1 },
- { a:[ { b:[ { c:1 } ] } ] },
- { a:{ $elemMatch:{ b:{ $elemMatch:{ c:{ $gte:1, $lte:1 } } } } } });
+checkElemMatch({'a.b.c': 1},
+ {a: [{b: [{c: 1}]}]},
+ {a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1, $lte: 1}}}}}});
// Two constraints within a nested $elemMatch expression, one of which contains the other.
-checkElemMatch( { 'a.b.c':1 },
- { a:[ { b:[ { c:2 } ] } ] },
- { a:{ $elemMatch:{ b:{ $elemMatch:{ c:{ $gte:1, $in:[2] } } } } } });
+checkElemMatch({'a.b.c': 1},
+ {a: [{b: [{c: 2}]}]},
+ {a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1, $in: [2]}}}}}});
// Two nested $elemMatch expressions.
-checkElemMatch( { 'a.d.e':1, 'a.b.c':1 },
- { a:[ { b:[ { c:1 } ], d:[ { e:1 } ] } ] },
- { a:{ $elemMatch:{ d:{ $elemMatch:{ e:{ $lte:1 } } },
- b:{ $elemMatch:{ c:{ $gte:1 } } } } } });
+checkElemMatch(
+ {'a.d.e': 1, 'a.b.c': 1},
+ {a: [{b: [{c: 1}], d: [{e: 1}]}]},
+ {a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}}, b: {$elemMatch: {c: {$gte: 1}}}}}});
// A non $elemMatch expression and a nested $elemMatch expression.
-checkElemMatch( { 'a.x':1, 'a.b.c':1 },
- { a:[ { b:[ { c:1 } ], x:1 } ] },
- { 'a.x':1, a:{ $elemMatch:{ b:{ $elemMatch:{ c:{ $gte:1 } } } } } });
+checkElemMatch({'a.x': 1, 'a.b.c': 1},
+ {a: [{b: [{c: 1}], x: 1}]},
+ {'a.x': 1, a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1}}}}}});
// $elemMatch is applied directly to a top level field.
-checkElemMatch( { 'a.b.c':1 },
- { a:[ { b:[ { c:[ 1 ] } ] } ] },
- { a:{ $elemMatch:{ 'b.c':{ $elemMatch:{ $gte:1, $lte:1 } } } } });
+checkElemMatch({'a.b.c': 1},
+ {a: [{b: [{c: [1]}]}]},
+ {a: {$elemMatch: {'b.c': {$elemMatch: {$gte: 1, $lte: 1}}}}});
diff --git a/jstests/core/arrayfind8.js b/jstests/core/arrayfind8.js
index e74093d9457..d322229a298 100644
--- a/jstests/core/arrayfind8.js
+++ b/jstests/core/arrayfind8.js
@@ -6,34 +6,34 @@ var debuggingEnabled = false;
t = db.jstests_arrayfind8;
t.drop();
-function debug( x ) {
- if ( debuggingEnabled ) {
- printjson( x );
+function debug(x) {
+ if (debuggingEnabled) {
+ printjson(x);
}
}
/** Set index state for the test. */
-function setIndexKey( key ) {
+function setIndexKey(key) {
indexKey = key;
indexSpec = {};
- indexSpec[ key ] = 1;
+ indexSpec[key] = 1;
}
-setIndexKey( 'a' );
+setIndexKey('a');
/** Check that the query results match the documents in the 'expected' array. */
-function assertResults( expected, query, context ) {
- debug( query );
- assert.eq( expected.length, t.count( query ), 'unexpected count in ' + context );
- results = t.find( query ).toArray();
- for( i in results ) {
+function assertResults(expected, query, context) {
+ debug(query);
+ assert.eq(expected.length, t.count(query), 'unexpected count in ' + context);
+ results = t.find(query).toArray();
+ for (i in results) {
found = false;
- for( j in expected ) {
- if ( friendlyEqual( expected[ j ], results[ i ].a ) ) {
+ for (j in expected) {
+ if (friendlyEqual(expected[j], results[i].a)) {
found = true;
}
}
- assert( found, 'unexpected result ' + results[ i ] + ' in ' + context );
+ assert(found, 'unexpected result ' + results[i] + ' in ' + context);
}
}
@@ -43,23 +43,22 @@ function assertResults( expected, query, context ) {
* @param elemMatch - document matched by elemMatchQuery but not standardQuery
* @param notElemMatch - document matched by standardQuery but not elemMatchQuery
*/
-function checkMatch( bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, context ) {
-
- function mayPush( arr, elt ) {
- if ( elt ) {
- arr.push( elt );
+function checkMatch(bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, context) {
+ function mayPush(arr, elt) {
+ if (elt) {
+ arr.push(elt);
}
}
expectedStandardQueryResults = [];
- mayPush( expectedStandardQueryResults, bothMatch );
- mayPush( expectedStandardQueryResults, nonElemMatch );
- assertResults( expectedStandardQueryResults, standardQuery, context + ' standard query' );
+ mayPush(expectedStandardQueryResults, bothMatch);
+ mayPush(expectedStandardQueryResults, nonElemMatch);
+ assertResults(expectedStandardQueryResults, standardQuery, context + ' standard query');
expectedElemMatchQueryResults = [];
- mayPush( expectedElemMatchQueryResults, bothMatch );
- mayPush( expectedElemMatchQueryResults, elemMatch );
- assertResults( expectedElemMatchQueryResults, elemMatchQuery, context + ' elemMatch query' );
+ mayPush(expectedElemMatchQueryResults, bothMatch);
+ mayPush(expectedElemMatchQueryResults, elemMatch);
+ assertResults(expectedElemMatchQueryResults, elemMatchQuery, context + ' elemMatch query');
}
/**
@@ -71,100 +70,100 @@ function checkMatch( bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatc
* @param notElemMatch - document matched by standardQuery but not elemMatchQuery
* @param additionalConstraints - additional query parameters not generated from @param subQuery
*/
-function checkQuery( subQuery, bothMatch, elemMatch, nonElemMatch,
- additionalConstraints ) {
+function checkQuery(subQuery, bothMatch, elemMatch, nonElemMatch, additionalConstraints) {
t.drop();
additionalConstraints = additionalConstraints || {};
-
+
// Construct standard and elemMatch queries from subQuery.
- firstSubQueryKey = Object.keySet( subQuery )[ 0 ];
- if ( firstSubQueryKey[ 0 ] == '$' ) {
- standardQuery = { $and:[ { a:subQuery }, additionalConstraints ] };
- }
- else {
+ firstSubQueryKey = Object.keySet(subQuery)[0];
+ if (firstSubQueryKey[0] == '$') {
+ standardQuery = {
+ $and: [{a: subQuery}, additionalConstraints]
+ };
+ } else {
// If the subQuery contains a field rather than operators, append to the 'a' field.
modifiedSubQuery = {};
- modifiedSubQuery[ 'a.' + firstSubQueryKey ] = subQuery[ firstSubQueryKey ];
- standardQuery = { $and:[ modifiedSubQuery, additionalConstraints ] };
+ modifiedSubQuery['a.' + firstSubQueryKey] = subQuery[firstSubQueryKey];
+ standardQuery = {
+ $and: [modifiedSubQuery, additionalConstraints]
+ };
}
- elemMatchQuery = { $and:[ { a:{ $elemMatch:subQuery } }, additionalConstraints ] };
- debug( elemMatchQuery );
-
- function maySave( aValue ) {
- if ( aValue ) {
- debug( { a:aValue } );
- t.save( { a:aValue } );
+ elemMatchQuery = {
+ $and: [{a: {$elemMatch: subQuery}}, additionalConstraints]
+ };
+ debug(elemMatchQuery);
+
+ function maySave(aValue) {
+ if (aValue) {
+ debug({a: aValue});
+ t.save({a: aValue});
}
}
// Save all documents and check matching without indexes.
- maySave( bothMatch );
- maySave( elemMatch );
- maySave( nonElemMatch );
+ maySave(bothMatch);
+ maySave(elemMatch);
+ maySave(nonElemMatch);
- checkMatch( bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, 'unindexed' );
+ checkMatch(bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, 'unindexed');
// Check matching and index bounds for a single key index.
t.drop();
- maySave( bothMatch );
- maySave( elemMatch );
+ maySave(bothMatch);
+ maySave(elemMatch);
// The nonElemMatch document is not tested here, as it will often make the index multikey.
- t.ensureIndex( indexSpec );
- checkMatch( bothMatch, elemMatch, null, standardQuery, elemMatchQuery, 'single key index' );
+ t.ensureIndex(indexSpec);
+ checkMatch(bothMatch, elemMatch, null, standardQuery, elemMatchQuery, 'single key index');
// Check matching and index bounds for a multikey index.
// Now the nonElemMatch document is tested.
- maySave( nonElemMatch );
+ maySave(nonElemMatch);
// Force the index to be multikey.
- t.save( { a:[ -1, -2 ] } );
- t.save( { a:{ b:[ -1, -2 ] } } );
- checkMatch( bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery,
- 'multikey index' );
+ t.save({a: [-1, -2]});
+ t.save({a: {b: [-1, -2]}});
+ checkMatch(bothMatch, elemMatch, nonElemMatch, standardQuery, elemMatchQuery, 'multikey index');
}
maxNumber = Infinity;
// Basic test.
-checkQuery( { $gt:4 }, [ 5 ] );
+checkQuery({$gt: 4}, [5]);
// Multiple constraints within a $elemMatch clause.
-checkQuery( { $gt:4, $lt:6 }, [ 5 ], null, [ 3, 7 ] );
-checkQuery( { $gt:4, $not:{ $gte:6 } }, [ 5 ] );
-checkQuery( { $gt:4, $not:{ $ne:6 } }, [ 6 ] );
-checkQuery( { $gte:5, $lte:5 }, [ 5 ], null, [ 4, 6 ] );
-checkQuery( { $in:[ 4, 6 ], $gt:5 }, [ 6 ], null, [ 4, 7 ] );
-checkQuery( { $regex:'^a' }, [ 'a' ] );
+checkQuery({$gt: 4, $lt: 6}, [5], null, [3, 7]);
+checkQuery({$gt: 4, $not: {$gte: 6}}, [5]);
+checkQuery({$gt: 4, $not: {$ne: 6}}, [6]);
+checkQuery({$gte: 5, $lte: 5}, [5], null, [4, 6]);
+checkQuery({$in: [4, 6], $gt: 5}, [6], null, [4, 7]);
+checkQuery({$regex: '^a'}, ['a']);
// Some constraints within a $elemMatch clause and other constraints outside of it.
-checkQuery( { $gt:4 }, [ 5 ], null, null, { a:{ $lt:6 } } );
-checkQuery( { $gte:5 }, [ 5 ], null, null, { a:{ $lte:5 } } );
-checkQuery( { $in:[ 4, 6 ] }, [ 6 ], null, null, { a:{ $gt:5 } } );
+checkQuery({$gt: 4}, [5], null, null, {a: {$lt: 6}});
+checkQuery({$gte: 5}, [5], null, null, {a: {$lte: 5}});
+checkQuery({$in: [4, 6]}, [6], null, null, {a: {$gt: 5}});
// Constraints in different $elemMatch clauses.
-checkQuery( { $gt:4 }, [ 5 ], null, null, { a:{ $elemMatch:{ $lt:6 } } } );
-checkQuery( { $gt:4 }, [ 3, 7 ], null, null, { a:{ $elemMatch:{ $lt:6 } } } );
-checkQuery( { $gte:5 }, [ 5 ], null, null, { a:{ $elemMatch:{ $lte:5 } } } );
-checkQuery( { $in:[ 4, 6 ] }, [ 6 ], null, null, { a:{ $elemMatch:{ $gt:5 } } } );
+checkQuery({$gt: 4}, [5], null, null, {a: {$elemMatch: {$lt: 6}}});
+checkQuery({$gt: 4}, [3, 7], null, null, {a: {$elemMatch: {$lt: 6}}});
+checkQuery({$gte: 5}, [5], null, null, {a: {$elemMatch: {$lte: 5}}});
+checkQuery({$in: [4, 6]}, [6], null, null, {a: {$elemMatch: {$gt: 5}}});
// TODO SERVER-1264
-if ( 0 ) {
-checkQuery( { $elemMatch:{ $in:[ 5 ] } }, null, [[ 5 ]], [ 5 ], null );
+if (0) {
+ checkQuery({$elemMatch: {$in: [5]}}, null, [[5]], [5], null);
}
-setIndexKey( 'a.b' );
-checkQuery( { $elemMatch:{ b:{ $gte:1, $lte:1 } } }, null, [[ { b:1 } ]],
- [ { b:1 } ], null );
-checkQuery( { $elemMatch:{ b:{ $gte:1, $lte:1 } } }, null, [[ { b:[ 0, 2 ] } ]],
- [ { b:[ 0, 2 ] } ], null );
+setIndexKey('a.b');
+checkQuery({$elemMatch: {b: {$gte: 1, $lte: 1}}}, null, [[{b: 1}]], [{b: 1}], null);
+checkQuery({$elemMatch: {b: {$gte: 1, $lte: 1}}}, null, [[{b: [0, 2]}]], [{b: [0, 2]}], null);
// Constraints for a top level (SERVER-1264 style) $elemMatch nested within a non top level
// $elemMatch.
-checkQuery( { b:{ $elemMatch:{ $gte:1, $lte:1 } } }, [ { b:[ 1 ] } ] );
-checkQuery( { b:{ $elemMatch:{ $gte:1, $lte:4 } } }, [ { b:[ 1 ] } ] );
+checkQuery({b: {$elemMatch: {$gte: 1, $lte: 1}}}, [{b: [1]}]);
+checkQuery({b: {$elemMatch: {$gte: 1, $lte: 4}}}, [{b: [1]}]);
-checkQuery( { b:{ $elemMatch:{ $gte:1, $lte:4 } } }, [ { b:[ 2 ] } ], null,
- null, { 'a.b':{ $in:[ 2, 5 ] } } );
-checkQuery( { b:{ $elemMatch:{ $in:[ 1, 2 ] }, $in:[ 2, 3 ] } },
- [ { b:[ 2 ] } ], null, [ { b:[ 1 ] }, { b:[ 3 ] } ], null );
+checkQuery({b: {$elemMatch: {$gte: 1, $lte: 4}}}, [{b: [2]}], null, null, {'a.b': {$in: [2, 5]}});
+checkQuery(
+ {b: {$elemMatch: {$in: [1, 2]}, $in: [2, 3]}}, [{b: [2]}], null, [{b: [1]}, {b: [3]}], null);
diff --git a/jstests/core/arrayfind9.js b/jstests/core/arrayfind9.js
index 4ee14c56580..98396701dce 100644
--- a/jstests/core/arrayfind9.js
+++ b/jstests/core/arrayfind9.js
@@ -4,31 +4,31 @@ t = db.jstests_arrayfind9;
t.drop();
// Top level field $elemMatch:$not matching
-t.save( { a:[ 1 ] } );
-assert.eq( 1, t.count( { a:{ $elemMatch:{ $not:{ $ne:1 } } } } ) );
+t.save({a: [1]});
+assert.eq(1, t.count({a: {$elemMatch: {$not: {$ne: 1}}}}));
// Top level field object $elemMatch matching.
t.drop();
-t.save( { a:[ {} ] } );
-assert.eq( 1, t.count( { a:{ $elemMatch:{ $gte:{} } } } ) );
+t.save({a: [{}]});
+assert.eq(1, t.count({a: {$elemMatch: {$gte: {}}}}));
// Top level field array $elemMatch matching.
t.drop();
-t.save( { a:[ [] ] } );
-assert.eq( 1, t.count( { a:{ $elemMatch:{ $in:[ [] ] } } } ) );
+t.save({a: [[]]});
+assert.eq(1, t.count({a: {$elemMatch: {$in: [[]]}}}));
// Matching by array index.
t.drop();
-t.save( { a:[ [ 'x' ] ] } );
-assert.eq( 1, t.count( { a:{ $elemMatch:{ '0':'x' } } } ) );
+t.save({a: [['x']]});
+assert.eq(1, t.count({a: {$elemMatch: {'0': 'x'}}}));
// Matching multiple values of a nested array.
t.drop();
-t.save( { a:[ { b:[ 0, 2 ] } ] } );
-t.ensureIndex( { a:1 } );
-t.ensureIndex( { 'a.b':1 } );
-plans = [ { $natural:1 }, { a:1 }, { 'a.b':1 } ];
-for( i in plans ) {
- p = plans[ i ];
- assert.eq( 1, t.find( { a:{ $elemMatch:{ b:{ $gte:1, $lte:1 } } } } ).hint( p ).itcount() );
+t.save({a: [{b: [0, 2]}]});
+t.ensureIndex({a: 1});
+t.ensureIndex({'a.b': 1});
+plans = [{$natural: 1}, {a: 1}, {'a.b': 1}];
+for (i in plans) {
+ p = plans[i];
+ assert.eq(1, t.find({a: {$elemMatch: {b: {$gte: 1, $lte: 1}}}}).hint(p).itcount());
}
diff --git a/jstests/core/arrayfinda.js b/jstests/core/arrayfinda.js
index 179d3985580..f2939d0c1ba 100644
--- a/jstests/core/arrayfinda.js
+++ b/jstests/core/arrayfinda.js
@@ -4,18 +4,18 @@ t = db.jstests_arrayfinda;
t.drop();
// $elemMatch only matches elements within arrays (a descriptive, not a normative test).
-t.save( { a:[ { b:1 } ] } );
-t.save( { a:{ b:1 } } );
+t.save({a: [{b: 1}]});
+t.save({a: {b: 1}});
-function assertExpectedMatch( cursor ) {
- assert.eq( [ { b:1 } ], cursor.next().a );
- assert( !cursor.hasNext() );
+function assertExpectedMatch(cursor) {
+ assert.eq([{b: 1}], cursor.next().a);
+ assert(!cursor.hasNext());
}
-assertExpectedMatch( t.find( { a:{ $elemMatch:{ b:{ $gte:1 } } } } ) );
-assertExpectedMatch( t.find( { a:{ $elemMatch:{ b:1 } } } ) );
+assertExpectedMatch(t.find({a: {$elemMatch: {b: {$gte: 1}}}}));
+assertExpectedMatch(t.find({a: {$elemMatch: {b: 1}}}));
// $elemMatch is not used to perform key matching. SERVER-6001
-t.ensureIndex( { a:1 } );
-assertExpectedMatch( t.find( { a:{ $elemMatch:{ b:{ $gte:1 } } } } ).hint( { a:1 } ) );
-assertExpectedMatch( t.find( { a:{ $elemMatch:{ b:1 } } } ).hint( { a:1 } ) );
+t.ensureIndex({a: 1});
+assertExpectedMatch(t.find({a: {$elemMatch: {b: {$gte: 1}}}}).hint({a: 1}));
+assertExpectedMatch(t.find({a: {$elemMatch: {b: 1}}}).hint({a: 1}));
diff --git a/jstests/core/arrayfindb.js b/jstests/core/arrayfindb.js
index ad1a86be142..483e1e46134 100644
--- a/jstests/core/arrayfindb.js
+++ b/jstests/core/arrayfindb.js
@@ -5,11 +5,13 @@ t.drop();
// Case #1: Ensure correct matching for $elemMatch with an embedded $and (SERVER-13664).
t.save({a: [{b: 1, c: 25}, {a: 3, b: 59}]});
-assert.eq(0, t.find({a: {$elemMatch: {b: {$gte: 2, $lt: 4}, c: 25}}}).itcount(),
+assert.eq(0,
+ t.find({a: {$elemMatch: {b: {$gte: 2, $lt: 4}, c: 25}}}).itcount(),
"Case #1: wrong number of results returned -- unindexed");
t.ensureIndex({"a.b": 1, "a.c": 1});
-assert.eq(0, t.find({a: {$elemMatch: {b: {$gte: 2, $lt: 4}, c: 25}}}).itcount(),
+assert.eq(0,
+ t.find({a: {$elemMatch: {b: {$gte: 2, $lt: 4}, c: 25}}}).itcount(),
"Case #1: wrong number of results returned -- indexed");
// Case #2: Ensure correct matching for $elemMatch with an embedded $or.
@@ -17,10 +19,12 @@ t.drop();
t.save({a: [{b: 1}, {c: 1}]});
t.save({a: [{b: 2}, {c: 1}]});
t.save({a: [{b: 1}, {c: 2}]});
-assert.eq(2, t.find({a: {$elemMatch: {$or: [{b: 2}, {c: 2}]}}}).itcount(),
+assert.eq(2,
+ t.find({a: {$elemMatch: {$or: [{b: 2}, {c: 2}]}}}).itcount(),
"Case #2: wrong number of results returned -- unindexed");
t.ensureIndex({"a.b": 1});
t.ensureIndex({"a.c": 1});
-assert.eq(2, t.find({a: {$elemMatch: {$or: [{b: 2}, {c: 2}]}}}).itcount(),
+assert.eq(2,
+ t.find({a: {$elemMatch: {$or: [{b: 2}, {c: 2}]}}}).itcount(),
"Case #2: wrong number of results returned -- indexed");
diff --git a/jstests/core/auth1.js b/jstests/core/auth1.js
index 268f55f2af8..46d61f7d4b0 100644
--- a/jstests/core/auth1.js
+++ b/jstests/core/auth1.js
@@ -2,53 +2,51 @@ var mydb = db.getSiblingDB('auth1_db');
mydb.dropAllUsers();
pass = "a" + Math.random();
-//print( "password [" + pass + "]" );
+// print( "password [" + pass + "]" );
-mydb.createUser({user: "eliot" ,pwd: pass, roles: jsTest.basicUserRoles});
+mydb.createUser({user: "eliot", pwd: pass, roles: jsTest.basicUserRoles});
-assert( mydb.auth( "eliot" , pass ) , "auth failed" );
-assert( ! mydb.auth( "eliot" , pass + "a" ) , "auth should have failed" );
+assert(mydb.auth("eliot", pass), "auth failed");
+assert(!mydb.auth("eliot", pass + "a"), "auth should have failed");
pass2 = "b" + Math.random();
mydb.changeUserPassword("eliot", pass2);
-assert( ! mydb.auth( "eliot" , pass ) , "failed to change password failed" );
-assert( mydb.auth( "eliot" , pass2 ) , "new password didn't take" );
+assert(!mydb.auth("eliot", pass), "failed to change password failed");
+assert(mydb.auth("eliot", pass2), "new password didn't take");
-assert( mydb.auth( "eliot" , pass2 ) , "what?" );
-mydb.dropUser( "eliot" );
-assert( ! mydb.auth( "eliot" , pass2 ) , "didn't drop user" );
+assert(mydb.auth("eliot", pass2), "what?");
+mydb.dropUser("eliot");
+assert(!mydb.auth("eliot", pass2), "didn't drop user");
-
-var a = mydb.getMongo().getDB( "admin" );
+var a = mydb.getMongo().getDB("admin");
a.dropAllUsers();
pass = "c" + Math.random();
a.createUser({user: "super", pwd: pass, roles: jsTest.adminUserRoles});
-assert( a.auth( "super" , pass ) , "auth failed" );
-assert( !a.auth( "super" , pass + "a" ) , "auth should have failed" );
+assert(a.auth("super", pass), "auth failed");
+assert(!a.auth("super", pass + "a"), "auth should have failed");
mydb.dropAllUsers();
pass = "a" + Math.random();
-mydb.createUser({user: "eliot" , pwd: pass, roles: jsTest.basicUserRoles});
+mydb.createUser({user: "eliot", pwd: pass, roles: jsTest.basicUserRoles});
-assert.commandFailed( mydb.runCommand( { authenticate: 1, user: "eliot", nonce: "foo", key: "bar" } ) );
+assert.commandFailed(mydb.runCommand({authenticate: 1, user: "eliot", nonce: "foo", key: "bar"}));
// check sanity check SERVER-3003
var before = a.system.users.count({db: mydb.getName()});
-assert.throws( function(){
- mydb.createUser({ user: "" , pwd: "abc", roles: jsTest.basicUserRoles});
-} , null , "C1" );
-assert.throws( function(){
- mydb.createUser({ user: "abc" , pwd: "", roles: jsTest.basicUserRoles});
-} , null , "C2" );
-
+assert.throws(function() {
+ mydb.createUser({user: "", pwd: "abc", roles: jsTest.basicUserRoles});
+}, null, "C1");
+assert.throws(function() {
+ mydb.createUser({user: "abc", pwd: "", roles: jsTest.basicUserRoles});
+}, null, "C2");
var after = a.system.users.count({db: mydb.getName()});
-assert( before > 0 , "C3" );
-assert.eq( before , after , "C4" );
+assert(before > 0, "C3");
+assert.eq(before, after, "C4");
// Clean up after ourselves so other tests using authentication don't get messed up.
mydb.dropAllUsers();
diff --git a/jstests/core/auth2.js b/jstests/core/auth2.js
index 2c4cff6796b..c9e6d29dac9 100644
--- a/jstests/core/auth2.js
+++ b/jstests/core/auth2.js
@@ -1,9 +1,9 @@
// just make sure logout doesn't break anything
// SERVER-724
-db.runCommand({logout : 1});
-x = db.runCommand({logout : 1});
-assert.eq( 1 , x.ok , "A" );
+db.runCommand({logout: 1});
+x = db.runCommand({logout: 1});
+assert.eq(1, x.ok, "A");
x = db.logout();
-assert.eq( 1 , x.ok , "B" );
+assert.eq(1, x.ok, "B");
diff --git a/jstests/core/auth_copydb.js b/jstests/core/auth_copydb.js
index f04cd0b0d29..ae0e6888c01 100644
--- a/jstests/core/auth_copydb.js
+++ b/jstests/core/auth_copydb.js
@@ -1,19 +1,19 @@
-a = db.getSisterDB( "copydb2-test-a" );
-b = db.getSisterDB( "copydb2-test-b" );
+a = db.getSisterDB("copydb2-test-a");
+b = db.getSisterDB("copydb2-test-b");
a.dropDatabase();
b.dropDatabase();
a.dropAllUsers();
b.dropAllUsers();
-a.foo.save( { a : 1 } );
+a.foo.save({a: 1});
-a.createUser({user: "chevy" , pwd: "chase", roles: jsTest.basicUserRoles});
+a.createUser({user: "chevy", pwd: "chase", roles: jsTest.basicUserRoles});
-assert.eq( 1 , a.foo.count() , "A" );
-assert.eq( 0 , b.foo.count() , "B" );
+assert.eq(1, a.foo.count(), "A");
+assert.eq(0, b.foo.count(), "B");
// SERVER-727
-a.copyDatabase( a._name , b._name, "" , "chevy" , "chase" );
-assert.eq( 1 , a.foo.count() , "C" );
-assert.eq( 1 , b.foo.count() , "D" );
+a.copyDatabase(a._name, b._name, "", "chevy", "chase");
+assert.eq(1, a.foo.count(), "C");
+assert.eq(1, b.foo.count(), "D");
diff --git a/jstests/core/autoid.js b/jstests/core/autoid.js
index 6c8062fd093..f4707e5fe65 100644
--- a/jstests/core/autoid.js
+++ b/jstests/core/autoid.js
@@ -1,11 +1,11 @@
f = db.jstests_autoid;
f.drop();
-f.save( {z:1} );
-a = f.findOne( {z:1} );
-f.update( {z:1}, {z:2} );
-b = f.findOne( {z:2} );
-assert.eq( a._id.str, b._id.str );
-c = f.update( {z:2}, {z:"abcdefgabcdefgabcdefg"} );
-c = f.findOne( {} );
-assert.eq( a._id.str, c._id.str );
+f.save({z: 1});
+a = f.findOne({z: 1});
+f.update({z: 1}, {z: 2});
+b = f.findOne({z: 2});
+assert.eq(a._id.str, b._id.str);
+c = f.update({z: 2}, {z: "abcdefgabcdefgabcdefg"});
+c = f.findOne({});
+assert.eq(a._id.str, c._id.str);
diff --git a/jstests/core/bad_index_plugin.js b/jstests/core/bad_index_plugin.js
index c22bba5e0cc..7ecfe76c198 100644
--- a/jstests/core/bad_index_plugin.js
+++ b/jstests/core/bad_index_plugin.js
@@ -1,11 +1,11 @@
// SERVER-5826 ensure you can't build an index with a non-existent plugin
t = db.bad_index_plugin;
-assert.commandWorked(t.ensureIndex({ good: 1 }));
-assert.eq(t.getIndexes().length, 2); // good + _id
+assert.commandWorked(t.ensureIndex({good: 1}));
+assert.eq(t.getIndexes().length, 2); // good + _id
var err = t.ensureIndex({bad: 'bad'});
assert.commandFailed(err);
assert(err.code >= 0);
-assert.eq(t.getIndexes().length, 2); // good + _id (no bad)
+assert.eq(t.getIndexes().length, 2); // good + _id (no bad)
diff --git a/jstests/core/basic1.js b/jstests/core/basic1.js
index e5fa577f0b2..f4ca8a283d9 100644
--- a/jstests/core/basic1.js
+++ b/jstests/core/basic1.js
@@ -1,21 +1,23 @@
-t = db.getCollection( "basic1" );
+t = db.getCollection("basic1");
t.drop();
-o = { a : 1 };
-t.save( o );
+o = {
+ a: 1
+};
+t.save(o);
-assert.eq( 1 , t.findOne().a , "first" );
-assert( o._id , "now had id" );
-assert( o._id.str , "id not a real id" );
+assert.eq(1, t.findOne().a, "first");
+assert(o._id, "now had id");
+assert(o._id.str, "id not a real id");
o.a = 2;
-t.save( o );
+t.save(o);
-assert.eq( 2 , t.findOne().a , "second" );
+assert.eq(2, t.findOne().a, "second");
assert(t.validate().valid);
-// not a very good test of currentOp, but tests that it at least
+// not a very good test of currentOp, but tests that it at least
// is sort of there:
-assert( db.currentOp().inprog != null );
+assert(db.currentOp().inprog != null);
diff --git a/jstests/core/basic2.js b/jstests/core/basic2.js
index aaa3de4366e..50b3db323ce 100644
--- a/jstests/core/basic2.js
+++ b/jstests/core/basic2.js
@@ -1,16 +1,18 @@
-t = db.getCollection( "basic2" );
+t = db.getCollection("basic2");
t.drop();
-o = { n : 2 };
-t.save( o );
+o = {
+ n: 2
+};
+t.save(o);
-assert.eq( 1 , t.find().count() );
+assert.eq(1, t.find().count());
-assert.eq( 2 , t.find( o._id ).toArray()[0].n );
-assert.eq( 2 , t.find( o._id , { n : 1 } ).toArray()[0].n );
+assert.eq(2, t.find(o._id).toArray()[0].n);
+assert.eq(2, t.find(o._id, {n: 1}).toArray()[0].n);
-t.remove( o._id );
-assert.eq( 0 , t.find().count() );
+t.remove(o._id);
+assert.eq(0, t.find().count());
assert(t.validate().valid);
diff --git a/jstests/core/basic3.js b/jstests/core/basic3.js
index 61f68047d30..ec0b48ec0cf 100644
--- a/jstests/core/basic3.js
+++ b/jstests/core/basic3.js
@@ -1,8 +1,8 @@
// Tests that "." cannot be in field names
-t = db.getCollection( "foo_basic3" );
+t = db.getCollection("foo_basic3");
t.drop();
-//more diagnostics on bad save, if exception fails
+// more diagnostics on bad save, if exception fails
doBadSave = function(param) {
print("doing save with " + tojson(param));
var res = t.save(param);
@@ -10,7 +10,7 @@ doBadSave = function(param) {
printjson(res);
};
-//more diagnostics on bad save, if exception fails
+// more diagnostics on bad save, if exception fails
doBadUpdate = function(query, update) {
print("doing update with " + tojson(query) + " " + tojson(update));
var res = t.update(query, update);
@@ -18,28 +18,24 @@ doBadUpdate = function(query, update) {
printjson(res);
};
-assert.throws(doBadSave, [{"a.b":5}], ". in names aren't allowed doesn't work");
+assert.throws(doBadSave, [{"a.b": 5}], ". in names aren't allowed doesn't work");
-assert.throws(doBadSave,
- [{ "x" : { "a.b" : 5 } }],
- ". in embedded names aren't allowed doesn't work");
+assert.throws(doBadSave, [{"x": {"a.b": 5}}], ". in embedded names aren't allowed doesn't work");
// following tests make sure update keys are checked
-t.save({"a": 0,"b": 1});
+t.save({"a": 0, "b": 1});
-assert.throws(doBadUpdate, [{a:0}, { "b.b" : 1 }],
- "must deny '.' in key of update");
+assert.throws(doBadUpdate, [{a: 0}, {"b.b": 1}], "must deny '.' in key of update");
// upsert with embedded doc
-assert.throws(doBadUpdate, [{a:10}, { c: {"b.b" : 1 }}],
- "must deny embedded '.' in key of update");
+assert.throws(doBadUpdate, [{a: 10}, {c: {"b.b": 1}}], "must deny embedded '.' in key of update");
// if it is a modifier, it should still go through
-t.update({"a": 0}, {$set: { "c.c": 1}});
-t.update({"a": 0}, {$inc: { "c.c": 1}});
+t.update({"a": 0}, {$set: {"c.c": 1}});
+t.update({"a": 0}, {$inc: {"c.c": 1}});
// edge cases
-assert.throws(doBadUpdate, [{a:0}, { "":{"b.b" : 1} }],
+assert.throws(doBadUpdate,
+ [{a: 0}, {"": {"b.b": 1}}],
"must deny '' embedded '.' in key of update");
t.update({"a": 0}, {});
-
diff --git a/jstests/core/basic4.js b/jstests/core/basic4.js
index 0cf7a261e63..4b2cf6f96be 100644
--- a/jstests/core/basic4.js
+++ b/jstests/core/basic4.js
@@ -1,12 +1,12 @@
-t = db.getCollection( "basic4" );
+t = db.getCollection("basic4");
t.drop();
-t.save( { a : 1 , b : 1.0 } );
+t.save({a: 1, b: 1.0});
-assert( t.findOne() );
-assert( t.findOne( { a : 1 } ) );
-assert( t.findOne( { a : 1.0 } ) );
-assert( t.findOne( { b : 1 } ) );
-assert( t.findOne( { b : 1.0 } ) );
+assert(t.findOne());
+assert(t.findOne({a: 1}));
+assert(t.findOne({a: 1.0}));
+assert(t.findOne({b: 1}));
+assert(t.findOne({b: 1.0}));
-assert( ! t.findOne( { b : 2.0 } ) );
+assert(!t.findOne({b: 2.0}));
diff --git a/jstests/core/basic5.js b/jstests/core/basic5.js
index bfa40fb8f5e..7ec41ef7872 100644
--- a/jstests/core/basic5.js
+++ b/jstests/core/basic5.js
@@ -1,6 +1,5 @@
-t = db.getCollection( "basic5" );
+t = db.getCollection("basic5");
t.drop();
-t.save( { a : 1 , b : [ 1 , 2 , 3 ] } );
-assert.eq( 3 , t.findOne().b.length );
-
+t.save({a: 1, b: [1, 2, 3]});
+assert.eq(3, t.findOne().b.length);
diff --git a/jstests/core/basic6.js b/jstests/core/basic6.js
index e0cd6f1586e..c5919bfb158 100644
--- a/jstests/core/basic6.js
+++ b/jstests/core/basic6.js
@@ -4,5 +4,5 @@ t = db.basic6;
t.findOne();
t.a.findOne();
-assert.eq( "test.basic6" , t.toString() );
-assert.eq( "test.basic6.a" , t.a.toString() );
+assert.eq("test.basic6", t.toString());
+assert.eq("test.basic6.a", t.a.toString());
diff --git a/jstests/core/basic7.js b/jstests/core/basic7.js
index 4ae7d6902b3..bfe82ccda17 100644
--- a/jstests/core/basic7.js
+++ b/jstests/core/basic7.js
@@ -2,10 +2,9 @@
t = db.basic7;
t.drop();
-t.save( { a : 1 } );
-t.ensureIndex( { a : 1 } );
-
-assert.eq( t.find().toArray()[0].a , 1 );
-assert.eq( t.find().arrayAccess(0).a , 1 );
-assert.eq( t.find()[0].a , 1 );
+t.save({a: 1});
+t.ensureIndex({a: 1});
+assert.eq(t.find().toArray()[0].a, 1);
+assert.eq(t.find().arrayAccess(0).a, 1);
+assert.eq(t.find()[0].a, 1);
diff --git a/jstests/core/basic8.js b/jstests/core/basic8.js
index 513da0d15d1..d9b158487cc 100644
--- a/jstests/core/basic8.js
+++ b/jstests/core/basic8.js
@@ -2,10 +2,10 @@
t = db.basic8;
t.drop();
-t.save( { a : 1 } );
+t.save({a: 1});
o = t.findOne();
o.b = 2;
-t.save( o );
+t.save(o);
-assert.eq( 1 , t.find().count() , "A" );
-assert.eq( 2 , t.findOne().b , "B" );
+assert.eq(1, t.find().count(), "A");
+assert.eq(2, t.findOne().b, "B");
diff --git a/jstests/core/basic9.js b/jstests/core/basic9.js
index 3078fcad2bc..bebaeb54740 100644
--- a/jstests/core/basic9.js
+++ b/jstests/core/basic9.js
@@ -1,5 +1,5 @@
// Tests that $<prefix> field names are not allowed, but you can use a $ anywhere else.
-t = db.getCollection( "foo_basic9" );
+t = db.getCollection("foo_basic9");
t.drop();
// more diagnostics on bad save, if exception fails
@@ -10,10 +10,10 @@ doBadSave = function(param) {
print('Should have errored out: ' + tojson(res));
};
-t.save({foo$foo:5});
-t.save({foo$:5});
+t.save({foo$foo: 5});
+t.save({foo$: 5});
-assert.throws(doBadSave, [{$foo:5}], "key names aren't allowed to start with $ doesn't work");
-assert.throws(doBadSave,
- [{x:{$foo:5}}],
+assert.throws(doBadSave, [{$foo: 5}], "key names aren't allowed to start with $ doesn't work");
+assert.throws(doBadSave,
+ [{x: {$foo: 5}}],
"embedded key names aren't allowed to start with $ doesn't work");
diff --git a/jstests/core/basica.js b/jstests/core/basica.js
index 0cc364beb42..1fe8b7c5de4 100644
--- a/jstests/core/basica.js
+++ b/jstests/core/basica.js
@@ -1,10 +1,9 @@
t = db.basica;
-
t.drop();
-t.save( { a : 1 , b : [ { x : 2 , y : 2 } , { x : 3 , y : 3 } ] } );
+t.save({a: 1, b: [{x: 2, y: 2}, {x: 3, y: 3}]});
x = t.findOne();
x.b["0"].x = 4;
@@ -14,20 +13,19 @@ x.b[0]["asd"] = 11;
x.a = 2;
x.z = 11;
-tojson( x );
-t.save( x );
-assert.eq( tojson( x ) , tojson( t.findOne() ) , "FIRST" );
+tojson(x);
+t.save(x);
+assert.eq(tojson(x), tojson(t.findOne()), "FIRST");
// -----
t.drop();
-t.save( { a : 1 , b : [ { x : 2 , y : 2 } , { x : 3 , y : 3 } ] } );
+t.save({a: 1, b: [{x: 2, y: 2}, {x: 3, y: 3}]});
x = t.findOne();
x.b["0"].z = 4;
-//printjson( x );
-t.save( x );
-assert.eq( tojson( x ) , tojson( t.findOne() ) , "SECOND" );
-
+// printjson( x );
+t.save(x);
+assert.eq(tojson(x), tojson(t.findOne()), "SECOND");
diff --git a/jstests/core/basicb.js b/jstests/core/basicb.js
index 95eb60151af..65531d706a1 100644
--- a/jstests/core/basicb.js
+++ b/jstests/core/basicb.js
@@ -2,5 +2,6 @@
t = db.basicb;
t.drop();
-assert.throws( function() { t.insert( { '$a' : 5 } ); });
-
+assert.throws(function() {
+ t.insert({'$a': 5});
+});
diff --git a/jstests/core/batch_size.js b/jstests/core/batch_size.js
index dce9cda4451..eca41e412ba 100644
--- a/jstests/core/batch_size.js
+++ b/jstests/core/batch_size.js
@@ -81,21 +81,26 @@ assert.lte(explain.executionStats.totalKeysExamined, 60, 'S');
assert.lte(explain.executionStats.totalDocsExamined, 60, 'T');
assert.eq(explain.executionStats.nReturned, 6, 'U');
-
// -------
-
// During plan ranking, we treat ntoreturn as a limit. This prevents us from buffering
// too much data in a blocking sort stage during plan ranking.
t.drop();
// Generate big string to use in the object - 1MB+ String
var bigStr = "ABCDEFGHIJKLMNBOPQRSTUVWXYZ012345687890";
-while (bigStr.length < 1000000) { bigStr = bigStr + "::" + bigStr; }
+while (bigStr.length < 1000000) {
+ bigStr = bigStr + "::" + bigStr;
+}
// Insert enough documents to exceed the 32 MB in-memory sort limit.
for (var i = 0; i < 40; i++) {
- var doc = {x: 1, y: 1, z: i, big: bigStr};
+ var doc = {
+ x: 1,
+ y: 1,
+ z: i,
+ big: bigStr
+ };
t.insert(doc);
}
diff --git a/jstests/core/batch_write_command_delete.js b/jstests/core/batch_write_command_delete.js
index eb08179e463..2aefcea6a7f 100644
--- a/jstests/core/batch_write_command_delete.js
+++ b/jstests/core/batch_write_command_delete.js
@@ -2,7 +2,7 @@
// Ensures that mongod respects the batch write protocols for delete
//
-var coll = db.getCollection( "batch_write_delete" );
+var coll = db.getCollection("batch_write_delete");
coll.drop();
assert(coll.getDB().getMongo().useWriteCommands(), "test is not running with write commands");
@@ -13,18 +13,13 @@ var batch;
var maxWriteBatchSize = 1000;
-function resultOK( result ) {
- return result.ok &&
- !( 'code' in result ) &&
- !( 'errmsg' in result ) &&
- !( 'errInfo' in result ) &&
- !( 'writeErrors' in result );
+function resultOK(result) {
+ return result.ok && !('code' in result) && !('errmsg' in result) && !('errInfo' in result) &&
+ !('writeErrors' in result);
}
-function resultNOK( result ) {
- return !result.ok &&
- typeof( result.code ) == 'number' &&
- typeof( result.errmsg ) == 'string';
+function resultNOK(result) {
+ return !result.ok && typeof(result.code) == 'number' && typeof(result.errmsg) == 'string';
}
// EACH TEST BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
@@ -32,8 +27,10 @@ function resultNOK( result ) {
//
// NO DOCS, illegal command
coll.remove({});
-coll.insert({a:1});
-request = { delete: coll.getName()};
+coll.insert({a: 1});
+request = {
+ delete: coll.getName()
+};
result = coll.runCommand(request);
assert(resultNOK(result), tojson(result));
assert.eq(1, coll.count());
@@ -41,9 +38,11 @@ assert.eq(1, coll.count());
//
// Single document remove, default write concern specified
coll.remove({});
-coll.insert({a:1});
-request = { delete: coll.getName(),
- deletes: [{q: {a:1}, limit: 1}]};
+coll.insert({a: 1});
+request = {
+ delete: coll.getName(),
+ deletes: [{q: {a: 1}, limit: 1}]
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(1, result.n);
@@ -52,10 +51,12 @@ assert.eq(0, coll.count());
//
// Single document delete, w:0 write concern specified
coll.remove({});
-coll.insert({a:1});
-request = { delete: coll.getName(),
- deletes: [{q: {a: 1}, limit: 1}],
- writeConcern:{w:0}};
+coll.insert({a: 1});
+request = {
+ delete: coll.getName(),
+ deletes: [{q: {a: 1}, limit: 1}],
+ writeConcern: {w: 0}
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(0, coll.count());
@@ -67,11 +68,13 @@ for (var field in result) {
//
// Single document remove, w:1 write concern specified, ordered:true
coll.remove({});
-coll.insert([{a:1}, {a:1}]);
-request = { delete: coll.getName(),
- deletes: [{q: {a: 1}, limit: 1}],
- writeConcern:{w:1},
- ordered: false};
+coll.insert([{a: 1}, {a: 1}]);
+request = {
+ delete: coll.getName(),
+ deletes: [{q: {a: 1}, limit: 1}],
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(1, result.n);
@@ -80,11 +83,13 @@ assert.eq(1, coll.count());
//
// Multiple document remove, w:1 write concern specified, ordered:true, default top
coll.remove({});
-coll.insert([{a:1}, {a:1}]);
-request = { delete: coll.getName(),
- deletes: [{q: {a: 1}, limit: 0}],
- writeConcern:{w:1},
- ordered: false};
+coll.insert([{a: 1}, {a: 1}]);
+request = {
+ delete: coll.getName(),
+ deletes: [{q: {a: 1}, limit: 0}],
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(2, result.n);
@@ -93,11 +98,13 @@ assert.eq(0, coll.count());
//
// Multiple document remove, w:1 write concern specified, ordered:true, top:0
coll.remove({});
-coll.insert([{a:1}, {a:1}]);
-request = { delete: coll.getName(),
- deletes: [{q: {a: 1}, limit: 0}],
- writeConcern:{w:1},
- ordered: false};
+coll.insert([{a: 1}, {a: 1}]);
+request = {
+ delete: coll.getName(),
+ deletes: [{q: {a: 1}, limit: 0}],
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(2, result.n);
@@ -108,13 +115,15 @@ assert.eq(0, coll.count());
coll.remove({});
batch = [];
for (var i = 0; i < maxWriteBatchSize; ++i) {
- coll.insert({a:i});
- batch.push({q:{a:i}, limit: 0});
+ coll.insert({a: i});
+ batch.push({q: {a: i}, limit: 0});
}
-request = { delete: coll.getName(),
- deletes: batch,
- writeConcern:{w:1},
- ordered: false};
+request = {
+ delete: coll.getName(),
+ deletes: batch,
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(batch.length, result.n);
@@ -125,13 +134,15 @@ assert.eq(0, coll.count());
coll.remove({});
batch = [];
for (var i = 0; i < maxWriteBatchSize + 1; ++i) {
- coll.insert({a:i});
- batch.push({q:{a:i}, limit: 0});
+ coll.insert({a: i});
+ batch.push({q: {a: i}, limit: 0});
}
-request = { delete: coll.getName(),
- deletes: batch,
- writeConcern:{w:1},
- ordered: false};
+request = {
+ delete: coll.getName(),
+ deletes: batch,
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultNOK(result), tojson(result));
assert.eq(batch.length, coll.count());
@@ -139,13 +150,13 @@ assert.eq(batch.length, coll.count());
//
// Cause remove error using ordered:true
coll.remove({});
-coll.insert({a:1});
-request = { delete: coll.getName(),
- deletes: [{q: {a:1}, limit: 0},
- {q: {$set: {a: 1}}, limit: 0},
- {q: {$set: {a: 1}}, limit: 0}],
- writeConcern:{w:1},
- ordered: true};
+coll.insert({a: 1});
+request = {
+ delete: coll.getName(),
+ deletes: [{q: {a: 1}, limit: 0}, {q: {$set: {a: 1}}, limit: 0}, {q: {$set: {a: 1}}, limit: 0}],
+ writeConcern: {w: 1},
+ ordered: true
+};
result = coll.runCommand(request);
assert.commandWorked(result);
assert.eq(1, result.n);
@@ -160,13 +171,13 @@ assert.eq(0, coll.count());
//
// Cause remove error using ordered:false
coll.remove({});
-coll.insert({a:1});
-request = { delete: coll.getName(),
- deletes: [{q: {$set: {a: 1}}, limit: 0},
- {q: {$set: {a: 1}}, limit: 0},
- {q: {a:1}, limit: 0}],
- writeConcern:{w:1},
- ordered: false};
+coll.insert({a: 1});
+request = {
+ delete: coll.getName(),
+ deletes: [{q: {$set: {a: 1}}, limit: 0}, {q: {$set: {a: 1}}, limit: 0}, {q: {a: 1}, limit: 0}],
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert.commandWorked(result);
assert.eq(1, result.n);
@@ -184,13 +195,13 @@ assert.eq(0, coll.count());
//
// Cause remove error using ordered:false and w:0
coll.remove({});
-coll.insert({a:1});
-request = { delete: coll.getName(),
- deletes: [{q: {$set: {a: 1}}, limit: 0},
- {q: {$set: {a: 1}}, limit: 0},
- {q: {a:1}, limit: 0}],
- writeConcern:{w:0},
- ordered: false};
+coll.insert({a: 1});
+request = {
+ delete: coll.getName(),
+ deletes: [{q: {$set: {a: 1}}, limit: 0}, {q: {$set: {a: 1}}, limit: 0}, {q: {a: 1}, limit: 0}],
+ writeConcern: {w: 0},
+ ordered: false
+};
result = coll.runCommand(request);
assert.commandWorked(result);
assert.eq(0, coll.count());
@@ -202,13 +213,14 @@ for (var field in result) {
//
// Cause remove error using ordered:true and w:0
coll.remove({});
-coll.insert({a:1});
-request = { delete: coll.getName(),
- deletes: [{q: {$set: {a: 1}}, limit: 0},
- {q: {$set: {a: 1}}, limit: 0},
- {q: {a:1}, limit:(1)}],
- writeConcern:{w:0},
- ordered: true};
+coll.insert({a: 1});
+request = {
+ delete: coll.getName(),
+ deletes:
+ [{q: {$set: {a: 1}}, limit: 0}, {q: {$set: {a: 1}}, limit: 0}, {q: {a: 1}, limit: (1)}],
+ writeConcern: {w: 0},
+ ordered: true
+};
result = coll.runCommand(request);
assert.commandWorked(result);
assert.eq(1, coll.count());
@@ -220,10 +232,12 @@ for (var field in result) {
//
// When limit is not 0 and 1
coll.remove({});
-coll.insert({a:1});
-request = { delete: coll.getName(),
- deletes: [{q: {a: 1}, limit: 2}],
- writeConcern:{w:0},
- ordered: false};
+coll.insert({a: 1});
+request = {
+ delete: coll.getName(),
+ deletes: [{q: {a: 1}, limit: 2}],
+ writeConcern: {w: 0},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultNOK(result), tojson(result));
diff --git a/jstests/core/batch_write_command_insert.js b/jstests/core/batch_write_command_insert.js
index f0b2ed24985..6b42cf08ebf 100644
--- a/jstests/core/batch_write_command_insert.js
+++ b/jstests/core/batch_write_command_insert.js
@@ -2,7 +2,7 @@
// Ensures that mongod respects the batch write protocol for inserts
//
-var coll = db.getCollection( "batch_write_insert" );
+var coll = db.getCollection("batch_write_insert");
coll.drop();
assert(coll.getDB().getMongo().useWriteCommands(), "test is not running with write commands");
@@ -13,18 +13,13 @@ var batch;
var maxWriteBatchSize = 1000;
-function resultOK( result ) {
- return result.ok &&
- !( 'code' in result ) &&
- !( 'errmsg' in result ) &&
- !( 'errInfo' in result ) &&
- !( 'writeErrors' in result );
+function resultOK(result) {
+ return result.ok && !('code' in result) && !('errmsg' in result) && !('errInfo' in result) &&
+ !('writeErrors' in result);
}
-function resultNOK( result ) {
- return !result.ok &&
- typeof( result.code ) == 'number' &&
- typeof( result.errmsg ) == 'string';
+function resultNOK(result) {
+ return !result.ok && typeof(result.code) == 'number' && typeof(result.errmsg) == 'string';
}
// EACH TEST BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
@@ -32,14 +27,19 @@ function resultNOK( result ) {
//
// NO DOCS, illegal command
coll.remove({});
-request = { insert: coll.getName() };
+request = {
+ insert: coll.getName()
+};
result = coll.runCommand(request);
assert(resultNOK(result), tojson(result));
//
// Single document insert, no write concern specified
coll.remove({});
-request = { insert: coll.getName(), documents: [{ a: 1 }]};
+request = {
+ insert: coll.getName(),
+ documents: [{a: 1}]
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(1, result.n);
@@ -48,7 +48,11 @@ assert.eq(coll.count(), 1);
//
// Single document insert, w:0 write concern specified, missing ordered
coll.remove({});
-request = { insert: coll.getName(), documents: [{ a: 1 }], writeConcern: { w: 0 }};
+request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 0}
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(coll.count(), 1);
@@ -60,7 +64,12 @@ for (var field in result) {
//
// Single document insert, w:1 write concern specified, ordered:true
coll.remove({});
-request = { insert: coll.getName(), documents: [{ a: 1 }], writeConcern: { w: 1 }, ordered: true };
+request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 1},
+ ordered: true
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(1, result.n);
@@ -69,7 +78,12 @@ assert.eq(coll.count(), 1);
//
// Single document insert, w:1 write concern specified, ordered:false
coll.remove({});
-request = { insert: coll.getName(), documents: [{ a: 1 }], writeConcern: { w: 1 }, ordered: false };
+request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(1, result.n);
@@ -80,8 +94,8 @@ assert.eq(coll.count(), 1);
coll.remove({});
request = {
insert: coll.getName(),
- documents: [{ $set: { a: 1 }}],
- writeConcern: { w: 1 },
+ documents: [{$set: {a: 1}}],
+ writeConcern: {w: 1},
ordered: false
};
result = coll.runCommand(request);
@@ -96,8 +110,8 @@ assert.eq(coll.count(), 0);
coll.remove({});
request = {
insert: coll.getName(),
- documents: [{ o: { $set: { a: 1 }}}],
- writeConcern: { w: 1 },
+ documents: [{o: {$set: {a: 1}}}],
+ writeConcern: {w: 1},
ordered: false
};
result = coll.runCommand(request);
@@ -112,7 +126,12 @@ batch = [];
for (var i = 0; i < maxWriteBatchSize; ++i) {
batch.push({});
}
-request = { insert: coll.getName(), documents: batch, writeConcern: { w: 1 }, ordered: false };
+request = {
+ insert: coll.getName(),
+ documents: batch,
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(batch.length, result.n);
@@ -125,7 +144,12 @@ batch = [];
for (var i = 0; i < maxWriteBatchSize + 1; ++i) {
batch.push({});
}
-request = { insert : coll.getName(), documents: batch, writeConcern: { w: 1 }, ordered: false };
+request = {
+ insert: coll.getName(),
+ documents: batch,
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultNOK(result), tojson(result));
assert.eq(coll.count(), 0);
@@ -133,7 +157,10 @@ assert.eq(coll.count(), 0);
//
// Batch of size zero should fail to insert
coll.remove({});
-request = { insert: coll.getName(), documents: [] };
+request = {
+ insert: coll.getName(),
+ documents: []
+};
result = coll.runCommand(request);
assert(resultNOK(result), tojson(result));
@@ -141,13 +168,16 @@ assert(resultNOK(result), tojson(result));
//
// Unique index tests
coll.remove({});
-coll.ensureIndex({a : 1}, {unique : true});
+coll.ensureIndex({a: 1}, {unique: true});
//
// Should fail single insert due to duplicate key
coll.remove({});
-coll.insert({a:1});
-request = { insert: coll.getName(), documents: [{ a: 1 }] };
+coll.insert({a: 1});
+request = {
+ insert: coll.getName(),
+ documents: [{a: 1}]
+};
result = coll.runCommand(request);
assert(result.ok, tojson(result));
assert.eq(1, result.writeErrors.length);
@@ -159,9 +189,9 @@ assert.eq(coll.count(), 1);
coll.remove({});
request = {
insert: coll.getName(),
- documents: [{ a: 1 }, { a: 1 }, { a: 1 }],
- writeConcern: { w: 1 },
- ordered:false
+ documents: [{a: 1}, {a: 1}, {a: 1}],
+ writeConcern: {w: 1},
+ ordered: false
};
result = coll.runCommand(request);
assert(result.ok, tojson(result));
@@ -184,8 +214,8 @@ assert.eq(coll.count(), 1);
coll.remove({});
request = {
insert: coll.getName(),
- documents: [{ a: 1 }, { a: 1 }, { a: 1 }],
- writeConcern: { w: 1 },
+ documents: [{a: 1}, {a: 1}, {a: 1}],
+ writeConcern: {w: 1},
ordered: true
};
result = coll.runCommand(request);
@@ -202,12 +232,15 @@ assert.eq(coll.count(), 1);
//
// Ensure _id is the first field in all documents
coll.remove({});
-request = { insert: coll.getName(), documents : [{ a: 1 }, { a: 2, _id: 2 }]};
+request = {
+ insert: coll.getName(),
+ documents: [{a: 1}, {a: 2, _id: 2}]
+};
result = coll.runCommand(request);
assert.eq(2, coll.count());
coll.find().forEach(function(doc) {
var firstKey = null;
- for ( var key in doc) {
+ for (var key in doc) {
firstKey = key;
break;
}
@@ -221,8 +254,10 @@ coll.find().forEach(function(doc) {
//
// Successful index creation
coll.drop();
-request = { insert: "system.indexes",
- documents: [{ ns: coll.toString(), key: { x: 1 }, name: "x_1" }]};
+request = {
+ insert: "system.indexes",
+ documents: [{ns: coll.toString(), key: {x: 1}, name: "x_1"}]
+};
result = coll.runCommand(request);
assert(result.ok, tojson(result));
assert.eq(1, result.n);
@@ -231,10 +266,11 @@ assert.eq(coll.getIndexes().length, 2);
//
// Duplicate index insertion gives n = 0
coll.drop();
-coll.ensureIndex({x : 1}, {unique : true});
-request = { insert: "system.indexes",
- documents : [{ ns: coll.toString(),
- key: { x: 1 }, name: "x_1", unique: true}]};
+coll.ensureIndex({x: 1}, {unique: true});
+request = {
+ insert: "system.indexes",
+ documents: [{ns: coll.toString(), key: {x: 1}, name: "x_1", unique: true}]
+};
result = coll.runCommand(request);
assert(result.ok, tojson(result));
assert.eq(0, result.n, 'duplicate index insertion should give n = 0: ' + tojson(result));
@@ -244,9 +280,10 @@ assert.eq(coll.getIndexes().length, 2);
//
// Invalid index insertion with mismatched collection db
coll.drop();
-request = { insert: "system.indexes",
- documents: [{ ns: "invalid." + coll.getName(),
- key: { x: 1 }, name: "x_1", unique: true }]};
+request = {
+ insert: "system.indexes",
+ documents: [{ns: "invalid." + coll.getName(), key: {x: 1}, name: "x_1", unique: true}]
+};
result = coll.runCommand(request);
assert(!result.ok, tojson(result));
assert.eq(coll.getIndexes().length, 0);
@@ -254,7 +291,10 @@ assert.eq(coll.getIndexes().length, 0);
//
// Empty index insertion
coll.drop();
-request = { insert: "system.indexes", documents : [{}] };
+request = {
+ insert: "system.indexes",
+ documents: [{}]
+};
result = coll.runCommand(request);
assert(!result.ok, tojson(result));
assert.eq(coll.getIndexes().length, 0);
@@ -262,7 +302,10 @@ assert.eq(coll.getIndexes().length, 0);
//
// Invalid index desc
coll.drop();
-request = { insert: "system.indexes", documents: [{ ns: coll.toString() }] };
+request = {
+ insert: "system.indexes",
+ documents: [{ns: coll.toString()}]
+};
result = coll.runCommand(request);
assert(result.ok, tojson(result));
assert.eq(0, result.n);
@@ -272,8 +315,10 @@ assert.eq(coll.getIndexes().length, 1);
//
// Invalid index desc
coll.drop();
-request = { insert: "system.indexes",
- documents: [{ ns: coll.toString(), key: { x: 1 }}] };
+request = {
+ insert: "system.indexes",
+ documents: [{ns: coll.toString(), key: {x: 1}}]
+};
result = coll.runCommand(request);
assert(result.ok, tojson(result));
assert.eq(0, result.n);
@@ -283,8 +328,10 @@ assert.eq(coll.getIndexes().length, 1);
//
// Invalid index desc
coll.drop();
-request = { insert: "system.indexes",
- documents: [{ ns: coll.toString(), name: "x_1" }]};
+request = {
+ insert: "system.indexes",
+ documents: [{ns: coll.toString(), name: "x_1"}]
+};
result = coll.runCommand(request);
assert(result.ok, tojson(result));
assert.eq(0, result.n);
@@ -294,9 +341,13 @@ assert.eq(coll.getIndexes().length, 1);
//
// Cannot insert more than one index at a time through the batch writes
coll.drop();
-request = { insert: "system.indexes",
- documents: [{ ns: coll.toString(), key: { x: 1 }, name: "x_1" },
- { ns: coll.toString(), key: { y: 1 }, name: "y_1" }]};
+request = {
+ insert: "system.indexes",
+ documents: [
+ {ns: coll.toString(), key: {x: 1}, name: "x_1"},
+ {ns: coll.toString(), key: {y: 1}, name: "y_1"}
+ ]
+};
result = coll.runCommand(request);
assert(!result.ok, tojson(result));
assert.eq(coll.getIndexes().length, 0);
@@ -308,12 +359,12 @@ coll.insert({_id: 50}); // Create a document to force a duplicate key exception
var bulk = coll.initializeOrderedBulkOp();
for (i = 1; i < 100; i++) {
- bulk.insert( { _id: i } );
+ bulk.insert({_id: i});
}
try {
bulk.execute();
assert(false, "should have failed due to duplicate key");
-} catch(err) {
+} catch (err) {
assert(coll.count() == 50, "Unexpected number inserted by bulk write: " + coll.count());
}
@@ -322,14 +373,12 @@ try {
// Note: due to SERVER-13304 this test is at the end of this file, and we don't drop
// the collection afterwards.
coll.drop();
-coll.insert({ x : 1 });
-request = { insert: "system.indexes",
- documents: [{ ns: coll.toString(),
- key: { x: 1 },
- name: "x_1",
- background : true }]};
+coll.insert({x: 1});
+request = {
+ insert: "system.indexes",
+ documents: [{ns: coll.toString(), key: {x: 1}, name: "x_1", background: true}]
+};
result = coll.runCommand(request);
assert(result.ok, tojson(result));
assert.eq(1, result.n);
assert.eq(coll.getIndexes().length, 2);
-
diff --git a/jstests/core/batch_write_command_update.js b/jstests/core/batch_write_command_update.js
index 00964131efe..2d9d2d699b2 100644
--- a/jstests/core/batch_write_command_update.js
+++ b/jstests/core/batch_write_command_update.js
@@ -2,7 +2,7 @@
// Ensures that mongod respects the batch write protocols for updates
//
-var coll = db.getCollection( "batch_write_update" );
+var coll = db.getCollection("batch_write_update");
coll.drop();
assert(coll.getDB().getMongo().useWriteCommands(), "test is not running with write commands");
@@ -13,18 +13,13 @@ var batch;
var maxWriteBatchSize = 1000;
-function resultOK( result ) {
- return result.ok &&
- !( 'code' in result ) &&
- !( 'errmsg' in result ) &&
- !( 'errInfo' in result ) &&
- !( 'writeErrors' in result );
+function resultOK(result) {
+ return result.ok && !('code' in result) && !('errmsg' in result) && !('errInfo' in result) &&
+ !('writeErrors' in result);
}
-function resultNOK( result ) {
- return !result.ok &&
- typeof( result.code ) == 'number' &&
- typeof( result.errmsg ) == 'string';
+function resultNOK(result) {
+ return !result.ok && typeof(result.code) == 'number' && typeof(result.errmsg) == 'string';
}
// EACH TEST BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
@@ -32,19 +27,23 @@ function resultNOK( result ) {
//
// NO DOCS, illegal command
coll.remove({});
-request = { update: coll.getName() };
+request = {
+ update: coll.getName()
+};
result = coll.runCommand(request);
assert(resultNOK(result), tojson(result));
//
// Single document upsert, no write concern specified
coll.remove({});
-request = { update: coll.getName(),
- updates: [{ q: { a: 1 }, u: { $set: { a: 1 }}, upsert: true }]};
+request = {
+ update: coll.getName(),
+ updates: [{q: {a: 1}, u: {$set: {a: 1}}, upsert: true}]
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(1, result.n);
-assert( 'upserted' in result );
+assert('upserted' in result);
assert.eq(1, result.upserted.length);
assert.eq(0, result.upserted[0].index);
@@ -56,13 +55,15 @@ assert.eq(0, result.nModified, "missing/wrong nModified");
//
// Single document upsert, write concern specified, no ordered specified
coll.remove({});
-request = { update: coll.getName(),
- updates: [{ q: { a: 1 }, u: { $set: { a: 1 }}, upsert: true }],
- writeConcern: { w: 1 }};
+request = {
+ update: coll.getName(),
+ updates: [{q: {a: 1}, u: {$set: {a: 1}}, upsert: true}],
+ writeConcern: {w: 1}
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(1, result.n);
-assert( 'upserted' in result );
+assert('upserted' in result);
assert.eq(1, result.upserted.length);
assert.eq(0, result.upserted[0].index);
@@ -74,14 +75,16 @@ assert.eq(0, result.nModified, "missing/wrong nModified");
//
// Single document upsert, write concern specified, ordered = true
coll.remove({});
-request = { update: coll.getName(),
- updates: [{ q: { a: 1 }, u: { $set: { a: 1 }}, upsert: true }],
- writeConcern: { w: 1 },
- ordered: true };
+request = {
+ update: coll.getName(),
+ updates: [{q: {a: 1}, u: {$set: {a: 1}}, upsert: true}],
+ writeConcern: {w: 1},
+ ordered: true
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(1, result.n);
-assert( 'upserted' in result );
+assert('upserted' in result);
assert.eq(1, result.upserted.length);
assert.eq(0, result.upserted[0].index);
@@ -93,13 +96,15 @@ assert.eq(0, result.nModified, "missing/wrong nModified");
//
// Single document upsert, write concern 0 specified, ordered = true
coll.remove({});
-request = { update: coll.getName(),
- updates: [{ q: { a: 1 }, u: { $set: { a: 1 }}, upsert: true }],
- writeConcern: { w: 0 },
- ordered: true };
+request = {
+ update: coll.getName(),
+ updates: [{q: {a: 1}, u: {$set: {a: 1}}, upsert: true}],
+ writeConcern: {w: 0},
+ ordered: true
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
-assert.eq(1, coll.count({ }));
+assert.eq(1, coll.count({}));
for (var field in result) {
assert.eq('ok', field, 'unexpected field found in result: ' + field);
@@ -108,11 +113,15 @@ for (var field in result) {
//
// Two document upsert, write concern 0 specified, ordered = true
coll.remove({});
-request = { update: coll.getName(),
- updates: [{ q: { a: 2 }, u: { $set: { a: 1 }}, upsert: true },
- { q: { a: 2 }, u: { $set: { a: 2 }}, upsert: true }],
- writeConcern: { w: 0 },
- ordered: true };
+request = {
+ update: coll.getName(),
+ updates: [
+ {q: {a: 2}, u: {$set: {a: 1}}, upsert: true},
+ {q: {a: 2}, u: {$set: {a: 2}}, upsert: true}
+ ],
+ writeConcern: {w: 0},
+ ordered: true
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(2, coll.count());
@@ -124,10 +133,12 @@ for (var field in result) {
//
// Single document update
coll.remove({});
-coll.insert({a:1});
-request = { update: coll.getName(),
- updates: [{ q: { a: 1 }, u: { $set: { c: 1 }}}],
- writeConcern: { w: 1 }};
+coll.insert({a: 1});
+request = {
+ update: coll.getName(),
+ updates: [{q: {a: 1}, u: {$set: {c: 1}}}],
+ writeConcern: {w: 1}
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(1, result.n);
@@ -138,12 +149,16 @@ assert.eq(1, result.nModified, "missing/wrong nModified");
//
// Multi document update/upsert
coll.remove({});
-coll.insert({b:1});
-request = { update: coll.getName(),
- updates: [{ q: { b: 1 }, u: { $set: { b: 1, a: 1 }}, upsert: true },
- { q: { b: 2 }, u: { $set: { b: 2, a: 1 }}, upsert: true }],
- writeConcern: { w: 1 },
- ordered: false };
+coll.insert({b: 1});
+request = {
+ update: coll.getName(),
+ updates: [
+ {q: {b: 1}, u: {$set: {b: 1, a: 1}}, upsert: true},
+ {q: {b: 2}, u: {$set: {b: 2, a: 1}}, upsert: true}
+ ],
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(2, result.n);
@@ -157,86 +172,97 @@ assert.eq(2, coll.count());
//
// Multiple document update
coll.remove({});
-coll.insert({a:1});
-coll.insert({a:1});
-request = { update: coll.getName(),
- updates: [{ q: { a: 1 }, u: { $set: { c: 2 }}, multi: true }],
- writeConcern: { w: 1 },
- ordered: false };
+coll.insert({a: 1});
+coll.insert({a: 1});
+request = {
+ update: coll.getName(),
+ updates: [{q: {a: 1}, u: {$set: {c: 2}}, multi: true}],
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(2, result.n);
assert.eq(2, result.nModified, "missing/wrong nModified");
-assert.eq(2, coll.find({a:1, c:2}).count());
+assert.eq(2, coll.find({a: 1, c: 2}).count());
assert.eq(2, coll.count());
//
-//Multiple document update, some no-ops
+// Multiple document update, some no-ops
coll.remove({});
-coll.insert({a:1, c:2});
-coll.insert({a:1});
-request = { update: coll.getName(),
- updates: [{ q: { a: 1 }, u: { $set: { c: 2 }}, multi: true }],
- writeConcern: { w: 1 },
- ordered: false };
-printjson( result = coll.runCommand(request) );
+coll.insert({a: 1, c: 2});
+coll.insert({a: 1});
+request = {
+ update: coll.getName(),
+ updates: [{q: {a: 1}, u: {$set: {c: 2}}, multi: true}],
+ writeConcern: {w: 1},
+ ordered: false
+};
+printjson(result = coll.runCommand(request));
assert(resultOK(result), tojson(result));
assert.eq(2, result.n);
assert.eq(1, result.nModified, "missing/wrong nModified");
-assert.eq(2, coll.find({a:1, c:2}).count());
+assert.eq(2, coll.find({a: 1, c: 2}).count());
assert.eq(2, coll.count());
//
// Large batch under the size threshold should update successfully
coll.remove({});
-coll.insert({a:0});
+coll.insert({a: 0});
batch = [];
for (var i = 0; i < maxWriteBatchSize; ++i) {
- batch.push({q:{}, u: {$inc: {a:1}}});
+ batch.push({q: {}, u: {$inc: {a: 1}}});
}
-request = { update: coll.getName(),
- updates: batch,
- writeConcern: { w: 1 },
- ordered: false };
+request = {
+ update: coll.getName(),
+ updates: batch,
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultOK(result), tojson(result));
assert.eq(batch.length, result.n);
assert.eq(batch.length, result.nModified, "missing/wrong nModified");
-assert.eq(1, coll.find({a:batch.length}).count());
+assert.eq(1, coll.find({a: batch.length}).count());
assert.eq(1, coll.count());
//
// Large batch above the size threshold should fail to update
coll.remove({});
-coll.insert({a:0});
+coll.insert({a: 0});
batch = [];
for (var i = 0; i < maxWriteBatchSize + 1; ++i) {
- batch.push({q:{}, u: {$inc: {a:1}}});
+ batch.push({q: {}, u: {$inc: {a: 1}}});
}
-request = { update: coll.getName(),
- updates: batch,
- writeConcern: { w: 1 },
- ordered: false };
+request = {
+ update: coll.getName(),
+ updates: batch,
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(resultNOK(result), tojson(result));
-assert.eq(1, coll.find({a:0}).count());
+assert.eq(1, coll.find({a: 0}).count());
assert.eq(1, coll.count());
-
//
//
// Unique index tests
coll.remove({});
-coll.ensureIndex({a : 1}, {unique : true});
+coll.ensureIndex({a: 1}, {unique: true});
//
// Upsert fail due to duplicate key index, w:0, ordered:true
coll.remove({});
-request = { update: coll.getName(),
- updates: [{ q: { b: 1 }, u: { $set: { b: 1, a: 1 }}, upsert: true },
- { q: { b: 2 }, u: { $set: { b: 2, a: 1 }}, upsert: true }],
- writeConcern: { w: 0 },
- ordered: true };
+request = {
+ update: coll.getName(),
+ updates: [
+ {q: {b: 1}, u: {$set: {b: 1, a: 1}}, upsert: true},
+ {q: {b: 2}, u: {$set: {b: 2, a: 1}}, upsert: true}
+ ],
+ writeConcern: {w: 0},
+ ordered: true
+};
result = coll.runCommand(request);
assert(result.ok, tojson(result));
assert.eq(1, coll.count());
@@ -248,12 +274,16 @@ for (var field in result) {
//
// Upsert fail due to duplicate key index, w:1, ordered:true
coll.remove({});
-request = { update: coll.getName(),
- updates: [{ q: { b: 1 }, u: { $set: { b: 1, a: 1 }}, upsert: true },
- { q: { b: 3 }, u: { $set: { b: 3, a: 2 }}, upsert: true },
- { q: { b: 2 }, u: { $set: { b: 2, a: 1 }}, upsert: true }],
- writeConcern: { w: 1 },
- ordered: true };
+request = {
+ update: coll.getName(),
+ updates: [
+ {q: {b: 1}, u: {$set: {b: 1, a: 1}}, upsert: true},
+ {q: {b: 3}, u: {$set: {b: 3, a: 2}}, upsert: true},
+ {q: {b: 2}, u: {$set: {b: 2, a: 1}}, upsert: true}
+ ],
+ writeConcern: {w: 1},
+ ordered: true
+};
result = coll.runCommand(request);
assert(result.ok, tojson(result));
assert.eq(2, result.n);
@@ -274,13 +304,17 @@ assert.eq(1, coll.count({_id: result.upserted[1]._id}));
//
// Upsert fail due to duplicate key index, w:1, ordered:false
coll.remove({});
-request = { update: coll.getName(),
- updates: [{ q: { b: 1 }, u: { $set: { b: 1, a: 1 }}, upsert: true },
- { q: { b: 2 }, u: { $set: { b: 2, a: 1 }}, upsert: true },
- { q: { b: 2 }, u: { $set: { b: 2, a: 1 }}, upsert: true },
- { q: { b: 3 }, u: { $set: { b: 3, a: 3 }}, upsert: true }],
- writeConcern: { w: 1 },
- ordered: false };
+request = {
+ update: coll.getName(),
+ updates: [
+ {q: {b: 1}, u: {$set: {b: 1, a: 1}}, upsert: true},
+ {q: {b: 2}, u: {$set: {b: 2, a: 1}}, upsert: true},
+ {q: {b: 2}, u: {$set: {b: 2, a: 1}}, upsert: true},
+ {q: {b: 3}, u: {$set: {b: 3, a: 3}}, upsert: true}
+ ],
+ writeConcern: {w: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(result.ok, tojson(result));
assert.eq(2, result.n);
diff --git a/jstests/core/batch_write_command_wc.js b/jstests/core/batch_write_command_wc.js
index bad79e5d605..07a6bf96f9d 100644
--- a/jstests/core/batch_write_command_wc.js
+++ b/jstests/core/batch_write_command_wc.js
@@ -2,15 +2,13 @@
// Ensures that the server respects the batch write command WriteConcern, and behaves correctly
//
-var coll = db.getCollection( "batch_write_wc" );
+var coll = db.getCollection("batch_write_wc");
coll.drop();
assert(coll.getDB().getMongo().useWriteCommands(), "test is not running with write commands");
// Basic validation of WriteConcern
// -- {}, versus {w:0}/{w:1} +opt wTimeout
-// -- j:1, fsync:1,
+// -- j:1, fsync:1,
// -- replication: w:N (>1), w:String, wTimeout
// -- randomField:true, etc
-
-
diff --git a/jstests/core/bench_test1.js b/jstests/core/bench_test1.js
index bbc38af2ba5..2242cf229ec 100644
--- a/jstests/core/bench_test1.js
+++ b/jstests/core/bench_test1.js
@@ -2,36 +2,40 @@
t = db.bench_test1;
t.drop();
-t.insert( { _id : 1 , x : 1 } );
-t.insert( { _id : 2 , x : 1 } );
+t.insert({_id: 1, x: 1});
+t.insert({_id: 2, x: 1});
ops = [
- { op : "findOne" , ns : t.getFullName() , query : { _id : 1 } } ,
- { op : "update" , ns : t.getFullName() , query : { _id : 1 } , update : { $inc : { x : 1 } } }
+ {op: "findOne", ns: t.getFullName(), query: {_id: 1}},
+ {op: "update", ns: t.getFullName(), query: {_id: 1}, update: {$inc: {x: 1}}}
];
seconds = 2;
-benchArgs = { ops : ops , parallel : 2 , seconds : seconds , host : db.getMongo().host };
+benchArgs = {
+ ops: ops,
+ parallel: 2,
+ seconds: seconds,
+ host: db.getMongo().host
+};
if (jsTest.options().auth) {
benchArgs['db'] = 'admin';
benchArgs['username'] = jsTest.options().adminUser;
benchArgs['password'] = jsTest.options().adminPassword;
}
-res = benchRun( benchArgs );
-
-assert.lte( seconds * res.update , t.findOne( { _id : 1 } ).x * 1.5 , "A1" );
-
-
-assert.eq( 1 , t.getIndexes().length , "B1" );
-benchArgs['ops']=[ { op : "createIndex" , ns : t.getFullName() , key : { x : 1 } } ];
-benchArgs['parallel']=1;
-benchArgs['seconds']=1;
-benchRun( benchArgs );
-assert.eq( 2 , t.getIndexes().length , "B2" );
-benchArgs['ops']=[ { op : "dropIndex" , ns : t.getFullName() , key : { x : 1 } } ];
-benchRun( benchArgs );
-assert.soon( function(){ return t.getIndexes().length == 1; } );
-
-
+res = benchRun(benchArgs);
+
+assert.lte(seconds * res.update, t.findOne({_id: 1}).x * 1.5, "A1");
+
+assert.eq(1, t.getIndexes().length, "B1");
+benchArgs['ops'] = [{op: "createIndex", ns: t.getFullName(), key: {x: 1}}];
+benchArgs['parallel'] = 1;
+benchArgs['seconds'] = 1;
+benchRun(benchArgs);
+assert.eq(2, t.getIndexes().length, "B2");
+benchArgs['ops'] = [{op: "dropIndex", ns: t.getFullName(), key: {x: 1}}];
+benchRun(benchArgs);
+assert.soon(function() {
+ return t.getIndexes().length == 1;
+});
diff --git a/jstests/core/bench_test2.js b/jstests/core/bench_test2.js
index c2e3881632c..072686348e4 100644
--- a/jstests/core/bench_test2.js
+++ b/jstests/core/bench_test2.js
@@ -2,16 +2,20 @@
t = db.bench_test2;
t.drop();
-for ( i=0; i<100; i++ )
- t.insert( { _id : i , x : 0 } );
-
-benchArgs = { ops : [ { ns : t.getFullName() ,
- op : "update" ,
- query : { _id : { "#RAND_INT" : [ 0 , 100 ] } } ,
- update : { $inc : { x : 1 } } } ] ,
- parallel : 2 ,
- seconds : 1 ,
- host : db.getMongo().host };
+for (i = 0; i < 100; i++)
+ t.insert({_id: i, x: 0});
+
+benchArgs = {
+ ops: [{
+ ns: t.getFullName(),
+ op: "update",
+ query: {_id: {"#RAND_INT": [0, 100]}},
+ update: {$inc: {x: 1}}
+ }],
+ parallel: 2,
+ seconds: 1,
+ host: db.getMongo().host
+};
if (jsTest.options().auth) {
benchArgs['db'] = 'admin';
@@ -19,29 +23,25 @@ if (jsTest.options().auth) {
benchArgs['password'] = jsTest.options().adminPassword;
}
-res = benchRun( benchArgs );
-printjson( res );
+res = benchRun(benchArgs);
+printjson(res);
sumsq = 0;
sum = 0;
min = 1000;
max = 0;
-t.find().forEach(
- function(z){
- sum += z.x;
- sumsq += Math.pow( ( res.update / 100 ) - z.x , 2 );
- min = Math.min( z.x , min );
- max = Math.max( z.x , max );
- }
-);
+t.find().forEach(function(z) {
+ sum += z.x;
+ sumsq += Math.pow((res.update / 100) - z.x, 2);
+ min = Math.min(z.x, min);
+ max = Math.max(z.x, max);
+});
avg = sum / 100;
-std = Math.sqrt( sumsq / 100 );
-
-print( "Avg: " + avg );
-print( "Std: " + std );
-print( "Min: " + min );
-print( "Max: " + max );
-
+std = Math.sqrt(sumsq / 100);
+print("Avg: " + avg);
+print("Std: " + std);
+print("Min: " + min);
+print("Max: " + max);
diff --git a/jstests/core/bench_test3.js b/jstests/core/bench_test3.js
index 2e130662829..24e230cc16d 100644
--- a/jstests/core/bench_test3.js
+++ b/jstests/core/bench_test3.js
@@ -1,15 +1,18 @@
t = db.bench_test3;
t.drop();
-
-benchArgs = { ops : [ { ns : t.getFullName() ,
- op : "update" ,
- upsert : true ,
- query : { _id : { "#RAND_INT" : [ 0 , 5 , 4 ] } } ,
- update : { $inc : { x : 1 } } } ] ,
- parallel : 2 ,
- seconds : 5 ,
- host : db.getMongo().host };
+benchArgs = {
+ ops: [{
+ ns: t.getFullName(),
+ op: "update",
+ upsert: true,
+ query: {_id: {"#RAND_INT": [0, 5, 4]}},
+ update: {$inc: {x: 1}}
+ }],
+ parallel: 2,
+ seconds: 5,
+ host: db.getMongo().host
+};
if (jsTest.options().auth) {
benchArgs['db'] = 'admin';
@@ -17,11 +20,14 @@ if (jsTest.options().auth) {
benchArgs['password'] = jsTest.options().adminPassword;
}
-res = benchRun( benchArgs );
-printjson( res );
+res = benchRun(benchArgs);
+printjson(res);
var keys = [];
var totals = {};
-db.bench_test3.find().sort( { _id : 1 } ).forEach( function(z){ keys.push( z._id ); totals[z._id] = z.x; } );
+db.bench_test3.find().sort({_id: 1}).forEach(function(z) {
+ keys.push(z._id);
+ totals[z._id] = z.x;
+});
printjson(totals);
-assert.eq( [ 0 , 4 , 8 , 12 , 16 ] , keys );
+assert.eq([0, 4, 8, 12, 16], keys);
diff --git a/jstests/core/big_object1.js b/jstests/core/big_object1.js
index 82ecf025799..017fcdc9756 100644
--- a/jstests/core/big_object1.js
+++ b/jstests/core/big_object1.js
@@ -2,54 +2,54 @@
t = db.big_object1;
t.drop();
-if ( db.adminCommand( "buildinfo" ).bits == 64 ){
-
+if (db.adminCommand("buildinfo").bits == 64) {
var large = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
var s = large;
- while ( s.length < 850 * 1024 ){
+ while (s.length < 850 * 1024) {
s += large;
}
x = 0;
- while ( true ){
+ while (true) {
var result;
- n = { _id : x , a : [] };
- for ( i=0; i<14+x; i++ )
- n.a.push( s );
+ n = {
+ _id: x,
+ a: []
+ };
+ for (i = 0; i < 14 + x; i++)
+ n.a.push(s);
try {
- result = t.insert( n );
+ result = t.insert(n);
o = n;
- }
- catch ( e ){
+ } catch (e) {
break;
}
-
- if ( result.hasWriteError() )
+
+ if (result.hasWriteError())
break;
x++;
}
-
- printjson( t.stats(1024*1024) );
-
- assert.lt( 15 * 1024 * 1024 , Object.bsonsize( o ) , "A1" );
- assert.gt( 17 * 1024 * 1024 , Object.bsonsize( o ) , "A2" );
-
- assert.eq( x , t.count() , "A3" );
-
- for ( i=0; i<x; i++ ){
- o = t.findOne( { _id : i } );
+
+ printjson(t.stats(1024 * 1024));
+
+ assert.lt(15 * 1024 * 1024, Object.bsonsize(o), "A1");
+ assert.gt(17 * 1024 * 1024, Object.bsonsize(o), "A2");
+
+ assert.eq(x, t.count(), "A3");
+
+ for (i = 0; i < x; i++) {
+ o = t.findOne({_id: i});
try {
// test large mongo -> js conversion
var a = o.a;
- } catch(e) {
+ } catch (e) {
assert(false, "Caught exception trying to insert during iteration " + i + ": " + e);
}
- assert( o , "B" + i );
+ assert(o, "B" + i);
}
-
+
t.drop();
-}
-else {
- print( "skipping big_object1 b/c not 64-bit" );
+} else {
+ print("skipping big_object1 b/c not 64-bit");
}
print("SUCCESS");
diff --git a/jstests/core/binData.js b/jstests/core/binData.js
index 3f037650e05..521815bec95 100644
--- a/jstests/core/binData.js
+++ b/jstests/core/binData.js
@@ -5,10 +5,16 @@ assert.eq(x.base64(), "OEJTfmD8twzaj/LPKLIVkA==", "bad base64");
assert.eq(x.type, 3, "bad type");
assert.eq(x.length(), 16, "bad length");
-x = new BinData(0, "TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=");
-assert.eq(x.hex(), "4d616e2069732064697374696e677569736865642c206e6f74206f6e6c792062792068697320726561736f6e2c2062757420627920746869732073696e67756c61722070617373696f6e2066726f6d206f7468657220616e696d616c732c2077686963682069732061206c757374206f6620746865206d696e642c20746861742062792061207065727365766572616e6365206f662064656c6967687420696e2074686520636f6e74696e75656420616e6420696e6465666174696761626c652067656e65726174696f6e206f66206b6e6f776c656467652c2065786365656473207468652073686f727420766568656d656e6365206f6620616e79206361726e616c20706c6561737572652e", "bad hex");
-assert.eq(x.base64(), "TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=", "bad base64");
+x = new BinData(
+ 0,
+ "TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=");
+assert.eq(
+ x.hex(),
+ "4d616e2069732064697374696e677569736865642c206e6f74206f6e6c792062792068697320726561736f6e2c2062757420627920746869732073696e67756c61722070617373696f6e2066726f6d206f7468657220616e696d616c732c2077686963682069732061206c757374206f6620746865206d696e642c20746861742062792061207065727365766572616e6365206f662064656c6967687420696e2074686520636f6e74696e75656420616e6420696e6465666174696761626c652067656e65726174696f6e206f66206b6e6f776c656467652c2065786365656473207468652073686f727420766568656d656e6365206f6620616e79206361726e616c20706c6561737572652e",
+ "bad hex");
+assert.eq(
+ x.base64(),
+ "TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlzIHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2YgdGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGludWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRoZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=",
+ "bad base64");
assert.eq(x.type, 0, "bad type");
assert.eq(x.length(), 269, "bad length");
-
-
diff --git a/jstests/core/bindata_indexonly.js b/jstests/core/bindata_indexonly.js
index ece4a1c82eb..a8a2d281560 100644
--- a/jstests/core/bindata_indexonly.js
+++ b/jstests/core/bindata_indexonly.js
@@ -18,14 +18,16 @@
assert.commandWorked(coll.createIndex({_id: 1, a: 1}));
function testIndexOnlyBinData(blob) {
- var explain = coll.find({$and: [{_id: {$lte: BinData(0, blob)}},
- {_id: {$gte: BinData(0, blob)}}]}, {_id: 1, a: 1})
- .hint({_id: 1, a: 1})
- .explain("executionStats");
+ var explain =
+ coll.find({$and: [{_id: {$lte: BinData(0, blob)}}, {_id: {$gte: BinData(0, blob)}}]},
+ {_id: 1, a: 1})
+ .hint({_id: 1, a: 1})
+ .explain("executionStats");
assert(isIndexOnly(explain.queryPlanner.winningPlan),
"indexonly.BinData(0, " + blob + ") - must be index-only");
- assert.eq(1, explain.executionStats.nReturned,
+ assert.eq(1,
+ explain.executionStats.nReturned,
"EXACTone.BinData(0, " + blob + ") - should only return one in unique set");
}
@@ -40,28 +42,32 @@
.hint({_id: 1, a: 1})
.explain("executionStats");
assert(isIndexOnly(explain), "indexonly.$lt.1 - must be index-only");
- assert.eq(0, explain.executionStats.nReturned,
+ assert.eq(0,
+ explain.executionStats.nReturned,
"correctcount.$lt.1 - not returning correct documents");
explain = coll.find({_id: {$gt: BinData(0, "////////////////////////////")}}, {_id: 1, a: 1})
.hint({_id: 1, a: 1})
.explain("executionStats");
assert(isIndexOnly(explain), "indexonly.$gt.2 - must be index-only");
- assert.eq(0, explain.executionStats.nReturned,
+ assert.eq(0,
+ explain.executionStats.nReturned,
"correctcount.$gt.2 - not returning correct documents");
explain = coll.find({_id: {$lte: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhv")}}, {_id: 1, a: 1})
.hint({_id: 1, a: 1})
.explain("executionStats");
assert(isIndexOnly(explain), "indexonly.$lte.3 - must be index-only");
- assert.eq(2, explain.executionStats.nReturned,
+ assert.eq(2,
+ explain.executionStats.nReturned,
"correctcount.$lte.3 - not returning correct documents");
explain = coll.find({_id: {$gte: BinData(0, "AQAAAAEBAAVlbl9VSwAAAAAAAAhz")}}, {_id: 1, a: 1})
.hint({_id: 1, a: 1})
.explain("executionStats");
assert(isIndexOnly(explain), "indexonly.$gte.3 - must be index-only");
- assert.eq(2, explain.executionStats.nReturned,
+ assert.eq(2,
+ explain.executionStats.nReturned,
"correctcount.$gte.3 - not returning correct documents");
coll.drop();
diff --git a/jstests/core/bittest.js b/jstests/core/bittest.js
index ebd44734952..45559d8f505 100644
--- a/jstests/core/bittest.js
+++ b/jstests/core/bittest.js
@@ -12,7 +12,8 @@
var explain = coll.find(query).explain("executionStats");
assert(isCollscan(explain.queryPlanner.winningPlan),
"expected bit test query plan to be COLLSCAN");
- assert.eq(count, explain.executionStats.nReturned,
+ assert.eq(count,
+ explain.executionStats.nReturned,
"bit test query not returning correct documents");
}
@@ -95,7 +96,7 @@
// Tests with array of bit positions.
var allPositions = [];
- for (var i = 0; i < 64; i ++) {
+ for (var i = 0; i < 64; i++) {
allPositions.push(i);
}
assertQueryCorrect({a: {$bitsAllSet: []}}, 3);
@@ -143,8 +144,14 @@
assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "////////////////////////////")}}, 3);
// Tests with multiple predicates.
- assertQueryCorrect({a: {$bitsAllSet: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA"),
- $bitsAllClear: BinData(0, "//yf////////////////////////")}}, 1);
+ assertQueryCorrect(
+ {
+ a: {
+ $bitsAllSet: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA"),
+ $bitsAllClear: BinData(0, "//yf////////////////////////")
+ }
+ },
+ 1);
coll.drop();
})(); \ No newline at end of file
diff --git a/jstests/core/bulk_api_ordered.js b/jstests/core/bulk_api_ordered.js
index 87ecd66a1b5..a880cdb630e 100644
--- a/jstests/core/bulk_api_ordered.js
+++ b/jstests/core/bulk_api_ordered.js
@@ -20,32 +20,36 @@ var executeTests = function() {
*/
var bulkOp = coll.initializeOrderedBulkOp();
- assert.throws( function(){ bulkOp.find();} );
- assert.throws( function(){ bulkOp.insert({$key: 1});} );
+ assert.throws(function() {
+ bulkOp.find();
+ });
+ assert.throws(function() {
+ bulkOp.insert({$key: 1});
+ });
/**
* Single successful ordered bulk operation
*/
var bulkOp = coll.initializeOrderedBulkOp();
- bulkOp.insert({a:1});
- bulkOp.find({a:1}).updateOne({$set: {b:1}});
+ bulkOp.insert({a: 1});
+ bulkOp.find({a: 1}).updateOne({$set: {b: 1}});
// no-op, should increment nMatched but not nModified
- bulkOp.find({a:1}).updateOne({$set: {b:1}});
- bulkOp.find({a:2}).upsert().updateOne({$set: {b:2}});
- bulkOp.insert({a:3});
- bulkOp.find({a:3}).update({$set: {b:1}});
- bulkOp.find({a:3}).upsert().update({$set: {b:2}});
- bulkOp.find({a:10}).upsert().update({$set: {b:2}});
- bulkOp.find({a:2}).replaceOne({a:11});
- bulkOp.find({a:11}).removeOne();
- bulkOp.find({a:3}).remove({a:3});
+ bulkOp.find({a: 1}).updateOne({$set: {b: 1}});
+ bulkOp.find({a: 2}).upsert().updateOne({$set: {b: 2}});
+ bulkOp.insert({a: 3});
+ bulkOp.find({a: 3}).update({$set: {b: 1}});
+ bulkOp.find({a: 3}).upsert().update({$set: {b: 2}});
+ bulkOp.find({a: 10}).upsert().update({$set: {b: 2}});
+ bulkOp.find({a: 2}).replaceOne({a: 11});
+ bulkOp.find({a: 11}).removeOne();
+ bulkOp.find({a: 3}).remove({a: 3});
var result = bulkOp.execute();
assert.eq(2, result.nInserted);
assert.eq(2, result.nUpserted);
assert.eq(5, result.nMatched);
// only check nModified if write commands are enabled
- if ( coll.getMongo().writeMode() == "commands" ) {
- assert.eq(4, result.nModified);
+ if (coll.getMongo().writeMode() == "commands") {
+ assert.eq(4, result.nModified);
}
assert.eq(2, result.nRemoved);
var upserts = result.getUpsertedIds();
@@ -58,29 +62,35 @@ var executeTests = function() {
assert.eq(2, coll.find({}).itcount(), "find should return two documents");
// illegal to try to convert a multi-op batch into a SingleWriteResult
- assert.throws(function() { result.toSingleResult(); } );
+ assert.throws(function() {
+ result.toSingleResult();
+ });
// attempt to re-run bulk operation
- assert.throws(function() { bulkOp.execute(); } );
+ assert.throws(function() {
+ bulkOp.execute();
+ });
// Test SingleWriteResult
var singleBatch = coll.initializeOrderedBulkOp();
- singleBatch.find({a:4}).upsert().updateOne({$set: {b:1}});
+ singleBatch.find({a: 4}).upsert().updateOne({$set: {b: 1}});
var singleResult = singleBatch.execute().toSingleResult();
assert(singleResult.getUpsertedId() != null);
// Create unique index
coll.remove({});
- coll.ensureIndex({a : 1}, {unique : true});
+ coll.ensureIndex({a: 1}, {unique: true});
/**
* Single error ordered bulk operation
*/
var bulkOp = coll.initializeOrderedBulkOp();
- bulkOp.insert({b:1, a:1});
- bulkOp.find({b:2}).upsert().updateOne({$set: {a:1}});
- bulkOp.insert({b:3, a:2});
- var result = assert.throws( function() { bulkOp.execute(); } );
+ bulkOp.insert({b: 1, a: 1});
+ bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
+ bulkOp.insert({b: 3, a: 2});
+ var result = assert.throws(function() {
+ bulkOp.execute();
+ });
assert(result instanceof BulkWriteError);
assert(result instanceof Error);
// Basic properties check
@@ -107,19 +117,21 @@ var executeTests = function() {
// Create unique index
coll.dropIndexes();
coll.remove({});
- coll.ensureIndex({a : 1}, {unique : true});
+ coll.ensureIndex({a: 1}, {unique: true});
/**
* Multiple error ordered bulk operation
*/
var bulkOp = coll.initializeOrderedBulkOp();
- bulkOp.insert({b:1, a:1});
- bulkOp.find({b:2}).upsert().updateOne({$set: {a:1}});
- bulkOp.find({b:3}).upsert().updateOne({$set: {a:2}});
- bulkOp.find({b:2}).upsert().updateOne({$set: {a:1}});
- bulkOp.insert({b:4, a:3});
- bulkOp.insert({b:5, a:1});
- var result = assert.throws( function() { bulkOp.execute(); } );
+ bulkOp.insert({b: 1, a: 1});
+ bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
+ bulkOp.find({b: 3}).upsert().updateOne({$set: {a: 2}});
+ bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
+ bulkOp.insert({b: 4, a: 3});
+ bulkOp.insert({b: 5, a: 1});
+ var result = assert.throws(function() {
+ bulkOp.execute();
+ });
// Basic properties check
assert.eq(1, result.nInserted);
@@ -139,17 +151,17 @@ var executeTests = function() {
// Create unique index
coll.dropIndexes();
coll.remove({});
- coll.ensureIndex({a : 1}, {unique : true});
+ coll.ensureIndex({a: 1}, {unique: true});
};
-var buildVersion = parseInt(db.runCommand({buildInfo:1}).versionArray.slice(0, 3).join(""), 10);
+var buildVersion = parseInt(db.runCommand({buildInfo: 1}).versionArray.slice(0, 3).join(""), 10);
// Save the existing useWriteCommands function
var _useWriteCommands = coll.getMongo().useWriteCommands;
//
// Only execute write command tests if we have > 2.5.5 otherwise
// execute the down converted version
-if(buildVersion >= 255) {
+if (buildVersion >= 255) {
// Force the use of useWriteCommands
coll._mongo.useWriteCommands = function() {
return true;
diff --git a/jstests/core/bulk_api_unordered.js b/jstests/core/bulk_api_unordered.js
index 0323dabd10b..6720e644e47 100644
--- a/jstests/core/bulk_api_unordered.js
+++ b/jstests/core/bulk_api_unordered.js
@@ -22,32 +22,36 @@ var executeTests = function() {
*/
var bulkOp = coll.initializeUnorderedBulkOp();
- assert.throws( function(){ bulkOp.find();} );
- assert.throws( function(){ bulkOp.insert({$key: 1});} );
+ assert.throws(function() {
+ bulkOp.find();
+ });
+ assert.throws(function() {
+ bulkOp.insert({$key: 1});
+ });
/**
* Single successful unordered bulk operation
*/
var bulkOp = coll.initializeUnorderedBulkOp();
- bulkOp.insert({a:1});
- bulkOp.find({a:1}).updateOne({$set: {b:1}});
+ bulkOp.insert({a: 1});
+ bulkOp.find({a: 1}).updateOne({$set: {b: 1}});
// no-op, should increment nMatched but not nModified
- bulkOp.find({a:1}).updateOne({$set: {b:1}});
- bulkOp.find({a:2}).upsert().updateOne({$set: {b:2}});
- bulkOp.insert({a:3});
- bulkOp.find({a:3}).update({$set: {b:1}});
- bulkOp.find({a:3}).upsert().update({$set: {b:2}});
- bulkOp.find({a:10}).upsert().update({$set: {b:2}});
- bulkOp.find({a:2}).replaceOne({a:11});
- bulkOp.find({a:11}).removeOne();
- bulkOp.find({a:3}).remove({a:3});
+ bulkOp.find({a: 1}).updateOne({$set: {b: 1}});
+ bulkOp.find({a: 2}).upsert().updateOne({$set: {b: 2}});
+ bulkOp.insert({a: 3});
+ bulkOp.find({a: 3}).update({$set: {b: 1}});
+ bulkOp.find({a: 3}).upsert().update({$set: {b: 2}});
+ bulkOp.find({a: 10}).upsert().update({$set: {b: 2}});
+ bulkOp.find({a: 2}).replaceOne({a: 11});
+ bulkOp.find({a: 11}).removeOne();
+ bulkOp.find({a: 3}).remove({a: 3});
var result = bulkOp.execute();
assert.eq(2, result.nInserted);
assert.eq(2, result.nUpserted);
assert.eq(5, result.nMatched);
// only check nModified if write commands are enabled
- if ( coll.getMongo().writeMode() == "commands" ) {
- assert.eq(4, result.nModified);
+ if (coll.getMongo().writeMode() == "commands") {
+ assert.eq(4, result.nModified);
}
assert.eq(2, result.nRemoved);
assert.eq(false, result.hasWriteErrors());
@@ -62,29 +66,35 @@ var executeTests = function() {
assert.eq(2, coll.find({}).itcount(), "find should return two documents");
// illegal to try to convert a multi-op batch into a SingleWriteResult
- assert.throws(function() { result.toSingleResult(); } );
+ assert.throws(function() {
+ result.toSingleResult();
+ });
// attempt to re-run bulk
- assert.throws(function() { bulkOp.execute(); } );
+ assert.throws(function() {
+ bulkOp.execute();
+ });
// Test SingleWriteResult
var singleBatch = coll.initializeUnorderedBulkOp();
- singleBatch.find({a:4}).upsert().updateOne({$set: {b:1}});
+ singleBatch.find({a: 4}).upsert().updateOne({$set: {b: 1}});
var singleResult = singleBatch.execute().toSingleResult();
assert(singleResult.getUpsertedId() != null);
// Create unique index
coll.remove({});
- coll.ensureIndex({a : 1}, {unique : true});
+ coll.ensureIndex({a: 1}, {unique: true});
/**
* Single error unordered bulk operation
*/
var bulkOp = coll.initializeUnorderedBulkOp();
- bulkOp.insert({b:1, a:1});
- bulkOp.find({b:2}).upsert().updateOne({$set: {a:1}});
- bulkOp.insert({b:3, a:2});
- var result = assert.throws( function() { bulkOp.execute(); } );
+ bulkOp.insert({b: 1, a: 1});
+ bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
+ bulkOp.insert({b: 3, a: 2});
+ var result = assert.throws(function() {
+ bulkOp.execute();
+ });
// Basic properties check
assert.eq(2, result.nInserted);
@@ -106,19 +116,21 @@ var executeTests = function() {
// Create unique index
coll.dropIndexes();
coll.remove({});
- coll.ensureIndex({a : 1}, {unique : true});
+ coll.ensureIndex({a: 1}, {unique: true});
/**
* Multiple error unordered bulk operation
*/
var bulkOp = coll.initializeUnorderedBulkOp();
- bulkOp.insert({b:1, a:1});
- bulkOp.find({b:2}).upsert().updateOne({$set: {a:1}});
- bulkOp.find({b:3}).upsert().updateOne({$set: {a:2}});
- bulkOp.find({b:2}).upsert().updateOne({$set: {a:1}});
- bulkOp.insert({b:4, a:3});
- bulkOp.insert({b:5, a:1});
- var result = assert.throws( function() { bulkOp.execute(); } );
+ bulkOp.insert({b: 1, a: 1});
+ bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
+ bulkOp.find({b: 3}).upsert().updateOne({$set: {a: 2}});
+ bulkOp.find({b: 2}).upsert().updateOne({$set: {a: 1}});
+ bulkOp.insert({b: 4, a: 3});
+ bulkOp.insert({b: 5, a: 1});
+ var result = assert.throws(function() {
+ bulkOp.execute();
+ });
// Basic properties check
assert.eq(2, result.nInserted);
@@ -154,17 +166,17 @@ var executeTests = function() {
// Create unique index
coll.dropIndexes();
coll.remove({});
- coll.ensureIndex({a : 1}, {unique : true});
+ coll.ensureIndex({a: 1}, {unique: true});
};
-var buildVersion = parseInt(db.runCommand({buildInfo:1}).versionArray.slice(0, 3).join(""), 10);
+var buildVersion = parseInt(db.runCommand({buildInfo: 1}).versionArray.slice(0, 3).join(""), 10);
// Save the existing useWriteCommands function
var _useWriteCommands = coll.getMongo().useWriteCommands;
//
// Only execute write command tests if we have > 2.5.5 otherwise
// execute the down converted version
-if(buildVersion >= 255) {
+if (buildVersion >= 255) {
// Force the use of useWriteCommands
coll._mongo.useWriteCommands = function() {
return true;
diff --git a/jstests/core/bulk_insert.js b/jstests/core/bulk_insert.js
index a946cba8ddb..157b24aabba 100644
--- a/jstests/core/bulk_insert.js
+++ b/jstests/core/bulk_insert.js
@@ -4,21 +4,21 @@ var coll = db.bulkInsertTest;
coll.drop();
var seed = new Date().getTime();
-Random.srand( seed );
+Random.srand(seed);
print("Seed for randomized test is " + seed);
-var bulkSize = Math.floor( Random.rand() * 200 ) + 1;
-var numInserts = Math.floor( Random.rand() * 300 ) + 1;
+var bulkSize = Math.floor(Random.rand() * 200) + 1;
+var numInserts = Math.floor(Random.rand() * 300) + 1;
-print( "Inserting " + numInserts + " bulks of " + bulkSize + " documents." );
+print("Inserting " + numInserts + " bulks of " + bulkSize + " documents.");
-for( var i = 0; i < numInserts; i++ ){
+for (var i = 0; i < numInserts; i++) {
var bulk = [];
- for( var j = 0; j < bulkSize; j++ ){
- bulk.push({ hi : "there", i : i, j : j });
+ for (var j = 0; j < bulkSize; j++) {
+ bulk.push({hi: "there", i: i, j: j});
}
-
- coll.insert( bulk );
+
+ coll.insert(bulk);
}
-assert.eq( coll.count(), bulkSize * numInserts );
+assert.eq(coll.count(), bulkSize * numInserts);
diff --git a/jstests/core/bulk_insert_capped.js b/jstests/core/bulk_insert_capped.js
index 70edf98ca4e..129c393dbfb 100644
--- a/jstests/core/bulk_insert_capped.js
+++ b/jstests/core/bulk_insert_capped.js
@@ -5,19 +5,19 @@
var t = db.capped_multi_insert;
t.drop();
- db.createCollection(t.getName(), {capped: true, size: 16*1024, max: 1});
+ db.createCollection(t.getName(), {capped: true, size: 16 * 1024, max: 1});
- t.insert([{_id:1}, {_id:2}]);
+ t.insert([{_id: 1}, {_id: 2}]);
assert.gleSuccess(db);
// Ensure the collection is valid.
var res = t.validate(true);
assert(res.valid, tojson(res));
-
+
// Ensure that various ways of iterating the collection only return one document.
- assert.eq(t.find().itcount(), 1); // Table scan.
- assert.eq(t.find({}, {_id: 1}).hint({_id: 1}).itcount(), 1); // Index only (covered).
- assert.eq(t.find().hint({_id: 1}).itcount(), 1); // Index scan with fetch.
+ assert.eq(t.find().itcount(), 1); // Table scan.
+ assert.eq(t.find({}, {_id: 1}).hint({_id: 1}).itcount(), 1); // Index only (covered).
+ assert.eq(t.find().hint({_id: 1}).itcount(), 1); // Index scan with fetch.
// Ensure that the second document is the one that is kept.
assert.eq(t.findOne(), {_id: 2});
diff --git a/jstests/core/bulk_legacy_enforce_gle.js b/jstests/core/bulk_legacy_enforce_gle.js
index 4efc280ab37..2e8e076e070 100644
--- a/jstests/core/bulk_legacy_enforce_gle.js
+++ b/jstests/core/bulk_legacy_enforce_gle.js
@@ -9,92 +9,103 @@ var coll = db.bulk_legacy_enforce_gle;
// batch of size 1 no error case.
coll.drop();
var bulk = coll.initializeUnorderedBulkOp();
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 1 });
-assert( bulk.execute() instanceof BulkWriteResult );
+bulk.find({none: 1}).upsert().updateOne({_id: 1});
+assert(bulk.execute() instanceof BulkWriteResult);
-var gle = db.runCommand({ getLastError: 1 });
+var gle = db.runCommand({getLastError: 1});
assert(gle.ok, tojson(gle));
assert.eq(1, gle.n);
// batch of size 1 should not call resetError even when it errors out.
coll.drop();
-coll.insert({ _id: 1 });
+coll.insert({_id: 1});
bulk = coll.initializeUnorderedBulkOp();
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 1 });
-assert.throws( function() { bulk.execute(); } );
+bulk.find({none: 1}).upsert().updateOne({_id: 1});
+assert.throws(function() {
+ bulk.execute();
+});
-gle = db.runCommand({ getLastError: 1 });
+gle = db.runCommand({getLastError: 1});
assert(gle.ok, tojson(gle));
assert.neq(null, gle.err);
// batch with all error except last should not call resetError.
coll.drop();
-coll.insert({ _id: 1 });
+coll.insert({_id: 1});
bulk = coll.initializeUnorderedBulkOp();
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 1 });
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 1 });
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 0 });
-var res = assert.throws( function() { bulk.execute(); } );
+bulk.find({none: 1}).upsert().updateOne({_id: 1});
+bulk.find({none: 1}).upsert().updateOne({_id: 1});
+bulk.find({none: 1}).upsert().updateOne({_id: 0});
+var res = assert.throws(function() {
+ bulk.execute();
+});
assert.eq(2, res.getWriteErrors().length);
-gle = db.runCommand({ getLastError: 1 });
+gle = db.runCommand({getLastError: 1});
assert(gle.ok, tojson(gle));
assert.eq(1, gle.n);
// batch with error at middle should not call resetError.
coll.drop();
-coll.insert({ _id: 1 });
+coll.insert({_id: 1});
bulk = coll.initializeUnorderedBulkOp();
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 0 });
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 1 });
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 2 });
-var res = assert.throws( function() { bulk.execute(); } );
+bulk.find({none: 1}).upsert().updateOne({_id: 0});
+bulk.find({none: 1}).upsert().updateOne({_id: 1});
+bulk.find({none: 1}).upsert().updateOne({_id: 2});
+var res = assert.throws(function() {
+ bulk.execute();
+});
assert.eq(1, res.getWriteErrors().length);
-gle = db.runCommand({ getLastError: 1 });
+gle = db.runCommand({getLastError: 1});
assert(gle.ok, tojson(gle));
// mongos sends the bulk as one while the shell sends the write individually
assert.gte(gle.n, 1);
// batch with error at last should call resetError.
coll.drop();
-coll.insert({ _id: 2 });
+coll.insert({_id: 2});
bulk = coll.initializeUnorderedBulkOp();
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 0 });
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 1 });
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 2 });
-res = assert.throws( function() { bulk.execute(); } );
+bulk.find({none: 1}).upsert().updateOne({_id: 0});
+bulk.find({none: 1}).upsert().updateOne({_id: 1});
+bulk.find({none: 1}).upsert().updateOne({_id: 2});
+res = assert.throws(function() {
+ bulk.execute();
+});
assert.eq(1, res.getWriteErrors().length);
-gle = db.runCommand({ getLastError: 1 });
+gle = db.runCommand({getLastError: 1});
assert(gle.ok, tojson(gle));
assert.eq(0, gle.n);
// batch with error at last should not call resetError if { w: 1 }
coll.drop();
-coll.insert({ _id: 2 });
+coll.insert({_id: 2});
bulk = coll.initializeUnorderedBulkOp();
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 0 });
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 1 });
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 2 });
-res = assert.throws( function() { bulk.execute(); } );
+bulk.find({none: 1}).upsert().updateOne({_id: 0});
+bulk.find({none: 1}).upsert().updateOne({_id: 1});
+bulk.find({none: 1}).upsert().updateOne({_id: 2});
+res = assert.throws(function() {
+ bulk.execute();
+});
assert.eq(1, res.getWriteErrors().length);
-gle = db.runCommand({ getLastError: 1, w: 1 });
+gle = db.runCommand({getLastError: 1, w: 1});
assert(gle.ok, tojson(gle));
assert.neq(null, gle.err);
// batch with error at last should not call resetError if { w: 0 }
coll.drop();
-coll.insert({ _id: 2 });
+coll.insert({_id: 2});
bulk = coll.initializeUnorderedBulkOp();
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 0 });
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 1 });
-bulk.find({ none: 1 }).upsert().updateOne({ _id: 2 });
-res = assert.throws( function() { bulk.execute(); } );
+bulk.find({none: 1}).upsert().updateOne({_id: 0});
+bulk.find({none: 1}).upsert().updateOne({_id: 1});
+bulk.find({none: 1}).upsert().updateOne({_id: 2});
+res = assert.throws(function() {
+ bulk.execute();
+});
assert.eq(1, res.getWriteErrors().length);
-gle = db.runCommand({ getLastError: 1, w: 0 });
+gle = db.runCommand({getLastError: 1, w: 0});
assert(gle.ok, tojson(gle));
assert.neq(null, gle.err);
-
diff --git a/jstests/core/bypass_doc_validation.js b/jstests/core/bypass_doc_validation.js
index 1dfe9b4d158..79a2eb7d4a2 100644
--- a/jstests/core/bypass_doc_validation.js
+++ b/jstests/core/bypass_doc_validation.js
@@ -21,8 +21,7 @@
var op = [{ts: Timestamp(0, 0), h: 1, v: 2, op: 'i', ns: coll.getFullName(), o: {_id: 9}}];
// SERVER-21345: applyOps is returning UnknownError instead of DocumentValidationFailure
assert.commandFailedWithCode(
- myDb.runCommand({applyOps: op, bypassDocumentValidation: false}), 8
- );
+ myDb.runCommand({applyOps: op, bypassDocumentValidation: false}), 8);
assert.eq(0, coll.count({_id: 9}));
assert.commandWorked(myDb.runCommand({applyOps: op, bypassDocumentValidation: true}));
assert.eq(1, coll.count({_id: 9}));
@@ -35,12 +34,11 @@
assert.commandWorked(myDb.createCollection(outputCollName, {validator: {a: {$exists: true}}}));
// Test the aggregate shell helper.
- var pipeline = [
- {$match: {_id: 1}},
- {$project: {aggregation: {$add: [1]}}},
- {$out: outputCollName}
- ];
- assert.throws(function() { coll.aggregate(pipeline, {bypassDocumentValidation: false}); });
+ var pipeline =
+ [{$match: {_id: 1}}, {$project: {aggregation: {$add: [1]}}}, {$out: outputCollName}];
+ assert.throws(function() {
+ coll.aggregate(pipeline, {bypassDocumentValidation: false});
+ });
assert.eq(0, outputColl.count({aggregation: 1}));
coll.aggregate(pipeline, {bypassDocumentValidation: true});
assert.eq(1, outputColl.count({aggregation: 1}));
@@ -48,15 +46,14 @@
// Test the copyDb command.
var copyDbName = dbName + '_copy';
myDb.getSiblingDB(copyDbName).dropDatabase();
- assert.commandFailedWithCode(db.adminCommand({
- copydb: 1, fromdb: dbName, todb: copyDbName, bypassDocumentValidation: false}),
- docValidationErrorCode
- );
+ assert.commandFailedWithCode(
+ db.adminCommand(
+ {copydb: 1, fromdb: dbName, todb: copyDbName, bypassDocumentValidation: false}),
+ docValidationErrorCode);
assert.eq(0, db.getSiblingDB(copyDbName)[collName].count());
myDb.getSiblingDB(copyDbName).dropDatabase();
- assert.commandWorked(db.adminCommand({
- copydb: 1, fromdb: dbName, todb: copyDbName, bypassDocumentValidation: true
- }));
+ assert.commandWorked(db.adminCommand(
+ {copydb: 1, fromdb: dbName, todb: copyDbName, bypassDocumentValidation: true}));
assert.eq(coll.count(), db.getSiblingDB(copyDbName)[collName].count());
// Test the findAndModify shell helper.
@@ -68,40 +65,57 @@
assert.eq(1, coll.count({findAndModify: 1}));
// Test the map/reduce command.
- var map = function() {emit(1, 1);};
- var reduce = function(k, vs) {return 'mapReduce';};
- assert.commandFailedWithCode(coll.runCommand({mapReduce: collName,
- map: map, reduce: reduce, out: {replace: outputCollName}, bypassDocumentValidation: false}),
- docValidationErrorCode
- );
+ var map = function() {
+ emit(1, 1);
+ };
+ var reduce = function(k, vs) {
+ return 'mapReduce';
+ };
+ assert.commandFailedWithCode(coll.runCommand({
+ mapReduce: collName,
+ map: map,
+ reduce: reduce,
+ out: {replace: outputCollName},
+ bypassDocumentValidation: false
+ }),
+ docValidationErrorCode);
assert.eq(0, outputColl.count({value: 'mapReduce'}));
- var res = coll.runCommand({mapReduce: collName,
- map: map, reduce: reduce, out: {replace: outputCollName}, bypassDocumentValidation: true});
+ var res = coll.runCommand({
+ mapReduce: collName,
+ map: map,
+ reduce: reduce,
+ out: {replace: outputCollName},
+ bypassDocumentValidation: true
+ });
assert.commandWorked(res);
assert.eq(1, outputColl.count({value: 'mapReduce'}));
// Test the insert command. Includes a test for a doc with no _id (SERVER-20859).
res = myDb.runCommand({insert: collName, documents: [{}], bypassDocumentValidation: false});
assert.eq(res.writeErrors[0].code, docValidationErrorCode, tojson(res));
- res = myDb.runCommand({insert: collName, documents: [{}, {_id: 6}],
- bypassDocumentValidation: false
- });
+ res = myDb.runCommand(
+ {insert: collName, documents: [{}, {_id: 6}], bypassDocumentValidation: false});
assert.eq(0, coll.count({_id: 6}));
assert.eq(res.writeErrors[0].code, docValidationErrorCode, tojson(res));
- res = myDb.runCommand({insert: collName, documents: [{}, {_id: 6}],
- bypassDocumentValidation: true
- });
+ res = myDb.runCommand(
+ {insert: collName, documents: [{}, {_id: 6}], bypassDocumentValidation: true});
assert.commandWorked(res);
assert.eq(null, res.writeErrors);
assert.eq(1, coll.count({_id: 6}));
// Test the update command.
- res = myDb.runCommand({update: collName,
- updates: [{q: {}, u: {$set: {update: 1}}}], bypassDocumentValidation: false});
+ res = myDb.runCommand({
+ update: collName,
+ updates: [{q: {}, u: {$set: {update: 1}}}],
+ bypassDocumentValidation: false
+ });
assert.eq(res.writeErrors[0].code, docValidationErrorCode, tojson(res));
assert.eq(0, coll.count({update: 1}));
- res = myDb.runCommand({update: collName,
- updates: [{q: {}, u: {$set: {update: 1}}}], bypassDocumentValidation: true});
+ res = myDb.runCommand({
+ update: collName,
+ updates: [{q: {}, u: {$set: {update: 1}}}],
+ bypassDocumentValidation: true
+ });
assert.commandWorked(res);
assert.eq(null, res.writeErrors);
assert.eq(1, coll.count({update: 1}));
diff --git a/jstests/core/capped.js b/jstests/core/capped.js
index 72eddb8de2f..0b2945bba04 100644
--- a/jstests/core/capped.js
+++ b/jstests/core/capped.js
@@ -1,12 +1,11 @@
db.jstests_capped.drop();
-db.createCollection("jstests_capped", {capped:true, size:30000});
+db.createCollection("jstests_capped", {capped: true, size: 30000});
t = db.jstests_capped;
-assert.eq( 1, t.getIndexes().length, "expected a count of one index for new capped collection" );
+assert.eq(1, t.getIndexes().length, "expected a count of one index for new capped collection");
+t.save({x: 1});
+t.save({x: 2});
-t.save({x:1});
-t.save({x:2});
-
-assert( t.find().sort({$natural:1})[0].x == 1 , "expected obj.x==1");
-assert( t.find().sort({$natural:-1})[0].x == 2, "expected obj.x == 2");
+assert(t.find().sort({$natural: 1})[0].x == 1, "expected obj.x==1");
+assert(t.find().sort({$natural: -1})[0].x == 2, "expected obj.x == 2");
diff --git a/jstests/core/capped1.js b/jstests/core/capped1.js
index 96099d7a9fd..923ee3aa668 100644
--- a/jstests/core/capped1.js
+++ b/jstests/core/capped1.js
@@ -2,10 +2,9 @@
t = db.capped1;
t.drop();
-db.createCollection("capped1" , {capped:true, size:1024 });
+db.createCollection("capped1", {capped: true, size: 1024});
v = t.validate();
-assert( v.valid , "A : " + tojson( v ) ); // SERVER-485
-
-t.save( { x : 1 } );
-assert( t.validate().valid , "B" );
+assert(v.valid, "A : " + tojson(v)); // SERVER-485
+t.save({x: 1});
+assert(t.validate().valid, "B");
diff --git a/jstests/core/capped5.js b/jstests/core/capped5.js
index 8a40c78ae98..33d78c5e17f 100644
--- a/jstests/core/capped5.js
+++ b/jstests/core/capped5.js
@@ -4,37 +4,36 @@ tn = "capped5";
t = db[tn];
t.drop();
+db.createCollection(tn, {capped: true, size: 1024 * 1024 * 1});
+t.insert({_id: 5, x: 11, z: 52});
+assert.eq(1, t.getIndexKeys().length, "A0"); // now we assume _id index even on capped coll
+assert.eq(52, t.findOne({x: 11}).z, "A1");
-db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
-t.insert( { _id : 5 , x : 11 , z : 52 } );
-assert.eq( 1 , t.getIndexKeys().length , "A0" ); //now we assume _id index even on capped coll
-assert.eq( 52 , t.findOne( { x : 11 } ).z , "A1" );
+t.ensureIndex({_id: 1});
+t.ensureIndex({x: 1});
-t.ensureIndex( { _id : 1 } );
-t.ensureIndex( { x : 1 } );
-
-assert.eq( 52 , t.findOne( { x : 11 } ).z , "B1" );
-assert.eq( 52 , t.findOne( { _id : 5 } ).z , "B2" );
+assert.eq(52, t.findOne({x: 11}).z, "B1");
+assert.eq(52, t.findOne({_id: 5}).z, "B2");
t.drop();
-db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
-t.insert( { _id : 5 , x : 11 } );
-t.insert( { _id : 5 , x : 12 } );
-assert.eq( 1, t.getIndexes().length ); //now we assume _id index
-assert.eq( 1, t.find().toArray().length ); //_id index unique, so second insert fails
+db.createCollection(tn, {capped: true, size: 1024 * 1024 * 1});
+t.insert({_id: 5, x: 11});
+t.insert({_id: 5, x: 12});
+assert.eq(1, t.getIndexes().length); // now we assume _id index
+assert.eq(1, t.find().toArray().length); //_id index unique, so second insert fails
t.drop();
-db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
-t.insert( { _id : 5 , x : 11 } );
-t.insert( { _id : 6 , x : 12 } );
-t.ensureIndex( { x:1 }, {unique:true} );
-assert.eq( 2, t.getIndexes().length ); //now we assume _id index
-assert.eq( 2, t.find().hint( {x:1} ).toArray().length );
+db.createCollection(tn, {capped: true, size: 1024 * 1024 * 1});
+t.insert({_id: 5, x: 11});
+t.insert({_id: 6, x: 12});
+t.ensureIndex({x: 1}, {unique: true});
+assert.eq(2, t.getIndexes().length); // now we assume _id index
+assert.eq(2, t.find().hint({x: 1}).toArray().length);
// SERVER-525 (closed) unique indexes in capped collection
t.drop();
-db.createCollection( tn , {capped: true, size: 1024 * 1024 * 1 } );
-t.ensureIndex( { _id:1 } ); // note we assume will be automatically unique because it is _id
-t.insert( { _id : 5 , x : 11 } );
-t.insert( { _id : 5 , x : 12 } );
-assert.eq( 1, t.find().toArray().length );
+db.createCollection(tn, {capped: true, size: 1024 * 1024 * 1});
+t.ensureIndex({_id: 1}); // note we assume will be automatically unique because it is _id
+t.insert({_id: 5, x: 11});
+t.insert({_id: 5, x: 12});
+assert.eq(1, t.find().toArray().length);
diff --git a/jstests/core/capped6.js b/jstests/core/capped6.js
index e643c77760d..d7b8a60985a 100644
--- a/jstests/core/capped6.js
+++ b/jstests/core/capped6.js
@@ -12,19 +12,19 @@
* check is performed in both forward and reverse directions.
*/
function checkOrder(i, valueArray) {
- res = coll.find().sort( { $natural: -1 } );
- assert( res.hasNext(), "A" );
+ res = coll.find().sort({$natural: -1});
+ assert(res.hasNext(), "A");
var j = i;
- while(res.hasNext()) {
- assert.eq( valueArray[j--].a, res.next().a, "B" );
+ while (res.hasNext()) {
+ assert.eq(valueArray[j--].a, res.next().a, "B");
}
- res = coll.find().sort( { $natural: 1 } );
- assert( res.hasNext(), "C" );
- while( res.hasNext() ) {
- assert.eq( valueArray[++j].a, res.next().a, "D" );
+ res = coll.find().sort({$natural: 1});
+ assert(res.hasNext(), "C");
+ while (res.hasNext()) {
+ assert.eq(valueArray[++j].a, res.next().a, "D");
}
- assert.eq( j, i, "E" );
+ assert.eq(j, i, "E");
}
/*
@@ -32,13 +32,15 @@
*/
function prepareCollection(shouldReverse) {
coll.drop();
- db._dbCommand({create: "capped6", capped: true, size: 1000, $nExtents: 11,
- autoIndexId: false});
+ db._dbCommand(
+ {create: "capped6", capped: true, size: 1000, $nExtents: 11, autoIndexId: false});
var valueArray = new Array(maxDocuments);
var c = "";
- for( i = 0; i < maxDocuments; ++i, c += "-" ) {
+ for (i = 0; i < maxDocuments; ++i, c += "-") {
// The a values are strings of increasing length.
- valueArray[i] = {a: c};
+ valueArray[i] = {
+ a: c
+ };
}
if (shouldReverse) {
valueArray.reverse();
@@ -52,11 +54,10 @@
* 'maxDocuments' number of documents since it is a capped collection.
* 2. Remove all but one documents via one or more "captrunc" requests.
* 3. For each subsequent call to this function, keep track of the removed documents using
- * 'valueArrayIndexes' and re-insert the removed documents each time this function is
+ * 'valueArrayIndexes' and re-insert the removed documents each time this function is
* called.
*/
function runCapTrunc(valueArray, valueArrayCurIndex, n, inc) {
-
// If n <= 0, no documents are removed by captrunc.
assert.gt(n, 0);
assert.gte(valueArray.length, maxDocuments);
@@ -73,7 +74,7 @@
var iterations = Math.floor((count - 1) / (n + inc));
for (i = 0; i < iterations; ++i) {
- assert.commandWorked(db.runCommand({captrunc:"capped6", n:n, inc:inc}));
+ assert.commandWorked(db.runCommand({captrunc: "capped6", n: n, inc: inc}));
count -= (n + inc);
valueArrayCurIndex -= (n + inc);
checkOrder(valueArrayCurIndex, valueArray);
diff --git a/jstests/core/capped9.js b/jstests/core/capped9.js
index 50ebb64744f..ae54839c9d1 100644
--- a/jstests/core/capped9.js
+++ b/jstests/core/capped9.js
@@ -2,26 +2,21 @@
t = db.capped9;
t.drop();
-db.createCollection("capped9" , {capped:true, size:1024*50 });
+db.createCollection("capped9", {capped: true, size: 1024 * 50});
-t.insert( { _id : 1 , x : 2 , y : 3 } );
-
-assert.eq( 1 , t.find( { x : 2 } ).itcount() , "A1" );
-assert.eq( 1 , t.find( { y : 3 } ).itcount() , "A2" );
-//assert.throws( function(){ t.find( { _id : 1 } ).itcount(); } , [] , "A3" ); // SERVER-3064
-
-t.update( { _id : 1 } , { $set : { y : 4 } } );
-//assert( db.getLastError() , "B1" ); // SERVER-3064
-//assert.eq( 3 , t.findOne().y , "B2" ); // SERVER-3064
-
-t.ensureIndex( { _id : 1 } );
-
-assert.eq( 1 , t.find( { _id : 1 } ).itcount() , "D1" );
-
-assert.writeOK( t.update( { _id: 1 }, { $set: { y: 4 } } ));
-assert.eq( 4 , t.findOne().y , "D2" );
+t.insert({_id: 1, x: 2, y: 3});
+assert.eq(1, t.find({x: 2}).itcount(), "A1");
+assert.eq(1, t.find({y: 3}).itcount(), "A2");
+// assert.throws( function(){ t.find( { _id : 1 } ).itcount(); } , [] , "A3" ); // SERVER-3064
+t.update({_id: 1}, {$set: {y: 4}});
+// assert( db.getLastError() , "B1" ); // SERVER-3064
+// assert.eq( 3 , t.findOne().y , "B2" ); // SERVER-3064
+t.ensureIndex({_id: 1});
+assert.eq(1, t.find({_id: 1}).itcount(), "D1");
+assert.writeOK(t.update({_id: 1}, {$set: {y: 4}}));
+assert.eq(4, t.findOne().y, "D2");
diff --git a/jstests/core/capped_convertToCapped1.js b/jstests/core/capped_convertToCapped1.js
index 4ee9ff2785e..269a33f89a5 100644
--- a/jstests/core/capped_convertToCapped1.js
+++ b/jstests/core/capped_convertToCapped1.js
@@ -8,26 +8,22 @@ dest.drop();
N = 1000;
-for( i = 0; i < N; ++i ) {
- source.save( {i:i} );
+for (i = 0; i < N; ++i) {
+ source.save({i: i});
}
-assert.eq( N, source.count() );
+assert.eq(N, source.count());
// should all fit
-res = db.runCommand( { cloneCollectionAsCapped:source.getName(),
- toCollection:dest.getName(),
- size:100000 } );
-assert.commandWorked( res );
-assert.eq( source.count(), dest.count() );
-assert.eq( N, source.count() ); // didn't delete source
+res = db.runCommand(
+ {cloneCollectionAsCapped: source.getName(), toCollection: dest.getName(), size: 100000});
+assert.commandWorked(res);
+assert.eq(source.count(), dest.count());
+assert.eq(N, source.count()); // didn't delete source
dest.drop();
// should NOT all fit
-assert.commandWorked( db.runCommand( { cloneCollectionAsCapped:source.getName(),
- toCollection:dest.getName(),
- size:1000 } ) );
-
-
-assert.eq( N, source.count() ); // didn't delete source
-assert.gt( source.count(), dest.count() );
+assert.commandWorked(db.runCommand(
+ {cloneCollectionAsCapped: source.getName(), toCollection: dest.getName(), size: 1000}));
+assert.eq(N, source.count()); // didn't delete source
+assert.gt(source.count(), dest.count());
diff --git a/jstests/core/capped_empty.js b/jstests/core/capped_empty.js
index b922dca0d46..63272a4e546 100644
--- a/jstests/core/capped_empty.js
+++ b/jstests/core/capped_empty.js
@@ -2,21 +2,21 @@
t = db.capped_empty;
t.drop();
-db.createCollection( t.getName() , { capped : true , size : 100 } );
+db.createCollection(t.getName(), {capped: true, size: 100});
-t.insert( { x : 1 } );
-t.insert( { x : 2 } );
-t.insert( { x : 3 } );
-t.ensureIndex( { x : 1 } );
+t.insert({x: 1});
+t.insert({x: 2});
+t.insert({x: 3});
+t.ensureIndex({x: 1});
-assert.eq( 3 , t.count() );
+assert.eq(3, t.count());
-t.runCommand( "emptycapped" );
+t.runCommand("emptycapped");
-assert.eq( 0 , t.count() );
+assert.eq(0, t.count());
-t.insert( { x : 1 } );
-t.insert( { x : 2 } );
-t.insert( { x : 3 } );
+t.insert({x: 1});
+t.insert({x: 2});
+t.insert({x: 3});
-assert.eq( 3 , t.count() );
+assert.eq(3, t.count());
diff --git a/jstests/core/capped_max1.js b/jstests/core/capped_max1.js
index 7811489773b..e4fbda62233 100644
--- a/jstests/core/capped_max1.js
+++ b/jstests/core/capped_max1.js
@@ -5,24 +5,26 @@ t.drop();
var max = 10;
var maxSize = 64 * 1024;
-db.createCollection( t.getName() , {capped: true, size: maxSize, max: max } );
-assert.eq( max, t.stats().max );
-assert.eq( maxSize, t.stats().maxSize );
-assert.eq( Math.floor(maxSize/1000), t.stats(1000).maxSize );
+db.createCollection(t.getName(), {capped: true, size: maxSize, max: max});
+assert.eq(max, t.stats().max);
+assert.eq(maxSize, t.stats().maxSize);
+assert.eq(Math.floor(maxSize / 1000), t.stats(1000).maxSize);
-for ( var i=0; i < max * 2; i++ ) {
- t.insert( { x : i } );
+for (var i = 0; i < max * 2; i++) {
+ t.insert({x: i});
}
-assert.eq( max, t.count() );
+assert.eq(max, t.count());
// Test invalidation of cursors
var cursor = t.find().batchSize(4);
assert(cursor.hasNext());
var myX = cursor.next();
-for ( var j = 0; j < max * 2; j++ ) {
- t.insert( { x : j+i } );
+for (var j = 0; j < max * 2; j++) {
+ t.insert({x: j + i});
}
// Cursor should now be dead.
-assert.throws(function () { cursor.toArray(); });
+assert.throws(function() {
+ cursor.toArray();
+});
diff --git a/jstests/core/capped_update.js b/jstests/core/capped_update.js
index 293cada4f8e..3ae434a8334 100644
--- a/jstests/core/capped_update.js
+++ b/jstests/core/capped_update.js
@@ -7,24 +7,23 @@
'use strict';
var t = db.cannot_change_capped_size;
t.drop();
- assert.commandWorked(db.createCollection(t.getName(), {capped: true,
- size: 1024,
- autoIndexId:false}));
+ assert.commandWorked(
+ db.createCollection(t.getName(), {capped: true, size: 1024, autoIndexId: false}));
assert.eq(0, t.getIndexes().length, "the capped collection has indexes");
for (var j = 1; j <= 10; j++) {
assert.writeOK(t.insert({_id: j, s: "Hello, World!"}));
}
- assert.writeOK(t.update({_id: 3}, {s: "Hello, Mongo!"})); // Mongo is same length as World
+ assert.writeOK(t.update({_id: 3}, {s: "Hello, Mongo!"})); // Mongo is same length as World
assert.writeError(t.update({_id: 3}, {$set: {s: "Hello!"}}));
assert.writeError(t.update({_id: 10}, {}));
assert.writeError(t.update({_id: 10}, {s: "Hello, World!!!"}));
- assert.commandWorked(t.getDB().runCommand({godinsert:t.getName(), obj:{a:2}}));
- var doc = t.findOne({a:2});
+ assert.commandWorked(t.getDB().runCommand({godinsert: t.getName(), obj: {a: 2}}));
+ var doc = t.findOne({a: 2});
assert.eq(undefined, doc["_id"], "now has _id after godinsert");
- assert.writeOK(t.update({a:2}, {$inc:{a:1}}));
- doc = t.findOne({a:3});
+ assert.writeOK(t.update({a: 2}, {$inc: {a: 1}}));
+ doc = t.findOne({a: 3});
assert.eq(undefined, doc["_id"], "now has _id after update");
})();
diff --git a/jstests/core/cappeda.js b/jstests/core/cappeda.js
index 79df5f33aa6..f5c56a44e89 100644
--- a/jstests/core/cappeda.js
+++ b/jstests/core/cappeda.js
@@ -2,31 +2,30 @@
t = db.scan_capped_id;
t.drop();
-x = t.runCommand( "create" , { capped : true , size : 10000 } );
-assert( x.ok );
+x = t.runCommand("create", {capped: true, size: 10000});
+assert(x.ok);
-for ( i=0; i<100; i++ )
- t.insert( { _id : i , x : 1 } );
+for (i = 0; i < 100; i++)
+ t.insert({_id: i, x: 1});
function q() {
- return t.findOne( { _id : 5 } );
+ return t.findOne({_id: 5});
}
function u() {
- var res = t.update( { _id : 5 } , { $set : { x : 2 } } );
- if ( res.hasWriteError() )
+ var res = t.update({_id: 5}, {$set: {x: 2}});
+ if (res.hasWriteError())
throw res;
}
-
// SERVER-3064
-//assert.throws( q , [] , "A1" );
-//assert.throws( u , [] , "B1" );
+// assert.throws( q , [] , "A1" );
+// assert.throws( u , [] , "B1" );
-t.ensureIndex( { _id : 1 } );
+t.ensureIndex({_id: 1});
-assert.eq( 1 , q().x );
+assert.eq(1, q().x);
q();
u();
-assert.eq( 2 , q().x );
+assert.eq(2, q().x);
diff --git a/jstests/core/check_shard_index.js b/jstests/core/check_shard_index.js
index a862715e5e6..6551699c65c 100644
--- a/jstests/core/check_shard_index.js
+++ b/jstests/core/check_shard_index.js
@@ -5,137 +5,138 @@
f = db.jstests_shardingindex;
f.drop();
-
// -------------------------
// Case 1: all entries filled or empty should make a valid index
//
f.drop();
-f.ensureIndex( { x: 1 , y: 1 } );
-assert.eq( 0 , f.count() , "1. initial count should be zero" );
-
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} });
-assert.eq( true , res.ok, "1a" );
+f.ensureIndex({x: 1, y: 1});
+assert.eq(0, f.count(), "1. initial count should be zero");
-f.save( { x: 1 , y : 1 } );
-assert.eq( 1 , f.count() , "1. count after initial insert should be 1" );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} });
-assert.eq( true , res.ok , "1b" );
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
+assert.eq(true, res.ok, "1a");
+f.save({x: 1, y: 1});
+assert.eq(1, f.count(), "1. count after initial insert should be 1");
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
+assert.eq(true, res.ok, "1b");
// -------------------------
// Case 2: entry with null values would make an index unsuitable
//
f.drop();
-f.ensureIndex( { x: 1 , y: 1 } );
-assert.eq( 0 , f.count() , "2. initial count should be zero" );
+f.ensureIndex({x: 1, y: 1});
+assert.eq(0, f.count(), "2. initial count should be zero");
-f.save( { x: 1 , y : 1 } );
-f.save( { x: null , y : 1 } );
+f.save({x: 1, y: 1});
+f.save({x: null, y: 1});
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} });
-assert.eq( true , res.ok , "2a " + tojson(res) );
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
+assert.eq(true, res.ok, "2a " + tojson(res));
-f.save( { y: 2 } );
-assert.eq( 3 , f.count() , "2. count after initial insert should be 3" );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} });
-assert.eq( false , res.ok , "2b " + tojson(res) );
+f.save({y: 2});
+assert.eq(3, f.count(), "2. count after initial insert should be 3");
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
+assert.eq(false, res.ok, "2b " + tojson(res));
// Check _id index
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {_id:1} });
-assert.eq( true , res.ok , "2c " + tojson(res) );
-assert( res.idskip , "2d " + tojson(res) );
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {_id: 1}});
+assert.eq(true, res.ok, "2c " + tojson(res));
+assert(res.idskip, "2d " + tojson(res));
// -------------------------
// Case 3: entry with array values would make an index unsuitable
//
f.drop();
-f.ensureIndex( { x: 1 , y: 1 } );
-assert.eq( 0 , f.count() , "3. initial count should be zero" );
+f.ensureIndex({x: 1, y: 1});
+assert.eq(0, f.count(), "3. initial count should be zero");
-f.save( { x: 1 , y : 1 } );
-f.save( { x: [1, 2] , y : 2 } );
+f.save({x: 1, y: 1});
+f.save({x: [1, 2], y: 2});
-assert.eq( 2 , f.count() , "3. count after initial insert should be 2" );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} });
-assert.eq( false , res.ok , "3a " + tojson(res) );
+assert.eq(2, f.count(), "3. count after initial insert should be 2");
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
+assert.eq(false, res.ok, "3a " + tojson(res));
-f.remove( { y : 2 } );
+f.remove({y: 2});
f.reIndex();
-assert.eq( 1 , f.count() , "3. count after removing array value should be 1" );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} });
-assert.eq( true , res.ok , "3b " + tojson(res) );
+assert.eq(1, f.count(), "3. count after removing array value should be 1");
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
+assert.eq(true, res.ok, "3b " + tojson(res));
-f.save( { x : 2, y : [1, 2] } );
+f.save({x: 2, y: [1, 2]});
-assert.eq( 2 , f.count() , "3. count after adding array value should be 2" );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} });
-assert.eq( false , res.ok , "3c " + tojson(res) );
+assert.eq(2, f.count(), "3. count after adding array value should be 2");
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
+assert.eq(false, res.ok, "3c " + tojson(res));
// -------------------------
// Case 4: Handles prefix shard key indexes.
//
f.drop();
-f.ensureIndex( { x: 1 , y: 1, z: 1 } );
-assert.eq( 0 , f.count() , "4. initial count should be zero" );
-
-f.save( { x: 1 , y : 1, z : 1 } );
+f.ensureIndex({x: 1, y: 1, z: 1});
+assert.eq(0, f.count(), "4. initial count should be zero");
-assert.eq( 1 , f.count() , "4. count after initial insert should be 1" );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1} });
-assert.eq( true , res.ok , "4a " + tojson(res) );
+f.save({x: 1, y: 1, z: 1});
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} });
-assert.eq( true , res.ok , "4b " + tojson(res) );
+assert.eq(1, f.count(), "4. count after initial insert should be 1");
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1}});
+assert.eq(true, res.ok, "4a " + tojson(res));
-f.save( { x: [1, 2] , y : 2, z : 2 } );
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
+assert.eq(true, res.ok, "4b " + tojson(res));
-assert.eq( 2 , f.count() , "4. count after adding array value should be 2" );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1} });
-assert.eq( false , res.ok , "4c " + tojson(res) );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} });
-assert.eq( false , res.ok , "4d " + tojson(res) );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1, z:1} });
-assert.eq( false , res.ok , "4e " + tojson(res) );
+f.save({x: [1, 2], y: 2, z: 2});
+assert.eq(2, f.count(), "4. count after adding array value should be 2");
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1}});
+assert.eq(false, res.ok, "4c " + tojson(res));
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
+assert.eq(false, res.ok, "4d " + tojson(res));
+res = db.runCommand(
+ {checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1, z: 1}});
+assert.eq(false, res.ok, "4e " + tojson(res));
-f.remove( { y : 2 } );
+f.remove({y: 2});
f.reIndex();
-assert.eq( 1 , f.count() , "4. count after removing array value should be 1" );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1, z:1} });
-assert.eq( true , res.ok , "4f " + tojson(res) );
-
-f.save( { x : 3, y : [1, 2], z : 3 } );
-
-assert.eq( 2 , f.count() , "4. count after adding array value on second key should be 2" );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1} });
-assert.eq( false , res.ok , "4g " + tojson(res) );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} });
-assert.eq( false , res.ok , "4h " + tojson(res) );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1, z:1} });
-assert.eq( false , res.ok , "4i " + tojson(res) );
-
-f.remove( { x : 3 } );
-f.reIndex(); // Necessary so that the index is no longer marked as multikey
-
-assert.eq( 1 , f.count() , "4. count after removing array value should be 1 again" );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1, z:1} });
-assert.eq( true , res.ok , "4e " + tojson(res) );
-
-f.save( { x : 4, y : 4, z : [1, 2] } );
-
-assert.eq( 2 , f.count() , "4. count after adding array value on third key should be 2" );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1} });
-assert.eq( false , res.ok , "4c " + tojson(res) );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1} });
-assert.eq( false , res.ok , "4d " + tojson(res) );
-res = db.runCommand( { checkShardingIndex: "test.jstests_shardingindex" , keyPattern: {x:1, y:1, z:1} });
-assert.eq( false , res.ok , "4e " + tojson(res) );
-
+assert.eq(1, f.count(), "4. count after removing array value should be 1");
+res = db.runCommand(
+ {checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1, z: 1}});
+assert.eq(true, res.ok, "4f " + tojson(res));
+
+f.save({x: 3, y: [1, 2], z: 3});
+
+assert.eq(2, f.count(), "4. count after adding array value on second key should be 2");
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1}});
+assert.eq(false, res.ok, "4g " + tojson(res));
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
+assert.eq(false, res.ok, "4h " + tojson(res));
+res = db.runCommand(
+ {checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1, z: 1}});
+assert.eq(false, res.ok, "4i " + tojson(res));
+
+f.remove({x: 3});
+f.reIndex(); // Necessary so that the index is no longer marked as multikey
+
+assert.eq(1, f.count(), "4. count after removing array value should be 1 again");
+res = db.runCommand(
+ {checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1, z: 1}});
+assert.eq(true, res.ok, "4e " + tojson(res));
+
+f.save({x: 4, y: 4, z: [1, 2]});
+
+assert.eq(2, f.count(), "4. count after adding array value on third key should be 2");
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1}});
+assert.eq(false, res.ok, "4c " + tojson(res));
+res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}});
+assert.eq(false, res.ok, "4d " + tojson(res));
+res = db.runCommand(
+ {checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1, z: 1}});
+assert.eq(false, res.ok, "4e " + tojson(res));
print("PASSED");
diff --git a/jstests/core/cleanup_orphaned.js b/jstests/core/cleanup_orphaned.js
index a0444a9f90c..2ece4316fa2 100644
--- a/jstests/core/cleanup_orphaned.js
+++ b/jstests/core/cleanup_orphaned.js
@@ -1,3 +1,3 @@
// Test that cleanupOrphaned cannot be run on stand alone mongod.
-var res = db.adminCommand({ cleanupOrphaned: 'unsharded.coll' });
+var res = db.adminCommand({cleanupOrphaned: 'unsharded.coll'});
assert(!res.ok, tojson(res));
diff --git a/jstests/core/clone_as_capped_nonexistant.js b/jstests/core/clone_as_capped_nonexistant.js
index e6f925d2a13..f9e68ee9648 100644
--- a/jstests/core/clone_as_capped_nonexistant.js
+++ b/jstests/core/clone_as_capped_nonexistant.js
@@ -8,23 +8,19 @@
testDb.dropDatabase();
// Database does not exist here
- var res = testDb.runCommand({cloneCollectionAsCapped: 'foo',
- toCollection: 'bar',
- size: 1024});
+ var res = testDb.runCommand({cloneCollectionAsCapped: 'foo', toCollection: 'bar', size: 1024});
assert.eq(res.ok, 0, "cloning a nonexistent collection to capped should not have worked");
var isSharded = (db.isMaster().msg == "isdbgrid");
- assert.eq(res.errmsg,
- isSharded ? "no such cmd: cloneCollectionAsCapped"
- : "database " + dbname + " not found",
- "converting a nonexistent to capped failed but for the wrong reason");
+ assert.eq(
+ res.errmsg,
+ isSharded ? "no such cmd: cloneCollectionAsCapped" : "database " + dbname + " not found",
+ "converting a nonexistent to capped failed but for the wrong reason");
// Database exists, but collection doesn't
testDb.coll.insert({});
- var res = testDb.runCommand({cloneCollectionAsCapped: 'foo',
- toCollection: 'bar',
- size: 1024});
+ var res = testDb.runCommand({cloneCollectionAsCapped: 'foo', toCollection: 'bar', size: 1024});
assert.eq(res.ok, 0, "cloning a nonexistent collection to capped should not have worked");
assert.eq(res.errmsg,
isSharded ? "no such cmd: cloneCollectionAsCapped"
diff --git a/jstests/core/collection_info_cache_race.js b/jstests/core/collection_info_cache_race.js
index e1b06d52e46..d57fc3340db 100644
--- a/jstests/core/collection_info_cache_race.js
+++ b/jstests/core/collection_info_cache_race.js
@@ -3,12 +3,11 @@
// Create collection without an index, then try to save a doc.
var coll = db.collection_info_cache_race;
coll.drop();
-assert.commandWorked(db.createCollection(coll.getName(), {autoIndexId:false}));
+assert.commandWorked(db.createCollection(coll.getName(), {autoIndexId: false}));
// Fails when SERVER-16502 was not fixed, due to invariant
-assert.writeOK(coll.save({_id:false}, {writeConcern:{w:1}}));
-
+assert.writeOK(coll.save({_id: false}, {writeConcern: {w: 1}}));
coll.drop();
-assert.commandWorked(db.createCollection(coll.getName(), {autoIndexId:false}));
-assert.eq(null,coll.findOne());
-assert.writeOK(coll.save({_id:false}, {writeConcern:{w:1}}));
+assert.commandWorked(db.createCollection(coll.getName(), {autoIndexId: false}));
+assert.eq(null, coll.findOne());
+assert.writeOK(coll.save({_id: false}, {writeConcern: {w: 1}}));
diff --git a/jstests/core/collection_truncate.js b/jstests/core/collection_truncate.js
index 1581762f30a..08de3e8c1ed 100644
--- a/jstests/core/collection_truncate.js
+++ b/jstests/core/collection_truncate.js
@@ -5,7 +5,7 @@ t.drop();
function truncate() {
// Until SERVER-15274 is implemented, this is the only way to truncate a collection.
- assert.commandWorked(t.runCommand('emptycapped')); // works on non-capped as well.
+ assert.commandWorked(t.runCommand('emptycapped')); // works on non-capped as well.
}
function assertEmpty() {
@@ -27,14 +27,14 @@ function assertEmpty() {
}
// Single record case.
-t.insert({a:1});
+t.insert({a: 1});
truncate();
assertEmpty();
// Multi-extent case.
var initialStorageSize = t.stats().storageSize;
while (t.stats().storageSize == initialStorageSize) {
- t.insert({a:1});
+ t.insert({a: 1});
}
truncate();
assertEmpty();
diff --git a/jstests/core/collmod.js b/jstests/core/collmod.js
index f901c8a3e6f..e91a7bd8484 100644
--- a/jstests/core/collmod.js
+++ b/jstests/core/collmod.js
@@ -1,94 +1,94 @@
// Basic js tests for the collMod command.
// Test setting the usePowerOf2Sizes flag, and modifying TTL indexes.
-function debug( x ) {
- //printjson( x );
+function debug(x) {
+ // printjson( x );
}
var coll = "collModTest";
-var t = db.getCollection( coll );
+var t = db.getCollection(coll);
t.drop();
var isMongos = ("isdbgrid" == db.runCommand("ismaster").msg);
-db.createCollection( coll );
+db.createCollection(coll);
-function findTTL( key, expireAfterSeconds ) {
+function findTTL(key, expireAfterSeconds) {
var all = t.getIndexes();
- all = all.filter( function(z) {
- return z.expireAfterSeconds == expireAfterSeconds &&
- friendlyEqual( z.key, key ); } );
+ all = all.filter(function(z) {
+ return z.expireAfterSeconds == expireAfterSeconds && friendlyEqual(z.key, key);
+ });
return all.length == 1;
}
function findCollectionInfo() {
var all = db.getCollectionInfos();
- all = all.filter( function(z) { return z.name == t.getName(); } );
+ all = all.filter(function(z) {
+ return z.name == t.getName();
+ });
assert.eq(all.length, 1);
return all[0];
}
// ensure we fail with gibberish options
-assert.commandFailed(t.runCommand('collmod', {NotARealOption:1}));
+assert.commandFailed(t.runCommand('collmod', {NotARealOption: 1}));
// add a TTL index
-t.ensureIndex( {a : 1}, { "expireAfterSeconds": 50 } );
-assert( findTTL( { a : 1 }, 50 ), "TTL index not added" );
+t.ensureIndex({a: 1}, {"expireAfterSeconds": 50});
+assert(findTTL({a: 1}, 50), "TTL index not added");
// try to modify it with a bad key pattern
-var res = db.runCommand( { "collMod" : coll,
- "index" : { "keyPattern" : "bad" , "expireAfterSeconds" : 100 } } );
-debug( res );
-assert.eq( 0 , res.ok , "mod shouldn't work with bad keypattern");
+var res =
+ db.runCommand({"collMod": coll, "index": {"keyPattern": "bad", "expireAfterSeconds": 100}});
+debug(res);
+assert.eq(0, res.ok, "mod shouldn't work with bad keypattern");
// try to modify it without expireAfterSeconds field
-var res = db.runCommand( { "collMod" : coll,
- "index" : { "keyPattern" : {a : 1} } } );
-debug( res );
-assert.eq( 0 , res.ok , "TTL mod shouldn't work without expireAfterSeconds");
+var res = db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}}});
+debug(res);
+assert.eq(0, res.ok, "TTL mod shouldn't work without expireAfterSeconds");
// try to modify it with a non-numeric expireAfterSeconds field
-var res = db.runCommand( { "collMod" : coll,
- "index" : { "keyPattern" : {a : 1}, "expireAfterSeconds" : "100" } } );
-debug( res );
-assert.eq( 0 , res.ok , "TTL mod shouldn't work with non-numeric expireAfterSeconds");
+var res =
+ db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}, "expireAfterSeconds": "100"}});
+debug(res);
+assert.eq(0, res.ok, "TTL mod shouldn't work with non-numeric expireAfterSeconds");
// this time modifying should finally work
-var res = db.runCommand( { "collMod" : coll,
- "index" : { "keyPattern" : {a : 1}, "expireAfterSeconds" : 100 } } );
-debug( res );
-assert( findTTL( {a:1}, 100 ), "TTL index not modified" );
+var res =
+ db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}, "expireAfterSeconds": 100}});
+debug(res);
+assert(findTTL({a: 1}, 100), "TTL index not modified");
// try to modify a faulty TTL index with a non-numeric expireAfterSeconds field
-t.dropIndex( {a : 1 } );
-t.ensureIndex( {a : 1} , { "expireAfterSeconds": "50" } );
-var res = db.runCommand( { "collMod" : coll,
- "index" : { "keyPattern" : {a : 1} , "expireAfterSeconds" : 100 } } );
-debug( res );
-assert.eq( 0, res.ok, "shouldn't be able to modify faulty index spec" );
+t.dropIndex({a: 1});
+t.ensureIndex({a: 1}, {"expireAfterSeconds": "50"});
+var res =
+ db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}, "expireAfterSeconds": 100}});
+debug(res);
+assert.eq(0, res.ok, "shouldn't be able to modify faulty index spec");
// try with new index, this time set both expireAfterSeconds and the usePowerOf2Sizes flag
-t.dropIndex( {a : 1 } );
-t.ensureIndex( {a : 1} , { "expireAfterSeconds": 50 } );
-var res = db.runCommand( { "collMod" : coll ,
- "usePowerOf2Sizes" : true,
- "index" : { "keyPattern" : {a : 1} , "expireAfterSeconds" : 100 } } );
-debug( res );
-assert( findTTL( {a:1}, 100), "TTL index should be 100 now" );
+t.dropIndex({a: 1});
+t.ensureIndex({a: 1}, {"expireAfterSeconds": 50});
+var res = db.runCommand({
+ "collMod": coll,
+ "usePowerOf2Sizes": true,
+ "index": {"keyPattern": {a: 1}, "expireAfterSeconds": 100}
+});
+debug(res);
+assert(findTTL({a: 1}, 100), "TTL index should be 100 now");
// Clear usePowerOf2Sizes and enable noPadding. Make sure collection options.flags is updated.
-var res = db.runCommand( { "collMod" : coll ,
- "usePowerOf2Sizes" : false,
- "noPadding" : true} );
-debug( res );
+var res = db.runCommand({"collMod": coll, "usePowerOf2Sizes": false, "noPadding": true});
+debug(res);
assert.commandWorked(res);
var info = findCollectionInfo();
-assert.eq(info.options.flags, 2, tojson(info)); // 2 is CollectionOptions::Flag_NoPadding
+assert.eq(info.options.flags, 2, tojson(info)); // 2 is CollectionOptions::Flag_NoPadding
// Clear noPadding and check results object and options.flags.
-var res = db.runCommand( { "collMod" : coll ,
- "noPadding" : false} );
-debug( res );
+var res = db.runCommand({"collMod": coll, "noPadding": false});
+debug(res);
assert.commandWorked(res);
if (!isMongos) {
// don't check this for sharding passthrough since mongos has a different output format.
diff --git a/jstests/core/compact_keeps_indexes.js b/jstests/core/compact_keeps_indexes.js
index 68f52c126f5..f2da7597cdf 100644
--- a/jstests/core/compact_keeps_indexes.js
+++ b/jstests/core/compact_keeps_indexes.js
@@ -8,22 +8,22 @@
var coll = db.compact_keeps_indexes;
coll.drop();
- coll.insert({_id:1, x:1});
- coll.ensureIndex({x:1});
+ coll.insert({_id: 1, x: 1});
+ coll.ensureIndex({x: 1});
assert.eq(coll.getIndexes().length, 2);
// force:true is for replset passthroughs
- var res = coll.runCommand('compact', {force:true});
+ var res = coll.runCommand('compact', {force: true});
// Some storage engines (for example, inMemoryExperiment) do not support the compact command.
- if (res.code == 115) { // CommandNotSupported
+ if (res.code == 115) { // CommandNotSupported
return;
}
assert.commandWorked(res);
assert.eq(coll.getIndexes().length, 2);
- assert.eq(coll.find({_id:1}).itcount(), 1);
- assert.eq(coll.find({x:1}).itcount(), 1);
+ assert.eq(coll.find({_id: 1}).itcount(), 1);
+ assert.eq(coll.find({x: 1}).itcount(), 1);
// Run compact repeatedly while simultaneously creating and dropping a collection in a
// different database.
@@ -31,19 +31,19 @@
// The test uses a single collection in the database test_compact_keeps_indexes_drop
// which triggers a series of slow resync operations in the slave as the collection is
// repeatedly created and dropped.
- var isMasterSlave = testingReplication &&
- !assert.commandWorked(db.isMaster()).hasOwnProperty('setName');
+ var isMasterSlave =
+ testingReplication && !assert.commandWorked(db.isMaster()).hasOwnProperty('setName');
if (!isMasterSlave) {
var dropCollectionShell = startParallelShell(function() {
var t = db.getSiblingDB('test_compact_keeps_indexes_drop').testcoll;
t.drop();
- for (var i=0; i<100; i++) {
+ for (var i = 0; i < 100; i++) {
t.save({a: 1});
t.drop();
}
});
- for (var i=0; i<10; i++) {
- coll.runCommand('compact');
+ for (var i = 0; i < 10; i++) {
+ coll.runCommand('compact');
}
dropCollectionShell();
}
diff --git a/jstests/core/compare_timestamps.js b/jstests/core/compare_timestamps.js
index 2a7fcb3683c..2440fac3fe1 100644
--- a/jstests/core/compare_timestamps.js
+++ b/jstests/core/compare_timestamps.js
@@ -7,4 +7,3 @@
assert.writeOK(t.insert({a: new Timestamp(0, 0), b: "zero"}));
assert.eq(t.find().sort({a: 1}).limit(1).next().b, "zero", "timestamp must compare unsigned");
}());
-
diff --git a/jstests/core/connection_status.js b/jstests/core/connection_status.js
index bbae51b9eb0..29b8999ccc8 100644
--- a/jstests/core/connection_status.js
+++ b/jstests/core/connection_status.js
@@ -15,7 +15,7 @@ function test(userName) {
var users = output.authInfo.authenticatedUsers;
var matches = 0;
- for (var i=0; i < users.length; i++) {
+ for (var i = 0; i < users.length; i++) {
if (users[i].db != dbName)
continue;
@@ -28,7 +28,7 @@ function test(userName) {
var roles = output.authInfo.authenticatedUserRoles;
matches = 0;
- for (var i=0; i < roles.length; i++) {
+ for (var i = 0; i < roles.length; i++) {
if (roles[i].db != "admin")
continue;
@@ -48,7 +48,7 @@ function test(userName) {
var privileges = output.authInfo.authenticatedUserPrivileges;
matches = 0;
- for (var i=0; i < privileges.length; i++) {
+ for (var i = 0; i < privileges.length; i++) {
if (privileges[i].resource.anyResource) {
matches++;
}
@@ -65,4 +65,3 @@ function test(userName) {
test("someone");
test("someone else");
-
diff --git a/jstests/core/connection_string_validation.js b/jstests/core/connection_string_validation.js
index a682b27ea79..232650f230b 100644
--- a/jstests/core/connection_string_validation.js
+++ b/jstests/core/connection_string_validation.js
@@ -3,39 +3,36 @@
port = "27017";
-if ( db.getMongo().host.indexOf( ":" ) >= 0 ) {
- var idx = db.getMongo().host.indexOf( ":" );
- port = db.getMongo().host.substring( idx + 1 );
+if (db.getMongo().host.indexOf(":") >= 0) {
+ var idx = db.getMongo().host.indexOf(":");
+ port = db.getMongo().host.substring(idx + 1);
}
-var goodStrings = [
- "localhost:" + port + "/test",
- "127.0.0.1:" + port + "/test"
- ];
+var goodStrings = ["localhost:" + port + "/test", "127.0.0.1:" + port + "/test"];
var badStrings = [
- { s: undefined, r: /^Missing connection string$/ },
- { s: 7, r: /^Incorrect type/ },
- { s: null, r: /^Incorrect type/ },
- { s: "", r: /^Empty connection string$/ },
- { s: " ", r: /^Empty connection string$/ },
- { s: ":", r: /^Missing host name/ },
- { s: "/", r: /^Missing host name/ },
- { s: ":/", r: /^Missing host name/ },
- { s: ":/test", r: /^Missing host name/ },
- { s: ":" + port + "/", r: /^Missing host name/ },
- { s: ":" + port + "/test", r: /^Missing host name/ },
- { s: "/test", r: /^Missing host name/ },
- { s: "localhost:/test", r: /^Missing port number/ },
- { s: "127.0.0.1:/test", r: /^Missing port number/ },
- { s: "127.0.0.1:cat/test", r: /^Invalid port number/ },
- { s: "127.0.0.1:1cat/test", r: /^Invalid port number/ },
- { s: "127.0.0.1:123456/test", r: /^Invalid port number/ },
- { s: "127.0.0.1:65536/test", r: /^Invalid port number/ },
- { s: "::1:65536/test", r: /^Invalid port number/ },
- { s: "127.0.0.1:" + port + "/", r: /^Missing database name/ },
- { s: "::1:" + port + "/", r: /^Missing database name/ }
- ];
+ {s: undefined, r: /^Missing connection string$/},
+ {s: 7, r: /^Incorrect type/},
+ {s: null, r: /^Incorrect type/},
+ {s: "", r: /^Empty connection string$/},
+ {s: " ", r: /^Empty connection string$/},
+ {s: ":", r: /^Missing host name/},
+ {s: "/", r: /^Missing host name/},
+ {s: ":/", r: /^Missing host name/},
+ {s: ":/test", r: /^Missing host name/},
+ {s: ":" + port + "/", r: /^Missing host name/},
+ {s: ":" + port + "/test", r: /^Missing host name/},
+ {s: "/test", r: /^Missing host name/},
+ {s: "localhost:/test", r: /^Missing port number/},
+ {s: "127.0.0.1:/test", r: /^Missing port number/},
+ {s: "127.0.0.1:cat/test", r: /^Invalid port number/},
+ {s: "127.0.0.1:1cat/test", r: /^Invalid port number/},
+ {s: "127.0.0.1:123456/test", r: /^Invalid port number/},
+ {s: "127.0.0.1:65536/test", r: /^Invalid port number/},
+ {s: "::1:65536/test", r: /^Invalid port number/},
+ {s: "127.0.0.1:" + port + "/", r: /^Missing database name/},
+ {s: "::1:" + port + "/", r: /^Missing database name/}
+];
function testGood(i, connectionString) {
print("\nTesting good connection string " + i + " (\"" + connectionString + "\") ...");
@@ -44,18 +41,17 @@ function testGood(i, connectionString) {
try {
var connectDB = connect(connectionString);
connectDB = null;
- }
- catch (e) {
+ } catch (e) {
gotException = true;
exception = e;
}
if (!gotException) {
- print("Good connection string " + i +
- " (\"" + connectionString + "\") correctly validated");
+ print("Good connection string " + i + " (\"" + connectionString +
+ "\") correctly validated");
return;
}
- var message = "FAILED to correctly validate goodString " + i +
- " (\"" + connectionString + "\"): exception was \"" + tojson(exception) + "\"";
+ var message = "FAILED to correctly validate goodString " + i + " (\"" + connectionString +
+ "\"): exception was \"" + tojson(exception) + "\"";
doassert(message);
}
@@ -67,8 +63,7 @@ function testBad(i, connectionString, errorRegex) {
try {
var connectDB = connect(connectionString);
connectDB = null;
- }
- catch (e) {
+ } catch (e) {
gotException = true;
exception = e;
if (errorRegex.test(e.message)) {
@@ -80,13 +75,12 @@ function testBad(i, connectionString, errorRegex) {
"\") correctly rejected:\n" + tojson(exception));
return;
}
- var message = "FAILED to generate correct exception for badString " + i +
- " (\"" + connectionString + "\"): ";
+ var message = "FAILED to generate correct exception for badString " + i + " (\"" +
+ connectionString + "\"): ";
if (gotException) {
- message += "exception was \"" + tojson(exception) +
- "\", it should have matched \"" + errorRegex.toString() + "\"";
- }
- else {
+ message += "exception was \"" + tojson(exception) + "\", it should have matched \"" +
+ errorRegex.toString() + "\"";
+ } else {
message += "no exception was thrown";
}
doassert(message);
diff --git a/jstests/core/constructors.js b/jstests/core/constructors.js
index 346b1ca0dfc..814766ee2c3 100644
--- a/jstests/core/constructors.js
+++ b/jstests/core/constructors.js
@@ -2,8 +2,8 @@
// Takes a list of constructors and returns a new list with an extra entry for each constructor with
// "new" prepended
-function addConstructorsWithNew (constructorList) {
- function prependNew (constructor) {
+function addConstructorsWithNew(constructorList) {
+ function prependNew(constructor) {
return "new " + constructor;
}
@@ -12,152 +12,143 @@ function addConstructorsWithNew (constructorList) {
// We use slice(0) here to make a copy of our lists
var validWithNew = valid.concat(valid.slice(0).map(prependNew));
var invalidWithNew = invalid.concat(invalid.slice(0).map(prependNew));
- return { "valid" : validWithNew, "invalid" : invalidWithNew };
+ return {
+ "valid": validWithNew,
+ "invalid": invalidWithNew
+ };
}
-function clientEvalConstructorTest (constructorList) {
+function clientEvalConstructorTest(constructorList) {
constructorList = addConstructorsWithNew(constructorList);
- constructorList.valid.forEach(function (constructor) {
+ constructorList.valid.forEach(function(constructor) {
try {
eval(constructor);
- }
- catch (e) {
- throw ("valid constructor: " + constructor + " failed in eval context: " + e);
+ } catch (e) {
+ throw("valid constructor: " + constructor + " failed in eval context: " + e);
}
});
- constructorList.invalid.forEach(function (constructor) {
- assert.throws(function () { eval(constructor); },
- [], "invalid constructor did not throw error in eval context: " + constructor);
+ constructorList.invalid.forEach(function(constructor) {
+ assert.throws(function() {
+ eval(constructor);
+ }, [], "invalid constructor did not throw error in eval context: " + constructor);
});
}
-function dbEvalConstructorTest (constructorList) {
+function dbEvalConstructorTest(constructorList) {
constructorList = addConstructorsWithNew(constructorList);
- constructorList.valid.forEach(function (constructor) {
+ constructorList.valid.forEach(function(constructor) {
try {
db.eval(constructor);
- }
- catch (e) {
- throw ("valid constructor: " + constructor + " failed in db.eval context: " + e);
+ } catch (e) {
+ throw("valid constructor: " + constructor + " failed in db.eval context: " + e);
}
});
- constructorList.invalid.forEach(function (constructor) {
- assert.throws(function () { db.eval(constructor); },
- [], "invalid constructor did not throw error in db.eval context: " + constructor);
+ constructorList.invalid.forEach(function(constructor) {
+ assert.throws(function() {
+ db.eval(constructor);
+ }, [], "invalid constructor did not throw error in db.eval context: " + constructor);
});
}
-function mapReduceConstructorTest (constructorList) {
+function mapReduceConstructorTest(constructorList) {
constructorList = addConstructorsWithNew(constructorList);
t = db.mr_constructors;
t.drop();
- t.save( { "partner" : 1, "visits" : 9 } );
- t.save( { "partner" : 2, "visits" : 9 } );
- t.save( { "partner" : 1, "visits" : 11 } );
- t.save( { "partner" : 1, "visits" : 30 } );
- t.save( { "partner" : 2, "visits" : 41 } );
- t.save( { "partner" : 2, "visits" : 41 } );
+ t.save({"partner": 1, "visits": 9});
+ t.save({"partner": 2, "visits": 9});
+ t.save({"partner": 1, "visits": 11});
+ t.save({"partner": 1, "visits": 30});
+ t.save({"partner": 2, "visits": 41});
+ t.save({"partner": 2, "visits": 41});
- constructorList.valid.forEach(function (constructor) {
+ constructorList.valid.forEach(function(constructor) {
try {
m = eval("dummy = function(){ emit( \"test\" , " + constructor + " ) }");
r = eval("dummy = function( k , v ){ return { test : " + constructor + " } }");
- res = t.mapReduce( m , r , { out : "mr_constructors_out" , scope : { xx : 1 } } );
- }
- catch (e) {
- throw ("valid constructor: " + constructor + " failed in mapReduce context: " + e);
+ res = t.mapReduce(m, r, {out: "mr_constructors_out", scope: {xx: 1}});
+ } catch (e) {
+ throw("valid constructor: " + constructor + " failed in mapReduce context: " + e);
}
});
- constructorList.invalid.forEach(function (constructor) {
+ constructorList.invalid.forEach(function(constructor) {
m = eval("dummy = function(){ emit( \"test\" , " + constructor + " ) }");
r = eval("dummy = function( k , v ){ return { test : " + constructor + " } }");
- assert.throws(function () { res = t.mapReduce( m , r ,
- { out : "mr_constructors_out" , scope : { xx : 1 } } ); },
- [], "invalid constructor did not throw error in mapReduce context: " + constructor);
+ assert.throws(function() {
+ res = t.mapReduce(m, r, {out: "mr_constructors_out", scope: {xx: 1}});
+ }, [], "invalid constructor did not throw error in mapReduce context: " + constructor);
});
db.mr_constructors_out.drop();
t.drop();
}
-function whereConstructorTest (constructorList) {
+function whereConstructorTest(constructorList) {
constructorList = addConstructorsWithNew(constructorList);
t = db.where_constructors;
t.drop();
- assert.writeOK( t.insert({ x : 1 }));
+ assert.writeOK(t.insert({x: 1}));
- constructorList.valid.forEach(function (constructor) {
+ constructorList.valid.forEach(function(constructor) {
try {
- t.findOne({ $where : constructor });
- }
- catch (e) {
- throw ("valid constructor: " + constructor + " failed in $where query: " + e);
+ t.findOne({$where: constructor});
+ } catch (e) {
+ throw("valid constructor: " + constructor + " failed in $where query: " + e);
}
});
- constructorList.invalid.forEach(function (constructor) {
- assert.throws(function () { t.findOne({ $where : constructor }); },
- [], "invalid constructor did not throw error in $where query: " + constructor);
+ constructorList.invalid.forEach(function(constructor) {
+ assert.throws(function() {
+ t.findOne({$where: constructor});
+ }, [], "invalid constructor did not throw error in $where query: " + constructor);
});
}
var dbrefConstructors = {
- "valid" : [
- "DBRef(\"namespace\", 0)",
- "DBRef(\"namespace\", \"test\")",
- "DBRef(\"namespace\", \"test\", \"database\")",
- "DBRef(\"namespace\", ObjectId())",
- "DBRef(\"namespace\", ObjectId(\"000000000000000000000000\"))",
- "DBRef(\"namespace\", ObjectId(\"000000000000000000000000\"), \"database\")",
- ],
- "invalid" : [
- "DBRef()",
- "DBRef(true, ObjectId())",
- "DBRef(true, ObjectId(), true)",
- "DBRef(\"namespace\")",
- "DBRef(\"namespace\", ObjectId(), true)",
- "DBRef(\"namespace\", ObjectId(), 123)",
- ]
+ "valid": [
+ "DBRef(\"namespace\", 0)",
+ "DBRef(\"namespace\", \"test\")",
+ "DBRef(\"namespace\", \"test\", \"database\")",
+ "DBRef(\"namespace\", ObjectId())",
+ "DBRef(\"namespace\", ObjectId(\"000000000000000000000000\"))",
+ "DBRef(\"namespace\", ObjectId(\"000000000000000000000000\"), \"database\")",
+ ],
+ "invalid": [
+ "DBRef()",
+ "DBRef(true, ObjectId())",
+ "DBRef(true, ObjectId(), true)",
+ "DBRef(\"namespace\")",
+ "DBRef(\"namespace\", ObjectId(), true)",
+ "DBRef(\"namespace\", ObjectId(), 123)",
+ ]
};
var dbpointerConstructors = {
- "valid" : [
- "DBPointer(\"namespace\", ObjectId())",
- "DBPointer(\"namespace\", ObjectId(\"000000000000000000000000\"))",
- ],
- "invalid" : [
- "DBPointer()",
- "DBPointer(true, ObjectId())",
- "DBPointer(\"namespace\", 0)",
- "DBPointer(\"namespace\", \"test\")",
- "DBPointer(\"namespace\")",
- "DBPointer(\"namespace\", ObjectId(), true)",
- ]
+ "valid": [
+ "DBPointer(\"namespace\", ObjectId())",
+ "DBPointer(\"namespace\", ObjectId(\"000000000000000000000000\"))",
+ ],
+ "invalid": [
+ "DBPointer()",
+ "DBPointer(true, ObjectId())",
+ "DBPointer(\"namespace\", 0)",
+ "DBPointer(\"namespace\", \"test\")",
+ "DBPointer(\"namespace\")",
+ "DBPointer(\"namespace\", ObjectId(), true)",
+ ]
};
-
var objectidConstructors = {
- "valid" : [
- 'ObjectId()',
- 'ObjectId("FFFFFFFFFFFFFFFFFFFFFFFF")',
- ],
- "invalid" : [
- 'ObjectId(5)',
- 'ObjectId("FFFFFFFFFFFFFFFFFFFFFFFQ")',
- ]
+ "valid": ['ObjectId()', 'ObjectId("FFFFFFFFFFFFFFFFFFFFFFFF")', ],
+ "invalid": ['ObjectId(5)', 'ObjectId("FFFFFFFFFFFFFFFFFFFFFFFQ")', ]
};
var timestampConstructors = {
- "valid" : [
- 'Timestamp()',
- 'Timestamp(0,0)',
- 'Timestamp(1.0,1.0)',
- ],
- "invalid" : [
+ "valid": ['Timestamp()', 'Timestamp(0,0)', 'Timestamp(1.0,1.0)', ],
+ "invalid": [
'Timestamp(0)',
'Timestamp(0,0,0)',
'Timestamp("test","test")',
@@ -166,14 +157,12 @@ var timestampConstructors = {
'Timestamp(true,true)',
'Timestamp(true,0)',
'Timestamp(0,true)',
- ]
+ ]
};
var bindataConstructors = {
- "valid" : [
- 'BinData(0,"test")',
- ],
- "invalid" : [
+ "valid": ['BinData(0,"test")', ],
+ "invalid": [
'BinData(0,"test", "test")',
'BinData()',
'BinData(-1, "")',
@@ -185,14 +174,12 @@ var bindataConstructors = {
'BinData(0, {})',
'BinData(0, [])',
'BinData(0, function () {})',
- ]
+ ]
};
var uuidConstructors = {
- "valid" : [
- 'UUID("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")',
- ],
- "invalid" : [
+ "valid": ['UUID("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")', ],
+ "invalid": [
'UUID("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 0)',
'UUID()',
'UUID("aa")',
@@ -204,14 +191,12 @@ var uuidConstructors = {
'UUID({})',
'UUID([])',
'UUID(function () {})',
- ]
+ ]
};
var md5Constructors = {
- "valid" : [
- 'MD5("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")',
- ],
- "invalid" : [
+ "valid": ['MD5("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")', ],
+ "invalid": [
'MD5("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 0)',
'MD5()',
'MD5("aa")',
@@ -223,17 +208,17 @@ var md5Constructors = {
'MD5({})',
'MD5([])',
'MD5(function () {})',
- ]
+ ]
};
var hexdataConstructors = {
- "valid" : [
+ "valid": [
'HexData(0, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")',
'HexData(0, "")',
'HexData(0, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")',
- 'HexData(0, "000000000000000000000005")', // SERVER-9605
- ],
- "invalid" : [
+ 'HexData(0, "000000000000000000000005")', // SERVER-9605
+ ],
+ "invalid": [
'HexData(0, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 0)',
'HexData()',
'HexData(0)',
@@ -249,19 +234,12 @@ var hexdataConstructors = {
'HexData(0, [])',
'HexData(0, function () {})',
'HexData(0, "invalidhex")',
- ]
+ ]
};
var dateConstructors = {
- "valid" : [
- 'Date()',
- 'Date(0)',
- 'Date(0,0)',
- 'Date(0,0,0)',
- 'Date("foo")',
- ],
- "invalid" : [
- ]
+ "valid": ['Date()', 'Date(0)', 'Date(0,0)', 'Date(0,0,0)', 'Date("foo")', ],
+ "invalid": []
};
clientEvalConstructorTest(dbrefConstructors);
diff --git a/jstests/core/copydb.js b/jstests/core/copydb.js
index d653053fbb0..4494bcc4716 100644
--- a/jstests/core/copydb.js
+++ b/jstests/core/copydb.js
@@ -16,6 +16,5 @@ assert.eq(1, db2.foo.count(), "D");
assert.eq(db1.foo.getIndexes().length, db2.foo.getIndexes().length);
// Test command input validation.
-assert.commandFailed(db1.adminCommand({copydb: 1,
- fromdb: db1.getName(),
- todb: "copydb.invalid"})); // Name can't contain dot.
+assert.commandFailed(db1.adminCommand(
+ {copydb: 1, fromdb: db1.getName(), todb: "copydb.invalid"})); // Name can't contain dot.
diff --git a/jstests/core/count.js b/jstests/core/count.js
index 5502d7176c1..9ec6a424c34 100644
--- a/jstests/core/count.js
+++ b/jstests/core/count.js
@@ -1,25 +1,24 @@
t = db.jstests_count;
t.drop();
-t.save( { i: 1 } );
-t.save( { i: 2 } );
-assert.eq( 1, t.find( { i: 1 } ).count(), "A" );
-assert.eq( 1, t.count( { i: 1 } ) , "B" );
-assert.eq( 2, t.find().count() , "C" );
-assert.eq( 2, t.find( undefined ).count() , "D" );
-assert.eq( 2, t.find( null ).count() , "E" );
-assert.eq( 2, t.count() , "F" );
+t.save({i: 1});
+t.save({i: 2});
+assert.eq(1, t.find({i: 1}).count(), "A");
+assert.eq(1, t.count({i: 1}), "B");
+assert.eq(2, t.find().count(), "C");
+assert.eq(2, t.find(undefined).count(), "D");
+assert.eq(2, t.find(null).count(), "E");
+assert.eq(2, t.count(), "F");
t.drop();
-t.save( {a:true,b:false} );
-t.ensureIndex( {b:1,a:1} );
-assert.eq( 1, t.find( {a:true,b:false} ).count() , "G" );
-assert.eq( 1, t.find( {b:false,a:true} ).count() , "H" );
+t.save({a: true, b: false});
+t.ensureIndex({b: 1, a: 1});
+assert.eq(1, t.find({a: true, b: false}).count(), "G");
+assert.eq(1, t.find({b: false, a: true}).count(), "H");
t.drop();
-t.save( {a:true,b:false} );
-t.ensureIndex( {b:1,a:1,c:1} );
-
-assert.eq( 1, t.find( {a:true,b:false} ).count() , "I" );
-assert.eq( 1, t.find( {b:false,a:true} ).count() , "J" );
+t.save({a: true, b: false});
+t.ensureIndex({b: 1, a: 1, c: 1});
+assert.eq(1, t.find({a: true, b: false}).count(), "I");
+assert.eq(1, t.find({b: false, a: true}).count(), "J");
diff --git a/jstests/core/count10.js b/jstests/core/count10.js
index 24d61956e4d..2a1853c399a 100644
--- a/jstests/core/count10.js
+++ b/jstests/core/count10.js
@@ -3,40 +3,38 @@
t = db.count10;
t.drop();
-for ( i=0; i<100; i++ ){
- t.save( { x : i } );
+for (i = 0; i < 100; i++) {
+ t.save({x: i});
}
// Start a parallel shell which repeatedly checks for a count
// query using db.currentOp(). As soon as the op is found,
// kill it via db.killOp().
-s = startParallelShell(
- 'assert.soon(function() {' +
- ' current = db.currentOp({"ns": db.count10.getFullName(), ' +
- ' "query.count": db.count10.getName()}); ' +
-
- // Check that we found the count op. If not, return false so
- // that assert.soon will retry.
- ' assert("inprog" in current); ' +
- ' if (current.inprog.length === 0) { ' +
- ' jsTest.log("count10.js: did not find count op, retrying"); ' +
- ' printjson(current); ' +
- ' return false; ' +
- ' } ' +
- ' countOp = current.inprog[0]; ' +
- ' if (!countOp) { ' +
- ' jsTest.log("count10.js: did not find count op, retrying"); ' +
- ' printjson(current); ' +
- ' return false; ' +
- ' } ' +
-
- // Found the count op. Try to kill it.
- ' jsTest.log("count10.js: found count op:"); ' +
- ' printjson(current); ' +
- ' printjson(db.killOp(countOp.opid)); ' +
- ' return true; ' +
- '}, "count10.js: could not find count op after retrying, gave up");'
-);
+s = startParallelShell('assert.soon(function() {' +
+ ' current = db.currentOp({"ns": db.count10.getFullName(), ' +
+ ' "query.count": db.count10.getName()}); ' +
+
+ // Check that we found the count op. If not, return false so
+ // that assert.soon will retry.
+ ' assert("inprog" in current); ' +
+ ' if (current.inprog.length === 0) { ' +
+ ' jsTest.log("count10.js: did not find count op, retrying"); ' +
+ ' printjson(current); ' +
+ ' return false; ' +
+ ' } ' +
+ ' countOp = current.inprog[0]; ' +
+ ' if (!countOp) { ' +
+ ' jsTest.log("count10.js: did not find count op, retrying"); ' +
+ ' printjson(current); ' +
+ ' return false; ' +
+ ' } ' +
+
+ // Found the count op. Try to kill it.
+ ' jsTest.log("count10.js: found count op:"); ' +
+ ' printjson(current); ' +
+ ' printjson(db.killOp(countOp.opid)); ' +
+ ' return true; ' +
+ '}, "count10.js: could not find count op after retrying, gave up");');
function getKilledCount() {
try {
diff --git a/jstests/core/count11.js b/jstests/core/count11.js
index 14392b9d90c..4ce218bfc43 100644
--- a/jstests/core/count11.js
+++ b/jstests/core/count11.js
@@ -5,8 +5,12 @@ var t = db.count11;
t.drop();
-var validQuery = {a: 1};
-var invalidQuery = {a: {$invalid: 1}};
+var validQuery = {
+ a: 1
+};
+var invalidQuery = {
+ a: {$invalid: 1}
+};
// Query non-existing collection with empty query.
assert.eq(0, t.find().count());
@@ -16,11 +20,15 @@ assert.eq(0, t.find().itcount());
// Returns 0 on valid syntax query.
// Fails on invalid syntax query.
assert.eq(0, t.find(validQuery).count());
-assert.throws(function() { t.find(invalidQuery).count(); });
+assert.throws(function() {
+ t.find(invalidQuery).count();
+});
// Query existing collection.
// Returns 0 on valid syntax query.
// Fails on invalid syntax query.
assert.commandWorked(db.createCollection(t.getName()));
assert.eq(0, t.find(validQuery).count());
-assert.throws(function() { t.find(invalidQuery).count(); });
+assert.throws(function() {
+ t.find(invalidQuery).count();
+});
diff --git a/jstests/core/count2.js b/jstests/core/count2.js
index 29084306a2f..0999a27e9ff 100644
--- a/jstests/core/count2.js
+++ b/jstests/core/count2.js
@@ -1,28 +1,28 @@
t = db.count2;
t.drop();
-for ( var i=0; i<1000; i++ ){
- t.save( { num : i , m : i % 20 } );
+for (var i = 0; i < 1000; i++) {
+ t.save({num: i, m: i % 20});
}
-assert.eq( 1000 , t.count() , "A" );
-assert.eq( 1000 , t.find().count() , "B" );
-assert.eq( 1000 , t.find().toArray().length , "C" );
+assert.eq(1000, t.count(), "A");
+assert.eq(1000, t.find().count(), "B");
+assert.eq(1000, t.find().toArray().length, "C");
-assert.eq( 50 , t.find( { m : 5 } ).toArray().length , "D" );
-assert.eq( 50 , t.find( { m : 5 } ).count() , "E" );
+assert.eq(50, t.find({m: 5}).toArray().length, "D");
+assert.eq(50, t.find({m: 5}).count(), "E");
-assert.eq( 40 , t.find( { m : 5 } ).skip( 10 ).toArray().length , "F" );
-assert.eq( 50 , t.find( { m : 5 } ).skip( 10 ).count() , "G" );
-assert.eq( 40 , t.find( { m : 5 } ).skip( 10 ).countReturn() , "H" );
+assert.eq(40, t.find({m: 5}).skip(10).toArray().length, "F");
+assert.eq(50, t.find({m: 5}).skip(10).count(), "G");
+assert.eq(40, t.find({m: 5}).skip(10).countReturn(), "H");
-assert.eq( 20 , t.find( { m : 5 } ).skip( 10 ).limit(20).toArray().length , "I" );
-assert.eq( 50 , t.find( { m : 5 } ).skip( 10 ).limit(20).count() , "J" );
-assert.eq( 20 , t.find( { m : 5 } ).skip( 10 ).limit(20).countReturn() , "K" );
+assert.eq(20, t.find({m: 5}).skip(10).limit(20).toArray().length, "I");
+assert.eq(50, t.find({m: 5}).skip(10).limit(20).count(), "J");
+assert.eq(20, t.find({m: 5}).skip(10).limit(20).countReturn(), "K");
-assert.eq( 5 , t.find( { m : 5 } ).skip( 45 ).limit(20).countReturn() , "L" );
+assert.eq(5, t.find({m: 5}).skip(45).limit(20).countReturn(), "L");
// Negative skip values should return error
-var negSkipResult = db.runCommand({ count: 't', skip : -2 });
-assert( ! negSkipResult.ok , "negative skip value shouldn't work, n = " + negSkipResult.n );
-assert( negSkipResult.errmsg.length > 0 , "no error msg for negative skip" );
+var negSkipResult = db.runCommand({count: 't', skip: -2});
+assert(!negSkipResult.ok, "negative skip value shouldn't work, n = " + negSkipResult.n);
+assert(negSkipResult.errmsg.length > 0, "no error msg for negative skip");
diff --git a/jstests/core/count3.js b/jstests/core/count3.js
index a8c3ef5faad..d93df020f0b 100644
--- a/jstests/core/count3.js
+++ b/jstests/core/count3.js
@@ -3,24 +3,21 @@ t = db.count3;
t.drop();
-t.save( { a : 1 } );
-t.save( { a : 1 , b : 2 } );
+t.save({a: 1});
+t.save({a: 1, b: 2});
-assert.eq( 2 , t.find( { a : 1 } ).itcount() , "A" );
-assert.eq( 2 , t.find( { a : 1 } ).count() , "B" );
+assert.eq(2, t.find({a: 1}).itcount(), "A");
+assert.eq(2, t.find({a: 1}).count(), "B");
-assert.eq( 2 , t.find( { a : 1 } , { b : 1 } ).itcount() , "C" );
-assert.eq( 2 , t.find( { a : 1 } , { b : 1 } ).count() , "D" );
+assert.eq(2, t.find({a: 1}, {b: 1}).itcount(), "C");
+assert.eq(2, t.find({a: 1}, {b: 1}).count(), "D");
t.drop();
-t.save( { a : 1 } );
-
-assert.eq( 1 , t.find( { a : 1 } ).itcount() , "E" );
-assert.eq( 1 , t.find( { a : 1 } ).count() , "F" );
-
-assert.eq( 1 , t.find( { a : 1 } , { b : 1 } ).itcount() , "G" );
-assert.eq( 1 , t.find( { a : 1 } , { b : 1 } ).count() , "H" );
-
+t.save({a: 1});
+assert.eq(1, t.find({a: 1}).itcount(), "E");
+assert.eq(1, t.find({a: 1}).count(), "F");
+assert.eq(1, t.find({a: 1}, {b: 1}).itcount(), "G");
+assert.eq(1, t.find({a: 1}, {b: 1}).count(), "H");
diff --git a/jstests/core/count4.js b/jstests/core/count4.js
index 11a43afbb01..ac0b3d3491f 100644
--- a/jstests/core/count4.js
+++ b/jstests/core/count4.js
@@ -2,16 +2,18 @@
t = db.count4;
t.drop();
-for ( i=0; i<100; i++ ){
- t.save( { x : i } );
+for (i = 0; i < 100; i++) {
+ t.save({x: i});
}
-q = { x : { $gt : 25 , $lte : 75 } };
+q = {
+ x: {$gt: 25, $lte: 75}
+};
-assert.eq( 50 , t.find( q ).count() , "A" );
-assert.eq( 50 , t.find( q ).itcount() , "B" );
+assert.eq(50, t.find(q).count(), "A");
+assert.eq(50, t.find(q).itcount(), "B");
-t.ensureIndex( { x : 1 } );
+t.ensureIndex({x: 1});
-assert.eq( 50 , t.find( q ).count() , "C" );
-assert.eq( 50 , t.find( q ).itcount() , "D" );
+assert.eq(50, t.find(q).count(), "C");
+assert.eq(50, t.find(q).itcount(), "D");
diff --git a/jstests/core/count5.js b/jstests/core/count5.js
index 3d7cf04a27c..5a23fde5661 100644
--- a/jstests/core/count5.js
+++ b/jstests/core/count5.js
@@ -2,29 +2,31 @@
t = db.count5;
t.drop();
-for ( i=0; i<100; i++ ){
- t.save( { x : i } );
+for (i = 0; i < 100; i++) {
+ t.save({x: i});
}
-q = { x : { $gt : 25 , $lte : 75 } };
+q = {
+ x: {$gt: 25, $lte: 75}
+};
-assert.eq( 50 , t.find( q ).count() , "A" );
-assert.eq( 50 , t.find( q ).itcount() , "B" );
+assert.eq(50, t.find(q).count(), "A");
+assert.eq(50, t.find(q).itcount(), "B");
-t.ensureIndex( { x : 1 } );
+t.ensureIndex({x: 1});
-assert.eq( 50 , t.find( q ).count() , "C" );
-assert.eq( 50 , t.find( q ).itcount() , "D" );
+assert.eq(50, t.find(q).count(), "C");
+assert.eq(50, t.find(q).itcount(), "D");
-assert.eq( 50 , t.find( q ).limit(1).count() , "E" );
-assert.eq( 1 , t.find( q ).limit(1).itcount() , "F" );
+assert.eq(50, t.find(q).limit(1).count(), "E");
+assert.eq(1, t.find(q).limit(1).itcount(), "F");
-assert.eq( 5 , t.find( q ).limit(5).size() , "G" );
-assert.eq( 5 , t.find( q ).skip(5).limit(5).size() , "H" );
-assert.eq( 2 , t.find( q ).skip(48).limit(5).size() , "I" );
+assert.eq(5, t.find(q).limit(5).size(), "G");
+assert.eq(5, t.find(q).skip(5).limit(5).size(), "H");
+assert.eq(2, t.find(q).skip(48).limit(5).size(), "I");
-assert.eq( 20 , t.find().limit(20).size() , "J" );
+assert.eq(20, t.find().limit(20).size(), "J");
-assert.eq( 0 , t.find().skip(120).size() , "K" );
-assert.eq( 1 , db.runCommand( { count: "count5" } )["ok"] , "L" );
-assert.eq( 1 , db.runCommand( { count: "count5", skip: 120 } )["ok"] , "M" );
+assert.eq(0, t.find().skip(120).size(), "K");
+assert.eq(1, db.runCommand({count: "count5"})["ok"], "L");
+assert.eq(1, db.runCommand({count: "count5", skip: 120})["ok"], "M");
diff --git a/jstests/core/count6.js b/jstests/core/count6.js
index 44c5fa33bc7..c1268f66047 100644
--- a/jstests/core/count6.js
+++ b/jstests/core/count6.js
@@ -2,60 +2,64 @@
t = db.jstests_count6;
-function checkCountForObject( obj ) {
+function checkCountForObject(obj) {
t.drop();
- t.ensureIndex( {b:1,a:1} );
-
- function checkCounts( query, expected ) {
- assert.eq( expected, t.count( query ) , "A1" );
- assert.eq( expected, t.find( query ).skip( 0 ).limit( 0 ).count( true ) , "A2" );
+ t.ensureIndex({b: 1, a: 1});
+
+ function checkCounts(query, expected) {
+ assert.eq(expected, t.count(query), "A1");
+ assert.eq(expected, t.find(query).skip(0).limit(0).count(true), "A2");
// Check proper counts with various skip and limit specs.
- for( var skip = 1; skip <= 2; ++skip ) {
- for( var limit = 1; limit <= 2; ++limit ) {
- assert.eq( Math.max( expected - skip, 0 ), t.find( query ).skip( skip ).count( true ) , "B1" );
- assert.eq( Math.min( expected, limit ), t.find( query ).limit( limit ).count( true ) , "B2" );
- assert.eq( Math.min( Math.max( expected - skip, 0 ), limit ), t.find( query ).skip( skip ).limit( limit ).count( true ) , "B4" );
+ for (var skip = 1; skip <= 2; ++skip) {
+ for (var limit = 1; limit <= 2; ++limit) {
+ assert.eq(Math.max(expected - skip, 0), t.find(query).skip(skip).count(true), "B1");
+ assert.eq(Math.min(expected, limit), t.find(query).limit(limit).count(true), "B2");
+ assert.eq(Math.min(Math.max(expected - skip, 0), limit),
+ t.find(query).skip(skip).limit(limit).count(true),
+ "B4");
// Check limit(x) = limit(-x)
- assert.eq( t.find( query ).limit( limit ).count( true ),
- t.find( query ).limit( -limit ).count( true ) , "C1" );
- assert.eq( t.find( query ).skip( skip ).limit( limit ).count( true ),
- t.find( query ).skip( skip ).limit( -limit ).count( true ) , "C2" );
+ assert.eq(t.find(query).limit(limit).count(true),
+ t.find(query).limit(-limit).count(true),
+ "C1");
+ assert.eq(t.find(query).skip(skip).limit(limit).count(true),
+ t.find(query).skip(skip).limit(-limit).count(true),
+ "C2");
}
}
// Check limit(0) has no effect
- assert.eq( expected, t.find( query ).limit( 0 ).count( true ) , "D1" );
- assert.eq( Math.max( expected - skip, 0 ),
- t.find( query ).skip( skip ).limit( 0 ).count( true ) , "D2" );
- assert.eq( expected, t.getDB().runCommand({ count: t.getName(),
- query: query, limit: 0 }).n , "D3" );
- assert.eq( Math.max( expected - skip, 0 ),
- t.getDB().runCommand({ count: t.getName(),
- query: query, limit: 0, skip: skip }).n , "D4" );
+ assert.eq(expected, t.find(query).limit(0).count(true), "D1");
+ assert.eq(
+ Math.max(expected - skip, 0), t.find(query).skip(skip).limit(0).count(true), "D2");
+ assert.eq(
+ expected, t.getDB().runCommand({count: t.getName(), query: query, limit: 0}).n, "D3");
+ assert.eq(Math.max(expected - skip, 0),
+ t.getDB().runCommand({count: t.getName(), query: query, limit: 0, skip: skip}).n,
+ "D4");
}
- for( var i = 0; i < 5; ++i ) {
- checkCounts( {a:obj.a,b:obj.b}, i );
- checkCounts( {b:obj.b,a:obj.a}, i );
- t.insert( obj );
+ for (var i = 0; i < 5; ++i) {
+ checkCounts({a: obj.a, b: obj.b}, i);
+ checkCounts({b: obj.b, a: obj.a}, i);
+ t.insert(obj);
}
- t.insert( {a:true,b:true} );
- t.insert( {a:true,b:1} );
- t.insert( {a:false,b:1} );
- t.insert( {a:false,b:true} );
- t.insert( {a:false,b:false} );
+ t.insert({a: true, b: true});
+ t.insert({a: true, b: 1});
+ t.insert({a: false, b: 1});
+ t.insert({a: false, b: true});
+ t.insert({a: false, b: false});
- checkCounts( {a:obj.a,b:obj.b}, i );
- checkCounts( {b:obj.b,a:obj.a}, i );
+ checkCounts({a: obj.a, b: obj.b}, i);
+ checkCounts({b: obj.b, a: obj.a}, i);
// Check with no query
- checkCounts( {}, 10 );
+ checkCounts({}, 10);
}
// Check fast count mode.
-checkCountForObject( {a:true,b:false} );
+checkCountForObject({a: true, b: false});
// Check normal count mode.
-checkCountForObject( {a:1,b:0} );
+checkCountForObject({a: 1, b: 0});
diff --git a/jstests/core/count7.js b/jstests/core/count7.js
index c2c1260d49b..8a3255d712a 100644
--- a/jstests/core/count7.js
+++ b/jstests/core/count7.js
@@ -3,23 +3,23 @@
t = db.jstests_count7;
t.drop();
-t.ensureIndex( {a:1} );
-t.save( {a:'algebra'} );
-t.save( {a:'apple'} );
-t.save( {a:'azores'} );
-t.save( {a:'bumper'} );
-t.save( {a:'supper'} );
-t.save( {a:'termite'} );
-t.save( {a:'zeppelin'} );
-t.save( {a:'ziggurat'} );
-t.save( {a:'zope'} );
+t.ensureIndex({a: 1});
+t.save({a: 'algebra'});
+t.save({a: 'apple'});
+t.save({a: 'azores'});
+t.save({a: 'bumper'});
+t.save({a: 'supper'});
+t.save({a: 'termite'});
+t.save({a: 'zeppelin'});
+t.save({a: 'ziggurat'});
+t.save({a: 'zope'});
-assert.eq( 5, t.count( {a:/p/} ) );
+assert.eq(5, t.count({a: /p/}));
t.remove({});
-t.save( {a:[1,2,3]} );
-t.save( {a:[1,2,3]} );
-t.save( {a:[1]} );
+t.save({a: [1, 2, 3]});
+t.save({a: [1, 2, 3]});
+t.save({a: [1]});
-assert.eq( 2, t.count( {a:{$gt:1}} ) );
+assert.eq(2, t.count({a: {$gt: 1}}));
diff --git a/jstests/core/count9.js b/jstests/core/count9.js
index 888ffe3b544..517322a9310 100644
--- a/jstests/core/count9.js
+++ b/jstests/core/count9.js
@@ -3,26 +3,26 @@
t = db.jstests_count9;
t.drop();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
-t.save( {a:['a','b','a']} );
-assert.eq( 1, t.count( {a:'a'} ) );
+t.save({a: ['a', 'b', 'a']});
+assert.eq(1, t.count({a: 'a'}));
-t.save( {a:['a','b','a']} );
-assert.eq( 2, t.count( {a:'a'} ) );
+t.save({a: ['a', 'b', 'a']});
+assert.eq(2, t.count({a: 'a'}));
t.drop();
-t.ensureIndex( {a:1,b:1} );
+t.ensureIndex({a: 1, b: 1});
-t.save( {a:['a','b','a'],b:'r'} );
-assert.eq( 1, t.count( {a:'a',b:'r'} ) );
-assert.eq( 1, t.count( {a:'a'} ) );
+t.save({a: ['a', 'b', 'a'], b: 'r'});
+assert.eq(1, t.count({a: 'a', b: 'r'}));
+assert.eq(1, t.count({a: 'a'}));
-t.save( {a:['a','b','a'],b:'r'} );
-assert.eq( 2, t.count( {a:'a',b:'r'} ) );
-assert.eq( 2, t.count( {a:'a'} ) );
+t.save({a: ['a', 'b', 'a'], b: 'r'});
+assert.eq(2, t.count({a: 'a', b: 'r'}));
+assert.eq(2, t.count({a: 'a'}));
t.drop();
-t.ensureIndex( {'a.b':1,'a.c':1} );
-t.save( {a:[{b:'b',c:'c'},{b:'b',c:'c'}]} );
-assert.eq( 1, t.count( {'a.b':'b','a.c':'c'} ) );
+t.ensureIndex({'a.b': 1, 'a.c': 1});
+t.save({a: [{b: 'b', c: 'c'}, {b: 'b', c: 'c'}]});
+assert.eq(1, t.count({'a.b': 'b', 'a.c': 'c'}));
diff --git a/jstests/core/count_plan_summary.js b/jstests/core/count_plan_summary.js
index a822c7666b5..48891d21e8e 100644
--- a/jstests/core/count_plan_summary.js
+++ b/jstests/core/count_plan_summary.js
@@ -10,9 +10,8 @@ for (var i = 0; i < 1000; i++) {
// Mock a long-running count operation by sleeping for each of
// the documents in the collection.
-var awaitShell = startParallelShell(
- "db.jstests_count_plan_summary.find({x: 1, $where: 'sleep(100)'}).count()"
-);
+var awaitShell =
+ startParallelShell("db.jstests_count_plan_summary.find({x: 1, $where: 'sleep(100)'}).count()");
// Find the count op in db.currentOp() and check for the plan summary.
assert.soon(function() {
diff --git a/jstests/core/counta.js b/jstests/core/counta.js
index eadec40f4a1..c65df230c72 100644
--- a/jstests/core/counta.js
+++ b/jstests/core/counta.js
@@ -3,17 +3,24 @@
t = db.jstests_counta;
t.drop();
-for( i = 0; i < 10; ++i ) {
- t.save( {a:i} );
+for (i = 0; i < 10; ++i) {
+ t.save({a: i});
}
-// f() is undefined, causing an assertion
-assert.throws(
- function(){
- t.count( { $where:function() { if ( this.a < 5 ) { return true; } else { f(); } } } );
- } );
+// f() is undefined, causing an assertion
+assert.throws(function() {
+ t.count({
+ $where: function() {
+ if (this.a < 5) {
+ return true;
+ } else {
+ f();
+ }
+ }
+ });
+});
// count must return error if collection name is absent
-res=db.runCommand("count");
-assert.eq(res.ok, 0); // must not be OK
-assert(res.code == 2); // should fail with errorcode("BadValue"), not an massert
+res = db.runCommand("count");
+assert.eq(res.ok, 0); // must not be OK
+assert(res.code == 2); // should fail with errorcode("BadValue"), not an massert
diff --git a/jstests/core/countb.js b/jstests/core/countb.js
index 8f7131a5a6c..a36378009c0 100644
--- a/jstests/core/countb.js
+++ b/jstests/core/countb.js
@@ -3,9 +3,9 @@
t = db.jstests_countb;
t.drop();
-t.ensureIndex( {a:1} );
-t.save( {a:['a','b']} );
-assert.eq( 0, t.find( {a:{$in:['a'],$gt:'b'}} ).count() );
-assert.eq( 0, t.find( {$and:[{a:'a'},{a:{$gt:'b'}}]} ).count() );
-assert.eq( 1, t.find( {$and:[{a:'a'},{$where:"this.a[1]=='b'"}]} ).count() );
-assert.eq( 0, t.find( {$and:[{a:'a'},{$where:"this.a[1]!='b'"}]} ).count() );
+t.ensureIndex({a: 1});
+t.save({a: ['a', 'b']});
+assert.eq(0, t.find({a: {$in: ['a'], $gt: 'b'}}).count());
+assert.eq(0, t.find({$and: [{a: 'a'}, {a: {$gt: 'b'}}]}).count());
+assert.eq(1, t.find({$and: [{a: 'a'}, {$where: "this.a[1]=='b'"}]}).count());
+assert.eq(0, t.find({$and: [{a: 'a'}, {$where: "this.a[1]!='b'"}]}).count());
diff --git a/jstests/core/countc.js b/jstests/core/countc.js
index 260dbb1f264..ea4aed54903 100644
--- a/jstests/core/countc.js
+++ b/jstests/core/countc.js
@@ -7,118 +7,109 @@
t = db.jstests_countc;
t.drop();
-
// Match a subset of inserted values within a $in operator.
t.drop();
-t.ensureIndex( { a:1 } );
+t.ensureIndex({a: 1});
// Save 'a' values 0, 0.5, 1.5, 2.5 ... 97.5, 98.5, 99.
-t.save( { a:0 } );
-t.save( { a:99 } );
-for( i = 0; i < 99; ++i ) {
- t.save( { a:( i + 0.5 ) } );
+t.save({a: 0});
+t.save({a: 99});
+for (i = 0; i < 99; ++i) {
+ t.save({a: (i + 0.5)});
}
// Query 'a' values $in 0, 1, 2, ..., 99.
vals = [];
-for( i = 0; i < 100; ++i ) {
- vals.push( i );
+for (i = 0; i < 100; ++i) {
+ vals.push(i);
}
// Only values 0 and 99 of the $in set are present in the collection, so the expected count is 2.
-assert.eq( 2, t.count( { a:{ $in:vals } } ) );
-
+assert.eq(2, t.count({a: {$in: vals}}));
// Match 'a' values within upper and lower limits.
t.drop();
-t.ensureIndex( { a:1 } );
-t.save( { a:[ 1, 2 ] } ); // Will match because 'a' is in range.
-t.save( { a:9 } ); // Will not match because 'a' is not in range.
+t.ensureIndex({a: 1});
+t.save({a: [1, 2]}); // Will match because 'a' is in range.
+t.save({a: 9}); // Will not match because 'a' is not in range.
// Only one document matches.
-assert.eq( 1, t.count( { a:{ $gt:0, $lt:5 } } ) );
-
+assert.eq(1, t.count({a: {$gt: 0, $lt: 5}}));
// Match two nested fields within an array.
t.drop();
-t.ensureIndex( { 'a.b':1, 'a.c':1 } );
-t.save( { a:[ { b:2, c:3 }, {} ] } );
+t.ensureIndex({'a.b': 1, 'a.c': 1});
+t.save({a: [{b: 2, c: 3}, {}]});
// The document does not match because its c value is 3.
-assert.eq( 0, t.count( { 'a.b':2, 'a.c':2 } ) );
-
+assert.eq(0, t.count({'a.b': 2, 'a.c': 2}));
// $gt:string only matches strings.
t.drop();
-t.ensureIndex( { a:1 } );
-t.save( { a:'a' } ); // Will match.
-t.save( { a:{} } ); // Will not match because {} is not a string.
+t.ensureIndex({a: 1});
+t.save({a: 'a'}); // Will match.
+t.save({a: {}}); // Will not match because {} is not a string.
// Only one document matches.
-assert.eq( 1, t.count( { a:{ $gte:'' } } ) );
-
+assert.eq(1, t.count({a: {$gte: ''}}));
// $lte:date only matches dates.
t.drop();
-t.ensureIndex( { a:1 } );
-t.save( { a:new Date( 1 ) } ); // Will match.
-t.save( { a:true } ); // Will not match because 'true' is not a date.
+t.ensureIndex({a: 1});
+t.save({a: new Date(1)}); // Will match.
+t.save({a: true}); // Will not match because 'true' is not a date.
// Only one document matches.
-assert.eq( 1, t.count( { a:{ $lte:new Date( 1 ) } } ) );
-
+assert.eq(1, t.count({a: {$lte: new Date(1)}}));
// Querying for 'undefined' triggers an error.
t.drop();
-t.ensureIndex( { a:1 } );
-assert.throws( function() { t.count( { a:undefined } ); } );
-
+t.ensureIndex({a: 1});
+assert.throws(function() {
+ t.count({a: undefined});
+});
// Count using a descending order index.
t.drop();
-t.ensureIndex( { a:-1 } );
-t.save( { a:1 } );
-t.save( { a:2 } );
-t.save( { a:3 } );
-assert.eq( 1, t.count( { a:{ $gt:2 } } ) );
-assert.eq( 1, t.count( { a:{ $lt:2 } } ) );
-assert.eq( 2, t.count( { a:{ $lte:2 } } ) );
-assert.eq( 2, t.count( { a:{ $lt:3 } } ) );
-
+t.ensureIndex({a: -1});
+t.save({a: 1});
+t.save({a: 2});
+t.save({a: 3});
+assert.eq(1, t.count({a: {$gt: 2}}));
+assert.eq(1, t.count({a: {$lt: 2}}));
+assert.eq(2, t.count({a: {$lte: 2}}));
+assert.eq(2, t.count({a: {$lt: 3}}));
// Count using a compound index.
t.drop();
-t.ensureIndex( { a:1, b:1 } );
-t.save( { a:1, b:2 } );
-t.save( { a:2, b:1 } );
-t.save( { a:2, b:3 } );
-t.save( { a:3, b:4 } );
-assert.eq( 1, t.count( { a:1 }));
-assert.eq( 2, t.count( { a:2 }));
-assert.eq( 1, t.count( { a:{ $gt:2 } } ) );
-assert.eq( 1, t.count( { a:{ $lt:2 } } ) );
-assert.eq( 2, t.count( { a:2, b:{ $gt:0 } } ) );
-assert.eq( 1, t.count( { a:2, b:{ $lt:3 } } ) );
-assert.eq( 1, t.count( { a:1, b:{ $lt:3 } } ) );
-
+t.ensureIndex({a: 1, b: 1});
+t.save({a: 1, b: 2});
+t.save({a: 2, b: 1});
+t.save({a: 2, b: 3});
+t.save({a: 3, b: 4});
+assert.eq(1, t.count({a: 1}));
+assert.eq(2, t.count({a: 2}));
+assert.eq(1, t.count({a: {$gt: 2}}));
+assert.eq(1, t.count({a: {$lt: 2}}));
+assert.eq(2, t.count({a: 2, b: {$gt: 0}}));
+assert.eq(1, t.count({a: 2, b: {$lt: 3}}));
+assert.eq(1, t.count({a: 1, b: {$lt: 3}}));
// Count using a compound descending order index.
t.drop();
-t.ensureIndex( { a:1, b:-1 } );
-t.save( { a:1, b:2 } );
-t.save( { a:2, b:1 } );
-t.save( { a:2, b:3 } );
-t.save( { a:3, b:4 } );
-assert.eq( 1, t.count( { a:{ $gt:2 } } ) );
-assert.eq( 1, t.count( { a:{ $lt:2 } } ) );
-assert.eq( 2, t.count( { a:2, b:{ $gt:0 } } ) );
-assert.eq( 1, t.count( { a:2, b:{ $lt:3 } } ) );
-assert.eq( 1, t.count( { a:1, b:{ $lt:3 } } ) );
-
+t.ensureIndex({a: 1, b: -1});
+t.save({a: 1, b: 2});
+t.save({a: 2, b: 1});
+t.save({a: 2, b: 3});
+t.save({a: 3, b: 4});
+assert.eq(1, t.count({a: {$gt: 2}}));
+assert.eq(1, t.count({a: {$lt: 2}}));
+assert.eq(2, t.count({a: 2, b: {$gt: 0}}));
+assert.eq(1, t.count({a: 2, b: {$lt: 3}}));
+assert.eq(1, t.count({a: 1, b: {$lt: 3}}));
// Count with a multikey value.
t.drop();
-t.ensureIndex( { a:1 } );
-t.save( { a:[ 1, 2 ] } );
-assert.eq( 1, t.count( { a:{ $gt:0, $lte:2 } } ) );
-
+t.ensureIndex({a: 1});
+t.save({a: [1, 2]});
+assert.eq(1, t.count({a: {$gt: 0, $lte: 2}}));
// Count with a match constraint on an unindexed field.
t.drop();
-t.ensureIndex( { a:1 } );
-t.save( { a:1, b:1 } );
-t.save( { a:1, b:2 } );
-assert.eq( 1, t.count( { a:1, $where:'this.b == 1' } ) );
+t.ensureIndex({a: 1});
+t.save({a: 1, b: 1});
+t.save({a: 1, b: 2});
+assert.eq(1, t.count({a: 1, $where: 'this.b == 1'}));
diff --git a/jstests/core/coveredIndex1.js b/jstests/core/coveredIndex1.js
index 434988b3b30..328c53ba0bc 100644
--- a/jstests/core/coveredIndex1.js
+++ b/jstests/core/coveredIndex1.js
@@ -11,78 +11,76 @@ t.save({fn: "john", ln: "smith"});
t.save({fn: "jack", ln: "black"});
t.save({fn: "bob", ln: "murray"});
t.save({fn: "aaa", ln: "bbb", obj: {a: 1, b: "blah"}});
-assert.eq( t.findOne({ln: "doe"}).fn, "john", "Cannot find right record" );
-assert.eq( t.count(), 6, "Not right length" );
+assert.eq(t.findOne({ln: "doe"}).fn, "john", "Cannot find right record");
+assert.eq(t.count(), 6, "Not right length");
// use simple index
t.ensureIndex({ln: 1});
-assert( !isIndexOnly(t.find({ln: "doe"}).explain().queryPlanner.winningPlan),
- "Find using covered index but all fields are returned");
-assert( !isIndexOnly(t.find({ln: "doe"}, {ln: 1}).explain().queryPlanner.winningPlan),
- "Find using covered index but _id is returned");
-assert( isIndexOnly(t.find({ln: "doe"}, {ln: 1, _id: 0}).explain().queryPlanner.winningPlan),
- "Find is not using covered index");
+assert(!isIndexOnly(t.find({ln: "doe"}).explain().queryPlanner.winningPlan),
+ "Find using covered index but all fields are returned");
+assert(!isIndexOnly(t.find({ln: "doe"}, {ln: 1}).explain().queryPlanner.winningPlan),
+ "Find using covered index but _id is returned");
+assert(isIndexOnly(t.find({ln: "doe"}, {ln: 1, _id: 0}).explain().queryPlanner.winningPlan),
+ "Find is not using covered index");
// this time, without a query spec
// SERVER-2109
-//assert.eq( t.find({}, {ln: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered index");
-assert( isIndexOnly(t.find({}, {ln: 1, _id: 0}).hint({ln: 1}).explain().queryPlanner.winningPlan),
- "Find is not using covered index");
+// assert.eq( t.find({}, {ln: 1, _id: 0}).explain().indexOnly, true, "Find is not using covered
+// index");
+assert(isIndexOnly(t.find({}, {ln: 1, _id: 0}).hint({ln: 1}).explain().queryPlanner.winningPlan),
+ "Find is not using covered index");
// use compound index
t.dropIndex({ln: 1});
t.ensureIndex({ln: 1, fn: 1});
// return 1 field
var plan = t.find({ln: "doe"}, {ln: 1, _id: 0}).explain();
-assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
+assert(isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// return both fields, multiple docs returned
var plan = t.find({ln: "doe"}, {ln: 1, fn: 1, _id: 0}).explain();
-assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
+assert(isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// match 1 record using both fields
var plan = t.find({ln: "doe", fn: "john"}, {ln: 1, fn: 1, _id: 0}).explain();
-assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
+assert(isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// change ordering
var plan = t.find({fn: "john", ln: "doe"}, {fn: 1, ln: 1, _id: 0}).explain();
-assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
+assert(isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// ask from 2nd index key
var plan = t.find({fn: "john"}, {fn: 1, _id: 0}).explain();
-assert( !isIndexOnly(plan.queryPlanner.winningPlan),
- "Find is using covered index, but doesnt have 1st key");
+assert(!isIndexOnly(plan.queryPlanner.winningPlan),
+ "Find is using covered index, but doesnt have 1st key");
// repeat above but with _id field
t.dropIndex({ln: 1, fn: 1});
t.ensureIndex({_id: 1, ln: 1});
// return 1 field
var plan = t.find({_id: 123, ln: "doe"}, {_id: 1}).explain();
-assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
+assert(isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// match 1 record using both fields
var plan = t.find({_id: 123, ln: "doe"}, {ln: 1}).explain();
-assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
+assert(isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// change ordering
var plan = t.find({ln: "doe", _id: 123}, {ln: 1, _id: 1}).explain();
-assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
+assert(isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// ask from 2nd index key
var plan = t.find({ln: "doe"}, {ln: 1}).explain();
-assert( !isIndexOnly(plan.queryPlanner.winningPlan),
- "Find is using covered index, but doesnt have 1st key");
+assert(!isIndexOnly(plan.queryPlanner.winningPlan),
+ "Find is using covered index, but doesnt have 1st key");
// repeat above but with embedded obj
t.dropIndex({_id: 1, ln: 1});
t.ensureIndex({obj: 1});
var plan = t.find({"obj.a": 1}, {obj: 1}).explain();
-assert( !isIndexOnly(plan.queryPlanner.winningPlan),
- "Shouldnt use index when introspecting object");
+assert(!isIndexOnly(plan.queryPlanner.winningPlan), "Shouldnt use index when introspecting object");
var plan = t.find({obj: {a: 1, b: "blah"}}).explain();
-assert( !isIndexOnly(plan.queryPlanner.winningPlan), "Index doesnt have all fields to cover");
+assert(!isIndexOnly(plan.queryPlanner.winningPlan), "Index doesnt have all fields to cover");
var plan = t.find({obj: {a: 1, b: "blah"}}, {obj: 1, _id: 0}).explain();
-assert( isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
+assert(isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// repeat above but with index on sub obj field
t.dropIndex({obj: 1});
t.ensureIndex({"obj.a": 1, "obj.b": 1});
var plan = t.find({"obj.a": 1}, {obj: 1}).explain();
-assert( !isIndexOnly(plan.queryPlanner.winningPlan),
- "Shouldnt use index when introspecting object");
+assert(!isIndexOnly(plan.queryPlanner.winningPlan), "Shouldnt use index when introspecting object");
assert(t.validate().valid);
-
diff --git a/jstests/core/coveredIndex2.js b/jstests/core/coveredIndex2.js
index 0f26037bf43..f7b542008f3 100644
--- a/jstests/core/coveredIndex2.js
+++ b/jstests/core/coveredIndex2.js
@@ -6,23 +6,21 @@ load("jstests/libs/analyze_plan.js");
t.save({a: 1});
t.save({a: 2});
-assert.eq( t.findOne({a: 1}).a, 1, "Cannot find right record" );
-assert.eq( t.count(), 2, "Not right length" );
+assert.eq(t.findOne({a: 1}).a, 1, "Cannot find right record");
+assert.eq(t.count(), 2, "Not right length");
// use simple index
t.ensureIndex({a: 1});
-var plan = t.find({a:1}).explain();
-assert( !isIndexOnly(plan.queryPlanner.winningPlan),
- "Find using covered index but all fields are returned");
-var plan = t.find({a:1}, {a: 1}).explain();
-assert( !isIndexOnly(plan.queryPlanner.winningPlan),
- "Find using covered index but _id is returned");
-var plan = t.find({a:1}, {a: 1, _id: 0}).explain();
-assert( isIndexOnly(plan.queryPlanner.winningPlan),
- "Find is not using covered index");
+var plan = t.find({a: 1}).explain();
+assert(!isIndexOnly(plan.queryPlanner.winningPlan),
+ "Find using covered index but all fields are returned");
+var plan = t.find({a: 1}, {a: 1}).explain();
+assert(!isIndexOnly(plan.queryPlanner.winningPlan), "Find using covered index but _id is returned");
+var plan = t.find({a: 1}, {a: 1, _id: 0}).explain();
+assert(isIndexOnly(plan.queryPlanner.winningPlan), "Find is not using covered index");
// add multikey
-t.save({a:[3,4]});
-var plan = t.find({a:1}, {a: 1, _id: 0}).explain();
-assert( !isIndexOnly(plan.queryPlanner.winningPlan),
- "Find is using covered index even after multikey insert");
+t.save({a: [3, 4]});
+var plan = t.find({a: 1}, {a: 1, _id: 0}).explain();
+assert(!isIndexOnly(plan.queryPlanner.winningPlan),
+ "Find is using covered index even after multikey insert");
diff --git a/jstests/core/coveredIndex3.js b/jstests/core/coveredIndex3.js
index 66180342605..4bfedda888b 100644
--- a/jstests/core/coveredIndex3.js
+++ b/jstests/core/coveredIndex3.js
@@ -1,54 +1,49 @@
// Check proper covered index handling when query and processGetMore yield.
// SERVER-4975
-if ( 0 ) { // SERVER-4975
-
-t = db.jstests_coveredIndex3;
-t2 = db.jstests_coveredIndex3_other;
-t.drop();
-t2.drop();
-
-function doTest( batchSize ) {
-
- // Insert an array, which will make the { a:1 } index multikey and should disable covered index
- // matching.
- p1 = startParallelShell(
- 'for( i = 0; i < 60; ++i ) { \
+if (0) { // SERVER-4975
+
+ t = db.jstests_coveredIndex3;
+ t2 = db.jstests_coveredIndex3_other;
+ t.drop();
+ t2.drop();
+
+ function doTest(batchSize) {
+ // Insert an array, which will make the { a:1 } index multikey and should disable covered
+ // index
+ // matching.
+ p1 = startParallelShell('for( i = 0; i < 60; ++i ) { \
db.jstests_coveredIndex3.save( { a:[ 2000, 2001 ] } ); \
sleep( 300 ); \
- }'
- );
+ }');
- // Frequent writes cause the find operation to yield.
- p2 = startParallelShell(
- 'for( i = 0; i < 1800; ++i ) { \
+ // Frequent writes cause the find operation to yield.
+ p2 = startParallelShell('for( i = 0; i < 1800; ++i ) { \
db.jstests_coveredIndex3_other.save( {} ); \
sleep( 10 ); \
- }'
- );
-
- for( i = 0; i < 30; ++i ) {
- t.drop();
- t.ensureIndex( { a:1 } );
-
- for( j = 0; j < 1000; ++j ) {
- t.save( { a:j } );
+ }');
+
+ for (i = 0; i < 30; ++i) {
+ t.drop();
+ t.ensureIndex({a: 1});
+
+ for (j = 0; j < 1000; ++j) {
+ t.save({a: j});
+ }
+
+ c = t.find({}, {_id: 0, a: 1}).hint({a: 1}).batchSize(batchSize);
+ while (c.hasNext()) {
+ o = c.next();
+ // If o contains a high numeric 'a' value, it must come from an array saved in p1.
+ assert(!(o.a > 1500), 'improper object returned ' + tojson(o));
+ }
}
-
- c = t.find( {}, { _id:0, a:1 } ).hint( { a:1 } ).batchSize( batchSize );
- while( c.hasNext() ) {
- o = c.next();
- // If o contains a high numeric 'a' value, it must come from an array saved in p1.
- assert( !( o.a > 1500 ), 'improper object returned ' + tojson( o ) );
- }
- }
- p1();
- p2();
-
-}
-
-doTest( 2000 ); // Test query.
-doTest( 500 ); // Try to test getMore - not clear if this will actually trigger the getMore issue.
+ p1();
+ p2();
+ }
+ doTest(2000); // Test query.
+ doTest(
+ 500); // Try to test getMore - not clear if this will actually trigger the getMore issue.
}
diff --git a/jstests/core/coveredIndex4.js b/jstests/core/coveredIndex4.js
index 136eba603cf..e7c6cc93a76 100644
--- a/jstests/core/coveredIndex4.js
+++ b/jstests/core/coveredIndex4.js
@@ -4,37 +4,36 @@
t = db.jstests_coveredIndex4;
t.drop();
-t.ensureIndex( { a:1 } );
-t.ensureIndex( { b:1 } );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
orClause = [];
-for( i = 0; i < 200; ++i ) {
- if ( i % 2 == 0 ) {
- t.save( { a:i } );
- orClause.push( { a:i } );
- }
- else {
- t.save( { b:i } );
- orClause.push( { b:i } );
+for (i = 0; i < 200; ++i) {
+ if (i % 2 == 0) {
+ t.save({a: i});
+ orClause.push({a: i});
+ } else {
+ t.save({b: i});
+ orClause.push({b: i});
}
}
-c = t.find( { $or:orClause }, { _id:0, a:1 } );
+c = t.find({$or: orClause}, {_id: 0, a: 1});
// No odd values of a were saved, so we should not see any in the results.
-while( c.hasNext() ) {
+while (c.hasNext()) {
o = c.next();
- if ( o.a ) {
- assert.eq( 0, o.a % 2, 'unexpected result: ' + tojson( o ) );
+ if (o.a) {
+ assert.eq(0, o.a % 2, 'unexpected result: ' + tojson(o));
}
}
-c = t.find( { $or:orClause }, { _id:0, b:1 } );
+c = t.find({$or: orClause}, {_id: 0, b: 1});
// No even values of b were saved, so we should not see any in the results.
-while( c.hasNext() ) {
+while (c.hasNext()) {
o = c.next();
- if ( o.b ) {
- assert.eq( 1, o.b % 2, 'unexpected result: ' + tojson( o ) );
+ if (o.b) {
+ assert.eq(1, o.b % 2, 'unexpected result: ' + tojson(o));
}
}
diff --git a/jstests/core/covered_index_compound_1.js b/jstests/core/covered_index_compound_1.js
index ffe01646ba7..45f17fd7d44 100644
--- a/jstests/core/covered_index_compound_1.js
+++ b/jstests/core/covered_index_compound_1.js
@@ -5,70 +5,77 @@ load("jstests/libs/analyze_plan.js");
var coll = db.getCollection("covered_compound_1");
coll.drop();
-for (i=0;i<100;i++) {
- coll.insert({a:i, b:"strvar_"+(i%13), c:NumberInt(i%10)});
+for (i = 0; i < 100; i++) {
+ coll.insert({a: i, b: "strvar_" + (i % 13), c: NumberInt(i % 10)});
}
-coll.ensureIndex({a:1,b:-1,c:1});
+coll.ensureIndex({a: 1, b: -1, c: 1});
// Test equality - all indexed fields queried and projected
-var plan = coll.find({a:10, b:"strvar_10", c:0}, {a:1, b:1, c:1, _id:0})
- .hint({a:1, b:-1, c:1})
+var plan = coll.find({a: 10, b: "strvar_10", c: 0}, {a: 1, b: 1, c: 1, _id: 0})
+ .hint({a: 1, b: -1, c: 1})
.explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
- "compound.1.1 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+ "compound.1.1 - indexOnly should be true on covered query");
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"compound.1.1 - nscannedObjects should be 0 for covered query");
// Test query on subset of fields queried and project all
-var plan = coll.find({a:26, b:"strvar_0"}, {a:1, b:1, c:1, _id:0})
- .hint({a:1, b:-1, c:1})
+var plan = coll.find({a: 26, b: "strvar_0"}, {a: 1, b: 1, c: 1, _id: 0})
+ .hint({a: 1, b: -1, c: 1})
.explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"compound.1.2 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"compound.1.2 - nscannedObjects should be 0 for covered query");
// Test query on all fields queried and project subset
-var plan = coll.find({a:38, b:"strvar_12", c: 8}, {b:1, c:1, _id:0})
- .hint({a:1, b:-1, c:1})
+var plan = coll.find({a: 38, b: "strvar_12", c: 8}, {b: 1, c: 1, _id: 0})
+ .hint({a: 1, b: -1, c: 1})
.explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"compound.1.3 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"compound.1.3 - nscannedObjects should be 0 for covered query");
// Test no query
-var plan = coll.find({}, {b:1, c:1, _id:0}).hint({a:1, b:-1, c:1}).explain("executionStats");
+var plan = coll.find({}, {b: 1, c: 1, _id: 0}).hint({a: 1, b: -1, c: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"compound.1.4 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"compound.1.4 - nscannedObjects should be 0 for covered query");
// Test range query
-var plan = coll.find({a:{$gt:25,$lt:43}}, {b:1, c:1, _id:0})
- .hint({a:1, b:-1, c:1})
+var plan = coll.find({a: {$gt: 25, $lt: 43}}, {b: 1, c: 1, _id: 0})
+ .hint({a: 1, b: -1, c: 1})
.explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"compound.1.5 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"compound.1.5 - nscannedObjects should be 0 for covered query");
// Test in query
-var plan = coll.find({a:38, b:"strvar_12", c:{$in:[5,8]}}, {b:1, c:1, _id:0})
- .hint({a:1, b:-1, c:1})
+var plan = coll.find({a: 38, b: "strvar_12", c: {$in: [5, 8]}}, {b: 1, c: 1, _id: 0})
+ .hint({a: 1, b: -1, c: 1})
.explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"compound.1.6 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"compound.1.6 - nscannedObjects should be 0 for covered query");
// Test no result
-var plan = coll.find({a:38, b:"strvar_12", c:55},{a:1, b:1, c:1, _id:0})
- .hint({a:1, b:-1, c:1})
+var plan = coll.find({a: 38, b: "strvar_12", c: 55}, {a: 1, b: 1, c: 1, _id: 0})
+ .hint({a: 1, b: -1, c: 1})
.explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"compound.1.7 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"compound.1.7 - nscannedObjects should be 0 for covered query");
print('all tests passed');
diff --git a/jstests/core/covered_index_negative_1.js b/jstests/core/covered_index_negative_1.js
index 8afd03f2a58..37a9b4dc3bb 100644
--- a/jstests/core/covered_index_negative_1.js
+++ b/jstests/core/covered_index_negative_1.js
@@ -8,72 +8,84 @@ load("jstests/libs/analyze_plan.js");
var coll = db.getCollection("covered_negative_1");
coll.drop();
-for (i=0;i<100;i++) {
- coll.insert({a:i, b:"strvar_"+(i%13), c:NumberInt(i%10), d: i*10, e: [i, i%10],
- f:i});
+for (i = 0; i < 100; i++) {
+ coll.insert(
+ {a: i, b: "strvar_" + (i % 13), c: NumberInt(i % 10), d: i * 10, e: [i, i % 10], f: i});
}
-coll.ensureIndex({a:1,b:-1,c:1});
-coll.ensureIndex({e:1});
-coll.ensureIndex({d:1});
-coll.ensureIndex({f:"hashed"});
+coll.ensureIndex({a: 1, b: -1, c: 1});
+coll.ensureIndex({e: 1});
+coll.ensureIndex({d: 1});
+coll.ensureIndex({f: "hashed"});
// Test no projection
-var plan = coll.find({a:10, b:"strvar_10", c:0}).hint({a:1, b:-1, c:1}).explain("executionStats");
+var plan =
+ coll.find({a: 10, b: "strvar_10", c: 0}).hint({a: 1, b: -1, c: 1}).explain("executionStats");
assert(!isIndexOnly(plan.queryPlanner.winningPlan),
"negative.1.1 - indexOnly should be false on a non covered query");
-assert.neq(0, plan.executionStats.totalDocsExamined,
+assert.neq(0,
+ plan.executionStats.totalDocsExamined,
"negative.1.1 - docs examined should not be 0 for a non covered query");
// Test projection and not excluding _id
-var plan = coll.find({a:10, b:"strvar_10", c:0},{a:1, b:1, c:1})
- .hint({a:1, b:-1, c:1})
+var plan = coll.find({a: 10, b: "strvar_10", c: 0}, {a: 1, b: 1, c: 1})
+ .hint({a: 1, b: -1, c: 1})
.explain("executionStats");
assert(!isIndexOnly(plan.queryPlanner.winningPlan),
"negative.1.2 - indexOnly should be false on a non covered query");
-assert.neq(0, plan.executionStats.totalDocsExamined,
+assert.neq(0,
+ plan.executionStats.totalDocsExamined,
"negative.1.2 - docs examined should not be 0 for a non covered query");
// Test projection of non-indexed field
-var plan = coll.find({d:100},{d:1, c:1, _id:0}).hint({d:1}).explain("executionStats");
+var plan = coll.find({d: 100}, {d: 1, c: 1, _id: 0}).hint({d: 1}).explain("executionStats");
assert(!isIndexOnly(plan.queryPlanner.winningPlan),
"negative.1.3 - indexOnly should be false on a non covered query");
-assert.neq(0, plan.executionStats.totalDocsExamined,
+assert.neq(0,
+ plan.executionStats.totalDocsExamined,
"negative.1.3 - docs examined should not be 0 for a non covered query");
// Test query and projection on a multi-key index
-var plan = coll.find({e:99},{e:1, _id:0}).hint({e:1}).explain("executionStats");
+var plan = coll.find({e: 99}, {e: 1, _id: 0}).hint({e: 1}).explain("executionStats");
assert(!isIndexOnly(plan.queryPlanner.winningPlan),
"negative.1.4 - indexOnly should be false on a non covered query");
-assert.neq(0, plan.executionStats.totalDocsExamined,
+assert.neq(0,
+ plan.executionStats.totalDocsExamined,
"negative.1.4 - docs examined should not be 0 for a non covered query");
// Commenting out negative.1.5 and 1.6 pending fix in SERVER-8650
// // Test projection and $natural sort
-// var plan = coll.find({a:{$gt:70}},{a:1, b:1, c:1, _id:0}).sort({$natural:1}).hint({a:1, b:-1, c:1}).explain()
+// var plan = coll.find({a:{$gt:70}},{a:1, b:1, c:1, _id:0}).sort({$natural:1}).hint({a:1, b:-1,
+// c:1}).explain()
// // indexOnly should be false but is not due to bug https://jira.mongodb.org/browse/SERVER-8561
-// assert.eq(true, plan.indexOnly, "negative.1.5 - indexOnly should be false on a non covered query")
-// assert.neq(0, plan.nscannedObjects, "negative.1.5 - nscannedObjects should not be 0 for a non covered query")
+// assert.eq(true, plan.indexOnly, "negative.1.5 - indexOnly should be false on a non covered
+// query")
+// assert.neq(0, plan.nscannedObjects, "negative.1.5 - nscannedObjects should not be 0 for a non
+// covered query")
// // Test sort on non-indexed field
// var plan = coll.find({d:{$lt:1000}},{d:1, _id:0}).sort({c:1}).hint({d:1}).explain()
// //indexOnly should be false but is not due to bug https://jira.mongodb.org/browse/SERVER-8562
-// assert.eq(true, plan.indexOnly, "negative.1.6 - indexOnly should be false on a non covered query")
-// assert.neq(0, plan.nscannedObjects, "negative.1.6 - nscannedObjects should not be 0 for a non covered query")
+// assert.eq(true, plan.indexOnly, "negative.1.6 - indexOnly should be false on a non covered
+// query")
+// assert.neq(0, plan.nscannedObjects, "negative.1.6 - nscannedObjects should not be 0 for a non
+// covered query")
// Test query on non-indexed field
-var plan = coll.find({d:{$lt:1000}},{a:1, b:1, c:1, _id:0})
- .hint({a:1, b:-1, c:1})
+var plan = coll.find({d: {$lt: 1000}}, {a: 1, b: 1, c: 1, _id: 0})
+ .hint({a: 1, b: -1, c: 1})
.explain("executionStats");
assert(!isIndexOnly(plan.queryPlanner.winningPlan),
"negative.1.7 - indexOnly should be false on a non covered query");
-assert.neq(0, plan.executionStats.totalDocsExamined,
+assert.neq(0,
+ plan.executionStats.totalDocsExamined,
"negative.1.7 - docs examined should not be 0 for a non covered query");
// Test query on hashed indexed field
-var plan = coll.find({f:10},{f:1, _id:0}).hint({f:"hashed"}).explain("executionStats");
+var plan = coll.find({f: 10}, {f: 1, _id: 0}).hint({f: "hashed"}).explain("executionStats");
assert(!isIndexOnly(plan.queryPlanner.winningPlan),
"negative.1.8 - indexOnly should be false on a non covered query");
-assert.neq(0, plan.executionStats.totalDocsExamined,
+assert.neq(0,
+ plan.executionStats.totalDocsExamined,
"negative.1.8 - nscannedObjects should not be 0 for a non covered query");
print('all tests passed');
diff --git a/jstests/core/covered_index_simple_1.js b/jstests/core/covered_index_simple_1.js
index 25badb1601f..3827ef4acfc 100644
--- a/jstests/core/covered_index_simple_1.js
+++ b/jstests/core/covered_index_simple_1.js
@@ -5,67 +5,76 @@ load("jstests/libs/analyze_plan.js");
var coll = db.getCollection("covered_simple_1");
coll.drop();
-for (i=0;i<10;i++) {
- coll.insert({foo:i});
+for (i = 0; i < 10; i++) {
+ coll.insert({foo: i});
}
-for (i=0;i<10;i++) {
- coll.insert({foo:i});
+for (i = 0; i < 10; i++) {
+ coll.insert({foo: i});
}
-for (i=0;i<5;i++) {
- coll.insert({bar:i});
+for (i = 0; i < 5; i++) {
+ coll.insert({bar: i});
}
-coll.insert({foo:"string"});
-coll.insert({foo:{bar:1}});
-coll.insert({foo:null});
-coll.ensureIndex({foo:1});
+coll.insert({foo: "string"});
+coll.insert({foo: {bar: 1}});
+coll.insert({foo: null});
+coll.ensureIndex({foo: 1});
// Test equality with int value
-var plan = coll.find({foo:1}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({foo: 1}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.1.1 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.1.1 - docs examined should be 0 for covered query");
// Test equality with string value
-var plan = coll.find({foo:"string"}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({foo: "string"}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.1.2 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.1.2 - docs examined should be 0 for covered query");
// Test equality with doc value
-var plan = coll.find({foo:{bar:1}}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({foo: {bar: 1}}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.1.3 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.1.3 - docs examined should be 0 for covered query");
// Test no query
-var plan = coll.find({}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.1.4 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.1.4 - docs examined should be 0 for covered query");
// Test range query
-var plan = coll.find({foo:{$gt:2,$lt:6}}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan =
+ coll.find({foo: {$gt: 2, $lt: 6}}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.1.5 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.1.5 - docs examined should be 0 for covered query");
// Test in query
-var plan = coll.find({foo:{$in:[5,8]}}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan =
+ coll.find({foo: {$in: [5, 8]}}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.1.6 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.1.6 - docs examined should be 0 for covered query");
// Test no return
-var plan = coll.find({foo:"2"}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({foo: "2"}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.1.7 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.1.7 - nscannedObjects should be 0 for covered query");
-print ('all tests pass');
+print('all tests pass');
diff --git a/jstests/core/covered_index_simple_2.js b/jstests/core/covered_index_simple_2.js
index f666a9eb4ae..0c947849703 100644
--- a/jstests/core/covered_index_simple_2.js
+++ b/jstests/core/covered_index_simple_2.js
@@ -5,54 +5,62 @@ load("jstests/libs/analyze_plan.js");
var coll = db.getCollection("covered_simple_2");
coll.drop();
-for (i=0;i<10;i++) {
- coll.insert({foo:i});
+for (i = 0; i < 10; i++) {
+ coll.insert({foo: i});
}
-coll.insert({foo:"string"});
-coll.insert({foo:{bar:1}});
-coll.insert({foo:null});
-coll.ensureIndex({foo:1},{unique:true});
+coll.insert({foo: "string"});
+coll.insert({foo: {bar: 1}});
+coll.insert({foo: null});
+coll.ensureIndex({foo: 1}, {unique: true});
// Test equality with int value
-var plan = coll.find({foo:1}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({foo: 1}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.2.1 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.2.1 - docs examined should be 0 for covered query");
// Test equality with string value
-var plan = coll.find({foo:"string"}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({foo: "string"}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.2.2 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.2.2 - docs examined should be 0 for covered query");
// Test equality with int value on a dotted field
-var plan = coll.find({foo:{bar:1}}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({foo: {bar: 1}}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.2.3 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.2.3 - docs examined should be 0 for covered query");
// Test no query
-var plan = coll.find({}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.2.4 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.2.4 - docs examined should be 0 for covered query");
// Test range query
-var plan = coll.find({foo:{$gt:2,$lt:6}}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan =
+ coll.find({foo: {$gt: 2, $lt: 6}}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.2.5 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.2.5 - docs examined should be 0 for covered query");
// Test in query
-var plan = coll.find({foo:{$in:[5,8]}}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan =
+ coll.find({foo: {$in: [5, 8]}}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.2.6 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.2.6 - docs examined should be 0 for covered query");
-print ('all tests pass');
+print('all tests pass');
diff --git a/jstests/core/covered_index_simple_3.js b/jstests/core/covered_index_simple_3.js
index 5338f4bd782..e445396c4c8 100644
--- a/jstests/core/covered_index_simple_3.js
+++ b/jstests/core/covered_index_simple_3.js
@@ -5,73 +5,85 @@ load("jstests/libs/analyze_plan.js");
var coll = db.getCollection("covered_simple_3");
coll.drop();
-for (i=0;i<10;i++) {
- coll.insert({foo:i});
+for (i = 0; i < 10; i++) {
+ coll.insert({foo: i});
}
-for (i=0;i<5;i++) {
- coll.insert({bar:i});
+for (i = 0; i < 5; i++) {
+ coll.insert({bar: i});
}
-coll.insert({foo:"string"});
-coll.insert({foo:{bar:1}});
-coll.insert({foo:null});
-coll.ensureIndex({foo:1}, {sparse:true, unique:true});
+coll.insert({foo: "string"});
+coll.insert({foo: {bar: 1}});
+coll.insert({foo: null});
+coll.ensureIndex({foo: 1}, {sparse: true, unique: true});
// Test equality with int value
-var plan = coll.find({foo:1}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({foo: 1}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.3.1 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.3.1 - docs examined should be 0 for covered query");
// Test equality with string value
-var plan = coll.find({foo:"string"}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({foo: "string"}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.3.2 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.3.2 - docs examined should be 0 for covered query");
// Test equality with int value on a dotted field
-var plan = coll.find({foo:{bar:1}}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({foo: {bar: 1}}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.3.3 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.3.3 - docs examined should be 0 for covered query");
// Test no query
-var plan = coll.find({}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.3.4 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.3.4 - docs examined should be 0 for covered query");
// Test range query
-var plan = coll.find({foo:{$gt:2,$lt:6}}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan =
+ coll.find({foo: {$gt: 2, $lt: 6}}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.3.5 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.3.5 - docs examined should be 0 for covered query");
// Test in query
-var plan = coll.find({foo:{$in:[5,8]}}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan =
+ coll.find({foo: {$in: [5, 8]}}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.3.6 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.3.6 - docs examined should be 0 for covered query");
// Test $exists true
-var plan = coll.find({foo:{$exists:true}}, {foo:1, _id:0}).hint({foo:1}).explain("executionStats");
+var plan =
+ coll.find({foo: {$exists: true}}, {foo: 1, _id: 0}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.3.7 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.3.7 - docs examined should be 0 for covered query");
// Check that $nin can be covered.
coll.dropIndexes();
coll.ensureIndex({bar: 1});
-var plan = coll.find({bar:{$nin:[5,8]}}, {bar:1, _id:0}).hint({bar:1}).explain("executionStats");
+var plan =
+ coll.find({bar: {$nin: [5, 8]}}, {bar: 1, _id: 0}).hint({bar: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.3.8 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.3.8 - docs examined should be 0 for covered query");
-print ('all tests pass');
+print('all tests pass');
diff --git a/jstests/core/covered_index_simple_id.js b/jstests/core/covered_index_simple_id.js
index d6db2c3149f..c2550544abd 100644
--- a/jstests/core/covered_index_simple_id.js
+++ b/jstests/core/covered_index_simple_id.js
@@ -5,53 +5,59 @@ load("jstests/libs/analyze_plan.js");
var coll = db.getCollection("covered_simple_id");
coll.drop();
-for (i=0;i<10;i++) {
- coll.insert({_id:i});
+for (i = 0; i < 10; i++) {
+ coll.insert({_id: i});
}
-coll.insert({_id:"string"});
-coll.insert({_id:{bar:1}});
-coll.insert({_id:null});
+coll.insert({_id: "string"});
+coll.insert({_id: {bar: 1}});
+coll.insert({_id: null});
// Test equality with int value
-var plan = coll.find({_id:1}, {_id:1}).hint({_id:1}).explain("executionStats");
+var plan = coll.find({_id: 1}, {_id: 1}).hint({_id: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.id.1 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.id.1 - docs examined should be 0 for covered query");
// Test equality with string value
-var plan = coll.find({_id:"string"}, {_id:1}).hint({_id:1}).explain("executionStats");
+var plan = coll.find({_id: "string"}, {_id: 1}).hint({_id: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.id.2 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.id.2 - docs examined should be 0 for covered query");
// Test equality with int value on a dotted field
-var plan = coll.find({_id:{bar:1}}, {_id:1}).hint({_id:1}).explain("executionStats");
+var plan = coll.find({_id: {bar: 1}}, {_id: 1}).hint({_id: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.id.3 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.id.3 - docs examined should be 0 for covered query");
// Test no query
-var plan = coll.find({}, {_id:1}).hint({_id:1}).explain("executionStats");
+var plan = coll.find({}, {_id: 1}).hint({_id: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.id.4 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.id.4 - docs examined should be 0 for covered query");
// Test range query
-var plan = coll.find({_id:{$gt:2,$lt:6}}, {_id:1}).hint({_id:1}).explain("executionStats");
+var plan = coll.find({_id: {$gt: 2, $lt: 6}}, {_id: 1}).hint({_id: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.id.5 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.id.5 - docs examined should be 0 for covered query");
// Test in query
-var plan = coll.find({_id:{$in:[5,8]}}, {_id:1}).hint({_id:1}).explain("executionStats");
+var plan = coll.find({_id: {$in: [5, 8]}}, {_id: 1}).hint({_id: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"simple.id.6 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"simple.id.6 - docs examined should be 0 for covered query");
-print ('all tests pass');
+print('all tests pass');
diff --git a/jstests/core/covered_index_sort_1.js b/jstests/core/covered_index_sort_1.js
index 3ddd9e7b701..a5984a34f19 100644
--- a/jstests/core/covered_index_sort_1.js
+++ b/jstests/core/covered_index_sort_1.js
@@ -5,41 +5,45 @@ load("jstests/libs/analyze_plan.js");
var coll = db.getCollection("covered_sort_1");
coll.drop();
-for (i=0;i<10;i++) {
- coll.insert({foo:i});
+for (i = 0; i < 10; i++) {
+ coll.insert({foo: i});
}
-for (i=0;i<10;i++) {
- coll.insert({foo:i});
+for (i = 0; i < 10; i++) {
+ coll.insert({foo: i});
}
-for (i=0;i<5;i++) {
- coll.insert({bar:i});
+for (i = 0; i < 5; i++) {
+ coll.insert({bar: i});
}
-coll.insert({foo:"1"});
-coll.insert({foo:{bar:1}});
-coll.insert({foo:null});
-coll.ensureIndex({foo:1});
+coll.insert({foo: "1"});
+coll.insert({foo: {bar: 1}});
+coll.insert({foo: null});
+coll.ensureIndex({foo: 1});
// Test no query and sort ascending
-var plan = coll.find({}, {foo:1, _id:0}).sort({foo:1}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({}, {foo: 1, _id: 0}).sort({foo: 1}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"sort.1.1 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"sort.1.1 - docs examined should be 0 for covered query");
// Test no query and sort descending
-var plan = coll.find({}, {foo:1, _id:0}).sort({foo:-1}).hint({foo:1}).explain("executionStats");
+var plan = coll.find({}, {foo: 1, _id: 0}).sort({foo: -1}).hint({foo: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"sort.1.2 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"sort.1.2 - docs examined should be 0 for covered query");
// Test range query with sort
-var plan = coll.find({foo:{$gt:2}}, {foo:1, _id:0}).sort({foo:-1})
- .hint({foo:1})
- .explain("executionStats");
+var plan = coll.find({foo: {$gt: 2}}, {foo: 1, _id: 0})
+ .sort({foo: -1})
+ .hint({foo: 1})
+ .explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"sort.1.3 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"sort.1.3 - docs examined should be 0 for covered query");
-print ('all tests pass');
+print('all tests pass');
diff --git a/jstests/core/covered_index_sort_2.js b/jstests/core/covered_index_sort_2.js
index 75a89ee7618..5ed3dc869c9 100644
--- a/jstests/core/covered_index_sort_2.js
+++ b/jstests/core/covered_index_sort_2.js
@@ -5,18 +5,19 @@ load("jstests/libs/analyze_plan.js");
var coll = db.getCollection("covered_sort_2");
coll.drop();
-for (i=0;i<10;i++) {
- coll.insert({_id:i});
+for (i = 0; i < 10; i++) {
+ coll.insert({_id: i});
}
-coll.insert({_id:"1"});
-coll.insert({_id:{bar:1}});
-coll.insert({_id:null});
+coll.insert({_id: "1"});
+coll.insert({_id: {bar: 1}});
+coll.insert({_id: null});
// Test no query
-var plan = coll.find({}, {_id:1}).sort({_id:-1}).hint({_id:1}).explain("executionStats");
+var plan = coll.find({}, {_id: 1}).sort({_id: -1}).hint({_id: 1}).explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"sort.2.1 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"sort.2.1 - docs examined should be 0 for covered query");
-print ('all tests pass');
+print('all tests pass');
diff --git a/jstests/core/covered_index_sort_3.js b/jstests/core/covered_index_sort_3.js
index 735b93877ba..1c98fb69be4 100644
--- a/jstests/core/covered_index_sort_3.js
+++ b/jstests/core/covered_index_sort_3.js
@@ -5,19 +5,21 @@ load("jstests/libs/analyze_plan.js");
var coll = db.getCollection("covered_sort_3");
coll.drop();
-for (i=0;i<100;i++) {
- coll.insert({a:i, b:"strvar_"+(i%13), c:NumberInt(i%10)});
+for (i = 0; i < 100; i++) {
+ coll.insert({a: i, b: "strvar_" + (i % 13), c: NumberInt(i % 10)});
}
coll.insert;
-coll.ensureIndex({a:1,b:-1,c:1});
+coll.ensureIndex({a: 1, b: -1, c: 1});
// Test no query, sort on all fields in index order
-var plan = coll.find({}, {b:1, c:1, _id:0}).sort({a:1,b:-1,c:1})
- .hint({a:1, b:-1, c:1})
- .explain("executionStats");
+var plan = coll.find({}, {b: 1, c: 1, _id: 0})
+ .sort({a: 1, b: -1, c: 1})
+ .hint({a: 1, b: -1, c: 1})
+ .explain("executionStats");
assert(isIndexOnly(plan.queryPlanner.winningPlan),
"sort.3.1 - indexOnly should be true on covered query");
-assert.eq(0, plan.executionStats.totalDocsExamined,
+assert.eq(0,
+ plan.executionStats.totalDocsExamined,
"sort.3.1 - docs examined should be 0 for covered query");
-print ('all tests pass');
+print('all tests pass');
diff --git a/jstests/core/create_collection_fail_cleanup.js b/jstests/core/create_collection_fail_cleanup.js
index a1548d35105..1417a54496c 100644
--- a/jstests/core/create_collection_fail_cleanup.js
+++ b/jstests/core/create_collection_fail_cleanup.js
@@ -9,8 +9,7 @@ assert(dbTest.getCollectionNames().length == 0);
var res = dbTest.createCollection("broken", {capped: true, size: -1});
assert.eq(false, res.ok);
-dbTest.getCollectionNames().forEach(
- function(collName) {
- print(collName);
- assert(collName != 'broken');
- });
+dbTest.getCollectionNames().forEach(function(collName) {
+ print(collName);
+ assert(collName != 'broken');
+});
diff --git a/jstests/core/create_indexes.js b/jstests/core/create_indexes.js
index f86208ec3b0..3e069d7478e 100644
--- a/jstests/core/create_indexes.js
+++ b/jstests/core/create_indexes.js
@@ -4,7 +4,8 @@
var isMongos = ("isdbgrid" == db.runCommand("ismaster").msg);
var extractResult = function(obj) {
- if (!isMongos) return obj;
+ if (!isMongos)
+ return obj;
// Sample mongos format:
// {
@@ -36,95 +37,96 @@
// Database does not exist
var collDbNotExist = dbTest.create_indexes_no_db;
- var res = assert.commandWorked(collDbNotExist.runCommand(
- 'createIndexes',
- {indexes: [{key: {x: 1}, name: 'x_1'}]}));
- res = extractResult( res );
- assert( res.createdCollectionAutomatically );
- assert.eq( 1, res.numIndexesBefore );
- assert.eq( 2, res.numIndexesAfter );
+ var res = assert.commandWorked(
+ collDbNotExist.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]}));
+ res = extractResult(res);
+ assert(res.createdCollectionAutomatically);
+ assert.eq(1, res.numIndexesBefore);
+ assert.eq(2, res.numIndexesAfter);
assert.isnull(res.note,
'createIndexes.note should not be present in results when adding a new index: ' +
- tojson(res));
+ tojson(res));
// Collection does not exist, but database does
var t = dbTest.create_indexes;
- var res = assert.commandWorked(t.runCommand('createIndexes',
- {indexes: [{key: {x: 1}, name: 'x_1'}]}));
- res = extractResult( res );
- assert( res.createdCollectionAutomatically );
- assert.eq( 1, res.numIndexesBefore );
- assert.eq( 2, res.numIndexesAfter );
+ var res = assert.commandWorked(
+ t.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]}));
+ res = extractResult(res);
+ assert(res.createdCollectionAutomatically);
+ assert.eq(1, res.numIndexesBefore);
+ assert.eq(2, res.numIndexesAfter);
assert.isnull(res.note,
'createIndexes.note should not be present in results when adding a new index: ' +
- tojson(res));
+ tojson(res));
// Both database and collection exist
- res = assert.commandWorked(t.runCommand('createIndexes',
- {indexes: [{key: {x: 1}, name: 'x_1'}]}));
- res = extractResult( res );
+ res = assert.commandWorked(
+ t.runCommand('createIndexes', {indexes: [{key: {x: 1}, name: 'x_1'}]}));
+ res = extractResult(res);
assert(!res.createdCollectionAutomatically);
assert.eq(2, res.numIndexesBefore);
- assert.eq(2, res.numIndexesAfter,
+ assert.eq(2,
+ res.numIndexesAfter,
'numIndexesAfter missing from createIndexes result when adding a duplicate index: ' +
- tojson(res));
+ tojson(res));
assert(res.note,
'createIndexes.note should be present in results when adding a duplicate index: ' +
- tojson(res));
-
- res = t.runCommand( "createIndexes", { indexes : [ { key : { "x" : 1 }, name : "x_1" },
- { key : { "y" : 1 }, name : "y_1" } ] } );
- res = extractResult( res );
- assert( !res.createdCollectionAutomatically );
- assert.eq( 2, res.numIndexesBefore );
- assert.eq( 3, res.numIndexesAfter );
-
- res = assert.commandWorked(t.runCommand('createIndexes',
- {indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]}));
- res = extractResult( res );
- assert( !res.createdCollectionAutomatically );
- assert.eq( 3, res.numIndexesBefore );
- assert.eq( 5, res.numIndexesAfter );
+ tojson(res));
+
+ res = t.runCommand("createIndexes",
+ {indexes: [{key: {"x": 1}, name: "x_1"}, {key: {"y": 1}, name: "y_1"}]});
+ res = extractResult(res);
+ assert(!res.createdCollectionAutomatically);
+ assert.eq(2, res.numIndexesBefore);
+ assert.eq(3, res.numIndexesAfter);
+
+ res = assert.commandWorked(t.runCommand(
+ 'createIndexes', {indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]}));
+ res = extractResult(res);
+ assert(!res.createdCollectionAutomatically);
+ assert.eq(3, res.numIndexesBefore);
+ assert.eq(5, res.numIndexesAfter);
assert.isnull(res.note,
'createIndexes.note should not be present in results when adding new indexes: ' +
- tojson(res));
+ tojson(res));
- res = assert.commandWorked(t.runCommand('createIndexes',
- {indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]}));
+ res = assert.commandWorked(t.runCommand(
+ 'createIndexes', {indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}]}));
- res = extractResult( res );
- assert.eq( 5, res.numIndexesBefore );
- assert.eq(5, res.numIndexesAfter,
+ res = extractResult(res);
+ assert.eq(5, res.numIndexesBefore);
+ assert.eq(5,
+ res.numIndexesAfter,
'numIndexesAfter missing from createIndexes result when adding duplicate indexes: ' +
- tojson(res));
+ tojson(res));
assert(res.note,
'createIndexes.note should be present in results when adding a duplicate index: ' +
- tojson(res));
+ tojson(res));
- res = t.runCommand( "createIndexes", { indexes : [ {} ] } );
- assert( !res.ok );
+ res = t.runCommand("createIndexes", {indexes: [{}]});
+ assert(!res.ok);
- res = t.runCommand( "createIndexes", { indexes : [ {} , { key : { m : 1 }, name : "asd" } ] } );
- assert( !res.ok );
+ res = t.runCommand("createIndexes", {indexes: [{}, {key: {m: 1}, name: "asd"}]});
+ assert(!res.ok);
- assert.eq( 5, t.getIndexes().length );
+ assert.eq(5, t.getIndexes().length);
- res = t.runCommand( "createIndexes",
- { indexes : [ { key : { "c" : 1 }, sparse : true, name : "c_1" } ] } );
- assert.eq( 6, t.getIndexes().length );
- assert.eq( 1, t.getIndexes().filter( function(z){ return z.sparse; } ).length );
+ res = t.runCommand("createIndexes", {indexes: [{key: {"c": 1}, sparse: true, name: "c_1"}]});
+ assert.eq(6, t.getIndexes().length);
+ assert.eq(1,
+ t.getIndexes().filter(function(z) {
+ return z.sparse;
+ }).length);
- res = t.runCommand( "createIndexes",
- { indexes : [ { key : { "x" : "foo" }, name : "x_1" } ] } );
- assert( !res.ok );
+ res = t.runCommand("createIndexes", {indexes: [{key: {"x": "foo"}, name: "x_1"}]});
+ assert(!res.ok);
- assert.eq( 6, t.getIndexes().length );
+ assert.eq(6, t.getIndexes().length);
- res = t.runCommand( "createIndexes",
- { indexes : [ { key : { "x" : 1 }, name : "" } ] } );
- assert( !res.ok );
+ res = t.runCommand("createIndexes", {indexes: [{key: {"x": 1}, name: ""}]});
+ assert(!res.ok);
- assert.eq( 6, t.getIndexes().length );
+ assert.eq(6, t.getIndexes().length);
// Test that v0 indexes cannot be created.
res = t.runCommand('createIndexes', {indexes: [{key: {d: 1}, name: 'd_1', v: 0}]});
diff --git a/jstests/core/crud_api.js b/jstests/core/crud_api.js
index 0c248884350..f6cc77025c3 100644
--- a/jstests/core/crud_api.js
+++ b/jstests/core/crud_api.js
@@ -32,7 +32,9 @@
if (db.getMongo().writeMode() === 'commands') {
assert.docEq(first, second);
} else {
- var overrideModifiedCount = {modifiedCount: undefined};
+ var overrideModifiedCount = {
+ modifiedCount: undefined
+ };
assert.docEq(Object.merge(first, overrideModifiedCount),
Object.merge(second, overrideModifiedCount));
}
@@ -42,12 +44,12 @@
var deleteManyExecutor = createTestExecutor(coll, 'deleteMany', checkResultObject);
var deleteOneExecutor = createTestExecutor(coll, 'deleteOne', checkResultObject);
var bulkWriteExecutor = createTestExecutor(coll, 'bulkWrite', checkResultObject);
- var findOneAndDeleteExecutor = createTestExecutor(coll, 'findOneAndDelete',
- checkResultObject);
- var findOneAndReplaceExecutor = createTestExecutor(coll, 'findOneAndReplace',
- checkResultObject);
- var findOneAndUpdateExecutor = createTestExecutor(coll, 'findOneAndUpdate',
- checkResultObject);
+ var findOneAndDeleteExecutor =
+ createTestExecutor(coll, 'findOneAndDelete', checkResultObject);
+ var findOneAndReplaceExecutor =
+ createTestExecutor(coll, 'findOneAndReplace', checkResultObject);
+ var findOneAndUpdateExecutor =
+ createTestExecutor(coll, 'findOneAndUpdate', checkResultObject);
var insertManyExecutor = createTestExecutor(coll, 'insertMany', checkResultObject);
var insertOneExecutor = createTestExecutor(coll, 'insertOne', checkResultObject);
var replaceOneExecutor = createTestExecutor(coll, 'replaceOne', checkResultObject);
@@ -61,36 +63,53 @@
//
bulkWriteExecutor({
- insert: [{ _id: 1, c: 1 }, { _id: 2, c: 2 }, { _id: 3, c: 3 }],
- params: [[
- { insertOne: { document: {_id: 4, a: 1 } } }
- , { updateOne: { filter: {_id: 5, a:2}, update: {$set: {a:2}}, upsert:true } }
- , { updateMany: { filter: {_id: 6,a:3}, update: {$set: {a:3}}, upsert:true } }
- , { deleteOne: { filter: {c:1} } }
- , { insertOne: { document: {_id: 7, c: 2 } } }
- , { deleteMany: { filter: {c:2} } }
- , { replaceOne: { filter: {c:3}, replacement: {c:4}, upsert:true } }]],
- result: {
- acknowledged: true, insertedCount:2,
- matchedCount:1, deletedCount: 3,
- upsertedCount:2, insertedIds : {'0' : 4, '4' : 7 }, upsertedIds : { '1' : 5, '2' : 6 }
- },
- expected: [{ "_id" : 3, "c" : 4 }, { "_id" : 4, "a" : 1 }, { "_id" : 5, "a" : 2 }, { "_id" : 6, "a" : 3 }]
+ insert: [{_id: 1, c: 1}, {_id: 2, c: 2}, {_id: 3, c: 3}],
+ params: [[
+ {insertOne: {document: {_id: 4, a: 1}}},
+ {updateOne: {filter: {_id: 5, a: 2}, update: {$set: {a: 2}}, upsert: true}},
+ {updateMany: {filter: {_id: 6, a: 3}, update: {$set: {a: 3}}, upsert: true}},
+ {deleteOne: {filter: {c: 1}}},
+ {insertOne: {document: {_id: 7, c: 2}}},
+ {deleteMany: {filter: {c: 2}}},
+ {replaceOne: {filter: {c: 3}, replacement: {c: 4}, upsert: true}}
+ ]],
+ result: {
+ acknowledged: true,
+ insertedCount: 2,
+ matchedCount: 1,
+ deletedCount: 3,
+ upsertedCount: 2,
+ insertedIds: {'0': 4, '4': 7},
+ upsertedIds: {'1': 5, '2': 6}
+ },
+ expected:
+ [{"_id": 3, "c": 4}, {"_id": 4, "a": 1}, {"_id": 5, "a": 2}, {"_id": 6, "a": 3}]
});
bulkWriteExecutor({
- insert: [{ _id: 1, c: 1 }, { _id: 2, c: 2 }, { _id: 3, c: 3 }],
- params: [[
- { insertOne: { document: { _id: 4, a: 1 } } }
- , { updateOne: { filter: {_id: 5, a:2}, update: {$set: {a:2}}, upsert:true } }
- , { updateMany: { filter: {_id: 6, a:3}, update: {$set: {a:3}}, upsert:true } }
- , { deleteOne: { filter: {c:1} } }
- , { deleteMany: { filter: {c:2} } }
- , { replaceOne: { filter: {c:3}, replacement: {c:4}, upsert:true } }], { ordered: false }],
- result: {
- acknowledged: true, insertedCount:1, matchedCount:1, deletedCount:2, upsertedCount:2, insertedIds : {'0' : 4 }, upsertedIds : { '1' : 5, '2' : 6 }
- },
- expected: [{ "_id" : 3, "c" : 4 }, { "_id" : 4, "a" : 1 }, { "_id" : 5, "a" : 2 }, { "_id" : 6, "a" : 3 }]
+ insert: [{_id: 1, c: 1}, {_id: 2, c: 2}, {_id: 3, c: 3}],
+ params: [
+ [
+ {insertOne: {document: {_id: 4, a: 1}}},
+ {updateOne: {filter: {_id: 5, a: 2}, update: {$set: {a: 2}}, upsert: true}},
+ {updateMany: {filter: {_id: 6, a: 3}, update: {$set: {a: 3}}, upsert: true}},
+ {deleteOne: {filter: {c: 1}}},
+ {deleteMany: {filter: {c: 2}}},
+ {replaceOne: {filter: {c: 3}, replacement: {c: 4}, upsert: true}}
+ ],
+ {ordered: false}
+ ],
+ result: {
+ acknowledged: true,
+ insertedCount: 1,
+ matchedCount: 1,
+ deletedCount: 2,
+ upsertedCount: 2,
+ insertedIds: {'0': 4},
+ upsertedIds: {'1': 5, '2': 6}
+ },
+ expected:
+ [{"_id": 3, "c": 4}, {"_id": 4, "a": 1}, {"_id": 5, "a": 2}, {"_id": 6, "a": 3}]
});
// DeleteMany
@@ -98,30 +117,24 @@
// DeleteMany when many documents match
deleteManyExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: { $gt: 1 } }
- ],
- result: {acknowledged: true, deletedCount:2},
- expected: [{_id:1, x: 11}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}],
+ result: {acknowledged: true, deletedCount: 2},
+ expected: [{_id: 1, x: 11}]
});
// DeleteMany when no document matches
deleteManyExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 4 }
- ],
- result: {acknowledged: true, deletedCount:0},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}],
+ result: {acknowledged: true, deletedCount: 0},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// DeleteMany when many documents match, no write concern
deleteManyExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: { $gt: 1 } }, { w : 0 }
- ],
- result: {acknowledged: false},
- expected: [{_id:1, x: 11}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}]
});
//
@@ -130,39 +143,31 @@
// DeleteOne when many documents match
deleteOneExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: { $gt: 1 } }
- ],
- result: {acknowledged: true, deletedCount:1},
- expected: [{_id:1, x: 11}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}],
+ result: {acknowledged: true, deletedCount: 1},
+ expected: [{_id: 1, x: 11}, {_id: 3, x: 33}]
});
// DeleteOne when one document matches
deleteOneExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 2 }
- ],
- result: {acknowledged: true, deletedCount:1},
- expected: [{_id:1, x: 11}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 2}],
+ result: {acknowledged: true, deletedCount: 1},
+ expected: [{_id: 1, x: 11}, {_id: 3, x: 33}]
});
// DeleteOne when no documents match
deleteOneExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 4 }
- ],
- result: {acknowledged: true, deletedCount:0},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}],
+ result: {acknowledged: true, deletedCount: 0},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// DeleteOne when many documents match, no write concern
deleteOneExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: { $gt: 1 } }, {w:0}
- ],
- result: {acknowledged: false},
- expected: [{_id:1, x: 11}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 3, x: 33}]
});
//
@@ -171,33 +176,24 @@
// FindOneAndDelete when one document matches
findOneAndDeleteExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: { $gt: 2 } }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 } }
- ],
- result: {x:33},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 2}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: {x: 33},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}]
});
// FindOneAndDelete when one document matches
findOneAndDeleteExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 2 }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 } }
- ],
- result: {x:22},
- expected: [{_id:1, x: 11}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 2}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: {x: 22},
+ expected: [{_id: 1, x: 11}, {_id: 3, x: 33}]
});
// FindOneAndDelete when no documents match
findOneAndDeleteExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 4 }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 } }
- ],
- result: null,
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
//
@@ -206,95 +202,81 @@
// FindOneAndReplace when many documents match returning the document before modification
findOneAndReplaceExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: { $gt: 1 } }
- , { x: 32 }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 } }
- ],
- result: {x:22},
- expected: [{_id:1, x: 11}, {_id:2, x: 32}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {x: 32}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: {x: 22},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}]
});
// FindOneAndReplace when many documents match returning the document after modification
findOneAndReplaceExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: { $gt: 1 } }
- , { x: 32 }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 }, returnNewDocument:true }
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: {$gt: 1}},
+ {x: 32},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
],
- result: {x:32},
- expected: [{_id:1, x: 11}, {_id:2, x: 32}, {_id:3, x: 33}]
+ result: {x: 32},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}]
});
// FindOneAndReplace when one document matches returning the document before modification
findOneAndReplaceExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 2 }
- , { x: 32 }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 } }
- ],
- result: {x:22},
- expected: [{_id:1, x: 11}, {_id:2, x: 32}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 2}, {x: 32}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: {x: 22},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}]
});
// FindOneAndReplace when one document matches returning the document after modification
findOneAndReplaceExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 2 }
- , { x: 32 }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 }, returnNewDocument:true }
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 2},
+ {x: 32},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
],
- result: {x:32},
- expected: [{_id:1, x: 11}, {_id:2, x: 32}, {_id:3, x: 33}]
+ result: {x: 32},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 32}, {_id: 3, x: 33}]
});
// FindOneAndReplace when no documents match returning the document before modification
findOneAndReplaceExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 4 }
- , { x: 44 }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 } }
- ],
- result: null,
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {x: 44}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
- // FindOneAndReplace when no documents match with upsert returning the document before modification
+ // FindOneAndReplace when no documents match with upsert returning the document before
+ // modification
findOneAndReplaceExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 4 }
- , { x: 44 }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 }, upsert:true }
- ],
- result: null,
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}, {_id:4, x:44}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {x: 44}, {projection: {x: 1, _id: 0}, sort: {x: 1}, upsert: true}],
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 44}]
});
// FindOneAndReplace when no documents match returning the document after modification
findOneAndReplaceExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 4 }
- , { x: 44 }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 }, returnNewDocument:true }
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 4},
+ {x: 44},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
],
- result: null,
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}]
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
- // FindOneAndReplace when no documents match with upsert returning the document after modification
+ // FindOneAndReplace when no documents match with upsert returning the document after
+ // modification
findOneAndReplaceExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 4 }
- , { x: 44 }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 }, returnNewDocument:true, upsert:true }
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 4},
+ {x: 44},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true, upsert: true}
],
- result: {x:44},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}, {_id:4, x: 44}]
+ result: {x: 44},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 44}]
});
assert.throws(function() {
- coll.findOneAndReplace({a:1}, {$set:{b:1}});
+ coll.findOneAndReplace({a: 1}, {$set: {b: 1}});
});
//
@@ -303,99 +285,89 @@
// FindOneAndUpdate when many documents match returning the document before modification
findOneAndUpdateExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: { $gt: 1 } }
- , { $inc: { x: 1 } }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 } }
- ],
- result: {x:22},
- expected: [{_id:1, x: 11}, {_id:2, x: 23}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: {x: 22},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
});
// FindOneAndUpdate when many documents match returning the document after modification
findOneAndUpdateExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: { $gt: 1 } }
- , { $inc: { x: 1 } }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 }, returnNewDocument: true }
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: {$gt: 1}},
+ {$inc: {x: 1}},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
],
- result: {x:23},
- expected: [{_id:1, x: 11}, {_id:2, x: 23}, {_id:3, x: 33}]
+ result: {x: 23},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
});
// FindOneAndUpdate when one document matches returning the document before modification
findOneAndUpdateExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 2 }
- , { $inc: { x: 1 } }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 } }
- ],
- result: {x:22},
- expected: [{_id:1, x: 11}, {_id:2, x: 23}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 2}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: {x: 22},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
});
// FindOneAndUpdate when one document matches returning the document after modification
findOneAndUpdateExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 2 }
- , { $inc: { x: 1 } }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 }, returnNewDocument: true }
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 2},
+ {$inc: {x: 1}},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
],
- result: {x:23},
- expected: [{_id:1, x: 11}, {_id:2, x: 23}, {_id:3, x: 33}]
+ result: {x: 23},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
});
// FindOneAndUpdate when no documents match returning the document before modification
findOneAndUpdateExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 4 }
- , { $inc: { x: 1 } }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 } }
- ],
- result: null,
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {$inc: {x: 1}}, {projection: {x: 1, _id: 0}, sort: {x: 1}}],
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
- // FindOneAndUpdate when no documents match with upsert returning the document before modification
+ // FindOneAndUpdate when no documents match with upsert returning the document before
+ // modification
findOneAndUpdateExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 4 }
- , { $inc: { x: 1 } }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 }, upsert:true }
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 4},
+ {$inc: {x: 1}},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, upsert: true}
],
- result: null,
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}, {_id:4, x: 1}]
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
});
// FindOneAndUpdate when no documents match returning the document after modification
findOneAndUpdateExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 4 }
- , { $inc: { x: 1 } }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 }, returnNewDocument:true }
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 4},
+ {$inc: {x: 1}},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true}
],
- result: null,
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}]
+ result: null,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
- // FindOneAndUpdate when no documents match with upsert returning the document after modification
+ // FindOneAndUpdate when no documents match with upsert returning the document after
+ // modification
findOneAndUpdateExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [
- { _id: 4 }
- , { $inc: { x: 1 } }
- , { projection: { x: 1, _id: 0 }, sort: { x: 1 }, returnNewDocument:true, upsert:true }
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [
+ {_id: 4},
+ {$inc: {x: 1}},
+ {projection: {x: 1, _id: 0}, sort: {x: 1}, returnNewDocument: true, upsert: true}
],
- result: {x:1},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}, {_id:4, x: 1}]
+ result: {x: 1},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
});
assert.throws(function() {
- coll.findOneAndUpdate({a:1}, {});
+ coll.findOneAndUpdate({a: 1}, {});
});
assert.throws(function() {
- coll.findOneAndUpdate({a:1}, {b:1});
+ coll.findOneAndUpdate({a: 1}, {b: 1});
});
//
@@ -404,22 +376,17 @@
// InsertMany with non-existing documents
insertManyExecutor({
- insert: [{ _id:1, x:11 }],
- params: [
- [{_id: 2, x: 22}, {_id:3, x:33}]
- ],
- result: {acknowledged: true, insertedIds: [2, 3]},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}],
+ params: [[{_id: 2, x: 22}, {_id: 3, x: 33}]],
+ result: {acknowledged: true, insertedIds: [2, 3]},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// InsertMany with non-existing documents, no write concern
insertManyExecutor({
- insert: [{ _id:1, x:11 }],
- params: [
- [{_id: 2, x: 22}, {_id:3, x:33}]
- , {w:0}
- ],
- result: {acknowledged: false},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}],
+ params: [[{_id: 2, x: 22}, {_id: 3, x: 33}], {w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
//
@@ -428,21 +395,17 @@
// InsertOne with non-existing documents
insertOneExecutor({
- insert: [{ _id:1, x:11 }],
- params: [
- {_id: 2, x: 22}
- ],
- result: {acknowledged: true, insertedId: 2},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}]
+ insert: [{_id: 1, x: 11}],
+ params: [{_id: 2, x: 22}],
+ result: {acknowledged: true, insertedId: 2},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}]
});
// InsertOne with non-existing documents, no write concern
insertOneExecutor({
- insert: [{ _id:1, x:11 }],
- params: [
- {_id: 2, x: 22}, {w:0}
- ],
- result: {acknowledged: false},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}]
+ insert: [{_id: 1, x: 11}],
+ params: [{_id: 2, x: 22}, {w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}]
});
//
@@ -451,56 +414,56 @@
// ReplaceOne when many documents match
replaceOneExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: { $gt: 1 } }, { x: 111 }],
- result: {acknowledged:true, matchedCount:1, modifiedCount:1},
- expected: [{_id:1, x: 11}, {_id:2, x: 111}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {x: 111}],
+ result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 111}, {_id: 3, x: 33}]
});
// ReplaceOne when one document matches
replaceOneExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 1 }, { _id: 1, x: 111 }],
- result: {acknowledged:true, matchedCount:1, modifiedCount:1},
- expected: [{_id:1, x: 111}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 1}, {_id: 1, x: 111}],
+ result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
+ expected: [{_id: 1, x: 111}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// ReplaceOne when no documents match
replaceOneExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 4 }, { _id: 4, x: 1 }],
- result: {acknowledged:true, matchedCount:0, modifiedCount:0},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {_id: 4, x: 1}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// ReplaceOne with upsert when no documents match without an id specified
replaceOneExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 4 }, { x: 1 }, {upsert:true}],
- result: {acknowledged:true, matchedCount:0, modifiedCount:0, upsertedId: 4},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}, {_id:4, x: 1}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {x: 1}, {upsert: true}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
});
// ReplaceOne with upsert when no documents match with an id specified
replaceOneExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 4 }, { _id: 4, x: 1 }, {upsert:true}],
- result: {acknowledged:true, matchedCount:0, modifiedCount:0, upsertedId: 4},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}, {_id:4, x: 1}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
});
// ReplaceOne with upsert when no documents match with an id specified, no write concern
replaceOneExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 4 }, { _id: 4, x: 1 }, {upsert:true, w:0}],
- result: {acknowledged:false},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}, {_id:4, x: 1}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true, w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
});
// ReplaceOne with upsert when no documents match with an id specified, no write concern
replaceOneExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 4 }, { _id: 4, x: 1 }, {upsert:true, writeConcern:{w:0}}],
- result: {acknowledged:false},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}, {_id:4, x: 1}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {_id: 4, x: 1}, {upsert: true, writeConcern: {w: 0}}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
});
assert.throws(function() {
- coll.replaceOne({a:1}, {$set:{b:1}});
+ coll.replaceOne({a: 1}, {$set: {b: 1}});
});
//
@@ -509,46 +472,46 @@
// UpdateMany when many documents match
updateManyExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: { $gt: 1 } }, { $inc: { x: 1 } }],
- result: {acknowledged:true, matchedCount:2, modifiedCount:2},
- expected: [{_id:1, x: 11}, {_id:2, x: 23}, {_id:3, x: 34}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {$inc: {x: 1}}],
+ result: {acknowledged: true, matchedCount: 2, modifiedCount: 2},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 34}]
});
// UpdateMany when one document matches
updateManyExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 1 }, { $inc: { x: 1 } }],
- result: {acknowledged:true, matchedCount:1, modifiedCount:1},
- expected: [{_id:1, x: 12}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 1}, {$inc: {x: 1}}],
+ result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
+ expected: [{_id: 1, x: 12}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// UpdateMany when no documents match
updateManyExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 4 }, { $inc: { x: 1 } }],
- result: {acknowledged:true, matchedCount:0, modifiedCount:0},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {$inc: {x: 1}}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// UpdateMany with upsert when no documents match
updateManyExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 4 }, { $inc: { x: 1 } }, { upsert: true }],
- result: {acknowledged:true, matchedCount:0, modifiedCount:0, upsertedId: 4},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}, {_id:4, x: 1}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
});
// UpdateMany with upsert when no documents match, no write concern
updateManyExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 4 }, { $inc: { x: 1 } }, { upsert: true, w: 0 }],
- result: {acknowledged:false},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}, {_id:4, x: 1}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true, w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
});
assert.throws(function() {
- coll.updateMany({a:1}, {});
+ coll.updateMany({a: 1}, {});
});
assert.throws(function() {
- coll.updateMany({a:1}, {b:1});
+ coll.updateMany({a: 1}, {b: 1});
});
//
@@ -557,47 +520,47 @@
// UpdateOne when many documents match
updateOneExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: { $gt: 1 } }, { $inc: { x: 1 } }],
- result: {acknowledged:true, matchedCount:1, modifiedCount:1},
- expected: [{_id:1, x: 11}, {_id:2, x: 23}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {$inc: {x: 1}}],
+ result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
});
// UpdateOne when one document matches
updateOneExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 1 }, { $inc: { x: 1 } }],
- result: {acknowledged:true, matchedCount:1, modifiedCount:1},
- expected: [{_id:1, x: 12}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 1}, {$inc: {x: 1}}],
+ result: {acknowledged: true, matchedCount: 1, modifiedCount: 1},
+ expected: [{_id: 1, x: 12}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// UpdateOne when no documents match
updateOneExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 4 }, { $inc: { x: 1 } }],
- result: {acknowledged:true, matchedCount:0, modifiedCount:0},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {$inc: {x: 1}}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// UpdateOne with upsert when no documents match
updateOneExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: 4 }, { $inc: { x: 1 } }, {upsert:true}],
- result: {acknowledged:true, matchedCount:0, modifiedCount:0, upsertedId: 4},
- expected: [{_id:1, x: 11}, {_id:2, x: 22}, {_id:3, x: 33}, {_id: 4, x: 1}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: 4}, {$inc: {x: 1}}, {upsert: true}],
+ result: {acknowledged: true, matchedCount: 0, modifiedCount: 0, upsertedId: 4},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}, {_id: 4, x: 1}]
});
// UpdateOne when many documents match, no write concern
updateOneExecutor({
- insert: [{ _id: 1, x: 11 }, { _id: 2, x: 22 }, { _id:3, x:33 }],
- params: [{ _id: { $gt: 1 } }, { $inc: { x: 1 } }, {w:0}],
- result: {acknowledged:false},
- expected: [{_id:1, x: 11}, {_id:2, x: 23}, {_id:3, x: 33}]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}, {$inc: {x: 1}}, {w: 0}],
+ result: {acknowledged: false},
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 23}, {_id: 3, x: 33}]
});
assert.throws(function() {
- coll.updateOne({a:1}, {});
+ coll.updateOne({a: 1}, {});
});
assert.throws(function() {
- coll.updateOne({a:1}, {b:1});
+ coll.updateOne({a: 1}, {b: 1});
});
//
@@ -606,45 +569,45 @@
// Simple count of all elements
countExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [{}],
- result: 3,
- expected: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{}],
+ result: 3,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// Simple count no arguments
countExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [],
- result: 3,
- expected: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [],
+ result: 3,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// Simple count filtered
countExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [{_id: {$gt: 1}}],
- result: 2,
- expected: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{_id: {$gt: 1}}],
+ result: 2,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// Simple count of all elements, applying limit
countExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [{}, {limit:1}],
- result: 1,
- expected: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{}, {limit: 1}],
+ result: 1,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// Simple count of all elements, applying skip
countExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [{}, {skip:1}],
- result: 2,
- expected: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{}, {skip: 1}],
+ result: 2,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// Simple count no arguments, applying hint
countExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: [{}, {hint: { "_id": 1}}],
- result: 3,
- expected: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: [{}, {hint: {"_id": 1}}],
+ result: 3,
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
//
@@ -653,31 +616,31 @@
// Simple distinct of field x no filter
distinctExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: ['x'],
- result: [11, 22, 33],
- expected: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: ['x'],
+ result: [11, 22, 33],
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// Simple distinct of field x
distinctExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: ['x', {}],
- result: [11, 22, 33],
- expected: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: ['x', {}],
+ result: [11, 22, 33],
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// Simple distinct of field x filtered
distinctExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: ['x', {x: { $gt: 11 }}],
- result: [22, 33],
- expected: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: ['x', {x: {$gt: 11}}],
+ result: [22, 33],
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
// Simple distinct of field x filtered with maxTimeMS
distinctExecutor({
- insert: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }],
- params: ['x', {x: { $gt: 11 }}, {maxTimeMS:100000}],
- result: [22, 33],
- expected: [{ _id: 1, x:11 }, { _id: 2, x:22 }, { _id: 3, x:33 }]
+ insert: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}],
+ params: ['x', {x: {$gt: 11}}, {maxTimeMS: 100000}],
+ result: [22, 33],
+ expected: [{_id: 1, x: 11}, {_id: 2, x: 22}, {_id: 3, x: 33}]
});
//
@@ -686,20 +649,21 @@
coll.deleteMany({});
// Insert all of them
- coll.insertMany([{a:0, b:0}, {a:1, b:1}]);
+ coll.insertMany([{a: 0, b: 0}, {a: 1, b: 1}]);
// Simple projection
- var result = coll.find({}).sort({a:1}).limit(1).skip(1).projection({_id:0, a:1}).toArray();
- assert.docEq(result, [{a:1}]);
+ var result =
+ coll.find({}).sort({a: 1}).limit(1).skip(1).projection({_id: 0, a: 1}).toArray();
+ assert.docEq(result, [{a: 1}]);
// Simple tailable cursor
- var cursor = coll.find({}).sort({a:1}).tailable();
+ var cursor = coll.find({}).sort({a: 1}).tailable();
assert.eq(34, (cursor._options & ~DBQuery.Option.slaveOk));
- var cursor = coll.find({}).sort({a:1}).tailable(false);
+ var cursor = coll.find({}).sort({a: 1}).tailable(false);
assert.eq(2, (cursor._options & ~DBQuery.Option.slaveOk));
// Check modifiers
- var cursor = coll.find({}).modifiers({$hint:'a_1'});
+ var cursor = coll.find({}).modifiers({$hint: 'a_1'});
assert.eq('a_1', cursor._query['$hint']);
// allowPartialResults
@@ -720,50 +684,48 @@
coll.deleteMany({});
// Insert all of them
- coll.insertMany([{a:0, b:0}, {a:1, b:1}]);
+ coll.insertMany([{a: 0, b: 0}, {a: 1, b: 1}]);
// Simple aggregation with useCursor
- var result = coll.aggregate([{$match: {}}], {useCursor:true}).toArray();
+ var result = coll.aggregate([{$match: {}}], {useCursor: true}).toArray();
assert.eq(2, result.length);
// Simple aggregation with batchSize
- var result = coll.aggregate([{$match: {}}], {batchSize:2}).toArray();
+ var result = coll.aggregate([{$match: {}}], {batchSize: 2}).toArray();
assert.eq(2, result.length);
// Drop collection
coll.drop();
- coll.ensureIndex({a:1}, {unique:true});
+ coll.ensureIndex({a: 1}, {unique: true});
// Should throw duplicate key error
assert.throws(function() {
- coll.insertMany([{a:0, b:0}, {a:0, b:1}]);
+ coll.insertMany([{a: 0, b: 0}, {a: 0, b: 1}]);
});
- assert(coll.findOne({a:0, b:0}) != null);
+ assert(coll.findOne({a: 0, b: 0}) != null);
assert.throws(function() {
- coll.insertOne({a:0, b:0});
+ coll.insertOne({a: 0, b: 0});
});
assert.throws(function() {
- coll.updateOne({b:2}, {$set: {a:0}}, {upsert:true});
+ coll.updateOne({b: 2}, {$set: {a: 0}}, {upsert: true});
});
assert.throws(function() {
- coll.updateMany({b:2}, {$set: {a:0}}, {upsert:true});
+ coll.updateMany({b: 2}, {$set: {a: 0}}, {upsert: true});
});
assert.throws(function() {
- coll.deleteOne({$invalidFieldName:{a:1}});
+ coll.deleteOne({$invalidFieldName: {a: 1}});
});
assert.throws(function() {
- coll.deleteMany({$set:{a:1}});
+ coll.deleteMany({$set: {a: 1}});
});
assert.throws(function() {
- coll.bulkWrite([
- { insertOne: { document: { _id: 4, a: 0 } } }
- ]);
+ coll.bulkWrite([{insertOne: {document: {_id: 4, a: 0}}}]);
});
};
diff --git a/jstests/core/currentop.js b/jstests/core/currentop.js
index 34e96696481..ef948d415be 100644
--- a/jstests/core/currentop.js
+++ b/jstests/core/currentop.js
@@ -4,20 +4,21 @@ print("BEGIN currentop.js");
t = db.jstests_currentop;
t.drop();
-for(i=0;i<100;i++) {
- t.save({ "num": i });
+for (i = 0; i < 100; i++) {
+ t.save({"num": i});
}
print("count:" + t.count());
function ops(q) {
- printjson( db.currentOp().inprog );
+ printjson(db.currentOp().inprog);
return db.currentOp(q).inprog;
}
print("start shell");
-// sleep for a second for each (of 100) documents; can be killed in between documents & test should complete before 100 seconds
+// sleep for a second for each (of 100) documents; can be killed in between documents & test should
+// complete before 100 seconds
s1 = startParallelShell("db.jstests_currentop.count( { '$where': function() { sleep(1000); } } )");
print("sleep");
@@ -33,25 +34,26 @@ print();
// need to wait for read to start
print("wait have some ops");
-assert.soon( function(){
- return ops( { "locks.Collection": "r", "ns": "test.jstests_currentop" } ).length +
- ops({ "locks.Collection": "R", "ns": "test.jstests_currentop" }).length >= 1;
+assert.soon(function() {
+ return ops({"locks.Collection": "r", "ns": "test.jstests_currentop"}).length +
+ ops({"locks.Collection": "R", "ns": "test.jstests_currentop"}).length >=
+ 1;
}, "have_some_ops");
print("ok");
-
-s2 = startParallelShell( "db.jstests_currentop.update({ '$where': function() { sleep(150); } }," +
- " { '$inc': {num: 1} }, false, true );" );
+
+s2 = startParallelShell("db.jstests_currentop.update({ '$where': function() { sleep(150); } }," +
+ " { '$inc': {num: 1} }, false, true );");
o = [];
function f() {
- o = ops({ "ns": "test.jstests_currentop" });
+ o = ops({"ns": "test.jstests_currentop"});
printjson(o);
- var writes = ops({ "locks.Collection": "w", "ns": "test.jstests_currentop" }).length;
+ var writes = ops({"locks.Collection": "w", "ns": "test.jstests_currentop"}).length;
- var readops = ops({ "locks.Collection": "r", "ns": "test.jstests_currentop" });
+ var readops = ops({"locks.Collection": "r", "ns": "test.jstests_currentop"});
print("readops:");
printjson(readops);
var reads = readops.length;
@@ -63,10 +65,10 @@ function f() {
print("go");
-assert.soon( f, "f" );
+assert.soon(f, "f");
// avoid waiting for the operations to complete (if soon succeeded)
-for(var i in o) {
+for (var i in o) {
db.killOp(o[i].opid);
}
@@ -77,4 +79,4 @@ s1({checkExitSuccess: false});
s2({checkExitSuccess: false});
// don't want to pass if timeout killed the js function
-assert( ( new Date() ) - start < 30000 );
+assert((new Date()) - start < 30000);
diff --git a/jstests/core/currentop_predicate.js b/jstests/core/currentop_predicate.js
index ef0dd854464..98df3c9756e 100644
--- a/jstests/core/currentop_predicate.js
+++ b/jstests/core/currentop_predicate.js
@@ -1,12 +1,22 @@
// Tests the use of a match predicate with the currentOp command.
(function() {
// Test a $where predicate that matches the currentOp operation we are running.
- var res = db.adminCommand("currentOp", {$where: function() { return true; }});
+ var res = db.adminCommand("currentOp",
+ {
+ $where: function() {
+ return true;
+ }
+ });
assert.commandWorked(res);
assert.gt(res.inprog.length, 0, tojson(res));
// Test a $where predicate that matches no operations.
- res = db.adminCommand("currentOp", {$where: function() { return false; }});
+ res = db.adminCommand("currentOp",
+ {
+ $where: function() {
+ return false;
+ }
+ });
assert.commandWorked(res);
assert.eq(res.inprog.length, 0, tojson(res));
})();
diff --git a/jstests/core/cursor1.js b/jstests/core/cursor1.js
index 65c62e64f91..c98bec5044c 100644
--- a/jstests/core/cursor1.js
+++ b/jstests/core/cursor1.js
@@ -3,18 +3,18 @@ t = db.cursor1;
t.drop();
big = "";
-while ( big.length < 50000 )
+while (big.length < 50000)
big += "asdasdasdasdsdsdadsasdasdasD";
-num = Math.ceil( 10000000 / big.length );
+num = Math.ceil(10000000 / big.length);
-for ( var i=0; i<num; i++ ){
- t.save( { num : i , str : big } );
+for (var i = 0; i < num; i++) {
+ t.save({num: i, str: big});
}
-assert.eq( num , t.find().count() );
-assert.eq( num , t.find().itcount() );
+assert.eq(num, t.find().count());
+assert.eq(num, t.find().itcount());
-assert.eq( num / 2 , t.find().limit(num/2).itcount() );
+assert.eq(num / 2, t.find().limit(num / 2).itcount());
-t.drop(); // save some space
+t.drop(); // save some space
diff --git a/jstests/core/cursor2.js b/jstests/core/cursor2.js
index 2389a6a5d74..cf496db2c3f 100644
--- a/jstests/core/cursor2.js
+++ b/jstests/core/cursor2.js
@@ -1,24 +1,24 @@
/**
- * test to see if the count returned from the cursor is the number of objects that would be returned
+ * test to see if the count returned from the cursor is the number of objects that would be
+ *returned
*
* BUG 884
*/
function testCursorCountVsArrLen(dbConn) {
-
var coll = dbConn.ed_db_cursor2_ccvsal;
coll.drop();
- coll.save({ a: 1, b : 1});
- coll.save({ a: 2, b : 1});
- coll.save({ a: 3});
+ coll.save({a: 1, b: 1});
+ coll.save({a: 2, b: 1});
+ coll.save({a: 3});
- var fromCount = coll.find({}, {b:1}).count();
- var fromArrLen = coll.find({}, {b:1}).toArray().length;
+ var fromCount = coll.find({}, {b: 1}).count();
+ var fromArrLen = coll.find({}, {b: 1}).toArray().length;
- assert(fromCount == fromArrLen, "count from cursor [" + fromCount + "] != count from arrlen [" + fromArrLen + "]");
+ assert(fromCount == fromArrLen,
+ "count from cursor [" + fromCount + "] != count from arrlen [" + fromArrLen + "]");
}
-
testCursorCountVsArrLen(db);
diff --git a/jstests/core/cursor3.js b/jstests/core/cursor3.js
index fb1d95afb18..cc602d523f0 100644
--- a/jstests/core/cursor3.js
+++ b/jstests/core/cursor3.js
@@ -3,33 +3,34 @@
testNum = 1;
-function checkResults( expected, cursor , testNum ) {
- assert.eq( expected.length, cursor.count() , "testNum: " + testNum + " A : " + tojson( cursor.toArray() ) + " " + tojson( cursor.explain() ) );
- for( i = 0; i < expected.length; ++i ) {
- assert.eq( expected[ i ], cursor[ i ][ "a" ] , "testNum: " + testNum + " B" );
+function checkResults(expected, cursor, testNum) {
+ assert.eq(expected.length,
+ cursor.count(),
+ "testNum: " + testNum + " A : " + tojson(cursor.toArray()) + " " +
+ tojson(cursor.explain()));
+ for (i = 0; i < expected.length; ++i) {
+ assert.eq(expected[i], cursor[i]["a"], "testNum: " + testNum + " B");
}
}
t = db.cursor3;
t.drop();
-t.save( { a: 0 } );
-t.save( { a: 1 } );
-t.save( { a: 2 } );
+t.save({a: 0});
+t.save({a: 1});
+t.save({a: 2});
-t.ensureIndex( { a: 1 } );
+t.ensureIndex({a: 1});
+checkResults([1], t.find({a: 1}).sort({a: 1}).hint({a: 1}), testNum++);
+checkResults([1], t.find({a: 1}).sort({a: -1}).hint({a: 1}), testNum++);
+checkResults([1, 2], t.find({a: {$gt: 0}}).sort({a: 1}).hint({a: 1}), testNum++);
+checkResults([2, 1], t.find({a: {$gt: 0}}).sort({a: -1}).hint({a: 1}), testNum++);
+checkResults([1, 2], t.find({a: {$gte: 1}}).sort({a: 1}).hint({a: 1}), testNum++);
+checkResults([2, 1], t.find({a: {$gte: 1}}).sort({a: -1}).hint({a: 1}), testNum++);
-checkResults( [ 1 ], t.find( { a: 1 } ).sort( { a: 1 } ).hint( { a: 1 } ) , testNum++ );
-checkResults( [ 1 ], t.find( { a: 1 } ).sort( { a: -1 } ).hint( { a: 1 } ) , testNum++ );
-
-checkResults( [ 1, 2 ], t.find( { a: { $gt: 0 } } ).sort( { a: 1 } ).hint( { a: 1 } ) , testNum++ );
-checkResults( [ 2, 1 ], t.find( { a: { $gt: 0 } } ).sort( { a: -1 } ).hint( { a: 1 } ) , testNum++ );
-checkResults( [ 1, 2 ], t.find( { a: { $gte: 1 } } ).sort( { a: 1 } ).hint( { a: 1 } ) , testNum++ );
-checkResults( [ 2, 1 ], t.find( { a: { $gte: 1 } } ).sort( { a: -1 } ).hint( { a: 1 } ) , testNum++ );
-
-checkResults( [ 0, 1 ], t.find( { a: { $lt: 2 } } ).sort( { a: 1 } ).hint( { a: 1 } ) , testNum++ );
-checkResults( [ 1, 0 ], t.find( { a: { $lt: 2 } } ).sort( { a: -1 } ).hint( { a: 1 } ) , testNum++ );
-checkResults( [ 0, 1 ], t.find( { a: { $lte: 1 } } ).sort( { a: 1 } ).hint( { a: 1 } ) , testNum++ );
-checkResults( [ 1, 0 ], t.find( { a: { $lte: 1 } } ).sort( { a: -1 } ).hint( { a: 1 } ) , testNum++ );
+checkResults([0, 1], t.find({a: {$lt: 2}}).sort({a: 1}).hint({a: 1}), testNum++);
+checkResults([1, 0], t.find({a: {$lt: 2}}).sort({a: -1}).hint({a: 1}), testNum++);
+checkResults([0, 1], t.find({a: {$lte: 1}}).sort({a: 1}).hint({a: 1}), testNum++);
+checkResults([1, 0], t.find({a: {$lte: 1}}).sort({a: -1}).hint({a: 1}), testNum++);
diff --git a/jstests/core/cursor4.js b/jstests/core/cursor4.js
index b08a72f62e5..dd7875d0836 100644
--- a/jstests/core/cursor4.js
+++ b/jstests/core/cursor4.js
@@ -1,47 +1,53 @@
// Test inequality bounds with multi-field sorting
-function checkResults( expected, cursor ) {
- assert.eq( expected.length, cursor.count() );
- for( i = 0; i < expected.length; ++i ) {
- assert.eq( expected[ i ].a, cursor[ i ].a );
- assert.eq( expected[ i ].b, cursor[ i ].b );
+function checkResults(expected, cursor) {
+ assert.eq(expected.length, cursor.count());
+ for (i = 0; i < expected.length; ++i) {
+ assert.eq(expected[i].a, cursor[i].a);
+ assert.eq(expected[i].b, cursor[i].b);
}
}
-function testConstrainedFindMultiFieldSorting( db ) {
+function testConstrainedFindMultiFieldSorting(db) {
r = db.ed_db_cursor4_cfmfs;
r.drop();
- entries = [ { a: 0, b: 0 },
- { a: 0, b: 1 },
- { a: 1, b: 1 },
- { a: 1, b: 1 },
- { a: 2, b: 0 } ];
- for( i = 0; i < entries.length; ++i )
- r.save( entries[ i ] );
- r.ensureIndex( { a: 1, b: 1 } );
+ entries = [{a: 0, b: 0}, {a: 0, b: 1}, {a: 1, b: 1}, {a: 1, b: 1}, {a: 2, b: 0}];
+ for (i = 0; i < entries.length; ++i)
+ r.save(entries[i]);
+ r.ensureIndex({a: 1, b: 1});
reverseEntries = entries.slice();
reverseEntries.reverse();
- checkResults( entries.slice( 2, 4 ), r.find( { a: 1, b: 1 } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
- checkResults( entries.slice( 2, 4 ), r.find( { a: 1, b: 1 } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ) );
-
- checkResults( entries.slice( 2, 5 ), r.find( { a: { $gt: 0 } } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
- checkResults( reverseEntries.slice( 0, 3 ), r.find( { a: { $gt: 0 } } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ) );
- checkResults( entries.slice( 0, 4 ), r.find( { a: { $lt: 2 } } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
- checkResults( reverseEntries.slice( 1, 5 ), r.find( { a: { $lt: 2 } } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ) );
-
- checkResults( entries.slice( 4, 5 ), r.find( { a: { $gt: 0 }, b: { $lt: 1 } } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
- checkResults( entries.slice( 2, 4 ), r.find( { a: { $gt: 0 }, b: { $gt: 0 } } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
-
- checkResults( reverseEntries.slice( 0, 1 ), r.find( { a: { $gt: 0 }, b: { $lt: 1 } } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ) );
- checkResults( reverseEntries.slice( 1, 3 ), r.find( { a: { $gt: 0 }, b: { $gt: 0 } } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ) );
-
- checkResults( entries.slice( 0, 1 ), r.find( { a: { $lt: 2 }, b: { $lt: 1 } } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
- checkResults( entries.slice( 1, 4 ), r.find( { a: { $lt: 2 }, b: { $gt: 0 } } ).sort( { a: 1, b: 1 } ).hint( { a: 1, b: 1 } ) );
-
- checkResults( reverseEntries.slice( 4, 5 ), r.find( { a: { $lt: 2 }, b: { $lt: 1 } } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ) );
- checkResults( reverseEntries.slice( 1, 4 ), r.find( { a: { $lt: 2 }, b: { $gt: 0 } } ).sort( { a: -1, b: -1 } ).hint( { a: 1, b: 1 } ) );
+ checkResults(entries.slice(2, 4), r.find({a: 1, b: 1}).sort({a: 1, b: 1}).hint({a: 1, b: 1}));
+ checkResults(entries.slice(2, 4), r.find({a: 1, b: 1}).sort({a: -1, b: -1}).hint({a: 1, b: 1}));
+
+ checkResults(entries.slice(2, 5), r.find({a: {$gt: 0}}).sort({a: 1, b: 1}).hint({a: 1, b: 1}));
+ checkResults(reverseEntries.slice(0, 3),
+ r.find({a: {$gt: 0}}).sort({a: -1, b: -1}).hint({a: 1, b: 1}));
+ checkResults(entries.slice(0, 4), r.find({a: {$lt: 2}}).sort({a: 1, b: 1}).hint({a: 1, b: 1}));
+ checkResults(reverseEntries.slice(1, 5),
+ r.find({a: {$lt: 2}}).sort({a: -1, b: -1}).hint({a: 1, b: 1}));
+
+ checkResults(entries.slice(4, 5),
+ r.find({a: {$gt: 0}, b: {$lt: 1}}).sort({a: 1, b: 1}).hint({a: 1, b: 1}));
+ checkResults(entries.slice(2, 4),
+ r.find({a: {$gt: 0}, b: {$gt: 0}}).sort({a: 1, b: 1}).hint({a: 1, b: 1}));
+
+ checkResults(reverseEntries.slice(0, 1),
+ r.find({a: {$gt: 0}, b: {$lt: 1}}).sort({a: -1, b: -1}).hint({a: 1, b: 1}));
+ checkResults(reverseEntries.slice(1, 3),
+ r.find({a: {$gt: 0}, b: {$gt: 0}}).sort({a: -1, b: -1}).hint({a: 1, b: 1}));
+
+ checkResults(entries.slice(0, 1),
+ r.find({a: {$lt: 2}, b: {$lt: 1}}).sort({a: 1, b: 1}).hint({a: 1, b: 1}));
+ checkResults(entries.slice(1, 4),
+ r.find({a: {$lt: 2}, b: {$gt: 0}}).sort({a: 1, b: 1}).hint({a: 1, b: 1}));
+
+ checkResults(reverseEntries.slice(4, 5),
+ r.find({a: {$lt: 2}, b: {$lt: 1}}).sort({a: -1, b: -1}).hint({a: 1, b: 1}));
+ checkResults(reverseEntries.slice(1, 4),
+ r.find({a: {$lt: 2}, b: {$gt: 0}}).sort({a: -1, b: -1}).hint({a: 1, b: 1}));
}
-testConstrainedFindMultiFieldSorting( db );
+testConstrainedFindMultiFieldSorting(db);
diff --git a/jstests/core/cursor5.js b/jstests/core/cursor5.js
index 6434d2b3887..64158eaccec 100644
--- a/jstests/core/cursor5.js
+++ b/jstests/core/cursor5.js
@@ -1,36 +1,46 @@
// Test bounds with subobject indexes.
-function checkResults( expected, cursor ) {
- assert.eq( expected.length, cursor.count() );
- for( i = 0; i < expected.length; ++i ) {
- assert.eq( expected[ i ].a.b, cursor[ i ].a.b );
- assert.eq( expected[ i ].a.c, cursor[ i ].a.c );
- assert.eq( expected[ i ].a.d, cursor[ i ].a.d );
- assert.eq( expected[ i ].e, cursor[ i ].e );
+function checkResults(expected, cursor) {
+ assert.eq(expected.length, cursor.count());
+ for (i = 0; i < expected.length; ++i) {
+ assert.eq(expected[i].a.b, cursor[i].a.b);
+ assert.eq(expected[i].a.c, cursor[i].a.c);
+ assert.eq(expected[i].a.d, cursor[i].a.d);
+ assert.eq(expected[i].e, cursor[i].e);
}
}
-function testBoundsWithSubobjectIndexes( db ) {
+function testBoundsWithSubobjectIndexes(db) {
r = db.ed_db_cursor5_bwsi;
r.drop();
- z = [ { a: { b: 1, c: 2, d: 3 }, e: 4 },
- { a: { b: 1, c: 2, d: 3 }, e: 5 },
- { a: { b: 1, c: 2, d: 4 }, e: 4 },
- { a: { b: 1, c: 2, d: 4 }, e: 5 },
- { a: { b: 2, c: 2, d: 3 }, e: 4 },
- { a: { b: 2, c: 2, d: 3 }, e: 5 } ];
- for( i = 0; i < z.length; ++i )
- r.save( z[ i ] );
- idx = { "a.d": 1, a: 1, e: -1 };
- rIdx = { "a.d": -1, a: -1, e: 1 };
- r.ensureIndex( idx );
+ z = [
+ {a: {b: 1, c: 2, d: 3}, e: 4},
+ {a: {b: 1, c: 2, d: 3}, e: 5},
+ {a: {b: 1, c: 2, d: 4}, e: 4},
+ {a: {b: 1, c: 2, d: 4}, e: 5},
+ {a: {b: 2, c: 2, d: 3}, e: 4},
+ {a: {b: 2, c: 2, d: 3}, e: 5}
+ ];
+ for (i = 0; i < z.length; ++i)
+ r.save(z[i]);
+ idx = {
+ "a.d": 1,
+ a: 1,
+ e: -1
+ };
+ rIdx = {
+ "a.d": -1,
+ a: -1,
+ e: 1
+ };
+ r.ensureIndex(idx);
- checkResults( [ z[ 0 ], z[ 4 ], z[ 2 ] ], r.find( { e: 4 } ).sort( idx ).hint( idx ) );
- checkResults( [ z[ 1 ], z[ 3 ] ], r.find( { e: { $gt: 4 }, "a.b": 1 } ).sort( idx ).hint( idx ) );
+ checkResults([z[0], z[4], z[2]], r.find({e: 4}).sort(idx).hint(idx));
+ checkResults([z[1], z[3]], r.find({e: {$gt: 4}, "a.b": 1}).sort(idx).hint(idx));
- checkResults( [ z[ 2 ], z[ 4 ], z[ 0 ] ], r.find( { e: 4 } ).sort( rIdx ).hint( idx ) );
- checkResults( [ z[ 3 ], z[ 1 ] ], r.find( { e: { $gt: 4 }, "a.b": 1 } ).sort( rIdx ).hint( idx ) );
+ checkResults([z[2], z[4], z[0]], r.find({e: 4}).sort(rIdx).hint(idx));
+ checkResults([z[3], z[1]], r.find({e: {$gt: 4}, "a.b": 1}).sort(rIdx).hint(idx));
}
-testBoundsWithSubobjectIndexes( db );
+testBoundsWithSubobjectIndexes(db);
diff --git a/jstests/core/cursor6.js b/jstests/core/cursor6.js
index bb0af64cfc9..f793d37bfe5 100644
--- a/jstests/core/cursor6.js
+++ b/jstests/core/cursor6.js
@@ -1,66 +1,68 @@
// Test different directions for compound indexes
-function eq( one, two ) {
- assert.eq( one.a, two.a );
- assert.eq( one.b, two.b );
+function eq(one, two) {
+ assert.eq(one.a, two.a);
+ assert.eq(one.b, two.b);
}
-function check( indexed ) {
+function check(indexed) {
var hint;
- if ( indexed ) {
- hint = { a: 1, b: -1 };
+ if (indexed) {
+ hint = {
+ a: 1,
+ b: -1
+ };
} else {
- hint = { $natural: 1 };
+ hint = {
+ $natural: 1
+ };
}
- f = r.find().sort( { a: 1, b: 1 } ).hint( hint );
- eq( z[ 0 ], f[ 0 ] );
- eq( z[ 1 ], f[ 1 ] );
- eq( z[ 2 ], f[ 2 ] );
- eq( z[ 3 ], f[ 3 ] );
+ f = r.find().sort({a: 1, b: 1}).hint(hint);
+ eq(z[0], f[0]);
+ eq(z[1], f[1]);
+ eq(z[2], f[2]);
+ eq(z[3], f[3]);
- f = r.find().sort( { a: 1, b: -1 } ).hint( hint );
- eq( z[ 1 ], f[ 0 ] );
- eq( z[ 0 ], f[ 1 ] );
- eq( z[ 3 ], f[ 2 ] );
- eq( z[ 2 ], f[ 3 ] );
+ f = r.find().sort({a: 1, b: -1}).hint(hint);
+ eq(z[1], f[0]);
+ eq(z[0], f[1]);
+ eq(z[3], f[2]);
+ eq(z[2], f[3]);
- f = r.find().sort( { a: -1, b: 1 } ).hint( hint );
- eq( z[ 2 ], f[ 0 ] );
- eq( z[ 3 ], f[ 1 ] );
- eq( z[ 0 ], f[ 2 ] );
- eq( z[ 1 ], f[ 3 ] );
+ f = r.find().sort({a: -1, b: 1}).hint(hint);
+ eq(z[2], f[0]);
+ eq(z[3], f[1]);
+ eq(z[0], f[2]);
+ eq(z[1], f[3]);
- f = r.find( { a: { $gte: 2 } } ).sort( { a: 1, b: -1 } ).hint( hint );
- eq( z[ 3 ], f[ 0 ] );
- eq( z[ 2 ], f[ 1 ] );
+ f = r.find({a: {$gte: 2}}).sort({a: 1, b: -1}).hint(hint);
+ eq(z[3], f[0]);
+ eq(z[2], f[1]);
- f = r.find( { a: { $gte: 2 } } ).sort( { a: -1, b: 1 } ).hint( hint );
- eq( z[ 2 ], f[ 0 ] );
- eq( z[ 3 ], f[ 1 ] );
+ f = r.find({a: {$gte: 2}}).sort({a: -1, b: 1}).hint(hint);
+ eq(z[2], f[0]);
+ eq(z[3], f[1]);
- f = r.find( { a: { $gte: 2 } } ).sort( { a: 1, b: 1 } ).hint( hint );
- eq( z[ 2 ], f[ 0 ] );
- eq( z[ 3 ], f[ 1 ] );
+ f = r.find({a: {$gte: 2}}).sort({a: 1, b: 1}).hint(hint);
+ eq(z[2], f[0]);
+ eq(z[3], f[1]);
- f = r.find().sort( { a: -1, b: -1 } ).hint( hint );
- eq( z[ 3 ], f[ 0 ] );
- eq( z[ 2 ], f[ 1 ] );
- eq( z[ 1 ], f[ 2 ] );
- eq( z[ 0 ], f[ 3 ] );
+ f = r.find().sort({a: -1, b: -1}).hint(hint);
+ eq(z[3], f[0]);
+ eq(z[2], f[1]);
+ eq(z[1], f[2]);
+ eq(z[0], f[3]);
}
r = db.ed_db_cursor6;
r.drop();
-z = [ { a: 1, b: 1 },
- { a: 1, b: 2 },
- { a: 2, b: 1 },
- { a: 2, b: 2 } ];
-for( i = 0; i < z.length; ++i )
- r.save( z[ i ] );
+z = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 2, b: 1}, {a: 2, b: 2}];
+for (i = 0; i < z.length; ++i)
+ r.save(z[i]);
-r.ensureIndex( { a: 1, b: -1 } );
+r.ensureIndex({a: 1, b: -1});
-check( false );
-check( true );
+check(false);
+check(true);
diff --git a/jstests/core/cursor7.js b/jstests/core/cursor7.js
index 336beafaa90..6e77a144ba3 100644
--- a/jstests/core/cursor7.js
+++ b/jstests/core/cursor7.js
@@ -1,40 +1,67 @@
// Test bounds with multiple inequalities and sorting.
-function checkResults( expected, cursor ) {
- assert.eq( expected.length, cursor.count() );
- for( i = 0; i < expected.length; ++i ) {
- assert.eq( expected[ i ].a, cursor[ i ].a );
- assert.eq( expected[ i ].b, cursor[ i ].b );
+function checkResults(expected, cursor) {
+ assert.eq(expected.length, cursor.count());
+ for (i = 0; i < expected.length; ++i) {
+ assert.eq(expected[i].a, cursor[i].a);
+ assert.eq(expected[i].b, cursor[i].b);
}
}
-function testMultipleInequalities( db ) {
+function testMultipleInequalities(db) {
r = db.ed_db_cursor_mi;
r.drop();
- z = [ { a: 1, b: 2 },
- { a: 3, b: 4 },
- { a: 5, b: 6 },
- { a: 7, b: 8 } ];
- for( i = 0; i < z.length; ++i )
- r.save( z[ i ] );
- idx = { a: 1, b: 1 };
- rIdx = { a: -1, b: -1 };
- r.ensureIndex( idx );
-
- checkResults( [ z[ 2 ], z[ 3 ] ], r.find( { a: { $gt: 3 } } ).sort( idx ).hint( idx ) );
- checkResults( [ z[ 2 ] ], r.find( { a: { $gt: 3, $lt: 7 } } ).sort( idx ).hint( idx ) );
- checkResults( [ z[ 2 ] ], r.find( { a: { $gt: 3, $lt: 7, $lte: 5 } } ).sort( idx ).hint( idx ) );
-
- checkResults( [ z[ 3 ], z[ 2 ] ], r.find( { a: { $gt: 3 } } ).sort( rIdx ).hint( idx ) );
- checkResults( [ z[ 2 ] ], r.find( { a: { $gt: 3, $lt: 7 } } ).sort( rIdx ).hint( idx ) );
- checkResults( [ z[ 2 ] ], r.find( { a: { $gt: 3, $lt: 7, $lte: 5 } } ).sort( rIdx ).hint( idx ) );
-
- checkResults( [ z[ 1 ], z[ 2 ] ], r.find( { a: { $gt: 1, $lt: 7, $gte: 3, $lte: 5 }, b: { $gt: 2, $lt: 8, $gte: 4, $lte: 6 } } ).sort( idx ).hint( idx ) );
- checkResults( [ z[ 2 ], z[ 1 ] ], r.find( { a: { $gt: 1, $lt: 7, $gte: 3, $lte: 5 }, b: { $gt: 2, $lt: 8, $gte: 4, $lte: 6 } } ).sort( rIdx ).hint( idx ) );
-
- checkResults( [ z[ 1 ], z[ 2 ] ], r.find( { a: { $gte: 1, $lte: 7, $gt: 2, $lt: 6 }, b: { $gte: 2, $lte: 8, $gt: 3, $lt: 7 } } ).sort( idx ).hint( idx ) );
- checkResults( [ z[ 2 ], z[ 1 ] ], r.find( { a: { $gte: 1, $lte: 7, $gt: 2, $lt: 6 }, b: { $gte: 2, $lte: 8, $gt: 3, $lt: 7 } } ).sort( rIdx ).hint( idx ) );
+ z = [{a: 1, b: 2}, {a: 3, b: 4}, {a: 5, b: 6}, {a: 7, b: 8}];
+ for (i = 0; i < z.length; ++i)
+ r.save(z[i]);
+ idx = {
+ a: 1,
+ b: 1
+ };
+ rIdx = {
+ a: -1,
+ b: -1
+ };
+ r.ensureIndex(idx);
+
+ checkResults([z[2], z[3]], r.find({a: {$gt: 3}}).sort(idx).hint(idx));
+ checkResults([z[2]], r.find({a: {$gt: 3, $lt: 7}}).sort(idx).hint(idx));
+ checkResults([z[2]], r.find({a: {$gt: 3, $lt: 7, $lte: 5}}).sort(idx).hint(idx));
+
+ checkResults([z[3], z[2]], r.find({a: {$gt: 3}}).sort(rIdx).hint(idx));
+ checkResults([z[2]], r.find({a: {$gt: 3, $lt: 7}}).sort(rIdx).hint(idx));
+ checkResults([z[2]], r.find({a: {$gt: 3, $lt: 7, $lte: 5}}).sort(rIdx).hint(idx));
+
+ checkResults([z[1], z[2]],
+ r.find({
+ a: {$gt: 1, $lt: 7, $gte: 3, $lte: 5},
+ b: {$gt: 2, $lt: 8, $gte: 4, $lte: 6}
+ })
+ .sort(idx)
+ .hint(idx));
+ checkResults([z[2], z[1]],
+ r.find({
+ a: {$gt: 1, $lt: 7, $gte: 3, $lte: 5},
+ b: {$gt: 2, $lt: 8, $gte: 4, $lte: 6}
+ })
+ .sort(rIdx)
+ .hint(idx));
+
+ checkResults([z[1], z[2]],
+ r.find({
+ a: {$gte: 1, $lte: 7, $gt: 2, $lt: 6},
+ b: {$gte: 2, $lte: 8, $gt: 3, $lt: 7}
+ })
+ .sort(idx)
+ .hint(idx));
+ checkResults([z[2], z[1]],
+ r.find({
+ a: {$gte: 1, $lte: 7, $gt: 2, $lt: 6},
+ b: {$gte: 2, $lte: 8, $gt: 3, $lt: 7}
+ })
+ .sort(rIdx)
+ .hint(idx));
}
-testMultipleInequalities( db );
+testMultipleInequalities(db);
diff --git a/jstests/core/cursora.js b/jstests/core/cursora.js
index 0a69c4e5592..dfd9e28f281 100644
--- a/jstests/core/cursora.js
+++ b/jstests/core/cursora.js
@@ -1,21 +1,21 @@
t = db.cursora;
-function run( n , atomic ){
- if( !isNumber(n) ) {
- print("n:");
- printjson(n);
- assert(isNumber(n), "cursora.js isNumber");
+function run(n, atomic) {
+ if (!isNumber(n)) {
+ print("n:");
+ printjson(n);
+ assert(isNumber(n), "cursora.js isNumber");
}
t.drop();
var bulk = t.initializeUnorderedBulkOp();
- for ( i=0; i<n; i++ )
- bulk.insert( { _id : i } );
+ for (i = 0; i < n; i++)
+ bulk.insert({_id: i});
assert.writeOK(bulk.execute());
- print("cursora.js startParallelShell n:"+n+" atomic:"+atomic);
- join = startParallelShell( "sleep(50);" +
- "db.cursora.remove({" + ( atomic ? "$atomic:true" : "" ) + "});" );
+ print("cursora.js startParallelShell n:" + n + " atomic:" + atomic);
+ join = startParallelShell("sleep(50);" + "db.cursora.remove({" +
+ (atomic ? "$atomic:true" : "") + "});");
var start = null;
var ex = null;
@@ -23,14 +23,16 @@ function run( n , atomic ){
var end = null;
try {
start = new Date();
- num = t.find(function () {
+ num = t.find(function() {
num = 2;
- for (var x = 0; x < 1000; x++) num += 2;
+ for (var x = 0; x < 1000; x++)
+ num += 2;
return num > 0;
- }).sort({ _id: -1 }).itcount();
+ })
+ .sort({_id: -1})
+ .itcount();
end = new Date();
- }
- catch (e) {
+ } catch (e) {
print("cursora.js FAIL " + e);
join();
throw e;
@@ -38,15 +40,16 @@ function run( n , atomic ){
join();
- //print( "cursora.js num: " + num + " time:" + ( end.getTime() - start.getTime() ) )
- assert.eq( 0 , t.count() , "after remove: " + tojson( ex ) );
- // assert.lt( 0 , ex.nYields , "not enough yields : " + tojson( ex ) ); // TODO make this more reliable so cen re-enable assert
- if ( n == num )
- print( "cursora.js warning: shouldn't have counted all n: " + n + " num: " + num );
+ // print( "cursora.js num: " + num + " time:" + ( end.getTime() - start.getTime() ) )
+ assert.eq(0, t.count(), "after remove: " + tojson(ex));
+ // assert.lt( 0 , ex.nYields , "not enough yields : " + tojson( ex ) ); // TODO make this more
+ // reliable so cen re-enable assert
+ if (n == num)
+ print("cursora.js warning: shouldn't have counted all n: " + n + " num: " + num);
}
-run( 1500 );
-run( 5000 );
-run( 1500 , true );
-run( 5000 , true );
+run(1500);
+run(5000);
+run(1500, true);
+run(5000, true);
print("cursora.js SUCCESS");
diff --git a/jstests/core/cursorb.js b/jstests/core/cursorb.js
index 70f49c50454..62c6db802dd 100644
--- a/jstests/core/cursorb.js
+++ b/jstests/core/cursorb.js
@@ -5,13 +5,14 @@ t = db.jstests_cursorb;
t.drop();
// Exhaust a client cursor in get more.
-for( i = 0; i < 200; ++i ) {
- t.save( { a:i } );
+for (i = 0; i < 200; ++i) {
+ t.save({a: i});
}
t.find().itcount();
// Check that the 'cursor not found in map -1' message is not printed. This message indicates an
// attempt to look up a cursor with an invalid id and should never appear in the log.
-log = db.adminCommand( { getLog:'global' } ).log;
-log.forEach( function( line ) { assert( !line.match( /cursor not found in map -1 / ),
- 'Cursor map lookup with id -1.' ); } );
+log = db.adminCommand({getLog: 'global'}).log;
+log.forEach(function(line) {
+ assert(!line.match(/cursor not found in map -1 /), 'Cursor map lookup with id -1.');
+});
diff --git a/jstests/core/datasize2.js b/jstests/core/datasize2.js
index d83894bc189..6cb5b9b10d9 100644
--- a/jstests/core/datasize2.js
+++ b/jstests/core/datasize2.js
@@ -4,30 +4,36 @@
//
(function() {
-"use strict";
+ "use strict";
-var coll = db.foo;
-var adminDB = db.getSiblingDB('admin');
-coll.drop();
+ var coll = db.foo;
+ var adminDB = db.getSiblingDB('admin');
+ coll.drop();
-var N = 1000;
-for (var i = 0; i < N; i++) {
- coll.insert({_id: i, s: "asdasdasdasdasdasdasd"});
-}
+ var N = 1000;
+ for (var i = 0; i < N; i++) {
+ coll.insert({_id: i, s: "asdasdasdasdasdasdasd"});
+ }
-var dataSizeCommand = { "dataSize": "test.foo",
- "keyPattern": { "_id" : 1 },
- "min": { "_id" : 0 },
- "max": { "_id" : N } };
+ var dataSizeCommand = {
+ "dataSize": "test.foo",
+ "keyPattern": {"_id": 1},
+ "min": {"_id": 0},
+ "max": {"_id": N}
+ };
-assert.eq(N, db.runCommand(dataSizeCommand).numObjects,
- "dataSize command on 'test.foo' failed when called on the 'test' DB.");
-assert.eq(N, adminDB.runCommand(dataSizeCommand).numObjects,
- "dataSize command on 'test.foo' failed when called on the 'admin' DB.");
+ assert.eq(N,
+ db.runCommand(dataSizeCommand).numObjects,
+ "dataSize command on 'test.foo' failed when called on the 'test' DB.");
+ assert.eq(N,
+ adminDB.runCommand(dataSizeCommand).numObjects,
+ "dataSize command on 'test.foo' failed when called on the 'admin' DB.");
-dataSizeCommand.maxObjects = 100;
-assert.eq(101, db.runCommand(dataSizeCommand).numObjects,
- "dataSize command with max number of objects set failed on 'test' DB");
-assert.eq(101, db.runCommand(dataSizeCommand).numObjects,
- "dataSize command with max number of objects set failed on 'admin' DB");
+ dataSizeCommand.maxObjects = 100;
+ assert.eq(101,
+ db.runCommand(dataSizeCommand).numObjects,
+ "dataSize command with max number of objects set failed on 'test' DB");
+ assert.eq(101,
+ db.runCommand(dataSizeCommand).numObjects,
+ "dataSize command with max number of objects set failed on 'admin' DB");
})();
diff --git a/jstests/core/date1.js b/jstests/core/date1.js
index 7e893ea1355..65449c662b5 100644
--- a/jstests/core/date1.js
+++ b/jstests/core/date1.js
@@ -1,17 +1,15 @@
t = db.date1;
-
-function go( d , msg ){
+function go(d, msg) {
t.drop();
- t.save({ a: 1, d: d });
-// printjson(d);
-// printjson(t.findOne().d);
- assert.eq( d , t.findOne().d , msg );
+ t.save({a: 1, d: d});
+ // printjson(d);
+ // printjson(t.findOne().d);
+ assert.eq(d, t.findOne().d, msg);
}
-go( new Date() , "A" );
-go( new Date( 1 ) , "B");
-go( new Date( 0 ) , "C (old spidermonkey lib fails this test)");
+go(new Date(), "A");
+go(new Date(1), "B");
+go(new Date(0), "C (old spidermonkey lib fails this test)");
go(new Date(-10), "neg");
-
diff --git a/jstests/core/date2.js b/jstests/core/date2.js
index 9e3af9b7555..2980f10bf7a 100644
--- a/jstests/core/date2.js
+++ b/jstests/core/date2.js
@@ -3,9 +3,11 @@
t = db.jstests_date2;
t.drop();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
-var obj = {a:new Timestamp(0, 1)}; // in old versions this was == to new Date(1)
-t.save( obj );
-assert.eq( 0, t.find( {a:{$gt:new Date(1)}} ).itcount() );
-assert.eq( 1, t.find(obj).itcount() );
+var obj = {
+ a: new Timestamp(0, 1)
+}; // in old versions this was == to new Date(1)
+t.save(obj);
+assert.eq(0, t.find({a: {$gt: new Date(1)}}).itcount());
+assert.eq(1, t.find(obj).itcount());
diff --git a/jstests/core/date3.js b/jstests/core/date3.js
index 8329cca0dfe..9f7204bb52a 100644
--- a/jstests/core/date3.js
+++ b/jstests/core/date3.js
@@ -7,25 +7,25 @@ d1 = new Date(-1000);
dz = new Date(0);
d2 = new Date(1000);
-t.save( {x: 3, d: dz} );
-t.save( {x: 2, d: d2} );
-t.save( {x: 1, d: d1} );
+t.save({x: 3, d: dz});
+t.save({x: 2, d: d2});
+t.save({x: 1, d: d1});
-function test () {
- var list = t.find( {d: {$lt: dz}} );
- assert.eq ( 1, list.size() );
- assert.eq ( 1, list[0].x );
- assert.eq ( d1, list[0].d );
- var list = t.find( {d: {$gt: dz}} );
- assert.eq ( 1, list.size() );
- assert.eq ( 2, list[0].x );
- var list = t.find().sort( {d:1} );
- assert.eq ( 3, list.size() );
- assert.eq ( 1, list[0].x );
- assert.eq ( 3, list[1].x );
- assert.eq ( 2, list[2].x );
+function test() {
+ var list = t.find({d: {$lt: dz}});
+ assert.eq(1, list.size());
+ assert.eq(1, list[0].x);
+ assert.eq(d1, list[0].d);
+ var list = t.find({d: {$gt: dz}});
+ assert.eq(1, list.size());
+ assert.eq(2, list[0].x);
+ var list = t.find().sort({d: 1});
+ assert.eq(3, list.size());
+ assert.eq(1, list[0].x);
+ assert.eq(3, list[1].x);
+ assert.eq(2, list[2].x);
}
test();
-t.ensureIndex( {d: 1} );
+t.ensureIndex({d: 1});
test();
diff --git a/jstests/core/db.js b/jstests/core/db.js
index 66a0bd73ede..cc698f483c7 100644
--- a/jstests/core/db.js
+++ b/jstests/core/db.js
@@ -1,11 +1,14 @@
function testInvalidDBNameThrowsExceptionWithConstructor() {
- assert.throws( function() { return new DB( null, "/\\" ); } );
+ assert.throws(function() {
+ return new DB(null, "/\\");
+ });
}
function testInvalidDBNameThrowsExceptionWithSibling() {
- assert.throws( function() { return db.getSiblingDB( "/\\" ); } );
+ assert.throws(function() {
+ return db.getSiblingDB("/\\");
+ });
}
testInvalidDBNameThrowsExceptionWithConstructor();
testInvalidDBNameThrowsExceptionWithSibling();
-
diff --git a/jstests/core/dbadmin.js b/jstests/core/dbadmin.js
index 94ae45d34c1..43af2df057e 100644
--- a/jstests/core/dbadmin.js
+++ b/jstests/core/dbadmin.js
@@ -1,33 +1,33 @@
load('jstests/aggregation/extras/utils.js');
(function() {
-'use strict';
-
-var t = db.dbadmin;
-t.save( { x : 1 } );
-t.save( { x : 1 } );
-
-var res = db._adminCommand( "listDatabases" );
-assert( res.databases && res.databases.length > 0 , "listDatabases 1 " + tojson(res) );
-
-var now = new Date();
-var x = db._adminCommand( "ismaster" );
-assert( x.ismaster , "ismaster failed: " + tojson( x ) );
-assert( x.localTime, "ismaster didn't include time: " + tojson(x));
-
-var localTimeSkew = x.localTime - now;
-if ( localTimeSkew >= 50 ) {
- print( "Warning: localTimeSkew " + localTimeSkew + " > 50ms." );
-}
-assert.lt( localTimeSkew, 500, "isMaster.localTime" );
-
-var before = db.runCommand( "serverStatus" );
-print(before.uptimeEstimate);
-sleep( 5000 );
-
-var after = db.runCommand( "serverStatus" );
-print(after.uptimeEstimate);
-assert.gte( after.uptimeEstimate, before.uptimeEstimate,
- "uptime estimate should be non-decreasing" );
+ 'use strict';
+
+ var t = db.dbadmin;
+ t.save({x: 1});
+ t.save({x: 1});
+
+ var res = db._adminCommand("listDatabases");
+ assert(res.databases && res.databases.length > 0, "listDatabases 1 " + tojson(res));
+
+ var now = new Date();
+ var x = db._adminCommand("ismaster");
+ assert(x.ismaster, "ismaster failed: " + tojson(x));
+ assert(x.localTime, "ismaster didn't include time: " + tojson(x));
+
+ var localTimeSkew = x.localTime - now;
+ if (localTimeSkew >= 50) {
+ print("Warning: localTimeSkew " + localTimeSkew + " > 50ms.");
+ }
+ assert.lt(localTimeSkew, 500, "isMaster.localTime");
+
+ var before = db.runCommand("serverStatus");
+ print(before.uptimeEstimate);
+ sleep(5000);
+
+ var after = db.runCommand("serverStatus");
+ print(after.uptimeEstimate);
+ assert.gte(
+ after.uptimeEstimate, before.uptimeEstimate, "uptime estimate should be non-decreasing");
})();
diff --git a/jstests/core/dbcase.js b/jstests/core/dbcase.js
index 5663046443f..033608a3f6a 100644
--- a/jstests/core/dbcase.js
+++ b/jstests/core/dbcase.js
@@ -1,26 +1,26 @@
// Check db name duplication constraint SERVER-2111
-a = db.getSisterDB( "dbcasetest_dbnamea" );
-b = db.getSisterDB( "dbcasetest_dbnameA" );
+a = db.getSisterDB("dbcasetest_dbnamea");
+b = db.getSisterDB("dbcasetest_dbnameA");
a.dropDatabase();
b.dropDatabase();
-assert.writeOK( a.foo.save( { x : 1 } ));
+assert.writeOK(a.foo.save({x: 1}));
-res = b.foo.save( { x : 1 } );
-assert.writeError( res );
+res = b.foo.save({x: 1});
+assert.writeError(res);
-assert.neq( -1, db.getMongo().getDBNames().indexOf( a.getName() ) );
-assert.eq( -1, db.getMongo().getDBNames().indexOf( b.getName() ) );
-printjson( db.getMongo().getDBs().databases );
+assert.neq(-1, db.getMongo().getDBNames().indexOf(a.getName()));
+assert.eq(-1, db.getMongo().getDBNames().indexOf(b.getName()));
+printjson(db.getMongo().getDBs().databases);
a.dropDatabase();
b.dropDatabase();
-ai = db.getMongo().getDBNames().indexOf( a.getName() );
-bi = db.getMongo().getDBNames().indexOf( b.getName() );
+ai = db.getMongo().getDBNames().indexOf(a.getName());
+bi = db.getMongo().getDBNames().indexOf(b.getName());
// One of these dbs may exist if there is a slave active, but they must
// not both exist.
-assert( ai == -1 || bi == -1 );
-printjson( db.getMongo().getDBs().databases );
+assert(ai == -1 || bi == -1);
+printjson(db.getMongo().getDBs().databases);
diff --git a/jstests/core/dbcase2.js b/jstests/core/dbcase2.js
index 37c037001d8..2c3517e5e63 100644
--- a/jstests/core/dbcase2.js
+++ b/jstests/core/dbcase2.js
@@ -1,7 +1,8 @@
-// SERVER-2111 Check that an in memory db name will block creation of a db with a similar but differently cased name.
+// SERVER-2111 Check that an in memory db name will block creation of a db with a similar but
+// differently cased name.
-var dbLowerCase = db.getSisterDB( "dbcase2test_dbnamea" );
-var dbUpperCase = db.getSisterDB( "dbcase2test_dbnameA" );
+var dbLowerCase = db.getSisterDB("dbcase2test_dbnamea");
+var dbUpperCase = db.getSisterDB("dbcase2test_dbnameA");
var resultLower = dbLowerCase.c.insert({});
assert.eq(1, resultLower.nInserted);
@@ -10,4 +11,4 @@ var resultUpper = dbUpperCase.c.insert({});
assert.eq(0, resultUpper.nInserted);
assert.writeError(resultUpper);
-assert.eq( -1, db.getMongo().getDBNames().indexOf( "dbcase2test_dbnameA" ) );
+assert.eq(-1, db.getMongo().getDBNames().indexOf("dbcase2test_dbnameA"));
diff --git a/jstests/core/dbhash.js b/jstests/core/dbhash.js
index 7fea4b4d50c..cc202656fbe 100644
--- a/jstests/core/dbhash.js
+++ b/jstests/core/dbhash.js
@@ -6,53 +6,54 @@ a.drop();
b.drop();
// debug SERVER-761
-db.getCollectionNames().forEach( function( x ) {
- v = db[ x ].validate();
- if ( !v.valid ) {
- print( x );
- printjson( v );
- }
- } );
-
-function dbhash( mydb ) {
- var ret = mydb.runCommand( "dbhash" );
- assert.commandWorked( ret, "dbhash failure" );
+db.getCollectionNames().forEach(function(x) {
+ v = db[x].validate();
+ if (!v.valid) {
+ print(x);
+ printjson(v);
+ }
+});
+
+function dbhash(mydb) {
+ var ret = mydb.runCommand("dbhash");
+ assert.commandWorked(ret, "dbhash failure");
return ret;
}
-function gh( coll , mydb ){
- if ( ! mydb ) mydb = db;
- var x = dbhash( mydb ).collections[coll.getName()];
- if ( ! x )
+function gh(coll, mydb) {
+ if (!mydb)
+ mydb = db;
+ var x = dbhash(mydb).collections[coll.getName()];
+ if (!x)
return "";
return x;
}
-function dbh( mydb ){
- return dbhash( mydb ).md5;
+function dbh(mydb) {
+ return dbhash(mydb).md5;
}
-assert.eq( gh( a ) , gh( b ) , "A1" );
+assert.eq(gh(a), gh(b), "A1");
-a.insert( { _id : 5 } );
-assert.neq( gh( a ) , gh( b ) , "A2" );
+a.insert({_id: 5});
+assert.neq(gh(a), gh(b), "A2");
-b.insert( { _id : 5 } );
-assert.eq( gh( a ) , gh( b ) , "A3" );
+b.insert({_id: 5});
+assert.eq(gh(a), gh(b), "A3");
-dba = db.getSisterDB( "dbhasha" );
-dbb = db.getSisterDB( "dbhashb" );
+dba = db.getSisterDB("dbhasha");
+dbb = db.getSisterDB("dbhashb");
dba.dropDatabase();
dbb.dropDatabase();
-assert.eq( gh( dba.foo , dba ) , gh( dbb.foo , dbb ) , "B1" );
-assert.eq( dbh( dba ) , dbh( dbb ) , "C1" );
+assert.eq(gh(dba.foo, dba), gh(dbb.foo, dbb), "B1");
+assert.eq(dbh(dba), dbh(dbb), "C1");
-dba.foo.insert( { _id : 5 } );
-assert.neq( gh( dba.foo , dba ) , gh( dbb.foo , dbb ) , "B2" );
-assert.neq( dbh( dba ) , dbh( dbb ) , "C2" );
+dba.foo.insert({_id: 5});
+assert.neq(gh(dba.foo, dba), gh(dbb.foo, dbb), "B2");
+assert.neq(dbh(dba), dbh(dbb), "C2");
-dbb.foo.insert( { _id : 5 } );
-assert.eq( gh( dba.foo , dba ) , gh( dbb.foo , dbb ) , "B3" );
-assert.eq( dbh( dba ) , dbh( dbb ) , "C3" );
+dbb.foo.insert({_id: 5});
+assert.eq(gh(dba.foo, dba), gh(dbb.foo, dbb), "B3");
+assert.eq(dbh(dba), dbh(dbb), "C3");
diff --git a/jstests/core/dbhash2.js b/jstests/core/dbhash2.js
index 74cc1fb8422..8e779ca7806 100644
--- a/jstests/core/dbhash2.js
+++ b/jstests/core/dbhash2.js
@@ -1,23 +1,21 @@
-mydb = db.getSisterDB( "config" );
+mydb = db.getSisterDB("config");
t = mydb.foo;
t.drop();
-t.insert( { x : 1 } );
-res1 = mydb.runCommand( "dbhash" );
-assert( res1.fromCache.indexOf( "config.foo" ) == -1 );
+t.insert({x: 1});
+res1 = mydb.runCommand("dbhash");
+assert(res1.fromCache.indexOf("config.foo") == -1);
-res2 = mydb.runCommand( "dbhash" );
-assert( res2.fromCache.indexOf( "config.foo" ) >= 0 );
-assert.eq( res1.collections.foo, res2.collections.foo );
+res2 = mydb.runCommand("dbhash");
+assert(res2.fromCache.indexOf("config.foo") >= 0);
+assert.eq(res1.collections.foo, res2.collections.foo);
-t.insert( { x : 2 } );
-res3 = mydb.runCommand( "dbhash" );
-assert( res3.fromCache.indexOf( "config.foo" ) < 0 );
-assert.neq( res1.collections.foo, res3.collections.foo );
+t.insert({x: 2});
+res3 = mydb.runCommand("dbhash");
+assert(res3.fromCache.indexOf("config.foo") < 0);
+assert.neq(res1.collections.foo, res3.collections.foo);
// Validate dbHash with an empty database does not trigger an fassert/invariant
-assert.commandFailed(db.runCommand( {"dbhash" : "" }));
-
-
+assert.commandFailed(db.runCommand({"dbhash": ""}));
diff --git a/jstests/core/dbref1.js b/jstests/core/dbref1.js
index 4a827662c1a..b5bb06f230d 100644
--- a/jstests/core/dbref1.js
+++ b/jstests/core/dbref1.js
@@ -5,6 +5,6 @@ b = db.dbref1b;
a.drop();
b.drop();
-a.save( { name : "eliot" } );
-b.save( { num : 1 , link : new DBPointer( "dbref1a" , a.findOne()._id ) } );
-assert.eq( "eliot" , b.findOne().link.fetch().name , "A" );
+a.save({name: "eliot"});
+b.save({num: 1, link: new DBPointer("dbref1a", a.findOne()._id)});
+assert.eq("eliot", b.findOne().link.fetch().name, "A");
diff --git a/jstests/core/dbref2.js b/jstests/core/dbref2.js
index d1b4870322d..9f3cb4e2ca4 100644
--- a/jstests/core/dbref2.js
+++ b/jstests/core/dbref2.js
@@ -7,14 +7,14 @@ a.drop();
b.drop();
c.drop();
-a.save( { name : "eliot" } );
-b.save( { num : 1 , link : new DBRef( "dbref2a" , a.findOne()._id ) } );
-c.save( { num : 1 , links : [ new DBRef( "dbref2a" , a.findOne()._id ) ] } );
+a.save({name: "eliot"});
+b.save({num: 1, link: new DBRef("dbref2a", a.findOne()._id)});
+c.save({num: 1, links: [new DBRef("dbref2a", a.findOne()._id)]});
-assert.eq( "eliot" , b.findOne().link.fetch().name , "A" );
-assert.neq( "el" , b.findOne().link.fetch().name , "B" );
+assert.eq("eliot", b.findOne().link.fetch().name, "A");
+assert.neq("el", b.findOne().link.fetch().name, "B");
// $elemMatch value
-var doc = c.findOne( { links: { $elemMatch: { $ref : "dbref2a", $id : a.findOne()._id } } } );
-assert.eq( "eliot" , doc.links[0].fetch().name , "C" );
-assert.neq( "el" , doc.links[0].fetch().name , "D" );
+var doc = c.findOne({links: {$elemMatch: {$ref: "dbref2a", $id: a.findOne()._id}}});
+assert.eq("eliot", doc.links[0].fetch().name, "C");
+assert.neq("el", doc.links[0].fetch().name, "D");
diff --git a/jstests/core/dbref3.js b/jstests/core/dbref3.js
index 2f3ab8fa79c..5bf0470442d 100644
--- a/jstests/core/dbref3.js
+++ b/jstests/core/dbref3.js
@@ -9,11 +9,11 @@ t.drop();
// true cases
t.insert({sub: {$ref: "foo", $id: "bar"}, dbref: true});
t.insert({sub: {$ref: "foo", $id: "bar", $db: "baz"}, dbref: true});
-t.insert({sub: {$ref: "foo", $id: "bar", db: "baz"}, dbref: true}); // out of spec but accepted
+t.insert({sub: {$ref: "foo", $id: "bar", db: "baz"}, dbref: true}); // out of spec but accepted
t.insert({sub: {$ref: "foo", $id: ObjectId()}, dbref: true});
t.insert({sub: {$ref: "foo", $id: 1}, dbref: true});
-t.insert({sub: {$ref: 123/*not a string*/, $id: "bar"}, dbref: false});
+t.insert({sub: {$ref: 123 /*not a string*/, $id: "bar"}, dbref: false});
t.insert({sub: {$id: "bar", $ref: "foo"}, dbref: false});
t.insert({sub: {$ref: "foo"}, dbref: false});
t.insert({sub: {$id: "foo"}, dbref: false});
@@ -42,4 +42,4 @@ assert.eq(1, distinctDBs.length);
t.insert({sub: {$ref: "foo", $id: [{x: 1, y: 1}, {x: 2, y: 2}, {x: 3, y: 3}]}});
var k = t.findOne({'sub.$id': {$elemMatch: {x: 2}}}, {_id: 0, 'sub.$id.$': 1});
print('k = ' + tojson(k));
-assert.eq({sub: {$id: [{x: 2, y:2}]}}, k); \ No newline at end of file
+assert.eq({sub: {$id: [{x: 2, y: 2}]}}, k); \ No newline at end of file
diff --git a/jstests/core/delx.js b/jstests/core/delx.js
index c4e3ca263d6..004b54ee6ca 100644
--- a/jstests/core/delx.js
+++ b/jstests/core/delx.js
@@ -1,31 +1,34 @@
-a = db.getSisterDB("delxa" );
-b = db.getSisterDB("delxb" );
+a = db.getSisterDB("delxa");
+b = db.getSisterDB("delxb");
-function setup( mydb ){
+function setup(mydb) {
mydb.dropDatabase();
- for ( i=0; i<100; i++ ){
- mydb.foo.insert( { _id : i } );
+ for (i = 0; i < 100; i++) {
+ mydb.foo.insert({_id: i});
}
}
-setup( a );
-setup( b );
+setup(a);
+setup(b);
-assert.eq( 100 , a.foo.find().itcount() , "A1" );
-assert.eq( 100 , b.foo.find().itcount() , "A2" );
+assert.eq(100, a.foo.find().itcount(), "A1");
+assert.eq(100, b.foo.find().itcount(), "A2");
-x = a.foo.find().sort( { _id : 1 } ).batchSize( 60 );
-y = b.foo.find().sort( { _id : 1 } ).batchSize( 60 );
+x = a.foo.find().sort({_id: 1}).batchSize(60);
+y = b.foo.find().sort({_id: 1}).batchSize(60);
x.next();
y.next();
-a.foo.remove( { _id : { $gt : 50 } } );
+a.foo.remove({_id: {$gt: 50}});
-assert.eq( 51 , a.foo.find().itcount() , "B1" );
-assert.eq( 100 , b.foo.find().itcount() , "B2" );
+assert.eq(51, a.foo.find().itcount(), "B1");
+assert.eq(100, b.foo.find().itcount(), "B2");
xCount = x.itcount();
-assert( xCount == 59 || xCount == 99, "C1 : " + xCount ); // snapshot or not is ok
-assert.eq( 99 , y.itcount() , "C2" ); // this was asserting because ClientCursor byLoc doesn't take db into consideration
+assert(xCount == 59 || xCount == 99, "C1 : " + xCount); // snapshot or not is ok
+assert.eq(
+ 99,
+ y.itcount(),
+ "C2"); // this was asserting because ClientCursor byLoc doesn't take db into consideration
diff --git a/jstests/core/depth_limit.js b/jstests/core/depth_limit.js
index 3a8ef2460ca..ddb648b4586 100644
--- a/jstests/core/depth_limit.js
+++ b/jstests/core/depth_limit.js
@@ -3,17 +3,23 @@
function test() {
function assertTooBig(obj) {
// This used to crash rather than throwing an exception.
- assert.throws(function(){Object.bsonsize(obj);});
+ assert.throws(function() {
+ Object.bsonsize(obj);
+ });
}
function assertNotTooBig(obj) {
- assert.doesNotThrow(function(){Object.bsonsize(obj);});
+ assert.doesNotThrow(function() {
+ Object.bsonsize(obj);
+ });
}
function objWithDepth(depth) {
var out = 1;
while (depth--) {
- out = {o: out};
+ out = {
+ o: out
+ };
}
return out;
}
@@ -41,8 +47,7 @@ function test() {
assertNotTooBig(objWithDepth(objDepthLimit - 1));
assertTooBig(objWithDepth(objDepthLimit));
-
- var arrayDepthLimit = objDepthLimit - 1; // one lower due to wrapping object
+ var arrayDepthLimit = objDepthLimit - 1; // one lower due to wrapping object
assertNotTooBig({array: arrayWithDepth(arrayDepthLimit - 1)});
assertTooBig({array: arrayWithDepth(arrayDepthLimit)});
}
@@ -53,4 +58,4 @@ test();
// test on server
db.depth_limit.drop();
db.depth_limit.insert({});
-db.depth_limit.find({$where: test}).itcount(); // itcount ensures that cursor is executed on server
+db.depth_limit.find({$where: test}).itcount(); // itcount ensures that cursor is executed on server
diff --git a/jstests/core/distinct1.js b/jstests/core/distinct1.js
index 779f5a12ba1..893e1f6ba65 100644
--- a/jstests/core/distinct1.js
+++ b/jstests/core/distinct1.js
@@ -2,31 +2,30 @@
t = db.distinct1;
t.drop();
-assert.eq( 0 , t.distinct( "a" ).length , "test empty" );
+assert.eq(0, t.distinct("a").length, "test empty");
-t.save( { a : 1 } );
-t.save( { a : 2 } );
-t.save( { a : 2 } );
-t.save( { a : 2 } );
-t.save( { a : 3 } );
+t.save({a: 1});
+t.save({a: 2});
+t.save({a: 2});
+t.save({a: 2});
+t.save({a: 3});
+res = t.distinct("a");
+assert.eq("1,2,3", res.toString(), "A1");
-res = t.distinct( "a" );
-assert.eq( "1,2,3" , res.toString() , "A1" );
-
-assert.eq( "1,2" , t.distinct( "a" , { a : { $lt : 3 } } ) , "A2" );
+assert.eq("1,2", t.distinct("a", {a: {$lt: 3}}), "A2");
t.drop();
-t.save( { a : { b : "a" } , c : 12 } );
-t.save( { a : { b : "b" } , c : 12 } );
-t.save( { a : { b : "c" } , c : 12 } );
-t.save( { a : { b : "c" } , c : 12 } );
+t.save({a: {b: "a"}, c: 12});
+t.save({a: {b: "b"}, c: 12});
+t.save({a: {b: "c"}, c: 12});
+t.save({a: {b: "c"}, c: 12});
-res = t.distinct( "a.b" );
-assert.eq( "a,b,c" , res.toString() , "B1" );
-printjson(t._distinct( "a.b" ).stats);
-assert.eq( "COLLSCAN" , t._distinct( "a.b" ).stats.planSummary , "B2" );
+res = t.distinct("a.b");
+assert.eq("a,b,c", res.toString(), "B1");
+printjson(t._distinct("a.b").stats);
+assert.eq("COLLSCAN", t._distinct("a.b").stats.planSummary, "B2");
t.drop();
@@ -34,21 +33,21 @@ t.save({_id: 1, a: 1});
t.save({_id: 2, a: 2});
// Test distinct with _id.
-res = t.distinct( "_id" );
-assert.eq( "1,2", res.toString(), "C1" );
-res = t.distinct( "a", {_id: 1} );
-assert.eq( "1", res.toString(), "C2" );
+res = t.distinct("_id");
+assert.eq("1,2", res.toString(), "C1");
+res = t.distinct("a", {_id: 1});
+assert.eq("1", res.toString(), "C2");
// Test distinct with db.runCommand
t.drop();
-t.save({a :1, b: 2});
-t.save({a :2, b: 2});
-t.save({a :2, b: 1});
-t.save({a :2, b: 2});
-t.save({a :3, b: 2});
-t.save({a :4, b: 1});
-t.save({a :4, b: 1});
+t.save({a: 1, b: 2});
+t.save({a: 2, b: 2});
+t.save({a: 2, b: 1});
+t.save({a: 2, b: 2});
+t.save({a: 3, b: 2});
+t.save({a: 4, b: 1});
+t.save({a: 4, b: 1});
res = db.runCommand({distinct: "distinct1", key: "a"});
assert.commandWorked(res);
diff --git a/jstests/core/distinct2.js b/jstests/core/distinct2.js
index 1517ec5ca3a..fc6ff7779b7 100644
--- a/jstests/core/distinct2.js
+++ b/jstests/core/distinct2.js
@@ -2,12 +2,11 @@
t = db.distinct2;
t.drop();
-t.save({a:null});
-assert.eq( 0 , t.distinct('a.b').length , "A" );
+t.save({a: null});
+assert.eq(0, t.distinct('a.b').length, "A");
t.drop();
-t.save( { a : 1 } );
-assert.eq( [1] , t.distinct( "a" ) , "B" );
-t.save( {} );
-assert.eq( [1] , t.distinct( "a" ) , "C" );
-
+t.save({a: 1});
+assert.eq([1], t.distinct("a"), "B");
+t.save({});
+assert.eq([1], t.distinct("a"), "C");
diff --git a/jstests/core/distinct3.js b/jstests/core/distinct3.js
index c82dc7e9043..6ab21599f97 100644
--- a/jstests/core/distinct3.js
+++ b/jstests/core/distinct3.js
@@ -3,33 +3,33 @@
t = db.jstests_distinct3;
t.drop();
-t.ensureIndex({a:1});
-t.ensureIndex({b:1});
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
var bulk = t.initializeUnorderedBulkOp();
-for( i = 0; i < 50; ++i ) {
- for( j = 0; j < 2; ++j ) {
- bulk.insert({a:i,c:i,d:j});
+for (i = 0; i < 50; ++i) {
+ for (j = 0; j < 2; ++j) {
+ bulk.insert({a: i, c: i, d: j});
}
}
-for( i = 0; i < 100; ++i ) {
- bulk.insert({b:i,c:i+50});
+for (i = 0; i < 100; ++i) {
+ bulk.insert({b: i, c: i + 50});
}
assert.writeOK(bulk.execute());
// Attempt to remove the last match for the {a:1} index scan while distinct is yielding.
-p = startParallelShell( 'for( i = 0; i < 100; ++i ) { ' +
- ' var bulk = db.jstests_distinct3.initializeUnorderedBulkOp();' +
- ' bulk.find( { a:49 } ).remove(); ' +
- ' for( j = 0; j < 20; ++j ) { ' +
- ' bulk.insert( { a:49, c:49, d:j } ); ' +
- ' } ' +
- ' assert.writeOK(bulk.execute()); ' +
- '} ' );
+p = startParallelShell('for( i = 0; i < 100; ++i ) { ' +
+ ' var bulk = db.jstests_distinct3.initializeUnorderedBulkOp();' +
+ ' bulk.find( { a:49 } ).remove(); ' +
+ ' for( j = 0; j < 20; ++j ) { ' +
+ ' bulk.insert( { a:49, c:49, d:j } ); ' +
+ ' } ' +
+ ' assert.writeOK(bulk.execute()); ' +
+ '} ');
-for( i = 0; i < 100; ++i ) {
- count = t.distinct( 'c', {$or:[{a:{$gte:0},d:0},{b:{$gte:0}}]} ).length;
- assert.gt( count, 100 );
+for (i = 0; i < 100; ++i) {
+ count = t.distinct('c', {$or: [{a: {$gte: 0}, d: 0}, {b: {$gte: 0}}]}).length;
+ assert.gt(count, 100);
}
p();
diff --git a/jstests/core/distinct4.js b/jstests/core/distinct4.js
index 1fa2763bf40..fae4b99e59c 100644
--- a/jstests/core/distinct4.js
+++ b/jstests/core/distinct4.js
@@ -6,37 +6,40 @@
var t = db.distinct4;
t.drop();
- t.save({a:null});
- t.save({a:1});
- t.save({a:1});
- t.save({a:2});
- t.save({a:3});
+ t.save({a: null});
+ t.save({a: 1});
+ t.save({a: 1});
+ t.save({a: 2});
+ t.save({a: 3});
- //first argument should be a string or error
+ // first argument should be a string or error
// from shell helper
- assert.throws( function () { t.distinct({a:1}); } );
+ assert.throws(function() {
+ t.distinct({a: 1});
+ });
// from command interface
assert.commandFailedWithCode(t.runCommand("distinct", {"key": {a: 1}}),
ErrorCodes.TypeMismatch);
-
- //second argument should be a document or error
+ // second argument should be a document or error
// from shell helper
- assert.throws( function () { t.distinct('a', '1'); } );
+ assert.throws(function() {
+ t.distinct('a', '1');
+ });
// from command interface
assert.commandFailedWithCode(t.runCommand("distinct", {"key": "a", "query": "a"}),
ErrorCodes.TypeMismatch);
-
-
// empty query clause should not cause error
// from shell helper
- var a = assert.doesNotThrow( function () { return t.distinct('a'); } );
+ var a = assert.doesNotThrow(function() {
+ return t.distinct('a');
+ });
// [ null, 1, 2, 3 ]
assert.eq(4, a.length, tojson(a));
assert.contains(null, a);
@@ -45,6 +48,6 @@
assert.contains(3, a);
// from command interface
- assert.commandWorked( t.runCommand( "distinct", { "key" : "a" } ) );
+ assert.commandWorked(t.runCommand("distinct", {"key": "a"}));
})();
diff --git a/jstests/core/distinct_array1.js b/jstests/core/distinct_array1.js
index d59e4351db2..679395a064b 100644
--- a/jstests/core/distinct_array1.js
+++ b/jstests/core/distinct_array1.js
@@ -1,91 +1,90 @@
t = db.distinct_array1;
t.drop();
-t.save( { a : [1,2,3] } );
-t.save( { a : [2,3,4] } );
-t.save( { a : [3,4,5] } );
-t.save( { a : 9 } );
-
+t.save({a: [1, 2, 3]});
+t.save({a: [2, 3, 4]});
+t.save({a: [3, 4, 5]});
+t.save({a: 9});
// Without index.
-res = t.distinct( "a" ).sort();
-assert.eq( "1,2,3,4,5,9" , res.toString() , "A1" );
+res = t.distinct("a").sort();
+assert.eq("1,2,3,4,5,9", res.toString(), "A1");
// Array element 0 without index.
-res = t.distinct( "a.0" ).sort();
-assert.eq( "1,2,3" , res.toString() , "A2" );
+res = t.distinct("a.0").sort();
+assert.eq("1,2,3", res.toString(), "A2");
// Array element 1 without index.
-res = t.distinct( "a.1" ).sort();
-assert.eq( "2,3,4" , res.toString() , "A3" );
+res = t.distinct("a.1").sort();
+assert.eq("2,3,4", res.toString(), "A3");
// With index.
-t.ensureIndex( { a : 1 } );
-res = t.distinct( "a" ).sort();
-assert.eq( "1,2,3,4,5,9" , res.toString() , "A4" );
+t.ensureIndex({a: 1});
+res = t.distinct("a").sort();
+assert.eq("1,2,3,4,5,9", res.toString(), "A4");
// Array element 0 with index.
-res = t.distinct( "a.0" ).sort();
-assert.eq( "1,2,3" , res.toString() , "A5" );
+res = t.distinct("a.0").sort();
+assert.eq("1,2,3", res.toString(), "A5");
// Array element 1 with index.
-res = t.distinct( "a.1" ).sort();
-assert.eq( "2,3,4" , res.toString() , "A6" );
+res = t.distinct("a.1").sort();
+assert.eq("2,3,4", res.toString(), "A6");
-//t.drop();
+// t.drop();
-t.save( { a : [{b:"a"}, {b:"d"}] , c : 12 } );
-t.save( { a : [{b:"b"}, {b:"d"}] , c : 12 } );
-t.save( { a : [{b:"c"}, {b:"e"}] , c : 12 } );
-t.save( { a : [{b:"c"}, {b:"f"}] , c : 12 } );
-t.save( { a : [] , c : 12 } );
-t.save( { a : { b : "z"} , c : 12 } );
+t.save({a: [{b: "a"}, {b: "d"}], c: 12});
+t.save({a: [{b: "b"}, {b: "d"}], c: 12});
+t.save({a: [{b: "c"}, {b: "e"}], c: 12});
+t.save({a: [{b: "c"}, {b: "f"}], c: 12});
+t.save({a: [], c: 12});
+t.save({a: {b: "z"}, c: 12});
// Without index.
-res = t.distinct( "a.b" ).sort();
-assert.eq( "a,b,c,d,e,f,z" , res.toString() , "B1" );
+res = t.distinct("a.b").sort();
+assert.eq("a,b,c,d,e,f,z", res.toString(), "B1");
// Array element 0 without index
-res = t.distinct( "a.0.b" ).sort();
-assert.eq( "a,b,c" , res.toString() , "B2" );
+res = t.distinct("a.0.b").sort();
+assert.eq("a,b,c", res.toString(), "B2");
// Array element 1 without index
-res = t.distinct( "a.1.b" ).sort();
-assert.eq( "d,e,f" , res.toString() , "B3" );
+res = t.distinct("a.1.b").sort();
+assert.eq("d,e,f", res.toString(), "B3");
// With index.
-t.ensureIndex( { "a.b" : 1 } );
-res = t.distinct( "a.b" );
+t.ensureIndex({"a.b": 1});
+res = t.distinct("a.b");
res.sort();
-assert.eq( "a,b,c,d,e,f,z" , res.toString() , "B4" );
+assert.eq("a,b,c,d,e,f,z", res.toString(), "B4");
// _id as an document containing an array
-t.save( { _id : { a : [1,2,3] } } );
-t.save( { _id : { a : [2,3,4] } } );
-t.save( { _id : { a : [3,4,5] } } );
-t.save( { _id : { a : 9 } } );
+t.save({_id: {a: [1, 2, 3]}});
+t.save({_id: {a: [2, 3, 4]}});
+t.save({_id: {a: [3, 4, 5]}});
+t.save({_id: {a: 9}});
// Without index.
-res = t.distinct( "_id.a" ).sort();
-assert.eq( "1,2,3,4,5,9" , res.toString() , "C1" );
+res = t.distinct("_id.a").sort();
+assert.eq("1,2,3,4,5,9", res.toString(), "C1");
// Array element 0 without index.
-res = t.distinct( "_id.a.0" ).sort();
-assert.eq( "1,2,3" , res.toString() , "C2" );
+res = t.distinct("_id.a.0").sort();
+assert.eq("1,2,3", res.toString(), "C2");
// Array element 1 without index.
-res = t.distinct( "_id.a.1" ).sort();
-assert.eq( "2,3,4" , res.toString() , "C3" );
+res = t.distinct("_id.a.1").sort();
+assert.eq("2,3,4", res.toString(), "C3");
// With index.
-t.ensureIndex( { "_id.a" : 1 } );
-res = t.distinct( "_id.a" ).sort();
-assert.eq( "1,2,3,4,5,9" , res.toString() , "C4" );
+t.ensureIndex({"_id.a": 1});
+res = t.distinct("_id.a").sort();
+assert.eq("1,2,3,4,5,9", res.toString(), "C4");
// Array element 0 with index.
-res = t.distinct( "_id.a.0" ).sort();
-assert.eq( "1,2,3" , res.toString() , "C5" );
+res = t.distinct("_id.a.0").sort();
+assert.eq("1,2,3", res.toString(), "C5");
// Array element 1 with index.
-res = t.distinct( "_id.a.1" ).sort();
-assert.eq( "2,3,4" , res.toString() , "C6" );
+res = t.distinct("_id.a.1").sort();
+assert.eq("2,3,4", res.toString(), "C6");
diff --git a/jstests/core/distinct_index1.js b/jstests/core/distinct_index1.js
index 959e46ebdaf..31faecd376e 100644
--- a/jstests/core/distinct_index1.js
+++ b/jstests/core/distinct_index1.js
@@ -2,74 +2,75 @@
t = db.distinct_index1;
t.drop();
-function r( x ){
- return Math.floor( Math.sqrt( x * 123123 ) ) % 10;
+function r(x) {
+ return Math.floor(Math.sqrt(x * 123123)) % 10;
}
-function d( k , q ){
- return t.runCommand( "distinct" , { key : k , query : q || {} } );
+function d(k, q) {
+ return t.runCommand("distinct", {key: k, query: q || {}});
}
-for ( i=0; i<1000; i++ ){
- o = { a : r(i*5) , b : r(i) };
- t.insert( o );
+for (i = 0; i < 1000; i++) {
+ o = {
+ a: r(i * 5),
+ b: r(i)
+ };
+ t.insert(o);
}
-x = d( "a" );
+x = d("a");
// Collection scan looks at all 1000 documents and gets 1000
// distinct values. Looks at 0 index keys.
-assert.eq( 1000 , x.stats.n , "AA1" );
-assert.eq( 0 , x.stats.nscanned , "AA2" );
-assert.eq( 1000 , x.stats.nscannedObjects , "AA3" );
+assert.eq(1000, x.stats.n, "AA1");
+assert.eq(0, x.stats.nscanned, "AA2");
+assert.eq(1000, x.stats.nscannedObjects, "AA3");
-x = d( "a" , { a : { $gt : 5 } } );
+x = d("a", {a: {$gt: 5}});
// Collection scan looks at all 1000 documents and gets 398
// distinct values which match the query. Looks at 0 index keys.
-assert.eq( 398 , x.stats.n , "AB1" );
-assert.eq( 0 , x.stats.nscanned , "AB2" );
-assert.eq( 1000 , x.stats.nscannedObjects , "AB3" );
+assert.eq(398, x.stats.n, "AB1");
+assert.eq(0, x.stats.nscanned, "AB2");
+assert.eq(1000, x.stats.nscannedObjects, "AB3");
-x = d( "b" , { a : { $gt : 5 } } );
+x = d("b", {a: {$gt: 5}});
// Collection scan looks at all 1000 documents and gets 398
// distinct values which match the query. Looks at 0 index keys.
-assert.eq( 398 , x.stats.n , "AC1" );
-assert.eq( 0 , x.stats.nscanned , "AC2" );
-assert.eq( 1000 , x.stats.nscannedObjects , "AC3" );
+assert.eq(398, x.stats.n, "AC1");
+assert.eq(0, x.stats.nscanned, "AC2");
+assert.eq(1000, x.stats.nscannedObjects, "AC3");
+t.ensureIndex({a: 1});
-
-t.ensureIndex( { a : 1 } );
-
-x = d( "a" );
+x = d("a");
// There are only 10 values. We use the fast distinct hack and only examine each value once.
-assert.eq( 10 , x.stats.n , "BA1" );
-assert.eq( 10 , x.stats.nscanned , "BA2" );
+assert.eq(10, x.stats.n, "BA1");
+assert.eq(10, x.stats.nscanned, "BA2");
-x = d( "a" , { a : { $gt : 5 } } );
+x = d("a", {a: {$gt: 5}});
// Only 4 values of a are >= 5 and we use the fast distinct hack.
-assert.eq(4, x.stats.n , "BB1" );
-assert.eq(4, x.stats.nscanned , "BB2" );
-assert.eq(0, x.stats.nscannedObjects , "BB3" );
+assert.eq(4, x.stats.n, "BB1");
+assert.eq(4, x.stats.nscanned, "BB2");
+assert.eq(0, x.stats.nscannedObjects, "BB3");
-x = d( "b" , { a : { $gt : 5 } } );
+x = d("b", {a: {$gt: 5}});
// We can't use the fast distinct hack here because we're distinct-ing over 'b'.
-assert.eq( 398 , x.stats.n , "BC1" );
-assert.eq( 398 , x.stats.nscanned , "BC2" );
-assert.eq( 398 , x.stats.nscannedObjects , "BC3" );
+assert.eq(398, x.stats.n, "BC1");
+assert.eq(398, x.stats.nscanned, "BC2");
+assert.eq(398, x.stats.nscannedObjects, "BC3");
// Check proper nscannedObjects count when using a query optimizer cursor.
t.dropIndexes();
-t.ensureIndex( { a : 1, b : 1 } );
-x = d( "b" , { a : { $gt : 5 }, b : { $gt : 5 } } );
+t.ensureIndex({a: 1, b: 1});
+x = d("b", {a: {$gt: 5}, b: {$gt: 5}});
printjson(x);
// 171 is the # of results we happen to scan when we don't use a distinct
// hack. When we use the distinct hack we scan 16, currently.
assert.lte(x.stats.n, 171);
-assert.eq(171, x.stats.nscannedObjects , "BD3" );
+assert.eq(171, x.stats.nscannedObjects, "BD3");
// Should use an index scan over the hashed index.
t.dropIndexes();
-t.ensureIndex( { a : "hashed" } );
-x = d( "a", { $or : [ { a : 3 }, { a : 5 } ] } );
-assert.eq( 188, x.stats.n, "DA1" );
-assert.eq( "IXSCAN { a: \"hashed\" }", x.stats.planSummary );
+t.ensureIndex({a: "hashed"});
+x = d("a", {$or: [{a: 3}, {a: 5}]});
+assert.eq(188, x.stats.n, "DA1");
+assert.eq("IXSCAN { a: \"hashed\" }", x.stats.planSummary);
diff --git a/jstests/core/distinct_index2.js b/jstests/core/distinct_index2.js
index 8899a048714..d1b72565102 100644
--- a/jstests/core/distinct_index2.js
+++ b/jstests/core/distinct_index2.js
@@ -1,41 +1,41 @@
t = db.distinct_index2;
t.drop();
-t.ensureIndex( { a : 1 , b : 1 } );
-t.ensureIndex( { c : 1 } );
+t.ensureIndex({a: 1, b: 1});
+t.ensureIndex({c: 1});
// Uniformly distributed dataset.
// If we use a randomly generated dataset, we might not
// generate all the distinct values in the range [0, 10).
-for ( var a=0; a<10; a++ ) {
- for ( var b=0; b<10; b++ ) {
- for ( var c=0; c<10; c++ ) {
- t.insert( { a : a , b : b , c : c } );
+for (var a = 0; a < 10; a++) {
+ for (var b = 0; b < 10; b++) {
+ for (var c = 0; c < 10; c++) {
+ t.insert({a: a, b: b, c: c});
}
}
}
correct = [];
-for ( i=0; i<10; i++ )
- correct.push( i );
+for (i = 0; i < 10; i++)
+ correct.push(i);
-function check( field ){
- res = t.distinct( field );
+function check(field) {
+ res = t.distinct(field);
res = res.sort();
- assert.eq( correct , res , "check: " + field );
+ assert.eq(correct, res, "check: " + field);
- if ( field != "a" ){
- res = t.distinct( field , { a : 1 } );
+ if (field != "a") {
+ res = t.distinct(field, {a: 1});
res = res.sort();
- assert.eq( correct , res , "check 2: " + field );
+ assert.eq(correct, res, "check 2: " + field);
}
}
-check( "a" );
-check( "b" );
-check( "c" );
+check("a");
+check("b");
+check("c");
// hashed index should produce same results.
t.dropIndexes();
-t.ensureIndex( { a : "hashed" } );
-check( "a" );
+t.ensureIndex({a: "hashed"});
+check("a");
diff --git a/jstests/core/distinct_speed1.js b/jstests/core/distinct_speed1.js
index c511de34087..9d7a114e418 100644
--- a/jstests/core/distinct_speed1.js
+++ b/jstests/core/distinct_speed1.js
@@ -2,25 +2,24 @@
t = db.distinct_speed1;
t.drop();
-for ( var i=0; i<10000; i++ ){
- t.save( { x : i % 10 } );
+for (var i = 0; i < 10000; i++) {
+ t.save({x: i % 10});
}
-assert.eq( 10 , t.distinct("x").length , "A1" );
+assert.eq(10, t.distinct("x").length, "A1");
-function fast(){
+function fast() {
t.find().explain("executionStats").executionStats.executionTimeMillis;
}
-function slow(){
+function slow() {
t.distinct("x");
}
-for ( i=0; i<3; i++ ){
- print( "it: " + Date.timeFunc( fast ) );
- print( "di: " + Date.timeFunc( slow ) );
+for (i = 0; i < 3; i++) {
+ print("it: " + Date.timeFunc(fast));
+ print("di: " + Date.timeFunc(slow));
}
-
-t.ensureIndex( { x : 1 } );
-t.distinct( "x" , { x : 5 } );
+t.ensureIndex({x: 1});
+t.distinct("x", {x: 5});
diff --git a/jstests/core/doc_validation.js b/jstests/core/doc_validation.js
index 1230f316b36..92bea28af35 100644
--- a/jstests/core/doc_validation.js
+++ b/jstests/core/doc_validation.js
@@ -34,14 +34,12 @@
// Drop will assert on failure.
coll.drop();
-
// Check that we can only update documents that pass validation.
// Set up valid and invalid docs then set validator.
assert.writeOK(coll.insert({_id: 'valid1', a: 1}));
assert.writeOK(coll.insert({_id: 'invalid2', b: 1}));
- assert.commandWorked(db.runCommand({"collMod": collName,
- "validator" : {a: {$exists: true}}}));
+ assert.commandWorked(db.runCommand({"collMod": collName, "validator": {a: {$exists: true}}}));
// Updates affecting fields not included in validator document
// on a conforming document.
@@ -65,19 +63,17 @@
coll.drop();
-
// Verify can't make a conforming doc fail validation,
// but can update non-conforming doc to pass validation.
assert.writeOK(coll.insert({_id: 'valid1', a: 1}));
assert.writeOK(coll.insert({_id: 'invalid2', b: 1}));
- assert.commandWorked(db.runCommand({"collMod": collName,
- "validator" : {a: {$exists: true}}}));
+ assert.commandWorked(db.runCommand({"collMod": collName, "validator": {a: {$exists: true}}}));
assertFailsValidation(coll.update({_id: 'valid1'}, {$unset: {a: 1}}));
assert.writeOK(coll.update({_id: 'invalid2'}, {$set: {a: 1}}));
// Modify collection to remove validator statement
- assert.commandWorked(db.runCommand({"collMod": collName, "validator" : {}}));
+ assert.commandWorked(db.runCommand({"collMod": collName, "validator": {}}));
// Verify no validation applied to updates.
assert.writeOK(coll.update({_id: 'valid1'}, {$set: {z: 1}}));
diff --git a/jstests/core/doc_validation_invalid_validators.js b/jstests/core/doc_validation_invalid_validators.js
index b77cc07fa3d..aeebae42820 100644
--- a/jstests/core/doc_validation_invalid_validators.js
+++ b/jstests/core/doc_validation_invalid_validators.js
@@ -12,41 +12,36 @@
assert.commandFailed(db.createCollection(collName, {validator: "assert"}));
// Check some disallowed match statements.
- assert.commandFailed(db.createCollection(collName, {validator:
- {$text: "bob"}}));
- assert.commandFailed(db.createCollection(collName, {validator:
- {$where: "this.a == this.b"}}));
- assert.commandFailed(db.createCollection(collName, {validator:
- {$near: {place:"holder"}}}));
- assert.commandFailed(db.createCollection(collName, {validator:
- {$geoNear: {place:"holder"}}}));
- assert.commandFailed(db.createCollection(collName, {validator:
- {$nearSphere: {place:"holder"}}}));
+ assert.commandFailed(db.createCollection(collName, {validator: {$text: "bob"}}));
+ assert.commandFailed(db.createCollection(collName, {validator: {$where: "this.a == this.b"}}));
+ assert.commandFailed(db.createCollection(collName, {validator: {$near: {place: "holder"}}}));
+ assert.commandFailed(db.createCollection(collName, {validator: {$geoNear: {place: "holder"}}}));
+ assert.commandFailed(
+ db.createCollection(collName, {validator: {$nearSphere: {place: "holder"}}}));
// Verify we fail on admin, local and config databases.
- assert.commandFailed(db.getSiblingDB("admin").createCollection(collName,
- {validator: {a: {$exists: true}}}));
+ assert.commandFailed(
+ db.getSiblingDB("admin").createCollection(collName, {validator: {a: {$exists: true}}}));
if (!db.runCommand("isdbgrid").isdbgrid) {
- assert.commandFailed(db.getSiblingDB("local").createCollection(collName,
- {validator: {a: {$exists: true}}}));
+ assert.commandFailed(db.getSiblingDB("local")
+ .createCollection(collName, {validator: {a: {$exists: true}}}));
}
- assert.commandFailed(db.getSiblingDB("config").createCollection(collName,
- {validator: {a: {$exists: true}}}));
+ assert.commandFailed(
+ db.getSiblingDB("config").createCollection(collName, {validator: {a: {$exists: true}}}));
// Create collection with document validator.
assert.commandWorked(db.createCollection(collName, {validator: {a: {$exists: true}}}));
// Verify some invalid match statements can't be passed to collMod.
- assert.commandFailed(db.runCommand({"collMod": collName, "validator" :
- {$text: "bob"}}));
- assert.commandFailed(db.runCommand({"collMod": collName, "validator" :
- {$where: "this.a == this.b"}}));
- assert.commandFailed(db.runCommand({"collMod": collName, "validator" :
- {$near: {place:"holder"}}}));
- assert.commandFailed(db.runCommand({"collMod": collName, "validator" :
- {$geoNear: {place:"holder"}}}));
- assert.commandFailed(db.runCommand({"collMod": collName, "validator" :
- {$nearSphere: {place:"holder"}}}));
+ assert.commandFailed(db.runCommand({"collMod": collName, "validator": {$text: "bob"}}));
+ assert.commandFailed(
+ db.runCommand({"collMod": collName, "validator": {$where: "this.a == this.b"}}));
+ assert.commandFailed(
+ db.runCommand({"collMod": collName, "validator": {$near: {place: "holder"}}}));
+ assert.commandFailed(
+ db.runCommand({"collMod": collName, "validator": {$geoNear: {place: "holder"}}}));
+ assert.commandFailed(
+ db.runCommand({"collMod": collName, "validator": {$nearSphere: {place: "holder"}}}));
coll.drop();
@@ -54,11 +49,9 @@
assert.commandWorked(db.createCollection(collName));
// Verify we can't add an invalid validator to a collection without a validator.
- assert.commandFailed(db.runCommand({"collMod": collName, "validator" :
- {$text: "bob"}}));
- assert.commandFailed(db.runCommand({"collMod": collName, "validator" :
- {$where: "this.a == this.b"}}));
- assert.commandWorked(db.runCommand({"collMod": collName, "validator" :
- {a: {$exists: true}}}));
+ assert.commandFailed(db.runCommand({"collMod": collName, "validator": {$text: "bob"}}));
+ assert.commandFailed(
+ db.runCommand({"collMod": collName, "validator": {$where: "this.a == this.b"}}));
+ assert.commandWorked(db.runCommand({"collMod": collName, "validator": {a: {$exists: true}}}));
coll.drop();
})();
diff --git a/jstests/core/doc_validation_options.js b/jstests/core/doc_validation_options.js
index cfc9263d376..8a96685e48f 100644
--- a/jstests/core/doc_validation_options.js
+++ b/jstests/core/doc_validation_options.js
@@ -9,54 +9,46 @@
var t = db.doc_validation_options;
t.drop();
-
- assert.commandWorked(db.createCollection(t.getName(),
- {validator: {a : 1}}));
- assertFailsValidation(t.insert({a:2}));
- t.insert({a:1});
+ assert.commandWorked(db.createCollection(t.getName(), {validator: {a: 1}}));
+
+ assertFailsValidation(t.insert({a: 2}));
+ t.insert({a: 1});
assert.eq(1, t.count());
// test default to strict
- assertFailsValidation(t.update({}, {$set: {a:2}}));
- assert.eq(1, t.find({a:1}).itcount());
+ assertFailsValidation(t.update({}, {$set: {a: 2}}));
+ assert.eq(1, t.find({a: 1}).itcount());
// check we can do a bad update in warn mode
- assert.commandWorked(t.runCommand("collMod",
- {validationAction: "warn"}));
- t.update({}, {$set: {a:2}});
- assert.eq(1, t.find({a:2}).itcount());
+ assert.commandWorked(t.runCommand("collMod", {validationAction: "warn"}));
+ t.update({}, {$set: {a: 2}});
+ assert.eq(1, t.find({a: 2}).itcount());
// TODO: check log for message?
// make sure persisted
var info = db.getCollectionInfos({name: t.getName()})[0];
assert.eq("warn", info.options.validationAction, tojson(info));
-
+
// check we can go back to enforce strict
- assert.commandWorked(t.runCommand("collMod",
- {validationAction: "error",
- validationLevel: "strict"}));
- assertFailsValidation(t.update({}, {$set: {a:3}}));
- assert.eq(1, t.find({a:2}).itcount());
-
+ assert.commandWorked(
+ t.runCommand("collMod", {validationAction: "error", validationLevel: "strict"}));
+ assertFailsValidation(t.update({}, {$set: {a: 3}}));
+ assert.eq(1, t.find({a: 2}).itcount());
+
// check bad -> bad is ok
- assert.commandWorked(t.runCommand("collMod",
- {validationLevel: "moderate"}));
- t.update({}, {$set: {a:3}});
- assert.eq(1, t.find({a:3}).itcount());
+ assert.commandWorked(t.runCommand("collMod", {validationLevel: "moderate"}));
+ t.update({}, {$set: {a: 3}});
+ assert.eq(1, t.find({a: 3}).itcount());
// test create
t.drop();
- assert.commandWorked(db.createCollection(t.getName(),
- {validator: {a : 1},
- validationAction: "warn"}));
-
- t.insert({a:2});
- t.insert({a:1});
+ assert.commandWorked(
+ db.createCollection(t.getName(), {validator: {a: 1}, validationAction: "warn"}));
+
+ t.insert({a: 2});
+ t.insert({a: 1});
assert.eq(2, t.count());
-
-})();
-
-
+})();
diff --git a/jstests/core/drop.js b/jstests/core/drop.js
index 67be6844813..22ed97ad805 100644
--- a/jstests/core/drop.js
+++ b/jstests/core/drop.js
@@ -3,20 +3,19 @@ var coll = db.jstests_drop;
coll.drop();
res = coll.runCommand("drop");
-assert( !res.ok, tojson( res ) );
-
+assert(!res.ok, tojson(res));
assert.eq(0, coll.getIndexes().length, "A");
coll.save({});
assert.eq(1, coll.getIndexes().length, "B");
-coll.ensureIndex({a : 1});
+coll.ensureIndex({a: 1});
assert.eq(2, coll.getIndexes().length, "C");
-assert.commandWorked(db.runCommand({drop : coll.getName()}));
+assert.commandWorked(db.runCommand({drop: coll.getName()}));
assert.eq(0, coll.getIndexes().length, "D");
-coll.ensureIndex({a : 1});
+coll.ensureIndex({a: 1});
assert.eq(2, coll.getIndexes().length, "E");
-assert.commandWorked(db.runCommand({deleteIndexes : coll.getName(), index : "*"}),
+assert.commandWorked(db.runCommand({deleteIndexes: coll.getName(), index: "*"}),
"delete indexes A");
assert.eq(1, coll.getIndexes().length, "G");
diff --git a/jstests/core/drop2.js b/jstests/core/drop2.js
index f0dee81b592..54f6b8b78ab 100644
--- a/jstests/core/drop2.js
+++ b/jstests/core/drop2.js
@@ -1,23 +1,23 @@
var coll = db.jstests_drop2;
coll.drop();
-function debug( x ) {
- printjson( x );
+function debug(x) {
+ printjson(x);
}
-coll.save( {} );
+coll.save({});
-function getOpId( drop ) {
+function getOpId(drop) {
var inProg = db.currentOp().inprog;
- debug( inProg );
- for ( var id in inProg ) {
- var op = inProg[ id ];
- if ( drop ) {
- if ( op.query && op.query.drop && op.query.drop == coll.getName() ) {
+ debug(inProg);
+ for (var id in inProg) {
+ var op = inProg[id];
+ if (drop) {
+ if (op.query && op.query.drop && op.query.drop == coll.getName()) {
return op.opid;
}
} else {
- if ( op.query && op.query.query && op.query.query.$where && op.ns == (coll + "") ) {
+ if (op.query && op.query.query && op.query.query.$where && op.ns == (coll + "")) {
return op.opid;
}
}
@@ -25,26 +25,28 @@ function getOpId( drop ) {
return null;
}
-var awaitCount = startParallelShell( "print(\"Count thread started\");"
- + "db.getMongo().getCollection(\""
- + (coll + "") + "\")"
- + ".count( { $where: function() {"
- + "while( 1 ) { sleep( 1 ); } } } );"
- + "print(\"Count thread terminating\");" );
+var awaitCount = startParallelShell(
+ "print(\"Count thread started\");" + "db.getMongo().getCollection(\"" + (coll + "") + "\")" +
+ ".count( { $where: function() {" + "while( 1 ) { sleep( 1 ); } } } );" +
+ "print(\"Count thread terminating\");");
countOpId = null;
-assert.soon( function() { countOpId = getOpId( false ); return countOpId; } );
-
-var awaitDrop = startParallelShell( "print(\"Drop thread started\");"
- + "print(\"drop result: \" + "
- + "db.getMongo().getCollection(\""
- + (coll + "") + "\")"
- + ".drop() );"
- + "print(\"Drop thread terminating\")" );
+assert.soon(function() {
+ countOpId = getOpId(false);
+ return countOpId;
+});
+
+var awaitDrop =
+ startParallelShell("print(\"Drop thread started\");" + "print(\"drop result: \" + " +
+ "db.getMongo().getCollection(\"" + (coll + "") + "\")" + ".drop() );" +
+ "print(\"Drop thread terminating\")");
dropOpId = null;
-assert.soon( function() { dropOpId = getOpId( true ); return dropOpId; } );
+assert.soon(function() {
+ dropOpId = getOpId(true);
+ return dropOpId;
+});
-db.killOp( dropOpId );
-db.killOp( countOpId );
+db.killOp(dropOpId);
+db.killOp(countOpId);
var exitCode = awaitCount({checkExitSuccess: false});
assert.neq(0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
@@ -52,4 +54,4 @@ assert.neq(0, exitCode, "expected shell to exit abnormally due to JS execution b
// The drop operation may or may not have been killed.
awaitDrop({checkExitSuccess: false});
-coll.drop(); // in SERVER-1818, this fails
+coll.drop(); // in SERVER-1818, this fails
diff --git a/jstests/core/drop3.js b/jstests/core/drop3.js
index 65fe072cf97..1215d218e4f 100644
--- a/jstests/core/drop3.js
+++ b/jstests/core/drop3.js
@@ -4,22 +4,22 @@ sub = t.sub;
t.drop();
sub.drop();
-
-for (var i = 0; i < 10; i++){
+for (var i = 0; i < 10; i++) {
t.insert({});
sub.insert({});
}
-var cursor = t.find().batchSize(2);
+var cursor = t.find().batchSize(2);
var subcursor = sub.find().batchSize(2);
cursor.next();
subcursor.next();
-assert.eq( cursor.objsLeftInBatch(), 1 );
-assert.eq( subcursor.objsLeftInBatch(), 1 );
-
-t.drop(); // should invalidate cursor, but not subcursor
+assert.eq(cursor.objsLeftInBatch(), 1);
+assert.eq(subcursor.objsLeftInBatch(), 1);
-assert.throws( function(){ cursor.itcount(); } ); // throws "cursor doesn't exist on server" error on getMore
-assert.eq( subcursor.itcount(), 9 ); //one already seen
+t.drop(); // should invalidate cursor, but not subcursor
+assert.throws(function() {
+ cursor.itcount();
+}); // throws "cursor doesn't exist on server" error on getMore
+assert.eq(subcursor.itcount(), 9); // one already seen
diff --git a/jstests/core/drop_index.js b/jstests/core/drop_index.js
index 938ac0d2547..f2e99baa8f8 100644
--- a/jstests/core/drop_index.js
+++ b/jstests/core/drop_index.js
@@ -2,19 +2,19 @@
t = db.dropIndex;
t.drop();
-t.insert( { _id : 1 , a : 2 , b : 3 } );
-assert.eq( 1 , t.getIndexes().length , "A1" );
+t.insert({_id: 1, a: 2, b: 3});
+assert.eq(1, t.getIndexes().length, "A1");
-t.ensureIndex( { a : 1 } );
-t.ensureIndex( { b : 1 } );
-assert.eq( 3 , t.getIndexes().length , "A2" );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
+assert.eq(3, t.getIndexes().length, "A2");
-x = db._dbCommand( { dropIndexes: t.getName() , index : t._genIndexName( { a : 1 } ) } );
-assert.eq( 2 , t.getIndexes().length , "B1 " + tojson(x) );
+x = db._dbCommand({dropIndexes: t.getName(), index: t._genIndexName({a: 1})});
+assert.eq(2, t.getIndexes().length, "B1 " + tojson(x));
-x = db._dbCommand( { dropIndexes: t.getName() , index : { b : 1 } } );
-assert.eq( 1 , t.getIndexes().length , "B2" );
+x = db._dbCommand({dropIndexes: t.getName(), index: {b: 1}});
+assert.eq(1, t.getIndexes().length, "B2");
// ensure you can recreate indexes, even if you don't use dropIndex method
-t.ensureIndex({a:1});
-assert.eq(2 , t.getIndexes().length);
+t.ensureIndex({a: 1});
+assert.eq(2, t.getIndexes().length);
diff --git a/jstests/core/dropdb.js b/jstests/core/dropdb.js
index afe783338c4..1af56da34bf 100644
--- a/jstests/core/dropdb.js
+++ b/jstests/core/dropdb.js
@@ -4,18 +4,19 @@
m = db.getMongo();
baseName = "jstests_dropdb";
-ddb = db.getSisterDB( baseName );
+ddb = db.getSisterDB(baseName);
print("initial dbs: " + tojson(m.getDBNames()));
function check(shouldExist) {
var dbs = m.getDBNames();
- assert.eq(Array.contains(dbs, baseName), shouldExist,
- "DB " + baseName + " should " + (shouldExist ? "" : "not ") + "exist."
- + " dbs: " + tojson(dbs) + "\n" + tojson( m.getDBs() ) );
+ assert.eq(Array.contains(dbs, baseName),
+ shouldExist,
+ "DB " + baseName + " should " + (shouldExist ? "" : "not ") + "exist." + " dbs: " +
+ tojson(dbs) + "\n" + tojson(m.getDBs()));
}
-ddb.c.save( {} );
+ddb.c.save({});
check(true);
var res = ddb.dropDatabase();
@@ -25,6 +26,7 @@ check(false);
var res = ddb.dropDatabase();
assert.commandWorked(res);
-assert.eq(res.dropped, undefined,
+assert.eq(res.dropped,
+ undefined,
"dropped field was populated even though nothing should have been dropped");
check(false);
diff --git a/jstests/core/dropdb_race.js b/jstests/core/dropdb_race.js
index f61b4ef52e2..b4666ecc3ad 100644
--- a/jstests/core/dropdb_race.js
+++ b/jstests/core/dropdb_race.js
@@ -5,7 +5,7 @@ baseName = "jstests_dur_droprace";
d = db.getSisterDB(baseName);
t = d.foo;
-assert(d.adminCommand({ setParameter: 1, syncdelay: 5 }).ok);
+assert(d.adminCommand({setParameter: 1, syncdelay: 5}).ok);
var s = 0;
@@ -17,9 +17,9 @@ for (var pass = 0; pass < 100; pass++) {
var options = ( pass % 4 == 0 )? { writeConcern: { fsync: true }} : undefined;
t.insert({}, options);
}
- t.insert({ x: 1 });
- t.insert({ x: 3 });
- t.ensureIndex({ x: 1 });
+ t.insert({x: 1});
+ t.insert({x: 3});
+ t.ensureIndex({x: 1});
sleep(s);
if (pass % 13 == 0)
t.drop();
@@ -29,13 +29,13 @@ for (var pass = 0; pass < 100; pass++) {
d.dropDatabase();
if (pass % 7 == 0)
- d.runCommand({getLastError:1,j:1});
+ d.runCommand({getLastError: 1, j: 1});
d.getLastError();
s = (s + 1) % 25;
- //print(pass);
+ // print(pass);
if ((new Date()) - start > 60000) {
- print("stopping early");
+ print("stopping early");
break;
}
}
diff --git a/jstests/core/elemMatchProjection.js b/jstests/core/elemMatchProjection.js
index 0ccfdfb161b..97d1be0f081 100644
--- a/jstests/core/elemMatchProjection.js
+++ b/jstests/core/elemMatchProjection.js
@@ -5,245 +5,255 @@ t.drop();
date1 = new Date();
// Insert various styles of arrays
-for ( i = 0; i < 100; i++ ) {
- t.insert({ group: 1, x: [ 1, 2, 3, 4, 5 ] });
- t.insert({ group: 2, x: [ { a: 1, b: 2 }, { a: 2, c: 3 }, { a:1, d:5 } ] });
- t.insert({ group: 3, x: [ { a: 1, b: 2 }, { a: 2, c: 3 }, { a:1, d:5 } ],
- y: [ { aa: 1, bb: 2 }, { aa: 2, cc: 3 }, { aa:1, dd:5 } ] });
- t.insert({ group: 3, x: [ { a: 1, b: 3 }, { a: -6, c: 3 } ] });
- t.insert({ group: 4, x: [ { a: 1, b: 4 }, { a: -6, c: 3 } ] });
- t.insert({ group: 5, x: [ new Date(), 5, 10, 'string', new ObjectId(), 123.456 ] });
- t.insert({ group: 6, x: [ { a: 'string', b: date1 },
- { a: new ObjectId(), b: 1.2345 },
- { a: 'string2', b: date1 } ] });
- t.insert({ group: 7, x: [ { y: [ 1, 2, 3, 4 ] } ] });
- t.insert({ group: 8, x: [ { y: [ { a: 1, b: 2 }, {a: 3, b: 4} ] } ] });
- t.insert({ group: 9, x: [ { y: [ { a: 1, b: 2 }, {a: 3, b: 4} ] },
- { z: [ { a: 1, b: 2 }, {a: 3, b: 4} ] } ] });
- t.insert({ group: 10, x: [ { a: 1, b: 2 }, {a: 3, b: 4} ],
- y: [ { c: 1, d: 2 }, {c: 3, d: 4} ] });
- t.insert({ group: 10, x: [ { a: 1, b: 2 }, {a: 3, b: 4} ],
- y: [ { c: 1, d: 2 }, {c: 3, d: 4} ] });
- t.insert({ group: 11, x: [ { a: 1, b: 2 }, { a: 2, c: 3 }, { a:1, d:5 } ],
- covered: [ { aa: 1, bb: 2 }, { aa: 2, cc: 3 }, { aa:1, dd:5 } ] });
- t.insert({ group: 12, x: { y : [ { a: 1, b: 1 }, { a: 1, b: 2} ] } } );
- t.insert({ group: 13, x: [ { a: 1, b: 1 }, {a: 1, b: 2 } ] } );
- t.insert({ group: 13, x: [ { a: 1, b: 2 }, {a: 1, b: 1 } ] } );
+for (i = 0; i < 100; i++) {
+ t.insert({group: 1, x: [1, 2, 3, 4, 5]});
+ t.insert({group: 2, x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}]});
+ t.insert({
+ group: 3,
+ x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}],
+ y: [{aa: 1, bb: 2}, {aa: 2, cc: 3}, {aa: 1, dd: 5}]
+ });
+ t.insert({group: 3, x: [{a: 1, b: 3}, {a: -6, c: 3}]});
+ t.insert({group: 4, x: [{a: 1, b: 4}, {a: -6, c: 3}]});
+ t.insert({group: 5, x: [new Date(), 5, 10, 'string', new ObjectId(), 123.456]});
+ t.insert({
+ group: 6,
+ x: [{a: 'string', b: date1}, {a: new ObjectId(), b: 1.2345}, {a: 'string2', b: date1}]
+ });
+ t.insert({group: 7, x: [{y: [1, 2, 3, 4]}]});
+ t.insert({group: 8, x: [{y: [{a: 1, b: 2}, {a: 3, b: 4}]}]});
+ t.insert({group: 9, x: [{y: [{a: 1, b: 2}, {a: 3, b: 4}]}, {z: [{a: 1, b: 2}, {a: 3, b: 4}]}]});
+ t.insert({group: 10, x: [{a: 1, b: 2}, {a: 3, b: 4}], y: [{c: 1, d: 2}, {c: 3, d: 4}]});
+ t.insert({group: 10, x: [{a: 1, b: 2}, {a: 3, b: 4}], y: [{c: 1, d: 2}, {c: 3, d: 4}]});
+ t.insert({
+ group: 11,
+ x: [{a: 1, b: 2}, {a: 2, c: 3}, {a: 1, d: 5}],
+ covered: [{aa: 1, bb: 2}, {aa: 2, cc: 3}, {aa: 1, dd: 5}]
+ });
+ t.insert({group: 12, x: {y: [{a: 1, b: 1}, {a: 1, b: 2}]}});
+ t.insert({group: 13, x: [{a: 1, b: 1}, {a: 1, b: 2}]});
+ t.insert({group: 13, x: [{a: 1, b: 2}, {a: 1, b: 1}]});
}
-t.ensureIndex({group:1, 'y.d':1}); // for regular index test (not sure if this is really adding anything useful)
-t.ensureIndex({group:1, covered:1}); // for covered index test
+t.ensureIndex({
+ group: 1,
+ 'y.d': 1
+}); // for regular index test (not sure if this is really adding anything useful)
+t.ensureIndex({group: 1, covered: 1}); // for covered index test
//
// SERVER-828: Positional operator ($) projection tests
//
-assert.eq( 1,
- t.find( { group:3, 'x.a':2 }, { 'x.$':1 } ).toArray()[0].x.length,
- "single object match (array length match)" );
-
-assert.eq( 2,
- t.find( { group:3, 'x.a':1 }, { 'x.$':1 } ).toArray()[0].x[0].b,
- "single object match first" );
-
-assert.eq( undefined,
- t.find( { group:3, 'x.a':2 }, { _id:0, 'x.$':1 } ).toArray()[0]._id,
- "single object match with filtered _id" );
-
-assert.eq( 1,
- t.find( { group:3, 'x.a':2 }, { 'x.$':1 } ).sort( { _id:1 } ).toArray()[0].x.length,
- "sorted single object match with filtered _id (array length match)" );
-
-assert.eq( 1,
- t.find( { 'group':2, 'x': { '$elemMatch' : { 'a':1, 'b':2 } } }, { 'x.$':1 } ).toArray()[0].x.length,
- "single object match with elemMatch" );
-
-assert.eq( 1,
- t.find( { 'group':2, 'x': { '$elemMatch' : { 'a':1, 'b':2 } } }, { 'x.$':{'$slice':1} } ).toArray()[0].x.length,
- "single object match with elemMatch and positive slice" );
-
-assert.eq( 1,
- t.find( { 'group':2, 'x': { '$elemMatch' : { 'a':1, 'b':2 } } }, { 'x.$':{'$slice':-1} } ).toArray()[0].x.length,
- "single object match with elemMatch and negative slice" );
-
-assert.eq( 1,
- t.find( { 'group':12, 'x.y.a':1 }, { 'x.y.$': 1 } ).toArray()[0].x.y.length,
- "single object match with two level dot notation" );
-
-assert.eq( 1,
- t.find( { group:3, 'x.a':2 }, { 'x.$':1 } ).sort( { x:1 } ).toArray()[0].x.length,
- "sorted object match (array length match)" );
-
-assert.eq( { aa:1, dd:5 },
- t.find( { group:3, 'y.dd':5 }, { 'y.$':1 } ).toArray()[0].y[0],
- "single object match (value match)" );
-
-assert.throws( function() {
- t.find( { group:3, 'x.a':2 }, { 'y.$':1 } ).toArray();
- }, [], "throw on invalid projection (field mismatch)" );
-
-assert.throws( function() {
- t.find( { group:3, 'x.a':2 }, { 'y.$':1 } ).sort( { x:1 } ).toArray();
- }, [], "throw on invalid sorted projection (field mismatch)" );
-
-assert.throws( function() {x;
- t.find( { group:3, 'x.a':2 }, { 'x.$':1, group:0 } ).sort( { x:1 } ).toArray();
- }, [], "throw on invalid projection combination (include and exclude)" );
-
-assert.throws( function() {
- t.find( { group:3, 'x.a':1, 'y.aa':1 }, { 'x.$':1, 'y.$':1 } ).toArray();
- }, [], "throw on multiple projections" );
-
-assert.throws( function() {
- t.find( { group:3}, { 'g.$':1 } ).toArray();
- }, [], "throw on invalid projection (non-array field)" );
-
-assert.eq( { aa:1, dd:5 },
- t.find( { group:11, 'covered.dd':5 }, { 'covered.$':1 } ).toArray()[0].covered[0],
- "single object match (covered index)" );
-
-assert.eq( { aa:1, dd:5 },
- t.find( { group:11, 'covered.dd':5 }, { 'covered.$':1 } ).sort( { covered:1 } ).toArray()[0].covered[0],
- "single object match (sorted covered index)" );
-
-assert.eq( 1,
- t.find( { group:10, 'y.d': 4 }, { 'y.$':1 } ).toArray()[0].y.length,
- "single object match (regular index" );
+assert.eq(1,
+ t.find({group: 3, 'x.a': 2}, {'x.$': 1}).toArray()[0].x.length,
+ "single object match (array length match)");
+
+assert.eq(2,
+ t.find({group: 3, 'x.a': 1}, {'x.$': 1}).toArray()[0].x[0].b,
+ "single object match first");
+
+assert.eq(undefined,
+ t.find({group: 3, 'x.a': 2}, {_id: 0, 'x.$': 1}).toArray()[0]._id,
+ "single object match with filtered _id");
+
+assert.eq(1,
+ t.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({_id: 1}).toArray()[0].x.length,
+ "sorted single object match with filtered _id (array length match)");
+
+assert.eq(
+ 1,
+ t.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': 1}).toArray()[0].x.length,
+ "single object match with elemMatch");
+
+assert.eq(1,
+ t.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': {'$slice': 1}})
+ .toArray()[0]
+ .x.length,
+ "single object match with elemMatch and positive slice");
+
+assert.eq(1,
+ t.find({'group': 2, 'x': {'$elemMatch': {'a': 1, 'b': 2}}}, {'x.$': {'$slice': -1}})
+ .toArray()[0]
+ .x.length,
+ "single object match with elemMatch and negative slice");
+
+assert.eq(1,
+ t.find({'group': 12, 'x.y.a': 1}, {'x.y.$': 1}).toArray()[0].x.y.length,
+ "single object match with two level dot notation");
+
+assert.eq(1,
+ t.find({group: 3, 'x.a': 2}, {'x.$': 1}).sort({x: 1}).toArray()[0].x.length,
+ "sorted object match (array length match)");
+
+assert.eq({aa: 1, dd: 5},
+ t.find({group: 3, 'y.dd': 5}, {'y.$': 1}).toArray()[0].y[0],
+ "single object match (value match)");
+
+assert.throws(function() {
+ t.find({group: 3, 'x.a': 2}, {'y.$': 1}).toArray();
+}, [], "throw on invalid projection (field mismatch)");
+
+assert.throws(function() {
+ t.find({group: 3, 'x.a': 2}, {'y.$': 1}).sort({x: 1}).toArray();
+}, [], "throw on invalid sorted projection (field mismatch)");
+
+assert.throws(function() {
+ x;
+ t.find({group: 3, 'x.a': 2}, {'x.$': 1, group: 0}).sort({x: 1}).toArray();
+}, [], "throw on invalid projection combination (include and exclude)");
+
+assert.throws(function() {
+ t.find({group: 3, 'x.a': 1, 'y.aa': 1}, {'x.$': 1, 'y.$': 1}).toArray();
+}, [], "throw on multiple projections");
+
+assert.throws(function() {
+ t.find({group: 3}, {'g.$': 1}).toArray();
+}, [], "throw on invalid projection (non-array field)");
+
+assert.eq({aa: 1, dd: 5},
+ t.find({group: 11, 'covered.dd': 5}, {'covered.$': 1}).toArray()[0].covered[0],
+ "single object match (covered index)");
+
+assert.eq({aa: 1, dd: 5},
+ t.find({group: 11, 'covered.dd': 5}, {'covered.$': 1})
+ .sort({covered: 1})
+ .toArray()[0]
+ .covered[0],
+ "single object match (sorted covered index)");
+
+assert.eq(1,
+ t.find({group: 10, 'y.d': 4}, {'y.$': 1}).toArray()[0].y.length,
+ "single object match (regular index");
if (false) {
+ assert.eq(2, // SERVER-1013: allow multiple positional operators
+ t.find({group: 3, 'y.bb': 2, 'x.d': 5}, {'y.$': 1, 'x.$': 1}).toArray()[0].y[0].bb,
+ "multi match, multi proj 1");
- assert.eq( 2, // SERVER-1013: allow multiple positional operators
- t.find( { group:3, 'y.bb':2, 'x.d':5 }, { 'y.$':1, 'x.$':1 } ).toArray()[0].y[0].bb,
- "multi match, multi proj 1" );
-
- assert.eq( 5, // SSERVER-1013: allow multiple positional operators
- t.find( { group:3, 'y.bb':2, 'x.d':5 }, { 'y.$':1, 'x.$':1 } ).toArray()[0].x[0].d,
- "multi match, multi proj 2" );
-
- assert.eq( 2, // SERVER-1243: allow multiple results from same matcher
- t.find( { group:2, x: { $elemMatchAll: { a:1 } } }, { 'x.$':1 } ).toArray()[0].x.length,
- "multi element match, single proj" );
+    assert.eq(5,  // SERVER-1013: allow multiple positional operators
+ t.find({group: 3, 'y.bb': 2, 'x.d': 5}, {'y.$': 1, 'x.$': 1}).toArray()[0].x[0].d,
+ "multi match, multi proj 2");
- assert.eq( 2, // SERVER-1013: multiple array matches with one prositional operator
- t.find( { group:3, 'y.bb':2, 'x.d':5 }, { 'y.$':1 } ).toArray()[0].y[0].bb,
- "multi match, single proj 1" );
+ assert.eq(2, // SERVER-1243: allow multiple results from same matcher
+ t.find({group: 2, x: {$elemMatchAll: {a: 1}}}, {'x.$': 1}).toArray()[0].x.length,
+ "multi element match, single proj");
- assert.eq( 2, // SERVER-1013: multiple array matches with one positional operator
- t.find( { group:3, 'y.cc':3, 'x.b':2 }, { 'x.$':1 } ).toArray()[0].x[0].b,
- "multi match, single proj 2" );
+    assert.eq(2,  // SERVER-1013: multiple array matches with one positional operator
+ t.find({group: 3, 'y.bb': 2, 'x.d': 5}, {'y.$': 1}).toArray()[0].y[0].bb,
+ "multi match, single proj 1");
+ assert.eq(2, // SERVER-1013: multiple array matches with one positional operator
+ t.find({group: 3, 'y.cc': 3, 'x.b': 2}, {'x.$': 1}).toArray()[0].x[0].b,
+ "multi match, single proj 2");
}
//
// SERVER-2238: $elemMatch projections
//
-assert.eq( -6,
- t.find( { group:4 }, { x: { $elemMatch: { a:-6 } } } ).toArray()[0].x[0].a,
- "single object match" );
-
-assert.eq( 1,
- t.find( { group:4 }, { x: { $elemMatch: { a:-6 } } } ).toArray()[0].x.length,
- "filters non-matching array elements" );
-
-assert.eq( 1,
- t.find( { group:4 }, { x: { $elemMatch: { a:-6, c:3 } } } ).toArray()[0].x.length,
- "filters non-matching array elements with multiple elemMatch criteria" );
-
-assert.eq( 1,
- t.find( { group: 13 }, { 'x' : {'$elemMatch' : { a: {$gt: 0, $lt: 2} } } } ).toArray()[0].x.length,
- "filters non-matching array elements with multiple criteria for a single element in the array" );
-
-assert.eq( 3,
- t.find( { group:4 }, { x: { $elemMatch: { a:{ $lt:1 } } } } ).toArray()[0].x[0].c,
- "object operator match" );
-
-assert.eq( [ 4 ],
- t.find( { group:1 }, { x: { $elemMatch: { $in:[100, 4, -123] } } } ).toArray()[0].x,
- "$in number match" );
-
-assert.eq( [ {a : 1, b : 2} ],
- t.find( { group:2 }, { x: { $elemMatch: { a: { $in:[1] } } } } ).toArray()[0].x,
- "$in number match" );
-
-assert.eq( [1],
- t.find( { group:1 }, { x: { $elemMatch: { $nin:[4, 5, 6] } } } ).toArray()[0].x,
- "$nin number match" );
-
-// but this may become a user assertion, since a single element of an array can't match more than one value
-assert.eq( [ 1],
- t.find( { group:1 }, { x: { $elemMatch: { $all:[1] } } } ).toArray()[0].x,
- "$in number match" );
-
-assert.eq( [ { a: 'string', b: date1 } ],
- t.find( { group:6 }, { x: { $elemMatch: { a:'string' } } } ).toArray()[0].x,
- "mixed object match on string eq" );
-
-assert.eq( [ { a: 'string2', b: date1 } ],
- t.find( { group:6 }, { x: { $elemMatch: { a:/ring2/ } } } ).toArray()[0].x,
- "mixed object match on regexp" );
-
-assert.eq( [ { a: 'string', b: date1 } ],
- t.find( { group:6 }, { x: { $elemMatch: { a: { $type: 2 } } } } ).toArray()[0].x,
- "mixed object match on type" );
-
-assert.eq( [ { a : 2, c : 3} ],
- t.find( { group:2 }, { x: { $elemMatch: { a: { $ne: 1 } } } } ).toArray()[0].x,
- "mixed object match on ne" );
-
-assert.eq( [ {a : 1, d : 5} ],
- t.find( { group:3 }, { x: { $elemMatch: { d: { $exists: true } } } } ).toArray()[0].x,
- "mixed object match on exists" );
-
-assert.eq( [ {a : 2, c : 3} ],
- t.find( { group:3 }, { x: { $elemMatch: { a: { $mod : [2, 0 ] } } } } ).toArray()[0].x,
- "mixed object match on mod" );
-
-assert.eq( {"x" : [ { "a" : 1, "b" : 2 } ], "y" : [ { "c" : 3, "d" : 4 } ] },
- t.find( { group:10 }, { _id : 0,
- x: { $elemMatch: { a: 1 } },
- y: { $elemMatch: { c: 3 } } } ).toArray()[0],
- "multiple $elemMatch on unique fields 1" );
-
-assert.eq( {"x" : [ { "y" : [ { "a" : 1, "b" : 2 }, { "a" : 3, "b" : 4 } ] } ] },
- t.find( { group:8 },
- { _id : 0,
- x: { $elemMatch: { y: { $elemMatch : { a: 3 } } } } } ).toArray()[0],
- "nested $elemMatch" );
-
-assert.throws( function() {
- t.find( { group:3, 'x.a':1 },
- { 'x.$':1, y: { $elemMatch: { aa: 1 } } } ).toArray();
- }, [], "throw on positional operator with $elemMatch" );
+assert.eq(-6,
+ t.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x[0].a,
+ "single object match");
+
+assert.eq(1,
+ t.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x.length,
+ "filters non-matching array elements");
+
+assert.eq(1,
+ t.find({group: 4}, {x: {$elemMatch: {a: -6, c: 3}}}).toArray()[0].x.length,
+ "filters non-matching array elements with multiple elemMatch criteria");
+
+assert.eq(
+ 1,
+ t.find({group: 13}, {'x': {'$elemMatch': {a: {$gt: 0, $lt: 2}}}}).toArray()[0].x.length,
+ "filters non-matching array elements with multiple criteria for a single element in the array");
+
+assert.eq(3,
+ t.find({group: 4}, {x: {$elemMatch: {a: {$lt: 1}}}}).toArray()[0].x[0].c,
+ "object operator match");
+
+assert.eq([4],
+ t.find({group: 1}, {x: {$elemMatch: {$in: [100, 4, -123]}}}).toArray()[0].x,
+ "$in number match");
+
+assert.eq([{a: 1, b: 2}],
+ t.find({group: 2}, {x: {$elemMatch: {a: {$in: [1]}}}}).toArray()[0].x,
+ "$in number match");
+
+assert.eq([1],
+ t.find({group: 1}, {x: {$elemMatch: {$nin: [4, 5, 6]}}}).toArray()[0].x,
+ "$nin number match");
+
+// but this may become a user assertion, since a single element of an array can't match more than
+// one value
+assert.eq([1],
+ t.find({group: 1}, {x: {$elemMatch: {$all: [1]}}}).toArray()[0].x,
+ "$in number match");
+
+assert.eq([{a: 'string', b: date1}],
+ t.find({group: 6}, {x: {$elemMatch: {a: 'string'}}}).toArray()[0].x,
+ "mixed object match on string eq");
+
+assert.eq([{a: 'string2', b: date1}],
+ t.find({group: 6}, {x: {$elemMatch: {a: /ring2/}}}).toArray()[0].x,
+ "mixed object match on regexp");
+
+assert.eq([{a: 'string', b: date1}],
+ t.find({group: 6}, {x: {$elemMatch: {a: {$type: 2}}}}).toArray()[0].x,
+ "mixed object match on type");
+
+assert.eq([{a: 2, c: 3}],
+ t.find({group: 2}, {x: {$elemMatch: {a: {$ne: 1}}}}).toArray()[0].x,
+ "mixed object match on ne");
+
+assert.eq([{a: 1, d: 5}],
+ t.find({group: 3}, {x: {$elemMatch: {d: {$exists: true}}}}).toArray()[0].x,
+ "mixed object match on exists");
+
+assert.eq([{a: 2, c: 3}],
+ t.find({group: 3}, {x: {$elemMatch: {a: {$mod: [2, 0]}}}}).toArray()[0].x,
+ "mixed object match on mod");
+
+assert.eq({"x": [{"a": 1, "b": 2}], "y": [{"c": 3, "d": 4}]},
+ t.find({group: 10}, {_id: 0, x: {$elemMatch: {a: 1}}, y: {$elemMatch: {c: 3}}})
+ .toArray()[0],
+ "multiple $elemMatch on unique fields 1");
+
+assert.eq({"x": [{"y": [{"a": 1, "b": 2}, {"a": 3, "b": 4}]}]},
+ t.find({group: 8}, {_id: 0, x: {$elemMatch: {y: {$elemMatch: {a: 3}}}}}).toArray()[0],
+ "nested $elemMatch");
+
+assert.throws(function() {
+ t.find({group: 3, 'x.a': 1}, {'x.$': 1, y: {$elemMatch: {aa: 1}}}).toArray();
+}, [], "throw on positional operator with $elemMatch");
if (false) {
+ assert.eq(2, // SERVER-1243: handle multiple $elemMatch results
+ t.find({group: 4}, {x: {$elemMatchAll: {a: {$lte: 2}}}}).toArray()[0].x.length,
+ "multi object match");
- assert.eq( 2 , // SERVER-1243: handle multiple $elemMatch results
- t.find( { group:4 }, { x: { $elemMatchAll: { a:{ $lte:2 } } } } ).toArray()[0].x.length,
- "multi object match" );
-
- assert.eq( 3 , // SERVER-1243: handle multiple $elemMatch results
- t.find( { group:1 }, { x: { $elemMatchAll: { $in:[1, 2, 3] } } } ).toArray()[0].x.length,
- "$in number match" );
-
- assert.eq( 1 , // SERVER-1243: handle multiple $elemMatch results
- t.find( { group:5 }, { x: { $elemMatchAll: { $ne: 5 } } } ).toArray()[0].x.length,
- "single mixed type match 1" );
+ assert.eq(3, // SERVER-1243: handle multiple $elemMatch results
+ t.find({group: 1}, {x: {$elemMatchAll: {$in: [1, 2, 3]}}}).toArray()[0].x.length,
+ "$in number match");
- assert.eq( 1 , // SERVER-831: handle nested arrays
- t.find( { group:9 }, { 'x.y': { $elemMatch: { a: 1 } } } ).toArray()[0].x.length,
- "single dotted match" );
+ assert.eq(1, // SERVER-1243: handle multiple $elemMatch results
+ t.find({group: 5}, {x: {$elemMatchAll: {$ne: 5}}}).toArray()[0].x.length,
+ "single mixed type match 1");
+ assert.eq(1, // SERVER-831: handle nested arrays
+ t.find({group: 9}, {'x.y': {$elemMatch: {a: 1}}}).toArray()[0].x.length,
+ "single dotted match");
}
//
// Batch/getMore tests
//
// test positional operator across multiple batches
-a = t.find( { group:3, 'x.b':2 }, { 'x.$':1 } ).batchSize(1);
-while ( a.hasNext() ) {
- assert.eq( 2, a.next().x[0].b, "positional getMore test");
+a = t.find({group: 3, 'x.b': 2}, {'x.$': 1}).batchSize(1);
+while (a.hasNext()) {
+ assert.eq(2, a.next().x[0].b, "positional getMore test");
}
// test $elemMatch operator across multiple batches
-a = t.find( { group:3 }, { x:{$elemMatch:{a:1}} } ).batchSize(1);
-while ( a.hasNext() ) {
- assert.eq( 1, a.next().x[0].a, "positional getMore test");
+a = t.find({group: 3}, {x: {$elemMatch: {a: 1}}}).batchSize(1);
+while (a.hasNext()) {
+ assert.eq(1, a.next().x[0].a, "positional getMore test");
}
diff --git a/jstests/core/error2.js b/jstests/core/error2.js
index 2a6a10170a1..6f0b95bc17e 100644
--- a/jstests/core/error2.js
+++ b/jstests/core/error2.js
@@ -4,18 +4,19 @@ f = db.jstests_error2;
f.drop();
-f.save( {a:1} );
+f.save({a: 1});
-assert.throws(
- function(){
- c = f.find({$where : function(){ return a(); }});
- c.next();
- }
-);
-
-assert.throws(
- function(){
- db.eval( function() { return a(); } );
- }
-);
+assert.throws(function() {
+ c = f.find({
+ $where: function() {
+ return a();
+ }
+ });
+ c.next();
+});
+assert.throws(function() {
+ db.eval(function() {
+ return a();
+ });
+});
diff --git a/jstests/core/error5.js b/jstests/core/error5.js
index 4a58f0dcf7a..0b5ab003dda 100644
--- a/jstests/core/error5.js
+++ b/jstests/core/error5.js
@@ -2,7 +2,9 @@
t = db.error5;
t.drop();
-assert.throws( function(){ t.save( 4 ); printjson( t.findOne() ); } , null , "A" );
-t.save( { a : 1 } );
-assert.eq( 1 , t.count() , "B" );
-
+assert.throws(function() {
+ t.save(4);
+ printjson(t.findOne());
+}, null, "A");
+t.save({a: 1});
+assert.eq(1, t.count(), "B");
diff --git a/jstests/core/eval0.js b/jstests/core/eval0.js
index 4375cace839..a0c93da2cab 100644
--- a/jstests/core/eval0.js
+++ b/jstests/core/eval0.js
@@ -1,8 +1,22 @@
-assert.eq( 17 , db.eval( function(){ return 11 + 6; } ) , "A" );
-assert.eq( 17 , db.eval( function( x ){ return 10 + x; } , 7 ) , "B" );
+assert.eq(17,
+ db.eval(function() {
+ return 11 + 6;
+ }),
+ "A");
+assert.eq(17,
+ db.eval(
+ function(x) {
+ return 10 + x;
+ },
+ 7),
+ "B");
// check that functions in system.js work
-db.system.js.insert({_id: "add", value: function(x,y){ return x + y;}});
-assert.eq( 20 , db.eval( "this.add(15, 5);" ) , "C" );
-
+db.system.js.insert({
+ _id: "add",
+ value: function(x, y) {
+ return x + y;
+ }
+});
+assert.eq(20, db.eval("this.add(15, 5);"), "C");
diff --git a/jstests/core/eval1.js b/jstests/core/eval1.js
index 1fdcec66152..8b139cae02a 100644
--- a/jstests/core/eval1.js
+++ b/jstests/core/eval1.js
@@ -2,16 +2,14 @@
t = db.eval1;
t.drop();
-t.save( { _id : 1 , name : "eliot" } );
-t.save( { _id : 2 , name : "sara" } );
+t.save({_id: 1, name: "eliot"});
+t.save({_id: 2, name: "sara"});
-f = function(id){
- return db["eval1"].findOne( { _id : id } ).name;
+f = function(id) {
+ return db["eval1"].findOne({_id: id}).name;
};
-
-assert.eq( "eliot" , f( 1 ) , "A" );
-assert.eq( "sara" , f( 2 ) , "B" );
-assert.eq( "eliot" , db.eval( f , 1 ) , "C" );
-assert.eq( "sara" , db.eval( f , 2 ) , "D" );
-
+assert.eq("eliot", f(1), "A");
+assert.eq("sara", f(2), "B");
+assert.eq("eliot", db.eval(f, 1), "C");
+assert.eq("sara", db.eval(f, 2), "D");
diff --git a/jstests/core/eval2.js b/jstests/core/eval2.js
index 6e39bb4a7bd..598314a6c5b 100644
--- a/jstests/core/eval2.js
+++ b/jstests/core/eval2.js
@@ -1,28 +1,28 @@
t = db.eval2;
t.drop();
-t.save({a:1});
-t.save({a:1});
+t.save({a: 1});
+t.save({a: 1});
-var f = db.group(
- {
- ns: t.getName(),
- key: { a:true},
- cond: { a:1 },
- reduce: function(obj,prev) { prev.csum++; } ,
- initial: { csum: 0}
- }
-);
+var f = db.group({
+ ns: t.getName(),
+ key: {a: true},
+ cond: {a: 1},
+ reduce: function(obj, prev) {
+ prev.csum++;
+ },
+ initial: {csum: 0}
+});
-assert(f[0].a == 1 && f[0].csum == 2 , "on db" );
+assert(f[0].a == 1 && f[0].csum == 2, "on db");
-var f = t.group(
- {
- key: { a:true},
- cond: { a:1 },
- reduce: function(obj,prev) { prev.csum++; } ,
- initial: { csum: 0}
- }
-);
+var f = t.group({
+ key: {a: true},
+ cond: {a: 1},
+ reduce: function(obj, prev) {
+ prev.csum++;
+ },
+ initial: {csum: 0}
+});
-assert(f[0].a == 1 && f[0].csum == 2 , "on coll" );
+assert(f[0].a == 1 && f[0].csum == 2, "on coll");
diff --git a/jstests/core/eval3.js b/jstests/core/eval3.js
index 404d4d863b7..c4f8be21056 100644
--- a/jstests/core/eval3.js
+++ b/jstests/core/eval3.js
@@ -2,20 +2,34 @@
t = db.eval3;
t.drop();
-t.save( { _id : 1 , name : "eliot" } );
-assert.eq( 1 , t.count() , "A" );
+t.save({_id: 1, name: "eliot"});
+assert.eq(1, t.count(), "A");
-function z( a , b ){
- db.eval3.save( { _id : a , name : b } );
+function z(a, b) {
+ db.eval3.save({_id: a, name: b});
return b;
}
-z( 2 , "sara" );
-assert.eq( 2 , t.count() , "B" );
+z(2, "sara");
+assert.eq(2, t.count(), "B");
-assert.eq( "eliot,sara" , t.find().toArray().map( function(z){ return z.name; } ).sort().toString() );
+assert.eq("eliot,sara",
+ t.find()
+ .toArray()
+ .map(function(z) {
+ return z.name;
+ })
+ .sort()
+ .toString());
-assert.eq( "joe" , db.eval( z , 3 , "joe" ) , "C" );
-assert.eq( 3 , t.count() , "D" );
+assert.eq("joe", db.eval(z, 3, "joe"), "C");
+assert.eq(3, t.count(), "D");
-assert.eq( "eliot,joe,sara" , t.find().toArray().map( function(z){ return z.name; } ).sort().toString() );
+assert.eq("eliot,joe,sara",
+ t.find()
+ .toArray()
+ .map(function(z) {
+ return z.name;
+ })
+ .sort()
+ .toString());
diff --git a/jstests/core/eval4.js b/jstests/core/eval4.js
index 31d6ef0c2a8..0d120b393de 100644
--- a/jstests/core/eval4.js
+++ b/jstests/core/eval4.js
@@ -2,22 +2,21 @@
t = db.eval4;
t.drop();
-t.save( { a : 1 } );
-t.save( { a : 2 } );
-t.save( { a : 3 } );
+t.save({a: 1});
+t.save({a: 2});
+t.save({a: 3});
-assert.eq( 3 , t.count() , "A" );
+assert.eq(3, t.count(), "A");
-function f( x ){
- db.eval4.remove( { a : x } );
+function f(x) {
+ db.eval4.remove({a: x});
}
-f( 2 );
-assert.eq( 2 , t.count() , "B" );
+f(2);
+assert.eq(2, t.count(), "B");
-db.eval( f , 2 );
-assert.eq( 2 , t.count() , "C" );
-
-db.eval( f , 3 );
-assert.eq( 1 , t.count() , "D" );
+db.eval(f, 2);
+assert.eq(2, t.count(), "C");
+db.eval(f, 3);
+assert.eq(1, t.count(), "D");
diff --git a/jstests/core/eval5.js b/jstests/core/eval5.js
index a9223a555a6..46bd679dd77 100644
--- a/jstests/core/eval5.js
+++ b/jstests/core/eval5.js
@@ -2,22 +2,15 @@
t = db.eval5;
t.drop();
-t.save( { a : 1 , b : 2 , c : 3 } );
+t.save({a: 1, b: 2, c: 3});
-assert.eq( 3 ,
- db.eval(
- function(z){
- return db.eval5.find().toArray()[0].c;
- }
- ) ,
- "something weird A"
- );
+assert.eq(3,
+ db.eval(function(z) {
+ return db.eval5.find().toArray()[0].c;
+ }),
+ "something weird A");
-assert.isnull(
- db.eval(
- function(z){
- return db.eval5.find( {} , { a : 1 } ).toArray()[0].c;
- }
- ),
- "field spec didn't work"
- );
+assert.isnull(db.eval(function(z) {
+ return db.eval5.find({}, {a: 1}).toArray()[0].c;
+}),
+ "field spec didn't work");
diff --git a/jstests/core/eval6.js b/jstests/core/eval6.js
index 5fe096974c6..31258f6917b 100644
--- a/jstests/core/eval6.js
+++ b/jstests/core/eval6.js
@@ -2,14 +2,12 @@
t = db.eval6;
t.drop();
-t.save( { a : 1 } );
+t.save({a: 1});
-db.eval(
- function(){
- o = db.eval6.findOne();
- o.b = 2;
- db.eval6.save( o );
- }
-);
+db.eval(function() {
+ o = db.eval6.findOne();
+ o.b = 2;
+ db.eval6.save(o);
+});
-assert.eq( 2 , t.findOne().b );
+assert.eq(2, t.findOne().b);
diff --git a/jstests/core/eval7.js b/jstests/core/eval7.js
index 3d706a2eaa7..89f395d5128 100644
--- a/jstests/core/eval7.js
+++ b/jstests/core/eval7.js
@@ -1,3 +1,5 @@
-assert.eq( 6 , db.eval( "5 + 1" ) , "A" );
-assert.throws( function(z){ db.eval( "5 + function x; + 1" );} );
+assert.eq(6, db.eval("5 + 1"), "A");
+assert.throws(function(z) {
+ db.eval("5 + function x; + 1");
+});
diff --git a/jstests/core/eval8.js b/jstests/core/eval8.js
index e2ec3db31a8..24f710f4b9f 100644
--- a/jstests/core/eval8.js
+++ b/jstests/core/eval8.js
@@ -2,18 +2,21 @@
t = db.eval8;
t.drop();
-x = { a : 1 , b : 2 };
-t.save( x );
+x = {
+ a: 1,
+ b: 2
+};
+t.save(x);
x = t.findOne();
-assert( x.a && x.b , "A" );
+assert(x.a && x.b, "A");
delete x.b;
-assert( x.a && ! x.b , "B" );
+assert(x.a && !x.b, "B");
x.b = 3;
-assert( x.a && x.b , "C" );
-assert.eq( 3 , x.b , "D" );
+assert(x.a && x.b, "C");
+assert.eq(3, x.b, "D");
-t.save( x );
+t.save(x);
y = t.findOne();
-assert.eq( tojson( x ) , tojson( y ) , "E" );
+assert.eq(tojson(x), tojson(y), "E");
diff --git a/jstests/core/eval9.js b/jstests/core/eval9.js
index a6d8560416e..6998345bf13 100644
--- a/jstests/core/eval9.js
+++ b/jstests/core/eval9.js
@@ -1,22 +1,23 @@
-a = [ 1 , "asd" , null , [ 2 , 3 ] , new Date() , { x : 1 } ];
+a = [1, "asd", null, [2, 3], new Date(), {x: 1}];
-for ( var i=0; i<a.length; i++ ){
- var ret = db.eval( "function( a , i ){ return a[i]; }" , a , i );
- assert.eq( typeof( a[i] ) , typeof( ret ) , "type test" );
- assert.eq( a[i] , ret , "val test: " + typeof( a[i] ) );
+for (var i = 0; i < a.length; i++) {
+ var ret = db.eval("function( a , i ){ return a[i]; }", a, i);
+ assert.eq(typeof(a[i]), typeof(ret), "type test");
+ assert.eq(a[i], ret, "val test: " + typeof(a[i]));
}
db.eval9.drop();
-db.eval9.save( { a : 17 } );
+db.eval9.save({a: 17});
-assert.eq( 1 , db.eval( "return db.eval9.find().toArray()" ).length , "A" );
-assert.eq( 17 , db.eval( "return db.eval9.find().toArray()" )[0].a , "B" );
+assert.eq(1, db.eval("return db.eval9.find().toArray()").length, "A");
+assert.eq(17, db.eval("return db.eval9.find().toArray()")[0].a, "B");
// just to make sure these things don't crash (but may throw an exception)
try {
- db.eval( "return db.eval9.find()" );
- db.eval( "return db.eval9" );
- db.eval( "return db" );
- db.eval( "return print" );
-} catch (ex) { } \ No newline at end of file
+ db.eval("return db.eval9.find()");
+ db.eval("return db.eval9");
+ db.eval("return db");
+ db.eval("return print");
+} catch (ex) {
+} \ No newline at end of file
diff --git a/jstests/core/eval_mr.js b/jstests/core/eval_mr.js
index 84a929035d6..84036b1e0d5 100644
--- a/jstests/core/eval_mr.js
+++ b/jstests/core/eval_mr.js
@@ -7,9 +7,15 @@
assert.writeOK(db.eval_mr.insert({val: 2}));
var runBasicMapReduce = function() {
return db.eval_mr.runCommand("mapReduce",
- {map: function() { emit(0, this.val); },
- reduce: function(id, values) { return Array.sum(values); },
- out: {replace: "eval_mr_out"}});
+ {
+ map: function() {
+ emit(0, this.val);
+ },
+ reduce: function(id, values) {
+ return Array.sum(values);
+ },
+ out: {replace: "eval_mr_out"}
+ });
};
assert.commandWorked(runBasicMapReduce());
assert.eq(3, db.eval_mr_out.findOne().value);
diff --git a/jstests/core/eval_nolock.js b/jstests/core/eval_nolock.js
index 4701df9b7f7..9511784becb 100644
--- a/jstests/core/eval_nolock.js
+++ b/jstests/core/eval_nolock.js
@@ -2,15 +2,15 @@
t = db.eval_nolock;
t.drop();
-for ( i=0; i<10; i++ )
- t.insert( { _id : i } );
+for (i = 0; i < 10; i++)
+ t.insert({_id: i});
-res = db.runCommand( { eval :
- function(){
- db.eval_nolock.insert( { _id : 123 } );
- return db.eval_nolock.count();
- }
- , nolock : true } );
-
-assert.eq( 11 , res.retval , "A" );
+res = db.runCommand({
+ eval: function() {
+ db.eval_nolock.insert({_id: 123});
+ return db.eval_nolock.count();
+ },
+ nolock: true
+});
+assert.eq(11, res.retval, "A");
diff --git a/jstests/core/evala.js b/jstests/core/evala.js
index 88d479127c0..7ccf33ac754 100644
--- a/jstests/core/evala.js
+++ b/jstests/core/evala.js
@@ -2,8 +2,7 @@
t = db.evala;
t.drop();
-t.save( { x : 5 } );
-
-assert.eq( 5 , db.eval( "function(){ return db.evala.findOne().x; }" ) , "A" );
-assert.eq( 5 , db.eval( "/* abc */function(){ return db.evala.findOne().x; }" ) , "B" );
+t.save({x: 5});
+assert.eq(5, db.eval("function(){ return db.evala.findOne().x; }"), "A");
+assert.eq(5, db.eval("/* abc */function(){ return db.evala.findOne().x; }"), "B");
diff --git a/jstests/core/evalb.js b/jstests/core/evalb.js
index 5e8fac05537..3391c4cc4f2 100644
--- a/jstests/core/evalb.js
+++ b/jstests/core/evalb.js
@@ -3,39 +3,44 @@
// Use a reserved database name to avoid a conflict in the parallel test suite.
var stddb = db;
-var db = db.getSisterDB( 'evalb' );
+var db = db.getSisterDB('evalb');
function profileCursor() {
- return db.system.profile.find( { user:username + "@" + db.getName() } );
+ return db.system.profile.find({user: username + "@" + db.getName()});
}
function lastOp() {
- return profileCursor().sort( { $natural:-1 } ).next();
+ return profileCursor().sort({$natural: -1}).next();
}
try {
-
username = 'jstests_evalb_user';
db.dropUser(username);
db.createUser({user: username, pwd: 'password', roles: jsTest.basicUserRoles});
- db.auth( username, 'password' );
+ db.auth(username, 'password');
t = db.evalb;
t.drop();
- t.save( { x:3 } );
+ t.save({x: 3});
- assert.eq( 3, db.eval( function() { return db.evalb.findOne().x; } ), 'A' );
+ assert.eq(3,
+ db.eval(function() {
+ return db.evalb.findOne().x;
+ }),
+ 'A');
- db.setProfilingLevel( 2 );
+ db.setProfilingLevel(2);
- assert.eq( 3, db.eval( function() { return db.evalb.findOne().x; } ), 'B' );
+ assert.eq(3,
+ db.eval(function() {
+ return db.evalb.findOne().x;
+ }),
+ 'B');
o = lastOp();
- assert( tojson( o ).indexOf( 'findOne().x' ) > 0, 'C : ' + tojson( o ) );
-}
-finally {
-
+ assert(tojson(o).indexOf('findOne().x') > 0, 'C : ' + tojson(o));
+} finally {
db.setProfilingLevel(0);
db = stddb;
}
diff --git a/jstests/core/evalc.js b/jstests/core/evalc.js
index 36ea04037d0..0d55790afe3 100644
--- a/jstests/core/evalc.js
+++ b/jstests/core/evalc.js
@@ -4,22 +4,23 @@ t.drop();
t2 = db.evalc_done;
t2.drop();
-for( i = 0; i < 10; ++i ) {
- t.save( {i:i} );
+for (i = 0; i < 10; ++i) {
+ t.save({i: i});
}
// SERVER-1610
-assert.eq( 0 , t2.count() , "X1" );
+assert.eq(0, t2.count(), "X1");
-s = startParallelShell( "print( 'starting forked:' + Date() ); for ( i=0; i<10*1000; i++ ){ db.currentOp(); } print( 'ending forked:' + Date() ); db.evalc_done.insert( { x : 1 } ); " );
+s = startParallelShell(
+ "print( 'starting forked:' + Date() ); for ( i=0; i<10*1000; i++ ){ db.currentOp(); } print( 'ending forked:' + Date() ); db.evalc_done.insert( { x : 1 } ); ");
-print( "starting eval: " + Date() );
+print("starting eval: " + Date());
assert.soon(function() {
- db.eval( "db.jstests_evalc.count( {i:10} );" );
+ db.eval("db.jstests_evalc.count( {i:10} );");
return t2.count() > 0;
}, 'parallel shell failed to update ' + t2.getFullName(), 120000, 10);
-print( "end eval: " + Date() );
+print("end eval: " + Date());
s();
diff --git a/jstests/core/evald.js b/jstests/core/evald.js
index 7e516e9f7d6..8049d2ba8ae 100644
--- a/jstests/core/evald.js
+++ b/jstests/core/evald.js
@@ -1,25 +1,26 @@
t = db.jstests_evald;
t.drop();
-function debug( x ) {
-// printjson( x );
+function debug(x) {
+ // printjson( x );
}
-for( i = 0; i < 10; ++i ) {
- t.save( {i:i} );
+for (i = 0; i < 10; ++i) {
+ t.save({i: i});
}
-function op( ev, where ) {
+function op(ev, where) {
p = db.currentOp().inprog;
- debug( p );
- for ( var i in p ) {
- var o = p[ i ];
- if ( where ) {
- if ( o.active && o.query && o.query.query && o.query.query.$where && o.ns == "test.jstests_evald" ) {
+ debug(p);
+ for (var i in p) {
+ var o = p[i];
+ if (where) {
+ if (o.active && o.query && o.query.query && o.query.query.$where &&
+ o.ns == "test.jstests_evald") {
return o.opid;
}
} else {
- if ( o.active && o.query && o.query.$eval && o.query.$eval == ev ) {
+ if (o.active && o.query && o.query.$eval && o.query.$eval == ev) {
return o.opid;
}
}
@@ -27,31 +28,34 @@ function op( ev, where ) {
return -1;
}
-function doIt( ev, wait, where ) {
+function doIt(ev, wait, where) {
var awaitShell;
- if ( where ) {
- awaitShell = startParallelShell( ev );
+ if (where) {
+ awaitShell = startParallelShell(ev);
} else {
- awaitShell = startParallelShell( "db.eval( '" + ev + "' )" );
+ awaitShell = startParallelShell("db.eval( '" + ev + "' )");
}
o = null;
- assert.soon( function() { o = op( ev, where ); return o != -1; } );
+ assert.soon(function() {
+ o = op(ev, where);
+ return o != -1;
+ });
- if ( wait ) {
- sleep( 2000 );
+ if (wait) {
+ sleep(2000);
}
- debug( "going to kill" );
+ debug("going to kill");
- db.killOp( o );
+ db.killOp(o);
- debug( "sent kill" );
+ debug("sent kill");
var exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(0, exitCode,
- "expected shell to exit abnormally due to JS execution being terminated");
+ assert.neq(
+ 0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
}
// nested scope with nested invoke()
@@ -75,24 +79,18 @@ doIt("while(1) { for( var i = 0; i < 10000; ++i ) {;} db.jstests_evald.count();
// try/catch with tight-loop kill tests.
// native callback with nested invoke(), drop JS exceptions
doIt("while(1) { " +
- " for(var i = 0; i < 10000; ++i) {;} " +
- " try { " +
- " db.jstests_evald.count({i:10}); " +
- " } catch (e) {} " +
- "}", true );
+ " for(var i = 0; i < 10000; ++i) {;} " +
+ " try { " +
+ " db.jstests_evald.count({i:10}); " +
+ " } catch (e) {} " + "}",
+ true);
// native callback, drop JS exceptions
-doIt("while(1) { " +
- " try { " +
- " while(1) { " +
- " sleep(1); " +
- " } " +
- " } catch (e) {} " +
- "}", true );
+doIt("while(1) { " + " try { " + " while(1) { " +
+ " sleep(1); " + " } " + " } catch (e) {} " + "}",
+ true);
// no native callback and drop JS exceptions
-doIt("while(1) { " +
- " try { " +
- " while(1) {;} " +
- " } catch (e) {} " +
- "}", true );
+doIt("while(1) { " + " try { " + " while(1) {;} " +
+ " } catch (e) {} " + "}",
+ true);
diff --git a/jstests/core/evale.js b/jstests/core/evale.js
index a0f81942479..1ddc8519fc6 100644
--- a/jstests/core/evale.js
+++ b/jstests/core/evale.js
@@ -1,5 +1,11 @@
t = db.jstests_evale;
t.drop();
-db.eval( function() { return db.jstests_evale.count( { $where:function() { return true; } } ); } );
-db.eval( "db.jstests_evale.count( { $where:function() { return true; } } )" ); \ No newline at end of file
+db.eval(function() {
+ return db.jstests_evale.count({
+ $where: function() {
+ return true;
+ }
+ });
+});
+db.eval("db.jstests_evale.count( { $where:function() { return true; } } )"); \ No newline at end of file
diff --git a/jstests/core/evalg.js b/jstests/core/evalg.js
index 280e5261ef9..570464cbce2 100644
--- a/jstests/core/evalg.js
+++ b/jstests/core/evalg.js
@@ -1,11 +1,12 @@
// SERVER-17499: Test behavior of getMore on aggregation cursor under eval command.
db.evalg.drop();
-for (var i = 0; i < 102; ++i){
+for (var i = 0; i < 102; ++i) {
db.evalg.insert({});
}
-assert.eq(102, db.eval(function() {
- var cursor = db.evalg.aggregate();
- assert(cursor.hasNext());
- assert.eq(101, cursor.objsLeftInBatch());
- return cursor.itcount();
-}));
+assert.eq(102,
+ db.eval(function() {
+ var cursor = db.evalg.aggregate();
+ assert(cursor.hasNext());
+ assert.eq(101, cursor.objsLeftInBatch());
+ return cursor.itcount();
+ }));
diff --git a/jstests/core/exists.js b/jstests/core/exists.js
index e41a7cfde04..a29f0cb1d45 100644
--- a/jstests/core/exists.js
+++ b/jstests/core/exists.js
@@ -1,49 +1,48 @@
t = db.jstests_exists;
t.drop();
-t.save( {} );
-t.save( {a:1} );
-t.save( {a:{b:1}} );
-t.save( {a:{b:{c:1}}} );
-t.save( {a:{b:{c:{d:null}}}} );
-
-function dotest( n ){
-
- assert.eq( 5, t.count() , n );
- assert.eq( 1, t.count( {a:null} ) , n );
- assert.eq( 2, t.count( {'a.b':null} ) , n );
- assert.eq( 3, t.count( {'a.b.c':null} ) , n );
- assert.eq( 5, t.count( {'a.b.c.d':null} ) , n );
-
- assert.eq( 5, t.count() , n );
- assert.eq( 4, t.count( {a:{$ne:null}} ) , n );
- assert.eq( 3, t.count( {'a.b':{$ne:null}} ) , n );
- assert.eq( 2, t.count( {'a.b.c':{$ne:null}} ) , n );
- assert.eq( 0, t.count( {'a.b.c.d':{$ne:null}} ) , n );
-
- assert.eq( 4, t.count( {a: {$exists:true}} ) , n );
- assert.eq( 3, t.count( {'a.b': {$exists:true}} ) , n );
- assert.eq( 2, t.count( {'a.b.c': {$exists:true}} ) , n );
- assert.eq( 1, t.count( {'a.b.c.d': {$exists:true}} ) , n );
-
- assert.eq( 1, t.count( {a: {$exists:false}} ) , n );
- assert.eq( 2, t.count( {'a.b': {$exists:false}} ) , n );
- assert.eq( 3, t.count( {'a.b.c': {$exists:false}} ) , n );
- assert.eq( 4, t.count( {'a.b.c.d': {$exists:false}} ) , n );
+t.save({});
+t.save({a: 1});
+t.save({a: {b: 1}});
+t.save({a: {b: {c: 1}}});
+t.save({a: {b: {c: {d: null}}}});
+
+function dotest(n) {
+ assert.eq(5, t.count(), n);
+ assert.eq(1, t.count({a: null}), n);
+ assert.eq(2, t.count({'a.b': null}), n);
+ assert.eq(3, t.count({'a.b.c': null}), n);
+ assert.eq(5, t.count({'a.b.c.d': null}), n);
+
+ assert.eq(5, t.count(), n);
+ assert.eq(4, t.count({a: {$ne: null}}), n);
+ assert.eq(3, t.count({'a.b': {$ne: null}}), n);
+ assert.eq(2, t.count({'a.b.c': {$ne: null}}), n);
+ assert.eq(0, t.count({'a.b.c.d': {$ne: null}}), n);
+
+ assert.eq(4, t.count({a: {$exists: true}}), n);
+ assert.eq(3, t.count({'a.b': {$exists: true}}), n);
+ assert.eq(2, t.count({'a.b.c': {$exists: true}}), n);
+ assert.eq(1, t.count({'a.b.c.d': {$exists: true}}), n);
+
+ assert.eq(1, t.count({a: {$exists: false}}), n);
+ assert.eq(2, t.count({'a.b': {$exists: false}}), n);
+ assert.eq(3, t.count({'a.b.c': {$exists: false}}), n);
+ assert.eq(4, t.count({'a.b.c.d': {$exists: false}}), n);
}
-dotest( "before index" );
-t.ensureIndex( { "a" : 1 } );
-t.ensureIndex( { "a.b" : 1 } );
-t.ensureIndex( { "a.b.c" : 1 } );
-t.ensureIndex( { "a.b.c.d" : 1 } );
-dotest( "after index" );
-assert.eq( 1, t.find( {a: {$exists:false}} ).hint( {a:1} ).itcount() );
-
+dotest("before index");
+t.ensureIndex({"a": 1});
+t.ensureIndex({"a.b": 1});
+t.ensureIndex({"a.b.c": 1});
+t.ensureIndex({"a.b.c.d": 1});
+dotest("after index");
+assert.eq(1, t.find({a: {$exists: false}}).hint({a: 1}).itcount());
+
t.drop();
-t.save( {r:[{s:1}]} );
-assert( t.findOne( {'r.s':{$exists:true}} ) );
-assert( !t.findOne( {'r.s':{$exists:false}} ) );
-assert( !t.findOne( {'r.t':{$exists:true}} ) );
-assert( t.findOne( {'r.t':{$exists:false}} ) );
+t.save({r: [{s: 1}]});
+assert(t.findOne({'r.s': {$exists: true}}));
+assert(!t.findOne({'r.s': {$exists: false}}));
+assert(!t.findOne({'r.t': {$exists: true}}));
+assert(t.findOne({'r.t': {$exists: false}}));
diff --git a/jstests/core/exists2.js b/jstests/core/exists2.js
index 90ff25e0feb..5d8a0d80f91 100644
--- a/jstests/core/exists2.js
+++ b/jstests/core/exists2.js
@@ -2,15 +2,14 @@
t = db.exists2;
t.drop();
-t.save( { a : 1 , b : 1 } );
-t.save( { a : 1 , b : 1 , c : 1 } );
+t.save({a: 1, b: 1});
+t.save({a: 1, b: 1, c: 1});
-assert.eq( 2 , t.find().itcount() , "A1" );
-assert.eq( 2 , t.find( { a : 1 , b : 1 } ).itcount() , "A2" );
-assert.eq( 1 , t.find( { a : 1 , b : 1 , c : { "$exists" : true } } ).itcount() , "A3" );
-assert.eq( 1 , t.find( { a : 1 , b : 1 , c : { "$exists" : false } } ).itcount() , "A4" );
-
-t.ensureIndex( { a : 1 , b : 1 , c : 1 } );
-assert.eq( 1 , t.find( { a : 1 , b : 1 , c : { "$exists" : true } } ).itcount() , "B1" );
-assert.eq( 1 , t.find( { a : 1 , b : 1 , c : { "$exists" : false } } ).itcount() , "B2" );
+assert.eq(2, t.find().itcount(), "A1");
+assert.eq(2, t.find({a: 1, b: 1}).itcount(), "A2");
+assert.eq(1, t.find({a: 1, b: 1, c: {"$exists": true}}).itcount(), "A3");
+assert.eq(1, t.find({a: 1, b: 1, c: {"$exists": false}}).itcount(), "A4");
+t.ensureIndex({a: 1, b: 1, c: 1});
+assert.eq(1, t.find({a: 1, b: 1, c: {"$exists": true}}).itcount(), "B1");
+assert.eq(1, t.find({a: 1, b: 1, c: {"$exists": false}}).itcount(), "B2");
diff --git a/jstests/core/exists3.js b/jstests/core/exists3.js
index c61f022939c..e4ce03437bb 100644
--- a/jstests/core/exists3.js
+++ b/jstests/core/exists3.js
@@ -5,17 +5,17 @@ t.drop();
t.insert({a: 1, b: 2});
-assert.eq( 1, t.find({}).sort({c: -1}).itcount() );
-assert.eq( 1, t.count({c: {$exists: false}}) );
-assert.eq( 1, t.find({c: {$exists: false}}).itcount() );
-assert.eq( 1, t.find({c: {$exists: false}}).sort({c: -1}).itcount() );
+assert.eq(1, t.find({}).sort({c: -1}).itcount());
+assert.eq(1, t.count({c: {$exists: false}}));
+assert.eq(1, t.find({c: {$exists: false}}).itcount());
+assert.eq(1, t.find({c: {$exists: false}}).sort({c: -1}).itcount());
-// now we have an index on the sort key
+// now we have an index on the sort key
t.ensureIndex({c: -1});
-assert.eq( 1, t.find({c: {$exists: false}}).sort({c: -1}).itcount() );
-assert.eq( 1, t.find({c: {$exists: false}}).itcount() );
-// still ok without the $exists
-assert.eq( 1, t.find({}).sort({c: -1}).itcount() );
-// and ok with a convoluted $not $exists
-assert.eq( 1, t.find({c: {$not: {$exists: true}}}).sort({c: -1}).itcount() );
+assert.eq(1, t.find({c: {$exists: false}}).sort({c: -1}).itcount());
+assert.eq(1, t.find({c: {$exists: false}}).itcount());
+// still ok without the $exists
+assert.eq(1, t.find({}).sort({c: -1}).itcount());
+// and ok with a convoluted $not $exists
+assert.eq(1, t.find({c: {$not: {$exists: true}}}).sort({c: -1}).itcount());
diff --git a/jstests/core/exists4.js b/jstests/core/exists4.js
index fb801ed62e9..097a3462da9 100644
--- a/jstests/core/exists4.js
+++ b/jstests/core/exists4.js
@@ -3,18 +3,43 @@
t = db.jstests_exists4;
t.drop();
-t.ensureIndex({date: -1, country_code: 1, user_id: 1}, {unique: 1, background: 1});
-t.insert({ date: new Date("08/27/2010"), tot_visit: 100});
-t.insert({ date: new Date("08/27/2010"), country_code: "IT", tot_visit: 77});
-t.insert({ date: new Date("08/27/2010"), country_code: "ES", tot_visit: 23});
-t.insert({ date: new Date("08/27/2010"), country_code: "ES", user_id: "and...@spacca.org", tot_visit: 11});
-t.insert({ date: new Date("08/27/2010"), country_code: "ES", user_id: "andrea.spa...@gmail.com", tot_visit: 5});
-t.insert({ date: new Date("08/27/2010"), country_code: "ES", user_id: "andrea.spa...@progloedizioni.com", tot_visit: 7});
+t.ensureIndex({date: -1, country_code: 1, user_id: 1}, {unique: 1, background: 1});
+t.insert({date: new Date("08/27/2010"), tot_visit: 100});
+t.insert({date: new Date("08/27/2010"), country_code: "IT", tot_visit: 77});
+t.insert({date: new Date("08/27/2010"), country_code: "ES", tot_visit: 23});
+t.insert({
+ date: new Date("08/27/2010"),
+ country_code: "ES",
+ user_id: "and...@spacca.org",
+ tot_visit: 11
+});
+t.insert({
+ date: new Date("08/27/2010"),
+ country_code: "ES",
+ user_id: "andrea.spa...@gmail.com",
+ tot_visit: 5
+});
+t.insert({
+ date: new Date("08/27/2010"),
+ country_code: "ES",
+ user_id: "andrea.spa...@progloedizioni.com",
+ tot_visit: 7
+});
-assert.eq( 6, t.find({date: new Date("08/27/2010")}).count() );
-assert.eq( 5, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}}).count() );
-assert.eq( 1, t.find({date: new Date("08/27/2010"), country_code: {$exists: false}}).count() );
-assert.eq( 1, t.find({date: new Date("08/27/2010"), country_code: null}).count() );
-assert.eq( 3, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: {$exists: true}}).count() );
-assert.eq( 2, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: {$exists: false}}).count() );
-assert.eq( 2, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: null}).count() );
+assert.eq(6, t.find({date: new Date("08/27/2010")}).count());
+assert.eq(5, t.find({date: new Date("08/27/2010"), country_code: {$exists: true}}).count());
+assert.eq(1, t.find({date: new Date("08/27/2010"), country_code: {$exists: false}}).count());
+assert.eq(1, t.find({date: new Date("08/27/2010"), country_code: null}).count());
+assert.eq(
+ 3,
+ t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: {$exists: true}})
+ .count());
+assert.eq(2,
+ t.find({
+ date: new Date("08/27/2010"),
+ country_code: {$exists: true},
+ user_id: {$exists: false}
+ }).count());
+assert.eq(2,
+ t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: null})
+ .count());
diff --git a/jstests/core/exists5.js b/jstests/core/exists5.js
index a90a94f908f..2f4b1a9b8de 100644
--- a/jstests/core/exists5.js
+++ b/jstests/core/exists5.js
@@ -3,31 +3,31 @@
t = db.jstests_exists5;
t.drop();
-t.save( {a:1} );
-assert.eq( 1, t.count( {'a.b':{$exists:false}} ) );
-assert.eq( 1, t.count( {'a.b':{$not:{$exists:true}}} ) );
-assert.eq( 1, t.count( {'c.d':{$not:{$exists:true}}} ) );
-assert.eq( 0, t.count( {'a.b':{$exists:true}} ) );
-assert.eq( 0, t.count( {'a.b':{$not:{$exists:false}}} ) );
-assert.eq( 0, t.count( {'c.d':{$not:{$exists:false}}} ) );
+t.save({a: 1});
+assert.eq(1, t.count({'a.b': {$exists: false}}));
+assert.eq(1, t.count({'a.b': {$not: {$exists: true}}}));
+assert.eq(1, t.count({'c.d': {$not: {$exists: true}}}));
+assert.eq(0, t.count({'a.b': {$exists: true}}));
+assert.eq(0, t.count({'a.b': {$not: {$exists: false}}}));
+assert.eq(0, t.count({'c.d': {$not: {$exists: false}}}));
t.drop();
-t.save( {a:{b:1}} );
-assert.eq( 1, t.count( {'a.b':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.b':{$not:{$exists:false}}} ) );
-assert.eq( 0, t.count( {'a.b':{$exists:false}} ) );
-assert.eq( 0, t.count( {'a.b':{$not:{$exists:true}}} ) );
+t.save({a: {b: 1}});
+assert.eq(1, t.count({'a.b': {$exists: true}}));
+assert.eq(1, t.count({'a.b': {$not: {$exists: false}}}));
+assert.eq(0, t.count({'a.b': {$exists: false}}));
+assert.eq(0, t.count({'a.b': {$not: {$exists: true}}}));
t.drop();
-t.save( {a:[1]} );
-assert.eq( 1, t.count( {'a.b':{$exists:false}} ) );
-assert.eq( 1, t.count( {'a.b':{$not:{$exists:true}}} ) );
-assert.eq( 0, t.count( {'a.b':{$exists:true}} ) );
-assert.eq( 0, t.count( {'a.b':{$not:{$exists:false}}} ) );
+t.save({a: [1]});
+assert.eq(1, t.count({'a.b': {$exists: false}}));
+assert.eq(1, t.count({'a.b': {$not: {$exists: true}}}));
+assert.eq(0, t.count({'a.b': {$exists: true}}));
+assert.eq(0, t.count({'a.b': {$not: {$exists: false}}}));
t.drop();
-t.save( {a:[{b:1}]} );
-assert.eq( 1, t.count( {'a.b':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.b':{$not:{$exists:false}}} ) );
-assert.eq( 0, t.count( {'a.b':{$exists:false}} ) );
-assert.eq( 0, t.count( {'a.b':{$not:{$exists:true}}} ) );
+t.save({a: [{b: 1}]});
+assert.eq(1, t.count({'a.b': {$exists: true}}));
+assert.eq(1, t.count({'a.b': {$not: {$exists: false}}}));
+assert.eq(0, t.count({'a.b': {$exists: false}}));
+assert.eq(0, t.count({'a.b': {$not: {$exists: true}}}));
diff --git a/jstests/core/exists6.js b/jstests/core/exists6.js
index 79d4885283d..67ab7e5345d 100644
--- a/jstests/core/exists6.js
+++ b/jstests/core/exists6.js
@@ -3,23 +3,23 @@
t = db.jstests_exists6;
t.drop();
-t.ensureIndex( {b:1} );
-t.save( {} );
-t.save( {b:1} );
-t.save( {b:null} );
+t.ensureIndex({b: 1});
+t.save({});
+t.save({b: 1});
+t.save({b: null});
-assert.eq( 2, t.find({b:{$exists:true}}).itcount() );
-assert.eq( 2, t.find({b:{$not:{$exists:false}}}).itcount() );
-assert.eq( 1, t.find({b:{$exists:false}}).itcount() );
-assert.eq( 1, t.find({b:{$not:{$exists:true}}}).itcount() );
+assert.eq(2, t.find({b: {$exists: true}}).itcount());
+assert.eq(2, t.find({b: {$not: {$exists: false}}}).itcount());
+assert.eq(1, t.find({b: {$exists: false}}).itcount());
+assert.eq(1, t.find({b: {$not: {$exists: true}}}).itcount());
// Now check existence of second compound field.
-t.ensureIndex( {a:1,b:1} );
-t.save( {a:1} );
-t.save( {a:1,b:1} );
-t.save( {a:1,b:null} );
+t.ensureIndex({a: 1, b: 1});
+t.save({a: 1});
+t.save({a: 1, b: 1});
+t.save({a: 1, b: null});
-assert.eq( 2, t.find({a:1,b:{$exists:true}}).itcount() );
-assert.eq( 2, t.find({a:1,b:{$not:{$exists:false}}}).itcount() );
-assert.eq( 1, t.find({a:1,b:{$exists:false}}).itcount() );
-assert.eq( 1, t.find({a:1,b:{$not:{$exists:true}}}).itcount() );
+assert.eq(2, t.find({a: 1, b: {$exists: true}}).itcount());
+assert.eq(2, t.find({a: 1, b: {$not: {$exists: false}}}).itcount());
+assert.eq(1, t.find({a: 1, b: {$exists: false}}).itcount());
+assert.eq(1, t.find({a: 1, b: {$not: {$exists: true}}}).itcount());
diff --git a/jstests/core/exists7.js b/jstests/core/exists7.js
index ce278ae1a57..ab02a41d445 100644
--- a/jstests/core/exists7.js
+++ b/jstests/core/exists7.js
@@ -6,16 +6,16 @@ t.drop();
function testIntegerExistsSpec() {
t.remove({});
- t.save( {} );
- t.save( {a:1} );
- t.save( {a:2} );
- t.save( {a:3, b:3} );
- t.save( {a:4, b:4} );
+ t.save({});
+ t.save({a: 1});
+ t.save({a: 2});
+ t.save({a: 3, b: 3});
+ t.save({a: 4, b: 4});
- assert.eq( 2, t.count( {b:{$exists:1}} ) );
- assert.eq( 3, t.count( {b:{$exists:0}} ) );
+ assert.eq(2, t.count({b: {$exists: 1}}));
+ assert.eq(3, t.count({b: {$exists: 0}}));
}
testIntegerExistsSpec();
-t.ensureIndex( {b:1} );
+t.ensureIndex({b: 1});
testIntegerExistsSpec();
diff --git a/jstests/core/exists8.js b/jstests/core/exists8.js
index ca62ebeb9ab..4a8f66461a3 100644
--- a/jstests/core/exists8.js
+++ b/jstests/core/exists8.js
@@ -3,74 +3,74 @@
t = db.jstests_exists8;
t.drop();
-t.save( {a:[1]} );
-assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.1':{$exists:false}} ) );
-assert.eq( 0, t.count( {'a.0':{$exists:false}} ) );
-assert.eq( 0, t.count( {'a.1':{$exists:true}} ) );
+t.save({a: [1]});
+assert.eq(1, t.count({'a.0': {$exists: true}}));
+assert.eq(1, t.count({'a.1': {$exists: false}}));
+assert.eq(0, t.count({'a.0': {$exists: false}}));
+assert.eq(0, t.count({'a.1': {$exists: true}}));
t.remove({});
-t.save( {a:[1,2]} );
-assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
-assert.eq( 0, t.count( {'a.1':{$exists:false}} ) );
-assert.eq( 0, t.count( {'a.0':{$exists:false}} ) );
-assert.eq( 1, t.count( {'a.1':{$exists:true}} ) );
+t.save({a: [1, 2]});
+assert.eq(1, t.count({'a.0': {$exists: true}}));
+assert.eq(0, t.count({'a.1': {$exists: false}}));
+assert.eq(0, t.count({'a.0': {$exists: false}}));
+assert.eq(1, t.count({'a.1': {$exists: true}}));
t.remove({});
-t.save( {a:[{}]} );
-assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.1':{$exists:false}} ) );
-assert.eq( 0, t.count( {'a.0':{$exists:false}} ) );
-assert.eq( 0, t.count( {'a.1':{$exists:true}} ) );
+t.save({a: [{}]});
+assert.eq(1, t.count({'a.0': {$exists: true}}));
+assert.eq(1, t.count({'a.1': {$exists: false}}));
+assert.eq(0, t.count({'a.0': {$exists: false}}));
+assert.eq(0, t.count({'a.1': {$exists: true}}));
t.remove({});
-t.save( {a:[{},{}]} );
-assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
-assert.eq( 0, t.count( {'a.1':{$exists:false}} ) );
-assert.eq( 0, t.count( {'a.0':{$exists:false}} ) );
-assert.eq( 1, t.count( {'a.1':{$exists:true}} ) );
+t.save({a: [{}, {}]});
+assert.eq(1, t.count({'a.0': {$exists: true}}));
+assert.eq(0, t.count({'a.1': {$exists: false}}));
+assert.eq(0, t.count({'a.0': {$exists: false}}));
+assert.eq(1, t.count({'a.1': {$exists: true}}));
t.remove({});
-t.save( {a:[{'b':2},{'a':1}]} );
-assert.eq( 1, t.count( {'a.a':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.1.a':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.0.a':{$exists:false}} ) );
+t.save({a: [{'b': 2}, {'a': 1}]});
+assert.eq(1, t.count({'a.a': {$exists: true}}));
+assert.eq(1, t.count({'a.1.a': {$exists: true}}));
+assert.eq(1, t.count({'a.0.a': {$exists: false}}));
t.remove({});
-t.save( {a:[[1]]} );
-assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.0.0':{$exists:true}} ) );
-assert.eq( 0, t.count( {'a.0.0':{$exists:false}} ) );
-assert.eq( 0, t.count( {'a.0.0.0':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.0.0.0':{$exists:false}} ) );
+t.save({a: [[1]]});
+assert.eq(1, t.count({'a.0': {$exists: true}}));
+assert.eq(1, t.count({'a.0.0': {$exists: true}}));
+assert.eq(0, t.count({'a.0.0': {$exists: false}}));
+assert.eq(0, t.count({'a.0.0.0': {$exists: true}}));
+assert.eq(1, t.count({'a.0.0.0': {$exists: false}}));
t.remove({});
-t.save( {a:[[[1]]]} );
-assert.eq( 1, t.count( {'a.0.0.0':{$exists:true}} ) );
+t.save({a: [[[1]]]});
+assert.eq(1, t.count({'a.0.0.0': {$exists: true}}));
t.remove({});
-t.save( {a:[[{b:1}]]} );
-assert.eq( 0, t.count( {'a.b':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.b':{$exists:false}} ) );
-assert.eq( 1, t.count( {'a.0.b':{$exists:true}} ) );
-assert.eq( 0, t.count( {'a.0.b':{$exists:false}} ) );
+t.save({a: [[{b: 1}]]});
+assert.eq(0, t.count({'a.b': {$exists: true}}));
+assert.eq(1, t.count({'a.b': {$exists: false}}));
+assert.eq(1, t.count({'a.0.b': {$exists: true}}));
+assert.eq(0, t.count({'a.0.b': {$exists: false}}));
t.remove({});
-t.save( {a:[[],[{b:1}]]} );
-assert.eq( 0, t.count( {'a.0.b':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.0.b':{$exists:false}} ) );
+t.save({a: [[], [{b: 1}]]});
+assert.eq(0, t.count({'a.0.b': {$exists: true}}));
+assert.eq(1, t.count({'a.0.b': {$exists: false}}));
t.remove({});
-t.save( {a:[[],[{b:1}]]} );
-assert.eq( 1, t.count( {'a.1.b':{$exists:true}} ) );
-assert.eq( 0, t.count( {'a.1.b':{$exists:false}} ) );
+t.save({a: [[], [{b: 1}]]});
+assert.eq(1, t.count({'a.1.b': {$exists: true}}));
+assert.eq(0, t.count({'a.1.b': {$exists: false}}));
t.remove({});
-t.save( {a:[[],[{b:1}]]} );
-assert.eq( 1, t.count( {'a.1.0.b':{$exists:true}} ) );
-assert.eq( 0, t.count( {'a.1.0.b':{$exists:false}} ) );
+t.save({a: [[], [{b: 1}]]});
+assert.eq(1, t.count({'a.1.0.b': {$exists: true}}));
+assert.eq(0, t.count({'a.1.0.b': {$exists: false}}));
t.remove({});
-t.save( {a:[[],[{b:1}]]} );
-assert.eq( 0, t.count( {'a.1.1.b':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.1.1.b':{$exists:false}} ) );
+t.save({a: [[], [{b: 1}]]});
+assert.eq(0, t.count({'a.1.1.b': {$exists: true}}));
+assert.eq(1, t.count({'a.1.1.b': {$exists: false}}));
diff --git a/jstests/core/exists9.js b/jstests/core/exists9.js
index 75b09018797..aaa7563e7df 100644
--- a/jstests/core/exists9.js
+++ b/jstests/core/exists9.js
@@ -4,37 +4,37 @@ t = db.jstests_exists9;
t.drop();
// Check existence of missing nested field.
-t.save( {a:{}} );
-assert.eq( 1, t.count( {'a.b':{$exists:false}} ) );
-assert.eq( 0, t.count( {'a.b':{$exists:true}} ) );
+t.save({a: {}});
+assert.eq(1, t.count({'a.b': {$exists: false}}));
+assert.eq(0, t.count({'a.b': {$exists: true}}));
// With index.
-t.ensureIndex( {'a.b':1} );
-assert.eq( 1, t.find( {'a.b':{$exists:false}} ).hint( {'a.b':1} ).itcount() );
-assert.eq( 0, t.find( {'a.b':{$exists:true}} ).hint( {'a.b':1} ).itcount() );
+t.ensureIndex({'a.b': 1});
+assert.eq(1, t.find({'a.b': {$exists: false}}).hint({'a.b': 1}).itcount());
+assert.eq(0, t.find({'a.b': {$exists: true}}).hint({'a.b': 1}).itcount());
t.drop();
// Check that an empty array 'exists'.
-t.save( {} );
-t.save( {a:[]} );
-assert.eq( 1, t.count( {a:{$exists:true}} ) );
-assert.eq( 1, t.count( {a:{$exists:false}} ) );
+t.save({});
+t.save({a: []});
+assert.eq(1, t.count({a: {$exists: true}}));
+assert.eq(1, t.count({a: {$exists: false}}));
// With index.
-t.ensureIndex( {a:1} );
-assert.eq( 1, t.find( {a:{$exists:true}} ).hint( {a:1} ).itcount() );
-assert.eq( 1, t.find( {a:{$exists:false}} ).hint( {a:1} ).itcount() );
+t.ensureIndex({a: 1});
+assert.eq(1, t.find({a: {$exists: true}}).hint({a: 1}).itcount());
+assert.eq(1, t.find({a: {$exists: false}}).hint({a: 1}).itcount());
t.drop();
// Check that an indexed field within an empty array does not exist.
-t.save( {a:{'0':1}} );
-t.save( {a:[]} );
-assert.eq( 1, t.count( {'a.0':{$exists:true}} ) );
-assert.eq( 1, t.count( {'a.0':{$exists:false}} ) );
+t.save({a: {'0': 1}});
+t.save({a: []});
+assert.eq(1, t.count({'a.0': {$exists: true}}));
+assert.eq(1, t.count({'a.0': {$exists: false}}));
// With index.
-t.ensureIndex( {'a.0':1} );
-assert.eq( 1, t.find( {'a.0':{$exists:true}} ).hint( {'a.0':1} ).itcount() );
-assert.eq( 1, t.find( {'a.0':{$exists:false}} ).hint( {'a.0':1} ).itcount() );
+t.ensureIndex({'a.0': 1});
+assert.eq(1, t.find({'a.0': {$exists: true}}).hint({'a.0': 1}).itcount());
+assert.eq(1, t.find({'a.0': {$exists: false}}).hint({'a.0': 1}).itcount());
diff --git a/jstests/core/existsa.js b/jstests/core/existsa.js
index 0d2472ca328..466a5e94a63 100644
--- a/jstests/core/existsa.js
+++ b/jstests/core/existsa.js
@@ -3,99 +3,103 @@
t = db.jstests_existsa;
t.drop();
-t.save( {} );
-t.save( { a:1 } );
-t.save( { a:{ x:1 }, b:1 } );
+t.save({});
+t.save({a: 1});
+t.save({a: {x: 1}, b: 1});
/** Configure testing of an index { <indexKeyField>:1 }. */
-function setIndex( _indexKeyField ) {
+function setIndex(_indexKeyField) {
indexKeyField = _indexKeyField;
indexKeySpec = {};
- indexKeySpec[ indexKeyField ] = 1;
- t.ensureIndex( indexKeySpec, { sparse:true } );
+ indexKeySpec[indexKeyField] = 1;
+ t.ensureIndex(indexKeySpec, {sparse: true});
}
-setIndex( 'a' );
+setIndex('a');
/** @return count when hinting the index to use. */
-function hintedCount( query ) {
- return t.find( query ).hint( indexKeySpec ).itcount();
+function hintedCount(query) {
+ return t.find(query).hint(indexKeySpec).itcount();
}
/** The query field does not exist and the sparse index is not used without a hint. */
-function assertMissing( query, expectedMissing, expectedIndexedMissing ) {
+function assertMissing(query, expectedMissing, expectedIndexedMissing) {
expectedMissing = expectedMissing || 1;
expectedIndexedMissing = expectedIndexedMissing || 0;
- assert.eq( expectedMissing, t.count( query ) );
+ assert.eq(expectedMissing, t.count(query));
// We also shouldn't get a different count depending on whether
// an index is used or not.
- assert.eq( expectedIndexedMissing, hintedCount( query ) );
+ assert.eq(expectedIndexedMissing, hintedCount(query));
}
/** The query field exists and the sparse index is used without a hint. */
-function assertExists( query, expectedExists ) {
+function assertExists(query, expectedExists) {
expectedExists = expectedExists || 2;
- assert.eq( expectedExists, t.count( query ) );
+ assert.eq(expectedExists, t.count(query));
// An $exists:true predicate generates no index filters. Add another predicate on the index key
// to trigger use of the index.
andClause = {};
- andClause[ indexKeyField ] = { $ne:null };
- Object.extend( query, { $and:[ andClause ] } );
- assert.eq( expectedExists, t.count( query ) );
- assert.eq( expectedExists, hintedCount( query ) );
+ andClause[indexKeyField] = {
+ $ne: null
+ };
+ Object.extend(query, {$and: [andClause]});
+ assert.eq(expectedExists, t.count(query));
+ assert.eq(expectedExists, hintedCount(query));
}
/** The query field exists and the sparse index is not used without a hint. */
-function assertExistsUnindexed( query, expectedExists ) {
+function assertExistsUnindexed(query, expectedExists) {
expectedExists = expectedExists || 2;
- assert.eq( expectedExists, t.count( query ) );
+ assert.eq(expectedExists, t.count(query));
// Even with another predicate on the index key, the sparse index is disallowed.
andClause = {};
- andClause[ indexKeyField ] = { $ne:null };
- Object.extend( query, { $and:[ andClause ] } );
- assert.eq( expectedExists, t.count( query ) );
- assert.eq( expectedExists, hintedCount( query ) );
+ andClause[indexKeyField] = {
+ $ne: null
+ };
+ Object.extend(query, {$and: [andClause]});
+ assert.eq(expectedExists, t.count(query));
+ assert.eq(expectedExists, hintedCount(query));
}
// $exists:false queries match the proper number of documents and disallow the sparse index.
-assertMissing( { a:{ $exists:false } } );
-assertMissing( { a:{ $not:{ $exists:true } } } );
-assertMissing( { $and:[ { a:{ $exists:false } } ] } );
-assertMissing( { $or:[ { a:{ $exists:false } } ] } );
-assertMissing( { $nor:[ { a:{ $exists:true } } ] } );
-assertMissing( { 'a.x':{ $exists:false } }, 2, 1 );
+assertMissing({a: {$exists: false}});
+assertMissing({a: {$not: {$exists: true}}});
+assertMissing({$and: [{a: {$exists: false}}]});
+assertMissing({$or: [{a: {$exists: false}}]});
+assertMissing({$nor: [{a: {$exists: true}}]});
+assertMissing({'a.x': {$exists: false}}, 2, 1);
// Currently a sparse index is disallowed even if the $exists:false query is on a different field.
-assertMissing( { b:{ $exists:false } }, 2, 1 );
-assertMissing( { b:{ $exists:false }, a:{ $ne:6 } }, 2, 1 );
-assertMissing( { b:{ $not:{ $exists:true } } }, 2, 1 );
+assertMissing({b: {$exists: false}}, 2, 1);
+assertMissing({b: {$exists: false}, a: {$ne: 6}}, 2, 1);
+assertMissing({b: {$not: {$exists: true}}}, 2, 1);
// Top level $exists:true queries match the proper number of documents
// and use the sparse index on { a : 1 }.
-assertExists( { a:{ $exists:true } } );
+assertExists({a: {$exists: true}});
// Nested $exists queries match the proper number of documents and disallow the sparse index.
-assertExistsUnindexed( { $nor:[ { a:{ $exists:false } } ] } );
-assertExistsUnindexed( { $nor:[ { 'a.x':{ $exists:false } } ] }, 1 );
-assertExistsUnindexed( { a:{ $not:{ $exists:false } } } );
+assertExistsUnindexed({$nor: [{a: {$exists: false}}]});
+assertExistsUnindexed({$nor: [{'a.x': {$exists: false}}]}, 1);
+assertExistsUnindexed({a: {$not: {$exists: false}}});
// Nested $exists queries disallow the sparse index in some cases where it is not strictly
// necessary to do so. (Descriptive tests.)
-assertExistsUnindexed( { $nor:[ { b:{ $exists:false } } ] }, 1 ); // Unindexed field.
-assertExists( { $or:[ { a:{ $exists:true } } ] } ); // $exists:true not $exists:false.
+assertExistsUnindexed({$nor: [{b: {$exists: false}}]}, 1); // Unindexed field.
+assertExists({$or: [{a: {$exists: true}}]}); // $exists:true not $exists:false.
// Behavior is similar with $elemMatch.
t.drop();
-t.save( { a:[ {} ] } );
-t.save( { a:[ { b:1 } ] } );
-t.save( { a:[ { b:1 } ] } );
-setIndex( 'a.b' );
+t.save({a: [{}]});
+t.save({a: [{b: 1}]});
+t.save({a: [{b: 1}]});
+setIndex('a.b');
-assertMissing( { a:{ $elemMatch:{ b:{ $exists:false } } } } );
+assertMissing({a: {$elemMatch: {b: {$exists: false}}}});
// A $elemMatch predicate is treated as nested, and the index should be used for $exists:true.
-assertExists( { a:{ $elemMatch:{ b:{ $exists:true } } } } );
+assertExists({a: {$elemMatch: {b: {$exists: true}}}});
// A non sparse index will not be disallowed.
t.drop();
-t.save( {} );
-t.ensureIndex( { a:1 } );
-assert.eq( 1, t.find( { a:{ $exists:false } } ).itcount() );
+t.save({});
+t.ensureIndex({a: 1});
+assert.eq(1, t.find({a: {$exists: false}}).itcount());
diff --git a/jstests/core/existsb.js b/jstests/core/existsb.js
index a212be145c0..d46266cdd16 100644
--- a/jstests/core/existsb.js
+++ b/jstests/core/existsb.js
@@ -23,54 +23,54 @@
t = db.jstests_existsb;
t.drop();
-t.save( {} );
-t.save( { a: 1 } );
-t.save( { b: 1 } );
-t.save( { a: 1, b: null } );
-t.save( { a: 1, b: 1 } );
+t.save({});
+t.save({a: 1});
+t.save({b: 1});
+t.save({a: 1, b: null});
+t.save({a: 1, b: 1});
/** run a series of checks, just on the number of docs found */
function checkExistsNull() {
// Basic cases
- assert.eq( 3, t.count({ a:{ $exists: true }}) );
- assert.eq( 2, t.count({ a:{ $exists: false }}) );
- assert.eq( 3, t.count({ b:{ $exists: true }}) );
- assert.eq( 2, t.count({ b:{ $exists: false }}) );
+ assert.eq(3, t.count({a: {$exists: true}}));
+ assert.eq(2, t.count({a: {$exists: false}}));
+ assert.eq(3, t.count({b: {$exists: true}}));
+ assert.eq(2, t.count({b: {$exists: false}}));
// With negations
- assert.eq( 3, t.count({ a:{ $not:{ $exists: false }}}) );
- assert.eq( 2, t.count({ a:{ $not:{ $exists: true }}}) );
- assert.eq( 3, t.count({ b:{ $not:{ $exists: false }}}) );
- assert.eq( 2, t.count({ b:{ $not:{ $exists: true }}}) );
+ assert.eq(3, t.count({a: {$not: {$exists: false}}}));
+ assert.eq(2, t.count({a: {$not: {$exists: true}}}));
+ assert.eq(3, t.count({b: {$not: {$exists: false}}}));
+ assert.eq(2, t.count({b: {$not: {$exists: true}}}));
// Both fields
- assert.eq( 2, t.count({ a:1, b: { $exists: true }}) );
- assert.eq( 1, t.count({ a:1, b: { $exists: false }}) );
- assert.eq( 1, t.count({ a:{ $exists: true }, b:1}) );
- assert.eq( 1, t.count({ a:{ $exists: false }, b:1}) );
+ assert.eq(2, t.count({a: 1, b: {$exists: true}}));
+ assert.eq(1, t.count({a: 1, b: {$exists: false}}));
+ assert.eq(1, t.count({a: {$exists: true}, b: 1}));
+ assert.eq(1, t.count({a: {$exists: false}, b: 1}));
// Both fields, both $exists
- assert.eq( 2, t.count({ a:{ $exists: true }, b:{ $exists: true }}) );
- assert.eq( 1, t.count({ a:{ $exists: true }, b:{ $exists: false }}) );
- assert.eq( 1, t.count({ a:{ $exists: false }, b:{ $exists: true }}) );
- assert.eq( 1, t.count({ a:{ $exists: false }, b:{ $exists: false }}) );
+ assert.eq(2, t.count({a: {$exists: true}, b: {$exists: true}}));
+ assert.eq(1, t.count({a: {$exists: true}, b: {$exists: false}}));
+ assert.eq(1, t.count({a: {$exists: false}, b: {$exists: true}}));
+ assert.eq(1, t.count({a: {$exists: false}, b: {$exists: false}}));
}
// with no index, make sure we get correct results
checkExistsNull();
// try with a standard index
-t.ensureIndex({ a : 1 });
+t.ensureIndex({a: 1});
checkExistsNull();
// try with a sparse index
t.dropIndexes();
-t.ensureIndex({ a : 1 }, { sparse:true });
+t.ensureIndex({a: 1}, {sparse: true});
checkExistsNull();
// try with a compound index
t.dropIndexes();
-t.ensureIndex({ a : 1, b : 1 });
+t.ensureIndex({a: 1, b: 1});
checkExistsNull();
// try with sparse compound index
t.dropIndexes();
-t.ensureIndex({ a : 1, b : 1 }, { sparse:true });
+t.ensureIndex({a: 1, b: 1}, {sparse: true});
checkExistsNull();
diff --git a/jstests/core/explain1.js b/jstests/core/explain1.js
index 2022a189a07..3c7d8b9df2c 100644
--- a/jstests/core/explain1.js
+++ b/jstests/core/explain1.js
@@ -2,23 +2,25 @@
t = db.explain1;
t.drop();
-for ( var i=0; i<100; i++ ){
- t.save( { x : i } );
+for (var i = 0; i < 100; i++) {
+ t.save({x: i});
}
-q = { x : { $gt : 50 } };
+q = {
+ x: {$gt: 50}
+};
-assert.eq( 49 , t.find( q ).count() , "A" );
-assert.eq( 49 , t.find( q ).itcount() , "B" );
-assert.eq( 20 , t.find( q ).limit(20).itcount() , "C" );
+assert.eq(49, t.find(q).count(), "A");
+assert.eq(49, t.find(q).itcount(), "B");
+assert.eq(20, t.find(q).limit(20).itcount(), "C");
-t.ensureIndex( { x : 1 } );
+t.ensureIndex({x: 1});
-assert.eq( 49 , t.find( q ).count() , "D" );
-assert.eq( 49 , t.find( q ).itcount() , "E" );
-assert.eq( 20 , t.find( q ).limit(20).itcount() , "F" );
+assert.eq(49, t.find(q).count(), "D");
+assert.eq(49, t.find(q).itcount(), "E");
+assert.eq(20, t.find(q).limit(20).itcount(), "F");
-assert.eq( 49 , t.find(q).explain("executionStats").executionStats.nReturned , "G" );
-assert.eq( 20 , t.find(q).limit(20).explain("executionStats").executionStats.nReturned , "H" );
-assert.eq( 20 , t.find(q).limit(-20).explain("executionStats").executionStats.nReturned , "I" );
-assert.eq( 49 , t.find(q).batchSize(20).explain("executionStats").executionStats.nReturned , "J" );
+assert.eq(49, t.find(q).explain("executionStats").executionStats.nReturned, "G");
+assert.eq(20, t.find(q).limit(20).explain("executionStats").executionStats.nReturned, "H");
+assert.eq(20, t.find(q).limit(-20).explain("executionStats").executionStats.nReturned, "I");
+assert.eq(49, t.find(q).batchSize(20).explain("executionStats").executionStats.nReturned, "J");
diff --git a/jstests/core/explain2.js b/jstests/core/explain2.js
index 799f5323598..a0a65de4fe9 100644
--- a/jstests/core/explain2.js
+++ b/jstests/core/explain2.js
@@ -3,22 +3,22 @@
t = db.jstests_explain2;
t.drop();
-t.ensureIndex( { a:1 } );
-for( i = 1000; i < 4000; i += 1000 ) {
- t.save( { a:i } );
+t.ensureIndex({a: 1});
+for (i = 1000; i < 4000; i += 1000) {
+ t.save({a: i});
}
// Run a query with one $or clause per a-value, each of which sleeps for 'a' milliseconds.
function slow() {
- sleep( this.a );
+ sleep(this.a);
return true;
}
clauses = [];
-for( i = 1000; i < 4000; i += 1000 ) {
- clauses.push( { a:i, $where:slow } );
+for (i = 1000; i < 4000; i += 1000) {
+ clauses.push({a: i, $where: slow});
}
-explain = t.find( { $or:clauses } ).explain( true );
-printjson( explain );
+explain = t.find({$or: clauses}).explain(true);
+printjson(explain);
// Verify the duration of the whole query, and of each clause.
-assert.gt( explain.executionStats.executionTimeMillis, 1000 - 500 + 2000 - 500 + 3000 - 500 );
+assert.gt(explain.executionStats.executionTimeMillis, 1000 - 500 + 2000 - 500 + 3000 - 500);
diff --git a/jstests/core/explain3.js b/jstests/core/explain3.js
index 738f8570a0d..64db7686699 100644
--- a/jstests/core/explain3.js
+++ b/jstests/core/explain3.js
@@ -3,21 +3,21 @@
t = db.jstests_explain3;
t.drop();
-t.ensureIndex( {i:1} );
-for( var i = 0; i < 10000; ++i ) {
- t.save( {i:i,j:0} );
+t.ensureIndex({i: 1});
+for (var i = 0; i < 10000; ++i) {
+ t.save({i: i, j: 0});
}
-s = startParallelShell( "sleep( 20 ); db.jstests_explain3.dropIndex( {i:1} );" );
+s = startParallelShell("sleep( 20 ); db.jstests_explain3.dropIndex( {i:1} );");
try {
- t.find( {i:{$gt:-1},j:1} ).hint( {i:1} ).explain();
+ t.find({i: {$gt: -1}, j: 1}).hint({i: 1}).explain();
} catch (e) {
- print( "got exception" );
- printjson( e );
+ print("got exception");
+ printjson(e);
}
s();
// Sanity check to make sure mongod didn't seg fault.
-assert.eq( 10000, t.count() );
+assert.eq(10000, t.count());
diff --git a/jstests/core/explain4.js b/jstests/core/explain4.js
index effd080d8fd..fe67516fe61 100644
--- a/jstests/core/explain4.js
+++ b/jstests/core/explain4.js
@@ -3,16 +3,13 @@
t = db.jstests_explain4;
t.drop();
-t.ensureIndex( { a:1 } );
+t.ensureIndex({a: 1});
-for( i = 0; i < 10; ++i ) {
- t.save( { a:i, b:0 } );
+for (i = 0; i < 10; ++i) {
+ t.save({a: i, b: 0});
}
-explain = t.find( { a:{ $gte:0 }, b:0 } ).sort( { a:1 } )
- .hint( { a:1 } )
- .limit( 5 )
- .explain( true );
+explain = t.find({a: {$gte: 0}, b: 0}).sort({a: 1}).hint({a: 1}).limit(5).explain(true);
// Five results are expected, matching the limit spec.
-assert.eq( 5, explain.executionStats.nReturned );
+assert.eq(5, explain.executionStats.nReturned);
diff --git a/jstests/core/explain5.js b/jstests/core/explain5.js
index eb8e5d9f4a2..35841ac0789 100644
--- a/jstests/core/explain5.js
+++ b/jstests/core/explain5.js
@@ -3,31 +3,27 @@
t = db.jstests_explain5;
t.drop();
-t.ensureIndex( { a:1 } );
-t.ensureIndex( { b:1 } );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
-for( i = 0; i < 1000; ++i ) {
- t.save( { a:i, b:i%3 } );
+for (i = 0; i < 1000; ++i) {
+ t.save({a: i, b: i % 3});
}
// Query with an initial set of documents.
-var explain1 = t.find( { a:{ $gte:0 }, b:2 } ).sort( { a:1 } )
- .hint( { a:1 } )
- .explain("executionStats");
+var explain1 = t.find({a: {$gte: 0}, b: 2}).sort({a: 1}).hint({a: 1}).explain("executionStats");
printjson(explain1);
var stats1 = explain1.executionStats;
-assert.eq( 333, stats1.nReturned, 'wrong nReturned for explain1' );
-assert.eq( 1000, stats1.totalKeysExamined, 'wrong totalKeysExamined for explain1' );
+assert.eq(333, stats1.nReturned, 'wrong nReturned for explain1');
+assert.eq(1000, stats1.totalKeysExamined, 'wrong totalKeysExamined for explain1');
-for( i = 1000; i < 2000; ++i ) {
- t.save( { a:i, b:i%3 } );
+for (i = 1000; i < 2000; ++i) {
+ t.save({a: i, b: i % 3});
}
// Query with some additional documents.
-var explain2 = t.find( { a:{ $gte:0 }, b:2 } ).sort( { a:1 } )
- .hint ( { a:1 } )
- .explain("executionStats");
+var explain2 = t.find({a: {$gte: 0}, b: 2}).sort({a: 1}).hint({a: 1}).explain("executionStats");
printjson(explain2);
var stats2 = explain2.executionStats;
-assert.eq( 666, stats2.nReturned, 'wrong nReturned for explain2' );
-assert.eq( 2000, stats2.totalKeysExamined, 'wrong totalKeysExamined for explain2' );
+assert.eq(666, stats2.nReturned, 'wrong nReturned for explain2');
+assert.eq(2000, stats2.totalKeysExamined, 'wrong totalKeysExamined for explain2');
diff --git a/jstests/core/explain6.js b/jstests/core/explain6.js
index 7bcc09b8f2a..4b8c75fa4eb 100644
--- a/jstests/core/explain6.js
+++ b/jstests/core/explain6.js
@@ -4,32 +4,31 @@
t = db.jstests_explain6;
t.drop();
-t.ensureIndex( { a:1, b:1 } );
-t.ensureIndex( { b:1, a:1 } );
+t.ensureIndex({a: 1, b: 1});
+t.ensureIndex({b: 1, a: 1});
-t.save( { a:0, b:1 } );
-t.save( { a:1, b:0 } );
+t.save({a: 0, b: 1});
+t.save({a: 1, b: 0});
-explain = t.find( { a:{ $gte:0 }, b:{ $gte:0 } } ).explain( true );
+explain = t.find({a: {$gte: 0}, b: {$gte: 0}}).explain(true);
-assert.eq( 2, explain.executionStats.nReturned );
-assert.eq( 2, explain.executionStats.totalKeysExamined );
-assert.eq( 2, explain.executionStats.totalDocsExamined );
+assert.eq(2, explain.executionStats.nReturned);
+assert.eq(2, explain.executionStats.totalKeysExamined);
+assert.eq(2, explain.executionStats.totalDocsExamined);
// A limit of 2.
-explain = t.find( { a:{ $gte:0 }, b:{ $gte:0 } } ).limit( -2 ).explain( true );
-assert.eq( 2, explain.executionStats.nReturned );
+explain = t.find({a: {$gte: 0}, b: {$gte: 0}}).limit(-2).explain(true);
+assert.eq(2, explain.executionStats.nReturned);
// A $or query.
-explain = t.find( { $or:[ { a:{ $gte:0 }, b:{ $gte:1 } },
- { a:{ $gte:1 }, b:{ $gte:0 } } ] } ).explain( true );
-assert.eq( 2, explain.executionStats.nReturned );
+explain = t.find({$or: [{a: {$gte: 0}, b: {$gte: 1}}, {a: {$gte: 1}, b: {$gte: 0}}]}).explain(true);
+assert.eq(2, explain.executionStats.nReturned);
// A non $or case where totalKeysExamined != number of results
t.remove({});
-t.save( { a:'0', b:'1' } );
-t.save( { a:'1', b:'0' } );
-explain = t.find( { a:/0/, b:/1/ } ).explain( true );
-assert.eq( 1, explain.executionStats.nReturned );
-assert.eq( 2, explain.executionStats.totalKeysExamined );
+t.save({a: '0', b: '1'});
+t.save({a: '1', b: '0'});
+explain = t.find({a: /0/, b: /1/}).explain(true);
+assert.eq(1, explain.executionStats.nReturned);
+assert.eq(2, explain.executionStats.totalKeysExamined);
diff --git a/jstests/core/explain_batch_size.js b/jstests/core/explain_batch_size.js
index 8331e158ff8..7f94adb13ce 100644
--- a/jstests/core/explain_batch_size.js
+++ b/jstests/core/explain_batch_size.js
@@ -8,12 +8,12 @@ t = db.explain_batch_size;
t.drop();
var n = 3;
-for (i=0; i<n; i++) {
- t.save( { x : i } );
+for (i = 0; i < n; i++) {
+ t.save({x: i});
}
var q = {};
-assert.eq( n , t.find( q ).count() , "A" );
-assert.eq( n , t.find( q ).itcount() , "B" );
-assert.eq( n , t.find( q ).batchSize(1).explain("executionStats").executionStats.nReturned , "C" );
+assert.eq(n, t.find(q).count(), "A");
+assert.eq(n, t.find(q).itcount(), "B");
+assert.eq(n, t.find(q).batchSize(1).explain("executionStats").executionStats.nReturned, "C");
diff --git a/jstests/core/explain_count.js b/jstests/core/explain_count.js
index 6e42657fd3b..4943c511252 100644
--- a/jstests/core/explain_count.js
+++ b/jstests/core/explain_count.js
@@ -20,8 +20,7 @@ function checkCountExplain(explain, nCounted) {
var countStage = execStages.shards[0].executionStages;
assert.eq(countStage.stage, "COUNT", "root stage on shard is not COUNT");
assert.eq(countStage.nCounted, nCounted, "wrong count result");
- }
- else {
+ } else {
assert.eq(execStages.stage, "COUNT", "root stage is not COUNT");
assert.eq(execStages.nCounted, nCounted, "wrong count result");
}
@@ -46,13 +45,13 @@ explain = db.runCommand({explain: {count: collName, limit: -3}, verbosity: "exec
checkCountExplain(explain, 0);
assert.eq(0, db.runCommand({count: collName, limit: -3, skip: 4}).n);
-explain = db.runCommand({explain: {count: collName, limit: -3, skip: 4},
- verbosity: "executionStats"});
+explain =
+ db.runCommand({explain: {count: collName, limit: -3, skip: 4}, verbosity: "executionStats"});
checkCountExplain(explain, 0);
assert.eq(0, db.runCommand({count: collName, query: {a: 1}, limit: -3, skip: 4}).n);
-explain = db.runCommand({explain: {count: collName, query: {a: 1}, limit: -3, skip: 4},
- verbosity: "executionStats"});
+explain = db.runCommand(
+ {explain: {count: collName, query: {a: 1}, limit: -3, skip: 4}, verbosity: "executionStats"});
checkCountExplain(explain, 0);
// Now add a bit of data to the collection.
@@ -83,26 +82,25 @@ checkCountExplain(explain, 3);
// Trivial count with both limit and skip.
assert.eq(3, db.runCommand({count: collName, limit: -3, skip: 4}).n);
-explain = db.runCommand({explain: {count: collName, limit: -3, skip: 4},
- verbosity: "executionStats"});
+explain =
+ db.runCommand({explain: {count: collName, limit: -3, skip: 4}, verbosity: "executionStats"});
checkCountExplain(explain, 3);
// With a query.
assert.eq(10, db.runCommand({count: collName, query: {a: 1}}).n);
-explain = db.runCommand({explain: {count: collName, query: {a: 1}},
- verbosity: "executionStats"});
+explain = db.runCommand({explain: {count: collName, query: {a: 1}}, verbosity: "executionStats"});
checkCountExplain(explain, 10);
// With a query and skip.
assert.eq(7, db.runCommand({count: collName, query: {a: 1}, skip: 3}).n);
-explain = db.runCommand({explain: {count: collName, query: {a: 1}, skip: 3},
- verbosity: "executionStats"});
+explain = db.runCommand(
+ {explain: {count: collName, query: {a: 1}, skip: 3}, verbosity: "executionStats"});
checkCountExplain(explain, 7);
// With a query and limit.
assert.eq(3, db.runCommand({count: collName, query: {a: 1}, limit: 3}).n);
-explain = db.runCommand({explain: {count: collName, query: {a: 1}, limit: 3},
- verbosity: "executionStats"});
+explain = db.runCommand(
+ {explain: {count: collName, query: {a: 1}, limit: 3}, verbosity: "executionStats"});
checkCountExplain(explain, 3);
// Insert one more doc for the last few tests.
@@ -110,12 +108,12 @@ t.insert({a: 2});
// Case where all results are skipped.
assert.eq(0, db.runCommand({count: collName, query: {a: 2}, skip: 2}).n);
-explain = db.runCommand({explain: {count: collName, query: {a: 2}, skip: 2},
- verbosity: "executionStats"});
+explain = db.runCommand(
+ {explain: {count: collName, query: {a: 2}, skip: 2}, verbosity: "executionStats"});
checkCountExplain(explain, 0);
// Case where we have a limit, but we don't hit it.
assert.eq(1, db.runCommand({count: collName, query: {a: 2}, limit: 2}).n);
-explain = db.runCommand({explain: {count: collName, query: {a: 2}, limit: 2},
- verbosity: "executionStats"});
+explain = db.runCommand(
+ {explain: {count: collName, query: {a: 2}, limit: 2}, verbosity: "executionStats"});
checkCountExplain(explain, 1);
diff --git a/jstests/core/explain_delete.js b/jstests/core/explain_delete.js
index a3508e71e29..eeee5c23c14 100644
--- a/jstests/core/explain_delete.js
+++ b/jstests/core/explain_delete.js
@@ -24,34 +24,19 @@ function checkNWouldDelete(explain, nWouldDelete) {
var deleteStage = execStages.shards[0].executionStages;
assert.eq(deleteStage.stage, "DELETE");
assert.eq(deleteStage.nWouldDelete, nWouldDelete);
- }
- else {
+ } else {
assert.eq(execStages.stage, "DELETE");
assert.eq(execStages.nWouldDelete, nWouldDelete);
}
}
// Explain delete against an empty collection.
-explain = db.runCommand({
- explain: {
- delete: collName,
- deletes: [
- {q: {a: 1}, limit: 0}
- ]
- }
-});
+explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}});
checkNWouldDelete(explain, 0);
// Add an index but no data, and check that the explain still works.
t.ensureIndex({a: 1});
-explain = db.runCommand({
- explain: {
- delete: collName,
- deletes: [
- {q: {a: 1}, limit: 0}
- ]
- }
-});
+explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}});
checkNWouldDelete(explain, 0);
// Add some copies of the same document.
@@ -61,25 +46,13 @@ for (var i = 0; i < 10; i++) {
assert.eq(10, t.count());
// Run an explain which shows that all 10 documents *would* be deleted.
-explain = db.runCommand({
- explain: {
- delete: collName,
- deletes: [
- {q: {a: 1}, limit: 0}
- ]
- }
-});
+explain = db.runCommand({explain: {delete: collName, deletes: [{q: {a: 1}, limit: 0}]}});
checkNWouldDelete(explain, 10);
// Make sure all 10 documents are still there.
assert.eq(10, t.count());
// If we run the same thing without the explain, then all 10 docs should be deleted.
-var deleteResult = db.runCommand({
- delete: collName,
- deletes: [
- {q: {a: 1}, limit: 0}
- ]
-});
+var deleteResult = db.runCommand({delete: collName, deletes: [{q: {a: 1}, limit: 0}]});
assert.commandWorked(deleteResult);
assert.eq(0, t.count());
diff --git a/jstests/core/explain_distinct.js b/jstests/core/explain_distinct.js
index ad359016530..37d5a485516 100644
--- a/jstests/core/explain_distinct.js
+++ b/jstests/core/explain_distinct.js
@@ -19,10 +19,7 @@
distinctCmd.query = query;
}
- return coll.runCommand({
- explain: distinctCmd,
- verbosity: 'executionStats'
- });
+ return coll.runCommand({explain: distinctCmd, verbosity: 'executionStats'});
}
coll.drop();
@@ -33,16 +30,16 @@
assert(planHasStage(explain.queryPlanner.winningPlan, "EOF"));
// Insert the data to perform distinct() on.
- for (var i = 0; i < 10; i ++) {
+ for (var i = 0; i < 10; i++) {
assert.writeOK(coll.insert({a: 1, b: 1}));
assert.writeOK(coll.insert({a: 2, c: 1}));
}
- assert.commandFailed(runDistinctExplain(coll, {}, {})); // Bad keyString.
- assert.commandFailed(runDistinctExplain(coll, 'a', 'a')); // Bad query.
- assert.commandFailed(runDistinctExplain(coll, 'b', {$not: 1})); // Bad query.
- assert.commandFailed(runDistinctExplain(coll, 'a', {$not: 1})); // Bad query.
- assert.commandFailed(runDistinctExplain(coll, '_id', {$not: 1})); // Bad query.
+ assert.commandFailed(runDistinctExplain(coll, {}, {})); // Bad keyString.
+ assert.commandFailed(runDistinctExplain(coll, 'a', 'a')); // Bad query.
+ assert.commandFailed(runDistinctExplain(coll, 'b', {$not: 1})); // Bad query.
+ assert.commandFailed(runDistinctExplain(coll, 'a', {$not: 1})); // Bad query.
+ assert.commandFailed(runDistinctExplain(coll, '_id', {$not: 1})); // Bad query.
// Ensure that server accepts a distinct command with no 'query' field.
assert.commandWorked(runDistinctExplain(coll, '', null));
diff --git a/jstests/core/explain_execution_error.js b/jstests/core/explain_execution_error.js
index 280e4e3250d..1eb08c9cc21 100644
--- a/jstests/core/explain_execution_error.js
+++ b/jstests/core/explain_execution_error.js
@@ -15,8 +15,7 @@ function assertExecError(explain) {
var execStats = explain.executionStats;
if (execStats.executionStages.stage == "SINGLE_SHARD") {
errorObj = execStats.executionStages.shards[0];
- }
- else {
+ } else {
errorObj = execStats;
}
@@ -34,8 +33,7 @@ function assertExecSuccess(explain) {
var execStats = explain.executionStats;
if (execStats.executionStages.stage == "SINGLE_SHARD") {
errorObj = execStats.executionStages.shards[0];
- }
- else {
+ } else {
errorObj = execStats;
}
@@ -63,11 +61,7 @@ assert.throws(function() {
// Explain of this query should succeed at query planner verbosity.
result = db.runCommand({
- explain: {
- find: t.getName(),
- filter: {a: {$exists: true}},
- sort: {b: 1}
- },
+ explain: {find: t.getName(), filter: {a: {$exists: true}}, sort: {b: 1}},
verbosity: "queryPlanner"
});
assert.commandWorked(result);
@@ -76,11 +70,7 @@ assert("queryPlanner" in result);
// Explaining the same query at execution stats verbosity should succeed, but indicate that the
// underlying operation failed.
result = db.runCommand({
- explain: {
- find: t.getName(),
- filter: {a: {$exists: true}},
- sort: {b: 1}
- },
+ explain: {find: t.getName(), filter: {a: {$exists: true}}, sort: {b: 1}},
verbosity: "executionStats"
});
assert.commandWorked(result);
@@ -90,11 +80,7 @@ assertExecError(result);
// The underlying operation should also report a failure at allPlansExecution verbosity.
result = db.runCommand({
- explain: {
- find: t.getName(),
- filter: {a: {$exists: true}},
- sort: {b: 1}
- },
+ explain: {find: t.getName(), filter: {a: {$exists: true}}, sort: {b: 1}},
verbosity: "allPlansExecution"
});
assert.commandWorked(result);
@@ -115,22 +101,14 @@ assert.eq(40, t.find({c: {$lt: 40}}).sort({b: 1}).itcount());
// The explain should succeed at all verbosity levels because the query itself succeeds.
// First test "queryPlanner" verbosity.
result = db.runCommand({
- explain: {
- find: t.getName(),
- filter: {c: {$lt: 40}},
- sort: {b: 1}
- },
+ explain: {find: t.getName(), filter: {c: {$lt: 40}}, sort: {b: 1}},
verbosity: "queryPlanner"
});
assert.commandWorked(result);
assert("queryPlanner" in result);
result = db.runCommand({
- explain: {
- find: t.getName(),
- filter: {c: {$lt: 40}},
- sort: {b: 1}
- },
+ explain: {find: t.getName(), filter: {c: {$lt: 40}}, sort: {b: 1}},
verbosity: "executionStats"
});
assert.commandWorked(result);
@@ -140,11 +118,7 @@ assertExecSuccess(result);
// We expect allPlansExecution verbosity to show execution stats for both candidate plans.
result = db.runCommand({
- explain: {
- find: t.getName(),
- filter: {c: {$lt: 40}},
- sort: {b: 1}
- },
+ explain: {find: t.getName(), filter: {c: {$lt: 40}}, sort: {b: 1}},
verbosity: "allPlansExecution"
});
assert.commandWorked(result);
diff --git a/jstests/core/explain_find.js b/jstests/core/explain_find.js
index 2e2699ea05b..820e6dffbcd 100644
--- a/jstests/core/explain_find.js
+++ b/jstests/core/explain_find.js
@@ -10,25 +10,14 @@ for (var i = 0; i < 10; i++) {
t.insert({_id: i, a: i});
}
-var explain = db.runCommand({
- explain: {
- find: collName,
- filter: {a: {$lte: 2}}
- },
- verbosity: "executionStats"
-});
+var explain = db.runCommand(
+ {explain: {find: collName, filter: {a: {$lte: 2}}}, verbosity: "executionStats"});
printjson(explain);
assert.commandWorked(explain);
assert.eq(3, explain.executionStats.nReturned);
-explain = db.runCommand({
- explain: {
- find: collName,
- min: {a: 4},
- max: {a: 6}
- },
- verbosity: "executionStats"
-});
+explain = db.runCommand(
+ {explain: {find: collName, min: {a: 4}, max: {a: 6}}, verbosity: "executionStats"});
printjson(explain);
assert.commandWorked(explain);
assert.eq(2, explain.executionStats.nReturned);
diff --git a/jstests/core/explain_find_and_modify.js b/jstests/core/explain_find_and_modify.js
index 94040cf95f8..346e7029cd1 100644
--- a/jstests/core/explain_find_and_modify.js
+++ b/jstests/core/explain_find_and_modify.js
@@ -13,10 +13,15 @@
var t = db.getCollection(cName);
// Different types of findAndModify explain requests.
- var explainRemove = {explain: {findAndModify: cName, remove: true, query: {_id: 0}}};
- var explainUpdate = {explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}}};
- var explainUpsert = {explain:
- {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}, upsert: true}};
+ var explainRemove = {
+ explain: {findAndModify: cName, remove: true, query: {_id: 0}}
+ };
+ var explainUpdate = {
+ explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}}
+ };
+ var explainUpsert = {
+ explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}, upsert: true}
+ };
// 1. Explaining findAndModify should never create a database.
@@ -55,24 +60,38 @@
assert.commandFailed(db.runCommand({remove: true, new: true}));
// 4. Explaining findAndModify should not modify any contents of the collection.
- var onlyDoc = {_id: 0, i: 1};
+ var onlyDoc = {
+ _id: 0,
+ i: 1
+ };
assert.writeOK(t.insert(onlyDoc));
// Explaining a delete should not delete anything.
- var matchingRemoveCmd = {findAndModify: cName, remove: true, query: {_id: onlyDoc._id}};
+ var matchingRemoveCmd = {
+ findAndModify: cName,
+ remove: true,
+ query: {_id: onlyDoc._id}
+ };
var res = db.runCommand({explain: matchingRemoveCmd});
assert.commandWorked(res);
assert.eq(t.find().itcount(), 1, "Explaining a remove should not remove any documents.");
// Explaining an update should not update anything.
- var matchingUpdateCmd = {findAndModify: cName, update: {x: "x"}, query: {_id: onlyDoc._id}};
+ var matchingUpdateCmd = {
+ findAndModify: cName,
+ update: {x: "x"},
+ query: {_id: onlyDoc._id}
+ };
var res = db.runCommand({explain: matchingUpdateCmd});
assert.commandWorked(res);
assert.eq(t.findOne(), onlyDoc, "Explaining an update should not update any documents.");
// Explaining an upsert should not insert anything.
var matchingUpsertCmd = {
- findAndModify: cName, update: {x: "x"}, query: {_id: "non-match"}, upsert: true
+ findAndModify: cName,
+ update: {x: "x"},
+ query: {_id: "non-match"},
+ upsert: true
};
var res = db.runCommand({explain: matchingUpsertCmd});
assert.commandWorked(res);
@@ -85,139 +104,105 @@
var testCases = [
// -------------------------------------- Removes ----------------------------------------
{
- // Non-matching remove command.
- cmd: {remove: true, query: {_id: "no-match"}},
- expectedResult: {
- executionStats: {
- nReturned: 0,
- executionSuccess: true,
- executionStages: {
- stage: "DELETE",
- nWouldDelete: 0
- }
- }
- }
+ // Non-matching remove command.
+ cmd: {remove: true, query: {_id: "no-match"}},
+ expectedResult: {
+ executionStats: {
+ nReturned: 0,
+ executionSuccess: true,
+ executionStages: {stage: "DELETE", nWouldDelete: 0}
+ }
+ }
},
{
- // Matching remove command.
- cmd: {remove: true, query: {_id: onlyDoc._id}},
- expectedResult: {
- executionStats: {
- nReturned: 1,
- executionSuccess: true,
- executionStages: {
- stage: "DELETE",
- nWouldDelete: 1
- }
- }
- }
+ // Matching remove command.
+ cmd: {remove: true, query: {_id: onlyDoc._id}},
+ expectedResult: {
+ executionStats: {
+ nReturned: 1,
+ executionSuccess: true,
+ executionStages: {stage: "DELETE", nWouldDelete: 1}
+ }
+ }
},
// -------------------------------------- Updates ----------------------------------------
{
- // Non-matching update query.
- cmd: {update: {$inc: {i: 1}}, query: {_id: "no-match"}},
- expectedResult: {
- executionStats: {
- nReturned: 0,
- executionSuccess: true,
- executionStages: {
- stage: "UPDATE",
- nWouldModify: 0,
- wouldInsert: false
- }
- }
- }
+ // Non-matching update query.
+ cmd: {update: {$inc: {i: 1}}, query: {_id: "no-match"}},
+ expectedResult: {
+ executionStats: {
+ nReturned: 0,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: false}
+ }
+ }
},
{
- // Non-matching update query, returning new doc.
- cmd: {update: {$inc: {i: 1}}, query: {_id: "no-match"}, new: true},
- expectedResult: {
- executionStats: {
- nReturned: 0,
- executionSuccess: true,
- executionStages: {
- stage: "UPDATE",
- nWouldModify: 0,
- wouldInsert: false
- }
- }
- }
+ // Non-matching update query, returning new doc.
+ cmd: {update: {$inc: {i: 1}}, query: {_id: "no-match"}, new: true},
+ expectedResult: {
+ executionStats: {
+ nReturned: 0,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: false}
+ }
+ }
},
{
- // Matching update query.
- cmd: {update: {$inc: {i: 1}}, query: {_id: onlyDoc._id}},
- expectedResult: {
- executionStats: {
- nReturned: 1,
- executionSuccess: true,
- executionStages: {
- stage: "UPDATE",
- nWouldModify: 1,
- wouldInsert: false
- }
- }
- }
+ // Matching update query.
+ cmd: {update: {$inc: {i: 1}}, query: {_id: onlyDoc._id}},
+ expectedResult: {
+ executionStats: {
+ nReturned: 1,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false}
+ }
+ }
},
{
- // Matching update query, returning new doc.
- cmd: {update: {$inc: {i: 1}}, query: {_id: onlyDoc._id}, new: true},
- expectedResult: {
- executionStats: {
- nReturned: 1,
- executionSuccess: true,
- executionStages: {
- stage: "UPDATE",
- nWouldModify: 1,
- wouldInsert: false
- }
- }
- }
+ // Matching update query, returning new doc.
+ cmd: {update: {$inc: {i: 1}}, query: {_id: onlyDoc._id}, new: true},
+ expectedResult: {
+ executionStats: {
+ nReturned: 1,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false}
+ }
+ }
},
// -------------------------------------- Upserts ----------------------------------------
{
- // Non-matching upsert query.
- cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: "no-match"}},
- expectedResult: {
- executionStats: {
- nReturned: 0,
- executionSuccess: true,
- executionStages: {
- stage: "UPDATE",
- nWouldModify: 0,
- wouldInsert: true
- }
- }
- }
+ // Non-matching upsert query.
+ cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: "no-match"}},
+ expectedResult: {
+ executionStats: {
+ nReturned: 0,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: true}
+ }
+ }
},
{
- // Non-matching upsert query, returning new doc.
- cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: "no-match"}, new: true},
- expectedResult: {
- executionStats: {
- nReturned: 1,
- executionSuccess: true,
- executionStages: {
- stage: "UPDATE",
- nWouldModify: 0,
- wouldInsert: true
- }
- }
- }
+ // Non-matching upsert query, returning new doc.
+ cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: "no-match"}, new: true},
+ expectedResult: {
+ executionStats: {
+ nReturned: 1,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 0, wouldInsert: true}
+ }
+ }
},
{
- // Matching upsert query, returning new doc.
- cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: onlyDoc._id}, new: true},
- expectedResult: {
- executionStats: {
- nReturned: 1,
- executionSuccess: true,
- executionStages: {
- stage: "UPDATE",
- nWouldModify: 1,
- wouldInsert: false
- }
- }
- }
+ // Matching upsert query, returning new doc.
+ cmd: {update: {$inc: {i: 1}}, upsert: true, query: {_id: onlyDoc._id}, new: true},
+ expectedResult: {
+ executionStats: {
+ nReturned: 1,
+ executionSuccess: true,
+ executionStages: {stage: "UPDATE", nWouldModify: 1, wouldInsert: false}
+ }
+ }
}
];
@@ -288,26 +273,23 @@
function assertExplainResultsMatch(explainOut, expectedMatches, preMsg, currentPath) {
// This is only used recursively, to keep track of where we are in the document.
var isRootLevel = typeof currentPath === "undefined";
- Object.keys(expectedMatches).forEach(function(key) {
- var totalFieldName = isRootLevel ? key : currentPath + "." + key;
- assert(explainOut.hasOwnProperty(key),
- preMsg + "Explain's output does not have a value for " + key);
- if (typeof expectedMatches[key] === "object") {
- // Sub-doc, recurse to match on it's fields
- assertExplainResultsMatch(explainOut[key],
- expectedMatches[key],
- preMsg,
- totalFieldName);
- }
- else {
- assert.eq(
- explainOut[key],
- expectedMatches[key],
- preMsg + "Explain's " + totalFieldName + " (" + explainOut[key] + ")" +
- " does not match expected value (" + expectedMatches[key] + ")."
- );
- }
- });
+ Object.keys(expectedMatches)
+ .forEach(function(key) {
+ var totalFieldName = isRootLevel ? key : currentPath + "." + key;
+ assert(explainOut.hasOwnProperty(key),
+ preMsg + "Explain's output does not have a value for " + key);
+ if (typeof expectedMatches[key] === "object") {
+ // Sub-doc, recurse to match on it's fields
+ assertExplainResultsMatch(
+ explainOut[key], expectedMatches[key], preMsg, totalFieldName);
+ } else {
+ assert.eq(explainOut[key],
+ expectedMatches[key],
+ preMsg + "Explain's " + totalFieldName + " (" + explainOut[key] +
+ ")" + " does not match expected value (" + expectedMatches[key] +
+ ").");
+ }
+ });
}
/**
@@ -340,8 +322,7 @@
}
function assertCollDoesNotExist(cName, msg) {
- assert.eq(db.getCollectionNames().indexOf(cName),
- -1,
- msg + "collection " + cName + " exists.");
+ assert.eq(
+ db.getCollectionNames().indexOf(cName), -1, msg + "collection " + cName + " exists.");
}
})();
diff --git a/jstests/core/explain_missing_collection.js b/jstests/core/explain_missing_collection.js
index 93af3ed8fd0..0d1eae844f8 100644
--- a/jstests/core/explain_missing_collection.js
+++ b/jstests/core/explain_missing_collection.js
@@ -20,7 +20,7 @@
// .group()
missingColl.drop();
explainColl = missingColl.explain("executionStats");
- explain = explainColl.group({key: "a", initial: {}, reduce: function() { } });
+ explain = explainColl.group({key: "a", initial: {}, reduce: function() {}});
assert.commandWorked(explain);
assert("executionStats" in explain);
diff --git a/jstests/core/explain_missing_database.js b/jstests/core/explain_missing_database.js
index 5fff4502361..598cd7a9e11 100644
--- a/jstests/core/explain_missing_database.js
+++ b/jstests/core/explain_missing_database.js
@@ -20,7 +20,7 @@
// .group()
explainMissingDb.dropDatabase();
explainColl = explainMissingDb.collection.explain("executionStats");
- explain = explainColl.group({key: "a", initial: {}, reduce: function() { } });
+ explain = explainColl.group({key: "a", initial: {}, reduce: function() {}});
assert.commandWorked(explain);
assert("executionStats" in explain);
diff --git a/jstests/core/explain_multi_plan.js b/jstests/core/explain_multi_plan.js
index abcec153816..f74078c717d 100644
--- a/jstests/core/explain_multi_plan.js
+++ b/jstests/core/explain_multi_plan.js
@@ -2,7 +2,7 @@
* Tests running explain on a variety of explainable commands (find, update, remove, etc.) when
* there are multiple plans available. This is a regression test for SERVER-20849 and SERVER-21376.
*/
-(function () {
+(function() {
"use strict";
var coll = db.explainMultiPlan;
coll.drop();
@@ -49,12 +49,13 @@
});
assert.doesNotThrow(function() {
- coll.explain("allPlansExecution").group({
- key: {a: 1},
- cond: {a: {$gte: 1}},
- reduce: function (curr, result) {},
- initial: {}
- });
+ coll.explain("allPlansExecution")
+ .group({
+ key: {a: 1},
+ cond: {a: {$gte: 1}},
+ reduce: function(curr, result) {},
+ initial: {}
+ });
});
// SERVER-21376: Make sure the 'rejectedPlans' field is filled in appropriately.
diff --git a/jstests/core/explain_shell_helpers.js b/jstests/core/explain_shell_helpers.js
index 836092bb0d4..3c05b760e71 100644
--- a/jstests/core/explain_shell_helpers.js
+++ b/jstests/core/explain_shell_helpers.js
@@ -191,7 +191,7 @@ assert(!explainQuery.hasNext());
// .forEach()
var results = [];
-t.explain().find().forEach(function (res) {
+t.explain().find().forEach(function(res) {
results.push(res);
});
assert.eq(1, results.length);
@@ -257,7 +257,7 @@ assert(planHasStage(explain.queryPlanner.winningPlan, "COUNT_SCAN"));
// .group()
//
-explain = t.explain().group({key: "a", initial: {}, reduce: function() { } });
+explain = t.explain().group({key: "a", initial: {}, reduce: function() {}});
assert.commandWorked(explain);
//
@@ -393,8 +393,8 @@ assert.eq(1, explain.executionStats.totalDocsExamined);
assert.eq(10, t.count());
// findAndModify with upsert flag set that should do an insert.
-explain = t.explain("executionStats").findAndModify(
- {query: {a: 15}, update: {$set: {b: 3}}, upsert: true});
+explain = t.explain("executionStats")
+ .findAndModify({query: {a: 15}, update: {$set: {b: 3}}, upsert: true});
assert.commandWorked(explain);
stage = explain.executionStats.executionStages;
if ("SINGLE_SHARD" === stage.stage) {
@@ -435,7 +435,7 @@ assert.throws(function() {
// Missing "initial" for explaining a group.
assert.throws(function() {
- t.explain().group({key: "a", reduce: function() { } });
+ t.explain().group({key: "a", reduce: function() {}});
});
// Can't specify both remove and update in a findAndModify
diff --git a/jstests/core/explain_upsert.js b/jstests/core/explain_upsert.js
index 41282e9bf51..1ac254291f1 100644
--- a/jstests/core/explain_upsert.js
+++ b/jstests/core/explain_upsert.js
@@ -6,14 +6,8 @@ t.drop();
var explain;
// Explained upsert against an empty collection should succeed and be a no-op.
-explain = db.runCommand({
- explain: {
- update: t.getName(),
- updates: [
- {q: {a: 1}, u: {a: 1}, upsert: true}
- ]
- }
-});
+explain = db.runCommand(
+ {explain: {update: t.getName(), updates: [{q: {a: 1}, u: {a: 1}, upsert: true}]}});
assert.commandWorked(explain);
// Collection should still not exist.
@@ -24,13 +18,7 @@ assert(!t.drop());
t.insert({a: 3});
// An explained upsert against a non-empty collection should also succeed as a no-op.
-explain = db.runCommand({
- explain: {
- update: t.getName(),
- updates: [
- {q: {a: 1}, u: {a: 1}, upsert: true}
- ]
- }
-});
+explain = db.runCommand(
+ {explain: {update: t.getName(), updates: [{q: {a: 1}, u: {a: 1}, upsert: true}]}});
assert.commandWorked(explain);
assert.eq(1, t.count());
diff --git a/jstests/core/filemd5.js b/jstests/core/filemd5.js
index 62f69bd657f..b43dccf7036 100644
--- a/jstests/core/filemd5.js
+++ b/jstests/core/filemd5.js
@@ -1,11 +1,10 @@
db.fs.chunks.drop();
-db.fs.chunks.insert({files_id:1,n:0,data:new BinData(0,"test")});
+db.fs.chunks.insert({files_id: 1, n: 0, data: new BinData(0, "test")});
-x = db.runCommand({"filemd5":1,"root":"fs"});
-assert( ! x.ok , tojson(x) );
-
-db.fs.chunks.ensureIndex({files_id:1,n:1});
-x = db.runCommand({"filemd5":1,"root":"fs"});
-assert( x.ok , tojson(x) );
+x = db.runCommand({"filemd5": 1, "root": "fs"});
+assert(!x.ok, tojson(x));
+db.fs.chunks.ensureIndex({files_id: 1, n: 1});
+x = db.runCommand({"filemd5": 1, "root": "fs"});
+assert(x.ok, tojson(x));
diff --git a/jstests/core/find1.js b/jstests/core/find1.js
index e6d68540bdf..a09c0822a47 100644
--- a/jstests/core/find1.js
+++ b/jstests/core/find1.js
@@ -5,49 +5,50 @@ lookAtDocumentMetrics = false;
// QUERY MIGRATION
// New system is still not connected to server status
-if ( db.serverStatus().metrics ) {
+if (db.serverStatus().metrics) {
// var ss = db.serverStatus();
- // lookAtDocumentMetrics = ss.metrics.document != null && ss.metrics.queryExecutor.scanned != null;
+ // lookAtDocumentMetrics = ss.metrics.document != null && ss.metrics.queryExecutor.scanned !=
+ // null;
}
-print( "lookAtDocumentMetrics: " + lookAtDocumentMetrics );
+print("lookAtDocumentMetrics: " + lookAtDocumentMetrics);
-if ( lookAtDocumentMetrics ) {
+if (lookAtDocumentMetrics) {
// ignore mongos
nscannedStart = db.serverStatus().metrics.queryExecutor.scanned;
}
-
-t.save( { a : 1 , b : "hi" } );
-t.save( { a : 2 , b : "hi" } );
+t.save({a: 1, b: "hi"});
+t.save({a: 2, b: "hi"});
// Basic test of .snapshot().
-assert( t.find().snapshot()[0].a == 1 , ".snapshot() simple test 1" );
+assert(t.find().snapshot()[0].a == 1, ".snapshot() simple test 1");
var q = t.findOne();
-q.c = "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz";
-t.save(q); // will move a:1 object to after a:2 in the file
-assert( t.find().snapshot()[0].a == 1 , ".snapshot() simple test 2" );
+q.c =
+ "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz";
+t.save(q); // will move a:1 object to after a:2 in the file
+assert(t.find().snapshot()[0].a == 1, ".snapshot() simple test 2");
-assert( t.findOne( { a : 1 } ).b != null , "A" );
-assert( t.findOne( { a : 1 } , { a : 1 } ).b == null , "B");
+assert(t.findOne({a: 1}).b != null, "A");
+assert(t.findOne({a: 1}, {a: 1}).b == null, "B");
-assert( t.find( { a : 1 } )[0].b != null , "C" );
-assert( t.find( { a : 1 } , { a : 1 } )[0].b == null , "D" );
-assert( t.find( { a : 1 } , { a : 1 } ).sort( { a : 1 } )[0].b == null , "D" );
+assert(t.find({a: 1})[0].b != null, "C");
+assert(t.find({a: 1}, {a: 1})[0].b == null, "D");
+assert(t.find({a: 1}, {a: 1}).sort({a: 1})[0].b == null, "D");
id = t.findOne()._id;
-assert( t.findOne( id ) , "E" );
-assert( t.findOne( id ).a , "F" );
-assert( t.findOne( id ).b , "G" );
+assert(t.findOne(id), "E");
+assert(t.findOne(id).a, "F");
+assert(t.findOne(id).b, "G");
-assert( t.findOne( id , { a : 1 } ).a , "H" );
-assert( ! t.findOne( id , { a : 1 } ).b , "I" );
+assert(t.findOne(id, {a: 1}).a, "H");
+assert(!t.findOne(id, {a: 1}).b, "I");
-assert(t.validate().valid,"not valid");
+assert(t.validate().valid, "not valid");
-if ( lookAtDocumentMetrics ) {
+if (lookAtDocumentMetrics) {
// ignore mongos
nscannedEnd = db.serverStatus().metrics.queryExecutor.scanned;
- assert.lte( nscannedStart + 16, nscannedEnd );
+ assert.lte(nscannedStart + 16, nscannedEnd);
}
diff --git a/jstests/core/find2.js b/jstests/core/find2.js
index f72203419bc..2992bb683f1 100644
--- a/jstests/core/find2.js
+++ b/jstests/core/find2.js
@@ -1,16 +1,16 @@
// Test object id sorting.
-function testObjectIdFind( db ) {
+function testObjectIdFind(db) {
r = db.ed_db_find2_oif;
r.drop();
- for( i = 0; i < 3; ++i )
- r.save( {} );
+ for (i = 0; i < 3; ++i)
+ r.save({});
- f = r.find().sort( { _id: 1 } );
- assert.eq( 3, f.count() );
- assert( f[ 0 ]._id < f[ 1 ]._id );
- assert( f[ 1 ]._id < f[ 2 ]._id );
+ f = r.find().sort({_id: 1});
+ assert.eq(3, f.count());
+ assert(f[0]._id < f[1]._id);
+ assert(f[1]._id < f[2]._id);
}
-testObjectIdFind( db );
+testObjectIdFind(db);
diff --git a/jstests/core/find3.js b/jstests/core/find3.js
index a5e4b7a4d66..42c06065e9e 100644
--- a/jstests/core/find3.js
+++ b/jstests/core/find3.js
@@ -1,10 +1,10 @@
t = db.find3;
t.drop();
-for ( i=1; i<=50; i++)
- t.save( { a : i } );
+for (i = 1; i <= 50; i++)
+ t.save({a: i});
-assert.eq( 50 , t.find().toArray().length );
-assert.eq( 20 , t.find().limit(20).toArray().length );
+assert.eq(50, t.find().toArray().length);
+assert.eq(20, t.find().limit(20).toArray().length);
assert(t.validate().valid);
diff --git a/jstests/core/find4.js b/jstests/core/find4.js
index eb9ff60e33c..7a5ebf79578 100644
--- a/jstests/core/find4.js
+++ b/jstests/core/find4.js
@@ -2,25 +2,33 @@
t = db.find4;
t.drop();
-t.save( { a : 1123 , b : 54332 } );
+t.save({a: 1123, b: 54332});
-o = t.find( {} , {} )[0];
-assert.eq( 1123 , o.a , "A" );
-assert.eq( 54332 , o.b , "B" );
-assert( o._id.str , "C" );
+o = t.find({}, {})[0];
+assert.eq(1123, o.a, "A");
+assert.eq(54332, o.b, "B");
+assert(o._id.str, "C");
-o = t.find( {} , { a : 1 } )[0];
-assert.eq( 1123 , o.a , "D" );
-assert( o._id.str , "E" );
-assert( ! o.b , "F" );
+o = t.find({}, {a: 1})[0];
+assert.eq(1123, o.a, "D");
+assert(o._id.str, "E");
+assert(!o.b, "F");
-o = t.find( {} , { b : 1 } )[0];
-assert.eq( 54332 , o.b , "G" );
-assert( o._id.str , "H" );
-assert( ! o.a , "I" );
+o = t.find({}, {b: 1})[0];
+assert.eq(54332, o.b, "G");
+assert(o._id.str, "H");
+assert(!o.a, "I");
t.drop();
-t.save( { a : 1 , b : 1 } );
-t.save( { a : 2 , b : 2 } );
-assert.eq( "1-1,2-2" , t.find().map( function(z){ return z.a + "-" + z.b; } ).toString() );
-assert.eq( "1-undefined,2-undefined" , t.find( {} , { a : 1 }).map( function(z){ return z.a + "-" + z.b; } ).toString() );
+t.save({a: 1, b: 1});
+t.save({a: 2, b: 2});
+assert.eq("1-1,2-2",
+ t.find().map(function(z) {
+ return z.a + "-" + z.b;
+ }).toString());
+assert.eq("1-undefined,2-undefined",
+ t.find({}, {a: 1})
+ .map(function(z) {
+ return z.a + "-" + z.b;
+ })
+ .toString());
diff --git a/jstests/core/find5.js b/jstests/core/find5.js
index ab648906122..33ba96ea103 100644
--- a/jstests/core/find5.js
+++ b/jstests/core/find5.js
@@ -5,47 +5,47 @@ t.drop();
t.save({a: 1});
t.save({b: 5});
-assert.eq( 2 , t.find({}, {b:1}).count(), "A");
+assert.eq(2, t.find({}, {b: 1}).count(), "A");
-function getIds( f ){
- return t.find( {} , f ).map( function(z){ return z._id; } );
+function getIds(f) {
+ return t.find({}, f).map(function(z) {
+ return z._id;
+ });
}
-assert.eq( Array.tojson( getIds( null ) ) , Array.tojson( getIds( {} ) ) , "B1 " );
-assert.eq( Array.tojson( getIds( null ) ) , Array.tojson( getIds( { a : 1 } ) ) , "B2 " );
-assert.eq( Array.tojson( getIds( null ) ) , Array.tojson( getIds( { b : 1 } ) ) , "B3 " );
-assert.eq( Array.tojson( getIds( null ) ) , Array.tojson( getIds( { c : 1 } ) ) , "B4 " );
+assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({})), "B1 ");
+assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({a: 1})), "B2 ");
+assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({b: 1})), "B3 ");
+assert.eq(Array.tojson(getIds(null)), Array.tojson(getIds({c: 1})), "B4 ");
-x = t.find( {} , { a : 1 } )[0];
-assert.eq( 1 , x.a , "C1" );
-assert.isnull( x.b , "C2" );
+x = t.find({}, {a: 1})[0];
+assert.eq(1, x.a, "C1");
+assert.isnull(x.b, "C2");
-x = t.find( {} , { a : 1 } )[1];
-assert.isnull( x.a , "C3" );
-assert.isnull( x.b , "C4" );
+x = t.find({}, {a: 1})[1];
+assert.isnull(x.a, "C3");
+assert.isnull(x.b, "C4");
-x = t.find( {} , { b : 1 } )[0];
-assert.isnull( x.a , "C5" );
-assert.isnull( x.b , "C6" );
+x = t.find({}, {b: 1})[0];
+assert.isnull(x.a, "C5");
+assert.isnull(x.b, "C6");
-x = t.find( {} , { b : 1 } )[1];
-assert.isnull( x.a , "C7" );
-assert.eq( 5 , x.b , "C8" );
+x = t.find({}, {b: 1})[1];
+assert.isnull(x.a, "C7");
+assert.eq(5, x.b, "C8");
t.drop();
+t.save({a: 1, b: {c: 2, d: 3, e: 4}});
+assert.eq(2, t.find({}, {"b.c": 1}).toArray()[0].b.c, "D");
-t.save( { a : 1 , b : { c : 2 , d : 3 , e : 4 } } );
-assert.eq( 2 , t.find( {} , { "b.c" : 1 } ).toArray()[0].b.c , "D" );
+o = t.find({}, {"b.c": 1, "b.d": 1}).toArray()[0];
+assert(o.b.c, "E 1");
+assert(o.b.d, "E 2");
+assert(!o.b.e, "E 3");
-o = t.find( {} , { "b.c" : 1 , "b.d" : 1 } ).toArray()[0];
-assert( o.b.c , "E 1" );
-assert( o.b.d , "E 2" );
-assert( !o.b.e , "E 3" );
-
-assert( ! t.find( {} , { "b.c" : 1 } ).toArray()[0].b.d , "F" );
+assert(!t.find({}, {"b.c": 1}).toArray()[0].b.d, "F");
t.drop();
-t.save( { a : { b : { c : 1 } } } );
-assert.eq( 1 , t.find( {} , { "a.b.c" : 1 } )[0].a.b.c , "G" );
-
+t.save({a: {b: {c: 1}}});
+assert.eq(1, t.find({}, {"a.b.c": 1})[0].a.b.c, "G");
diff --git a/jstests/core/find6.js b/jstests/core/find6.js
index 0739c38aa9f..96d76192814 100644
--- a/jstests/core/find6.js
+++ b/jstests/core/find6.js
@@ -2,40 +2,38 @@
t = db.find6;
t.drop();
-t.save( { a : 1 } );
-t.save( { a : 1 , b : 1 } );
+t.save({a: 1});
+t.save({a: 1, b: 1});
-assert.eq( 2 , t.find().count() , "A" );
-assert.eq( 1 , t.find( { b : null } ).count() , "B" );
-assert.eq( 1 , t.find( "function() { return this.b == null; }" ).itcount() , "C" );
-assert.eq( 1 , t.find( "function() { return this.b == null; }" ).count() , "D" );
+assert.eq(2, t.find().count(), "A");
+assert.eq(1, t.find({b: null}).count(), "B");
+assert.eq(1, t.find("function() { return this.b == null; }").itcount(), "C");
+assert.eq(1, t.find("function() { return this.b == null; }").count(), "D");
/* test some stuff with dot array notation */
q = db.find6a;
q.drop();
-q.insert( { "a" : [ { "0" : 1 } ] } );
-q.insert( { "a" : [ { "0" : 2 } ] } );
-q.insert( { "a" : [ 1 ] } );
-q.insert( { "a" : [ 9, 1 ] } );
+q.insert({"a": [{"0": 1}]});
+q.insert({"a": [{"0": 2}]});
+q.insert({"a": [1]});
+q.insert({"a": [9, 1]});
-function f() {
-
- assert.eq( 2, q.find( { 'a.0' : 1 } ).count(), "da1");
- assert.eq( 2, q.find( { 'a.0' : 1 } ).count(), "da2");
-
- assert.eq( 1, q.find( { 'a.0' : { $gt : 8 } } ).count(), "da3");
- assert.eq( 0, q.find( { 'a.0' : { $lt : 0 } } ).count(), "da4");
+function f() {
+ assert.eq(2, q.find({'a.0': 1}).count(), "da1");
+ assert.eq(2, q.find({'a.0': 1}).count(), "da2");
+ assert.eq(1, q.find({'a.0': {$gt: 8}}).count(), "da3");
+ assert.eq(0, q.find({'a.0': {$lt: 0}}).count(), "da4");
}
-for( var pass = 0; pass <= 1 ; pass++ ) {
+for (var pass = 0; pass <= 1; pass++) {
f();
- q.ensureIndex({a:1});
+ q.ensureIndex({a: 1});
}
t = db.multidim;
t.drop();
-t.insert({"a" : [ [ ], 1, [ 3, 4 ] ] });
-assert.eq(1, t.find({"a.2":[3,4]}).count(), "md1");
-assert.eq(1, t.find({"a.2.1":4}).count(), "md2");
-assert.eq(0, t.find({"a.2.1":3}).count(), "md3");
+t.insert({"a": [[], 1, [3, 4]]});
+assert.eq(1, t.find({"a.2": [3, 4]}).count(), "md1");
+assert.eq(1, t.find({"a.2.1": 4}).count(), "md2");
+assert.eq(0, t.find({"a.2.1": 3}).count(), "md3");
diff --git a/jstests/core/find7.js b/jstests/core/find7.js
index ca4c7d449bf..ed18dcbb0ff 100644
--- a/jstests/core/find7.js
+++ b/jstests/core/find7.js
@@ -1,8 +1,10 @@
t = db.find7;
t.drop();
-x = { "_id" : { "d" : 3649, "w" : "signed" }, "u" : { "3649" : 5 } };
-t.insert(x );
-assert.eq( x , t.findOne() , "A1" );
-assert.eq( x , t.findOne( { _id : x._id } ) , "A2" );
-
+x = {
+ "_id": {"d": 3649, "w": "signed"},
+ "u": {"3649": 5}
+};
+t.insert(x);
+assert.eq(x, t.findOne(), "A1");
+assert.eq(x, t.findOne({_id: x._id}), "A2");
diff --git a/jstests/core/find8.js b/jstests/core/find8.js
index 3622eba8ae6..14930a056e7 100644
--- a/jstests/core/find8.js
+++ b/jstests/core/find8.js
@@ -3,21 +3,21 @@
t = db.jstests_find8;
t.drop();
-t.save( {a:[1,10]} );
-assert.eq( 1, t.count( { a: { $gt:2,$lt:5} } ) );
+t.save({a: [1, 10]});
+assert.eq(1, t.count({a: {$gt: 2, $lt: 5}}));
// Check that we can do a query with 'invalid' range.
-assert.eq( 1, t.count( { a: { $gt:5,$lt:2} } ) );
+assert.eq(1, t.count({a: {$gt: 5, $lt: 2}}));
-t.save( {a:[-1,12]} );
+t.save({a: [-1, 12]});
// Check that we can do a query with 'invalid' range and sort.
-assert.eq( 2, t.find( { a: { $gt:5,$lt:2} } ).sort( {a:1} ).itcount() );
-assert.eq( 2, t.find( { a: { $gt:5,$lt:2} } ).sort( {$natural:-1} ).itcount() );
+assert.eq(2, t.find({a: {$gt: 5, $lt: 2}}).sort({a: 1}).itcount());
+assert.eq(2, t.find({a: {$gt: 5, $lt: 2}}).sort({$natural: -1}).itcount());
// SERVER-2864
-if( 0 ) {
-t.find( { a: { $gt:5,$lt:2} } ).itcount();
-// Check that we can record a plan for an 'invalid' range.
-assert( t.find( { a: { $gt:5,$lt:2} } ).explain( true ).oldPlan );
+if (0) {
+ t.find({a: {$gt: 5, $lt: 2}}).itcount();
+ // Check that we can record a plan for an 'invalid' range.
+ assert(t.find({a: {$gt: 5, $lt: 2}}).explain(true).oldPlan);
}
diff --git a/jstests/core/find9.js b/jstests/core/find9.js
index 8c2b7ac282b..1c56e8c850d 100644
--- a/jstests/core/find9.js
+++ b/jstests/core/find9.js
@@ -3,26 +3,26 @@
t = db.jstests_find9;
t.drop();
-big = new Array( 500000 ).toString();
-for( i = 0; i < 60; ++i ) {
- t.save( { a:i, b:big } );
+big = new Array(500000).toString();
+for (i = 0; i < 60; ++i) {
+ t.save({a: i, b: big});
}
// Check size limit with a simple query.
-assert.eq( 60, t.find( {}, { a:1 } ).objsLeftInBatch() ); // Projection resizes the result set.
-assert.gt( 60, t.find().objsLeftInBatch() );
+assert.eq(60, t.find({}, {a: 1}).objsLeftInBatch()); // Projection resizes the result set.
+assert.gt(60, t.find().objsLeftInBatch());
// Check size limit on a query with an explicit batch size.
-assert.eq( 60, t.find( {}, { a:1 } ).batchSize( 80 ).objsLeftInBatch() );
-assert.gt( 60, t.find().batchSize( 80 ).objsLeftInBatch() );
+assert.eq(60, t.find({}, {a: 1}).batchSize(80).objsLeftInBatch());
+assert.gt(60, t.find().batchSize(80).objsLeftInBatch());
-for( i = 0; i < 60; ++i ) {
- t.save( { a:i, b:big } );
+for (i = 0; i < 60; ++i) {
+ t.save({a: i, b: big});
}
// Check size limit with get more.
-c = t.find().batchSize( 80 );
-while( c.hasNext() ) {
- assert.gt( 60, c.objsLeftInBatch() );
+c = t.find().batchSize(80);
+while (c.hasNext()) {
+ assert.gt(60, c.objsLeftInBatch());
c.next();
}
diff --git a/jstests/core/find_and_modify.js b/jstests/core/find_and_modify.js
index afaeda3d9a9..cf2f8804d9e 100644
--- a/jstests/core/find_and_modify.js
+++ b/jstests/core/find_and_modify.js
@@ -2,48 +2,60 @@ t = db.find_and_modify;
t.drop();
// fill db
-for(var i=1; i<=10; i++) {
- t.insert({priority:i, inprogress:false, value:0});
+for (var i = 1; i <= 10; i++) {
+ t.insert({priority: i, inprogress: false, value: 0});
}
// returns old
-out = t.findAndModify({update: {$set: {inprogress: true}, $inc: {value:1}}});
+out = t.findAndModify({update: {$set: {inprogress: true}, $inc: {value: 1}}});
assert.eq(out.value, 0);
assert.eq(out.inprogress, false);
t.update({_id: out._id}, {$set: {inprogress: false}});
// returns new
-out = t.findAndModify({update: {$set: {inprogress: true}, $inc: {value:1}}, 'new': true});
+out = t.findAndModify({update: {$set: {inprogress: true}, $inc: {value: 1}}, 'new': true});
assert.eq(out.value, 2);
assert.eq(out.inprogress, true);
t.update({_id: out._id}, {$set: {inprogress: false}});
// update highest priority
-out = t.findAndModify({query: {inprogress:false}, sort:{priority:-1}, update: {$set: {inprogress: true}}});
+out = t.findAndModify(
+ {query: {inprogress: false}, sort: {priority: -1}, update: {$set: {inprogress: true}}});
assert.eq(out.priority, 10);
// update next highest priority
-out = t.findAndModify({query: {inprogress:false}, sort:{priority:-1}, update: {$set: {inprogress: true}}});
+out = t.findAndModify(
+ {query: {inprogress: false}, sort: {priority: -1}, update: {$set: {inprogress: true}}});
assert.eq(out.priority, 9);
// remove lowest priority
-out = t.findAndModify({sort:{priority:1}, remove:true});
+out = t.findAndModify({sort: {priority: 1}, remove: true});
assert.eq(out.priority, 1);
// remove next lowest priority
-out = t.findAndModify({sort:{priority:1}, remove:1});
+out = t.findAndModify({sort: {priority: 1}, remove: 1});
assert.eq(out.priority, 2);
// return null (was {} before 1.5.4) if no matches (drivers may handle this differently)
-out = t.findAndModify({query:{no_such_field:1}, remove:1});
+out = t.findAndModify({query: {no_such_field: 1}, remove: 1});
assert.eq(out, null);
// make sure we fail with conflicting params to findAndModify SERVER-16601
-t.insert({x:1});
-assert.throws(function() { t.findAndModify({query:{x:1}, update:{y:2}, remove:true}); });
-assert.throws(function() { t.findAndModify({query:{x:1}, update:{y:2}, remove:true, sort: {x:1}}); });
-assert.throws(function() { t.findAndModify({query:{x:1}, update:{y:2}, remove:true, upsert:true}); });
-assert.throws(function() { t.findAndModify({query:{x:1}, update:{y:2}, new:true, remove:true}); });
-assert.throws(function() { t.findAndModify({query:{x:1}, upsert:true, remove:true}); });
+t.insert({x: 1});
+assert.throws(function() {
+ t.findAndModify({query: {x: 1}, update: {y: 2}, remove: true});
+});
+assert.throws(function() {
+ t.findAndModify({query: {x: 1}, update: {y: 2}, remove: true, sort: {x: 1}});
+});
+assert.throws(function() {
+ t.findAndModify({query: {x: 1}, update: {y: 2}, remove: true, upsert: true});
+});
+assert.throws(function() {
+ t.findAndModify({query: {x: 1}, update: {y: 2}, new: true, remove: true});
+});
+assert.throws(function() {
+ t.findAndModify({query: {x: 1}, upsert: true, remove: true});
+});
//
// SERVER-17387: Find and modify should throw in the case of invalid projection.
@@ -57,8 +69,7 @@ var cmdRes = db.runCommand({
query: {_id: "miss"},
update: {$inc: {y: 1}},
fields: {foo: {$pop: ["bar"]}},
- upsert: true,
- new: true
+ upsert: true, new: true
});
assert.commandFailed(cmdRes);
@@ -70,8 +81,7 @@ cmdRes = db.runCommand({
query: {_id: "found"},
update: {$inc: {y: 1}},
fields: {foo: {$pop: ["bar"]}},
- upsert: true,
- new: true
+ upsert: true, new: true
});
assert.commandFailed(cmdRes);
@@ -80,8 +90,7 @@ cmdRes = db.runCommand({
findAndModify: t.getName(),
query: {_id: "found"},
update: {$inc: {y: 1}},
- fields: {foo: {$pop: ["bar"]}},
- new: true
+ fields: {foo: {$pop: ["bar"]}}, new: true
});
assert.commandFailed(cmdRes);
@@ -109,12 +118,8 @@ assert.commandFailed(cmdRes);
//
t.drop();
-cmdRes = db.runCommand({
- findAndModify: t.getName(),
- query: {_id: "miss"},
- update: {$inc: {y: 1}},
- upsert: true
-});
+cmdRes = db.runCommand(
+ {findAndModify: t.getName(), query: {_id: "miss"}, update: {$inc: {y: 1}}, upsert: true});
assert.commandWorked(cmdRes);
assert("value" in cmdRes);
assert.eq(null, cmdRes.value);
@@ -123,8 +128,7 @@ cmdRes = db.runCommand({
findAndModify: t.getName(),
query: {_id: "missagain"},
update: {$inc: {y: 1}},
- upsert: true,
- new: true
+ upsert: true, new: true
});
assert.commandWorked(cmdRes);
assert("value" in cmdRes);
diff --git a/jstests/core/find_and_modify2.js b/jstests/core/find_and_modify2.js
index 2c8ab5b3bb6..e9bc8f5b23a 100644
--- a/jstests/core/find_and_modify2.js
+++ b/jstests/core/find_and_modify2.js
@@ -1,16 +1,16 @@
t = db.find_and_modify2;
t.drop();
-t.insert({_id:1, i:0, j:0});
+t.insert({_id: 1, i: 0, j: 0});
-out = t.findAndModify({update: {$inc: {i:1}}, 'new': true, fields: {i:1}});
-assert.eq(out, {_id:1, i:1});
+out = t.findAndModify({update: {$inc: {i: 1}}, 'new': true, fields: {i: 1}});
+assert.eq(out, {_id: 1, i: 1});
-out = t.findAndModify({update: {$inc: {i:1}}, fields: {i:0}});
-assert.eq(out, {_id:1, j:0});
+out = t.findAndModify({update: {$inc: {i: 1}}, fields: {i: 0}});
+assert.eq(out, {_id: 1, j: 0});
-out = t.findAndModify({update: {$inc: {i:1}}, fields: {_id:0, j:1}});
-assert.eq(out, {j:0});
+out = t.findAndModify({update: {$inc: {i: 1}}, fields: {_id: 0, j: 1}});
+assert.eq(out, {j: 0});
-out = t.findAndModify({update: {$inc: {i:1}}, fields: {_id:0, j:1}, 'new': true});
-assert.eq(out, {j:0});
+out = t.findAndModify({update: {$inc: {i: 1}}, fields: {_id: 0, j: 1}, 'new': true});
+assert.eq(out, {j: 0});
diff --git a/jstests/core/find_and_modify3.js b/jstests/core/find_and_modify3.js
index 5dd24726d30..a1a88aeecb5 100644
--- a/jstests/core/find_and_modify3.js
+++ b/jstests/core/find_and_modify3.js
@@ -1,21 +1,33 @@
t = db.find_and_modify3;
t.drop();
-t.insert({_id:0, other:0, comments:[{i:0, j:0}, {i:1, j:1}]});
-t.insert({_id:1, other:1, comments:[{i:0, j:0}, {i:1, j:1}]}); // this is the only one that gets modded
-t.insert({_id:2, other:2, comments:[{i:0, j:0}, {i:1, j:1}]});
+t.insert({_id: 0, other: 0, comments: [{i: 0, j: 0}, {i: 1, j: 1}]});
+t.insert({
+ _id: 1,
+ other: 1,
+ comments: [{i: 0, j: 0}, {i: 1, j: 1}]
+}); // this is the only one that gets modded
+t.insert({_id: 2, other: 2, comments: [{i: 0, j: 0}, {i: 1, j: 1}]});
-orig0 = t.findOne({_id:0});
-orig2 = t.findOne({_id:2});
+orig0 = t.findOne({_id: 0});
+orig2 = t.findOne({_id: 2});
-out = t.findAndModify({query: {_id:1, 'comments.i':0}, update: {$set: {'comments.$.j':2}}, 'new': true, sort:{other:1}});
-assert.eq(out.comments[0], {i:0, j:2});
-assert.eq(out.comments[1], {i:1, j:1});
-assert.eq(t.findOne({_id:0}), orig0);
-assert.eq(t.findOne({_id:2}), orig2);
+out = t.findAndModify({
+ query: {_id: 1, 'comments.i': 0},
+ update: {$set: {'comments.$.j': 2}}, 'new': true,
+ sort: {other: 1}
+});
+assert.eq(out.comments[0], {i: 0, j: 2});
+assert.eq(out.comments[1], {i: 1, j: 1});
+assert.eq(t.findOne({_id: 0}), orig0);
+assert.eq(t.findOne({_id: 2}), orig2);
-out = t.findAndModify({query: {other:1, 'comments.i':1}, update: {$set: {'comments.$.j':3}}, 'new': true, sort:{other:1}});
-assert.eq(out.comments[0], {i:0, j:2});
-assert.eq(out.comments[1], {i:1, j:3});
-assert.eq(t.findOne({_id:0}), orig0);
-assert.eq(t.findOne({_id:2}), orig2);
+out = t.findAndModify({
+ query: {other: 1, 'comments.i': 1},
+ update: {$set: {'comments.$.j': 3}}, 'new': true,
+ sort: {other: 1}
+});
+assert.eq(out.comments[0], {i: 0, j: 2});
+assert.eq(out.comments[1], {i: 1, j: 3});
+assert.eq(t.findOne({_id: 0}), orig0);
+assert.eq(t.findOne({_id: 2}), orig2);
diff --git a/jstests/core/find_and_modify4.js b/jstests/core/find_and_modify4.js
index 04abc2f1ce7..b6be565b70a 100644
--- a/jstests/core/find_and_modify4.js
+++ b/jstests/core/find_and_modify4.js
@@ -2,32 +2,31 @@ t = db.find_and_modify4;
t.drop();
// this is the best way to build auto-increment
-function getNextVal(counterName){
+function getNextVal(counterName) {
var ret = t.findAndModify({
- query: {_id: counterName},
- update: {$inc: {val: 1}},
- upsert: true,
- 'new': true,
- });
+ query: {_id: counterName},
+ update: {$inc: {val: 1}},
+ upsert: true, 'new': true,
+ });
return ret;
}
-assert.eq(getNextVal("a"), {_id:"a", val:1});
-assert.eq(getNextVal("a"), {_id:"a", val:2});
-assert.eq(getNextVal("a"), {_id:"a", val:3});
-assert.eq(getNextVal("z"), {_id:"z", val:1});
-assert.eq(getNextVal("z"), {_id:"z", val:2});
-assert.eq(getNextVal("a"), {_id:"a", val:4});
+assert.eq(getNextVal("a"), {_id: "a", val: 1});
+assert.eq(getNextVal("a"), {_id: "a", val: 2});
+assert.eq(getNextVal("a"), {_id: "a", val: 3});
+assert.eq(getNextVal("z"), {_id: "z", val: 1});
+assert.eq(getNextVal("z"), {_id: "z", val: 2});
+assert.eq(getNextVal("a"), {_id: "a", val: 4});
t.drop();
-function helper(upsert){
+function helper(upsert) {
return t.findAndModify({
- query: {_id: "asdf"},
- update: {$inc: {val: 1}},
- upsert: upsert,
- 'new': false // the default
- });
+ query: {_id: "asdf"},
+ update: {$inc: {val: 1}},
+ upsert: upsert,
+ 'new': false // the default
+ });
}
// upsert:false so nothing there before and after
@@ -37,19 +36,12 @@ assert.eq(t.count(), 0);
// upsert:true so nothing there before; something there after
assert.eq(helper(true), null);
assert.eq(t.count(), 1);
-assert.eq(helper(true), {_id: 'asdf', val: 1});
-assert.eq(helper(false), {_id: 'asdf', val: 2}); // upsert only matters when obj doesn't exist
-assert.eq(helper(true), {_id: 'asdf', val: 3});
-
+assert.eq(helper(true), {_id: 'asdf', val: 1});
+assert.eq(helper(false), {_id: 'asdf', val: 2}); // upsert only matters when obj doesn't exist
+assert.eq(helper(true), {_id: 'asdf', val: 3});
// _id created if not specified
-var out = t.findAndModify({
- query: {a:1},
- update: {$set: {b: 2}},
- upsert: true,
- 'new': true
- });
+var out = t.findAndModify({query: {a: 1}, update: {$set: {b: 2}}, upsert: true, 'new': true});
assert.neq(out._id, undefined);
assert.eq(out.a, 1);
assert.eq(out.b, 2);
-
diff --git a/jstests/core/find_and_modify_concurrent_update.js b/jstests/core/find_and_modify_concurrent_update.js
index 2dd1e182008..3986ac62ea9 100644
--- a/jstests/core/find_and_modify_concurrent_update.js
+++ b/jstests/core/find_and_modify_concurrent_update.js
@@ -13,15 +13,12 @@
assert.writeOK(t.insert({_id: 1, a: 1, b: 1}));
var join = startParallelShell(
- "db.find_and_modify_concurrent.update({a: 1, b: 1}, {$inc: {a: 1}});"
- );
+ "db.find_and_modify_concurrent.update({a: 1, b: 1}, {$inc: {a: 1}});");
// Due to the sleep, we expect this find and modify to yield before updating the
// document.
- var res = t.findAndModify({
- query: {a: 1, b: 1, $where: "sleep(100); return true;"},
- update: {$inc: {a: 1}}
- });
+ var res = t.findAndModify(
+ {query: {a: 1, b: 1, $where: "sleep(100); return true;"}, update: {$inc: {a: 1}}});
join();
var docs = t.find().toArray();
diff --git a/jstests/core/find_and_modify_empty_coll.js b/jstests/core/find_and_modify_empty_coll.js
index 9c231fb2d1f..2d3a2ee8ffd 100644
--- a/jstests/core/find_and_modify_empty_coll.js
+++ b/jstests/core/find_and_modify_empty_coll.js
@@ -8,8 +8,8 @@
assert.eq(null, coll.findAndModify({remove: true}));
assert.eq(null, coll.findAndModify({update: {$inc: {i: 1}}}));
- var upserted = coll.findAndModify(
- {query: {_id: 0}, update: {$inc: {i: 1}}, upsert: true, new: true});
+ var upserted =
+ coll.findAndModify({query: {_id: 0}, update: {$inc: {i: 1}}, upsert: true, new: true});
assert.eq(upserted, {_id: 0, i: 1});
coll.drop();
diff --git a/jstests/core/find_and_modify_empty_update.js b/jstests/core/find_and_modify_empty_update.js
index 3d72a4ff9f3..ccfb1a8201f 100644
--- a/jstests/core/find_and_modify_empty_update.js
+++ b/jstests/core/find_and_modify_empty_update.js
@@ -46,7 +46,7 @@ assert.eq(coll.findOne({_id: 0}), {_id: 0});
// Test update:{} with a sort, upsert:true, and new:true.
coll.remove({});
-ret = coll.findAndModify({query: {_id: 0, a: 1}, update: {}, upsert: true, sort: {a: 1},
- new: true});
+ret =
+ coll.findAndModify({query: {_id: 0, a: 1}, update: {}, upsert: true, sort: {a: 1}, new: true});
assert.eq(ret, {_id: 0});
assert.eq(coll.findOne({_id: 0}), {_id: 0});
diff --git a/jstests/core/find_and_modify_server6226.js b/jstests/core/find_and_modify_server6226.js
index 21d1bdad6f8..e847f6e5697 100644
--- a/jstests/core/find_and_modify_server6226.js
+++ b/jstests/core/find_and_modify_server6226.js
@@ -2,6 +2,5 @@
t = db.find_and_modify_server6226;
t.drop();
-ret = t.findAndModify( { query : { _id : 1 } , update : { "$inc" : { i : 1 } } , upsert : true } );
-assert.isnull( ret );
-
+ret = t.findAndModify({query: {_id: 1}, update: {"$inc": {i: 1}}, upsert: true});
+assert.isnull(ret);
diff --git a/jstests/core/find_and_modify_server6254.js b/jstests/core/find_and_modify_server6254.js
index 9850a4f3fb9..c2bfa3628a1 100644
--- a/jstests/core/find_and_modify_server6254.js
+++ b/jstests/core/find_and_modify_server6254.js
@@ -2,9 +2,8 @@
t = db.find_and_modify_server6254;
t.drop();
-t.insert( { x : 1 } );
-ret = t.findAndModify( { query : { x : 1 } , update : { $set : { x : 2 } } , new : true } );
-assert.eq( 2 , ret.x , tojson( ret ) );
-
-assert.eq( 1 , t.count() );
+t.insert({x: 1});
+ret = t.findAndModify({query: {x: 1}, update: {$set: {x: 2}}, new: true});
+assert.eq(2, ret.x, tojson(ret));
+assert.eq(1, t.count());
diff --git a/jstests/core/find_and_modify_server6582.js b/jstests/core/find_and_modify_server6582.js
index a48cc962fea..79a0b31d4ed 100644
--- a/jstests/core/find_and_modify_server6582.js
+++ b/jstests/core/find_and_modify_server6582.js
@@ -2,17 +2,14 @@
t = db.find_and_modify_server6582;
t.drop();
-x = t.runCommand( "findAndModify" , {query:{f:1}, update:{$set:{f:2}}, upsert:true, new:true});
+x = t.runCommand("findAndModify", {query: {f: 1}, update: {$set: {f: 2}}, upsert: true, new: true});
le = x.lastErrorObject;
-assert.eq( le.updatedExisting, false );
-assert.eq( le.n, 1 );
-assert.eq( le.upserted, x.value._id );
+assert.eq(le.updatedExisting, false);
+assert.eq(le.n, 1);
+assert.eq(le.upserted, x.value._id);
t.drop();
-t.insert( { f : 1 } );
-x = t.runCommand( "findAndModify" , {query:{f:1}, remove : true } );
+t.insert({f: 1});
+x = t.runCommand("findAndModify", {query: {f: 1}, remove: true});
le = x.lastErrorObject;
-assert.eq( le.n, 1 );
-
-
-
+assert.eq(le.n, 1);
diff --git a/jstests/core/find_and_modify_server6588.js b/jstests/core/find_and_modify_server6588.js
index 68d7f0739dc..9c546daba72 100644
--- a/jstests/core/find_and_modify_server6588.js
+++ b/jstests/core/find_and_modify_server6588.js
@@ -1,22 +1,35 @@
t = db.find_and_modify_sever6588;
-initial = { _id : 1 , a : [ { b : 1 } ] , z : 1 };
-up = { "$set" : { "a.$.b" : 2 } };
-q = { _id : 1 , "a.b" : 1 };
-correct = { _id : 1 , a : [ { b : 2 } ] , z : 1 };
+initial = {
+ _id: 1,
+ a: [{b: 1}],
+ z: 1
+};
+up = {
+ "$set": {"a.$.b": 2}
+};
+q = {
+ _id: 1,
+ "a.b": 1
+};
+correct = {
+ _id: 1,
+ a: [{b: 2}],
+ z: 1
+};
t.drop();
-t.insert( initial );
-t.update( q , up );
-assert.eq( correct , t.findOne() );
+t.insert(initial);
+t.update(q, up);
+assert.eq(correct, t.findOne());
t.drop();
-t.insert( initial );
-x = t.findAndModify( { query : q , update : up } );
-assert.eq( correct , t.findOne() );
+t.insert(initial);
+x = t.findAndModify({query: q, update: up});
+assert.eq(correct, t.findOne());
t.drop();
-t.insert( initial );
-x = t.findAndModify( { query : { z : 1 , "a.b" : 1 } , update : up } );
-assert.eq( correct , t.findOne() );
+t.insert(initial);
+x = t.findAndModify({query: {z: 1, "a.b": 1}, update: up});
+assert.eq(correct, t.findOne());
diff --git a/jstests/core/find_and_modify_server6659.js b/jstests/core/find_and_modify_server6659.js
index f5f89b051e6..6b3f958c0e0 100644
--- a/jstests/core/find_and_modify_server6659.js
+++ b/jstests/core/find_and_modify_server6659.js
@@ -2,6 +2,6 @@
t = db.find_and_modify_server6659;
t.drop();
-x = t.findAndModify({query:{f:1}, update:{$set:{f:2}}, upsert:true, new:true});
-assert.eq( 2, x.f );
-assert.eq( 2, t.findOne().f );
+x = t.findAndModify({query: {f: 1}, update: {$set: {f: 2}}, upsert: true, new: true});
+assert.eq(2, x.f);
+assert.eq(2, t.findOne().f);
diff --git a/jstests/core/find_and_modify_server6865.js b/jstests/core/find_and_modify_server6865.js
index 8e1c21e19a6..b38c0b1bee4 100644
--- a/jstests/core/find_and_modify_server6865.js
+++ b/jstests/core/find_and_modify_server6865.js
@@ -59,295 +59,247 @@
//
// Simple query that uses an inclusion projection.
- testFAMWorked(
- {_id: 42, a: [1, 2], b: 3},
- {query: {_id: 42}, fields: {_id: 0, b: 1}, remove: true},
- {b: 3}
- );
+ testFAMWorked({_id: 42, a: [1, 2], b: 3},
+ {query: {_id: 42}, fields: {_id: 0, b: 1}, remove: true},
+ {b: 3});
// Simple query that uses an exclusion projection.
- testFAMWorked(
- {_id: 42, a: [1, 2], b: 3, c: 4},
- {query: {_id: 42}, fields: {a: 0, b: 0}, remove: true},
- {_id: 42, c: 4}
- );
+ testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4},
+ {query: {_id: 42}, fields: {a: 0, b: 0}, remove: true},
+ {_id: 42, c: 4});
// Simple query that uses $elemMatch in the projection.
testFAMWorked(
- {_id: 42, b: [{name: 'first', value: 1},
- {name: 'second', value: 2},
- {name: 'third', value: 3}]
+ {
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
},
{query: {_id: 42}, fields: {b: {$elemMatch: {value: 2}}}, remove: true},
- {_id: 42, b: [{name: 'second', value: 2}]}
- );
+ {_id: 42, b: [{name: 'second', value: 2}]});
// Query on an array of values while using a positional projection.
- testFAMWorked(
- {_id: 42, a: [1, 2]},
- {query: {a: 2}, fields: {'a.$': 1}, remove: true},
- {_id: 42, a: [2]}
- );
+ testFAMWorked({_id: 42, a: [1, 2]},
+ {query: {a: 2}, fields: {'a.$': 1}, remove: true},
+ {_id: 42, a: [2]});
// Query on an array of objects while using a positional projection.
testFAMWorked(
- {_id: 42, b: [{name: 'first', value: 1},
- {name: 'second', value: 2},
- {name: 'third', value: 3}]
+ {
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
},
{query: {_id: 42, 'b.name': 'third'}, fields: {'b.$': 1}, remove: true},
- {_id: 42, b: [{name: 'third', value: 3}]}
- );
+ {_id: 42, b: [{name: 'third', value: 3}]});
// Query on an array of objects while using a position projection.
// Verifies that the projection {'b.$.value': 1} is treated the
// same as {'b.$': 1}.
testFAMWorked(
- {_id: 42, b: [{name: 'first', value: 1},
- {name: 'second', value: 2},
- {name: 'third', value: 3}]
+ {
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
},
{query: {_id: 42, 'b.name': 'third'}, fields: {'b.$.value': 1}, remove: true},
- {_id: 42, b: [{name: 'third', value: 3}]}
- );
+ {_id: 42, b: [{name: 'third', value: 3}]});
// Query on an array of objects using $elemMatch while using an inclusion projection.
testFAMWorked(
- {_id: 42, a: 5, b: [{name: 'john', value: 1},
- {name: 'jess', value: 2},
- {name: 'jeff', value: 3}]
+ {
+ _id: 42,
+ a: 5,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
},
{
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, a: 5},
- remove: true
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, a: 5},
+ remove: true
},
- {a: 5}
- );
+ {a: 5});
// Query on an array of objects using $elemMatch while using the positional
// operator in the projection.
testFAMWorked(
- {_id: 42, b: [{name: 'john', value: 1},
- {name: 'jess', value: 2},
- {name: 'jeff', value: 3}]
+ {
+ _id: 42,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
},
{
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, 'b.$': 1},
- remove: true
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, 'b.$': 1},
+ remove: true
},
- {b: [{name: 'john', value: 1}]}
- );
+ {b: [{name: 'john', value: 1}]});
//
// Update operations with new=false
//
// Simple query that uses an inclusion projection.
- testFAMWorked(
- {_id: 42, a: [1, 2], b: 3},
- {query: {_id: 42}, fields: {_id: 0, b: 1}, update: {$inc: {b: 1}}, new: false},
- {b: 3}
- );
+ testFAMWorked({_id: 42, a: [1, 2], b: 3},
+ {query: {_id: 42}, fields: {_id: 0, b: 1}, update: {$inc: {b: 1}}, new: false},
+ {b: 3});
// Simple query that uses an exclusion projection.
- testFAMWorked(
- {_id: 42, a: [1, 2], b: 3, c: 4},
- {query: {_id: 42}, fields: {a: 0, b: 0}, update: {$set: {c: 5}}, new: false},
- {_id: 42, c: 4}
- );
+ testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4},
+ {query: {_id: 42}, fields: {a: 0, b: 0}, update: {$set: {c: 5}}, new: false},
+ {_id: 42, c: 4});
// Simple query that uses $elemMatch in the projection.
testFAMWorked(
- {_id: 42, b: [{name: 'first', value: 1},
- {name: 'second', value: 2},
- {name: 'third', value: 3}]
+ {
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
},
{
- query: {_id: 42},
- fields: {b: {$elemMatch: {value: 2}}},
- update: {$set: {name: '2nd'}},
- new: false
+ query: {_id: 42},
+ fields: {b: {$elemMatch: {value: 2}}},
+ update: {$set: {name: '2nd'}}, new: false
},
- {_id: 42, b: [{name: 'second', value: 2}]}
- );
+ {_id: 42, b: [{name: 'second', value: 2}]});
// Query on an array of values while using a positional projection.
testFAMWorked(
{_id: 42, a: [1, 2]},
{query: {a: 2}, fields: {'a.$': 1}, update: {$set: {'b.kind': 'xyz'}}, new: false},
- {_id: 42, a: [2]}
- );
+ {_id: 42, a: [2]});
// Query on an array of objects while using a positional projection.
testFAMWorked(
- {_id: 42, b: [{name: 'first', value: 1},
- {name: 'second', value: 2},
- {name: 'third', value: 3}]
+ {
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
},
{
- query: {_id: 42, 'b.name': 'third'},
- fields: {'b.$': 1},
- update: {$set: {'b.$.kind': 'xyz'}},
- new: false
+ query: {_id: 42, 'b.name': 'third'},
+ fields: {'b.$': 1},
+ update: {$set: {'b.$.kind': 'xyz'}}, new: false
},
- {_id: 42, b: [{name: 'third', value: 3}]}
- );
+ {_id: 42, b: [{name: 'third', value: 3}]});
// Query on an array of objects while using $elemMatch in the projection,
// where the matched array element is modified.
testFAMWorked(
{_id: 1, a: [{x: 1, y: 1}, {x: 1, y: 2}]},
- {
- query: {_id: 1},
- fields: {a: {$elemMatch: {x: 1}}},
- update: {$pop: {a: -1}},
- new: false
- },
- {_id: 1, a: [{x: 1, y: 1}]}
- );
+ {query: {_id: 1}, fields: {a: {$elemMatch: {x: 1}}}, update: {$pop: {a: -1}}, new: false},
+ {_id: 1, a: [{x: 1, y: 1}]});
// Query on an array of objects using $elemMatch while using an inclusion projection.
testFAMWorked(
- {_id: 42, a: 5, b: [{name: 'john', value: 1},
- {name: 'jess', value: 2},
- {name: 'jeff', value: 3}]
+ {
+ _id: 42,
+ a: 5,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
},
{
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, a: 5},
- update: {$inc: {a: 6}},
- new: false
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, a: 5},
+ update: {$inc: {a: 6}}, new: false
},
- {a: 5}
- );
+ {a: 5});
// Query on an array of objects using $elemMatch while using the positional
// operator in the projection.
testFAMWorked(
- {_id: 42, b: [{name: 'john', value: 1},
- {name: 'jess', value: 2},
- {name: 'jeff', value: 3}]
+ {
+ _id: 42,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
},
{
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, 'b.$': 1},
- update: {$set: {name: 'james'}},
- new: false
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, 'b.$': 1},
+ update: {$set: {name: 'james'}}, new: false
},
- {b: [{name: 'john', value: 1}]}
- );
+ {b: [{name: 'john', value: 1}]});
//
// Update operations with new=true
//
// Simple query that uses an inclusion projection.
- testFAMWorked(
- {_id: 42, a: [1, 2], b: 3},
- {query: {_id: 42}, fields: {_id: 0, b: 1}, update: {$inc: {b: 1}}, new: true},
- {b: 4}
- );
+ testFAMWorked({_id: 42, a: [1, 2], b: 3},
+ {query: {_id: 42}, fields: {_id: 0, b: 1}, update: {$inc: {b: 1}}, new: true},
+ {b: 4});
// Simple query that uses an exclusion projection.
- testFAMWorked(
- {_id: 42, a: [1, 2], b: 3, c: 4},
- {query: {_id: 42}, fields: {a: 0, b: 0}, update: {$set: {c: 5}}, new: true},
- {_id: 42, c: 5}
- );
+ testFAMWorked({_id: 42, a: [1, 2], b: 3, c: 4},
+ {query: {_id: 42}, fields: {a: 0, b: 0}, update: {$set: {c: 5}}, new: true},
+ {_id: 42, c: 5});
// Simple query that uses $elemMatch in the projection.
testFAMWorked(
- {_id: 42, b: [{name: 'first', value: 1},
- {name: 'second', value: 2},
- {name: 'third', value: 3}]
+ {
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
},
{
- query: {_id: 42},
- fields: {b: {$elemMatch: {value: 2}}},
- update: {$set: {'b.1.name': '2nd'}},
- new: true
+ query: {_id: 42},
+ fields: {b: {$elemMatch: {value: 2}}},
+ update: {$set: {'b.1.name': '2nd'}}, new: true
},
- {_id: 42, b: [{name: '2nd', value: 2}]}
- );
+ {_id: 42, b: [{name: '2nd', value: 2}]});
// Query on an array of values while using a positional projection.
testFAMFailed(
{_id: 42, a: [1, 2]},
- {query: {a: 2}, fields: {'a.$': 1}, update: {$set: {'b.kind': 'xyz'}}, new: true}
- );
+ {query: {a: 2}, fields: {'a.$': 1}, update: {$set: {'b.kind': 'xyz'}}, new: true});
// Query on an array of objects while using a positional projection.
testFAMFailed(
- {_id: 42, b: [{name: 'first', value: 1},
- {name: 'second', value: 2},
- {name: 'third', value: 3}]
+ {
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
},
{
- query: {_id: 42, 'b.name': 'third'},
- fields: {'b.$': 1},
- update: {$set: {'b.$.kind': 'xyz'}},
- new: true
- }
- );
+ query: {_id: 42, 'b.name': 'third'},
+ fields: {'b.$': 1},
+ update: {$set: {'b.$.kind': 'xyz'}}, new: true
+ });
// Query on an array of objects while using $elemMatch in the projection.
testFAMWorked(
- {_id: 42, b: [{name: 'first', value: 1},
- {name: 'second', value: 2},
- {name: 'third', value: 3}]
+ {
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
},
{
- query: {_id: 42},
- fields: {b: {$elemMatch: {value: 2}}, c: 1},
- update: {$set: {c: 'xyz'}},
- new: true
+ query: {_id: 42},
+ fields: {b: {$elemMatch: {value: 2}}, c: 1},
+ update: {$set: {c: 'xyz'}}, new: true
},
- {_id: 42, b: [{name: 'second', value: 2}], c: 'xyz'}
- );
+ {_id: 42, b: [{name: 'second', value: 2}], c: 'xyz'});
// Query on an array of objects while using $elemMatch in the projection,
// where the matched array element is modified.
testFAMWorked(
{_id: 1, a: [{x: 1, y: 1}, {x: 1, y: 2}]},
- {
- query: {_id: 1},
- fields: {a: {$elemMatch: {x: 1}}},
- update: {$pop: {a: -1}},
- new: true
- },
- {_id: 1, a: [{x: 1, y: 2}]}
- );
+ {query: {_id: 1}, fields: {a: {$elemMatch: {x: 1}}}, update: {$pop: {a: -1}}, new: true},
+ {_id: 1, a: [{x: 1, y: 2}]});
// Query on an array of objects using $elemMatch while using an inclusion projection.
testFAMWorked(
- {_id: 42, a: 5, b: [{name: 'john', value: 1},
- {name: 'jess', value: 2},
- {name: 'jeff', value: 3}]
+ {
+ _id: 42,
+ a: 5,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
},
{
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, a: 5},
- update: {$inc: {a: 6}},
- new: true
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, a: 5},
+ update: {$inc: {a: 6}}, new: true
},
- {a: 11}
- );
+ {a: 11});
// Query on an array of objects using $elemMatch while using the positional
// operator in the projection.
testFAMFailed(
- {_id: 42, b: [{name: 'john', value: 1},
- {name: 'jess', value: 2},
- {name: 'jeff', value: 3}]
+ {
+ _id: 42,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
},
{
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, 'b.$': 1},
- update: {$set: {name: 'james'}},
- new: true
- }
- );
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, 'b.$': 1},
+ update: {$set: {name: 'james'}}, new: true
+ });
})();
diff --git a/jstests/core/find_and_modify_server6909.js b/jstests/core/find_and_modify_server6909.js
index 2f688459698..8e807e0c893 100644
--- a/jstests/core/find_and_modify_server6909.js
+++ b/jstests/core/find_and_modify_server6909.js
@@ -1,21 +1,22 @@
c = db.find_and_modify_server6906;
-
c.drop();
-c.insert( { _id : 5 , a:{ b:1 } } );
-ret = c.findAndModify( { query:{ 'a.b':1 },
- update:{ $set:{ 'a.b':2 } }, // Ensure the query on 'a.b' no longer matches.
- new:true } );
-assert.eq( 5, ret._id );
-assert.eq( 2, ret.a.b );
-
+c.insert({_id: 5, a: {b: 1}});
+ret = c.findAndModify({
+ query: {'a.b': 1},
+ update: {$set: {'a.b': 2}}, // Ensure the query on 'a.b' no longer matches.
+ new: true
+});
+assert.eq(5, ret._id);
+assert.eq(2, ret.a.b);
c.drop();
-c.insert( { _id : null , a:{ b:1 } } );
-ret = c.findAndModify( { query:{ 'a.b':1 },
- update:{ $set:{ 'a.b':2 } }, // Ensure the query on 'a.b' no longer matches.
- new:true } );
-assert.eq( 2, ret.a.b );
-
+c.insert({_id: null, a: {b: 1}});
+ret = c.findAndModify({
+ query: {'a.b': 1},
+ update: {$set: {'a.b': 2}}, // Ensure the query on 'a.b' no longer matches.
+ new: true
+});
+assert.eq(2, ret.a.b);
diff --git a/jstests/core/find_and_modify_server6993.js b/jstests/core/find_and_modify_server6993.js
index b8a31915372..bf8ed52c9c7 100644
--- a/jstests/core/find_and_modify_server6993.js
+++ b/jstests/core/find_and_modify_server6993.js
@@ -1,9 +1,9 @@
c = db.find_and_modify_server6993;
c.drop();
-
-c.insert( { a:[ 1, 2 ] } );
-
-c.findAndModify( { query:{ a:1 }, update:{ $set:{ 'a.$':5 } } } );
-
-assert.eq( 5, c.findOne().a[ 0 ] );
+
+c.insert({a: [1, 2]});
+
+c.findAndModify({query: {a: 1}, update: {$set: {'a.$': 5}}});
+
+assert.eq(5, c.findOne().a[0]);
diff --git a/jstests/core/find_and_modify_server7660.js b/jstests/core/find_and_modify_server7660.js
index 4828dff4e49..d344d773dca 100644
--- a/jstests/core/find_and_modify_server7660.js
+++ b/jstests/core/find_and_modify_server7660.js
@@ -2,17 +2,10 @@
t = db.find_and_modify_server7660;
t.drop();
-a = t.findAndModify({
- query : { foo : 'bar' },
- update : { $set : { bob : 'john' } },
- sort: { foo : 1},
- upsert: true,
- new : true
-});
+a = t.findAndModify(
+ {query: {foo: 'bar'}, update: {$set: {bob: 'john'}}, sort: {foo: 1}, upsert: true, new: true});
b = t.findOne();
-assert.eq( a, b );
-assert.eq( "bar", a.foo );
-assert.eq( "john", a.bob );
-
-
+assert.eq(a, b);
+assert.eq("bar", a.foo);
+assert.eq("john", a.bob);
diff --git a/jstests/core/find_and_modify_where.js b/jstests/core/find_and_modify_where.js
index fe13a6894fd..e3d5604559a 100644
--- a/jstests/core/find_and_modify_where.js
+++ b/jstests/core/find_and_modify_where.js
@@ -2,9 +2,8 @@
t = db.find_and_modify_where;
t.drop();
-t.insert( { _id : 1 , x : 1 } );
+t.insert({_id: 1, x: 1});
-res = t.findAndModify( { query : { $where : "return this.x == 1" } , update : { $set : { y : 1 } } } );
-
-assert.eq( 1 , t.findOne().y );
+res = t.findAndModify({query: {$where: "return this.x == 1"}, update: {$set: {y: 1}}});
+assert.eq(1, t.findOne().y);
diff --git a/jstests/core/find_dedup.js b/jstests/core/find_dedup.js
index 401384ceb7a..a9160df7562 100644
--- a/jstests/core/find_dedup.js
+++ b/jstests/core/find_dedup.js
@@ -20,16 +20,21 @@ t.save({_id: 2, a: 1, b: 1});
t.save({_id: 3, a: 2, b: 2});
t.save({_id: 4, a: 3, b: 3});
t.save({_id: 5, a: 3, b: 3});
-checkDedup({$or: [{a:{$gte:0,$lte:2},b:{$gte:0,$lte:2}},
- {a:{$gte:1,$lte:3},b:{$gte:1,$lte:3}},
- {a:{$gte:1,$lte:4},b:{$gte:1,$lte:4}}]},
- [1, 2, 3, 4, 5]);
+checkDedup(
+ {
+ $or: [
+ {a: {$gte: 0, $lte: 2}, b: {$gte: 0, $lte: 2}},
+ {a: {$gte: 1, $lte: 3}, b: {$gte: 1, $lte: 3}},
+ {a: {$gte: 1, $lte: 4}, b: {$gte: 1, $lte: 4}}
+ ]
+ },
+ [1, 2, 3, 4, 5]);
// Deduping multikey
t.drop();
t.save({_id: 1, a: [1, 2, 3], b: [4, 5, 6]});
t.save({_id: 2, a: [1, 2, 3], b: [4, 5, 6]});
-assert.eq( 2, t.count() );
+assert.eq(2, t.count());
checkDedup({$or: [{a: {$in: [1, 2]}}, {b: {$in: [4, 5]}}]}, [1, 2]);
-t.ensureIndex( { a : 1 } );
+t.ensureIndex({a: 1});
checkDedup({$or: [{a: {$in: [1, 2]}}, {b: {$in: [4, 5]}}]}, [1, 2]);
diff --git a/jstests/core/find_getmore_bsonsize.js b/jstests/core/find_getmore_bsonsize.js
index fdad2b1f1d6..904a9c33ab0 100644
--- a/jstests/core/find_getmore_bsonsize.js
+++ b/jstests/core/find_getmore_bsonsize.js
@@ -74,7 +74,10 @@
bigStr += bigStr;
}
bigStr = bigStr.substring(0, (16 * oneMB) - 32);
- var maxSizeDoc = {_id: 0, padding: bigStr};
+ var maxSizeDoc = {
+ _id: 0,
+ padding: bigStr
+ };
assert.eq(Object.bsonsize(maxSizeDoc), 16 * oneMB);
assert.writeOK(coll.insert(maxSizeDoc));
diff --git a/jstests/core/find_getmore_cmd.js b/jstests/core/find_getmore_cmd.js
index b9d12c41a19..3f3d50993e7 100644
--- a/jstests/core/find_getmore_cmd.js
+++ b/jstests/core/find_getmore_cmd.js
@@ -45,11 +45,8 @@
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, coll.getFullName());
assert.eq(cmdRes.cursor.firstBatch.length, 10);
- cmdRes = db.runCommand({
- getMore: cmdRes.cursor.id,
- collection: collName,
- batchSize: NumberInt(5)
- });
+ cmdRes =
+ db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(5)});
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, coll.getFullName());
assert.eq(cmdRes.cursor.nextBatch.length, 5);
@@ -60,11 +57,8 @@
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, coll.getFullName());
assert.eq(cmdRes.cursor.firstBatch.length, 0);
- cmdRes = db.runCommand({
- getMore: cmdRes.cursor.id,
- collection: collName,
- batchSize: NumberInt(5)
- });
+ cmdRes =
+ db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(5)});
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, coll.getFullName());
assert.eq(cmdRes.cursor.nextBatch.length, 5);
@@ -75,11 +69,8 @@
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, coll.getFullName());
assert.eq(cmdRes.cursor.firstBatch.length, 10);
- cmdRes = db.runCommand({
- getMore: cmdRes.cursor.id,
- collection: collName,
- batchSize: NumberInt(11)
- });
+ cmdRes =
+ db.runCommand({getMore: cmdRes.cursor.id, collection: collName, batchSize: NumberInt(11)});
assert.eq(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, coll.getFullName());
assert.eq(cmdRes.cursor.nextBatch.length, 10);
diff --git a/jstests/core/find_size.js b/jstests/core/find_size.js
index 0293c3e2b56..0327a20085f 100644
--- a/jstests/core/find_size.js
+++ b/jstests/core/find_size.js
@@ -15,7 +15,7 @@ assert.eq(1, t.count({arr: {$size: NumberInt(4)}}));
// bad inputs
var badInputs = [-1, NumberLong(-10000), "str", 3.2, 0.1, NumberLong(-9223372036854775808)];
-badInputs.forEach(function(x) {
+badInputs.forEach(function(x) {
assert.commandFailed(db.runCommand({count: t.getName(), query: {arr: {$size: x}}}),
- "$size argument " + x + " should have failed");
+ "$size argument " + x + " should have failed");
});
diff --git a/jstests/core/finda.js b/jstests/core/finda.js
index 4017ce91ad5..711b70f2e25 100644
--- a/jstests/core/finda.js
+++ b/jstests/core/finda.js
@@ -6,101 +6,99 @@ t.drop();
numDocs = 200;
function clearQueryPlanCache() {
- t.ensureIndex( { c:1 } );
- t.dropIndex( { c:1 } );
+ t.ensureIndex({c: 1});
+ t.dropIndex({c: 1});
}
-function assertAllFound( matches ) {
-// printjson( matches );
- found = new Array( numDocs );
- for( var i = 0; i < numDocs; ++i ) {
- found[ i ] = false;
+function assertAllFound(matches) {
+ // printjson( matches );
+ found = new Array(numDocs);
+ for (var i = 0; i < numDocs; ++i) {
+ found[i] = false;
}
- for( var i in matches ) {
- m = matches[ i ];
- found[ m._id ] = true;
+ for (var i in matches) {
+ m = matches[i];
+ found[m._id] = true;
}
- for( var i = 0; i < numDocs; ++i ) {
- assert( found[ i ], i.toString() );
+ for (var i = 0; i < numDocs; ++i) {
+ assert(found[i], i.toString());
}
}
-function makeCursor( query, projection, sort, batchSize, returnKey ) {
+function makeCursor(query, projection, sort, batchSize, returnKey) {
print("\n*** query:");
printjson(query);
print("proj:");
printjson(projection);
- cursor = t.find( query, projection );
- if ( sort ) {
- cursor.sort( sort );
+ cursor = t.find(query, projection);
+ if (sort) {
+ cursor.sort(sort);
print("sort:");
printjson(sort);
}
- if ( batchSize ) {
- cursor.batchSize( batchSize );
+ if (batchSize) {
+ cursor.batchSize(batchSize);
print("bs: " + batchSize);
}
- if ( returnKey ) {
+ if (returnKey) {
cursor.returnKey();
}
return cursor;
}
-function checkCursorWithBatchSizeProjection( query, projection, sort, batchSize,
- expectedLeftInBatch ) {
+function checkCursorWithBatchSizeProjection(
+ query, projection, sort, batchSize, expectedLeftInBatch) {
clearQueryPlanCache();
- cursor = makeCursor( query, projection, sort, batchSize );
+ cursor = makeCursor(query, projection, sort, batchSize);
// XXX: this
- assert.eq( expectedLeftInBatch, cursor.objsLeftInBatch() );
- assertAllFound( cursor.toArray() );
+ assert.eq(expectedLeftInBatch, cursor.objsLeftInBatch());
+ assertAllFound(cursor.toArray());
}
-function checkCursorWithBatchSize( query, sort, batchSize, expectedLeftInBatch ) {
- checkCursorWithBatchSizeProjection( query, {}, sort, batchSize, expectedLeftInBatch );
- checkCursorWithBatchSizeProjection( query, { a:1, _id:1 }, sort, batchSize,
- expectedLeftInBatch );
+function checkCursorWithBatchSize(query, sort, batchSize, expectedLeftInBatch) {
+ checkCursorWithBatchSizeProjection(query, {}, sort, batchSize, expectedLeftInBatch);
+ checkCursorWithBatchSizeProjection(query, {a: 1, _id: 1}, sort, batchSize, expectedLeftInBatch);
// In the cases tested, when expectedLeftInBatch is high enough takeover will occur during
// the query operation rather than getMore and the last few matches should properly return keys
// from the a,_id index.
clearQueryPlanCache();
- if ( expectedLeftInBatch > 110 ) {
- cursor = makeCursor( query, {}, sort, batchSize, true );
+ if (expectedLeftInBatch > 110) {
+ cursor = makeCursor(query, {}, sort, batchSize, true);
lastNonAIndexResult = -1;
- for( var i = 0; i < expectedLeftInBatch; ++i ) {
+ for (var i = 0; i < expectedLeftInBatch; ++i) {
next = cursor.next();
// Identify the query plan used by checking the fields of a returnKey query.
- if ( !friendlyEqual( [ 'a', '_id' ], Object.keySet( next ) ) ) {
+ if (!friendlyEqual(['a', '_id'], Object.keySet(next))) {
lastNonAIndexResult = i;
}
}
// The last results should come from the a,_id index.
- assert.lt( lastNonAIndexResult, expectedLeftInBatch - 5 );
+ assert.lt(lastNonAIndexResult, expectedLeftInBatch - 5);
}
}
-function queryWithPlanTypes( withDups ) {
+function queryWithPlanTypes(withDups) {
t.drop();
- for( var i = 1; i < numDocs; ++i ) {
- t.save( { _id:i, a:i, b:0 } );
+ for (var i = 1; i < numDocs; ++i) {
+ t.save({_id: i, a: i, b: 0});
}
- if ( withDups ) {
- t.save( { _id:0, a:[ 0, numDocs ], b:0 } ); // Add a dup on a:1 index.
+ if (withDups) {
+ t.save({_id: 0, a: [0, numDocs], b: 0}); // Add a dup on a:1 index.
+ } else {
+ t.save({_id: 0, a: 0, b: 0});
}
- else {
- t.save( { _id:0, a:0, b:0 } );
- }
- t.ensureIndex( { a:1, _id:1 } ); // Include _id for a covered index projection.
+ t.ensureIndex({a: 1, _id: 1}); // Include _id for a covered index projection.
// All plans in order.
- checkCursorWithBatchSize( { a:{ $gte:0 } }, null, 150, 150 );
+ checkCursorWithBatchSize({a: {$gte: 0}}, null, 150, 150);
// All plans out of order.
- checkCursorWithBatchSize( { a:{ $gte:0 } }, { c:1 }, null, 101 );
+ checkCursorWithBatchSize({a: {$gte: 0}}, {c: 1}, null, 101);
// Some plans in order, some out of order.
- checkCursorWithBatchSize( { a:{ $gte:0 }, b:0 }, { a:1 }, 150, 150 );
- checkCursorWithBatchSize( { a:{ $gte:0 }, b:0 }, { a:1 }, null, 101 );
+ checkCursorWithBatchSize({a: {$gte: 0}, b: 0}, {a: 1}, 150, 150);
+ checkCursorWithBatchSize({a: {$gte: 0}, b: 0}, {a: 1}, null, 101);
}
-queryWithPlanTypes( false );
-queryWithPlanTypes( true );
+queryWithPlanTypes(false);
+queryWithPlanTypes(true);
diff --git a/jstests/core/fm1.js b/jstests/core/fm1.js
index de1df03edcb..cff14b029d9 100644
--- a/jstests/core/fm1.js
+++ b/jstests/core/fm1.js
@@ -2,11 +2,9 @@
t = db.fm1;
t.drop();
-t.insert({foo:{bar:1}});
-t.find({},{foo:1}).toArray();
-t.find({},{'foo.bar':1}).toArray();
-t.find({},{'baz':1}).toArray();
-t.find({},{'baz.qux':1}).toArray();
-t.find({},{'foo.qux':1}).toArray();
-
-
+t.insert({foo: {bar: 1}});
+t.find({}, {foo: 1}).toArray();
+t.find({}, {'foo.bar': 1}).toArray();
+t.find({}, {'baz': 1}).toArray();
+t.find({}, {'baz.qux': 1}).toArray();
+t.find({}, {'foo.qux': 1}).toArray();
diff --git a/jstests/core/fm2.js b/jstests/core/fm2.js
index 93284c0c611..14fa8e06466 100644
--- a/jstests/core/fm2.js
+++ b/jstests/core/fm2.js
@@ -2,8 +2,7 @@
t = db.fm2;
t.drop();
-t.insert( { "one" : { "two" : {"three":"four"} } } );
-
-x = t.find({},{"one.two":1})[0];
-assert.eq( 1 , Object.keySet( x.one ).length , "ks l 1" );
+t.insert({"one": {"two": {"three": "four"}}});
+x = t.find({}, {"one.two": 1})[0];
+assert.eq(1, Object.keySet(x.one).length, "ks l 1");
diff --git a/jstests/core/fm3.js b/jstests/core/fm3.js
index ebe79f16dc4..301ce3d56ab 100644
--- a/jstests/core/fm3.js
+++ b/jstests/core/fm3.js
@@ -1,37 +1,36 @@
t = db.fm3;
t.drop();
-t.insert( {a:[{c:{e:1, f:1}}, {d:2}, 'z'], b:1} );
+t.insert({a: [{c: {e: 1, f: 1}}, {d: 2}, 'z'], b: 1});
-
-res = t.findOne({}, {a:1});
-assert.eq(res.a, [{c:{e:1, f:1}}, {d:2}, 'z'], "one a");
+res = t.findOne({}, {a: 1});
+assert.eq(res.a, [{c: {e: 1, f: 1}}, {d: 2}, 'z'], "one a");
assert.eq(res.b, undefined, "one b");
-res = t.findOne({}, {a:0});
+res = t.findOne({}, {a: 0});
assert.eq(res.a, undefined, "two a");
assert.eq(res.b, 1, "two b");
-res = t.findOne({}, {'a.d':1});
-assert.eq(res.a, [{}, {d:2}], "three a");
+res = t.findOne({}, {'a.d': 1});
+assert.eq(res.a, [{}, {d: 2}], "three a");
assert.eq(res.b, undefined, "three b");
-res = t.findOne({}, {'a.d':0});
-assert.eq(res.a, [{c:{e:1, f:1}}, {}, 'z'], "four a");
+res = t.findOne({}, {'a.d': 0});
+assert.eq(res.a, [{c: {e: 1, f: 1}}, {}, 'z'], "four a");
assert.eq(res.b, 1, "four b");
-res = t.findOne({}, {'a.c':1});
-assert.eq(res.a, [{c:{e:1, f:1}}, {}], "five a");
+res = t.findOne({}, {'a.c': 1});
+assert.eq(res.a, [{c: {e: 1, f: 1}}, {}], "five a");
assert.eq(res.b, undefined, "five b");
-res = t.findOne({}, {'a.c':0});
-assert.eq(res.a, [{}, {d:2}, 'z'], "six a");
+res = t.findOne({}, {'a.c': 0});
+assert.eq(res.a, [{}, {d: 2}, 'z'], "six a");
assert.eq(res.b, 1, "six b");
-res = t.findOne({}, {'a.c.e':1});
-assert.eq(res.a, [{c:{e:1}}, {}], "seven a");
+res = t.findOne({}, {'a.c.e': 1});
+assert.eq(res.a, [{c: {e: 1}}, {}], "seven a");
assert.eq(res.b, undefined, "seven b");
-res = t.findOne({}, {'a.c.e':0});
-assert.eq(res.a, [{c:{f:1}}, {d:2}, 'z'], "eight a");
+res = t.findOne({}, {'a.c.e': 0});
+assert.eq(res.a, [{c: {f: 1}}, {d: 2}, 'z'], "eight a");
assert.eq(res.b, 1, "eight b");
diff --git a/jstests/core/fm4.js b/jstests/core/fm4.js
index c90041cf485..6a1aa5a44b5 100644
--- a/jstests/core/fm4.js
+++ b/jstests/core/fm4.js
@@ -1,16 +1,16 @@
t = db.fm4;
t.drop();
-t.insert({_id:1, a:1, b:1});
+t.insert({_id: 1, a: 1, b: 1});
-assert.eq( t.findOne({}, {_id:1}), {_id:1}, 1);
-assert.eq( t.findOne({}, {_id:0}), {a:1, b:1}, 2);
+assert.eq(t.findOne({}, {_id: 1}), {_id: 1}, 1);
+assert.eq(t.findOne({}, {_id: 0}), {a: 1, b: 1}, 2);
-assert.eq( t.findOne({}, {_id:1, a:1}), {_id:1, a:1}, 3);
-assert.eq( t.findOne({}, {_id:0, a:1}), {a:1}, 4);
+assert.eq(t.findOne({}, {_id: 1, a: 1}), {_id: 1, a: 1}, 3);
+assert.eq(t.findOne({}, {_id: 0, a: 1}), {a: 1}, 4);
-assert.eq( t.findOne({}, {_id:0, a:0}), {b:1}, 6);
-assert.eq( t.findOne({}, { a:0}), {_id:1, b:1}, 5);
+assert.eq(t.findOne({}, {_id: 0, a: 0}), {b: 1}, 6);
+assert.eq(t.findOne({}, {a: 0}), {_id: 1, b: 1}, 5);
// not sure if we want to suport this since it is the same as above
-//assert.eq( t.findOne({}, {_id:1, a:0}), {_id:1, b:1}, 5)
+// assert.eq( t.findOne({}, {_id:1, a:0}), {_id:1, b:1}, 5)
diff --git a/jstests/core/fsync.js b/jstests/core/fsync.js
index 99aceb83c9e..57762ce8c78 100644
--- a/jstests/core/fsync.js
+++ b/jstests/core/fsync.js
@@ -8,81 +8,81 @@
* - Confirm that the pseudo commands and eval can perform fsyncLock/Unlock
*/
(function() {
-"use strict";
-
-// Start with a clean DB
-var fsyncLockDB = db.getSisterDB('fsyncLockTestDB');
-fsyncLockDB.dropDatabase();
-
-// Tests the db.fsyncLock/fsyncUnlock features
-var storageEngine = db.serverStatus().storageEngine.name;
-
-// As of SERVER-18899 fsyncLock/fsyncUnlock will error when called on a storage engine
-// that does not support the begin/end backup commands.
-var supportsFsync = db.fsyncLock();
-
-if (!supportsFsync.ok) {
- assert.commandFailedWithCode(supportsFsync, ErrorCodes.CommandNotSupported);
- jsTestLog("Skipping test for " + storageEngine + " as it does not support fsync");
- return;
-}
-db.fsyncUnlock();
-
-var resFail = fsyncLockDB.runCommand({fsync:1, lock:1});
-
-// Start with a clean DB
-var fsyncLockDB = db.getSisterDB('fsyncLockTestDB');
-fsyncLockDB.dropDatabase();
-
-// Test it doesn't work unless invoked against the admin DB
-var resFail = fsyncLockDB.runCommand({fsync:1, lock:1});
-assert(!resFail.ok, "fsyncLock command succeeded against DB other than admin.");
-
-// Uses admin automatically and locks the server for writes
-var fsyncLockRes = db.fsyncLock();
-assert(fsyncLockRes.ok, "fsyncLock command failed against admin DB");
-assert(db.currentOp().fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
-
-// Make sure writes are blocked. Spawn a write operation in a separate shell and make sure it
-// is blocked. There is really now way to do that currently, so just check that the write didn't
-// go through.
-var writeOpHandle = startParallelShell("db.getSisterDB('fsyncLockTestDB').coll.insert({x:1});");
-sleep(1000);
-
-// Make sure reads can still run even though there is a pending write and also that the write
-// didn't get through
-assert.eq(0, fsyncLockDB.coll.count({}));
-
-// Unlock and make sure the insert succeeded
-var fsyncUnlockRes = db.fsyncUnlock();
-assert(fsyncUnlockRes.ok, "fsyncUnlock command failed");
-assert(db.currentOp().fsyncLock == null, "fsyncUnlock is not null in db.currentOp");
-
-// Make sure the db is unlocked and the initial write made it through.
-writeOpHandle();
-fsyncLockDB.coll.insert({x:2});
-
-assert.eq(2, fsyncLockDB.coll.count({}));
-
-// Issue the fsyncLock and fsyncUnlock a second time, to ensure that we can
-// run this command repeatedly with no problems.
-var fsyncLockRes = db.fsyncLock();
-assert(fsyncLockRes.ok, "Second execution of fsyncLock command failed");
-
-var fsyncUnlockRes = db.fsyncUnlock();
-assert(fsyncUnlockRes.ok, "Second execution of fsyncUnlock command failed");
-
-// Ensure eval is not allowed to invoke fsyncLock
-assert(!db.eval('db.fsyncLock()').ok, "eval('db.fsyncLock()') should fail.");
-
-// Check that the fsyncUnlock pseudo-command (a lookup on cmd.$sys.unlock)
-// still has the same effect as a legitimate 'fsyncUnlock' command
-// TODO: remove this in in the release following MongoDB 3.2 when pseudo-commands
-// are removed
-var fsyncCommandRes = db.fsyncLock();
-assert(fsyncLockRes.ok, "fsyncLock command failed against admin DB");
-assert(db.currentOp().fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
-var fsyncPseudoCommandRes = db.getSiblingDB("admin").$cmd.sys.unlock.findOne();
-assert(fsyncPseudoCommandRes.ok, "fsyncUnlock pseudo-command failed");
-assert(db.currentOp().fsyncLock == null, "fsyncUnlock is not null in db.currentOp");
+ "use strict";
+
+ // Start with a clean DB
+ var fsyncLockDB = db.getSisterDB('fsyncLockTestDB');
+ fsyncLockDB.dropDatabase();
+
+ // Tests the db.fsyncLock/fsyncUnlock features
+ var storageEngine = db.serverStatus().storageEngine.name;
+
+ // As of SERVER-18899 fsyncLock/fsyncUnlock will error when called on a storage engine
+ // that does not support the begin/end backup commands.
+ var supportsFsync = db.fsyncLock();
+
+ if (!supportsFsync.ok) {
+ assert.commandFailedWithCode(supportsFsync, ErrorCodes.CommandNotSupported);
+ jsTestLog("Skipping test for " + storageEngine + " as it does not support fsync");
+ return;
+ }
+ db.fsyncUnlock();
+
+ var resFail = fsyncLockDB.runCommand({fsync: 1, lock: 1});
+
+ // Start with a clean DB
+ var fsyncLockDB = db.getSisterDB('fsyncLockTestDB');
+ fsyncLockDB.dropDatabase();
+
+ // Test it doesn't work unless invoked against the admin DB
+ var resFail = fsyncLockDB.runCommand({fsync: 1, lock: 1});
+ assert(!resFail.ok, "fsyncLock command succeeded against DB other than admin.");
+
+ // Uses admin automatically and locks the server for writes
+ var fsyncLockRes = db.fsyncLock();
+ assert(fsyncLockRes.ok, "fsyncLock command failed against admin DB");
+ assert(db.currentOp().fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
+
+ // Make sure writes are blocked. Spawn a write operation in a separate shell and make sure it
+ // is blocked. There is really now way to do that currently, so just check that the write didn't
+ // go through.
+ var writeOpHandle = startParallelShell("db.getSisterDB('fsyncLockTestDB').coll.insert({x:1});");
+ sleep(1000);
+
+ // Make sure reads can still run even though there is a pending write and also that the write
+ // didn't get through
+ assert.eq(0, fsyncLockDB.coll.count({}));
+
+ // Unlock and make sure the insert succeeded
+ var fsyncUnlockRes = db.fsyncUnlock();
+ assert(fsyncUnlockRes.ok, "fsyncUnlock command failed");
+ assert(db.currentOp().fsyncLock == null, "fsyncUnlock is not null in db.currentOp");
+
+ // Make sure the db is unlocked and the initial write made it through.
+ writeOpHandle();
+ fsyncLockDB.coll.insert({x: 2});
+
+ assert.eq(2, fsyncLockDB.coll.count({}));
+
+ // Issue the fsyncLock and fsyncUnlock a second time, to ensure that we can
+ // run this command repeatedly with no problems.
+ var fsyncLockRes = db.fsyncLock();
+ assert(fsyncLockRes.ok, "Second execution of fsyncLock command failed");
+
+ var fsyncUnlockRes = db.fsyncUnlock();
+ assert(fsyncUnlockRes.ok, "Second execution of fsyncUnlock command failed");
+
+ // Ensure eval is not allowed to invoke fsyncLock
+ assert(!db.eval('db.fsyncLock()').ok, "eval('db.fsyncLock()') should fail.");
+
+ // Check that the fsyncUnlock pseudo-command (a lookup on cmd.$sys.unlock)
+ // still has the same effect as a legitimate 'fsyncUnlock' command
+ // TODO: remove this in in the release following MongoDB 3.2 when pseudo-commands
+ // are removed
+ var fsyncCommandRes = db.fsyncLock();
+ assert(fsyncLockRes.ok, "fsyncLock command failed against admin DB");
+ assert(db.currentOp().fsyncLock, "Value in db.currentOp incorrect for fsyncLocked server");
+ var fsyncPseudoCommandRes = db.getSiblingDB("admin").$cmd.sys.unlock.findOne();
+ assert(fsyncPseudoCommandRes.ok, "fsyncUnlock pseudo-command failed");
+ assert(db.currentOp().fsyncLock == null, "fsyncUnlock is not null in db.currentOp");
}());
diff --git a/jstests/core/fts1.js b/jstests/core/fts1.js
index 5bdaa926b45..23364b2ecb7 100644
--- a/jstests/core/fts1.js
+++ b/jstests/core/fts1.js
@@ -1,20 +1,20 @@
-load( "jstests/libs/fts.js" );
+load("jstests/libs/fts.js");
t = db.text1;
t.drop();
-t.ensureIndex( { x : "text" } );
+t.ensureIndex({x: "text"});
-assert.eq( [] , queryIDS( t , "az" ) , "A0" );
+assert.eq([], queryIDS(t, "az"), "A0");
-t.save( { _id : 1 , x : "az b c" } );
-t.save( { _id : 2 , x : "az b" } );
-t.save( { _id : 3 , x : "b c" } );
-t.save( { _id : 4 , x : "b c d" } );
+t.save({_id: 1, x: "az b c"});
+t.save({_id: 2, x: "az b"});
+t.save({_id: 3, x: "b c"});
+t.save({_id: 4, x: "b c d"});
-assert.eq( [1,2,3,4] , queryIDS( t , "c az" ) , "A1" );
-assert.eq( [4] , queryIDS( t , "d" ) , "A2" );
+assert.eq([1, 2, 3, 4], queryIDS(t, "c az"), "A1");
+assert.eq([4], queryIDS(t, "d"), "A2");
idx = t.getIndexes()[1];
-assert( idx.v >= 1, tojson( idx ) );
-assert( idx.textIndexVersion >= 1, tojson( idx ) );
+assert(idx.v >= 1, tojson(idx));
+assert(idx.textIndexVersion >= 1, tojson(idx));
diff --git a/jstests/core/fts2.js b/jstests/core/fts2.js
index 11b74a76b0b..cf0b875c220 100644
--- a/jstests/core/fts2.js
+++ b/jstests/core/fts2.js
@@ -1,21 +1,19 @@
-load( "jstests/libs/fts.js" );
+load("jstests/libs/fts.js");
t = db.text2;
t.drop();
-t.save( { _id : 1 , x : "az b x" , y : "c d m" , z : 1 } );
-t.save( { _id : 2 , x : "c d y" , y : "az b n" , z : 2 } );
+t.save({_id: 1, x: "az b x", y: "c d m", z: 1});
+t.save({_id: 2, x: "c d y", y: "az b n", z: 2});
-t.ensureIndex( { x : "text" } , { weights : { x : 10 , y : 1 } } );
+t.ensureIndex({x: "text"}, {weights: {x: 10, y: 1}});
-assert.eq( [1,2] , queryIDS( t , "az" ) , "A1" );
-assert.eq( [2,1] , queryIDS( t , "d" ) , "A2" );
-
-assert.eq( [1] , queryIDS( t , "x" ) , "A3" );
-assert.eq( [2] , queryIDS( t , "y" ) , "A4" );
-
-assert.eq( [1] , queryIDS( t , "az" , { z : 1 } ) , "B1" );
-assert.eq( [1] , queryIDS( t , "d" , { z : 1 } ) , "B2" );
+assert.eq([1, 2], queryIDS(t, "az"), "A1");
+assert.eq([2, 1], queryIDS(t, "d"), "A2");
+assert.eq([1], queryIDS(t, "x"), "A3");
+assert.eq([2], queryIDS(t, "y"), "A4");
+assert.eq([1], queryIDS(t, "az", {z: 1}), "B1");
+assert.eq([1], queryIDS(t, "d", {z: 1}), "B2");
diff --git a/jstests/core/fts3.js b/jstests/core/fts3.js
index 8c550259d10..64e37d95105 100644
--- a/jstests/core/fts3.js
+++ b/jstests/core/fts3.js
@@ -1,20 +1,19 @@
-load( "jstests/libs/fts.js" );
+load("jstests/libs/fts.js");
t = db.text3;
t.drop();
-t.save( { _id : 1 , x : "az b x" , y : "c d m" , z : 1 } );
-t.save( { _id : 2 , x : "c d y" , y : "az b n" , z : 2 } );
+t.save({_id: 1, x: "az b x", y: "c d m", z: 1});
+t.save({_id: 2, x: "c d y", y: "az b n", z: 2});
-t.ensureIndex( { x : "text" , z : 1 } , { weights : { x : 10 , y : 1 } } );
+t.ensureIndex({x: "text", z: 1}, {weights: {x: 10, y: 1}});
-assert.eq( [1,2] , queryIDS( t , "az" ) , "A1" );
-assert.eq( [2,1] , queryIDS( t , "d" ) , "A2" );
+assert.eq([1, 2], queryIDS(t, "az"), "A1");
+assert.eq([2, 1], queryIDS(t, "d"), "A2");
-assert.eq( [1] , queryIDS( t , "x" ) , "A3" );
-assert.eq( [2] , queryIDS( t , "y" ) , "A4" );
-
-assert.eq( [1] , queryIDS( t , "az" , { z : 1 } ) , "B1" );
-assert.eq( [1] , queryIDS( t , "d" , { z : 1 } ) , "B2" );
+assert.eq([1], queryIDS(t, "x"), "A3");
+assert.eq([2], queryIDS(t, "y"), "A4");
+assert.eq([1], queryIDS(t, "az", {z: 1}), "B1");
+assert.eq([1], queryIDS(t, "d", {z: 1}), "B2");
diff --git a/jstests/core/fts4.js b/jstests/core/fts4.js
index fe35bdafe44..13a9e73cd10 100644
--- a/jstests/core/fts4.js
+++ b/jstests/core/fts4.js
@@ -1,20 +1,19 @@
-load( "jstests/libs/fts.js" );
+load("jstests/libs/fts.js");
t = db.text4;
t.drop();
-t.save( { _id : 1 , x : [ "az" , "b" , "x" ] , y : [ "c" , "d" , "m" ] , z : 1 } );
-t.save( { _id : 2 , x : [ "c" , "d" , "y" ] , y : [ "az" , "b" , "n" ] , z : 2 } );
+t.save({_id: 1, x: ["az", "b", "x"], y: ["c", "d", "m"], z: 1});
+t.save({_id: 2, x: ["c", "d", "y"], y: ["az", "b", "n"], z: 2});
-t.ensureIndex( { y : "text" , z : 1 } , { weights : { x : 10 } } );
+t.ensureIndex({y: "text", z: 1}, {weights: {x: 10}});
-assert.eq( [1,2] , queryIDS( t , "az" ) , "A1" );
-assert.eq( [2,1] , queryIDS( t , "d" ) , "A2" );
+assert.eq([1, 2], queryIDS(t, "az"), "A1");
+assert.eq([2, 1], queryIDS(t, "d"), "A2");
-assert.eq( [1] , queryIDS( t , "x" ) , "A3" );
-assert.eq( [2] , queryIDS( t , "y" ) , "A4" );
-
-assert.eq( [1] , queryIDS( t , "az" , { z : 1 } ) , "B1" );
-assert.eq( [1] , queryIDS( t , "d" , { z : 1 } ) , "B2" );
+assert.eq([1], queryIDS(t, "x"), "A3");
+assert.eq([2], queryIDS(t, "y"), "A4");
+assert.eq([1], queryIDS(t, "az", {z: 1}), "B1");
+assert.eq([1], queryIDS(t, "d", {z: 1}), "B2");
diff --git a/jstests/core/fts5.js b/jstests/core/fts5.js
index a95917f5d26..d3d6bb4de44 100644
--- a/jstests/core/fts5.js
+++ b/jstests/core/fts5.js
@@ -1,19 +1,19 @@
-load( "jstests/libs/fts.js" );
+load("jstests/libs/fts.js");
t = db.text5;
t.drop();
-t.save( { _id: 1 , x: [ { a: "az" } , { a: "b" } , { a: "x" } ] , y: [ "c" , "d" , "m" ] , z: 1 } );
-t.save( { _id: 2 , x: [ { a: "c" } , { a: "d" } , { a: "y" } ] , y: [ "az" , "b" , "n" ] , z: 2 } );
+t.save({_id: 1, x: [{a: "az"}, {a: "b"}, {a: "x"}], y: ["c", "d", "m"], z: 1});
+t.save({_id: 2, x: [{a: "c"}, {a: "d"}, {a: "y"}], y: ["az", "b", "n"], z: 2});
-t.ensureIndex( { y: "text" , z: 1 } , { weights: { "x.a": 10 } } );
+t.ensureIndex({y: "text", z: 1}, {weights: {"x.a": 10}});
-assert.eq( [1,2] , queryIDS( t , "az" ) , "A1" );
-assert.eq( [2,1] , queryIDS( t , "d" ) , "A2" );
+assert.eq([1, 2], queryIDS(t, "az"), "A1");
+assert.eq([2, 1], queryIDS(t, "d"), "A2");
-assert.eq( [1] , queryIDS( t , "x" ) , "A3" );
-assert.eq( [2] , queryIDS( t , "y" ) , "A4" );
+assert.eq([1], queryIDS(t, "x"), "A3");
+assert.eq([2], queryIDS(t, "y"), "A4");
-assert.eq( [1] , queryIDS( t , "az" , { z: 1 } ) , "B1" );
-assert.eq( [1] , queryIDS( t , "d" , { z: 1 } ) , "B2" );
+assert.eq([1], queryIDS(t, "az", {z: 1}), "B1");
+assert.eq([1], queryIDS(t, "d", {z: 1}), "B2");
diff --git a/jstests/core/fts_blog.js b/jstests/core/fts_blog.js
index 78b9ef34ecc..9f35836ef37 100644
--- a/jstests/core/fts_blog.js
+++ b/jstests/core/fts_blog.js
@@ -1,26 +1,20 @@
t = db.text_blog;
t.drop();
-t.save( { _id : 1 , title : "my blog post" , text : "this is a new blog i am writing. yay" } );
-t.save( { _id : 2 , title : "my 2nd post" , text : "this is a new blog i am writing. yay" } );
-t.save( { _id : 3 , title : "knives are Fun" , text : "this is a new blog i am writing. yay" } );
+t.save({_id: 1, title: "my blog post", text: "this is a new blog i am writing. yay"});
+t.save({_id: 2, title: "my 2nd post", text: "this is a new blog i am writing. yay"});
+t.save({_id: 3, title: "knives are Fun", text: "this is a new blog i am writing. yay"});
// default weight is 1
// specify weights if you want a field to be more meaningull
-t.ensureIndex( { "title" : "text" , text : "text" } , { weights : { title : 10 } } );
-
-res = t.find( { "$text" : { "$search" : "blog" } } , { score: { "$meta" : "textScore" } } ).sort( { score: { "$meta" : "textScore" } });
-assert.eq( 3, res.length());
-assert.eq( 1, res[0]._id );
-
-res = t.find( { "$text" : { "$search" : "write" } }, { score: { "$meta" : "textScore" } } );
-assert.eq( 3, res.length() );
-assert.eq( res[0].score, res[1].score );
-assert.eq( res[0].score, res[2].score );
-
-
-
-
-
+t.ensureIndex({"title": "text", text: "text"}, {weights: {title: 10}});
+res = t.find({"$text": {"$search": "blog"}}, {score: {"$meta": "textScore"}})
+ .sort({score: {"$meta": "textScore"}});
+assert.eq(3, res.length());
+assert.eq(1, res[0]._id);
+res = t.find({"$text": {"$search": "write"}}, {score: {"$meta": "textScore"}});
+assert.eq(3, res.length());
+assert.eq(res[0].score, res[1].score);
+assert.eq(res[0].score, res[2].score);
diff --git a/jstests/core/fts_blogwild.js b/jstests/core/fts_blogwild.js
index e220bd89032..dad96cd2836 100644
--- a/jstests/core/fts_blogwild.js
+++ b/jstests/core/fts_blogwild.js
@@ -1,40 +1,40 @@
t = db.text_blogwild;
t.drop();
-t.save( { _id: 1 , title: "my blog post" , text: "this is a new blog i am writing. yay eliot" } );
-t.save( { _id: 2 , title: "my 2nd post" , text: "this is a new blog i am writing. yay" } );
-t.save( { _id: 3 , title: "knives are Fun for writing eliot" , text: "this is a new blog i am writing. yay" } );
+t.save({_id: 1, title: "my blog post", text: "this is a new blog i am writing. yay eliot"});
+t.save({_id: 2, title: "my 2nd post", text: "this is a new blog i am writing. yay"});
+t.save({
+ _id: 3,
+ title: "knives are Fun for writing eliot",
+ text: "this is a new blog i am writing. yay"
+});
// default weight is 1
// specify weights if you want a field to be more meaningull
-t.ensureIndex( { dummy: "text" } , { weights: "$**" } );
+t.ensureIndex({dummy: "text"}, {weights: "$**"});
-res = t.find( { "$text" : { "$search": "blog" } } );
-assert.eq( 3 , res.length() , "A1" );
+res = t.find({"$text": {"$search": "blog"}});
+assert.eq(3, res.length(), "A1");
-res = t.find( { "$text" : { "$search": "write" } } );
-assert.eq( 3 , res.length() , "B1" );
+res = t.find({"$text": {"$search": "write"}});
+assert.eq(3, res.length(), "B1");
// mixing
-t.dropIndex( "dummy_text" );
-assert.eq( 1 , t.getIndexKeys().length , "C1" );
-t.ensureIndex( { dummy: "text" } , { weights: { "$**": 1 , title: 2 } } );
-
-
-res = t.find( { "$text" : { "$search": "write" } }, { score: { "$meta" : "textScore" } } ).sort( { score: { "$meta" : "textScore" } });
-assert.eq( 3 , res.length() , "C2" );
-assert.eq( 3 , res[0]._id , "C3" );
-
-res = t.find( { "$text" : { "$search": "blog" } }, { score: { "$meta" : "textScore" } } ).sort( { score: { "$meta" : "textScore" } });
-assert.eq( 3 , res.length() , "D1" );
-assert.eq( 1 , res[0]._id , "D2" );
-
-res = t.find( { "$text" : { "$search": "eliot" } }, { score: { "$meta" : "textScore" } } ).sort( { score: { "$meta" : "textScore" } });
-assert.eq( 2 , res.length() , "E1" );
-assert.eq( 3 , res[0]._id , "E2" );
-
-
-
-
-
-
+t.dropIndex("dummy_text");
+assert.eq(1, t.getIndexKeys().length, "C1");
+t.ensureIndex({dummy: "text"}, {weights: {"$**": 1, title: 2}});
+
+res = t.find({"$text": {"$search": "write"}}, {score: {"$meta": "textScore"}})
+ .sort({score: {"$meta": "textScore"}});
+assert.eq(3, res.length(), "C2");
+assert.eq(3, res[0]._id, "C3");
+
+res = t.find({"$text": {"$search": "blog"}}, {score: {"$meta": "textScore"}})
+ .sort({score: {"$meta": "textScore"}});
+assert.eq(3, res.length(), "D1");
+assert.eq(1, res[0]._id, "D2");
+
+res = t.find({"$text": {"$search": "eliot"}}, {score: {"$meta": "textScore"}})
+ .sort({score: {"$meta": "textScore"}});
+assert.eq(2, res.length(), "E1");
+assert.eq(3, res[0]._id, "E2");
diff --git a/jstests/core/fts_casesensitive.js b/jstests/core/fts_casesensitive.js
index e49de5c1f7f..5b0e0832130 100644
--- a/jstests/core/fts_casesensitive.js
+++ b/jstests/core/fts_casesensitive.js
@@ -8,7 +8,9 @@ coll.drop();
assert.writeOK(coll.insert({_id: 0, a: "The Quick Brown Fox Jumps Over The Lazy Dog"}));
assert.commandWorked(coll.ensureIndex({a: "text"}));
-assert.throws(function() { queryIDS(coll, "hello", null, {$caseSensitive: "invalid"}); });
+assert.throws(function() {
+ queryIDS(coll, "hello", null, {$caseSensitive: "invalid"});
+});
assert.eq([0], queryIDS(coll, "The quick Brown", null, {$caseSensitive: true}));
assert.eq([0], queryIDS(coll, "Jumped", null, {$caseSensitive: true}));
diff --git a/jstests/core/fts_diacritic_and_caseinsensitive.js b/jstests/core/fts_diacritic_and_caseinsensitive.js
index 7a65a56e2fc..898735f3140 100644
--- a/jstests/core/fts_diacritic_and_caseinsensitive.js
+++ b/jstests/core/fts_diacritic_and_caseinsensitive.js
@@ -9,7 +9,7 @@ load('jstests/libs/fts.js');
coll.drop();
assert.writeOK(coll.insert({
- _id: 0,
+ _id: 0,
a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."
}));
diff --git a/jstests/core/fts_diacritic_and_casesensitive.js b/jstests/core/fts_diacritic_and_casesensitive.js
index 6f6ef4439e0..397b6033f88 100644
--- a/jstests/core/fts_diacritic_and_casesensitive.js
+++ b/jstests/core/fts_diacritic_and_casesensitive.js
@@ -1,4 +1,5 @@
-// Integration tests for {$diacriticSensitive: true, $caseSensitive: true} option to $text query operator.
+// Integration tests for {$diacriticSensitive: true, $caseSensitive: true} option to $text query
+// operator.
load('jstests/libs/fts.js');
@@ -9,56 +10,57 @@ load('jstests/libs/fts.js');
coll.drop();
assert.writeOK(coll.insert({
- _id: 0,
+ _id: 0,
a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."
}));
assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "portuguese"}));
- assert.eq([0], queryIDS(
- coll, "próximo vôo à", null, {$diacriticSensitive: true, $caseSensitive: true}
- ));
- assert.eq([0], queryIDS(
- coll, "Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true}
- ));
- assert.eq([0], queryIDS(
- coll, "\"próximo\"", null, {$diacriticSensitive: true, $caseSensitive: true}
- ));
- assert.eq([0], queryIDS(
- coll, "\"Põe\" Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true}
- ));
- assert.eq([0], queryIDS(
- coll, "\"próximo Vôo\" \"único Médico\"", null,
- {$diacriticSensitive: true, $caseSensitive: true}
- ));
- assert.eq([0], queryIDS(
- coll, "\"próximo Vôo\" -\"único médico\"", null,
- {$diacriticSensitive: true, $caseSensitive: true}
- ));
+ assert.eq(
+ [0],
+ queryIDS(coll, "próximo vôo à", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq(
+ [0], queryIDS(coll, "Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq(
+ [0],
+ queryIDS(coll, "\"próximo\"", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq(
+ [0],
+ queryIDS(
+ coll, "\"Põe\" Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq([0],
+ queryIDS(coll,
+ "\"próximo Vôo\" \"único Médico\"",
+ null,
+ {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq([0],
+ queryIDS(coll,
+ "\"próximo Vôo\" -\"único médico\"",
+ null,
+ {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq([], queryIDS(
- coll, "À", null, {$diacriticSensitive: true, $caseSensitive: true}
- ));
- assert.eq([], queryIDS(
- coll, "Próximo", null, {$diacriticSensitive: true, $caseSensitive: true}
- ));
- assert.eq([], queryIDS(
- coll, "proximo vôo à", null, {$diacriticSensitive: true, $caseSensitive: true}
- ));
- assert.eq([], queryIDS(
- coll, "À -próximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true}
- ));
- assert.eq([], queryIDS(
- coll, "à proximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true}
- ));
- assert.eq([], queryIDS(
- coll, "mo Vô", null, {$diacriticSensitive: true, $caseSensitive: true}
- ));
- assert.eq([], queryIDS(
- coll, "\"único médico\"", null, {$diacriticSensitive: true, $caseSensitive: true}
- ));
- assert.eq([], queryIDS(
- coll, "\"próximo Vôo\" -\"único Médico\"", null, {$diacriticSensitive: true, $caseSensitive: true}
- ));
+ assert.eq([], queryIDS(coll, "À", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq([],
+ queryIDS(coll, "Próximo", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq(
+ [],
+ queryIDS(coll, "proximo vôo à", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq(
+ [],
+ queryIDS(
+ coll, "À -próximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq(
+ [],
+ queryIDS(coll, "à proximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq([], queryIDS(coll, "mo Vô", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq(
+ [],
+ queryIDS(
+ coll, "\"único médico\"", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq([],
+ queryIDS(coll,
+ "\"próximo Vôo\" -\"único Médico\"",
+ null,
+ {$diacriticSensitive: true, $caseSensitive: true}));
})(); \ No newline at end of file
diff --git a/jstests/core/fts_diacriticsensitive.js b/jstests/core/fts_diacriticsensitive.js
index c38978e3c4b..29e7784a785 100644
--- a/jstests/core/fts_diacriticsensitive.js
+++ b/jstests/core/fts_diacriticsensitive.js
@@ -9,7 +9,7 @@ load('jstests/libs/fts.js');
coll.drop();
assert.writeOK(coll.insert({
- _id: 0,
+ _id: 0,
a: "O próximo vôo à noite sobre o Atlântico, põe freqüentemente o único médico."
}));
@@ -23,12 +23,12 @@ load('jstests/libs/fts.js');
assert.eq([0], queryIDS(coll, "atlântico", null, {$diacriticSensitive: true}));
assert.eq([0], queryIDS(coll, "\"próximo\"", null, {$diacriticSensitive: true}));
assert.eq([0], queryIDS(coll, "\"põe\" atlântico", null, {$diacriticSensitive: true}));
- assert.eq([0], queryIDS(
- coll, "\"próximo vôo\" \"único médico\"", null, {$diacriticSensitive: true}
- ));
- assert.eq([0], queryIDS(
- coll, "\"próximo vôo\" -\"unico médico\"", null, {$diacriticSensitive: true}
- ));
+ assert.eq(
+ [0],
+ queryIDS(coll, "\"próximo vôo\" \"único médico\"", null, {$diacriticSensitive: true}));
+ assert.eq(
+ [0],
+ queryIDS(coll, "\"próximo vôo\" -\"unico médico\"", null, {$diacriticSensitive: true}));
assert.eq([], queryIDS(coll, "à", null, {$diacriticSensitive: true}));
assert.eq([], queryIDS(coll, "proximo", null, {$diacriticSensitive: true}));
@@ -37,8 +37,8 @@ load('jstests/libs/fts.js');
assert.eq([], queryIDS(coll, "à proximo -vôo", null, {$diacriticSensitive: true}));
assert.eq([], queryIDS(coll, "mo vô", null, {$diacriticSensitive: true}));
assert.eq([], queryIDS(coll, "\"unico medico\"", null, {$diacriticSensitive: true}));
- assert.eq([], queryIDS(
- coll, "\"próximo vôo\" -\"único médico\"", null, {$diacriticSensitive: true
- }));
+ assert.eq(
+ [],
+ queryIDS(coll, "\"próximo vôo\" -\"único médico\"", null, {$diacriticSensitive: true}));
})();
diff --git a/jstests/core/fts_explain.js b/jstests/core/fts_explain.js
index de55e98ddc8..225be626d2c 100644
--- a/jstests/core/fts_explain.js
+++ b/jstests/core/fts_explain.js
@@ -10,7 +10,7 @@ assert.commandWorked(res);
res = coll.insert({content: "some data"});
assert.writeOK(res);
-var explain = coll.find({$text:{$search: "\"a\" -b -\"c\""}}).explain(true);
+var explain = coll.find({$text: {$search: "\"a\" -b -\"c\""}}).explain(true);
var stage = explain.executionStats.executionStages;
if ("SINGLE_SHARD" === stage.stage) {
stage = stage.shards[0].executionStages;
diff --git a/jstests/core/fts_index.js b/jstests/core/fts_index.js
index 5410b8c4ca2..8cda28096d2 100644
--- a/jstests/core/fts_index.js
+++ b/jstests/core/fts_index.js
@@ -16,30 +16,49 @@ coll.getDB().createCollection(coll.getName());
// Spec passes text-specific index validation.
assert.commandWorked(coll.ensureIndex({a: "text"}, {name: indexName, default_language: "spanish"}));
-assert.eq( 1, coll.getIndexes().filter( function(z){ return z.name == indexName; } ).length );
+assert.eq(1,
+ coll.getIndexes().filter(function(z) {
+ return z.name == indexName;
+ }).length);
coll.dropIndexes();
// Spec fails text-specific index validation ("spanglish" unrecognized).
-assert.commandFailed(coll.ensureIndex({a: "text"}, {name: indexName, default_language: "spanglish"}));
-assert.eq( 0, coll.getIndexes().filter( function(z){ return z.name == indexName; } ).length );
+assert.commandFailed(coll.ensureIndex({a: "text"},
+ {name: indexName, default_language: "spanglish"}));
+assert.eq(0,
+ coll.getIndexes().filter(function(z) {
+ return z.name == indexName;
+ }).length);
coll.dropIndexes();
// Spec passes general index validation.
assert.commandWorked(coll.ensureIndex({"$**": "text"}, {name: indexName}));
-assert.eq( 1, coll.getIndexes().filter( function(z){ return z.name == indexName; } ).length );
+assert.eq(1,
+ coll.getIndexes().filter(function(z) {
+ return z.name == indexName;
+ }).length);
coll.dropIndexes();
// Spec fails general index validation ("a.$**" invalid field name for key).
assert.commandFailed(coll.ensureIndex({"a.$**": "text"}, {name: indexName}));
-assert.eq( 0, coll.getIndexes().filter( function(z){ return z.name == indexName; } ).length );
+assert.eq(0,
+ coll.getIndexes().filter(function(z) {
+ return z.name == indexName;
+ }).length);
coll.dropIndexes();
// SERVER-19519 Spec fails if '_fts' is specified on a non-text index.
assert.commandFailed(coll.ensureIndex({_fts: 1}, {name: indexName}));
-assert.eq( 0, coll.getIndexes().filter( function(z){ return z.name == indexName; } ).length );
+assert.eq(0,
+ coll.getIndexes().filter(function(z) {
+ return z.name == indexName;
+ }).length);
coll.dropIndexes();
assert.commandFailed(coll.ensureIndex({_fts: "text"}, {name: indexName}));
-assert.eq( 0, coll.getIndexes().filter( function(z){ return z.name == indexName; } ).length );
+assert.eq(0,
+ coll.getIndexes().filter(function(z) {
+ return z.name == indexName;
+ }).length);
coll.dropIndexes();
//
@@ -60,12 +79,12 @@ coll.drop();
// Can insert documents with valid language_override into text-indexed collection.
assert.commandWorked(coll.ensureIndex({a: "text"}));
coll.insert({a: ""});
-assert.writeOK( coll.insert({a: "", language: "spanish"}));
+assert.writeOK(coll.insert({a: "", language: "spanish"}));
coll.drop();
// Can't insert documents with invalid language_override into text-indexed collection.
assert.commandWorked(coll.ensureIndex({a: "text"}));
-assert.writeError( coll.insert({a: "", language: "spanglish"}));
+assert.writeError(coll.insert({a: "", language: "spanglish"}));
coll.drop();
//
@@ -142,7 +161,7 @@ assert.commandWorked(coll.ensureIndex({a: "text"}));
var longstring = "";
var longstring2 = "";
-for(var i = 0; i < 1024 * 1024; ++i) {
+for (var i = 0; i < 1024 * 1024; ++i) {
longstring = longstring + "a";
longstring2 = longstring2 + "b";
}
@@ -157,5 +176,4 @@ coll.dropIndexes();
assert.commandFailed(coll.ensureIndex({a: 1, _fts: "text", _ftsx: 1, c: 1}, {weights: {}}));
assert.commandFailed(coll.ensureIndex({a: 1, _fts: "text", _ftsx: 1, c: 1}));
-
coll.drop();
diff --git a/jstests/core/fts_index2.js b/jstests/core/fts_index2.js
index aa17df1514a..fa0129acc5c 100644
--- a/jstests/core/fts_index2.js
+++ b/jstests/core/fts_index2.js
@@ -11,6 +11,6 @@ assert.commandWorked(coll1.ensureIndex({"$**": "text"}));
assert.eq(1, coll1.count({$text: {$search: "content"}}));
// Rename within same database.
-assert.commandWorked(coll1.getDB().adminCommand({renameCollection: coll1.getFullName(),
- to: coll2.getFullName() }));
+assert.commandWorked(
+ coll1.getDB().adminCommand({renameCollection: coll1.getFullName(), to: coll2.getFullName()}));
assert.eq(1, coll2.count({$text: {$search: "content"}}));
diff --git a/jstests/core/fts_index_version1.js b/jstests/core/fts_index_version1.js
index 1095c5828ac..0b1c869a3a5 100644
--- a/jstests/core/fts_index_version1.js
+++ b/jstests/core/fts_index_version1.js
@@ -10,8 +10,7 @@ assert.eq(1, coll.count({$text: {$search: "run"}}));
// Test search with a "language alias" only recognized in textIndexVersion:1 (note that the stopword
// machinery doesn't recognize these aliases).
coll.drop();
-assert.commandWorked(coll.ensureIndex({a: "text"},
- {default_language: "eng", textIndexVersion: 1}));
+assert.commandWorked(coll.ensureIndex({a: "text"}, {default_language: "eng", textIndexVersion: 1}));
assert.writeOK(coll.insert({a: "running"}));
assert.eq(1, coll.count({$text: {$search: "run"}}));
diff --git a/jstests/core/fts_index_version2.js b/jstests/core/fts_index_version2.js
index 53557c8c6d6..05fecab36ee 100644
--- a/jstests/core/fts_index_version2.js
+++ b/jstests/core/fts_index_version2.js
@@ -9,13 +9,12 @@ load('jstests/libs/fts.js');
coll.drop();
assert.writeOK(coll.insert({
- _id: 0,
+ _id: 0,
a: "O próximo Vôo à Noite sobre o Atlântico, Põe Freqüentemente o único Médico."
}));
assert.commandWorked(
- coll.ensureIndex({a: "text"}, {default_language: "portuguese", textIndexVersion: 2}
- ));
+ coll.ensureIndex({a: "text"}, {default_language: "portuguese", textIndexVersion: 2}));
assert.eq([0], queryIDS(coll, "próximo vôo à", null));
assert.eq([0], queryIDS(coll, "atlântico", null));
diff --git a/jstests/core/fts_mix.js b/jstests/core/fts_mix.js
index 4ef8da0a28d..5142497fb41 100644
--- a/jstests/core/fts_mix.js
+++ b/jstests/core/fts_mix.js
@@ -1,5 +1,5 @@
-load( "jstests/libs/fts.js" );
+load("jstests/libs/fts.js");
// test collection
tc = db.text_mix;
@@ -7,151 +7,204 @@ tc.drop();
// creation of collection documents
// content generated using wikipedia random article
-tc.save( { _id: 1, title: "Olivia Shakespear",text: "Olivia Shakespear (born Olivia Tucker; 17 March 1863 – 3 October 1938) was a British novelist, playwright, and patron of the arts. She wrote six books that are described as \"marriage problem\" novels. Her works sold poorly, sometimes only a few hundred copies. Her last novel, Uncle Hilary, is considered her best. She wrote two plays in collaboration with Florence Farr." } );
-tc.save( { _id: 2, title: "Mahim Bora", text: "Mahim Bora (born 1926) is an Indian writer and educationist from Assam state. He was born at a tea estate of Sonitpur district. He is an M.A. in Assamese literature from Gauhati University and had been a teacher in the Nowgong College for most of his teaching career. He has now retired and lives at Nagaon. Bora spent a good part of his childhood in the culture-rich surroundings of rural Nagaon, where the river Kalong was the life-blood of a community. His impressionable mind was to capture a myriad memories of that childhood, later to find expression in his poems, short stories and novels with humour, irony and pathos woven into their texture. When this river was dammed up, its disturbing effect was on the entire community dependant on nature's bounty." } );
-tc.save( { _id: 3, title: "A break away!", text: "A break away! is an 1891 painting by Australian artist Tom Roberts. The painting depicts a mob of thirsty sheep stampeding towards a dam. A drover on horseback is attempting to turn the mob before they drown or crush each other in their desire to drink. The painting, an \"icon of Australian art\", is part of a series of works by Roberts that \"captures what was an emerging spirit of national identity.\" Roberts painted the work at Corowa. The painting depicts a time of drought, with little grass and the soil kicked up as dust. The work itself is a reflection on the pioneering days of the pastoral industry, which were coming to an end by the 1890s." } );
-tc.save( { _id: 4, title: "Linn-Kristin Riegelhuth Koren", text: "Linn-Kristin Riegelhuth Koren (born 1 August 1984, in Ski) is a Norwegian handballer playing for Larvik HK and the Norwegian national team. She is commonly known as Linka. Outside handball she is a qualified nurse." } );
-tc.save( { _id: 5, title: "Morten Jensen", text: "Morten Jensen (born December 2, 1982 in Lynge) is a Danish athlete. He primarily participates in long jump, 100 metres and 200 metres. He competed at the World Championships in 2005 and 2007, the 2006 World Indoor Championships, the 2006 European Championships, the 2007 World Championships and the 2008 Olympic Games without qualifying for the final round. He was runner-up in the 2010 Finnish Elite Games rankings, just missing out to Levern Spencer for that year's jackpot. He holds the Danish record in both long jump and 100 metres. He also holds the Danish indoor record in the 200 metres. He has been a part of the Sparta teamsine 2005, before then he was a part of FIF Hillerd. His coach was Leif Dahlberg after the 2010 European Championships he change to Lars Nielsen and Anders Miller." } );
-tc.save( { _id: 6, title: "Janet Laurence", text: "Janet Laurence (born 1947) is a Sydney based Australian artist who works in mixed media and installation. Her work has been included in major survey exhibitions, nationally and internationally and is regularly exhibited in Sydney, Melbourne and Japan. Her work explores a relationship to the natural world, often from an architectural context. It extends from the gallery space into the urban fabric, and has been realized in many site specific projects, often involving collaborations with architects, landscape architects and environmental scientists. She has received many grants and awards including a Rockefeller Residency in 1997. Laurence was a Trustee of the Art Gallery of NSW from 1995 to 2005. Laurence was the subject of John Beard's winning entry for the 2007 Archibald Prize." } );
-tc.save( { _id: 7, title: "Glen-Coats Baronets", text: "The Glen-Coats Baronetcy, of Ferguslie Park in the Parish of Abbey in the County of Renfrew, was a title in the Baronetage of the United Kingdom. It was created on 25 June 1894 for Thomas Glen-Coats, Director of the thread-making firm of J. & P. Coats, Ltd, and later Liberal Member of Parliament for Renfrewshire West. Born Thomas Coats, he assumed the additional surname of Glen, which was that of his maternal grandfather. He was succeeded by his son, the second Baronet. He won a gold medal in sailing at the 1908 Summer Olympics. The title became extinct on his death in 1954. Two other members of the Coats family also gained distinction. George Coats, 1st Baron Glentanar, was the younger brother of the first Baronet, while Sir James Coats, 1st Baronet (see Coats Baronets), was the first cousin of the first Baronet." } );
-tc.save( { _id: 8, title: "Grapeleaf Skeletonizer", text: "The Grapeleaf Skeletonizer, Harrisina americana is a moth in the family Zygaenidae. It is widespread in the eastern half of the United States, and commonly noticed defoliating grapes, especially of the Virginia creeper (Parthenocissus quinquefolia). The western grapeleaf skeletonizer, Harrisina brillians is very similar to and slightly larger than H. americana, but their distributions are different. Members of this family all produce hydrogen cyanide, a potent antipredator toxin." } );
-tc.save( { _id: 9, title: "Physics World", text: "Physics World is the membership magazine of the Institute of Physics, one of the largest physical societies in the world. It is an international monthly magazine covering all areas of physics, both pure and applied, and is aimed at physicists in research, industry and education worldwide. It was launched in 1988 by IOP Publishing Ltd and has established itself as one of the world's leading physics magazines. The magazine is sent free to members of the Institute of Physics, who can also access a digital edition of the magazine, although selected articles can be read by anyone for free online. It was redesigned in September 2005 and has an audited circulation of just under 35000. The current editor is Matin Durrani. Also on the team are Dens Milne (associate editor), Michael Banks (news editor), Louise Mayor (features editor) and Margaret Harris (reviews and careers editor). Hamish Johnston is the editor of the magazine's website physicsworld.com and James Dacey is its reporter." } );
-tc.save( { _id: 10, title: "Mallacoota, Victoria", text: "Mallacoota is a small town in the East Gippsland region of Victoria, Australia. At the 2006 census, Mallacoota had a population of 972. At holiday times, particularly Easter and Christmas, the population increases by about 8,000. It is one of the most isolated towns in the state of Victoria, 25 kilometres off the Princes Highway and 523 kilometres (325 mi) from Melbourne. It is 526 kilometres (327 mi) from Sydney, New South Wales. It is halfway between Melbourne and Sydney when travelling via Princes Highway, though that is a long route between Australia's two main cities. It is the last official township on Victoria's east coast before the border with New South Wales. Mallacoota has a regional airport (Mallacoota Airport) YMCO (XMC) consisting of a grassed field for private light planes. It is known for its wild flowers, abalone industry, the inlet estuary consisting of Top Lake and Bottom Lake, and Croajingolong National Park that surround it. It is a popular and beautiful holiday spot for boating, fishing, walking the wilderness coast, swimming, birdwatching, and surfing. The Mallacoota Arts Council runs events throughout each year. Mallacoota Inlet is one of the main villages along the wilderness coast walk from NSW to Victoria, Australia." } );
+tc.save({
+ _id: 1,
+ title: "Olivia Shakespear",
+ text:
+ "Olivia Shakespear (born Olivia Tucker; 17 March 1863 – 3 October 1938) was a British novelist, playwright, and patron of the arts. She wrote six books that are described as \"marriage problem\" novels. Her works sold poorly, sometimes only a few hundred copies. Her last novel, Uncle Hilary, is considered her best. She wrote two plays in collaboration with Florence Farr."
+});
+tc.save({
+ _id: 2,
+ title: "Mahim Bora",
+ text:
+ "Mahim Bora (born 1926) is an Indian writer and educationist from Assam state. He was born at a tea estate of Sonitpur district. He is an M.A. in Assamese literature from Gauhati University and had been a teacher in the Nowgong College for most of his teaching career. He has now retired and lives at Nagaon. Bora spent a good part of his childhood in the culture-rich surroundings of rural Nagaon, where the river Kalong was the life-blood of a community. His impressionable mind was to capture a myriad memories of that childhood, later to find expression in his poems, short stories and novels with humour, irony and pathos woven into their texture. When this river was dammed up, its disturbing effect was on the entire community dependant on nature's bounty."
+});
+tc.save({
+ _id: 3,
+ title: "A break away!",
+ text:
+ "A break away! is an 1891 painting by Australian artist Tom Roberts. The painting depicts a mob of thirsty sheep stampeding towards a dam. A drover on horseback is attempting to turn the mob before they drown or crush each other in their desire to drink. The painting, an \"icon of Australian art\", is part of a series of works by Roberts that \"captures what was an emerging spirit of national identity.\" Roberts painted the work at Corowa. The painting depicts a time of drought, with little grass and the soil kicked up as dust. The work itself is a reflection on the pioneering days of the pastoral industry, which were coming to an end by the 1890s."
+});
+tc.save({
+ _id: 4,
+ title: "Linn-Kristin Riegelhuth Koren",
+ text:
+ "Linn-Kristin Riegelhuth Koren (born 1 August 1984, in Ski) is a Norwegian handballer playing for Larvik HK and the Norwegian national team. She is commonly known as Linka. Outside handball she is a qualified nurse."
+});
+tc.save({
+ _id: 5,
+ title: "Morten Jensen",
+ text:
+ "Morten Jensen (born December 2, 1982 in Lynge) is a Danish athlete. He primarily participates in long jump, 100 metres and 200 metres. He competed at the World Championships in 2005 and 2007, the 2006 World Indoor Championships, the 2006 European Championships, the 2007 World Championships and the 2008 Olympic Games without qualifying for the final round. He was runner-up in the 2010 Finnish Elite Games rankings, just missing out to Levern Spencer for that year's jackpot. He holds the Danish record in both long jump and 100 metres. He also holds the Danish indoor record in the 200 metres. He has been a part of the Sparta teamsine 2005, before then he was a part of FIF Hillerd. His coach was Leif Dahlberg after the 2010 European Championships he change to Lars Nielsen and Anders Miller."
+});
+tc.save({
+ _id: 6,
+ title: "Janet Laurence",
+ text:
+ "Janet Laurence (born 1947) is a Sydney based Australian artist who works in mixed media and installation. Her work has been included in major survey exhibitions, nationally and internationally and is regularly exhibited in Sydney, Melbourne and Japan. Her work explores a relationship to the natural world, often from an architectural context. It extends from the gallery space into the urban fabric, and has been realized in many site specific projects, often involving collaborations with architects, landscape architects and environmental scientists. She has received many grants and awards including a Rockefeller Residency in 1997. Laurence was a Trustee of the Art Gallery of NSW from 1995 to 2005. Laurence was the subject of John Beard's winning entry for the 2007 Archibald Prize."
+});
+tc.save({
+ _id: 7,
+ title: "Glen-Coats Baronets",
+ text:
+ "The Glen-Coats Baronetcy, of Ferguslie Park in the Parish of Abbey in the County of Renfrew, was a title in the Baronetage of the United Kingdom. It was created on 25 June 1894 for Thomas Glen-Coats, Director of the thread-making firm of J. & P. Coats, Ltd, and later Liberal Member of Parliament for Renfrewshire West. Born Thomas Coats, he assumed the additional surname of Glen, which was that of his maternal grandfather. He was succeeded by his son, the second Baronet. He won a gold medal in sailing at the 1908 Summer Olympics. The title became extinct on his death in 1954. Two other members of the Coats family also gained distinction. George Coats, 1st Baron Glentanar, was the younger brother of the first Baronet, while Sir James Coats, 1st Baronet (see Coats Baronets), was the first cousin of the first Baronet."
+});
+tc.save({
+ _id: 8,
+ title: "Grapeleaf Skeletonizer",
+ text:
+ "The Grapeleaf Skeletonizer, Harrisina americana is a moth in the family Zygaenidae. It is widespread in the eastern half of the United States, and commonly noticed defoliating grapes, especially of the Virginia creeper (Parthenocissus quinquefolia). The western grapeleaf skeletonizer, Harrisina brillians is very similar to and slightly larger than H. americana, but their distributions are different. Members of this family all produce hydrogen cyanide, a potent antipredator toxin."
+});
+tc.save({
+ _id: 9,
+ title: "Physics World",
+ text:
+ "Physics World is the membership magazine of the Institute of Physics, one of the largest physical societies in the world. It is an international monthly magazine covering all areas of physics, both pure and applied, and is aimed at physicists in research, industry and education worldwide. It was launched in 1988 by IOP Publishing Ltd and has established itself as one of the world's leading physics magazines. The magazine is sent free to members of the Institute of Physics, who can also access a digital edition of the magazine, although selected articles can be read by anyone for free online. It was redesigned in September 2005 and has an audited circulation of just under 35000. The current editor is Matin Durrani. Also on the team are Dens Milne (associate editor), Michael Banks (news editor), Louise Mayor (features editor) and Margaret Harris (reviews and careers editor). Hamish Johnston is the editor of the magazine's website physicsworld.com and James Dacey is its reporter."
+});
+tc.save({
+ _id: 10,
+ title: "Mallacoota, Victoria",
+ text:
+ "Mallacoota is a small town in the East Gippsland region of Victoria, Australia. At the 2006 census, Mallacoota had a population of 972. At holiday times, particularly Easter and Christmas, the population increases by about 8,000. It is one of the most isolated towns in the state of Victoria, 25 kilometres off the Princes Highway and 523 kilometres (325 mi) from Melbourne. It is 526 kilometres (327 mi) from Sydney, New South Wales. It is halfway between Melbourne and Sydney when travelling via Princes Highway, though that is a long route between Australia's two main cities. It is the last official township on Victoria's east coast before the border with New South Wales. Mallacoota has a regional airport (Mallacoota Airport) YMCO (XMC) consisting of a grassed field for private light planes. It is known for its wild flowers, abalone industry, the inlet estuary consisting of Top Lake and Bottom Lake, and Croajingolong National Park that surround it. It is a popular and beautiful holiday spot for boating, fishing, walking the wilderness coast, swimming, birdwatching, and surfing. The Mallacoota Arts Council runs events throughout each year. Mallacoota Inlet is one of the main villages along the wilderness coast walk from NSW to Victoria, Australia."
+});
// begin tests
// -------------------------------------------- INDEXING & WEIGHTING -------------------------------
// start with basic index, one item with default weight
-tc.ensureIndex( { "title": "text" } );
+tc.ensureIndex({"title": "text"});
// test the single result case..
-res = tc.find( { "$text": { "$search": "Victoria" } } );
-assert.eq( 1, res.length() );
-assert.eq( 10, res[0]._id );
+res = tc.find({"$text": {"$search": "Victoria"}});
+assert.eq(1, res.length());
+assert.eq(10, res[0]._id);
tc.dropIndexes();
// now let's see about multiple fields, with specific weighting
-tc.ensureIndex( { "title": "text", "text": "text" }, { weights: { "title": 10 } } );
-assert.eq( [9,7,8], queryIDS( tc, "members physics" ) );
+tc.ensureIndex({"title": "text", "text": "text"}, {weights: {"title": 10}});
+assert.eq([9, 7, 8], queryIDS(tc, "members physics"));
tc.dropIndexes();
// test all-1 weighting with "$**"
-tc.ensureIndex( { "$**": "text" } );
-assert.eq( [2,8,7], queryIDS( tc, "family tea estate" ) );
+tc.ensureIndex({"$**": "text"});
+assert.eq([2, 8, 7], queryIDS(tc, "family tea estate"));
tc.dropIndexes();
// non-1 weight on "$**" + other weight specified for some field
-tc.ensureIndex( { "$**": "text" }, { weights: { "$**": 10, "text": 2 } } );
-assert.eq( [7,5], queryIDS( tc, "Olympic Games gold medal" ) );
+tc.ensureIndex({"$**": "text"}, {weights: {"$**": 10, "text": 2}});
+assert.eq([7, 5], queryIDS(tc, "Olympic Games gold medal"));
tc.dropIndexes();
-// -------------------------------------------- "search"ING ------------------------------------------
+// -------------------------------------------- "search"ING
+// ------------------------------------------
// go back to "$**": 1, "title": 10.. and test more specific "search" functionality!
-tc.ensureIndex( { "$**": "text" }, { weights: { "title": 10 } } );
+tc.ensureIndex({"$**": "text"}, {weights: {"title": 10}});
// -------------------------------------------- STEMMING -------------------------------------------
// tests stemming for basic plural case
-res = tc.find( { "$text": { "$search": "member" } } );
-res2 = tc.find( { "$text": { "$search": "members" } } );
-assert.eq( getIDS( res ), getIDS( res2 ) );
+res = tc.find({"$text": {"$search": "member"}});
+res2 = tc.find({"$text": {"$search": "members"}});
+assert.eq(getIDS(res), getIDS(res2));
// "search" for something with potential 's bug.
-res = tc.find( { "$text": { "$search": "magazine's" } } );
-res2 = tc.find( { "$text": { "$search": "magazine" } } );
-assert.eq( getIDS( res ), getIDS( res2 ) );
+res = tc.find({"$text": {"$search": "magazine's"}});
+res2 = tc.find({"$text": {"$search": "magazine"}});
+assert.eq(getIDS(res), getIDS(res2));
// -------------------------------------------- LANGUAGE -------------------------------------------
-assert.throws(tc.find( { "$text": { "$search": "member", $language: "spanglish" } } ));
-assert.doesNotThrow(function() {tc.find( { "$text": { "$search": "member", $language: "english" } });} );
+assert.throws(tc.find({"$text": {"$search": "member", $language: "spanglish"}}));
+assert.doesNotThrow(function() {
+ tc.find({"$text": {"$search": "member", $language: "english"}});
+});
// -------------------------------------------- LIMIT RESULTS --------------------------------------
// ensure limit limits results
-assert.eq( [2], queryIDS( tc, "rural river dam", null, null, 1) );
+assert.eq([2], queryIDS(tc, "rural river dam", null, null, 1));
// ensure top results are the same regardless of limit
// make sure that this uses a case where it wouldn't be otherwise..
-res = tc.find( { "$text": { "$search": "united kingdom british princes" }}).limit(1);
-res2 = tc.find( { "$text": { "$search": "united kingdom british princes" } } );
-assert.eq( 1, res.length() );
-assert.eq( 4, res2.length() );
-assert.eq( res[0]._id, res2[0]._id );
+res = tc.find({"$text": {"$search": "united kingdom british princes"}}).limit(1);
+res2 = tc.find({"$text": {"$search": "united kingdom british princes"}});
+assert.eq(1, res.length());
+assert.eq(4, res2.length());
+assert.eq(res[0]._id, res2[0]._id);
// -------------------------------------------- PROJECTION -----------------------------------------
// test projection.. show just title and id
-res = tc.find( { "$text": { "$search": "Morten Jensen" }}, { title: 1 } );
-assert.eq( 1, res.length() );
-assert.eq( 5, res[0]._id );
-assert.eq( null, res[0].text );
-assert.neq( null, res[0].title );
-assert.neq( null, res[0]._id );
+res = tc.find({"$text": {"$search": "Morten Jensen"}}, {title: 1});
+assert.eq(1, res.length());
+assert.eq(5, res[0]._id);
+assert.eq(null, res[0].text);
+assert.neq(null, res[0].title);
+assert.neq(null, res[0]._id);
// test negative projection, ie. show everything but text
-res = tc.find( { "$text": { "$search": "handball" }}, { text: 0 } );
-assert.eq( 1, res.length() );
-assert.eq( 4, res[0]._id );
-assert.eq( null, res[0].text );
-assert.neq( null, res[0].title );
-assert.neq( null, res[0]._id );
+res = tc.find({"$text": {"$search": "handball"}}, {text: 0});
+assert.eq(1, res.length());
+assert.eq(4, res[0]._id);
+assert.eq(null, res[0].text);
+assert.neq(null, res[0].title);
+assert.neq(null, res[0]._id);
// test projection only title, no id
-res = tc.find( { "$text": { "$search": "Mahim Bora" }}, { _id: 0, title: 1 } );
-assert.eq( 1, res.length() );
-assert.eq( "Mahim Bora", res[0].title );
-assert.eq( null, res[0].text );
-assert.neq( null, res[0].title );
-assert.eq( null, res[0]._id );
+res = tc.find({"$text": {"$search": "Mahim Bora"}}, {_id: 0, title: 1});
+assert.eq(1, res.length());
+assert.eq("Mahim Bora", res[0].title);
+assert.eq(null, res[0].text);
+assert.neq(null, res[0].title);
+assert.eq(null, res[0]._id);
// -------------------------------------------- NEGATION -------------------------------------------
// test negation
-assert.eq( [8], queryIDS( tc, "United -Kingdom" ) );
-assert.eq( -1, tc.findOne( { _id : 8 } ).text.search(/Kingdom/i) );
+assert.eq([8], queryIDS(tc, "United -Kingdom"));
+assert.eq(-1, tc.findOne({_id: 8}).text.search(/Kingdom/i));
// test negation edge cases... hyphens, double dash, etc.
-assert.eq( [4], queryIDS( tc, "Linn-Kristin" ) );
+assert.eq([4], queryIDS(tc, "Linn-Kristin"));
// -------------------------------------------- PHRASE MATCHING ------------------------------------
// test exact phrase matching on
-assert.eq( [7], queryIDS( tc, "\"Summer Olympics\"" ) );
-assert.neq( -1, tc.findOne( { _id: 7 } ).text.indexOf("Summer Olympics") );
+assert.eq([7], queryIDS(tc, "\"Summer Olympics\""));
+assert.neq(-1, tc.findOne({_id: 7}).text.indexOf("Summer Olympics"));
// phrasematch with other stuff.. negation, other terms, etc.
-assert.eq( [10], queryIDS( tc, "\"wild flowers\" Sydney" ) );
+assert.eq([10], queryIDS(tc, "\"wild flowers\" Sydney"));
-assert.eq( [3], queryIDS( tc, "\"industry\" -Melbourne -Physics" ) );
+assert.eq([3], queryIDS(tc, "\"industry\" -Melbourne -Physics"));
// -------------------------------------------- EDGE CASES -----------------------------------------
// test empty string
-res = tc.find( { "$text": { "$search": "" } } );
-assert.eq( 0, res.length() );
+res = tc.find({"$text": {"$search": ""}});
+assert.eq(0, res.length());
// test string with a space in it
-res = tc.find( { "$text": { "$search": " " } } );
-assert.eq( 0, res.length() );
+res = tc.find({"$text": {"$search": " "}});
+assert.eq(0, res.length());
// -------------------------------------------- FILTERING ------------------------------------------
-assert.eq( [2], queryIDS( tc, "Mahim" ) );
-assert.eq( [2], queryIDS( tc, "Mahim", { _id: 2 } ) );
-assert.eq( [], queryIDS( tc, "Mahim", { _id: 1 } ) );
-assert.eq( [], queryIDS( tc, "Mahim", { _id: { $gte: 4 } } ) );
-assert.eq( [2], queryIDS( tc, "Mahim", { _id: { $lte: 4 } } ) );
+assert.eq([2], queryIDS(tc, "Mahim"));
+assert.eq([2], queryIDS(tc, "Mahim", {_id: 2}));
+assert.eq([], queryIDS(tc, "Mahim", {_id: 1}));
+assert.eq([], queryIDS(tc, "Mahim", {_id: {$gte: 4}}));
+assert.eq([2], queryIDS(tc, "Mahim", {_id: {$lte: 4}}));
// using regex conditional filtering
-assert.eq( [9], queryIDS( tc, "members", { title: { $regex: /Phy.*/i } } ) );
+assert.eq([9], queryIDS(tc, "members", {title: {$regex: /Phy.*/i}}));
// -------------------------------------------------------------------------------------------------
-assert( tc.validate().valid );
+assert(tc.validate().valid);
diff --git a/jstests/core/fts_partition1.js b/jstests/core/fts_partition1.js
index 52874f6628b..fc32507f430 100644
--- a/jstests/core/fts_partition1.js
+++ b/jstests/core/fts_partition1.js
@@ -1,22 +1,23 @@
-load( "jstests/libs/fts.js" );
+load("jstests/libs/fts.js");
t = db.text_parition1;
t.drop();
-t.insert( { _id : 1 , x : 1 , y : "foo" } );
-t.insert( { _id : 2 , x : 1 , y : "bar" } );
-t.insert( { _id : 3 , x : 2 , y : "foo" } );
-t.insert( { _id : 4 , x : 2 , y : "bar" } );
+t.insert({_id: 1, x: 1, y: "foo"});
+t.insert({_id: 2, x: 1, y: "bar"});
+t.insert({_id: 3, x: 2, y: "foo"});
+t.insert({_id: 4, x: 2, y: "bar"});
-t.ensureIndex( { x : 1, y : "text" } );
+t.ensureIndex({x: 1, y: "text"});
-assert.throws(t.find( { "$text": { "$search" : "foo" } } ));
+assert.throws(t.find({"$text": {"$search": "foo"}}));
-assert.eq( [ 1 ], queryIDS( t, "foo" , { x : 1 } ) );
+assert.eq([1], queryIDS(t, "foo", {x: 1}));
-res = t.find( { "$text": { "$search" : "foo" }, x : 1 }, { score: { "$meta" : "textScore" } } );
-assert( res[0].score > 0, tojson(res.toArray()));
+res = t.find({"$text": {"$search": "foo"}, x: 1}, {score: {"$meta": "textScore"}});
+assert(res[0].score > 0, tojson(res.toArray()));
// repeat "search" with "language" specified, SERVER-8999
-res = t.find( { "$text": { "$search" : "foo" , "$language" : "english" }, x : 1 }, { score: { "$meta" : "textScore" } } );
-assert( res[0].score > 0, tojson(res.toArray()));
+res = t.find({"$text": {"$search": "foo", "$language": "english"}, x: 1},
+ {score: {"$meta": "textScore"}});
+assert(res[0].score > 0, tojson(res.toArray()));
diff --git a/jstests/core/fts_partition_no_multikey.js b/jstests/core/fts_partition_no_multikey.js
index f77dc053f85..4c249522c30 100644
--- a/jstests/core/fts_partition_no_multikey.js
+++ b/jstests/core/fts_partition_no_multikey.js
@@ -2,12 +2,12 @@
t = db.fts_partition_no_multikey;
t.drop();
-t.ensureIndex( { x : 1, y : "text" } );
+t.ensureIndex({x: 1, y: "text"});
-assert.writeOK( t.insert( { x : 5 , y : "this is fun" } ));
+assert.writeOK(t.insert({x: 5, y: "this is fun"}));
-assert.writeError( t.insert( { x : [] , y : "this is fun" } ));
+assert.writeError(t.insert({x: [], y: "this is fun"}));
-assert.writeError( t.insert( { x : [1] , y : "this is fun" } ));
+assert.writeError(t.insert({x: [1], y: "this is fun"}));
-assert.writeError( t.insert( { x : [1,2] , y : "this is fun" } ));
+assert.writeError(t.insert({x: [1, 2], y: "this is fun"}));
diff --git a/jstests/core/fts_phrase.js b/jstests/core/fts_phrase.js
index 471fedbfb42..d36df8aaeb0 100644
--- a/jstests/core/fts_phrase.js
+++ b/jstests/core/fts_phrase.js
@@ -2,24 +2,20 @@
t = db.text_phrase;
t.drop();
-t.save( { _id : 1 , title : "my blog post" , text : "i am writing a blog. yay" } );
-t.save( { _id : 2 , title : "my 2nd post" , text : "this is a new blog i am typing. yay" } );
-t.save( { _id : 3 , title : "knives are Fun" , text : "this is a new blog i am writing. yay" } );
-
-t.ensureIndex( { "title" : "text" , text : "text" } , { weights : { title : 10 } } );
-
-res = t.find( { "$text" : { "$search" : "blog write" } }, { score: { "$meta" : "textScore" } } ).sort( { score: { "$meta" : "textScore" } });
-assert.eq( 3, res.length() );
-assert.eq( 1, res[0]._id );
-assert( res[0].score > (res[1].score*2), tojson(res.toArray()));
-
-res = t.find( { "$text" : { "$search" : "write blog" } }, { score: { "$meta" : "textScore" } } ).sort( { score: { "$meta" : "textScore" } });
-assert.eq( 3, res.length() );
-assert.eq( 1, res[0]._id );
-assert( res[0].score > (res[1].score*2), tojson(res.toArray()));
-
-
-
-
-
-
+t.save({_id: 1, title: "my blog post", text: "i am writing a blog. yay"});
+t.save({_id: 2, title: "my 2nd post", text: "this is a new blog i am typing. yay"});
+t.save({_id: 3, title: "knives are Fun", text: "this is a new blog i am writing. yay"});
+
+t.ensureIndex({"title": "text", text: "text"}, {weights: {title: 10}});
+
+res = t.find({"$text": {"$search": "blog write"}}, {score: {"$meta": "textScore"}})
+ .sort({score: {"$meta": "textScore"}});
+assert.eq(3, res.length());
+assert.eq(1, res[0]._id);
+assert(res[0].score > (res[1].score * 2), tojson(res.toArray()));
+
+res = t.find({"$text": {"$search": "write blog"}}, {score: {"$meta": "textScore"}})
+ .sort({score: {"$meta": "textScore"}});
+assert.eq(3, res.length());
+assert.eq(1, res[0]._id);
+assert(res[0].score > (res[1].score * 2), tojson(res.toArray()));
diff --git a/jstests/core/fts_proj.js b/jstests/core/fts_proj.js
index ecd60e83a65..b59c02cc293 100644
--- a/jstests/core/fts_proj.js
+++ b/jstests/core/fts_proj.js
@@ -1,20 +1,16 @@
t = db.text_proj;
t.drop();
-t.save( { _id : 1 , x : "a", y: "b", z : "c"});
-t.save( { _id : 2 , x : "d", y: "e", z : "f"});
-t.save( { _id : 3 , x : "a", y: "g", z : "h"});
-
-t.ensureIndex( { x : "text"} , { default_language : "none" } );
-
-res = t.find( { "$text": {"$search" : "a"}} );
-assert.eq( 2, res.length() );
-assert( res[0].y, tojson(res.toArray()));
-
-res = t.find( { "$text": {"$search" : "a"}}, {x: 1} );
-assert.eq( 2, res.length() );
-assert( !res[0].y, tojson(res.toArray()));
-
+t.save({_id: 1, x: "a", y: "b", z: "c"});
+t.save({_id: 2, x: "d", y: "e", z: "f"});
+t.save({_id: 3, x: "a", y: "g", z: "h"});
+t.ensureIndex({x: "text"}, {default_language: "none"});
+res = t.find({"$text": {"$search": "a"}});
+assert.eq(2, res.length());
+assert(res[0].y, tojson(res.toArray()));
+res = t.find({"$text": {"$search": "a"}}, {x: 1});
+assert.eq(2, res.length());
+assert(!res[0].y, tojson(res.toArray()));
diff --git a/jstests/core/fts_projection.js b/jstests/core/fts_projection.js
index 60bb445a7b3..50fe4755fc3 100644
--- a/jstests/core/fts_projection.js
+++ b/jstests/core/fts_projection.js
@@ -8,10 +8,11 @@ db.adminCommand({setParameter: 1, newQueryFrameworkEnabled: true});
t.insert({_id: 0, a: "textual content"});
t.insert({_id: 1, a: "additional content", b: -1});
t.insert({_id: 2, a: "irrelevant content"});
-t.ensureIndex({a:"text"});
+t.ensureIndex({a: "text"});
// Project the text score.
-var results = t.find({$text: {$search: "textual content -irrelevant"}}, {_idCopy:0, score:{$meta: "textScore"}}).toArray();
+var results = t.find({$text: {$search: "textual content -irrelevant"}},
+ {_idCopy: 0, score: {$meta: "textScore"}}).toArray();
// printjson(results);
// Scores should exist.
assert.eq(results.length, 2);
@@ -28,7 +29,8 @@ scores[results[1]._id] = results[1].score;
//
// Project text score into 2 fields.
-results = t.find({$text: {$search: "textual content -irrelevant"}}, {otherScore: {$meta: "textScore"}, score:{$meta: "textScore"}}).toArray();
+results = t.find({$text: {$search: "textual content -irrelevant"}},
+ {otherScore: {$meta: "textScore"}, score: {$meta: "textScore"}}).toArray();
assert.eq(2, results.length);
for (var i = 0; i < results.length; ++i) {
assert.close(scores[results[i]._id], results[i].score);
@@ -38,18 +40,22 @@ for (var i = 0; i < results.length; ++i) {
// printjson(results);
// Project text score into "x.$" shouldn't crash
-assert.throws(function() { t.find({$text: {$search: "textual content -irrelevant"}}, {'x.$': {$meta: "textScore"}}).toArray(); });
+assert.throws(function() {
+ t.find({$text: {$search: "textual content -irrelevant"}}, {'x.$': {$meta: "textScore"}})
+ .toArray();
+});
// TODO: We can't project 'x.y':1 and 'x':1 (yet).
// Clobber an existing field and behave nicely.
-results = t.find({$text: {$search: "textual content -irrelevant"}},
- {b: {$meta: "textScore"}}).toArray();
+results =
+ t.find({$text: {$search: "textual content -irrelevant"}}, {b: {$meta: "textScore"}}).toArray();
assert.eq(2, results.length);
for (var i = 0; i < results.length; ++i) {
- assert.close(scores[results[i]._id], results[i].b,
- i + ': existing field in ' + tojson(results[i], '', true) +
- ' is not clobbered with score');
+ assert.close(
+ scores[results[i]._id],
+ results[i].b,
+ i + ': existing field in ' + tojson(results[i], '', true) + ' is not clobbered with score');
}
assert.neq(-1, results[0].b);
@@ -59,35 +65,40 @@ var results = t.find({a: /text/}, {score: {$meta: "textScore"}}).toArray();
// printjson(results);
// No textScore proj. with nested fields
-assert.throws(function() { t.find({$text: {$search: "blah"}}, {'x.y':{$meta: "textScore"}}).toArray(); });
+assert.throws(function() {
+ t.find({$text: {$search: "blah"}}, {'x.y': {$meta: "textScore"}}).toArray();
+});
// SERVER-12173
// When $text operator is in $or, should evaluate first
results = t.find({$or: [{$text: {$search: "textual content -irrelevant"}}, {_id: 1}]},
- {_idCopy:0, score:{$meta: "textScore"}}).toArray();
+ {_idCopy: 0, score: {$meta: "textScore"}}).toArray();
printjson(results);
assert.eq(2, results.length);
for (var i = 0; i < results.length; ++i) {
- assert.close(scores[results[i]._id], results[i].score,
+ assert.close(scores[results[i]._id],
+ results[i].score,
i + ': TEXT under OR invalid score: ' + tojson(results[i], '', true));
}
// SERVER-12592
-// When $text operator is in $or, all non-$text children must be indexed. Otherwise, we should produce
+// When $text operator is in $or, all non-$text children must be indexed. Otherwise, we should
+// produce
// a readable error.
var errorMessage = '';
-assert.throws( function() {
+assert.throws(function() {
try {
t.find({$or: [{$text: {$search: "textual content -irrelevant"}}, {b: 1}]}).itcount();
- }
- catch (e) {
+ } catch (e) {
errorMessage = e;
throw e;
}
}, null, 'Expected error from failed TEXT under OR planning');
-assert.neq(-1, errorMessage.message.indexOf('TEXT'),
+assert.neq(-1,
+ errorMessage.message.indexOf('TEXT'),
'message from failed text planning does not mention TEXT: ' + errorMessage);
-assert.neq(-1, errorMessage.message.indexOf('OR'),
+assert.neq(-1,
+ errorMessage.message.indexOf('OR'),
'message from failed text planning does not mention OR: ' + errorMessage);
// Scores should exist.
@@ -96,4 +107,3 @@ assert(results[0].score,
"invalid text score for " + tojson(results[0], '', true) + " when $text is in $or");
assert(results[1].score,
"invalid text score for " + tojson(results[0], '', true) + " when $text is in $or");
-
diff --git a/jstests/core/fts_querylang.js b/jstests/core/fts_querylang.js
index 4685b6fa550..2b13485699e 100644
--- a/jstests/core/fts_querylang.js
+++ b/jstests/core/fts_querylang.js
@@ -23,18 +23,25 @@ assert.neq(results[0]._id, 2);
assert.neq(results[1]._id, 2);
// Test sort with basic text query.
-results = t.find({$text: {$search: "textual content -irrelevant"}}).sort({unindexedField: 1}).toArray();
+results =
+ t.find({$text: {$search: "textual content -irrelevant"}}).sort({unindexedField: 1}).toArray();
assert.eq(results.length, 2);
assert.eq(results[0]._id, 0);
assert.eq(results[1]._id, 1);
// Test skip with basic text query.
-results = t.find({$text: {$search: "textual content -irrelevant"}}).sort({unindexedField: 1}).skip(1).toArray();
+results = t.find({$text: {$search: "textual content -irrelevant"}})
+ .sort({unindexedField: 1})
+ .skip(1)
+ .toArray();
assert.eq(results.length, 1);
assert.eq(results[0]._id, 1);
// Test limit with basic text query.
-results = t.find({$text: {$search: "textual content -irrelevant"}}).sort({unindexedField: 1}).limit(1).toArray();
+results = t.find({$text: {$search: "textual content -irrelevant"}})
+ .sort({unindexedField: 1})
+ .limit(1)
+ .toArray();
assert.eq(results.length, 1);
assert.eq(results[0]._id, 0);
@@ -44,19 +51,17 @@ assert.eq(results[0]._id, 0);
// framework.
// Test $and of basic text query with indexed expression.
-results = t.find({$text: {$search: "content -irrelevant"},
- _id: 1}).toArray();
+results = t.find({$text: {$search: "content -irrelevant"}, _id: 1}).toArray();
assert.eq(results.length, 1);
assert.eq(results[0]._id, 1);
// Test $and of basic text query with indexed expression, and bad language
assert.throws(function() {
- t.find({$text: {$search: "content -irrelevant", $language: "spanglish"}, _id: 1})
- .itcount();});
+ t.find({$text: {$search: "content -irrelevant", $language: "spanglish"}, _id: 1}).itcount();
+});
// Test $and of basic text query with unindexed expression.
-results = t.find({$text: {$search: "content -irrelevant"},
- unindexedField: 1}).toArray();
+results = t.find({$text: {$search: "content -irrelevant"}, unindexedField: 1}).toArray();
assert.eq(results.length, 1);
assert.eq(results[0]._id, 1);
@@ -70,14 +75,15 @@ cursor = t.find({$text: {$search: "contents", $language: "EN"}});
assert.eq(true, cursor.hasNext());
cursor = t.find({$text: {$search: "contents", $language: "spanglish"}});
-assert.throws(function() { cursor.next(); });
+assert.throws(function() {
+ cursor.next();
+});
// TODO Test $and of basic text query with geo expression.
// Test update with $text.
t.update({$text: {$search: "textual content -irrelevant"}}, {$set: {b: 1}}, {multi: true});
-assert.eq(2, t.find({b: 1}).itcount(),
- 'incorrect number of documents updated');
+assert.eq(2, t.find({b: 1}).itcount(), 'incorrect number of documents updated');
// TODO Test remove with $text, once it is enabled with the new query framework.
diff --git a/jstests/core/fts_score_sort.js b/jstests/core/fts_score_sort.js
index 59fb852a774..3ca22fe947d 100644
--- a/jstests/core/fts_score_sort.js
+++ b/jstests/core/fts_score_sort.js
@@ -8,10 +8,13 @@ db.adminCommand({setParameter: 1, newQueryFrameworkEnabled: true});
t.insert({_id: 0, a: "textual content"});
t.insert({_id: 1, a: "additional content"});
t.insert({_id: 2, a: "irrelevant content"});
-t.ensureIndex({a:"text"});
+t.ensureIndex({a: "text"});
// Sort by the text score.
-var results = t.find({$text: {$search: "textual content -irrelevant"}}, {score: {$meta: "textScore"}}).sort({score: {$meta: "textScore"}}).toArray();
+var results =
+ t.find({$text: {$search: "textual content -irrelevant"}}, {score: {$meta: "textScore"}})
+ .sort({score: {$meta: "textScore"}})
+ .toArray();
// printjson(results);
assert.eq(results.length, 2);
assert.eq(results[0]._id, 0);
@@ -19,7 +22,10 @@ assert.eq(results[1]._id, 1);
assert(results[0].score > results[1].score);
// Sort by {_id descending, score} and verify the order is right.
-var results = t.find({$text: {$search: "textual content -irrelevant"}}, {score: {$meta: "textScore"}}).sort({_id: -1, score: {$meta: "textScore"}}).toArray();
+var results =
+ t.find({$text: {$search: "textual content -irrelevant"}}, {score: {$meta: "textScore"}})
+ .sort({_id: -1, score: {$meta: "textScore"}})
+ .toArray();
printjson(results);
assert.eq(results.length, 2);
assert.eq(results[0]._id, 1);
diff --git a/jstests/core/fts_spanish.js b/jstests/core/fts_spanish.js
index 7c8ccecd577..74d71cceddf 100644
--- a/jstests/core/fts_spanish.js
+++ b/jstests/core/fts_spanish.js
@@ -1,30 +1,29 @@
-load( "jstests/libs/fts.js" );
+load("jstests/libs/fts.js");
t = db.text_spanish;
t.drop();
-t.save( { _id: 1, title: "mi blog", text: "Este es un blog de prueba" } );
-t.save( { _id: 2, title: "mi segundo post", text: "Este es un blog de prueba" } );
-t.save( { _id: 3, title: "cuchillos son divertidos", text: "este es mi tercer blog stemmed" } );
-t.save( { _id: 4, language: "en", title: "My fourth blog", text: "This stemmed blog is in english" } );
+t.save({_id: 1, title: "mi blog", text: "Este es un blog de prueba"});
+t.save({_id: 2, title: "mi segundo post", text: "Este es un blog de prueba"});
+t.save({_id: 3, title: "cuchillos son divertidos", text: "este es mi tercer blog stemmed"});
+t.save({_id: 4, language: "en", title: "My fourth blog", text: "This stemmed blog is in english"});
// default weight is 1
// specify weights if you want a field to be more meaningull
-t.ensureIndex( { "title": "text", text: "text" }, { weights: { title: 10 },
- default_language: "es" } );
+t.ensureIndex({"title": "text", text: "text"}, {weights: {title: 10}, default_language: "es"});
-res = t.find( { "$text" : { "$search" : "blog" } } );
-assert.eq( 4, res.length() );
+res = t.find({"$text": {"$search": "blog"}});
+assert.eq(4, res.length());
-assert.eq( [4], queryIDS( t, "stem" ) );
-assert.eq( [3], queryIDS( t, "stemmed" ) );
-assert.eq( [4], queryIDS( t, "stemmed", null, { "$language" : "en" } ) );
+assert.eq([4], queryIDS(t, "stem"));
+assert.eq([3], queryIDS(t, "stemmed"));
+assert.eq([4], queryIDS(t, "stemmed", null, {"$language": "en"}));
-assert.eq( [1,2], queryIDS( t, "prueba" ) );
+assert.eq([1, 2], queryIDS(t, "prueba"));
-assert.writeError( t.save( { _id: 5, language: "spanglish", title: "", text: "" } ));
+assert.writeError(t.save({_id: 5, language: "spanglish", title: "", text: ""}));
t.dropIndexes();
-res = t.ensureIndex( { "title": "text", text: "text" }, { default_language: "spanglish" } );
+res = t.ensureIndex({"title": "text", text: "text"}, {default_language: "spanglish"});
assert.neq(null, res);
diff --git a/jstests/core/geo1.js b/jstests/core/geo1.js
index e1dc23fe153..724ae31a3ce 100644
--- a/jstests/core/geo1.js
+++ b/jstests/core/geo1.js
@@ -2,36 +2,39 @@
t = db.geo1;
t.drop();
-idx = { loc : "2d" , zip : 1 };
+idx = {
+ loc: "2d",
+ zip: 1
+};
-t.insert( { zip : "06525" , loc : [ 41.352964 , 73.01212 ] } );
-t.insert( { zip : "10024" , loc : [ 40.786387 , 73.97709 ] } );
-assert.writeOK( t.insert( { zip : "94061" , loc : [ 37.463911 , 122.23396 ] } ));
+t.insert({zip: "06525", loc: [41.352964, 73.01212]});
+t.insert({zip: "10024", loc: [40.786387, 73.97709]});
+assert.writeOK(t.insert({zip: "94061", loc: [37.463911, 122.23396]}));
// test "2d" has to be first
-assert.eq( 1 , t.getIndexKeys().length , "S1" );
-t.ensureIndex( { zip : 1 , loc : "2d" } );
-assert.eq( 1 , t.getIndexKeys().length , "S2" );
+assert.eq(1, t.getIndexKeys().length, "S1");
+t.ensureIndex({zip: 1, loc: "2d"});
+assert.eq(1, t.getIndexKeys().length, "S2");
-t.ensureIndex( idx );
-assert.eq( 2 , t.getIndexKeys().length , "S3" );
+t.ensureIndex(idx);
+assert.eq(2, t.getIndexKeys().length, "S3");
-assert.eq( 3 , t.count() , "B1" );
-assert.writeError( t.insert( { loc : [ 200 , 200 ] } ));
-assert.eq( 3 , t.count() , "B3" );
+assert.eq(3, t.count(), "B1");
+assert.writeError(t.insert({loc: [200, 200]}));
+assert.eq(3, t.count(), "B3");
// test normal access
-wb = t.findOne( { zip : "06525" } );
-assert( wb , "C1" );
+wb = t.findOne({zip: "06525"});
+assert(wb, "C1");
-assert.eq( "06525" , t.find( { loc : wb.loc } ).hint( { "$natural" : 1 } )[0].zip , "C2" );
-assert.eq( "06525" , t.find( { loc : wb.loc } )[0].zip , "C3" );
+assert.eq("06525", t.find({loc: wb.loc}).hint({"$natural": 1})[0].zip, "C2");
+assert.eq("06525", t.find({loc: wb.loc})[0].zip, "C3");
// assert.eq( 1 , t.find( { loc : wb.loc } ).explain().nscanned , "C4" )
// test config options
t.drop();
-t.ensureIndex( { loc : "2d" } , { min : -500 , max : 500 , bits : 4 } );
-assert.writeOK( t.insert( { loc : [ 200 , 200 ] } ));
+t.ensureIndex({loc: "2d"}, {min: -500, max: 500, bits: 4});
+assert.writeOK(t.insert({loc: [200, 200]}));
diff --git a/jstests/core/geo10.js b/jstests/core/geo10.js
index 5c26fbb3609..10879fc5d80 100644
--- a/jstests/core/geo10.js
+++ b/jstests/core/geo10.js
@@ -3,11 +3,14 @@
coll = db.geo10;
coll.drop();
-assert.commandWorked( db.geo10.ensureIndex( { c : '2d', t : 1 }, { min : 0, max : Math.pow( 2, 40 ) } ));
-assert.eq( 2, db.geo10.getIndexes().length, "A3" );
+assert.commandWorked(db.geo10.ensureIndex({c: '2d', t: 1}, {min: 0, max: Math.pow(2, 40)}));
+assert.eq(2, db.geo10.getIndexes().length, "A3");
-assert.writeOK( db.geo10.insert( { c : [ 1, 1 ], t : 1 } ));
-assert.writeOK( db.geo10.insert( { c : [ 3600, 3600 ], t : 1 } ));
-assert.writeOK( db.geo10.insert( { c : [ 0.001, 0.001 ], t : 1 } ));
+assert.writeOK(db.geo10.insert({c: [1, 1], t: 1}));
+assert.writeOK(db.geo10.insert({c: [3600, 3600], t: 1}));
+assert.writeOK(db.geo10.insert({c: [0.001, 0.001], t: 1}));
-printjson( db.geo10.find({ c : { $within : { $box : [[0.001, 0.001], [Math.pow(2, 40) - 0.001, Math.pow(2, 40) - 0.001]] } }, t : 1 }).toArray() );
+printjson(db.geo10.find({
+ c: {$within: {$box: [[0.001, 0.001], [Math.pow(2, 40) - 0.001, Math.pow(2, 40) - 0.001]]}},
+ t: 1
+}).toArray());
diff --git a/jstests/core/geo2.js b/jstests/core/geo2.js
index 4317d044f76..0b7e91c18bc 100644
--- a/jstests/core/geo2.js
+++ b/jstests/core/geo2.js
@@ -4,42 +4,40 @@ t.drop();
n = 1;
arr = [];
-for ( var x=-100; x<100; x+=2 ){
- for ( var y=-100; y<100; y+=2 ){
- arr.push( { _id : n++ , loc : [ x , y ] } );
+for (var x = -100; x < 100; x += 2) {
+ for (var y = -100; y < 100; y += 2) {
+ arr.push({_id: n++, loc: [x, y]});
}
}
-t.insert( arr );
-assert.eq( t.count(), 100 * 100 );
-assert.eq( t.count(), n - 1 );
+t.insert(arr);
+assert.eq(t.count(), 100 * 100);
+assert.eq(t.count(), n - 1);
+t.ensureIndex({loc: "2d"});
-t.ensureIndex( { loc : "2d" } );
+fast = db.runCommand({geoNear: t.getName(), near: [50, 50], num: 10});
-fast = db.runCommand( { geoNear : t.getName() , near : [ 50 , 50 ] , num : 10 } );
-
-function a( cur ){
+function a(cur) {
var total = 0;
var outof = 0;
- while ( cur.hasNext() ){
+ while (cur.hasNext()) {
var o = cur.next();
- total += Geo.distance( [ 50 , 50 ] , o.loc );
+ total += Geo.distance([50, 50], o.loc);
outof++;
}
- return total/outof;
+ return total / outof;
}
-assert.close( fast.stats.avgDistance , a( t.find( { loc : { $near : [ 50 , 50 ] } } ).limit(10) ) , "B1" );
-assert.close( 1.33333 , a( t.find( { loc : { $near : [ 50 , 50 ] } } ).limit(3) ) , "B2" );
-assert.close( fast.stats.avgDistance , a( t.find( { loc : { $near : [ 50 , 50 ] } } ).limit(10) ) , "B3" );
-
-printjson( t.find( { loc : { $near : [ 50 , 50 ] } } ).explain() );
+assert.close(fast.stats.avgDistance, a(t.find({loc: {$near: [50, 50]}}).limit(10)), "B1");
+assert.close(1.33333, a(t.find({loc: {$near: [50, 50]}}).limit(3)), "B2");
+assert.close(fast.stats.avgDistance, a(t.find({loc: {$near: [50, 50]}}).limit(10)), "B3");
+printjson(t.find({loc: {$near: [50, 50]}}).explain());
-assert.lt( 3 , a( t.find( { loc : { $near : [ 50 , 50 ] } } ).limit(50) ) , "C1" );
-assert.gt( 3 , a( t.find( { loc : { $near : [ 50 , 50 , 3 ] } } ).limit(50) ) , "C2" );
-assert.gt( 3 , a( t.find( { loc : { $near : [ 50 , 50 ] , $maxDistance : 3 } } ).limit(50) ) , "C3" );
+assert.lt(3, a(t.find({loc: {$near: [50, 50]}}).limit(50)), "C1");
+assert.gt(3, a(t.find({loc: {$near: [50, 50, 3]}}).limit(50)), "C2");
+assert.gt(3, a(t.find({loc: {$near: [50, 50], $maxDistance: 3}}).limit(50)), "C3");
// SERVER-8974 - test if $geoNear operator works with 2d index as well
-var geoNear_cursor = t.find( { loc : { $geoNear : [50, 50] } } ).limit(100);
-assert.eq( geoNear_cursor.count(true), 100 );
+var geoNear_cursor = t.find({loc: {$geoNear: [50, 50]}}).limit(100);
+assert.eq(geoNear_cursor.count(true), 100);
diff --git a/jstests/core/geo3.js b/jstests/core/geo3.js
index a11c24ed338..da3d8641049 100644
--- a/jstests/core/geo3.js
+++ b/jstests/core/geo3.js
@@ -4,80 +4,83 @@ t.drop();
n = 1;
arr = [];
-for ( var x=-100; x<100; x+=2 ){
- for ( var y=-100; y<100; y+=2 ){
- arr.push( { _id : n++ , loc : [ x , y ] , a : Math.abs( x ) % 5 , b : Math.abs( y ) % 5 } );
+for (var x = -100; x < 100; x += 2) {
+ for (var y = -100; y < 100; y += 2) {
+ arr.push({_id: n++, loc: [x, y], a: Math.abs(x) % 5, b: Math.abs(y) % 5});
}
}
-t.insert( arr );
-assert.eq( t.count(), 100 * 100 );
-assert.eq( t.count(), n - 1 );
+t.insert(arr);
+assert.eq(t.count(), 100 * 100);
+assert.eq(t.count(), n - 1);
+t.ensureIndex({loc: "2d"});
-t.ensureIndex( { loc : "2d" } );
-
-fast = db.runCommand( { geoNear : t.getName() , near : [ 50 , 50 ] , num : 10 } );
+fast = db.runCommand({geoNear: t.getName(), near: [50, 50], num: 10});
// test filter
-filtered1 = db.runCommand( { geoNear : t.getName() , near : [ 50 , 50 ] , num : 10 , query : { a : 2 } } );
-assert.eq( 10 , filtered1.results.length , "B1" );
-filtered1.results.forEach( function(z){ assert.eq( 2 , z.obj.a , "B2: " + tojson( z ) ); } );
-//printjson( filtered1.stats );
+filtered1 = db.runCommand({geoNear: t.getName(), near: [50, 50], num: 10, query: {a: 2}});
+assert.eq(10, filtered1.results.length, "B1");
+filtered1.results.forEach(function(z) {
+ assert.eq(2, z.obj.a, "B2: " + tojson(z));
+});
+// printjson( filtered1.stats );
-function avgA( q , len ){
- if ( ! len )
+function avgA(q, len) {
+ if (!len)
len = 10;
- var realq = { loc : { $near : [ 50 , 50 ] } };
- if ( q )
- Object.extend( realq , q );
- var as =
- t.find( realq ).limit(len).map(
- function(z){
- return z.a;
- }
- );
- assert.eq( len , as.length , "length in avgA" );
- return Array.avg( as );
+ var realq = {
+ loc: {$near: [50, 50]}
+ };
+ if (q)
+ Object.extend(realq, q);
+ var as = t.find(realq).limit(len).map(function(z) {
+ return z.a;
+ });
+ assert.eq(len, as.length, "length in avgA");
+ return Array.avg(as);
}
-function testFiltering( msg ){
- assert.gt( 2 , avgA( {} ) , msg + " testFiltering 1 " );
- assert.eq( 2 , avgA( { a : 2 } ) , msg + " testFiltering 2 " );
- assert.eq( 4 , avgA( { a : 4 } ) , msg + " testFiltering 3 " );
+function testFiltering(msg) {
+ assert.gt(2, avgA({}), msg + " testFiltering 1 ");
+ assert.eq(2, avgA({a: 2}), msg + " testFiltering 2 ");
+ assert.eq(4, avgA({a: 4}), msg + " testFiltering 3 ");
}
-testFiltering( "just loc" );
-
-t.dropIndex( { loc : "2d" } );
-assert.eq( 1 , t.getIndexKeys().length , "setup 3a" );
-t.ensureIndex( { loc : "2d" , a : 1 } );
-assert.eq( 2 , t.getIndexKeys().length , "setup 3b" );
+testFiltering("just loc");
-filtered2 = db.runCommand( { geoNear : t.getName() , near : [ 50 , 50 ] , num : 10 , query : { a : 2 } } );
-assert.eq( 10 , filtered2.results.length , "B3" );
-filtered2.results.forEach( function(z){ assert.eq( 2 , z.obj.a , "B4: " + tojson( z ) ); } );
+t.dropIndex({loc: "2d"});
+assert.eq(1, t.getIndexKeys().length, "setup 3a");
+t.ensureIndex({loc: "2d", a: 1});
+assert.eq(2, t.getIndexKeys().length, "setup 3b");
-assert.eq( filtered1.stats.avgDistance , filtered2.stats.avgDistance , "C1" );
-assert.gt( filtered1.stats.objectsLoaded , filtered2.stats.objectsLoaded , "C3" );
+filtered2 = db.runCommand({geoNear: t.getName(), near: [50, 50], num: 10, query: {a: 2}});
+assert.eq(10, filtered2.results.length, "B3");
+filtered2.results.forEach(function(z) {
+ assert.eq(2, z.obj.a, "B4: " + tojson(z));
+});
-testFiltering( "loc and a" );
+assert.eq(filtered1.stats.avgDistance, filtered2.stats.avgDistance, "C1");
+assert.gt(filtered1.stats.objectsLoaded, filtered2.stats.objectsLoaded, "C3");
-t.dropIndex( { loc : "2d" , a : 1 } );
-assert.eq( 1 , t.getIndexKeys().length , "setup 4a" );
-t.ensureIndex( { loc : "2d" , b : 1 } );
-assert.eq( 2 , t.getIndexKeys().length , "setup 4b" );
+testFiltering("loc and a");
-testFiltering( "loc and b" );
+t.dropIndex({loc: "2d", a: 1});
+assert.eq(1, t.getIndexKeys().length, "setup 4a");
+t.ensureIndex({loc: "2d", b: 1});
+assert.eq(2, t.getIndexKeys().length, "setup 4b");
+testFiltering("loc and b");
-q = { loc : { $near : [ 50 , 50 ] } };
-assert.eq( 100 , t.find( q ).limit(100).itcount() , "D1" );
-assert.eq( 100 , t.find( q ).limit(100).size() , "D2" );
+q = {
+ loc: {$near: [50, 50]}
+};
+assert.eq(100, t.find(q).limit(100).itcount(), "D1");
+assert.eq(100, t.find(q).limit(100).size(), "D2");
-assert.eq( 20 , t.find( q ).limit(20).itcount() , "D3" );
-assert.eq( 20 , t.find( q ).limit(20).size() , "D4" );
+assert.eq(20, t.find(q).limit(20).itcount(), "D3");
+assert.eq(20, t.find(q).limit(20).size(), "D4");
// SERVER-14039 Wrong limit after skip with $nearSphere, 2d index
-assert.eq( 10 , t.find( q ).skip(10).limit(10).itcount() , "D5" );
-assert.eq( 10 , t.find( q ).skip(10).limit(10).size() , "D6" );
+assert.eq(10, t.find(q).skip(10).limit(10).itcount(), "D5");
+assert.eq(10, t.find(q).skip(10).limit(10).size(), "D6");
diff --git a/jstests/core/geo5.js b/jstests/core/geo5.js
index 1a0830113e9..bbaa84c1d17 100644
--- a/jstests/core/geo5.js
+++ b/jstests/core/geo5.js
@@ -1,18 +1,17 @@
t = db.geo5;
t.drop();
-t.insert( { p : [ 0,0 ] } );
-t.ensureIndex( { p : "2d" } );
+t.insert({p: [0, 0]});
+t.ensureIndex({p: "2d"});
-res = t.runCommand( "geoNear" , { near : [1,1] } );
-assert.eq( 1 , res.results.length , "A1" );
+res = t.runCommand("geoNear", {near: [1, 1]});
+assert.eq(1, res.results.length, "A1");
-t.insert( { p : [ 1,1 ] } );
-t.insert( { p : [ -1,-1 ] } );
-res = t.runCommand( "geoNear" , { near : [50,50] } );
-assert.eq( 3 , res.results.length , "A2" );
-
-t.insert( { p : [ -1,-1 ] } );
-res = t.runCommand( "geoNear" , { near : [50,50] } );
-assert.eq( 4 , res.results.length , "A3" );
+t.insert({p: [1, 1]});
+t.insert({p: [-1, -1]});
+res = t.runCommand("geoNear", {near: [50, 50]});
+assert.eq(3, res.results.length, "A2");
+t.insert({p: [-1, -1]});
+res = t.runCommand("geoNear", {near: [50, 50]});
+assert.eq(4, res.results.length, "A3");
diff --git a/jstests/core/geo6.js b/jstests/core/geo6.js
index e57f8a6b6f6..3d681fe9b7e 100644
--- a/jstests/core/geo6.js
+++ b/jstests/core/geo6.js
@@ -2,23 +2,22 @@
t = db.geo6;
t.drop();
-t.ensureIndex( { loc : "2d" } );
+t.ensureIndex({loc: "2d"});
-assert.eq( 0 , t.find().itcount() , "pre0" );
-assert.eq( 0 , t.find( { loc : { $near : [50,50] } } ).itcount() , "pre1" );
+assert.eq(0, t.find().itcount(), "pre0");
+assert.eq(0, t.find({loc: {$near: [50, 50]}}).itcount(), "pre1");
-t.insert( { _id : 1 , loc : [ 1 , 1 ] } );
-t.insert( { _id : 2 , loc : [ 1 , 2 ] } );
-t.insert( { _id : 3 } );
+t.insert({_id: 1, loc: [1, 1]});
+t.insert({_id: 2, loc: [1, 2]});
+t.insert({_id: 3});
-assert.eq( 3 , t.find().itcount() , "A1" );
-assert.eq( 2 , t.find().hint( { loc : "2d" } ).itcount() , "A2" );
-assert.eq( 2 , t.find( { loc : { $near : [50,50] } } ).itcount() , "A3" );
+assert.eq(3, t.find().itcount(), "A1");
+assert.eq(2, t.find().hint({loc: "2d"}).itcount(), "A2");
+assert.eq(2, t.find({loc: {$near: [50, 50]}}).itcount(), "A3");
-t.find( { loc : { $near : [50,50] } } ).sort( { _id : 1 } ).forEach(printjson);
-assert.eq( 1 , t.find( { loc : { $near : [50,50] } } ).sort( { _id : 1 } ).next()._id , "B1" );
-assert.eq( 2 , t.find( { loc : { $near : [50,50] } } ).sort( { _id : -1 } ).next()._id , "B1" );
+t.find({loc: {$near: [50, 50]}}).sort({_id: 1}).forEach(printjson);
+assert.eq(1, t.find({loc: {$near: [50, 50]}}).sort({_id: 1}).next()._id, "B1");
+assert.eq(2, t.find({loc: {$near: [50, 50]}}).sort({_id: -1}).next()._id, "B1");
-
-t.insert( { _id : 4 , loc : [] } );
-assert.eq( 4 , t.find().itcount() , "C1" );
+t.insert({_id: 4, loc: []});
+assert.eq(4, t.find().itcount(), "C1");
diff --git a/jstests/core/geo7.js b/jstests/core/geo7.js
index f353f75d789..b7563e9f155 100644
--- a/jstests/core/geo7.js
+++ b/jstests/core/geo7.js
@@ -2,19 +2,19 @@
t = db.geo7;
t.drop();
-t.insert({_id:1,y:[1,1]});
-t.insert({_id:2,y:[1,1],z:3});
-t.insert({_id:3,y:[1,1],z:4});
-t.insert({_id:4,y:[1,1],z:5});
+t.insert({_id: 1, y: [1, 1]});
+t.insert({_id: 2, y: [1, 1], z: 3});
+t.insert({_id: 3, y: [1, 1], z: 4});
+t.insert({_id: 4, y: [1, 1], z: 5});
-t.ensureIndex({y:"2d",z:1});
+t.ensureIndex({y: "2d", z: 1});
-assert.eq( 1 , t.find({y:[1,1],z:3}).itcount() , "A1" );
+assert.eq(1, t.find({y: [1, 1], z: 3}).itcount(), "A1");
-t.dropIndex({y:"2d",z:1});
+t.dropIndex({y: "2d", z: 1});
-t.ensureIndex({y:"2d"});
-assert.eq( 1 , t.find({y:[1,1],z:3}).itcount() , "A2" );
+t.ensureIndex({y: "2d"});
+assert.eq(1, t.find({y: [1, 1], z: 3}).itcount(), "A2");
-t.insert( { _id : 5 , y : 5 } );
-assert.eq( 5 , t.findOne( { y : 5 } )._id , "B1" );
+t.insert({_id: 5, y: 5});
+assert.eq(5, t.findOne({y: 5})._id, "B1");
diff --git a/jstests/core/geo9.js b/jstests/core/geo9.js
index 1e295911393..201bee7dfa5 100644
--- a/jstests/core/geo9.js
+++ b/jstests/core/geo9.js
@@ -2,27 +2,27 @@
t = db.geo9;
t.drop();
-t.save( { _id : 1 , a : [ 10 , 10 ] , b : [ 50 , 50 ] } );
-t.save( { _id : 2 , a : [ 11 , 11 ] , b : [ 51 , 52 ] } );
-t.save( { _id : 3 , a : [ 12 , 12 ] , b : [ 52 , 52 ] } );
+t.save({_id: 1, a: [10, 10], b: [50, 50]});
+t.save({_id: 2, a: [11, 11], b: [51, 52]});
+t.save({_id: 3, a: [12, 12], b: [52, 52]});
-t.save( { _id : 4 , a : [ 50 , 50 ] , b : [ 10 , 10 ] } );
-t.save( { _id : 5 , a : [ 51 , 51 ] , b : [ 11 , 11 ] } );
-t.save( { _id : 6 , a : [ 52 , 52 ] , b : [ 12 , 12 ] } );
+t.save({_id: 4, a: [50, 50], b: [10, 10]});
+t.save({_id: 5, a: [51, 51], b: [11, 11]});
+t.save({_id: 6, a: [52, 52], b: [12, 12]});
-t.ensureIndex( { a : "2d" } );
-t.ensureIndex( { b : "2d" } );
+t.ensureIndex({a: "2d"});
+t.ensureIndex({b: "2d"});
-function check( field ){
+function check(field) {
var q = {};
- q[field] = { $near : [ 11 , 11 ] };
- arr = t.find( q ).limit(3).map(
- function(z){
- return Geo.distance( [ 11 , 11 ] , z[field] );
- }
- );
- assert.eq( 2 * Math.sqrt( 2 ) , Array.sum( arr ) , "test " + field );
+ q[field] = {
+ $near: [11, 11]
+ };
+ arr = t.find(q).limit(3).map(function(z) {
+ return Geo.distance([11, 11], z[field]);
+ });
+ assert.eq(2 * Math.sqrt(2), Array.sum(arr), "test " + field);
}
-check( "a" );
-check( "b" );
+check("a");
+check("b");
diff --git a/jstests/core/geo_2d_with_geojson_point.js b/jstests/core/geo_2d_with_geojson_point.js
index b5afc8b77b8..aaadf4be333 100644
--- a/jstests/core/geo_2d_with_geojson_point.js
+++ b/jstests/core/geo_2d_with_geojson_point.js
@@ -11,10 +11,6 @@ var geoJSONPoint = {
coordinates: [0, 0]
};
-print(assert.throws(
- function() {
- t.findOne({
- loc: {$near: {$geometry: geoJSONPoint}}});
- },
- [],
- 'querying 2d index with GeoJSON point.'));
+print(assert.throws(function() {
+ t.findOne({loc: {$near: {$geometry: geoJSONPoint}}});
+}, [], 'querying 2d index with GeoJSON point.'));
diff --git a/jstests/core/geo_allowedcomparisons.js b/jstests/core/geo_allowedcomparisons.js
index e1a36d495eb..576e764820a 100644
--- a/jstests/core/geo_allowedcomparisons.js
+++ b/jstests/core/geo_allowedcomparisons.js
@@ -2,22 +2,30 @@
t = db.geo_allowedcomparisons;
// Any GeoJSON object can intersect with any geojson object.
-geojsonPoint = { "type" : "Point", "coordinates": [ 0, 0 ] };
-oldPoint = [0,0];
+geojsonPoint = {
+ "type": "Point",
+ "coordinates": [0, 0]
+};
+oldPoint = [0, 0];
// GeoJSON polygons can contain any geojson object and OLD points.
-geojsonPoly = { "type" : "Polygon",
- "coordinates" : [ [ [-5,-5], [-5,5], [5,5], [5,-5], [-5,-5]]]};
+geojsonPoly = {
+ "type": "Polygon",
+ "coordinates": [[[-5, -5], [-5, 5], [5, 5], [5, -5], [-5, -5]]]
+};
// This can be contained by GJ polygons, intersected by anything GJ and old points.
-geojsonLine = { "type" : "LineString", "coordinates": [ [ 0, 0], [1, 1]]};
+geojsonLine = {
+ "type": "LineString",
+ "coordinates": [[0, 0], [1, 1]]
+};
// $centerSphere can contain old or new points.
oldCenterSphere = [[0, 0], Math.PI / 180];
// $box can contain old points.
-oldBox = [[-5,-5], [5,5]];
+oldBox = [[-5, -5], [5, 5]];
// $polygon can contain old points.
-oldPolygon = [[-5,-5], [-5,5], [5,5], [5,-5], [-5,-5]];
+oldPolygon = [[-5, -5], [-5, 5], [5, 5], [5, -5], [-5, -5]];
// $center can contain old points.
oldCenter = [[0, 0], 1];
@@ -39,7 +47,10 @@ assert.writeError(t.insert({geo: oldCenter}));
// Verify that even if we can't index them, we can use them in a matcher.
t.insert({gj: geojsonLine});
t.insert({gj: geojsonPoly});
-geojsonPoint2 = { "type" : "Point", "coordinates": [ 0, 0.001 ] };
+geojsonPoint2 = {
+ "type": "Point",
+ "coordinates": [0, 0.001]
+};
t.insert({gjp: geojsonPoint2});
// We convert between old and new style points.
@@ -56,17 +67,22 @@ function runTests() {
assert.eq(1, t.find({geo: {$geoWithin: {$center: oldCenter}}}).itcount());
assert.eq(1, t.find({geo: {$geoWithin: {$centerSphere: oldCenterSphere}}}).itcount());
// Using geojson with 2d-style geoWithin syntax should choke.
- assert.throws(function() { return t.find({geo: {$geoWithin: {$polygon: geojsonPoly}}})
- .itcount();});
+ assert.throws(function() {
+ return t.find({geo: {$geoWithin: {$polygon: geojsonPoly}}}).itcount();
+ });
// Using old polygon w/new syntax should choke too.
- assert.throws(function() { return t.find({geo: {$geoWithin: {$geometry: oldPolygon}}})
- .itcount();});
- assert.throws(function() { return t.find({geo: {$geoWithin: {$geometry: oldBox}}})
- .itcount();});
- assert.throws(function() { return t.find({geo: {$geoWithin: {$geometry: oldCenter}}})
- .itcount();});
- assert.throws(function() { return t.find({geo: {$geoWithin: {$geometry: oldCenterSphere}}})
- .itcount();});
+ assert.throws(function() {
+ return t.find({geo: {$geoWithin: {$geometry: oldPolygon}}}).itcount();
+ });
+ assert.throws(function() {
+ return t.find({geo: {$geoWithin: {$geometry: oldBox}}}).itcount();
+ });
+ assert.throws(function() {
+ return t.find({geo: {$geoWithin: {$geometry: oldCenter}}}).itcount();
+ });
+ assert.throws(function() {
+ return t.find({geo: {$geoWithin: {$geometry: oldCenterSphere}}}).itcount();
+ });
// Even if we only have a 2d index, the 2d suitability function should
// allow the matcher to deal with this. If we have a 2dsphere index we use it.
assert.eq(1, t.find({geo: {$geoWithin: {$geometry: geojsonPoly}}}).itcount());
@@ -83,7 +99,7 @@ t.dropIndex({geo: "2d"});
runTests();
// 2dsphere index now.
-assert.commandWorked( t.ensureIndex({geo: "2dsphere"}) );
+assert.commandWorked(t.ensureIndex({geo: "2dsphere"}));
// 2dsphere does not support arrays of points.
assert.writeError(t.insert({geo: [geojsonPoint2, geojsonPoint]}));
runTests();
diff --git a/jstests/core/geo_array0.js b/jstests/core/geo_array0.js
index c83223cef05..42b9c758e45 100644
--- a/jstests/core/geo_array0.js
+++ b/jstests/core/geo_array0.js
@@ -3,24 +3,24 @@ t = db.geoarray;
function test(index) {
t.drop();
- t.insert( { zip : "10001", loc : [[ 10, 10 ], [ 50, 50 ]] } );
- t.insert( { zip : "10002", loc : [[ 20, 20 ], [ 50, 50 ]] } );
- var res = t.insert( { zip : "10003", loc : [[ 30, 30 ], [ 50, 50 ]] } );
- assert.writeOK( res );
+ t.insert({zip: "10001", loc: [[10, 10], [50, 50]]});
+ t.insert({zip: "10002", loc: [[20, 20], [50, 50]]});
+ var res = t.insert({zip: "10003", loc: [[30, 30], [50, 50]]});
+ assert.writeOK(res);
if (index) {
- assert.commandWorked(t.ensureIndex( { loc : "2d", zip : 1 } ));
- assert.eq( 2, t.getIndexKeys().length );
+ assert.commandWorked(t.ensureIndex({loc: "2d", zip: 1}));
+ assert.eq(2, t.getIndexKeys().length);
}
- res = t.insert( { zip : "10004", loc : [[ 40, 40 ], [ 50, 50 ]] } );
- assert.writeOK( res );
+ res = t.insert({zip: "10004", loc: [[40, 40], [50, 50]]});
+ assert.writeOK(res);
// test normal access
- printjson( t.find( { loc : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).toArray() );
- assert.eq( 4, t.find( { loc : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).count() );
- assert.eq( 4, t.find( { loc : { $within : { $box : [ [ 45, 45 ], [ 50, 50 ] ] } } } ).count() );
+ printjson(t.find({loc: {$within: {$box: [[0, 0], [45, 45]]}}}).toArray());
+ assert.eq(4, t.find({loc: {$within: {$box: [[0, 0], [45, 45]]}}}).count());
+ assert.eq(4, t.find({loc: {$within: {$box: [[45, 45], [50, 50]]}}}).count());
}
-//test(false); // this was removed as part of SERVER-6400
+// test(false); // this was removed as part of SERVER-6400
test(true);
diff --git a/jstests/core/geo_array1.js b/jstests/core/geo_array1.js
index c37c80b21e0..08b6060f3cc 100644
--- a/jstests/core/geo_array1.js
+++ b/jstests/core/geo_array1.js
@@ -5,31 +5,31 @@ function test(index) {
t.drop();
var locObj = [];
- // Add locations everywhere
- for ( var i = 0; i < 10; i++ ) {
- for ( var j = 0; j < 10; j++ ) {
- if ( j % 2 == 0 )
- locObj.push( [ i, j ] );
- else
- locObj.push( { x : i, y : j } );
- }
+ // Add locations everywhere
+ for (var i = 0; i < 10; i++) {
+ for (var j = 0; j < 10; j++) {
+ if (j % 2 == 0)
+ locObj.push([i, j]);
+ else
+ locObj.push({x: i, y: j});
}
+ }
// Add docs with all these locations
- for( var i = 0; i < 300; i++ ){
- t.insert( { loc : locObj } );
+ for (var i = 0; i < 300; i++) {
+ t.insert({loc: locObj});
}
if (index) {
- t.ensureIndex( { loc : "2d" } );
+ t.ensureIndex({loc: "2d"});
}
// Pull them back
- for ( var i = 0; i < 10; i++ ) {
- for ( var j = 0; j < 10; j++ ) {
- assert.eq(300, t.find({loc: {$within: {$box: [[i - 0.5, j - 0.5 ],
- [i + 0.5,j + 0.5]]}}})
- .count());
+ for (var i = 0; i < 10; i++) {
+ for (var j = 0; j < 10; j++) {
+ assert.eq(
+ 300,
+ t.find({loc: {$within: {$box: [[i - 0.5, j - 0.5], [i + 0.5, j + 0.5]]}}}).count());
}
}
}
diff --git a/jstests/core/geo_array2.js b/jstests/core/geo_array2.js
index 68ecb65323e..33aad98930a 100644
--- a/jstests/core/geo_array2.js
+++ b/jstests/core/geo_array2.js
@@ -10,154 +10,146 @@ Random.setRandomSeed();
// Test the semantics of near / nearSphere / etc. queries with multiple keys per object
-for( var i = -1; i < 2; i++ ){
- for(var j = -1; j < 2; j++ ){
-
- locObj = [];
-
- if( i != 0 || j != 0 )
- locObj.push( { x : i * 50 + Random.rand(),
- y : j * 50 + Random.rand() } );
- locObj.push( { x : Random.rand(),
- y : Random.rand() } );
- locObj.push( { x : Random.rand(),
- y : Random.rand() } );
-
- t.insert({ name : "" + i + "" + j , loc : locObj , type : "A" });
- t.insert({ name : "" + i + "" + j , loc : locObj , type : "B" });
- }
-}
+for (var i = -1; i < 2; i++) {
+ for (var j = -1; j < 2; j++) {
+ locObj = [];
-assert.commandWorked(t.ensureIndex({ loc : "2d" , type : 1 }));
-
-print( "Starting testing phase... ");
-
-for( var t = 0; t < 2; t++ ){
-
-var type = t == 0 ? "A" : "B";
-
-for( var i = -1; i < 2; i++ ){
- for(var j = -1; j < 2; j++ ){
-
- var center = [ i * 50 , j * 50 ];
- var count = i == 0 && j == 0 ? 9 : 1;
- var objCount = 1;
-
- // Do near check
-
- var nearResults = db.runCommand( { geoNear : "geoarray2" ,
- near : center ,
- num : count,
- query : { type : type } } ).results;
- //printjson( nearResults )
-
- var objsFound = {};
- var lastResult = 0;
- for( var k = 0; k < nearResults.length; k++ ){
-
- // All distances should be small, for the # of results
- assert.gt( 1.5 , nearResults[k].dis );
- // Distances should be increasing
- assert.lte( lastResult, nearResults[k].dis );
- // Objs should be of the right type
- assert.eq( type, nearResults[k].obj.type );
-
- lastResult = nearResults[k].dis;
-
- var objKey = "" + nearResults[k].obj._id;
-
- if( objKey in objsFound ) objsFound[ objKey ]++;
- else objsFound[ objKey ] = 1;
-
- }
-
- // Make sure we found the right objects each time
- // Note: Multiple objects could be found for diff distances.
- for( var q in objsFound ){
- assert.eq( objCount , objsFound[q] );
- }
-
-
- // Do nearSphere check
-
- // Earth Radius from geoconstants.h
- var eRad = 6378.1;
-
- nearResults = db.geoarray2.find( { loc : { $nearSphere : center , $maxDistance : 500 /* km */ / eRad }, type : type } ).toArray();
-
- assert.eq( nearResults.length , count );
-
- objsFound = {};
- lastResult = 0;
- for( var k = 0; k < nearResults.length; k++ ){
- var objKey = "" + nearResults[k]._id;
- if( objKey in objsFound ) objsFound[ objKey ]++;
- else objsFound[ objKey ] = 1;
-
- }
-
- // Make sure we found the right objects each time
- for( var q in objsFound ){
- assert.eq( objCount , objsFound[q] );
- }
-
-
-
- // Within results do not return duplicate documents
-
- var count = i == 0 && j == 0 ? 9 : 1;
- var objCount = i == 0 && j == 0 ? 1 : 1;
-
- // Do within check
- objsFound = {};
-
- var box = [ [center[0] - 1, center[1] - 1] , [center[0] + 1, center[1] + 1] ];
-
- //printjson( box )
-
- var withinResults = db.geoarray2.find({ loc : { $within : { $box : box } } , type : type }).toArray();
-
- assert.eq( withinResults.length , count );
-
- for( var k = 0; k < withinResults.length; k++ ){
- var objKey = "" + withinResults[k]._id;
- if( objKey in objsFound ) objsFound[ objKey ]++;
- else objsFound[ objKey ] = 1;
- }
-
- //printjson( objsFound )
-
- // Make sure we found the right objects each time
- for( var q in objsFound ){
- assert.eq( objCount , objsFound[q] );
- }
-
-
- // Do within check (circle)
- objsFound = {};
-
- withinResults = db.geoarray2.find({ loc : { $within : { $center : [ center, 1.5 ] } } , type : type }).toArray();
-
- assert.eq( withinResults.length , count );
-
- for( var k = 0; k < withinResults.length; k++ ){
- var objKey = "" + withinResults[k]._id;
- if( objKey in objsFound ) objsFound[ objKey ]++;
- else objsFound[ objKey ] = 1;
- }
-
- // Make sure we found the right objects each time
- for( var q in objsFound ){
- assert.eq( objCount , objsFound[q] );
- }
-
-
-
- }
-}
+ if (i != 0 || j != 0)
+ locObj.push({x: i * 50 + Random.rand(), y: j * 50 + Random.rand()});
+ locObj.push({x: Random.rand(), y: Random.rand()});
+ locObj.push({x: Random.rand(), y: Random.rand()});
+ t.insert({name: "" + i + "" + j, loc: locObj, type: "A"});
+ t.insert({name: "" + i + "" + j, loc: locObj, type: "B"});
+ }
}
+assert.commandWorked(t.ensureIndex({loc: "2d", type: 1}));
+
+print("Starting testing phase... ");
+
+for (var t = 0; t < 2; t++) {
+ var type = t == 0 ? "A" : "B";
+
+ for (var i = -1; i < 2; i++) {
+ for (var j = -1; j < 2; j++) {
+ var center = [i * 50, j * 50];
+ var count = i == 0 && j == 0 ? 9 : 1;
+ var objCount = 1;
+
+ // Do near check
+
+ var nearResults =
+ db.runCommand(
+ {geoNear: "geoarray2", near: center, num: count, query: {type: type}})
+ .results;
+ // printjson( nearResults )
+
+ var objsFound = {};
+ var lastResult = 0;
+ for (var k = 0; k < nearResults.length; k++) {
+ // All distances should be small, for the # of results
+ assert.gt(1.5, nearResults[k].dis);
+ // Distances should be increasing
+ assert.lte(lastResult, nearResults[k].dis);
+ // Objs should be of the right type
+ assert.eq(type, nearResults[k].obj.type);
+
+ lastResult = nearResults[k].dis;
+
+ var objKey = "" + nearResults[k].obj._id;
+
+ if (objKey in objsFound)
+ objsFound[objKey]++;
+ else
+ objsFound[objKey] = 1;
+ }
+
+ // Make sure we found the right objects each time
+ // Note: Multiple objects could be found for diff distances.
+ for (var q in objsFound) {
+ assert.eq(objCount, objsFound[q]);
+ }
+
+ // Do nearSphere check
+
+ // Earth Radius from geoconstants.h
+ var eRad = 6378.1;
+
+ nearResults = db.geoarray2.find({
+ loc: {$nearSphere: center, $maxDistance: 500 /* km */ / eRad},
+ type: type
+ }).toArray();
+
+ assert.eq(nearResults.length, count);
+ objsFound = {};
+ lastResult = 0;
+ for (var k = 0; k < nearResults.length; k++) {
+ var objKey = "" + nearResults[k]._id;
+ if (objKey in objsFound)
+ objsFound[objKey]++;
+ else
+ objsFound[objKey] = 1;
+ }
+ // Make sure we found the right objects each time
+ for (var q in objsFound) {
+ assert.eq(objCount, objsFound[q]);
+ }
+ // Within results do not return duplicate documents
+
+ var count = i == 0 && j == 0 ? 9 : 1;
+ var objCount = i == 0 && j == 0 ? 1 : 1;
+
+ // Do within check
+ objsFound = {};
+
+ var box = [[center[0] - 1, center[1] - 1], [center[0] + 1, center[1] + 1]];
+
+ // printjson( box )
+
+ var withinResults =
+ db.geoarray2.find({loc: {$within: {$box: box}}, type: type}).toArray();
+
+ assert.eq(withinResults.length, count);
+
+ for (var k = 0; k < withinResults.length; k++) {
+ var objKey = "" + withinResults[k]._id;
+ if (objKey in objsFound)
+ objsFound[objKey]++;
+ else
+ objsFound[objKey] = 1;
+ }
+
+ // printjson( objsFound )
+
+ // Make sure we found the right objects each time
+ for (var q in objsFound) {
+ assert.eq(objCount, objsFound[q]);
+ }
+
+ // Do within check (circle)
+ objsFound = {};
+
+ withinResults =
+ db.geoarray2.find({loc: {$within: {$center: [center, 1.5]}}, type: type}).toArray();
+
+ assert.eq(withinResults.length, count);
+
+ for (var k = 0; k < withinResults.length; k++) {
+ var objKey = "" + withinResults[k]._id;
+ if (objKey in objsFound)
+ objsFound[objKey]++;
+ else
+ objsFound[objKey] = 1;
+ }
+
+ // Make sure we found the right objects each time
+ for (var q in objsFound) {
+ assert.eq(objCount, objsFound[q]);
+ }
+ }
+ }
+}
diff --git a/jstests/core/geo_big_polygon.js b/jstests/core/geo_big_polygon.js
index 8022a5c74f5..6f278c59147 100644
--- a/jstests/core/geo_big_polygon.js
+++ b/jstests/core/geo_big_polygon.js
@@ -5,102 +5,115 @@
var coll = db.geo_big_polygon;
coll.drop();
-//coll.ensureIndex({ loc : "2dsphere" });
-
-coll.getMongo().getDB("admin").runCommand({ setParameter : 1, verboseQueryLogging : true });
-
-var bigCRS = { type : "name",
- properties : { name : "urn:x-mongodb:crs:strictwinding:EPSG:4326" } };
-
-var bigPoly20 = { type : "Polygon", coordinates : [[[10.0, 10.0],
- [-10.0, 10.0],
- [-10.0, -10.0],
- [10.0, -10.0],
- [10.0, 10.0]]],
- crs : bigCRS };
-
-var bigPoly20Comp = { type : "Polygon", coordinates : [[[10.0, 10.0],
- [10.0, -10.0],
- [-10.0, -10.0],
- [-10.0, 10.0],
- [10.0, 10.0]]],
- crs : bigCRS };
-
-var poly10 = { type : "Polygon", coordinates : [[[5.0, 5.0],
- [5.0, -5.0],
- [-5.0, -5.0],
- [-5.0, 5.0],
- [5.0, 5.0]]] };
-
-var line10 = { type : "LineString", coordinates : [[5.0, 5.0],
- [5.0, -5.0],
- [-5.0, -5.0],
- [-5.0, 5.0],
- [5.0, 5.0]] };
-
-var centerPoint = { type : "Point", coordinates : [0, 0] };
-
-var polarPoint = { type : "Point", coordinates : [85, 85] };
-
-var lineEquator = { type : "LineString", coordinates : [[-20, 0], [20, 0]] };
-
-assert.writeOK(coll.insert({ loc : poly10 }));
-assert.writeOK(coll.insert({ loc : line10 }));
-assert.writeOK(coll.insert({ loc : centerPoint }));
-assert.writeOK(coll.insert({ loc : polarPoint }));
-assert.writeOK(coll.insert({ loc : lineEquator }));
+// coll.ensureIndex({ loc : "2dsphere" });
+
+coll.getMongo().getDB("admin").runCommand({setParameter: 1, verboseQueryLogging: true});
+
+var bigCRS = {
+ type: "name",
+ properties: {name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}
+};
+
+var bigPoly20 = {
+ type: "Polygon",
+ coordinates: [[[10.0, 10.0], [-10.0, 10.0], [-10.0, -10.0], [10.0, -10.0], [10.0, 10.0]]],
+ crs: bigCRS
+};
+
+var bigPoly20Comp = {
+ type: "Polygon",
+ coordinates: [[[10.0, 10.0], [10.0, -10.0], [-10.0, -10.0], [-10.0, 10.0], [10.0, 10.0]]],
+ crs: bigCRS
+};
+
+var poly10 = {
+ type: "Polygon",
+ coordinates: [[[5.0, 5.0], [5.0, -5.0], [-5.0, -5.0], [-5.0, 5.0], [5.0, 5.0]]]
+};
+
+var line10 = {
+ type: "LineString",
+ coordinates: [[5.0, 5.0], [5.0, -5.0], [-5.0, -5.0], [-5.0, 5.0], [5.0, 5.0]]
+};
+
+var centerPoint = {
+ type: "Point",
+ coordinates: [0, 0]
+};
+
+var polarPoint = {
+ type: "Point",
+ coordinates: [85, 85]
+};
+
+var lineEquator = {
+ type: "LineString",
+ coordinates: [[-20, 0], [20, 0]]
+};
+
+assert.writeOK(coll.insert({loc: poly10}));
+assert.writeOK(coll.insert({loc: line10}));
+assert.writeOK(coll.insert({loc: centerPoint}));
+assert.writeOK(coll.insert({loc: polarPoint}));
+assert.writeOK(coll.insert({loc: lineEquator}));
assert.eq(coll.find({}).count(), 5);
jsTest.log("Starting query...");
-assert.eq(coll.find({ loc : { $geoWithin : { $geometry : bigPoly20 } } }).count(), 3);
-assert.eq(coll.find({ loc : { $geoIntersects : { $geometry : bigPoly20 } } }).count(), 4);
-assert.eq(coll.find({ loc : { $geoWithin : { $geometry : bigPoly20Comp } } }).count(), 1);
-assert.eq(coll.find({ loc : { $geoIntersects : { $geometry : bigPoly20Comp } } }).count(), 2);
+assert.eq(coll.find({loc: {$geoWithin: {$geometry: bigPoly20}}}).count(), 3);
+assert.eq(coll.find({loc: {$geoIntersects: {$geometry: bigPoly20}}}).count(), 4);
+assert.eq(coll.find({loc: {$geoWithin: {$geometry: bigPoly20Comp}}}).count(), 1);
+assert.eq(coll.find({loc: {$geoIntersects: {$geometry: bigPoly20Comp}}}).count(), 2);
-assert.commandWorked(coll.ensureIndex({ loc : "2dsphere" }));
-
-assert.eq(coll.find({ loc : { $geoWithin : { $geometry : bigPoly20 } } }).count(), 3);
-assert.eq(coll.find({ loc : { $geoIntersects : { $geometry : bigPoly20 } } }).count(), 4);
-assert.eq(coll.find({ loc : { $geoWithin : { $geometry : bigPoly20Comp } } }).count(), 1);
-assert.eq(coll.find({ loc : { $geoIntersects : { $geometry : bigPoly20Comp } } }).count(), 2);
+assert.commandWorked(coll.ensureIndex({loc: "2dsphere"}));
+assert.eq(coll.find({loc: {$geoWithin: {$geometry: bigPoly20}}}).count(), 3);
+assert.eq(coll.find({loc: {$geoIntersects: {$geometry: bigPoly20}}}).count(), 4);
+assert.eq(coll.find({loc: {$geoWithin: {$geometry: bigPoly20Comp}}}).count(), 1);
+assert.eq(coll.find({loc: {$geoIntersects: {$geometry: bigPoly20Comp}}}).count(), 2);
// Test not indexing and querying big polygon
assert.commandWorked(coll.dropIndexes());
// 1. Without index, insert succeeds, but query ignores big polygon.
-var bigPoly10 = { type : "Polygon", coordinates : [[[5.0, 5.0],
- [-5.0, 5.0],
- [-5.0, -5.0],
- [5.0, -5.0],
- [5.0, 5.0]]],
- crs : bigCRS };
+var bigPoly10 = {
+ type: "Polygon",
+ coordinates: [[[5.0, 5.0], [-5.0, 5.0], [-5.0, -5.0], [5.0, -5.0], [5.0, 5.0]]],
+ crs: bigCRS
+};
-assert.writeOK(coll.insert({ _id: "bigPoly10", loc: bigPoly10}));
+assert.writeOK(coll.insert({_id: "bigPoly10", loc: bigPoly10}));
-assert.eq(coll.find({ loc : { $geoWithin : { $geometry : bigPoly20 } } }).count(), 3);
-assert.eq(coll.find({ loc : { $geoIntersects : { $geometry : bigPoly20 } } }).count(), 4);
-assert.eq(coll.find({ loc : { $geoWithin : { $geometry : bigPoly20Comp } } }).count(), 1);
-assert.eq(coll.find({ loc : { $geoIntersects : { $geometry : bigPoly20Comp } } }).count(), 2);
+assert.eq(coll.find({loc: {$geoWithin: {$geometry: bigPoly20}}}).count(), 3);
+assert.eq(coll.find({loc: {$geoIntersects: {$geometry: bigPoly20}}}).count(), 4);
+assert.eq(coll.find({loc: {$geoWithin: {$geometry: bigPoly20Comp}}}).count(), 1);
+assert.eq(coll.find({loc: {$geoIntersects: {$geometry: bigPoly20Comp}}}).count(), 2);
// 2. Building index fails due to big polygon
-assert.commandFailed(coll.ensureIndex({ loc : "2dsphere" }));
+assert.commandFailed(coll.ensureIndex({loc: "2dsphere"}));
// 3. After removing big polygon, index builds successfully
assert.writeOK(coll.remove({_id: "bigPoly10"}));
-assert.commandWorked(coll.ensureIndex({ loc : "2dsphere" }));
+assert.commandWorked(coll.ensureIndex({loc: "2dsphere"}));
// 4. With index, insert fails.
-assert.writeError(coll.insert({ _id: "bigPoly10", loc: bigPoly10}));
+assert.writeError(coll.insert({_id: "bigPoly10", loc: bigPoly10}));
// Query geometries that don't support big CRS should error out.
-var bigPoint = { type: "Point", coordinates: [0, 0], crs: bigCRS };
-var bigLine = { type : "LineString", coordinates : [[-20, 0], [20, 0]], crs: bigCRS };
+var bigPoint = {
+ type: "Point",
+ coordinates: [0, 0],
+ crs: bigCRS
+};
+var bigLine = {
+ type: "LineString",
+ coordinates: [[-20, 0], [20, 0]],
+ crs: bigCRS
+};
assert.throws(function() {
- coll.find( { loc : { $geoIntersects : { $geometry : bigPoint }}}).itcount();
+ coll.find({loc: {$geoIntersects: {$geometry: bigPoint}}}).itcount();
});
assert.throws(function() {
- coll.find( { loc : { $geoIntersects : { $geometry : bigLine }}}).itcount();
+ coll.find({loc: {$geoIntersects: {$geometry: bigLine}}}).itcount();
});
diff --git a/jstests/core/geo_big_polygon2.js b/jstests/core/geo_big_polygon2.js
index 9fb9ffead3d..46ac327b7e0 100644
--- a/jstests/core/geo_big_polygon2.js
+++ b/jstests/core/geo_big_polygon2.js
@@ -7,35 +7,27 @@
var crs84CRS = {
type: "name",
- properties: {
- name: "urn:ogc:def:crs:OGC:1.3:CRS84"
- }
+ properties: {name: "urn:ogc:def:crs:OGC:1.3:CRS84"}
};
var epsg4326CRS = {
type: "name",
- properties: {
- name: "EPSG:4326"
- }
+ properties: {name: "EPSG:4326"}
};
var strictCRS = {
type: "name",
- properties: {
- name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"
- }
+ properties: {name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}
};
// invalid CRS name
var badCRS = {
type: "name",
- properties: {
- name: "urn:x-mongodb:crs:invalid:EPSG:4326"
- }
+ properties: {name: "urn:x-mongodb:crs:invalid:EPSG:4326"}
};
// helper to generate a line along a longitudinal
function genLonLine(lon, startLat, endLat, latStep) {
var line = [];
for (var lat = startLat; lat <= endLat; lat += latStep) {
- line.push( [ lon, lat ] );
+ line.push([lon, lat]);
}
return line;
}
@@ -49,570 +41,392 @@ coll.drop();
// coordinates are longitude, latitude
// strictCRS (big polygon) cannot be stored in the collection
var objects = [
+ {name: "boat ramp", geo: {type: "Point", coordinates: [-97.927117, 30.327376]}},
+ {name: "on equator", geo: {type: "Point", coordinates: [-97.9, 0]}},
+ {name: "just north of equator", geo: {type: "Point", coordinates: [-97.9, 0.1]}},
+ {name: "just south of equator", geo: {type: "Point", coordinates: [-97.9, -0.1]}},
+ {
+ name: "north pole - crs84CRS",
+ geo: {type: "Point", coordinates: [-97.9, 90.0], crs: crs84CRS}
+ },
+ {
+ name: "south pole - epsg4326CRS",
+ geo: {type: "Point", coordinates: [-97.9, -90.0], crs: epsg4326CRS}
+ },
+ {
+ name: "short line string: PA, LA, 4corners, ATX, Mansfield, FL, Reston, NYC",
+ geo: {
+ type: "LineString",
+ coordinates: [
+ [-122.1611953, 37.4420407],
+ [-118.283638, 34.028517],
+ [-109.045223, 36.9990835],
+ [-97.850404, 30.3921555],
+ [-97.904187, 30.395457],
+ [-86.600836, 30.398147],
+ [-77.357837, 38.9589935],
+ [-73.987723, 40.7575074]
+ ]
+ }
+ },
+ {
+ name: "1024 point long line string from south pole to north pole",
+ geo: {type: "LineString", coordinates: genLonLine(2.349902, -90.0, 90.0, 180.0 / 1024)}
+ },
{
- name: "boat ramp",
- geo: {
- type: "Point",
- coordinates: [ -97.927117, 30.327376 ]
- }
- },
- {
- name: "on equator",
- geo: {
- type: "Point",
- coordinates: [ -97.9 , 0 ]
- }
- },
- {
- name: "just north of equator",
- geo: {
- type: "Point",
- coordinates: [ -97.9 , 0.1 ]
- }
- },
- {
- name: "just south of equator",
- geo: {
- type: "Point",
- coordinates: [ -97.9 , -0.1 ]
- }
- },
- {
- name: "north pole - crs84CRS",
- geo: {
- type: "Point",
- coordinates: [ -97.9 , 90.0 ],
- crs: crs84CRS
- }
- },
- {
- name: "south pole - epsg4326CRS",
- geo: {
- type: "Point",
- coordinates: [ -97.9 , -90.0 ],
- crs: epsg4326CRS
- }
+ name: "line crossing equator - epsg4326CRS",
+ geo: {
+ type: "LineString",
+ coordinates: [[-77.0451853, -12.0553442], [-76.7784557, 18.0098528]],
+ crs: epsg4326CRS
+ }
},
{
- name: "short line string: PA, LA, 4corners, ATX, Mansfield, FL, Reston, NYC",
- geo: {
- type: "LineString",
- coordinates: [
- [ -122.1611953, 37.4420407 ],
- [ -118.283638, 34.028517 ],
- [ -109.045223, 36.9990835 ],
- [ -97.850404, 30.3921555 ],
- [ -97.904187, 30.395457 ],
- [ -86.600836, 30.398147 ],
- [ -77.357837, 38.9589935 ],
- [ -73.987723, 40.7575074 ]
- ]
- }
+ name: "GeoJson polygon",
+ geo: {
+ type: "Polygon",
+ coordinates:
+ [[[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]]]
+ }
},
{
- name: "1024 point long line string from south pole to north pole",
- geo: {
- type: "LineString",
- coordinates: genLonLine(2.349902, -90.0, 90.0, 180.0 / 1024)
- }
+ name: "polygon w/ hole",
+ geo: {
+ type: "Polygon",
+ coordinates: [
+ [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]],
+ [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]]
+ ]
+ }
+ },
+ {
+ name: "polygon w/ two holes",
+ geo: {
+ type: "Polygon",
+ coordinates: [
+ [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]],
+ [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]],
+ [[-55.0, 40.0], [-45.0, 40.0], [-45.0, 50.0], [-55.0, 50.0], [-55.0, 40.0]]
+ ]
+ }
},
{
- name: "line crossing equator - epsg4326CRS",
- geo: {
- type: "LineString",
- coordinates: [
- [ -77.0451853, -12.0553442 ],
- [ -76.7784557, 18.0098528 ]
- ],
- crs: epsg4326CRS
- }
- },
- {
- name: "GeoJson polygon",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ -80.0, 30.0 ],
- [ -40.0, 30.0 ],
- [ -40.0, 60.0 ],
- [ -80.0, 60.0 ],
- [ -80.0, 30.0 ] ]
- ]
- }
- },
- {
- name: "polygon w/ hole",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ -80.0, 30.0 ],
- [ -40.0, 30.0 ],
- [ -40.0, 60.0 ],
- [-80.0, 60.0 ],
- [ -80.0, 30.0 ] ],
- [ [ -70.0, 40.0 ],
- [ -60.0, 40.0 ],
- [ -60.0, 50.0 ],
- [ -70.0, 50.0 ],
- [ -70.0, 40.0 ] ]
- ]
- }
- },
- {
- name: "polygon w/ two holes",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ -80.0, 30.0 ],
- [ -40.0, 30.0 ],
- [ -40.0, 60.0 ],
- [ -80.0, 60.0 ],
- [ -80.0, 30.0 ] ],
- [ [ -70.0, 40.0 ],
- [ -60.0, 40.0 ],
- [ -60.0, 50.0 ],
- [ -70.0, 50.0 ],
- [ -70.0, 40.0 ] ],
- [ [ -55.0, 40.0 ],
- [ -45.0, 40.0 ],
- [ -45.0, 50.0 ],
- [ -55.0, 50.0 ],
- [ -55.0, 40.0 ] ]
- ]
- }
- },
- {
- name: "polygon covering North pole",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ -120.0, 89.0 ],
- [ 0.0, 89.0 ],
- [ 120.0, 89.0 ],
- [ -120.0, 89.0 ] ]
- ]
- }
- },
- {
- name: "polygon covering South pole",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ -120.0, -89.0 ],
- [ 0.0, -89.0 ],
- [ 120.0, -89.0 ],
- [ -120.0, -89.0 ] ]
- ]
- }
- },
- {
- name: "big polygon/rectangle covering both poles",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ -130.0, 89.0 ],
- [ -120.0, 89.0 ],
- [ -120.0, -89.0 ],
- [ -130.0, -89.0 ],
- [ -130.0, 89.0 ] ]
- ],
- crs: strictCRS
- }
- },
- {
- name: "polygon (triangle) w/ hole at North pole",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ -120.0, 80.0 ],
- [ 0.0, 80.0 ],
- [ 120.0, 80.0 ],
- [-120.0, 80.0 ] ],
- [ [ -120.0, 88.0 ],
- [ 0.0, 88.0 ],
- [ 120.0, 88.0 ],
- [-120.0, 88.0 ] ]
- ]
- }
- },
- {
- name: "polygon with edge on equator",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ -120.0, 0.0 ],
- [ 120.0, 0.0 ],
- [ 0.0, 90.0 ],
- [ -120.0, 0.0 ] ]
- ]
- }
- },
- {
- name: "polygon just inside single hemisphere (Northern) - China, California, Europe",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ 120.0, 0.000001 ],
- [ -120.0, 0.000001 ],
- [ 0.0, 0.000001 ],
- [ 120.0, 0.000001 ] ]
- ]
- }
- },
- {
- name: "polygon inside Northern hemisphere",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ 120.0, 80.0 ],
- [ -120.0, 80.0 ],
- [ 0.0, 80.0 ],
- [ 120.0, 80.0 ] ]
- ]
- }
- },
- {
- name: "polygon just inside a single hemisphere (Southern) - Pacific, Indonesia, Africa",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ -120.0, -0.000001 ],
- [ 120.0, -0.000001 ],
- [ 0.0, -0.000001 ],
- [ -120.0, -0.000001 ] ]
- ]
- }
- },
- {
- name: "polygon inside Southern hemisphere",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ -120.0, -80.0 ],
- [ 120.0, -80.0 ],
- [ 0.0, -80.0 ],
- [ -120.0, -80.0 ] ]
- ]
- }
- },
- {
- name: "single point (MultiPoint): Palo Alto",
- geo: {
- type: "MultiPoint",
- coordinates: [
- [ -122.1611953, 37.4420407 ]
- ]
- }
- },
- {
- name: "multiple points(MultiPoint): PA, LA, 4corners, ATX, Mansfield, FL, Reston, NYC",
- geo: {
- type: "MultiPoint",
- coordinates: [
- [ -122.1611953, 37.4420407 ],
- [ -118.283638, 34.028517 ],
- [ -109.045223, 36.9990835 ],
- [ -97.850404, 30.3921555 ],
- [ -97.904187, 30.395457 ],
- [ -86.600836, 30.398147 ],
- [ -77.357837, 38.9589935 ],
- [ -73.987723, 40.7575074 ]
- ]
- }
- },
- {
- name: "two points (MultiPoint): Shenzhen, Guangdong, China",
- geo: {
- type: "MultiPoint",
- coordinates: [
- [ 114.0538788, 22.5551603 ],
- [ 114.022837, 22.44395 ]
- ]
- }
- },
- {
- name: "two points (MultiPoint) but only one in: Shenzhen, Guangdong, China",
- geo: {
- type: "MultiPoint",
- coordinates: [
- [ 114.0538788, 22.5551603 ],
- [ 113.743858, 23.025815 ]
- ]
- }
- },
- {
- name: "multi line string: new zealand bays",
- geo: {
- type: "MultiLineString",
- coordinates: [
- [ [ 172.803869, -43.592789 ],
- [ 172.659335, -43.620348 ],
- [ 172.684038, -43.636528 ],
- [ 172.820922, -43.605325 ] ],
- [ [ 172.830497, -43.607768 ],
- [ 172.813263, -43.656319 ],
- [ 172.823096, -43.660996 ],
- [ 172.850943, -43.607609 ] ],
- [ [ 172.912056, -43.623148 ],
- [ 172.887696, -43.670897 ],
- [ 172.900469, -43.676178 ],
- [ 172.931735, -43.622839 ] ]
- ]
- }
- },
- {
- name: "multi polygon: new zealand north and south islands",
- geo: {
- type: "MultiPolygon",
- coordinates: [
- [
- [ [ 165.773255, -45.902933 ],
- [ 169.398419, -47.261538 ],
- [ 174.672744, -41.767722 ],
- [ 172.288845, -39.897992 ],
- [ 165.773255, -45.902933 ] ]
- ],
- [
- [ [ 173.166448, -39.778262 ],
- [ 175.342744, -42.677333 ],
- [ 179.913373, -37.224362 ],
- [ 171.475953, -32.688871 ],
- [ 173.166448, -39.778262 ] ]
- ]
- ]
- }
- },
- {
- name: "geometry collection: point in Australia and triangle around Australia",
- geo: {
- type: "GeometryCollection",
- geometries: [
- {
- name: "center of Australia",
- type: "Point",
- coordinates: [ 133.985885, -27.240790 ]
- },
- {
- name: "Triangle around Australia",
- type: "Polygon",
- coordinates: [
- [ [ 97.423178, -44.735405 ],
- [ 169.845050, -38.432287 ],
- [ 143.824366, 15.966509 ],
- [ 97.423178, -44.735405 ] ]
- ]
- }
- ]
- }
+ name: "polygon covering North pole",
+ geo: {
+ type: "Polygon",
+ coordinates: [[[-120.0, 89.0], [0.0, 89.0], [120.0, 89.0], [-120.0, 89.0]]]
+ }
+ },
+ {
+ name: "polygon covering South pole",
+ geo: {
+ type: "Polygon",
+ coordinates: [[[-120.0, -89.0], [0.0, -89.0], [120.0, -89.0], [-120.0, -89.0]]]
+ }
+ },
+ {
+ name: "big polygon/rectangle covering both poles",
+ geo: {
+ type: "Polygon",
+ coordinates:
+ [[[-130.0, 89.0], [-120.0, 89.0], [-120.0, -89.0], [-130.0, -89.0], [-130.0, 89.0]]],
+ crs: strictCRS
+ }
+ },
+ {
+ name: "polygon (triangle) w/ hole at North pole",
+ geo: {
+ type: "Polygon",
+ coordinates: [
+ [[-120.0, 80.0], [0.0, 80.0], [120.0, 80.0], [-120.0, 80.0]],
+ [[-120.0, 88.0], [0.0, 88.0], [120.0, 88.0], [-120.0, 88.0]]
+ ]
+ }
+ },
+ {
+ name: "polygon with edge on equator",
+ geo: {
+ type: "Polygon",
+ coordinates: [[[-120.0, 0.0], [120.0, 0.0], [0.0, 90.0], [-120.0, 0.0]]]
+ }
+ },
+ {
+ name: "polygon just inside single hemisphere (Northern) - China, California, Europe",
+ geo: {
+ type: "Polygon",
+ coordinates:
+ [[[120.0, 0.000001], [-120.0, 0.000001], [0.0, 0.000001], [120.0, 0.000001]]]
+ }
+ },
+ {
+ name: "polygon inside Northern hemisphere",
+ geo: {
+ type: "Polygon",
+ coordinates: [[[120.0, 80.0], [-120.0, 80.0], [0.0, 80.0], [120.0, 80.0]]]
+ }
+ },
+ {
+ name: "polygon just inside a single hemisphere (Southern) - Pacific, Indonesia, Africa",
+ geo: {
+ type: "Polygon",
+ coordinates:
+ [[[-120.0, -0.000001], [120.0, -0.000001], [0.0, -0.000001], [-120.0, -0.000001]]]
+ }
+ },
+ {
+ name: "polygon inside Southern hemisphere",
+ geo: {
+ type: "Polygon",
+ coordinates: [[[-120.0, -80.0], [120.0, -80.0], [0.0, -80.0], [-120.0, -80.0]]]
+ }
+ },
+ {
+ name: "single point (MultiPoint): Palo Alto",
+ geo: {type: "MultiPoint", coordinates: [[-122.1611953, 37.4420407]]}
+ },
+ {
+ name: "multiple points(MultiPoint): PA, LA, 4corners, ATX, Mansfield, FL, Reston, NYC",
+ geo: {
+ type: "MultiPoint",
+ coordinates: [
+ [-122.1611953, 37.4420407],
+ [-118.283638, 34.028517],
+ [-109.045223, 36.9990835],
+ [-97.850404, 30.3921555],
+ [-97.904187, 30.395457],
+ [-86.600836, 30.398147],
+ [-77.357837, 38.9589935],
+ [-73.987723, 40.7575074]
+ ]
+ }
+ },
+ {
+ name: "two points (MultiPoint): Shenzhen, Guangdong, China",
+ geo: {type: "MultiPoint", coordinates: [[114.0538788, 22.5551603], [114.022837, 22.44395]]}
+ },
+ {
+ name: "two points (MultiPoint) but only one in: Shenzhen, Guangdong, China",
+ geo:
+ {type: "MultiPoint", coordinates: [[114.0538788, 22.5551603], [113.743858, 23.025815]]}
+ },
+ {
+ name: "multi line string: new zealand bays",
+ geo: {
+ type: "MultiLineString",
+ coordinates: [
+ [
+ [172.803869, -43.592789],
+ [172.659335, -43.620348],
+ [172.684038, -43.636528],
+ [172.820922, -43.605325]
+ ],
+ [
+ [172.830497, -43.607768],
+ [172.813263, -43.656319],
+ [172.823096, -43.660996],
+ [172.850943, -43.607609]
+ ],
+ [
+ [172.912056, -43.623148],
+ [172.887696, -43.670897],
+ [172.900469, -43.676178],
+ [172.931735, -43.622839]
+ ]
+ ]
+ }
+ },
+ {
+ name: "multi polygon: new zealand north and south islands",
+ geo: {
+ type: "MultiPolygon",
+ coordinates: [
+ [[
+ [165.773255, -45.902933],
+ [169.398419, -47.261538],
+ [174.672744, -41.767722],
+ [172.288845, -39.897992],
+ [165.773255, -45.902933]
+ ]],
+ [[
+ [173.166448, -39.778262],
+ [175.342744, -42.677333],
+ [179.913373, -37.224362],
+ [171.475953, -32.688871],
+ [173.166448, -39.778262]
+ ]]
+ ]
+ }
+ },
+ {
+ name: "geometry collection: point in Australia and triangle around Australia",
+ geo: {
+ type: "GeometryCollection",
+ geometries: [
+ {name: "center of Australia", type: "Point", coordinates: [133.985885, -27.240790]},
+ {
+ name: "Triangle around Australia",
+ type: "Polygon",
+ coordinates: [[
+ [97.423178, -44.735405],
+ [169.845050, -38.432287],
+ [143.824366, 15.966509],
+ [97.423178, -44.735405]
+ ]]
+ }
+ ]
+ }
}
];
-
// Test various polygons which are not queryable
var badPolys = [
{
- name: "Polygon with bad CRS",
- type: "Polygon",
- coordinates: [
- [ [ 114.0834046, 22.6648202 ],
- [ 113.8293457, 22.3819359 ],
- [ 114.2736054, 22.4047911 ],
- [ 114.0834046, 22.6648202 ] ]
- ],
- crs: badCRS
- },
- {
- name: "Open polygon < 3 sides",
- type: "Polygon",
- coordinates: [
- [ [ 114.0834046, 22.6648202 ],
- [ 113.8293457, 22.3819359 ] ]
- ],
- crs: strictCRS
- },
- {
- name: "Open polygon > 3 sides",
- type: "Polygon",
- coordinates: [
- [ [ 114.0834046, 22.6648202 ],
- [ 113.8293457, 22.3819359 ],
- [ 114.2736054, 22.4047911 ],
- [ 114.1, 22.5 ] ]
- ],
- crs: strictCRS
- },
- {
- name: "duplicate non-adjacent points",
- type: "Polygon",
- coordinates: [
- [ [ 114.0834046, 22.6648202 ],
- [ 113.8293457, 22.3819359 ],
- [ 114.2736054, 22.4047911 ],
- [ 113.8293457, 22.3819359 ],
- [ -65.9165954, 22.6648202 ],
- [ 114.0834046, 22.6648202 ] ]
- ],
- crs: strictCRS
- },
- {
- name: "One hole in polygon",
- type: "Polygon",
- coordinates: [
- [ [ -80.0, 30.0 ],
- [ -40.0, 30.0 ],
- [ -40.0, 60.0 ],
- [ -80.0, 60.0 ],
- [ -80.0, 30.0 ] ],
- [ [ -70.0, 40.0 ],
- [ -60.0, 40.0 ],
- [ -60.0, 50.0 ],
- [ -70.0, 50.0 ],
- [ -70.0, 40.0 ] ]
- ],
- crs: strictCRS
- },
- {
- name: "2 holes in polygon",
- type: "Polygon",
- coordinates: [
- [ [ -80.0, 30.0 ],
- [ -40.0, 30.0 ],
- [ -40.0, 60.0 ],
- [ -80.0, 60.0 ],
- [ -80.0, 30.0 ] ],
- [ [ -70.0, 40.0 ],
- [ -60.0, 40.0 ],
- [ -60.0, 50.0 ],
- [ -70.0, 50.0 ],
- [ -70.0, 40.0 ] ],
- [ [ -55.0, 40.0 ],
- [ -45.0, 40.0 ],
- [ -45.0, 50.0 ],
- [ -55.0, 50.0 ],
- [ -55.0, 40.0 ] ]
- ],
- crs: strictCRS
- },
- {
- name: "complex polygon (edges cross)",
- type: "Polygon",
- coordinates: [
- [ [ 10.0, 10.0 ],
- [ 20.0, 10.0 ],
- [ 10.0, 20.0 ],
- [ 20.0, 20.0 ],
- [ 10.0, 10.0 ] ]
- ],
- crs: strictCRS
+ name: "Polygon with bad CRS",
+ type: "Polygon",
+ coordinates: [[
+ [114.0834046, 22.6648202],
+ [113.8293457, 22.3819359],
+ [114.2736054, 22.4047911],
+ [114.0834046, 22.6648202]
+ ]],
+ crs: badCRS
+ },
+ {
+ name: "Open polygon < 3 sides",
+ type: "Polygon",
+ coordinates: [[[114.0834046, 22.6648202], [113.8293457, 22.3819359]]],
+ crs: strictCRS
+ },
+ {
+ name: "Open polygon > 3 sides",
+ type: "Polygon",
+ coordinates: [[
+ [114.0834046, 22.6648202],
+ [113.8293457, 22.3819359],
+ [114.2736054, 22.4047911],
+ [114.1, 22.5]
+ ]],
+ crs: strictCRS
+ },
+ {
+ name: "duplicate non-adjacent points",
+ type: "Polygon",
+ coordinates: [[
+ [114.0834046, 22.6648202],
+ [113.8293457, 22.3819359],
+ [114.2736054, 22.4047911],
+ [113.8293457, 22.3819359],
+ [-65.9165954, 22.6648202],
+ [114.0834046, 22.6648202]
+ ]],
+ crs: strictCRS
+ },
+ {
+ name: "One hole in polygon",
+ type: "Polygon",
+ coordinates: [
+ [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]],
+ [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]]
+ ],
+ crs: strictCRS
+ },
+ {
+ name: "2 holes in polygon",
+ type: "Polygon",
+ coordinates: [
+ [[-80.0, 30.0], [-40.0, 30.0], [-40.0, 60.0], [-80.0, 60.0], [-80.0, 30.0]],
+ [[-70.0, 40.0], [-60.0, 40.0], [-60.0, 50.0], [-70.0, 50.0], [-70.0, 40.0]],
+ [[-55.0, 40.0], [-45.0, 40.0], [-45.0, 50.0], [-55.0, 50.0], [-55.0, 40.0]]
+ ],
+ crs: strictCRS
+ },
+ {
+ name: "complex polygon (edges cross)",
+ type: "Polygon",
+ coordinates: [[[10.0, 10.0], [20.0, 10.0], [10.0, 20.0], [20.0, 20.0], [10.0, 10.0]]],
+ crs: strictCRS
}
];
-
// Closed polygons used in query (3, 4, 5, 6-sided)
var polys = [
{
- name: "3 sided closed polygon",
- type: "Polygon", // triangle
- coordinates: [ [
- [ 10.0, 10.0 ],
- [ 20.0, 10.0 ],
- [ 15.0, 17.0 ],
- [ 10.0, 10.0 ]
- ] ],
- crs: strictCRS,
- nW: 0, nI: 1
- },
- {
- name: "3 sided closed polygon (non-big)",
- type: "Polygon", // triangle
- coordinates: [ [
- [ 10.0, 10.0 ],
- [ 20.0, 10.0 ],
- [ 15.0, 17.0 ],
- [ 10.0, 10.0 ]
- ] ],
- nW: 0, nI: 1
- },
- {
- name: "4 sided closed polygon",
- type: "Polygon", // rectangle
- coordinates: [ [
- [ 10.0, 10.0 ],
- [ 20.0, 10.0 ],
- [ 20.0, 20.0 ],
- [ 10.0, 20.0 ],
- [ 10.0, 10.0 ]
- ] ],
- crs: strictCRS,
- nW: 0, nI: 1
- },
- {
- name: "4 sided closed polygon (non-big)",
- type: "Polygon", // rectangle
- coordinates: [
- [ [ 10.0, 10.0 ],
- [ 20.0, 10.0 ],
- [ 20.0, 20.0 ],
- [ 10.0, 20.0 ],
- [ 10.0, 10.0 ] ]
- ],
- nW: 0, nI: 1
- },
- {
- name: "5 sided closed polygon",
- type: "Polygon", // pentagon
- coordinates: [ [
- [ 10.0, 10.0 ],
- [ 20.0, 10.0 ],
- [ 25.0, 18.0 ],
- [ 15.0, 25.0 ],
- [ 5.0, 18.0 ],
- [ 10.0, 10.0 ]
- ] ],
- crs: strictCRS,
- nW: 0, nI: 1
- },
- {
- name: "5 sided closed polygon (non-big)",
- type: "Polygon", // pentagon
- coordinates: [ [
- [ 10.0, 10.0 ],
- [ 20.0, 10.0 ],
- [ 25.0, 18.0 ],
- [ 15.0, 25.0 ],
- [ 5.0, 18.0 ],
- [ 10.0, 10.0 ]
- ] ],
- nW: 0, nI: 1
- },
- {
- name: "6 sided closed polygon",
- type: "Polygon", // hexagon
- coordinates: [ [
- [ 10.0, 10.0 ],
- [ 15.0, 10.0 ],
- [ 22.0, 15.0 ],
- [ 15.0, 20.0 ],
- [ 10.0, 20.0 ],
- [ 7.0, 15.0 ],
- [ 10.0, 10.0 ]
- ] ],
- crs: strictCRS,
- nW: 0, nI: 1
- },
- {
- name: "6 sided closed polygon (non-big)",
- type: "Polygon", // hexagon
- coordinates: [ [
- [ 10.0, 10.0 ],
- [ 15.0, 10.0 ],
- [ 22.0, 15.0 ],
- [ 15.0, 20.0 ],
- [ 10.0, 20.0 ],
- [ 7.0, 15.0 ],
- [ 10.0, 10.0 ]
- ] ],
- nW: 0, nI: 1
+ name: "3 sided closed polygon",
+ type: "Polygon", // triangle
+ coordinates: [[[10.0, 10.0], [20.0, 10.0], [15.0, 17.0], [10.0, 10.0]]],
+ crs: strictCRS,
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "3 sided closed polygon (non-big)",
+ type: "Polygon", // triangle
+ coordinates: [[[10.0, 10.0], [20.0, 10.0], [15.0, 17.0], [10.0, 10.0]]],
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "4 sided closed polygon",
+ type: "Polygon", // rectangle
+ coordinates: [[[10.0, 10.0], [20.0, 10.0], [20.0, 20.0], [10.0, 20.0], [10.0, 10.0]]],
+ crs: strictCRS,
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "4 sided closed polygon (non-big)",
+ type: "Polygon", // rectangle
+ coordinates: [[[10.0, 10.0], [20.0, 10.0], [20.0, 20.0], [10.0, 20.0], [10.0, 10.0]]],
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "5 sided closed polygon",
+ type: "Polygon", // pentagon
+ coordinates:
+ [[[10.0, 10.0], [20.0, 10.0], [25.0, 18.0], [15.0, 25.0], [5.0, 18.0], [10.0, 10.0]]],
+ crs: strictCRS,
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "5 sided closed polygon (non-big)",
+ type: "Polygon", // pentagon
+ coordinates:
+ [[[10.0, 10.0], [20.0, 10.0], [25.0, 18.0], [15.0, 25.0], [5.0, 18.0], [10.0, 10.0]]],
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "6 sided closed polygon",
+ type: "Polygon", // hexagon
+ coordinates: [[
+ [10.0, 10.0],
+ [15.0, 10.0],
+ [22.0, 15.0],
+ [15.0, 20.0],
+ [10.0, 20.0],
+ [7.0, 15.0],
+ [10.0, 10.0]
+ ]],
+ crs: strictCRS,
+ nW: 0,
+ nI: 1
+ },
+ {
+ name: "6 sided closed polygon (non-big)",
+ type: "Polygon", // hexagon
+ coordinates: [[
+ [10.0, 10.0],
+ [15.0, 10.0],
+ [22.0, 15.0],
+ [15.0, 20.0],
+ [10.0, 20.0],
+ [7.0, 15.0],
+ [10.0, 10.0]
+ ]],
+ nW: 0,
+ nI: 1
}
];
@@ -628,23 +442,23 @@ function nGonGenerator(N, D, clockwise, LON, LAT) {
// edge lengths will be uneven with this quick & dirty approach
N = (N % 2 == 1) ? N + 1 : N;
var eps = 2 * D / N;
- var lat=0;
- var lon=0;
+ var lat = 0;
+ var lon = 0;
var pts = [];
var i = 0;
// produce longitude values in pairs
// traverse with left foot outside the circle (clockwise) to define the big polygon
for (i = 0, lat = D / 2; i <= N / 2; ++i, lat -= eps) {
- if ( lat < (-D / 2) ) {
+ if (lat < (-D / 2)) {
// set fixing lat
lat = (-D / 2);
}
- lon = Math.sqrt( (D / 2) * (D / 2) - (lat * lat) );
+ lon = Math.sqrt((D / 2) * (D / 2) - (lat * lat));
newlat = lat + LAT;
newlon = lon + LON;
conjugateLon = LON - lon;
- pts[i] = [ newlon, newlat ];
- pts[N-i] = [ conjugateLon, newlat ];
+ pts[i] = [newlon, newlat];
+ pts[N - i] = [conjugateLon, newlat];
}
// Reverse points if counterclockwise
if (!clockwise) {
@@ -674,83 +488,67 @@ var totalObjects = getNumberOfValidObjects(objects);
var nsidedPolys = [
// Big Polygon centered on 0, 0
{
- name: "4 sided polygon centered on 0, 0",
- type: "Polygon",
- coordinates: [
- nGonGenerator(4, 30, true, 0, 0)
- ],
- crs: strictCRS,
- nW: totalObjects - 3,
- nI: totalObjects
+ name: "4 sided polygon centered on 0, 0",
+ type: "Polygon",
+ coordinates: [nGonGenerator(4, 30, true, 0, 0)],
+ crs: strictCRS,
+ nW: totalObjects - 3,
+ nI: totalObjects
},
// Non-big polygons have counterclockwise coordinates
{
- name: "4 sided polygon centered on 0, 0 (non-big)",
- type: "Polygon",
- coordinates: [
- nGonGenerator(4, 30, false, 0, 0)
- ],
- nW: 0,
- nI: 3
- },
- {
- name: "100 sided polygon centered on 0, 0",
- type: "Polygon",
- coordinates: [
- nGonGenerator(100, 20, true, 0, 0)
- ],
- crs: strictCRS,
- nW: totalObjects - 3,
- nI: totalObjects
- },
- {
- name: "100 sided polygon centered on 0, 0 (non-big)",
- type: "Polygon",
- coordinates: [
- nGonGenerator(100, 20, false, 0, 0)
- ],
- nW: 0,
- nI: 3
- },
- {
- name: "5000 sided polygon centered on 0, 0 (non-big)",
- type: "Polygon",
- coordinates: [
- nGonGenerator(5000, 89.99, false, 0, 0)
- ],
- nW: 0,
- nI: 3
- },
- {
- name: "25000 sided polygon centered on 0, 0",
- type: "Polygon",
- coordinates: [
- nGonGenerator(25000, 89.99, true, 0, 0)
- ],
- crs: strictCRS,
- nW: totalObjects - 3,
- nI: totalObjects
+ name: "4 sided polygon centered on 0, 0 (non-big)",
+ type: "Polygon",
+ coordinates: [nGonGenerator(4, 30, false, 0, 0)],
+ nW: 0,
+ nI: 3
+ },
+ {
+ name: "100 sided polygon centered on 0, 0",
+ type: "Polygon",
+ coordinates: [nGonGenerator(100, 20, true, 0, 0)],
+ crs: strictCRS,
+ nW: totalObjects - 3,
+ nI: totalObjects
+ },
+ {
+ name: "100 sided polygon centered on 0, 0 (non-big)",
+ type: "Polygon",
+ coordinates: [nGonGenerator(100, 20, false, 0, 0)],
+ nW: 0,
+ nI: 3
+ },
+ {
+ name: "5000 sided polygon centered on 0, 0 (non-big)",
+ type: "Polygon",
+ coordinates: [nGonGenerator(5000, 89.99, false, 0, 0)],
+ nW: 0,
+ nI: 3
+ },
+ {
+ name: "25000 sided polygon centered on 0, 0",
+ type: "Polygon",
+ coordinates: [nGonGenerator(25000, 89.99, true, 0, 0)],
+ crs: strictCRS,
+ nW: totalObjects - 3,
+ nI: totalObjects
},
// Big polygon centered on Shenzen
{
- name: "4 sided polygon centered on Shenzen",
- type: "Polygon",
- coordinates: [
- nGonGenerator(4, 5, true, 114.1, 22.55)
- ],
- crs: strictCRS,
- nW: totalObjects - 3,
- nI: totalObjects - 2
- },
- {
- name: "4 sided polygon centered on Shenzen (non-big)",
- type: "Polygon",
- coordinates: [
- nGonGenerator(4, 5, false, 114.1, 22.55)
- ],
- crs: strictCRS,
- nW: 2,
- nI: 3
+ name: "4 sided polygon centered on Shenzen",
+ type: "Polygon",
+ coordinates: [nGonGenerator(4, 5, true, 114.1, 22.55)],
+ crs: strictCRS,
+ nW: totalObjects - 3,
+ nI: totalObjects - 2
+ },
+ {
+ name: "4 sided polygon centered on Shenzen (non-big)",
+ type: "Polygon",
+ coordinates: [nGonGenerator(4, 5, false, 114.1, 22.55)],
+ crs: strictCRS,
+ nW: 2,
+ nI: 3
}
];
@@ -768,19 +566,11 @@ objects.forEach(function(o) {
});
// Try creating other index types
-assert.commandWorked(
- coll.ensureIndex({geo: "2dsphere", a: 1}),
- "compound index, geo");
+assert.commandWorked(coll.ensureIndex({geo: "2dsphere", a: 1}), "compound index, geo");
// These other index types will fail because of the GeoJSON documents
-assert.commandFailed(
- coll.ensureIndex({geo: "2dsphere", a: "text"}),
- "compound index, geo & text");
-assert.commandFailed(
- coll.ensureIndex({geo: "geoHaystack" }, {bucketSize:1}),
- "geoHaystack index");
-assert.commandFailed(
- coll.ensureIndex({geo: "2d"}),
- "2d index");
+assert.commandFailed(coll.ensureIndex({geo: "2dsphere", a: "text"}), "compound index, geo & text");
+assert.commandFailed(coll.ensureIndex({geo: "geoHaystack"}, {bucketSize: 1}), "geoHaystack index");
+assert.commandFailed(coll.ensureIndex({geo: "2d"}), "2d index");
totalObjects = coll.count();
@@ -794,7 +584,7 @@ indexes.forEach(function(index) {
if (index != "none") {
// Create index
- assert.commandWorked(coll.ensureIndex({geo: index}), "create " + index + " index");
+ assert.commandWorked(coll.ensureIndex({geo: index}), "create " + index + " index");
}
// These polygons should not be queryable
@@ -802,24 +592,23 @@ indexes.forEach(function(index) {
// within
assert.throws(function() {
- coll.count({geo: {$geoWithin: {$geometry: p}}});},
- null,
- "within " + p.name);
+ coll.count({geo: {$geoWithin: {$geometry: p}}});
+ }, null, "within " + p.name);
// intersection
assert.throws(function() {
- coll.count({geo: {$geoIntersects: {$geometry: p}}});},
- null,
- "intersects " + p.name);
+ coll.count({geo: {$geoIntersects: {$geometry: p}}});
+ }, null, "intersects " + p.name);
});
-
// Tests for closed polygons
polys.forEach(function(p) {
// geoWithin query
var docArray = [];
- var q = {geo: {$geoWithin: {$geometry: p}}};
+ var q = {
+ geo: {$geoWithin: {$geometry: p}}
+ };
// Test query in aggregate
docArray = coll.aggregate({$match: q}).toArray();
assert.eq(p.nW, docArray.length, "aggregate within " + p.name);
@@ -827,7 +616,9 @@ indexes.forEach(function(index) {
assert.eq(p.nW, docArray.length, "within " + p.name);
// geoIntersects query
- q = {geo: {$geoIntersects: {$geometry: p}}};
+ q = {
+ geo: {$geoIntersects: {$geometry: p}}
+ };
// Test query in aggregate
docArray = coll.aggregate({$match: q}).toArray();
assert.eq(p.nI, docArray.length, "aggregate intersects " + p.name);
@@ -836,7 +627,7 @@ indexes.forEach(function(index) {
// Update on matching docs
var result = coll.update(q, {$set: {stored: ObjectId()}}, {multi: true});
// only check nModified if write commands are enabled
- if ( coll.getMongo().writeMode() == "commands" ) {
+ if (coll.getMongo().writeMode() == "commands") {
assert.eq(p.nI, result.nModified, "update " + p.name);
}
// Remove & restore matching docs
@@ -853,14 +644,11 @@ indexes.forEach(function(index) {
nsidedPolys.forEach(function(p) {
// within
- assert.eq(p.nW,
- coll.count({geo: {$geoWithin: {$geometry: p}}}),
- "within " + p.name);
+ assert.eq(p.nW, coll.count({geo: {$geoWithin: {$geometry: p}}}), "within " + p.name);
// intersects
- assert.eq(p.nI,
- coll.count({geo: {$geoIntersects: {$geometry: p}}}),
- "intersection " + p.name);
+ assert.eq(
+ p.nI, coll.count({geo: {$geoIntersects: {$geometry: p}}}), "intersection " + p.name);
});
diff --git a/jstests/core/geo_big_polygon3.js b/jstests/core/geo_big_polygon3.js
index bf155c842ae..049064ebc5b 100644
--- a/jstests/core/geo_big_polygon3.js
+++ b/jstests/core/geo_big_polygon3.js
@@ -12,21 +12,15 @@
var crs84CRS = {
type: "name",
- properties: {
- name: "urn:ogc:def:crs:OGC:1.3:CRS84"
- }
+ properties: {name: "urn:ogc:def:crs:OGC:1.3:CRS84"}
};
var epsg4326CRS = {
type: "name",
- properties: {
- name: "EPSG:4326"
- }
+ properties: {name: "EPSG:4326"}
};
var strictCRS = {
type: "name",
- properties: {
- name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"
- }
+ properties: {name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}
};
var coll = db.geo_bigpoly_edgecases;
@@ -35,59 +29,39 @@ coll.drop();
// Edge cases producing error
// These non-polygon objects cannot be queried because they are strictCRS
var objects = [
+ {name: "point with strictCRS", type: "Point", coordinates: [-97.9, 0], crs: strictCRS},
{
- name: "point with strictCRS",
- type: "Point",
- coordinates: [ -97.9 , 0 ],
- crs: strictCRS
- },
- {
- name: "multipoint with strictCRS",
- type: "MultiPoint",
- coordinates: [
- [ -97.9 , 0 ],
- [ -10.9 , 0 ]
- ],
- crs: strictCRS
+ name: "multipoint with strictCRS",
+ type: "MultiPoint",
+ coordinates: [[-97.9, 0], [-10.9, 0]],
+ crs: strictCRS
},
{
- name: "line with strictCRS",
- type: "LineString",
- coordinates: [
- [ -122.1611953, 37.4420407 ],
- [ -118.283638, 34.028517 ]
- ],
- crs: strictCRS
+ name: "line with strictCRS",
+ type: "LineString",
+ coordinates: [[-122.1611953, 37.4420407], [-118.283638, 34.028517]],
+ crs: strictCRS
}
];
-
objects.forEach(function(o) {
// within
assert.throws(function() {
- coll.count({geo: {$geoWithin: {$geometry: o}}});},
- null,
- "within " + o.name);
+ coll.count({geo: {$geoWithin: {$geometry: o}}});
+ }, null, "within " + o.name);
// intersection
assert.throws(function() {
- coll.count({geo: {$geoIntersects: {$geometry: o}}});},
- null,
- "intersection " + o.name);
+ coll.count({geo: {$geoIntersects: {$geometry: o}}});
+ }, null, "intersection " + o.name);
});
-
// Big Polygon query for $nearSphere & geoNear should fail
var bigPoly = {
name: "3 sided closed polygon",
type: "Polygon", // triangle
- coordinates: [ [
- [ 10.0, 10.0 ],
- [ 20.0, 10.0 ],
- [ 15.0, 17.0 ],
- [ 10.0, 10.0 ]
- ] ],
+ coordinates: [[[10.0, 10.0], [20.0, 10.0], [15.0, 17.0], [10.0, 10.0]]],
crs: strictCRS
};
@@ -96,39 +70,29 @@ assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}), "2dsphere index");
// $nearSphere on big polygon should fail
assert.throws(function() {
- coll.count({geo: {$nearSphere: {$geometry: bigPoly}}});},
- null,
- "nearSphere " + bigPoly.name);
+ coll.count({geo: {$nearSphere: {$geometry: bigPoly}}});
+}, null, "nearSphere " + bigPoly.name);
// geoNear on big polygon should fail
-assert.commandFailed(
- db.runCommand({
- geoNear: coll.getName(),
- near: bigPoly,
- spherical: true
- }),
- "geoNear " + bigPoly.name);
+assert.commandFailed(db.runCommand({geoNear: coll.getName(), near: bigPoly, spherical: true}),
+ "geoNear " + bigPoly.name);
// aggregate $geoNear on big polygon should fail
-assert.commandFailed(
- db.runCommand({
- aggregate: coll.getName(),
- pipeline: [
- {$geoNear:
- {near: bigPoly, distanceField: "geo.calculated", spherical: true}}]
- }),
- "aggregate $geoNear " + bigPoly.name);
+assert.commandFailed(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$geoNear: {near: bigPoly, distanceField: "geo.calculated", spherical: true}}]
+}),
+ "aggregate $geoNear " + bigPoly.name);
// mapReduce on big polygon should work
-assert.commandWorked(
- db.runCommand({
- mapReduce: coll.getName(),
- map: function() {},
- reduce: function() {},
- query: {geo: {$geoIntersects: {$geometry: bigPoly}}},
- out: {inline:1 },
- }),
- "mapReduce " + bigPoly.name);
+assert.commandWorked(db.runCommand({
+ mapReduce: coll.getName(),
+ map: function() {},
+ reduce: function() {},
+ query: {geo: {$geoIntersects: {$geometry: bigPoly}}},
+ out: {inline: 1},
+}),
+ "mapReduce " + bigPoly.name);
// Tests that stored objects with strictCRS will be ignored by query
// If strictCRS is removed from the document then they will be found
@@ -138,48 +102,37 @@ assert.commandWorked(coll.dropIndex({geo: "2dsphere"}), "drop 2dsphere index");
objects = [
{
- name: "NYC Times Square - point",
- geo: {
- type: "Point",
- coordinates: [ -73.9857 , 40.7577 ],
- crs: strictCRS
- }
+ name: "NYC Times Square - point",
+ geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: strictCRS}
},
{
- name: "NYC CitiField & JFK - multipoint",
- geo: {
- type: "MultiPoint",
- coordinates: [
- [ -73.8458 , 40.7569 ],
- [ -73.7789 , 40.6397 ]
- ],
- crs: strictCRS
- }
+ name: "NYC CitiField & JFK - multipoint",
+ geo: {
+ type: "MultiPoint",
+ coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]],
+ crs: strictCRS
+ }
},
{
- name: "NYC - Times Square to CitiField to JFK - line/string",
- geo: {
- type: "LineString",
- coordinates: [
- [ -73.9857 , 40.7577 ],
- [ -73.8458 , 40.7569 ],
- [ -73.7789 , 40.6397 ]
- ],
- crs: strictCRS
- }
+ name: "NYC - Times Square to CitiField to JFK - line/string",
+ geo: {
+ type: "LineString",
+ coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]],
+ crs: strictCRS
+ }
},
{
- name: "NYC - Times Square to CitiField to JFK to Times Square - polygon",
- geo: {
- type: "Polygon",
- coordinates: [
- [ [ -73.9857 , 40.7577 ],
- [ -73.7789 , 40.6397 ],
- [ -73.8458 , 40.7569 ],
- [ -73.9857 , 40.7577 ] ]
- ],
- crs: strictCRS
- }
+ name: "NYC - Times Square to CitiField to JFK to Times Square - polygon",
+ geo: {
+ type: "Polygon",
+ coordinates: [[
+ [-73.9857, 40.7577],
+ [-73.7789, 40.6397],
+ [-73.8458, 40.7569],
+ [-73.9857, 40.7577]
+ ]],
+ crs: strictCRS
+ }
}
];
@@ -193,33 +146,25 @@ objects.forEach(function(o) {
var poly = {
name: "4 sided polygon around NYC",
type: "Polygon", // triangle
- coordinates: [ [
- [ -74.5, 40.5 ],
- [ -72.0, 40.5 ],
- [ -72.00, 41.0 ],
- [ -74.5, 41.0 ],
- [ -74.5, 40.5 ]
- ] ],
+ coordinates: [[[-74.5, 40.5], [-72.0, 40.5], [-72.00, 41.0], [-74.5, 41.0], [-74.5, 40.5]]],
crs: strictCRS
};
assert.eq(0,
- coll.count({geo: {$geoWithin: {$geometry: poly}}}),
- "ignore objects with strictCRS within");
+ coll.count({geo: {$geoWithin: {$geometry: poly}}}),
+ "ignore objects with strictCRS within");
assert.eq(0,
- coll.count({geo: {$geoIntersects: {$geometry: poly}}}),
- "ignore objects with strictCRS intersects");
+ coll.count({geo: {$geoIntersects: {$geometry: poly}}}),
+ "ignore objects with strictCRS intersects");
// Now remove the strictCRS and find all the objects
-coll.update({},{$unset: {"geo.crs": ""}}, {multi: true});
+coll.update({}, {$unset: {"geo.crs": ""}}, {multi: true});
var totalDocs = coll.count();
+assert.eq(totalDocs, coll.count({geo: {$geoWithin: {$geometry: poly}}}), "no strictCRS within");
assert.eq(totalDocs,
- coll.count({geo: {$geoWithin: {$geometry: poly}}}),
- "no strictCRS within");
-assert.eq(totalDocs,
- coll.count({geo: {$geoIntersects: {$geometry: poly}}}),
- "no strictCRS intersects");
+ coll.count({geo: {$geoIntersects: {$geometry: poly}}}),
+ "no strictCRS intersects");
// Clear collection
coll.remove({});
@@ -228,66 +173,44 @@ coll.remove({});
// Objects should be found from query
objects = [
{
- name: "NYC Times Square - point crs84CRS",
- geo: {
- type: "Point",
- coordinates: [ -73.9857 , 40.7577 ],
- crs: crs84CRS
- }
+ name: "NYC Times Square - point crs84CRS",
+ geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: crs84CRS}
},
{
- name: "NYC Times Square - point epsg4326CRS",
- geo: {
- type: "Point",
- coordinates: [ -73.9857 , 40.7577 ],
- crs: epsg4326CRS
- }
+ name: "NYC Times Square - point epsg4326CRS",
+ geo: {type: "Point", coordinates: [-73.9857, 40.7577], crs: epsg4326CRS}
},
{
- name: "NYC CitiField & JFK - multipoint crs84CRS",
- geo: {
- type: "MultiPoint",
- coordinates: [
- [ -73.8458 , 40.7569 ],
- [ -73.7789 , 40.6397 ]
- ],
- crs: crs84CRS
- }
+ name: "NYC CitiField & JFK - multipoint crs84CRS",
+ geo: {
+ type: "MultiPoint",
+ coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]],
+ crs: crs84CRS
+ }
},
{
- name: "NYC CitiField & JFK - multipoint epsg4326CRS",
- geo: {
- type: "MultiPoint",
- coordinates: [
- [ -73.8458 , 40.7569 ],
- [ -73.7789 , 40.6397 ]
- ],
- crs: epsg4326CRS
- }
+ name: "NYC CitiField & JFK - multipoint epsg4326CRS",
+ geo: {
+ type: "MultiPoint",
+ coordinates: [[-73.8458, 40.7569], [-73.7789, 40.6397]],
+ crs: epsg4326CRS
+ }
},
{
- name: "NYC - Times Square to CitiField to JFK - line/string crs84CRS",
- geo: {
- type: "LineString",
- coordinates: [
- [ -73.9857 , 40.7577 ],
- [ -73.8458 , 40.7569 ],
- [ -73.7789 , 40.6397 ]
- ],
- crs: crs84CRS
- }
+ name: "NYC - Times Square to CitiField to JFK - line/string crs84CRS",
+ geo: {
+ type: "LineString",
+ coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]],
+ crs: crs84CRS
+ }
},
{
- name: "NYC - Times Square to CitiField to JFK - line/string epsg4326CRS",
- geo: {
- type: "LineString",
- coordinates: [
- [ -73.9857 , 40.7577 ],
- [ -73.8458 , 40.7569 ],
- [ -73.7789 , 40.6397 ]
- ],
- crs: epsg4326CRS
- }
+ name: "NYC - Times Square to CitiField to JFK - line/string epsg4326CRS",
+ geo: {
+ type: "LineString",
+ coordinates: [[-73.9857, 40.7577], [-73.8458, 40.7569], [-73.7789, 40.6397]],
+ crs: epsg4326CRS
+ }
}
];
@@ -300,19 +223,18 @@ objects.forEach(function(o) {
totalDocs = coll.count();
assert.eq(totalDocs,
- coll.count({geo: {$geoWithin: {$geometry: poly}}}),
- "crs84CRS or epsg4326CRS within");
+ coll.count({geo: {$geoWithin: {$geometry: poly}}}),
+ "crs84CRS or epsg4326CRS within");
assert.eq(totalDocs,
- coll.count({geo: {$geoIntersects: {$geometry: poly}}}),
- "crs84CRS or epsg4326CRS intersects");
+ coll.count({geo: {$geoIntersects: {$geometry: poly}}}),
+ "crs84CRS or epsg4326CRS intersects");
// Add index and look again for stored point & spherical CRS documents
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}), "2dsphere index");
assert.eq(totalDocs,
- coll.count({geo: {$geoWithin: {$geometry: poly}}}),
- "2dsphere index - crs84CRS or epsg4326CRS within");
+ coll.count({geo: {$geoWithin: {$geometry: poly}}}),
+ "2dsphere index - crs84CRS or epsg4326CRS within");
assert.eq(totalDocs,
- coll.count({geo: {$geoIntersects: {$geometry: poly}}}),
- "2dsphere index - crs84CRS or epsg4326CRS intersects");
-
+ coll.count({geo: {$geoIntersects: {$geometry: poly}}}),
+ "2dsphere index - crs84CRS or epsg4326CRS intersects");
diff --git a/jstests/core/geo_borders.js b/jstests/core/geo_borders.js
index 4768ff8503f..f0a47339591 100644
--- a/jstests/core/geo_borders.js
+++ b/jstests/core/geo_borders.js
@@ -9,9 +9,9 @@ max = 1;
step = 1;
numItems = 0;
-for ( var x = min; x <= max; x += step ) {
- for ( var y = min; y <= max; y += step ) {
- t.insert( { loc : { x : x, y : y } } );
+for (var x = min; x <= max; x += step) {
+ for (var y = min; y <= max; y += step) {
+ t.insert({loc: {x: x, y: y}});
numItems++;
}
}
@@ -20,13 +20,12 @@ overallMin = -1;
overallMax = 1;
// Create a point index slightly smaller than the points we have
-var res = t.ensureIndex({ loc: "2d" },
- { max: overallMax - epsilon / 2,
- min: overallMin + epsilon / 2 });
+var res =
+ t.ensureIndex({loc: "2d"}, {max: overallMax - epsilon / 2, min: overallMin + epsilon / 2});
assert.commandFailed(res);
// Create a point index only slightly bigger than the points we have
-res = t.ensureIndex( { loc : "2d" }, { max : overallMax + epsilon, min : overallMin - epsilon } );
+res = t.ensureIndex({loc: "2d"}, {max: overallMax + epsilon, min: overallMin - epsilon});
assert.commandWorked(res);
// ************
@@ -34,86 +33,129 @@ assert.commandWorked(res);
// ************
// If the bounds are bigger than the box itself, just clip at the borders
-assert.eq( numItems, t.find(
- { loc : { $within : { $box : [
- [ overallMin - 2 * epsilon, overallMin - 2 * epsilon ],
- [ overallMax + 2 * epsilon, overallMax + 2 * epsilon ] ] } } } ).count() );
+assert.eq(numItems,
+ t.find({
+ loc: {
+ $within: {
+ $box: [
+ [overallMin - 2 * epsilon, overallMin - 2 * epsilon],
+ [overallMax + 2 * epsilon, overallMax + 2 * epsilon]
+ ]
+ }
+ }
+ }).count());
// Check this works also for bounds where only a single dimension is off-bounds
-assert.eq( numItems - 5, t.find(
- { loc : { $within : { $box : [
- [ overallMin - 2 * epsilon, overallMin - 0.5 * epsilon ],
- [ overallMax - epsilon, overallMax - epsilon ] ] } } } ).count() );
+assert.eq(numItems - 5,
+ t.find({
+ loc: {
+ $within: {
+ $box: [
+ [overallMin - 2 * epsilon, overallMin - 0.5 * epsilon],
+ [overallMax - epsilon, overallMax - epsilon]
+ ]
+ }
+ }
+ }).count());
// Make sure we can get at least close to the bounds of the index
-assert.eq( numItems, t.find(
- { loc : { $within : { $box : [
- [ overallMin - epsilon / 2, overallMin - epsilon / 2 ],
- [ overallMax + epsilon / 2, overallMax + epsilon / 2 ] ] } } } ).count() );
+assert.eq(numItems,
+ t.find({
+ loc: {
+ $within: {
+ $box: [
+ [overallMin - epsilon / 2, overallMin - epsilon / 2],
+ [overallMax + epsilon / 2, overallMax + epsilon / 2]
+ ]
+ }
+ }
+ }).count());
// Make sure we can get at least close to the bounds of the index
-assert.eq( numItems, t.find(
- { loc : { $within : { $box : [
- [ overallMax + epsilon / 2, overallMax + epsilon / 2 ],
- [ overallMin - epsilon / 2, overallMin - epsilon / 2 ] ] } } } ).count() );
+assert.eq(numItems,
+ t.find({
+ loc: {
+ $within: {
+ $box: [
+ [overallMax + epsilon / 2, overallMax + epsilon / 2],
+ [overallMin - epsilon / 2, overallMin - epsilon / 2]
+ ]
+ }
+ }
+ }).count());
// Check that swapping min/max has good behavior
-assert.eq( numItems, t.find(
- { loc : { $within : { $box : [
- [ overallMax + epsilon / 2, overallMax + epsilon / 2 ],
- [ overallMin - epsilon / 2, overallMin - epsilon / 2 ] ] } } } ).count() );
-
-assert.eq( numItems, t.find(
- { loc : { $within : { $box : [
- [ overallMax + epsilon / 2, overallMin - epsilon / 2 ],
- [ overallMin - epsilon / 2, overallMax + epsilon / 2 ] ] } } } ).count() );
+assert.eq(numItems,
+ t.find({
+ loc: {
+ $within: {
+ $box: [
+ [overallMax + epsilon / 2, overallMax + epsilon / 2],
+ [overallMin - epsilon / 2, overallMin - epsilon / 2]
+ ]
+ }
+ }
+ }).count());
+
+assert.eq(numItems,
+ t.find({
+ loc: {
+ $within: {
+ $box: [
+ [overallMax + epsilon / 2, overallMin - epsilon / 2],
+ [overallMin - epsilon / 2, overallMax + epsilon / 2]
+ ]
+ }
+ }
+ }).count());
// **************
// Circle tests
// **************
-center = ( overallMax + overallMin ) / 2;
-center = [ center, center ];
+center = (overallMax + overallMin) / 2;
+center = [center, center];
radius = overallMax;
-offCenter = [ center[0] + radius, center[1] + radius ];
-onBounds = [ offCenter[0] + epsilon, offCenter[1] + epsilon ];
-offBounds = [ onBounds[0] + epsilon, onBounds[1] + epsilon ];
-onBoundsNeg = [ -onBounds[0], -onBounds[1] ];
+offCenter = [center[0] + radius, center[1] + radius];
+onBounds = [offCenter[0] + epsilon, offCenter[1] + epsilon];
+offBounds = [onBounds[0] + epsilon, onBounds[1] + epsilon];
+onBoundsNeg = [-onBounds[0], -onBounds[1]];
// Make sure we can get all points when radius is exactly at full bounds
-assert.lt( 0, t.find( { loc : { $within : { $center : [ center, radius + epsilon ] } } } ).count() );
+assert.lt(0, t.find({loc: {$within: {$center: [center, radius + epsilon]}}}).count());
// Make sure we can get points when radius is over full bounds
-assert.lt( 0, t.find( { loc : { $within : { $center : [ center, radius + 2 * epsilon ] } } } ).count() );
+assert.lt(0, t.find({loc: {$within: {$center: [center, radius + 2 * epsilon]}}}).count());
// Make sure we can get points when radius is over full bounds, off-centered
-assert.lt( 0, t.find( { loc : { $within : { $center : [ offCenter, radius + 2 * epsilon ] } } } ).count() );
+assert.lt(0, t.find({loc: {$within: {$center: [offCenter, radius + 2 * epsilon]}}}).count());
// Make sure we get correct corner point when center is in bounds
// (x bounds wrap, so could get other corner)
-cornerPt = t.findOne( { loc : { $within : { $center : [ offCenter, step / 2 ] } } } );
-assert.eq( cornerPt.loc.y, overallMax );
+cornerPt = t.findOne({loc: {$within: {$center: [offCenter, step / 2]}}});
+assert.eq(cornerPt.loc.y, overallMax);
// Make sure we get correct corner point when center is on bounds
// NOTE: Only valid points on MIN bounds
-cornerPt = t
- .findOne( { loc : { $within : { $center : [ onBoundsNeg, Math.sqrt( 2 * epsilon * epsilon ) + ( step / 2 ) ] } } } );
-assert.eq( cornerPt.loc.y, overallMin );
+cornerPt = t.findOne(
+ {loc: {$within: {$center: [onBoundsNeg, Math.sqrt(2 * epsilon * epsilon) + (step / 2)]}}});
+assert.eq(cornerPt.loc.y, overallMin);
// Make sure we can't get corner point when center is over bounds
// TODO: SERVER-5800 clean up wrapping rules for different CRS queries - not sure this is an error
/*
assert.throws(function(){
- t.findOne( { loc : { $within : { $center : [ offBounds, Math.sqrt( 8 * epsilon * epsilon ) + ( step / 2 ) ] } } } );
+ t.findOne( { loc : { $within : { $center : [ offBounds, Math.sqrt( 8 * epsilon * epsilon ) + (
+step / 2 ) ] } } } );
});
*/
-
// Make sure we can't get corner point when center is on max bounds
// Broken - see SERVER-13581
-//assert.throws(function(){
-// t.findOne( { loc : { $within : { $center : [ onBounds, Math.sqrt( 8 * epsilon * epsilon ) + ( step / 2 ) ] } } } );
+// assert.throws(function(){
+// t.findOne( { loc : { $within : { $center : [ onBounds, Math.sqrt( 8 * epsilon * epsilon ) + (
+// step / 2 ) ] } } } );
//});
// ***********
@@ -121,10 +163,10 @@ assert.throws(function(){
// ***********
// Make sure we can get all nearby points to point in range
-assert.eq( overallMax, t.find( { loc : { $near : offCenter } } ).next().loc.y );
+assert.eq(overallMax, t.find({loc: {$near: offCenter}}).next().loc.y);
// Make sure we can get all nearby points to point on boundary
-assert.eq( overallMin, t.find( { loc : { $near : onBoundsNeg } } ).next().loc.y );
+assert.eq(overallMin, t.find({loc: {$near: onBoundsNeg}}).next().loc.y);
// Make sure we can't get all nearby points to point over boundary
// TODO: SERVER-9986 clean up wrapping rules for different CRS queries - not sure this is an error
@@ -135,33 +177,35 @@ assert.throws(function(){
*/
// Make sure we can't get all nearby points to point on max boundary
-//Broken - see SERVER-13581
-//assert.throws(function(){
+// Broken - see SERVER-13581
+// assert.throws(function(){
// t.findOne( { loc : { $near : onBoundsNeg } } );
//});
// Make sure we can get all nearby points within one step (4 points in top
// corner)
-assert.eq( 4, t.find( { loc : { $near : offCenter, $maxDistance : step * 1.9 } } ).count() );
+assert.eq(4, t.find({loc: {$near: offCenter, $maxDistance: step * 1.9}}).count());
// **************
// Command Tests
// **************
// Make sure we can get all nearby points to point in range
-assert.eq( overallMax, db.runCommand( { geoNear : "borders", near : offCenter } ).results[0].obj.loc.y );
+assert.eq(overallMax, db.runCommand({geoNear: "borders", near: offCenter}).results[0].obj.loc.y);
// Make sure we can get all nearby points to point on boundary
-assert.eq( overallMin, db.runCommand( { geoNear : "borders", near : onBoundsNeg } ).results[0].obj.loc.y );
+assert.eq(overallMin, db.runCommand({geoNear: "borders", near: onBoundsNeg}).results[0].obj.loc.y);
// Make sure we can't get all nearby points to point over boundary
-//TODO: SERVER-9986 clean up wrapping rules for different CRS queries - not sure this is an error
+// TODO: SERVER-9986 clean up wrapping rules for different CRS queries - not sure this is an error
/*
assert.commandFailed( db.runCommand( { geoNear : "borders", near : offBounds } ));
*/
// Make sure we can't get all nearby points to point on max boundary
-assert.commandWorked( db.runCommand( { geoNear : "borders", near : onBounds } ));
+assert.commandWorked(db.runCommand({geoNear: "borders", near: onBounds}));
// Make sure we can get all nearby points within one step (4 points in top
// corner)
-assert.eq( 4, db.runCommand( { geoNear : "borders", near : offCenter, maxDistance : step * 1.5 } ).results.length );
+assert.eq(4,
+ db.runCommand({geoNear: "borders", near: offCenter, maxDistance: step * 1.5})
+ .results.length);
diff --git a/jstests/core/geo_box1.js b/jstests/core/geo_box1.js
index ee21f02df0c..45e9aab9118 100644
--- a/jstests/core/geo_box1.js
+++ b/jstests/core/geo_box1.js
@@ -3,46 +3,42 @@ t = db.geo_box1;
t.drop();
num = 0;
-for ( x=0; x<=20; x++ ){
- for ( y=0; y<=20; y++ ){
- o = { _id : num++ , loc : [ x , y ] };
- t.save( o );
+for (x = 0; x <= 20; x++) {
+ for (y = 0; y <= 20; y++) {
+ o = {
+ _id: num++,
+ loc: [x, y]
+ };
+ t.save(o);
}
}
-t.ensureIndex( { loc : "2d" } );
+t.ensureIndex({loc: "2d"});
-searches = [
- [ [ 1 , 2 ] , [ 4 , 5 ] ] ,
- [ [ 1 , 1 ] , [ 2 , 2 ] ] ,
- [ [ 0 , 2 ] , [ 4 , 5 ] ] ,
- [ [ 1 , 1 ] , [ 2 , 8 ] ] ,
-];
+searches = [[[1, 2], [4, 5]], [[1, 1], [2, 2]], [[0, 2], [4, 5]], [[1, 1], [2, 8]], ];
-
-for ( i=0; i<searches.length; i++ ){
+for (i = 0; i < searches.length; i++) {
b = searches[i];
- //printjson( b );
-
- q = { loc : { $within : { $box : b } } };
- numWanetd = ( 1 + b[1][0] - b[0][0] ) * ( 1 + b[1][1] - b[0][1] );
- assert.eq( numWanetd , t.find(q).itcount() , "itcount: " + tojson( q ) );
- printjson( t.find(q).explain() );
+ // printjson( b );
+
+ q = {
+ loc: {$within: {$box: b}}
+ };
+ numWanetd = (1 + b[1][0] - b[0][0]) * (1 + b[1][1] - b[0][1]);
+ assert.eq(numWanetd, t.find(q).itcount(), "itcount: " + tojson(q));
+ printjson(t.find(q).explain());
}
+assert.eq(0, t.find({loc: {$within: {$box: [[100, 100], [110, 110]]}}}).itcount(), "E1");
+assert.eq(0, t.find({loc: {$within: {$box: [[100, 100], [110, 110]]}}}).count(), "E2");
+assert.eq(num, t.find({loc: {$within: {$box: [[0, 0], [110, 110]]}}}).count(), "E3");
+assert.eq(num, t.find({loc: {$within: {$box: [[0, 0], [110, 110]]}}}).itcount(), "E4");
-assert.eq( 0 , t.find( { loc : { $within : { $box : [ [100 , 100 ] , [ 110 , 110 ] ] } } } ).itcount() , "E1" );
-assert.eq( 0 , t.find( { loc : { $within : { $box : [ [100 , 100 ] , [ 110 , 110 ] ] } } } ).count() , "E2" );
-
-
-assert.eq( num , t.find( { loc : { $within : { $box : [ [ 0 , 0 ] , [ 110 , 110 ] ] } } } ).count() , "E3" );
-assert.eq( num , t.find( { loc : { $within : { $box : [ [ 0 , 0 ] , [ 110 , 110 ] ] } } } ).itcount() , "E4" );
-
-assert.eq( 57 , t.find( { loc : { $within : { $box : [ [ 0 , 0 ] , [ 110 , 110 ] ] } } } ).limit(57).itcount() , "E5" );
+assert.eq(57, t.find({loc: {$within: {$box: [[0, 0], [110, 110]]}}}).limit(57).itcount(), "E5");
// SERVER-13621
// Eetect and invert the $box coordinates when they're specified incorrectly.
-assert.eq( num , t.find( { loc : { $within : { $box : [ [ 110 , 110 ], [ 0 , 0 ] ] } } } ).count() , "E5" );
-assert.eq( num , t.find( { loc : { $within : { $box : [ [ 110 , 0 ], [ 0 , 110 ] ] } } } ).count() , "E6" );
-assert.eq( num , t.find( { loc : { $within : { $box : [ [ 0 , 110 ], [ 110 , 0 ] ] } } } ).count() , "E7" );
+assert.eq(num, t.find({loc: {$within: {$box: [[110, 110], [0, 0]]}}}).count(), "E5");
+assert.eq(num, t.find({loc: {$within: {$box: [[110, 0], [0, 110]]}}}).count(), "E6");
+assert.eq(num, t.find({loc: {$within: {$box: [[0, 110], [110, 0]]}}}).count(), "E7");
diff --git a/jstests/core/geo_box1_noindex.js b/jstests/core/geo_box1_noindex.js
index abf21266dac..36e932105a6 100644
--- a/jstests/core/geo_box1_noindex.js
+++ b/jstests/core/geo_box1_noindex.js
@@ -3,36 +3,36 @@ t = db.geo_box1_noindex;
t.drop();
num = 0;
-for ( x=0; x<=20; x++ ){
- for ( y=0; y<=20; y++ ){
- o = { _id : num++ , loc : [ x , y ] };
- t.save( o );
+for (x = 0; x <= 20; x++) {
+ for (y = 0; y <= 20; y++) {
+ o = {
+ _id: num++,
+ loc: [x, y]
+ };
+ t.save(o);
}
}
-searches = [
- [ [ 1 , 2 ] , [ 4 , 5 ] ] ,
- [ [ 1 , 1 ] , [ 2 , 2 ] ] ,
- [ [ 0 , 2 ] , [ 4 , 5 ] ] ,
- [ [ 1 , 1 ] , [ 2 , 8 ] ] ,
-];
+searches = [[[1, 2], [4, 5]], [[1, 1], [2, 2]], [[0, 2], [4, 5]], [[1, 1], [2, 8]], ];
-for ( i=0; i<searches.length; i++ ){
+for (i = 0; i < searches.length; i++) {
b = searches[i];
- q = { loc : { $within : { $box : b } } };
- numWanted = ( 1 + b[1][0] - b[0][0] ) * ( 1 + b[1][1] - b[0][1] );
- assert.eq( numWanted , t.find(q).itcount() , "itcount: " + tojson( q ) );
- printjson( t.find(q).explain() );
+ q = {
+ loc: {$within: {$box: b}}
+ };
+ numWanted = (1 + b[1][0] - b[0][0]) * (1 + b[1][1] - b[0][1]);
+ assert.eq(numWanted, t.find(q).itcount(), "itcount: " + tojson(q));
+ printjson(t.find(q).explain());
}
-assert.eq( 0 , t.find( { loc : { $within : { $box : [ [100 , 100 ] , [ 110 , 110 ] ] } } } ).itcount() , "E1" );
-assert.eq( 0 , t.find( { loc : { $within : { $box : [ [100 , 100 ] , [ 110 , 110 ] ] } } } ).count() , "E2" );
-assert.eq( num , t.find( { loc : { $within : { $box : [ [ 0 , 0 ] , [ 110 , 110 ] ] } } } ).count() , "E3" );
-assert.eq( num , t.find( { loc : { $within : { $box : [ [ 0 , 0 ] , [ 110 , 110 ] ] } } } ).itcount() , "E4" );
-assert.eq( 57 , t.find( { loc : { $within : { $box : [ [ 0 , 0 ] , [ 110 , 110 ] ] } } } ).limit(57).itcount() , "E5" );
+assert.eq(0, t.find({loc: {$within: {$box: [[100, 100], [110, 110]]}}}).itcount(), "E1");
+assert.eq(0, t.find({loc: {$within: {$box: [[100, 100], [110, 110]]}}}).count(), "E2");
+assert.eq(num, t.find({loc: {$within: {$box: [[0, 0], [110, 110]]}}}).count(), "E3");
+assert.eq(num, t.find({loc: {$within: {$box: [[0, 0], [110, 110]]}}}).itcount(), "E4");
+assert.eq(57, t.find({loc: {$within: {$box: [[0, 0], [110, 110]]}}}).limit(57).itcount(), "E5");
// SERVER-13621
// Eetect and invert the $box coordinates when they're specified incorrectly.
-assert.eq( num , t.find( { loc : { $within : { $box : [ [ 110 , 110 ], [ 0 , 0 ] ] } } } ).count() , "E5" );
-assert.eq( num , t.find( { loc : { $within : { $box : [ [ 110 , 0 ], [ 0 , 110 ] ] } } } ).count() , "E6" );
-assert.eq( num , t.find( { loc : { $within : { $box : [ [ 0 , 110 ], [ 110 , 0 ] ] } } } ).count() , "E7" );
+assert.eq(num, t.find({loc: {$within: {$box: [[110, 110], [0, 0]]}}}).count(), "E5");
+assert.eq(num, t.find({loc: {$within: {$box: [[110, 0], [0, 110]]}}}).count(), "E6");
+assert.eq(num, t.find({loc: {$within: {$box: [[0, 110], [110, 0]]}}}).count(), "E7");
diff --git a/jstests/core/geo_box2.js b/jstests/core/geo_box2.js
index c20a1701874..74f9695f9b2 100644
--- a/jstests/core/geo_box2.js
+++ b/jstests/core/geo_box2.js
@@ -3,16 +3,16 @@ t = db.geo_box2;
t.drop();
-for (i=1; i<10; i++) {
- for(j=1; j<10; j++) {
- t.insert({loc : [i,j]});
- }
+for (i = 1; i < 10; i++) {
+ for (j = 1; j < 10; j++) {
+ t.insert({loc: [i, j]});
+ }
}
-t.ensureIndex({"loc" : "2d"} );
-assert.eq( 9 , t.find({loc : {$within : {$box : [[4,4],[6,6]]}}}).itcount() , "A1" );
+t.ensureIndex({"loc": "2d"});
+assert.eq(9, t.find({loc: {$within: {$box: [[4, 4], [6, 6]]}}}).itcount(), "A1");
-t.dropIndex( { "loc" : "2d" } );
+t.dropIndex({"loc": "2d"});
-t.ensureIndex({"loc" : "2d"} , {"min" : 0, "max" : 10});
-assert.eq( 9 , t.find({loc : {$within : {$box : [[4,4],[6,6]]}}}).itcount() , "B1" );
+t.ensureIndex({"loc": "2d"}, {"min": 0, "max": 10});
+assert.eq(9, t.find({loc: {$within: {$box: [[4, 4], [6, 6]]}}}).itcount(), "B1");
diff --git a/jstests/core/geo_box3.js b/jstests/core/geo_box3.js
index 8941f637518..7f9dd12ea60 100644
--- a/jstests/core/geo_box3.js
+++ b/jstests/core/geo_box3.js
@@ -4,33 +4,33 @@
// bounding box.
// This is the bug reported in SERVER-994.
-t=db.geo_box3;
+t = db.geo_box3;
t.drop();
-t.insert({ point : { x : -15000000, y : 10000000 } });
-t.ensureIndex( { point : "2d" } , { min : -21000000 , max : 21000000 } );
-var c=t.find({point: {"$within": {"$box": [[-20000000, 7000000], [0, 15000000]]} } });
+t.insert({point: {x: -15000000, y: 10000000}});
+t.ensureIndex({point: "2d"}, {min: -21000000, max: 21000000});
+var c = t.find({point: {"$within": {"$box": [[-20000000, 7000000], [0, 15000000]]}}});
assert.eq(1, c.count(), "A1");
// Same thing, modulo 1000000.
-t=db.geo_box3;
+t = db.geo_box3;
t.drop();
-t.insert({ point : { x : -15, y : 10 } });
-t.ensureIndex( { point : "2d" } , { min : -21 , max : 21 } );
-var c=t.find({point: {"$within": {"$box": [[-20, 7], [0, 15]]} } });
+t.insert({point: {x: -15, y: 10}});
+t.ensureIndex({point: "2d"}, {min: -21, max: 21});
+var c = t.find({point: {"$within": {"$box": [[-20, 7], [0, 15]]}}});
assert.eq(1, c.count(), "B1");
// Two more examples, one where the index is centered at the origin,
// one not.
-t=db.geo_box3;
+t = db.geo_box3;
t.drop();
-t.insert({ point : { x : 1.0 , y : 1.0 } });
-t.ensureIndex( { point : "2d" } , { min : -2 , max : 2 } );
-var c=t.find({point: {"$within": {"$box": [[.1, .1], [1.99, 1.99]]} } });
+t.insert({point: {x: 1.0, y: 1.0}});
+t.ensureIndex({point: "2d"}, {min: -2, max: 2});
+var c = t.find({point: {"$within": {"$box": [[.1, .1], [1.99, 1.99]]}}});
assert.eq(1, c.count(), "C1");
-t=db.geo_box3;
+t = db.geo_box3;
t.drop();
-t.insert({ point : { x : 3.9 , y : 3.9 } });
-t.ensureIndex( { point : "2d" } , { min : 0 , max : 4 } );
-var c=t.find({point: {"$within": {"$box": [[2.05, 2.05], [3.99, 3.99]]} } });
+t.insert({point: {x: 3.9, y: 3.9}});
+t.ensureIndex({point: "2d"}, {min: 0, max: 4});
+var c = t.find({point: {"$within": {"$box": [[2.05, 2.05], [3.99, 3.99]]}}});
assert.eq(1, c.count(), "D1");
diff --git a/jstests/core/geo_center_sphere1.js b/jstests/core/geo_center_sphere1.js
index 2c61a54588b..f3b39b552cd 100644
--- a/jstests/core/geo_center_sphere1.js
+++ b/jstests/core/geo_center_sphere1.js
@@ -2,66 +2,77 @@ t = db.geo_center_sphere1;
function test(index) {
t.drop();
- skip = 8; // lower for more rigor, higher for more speed (tested with .5, .678, 1, 2, 3, and 4)
+ skip = 8; // lower for more rigor, higher for more speed (tested with .5, .678, 1, 2, 3, and 4)
- searches = [
+ searches = [
// x , y rad
- [ [ 5 , 0 ] , 0.05 ] , // ~200 miles
- [ [ 135 , 0 ] , 0.05 ] ,
+ [[5, 0], 0.05], // ~200 miles
+ [[135, 0], 0.05],
- [ [ 5 , 70 ] , 0.05 ] ,
- [ [ 135 , 70 ] , 0.05 ] ,
- [ [ 5 , 85 ] , 0.05 ] ,
+ [[5, 70], 0.05],
+ [[135, 70], 0.05],
+ [[5, 85], 0.05],
- [ [ 20 , 0 ] , 0.25 ] , // ~1000 miles
- [ [ 20 , -45 ] , 0.25 ] ,
- [ [ -20 , 60 ] , 0.25 ] ,
- [ [ -20 , -70 ] , 0.25 ] ,
+ [[20, 0], 0.25], // ~1000 miles
+ [[20, -45], 0.25],
+ [[-20, 60], 0.25],
+ [[-20, -70], 0.25],
];
- correct = searches.map( function(z){ return []; } );
+ correct = searches.map(function(z) {
+ return [];
+ });
num = 0;
var bulk = t.initializeUnorderedBulkOp();
- for ( x=-179; x<=179; x += skip ){
- for ( y=-89; y<=89; y += skip ){
- o = { _id : num++ , loc : [ x , y ] };
- bulk.insert( o );
- for ( i=0; i<searches.length; i++ ){
- if ( Geo.sphereDistance( [ x , y ] , searches[i][0] ) <= searches[i][1])
- correct[i].push( o );
+ for (x = -179; x <= 179; x += skip) {
+ for (y = -89; y <= 89; y += skip) {
+ o = {
+ _id: num++,
+ loc: [x, y]
+ };
+ bulk.insert(o);
+ for (i = 0; i < searches.length; i++) {
+ if (Geo.sphereDistance([x, y], searches[i][0]) <= searches[i][1])
+ correct[i].push(o);
}
}
- gc(); // needed with low skip values
+ gc(); // needed with low skip values
}
assert.writeOK(bulk.execute());
if (index) {
- t.ensureIndex( { loc : index } );
+ t.ensureIndex({loc: index});
}
- for ( i=0; i<searches.length; i++ ){
+ for (i = 0; i < searches.length; i++) {
print('------------');
- print( tojson( searches[i] ) + "\t" + correct[i].length );
- q = { loc : { $within : { $centerSphere : searches[i] } } };
+ print(tojson(searches[i]) + "\t" + correct[i].length);
+ q = {
+ loc: {$within: {$centerSphere: searches[i]}}
+ };
- //correct[i].forEach( printjson )
- //printjson( q );
- //t.find( q ).forEach( printjson )
-
- //printjson(t.find( q ).explain())
+ // correct[i].forEach( printjson )
+ // printjson( q );
+ // t.find( q ).forEach( printjson )
+
+ // printjson(t.find( q ).explain())
+
+ // printjson( Array.sort( correct[i].map( function(z){ return z._id; } ) ) )
+ // printjson( Array.sort( t.find(q).map( function(z){ return z._id; } ) ) )
- //printjson( Array.sort( correct[i].map( function(z){ return z._id; } ) ) )
- //printjson( Array.sort( t.find(q).map( function(z){ return z._id; } ) ) )
-
var numExpected = correct[i].length;
- var x = correct[i].map( function(z){ return z._id; } );
- var y = t.find(q).map( function(z){ return z._id; } );
+ var x = correct[i].map(function(z) {
+ return z._id;
+ });
+ var y = t.find(q).map(function(z) {
+ return z._id;
+ });
missing = [];
- epsilon = 0.001; // allow tenth of a percent error due to conversions
- for (var j=0; j<x.length; j++){
- if (!Array.contains(y, x[j])){
+ epsilon = 0.001; // allow tenth of a percent error due to conversions
+ for (var j = 0; j < x.length; j++) {
+ if (!Array.contains(y, x[j])) {
missing.push(x[j]);
var obj = t.findOne({_id: x[j]});
var dist = Geo.sphereDistance(searches[i][0], obj.loc);
@@ -70,8 +81,8 @@ function test(index) {
numExpected -= 1;
}
}
- for (var j=0; j<y.length; j++){
- if (!Array.contains(x, y[j])){
+ for (var j = 0; j < y.length; j++) {
+ if (!Array.contains(x, y[j])) {
missing.push(y[j]);
var obj = t.findOne({_id: y[j]});
var dist = Geo.sphereDistance(searches[i][0], obj.loc);
@@ -81,16 +92,16 @@ function test(index) {
}
}
-
- assert.eq( numExpected , t.find( q ).itcount() , "itcount : " + tojson( searches[i] ) );
- assert.eq( numExpected , t.find( q ).count() , "count : " + tojson( searches[i] ) );
+ assert.eq(numExpected, t.find(q).itcount(), "itcount : " + tojson(searches[i]));
+ assert.eq(numExpected, t.find(q).count(), "count : " + tojson(searches[i]));
if (index == "2d") {
- var explain = t.find( q ).explain("executionStats");
- print( 'explain for ' + tojson( q , '' , true ) + ' = ' + tojson( explain ) );
+ var explain = t.find(q).explain("executionStats");
+ print('explain for ' + tojson(q, '', true) + ' = ' + tojson(explain));
// The index should be at least minimally effective in preventing the full collection
// scan.
- assert.gt( t.find().count(), explain.executionStats.totalKeysExamined ,
- "nscanned : " + tojson( searches[i] ) );
+ assert.gt(t.find().count(),
+ explain.executionStats.totalKeysExamined,
+ "nscanned : " + tojson(searches[i]));
}
}
}
diff --git a/jstests/core/geo_center_sphere2.js b/jstests/core/geo_center_sphere2.js
index a569c4d5c96..ac8f09cbe77 100644
--- a/jstests/core/geo_center_sphere2.js
+++ b/jstests/core/geo_center_sphere2.js
@@ -5,39 +5,42 @@
// multiple documents, and so requires simultaneous testing.
//
-function deg2rad(arg) { return arg * Math.PI / 180.0; }
-function rad2deg(arg) { return arg * 180.0 / Math.PI; }
+function deg2rad(arg) {
+ return arg * Math.PI / 180.0;
+}
+function rad2deg(arg) {
+ return arg * 180.0 / Math.PI;
+}
function computexscandist(y, maxDistDegrees) {
- return maxDistDegrees / Math.min(Math.cos(deg2rad(Math.min(89.0, y + maxDistDegrees))),
- Math.cos(deg2rad(Math.max(-89.0, y - maxDistDegrees))));
+ return maxDistDegrees /
+ Math.min(Math.cos(deg2rad(Math.min(89.0, y + maxDistDegrees))),
+ Math.cos(deg2rad(Math.max(-89.0, y - maxDistDegrees))));
}
function pointIsOK(startPoint, radius) {
yscandist = rad2deg(radius) + 0.01;
xscandist = computexscandist(startPoint[1], yscandist);
- return (startPoint[0] + xscandist < 180)
- && (startPoint[0] - xscandist > -180)
- && (startPoint[1] + yscandist < 90)
- && (startPoint[1] - yscandist > -90);
+ return (startPoint[0] + xscandist < 180) && (startPoint[0] - xscandist > -180) &&
+ (startPoint[1] + yscandist < 90) && (startPoint[1] - yscandist > -90);
}
var numTests = 30;
-for ( var test = 0; test < numTests; test++ ) {
- Random.srand( 1337 + test );
+for (var test = 0; test < numTests; test++) {
+ Random.srand(1337 + test);
- var radius = 5000 * Random.rand(); // km
- radius = radius / 6378.1; // radians; earth radius from geoconstants.h
- var numDocs = Math.floor( 400 * Random.rand() );
+ var radius = 5000 * Random.rand(); // km
+ radius = radius / 6378.1; // radians; earth radius from geoconstants.h
+ var numDocs = Math.floor(400 * Random.rand());
// TODO: Wrapping uses the error value to figure out what would overlap...
- var bits = Math.floor( 5 + Random.rand() * 28 );
+ var bits = Math.floor(5 + Random.rand() * 28);
var maxPointsPerDoc = 50;
var t = db.sphere;
var randomPoint = function() {
- return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ];
+ return [Random.rand() * 360 - 180, Random.rand() * 180 - 90];
};
// Get a start point that doesn't require wrapping
@@ -47,7 +50,7 @@ for ( var test = 0; test < numTests; test++ ) {
do {
t.drop();
startPoint = randomPoint();
- t.ensureIndex( { loc : "2d" }, { bits : bits } );
+ t.ensureIndex({loc: "2d"}, {bits: bits});
} while (!pointIsOK(startPoint, radius));
var pointsIn = 0;
@@ -57,19 +60,18 @@ for ( var test = 0; test < numTests; test++ ) {
var totalPoints = 0;
var bulk = t.initializeUnorderedBulkOp();
- for ( var i = 0; i < numDocs; i++ ) {
-
- var numPoints = Math.floor( Random.rand() * maxPointsPerDoc + 1 );
+ for (var i = 0; i < numDocs; i++) {
+ var numPoints = Math.floor(Random.rand() * maxPointsPerDoc + 1);
var docIn = false;
var multiPoint = [];
totalPoints += numPoints;
- for ( var p = 0; p < numPoints; p++ ) {
+ for (var p = 0; p < numPoints; p++) {
var point = randomPoint();
- multiPoint.push( point );
+ multiPoint.push(point);
- if ( Geo.sphereDistance( startPoint, point ) <= radius ) {
+ if (Geo.sphereDistance(startPoint, point) <= radius) {
pointsIn++;
docIn = true;
} else {
@@ -77,54 +79,64 @@ for ( var test = 0; test < numTests; test++ ) {
}
}
- bulk.insert( { loc : multiPoint } );
+ bulk.insert({loc: multiPoint});
- if ( docIn )
+ if (docIn)
docsIn++;
else
docsOut++;
-
}
- printjson( { test: test, radius : radius, bits : bits, numDocs : numDocs,
- pointsIn : pointsIn, docsIn : docsIn, pointsOut : pointsOut,
- docsOut : docsOut } );
+ printjson({
+ test: test,
+ radius: radius,
+ bits: bits,
+ numDocs: numDocs,
+ pointsIn: pointsIn,
+ docsIn: docsIn,
+ pointsOut: pointsOut,
+ docsOut: docsOut
+ });
assert.writeOK(bulk.execute());
- assert.eq( docsIn + docsOut, numDocs );
- assert.eq( pointsIn + pointsOut, totalPoints );
+ assert.eq(docsIn + docsOut, numDocs);
+ assert.eq(pointsIn + pointsOut, totalPoints);
// $centerSphere
- assert.eq( docsIn, t.find({ loc: { $within:
- { $centerSphere: [ startPoint, radius ]}}} ).count() );
+ assert.eq(docsIn, t.find({loc: {$within: {$centerSphere: [startPoint, radius]}}}).count());
// $nearSphere
- var cursor = t.find({ loc: { $nearSphere: startPoint, $maxDistance: radius }});
- var results = cursor.limit( 2 * pointsIn ).toArray();
+ var cursor = t.find({loc: {$nearSphere: startPoint, $maxDistance: radius}});
+ var results = cursor.limit(2 * pointsIn).toArray();
- assert.eq( docsIn, results.length );
+ assert.eq(docsIn, results.length);
var distance = 0;
- for ( var i = 0; i < results.length; i++ ) {
+ for (var i = 0; i < results.length; i++) {
var minNewDistance = radius + 1;
- for( var j = 0; j < results[i].loc.length; j++ ){
- var newDistance = Geo.sphereDistance( startPoint, results[i].loc[j] );
- if( newDistance < minNewDistance && newDistance >= distance ) {
+ for (var j = 0; j < results[i].loc.length; j++) {
+ var newDistance = Geo.sphereDistance(startPoint, results[i].loc[j]);
+ if (newDistance < minNewDistance && newDistance >= distance) {
minNewDistance = newDistance;
}
}
- //print( "Dist from : " + results[i].loc[j] + " to " + startPoint + " is "
+ // print( "Dist from : " + results[i].loc[j] + " to " + startPoint + " is "
// + minNewDistance + " vs " + radius )
- assert.lte( minNewDistance, radius );
- assert.gte( minNewDistance, distance );
+ assert.lte(minNewDistance, radius);
+ assert.gte(minNewDistance, distance);
distance = minNewDistance;
}
// geoNear
- results = db.runCommand({ geoNear: "sphere", near: startPoint, maxDistance: radius,
- num : 2 * pointsIn, spherical : true } ).results;
+ results = db.runCommand({
+ geoNear: "sphere",
+ near: startPoint,
+ maxDistance: radius,
+ num: 2 * pointsIn,
+ spherical: true
+ }).results;
/*
printjson( results );
@@ -135,26 +147,25 @@ for ( var test = 0; test < numTests; test++ ) {
}
*/
- assert.eq( docsIn, results.length );
+ assert.eq(docsIn, results.length);
var distance = 0;
- for ( var i = 0; i < results.length; i++ ) {
+ for (var i = 0; i < results.length; i++) {
var retDistance = results[i].dis;
// print( "Dist from : " + results[i].loc + " to " + startPoint + " is "
// + retDistance + " vs " + radius )
var distInObj = false;
- for ( var j = 0; j < results[i].obj.loc.length && distInObj == false; j++ ) {
- var newDistance = Geo.sphereDistance( startPoint, results[i].obj.loc[j] );
- distInObj = ( newDistance >= retDistance - 0.0001 &&
- newDistance <= retDistance + 0.0001 );
+ for (var j = 0; j < results[i].obj.loc.length && distInObj == false; j++) {
+ var newDistance = Geo.sphereDistance(startPoint, results[i].obj.loc[j]);
+ distInObj =
+ (newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001);
}
- assert( distInObj );
- assert.lte( retDistance, radius );
- assert.gte( retDistance, distance );
+ assert(distInObj);
+ assert.lte(retDistance, radius);
+ assert.gte(retDistance, distance);
distance = retDistance;
}
}
-
diff --git a/jstests/core/geo_circle1.js b/jstests/core/geo_circle1.js
index c4b79e645ab..a679a408b32 100644
--- a/jstests/core/geo_circle1.js
+++ b/jstests/core/geo_circle1.js
@@ -2,45 +2,48 @@
t = db.geo_circle1;
t.drop();
-searches = [
- [ [ 5 , 5 ] , 3 ] ,
- [ [ 5 , 5 ] , 1 ] ,
- [ [ 5 , 5 ] , 5 ] ,
- [ [ 0 , 5 ] , 5 ] ,
-];
-correct = searches.map( function(z){ return []; } );
+searches = [[[5, 5], 3], [[5, 5], 1], [[5, 5], 5], [[0, 5], 5], ];
+correct = searches.map(function(z) {
+ return [];
+});
num = 0;
-for ( x=0; x<=20; x++ ){
- for ( y=0; y<=20; y++ ){
- o = { _id : num++ , loc : [ x , y ] };
- t.save( o );
- for ( i=0; i<searches.length; i++ )
- if ( Geo.distance( [ x , y ] , searches[i][0] ) <= searches[i][1] )
- correct[i].push( o );
+for (x = 0; x <= 20; x++) {
+ for (y = 0; y <= 20; y++) {
+ o = {
+ _id: num++,
+ loc: [x, y]
+ };
+ t.save(o);
+ for (i = 0; i < searches.length; i++)
+ if (Geo.distance([x, y], searches[i][0]) <= searches[i][1])
+ correct[i].push(o);
}
}
-t.ensureIndex( { loc : "2d" } );
+t.ensureIndex({loc: "2d"});
-for ( i=0; i<searches.length; i++ ){
- //print( tojson( searches[i] ) + "\t" + correct[i].length )
- q = { loc : { $within : { $center : searches[i] } } };
+for (i = 0; i < searches.length; i++) {
+ // print( tojson( searches[i] ) + "\t" + correct[i].length )
+ q = {
+ loc: {$within: {$center: searches[i]}}
+ };
- //correct[i].forEach( printjson )
- //printjson( q );
- //t.find( q ).forEach( printjson )
+ // correct[i].forEach( printjson )
+ // printjson( q );
+ // t.find( q ).forEach( printjson )
- //printjson( Array.sort( correct[i].map( function(z){ return z._id; } ) ) )
- //printjson( Array.sort( t.find(q).map( function(z){ return z._id; } ) ) )
-
- assert.eq( correct[i].length , t.find( q ).itcount() , "itcount : " + tojson( searches[i] ) );
- assert.eq( correct[i].length , t.find( q ).count() , "count : " + tojson( searches[i] ) );
- var explain = t.find( q ).explain("executionStats");
- print( 'explain for ' + tojson( q , '' , true ) + ' = ' + tojson( explain ) );
+ // printjson( Array.sort( correct[i].map( function(z){ return z._id; } ) ) )
+ // printjson( Array.sort( t.find(q).map( function(z){ return z._id; } ) ) )
+
+ assert.eq(correct[i].length, t.find(q).itcount(), "itcount : " + tojson(searches[i]));
+ assert.eq(correct[i].length, t.find(q).count(), "count : " + tojson(searches[i]));
+ var explain = t.find(q).explain("executionStats");
+ print('explain for ' + tojson(q, '', true) + ' = ' + tojson(explain));
// The index should be at least minimally effective in preventing the full collection
// scan.
- assert.gt( t.find().count(), explain.executionStats.totalKeysExamined,
- "nscanned : " + tojson( searches[i] ) );
+ assert.gt(t.find().count(),
+ explain.executionStats.totalKeysExamined,
+ "nscanned : " + tojson(searches[i]));
}
diff --git a/jstests/core/geo_circle1_noindex.js b/jstests/core/geo_circle1_noindex.js
index 304f1404b64..872883dbf74 100644
--- a/jstests/core/geo_circle1_noindex.js
+++ b/jstests/core/geo_circle1_noindex.js
@@ -2,28 +2,30 @@
t = db.geo_circle1_noindex;
t.drop();
-searches = [
- [ [ 5 , 5 ] , 3 ] ,
- [ [ 5 , 5 ] , 1 ] ,
- [ [ 5 , 5 ] , 5 ] ,
- [ [ 0 , 5 ] , 5 ] ,
-];
-correct = searches.map( function(z){ return []; } );
+searches = [[[5, 5], 3], [[5, 5], 1], [[5, 5], 5], [[0, 5], 5], ];
+correct = searches.map(function(z) {
+ return [];
+});
num = 0;
-for ( x=0; x<=20; x++ ){
- for ( y=0; y<=20; y++ ){
- o = { _id : num++ , loc : [ x , y ] };
- t.save( o );
- for ( i=0; i<searches.length; i++ )
- if ( Geo.distance( [ x , y ] , searches[i][0] ) <= searches[i][1] )
- correct[i].push( o );
+for (x = 0; x <= 20; x++) {
+ for (y = 0; y <= 20; y++) {
+ o = {
+ _id: num++,
+ loc: [x, y]
+ };
+ t.save(o);
+ for (i = 0; i < searches.length; i++)
+ if (Geo.distance([x, y], searches[i][0]) <= searches[i][1])
+ correct[i].push(o);
}
}
-for ( i=0; i<searches.length; i++ ){
- q = { loc : { $within : { $center : searches[i] } } };
- assert.eq( correct[i].length , t.find( q ).itcount() , "itcount : " + tojson( searches[i] ) );
- assert.eq( correct[i].length , t.find( q ).count() , "count : " + tojson( searches[i] ) );
+for (i = 0; i < searches.length; i++) {
+ q = {
+ loc: {$within: {$center: searches[i]}}
+ };
+ assert.eq(correct[i].length, t.find(q).itcount(), "itcount : " + tojson(searches[i]));
+ assert.eq(correct[i].length, t.find(q).count(), "count : " + tojson(searches[i]));
}
diff --git a/jstests/core/geo_circle2.js b/jstests/core/geo_circle2.js
index 9a3b3c94860..6c89098f684 100644
--- a/jstests/core/geo_circle2.js
+++ b/jstests/core/geo_circle2.js
@@ -2,25 +2,28 @@
t = db.geo_circle2;
t.drop();
-t.ensureIndex({loc : "2d", categories:1}, {"name":"placesIdx", "min": -100, "max": 100});
+t.ensureIndex({loc: "2d", categories: 1}, {"name": "placesIdx", "min": -100, "max": 100});
-t.insert({ "uid" : 368900 , "loc" : { "x" : -36 , "y" : -8} ,"categories" : [ "sports" , "hotel" , "restaurant"]});
-t.insert({ "uid" : 555344 , "loc" : { "x" : 13 , "y" : 29} ,"categories" : [ "sports" , "hotel"]});
-t.insert({ "uid" : 855878 , "loc" : { "x" : 38 , "y" : 30} ,"categories" : [ "sports" , "hotel"]});
-t.insert({ "uid" : 917347 , "loc" : { "x" : 15 , "y" : 46} ,"categories" : [ "hotel"]});
-t.insert({ "uid" : 647874 , "loc" : { "x" : 25 , "y" : 23} ,"categories" : [ "hotel" , "restaurant"]});
-t.insert({ "uid" : 518482 , "loc" : { "x" : 4 , "y" : 25} ,"categories" : [ ]});
-t.insert({ "uid" : 193466 , "loc" : { "x" : -39 , "y" : 22} ,"categories" : [ "sports" , "hotel"]});
-t.insert({ "uid" : 622442 , "loc" : { "x" : -24 , "y" : -46} ,"categories" : [ "hotel"]});
-t.insert({ "uid" : 297426 , "loc" : { "x" : 33 , "y" : -49} ,"categories" : [ "hotel"]});
-t.insert({ "uid" : 528464 , "loc" : { "x" : -43 , "y" : 48} ,"categories" : [ "restaurant"]});
-t.insert({ "uid" : 90579 , "loc" : { "x" : -4 , "y" : -23} ,"categories" : [ "restaurant"]});
-t.insert({ "uid" : 368895 , "loc" : { "x" : -8 , "y" : 14} ,"categories" : [ "sports" ]});
-t.insert({ "uid" : 355844 , "loc" : { "x" : 34 , "y" : -4} ,"categories" : [ "sports" , "hotel"]});
+t.insert(
+ {"uid": 368900, "loc": {"x": -36, "y": -8}, "categories": ["sports", "hotel", "restaurant"]});
+t.insert({"uid": 555344, "loc": {"x": 13, "y": 29}, "categories": ["sports", "hotel"]});
+t.insert({"uid": 855878, "loc": {"x": 38, "y": 30}, "categories": ["sports", "hotel"]});
+t.insert({"uid": 917347, "loc": {"x": 15, "y": 46}, "categories": ["hotel"]});
+t.insert({"uid": 647874, "loc": {"x": 25, "y": 23}, "categories": ["hotel", "restaurant"]});
+t.insert({"uid": 518482, "loc": {"x": 4, "y": 25}, "categories": []});
+t.insert({"uid": 193466, "loc": {"x": -39, "y": 22}, "categories": ["sports", "hotel"]});
+t.insert({"uid": 622442, "loc": {"x": -24, "y": -46}, "categories": ["hotel"]});
+t.insert({"uid": 297426, "loc": {"x": 33, "y": -49}, "categories": ["hotel"]});
+t.insert({"uid": 528464, "loc": {"x": -43, "y": 48}, "categories": ["restaurant"]});
+t.insert({"uid": 90579, "loc": {"x": -4, "y": -23}, "categories": ["restaurant"]});
+t.insert({"uid": 368895, "loc": {"x": -8, "y": 14}, "categories": ["sports"]});
+t.insert({"uid": 355844, "loc": {"x": 34, "y": -4}, "categories": ["sports", "hotel"]});
-
-assert.eq( 10 , t.find({ "loc" : { "$within" : { "$center" : [ { "x" : 0 ,"y" : 0} , 50]}} } ).itcount() , "A" );
-assert.eq( 6 , t.find({ "loc" : { "$within" : { "$center" : [ { "x" : 0 ,"y" : 0} , 50]}}, "categories" : "sports" } ).itcount() , "B" );
+assert.eq(10, t.find({"loc": {"$within": {"$center": [{"x": 0, "y": 0}, 50]}}}).itcount(), "A");
+assert.eq(6,
+ t.find({"loc": {"$within": {"$center": [{"x": 0, "y": 0}, 50]}}, "categories": "sports"})
+ .itcount(),
+ "B");
// When not a $near or $within query, geo index should not be used. Fails if geo index is used.
-assert.eq( 1 , t.find({ "loc" : { "x" : -36, "y" : -8}, "categories" : "sports" }).itcount(), "C" );
+assert.eq(1, t.find({"loc": {"x": -36, "y": -8}, "categories": "sports"}).itcount(), "C");
diff --git a/jstests/core/geo_circle2a.js b/jstests/core/geo_circle2a.js
index e6d9d829782..1a0cc06a7f8 100644
--- a/jstests/core/geo_circle2a.js
+++ b/jstests/core/geo_circle2a.js
@@ -1,37 +1,40 @@
-// From SERVER-2381
-// Tests to make sure that nested multi-key indexing works for geo indexes and is not used for direct position
+// From SERVER-2381
+// Tests to make sure that nested multi-key indexing works for geo indexes and is not used for
+// direct position
// lookups
var coll = db.geo_circle2a;
coll.drop();
-coll.insert({ p : [1112,3473], t : [{ k : 'a', v : 'b' }, { k : 'c', v : 'd' }] });
-coll.ensureIndex({ p : '2d', 't.k' : 1 }, { min : 0, max : 10000 });
+coll.insert({p: [1112, 3473], t: [{k: 'a', v: 'b'}, {k: 'c', v: 'd'}]});
+coll.ensureIndex({p: '2d', 't.k': 1}, {min: 0, max: 10000});
// Succeeds, since on direct lookup should not use the index
-assert(1 == coll.find({p:[1112,3473],'t.k':'a'}).count(), "A");
+assert(1 == coll.find({p: [1112, 3473], 't.k': 'a'}).count(), "A");
// Succeeds and uses the geo index
-assert(1 == coll.find({p:{$within:{$box:[[1111,3472],[1113,3475]]}}, 't.k' : 'a' }).count(), "B");
-
+assert(1 == coll.find({p: {$within: {$box: [[1111, 3472], [1113, 3475]]}}, 't.k': 'a'}).count(),
+ "B");
coll.drop();
-coll.insert({ point:[ 1, 10 ], tags : [ { k : 'key', v : 'value' }, { k : 'key2', v : 123 } ] });
-coll.insert({ point:[ 1, 10 ], tags : [ { k : 'key', v : 'value' } ] });
+coll.insert({point: [1, 10], tags: [{k: 'key', v: 'value'}, {k: 'key2', v: 123}]});
+coll.insert({point: [1, 10], tags: [{k: 'key', v: 'value'}]});
-coll.ensureIndex({ point : "2d" , "tags.k" : 1, "tags.v" : 1 });
+coll.ensureIndex({point: "2d", "tags.k": 1, "tags.v": 1});
// Succeeds, since should now lookup multi-keys correctly
-assert(2 == coll.find({ point : { $within : { $box : [[0,0],[12,12]] } } }).count(), "C");
+assert(2 == coll.find({point: {$within: {$box: [[0, 0], [12, 12]]}}}).count(), "C");
// Succeeds, and should not use geoindex
-assert(2 == coll.find({ point : [1, 10] }).count(), "D");
-assert(2 == coll.find({ point : [1, 10], "tags.v" : "value" }).count(), "E");
-assert(1 == coll.find({ point : [1, 10], "tags.v" : 123 }).count(), "F");
-
+assert(2 == coll.find({point: [1, 10]}).count(), "D");
+assert(2 == coll.find({point: [1, 10], "tags.v": "value"}).count(), "E");
+assert(1 == coll.find({point: [1, 10], "tags.v": 123}).count(), "F");
coll.drop();
-coll.insert({ point:[ 1, 10 ], tags : [ { k : { 'hello' : 'world'}, v : 'value' }, { k : 'key2', v : 123 } ] });
-coll.insert({ point:[ 1, 10 ], tags : [ { k : 'key', v : 'value' } ] });
+coll.insert({point: [1, 10], tags: [{k: {'hello': 'world'}, v: 'value'}, {k: 'key2', v: 123}]});
+coll.insert({point: [1, 10], tags: [{k: 'key', v: 'value'}]});
-coll.ensureIndex({ point : "2d" , "tags.k" : 1, "tags.v" : 1 });
+coll.ensureIndex({point: "2d", "tags.k": 1, "tags.v": 1});
// Succeeds, should be able to look up the complex element
-assert(1 == coll.find({ point : { $within : { $box : [[0,0],[12,12]] } }, 'tags.k' : { 'hello' : 'world' } }).count(), "G"); \ No newline at end of file
+assert(1 ==
+ coll.find({point: {$within: {$box: [[0, 0], [12, 12]]}}, 'tags.k': {'hello': 'world'}})
+ .count(),
+ "G"); \ No newline at end of file
diff --git a/jstests/core/geo_circle3.js b/jstests/core/geo_circle3.js
index 96907ce9706..4e1fde4aa89 100644
--- a/jstests/core/geo_circle3.js
+++ b/jstests/core/geo_circle3.js
@@ -2,22 +2,21 @@
db.places.drop();
n = 0;
-db.places.save({ "_id": n++, "loc" : { "x" : 4.9999, "y" : 52 } });
-db.places.save({ "_id": n++, "loc" : { "x" : 5, "y" : 52 } });
-db.places.save({ "_id": n++, "loc" : { "x" : 5.0001, "y" : 52 } });
-db.places.save({ "_id": n++, "loc" : { "x" : 5, "y" : 52.0001 } });
-db.places.save({ "_id": n++, "loc" : { "x" : 5, "y" : 51.9999 } });
-db.places.save({ "_id": n++, "loc" : { "x" : 4.9999, "y" : 52.0001 } });
-db.places.save({ "_id": n++, "loc" : { "x" : 5.0001, "y" : 52.0001 } });
-db.places.save({ "_id": n++, "loc" : { "x" : 4.9999, "y" : 51.9999 } });
-db.places.save({ "_id": n++, "loc" : { "x" : 5.0001, "y" : 51.9999 } });
-db.places.ensureIndex( { loc : "2d" } );
-radius=0.0001;
-center=[5,52];
-//print(db.places.find({"loc" : {"$within" : {"$center" : [center, radius]}}}).count())
+db.places.save({"_id": n++, "loc": {"x": 4.9999, "y": 52}});
+db.places.save({"_id": n++, "loc": {"x": 5, "y": 52}});
+db.places.save({"_id": n++, "loc": {"x": 5.0001, "y": 52}});
+db.places.save({"_id": n++, "loc": {"x": 5, "y": 52.0001}});
+db.places.save({"_id": n++, "loc": {"x": 5, "y": 51.9999}});
+db.places.save({"_id": n++, "loc": {"x": 4.9999, "y": 52.0001}});
+db.places.save({"_id": n++, "loc": {"x": 5.0001, "y": 52.0001}});
+db.places.save({"_id": n++, "loc": {"x": 4.9999, "y": 51.9999}});
+db.places.save({"_id": n++, "loc": {"x": 5.0001, "y": 51.9999}});
+db.places.ensureIndex({loc: "2d"});
+radius = 0.0001;
+center = [5, 52];
+// print(db.places.find({"loc" : {"$within" : {"$center" : [center, radius]}}}).count())
// FIXME: we want an assert, e.g., that there be 5 answers in the find().
-db.places.find({"loc" : {"$within" : {"$center" : [center, radius]}}}).forEach(printjson);
-
+db.places.find({"loc": {"$within": {"$center": [center, radius]}}}).forEach(printjson);
// the result:
// { "_id" : ObjectId("4bb1f2f088df513435bcb4e1"), "loc" : { "x" : 5, "y" : 52 } }
diff --git a/jstests/core/geo_circle4.js b/jstests/core/geo_circle4.js
index 3113d54b38a..0d2b74b2cba 100644
--- a/jstests/core/geo_circle4.js
+++ b/jstests/core/geo_circle4.js
@@ -2,29 +2,31 @@
function test(index) {
db.server848.drop();
- radius=0.0001;
- center=[5,52];
+ radius = 0.0001;
+ center = [5, 52];
- db.server848.save({ "_id": 1, "loc" : { "x" : 4.9999, "y" : 52 } });
- db.server848.save({ "_id": 2, "loc" : { "x" : 5, "y" : 52 } });
- db.server848.save({ "_id": 3, "loc" : { "x" : 5.0001, "y" : 52 } });
- db.server848.save({ "_id": 4, "loc" : { "x" : 5, "y" : 52.0001 } });
- db.server848.save({ "_id": 5, "loc" : { "x" : 5, "y" : 51.9999 } });
- db.server848.save({ "_id": 6, "loc" : { "x" : 4.9999, "y" : 52.0001 } });
- db.server848.save({ "_id": 7, "loc" : { "x" : 5.0001, "y" : 52.0001 } });
- db.server848.save({ "_id": 8, "loc" : { "x" : 4.9999, "y" : 51.9999 } });
- db.server848.save({ "_id": 9, "loc" : { "x" : 5.0001, "y" : 51.9999 } });
+ db.server848.save({"_id": 1, "loc": {"x": 4.9999, "y": 52}});
+ db.server848.save({"_id": 2, "loc": {"x": 5, "y": 52}});
+ db.server848.save({"_id": 3, "loc": {"x": 5.0001, "y": 52}});
+ db.server848.save({"_id": 4, "loc": {"x": 5, "y": 52.0001}});
+ db.server848.save({"_id": 5, "loc": {"x": 5, "y": 51.9999}});
+ db.server848.save({"_id": 6, "loc": {"x": 4.9999, "y": 52.0001}});
+ db.server848.save({"_id": 7, "loc": {"x": 5.0001, "y": 52.0001}});
+ db.server848.save({"_id": 8, "loc": {"x": 4.9999, "y": 51.9999}});
+ db.server848.save({"_id": 9, "loc": {"x": 5.0001, "y": 51.9999}});
if (index) {
- db.server848.ensureIndex( { loc : "2d" } );
+ db.server848.ensureIndex({loc: "2d"});
}
- r=db.server848.find({"loc" : {"$within" : {"$center" : [center, radius]}}}, {_id:1});
- assert.eq(5, r.count(), "A1");
+ r = db.server848.find({"loc": {"$within": {"$center": [center, radius]}}}, {_id: 1});
+ assert.eq(5, r.count(), "A1");
// FIXME: surely code like this belongs in utils.js.
- a=r.toArray();
- x=[];
- for (k in a) { x.push(a[k]["_id"]); }
+ a = r.toArray();
+ x = [];
+ for (k in a) {
+ x.push(a[k]["_id"]);
+ }
x.sort();
- assert.eq([1,2,3,4,5], x, "B1");
+ assert.eq([1, 2, 3, 4, 5], x, "B1");
}
test(false);
diff --git a/jstests/core/geo_circle5.js b/jstests/core/geo_circle5.js
index 32b5744cea4..1b3d67e91b0 100644
--- a/jstests/core/geo_circle5.js
+++ b/jstests/core/geo_circle5.js
@@ -2,20 +2,20 @@
db.server1238.drop();
db.server1238.remove({});
-db.server1238.save({ loc: [ 5000000, 900000 ], id: 1});
-db.server1238.save({ loc: [ 5000000, 900000 ], id: 2});
-db.server1238.ensureIndex( { loc : "2d" } , { min : -21000000 , max : 21000000 } );
-db.server1238.save({ loc: [ 5000000, 900000 ], id: 3});
-db.server1238.save({ loc: [ 5000000, 900000 ], id: 4});
+db.server1238.save({loc: [5000000, 900000], id: 1});
+db.server1238.save({loc: [5000000, 900000], id: 2});
+db.server1238.ensureIndex({loc: "2d"}, {min: -21000000, max: 21000000});
+db.server1238.save({loc: [5000000, 900000], id: 3});
+db.server1238.save({loc: [5000000, 900000], id: 4});
-c1=db.server1238.find({"loc" : {"$within" : {"$center" : [[5000000, 900000], 1.0]}}}).count();
-
-c2=db.server1238.find({"loc" : {"$within" : {"$center" : [[5000001, 900000], 5.0]}}}).count();
+c1 = db.server1238.find({"loc": {"$within": {"$center": [[5000000, 900000], 1.0]}}}).count();
+c2 = db.server1238.find({"loc": {"$within": {"$center": [[5000001, 900000], 5.0]}}}).count();
assert.eq(4, c1, "A1");
assert.eq(c1, c2, "B1");
-//print(db.server1238.find({"loc" : {"$within" : {"$center" : [[5000001, 900000], 5.0]}}}).toArray());
+// print(db.server1238.find({"loc" : {"$within" : {"$center" : [[5000001, 900000],
+// 5.0]}}}).toArray());
// [
// {
// "_id" : ObjectId("4c173306f5d9d34a46cb7b11"),
@@ -25,4 +25,4 @@ assert.eq(c1, c2, "B1");
// ],
// "id" : 4
// }
-// ]
+// ]
diff --git a/jstests/core/geo_distinct.js b/jstests/core/geo_distinct.js
index eccb517ed83..705bf1cc7ce 100644
--- a/jstests/core/geo_distinct.js
+++ b/jstests/core/geo_distinct.js
@@ -10,25 +10,25 @@ var res;
//
coll.drop();
-coll.insert( { loc: { type: 'Point', coordinates: [ 10, 20 ] } } );
-coll.insert( { loc: { type: 'Point', coordinates: [ 10, 20 ] } } );
-coll.insert( { loc: { type: 'Point', coordinates: [ 20, 30 ] } } );
-coll.insert( { loc: { type: 'Point', coordinates: [ 20, 30 ] } } );
-assert.eq( 4, coll.count() );
+coll.insert({loc: {type: 'Point', coordinates: [10, 20]}});
+coll.insert({loc: {type: 'Point', coordinates: [10, 20]}});
+coll.insert({loc: {type: 'Point', coordinates: [20, 30]}});
+coll.insert({loc: {type: 'Point', coordinates: [20, 30]}});
+assert.eq(4, coll.count());
// Test distinct on GeoJSON points with/without a 2dsphere index.
-res = coll.runCommand( 'distinct', { key: 'loc' } );
-assert.commandWorked( res );
-assert.eq( res.values.sort(), [ { type: 'Point', coordinates: [ 10, 20 ] },
- { type: 'Point', coordinates: [ 20, 30 ] } ] );
+res = coll.runCommand('distinct', {key: 'loc'});
+assert.commandWorked(res);
+assert.eq(res.values.sort(),
+ [{type: 'Point', coordinates: [10, 20]}, {type: 'Point', coordinates: [20, 30]}]);
-assert.commandWorked( coll.ensureIndex( { loc: '2dsphere' } ) );
+assert.commandWorked(coll.ensureIndex({loc: '2dsphere'}));
-res = coll.runCommand( 'distinct', { key: 'loc' } );
-assert.commandWorked( res );
-assert.eq( res.values.sort(), [ { type: 'Point', coordinates: [ 10, 20 ] },
- { type: 'Point', coordinates: [ 20, 30 ] } ] );
+res = coll.runCommand('distinct', {key: 'loc'});
+assert.commandWorked(res);
+assert.eq(res.values.sort(),
+ [{type: 'Point', coordinates: [10, 20]}, {type: 'Point', coordinates: [20, 30]}]);
// Test distinct on legacy points with/without a 2d index.
@@ -38,69 +38,72 @@ assert.eq( res.values.sort(), [ { type: 'Point', coordinates: [ 10, 20 ] },
coll.dropIndexes();
-res = coll.runCommand( 'distinct', { key: 'loc.coordinates' } );
-assert.commandWorked( res );
-assert.eq( res.values.sort(), [ 10, 20, 30 ] );
+res = coll.runCommand('distinct', {key: 'loc.coordinates'});
+assert.commandWorked(res);
+assert.eq(res.values.sort(), [10, 20, 30]);
-assert.commandWorked( coll.ensureIndex( { 'loc.coordinates': '2d' } ) );
+assert.commandWorked(coll.ensureIndex({'loc.coordinates': '2d'}));
-res = coll.runCommand( 'distinct', { key: 'loc.coordinates' } );
-assert.commandWorked( res );
-assert.eq( res.values.sort(), [ 10, 20, 30 ] );
+res = coll.runCommand('distinct', {key: 'loc.coordinates'});
+assert.commandWorked(res);
+assert.eq(res.values.sort(), [10, 20, 30]);
//
// 2. Test distinct with geo predicates for 'query'.
//
coll.drop();
-for (var i=0; i<50; ++i) {
- coll.insert( { zone: 1, loc: { type: 'Point', coordinates: [ -20, -20 ] } } );
- coll.insert( { zone: 2, loc: { type: 'Point', coordinates: [ -10, -10 ] } } );
- coll.insert( { zone: 3, loc: { type: 'Point', coordinates: [ 0, 0 ] } } );
- coll.insert( { zone: 4, loc: { type: 'Point', coordinates: [ 10, 10 ] } } );
- coll.insert( { zone: 5, loc: { type: 'Point', coordinates: [ 20, 20 ] } } );
+for (var i = 0; i < 50; ++i) {
+ coll.insert({zone: 1, loc: {type: 'Point', coordinates: [-20, -20]}});
+ coll.insert({zone: 2, loc: {type: 'Point', coordinates: [-10, -10]}});
+ coll.insert({zone: 3, loc: {type: 'Point', coordinates: [0, 0]}});
+ coll.insert({zone: 4, loc: {type: 'Point', coordinates: [10, 10]}});
+ coll.insert({zone: 5, loc: {type: 'Point', coordinates: [20, 20]}});
}
-var originGeoJSON = { type: 'Point', coordinates: [ 0, 0 ] };
+var originGeoJSON = {
+ type: 'Point',
+ coordinates: [0, 0]
+};
// Test distinct with $nearSphere query predicate.
// A. Unindexed key, no geo index on query predicate.
-res = coll.runCommand( 'distinct', { key: 'zone',
- query: { loc: { $nearSphere: { $geometry: originGeoJSON,
- $maxDistance: 1 } } } } );
-assert.commandFailed( res );
+res = coll.runCommand(
+ 'distinct',
+ {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}});
+assert.commandFailed(res);
// B. Unindexed key, with 2dsphere index on query predicate.
-assert.commandWorked( coll.ensureIndex( { loc: '2dsphere' } ) );
-res = coll.runCommand( 'distinct', { key: 'zone',
- query: { loc: { $nearSphere: { $geometry: originGeoJSON,
- $maxDistance: 1 } } } } );
-assert.commandWorked( res );
-assert.eq( res.values.sort(), [ 3 ] );
+assert.commandWorked(coll.ensureIndex({loc: '2dsphere'}));
+res = coll.runCommand(
+ 'distinct',
+ {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}});
+assert.commandWorked(res);
+assert.eq(res.values.sort(), [3]);
// C. Indexed key, with 2dsphere index on query predicate.
-assert.commandWorked( coll.ensureIndex( { zone: 1 } ) );
-res = coll.runCommand( 'distinct', { key: 'zone',
- query: { loc: { $nearSphere: { $geometry: originGeoJSON,
- $maxDistance: 1 } } } } );
-assert.commandWorked( res );
-assert.eq( res.values.sort(), [ 3 ] );
+assert.commandWorked(coll.ensureIndex({zone: 1}));
+res = coll.runCommand(
+ 'distinct',
+ {key: 'zone', query: {loc: {$nearSphere: {$geometry: originGeoJSON, $maxDistance: 1}}}});
+assert.commandWorked(res);
+assert.eq(res.values.sort(), [3]);
// Test distinct with $near query predicate.
coll.dropIndexes();
// A. Unindexed key, no geo index on query predicate.
-res = coll.runCommand( 'distinct', { key: 'zone',
- query: { 'loc.coordinates': { $near: [ 0, 0 ], $maxDistance: 1 } } } );
-assert.commandFailed( res );
+res = coll.runCommand('distinct',
+ {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}});
+assert.commandFailed(res);
// B. Unindexed key, with 2d index on query predicate.
-assert.commandWorked( coll.ensureIndex( { 'loc.coordinates': '2d' } ) );
-res = coll.runCommand( 'distinct', { key: 'zone',
- query: { 'loc.coordinates': { $near: [ 0, 0 ], $maxDistance: 1 } } } );
-assert.commandWorked( res );
-assert.eq( res.values.sort(), [ 3 ] );
+assert.commandWorked(coll.ensureIndex({'loc.coordinates': '2d'}));
+res = coll.runCommand('distinct',
+ {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}});
+assert.commandWorked(res);
+assert.eq(res.values.sort(), [3]);
// C. Indexed key, with 2d index on query predicate.
-assert.commandWorked( coll.ensureIndex( { zone: 1 } ) );
-res = coll.runCommand( 'distinct', { key: 'zone',
- query: { 'loc.coordinates': { $near: [ 0, 0 ], $maxDistance: 1 } } } );
-assert.commandWorked( res );
-assert.eq( res.values.sort(), [ 3 ] );
+assert.commandWorked(coll.ensureIndex({zone: 1}));
+res = coll.runCommand('distinct',
+ {key: 'zone', query: {'loc.coordinates': {$near: [0, 0], $maxDistance: 1}}});
+assert.commandWorked(res);
+assert.eq(res.values.sort(), [3]);
diff --git a/jstests/core/geo_exactfetch.js b/jstests/core/geo_exactfetch.js
index 43023897a4e..4af4032045f 100644
--- a/jstests/core/geo_exactfetch.js
+++ b/jstests/core/geo_exactfetch.js
@@ -10,8 +10,8 @@ function test(indexname) {
t.dropIndex({lon_lat: indexname, population: -1});
}
-t.insert({ city: "B", lon_lat: [-71.34895, 42.46037], population: 1000});
-t.insert({ city: "A", lon_lat: [1.48736, 42.55327], population: 100});
+t.insert({city: "B", lon_lat: [-71.34895, 42.46037], population: 1000});
+t.insert({city: "A", lon_lat: [1.48736, 42.55327], population: 100});
test("2d");
test("2dsphere");
diff --git a/jstests/core/geo_fiddly_box.js b/jstests/core/geo_fiddly_box.js
index cfda0a99757..f5cd3ddcc6b 100644
--- a/jstests/core/geo_fiddly_box.js
+++ b/jstests/core/geo_fiddly_box.js
@@ -1,26 +1,27 @@
// Reproduces simple test for SERVER-2832
-// The setup to reproduce was/is to create a set of points where the
+// The setup to reproduce was/is to create a set of points where the
// "expand" portion of the geo-lookup expands the 2d range in only one
// direction (so points are required on either side of the expanding range)
t = db.geo_fiddly_box;
t.drop();
-t.ensureIndex({ loc : "2d" });
+t.ensureIndex({loc: "2d"});
-t.insert({ "loc" : [3, 1] });
-t.insert({ "loc" : [3, 0.5] });
-t.insert({ "loc" : [3, 0.25] });
-t.insert({ "loc" : [3, -0.01] });
-t.insert({ "loc" : [3, -0.25] });
-t.insert({ "loc" : [3, -0.5] });
-t.insert({ "loc" : [3, -1] });
+t.insert({"loc": [3, 1]});
+t.insert({"loc": [3, 0.5]});
+t.insert({"loc": [3, 0.25]});
+t.insert({"loc": [3, -0.01]});
+t.insert({"loc": [3, -0.25]});
+t.insert({"loc": [3, -0.5]});
+t.insert({"loc": [3, -1]});
// OK!
-print( t.count() );
-assert.eq( 7, t.count({ "loc" : { "$within" : { "$box" : [ [2, -2], [46, 2] ] } } }), "Not all locations found!" );
-
+print(t.count());
+assert.eq(7,
+ t.count({"loc": {"$within": {"$box": [[2, -2], [46, 2]]}}}),
+ "Not all locations found!");
// Test normal lookup of a small square of points as a sanity check.
@@ -31,16 +32,24 @@ step = 1;
numItems = 0;
t.drop();
-t.ensureIndex({ loc : "2d" }, { max : max + epsilon / 2, min : min - epsilon / 2 });
+t.ensureIndex({loc: "2d"}, {max: max + epsilon / 2, min: min - epsilon / 2});
-for(var x = min; x <= max; x += step){
- for(var y = min; y <= max; y += step){
- t.insert({ "loc" : { x : x, y : y } });
- numItems++;
- }
+for (var x = min; x <= max; x += step) {
+ for (var y = min; y <= max; y += step) {
+ t.insert({"loc": {x: x, y: y}});
+ numItems++;
+ }
}
-assert.eq( numItems, t.count({ loc : { $within : { $box : [[min - epsilon / 3,
- min - epsilon / 3],
- [max + epsilon / 3,
- max + epsilon / 3]] } } }), "Not all locations found!");
+assert.eq(numItems,
+ t.count({
+ loc: {
+ $within: {
+ $box: [
+ [min - epsilon / 3, min - epsilon / 3],
+ [max + epsilon / 3, max + epsilon / 3]
+ ]
+ }
+ }
+ }),
+ "Not all locations found!");
diff --git a/jstests/core/geo_fiddly_box2.js b/jstests/core/geo_fiddly_box2.js
index c0be0f2c8dc..6a40b5032b5 100644
--- a/jstests/core/geo_fiddly_box2.js
+++ b/jstests/core/geo_fiddly_box2.js
@@ -1,32 +1,33 @@
// Reproduces simple test for SERVER-2115
-// The setup to reproduce is to create a set of points and a really big bounds so that we are required to do
+// The setup to reproduce is to create a set of points and a really big bounds so that we are
+// required to do
// exact lookups on the points to get correct results.
t = db.geo_fiddly_box2;
t.drop();
-t.insert( { "letter" : "S", "position" : [ -3, 0 ] } );
-t.insert( { "letter" : "C", "position" : [ -2, 0 ] } );
-t.insert( { "letter" : "R", "position" : [ -1, 0 ] } );
-t.insert( { "letter" : "A", "position" : [ 0, 0 ] } );
-t.insert( { "letter" : "B", "position" : [ 1, 0 ] } );
-t.insert( { "letter" : "B", "position" : [ 2, 0 ] } );
-t.insert( { "letter" : "L", "position" : [ 3, 0 ] } );
-t.insert( { "letter" : "E", "position" : [ 4, 0 ] } );
+t.insert({"letter": "S", "position": [-3, 0]});
+t.insert({"letter": "C", "position": [-2, 0]});
+t.insert({"letter": "R", "position": [-1, 0]});
+t.insert({"letter": "A", "position": [0, 0]});
+t.insert({"letter": "B", "position": [1, 0]});
+t.insert({"letter": "B", "position": [2, 0]});
+t.insert({"letter": "L", "position": [3, 0]});
+t.insert({"letter": "E", "position": [4, 0]});
-t.ensureIndex( { position : "2d" } );
-result = t.find( { "position" : { "$within" : { "$box" : [ [ -3, -1 ], [ 0, 1 ] ] } } } );
-assert.eq( 4, result.count() );
+t.ensureIndex({position: "2d"});
+result = t.find({"position": {"$within": {"$box": [[-3, -1], [0, 1]]}}});
+assert.eq(4, result.count());
-t.dropIndex( { position : "2d" } );
-t.ensureIndex( { position : "2d" }, { min : -10000000, max : 10000000 } );
+t.dropIndex({position: "2d"});
+t.ensureIndex({position: "2d"}, {min: -10000000, max: 10000000});
-result = t.find( { "position" : { "$within" : { "$box" : [ [ -3, -1 ], [ 0, 1 ] ] } } } );
-assert.eq( 4, result.count() );
+result = t.find({"position": {"$within": {"$box": [[-3, -1], [0, 1]]}}});
+assert.eq(4, result.count());
-t.dropIndex( { position : "2d" } );
-t.ensureIndex( { position : "2d" }, { min : -1000000000, max : 1000000000 } );
+t.dropIndex({position: "2d"});
+t.ensureIndex({position: "2d"}, {min: -1000000000, max: 1000000000});
-result = t.find( { "position" : { "$within" : { "$box" : [ [ -3, -1 ], [ 0, 1 ] ] } } } );
-assert.eq( 4, result.count() );
+result = t.find({"position": {"$within": {"$box": [[-3, -1], [0, 1]]}}});
+assert.eq(4, result.count());
diff --git a/jstests/core/geo_group.js b/jstests/core/geo_group.js
index b19c35ec738..9ee5a76b7ea 100644
--- a/jstests/core/geo_group.js
+++ b/jstests/core/geo_group.js
@@ -3,35 +3,39 @@ t.drop();
n = 1;
var bulk = t.initializeUnorderedBulkOp();
-for ( var x=-100; x<100; x+=2 ){
- for ( var y=-100; y<100; y+=2 ){
- bulk.insert( { _id : n++ , loc : [ x , y ] } );
+for (var x = -100; x < 100; x += 2) {
+ for (var y = -100; y < 100; y += 2) {
+ bulk.insert({_id: n++, loc: [x, y]});
}
}
assert.writeOK(bulk.execute());
-t.ensureIndex( { loc : "2d" } );
+t.ensureIndex({loc: "2d"});
// Test basic count with $near
assert.eq(t.find().count(), 10000);
-assert.eq(t.find( { loc : { $within : {$center : [[56,8], 10]}}}).count(), 81);
-assert.eq(t.find( { loc : { $near : [56, 8, 10] } } ).count(), 81);
+assert.eq(t.find({loc: {$within: {$center: [[56, 8], 10]}}}).count(), 81);
+assert.eq(t.find({loc: {$near: [56, 8, 10]}}).count(), 81);
// Test basic group that effectively does a count
-assert.eq(
- t.group( {
- reduce : function (obj, prev) { prev.sums = { count : prev.sums.count + 1}; },
- initial : { sums:{count:0} } }
- ),
- [ { "sums" : { "count" : 10000 } } ]
-);
+assert.eq(t.group({
+ reduce: function(obj, prev) {
+ prev.sums = {
+ count: prev.sums.count + 1
+ };
+ },
+ initial: {sums: {count: 0}}
+}),
+ [{"sums": {"count": 10000}}]);
// Test basic group + $near that does a count
-assert.eq(
- t.group( {
- reduce : function (obj, prev) { prev.sums = { count : prev.sums.count + 1}; },
- initial : { sums:{count:0} },
- cond : { loc : { $near : [56, 8, 10] } } }
- ),
- [ { "sums" : { "count" : 81 } } ]
-);
+assert.eq(t.group({
+ reduce: function(obj, prev) {
+ prev.sums = {
+ count: prev.sums.count + 1
+ };
+ },
+ initial: {sums: {count: 0}},
+ cond: {loc: {$near: [56, 8, 10]}}
+}),
+ [{"sums": {"count": 81}}]);
diff --git a/jstests/core/geo_haystack1.js b/jstests/core/geo_haystack1.js
index f29f407c52e..5abb166a6f9 100644
--- a/jstests/core/geo_haystack1.js
+++ b/jstests/core/geo_haystack1.js
@@ -2,39 +2,41 @@
t = db.geo_haystack1;
t.drop();
-function distance( a , b ){
+function distance(a, b) {
var x = a[0] - b[0];
var y = a[1] - b[1];
- return Math.sqrt( ( x * x ) + ( y * y ) );
+ return Math.sqrt((x * x) + (y * y));
}
-function distanceTotal( a , arr , f ){
+function distanceTotal(a, arr, f) {
var total = 0;
- for ( var i=0; i<arr.length; i++ ){
- total += distance( a , arr[i][f] );
+ for (var i = 0; i < arr.length; i++) {
+ total += distance(a, arr[i][f]);
}
return total;
}
-queries = [
- { near : [ 7 , 8 ] , maxDistance : 3 , search : { z : 3 } } ,
-];
-
-answers = queries.map( function(){ return { totalDistance : 0 , results : [] }; } );
+queries = [{near: [7, 8], maxDistance: 3, search: {z: 3}}, ];
+answers = queries.map(function() {
+ return {
+ totalDistance: 0,
+ results: []
+ };
+});
n = 0;
-for ( x=0; x<20; x++ ){
- for ( y=0; y<20; y++ ){
- t.insert( { _id : n , loc : [ x , y ] , z : n % 5 } );
-
- for ( i=0; i<queries.length; i++ ){
- var d = distance( queries[i].near , [ x , y ] );
- if ( d > queries[i].maxDistance )
+for (x = 0; x < 20; x++) {
+ for (y = 0; y < 20; y++) {
+ t.insert({_id: n, loc: [x, y], z: n % 5});
+
+ for (i = 0; i < queries.length; i++) {
+ var d = distance(queries[i].near, [x, y]);
+ if (d > queries[i].maxDistance)
continue;
- if ( queries[i].search.z != n % 5 )
+ if (queries[i].search.z != n % 5)
continue;
- answers[i].results.push( { _id : n , loc : [ x , y ]} );
+ answers[i].results.push({_id: n, loc: [x, y]});
answers[i].totalDistance += d;
}
@@ -42,18 +44,18 @@ for ( x=0; x<20; x++ ){
}
}
-t.ensureIndex( { loc : "geoHaystack" , z : 1 } , { bucketSize : .7 } );
-
-for ( i=0; i<queries.length; i++ ){
- print( "---------" );
- printjson( queries[i] );
- res = t.runCommand( "geoSearch" , queries[i] );
- print( "\t" + tojson( res.stats ) );
- print( "\tshould have: " + answers[i].results.length + "\t actually got: " + res.stats.n );
- assert.eq( answers[i].results.length , res.stats.n, "num:"+ i + " number matches" );
- assert.eq( answers[i].totalDistance , distanceTotal( queries[i].near , res.results , "loc" ), "num:"+ i + " totalDistance" );
- //printjson( res );
- //printjson( answers[i].length );
+t.ensureIndex({loc: "geoHaystack", z: 1}, {bucketSize: .7});
+
+for (i = 0; i < queries.length; i++) {
+ print("---------");
+ printjson(queries[i]);
+ res = t.runCommand("geoSearch", queries[i]);
+ print("\t" + tojson(res.stats));
+ print("\tshould have: " + answers[i].results.length + "\t actually got: " + res.stats.n);
+ assert.eq(answers[i].results.length, res.stats.n, "num:" + i + " number matches");
+ assert.eq(answers[i].totalDistance,
+ distanceTotal(queries[i].near, res.results, "loc"),
+ "num:" + i + " totalDistance");
+ // printjson( res );
+ // printjson( answers[i].length );
}
-
-
diff --git a/jstests/core/geo_haystack2.js b/jstests/core/geo_haystack2.js
index dee935b9b2b..cb684239a63 100644
--- a/jstests/core/geo_haystack2.js
+++ b/jstests/core/geo_haystack2.js
@@ -2,40 +2,41 @@
t = db.geo_haystack2;
t.drop();
-function distance( a , b ){
+function distance(a, b) {
var x = a[0] - b[0];
var y = a[1] - b[1];
- return Math.sqrt( ( x * x ) + ( y * y ) );
+ return Math.sqrt((x * x) + (y * y));
}
-function distanceTotal( a , arr , f ){
+function distanceTotal(a, arr, f) {
var total = 0;
- for ( var i=0; i<arr.length; i++ ){
- total += distance( a , arr[i][f] );
+ for (var i = 0; i < arr.length; i++) {
+ total += distance(a, arr[i][f]);
}
return total;
}
-queries = [
- { near : [ 7 , 8 ] , maxDistance : 3 , search : { z : 3 } } ,
-];
-
-answers = queries.map( function(){ return { totalDistance : 0 , results : [] }; } );
+queries = [{near: [7, 8], maxDistance: 3, search: {z: 3}}, ];
+answers = queries.map(function() {
+ return {
+ totalDistance: 0,
+ results: []
+ };
+});
n = 0;
-for ( x=0; x<20; x++ ){
- for ( y=0; y<20; y++ ){
- t.insert( { _id : n , loc : [ x , y ] , z : [ n % 10 , ( n + 5 ) % 10 ] } );
-
- for ( i=0; i<queries.length; i++ ){
- var d = distance( queries[i].near , [ x , y ] );
- if ( d > queries[i].maxDistance )
+for (x = 0; x < 20; x++) {
+ for (y = 0; y < 20; y++) {
+ t.insert({_id: n, loc: [x, y], z: [n % 10, (n + 5) % 10]});
+
+ for (i = 0; i < queries.length; i++) {
+ var d = distance(queries[i].near, [x, y]);
+ if (d > queries[i].maxDistance)
continue;
- if ( queries[i].search.z != n % 10 &&
- queries[i].search.z != ( n + 5 ) % 10 )
+ if (queries[i].search.z != n % 10 && queries[i].search.z != (n + 5) % 10)
continue;
- answers[i].results.push( { _id : n , loc : [ x , y ] } );
+ answers[i].results.push({_id: n, loc: [x, y]});
answers[i].totalDistance += d;
}
@@ -43,18 +44,18 @@ for ( x=0; x<20; x++ ){
}
}
-t.ensureIndex( { loc : "geoHaystack" , z : 1 } , { bucketSize : .7 } );
-
-for ( i=0; i<queries.length; i++ ){
- print( "---------" );
- printjson( queries[i] );
- res = t.runCommand( "geoSearch" , queries[i] );
- print( "\t" + tojson( res.stats ) );
- print( "\tshould have: " + answers[i].results.length + "\t actually got: " + res.stats.n );
- assert.eq( answers[i].results.length , res.stats.n, "num:"+ i + " number matches" );
- assert.eq( answers[i].totalDistance , distanceTotal( queries[i].near , res.results , "loc" ), "num:"+ i + " totalDistance" );
- //printjson( res );
- //printjson( answers[i].length );
+t.ensureIndex({loc: "geoHaystack", z: 1}, {bucketSize: .7});
+
+for (i = 0; i < queries.length; i++) {
+ print("---------");
+ printjson(queries[i]);
+ res = t.runCommand("geoSearch", queries[i]);
+ print("\t" + tojson(res.stats));
+ print("\tshould have: " + answers[i].results.length + "\t actually got: " + res.stats.n);
+ assert.eq(answers[i].results.length, res.stats.n, "num:" + i + " number matches");
+ assert.eq(answers[i].totalDistance,
+ distanceTotal(queries[i].near, res.results, "loc"),
+ "num:" + i + " totalDistance");
+ // printjson( res );
+ // printjson( answers[i].length );
}
-
-
diff --git a/jstests/core/geo_haystack3.js b/jstests/core/geo_haystack3.js
index 4c55e94ad7f..1357ccf4f51 100644
--- a/jstests/core/geo_haystack3.js
+++ b/jstests/core/geo_haystack3.js
@@ -5,52 +5,50 @@ t.drop();
// Tests for geo haystack validity
//
-t.insert({ pos : "invalid" });
-assert.commandFailed(t.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 }));
+t.insert({pos: "invalid"});
+assert.commandFailed(t.ensureIndex({pos: "geoHaystack", type: 1}, {bucketSize: 1}));
t.drop();
-t.insert({ pos : [] });
-assert.commandFailed(t.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 }));
+t.insert({pos: []});
+assert.commandFailed(t.ensureIndex({pos: "geoHaystack", type: 1}, {bucketSize: 1}));
t.drop();
-t.insert({ pos : [1, 2] });
-assert.commandWorked(t.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 }));
+t.insert({pos: [1, 2]});
+assert.commandWorked(t.ensureIndex({pos: "geoHaystack", type: 1}, {bucketSize: 1}));
t.drop();
-t.insert({ pos : {x : 1, y : 2} });
-assert.commandWorked(t.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 }));
+t.insert({pos: {x: 1, y: 2}});
+assert.commandWorked(t.ensureIndex({pos: "geoHaystack", type: 1}, {bucketSize: 1}));
t.drop();
-t.insert({ pos : {x : 1, y : 2, z : 3} });
-assert.commandWorked(t.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 }));
+t.insert({pos: {x: 1, y: 2, z: 3}});
+assert.commandWorked(t.ensureIndex({pos: "geoHaystack", type: 1}, {bucketSize: 1}));
t.drop();
//
// Tests for geo haystack search
//
-t.insert({ pos : { long : 34, lat : 33 }});
-t.insert({ pos : { long : 34.2, lat : 33.3 }, type : ["bar", "restaurant" ]});
-t.insert({ pos : { long : 34.2, lat : 37.3 }, type : ["bar", "chicken" ]});
-t.insert({ pos : { long : 59.1, lat : 87.2 }, type : ["baz", "office" ]});
-t.ensureIndex({ pos : "geoHaystack", type : 1 }, { bucketSize : 1 });
+t.insert({pos: {long: 34, lat: 33}});
+t.insert({pos: {long: 34.2, lat: 33.3}, type: ["bar", "restaurant"]});
+t.insert({pos: {long: 34.2, lat: 37.3}, type: ["bar", "chicken"]});
+t.insert({pos: {long: 59.1, lat: 87.2}, type: ["baz", "office"]});
+t.ensureIndex({pos: "geoHaystack", type: 1}, {bucketSize: 1});
// This only matches the first insert. What do we want? First 3 or just the first?
-res = t.runCommand("geoSearch", { near : [33, 33], maxDistance : 6, search : {}, limit : 30 });
+res = t.runCommand("geoSearch", {near: [33, 33], maxDistance: 6, search: {}, limit: 30});
assert.eq(1, res.stats.n, "Right # of matches");
assert.eq(34, res.results[0].pos.long, "expected longitude");
assert.eq(33, res.results[0].pos.lat, "expected latitude");
// This matches the middle 2 of the 4 elements above.
-res = t.runCommand("geoSearch", { near : [33, 33], maxDistance : 6, search : { type : "bar" },
- limit : 2 });
+res = t.runCommand("geoSearch", {near: [33, 33], maxDistance: 6, search: {type: "bar"}, limit: 2});
assert.eq(2, res.stats.n, "Right # of matches");
assert.eq("bar", res.results[0].type[0], "expected value for type");
assert.eq("bar", res.results[1].type[0], "expected value for type");
assert.neq(res.results[0].type[1], res.results[1].type[1], "should get 2 diff results");
// This is a test for the limit being reached/only 1 returned.
-res = t.runCommand("geoSearch", { near : [33, 33], maxDistance : 6, search : { type : "bar" },
- limit : 1 });
+res = t.runCommand("geoSearch", {near: [33, 33], maxDistance: 6, search: {type: "bar"}, limit: 1});
assert.eq(1, res.stats.n, "Right # of matches");
assert.eq("bar", res.results[0].type[0], "expected value for type");
diff --git a/jstests/core/geo_invalid_2d_params.js b/jstests/core/geo_invalid_2d_params.js
index 72bb8e443fe..738fa1ad353 100644
--- a/jstests/core/geo_invalid_2d_params.js
+++ b/jstests/core/geo_invalid_2d_params.js
@@ -1,8 +1,8 @@
var t = db.geo_invalid_2d_params;
t.drop();
-assert.commandFailed(t.ensureIndex({ loc : "2d" }, { bits : 33 }));
-assert.commandFailed(t.ensureIndex({ loc : "2d" }, { min : -1, max : -1 }));
-assert.commandFailed(t.ensureIndex({ loc : "2d" }, { bits : -1 }));
-assert.commandFailed(t.ensureIndex({ loc : "2d" }, { min : 10, max : 9 }));
-assert.commandWorked(t.ensureIndex({ loc : "2d" }, { bits : 1, min : -1, max : 1 })); \ No newline at end of file
+assert.commandFailed(t.ensureIndex({loc: "2d"}, {bits: 33}));
+assert.commandFailed(t.ensureIndex({loc: "2d"}, {min: -1, max: -1}));
+assert.commandFailed(t.ensureIndex({loc: "2d"}, {bits: -1}));
+assert.commandFailed(t.ensureIndex({loc: "2d"}, {min: 10, max: 9}));
+assert.commandWorked(t.ensureIndex({loc: "2d"}, {bits: 1, min: -1, max: 1})); \ No newline at end of file
diff --git a/jstests/core/geo_invalid_polygon.js b/jstests/core/geo_invalid_polygon.js
index 1e31e0b2fe5..c3d244a504f 100644
--- a/jstests/core/geo_invalid_polygon.js
+++ b/jstests/core/geo_invalid_polygon.js
@@ -7,15 +7,7 @@ t.drop();
// "Exterior shell of polygon is invalid".
var geometry = {
type: "Polygon",
- coordinates: [
- [
- [ 0, 0 ],
- [ 0, 1 ],
- [ 1, 1 ],
- [-2,-1 ],
- [ 0, 0 ]
- ]
- ]
+ coordinates: [[[0, 0], [0, 1], [1, 1], [-2, -1], [0, 0]]]
};
t.insert({_id: 42, geometry: geometry});
@@ -23,8 +15,5 @@ var err = t.createIndex({geometry: '2dsphere'});
assert.commandFailed(err);
// Document's _id should be in error message.
-assert(
- -1 != err.errmsg.indexOf('42'),
- "Error message didn't contain document _id.\nMessage: \"" + err.errmsg
- + '"\n'
-);
+assert(-1 != err.errmsg.indexOf('42'),
+ "Error message didn't contain document _id.\nMessage: \"" + err.errmsg + '"\n');
diff --git a/jstests/core/geo_mapreduce.js b/jstests/core/geo_mapreduce.js
index 810c2605426..e15a4911763 100644
--- a/jstests/core/geo_mapreduce.js
+++ b/jstests/core/geo_mapreduce.js
@@ -4,46 +4,50 @@
// setup test collection
db.apples.drop();
-db.apples.insert( { "geo" : { "lat" : 32.68331909, "long" : 69.41610718 }, "apples" : 5 } );
-db.apples.insert( { "geo" : { "lat" : 35.01860809, "long" : 70.92027283 }, "apples" : 2 } );
-db.apples.insert( { "geo" : { "lat" : 31.11639023, "long" : 64.19970703 }, "apples" : 11 } );
-db.apples.insert( { "geo" : { "lat" : 32.64500046, "long" : 69.36251068 }, "apples" : 4 } );
-db.apples.insert( { "geo" : { "lat" : 33.23638916, "long" : 69.81360626 }, "apples" : 9 } );
-db.apples.ensureIndex( { "geo" : "2d" } );
-
-center = [ 32.68, 69.41 ];
-radius = 10 / 111; // 10km; 1 arcdegree ~= 111km
-geo_query = { geo : { '$within' : { '$center' : [ center, radius ] } } };
+db.apples.insert({"geo": {"lat": 32.68331909, "long": 69.41610718}, "apples": 5});
+db.apples.insert({"geo": {"lat": 35.01860809, "long": 70.92027283}, "apples": 2});
+db.apples.insert({"geo": {"lat": 31.11639023, "long": 64.19970703}, "apples": 11});
+db.apples.insert({"geo": {"lat": 32.64500046, "long": 69.36251068}, "apples": 4});
+db.apples.insert({"geo": {"lat": 33.23638916, "long": 69.81360626}, "apples": 9});
+db.apples.ensureIndex({"geo": "2d"});
+
+center = [32.68, 69.41];
+radius = 10 / 111; // 10km; 1 arcdegree ~= 111km
+geo_query = {
+ geo: {'$within': {'$center': [center, radius]}}
+};
// geo query on collection works fine
-res = db.apples.find( geo_query );
-assert.eq( 2, res.count() );
+res = db.apples.find(geo_query);
+assert.eq(2, res.count());
// map function
m = function() {
- emit( null, { "apples" : this.apples } );
+ emit(null, {"apples": this.apples});
};
// reduce function
r = function(key, values) {
- var total = 0;
- for ( var i = 0; i < values.length; i++ ) {
- total += values[i].apples;
- }
- return { "apples" : total };
+ var total = 0;
+ for (var i = 0; i < values.length; i++) {
+ total += values[i].apples;
+ }
+ return {
+ "apples": total
+ };
};
// mapreduce without geo query works fine
-res = db.apples.mapReduce( m, r, { out : { inline : 1 } } );
+res = db.apples.mapReduce(m, r, {out: {inline: 1}});
-printjson( res );
+printjson(res);
total = res.results[0];
-assert.eq( 31, total.value.apples );
+assert.eq(31, total.value.apples);
// mapreduce with regular query works fine too
-res = db.apples.mapReduce( m, r, { out : { inline : 1 }, query : { apples : { '$lt' : 9 } } } );
+res = db.apples.mapReduce(m, r, {out: {inline: 1}, query: {apples: {'$lt': 9}}});
total = res.results[0];
-assert.eq( 11, total.value.apples );
+assert.eq(11, total.value.apples);
// mapreduce with geo query gives error on mongodb version 1.6.2
// uncaught exception: map reduce failed: {
@@ -51,6 +55,6 @@ assert.eq( 11, total.value.apples );
// "assertionCode" : 13285,
// "errmsg" : "db assertion failure",
// "ok" : 0 }
-res = db.apples.mapReduce( m, r, { out : { inline : 1 }, query : geo_query } );
+res = db.apples.mapReduce(m, r, {out: {inline: 1}, query: geo_query});
total = res.results[0];
-assert.eq( 9, total.value.apples );
+assert.eq(9, total.value.apples);
diff --git a/jstests/core/geo_mapreduce2.js b/jstests/core/geo_mapreduce2.js
index 3911d02cf81..d71eb8ef216 100644
--- a/jstests/core/geo_mapreduce2.js
+++ b/jstests/core/geo_mapreduce2.js
@@ -3,34 +3,39 @@
var coll = db.geoMR2;
coll.drop();
-for( var i = 0; i < 300; i++ )
- coll.insert({ i : i, location : [ 10, 20 ] });
-
-coll.ensureIndex({ location : "2d" });
+for (var i = 0; i < 300; i++)
+ coll.insert({i: i, location: [10, 20]});
+
+coll.ensureIndex({location: "2d"});
// map function
m = function() {
- emit( null, { count : this.i } );
+ emit(null, {count: this.i});
};
// reduce function
-r = function( key, values ) {
-
+r = function(key, values) {
+
var total = 0;
- for ( var i = 0; i < values.length; i++ ) {
+ for (var i = 0; i < values.length; i++) {
total += values[i].count;
}
-
- return { count : total };
+
+ return {
+ count: total
+ };
};
-try{ coll.mapReduce( m, r,
- { out : coll.getName() + "_mr",
- sort : { _id : 1 },
- query : { 'location' : { $within : { $centerSphere : [[ 10, 20 ], 0.01 ] } } } });
+try {
+ coll.mapReduce(m,
+ r,
+ {
+ out: coll.getName() + "_mr",
+ sort: {_id: 1},
+ query: {'location': {$within: {$centerSphere: [[10, 20], 0.01]}}}
+ });
-}
-catch( e ){
+} catch (e) {
// This should occur, since we can't in-mem sort for mreduce
- printjson( e );
+ printjson(e);
}
diff --git a/jstests/core/geo_max.js b/jstests/core/geo_max.js
index 1dcbf39c907..03771ea34d4 100644
--- a/jstests/core/geo_max.js
+++ b/jstests/core/geo_max.js
@@ -5,47 +5,58 @@ load("jstests/libs/geo_near_random.js");
var test = new GeoNearRandomTest("geo_near_max");
-test.insertPts(/*numPts*/1000, /*indexBounds*/{min:-180, max:180}, /*scale*/0.9);
+test.insertPts(/*numPts*/ 1000, /*indexBounds*/ {min: -180, max: 180}, /*scale*/ 0.9);
-test.t.insert({loc: [ 180, 0]});
+test.t.insert({loc: [180, 0]});
test.t.insert({loc: [-180, 0]});
-test.t.insert({loc: [ 179.999, 0]});
+test.t.insert({loc: [179.999, 0]});
test.t.insert({loc: [-179.999, 0]});
-assertXIsNegative = function(obj) { assert.lt(obj.loc[0], 0); };
-assertXIsPositive = function(obj) { assert.gt(obj.loc[0], 0); };
+assertXIsNegative = function(obj) {
+ assert.lt(obj.loc[0], 0);
+};
+assertXIsPositive = function(obj) {
+ assert.gt(obj.loc[0], 0);
+};
-assert.eq(test.t.count({loc:{$within: {$center:[[ 180, 0], 1]}}}), 2);
-assert.eq(test.t.count({loc:{$within: {$center:[[-180, 0], 1]}}}), 2);
-test.t.find({loc:{$within: {$center:[[ 180, 0], 1]}}}).forEach(assertXIsPositive);
-test.t.find({loc:{$within: {$center:[[-180, 0], 1]}}}).forEach(assertXIsNegative);
+assert.eq(test.t.count({loc: {$within: {$center: [[180, 0], 1]}}}), 2);
+assert.eq(test.t.count({loc: {$within: {$center: [[-180, 0], 1]}}}), 2);
+test.t.find({loc: {$within: {$center: [[180, 0], 1]}}}).forEach(assertXIsPositive);
+test.t.find({loc: {$within: {$center: [[-180, 0], 1]}}}).forEach(assertXIsNegative);
-var oneDegree = Math.PI / 180; // in radians
+var oneDegree = Math.PI / 180; // in radians
// errors out due to SERVER-1760
if (0) {
-assert.eq(test.t.count({loc:{$within: {$centerSphere:[[ 180, 0], oneDegree]}}}), 2);
-assert.eq(test.t.count({loc:{$within: {$centerSphere:[[-180, 0], oneDegree]}}}), 2);
-test.t.find({loc:{$within: {$centerSphere:[[ 180, 0], oneDegree]}}}).forEach(assertXIsPositive);
-test.t.find({loc:{$within: {$centerSphere:[[-180, 0], oneDegree]}}}).forEach(assertXIsNegative);
+ assert.eq(test.t.count({loc: {$within: {$centerSphere: [[180, 0], oneDegree]}}}), 2);
+ assert.eq(test.t.count({loc: {$within: {$centerSphere: [[-180, 0], oneDegree]}}}), 2);
+ test.t.find({loc: {$within: {$centerSphere: [[180, 0], oneDegree]}}})
+ .forEach(assertXIsPositive);
+ test.t.find({loc: {$within: {$centerSphere: [[-180, 0], oneDegree]}}})
+ .forEach(assertXIsNegative);
}
-assert.eq(test.t.count({loc:{$within: {$box:[[ 180, 0.1], [ 179, -0.1]]}}}), 2);
-assert.eq(test.t.count({loc:{$within: {$box:[[-180, 0.1], [-179, -0.1]]}}}), 2);
-test.t.find({loc:{$within: {$box:[[ 180, 0.1], [ 179, -0.1]]}}}).forEach(assertXIsPositive);
-test.t.find({loc:{$within: {$box:[[-180, 0.1], [-179, -0.1]]}}}).forEach(assertXIsNegative);
+assert.eq(test.t.count({loc: {$within: {$box: [[180, 0.1], [179, -0.1]]}}}), 2);
+assert.eq(test.t.count({loc: {$within: {$box: [[-180, 0.1], [-179, -0.1]]}}}), 2);
+test.t.find({loc: {$within: {$box: [[180, 0.1], [179, -0.1]]}}}).forEach(assertXIsPositive);
+test.t.find({loc: {$within: {$box: [[-180, 0.1], [-179, -0.1]]}}}).forEach(assertXIsNegative);
-assert.eq(test.t.count({loc:{$within: {$polygon:[[ 180, 0], [ 179, 0], [ 179.5, 0.5]]}}}), 2);
-assert.eq(test.t.count({loc:{$within: {$polygon:[[-180, 0], [-179, 0], [ 179.5, 0.5]]}}}), 2);
-test.t.find({loc:{$within: {$polygon:[[ 180, 0], [ 179, 0], [ 179.5, 0.5]]}}}).forEach(assertXIsPositive);
-test.t.find({loc:{$within: {$polygon:[[-180, 0], [-179, 0], [ 179.5, 0.5]]}}}).forEach(assertXIsNegative);
+assert.eq(test.t.count({loc: {$within: {$polygon: [[180, 0], [179, 0], [179.5, 0.5]]}}}), 2);
+assert.eq(test.t.count({loc: {$within: {$polygon: [[-180, 0], [-179, 0], [179.5, 0.5]]}}}), 2);
+test.t.find({loc: {$within: {$polygon: [[180, 0], [179, 0], [179.5, 0.5]]}}})
+ .forEach(assertXIsPositive);
+test.t.find({loc: {$within: {$polygon: [[-180, 0], [-179, 0], [179.5, 0.5]]}}})
+ .forEach(assertXIsNegative);
-assert.eq(test.t.find({loc:{$near:[ 180, 0]}}, {_id:0}).limit(2).toArray(), [{loc: [ 180, 0]}, {loc: [ 179.999, 0]}]);
-assert.eq(test.t.find({loc:{$near:[-180, 0]}}, {_id:0}).limit(2).toArray(), [{loc: [-180, 0]}, {loc: [-179.999, 0]}]);
+assert.eq(test.t.find({loc: {$near: [180, 0]}}, {_id: 0}).limit(2).toArray(),
+ [{loc: [180, 0]}, {loc: [179.999, 0]}]);
+assert.eq(test.t.find({loc: {$near: [-180, 0]}}, {_id: 0}).limit(2).toArray(),
+ [{loc: [-180, 0]}, {loc: [-179.999, 0]}]);
// These will need to change when SERVER-1760 is fixed
-printjson(test.t.find({loc:{$nearSphere:[ 180, 0]}}, {_id:0}).limit(2).explain());
-assert.eq(test.t.find({loc:{$nearSphere:[ 180, 0]}}, {_id:0}).limit(2).toArray(), [{loc: [ 180, 0]}, {loc: [ 179.999, 0]}]);
-printjson(test.t.find({loc:{$nearSphere:[-180, 0]}}, {_id:0}).limit(2).explain());
-assert.eq(test.t.find({loc:{$nearSphere:[-180, 0]}}, {_id:0}).limit(2).toArray(), [{loc: [-180, 0]}, {loc: [-179.999, 0]}]);
-
+printjson(test.t.find({loc: {$nearSphere: [180, 0]}}, {_id: 0}).limit(2).explain());
+assert.eq(test.t.find({loc: {$nearSphere: [180, 0]}}, {_id: 0}).limit(2).toArray(),
+ [{loc: [180, 0]}, {loc: [179.999, 0]}]);
+printjson(test.t.find({loc: {$nearSphere: [-180, 0]}}, {_id: 0}).limit(2).explain());
+assert.eq(test.t.find({loc: {$nearSphere: [-180, 0]}}, {_id: 0}).limit(2).toArray(),
+ [{loc: [-180, 0]}, {loc: [-179.999, 0]}]);
diff --git a/jstests/core/geo_mindistance.js b/jstests/core/geo_mindistance.js
index b429eacb708..6a2329bc524 100644
--- a/jstests/core/geo_mindistance.js
+++ b/jstests/core/geo_mindistance.js
@@ -8,10 +8,11 @@ t.drop();
// Useful constants and functions.
//
-var km = 1000,
- earthRadiusMeters = 6378.1 * km;
+var km = 1000, earthRadiusMeters = 6378.1 * km;
-function metersToRadians(m) { return m / earthRadiusMeters; }
+function metersToRadians(m) {
+ return m / earthRadiusMeters;
+}
/* Count documents within some radius of (0, 0), in kilometers.
* With this function we can use the existing $maxDistance option to test
@@ -46,8 +47,11 @@ for (var x = 0; x <= 10; x += 1) {
/* $minDistance is supported for 2dsphere index only, not 2d or geoHaystack. */
t.ensureIndex({loc: "2dsphere"});
-var n_docs = t.count(),
- geoJSONPoint = {type: 'Point', coordinates: [0, 0]},
+var n_docs = t.count(), geoJSONPoint =
+ {
+ type: 'Point',
+ coordinates: [0, 0]
+ },
legacyPoint = [0, 0];
//
@@ -55,31 +59,23 @@ var n_docs = t.count(),
// min/maxDistance are in meters.
//
-var n_min1400_count = t.find({loc: {
- $near: {$geometry: geoJSONPoint, $minDistance: 1400 * km
-}}}).count();
-
-assert.eq(
- n_docs - n_docs_within(1400),
- n_min1400_count,
- "Expected " + (n_docs - n_docs_within(1400))
- + " points $near (0, 0) with $minDistance 1400 km, got "
- + n_min1400_count
-);
-
-var n_bw500_and_1000_count = t.find({loc: {
- $near: {$geometry: geoJSONPoint,
- $minDistance: 500 * km,
- $maxDistance: 1000 * km
-}}}).count();
-
-assert.eq(
- n_docs_within(1000) - n_docs_within(500),
- n_bw500_and_1000_count,
- "Expected " + (n_docs_within(1000) - n_docs_within(500))
- + " points $near (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got "
- + n_bw500_and_1000_count
-);
+var n_min1400_count =
+ t.find({loc: {$near: {$geometry: geoJSONPoint, $minDistance: 1400 * km}}}).count();
+
+assert.eq(n_docs - n_docs_within(1400),
+ n_min1400_count,
+ "Expected " + (n_docs - n_docs_within(1400)) +
+ " points $near (0, 0) with $minDistance 1400 km, got " + n_min1400_count);
+
+var n_bw500_and_1000_count = t.find({
+ loc: {$near: {$geometry: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km}}
+}).count();
+
+assert.eq(n_docs_within(1000) - n_docs_within(500),
+ n_bw500_and_1000_count,
+ "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
+ " points $near (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
+ n_bw500_and_1000_count);
//
// $nearSphere with 2dsphere index can take a legacy or GeoJSON point.
@@ -87,63 +83,49 @@ assert.eq(
// min/maxDistance are in radians.
//
-n_min1400_count = t.find({loc: {
- $nearSphere: legacyPoint, $minDistance: metersToRadians(1400 * km)
-}}).count();
-
-assert.eq(
- n_docs - n_docs_within(1400),
- n_min1400_count,
- "Expected " + (n_docs - n_docs_within(1400))
- + " points $nearSphere (0, 0) with $minDistance 1400 km, got "
- + n_min1400_count
-);
-
-n_bw500_and_1000_count = t.find({loc: {
- $nearSphere: legacyPoint,
- $minDistance: metersToRadians(500 * km),
- $maxDistance: metersToRadians(1000 * km)
-}}).count();
-
-assert.eq(
- n_docs_within(1000) - n_docs_within(500),
- n_bw500_and_1000_count,
- "Expected " + (n_docs_within(1000) - n_docs_within(500))
- + " points $nearSphere (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got "
- + n_bw500_and_1000_count
-);
+n_min1400_count =
+ t.find({loc: {$nearSphere: legacyPoint, $minDistance: metersToRadians(1400 * km)}}).count();
+
+assert.eq(n_docs - n_docs_within(1400),
+ n_min1400_count,
+ "Expected " + (n_docs - n_docs_within(1400)) +
+ " points $nearSphere (0, 0) with $minDistance 1400 km, got " + n_min1400_count);
+
+n_bw500_and_1000_count = t.find({
+ loc: {
+ $nearSphere: legacyPoint,
+ $minDistance: metersToRadians(500 * km),
+ $maxDistance: metersToRadians(1000 * km)
+ }
+}).count();
+
+assert.eq(n_docs_within(1000) - n_docs_within(500),
+ n_bw500_and_1000_count,
+ "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
+ " points $nearSphere (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
+ n_bw500_and_1000_count);
//
// Test $nearSphere with GeoJSON point.
// min/maxDistance are in meters.
//
-n_min1400_count = t.find({loc: {
- $nearSphere: geoJSONPoint, $minDistance: 1400 * km
-}}).count();
-
-assert.eq(
- n_docs - n_docs_within(1400),
- n_min1400_count,
- "Expected " + (n_docs - n_docs_within(1400))
- + " points $nearSphere (0, 0) with $minDistance 1400 km, got "
- + n_min1400_count
-);
-
-n_bw500_and_1000_count = t.find({loc: {
- $nearSphere: geoJSONPoint,
- $minDistance: 500 * km,
- $maxDistance: 1000 * km
-}}).count();
-
-assert.eq(
- n_docs_within(1000) - n_docs_within(500),
- n_bw500_and_1000_count,
- "Expected " + (n_docs_within(1000) - n_docs_within(500))
- + " points $nearSphere (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got "
- + n_bw500_and_1000_count
-);
+n_min1400_count = t.find({loc: {$nearSphere: geoJSONPoint, $minDistance: 1400 * km}}).count();
+
+assert.eq(n_docs - n_docs_within(1400),
+ n_min1400_count,
+ "Expected " + (n_docs - n_docs_within(1400)) +
+ " points $nearSphere (0, 0) with $minDistance 1400 km, got " + n_min1400_count);
+
+n_bw500_and_1000_count =
+ t.find({loc: {$nearSphere: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km}})
+ .count();
+assert.eq(n_docs_within(1000) - n_docs_within(500),
+ n_bw500_and_1000_count,
+ "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
+ " points $nearSphere (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
+ n_bw500_and_1000_count);
//
// Test geoNear command with GeoJSON point.
@@ -156,13 +138,10 @@ var cmdResult = db.runCommand({
minDistance: 1400 * km,
spherical: true // spherical required for 2dsphere index
});
-assert.eq(
- n_docs - n_docs_within(1400),
- cmdResult.results.length,
- "Expected " + (n_docs - n_docs_within(1400))
- + " points geoNear (0, 0) with $minDistance 1400 km, got "
- + cmdResult.results.length
-);
+assert.eq(n_docs - n_docs_within(1400),
+ cmdResult.results.length,
+ "Expected " + (n_docs - n_docs_within(1400)) +
+ " points geoNear (0, 0) with $minDistance 1400 km, got " + cmdResult.results.length);
cmdResult = db.runCommand({
geoNear: t.getName(),
@@ -171,13 +150,11 @@ cmdResult = db.runCommand({
maxDistance: 1000 * km,
spherical: true
});
-assert.eq(
- n_docs_within(1000) - n_docs_within(500),
- cmdResult.results.length,
- "Expected " + (n_docs_within(1000) - n_docs_within(500))
- + " points geoNear (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got "
- + cmdResult.results.length
-);
+assert.eq(n_docs_within(1000) - n_docs_within(500),
+ cmdResult.results.length,
+ "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
+ " points geoNear (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
+ cmdResult.results.length);
//
// Test geoNear command with legacy point.
@@ -190,13 +167,10 @@ cmdResult = db.runCommand({
minDistance: metersToRadians(1400 * km),
spherical: true // spherical required for 2dsphere index
});
-assert.eq(
- n_docs - n_docs_within(1400),
- cmdResult.results.length,
- "Expected " + (n_docs - n_docs_within(1400))
- + " points geoNear (0, 0) with $minDistance 1400 km, got "
- + cmdResult.results.length
-);
+assert.eq(n_docs - n_docs_within(1400),
+ cmdResult.results.length,
+ "Expected " + (n_docs - n_docs_within(1400)) +
+ " points geoNear (0, 0) with $minDistance 1400 km, got " + cmdResult.results.length);
cmdResult = db.runCommand({
geoNear: t.getName(),
@@ -205,10 +179,8 @@ cmdResult = db.runCommand({
maxDistance: metersToRadians(1000 * km),
spherical: true
});
-assert.eq(
- n_docs_within(1000) - n_docs_within(500),
- cmdResult.results.length,
- "Expected " + (n_docs_within(1000) - n_docs_within(500))
- + " points geoNear (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got "
- + cmdResult.results.length
-);
+assert.eq(n_docs_within(1000) - n_docs_within(500),
+ cmdResult.results.length,
+ "Expected " + (n_docs_within(1000) - n_docs_within(500)) +
+ " points geoNear (0, 0) with $minDistance 500 km and $maxDistance 1000 km, got " +
+ cmdResult.results.length);
diff --git a/jstests/core/geo_mindistance_boundaries.js b/jstests/core/geo_mindistance_boundaries.js
index 80e933827b6..6cbae8015e9 100644
--- a/jstests/core/geo_mindistance_boundaries.js
+++ b/jstests/core/geo_mindistance_boundaries.js
@@ -12,79 +12,65 @@ t.ensureIndex({loc: "2dsphere"});
// Useful constants.
//
-var km = 1000,
- earthRadiusMeters = 6378.1 * km,
- geoJSONPoint = {type: 'Point', coordinates: [0, 0]},
+var km = 1000, earthRadiusMeters = 6378.1 * km, geoJSONPoint =
+ {
+ type: 'Point',
+ coordinates: [0, 0]
+ },
// One degree of longitude at the equator, about 111 km.
- degreeInMeters = 2 * Math.PI * earthRadiusMeters / 360,
- metersEpsilon = Number.MIN_VALUE;
+ degreeInMeters = 2 * Math.PI * earthRadiusMeters / 360, metersEpsilon = Number.MIN_VALUE;
/* Grow epsilon's exponent until epsilon exceeds the margin of error for the
* representation of degreeInMeters. The server uses 64-bit math, too, so we'll
* find the smallest epsilon the server can detect.
*/
-while (degreeInMeters + metersEpsilon == degreeInMeters) { metersEpsilon *= 2; }
+while (degreeInMeters + metersEpsilon == degreeInMeters) {
+ metersEpsilon *= 2;
+}
//
// Test boundary conditions for $near and GeoJSON, in meters.
//
-
// minDistance must be within the args to $near, not on the side.
-assert.throws(function() { t.find({loc:{$near:{$geometry: geoJSONPoint},
- $minDistance:0.1}}).itcount();});
+assert.throws(function() {
+ t.find({loc: {$near: {$geometry: geoJSONPoint}, $minDistance: 0.1}}).itcount();
+});
-assert.eq(
- 1, t.find({loc: {
- $near: {$geometry: geoJSONPoint,
- $minDistance: degreeInMeters
- }}}).itcount(),
- "Expected to find (0, 1) within $minDistance 1 degree from origin"
-);
+assert.eq(1,
+ t.find({loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters}}}).itcount(),
+ "Expected to find (0, 1) within $minDistance 1 degree from origin");
assert.eq(
- 1, t.find({loc: {
- $near: {$geometry: geoJSONPoint,
- $minDistance: degreeInMeters - metersEpsilon
- }}}).itcount(),
- "Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin"
-);
+ 1,
+ t.find({loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters - metersEpsilon}}})
+ .itcount(),
+ "Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin");
assert.eq(
- 0, t.find({loc: {
- $near: {$geometry: geoJSONPoint,
- $minDistance: degreeInMeters + metersEpsilon
- }}}).itcount(),
- "Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin"
-);
+ 0,
+ t.find({loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters + metersEpsilon}}})
+ .itcount(),
+ "Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin");
//
// Test boundary conditions for $nearSphere and GeoJSON, in meters.
//
-assert.eq(
- 1, t.find({loc: {
- $nearSphere: {$geometry: geoJSONPoint,
- $minDistance: degreeInMeters
- }}}).itcount(),
- "Expected to find (0, 1) within $minDistance 1 degree from origin"
-);
+assert.eq(1,
+ t.find({loc: {$nearSphere: {$geometry: geoJSONPoint, $minDistance: degreeInMeters}}})
+ .itcount(),
+ "Expected to find (0, 1) within $minDistance 1 degree from origin");
-assert.eq(
- 1, t.find({loc: {
- $nearSphere: geoJSONPoint,
- $minDistance: degreeInMeters - metersEpsilon
- }}).itcount(),
- "Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin"
-);
+assert.eq(1,
+ t.find({loc: {$nearSphere: geoJSONPoint, $minDistance: degreeInMeters - metersEpsilon}})
+ .itcount(),
+ "Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin");
-assert.eq(
- 0, t.find({loc: {
- $nearSphere: geoJSONPoint,
- $minDistance: degreeInMeters + metersEpsilon
- }}).itcount(),
- "Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin"
-);
+assert.eq(0,
+ t.find({loc: {$nearSphere: geoJSONPoint, $minDistance: degreeInMeters + metersEpsilon}})
+ .itcount(),
+ "Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin");
//
// Test boundary conditions for $nearSphere and a legacy point, in radians.
@@ -93,32 +79,22 @@ assert.eq(
// supported.
//
-var legacyPoint = [0, 0],
- degreeInRadians = 2 * Math.PI / 360,
- radiansEpsilon = Number.MIN_VALUE;
+var legacyPoint = [0, 0], degreeInRadians = 2 * Math.PI / 360, radiansEpsilon = Number.MIN_VALUE;
-while (1 + radiansEpsilon == 1) { radiansEpsilon *= 2; }
+while (1 + radiansEpsilon == 1) {
+ radiansEpsilon *= 2;
+}
-assert.eq(
- 1, t.find({loc: {
- $nearSphere: legacyPoint,
- $minDistance: degreeInRadians
- }}).itcount(),
- "Expected to find (0, 1) within $minDistance 1 degree from origin"
-);
+assert.eq(1,
+ t.find({loc: {$nearSphere: legacyPoint, $minDistance: degreeInRadians}}).itcount(),
+ "Expected to find (0, 1) within $minDistance 1 degree from origin");
-assert.eq(
- 1, t.find({loc: {
- $nearSphere: legacyPoint,
- $minDistance: degreeInRadians - radiansEpsilon
- }}).itcount(),
- "Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin"
-);
+assert.eq(1,
+ t.find({loc: {$nearSphere: legacyPoint, $minDistance: degreeInRadians - radiansEpsilon}})
+ .itcount(),
+ "Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin");
-assert.eq(
- 0, t.find({loc: {
- $nearSphere: legacyPoint,
- $minDistance: degreeInRadians + radiansEpsilon
- }}).itcount(),
- "Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin"
-);
+assert.eq(0,
+ t.find({loc: {$nearSphere: legacyPoint, $minDistance: degreeInRadians + radiansEpsilon}})
+ .itcount(),
+ "Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin");
diff --git a/jstests/core/geo_multikey0.js b/jstests/core/geo_multikey0.js
index 7d0ea57e329..827dd9a41a1 100644
--- a/jstests/core/geo_multikey0.js
+++ b/jstests/core/geo_multikey0.js
@@ -4,23 +4,28 @@ t = db.jstests_geo_multikey0;
t.drop();
// Check that conflicting constraints are satisfied by parallel array elements.
-t.save( {loc:[{x:20,y:30},{x:30,y:40}]} );
-assert.eq( 1, t.count( {loc:{x:20,y:30},$and:[{loc:{$gt:{x:20,y:35},$lt:{x:20,y:34}}}]} ) );
+t.save({loc: [{x: 20, y: 30}, {x: 30, y: 40}]});
+assert.eq(
+ 1, t.count({loc: {x: 20, y: 30}, $and: [{loc: {$gt: {x: 20, y: 35}, $lt: {x: 20, y: 34}}}]}));
-// Check that conflicting constraints are satisfied by parallel array elements with a 2d index on loc.
-if ( 0 ) { // SERVER-3793
-t.ensureIndex( {loc:'2d'} );
-assert.eq( 1, t.count( {loc:{x:20,y:30},$and:[{loc:{$gt:{x:20,y:35},$lt:{x:20,y:34}}}]} ) );
+// Check that conflicting constraints are satisfied by parallel array elements with a 2d index on
+// loc.
+if (0) { // SERVER-3793
+ t.ensureIndex({loc: '2d'});
+ assert.eq(
+ 1,
+ t.count({loc: {x: 20, y: 30}, $and: [{loc: {$gt: {x: 20, y: 35}, $lt: {x: 20, y: 34}}}]}));
}
t.drop();
// Check that conflicting constraints are satisfied by parallel array elements of x.
-t.save( {loc:[20,30],x:[1,2]} );
-assert.eq( 1, t.count( {loc:[20,30],x:{$gt:1.7,$lt:1.2}} ) );
+t.save({loc: [20, 30], x: [1, 2]});
+assert.eq(1, t.count({loc: [20, 30], x: {$gt: 1.7, $lt: 1.2}}));
-// Check that conflicting constraints are satisfied by parallel array elements of x with a 2d index on loc,x.
-if ( 0 ) { // SERVER-3793
-t.ensureIndex( {loc:'2d',x:1} );
-assert.eq( 1, t.count( {loc:[20,30],x:{$gt:1.7,$lt:1.2}} ) );
+// Check that conflicting constraints are satisfied by parallel array elements of x with a 2d index
+// on loc,x.
+if (0) { // SERVER-3793
+ t.ensureIndex({loc: '2d', x: 1});
+ assert.eq(1, t.count({loc: [20, 30], x: {$gt: 1.7, $lt: 1.2}}));
}
diff --git a/jstests/core/geo_multikey1.js b/jstests/core/geo_multikey1.js
index 7bf5cfaafe1..9c092f4ec31 100644
--- a/jstests/core/geo_multikey1.js
+++ b/jstests/core/geo_multikey1.js
@@ -5,15 +5,14 @@ t.drop();
locArr = [];
arr = [];
-for( i = 0; i < 10; ++i ) {
- locArr.push( [i,i+1] );
- arr.push( i );
+for (i = 0; i < 10; ++i) {
+ locArr.push([i, i + 1]);
+ arr.push(i);
}
-t.save( {loc:locArr,a:arr,b:arr,c:arr} );
+t.save({loc: locArr, a: arr, b: arr, c: arr});
// Parallel arrays are allowed for geo indexes.
-assert.commandWorked(t.ensureIndex( {loc:'2d',a:1,b:1,c:1} ));
+assert.commandWorked(t.ensureIndex({loc: '2d', a: 1, b: 1, c: 1}));
// Parallel arrays are not allowed for normal indexes.
-assert.commandFailed(t.ensureIndex( {loc:1,a:1,b:1,c:1} ));
-
+assert.commandFailed(t.ensureIndex({loc: 1, a: 1, b: 1, c: 1}));
diff --git a/jstests/core/geo_multinest0.js b/jstests/core/geo_multinest0.js
index c3f6fa5c29e..30b66b4adbb 100644
--- a/jstests/core/geo_multinest0.js
+++ b/jstests/core/geo_multinest0.js
@@ -3,58 +3,48 @@
t = db.geonest;
t.drop();
-t.insert( { zip : "10001", data : [ { loc : [ 10, 10 ], type : "home" },
- { loc : [ 50, 50 ], type : "work" } ] } );
-t.insert( { zip : "10002", data : [ { loc : [ 20, 20 ], type : "home" },
- { loc : [ 50, 50 ], type : "work" } ] } );
-var res = t.insert( { zip : "10003", data : [ { loc : [ 30, 30 ], type : "home" },
- { loc : [ 50, 50 ], type : "work" } ] } );
-assert.writeOK( res );
+t.insert({zip: "10001", data: [{loc: [10, 10], type: "home"}, {loc: [50, 50], type: "work"}]});
+t.insert({zip: "10002", data: [{loc: [20, 20], type: "home"}, {loc: [50, 50], type: "work"}]});
+var res =
+ t.insert({zip: "10003", data: [{loc: [30, 30], type: "home"}, {loc: [50, 50], type: "work"}]});
+assert.writeOK(res);
-assert.commandWorked(t.ensureIndex( { "data.loc" : "2d", zip : 1 } ));
-assert.eq( 2, t.getIndexKeys().length );
+assert.commandWorked(t.ensureIndex({"data.loc": "2d", zip: 1}));
+assert.eq(2, t.getIndexKeys().length);
-res = t.insert( { zip : "10004", data : [ { loc : [ 40, 40 ], type : "home" },
- { loc : [ 50, 50 ], type : "work" } ] } );
-assert.writeOK( res );
+res =
+ t.insert({zip: "10004", data: [{loc: [40, 40], type: "home"}, {loc: [50, 50], type: "work"}]});
+assert.writeOK(res);
// test normal access
-printjson( t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).toArray() );
-
-assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).count() );
-
-assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 45, 45 ], [ 50, 50 ] ] } } } ).count() );
-
-
+printjson(t.find({"data.loc": {$within: {$box: [[0, 0], [45, 45]]}}}).toArray());
+assert.eq(4, t.find({"data.loc": {$within: {$box: [[0, 0], [45, 45]]}}}).count());
+assert.eq(4, t.find({"data.loc": {$within: {$box: [[45, 45], [50, 50]]}}}).count());
// Try a complex nesting
t = db.geonest;
t.drop();
-t.insert( { zip : "10001", data : [ { loc : [ [ 10, 10 ], { lat : 50, long : 50 } ], type : "home" } ] } );
-t.insert( { zip : "10002", data : [ { loc : [ 20, 20 ], type : "home" },
- { loc : [ 50, 50 ], type : "work" } ] } );
-res = t.insert({ zip: "10003", data: [{ loc: [{ x: 30, y: 30 }, [ 50, 50 ]], type: "home" }]});
-assert( !res.hasWriteError() );
+t.insert({zip: "10001", data: [{loc: [[10, 10], {lat: 50, long: 50}], type: "home"}]});
+t.insert({zip: "10002", data: [{loc: [20, 20], type: "home"}, {loc: [50, 50], type: "work"}]});
+res = t.insert({zip: "10003", data: [{loc: [{x: 30, y: 30}, [50, 50]], type: "home"}]});
+assert(!res.hasWriteError());
-assert.commandWorked(t.ensureIndex( { "data.loc" : "2d", zip : 1 } ));
-assert.eq( 2, t.getIndexKeys().length );
+assert.commandWorked(t.ensureIndex({"data.loc": "2d", zip: 1}));
+assert.eq(2, t.getIndexKeys().length);
-res = t.insert( { zip : "10004", data : [ { loc : [ 40, 40 ], type : "home" },
- { loc : [ 50, 50 ], type : "work" } ] } );
+res =
+ t.insert({zip: "10004", data: [{loc: [40, 40], type: "home"}, {loc: [50, 50], type: "work"}]});
-assert.writeOK( res );
+assert.writeOK(res);
// test normal access
-printjson( t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).toArray() );
-
-assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 0, 0 ], [ 45, 45 ] ] } } } ).count() );
-
-assert.eq( 4, t.find( { "data.loc" : { $within : { $box : [ [ 45, 45 ], [ 50, 50 ] ] } } } ).count() );
-
+printjson(t.find({"data.loc": {$within: {$box: [[0, 0], [45, 45]]}}}).toArray());
+assert.eq(4, t.find({"data.loc": {$within: {$box: [[0, 0], [45, 45]]}}}).count());
+assert.eq(4, t.find({"data.loc": {$within: {$box: [[45, 45], [50, 50]]}}}).count());
diff --git a/jstests/core/geo_multinest1.js b/jstests/core/geo_multinest1.js
index f93138e1fd0..78e4c38e49b 100644
--- a/jstests/core/geo_multinest1.js
+++ b/jstests/core/geo_multinest1.js
@@ -3,34 +3,29 @@
t = db.multinest;
t.drop();
-t.insert( { zip : "10001", data : [ { loc : [ 10, 10 ], type : "home" },
- { loc : [ 29, 29 ], type : "work" } ] } );
-t.insert( { zip : "10002", data : [ { loc : [ 20, 20 ], type : "home" },
- { loc : [ 39, 39 ], type : "work" } ] } );
-var res = t.insert( { zip : "10003", data : [ { loc : [ 30, 30 ], type : "home" },
- { loc : [ 49, 49 ], type : "work" } ] } );
-assert.writeOK( res );
+t.insert({zip: "10001", data: [{loc: [10, 10], type: "home"}, {loc: [29, 29], type: "work"}]});
+t.insert({zip: "10002", data: [{loc: [20, 20], type: "home"}, {loc: [39, 39], type: "work"}]});
+var res =
+ t.insert({zip: "10003", data: [{loc: [30, 30], type: "home"}, {loc: [49, 49], type: "work"}]});
+assert.writeOK(res);
-assert.commandWorked(t.ensureIndex( { "data.loc" : "2d", zip : 1 } ));
-assert.eq( 2, t.getIndexKeys().length );
+assert.commandWorked(t.ensureIndex({"data.loc": "2d", zip: 1}));
+assert.eq(2, t.getIndexKeys().length);
-res = t.insert( { zip : "10004", data : [ { loc : [ 40, 40 ], type : "home" },
- { loc : [ 59, 59 ], type : "work" } ] } );
-assert.writeOK( res );
+res =
+ t.insert({zip: "10004", data: [{loc: [40, 40], type: "home"}, {loc: [59, 59], type: "work"}]});
+assert.writeOK(res);
// test normal access
-var result = t.find({ "data.loc" : { $near : [0, 0] } }).toArray();
+var result = t.find({"data.loc": {$near: [0, 0]}}).toArray();
-printjson( result );
+printjson(result);
-assert.eq( 4, result.length );
+assert.eq(4, result.length);
-var order = [ 1, 2, 3, 4 ];
+var order = [1, 2, 3, 4];
-for( var i = 0; i < result.length; i++ ){
- assert.eq( "1000" + order[i], result[i].zip );
+for (var i = 0; i < result.length; i++) {
+ assert.eq("1000" + order[i], result[i].zip);
}
-
-
-
diff --git a/jstests/core/geo_near_random1.js b/jstests/core/geo_near_random1.js
index 5c75b458957..1e7f2bb587d 100644
--- a/jstests/core/geo_near_random1.js
+++ b/jstests/core/geo_near_random1.js
@@ -5,23 +5,25 @@ var test = new GeoNearRandomTest("geo_near_random1");
test.insertPts(50);
-// test.testPt() runs geoNear commands at the given coordinates with
-// limits from 1 to nPts(# of inserted points). At the nth run, it
-// compares the first (n - 1) results with the result of the (n - 1)th
+// test.testPt() runs geoNear commands at the given coordinates with
+// limits from 1 to nPts(# of inserted points). At the nth run, it
+// compares the first (n - 1) results with the result of the (n - 1)th
// run to make sure they are identical. It also makes sure that the
// distances are in increasing order. The test runs in O(N^2).
// Test $near with a 2dindex
-test.testPt([0,0]);
+test.testPt([0, 0]);
test.testPt(test.mkPt());
test.testPt(test.mkPt());
test.testPt(test.mkPt());
test.testPt(test.mkPt());
-opts = {sphere: 1};
+opts = {
+ sphere: 1
+};
-// Test $nearSphere with a 2d index
-test.testPt([0,0], opts);
+// Test $nearSphere with a 2d index
+test.testPt([0, 0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
@@ -30,7 +32,7 @@ test.testPt(test.mkPt(), opts);
// Test $nearSphere with a 2dsphere index
assert.commandWorked(db.geo_near_random1.dropIndex({loc: '2d'}));
assert.commandWorked(db.geo_near_random1.ensureIndex({loc: '2dsphere'}));
-test.testPt([0,0], opts);
+test.testPt([0, 0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
diff --git a/jstests/core/geo_near_random2.js b/jstests/core/geo_near_random2.js
index af48c9d072e..0cbf374446d 100644
--- a/jstests/core/geo_near_random2.js
+++ b/jstests/core/geo_near_random2.js
@@ -5,15 +5,18 @@ var test = new GeoNearRandomTest("geo_near_random2");
test.insertPts(5000);
-// test.testPt() runs geoNear commands at the given coordinates with
-// limits from 1 to nPts(# of inserted points). At the nth run, it
-// compares the first (n - 1) results with the result of the (n - 1)th
+// test.testPt() runs geoNear commands at the given coordinates with
+// limits from 1 to nPts(# of inserted points). At the nth run, it
+// compares the first (n - 1) results with the result of the (n - 1)th
// run to make sure they are identical. It also makes sure that the
// distances are in increasing order. The test runs in O(N^2).
// Test $near with 2d index
-opts = {sphere: 0, nToTest: test.nPts*0.01};
-test.testPt([0,0], opts);
+opts = {
+ sphere: 0,
+ nToTest: test.nPts * 0.01
+};
+test.testPt([0, 0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
@@ -22,8 +25,8 @@ test.testPt(test.mkPt(), opts);
opts.sphere = 1;
// Test $nearSphere with 2d index
-test.testPt([0,0], opts);
-// test.mkPt(0.8) generates a random point in the maximum
+test.testPt([0, 0], opts);
+// test.mkPt(0.8) generates a random point in the maximum
// lat long bounds scaled by 0.8
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
@@ -33,7 +36,7 @@ test.testPt(test.mkPt(0.8), opts);
// Test $nearSphere with 2dsphere index
assert.commandWorked(db.geo_near_random2.dropIndex({loc: '2d'}));
assert.commandWorked(db.geo_near_random2.ensureIndex({loc: '2dsphere'}));
-test.testPt([0,0], opts);
+test.testPt([0, 0], opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
diff --git a/jstests/core/geo_nearwithin.js b/jstests/core/geo_nearwithin.js
index 2b0462ebe3d..69eaac51ffe 100644
--- a/jstests/core/geo_nearwithin.js
+++ b/jstests/core/geo_nearwithin.js
@@ -9,19 +9,31 @@ for (var x = -points; x < points; x += 1) {
}
}
-t.ensureIndex({ geo : "2d" });
+t.ensureIndex({geo: "2d"});
-resNear = db.runCommand({geoNear : t.getName(), near: [0, 0], query: {geo: {$within: {$center: [[0, 0], 1]}}}});
+resNear = db.runCommand(
+ {geoNear: t.getName(), near: [0, 0], query: {geo: {$within: {$center: [[0, 0], 1]}}}});
assert.eq(resNear.results.length, 5);
-resNear = db.runCommand({geoNear : t.getName(), near: [0, 0], query: {geo: {$within: {$center: [[0, 0], 0]}}}});
+resNear = db.runCommand(
+ {geoNear: t.getName(), near: [0, 0], query: {geo: {$within: {$center: [[0, 0], 0]}}}});
assert.eq(resNear.results.length, 1);
-resNear = db.runCommand({geoNear : t.getName(), near: [0, 0], query: {geo: {$within: {$center: [[1, 0], 0.5]}}}});
+resNear = db.runCommand(
+ {geoNear: t.getName(), near: [0, 0], query: {geo: {$within: {$center: [[1, 0], 0.5]}}}});
assert.eq(resNear.results.length, 1);
-resNear = db.runCommand({geoNear : t.getName(), near: [0, 0], query: {geo: {$within: {$center: [[1, 0], 1.5]}}}});
+resNear = db.runCommand(
+ {geoNear: t.getName(), near: [0, 0], query: {geo: {$within: {$center: [[1, 0], 1.5]}}}});
assert.eq(resNear.results.length, 9);
// We want everything distance >1 from us but <1.5
// These points are (-+1, -+1)
-resNear = db.runCommand({geoNear : t.getName(), near: [0, 0], query: {$and: [{geo: {$within: {$center: [[0, 0], 1.5]}}},
- {geo: {$not: {$within: {$center: [[0,0], 1]}}}}]}});
+resNear = db.runCommand({
+ geoNear: t.getName(),
+ near: [0, 0],
+ query: {
+ $and: [
+ {geo: {$within: {$center: [[0, 0], 1.5]}}},
+ {geo: {$not: {$within: {$center: [[0, 0], 1]}}}}
+ ]
+ }
+});
assert.eq(resNear.results.length, 4);
diff --git a/jstests/core/geo_oob_sphere.js b/jstests/core/geo_oob_sphere.js
index 7403cc99610..40249766355 100644
--- a/jstests/core/geo_oob_sphere.js
+++ b/jstests/core/geo_oob_sphere.js
@@ -5,29 +5,36 @@
t = db.geooobsphere;
t.drop();
-t.insert({ loc : { x : 30, y : 89 } });
-t.insert({ loc : { x : 30, y : 89 } });
-t.insert({ loc : { x : 30, y : 89 } });
-t.insert({ loc : { x : 30, y : 89 } });
-t.insert({ loc : { x : 30, y : 89 } });
-t.insert({ loc : { x : 30, y : 89 } });
-t.insert({ loc : { x : 30, y : 91 } });
+t.insert({loc: {x: 30, y: 89}});
+t.insert({loc: {x: 30, y: 89}});
+t.insert({loc: {x: 30, y: 89}});
+t.insert({loc: {x: 30, y: 89}});
+t.insert({loc: {x: 30, y: 89}});
+t.insert({loc: {x: 30, y: 89}});
+t.insert({loc: {x: 30, y: 91}});
-assert.commandWorked(t.ensureIndex({ loc : "2d" }));
+assert.commandWorked(t.ensureIndex({loc: "2d"}));
-assert.throws( function() { t.find({ loc : { $nearSphere : [ 30, 91 ], $maxDistance : 0.25 } }).count(); } );
+assert.throws(function() {
+ t.find({loc: {$nearSphere: [30, 91], $maxDistance: 0.25}}).count();
+});
// TODO: SERVER-9986 - it's not clear that throwing is correct behavior here
-// assert.throws( function() { t.find({ loc : { $nearSphere : [ 30, 89 ], $maxDistance : 0.25 } }).count() } );
+// assert.throws( function() { t.find({ loc : { $nearSphere : [ 30, 89 ], $maxDistance : 0.25 }
+// }).count() } );
-assert.throws( function() { t.find({ loc : { $within : { $centerSphere : [[ -180, -91 ], 0.25] } } }).count(); } );
+assert.throws(function() {
+ t.find({loc: {$within: {$centerSphere: [[-180, -91], 0.25]}}}).count();
+});
var res;
-res = db.runCommand({ geoNear : "geooobsphere", near : [179, -91], maxDistance : 0.25, spherical : true });
-assert.commandFailed( res );
-printjson( res );
+res =
+ db.runCommand({geoNear: "geooobsphere", near: [179, -91], maxDistance: 0.25, spherical: true});
+assert.commandFailed(res);
+printjson(res);
// TODO: SERVER-9986 - it's not clear that throwing is correct behavior here
-// res = db.runCommand({ geoNear : "geooobsphere", near : [30, 89], maxDistance : 0.25, spherical : true })
+// res = db.runCommand({ geoNear : "geooobsphere", near : [30, 89], maxDistance : 0.25, spherical :
+// true })
// assert.commandFailed( res )
// printjson( res )
diff --git a/jstests/core/geo_operator_crs.js b/jstests/core/geo_operator_crs.js
index 99aec03062a..b9e242309dc 100644
--- a/jstests/core/geo_operator_crs.js
+++ b/jstests/core/geo_operator_crs.js
@@ -9,27 +9,29 @@ coll.drop();
// Test 2dsphere index
//
-assert.commandWorked(coll.ensureIndex({ geo : "2dsphere" }));
+assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
var legacyZeroPt = [0, 0];
-var jsonZeroPt = { type : "Point", coordinates : [0, 0] };
+var jsonZeroPt = {
+ type: "Point",
+ coordinates: [0, 0]
+};
var legacy90Pt = [90, 0];
-var json90Pt = { type : "Point", coordinates : [90, 0] };
+var json90Pt = {
+ type: "Point",
+ coordinates: [90, 0]
+};
-assert.writeOK(coll.insert({ geo : json90Pt }));
+assert.writeOK(coll.insert({geo: json90Pt}));
var earthRadiusMeters = 6378.1 * 1000;
var result = null;
-result = coll.getDB().runCommand({ geoNear : coll.getName(),
- near : legacyZeroPt,
- spherical : true });
+result = coll.getDB().runCommand({geoNear: coll.getName(), near: legacyZeroPt, spherical: true});
assert.commandWorked(result);
assert.close(result.results[0].dis, Math.PI / 2);
-result = coll.getDB().runCommand({ geoNear : coll.getName(),
- near : jsonZeroPt,
- spherical : true });
+result = coll.getDB().runCommand({geoNear: coll.getName(), near: jsonZeroPt, spherical: true});
assert.commandWorked(result);
assert.close(result.results[0].dis, (Math.PI / 2) * earthRadiusMeters);
@@ -40,13 +42,11 @@ assert.commandWorked(coll.dropIndexes());
// Test 2d Index
//
-assert.commandWorked(coll.ensureIndex({ geo : "2d" }));
+assert.commandWorked(coll.ensureIndex({geo: "2d"}));
-assert.writeOK(coll.insert({ geo : legacy90Pt }));
+assert.writeOK(coll.insert({geo: legacy90Pt}));
-result = coll.getDB().runCommand({ geoNear : coll.getName(),
- near : legacyZeroPt,
- spherical : true });
+result = coll.getDB().runCommand({geoNear: coll.getName(), near: legacyZeroPt, spherical: true});
assert.commandWorked(result);
assert.close(result.results[0].dis, Math.PI / 2);
@@ -56,11 +56,7 @@ assert.close(result.results[0].dis, Math.PI / 2);
// Test with a 2d and 2dsphere index
//
-assert.commandWorked(coll.ensureIndex({ geo : "2dsphere" }));
-result = coll.getDB().runCommand({ geoNear : coll.getName(),
- near : jsonZeroPt,
- spherical : true });
+assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
+result = coll.getDB().runCommand({geoNear: coll.getName(), near: jsonZeroPt, spherical: true});
assert.commandWorked(result);
assert.close(result.results[0].dis, (Math.PI / 2) * earthRadiusMeters);
-
-
diff --git a/jstests/core/geo_or.js b/jstests/core/geo_or.js
index 17c7340faff..20eb7b7dce1 100644
--- a/jstests/core/geo_or.js
+++ b/jstests/core/geo_or.js
@@ -17,39 +17,68 @@ t.ensureIndex({loc: indexname});
assert.eq(1, t.find({loc: p}).itcount(), indexname);
// $or supports at most one $near clause
-assert.eq(2, t.find({$or: [{loc: {$nearSphere: p}}]}).itcount(),
+assert.eq(2,
+ t.find({$or: [{loc: {$nearSphere: p}}]}).itcount(),
'geo query not supported by $or. index type: ' + indexname);
assert.throws(function() {
- assert.eq(2, t.find({$or: [{loc: {$nearSphere: p}},
- {loc: {$nearSphere: q}}]}).itcount(),
+ assert.eq(2,
+ t.find({$or: [{loc: {$nearSphere: p}}, {loc: {$nearSphere: q}}]}).itcount(),
'geo query not supported by $or. index type: ' + indexname);
}, null, '$or with multiple $near clauses');
// the following tests should match the points in the collection
-assert.eq(2, t.find({$or: [
- {loc: {$geoWithin: {$centerSphere: [p, 10]}}},
- {loc: {$geoWithin: {$centerSphere: [p, 10]}}}
- ]}).itcount(),
+assert.eq(2,
+ t.find({
+ $or: [
+ {loc: {$geoWithin: {$centerSphere: [p, 10]}}},
+ {loc: {$geoWithin: {$centerSphere: [p, 10]}}}
+ ]
+ }).itcount(),
'multiple $geoWithin clauses not supported by $or. index type: ' + indexname);
-assert.eq(2, t.find({$or: [
- {loc: {$geoIntersects: {$geometry: {type: 'LineString', coordinates: [p, q]}}}},
- {loc: {$geoIntersects: {$geometry: {type: 'LineString',
- coordinates: [[0,0], [1,1]]}}}}
- ]}).itcount(),
- 'multiple $geoIntersects LineString clauses not supported by $or. index type: ' + indexname);
-assert.eq(2, t.find({$or: [
- {loc: {$geoIntersects: {$geometry: {type: 'Point', coordinates: p}}}},
- {loc: {$geoIntersects: {$geometry: {type: 'Point', coordinates: q}}}}
- ]}).itcount(),
+assert.eq(2,
+ t.find({
+ $or: [
+ {loc: {$geoIntersects: {$geometry: {type: 'LineString', coordinates: [p, q]}}}},
+ {
+ loc: {
+ $geoIntersects:
+ {$geometry: {type: 'LineString', coordinates: [[0, 0], [1, 1]]}}
+ }
+ }
+ ]
+ }).itcount(),
+ 'multiple $geoIntersects LineString clauses not supported by $or. index type: ' +
+ indexname);
+assert.eq(2,
+ t.find({
+ $or: [
+ {loc: {$geoIntersects: {$geometry: {type: 'Point', coordinates: p}}}},
+ {loc: {$geoIntersects: {$geometry: {type: 'Point', coordinates: q}}}}
+ ]
+ }).itcount(),
'multiple $geoIntersects Point clauses not supported by $or. index type: ' + indexname);
-assert.eq(2, t.find({$or: [
- {loc: {$geoIntersects: {$geometry: {type: 'Polygon',
- coordinates: [[[0, 0], p, q, [0, 0]]]}}}},
- {loc: {$geoIntersects: {$geometry:
- {type: 'Polygon', coordinates: [[[0, 0], [1, 1], [0, 1], [0, 0]]]}}}}
- ]}).itcount(),
- 'multiple $geoIntersects Polygon clauses not supported by $or. index type: ' + indexname);
+assert.eq(
+ 2,
+ t.find({
+ $or: [
+ {
+ loc: {
+ $geoIntersects:
+ {$geometry: {type: 'Polygon', coordinates: [[[0, 0], p, q, [0, 0]]]}}
+ }
+ },
+ {
+ loc: {
+ $geoIntersects: {
+ $geometry:
+ {type: 'Polygon', coordinates: [[[0, 0], [1, 1], [0, 1], [0, 0]]]}
+ }
+ }
+ }
+ ]
+ }).itcount(),
+ 'multiple $geoIntersects Polygon clauses not supported by $or. index type: ' + indexname);
t.dropIndexes();
@@ -57,6 +86,11 @@ var indexname = "2d";
t.ensureIndex({loc: indexname});
-assert.eq(2, t.find({$or: [{loc: {$geoWithin: {$centerSphere: [p, 10]}}},
- {loc: {$geoWithin: {$centerSphere: [p, 10]}}}]}).itcount(),
+assert.eq(2,
+ t.find({
+ $or: [
+ {loc: {$geoWithin: {$centerSphere: [p, 10]}}},
+ {loc: {$geoWithin: {$centerSphere: [p, 10]}}}
+ ]
+ }).itcount(),
'multiple $geoWithin clauses not supported by $or. index type: ' + indexname);
diff --git a/jstests/core/geo_poly_edge.js b/jstests/core/geo_poly_edge.js
index ce93607f1d3..380fe533861 100644
--- a/jstests/core/geo_poly_edge.js
+++ b/jstests/core/geo_poly_edge.js
@@ -2,21 +2,21 @@
// Tests polygon edge cases
//
-var coll = db.getCollection( 'jstests_geo_poly_edge' );
+var coll = db.getCollection('jstests_geo_poly_edge');
coll.drop();
-coll.ensureIndex({ loc : "2d" });
+coll.ensureIndex({loc: "2d"});
-coll.insert({ loc : [10, 10] });
-coll.insert({ loc : [10, -10] });
+coll.insert({loc: [10, 10]});
+coll.insert({loc: [10, -10]});
-assert.eq( coll.find({ loc : { $within : { $polygon : [[ 10, 10 ], [ 10, 10 ], [ 10, -10 ]] } } }).itcount(), 2 );
+assert.eq(coll.find({loc: {$within: {$polygon: [[10, 10], [10, 10], [10, -10]]}}}).itcount(), 2);
-assert.eq( coll.find({ loc : { $within : { $polygon : [[ 10, 10 ], [ 10, 10 ], [ 10, 10 ]] } } }).itcount(), 1 );
+assert.eq(coll.find({loc: {$within: {$polygon: [[10, 10], [10, 10], [10, 10]]}}}).itcount(), 1);
+coll.insert({loc: [179, 0]});
+coll.insert({loc: [0, 179]});
-coll.insert({ loc : [179, 0] });
-coll.insert({ loc : [0, 179] });
-
-assert.eq( coll.find({ loc : { $within : { $polygon : [[0, 0], [1000, 0], [1000, 1000], [0, 1000]] } } }).itcount(), 3 );
-
+assert.eq(coll.find({loc: {$within: {$polygon: [[0, 0], [1000, 0], [1000, 1000], [0, 1000]]}}})
+ .itcount(),
+ 3);
diff --git a/jstests/core/geo_poly_line.js b/jstests/core/geo_poly_line.js
index 2b61d464b05..fe00e0483e4 100644
--- a/jstests/core/geo_poly_line.js
+++ b/jstests/core/geo_poly_line.js
@@ -3,15 +3,14 @@
t = db.geo_polygon5;
t.drop();
-t.insert({loc:[0,0]});
-t.insert({loc:[1,0]});
-t.insert({loc:[2,0]});
-t.insert({loc:[3,0]});
-t.insert({loc:[4,0]});
+t.insert({loc: [0, 0]});
+t.insert({loc: [1, 0]});
+t.insert({loc: [2, 0]});
+t.insert({loc: [3, 0]});
+t.insert({loc: [4, 0]});
-t.ensureIndex( { loc : "2d" } );
+t.ensureIndex({loc: "2d"});
-printjson( t.find({ loc: { "$within": { "$polygon" : [[0,0], [2,0], [4,0]] } } }).toArray() );
-
-assert.eq( 5, t.find({ loc: { "$within": { "$polygon" : [[0,0], [2,0], [4,0]] } } }).itcount() );
+printjson(t.find({loc: {"$within": {"$polygon": [[0, 0], [2, 0], [4, 0]]}}}).toArray());
+assert.eq(5, t.find({loc: {"$within": {"$polygon": [[0, 0], [2, 0], [4, 0]]}}}).itcount());
diff --git a/jstests/core/geo_polygon1.js b/jstests/core/geo_polygon1.js
index de2652e69bc..487df91a167 100644
--- a/jstests/core/geo_polygon1.js
+++ b/jstests/core/geo_polygon1.js
@@ -6,68 +6,81 @@ t = db.geo_polygon1;
t.drop();
num = 0;
-for ( x=1; x < 9; x++ ){
- for ( y= 1; y < 9; y++ ){
- o = { _id : num++ , loc : [ x , y ] };
- t.save( o );
+for (x = 1; x < 9; x++) {
+ for (y = 1; y < 9; y++) {
+ o = {
+ _id: num++,
+ loc: [x, y]
+ };
+ t.save(o);
}
}
-t.ensureIndex( { loc : "2d" } );
+t.ensureIndex({loc: "2d"});
-triangle = [[0,0], [1,1], [0,2]];
+triangle = [[0, 0], [1, 1], [0, 2]];
// Look at only a small slice of the data within a triangle
-assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : triangle }}} ).count() , "Triangle Test" );
+assert.eq(1, t.find({loc: {"$within": {"$polygon": triangle}}}).count(), "Triangle Test");
-boxBounds = [ [0,0], [0,10], [10,10], [10,0] ];
+boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]];
-assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Bounding Box Test" );
+assert.eq(num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(), "Bounding Box Test");
-//Make sure we can add object-based polygons
-assert.eq( num, t.find( { loc : { $within : { $polygon : { a : [-10, -10], b : [-10, 10], c : [10, 10], d : [10, -10] } } } } ).count() );
+// Make sure we can add object-based polygons
+assert.eq(
+ num,
+ t.find({loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}}})
+ .count());
// Look in a box much bigger than the one we have data in
-boxBounds = [[-100,-100], [-100, 100], [100,100], [100,-100]];
-assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Big Bounding Box Test" );
+boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]];
+assert.eq(num,
+ t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(),
+ "Big Bounding Box Test");
t.drop();
pacman = [
- [0,2], [0,4], [2,6], [4,6], // Head
- [6,4], [4,3], [6,2], // Mouth
- [4,0], [2,0] // Bottom
- ];
+ [0, 2],
+ [0, 4],
+ [2, 6],
+ [4, 6], // Head
+ [6, 4],
+ [4, 3],
+ [6, 2], // Mouth
+ [4, 0],
+ [2, 0] // Bottom
+];
-t.save({loc: [1,3] }); // Add a point that's in
-assert.commandWorked(t.ensureIndex( { loc : "2d" } ));
+t.save({loc: [1, 3]}); // Add a point that's in
+assert.commandWorked(t.ensureIndex({loc: "2d"}));
-assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman single point" );
+assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).count(), "Pacman single point");
-t.save({ loc : [5, 3] }); // Add a point that's out right in the mouth opening
-t.save({ loc : [3, 7] }); // Add a point above the center of the head
-t.save({ loc : [3,-1] }); // Add a point below the center of the bottom
+t.save({loc: [5, 3]}); // Add a point that's out right in the mouth opening
+t.save({loc: [3, 7]}); // Add a point above the center of the head
+t.save({loc: [3, -1]}); // Add a point below the center of the bottom
-assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman double point" );
+assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).count(), "Pacman double point");
// Make sure we can't add bad polygons
okay = true;
-try{
- t.find( { loc : { $within : { $polygon : [1, 2] } } } ).toArray();
+try {
+ t.find({loc: {$within: {$polygon: [1, 2]}}}).toArray();
okay = false;
+} catch (e) {
}
-catch(e){}
assert(okay);
-try{
- t.find( { loc : { $within : { $polygon : [[1, 2]] } } } ).toArray();
+try {
+ t.find({loc: {$within: {$polygon: [[1, 2]]}}}).toArray();
okay = false;
+} catch (e) {
}
-catch(e){}
assert(okay);
-try{
- t.find( { loc : { $within : { $polygon : [[1, 2], [2, 3]] } } } ).toArray();
+try {
+ t.find({loc: {$within: {$polygon: [[1, 2], [2, 3]]}}}).toArray();
okay = false;
+} catch (e) {
}
-catch(e){}
assert(okay);
-
diff --git a/jstests/core/geo_polygon1_noindex.js b/jstests/core/geo_polygon1_noindex.js
index 56d1cc64f59..672f53ebd90 100644
--- a/jstests/core/geo_polygon1_noindex.js
+++ b/jstests/core/geo_polygon1_noindex.js
@@ -4,43 +4,57 @@ t = db.geo_polygon1_noindex;
t.drop();
num = 0;
-for ( x=1; x < 9; x++ ){
- for ( y= 1; y < 9; y++ ){
- o = { _id : num++ , loc : [ x , y ] };
- t.save( o );
+for (x = 1; x < 9; x++) {
+ for (y = 1; y < 9; y++) {
+ o = {
+ _id: num++,
+ loc: [x, y]
+ };
+ t.save(o);
}
}
-triangle = [[0,0], [1,1], [0,2]];
+triangle = [[0, 0], [1, 1], [0, 2]];
// Look at only a small slice of the data within a triangle
-assert.eq( 1 , t.find({ loc: { "$within": { "$polygon" : triangle }}} ).count() , "Triangle Test" );
+assert.eq(1, t.find({loc: {"$within": {"$polygon": triangle}}}).count(), "Triangle Test");
-boxBounds = [ [0,0], [0,10], [10,10], [10,0] ];
+boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]];
-assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Bounding Box Test" );
+assert.eq(num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(), "Bounding Box Test");
-//Make sure we can add object-based polygons
-assert.eq( num, t.find( { loc : { $within : { $polygon : { a : [-10, -10], b : [-10, 10], c : [10, 10], d : [10, -10] } } } } ).count() );
+// Make sure we can add object-based polygons
+assert.eq(
+ num,
+ t.find({loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}}})
+ .count());
// Look in a box much bigger than the one we have data in
-boxBounds = [[-100,-100], [-100, 100], [100,100], [100,-100]];
-assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).count() , "Big Bounding Box Test" );
+boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]];
+assert.eq(num,
+ t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(),
+ "Big Bounding Box Test");
t.drop();
pacman = [
- [0,2], [0,4], [2,6], [4,6], // Head
- [6,4], [4,3], [6,2], // Mouth
- [4,0], [2,0] // Bottom
- ];
-
-assert.writeOK(t.save({loc: [1,3] })); // Add a point that's in
-
-assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman single point" );
-
-t.save({ loc : [5, 3] }); // Add a point that's out right in the mouth opening
-t.save({ loc : [3, 7] }); // Add a point above the center of the head
-t.save({ loc : [3,-1] }); // Add a point below the center of the bottom
-
-assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).count() , "Pacman double point" );
+ [0, 2],
+ [0, 4],
+ [2, 6],
+ [4, 6], // Head
+ [6, 4],
+ [4, 3],
+ [6, 2], // Mouth
+ [4, 0],
+ [2, 0] // Bottom
+];
+
+assert.writeOK(t.save({loc: [1, 3]})); // Add a point that's in
+
+assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).count(), "Pacman single point");
+
+t.save({loc: [5, 3]}); // Add a point that's out right in the mouth opening
+t.save({loc: [3, 7]}); // Add a point above the center of the head
+t.save({loc: [3, -1]}); // Add a point below the center of the bottom
+
+assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).count(), "Pacman double point");
diff --git a/jstests/core/geo_polygon2.js b/jstests/core/geo_polygon2.js
index c6857341d79..8a011beba0c 100644
--- a/jstests/core/geo_polygon2.js
+++ b/jstests/core/geo_polygon2.js
@@ -8,28 +8,27 @@
var numTests = 4;
-for ( var test = 0; test < numTests; test++ ) {
-
- Random.srand( 1337 + test );
+for (var test = 0; test < numTests; test++) {
+ Random.srand(1337 + test);
var numTurtles = 4;
- var gridSize = [ 20, 20 ];
+ var gridSize = [20, 20];
var turtleSteps = 500;
- var bounds = [ Random.rand() * -1000000 + 0.00001, Random.rand() * 1000000 + 0.00001 ];
+ var bounds = [Random.rand() * -1000000 + 0.00001, Random.rand() * 1000000 + 0.00001];
var rotation = Math.PI * Random.rand();
- var bits = Math.floor( Random.rand() * 32 );
+ var bits = Math.floor(Random.rand() * 32);
- printjson( { test : test, rotation : rotation, bits : bits });
+ printjson({test: test, rotation: rotation, bits: bits});
- var rotatePoint = function( x, y ) {
+ var rotatePoint = function(x, y) {
- if( y == undefined ){
+ if (y == undefined) {
y = x[1];
x = x[0];
}
- xp = x * Math.cos( rotation ) - y * Math.sin( rotation );
- yp = y * Math.cos( rotation ) + x * Math.sin( rotation );
+ xp = x * Math.cos(rotation) - y * Math.sin(rotation);
+ yp = y * Math.cos(rotation) + x * Math.sin(rotation);
var scaleX = (bounds[1] - bounds[0]) / 360;
var scaleY = (bounds[1] - bounds[0]) / 360;
@@ -41,21 +40,21 @@ for ( var test = 0; test < numTests; test++ ) {
};
var grid = [];
- for ( var i = 0; i < gridSize[0]; i++ ) {
- grid.push( new Array( gridSize[1] ) );
+ for (var i = 0; i < gridSize[0]; i++) {
+ grid.push(new Array(gridSize[1]));
}
grid.toString = function() {
var gridStr = "";
- for ( var j = grid[0].length - 1; j >= -1; j-- ) {
- for ( var i = 0; i < grid.length; i++ ) {
- if ( i == 0 )
- gridStr += ( j == -1 ? " " : ( j % 10) ) + ": ";
- if ( j != -1 )
- gridStr += "[" + ( grid[i][j] != undefined ? grid[i][j] : " " ) + "]";
+ for (var j = grid[0].length - 1; j >= -1; j--) {
+ for (var i = 0; i < grid.length; i++) {
+ if (i == 0)
+ gridStr += (j == -1 ? " " : (j % 10)) + ": ";
+ if (j != -1)
+ gridStr += "[" + (grid[i][j] != undefined ? grid[i][j] : " ") + "]";
else
- gridStr += " " + ( i % 10 ) + " ";
+ gridStr += " " + (i % 10) + " ";
}
gridStr += "\n";
}
@@ -64,89 +63,82 @@ for ( var test = 0; test < numTests; test++ ) {
};
var turtles = [];
- for ( var i = 0; i < numTurtles; i++ ) {
-
- var up = ( i % 2 == 0 ) ? i - 1 : 0;
- var left = ( i % 2 == 1 ) ? ( i - 1 ) - 1 : 0;
+ for (var i = 0; i < numTurtles; i++) {
+ var up = (i % 2 == 0) ? i - 1 : 0;
+ var left = (i % 2 == 1) ? (i - 1) - 1 : 0;
turtles[i] = [
- [ Math.floor( gridSize[0] / 2 ), Math.floor( gridSize[1] / 2 ) ],
- [ Math.floor( gridSize[0] / 2 ) + left, Math.floor( gridSize[1] / 2 ) + up ] ];
+ [Math.floor(gridSize[0] / 2), Math.floor(gridSize[1] / 2)],
+ [Math.floor(gridSize[0] / 2) + left, Math.floor(gridSize[1] / 2) + up]
+ ];
grid[turtles[i][1][0]][turtles[i][1][1]] = i;
-
}
- grid[Math.floor( gridSize[0] / 2 )][Math.floor( gridSize[1] / 2 )] = "S";
+ grid[Math.floor(gridSize[0] / 2)][Math.floor(gridSize[1] / 2)] = "S";
// print( grid.toString() )
var pickDirections = function() {
- var up = Math.floor( Random.rand() * 3 );
- if ( up == 2 )
+ var up = Math.floor(Random.rand() * 3);
+ if (up == 2)
up = -1;
- if ( up == 0 ) {
- var left = Math.floor( Random.rand() * 3 );
- if ( left == 2 )
+ if (up == 0) {
+ var left = Math.floor(Random.rand() * 3);
+ if (left == 2)
left = -1;
} else
left = 0;
- if ( Random.rand() < 0.5 ) {
+ if (Random.rand() < 0.5) {
var swap = left;
left = up;
up = swap;
}
- return [ left, up ];
+ return [left, up];
};
- for ( var s = 0; s < turtleSteps; s++ ) {
-
- for ( var t = 0; t < numTurtles; t++ ) {
-
+ for (var s = 0; s < turtleSteps; s++) {
+ for (var t = 0; t < numTurtles; t++) {
var dirs = pickDirections();
var up = dirs[0];
var left = dirs[1];
var lastTurtle = turtles[t][turtles[t].length - 1];
- var nextTurtle = [ lastTurtle[0] + left, lastTurtle[1] + up ];
+ var nextTurtle = [lastTurtle[0] + left, lastTurtle[1] + up];
- if ( nextTurtle[0] >= gridSize[0] ||
- nextTurtle[1] >= gridSize[1] ||
- nextTurtle[0] < 0 ||
- nextTurtle[1] < 0 )
+ if (nextTurtle[0] >= gridSize[0] || nextTurtle[1] >= gridSize[1] || nextTurtle[0] < 0 ||
+ nextTurtle[1] < 0)
continue;
- if ( grid[nextTurtle[0]][nextTurtle[1]] == undefined ) {
- turtles[t].push( nextTurtle );
+ if (grid[nextTurtle[0]][nextTurtle[1]] == undefined) {
+ turtles[t].push(nextTurtle);
grid[nextTurtle[0]][nextTurtle[1]] = t;
}
-
}
}
turtlePaths = [];
- for ( var t = 0; t < numTurtles; t++ ) {
-
+ for (var t = 0; t < numTurtles; t++) {
turtlePath = [];
var nextSeg = function(currTurtle, prevTurtle) {
var pathX = currTurtle[0];
- if ( currTurtle[1] < prevTurtle[1] ) {
+ if (currTurtle[1] < prevTurtle[1]) {
pathX = currTurtle[0] + 1;
pathY = prevTurtle[1];
- } else if ( currTurtle[1] > prevTurtle[1] ) {
+ } else if (currTurtle[1] > prevTurtle[1]) {
pathX = currTurtle[0];
pathY = currTurtle[1];
- } else if ( currTurtle[0] < prevTurtle[0] ) {
+ } else if (currTurtle[0] < prevTurtle[0]) {
pathX = prevTurtle[0];
pathY = currTurtle[1];
- } else if ( currTurtle[0] > prevTurtle[0] ) {
+ } else if (currTurtle[0] > prevTurtle[0]) {
pathX = currTurtle[0];
pathY = currTurtle[1] + 1;
}
@@ -155,25 +147,21 @@ for ( var test = 0; test < numTests; test++ ) {
// : "
// + [pathX, pathY]);
- return [ pathX, pathY ];
+ return [pathX, pathY];
};
- for ( var s = 1; s < turtles[t].length; s++ ) {
-
+ for (var s = 1; s < turtles[t].length; s++) {
currTurtle = turtles[t][s];
prevTurtle = turtles[t][s - 1];
- turtlePath.push( nextSeg( currTurtle, prevTurtle ) );
-
+ turtlePath.push(nextSeg(currTurtle, prevTurtle));
}
- for ( var s = turtles[t].length - 2; s >= 0; s-- ) {
-
+ for (var s = turtles[t].length - 2; s >= 0; s--) {
currTurtle = turtles[t][s];
prevTurtle = turtles[t][s + 1];
- turtlePath.push( nextSeg( currTurtle, prevTurtle ) );
-
+ turtlePath.push(nextSeg(currTurtle, prevTurtle));
}
// printjson( turtlePath )
@@ -183,38 +171,37 @@ for ( var test = 0; test < numTests; test++ ) {
grid[lastTurtle[0]][lastTurtle[1]] = undefined;
fixedTurtlePath = [];
- for ( var s = 1; s < turtlePath.length; s++ ) {
-
- if ( turtlePath[s - 1][0] == turtlePath[s][0] &&
- turtlePath[s - 1][1] == turtlePath[s][1] ) {
+ for (var s = 1; s < turtlePath.length; s++) {
+ if (turtlePath[s - 1][0] == turtlePath[s][0] &&
+ turtlePath[s - 1][1] == turtlePath[s][1]) {
continue;
}
var up = turtlePath[s][1] - turtlePath[s - 1][1];
var right = turtlePath[s][0] - turtlePath[s - 1][0];
- var addPoint = ( up != 0 && right != 0 );
+ var addPoint = (up != 0 && right != 0);
- if ( addPoint && up != right ) {
- fixedTurtlePath.push( [ turtlePath[s][0], turtlePath[s - 1][1] ] );
- } else if ( addPoint ) {
- fixedTurtlePath.push( [ turtlePath[s - 1][0], turtlePath[s][1] ] );
+ if (addPoint && up != right) {
+ fixedTurtlePath.push([turtlePath[s][0], turtlePath[s - 1][1]]);
+ } else if (addPoint) {
+ fixedTurtlePath.push([turtlePath[s - 1][0], turtlePath[s][1]]);
}
- fixedTurtlePath.push( turtlePath[s] );
+ fixedTurtlePath.push(turtlePath[s]);
}
// printjson( fixedTurtlePath )
- turtlePaths.push( fixedTurtlePath );
+ turtlePaths.push(fixedTurtlePath);
}
// Uncomment to print polygon shape
// print( grid.toString() )
var polygon = [];
- for ( var t = 0; t < turtlePaths.length; t++ ) {
- for ( var s = 0; s < turtlePaths[t].length; s++ ) {
- polygon.push( rotatePoint( turtlePaths[t][s] ) );
+ for (var t = 0; t < turtlePaths.length; t++) {
+ for (var s = 0; s < turtlePaths[t].length; s++) {
+ polygon.push(rotatePoint(turtlePaths[t][s]));
}
}
@@ -230,34 +217,33 @@ for ( var test = 0; test < numTests; test++ ) {
var allPointsIn = [];
var allPointsOut = [];
- for ( var j = grid[0].length - 1; j >= 0; j-- ) {
- for ( var i = 0; i < grid.length; i++ ) {
- var point = rotatePoint( [ i + 0.5, j + 0.5 ] );
+ for (var j = grid[0].length - 1; j >= 0; j--) {
+ for (var i = 0; i < grid.length; i++) {
+ var point = rotatePoint([i + 0.5, j + 0.5]);
- t.insert( { loc : point } );
- if ( grid[i][j] != undefined ){
- allPointsIn.push( point );
+ t.insert({loc: point});
+ if (grid[i][j] != undefined) {
+ allPointsIn.push(point);
pointsIn++;
- }
- else{
- allPointsOut.push( point );
+ } else {
+ allPointsOut.push(point);
pointsOut++;
}
}
}
- var res = t.ensureIndex({ loc: "2d" }, { bits: 1 + bits, max: bounds[1], min: bounds[0] });
- assert.commandWorked( res );
+ var res = t.ensureIndex({loc: "2d"}, {bits: 1 + bits, max: bounds[1], min: bounds[0]});
+ assert.commandWorked(res);
- t.insert( { loc : allPointsIn } );
- t.insert( { loc : allPointsOut } );
- allPoints = allPointsIn.concat( allPointsOut );
- t.insert( { loc : allPoints } );
+ t.insert({loc: allPointsIn});
+ t.insert({loc: allPointsOut});
+ allPoints = allPointsIn.concat(allPointsOut);
+ t.insert({loc: allPoints});
- print( "Points : " );
- printjson( { pointsIn : pointsIn, pointsOut : pointsOut } );
- //print( t.find( { loc : { "$within" : { "$polygon" : polygon } } } ).count() )
+ print("Points : ");
+ printjson({pointsIn: pointsIn, pointsOut: pointsOut});
+ // print( t.find( { loc : { "$within" : { "$polygon" : polygon } } } ).count() )
- assert.eq( gridSize[0] * gridSize[1] + 3, t.find().count() );
- assert.eq( 2 + pointsIn, t.find( { loc : { "$within" : { "$polygon" : polygon } } } ).count() );
+ assert.eq(gridSize[0] * gridSize[1] + 3, t.find().count());
+ assert.eq(2 + pointsIn, t.find({loc: {"$within": {"$polygon": polygon}}}).count());
}
diff --git a/jstests/core/geo_polygon3.js b/jstests/core/geo_polygon3.js
index 495ecb189b1..887e81701cd 100644
--- a/jstests/core/geo_polygon3.js
+++ b/jstests/core/geo_polygon3.js
@@ -4,51 +4,61 @@
var numTests = 31;
-for( var n = 0; n < numTests; n++ ){
-
- t = db.geo_polygon3;
- t.drop();
-
- num = 0;
- for ( x=1; x < 9; x++ ){
- for ( y= 1; y < 9; y++ ){
- o = { _id : num++ , loc : [ x , y ] };
- t.save( o );
- }
- }
-
- t.ensureIndex( { loc : "2d" }, { bits : 2 + n } );
-
- triangle = [[0,0], [1,1], [0,2]];
-
- // Look at only a small slice of the data within a triangle
- assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : triangle }}} ).itcount() , "Triangle Test" );
-
-
- boxBounds = [ [0,0], [0,10], [10,10], [10,0] ];
-
- assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).itcount() , "Bounding Box Test" );
-
- // Look in a box much bigger than the one we have data in
- boxBounds = [[-100,-100], [-100, 100], [100,100], [100,-100]];
- assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : boxBounds } } } ).itcount() , "Big Bounding Box Test" );
-
- t.drop();
-
- pacman = [
- [0,2], [0,4], [2,6], [4,6], // Head
- [6,4], [4,3], [6,2], // Mouth
- [4,0], [2,0] // Bottom
- ];
-
- t.save({loc: [1,3] }); // Add a point that's in
- t.ensureIndex( { loc : "2d" }, { bits : 2 + t } );
-
- assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).itcount() , "Pacman single point" );
-
- t.save({ loc : [5, 3] }); // Add a point that's out right in the mouth opening
- t.save({ loc : [3, 7] }); // Add a point above the center of the head
- t.save({ loc : [3,-1] }); // Add a point below the center of the bottom
-
- assert.eq( 1 , t.find({loc : { $within : { $polygon : pacman }}} ).itcount() , "Pacman double point" );
+for (var n = 0; n < numTests; n++) {
+ t = db.geo_polygon3;
+ t.drop();
+
+ num = 0;
+ for (x = 1; x < 9; x++) {
+ for (y = 1; y < 9; y++) {
+ o = {
+ _id: num++,
+ loc: [x, y]
+ };
+ t.save(o);
+ }
+ }
+
+ t.ensureIndex({loc: "2d"}, {bits: 2 + n});
+
+ triangle = [[0, 0], [1, 1], [0, 2]];
+
+ // Look at only a small slice of the data within a triangle
+ assert.eq(1, t.find({loc: {"$within": {"$polygon": triangle}}}).itcount(), "Triangle Test");
+
+ boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]];
+
+ assert.eq(
+ num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).itcount(), "Bounding Box Test");
+
+ // Look in a box much bigger than the one we have data in
+ boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]];
+ assert.eq(num,
+ t.find({loc: {"$within": {"$polygon": boxBounds}}}).itcount(),
+ "Big Bounding Box Test");
+
+ t.drop();
+
+ pacman = [
+ [0, 2],
+ [0, 4],
+ [2, 6],
+ [4, 6], // Head
+ [6, 4],
+ [4, 3],
+ [6, 2], // Mouth
+ [4, 0],
+ [2, 0] // Bottom
+ ];
+
+ t.save({loc: [1, 3]}); // Add a point that's in
+ t.ensureIndex({loc: "2d"}, {bits: 2 + t});
+
+ assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).itcount(), "Pacman single point");
+
+ t.save({loc: [5, 3]}); // Add a point that's out right in the mouth opening
+ t.save({loc: [3, 7]}); // Add a point above the center of the head
+ t.save({loc: [3, -1]}); // Add a point below the center of the bottom
+
+ assert.eq(1, t.find({loc: {$within: {$polygon: pacman}}}).itcount(), "Pacman double point");
}
diff --git a/jstests/core/geo_queryoptimizer.js b/jstests/core/geo_queryoptimizer.js
index f75afad645e..af55507ff86 100644
--- a/jstests/core/geo_queryoptimizer.js
+++ b/jstests/core/geo_queryoptimizer.js
@@ -2,26 +2,25 @@
t = db.geo_qo1;
t.drop();
-t.ensureIndex({loc:"2d"});
+t.ensureIndex({loc: "2d"});
-t.insert({'issue':0});
-t.insert({'issue':1});
-t.insert({'issue':2});
-t.insert({'issue':2, 'loc':[30.12,-118]});
-t.insert({'issue':1, 'loc':[30.12,-118]});
-t.insert({'issue':0, 'loc':[30.12,-118]});
+t.insert({'issue': 0});
+t.insert({'issue': 1});
+t.insert({'issue': 2});
+t.insert({'issue': 2, 'loc': [30.12, -118]});
+t.insert({'issue': 1, 'loc': [30.12, -118]});
+t.insert({'issue': 0, 'loc': [30.12, -118]});
-assert.eq( 6 , t.find().itcount() , "A1" );
+assert.eq(6, t.find().itcount(), "A1");
-assert.eq( 2 , t.find({'issue':0}).itcount() , "A2" );
+assert.eq(2, t.find({'issue': 0}).itcount(), "A2");
-assert.eq( 1 , t.find({'issue':0,'loc':{$near:[30.12,-118]}}).itcount() , "A3" );
+assert.eq(1, t.find({'issue': 0, 'loc': {$near: [30.12, -118]}}).itcount(), "A3");
-assert.eq( 2 , t.find({'issue':0}).itcount() , "B1" );
+assert.eq(2, t.find({'issue': 0}).itcount(), "B1");
-assert.eq( 6 , t.find().itcount() , "B2" );
+assert.eq(6, t.find().itcount(), "B2");
-assert.eq( 2 , t.find({'issue':0}).itcount() , "B3" );
-
-assert.eq( 1 , t.find({'issue':0,'loc':{$near:[30.12,-118]}}).itcount() , "B4" );
+assert.eq(2, t.find({'issue': 0}).itcount(), "B3");
+assert.eq(1, t.find({'issue': 0, 'loc': {$near: [30.12, -118]}}).itcount(), "B4");
diff --git a/jstests/core/geo_regex0.js b/jstests/core/geo_regex0.js
index ae7fddabcf3..1add7f4e0c3 100644
--- a/jstests/core/geo_regex0.js
+++ b/jstests/core/geo_regex0.js
@@ -4,15 +4,20 @@
t = db.regex0;
t.drop();
-t.ensureIndex( { point : '2d', words : 1 } );
-t.insert( { point : [ 1, 1 ], words : [ 'foo', 'bar' ] } );
-
-regex = { words : /^f/ };
-geo = { point : { $near : [ 1, 1 ] } };
-both = { point : { $near : [ 1, 1 ] }, words : /^f/ };
-
-assert.eq(1, t.find( regex ).count() );
-assert.eq(1, t.find( geo ).count() );
-assert.eq(1, t.find( both ).count() );
+t.ensureIndex({point: '2d', words: 1});
+t.insert({point: [1, 1], words: ['foo', 'bar']});
+regex = {
+ words: /^f/
+};
+geo = {
+ point: {$near: [1, 1]}
+};
+both = {
+ point: {$near: [1, 1]},
+ words: /^f/
+};
+assert.eq(1, t.find(regex).count());
+assert.eq(1, t.find(geo).count());
+assert.eq(1, t.find(both).count());
diff --git a/jstests/core/geo_s2cursorlimitskip.js b/jstests/core/geo_s2cursorlimitskip.js
index cbf360a45b0..868b57de39f 100644
--- a/jstests/core/geo_s2cursorlimitskip.js
+++ b/jstests/core/geo_s2cursorlimitskip.js
@@ -14,12 +14,16 @@ var random = Random.rand;
* fetch the rest of the points and again verify that the
* number of query and getmore operations are correct.
*/
-function sign() { return random() > 0.5 ? 1 : -1; }
-function insertRandomPoints(num, minDist, maxDist){
- for(var i = 0; i < num; i++){
+function sign() {
+ return random() > 0.5 ? 1 : -1;
+}
+function insertRandomPoints(num, minDist, maxDist) {
+ for (var i = 0; i < num; i++) {
var lat = sign() * (minDist + random() * (maxDist - minDist));
var lng = sign() * (minDist + random() * (maxDist - minDist));
- var point = { geo: { type: "Point", coordinates: [lng, lat] } };
+ var point = {
+ geo: {type: "Point", coordinates: [lng, lat]}
+ };
assert.writeOK(t.insert(point));
}
}
@@ -31,8 +35,8 @@ var batchSize = 4;
// Insert points between 0.01 and 1.0 away.
insertRandomPoints(totalPointCount, 0.01, 1.0);
-var cursor = t.find({geo: {$geoNear : {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}})
- .batchSize(batchSize);
+var cursor = t.find({geo: {$geoNear: {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}})
+ .batchSize(batchSize);
assert.eq(cursor.count(), totalPointCount);
// Disable profiling in order to drop the system.profile collection.
@@ -71,14 +75,20 @@ assert(!cursor.hasNext());
var someLimit = 23;
// Make sure limit does something.
-cursor = t.find({geo: {$geoNear : {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}}).limit(someLimit);
+cursor = t.find({geo: {$geoNear: {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}})
+ .limit(someLimit);
// Count doesn't work here -- ignores limit/skip, so we use itcount.
assert.eq(cursor.itcount(), someLimit);
// Make sure skip works by skipping some stuff ourselves.
var someSkip = 3;
-cursor = t.find({geo: {$geoNear : {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}}).limit(someLimit + someSkip);
-for (var i = 0; i < someSkip; ++i) { cursor.next(); }
-var cursor2 = t.find({geo: {$geoNear : {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}}).skip(someSkip).limit(someLimit);
+cursor = t.find({geo: {$geoNear: {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}})
+ .limit(someLimit + someSkip);
+for (var i = 0; i < someSkip; ++i) {
+ cursor.next();
+}
+var cursor2 = t.find({geo: {$geoNear: {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}})
+ .skip(someSkip)
+ .limit(someLimit);
while (cursor.hasNext()) {
assert(cursor2.hasNext());
assert.eq(cursor.next(), cursor2.next());
diff --git a/jstests/core/geo_s2dedupnear.js b/jstests/core/geo_s2dedupnear.js
index 1b6f11ce504..21378893720 100644
--- a/jstests/core/geo_s2dedupnear.js
+++ b/jstests/core/geo_s2dedupnear.js
@@ -3,9 +3,11 @@
t = db.geo_s2dedupnear;
t.drop();
-t.ensureIndex( { geo : "2dsphere" } );
-var x = { "type" : "Polygon",
- "coordinates" : [ [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]]]};
+t.ensureIndex({geo: "2dsphere"});
+var x = {
+ "type": "Polygon",
+ "coordinates": [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]]]
+};
t.insert({geo: x});
-res = t.find({geo: {$geoNear: {"type" : "Point", "coordinates" : [31, 41]}}});
+res = t.find({geo: {$geoNear: {"type": "Point", "coordinates": [31, 41]}}});
assert.eq(res.itcount(), 1);
diff --git a/jstests/core/geo_s2descindex.js b/jstests/core/geo_s2descindex.js
index 26c422bc04f..d6dca95213d 100644
--- a/jstests/core/geo_s2descindex.js
+++ b/jstests/core/geo_s2descindex.js
@@ -5,13 +5,24 @@
var coll = db.getCollection("twodspheredesc");
var descriptors = [["field1", -1], ["field2", -1], ["coordinates", "2dsphere"]];
-var docA = {field1 : "a", field2 : 1, coordinates : [-118.2400013, 34.073893]};
-var docB = {field1 : "b", field2 : 1, coordinates : [-118.2400012, 34.073894]};
+var docA = {
+ field1: "a",
+ field2: 1,
+ coordinates: [-118.2400013, 34.073893]
+};
+var docB = {
+ field1: "b",
+ field2: 1,
+ coordinates: [-118.2400012, 34.073894]
+};
// Try both regular and near index cursors
-var query = {coordinates : {$geoWithin : {$centerSphere : [[-118.240013, 34.073893],
- 0.44915760491198753]}}};
-var queryNear = {coordinates : {$geoNear : {"type" : "Point", "coordinates" : [0, 0]}}};
+var query = {
+ coordinates: {$geoWithin: {$centerSphere: [[-118.240013, 34.073893], 0.44915760491198753]}}
+};
+var queryNear = {
+ coordinates: {$geoNear: {"type": "Point", "coordinates": [0, 0]}}
+};
//
// The idea here is we try "2dsphere" indexes in combination with descending
@@ -19,10 +30,9 @@ var queryNear = {coordinates : {$geoNear : {"type" : "Point", "coordinates" : [0
// positions and ensure that we return correct results.
//
-for ( var t = 0; t < descriptors.length; t++) {
-
+for (var t = 0; t < descriptors.length; t++) {
var descriptor = {};
- for ( var i = 0; i < descriptors.length; i++) {
+ for (var i = 0; i < descriptors.length; i++) {
descriptor[descriptors[i][0]] = descriptors[i][1];
}
@@ -34,10 +44,10 @@ for ( var t = 0; t < descriptors.length; t++) {
coll.insert(docA);
coll.insert(docB);
- assert.eq(1, coll.count(Object.merge(query, {field1 : "a"})));
- assert.eq(1, coll.count(Object.merge(query, {field1 : "b"})));
- assert.eq(2, coll.count(Object.merge(query, {field2 : 1})));
- assert.eq(0, coll.count(Object.merge(query, {field2 : 0})));
+ assert.eq(1, coll.count(Object.merge(query, {field1: "a"})));
+ assert.eq(1, coll.count(Object.merge(query, {field1: "b"})));
+ assert.eq(2, coll.count(Object.merge(query, {field2: 1})));
+ assert.eq(0, coll.count(Object.merge(query, {field2: 0})));
var firstEls = descriptors.splice(1);
descriptors = firstEls.concat(descriptors);
@@ -50,15 +60,16 @@ for ( var t = 0; t < descriptors.length; t++) {
jsTest.log("Trying case found in wild...");
coll.drop();
-coll.ensureIndex({coordinates : "2dsphere", field : -1});
-coll.insert({coordinates : [-118.240013, 34.073893]});
-var query = {coordinates : {$geoWithin : {$centerSphere : [[-118.240013, 34.073893],
- 0.44915760491198753]}},
- field : 1};
+coll.ensureIndex({coordinates: "2dsphere", field: -1});
+coll.insert({coordinates: [-118.240013, 34.073893]});
+var query = {
+ coordinates: {$geoWithin: {$centerSphere: [[-118.240013, 34.073893], 0.44915760491198753]}},
+ field: 1
+};
assert.eq(null, coll.findOne(query));
coll.remove({});
-coll.insert({coordinates : [-118.240013, 34.073893], field : 1});
+coll.insert({coordinates: [-118.240013, 34.073893], field: 1});
assert.neq(null, coll.findOne(query));
jsTest.log("Success!");
diff --git a/jstests/core/geo_s2disjoint_holes.js b/jstests/core/geo_s2disjoint_holes.js
index bb6b8a4ef2c..a3988e9a614 100644
--- a/jstests/core/geo_s2disjoint_holes.js
+++ b/jstests/core/geo_s2disjoint_holes.js
@@ -8,17 +8,17 @@
// http://geojson.org/geojson-spec.html#polygon
//
-var t = db.geo_s2disjoint_holes,
- coordinates = [
- // One square.
- [[9, 9], [9, 11], [11, 11], [11, 9], [9, 9]],
- // Another disjoint square.
- [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]
- ],
- poly = {
- type: 'Polygon',
- coordinates: coordinates
- },
+var t = db.geo_s2disjoint_holes, coordinates = [
+ // One square.
+ [[9, 9], [9, 11], [11, 11], [11, 9], [9, 9]],
+ // Another disjoint square.
+ [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]
+],
+ poly =
+ {
+ type: 'Polygon',
+ coordinates: coordinates
+ },
multiPoly = {
type: 'MultiPolygon',
// Multi-polygon's coordinates are wrapped in one more array.
@@ -32,19 +32,13 @@ jsTest.log("We're going to print some error messages, don't be alarmed.");
//
// Can't query with a polygon or multi-polygon that has a non-contained hole.
//
-print(assert.throws(
- function() {
- t.findOne({geo: {$geoWithin: {$geometry: poly}}});
- },
- [],
- "parsing a polygon with non-overlapping holes."));
+print(assert.throws(function() {
+ t.findOne({geo: {$geoWithin: {$geometry: poly}}});
+}, [], "parsing a polygon with non-overlapping holes."));
-print(assert.throws(
- function() {
- t.findOne({geo: {$geoWithin: {$geometry: multiPoly}}});
- },
- [],
- "parsing a multi-polygon with non-overlapping holes."));
+print(assert.throws(function() {
+ t.findOne({geo: {$geoWithin: {$geometry: multiPoly}}});
+}, [], "parsing a multi-polygon with non-overlapping holes."));
//
// Can't insert a bad polygon or a bad multi-polygon with a 2dsphere index.
diff --git a/jstests/core/geo_s2dupe_points.js b/jstests/core/geo_s2dupe_points.js
index 5b9a30e61c5..63e4369d2fa 100644
--- a/jstests/core/geo_s2dupe_points.js
+++ b/jstests/core/geo_s2dupe_points.js
@@ -15,53 +15,72 @@ function testDuplicates(shapeName, shapeWithDupes, shapeWithoutDupes) {
assert.neq(shapeWithoutDupes, t.findOne({_id: shapeName}).geo);
// can query with $geoIntersects inserted doc using both the duplicated and de-duplicated docs
- assert.eq(t.find({ geo: { $geoIntersects: { $geometry : shapeWithDupes.geo } } } ).itcount(), 1);
- assert.eq(t.find({ geo: { $geoIntersects: { $geometry : shapeWithoutDupes } } } ).itcount(), 1);
+ assert.eq(t.find({geo: {$geoIntersects: {$geometry: shapeWithDupes.geo}}}).itcount(), 1);
+ assert.eq(t.find({geo: {$geoIntersects: {$geometry: shapeWithoutDupes}}}).itcount(), 1);
// direct document equality in queries is preserved
- assert.eq(t.find({ geo: shapeWithoutDupes} ).itcount(), 0);
- assert.eq(t.find({ geo: shapeWithDupes.geo } ).itcount(), 1);
+ assert.eq(t.find({geo: shapeWithoutDupes}).itcount(), 0);
+ assert.eq(t.find({geo: shapeWithDupes.geo}).itcount(), 1);
}
// LineString
-var lineWithDupes = { _id: "line", geo: { type: "LineString",
- coordinates: [ [40,5], [40,5], [ 40, 5], [41, 6], [41,6] ]
- }
+var lineWithDupes = {
+ _id: "line",
+ geo: {type: "LineString", coordinates: [[40, 5], [40, 5], [40, 5], [41, 6], [41, 6]]}
+};
+var lineWithoutDupes = {
+ type: "LineString",
+ coordinates: [[40, 5], [41, 6]]
};
-var lineWithoutDupes = { type: "LineString", coordinates: [ [40,5], [41,6] ] };
// Polygon
-var polygonWithDupes = { _id: "poly", geo: { type: "Polygon",
- coordinates: [
- [ [-3.0, -3.0], [3.0, -3.0], [3.0, 3.0], [-3.0, 3.0], [-3.0, -3.0] ],
- [ [-2.0, -2.0], [2.0, -2.0], [2.0, 2.0], [-2.0, 2.0], [-2.0, -2.0], [-2.0, -2.0] ]
- ] }
+var polygonWithDupes = {
+ _id: "poly",
+ geo: {
+ type: "Polygon",
+ coordinates: [
+ [[-3.0, -3.0], [3.0, -3.0], [3.0, 3.0], [-3.0, 3.0], [-3.0, -3.0]],
+ [[-2.0, -2.0], [2.0, -2.0], [2.0, 2.0], [-2.0, 2.0], [-2.0, -2.0], [-2.0, -2.0]]
+ ]
+ }
};
-var polygonWithoutDupes = { type: "Polygon",
+var polygonWithoutDupes = {
+ type: "Polygon",
coordinates: [
- [ [-3.0, -3.0], [3.0, -3.0], [3.0, 3.0], [-3.0, 3.0], [-3.0, -3.0] ],
- [ [-2.0, -2.0], [2.0, -2.0], [2.0, 2.0], [-2.0, 2.0], [-2.0, -2.0] ]
+ [[-3.0, -3.0], [3.0, -3.0], [3.0, 3.0], [-3.0, 3.0], [-3.0, -3.0]],
+ [[-2.0, -2.0], [2.0, -2.0], [2.0, 2.0], [-2.0, 2.0], [-2.0, -2.0]]
]
};
// MultiPolygon
-var multiPolygonWithDupes = { _id: "multi", geo: { type: "MultiPolygon", coordinates: [
- [
- [ [102.0, 2.0], [103.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0] ]
- ],
- [
- [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ],
- [ [100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.8, 0.8], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2] ]
+var multiPolygonWithDupes = {
+ _id: "multi",
+ geo: {
+ type: "MultiPolygon",
+ coordinates: [
+ [[[102.0, 2.0], [103.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
+ [
+ [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
+ [
+ [100.2, 0.2],
+ [100.8, 0.2],
+ [100.8, 0.8],
+ [100.8, 0.8],
+ [100.8, 0.8],
+ [100.2, 0.8],
+ [100.2, 0.2]
+ ]
+ ]
]
- ]
-} };
-var multiPolygonWithoutDupes = { type: "MultiPolygon", coordinates: [
- [
- [ [102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0] ]
- ],
+ }
+};
+var multiPolygonWithoutDupes = {
+ type: "MultiPolygon",
+ coordinates: [
+ [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
[
- [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0] ],
- [ [100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2] ]
+ [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
+ [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]
]
]
};
diff --git a/jstests/core/geo_s2edgecases.js b/jstests/core/geo_s2edgecases.js
index 6cb8ff63809..3dabfdf0bcb 100755..100644
--- a/jstests/core/geo_s2edgecases.js
+++ b/jstests/core/geo_s2edgecases.js
@@ -1,40 +1,61 @@
t = db.geo_s2edgecases;
t.drop();
-roundworldpoint = { "type" : "Point", "coordinates": [ 180, 0 ] };
+roundworldpoint = {
+ "type": "Point",
+ "coordinates": [180, 0]
+};
// Opposite the equator
-roundworld = { "type" : "Polygon",
- "coordinates" : [ [ [179,1], [-179,1], [-179,-1], [179,-1], [179,1]]]};
-t.insert({geo : roundworld});
+roundworld = {
+ "type": "Polygon",
+ "coordinates": [[[179, 1], [-179, 1], [-179, -1], [179, -1], [179, 1]]]
+};
+t.insert({geo: roundworld});
-roundworld2 = { "type" : "Polygon",
- "coordinates" : [ [ [179,1], [179,-1], [-179,-1], [-179,1], [179,1]]]};
-t.insert({geo : roundworld2});
+roundworld2 = {
+ "type": "Polygon",
+ "coordinates": [[[179, 1], [179, -1], [-179, -1], [-179, 1], [179, 1]]]
+};
+t.insert({geo: roundworld2});
// North pole
-santapoint = { "type" : "Point", "coordinates": [ 180, 90 ] };
-santa = { "type" : "Polygon",
- "coordinates" : [ [ [179,89], [179,90], [-179,90], [-179,89], [179,89]]]};
-t.insert({geo : santa});
-santa2 = { "type" : "Polygon",
- "coordinates" : [ [ [179,89], [-179,89], [-179,90], [179,90], [179,89]]]};
-t.insert({geo : santa2});
+santapoint = {
+ "type": "Point",
+ "coordinates": [180, 90]
+};
+santa = {
+ "type": "Polygon",
+ "coordinates": [[[179, 89], [179, 90], [-179, 90], [-179, 89], [179, 89]]]
+};
+t.insert({geo: santa});
+santa2 = {
+ "type": "Polygon",
+ "coordinates": [[[179, 89], [-179, 89], [-179, 90], [179, 90], [179, 89]]]
+};
+t.insert({geo: santa2});
// South pole
-penguinpoint = { "type" : "Point", "coordinates": [ 0, -90 ] };
-penguin1 = { "type" : "Polygon",
- "coordinates" : [ [ [0,-89], [0,-90], [179,-90], [179,-89], [0,-89]]]};
-t.insert({geo : penguin1});
-penguin2 = { "type" : "Polygon",
- "coordinates" : [ [ [0,-89], [179,-89], [179,-90], [0,-90], [0,-89]]]};
-t.insert({geo : penguin2});
+penguinpoint = {
+ "type": "Point",
+ "coordinates": [0, -90]
+};
+penguin1 = {
+ "type": "Polygon",
+ "coordinates": [[[0, -89], [0, -90], [179, -90], [179, -89], [0, -89]]]
+};
+t.insert({geo: penguin1});
+penguin2 = {
+ "type": "Polygon",
+ "coordinates": [[[0, -89], [179, -89], [179, -90], [0, -90], [0, -89]]]
+};
+t.insert({geo: penguin2});
-t.ensureIndex( { geo : "2dsphere", nonGeo: 1 } );
+t.ensureIndex({geo: "2dsphere", nonGeo: 1});
-res = t.find({ "geo" : { "$geoIntersects" : { "$geometry" : roundworldpoint} } });
+res = t.find({"geo": {"$geoIntersects": {"$geometry": roundworldpoint}}});
assert.eq(res.count(), 2);
-res = t.find({ "geo" : { "$geoIntersects" : { "$geometry" : santapoint} } });
+res = t.find({"geo": {"$geoIntersects": {"$geometry": santapoint}}});
assert.eq(res.count(), 2);
-res = t.find({ "geo" : { "$geoIntersects" : { "$geometry" : penguinpoint} } });
+res = t.find({"geo": {"$geoIntersects": {"$geometry": penguinpoint}}});
assert.eq(res.count(), 2);
diff --git a/jstests/core/geo_s2exact.js b/jstests/core/geo_s2exact.js
index 29150d63376..3acd5b68969 100644
--- a/jstests/core/geo_s2exact.js
+++ b/jstests/core/geo_s2exact.js
@@ -10,12 +10,20 @@ function test(geometry) {
t.dropIndex({geo: "2dsphere"});
}
-pointA = { "type" : "Point", "coordinates": [ 40, 5 ] };
+pointA = {
+ "type": "Point",
+ "coordinates": [40, 5]
+};
test(pointA);
-someline = { "type" : "LineString", "coordinates": [ [ 40, 5], [41, 6]]};
+someline = {
+ "type": "LineString",
+ "coordinates": [[40, 5], [41, 6]]
+};
test(someline);
-somepoly = { "type" : "Polygon",
- "coordinates" : [ [ [40,5], [40,6], [41,6], [41,5], [40,5]]]};
+somepoly = {
+ "type": "Polygon",
+ "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]
+};
test(somepoly);
diff --git a/jstests/core/geo_s2explain.js b/jstests/core/geo_s2explain.js
index e5035713e38..c8d32e00379 100644
--- a/jstests/core/geo_s2explain.js
+++ b/jstests/core/geo_s2explain.js
@@ -4,44 +4,48 @@
var t = db.jstests_geo_s2explain;
t.drop();
-var point1 = { loc : { type : "Point", coordinates : [10, 10] } };
-var point2 = { loc : { type : "Point", coordinates : [10.001, 10] } };
-assert.writeOK( t.insert( [ point1, point2] ) );
-
-assert.commandWorked( t.ensureIndex( { loc : "2dsphere"} ) );
-
-var explain = t.find( {
- loc: { $nearSphere : { type : "Point", coordinates : [10, 10] } }
- } ).limit(1).explain("executionStats");
+var point1 = {
+ loc: {type: "Point", coordinates: [10, 10]}
+};
+var point2 = {
+ loc: {type: "Point", coordinates: [10.001, 10]}
+};
+assert.writeOK(t.insert([point1, point2]));
+
+assert.commandWorked(t.ensureIndex({loc: "2dsphere"}));
+
+var explain = t.find({loc: {$nearSphere: {type: "Point", coordinates: [10, 10]}}})
+ .limit(1)
+ .explain("executionStats");
var inputStage = explain.executionStats.executionStages.inputStage;
-assert.eq( 1, inputStage.searchIntervals.length );
+assert.eq(1, inputStage.searchIntervals.length);
// Populates the collection with a few hundred points at varying distances
var points = [];
-for ( var i = 10; i < 70; i+=0.1 ) {
- points.push({ loc : { type : "Point", coordinates : [i, i] } });
+for (var i = 10; i < 70; i += 0.1) {
+ points.push({loc: {type: "Point", coordinates: [i, i]}});
}
-assert.writeOK( t.insert( points ) );
+assert.writeOK(t.insert(points));
-explain = t.find( {
- loc: { $nearSphere : { type : "Point", coordinates : [10, 10] } }
- } ).limit(10).explain("executionStats");
+explain = t.find({loc: {$nearSphere: {type: "Point", coordinates: [10, 10]}}})
+ .limit(10)
+ .explain("executionStats");
inputStage = explain.executionStats.executionStages.inputStage;
-assert.eq( inputStage.inputStages.length, inputStage.searchIntervals.length );
+assert.eq(inputStage.inputStages.length, inputStage.searchIntervals.length);
-explain = t.find( {
- loc: { $nearSphere : { type : "Point", coordinates : [10, 10] } }
- } ).limit(50).explain("executionStats");
+explain = t.find({loc: {$nearSphere: {type: "Point", coordinates: [10, 10]}}})
+ .limit(50)
+ .explain("executionStats");
inputStage = explain.executionStats.executionStages.inputStage;
-assert.eq( inputStage.inputStages.length, inputStage.searchIntervals.length );
+assert.eq(inputStage.inputStages.length, inputStage.searchIntervals.length);
-explain = t.find( {
- loc: { $nearSphere : { type : "Point", coordinates : [10, 10] } }
- } ).limit(200).explain("executionStats");
+explain = t.find({loc: {$nearSphere: {type: "Point", coordinates: [10, 10]}}})
+ .limit(200)
+ .explain("executionStats");
inputStage = explain.executionStats.executionStages.inputStage;
-assert.eq( inputStage.inputStages.length, inputStage.searchIntervals.length );
+assert.eq(inputStage.inputStages.length, inputStage.searchIntervals.length);
diff --git a/jstests/core/geo_s2holesameasshell.js b/jstests/core/geo_s2holesameasshell.js
index 91c05ca4979..29f00b88f7a 100644
--- a/jstests/core/geo_s2holesameasshell.js
+++ b/jstests/core/geo_s2holesameasshell.js
@@ -3,21 +3,30 @@ var t = db.geo_s2holessameasshell;
t.drop();
t.ensureIndex({geo: "2dsphere"});
-var centerPoint = {"type": "Point", "coordinates": [0.5, 0.5]};
-var edgePoint = {"type": "Point", "coordinates": [0, 0.5]};
-var cornerPoint = {"type": "Point", "coordinates": [0, 0]};
+var centerPoint = {
+ "type": "Point",
+ "coordinates": [0.5, 0.5]
+};
+var edgePoint = {
+ "type": "Point",
+ "coordinates": [0, 0.5]
+};
+var cornerPoint = {
+ "type": "Point",
+ "coordinates": [0, 0]
+};
// Various "edge" cases. None of them should be returned by the non-polygon
// polygon below.
-t.insert({geo : centerPoint});
-t.insert({geo : edgePoint});
-t.insert({geo : cornerPoint});
+t.insert({geo: centerPoint});
+t.insert({geo: edgePoint});
+t.insert({geo: cornerPoint});
// This generates an empty covering.
-var polygonWithFullHole = { "type" : "Polygon", "coordinates": [
- [[0,0], [0,1], [1, 1], [1, 0], [0, 0]],
- [[0,0], [0,1], [1, 1], [1, 0], [0, 0]]
- ]
+var polygonWithFullHole = {
+ "type": "Polygon",
+ "coordinates":
+ [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]], [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
};
// No keys for insert should error.
@@ -25,14 +34,17 @@ assert.writeError(t.insert({geo: polygonWithFullHole}));
// No covering to search over should give an empty result set.
assert.throws(function() {
- return t.find({geo: {$geoWithin: {$geometry: polygonWithFullHole}}}).count();});
+ return t.find({geo: {$geoWithin: {$geometry: polygonWithFullHole}}}).count();
+});
// Similar polygon to the one above, but is covered by two holes instead of
// one.
-var polygonWithTwoHolesCoveringWholeArea = {"type" : "Polygon", "coordinates": [
- [[0,0], [0,1], [1, 1], [1, 0], [0, 0]],
- [[0,0], [0,0.5], [1, 0.5], [1, 0], [0, 0]],
- [[0,0.5], [0,1], [1, 1], [1, 0.5], [0, 0.5]]
+var polygonWithTwoHolesCoveringWholeArea = {
+ "type": "Polygon",
+ "coordinates": [
+ [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]],
+ [[0, 0], [0, 0.5], [1, 0.5], [1, 0], [0, 0]],
+ [[0, 0.5], [0, 1], [1, 1], [1, 0.5], [0, 0.5]]
]
};
@@ -41,4 +53,5 @@ assert.writeError(t.insert({geo: polygonWithTwoHolesCoveringWholeArea}));
// No covering to search over should give an empty result set.
assert.throws(function() {
- return t.find({geo: {$geoWithin: {$geometry: polygonWithTwoHolesCoveringWholeArea}}}).count();});
+ return t.find({geo: {$geoWithin: {$geometry: polygonWithTwoHolesCoveringWholeArea}}}).count();
+});
diff --git a/jstests/core/geo_s2index.js b/jstests/core/geo_s2index.js
index 1909fb95783..cc25b4fabfe 100755..100644
--- a/jstests/core/geo_s2index.js
+++ b/jstests/core/geo_s2index.js
@@ -2,131 +2,164 @@ t = db.geo_s2index;
t.drop();
// We internally drop adjacent duplicate points in lines.
-someline = { "type" : "LineString", "coordinates": [ [40,5], [40,5], [ 40, 5], [41, 6], [41,6]]};
-t.insert( {geo : someline , nonGeo: "someline"});
+someline = {
+ "type": "LineString",
+ "coordinates": [[40, 5], [40, 5], [40, 5], [41, 6], [41, 6]]
+};
+t.insert({geo: someline, nonGeo: "someline"});
t.ensureIndex({geo: "2dsphere"});
-foo = t.find({geo: {$geoIntersects: {$geometry: {type: "Point", coordinates: [40,5]}}}}).next();
+foo = t.find({geo: {$geoIntersects: {$geometry: {type: "Point", coordinates: [40, 5]}}}}).next();
assert.eq(foo.geo, someline);
t.dropIndex({geo: "2dsphere"});
-pointA = { "type" : "Point", "coordinates": [ 40, 5 ] };
-t.insert( {geo : pointA , nonGeo: "pointA"});
-
-pointD = { "type" : "Point", "coordinates": [ 41.001, 6.001 ] };
-t.insert( {geo : pointD , nonGeo: "pointD"});
-
-pointB = { "type" : "Point", "coordinates": [ 41, 6 ] };
-t.insert( {geo : pointB , nonGeo: "pointB"});
-
-pointC = { "type" : "Point", "coordinates": [ 41, 6 ] };
-t.insert( {geo : pointC} );
+pointA = {
+ "type": "Point",
+ "coordinates": [40, 5]
+};
+t.insert({geo: pointA, nonGeo: "pointA"});
+
+pointD = {
+ "type": "Point",
+ "coordinates": [41.001, 6.001]
+};
+t.insert({geo: pointD, nonGeo: "pointD"});
+
+pointB = {
+ "type": "Point",
+ "coordinates": [41, 6]
+};
+t.insert({geo: pointB, nonGeo: "pointB"});
+
+pointC = {
+ "type": "Point",
+ "coordinates": [41, 6]
+};
+t.insert({geo: pointC});
// Add a point within the polygon but not on the border. Don't want to be on
// the path of the polyline.
-pointE = { "type" : "Point", "coordinates": [ 40.6, 5.4 ] };
-t.insert( {geo : pointE} );
+pointE = {
+ "type": "Point",
+ "coordinates": [40.6, 5.4]
+};
+t.insert({geo: pointE});
// Make sure we can index this without error.
t.insert({nonGeo: "noGeoField!"});
-somepoly = { "type" : "Polygon",
- "coordinates" : [ [ [40,5], [40,6], [41,6], [41,5], [40,5]]]};
-t.insert( {geo : somepoly, nonGeo: "somepoly" });
+somepoly = {
+ "type": "Polygon",
+ "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]
+};
+t.insert({geo: somepoly, nonGeo: "somepoly"});
-var res = t.ensureIndex( { geo : "2dsphere", nonGeo: 1 } );
+var res = t.ensureIndex({geo: "2dsphere", nonGeo: 1});
// We have a point without any geo data. Don't error.
assert.commandWorked(res);
-res = t.find({ "geo" : { "$geoIntersects" : { "$geometry" : pointA} } });
+res = t.find({"geo": {"$geoIntersects": {"$geometry": pointA}}});
assert.eq(res.itcount(), 3);
-res = t.find({ "geo" : { "$geoIntersects" : { "$geometry" : pointB} } });
+res = t.find({"geo": {"$geoIntersects": {"$geometry": pointB}}});
assert.eq(res.itcount(), 4);
-res = t.find({ "geo" : { "$geoIntersects" : { "$geometry" : pointD} } });
+res = t.find({"geo": {"$geoIntersects": {"$geometry": pointD}}});
assert.eq(res.itcount(), 1);
-res = t.find({ "geo" : { "$geoIntersects" : { "$geometry" : someline} } });
+res = t.find({"geo": {"$geoIntersects": {"$geometry": someline}}});
assert.eq(res.itcount(), 5);
-res = t.find({ "geo" : { "$geoIntersects" : { "$geometry" : somepoly} } });
+res = t.find({"geo": {"$geoIntersects": {"$geometry": somepoly}}});
assert.eq(res.itcount(), 6);
-res = t.find({ "geo" : { "$within" : { "$geometry" : somepoly} } });
+res = t.find({"geo": {"$within": {"$geometry": somepoly}}});
assert.eq(res.itcount(), 6);
-res = t.find({ "geo" : { "$geoIntersects" : { "$geometry" : somepoly} } }).limit(1);
+res = t.find({"geo": {"$geoIntersects": {"$geometry": somepoly}}}).limit(1);
assert.eq(res.itcount(), 1);
-res = t.find({ "nonGeo": "pointA",
- "geo" : { "$geoIntersects" : { "$geometry" : somepoly} } });
+res = t.find({"nonGeo": "pointA", "geo": {"$geoIntersects": {"$geometry": somepoly}}});
assert.eq(res.itcount(), 1);
// Don't crash mongod if we give it bad input.
t.drop();
-t.ensureIndex({loc: "2dsphere", x:1});
-t.save({loc: [0,0]});
-assert.throws(function() { return t.count({loc: {$foo:[0,0]}}); });
-assert.throws(function() { return t.find({ "nonGeo": "pointA",
- "geo" : { "$geoIntersects" : { "$geometry" : somepoly},
- "$near": {"$geometry" : somepoly }}}).count();});
+t.ensureIndex({loc: "2dsphere", x: 1});
+t.save({loc: [0, 0]});
+assert.throws(function() {
+ return t.count({loc: {$foo: [0, 0]}});
+});
+assert.throws(function() {
+ return t.find({
+ "nonGeo": "pointA",
+ "geo": {"$geoIntersects": {"$geometry": somepoly}, "$near": {"$geometry": somepoly}}
+ }).count();
+});
// If we specify a datum, it has to be valid (WGS84).
t.drop();
t.ensureIndex({loc: "2dsphere"});
-res = t.insert({ loc: { type: 'Point',
- coordinates: [40, 5],
- crs: { type: 'name', properties: { name: 'EPSG:2000' }}}});
+res = t.insert({
+ loc: {
+ type: 'Point',
+ coordinates: [40, 5],
+ crs: {type: 'name', properties: {name: 'EPSG:2000'}}
+ }
+});
assert.writeError(res);
assert.eq(0, t.find().itcount());
-res = t.insert({ loc: { type: 'Point', coordinates: [40, 5] }});
+res = t.insert({loc: {type: 'Point', coordinates: [40, 5]}});
assert.writeOK(res);
-res = t.insert({ loc: { type: 'Point',
- coordinates: [40, 5],
- crs: { type: 'name', properties: {name :'EPSG:4326' }}}});
+res = t.insert({
+ loc: {
+ type: 'Point',
+ coordinates: [40, 5],
+ crs: {type: 'name', properties: {name: 'EPSG:4326'}}
+ }
+});
assert.writeOK(res);
-res = t.insert({ loc: { type:'Point',
- coordinates: [40, 5],
- crs: { type: 'name',
- properties: { name: 'urn:ogc:def:crs:OGC:1.3:CRS84'}}}});
+res = t.insert({
+ loc: {
+ type: 'Point',
+ coordinates: [40, 5],
+ crs: {type: 'name', properties: {name: 'urn:ogc:def:crs:OGC:1.3:CRS84'}}
+ }
+});
assert.writeOK(res);
// We can pass level parameters and we verify that they're valid.
// 0 <= coarsestIndexedLevel <= finestIndexedLevel <= 30.
t.drop();
-t.save({loc: [0,0]});
-res = t.ensureIndex({ loc: "2dsphere" }, { finestIndexedLevel: 17, coarsestIndexedLevel: 5 });
+t.save({loc: [0, 0]});
+res = t.ensureIndex({loc: "2dsphere"}, {finestIndexedLevel: 17, coarsestIndexedLevel: 5});
assert.commandWorked(res);
// Ensure the index actually works at a basic level
-assert.neq(null,
- t.findOne({ loc : { $geoNear : { $geometry : { type: 'Point', coordinates: [0, 0] } } } }));
+assert.neq(null, t.findOne({loc: {$geoNear: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}));
t.drop();
-t.save({loc: [0,0]});
-res = t.ensureIndex({ loc: "2dsphere" }, { finestIndexedLevel: 31, coarsestIndexedLevel: 5 });
+t.save({loc: [0, 0]});
+res = t.ensureIndex({loc: "2dsphere"}, {finestIndexedLevel: 31, coarsestIndexedLevel: 5});
assert.commandFailed(res);
t.drop();
-t.save({loc: [0,0]});
-res = t.ensureIndex({ loc: "2dsphere" }, { finestIndexedLevel: 30, coarsestIndexedLevel: 0 });
+t.save({loc: [0, 0]});
+res = t.ensureIndex({loc: "2dsphere"}, {finestIndexedLevel: 30, coarsestIndexedLevel: 0});
assert.commandWorked(res);
-//Ensure the index actually works at a basic level
-assert.neq(null,
- t.findOne({ loc : { $geoNear : { $geometry : { type: 'Point', coordinates: [0, 0] } } } }));
+// Ensure the index actually works at a basic level
+assert.neq(null, t.findOne({loc: {$geoNear: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}));
t.drop();
-t.save({loc: [0,0]});
-res = t.ensureIndex({ loc: "2dsphere" }, { finestIndexedLevel: 30, coarsestIndexedLevel: -1 });
+t.save({loc: [0, 0]});
+res = t.ensureIndex({loc: "2dsphere"}, {finestIndexedLevel: 30, coarsestIndexedLevel: -1});
assert.commandFailed(res);
// SERVER-21491 Verify that 2dsphere index options require correct types.
-res = t.ensureIndex({ loc: '2dsphere' }, { '2dsphereIndexVersion': 'NOT_A_NUMBER' });
+res = t.ensureIndex({loc: '2dsphere'}, {'2dsphereIndexVersion': 'NOT_A_NUMBER'});
assert.commandFailed(res);
-res = t.ensureIndex({ loc: '2dsphere' }, { finestIndexedLevel: 'NOT_A_NUMBER' });
+res = t.ensureIndex({loc: '2dsphere'}, {finestIndexedLevel: 'NOT_A_NUMBER'});
assert.commandFailedWithCode(res, ErrorCodes.TypeMismatch);
-res = t.ensureIndex({ loc: '2dsphere' }, { coarsestIndexedLevel: 'NOT_A_NUMBER' });
+res = t.ensureIndex({loc: '2dsphere'}, {coarsestIndexedLevel: 'NOT_A_NUMBER'});
assert.commandFailedWithCode(res, ErrorCodes.TypeMismatch);
// Ensure polygon which previously triggered an assertion error in SERVER-19674
@@ -134,10 +167,8 @@ assert.commandFailedWithCode(res, ErrorCodes.TypeMismatch);
t.drop();
t.insert({
loc: {
- "type" : "Polygon",
- "coordinates" : [
- [[-45, 0], [-44.875, 0], [-44.875, 0.125], [-45, 0.125], [-45,0]]
- ]
+ "type": "Polygon",
+ "coordinates": [[[-45, 0], [-44.875, 0], [-44.875, 0.125], [-45, 0.125], [-45, 0]]]
}
});
res = t.createIndex({loc: "2dsphere"});
diff --git a/jstests/core/geo_s2indexoldformat.js b/jstests/core/geo_s2indexoldformat.js
index 4ed0afba8dd..43974f695cb 100755..100644
--- a/jstests/core/geo_s2indexoldformat.js
+++ b/jstests/core/geo_s2indexoldformat.js
@@ -3,26 +3,26 @@
t = db.geo_s2indexoldformat;
t.drop();
-t.insert( {geo : [40, 5], nonGeo: ["pointA"]});
-t.insert( {geo : [41.001, 6.001], nonGeo: ["pointD"]});
-t.insert( {geo : [41, 6], nonGeo: ["pointB"]});
-t.insert( {geo : [41, 6]} );
-t.insert( {geo : {x:40.6, y:5.4}} );
+t.insert({geo: [40, 5], nonGeo: ["pointA"]});
+t.insert({geo: [41.001, 6.001], nonGeo: ["pointD"]});
+t.insert({geo: [41, 6], nonGeo: ["pointB"]});
+t.insert({geo: [41, 6]});
+t.insert({geo: {x: 40.6, y: 5.4}});
-t.ensureIndex( { geo : "2dsphere", nonGeo: 1 } );
+t.ensureIndex({geo: "2dsphere", nonGeo: 1});
-res = t.find({ "geo" : { "$geoIntersects" : { "$geometry": {x:40, y:5}}}});
+res = t.find({"geo": {"$geoIntersects": {"$geometry": {x: 40, y: 5}}}});
assert.eq(res.count(), 1);
-res = t.find({ "geo" : { "$geoIntersects" : {"$geometry": [41,6]}}});
+res = t.find({"geo": {"$geoIntersects": {"$geometry": [41, 6]}}});
assert.eq(res.count(), 2);
// We don't support legacy polygons in 2dsphere.
-assert.writeError(t.insert( {geo : [[40,5],[40,6],[41,6],[41,5]], nonGeo: ["somepoly"] }));
-assert.writeError(t.insert( {geo : {a:{x:40,y:5},b:{x:40,y:6},c:{x:41,y:6},d:{x:41,y:5}}}));
+assert.writeError(t.insert({geo: [[40, 5], [40, 6], [41, 6], [41, 5]], nonGeo: ["somepoly"]}));
+assert.writeError(
+ t.insert({geo: {a: {x: 40, y: 5}, b: {x: 40, y: 6}, c: {x: 41, y: 6}, d: {x: 41, y: 5}}}));
// Test "Can't canonicalize query: BadValue bad geo query" error.
assert.throws(function() {
- t.findOne({ "geo" : { "$geoIntersects" : {"$geometry": [[40,5],[40,6],[41,6],[41,5]]}}});
+ t.findOne({"geo": {"$geoIntersects": {"$geometry": [[40, 5], [40, 6], [41, 6], [41, 5]]}}});
});
-
diff --git a/jstests/core/geo_s2indexversion1.js b/jstests/core/geo_s2indexversion1.js
index 0899eb404cd..49aa80dbbca 100644
--- a/jstests/core/geo_s2indexversion1.js
+++ b/jstests/core/geo_s2indexversion1.js
@@ -71,7 +71,9 @@ coll.drop();
res = coll.ensureIndex({geo: "2dsphere"});
assert.commandWorked(res);
-var specObj = coll.getIndexes().filter( function(z){ return z.name == "geo_2dsphere"; } )[0];
+var specObj = coll.getIndexes().filter(function(z) {
+ return z.name == "geo_2dsphere";
+})[0];
assert.eq(3, specObj["2dsphereIndexVersion"]);
coll.drop();
@@ -104,38 +106,78 @@ coll.drop();
// Test compatibility of various GeoJSON objects with both 2dsphere index versions.
//
-var pointDoc = {geo: {type: "Point", coordinates: [40, 5]}};
-var lineStringDoc = {geo: {type: "LineString", coordinates: [[40, 5], [41, 6]]}};
-var polygonDoc = {geo: {type: "Polygon", coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}};
-var multiPointDoc = {geo: {type: "MultiPoint",
- coordinates: [[-73.9580, 40.8003], [-73.9498, 40.7968],
- [-73.9737, 40.7648], [-73.9814, 40.7681]]}};
-var multiLineStringDoc = {geo: {type: "MultiLineString",
- coordinates: [[[-73.96943, 40.78519], [-73.96082, 40.78095]],
- [[-73.96415, 40.79229], [-73.95544, 40.78854]],
- [[-73.97162, 40.78205], [-73.96374, 40.77715]],
- [[-73.97880, 40.77247], [-73.97036, 40.76811]]]}};
-var multiPolygonDoc = {geo: {type: "MultiPolygon",
- coordinates: [[[[-73.958, 40.8003], [-73.9498, 40.7968],
- [-73.9737, 40.7648], [-73.9814, 40.7681],
- [-73.958, 40.8003]]],
- [[[-73.958, 40.8003], [-73.9498, 40.7968],
- [-73.9737, 40.7648], [-73.958, 40.8003]]]]}};
-var geometryCollectionDoc = {geo: {type: "GeometryCollection",
- geometries: [{type: "MultiPoint",
- coordinates: [[-73.9580, 40.8003],
- [-73.9498, 40.7968],
- [-73.9737, 40.7648],
- [-73.9814, 40.7681]]},
- {type: "MultiLineString",
- coordinates: [[[-73.96943, 40.78519],
- [-73.96082, 40.78095]],
- [[-73.96415, 40.79229],
- [-73.95544, 40.78854]],
- [[-73.97162, 40.78205],
- [-73.96374, 40.77715]],
- [[-73.97880, 40.77247],
- [-73.97036, 40.76811]]]}]}};
+var pointDoc = {
+ geo: {type: "Point", coordinates: [40, 5]}
+};
+var lineStringDoc = {
+ geo: {type: "LineString", coordinates: [[40, 5], [41, 6]]}
+};
+var polygonDoc = {
+ geo: {type: "Polygon", coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}
+};
+var multiPointDoc = {
+ geo: {
+ type: "MultiPoint",
+ coordinates:
+ [[-73.9580, 40.8003], [-73.9498, 40.7968], [-73.9737, 40.7648], [-73.9814, 40.7681]]
+ }
+};
+var multiLineStringDoc = {
+ geo: {
+ type: "MultiLineString",
+ coordinates: [
+ [[-73.96943, 40.78519], [-73.96082, 40.78095]],
+ [[-73.96415, 40.79229], [-73.95544, 40.78854]],
+ [[-73.97162, 40.78205], [-73.96374, 40.77715]],
+ [[-73.97880, 40.77247], [-73.97036, 40.76811]]
+ ]
+ }
+};
+var multiPolygonDoc = {
+ geo: {
+ type: "MultiPolygon",
+ coordinates: [
+ [[
+ [-73.958, 40.8003],
+ [-73.9498, 40.7968],
+ [-73.9737, 40.7648],
+ [-73.9814, 40.7681],
+ [-73.958, 40.8003]
+ ]],
+ [[
+ [-73.958, 40.8003],
+ [-73.9498, 40.7968],
+ [-73.9737, 40.7648],
+ [-73.958, 40.8003]
+ ]]
+ ]
+ }
+};
+var geometryCollectionDoc = {
+ geo: {
+ type: "GeometryCollection",
+ geometries: [
+ {
+ type: "MultiPoint",
+ coordinates: [
+ [-73.9580, 40.8003],
+ [-73.9498, 40.7968],
+ [-73.9737, 40.7648],
+ [-73.9814, 40.7681]
+ ]
+ },
+ {
+ type: "MultiLineString",
+ coordinates: [
+ [[-73.96943, 40.78519], [-73.96082, 40.78095]],
+ [[-73.96415, 40.79229], [-73.95544, 40.78854]],
+ [[-73.97162, 40.78205], [-73.96374, 40.77715]],
+ [[-73.97880, 40.77247], [-73.97036, 40.76811]]
+ ]
+ }
+ ]
+ }
+};
// {2dsphereIndexVersion: 2} indexes allow all supported GeoJSON objects.
res = coll.ensureIndex({geo: "2dsphere"}, {"2dsphereIndexVersion": 2});
diff --git a/jstests/core/geo_s2intersection.js b/jstests/core/geo_s2intersection.js
index 287d52dfe10..bf65c02c0c2 100644
--- a/jstests/core/geo_s2intersection.js
+++ b/jstests/core/geo_s2intersection.js
@@ -1,33 +1,25 @@
var t = db.geo_s2intersectinglines;
t.drop();
-t.ensureIndex( { geo : "2dsphere" } );
+t.ensureIndex({geo: "2dsphere"});
/* All the tests in this file are generally confirming intersections based upon
* these three geo objects.
*/
var canonLine = {
name: 'canonLine',
- geo: {
- type: "LineString",
- coordinates: [[0.0, 0.0], [1.0, 0.0]]
- }
+ geo: {type: "LineString", coordinates: [[0.0, 0.0], [1.0, 0.0]]}
};
var canonPoint = {
name: 'canonPoint',
- geo: {
- type: "Point",
- coordinates: [10.0, 10.0]
- }
+ geo: {type: "Point", coordinates: [10.0, 10.0]}
};
var canonPoly = {
name: 'canonPoly',
geo: {
type: "Polygon",
- coordinates: [
- [[50.0, 50.0], [51.0, 50.0], [51.0, 51.0], [50.0, 51.0], [50.0, 50.0]]
- ]
+ coordinates: [[[50.0, 50.0], [51.0, 50.0], [51.0, 51.0], [50.0, 51.0], [50.0, 50.0]]]
}
};
@@ -35,33 +27,34 @@ t.insert(canonLine);
t.insert(canonPoint);
t.insert(canonPoly);
-
-//Case 1: Basic sanity intersection.
-var testLine = {type: "LineString",
- coordinates: [[0.5, 0.5], [0.5, -0.5]]};
+// Case 1: Basic sanity intersection.
+var testLine = {
+ type: "LineString",
+ coordinates: [[0.5, 0.5], [0.5, -0.5]]
+};
var result = t.find({geo: {$geoIntersects: {$geometry: testLine}}});
assert.eq(result.count(), 1);
assert.eq(result[0]['name'], 'canonLine');
-
-//Case 2: Basic Polygon intersection.
+// Case 2: Basic Polygon intersection.
// we expect that the canonLine should intersect with this polygon.
-var testPoly = {type: "Polygon",
- coordinates: [
- [[0.4, -0.1],[0.4, 0.1], [0.6, 0.1], [0.6, -0.1], [0.4, -0.1]]
- ]};
+var testPoly = {
+ type: "Polygon",
+ coordinates: [[[0.4, -0.1], [0.4, 0.1], [0.6, 0.1], [0.6, -0.1], [0.4, -0.1]]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: testPoly}}});
assert.eq(result.count(), 1);
assert.eq(result[0]['name'], 'canonLine');
-
-//Case 3: Intersects the vertex of a line.
+// Case 3: Intersects the vertex of a line.
// When a line intersects the vertex of a line, we expect this to
// count as a geoIntersection.
-testLine = {type: "LineString",
- coordinates: [[0.0, 0.5], [0.0, -0.5]]};
+testLine = {
+ type: "LineString",
+ coordinates: [[0.0, 0.5], [0.0, -0.5]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: testLine}}});
assert.eq(result.count(), 1);
@@ -70,35 +63,41 @@ assert.eq(result[0]['name'], 'canonLine');
// Case 4: Sanity no intersection.
// This line just misses the canonLine in the negative direction. This
// should not count as a geoIntersection.
-testLine = {type: "LineString",
- coordinates: [[-0.1, 0.5], [-0.1, -0.5]]};
+testLine = {
+ type: "LineString",
+ coordinates: [[-0.1, 0.5], [-0.1, -0.5]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: testLine}}});
assert.eq(result.count(), 0);
-
// Case 5: Overlapping line - only partially overlaps.
// Undefined behaviour: does intersect
-testLine = {type: "LineString",
- coordinates: [[-0.5, 0.0], [0.5, 0.0]]};
+testLine = {
+ type: "LineString",
+ coordinates: [[-0.5, 0.0], [0.5, 0.0]]
+};
var result = t.find({geo: {$geoIntersects: {$geometry: testLine}}});
assert.eq(result.count(), 1);
assert.eq(result[0]['name'], 'canonLine');
-
// Case 6: Contained line - this line is fully contained by the canonLine
// Undefined behaviour: doesn't intersect.
-testLine = {type: "LineString",
- coordinates: [[0.1, 0.0], [0.9, 0.0]]};
+testLine = {
+ type: "LineString",
+ coordinates: [[0.1, 0.0], [0.9, 0.0]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: testLine}}});
assert.eq(result.count(), 0);
// Case 7: Identical line in the identical position.
// Undefined behaviour: does intersect.
-testLine = {type: "LineString",
- coordinates: [[0.0, 0.0], [1.0, 0.0]]};
+testLine = {
+ type: "LineString",
+ coordinates: [[0.0, 0.0], [1.0, 0.0]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: testLine}}});
assert.eq(result.count(), 1);
@@ -106,8 +105,10 @@ assert.eq(result[0]['name'], 'canonLine');
// Case 8: Point intersection - we search with a line that intersects
// with the canonPoint.
-testLine = {type: "LineString",
- coordinates: [[10.0, 11.0], [10.0, 9.0]]};
+testLine = {
+ type: "LineString",
+ coordinates: [[10.0, 11.0], [10.0, 9.0]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: testLine}}});
assert.eq(result.count(), 1);
@@ -116,25 +117,30 @@ assert.eq(result[0]['name'], 'canonPoint');
// Case 9: Point point intersection
// as above but with an identical point to the canonPoint. We expect an
// intersection here.
-testPoint = {type: "Point",
- coordinates: [10.0, 10.0]};
+testPoint = {
+ type: "Point",
+ coordinates: [10.0, 10.0]
+};
result = t.find({geo: {$geoIntersects: {$geometry: testPoint}}});
assert.eq(result.count(), 1);
assert.eq(result[0]['name'], 'canonPoint');
-
-//Case 10: Sanity point non-intersection.
-var testPoint = {type: "Point",
- coordinates: [12.0, 12.0]};
+// Case 10: Sanity point non-intersection.
+var testPoint = {
+ type: "Point",
+ coordinates: [12.0, 12.0]
+};
result = t.find({geo: {$geoIntersects: {$geometry: testPoint}}});
assert.eq(result.count(), 0);
// Case 11: Point polygon intersection
// verify that a point inside a polygon $geoIntersects.
-testPoint = {type: "Point",
- coordinates: [50.5, 50.5]};
+testPoint = {
+ type: "Point",
+ coordinates: [50.5, 50.5]
+};
result = t.find({geo: {$geoIntersects: {$geometry: testPoint}}});
assert.eq(result.count(), 1);
@@ -144,10 +150,14 @@ assert.eq(result[0]['name'], 'canonPoly');
// $geoIntersects predicates.
t.drop();
t.ensureIndex({a: "2dsphere"});
-t.insert({a: {type: "Polygon", coordinates: [[[0,0], [3,6], [6,0], [0,0]]]}});
+t.insert({a: {type: "Polygon", coordinates: [[[0, 0], [3, 6], [6, 0], [0, 0]]]}});
-var firstPoint = {$geometry: {type: "Point", coordinates: [3.0, 1.0]}};
-var secondPoint = {$geometry: {type: "Point", coordinates: [4.0, 1.0]}};
+var firstPoint = {
+ $geometry: {type: "Point", coordinates: [3.0, 1.0]}
+};
+var secondPoint = {
+ $geometry: {type: "Point", coordinates: [4.0, 1.0]}
+};
// First point should intersect with the polygon.
result = t.find({a: {$geoIntersects: firstPoint}});
@@ -159,6 +169,5 @@ assert.eq(result.count(), 1);
// Both points intersect with the polygon, so the $and of
// two $geoIntersects should as well.
-result = t.find({$and: [{a: {$geoIntersects: firstPoint}},
- {a: {$geoIntersects: secondPoint}}]});
+result = t.find({$and: [{a: {$geoIntersects: firstPoint}}, {a: {$geoIntersects: secondPoint}}]});
assert.eq(result.count(), 1);
diff --git a/jstests/core/geo_s2largewithin.js b/jstests/core/geo_s2largewithin.js
index bd4ccafdae1..2bb0fb557b2 100644
--- a/jstests/core/geo_s2largewithin.js
+++ b/jstests/core/geo_s2largewithin.js
@@ -2,42 +2,33 @@
// doesn't take forever.
t = db.geo_s2largewithin;
t.drop();
-t.ensureIndex( { geo : "2dsphere" } );
+t.ensureIndex({geo: "2dsphere"});
testPoint = {
name: "origin",
- geo: {
- type: "Point",
- coordinates: [0.0, 0.0]
- }
+ geo: {type: "Point", coordinates: [0.0, 0.0]}
};
testHorizLine = {
name: "horiz",
- geo: {
- type: "LineString",
- coordinates: [[-2.0, 10.0], [2.0, 10.0]]
- }
+ geo: {type: "LineString", coordinates: [[-2.0, 10.0], [2.0, 10.0]]}
};
testVertLine = {
name: "vert",
- geo: {
- type: "LineString",
- coordinates: [[10.0, -2.0], [10.0, 2.0]]
- }
+ geo: {type: "LineString", coordinates: [[10.0, -2.0], [10.0, 2.0]]}
};
t.insert(testPoint);
t.insert(testHorizLine);
t.insert(testVertLine);
-//Test a poly that runs horizontally along the equator.
+// Test a poly that runs horizontally along the equator.
-longPoly = {type: "Polygon",
- coordinates: [
- [[30.0, 1.0], [-30.0, 1.0], [-30.0, -1.0], [30.0, -1.0], [30.0, 1.0]]
- ]};
+longPoly = {
+ type: "Polygon",
+ coordinates: [[[30.0, 1.0], [-30.0, 1.0], [-30.0, -1.0], [30.0, -1.0], [30.0, 1.0]]]
+};
result = t.find({geo: {$geoWithin: {$geometry: longPoly}}});
assert.eq(result.itcount(), 1);
diff --git a/jstests/core/geo_s2meridian.js b/jstests/core/geo_s2meridian.js
index feb1dbefed5..583b426845c 100644
--- a/jstests/core/geo_s2meridian.js
+++ b/jstests/core/geo_s2meridian.js
@@ -9,22 +9,14 @@ t.ensureIndex({geo: "2dsphere"});
*/
meridianCrossingLine = {
- geo: {
- type: "LineString",
- coordinates: [
- [-178.0, 10.0],
- [178.0, 10.0]]
- }
+ geo: {type: "LineString", coordinates: [[-178.0, 10.0], [178.0, 10.0]]}
};
assert.writeOK(t.insert(meridianCrossingLine));
lineAlongMeridian = {
- type: "LineString",
- coordinates: [
- [180.0, 11.0],
- [180.0, 9.0]
- ]
+ type: "LineString",
+ coordinates: [[180.0, 11.0], [180.0, 9.0]]
};
result = t.find({geo: {$geoIntersects: {$geometry: lineAlongMeridian}}});
@@ -34,26 +26,17 @@ t.drop();
t.ensureIndex({geo: "2dsphere"});
/*
* Test 2: check that within work across the meridian. We insert points
- * on the meridian, and immediately on either side, and confirm that a poly
+ * on the meridian, and immediately on either side, and confirm that a poly
* covering all of them returns them all.
*/
pointOnNegativeSideOfMeridian = {
- geo: {
- type: "Point",
- coordinates: [-179.0, 1.0]
- }
+ geo: {type: "Point", coordinates: [-179.0, 1.0]}
};
pointOnMeridian = {
- geo: {
- type: "Point",
- coordinates: [180.0, 1.0]
- }
+ geo: {type: "Point", coordinates: [180.0, 1.0]}
};
pointOnPositiveSideOfMeridian = {
- geo: {
- type: "Point",
- coordinates: [179.0, 1.0]
- }
+ geo: {type: "Point", coordinates: [179.0, 1.0]}
};
t.insert(pointOnMeridian);
@@ -62,9 +45,8 @@ t.insert(pointOnPositiveSideOfMeridian);
meridianCrossingPoly = {
type: "Polygon",
- coordinates: [
- [[-178.0, 10.0], [178.0, 10.0], [178.0, -10.0], [-178.0, -10.0], [-178.0, 10.0]]
- ]
+ coordinates:
+ [[[-178.0, 10.0], [178.0, 10.0], [178.0, -10.0], [-178.0, -10.0], [-178.0, 10.0]]]
};
result = t.find({geo: {$geoWithin: {$geometry: meridianCrossingPoly}}});
@@ -79,18 +61,12 @@ t.ensureIndex({geo: "2dsphere"});
*/
pointOnNegativeSideOfMerid = {
name: "closer",
- geo: {
- type: "Point",
- coordinates: [-179.0, 0.0]
- }
+ geo: {type: "Point", coordinates: [-179.0, 0.0]}
};
pointOnPositiveSideOfMerid = {
name: "farther",
- geo: {
- type: "Point",
- coordinates: [176.0, 0.0]
- }
+ geo: {type: "Point", coordinates: [176.0, 0.0]}
};
t.insert(pointOnNegativeSideOfMerid);
diff --git a/jstests/core/geo_s2multi.js b/jstests/core/geo_s2multi.js
index 8d86f8ad08c..2cd6a3d73d7 100644
--- a/jstests/core/geo_s2multi.js
+++ b/jstests/core/geo_s2multi.js
@@ -4,38 +4,70 @@ t.drop();
t.ensureIndex({geo: "2dsphere"});
// Let's try the examples in the GeoJSON spec.
-multiPointA = { "type": "MultiPoint", "coordinates": [ [100.0, 0.0], [101.0, 1.0] ] };
+multiPointA = {
+ "type": "MultiPoint",
+ "coordinates": [[100.0, 0.0], [101.0, 1.0]]
+};
assert.writeOK(t.insert({geo: multiPointA}));
-multiLineStringA = { "type": "MultiLineString", "coordinates": [ [ [100.0, 0.0], [101.0, 1.0] ],
- [ [102.0, 2.0], [103.0, 3.0] ]]};
+multiLineStringA = {
+ "type": "MultiLineString",
+ "coordinates": [[[100.0, 0.0], [101.0, 1.0]], [[102.0, 2.0], [103.0, 3.0]]]
+};
assert.writeOK(t.insert({geo: multiLineStringA}));
-multiPolygonA = { "type": "MultiPolygon", "coordinates": [
- [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
- [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
- [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]]};
+multiPolygonA = {
+ "type": "MultiPolygon",
+ "coordinates": [
+ [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
+ [
+ [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
+ [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]
+ ]
+ ]
+};
assert.writeOK(t.insert({geo: multiPolygonA}));
-assert.eq(3, t.find({geo: {$geoIntersects: {$geometry:
- {"type": "Point", "coordinates": [100,0]}}}}).itcount());
-assert.eq(3, t.find({geo: {$geoIntersects: {$geometry:
- {"type": "Point", "coordinates": [101.0,1.0]}}}}).itcount());
+assert.eq(3,
+ t.find({geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100, 0]}}}})
+ .itcount());
+assert.eq(3,
+ t.find({
+ geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [101.0, 1.0]}}}
+ }).itcount());
// Inside the hole in multiPolygonA
-assert.eq(0, t.find({geo: {$geoIntersects: {$geometry:
- {"type": "Point", "coordinates": [100.21,0.21]}}}}).itcount());
+assert.eq(
+ 0,
+ t.find({geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100.21, 0.21]}}}})
+ .itcount());
// One point inside the hole, one out.
-assert.eq(3, t.find({geo: {$geoIntersects: {$geometry:
- {"type": "MultiPoint", "coordinates": [[100,0],[100.21,0.21]]}}}}).itcount());
-assert.eq(3, t.find({geo: {$geoIntersects: {$geometry:
- {"type": "MultiPoint", "coordinates": [[100,0],[100.21,0.21],[101,1]]}}}}).itcount());
+assert.eq(
+ 3,
+ t.find({
+ geo: {
+ $geoIntersects:
+ {$geometry: {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21]]}}
+ }
+ }).itcount());
+assert.eq(
+ 3,
+ t.find({
+ geo: {
+ $geoIntersects: {
+ $geometry:
+ {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21], [101, 1]]}
+ }
+ }
+ }).itcount());
// Polygon contains itself and the multipoint.
assert.eq(2, t.find({geo: {$geoWithin: {$geometry: multiPolygonA}}}).itcount());
-partialPolygonA = { "type": "Polygon", "coordinates":
- [ [[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]] ] };
+partialPolygonA = {
+ "type": "Polygon",
+ "coordinates": [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]]
+};
assert.writeOK(t.insert({geo: partialPolygonA}));
// Polygon contains itself, the partial poly, and the multipoint
assert.eq(3, t.find({geo: {$geoWithin: {$geometry: multiPolygonA}}}).itcount());
diff --git a/jstests/core/geo_s2near.js b/jstests/core/geo_s2near.js
index 5d0da52b6ec..08bf5ab9081 100644
--- a/jstests/core/geo_s2near.js
+++ b/jstests/core/geo_s2near.js
@@ -3,10 +3,14 @@ t = db.geo_s2near;
t.drop();
// Make sure that geoNear gives us back loc
-goldenPoint = {type: "Point", coordinates: [ 31.0, 41.0]};
+goldenPoint = {
+ type: "Point",
+ coordinates: [31.0, 41.0]
+};
t.insert({geo: goldenPoint});
-t.ensureIndex({ geo : "2dsphere" });
-resNear = db.runCommand({geoNear : t.getName(), near: [30, 40], num: 1, spherical: true, includeLocs: true});
+t.ensureIndex({geo: "2dsphere"});
+resNear = db.runCommand(
+ {geoNear: t.getName(), near: [30, 40], num: 1, spherical: true, includeLocs: true});
assert.eq(resNear.results[0].loc, goldenPoint);
// FYI:
@@ -17,60 +21,83 @@ lng = 0;
points = 10;
for (var x = -points; x < points; x += 1) {
for (var y = -points; y < points; y += 1) {
- t.insert({geo : { "type" : "Point", "coordinates" : [lng + x/1000.0, lat + y/1000.0]}});
+ t.insert({geo: {"type": "Point", "coordinates": [lng + x / 1000.0, lat + y / 1000.0]}});
}
}
-origin = { "type" : "Point", "coordinates": [ lng, lat ] };
+origin = {
+ "type": "Point",
+ "coordinates": [lng, lat]
+};
-t.ensureIndex({ geo : "2dsphere" });
+t.ensureIndex({geo: "2dsphere"});
// Near only works when the query is a point.
-someline = { "type" : "LineString", "coordinates": [ [ 40, 5], [41, 6]]};
-somepoly = { "type" : "Polygon",
- "coordinates" : [ [ [40,5], [40,6], [41,6], [41,5], [40,5]]]};
-assert.throws(function() { return t.find({ "geo" : { "$near" : { "$geometry" : someline } } }).count();});
-assert.throws(function() { return t.find({ "geo" : { "$near" : { "$geometry" : somepoly } } }).count();});
-assert.throws(function() { return db.runCommand({geoNear : t.getName(), near: someline, spherical:true }).results.length;});
-assert.throws(function() { return db.runCommand({geoNear : t.getName(), near: somepoly, spherical:true }).results.length;});
+someline = {
+ "type": "LineString",
+ "coordinates": [[40, 5], [41, 6]]
+};
+somepoly = {
+ "type": "Polygon",
+ "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]
+};
+assert.throws(function() {
+ return t.find({"geo": {"$near": {"$geometry": someline}}}).count();
+});
+assert.throws(function() {
+ return t.find({"geo": {"$near": {"$geometry": somepoly}}}).count();
+});
+assert.throws(function() {
+ return db.runCommand({geoNear: t.getName(), near: someline, spherical: true}).results.length;
+});
+assert.throws(function() {
+ return db.runCommand({geoNear: t.getName(), near: somepoly, spherical: true}).results.length;
+});
// Do some basic near searches.
-res = t.find({ "geo" : { "$near" : { "$geometry" : origin, $maxDistance: 2000} } }).limit(10);
-resNear = db.runCommand({geoNear : t.getName(), near: [0,0], num: 10, maxDistance: Math.PI, spherical: true});
+res = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: 2000}}}).limit(10);
+resNear = db.runCommand(
+ {geoNear: t.getName(), near: [0, 0], num: 10, maxDistance: Math.PI, spherical: true});
assert.eq(res.itcount(), resNear.results.length, 10);
-res = t.find({ "geo" : { "$near" : { "$geometry" : origin } } }).limit(10);
-resNear = db.runCommand({geoNear : t.getName(), near: [0,0], num: 10, spherical: true});
+res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10);
+resNear = db.runCommand({geoNear: t.getName(), near: [0, 0], num: 10, spherical: true});
assert.eq(res.itcount(), resNear.results.length, 10);
// Find all the points!
-res = t.find({ "geo" : { "$near" : { "$geometry" : origin } } }).limit(10000);
-resNear = db.runCommand({geoNear : t.getName(), near: [0,0], num: 10000, spherical: true});
+res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10000);
+resNear = db.runCommand({geoNear: t.getName(), near: [0, 0], num: 10000, spherical: true});
assert.eq(resNear.results.length, res.itcount(), (2 * points) * (2 * points));
// longitude goes -180 to 180
// latitude goes -90 to 90
// Let's put in some perverse (polar) data and make sure we get it back.
// Points go long, lat.
-t.insert({geo: { "type" : "Point", "coordinates" : [-180, -90]}});
-t.insert({geo: { "type" : "Point", "coordinates" : [180, -90]}});
-t.insert({geo: { "type" : "Point", "coordinates" : [180, 90]}});
-t.insert({geo: { "type" : "Point", "coordinates" : [-180, 90]}});
-res = t.find({ "geo" : { "$near" : { "$geometry" : origin } } }).limit(10000);
-resNear = db.runCommand({geoNear : t.getName(), near: [0,0], num: 10000, spherical: true});
+t.insert({geo: {"type": "Point", "coordinates": [-180, -90]}});
+t.insert({geo: {"type": "Point", "coordinates": [180, -90]}});
+t.insert({geo: {"type": "Point", "coordinates": [180, 90]}});
+t.insert({geo: {"type": "Point", "coordinates": [-180, 90]}});
+res = t.find({"geo": {"$near": {"$geometry": origin}}}).limit(10000);
+resNear = db.runCommand({geoNear: t.getName(), near: [0, 0], num: 10000, spherical: true});
assert.eq(res.itcount(), resNear.results.length, (2 * points) * (2 * points) + 4);
function testRadAndDegreesOK(distance) {
// Distance for old style points is radians.
- resRadians = t.find({geo: {$nearSphere: [0,0], $maxDistance: (distance / (6378.1 * 1000))}});
+ resRadians = t.find({geo: {$nearSphere: [0, 0], $maxDistance: (distance / (6378.1 * 1000))}});
// Distance for new style points is meters.
- resMeters = t.find({ "geo" : { "$near" : { "$geometry" : origin, $maxDistance: distance} } });
+ resMeters = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: distance}}});
// And we should get the same # of results no matter what.
assert.eq(resRadians.itcount(), resMeters.itcount());
// Also, geoNear should behave the same way.
- resGNMeters = db.runCommand({geoNear : t.getName(), near: origin, maxDistance: distance, spherical: true});
- resGNRadians = db.runCommand({geoNear : t.getName(), near: [0,0], maxDistance: (distance / (6378.1 * 1000)), spherical: true});
+ resGNMeters = db.runCommand(
+ {geoNear: t.getName(), near: origin, maxDistance: distance, spherical: true});
+ resGNRadians = db.runCommand({
+ geoNear: t.getName(),
+ near: [0, 0],
+ maxDistance: (distance / (6378.1 * 1000)),
+ spherical: true
+ });
assert.eq(resGNRadians.results.length, resGNMeters.results.length);
for (var i = 0; i < resGNRadians.length; ++i) {
// Radius of earth * radians = distance in meters.
@@ -84,4 +111,5 @@ testRadAndDegreesOK(50);
testRadAndDegreesOK(10000);
// SERVER-13666 legacy coordinates must be in bounds for spherical near queries.
-assert.commandFailed(db.runCommand({geoNear : t.getName(), near: [1210.466, 31.2051], spherical: true, num: 10}));
+assert.commandFailed(
+ db.runCommand({geoNear: t.getName(), near: [1210.466, 31.2051], spherical: true, num: 10}));
diff --git a/jstests/core/geo_s2nearComplex.js b/jstests/core/geo_s2nearComplex.js
index ecb5e646a54..0584c5e694a 100644
--- a/jstests/core/geo_s2nearComplex.js
+++ b/jstests/core/geo_s2nearComplex.js
@@ -11,25 +11,26 @@ var sin = Math.sin;
var cos = Math.cos;
var atan2 = Math.atan2;
-
-var originGeo = {type: "Point", coordinates: [20.0, 20.0]};
+var originGeo = {
+ type: "Point",
+ coordinates: [20.0, 20.0]
+};
// Center point for all tests.
var origin = {
name: "origin",
- geo: originGeo
+ geo: originGeo
};
-
/*
* Convenience function for checking that coordinates match. threshold let's you
* specify how accurate equals should be.
*/
-function coordinateEqual(first, second, threshold){
+function coordinateEqual(first, second, threshold) {
threshold = threshold || 0.001;
first = first['geo']['coordinates'];
second = second['geo']['coordinates'];
- if(Math.abs(first[0] - second[0]) <= threshold){
- if(Math.abs(first[1] - second[1]) <= threshold){
+ if (Math.abs(first[0] - second[0]) <= threshold) {
+ if (Math.abs(first[1] - second[1]) <= threshold) {
return true;
}
}
@@ -43,44 +44,49 @@ function coordinateEqual(first, second, threshold){
* be returned.
* based on this algorithm: http://williams.best.vwh.net/avform.htm#LL
*/
-function uniformPoints(origin, count, minDist, maxDist){
+function uniformPoints(origin, count, minDist, maxDist) {
var i;
var lng = origin['geo']['coordinates'][0];
var lat = origin['geo']['coordinates'][1];
var distances = [];
var points = [];
- for(i=0; i < count; i++){
+ for (i = 0; i < count; i++) {
distances.push((random() * (maxDist - minDist)) + minDist);
}
distances.sort();
- while(points.length < count){
+ while (points.length < count) {
var angle = random() * 2 * PI;
var distance = distances[points.length];
var pointLat = asin((sin(lat) * cos(distance)) + (cos(lat) * sin(distance) * cos(angle)));
- var pointDLng = atan2(sin(angle) * sin(distance) * cos(lat), cos(distance) - sin(lat) * sin(pointLat));
- var pointLng = ((lng - pointDLng + PI) % 2*PI) - PI;
+ var pointDLng =
+ atan2(sin(angle) * sin(distance) * cos(lat), cos(distance) - sin(lat) * sin(pointLat));
+ var pointLng = ((lng - pointDLng + PI) % 2 * PI) - PI;
// Latitude must be [-90, 90]
var newLat = lat + pointLat;
- if (newLat > 90) newLat -= 180;
- if (newLat < -90) newLat += 180;
+ if (newLat > 90)
+ newLat -= 180;
+ if (newLat < -90)
+ newLat += 180;
// Longitude must be [-180, 180]
var newLng = lng + pointLng;
- if (newLng > 180) newLng -= 360;
- if (newLng < -180) newLng += 360;
+ if (newLng > 180)
+ newLng -= 360;
+ if (newLng < -180)
+ newLng += 360;
var newPoint = {
geo: {
type: "Point",
- //coordinates: [lng + pointLng, lat + pointLat]
+ // coordinates: [lng + pointLng, lat + pointLat]
coordinates: [newLng, newLat]
}
};
points.push(newPoint);
}
- for(i=0; i < points.length; i++){
+ for (i = 0; i < points.length; i++) {
t.insert(points[i]);
}
return points;
@@ -88,12 +94,12 @@ function uniformPoints(origin, count, minDist, maxDist){
/*
* Creates a random uniform field as above, excepting for `numberOfHoles` gaps that
- * have `sizeOfHoles` points missing centered around a random point.
+ * have `sizeOfHoles` points missing centered around a random point.
*/
-function uniformPointsWithGaps(origin, count, minDist, maxDist, numberOfHoles, sizeOfHoles){
+function uniformPointsWithGaps(origin, count, minDist, maxDist, numberOfHoles, sizeOfHoles) {
var points = uniformPoints(origin, count, minDist, maxDist);
var i;
- for(i=0; i<numberOfHoles; i++){
+ for (i = 0; i < numberOfHoles; i++) {
var randomPoint = points[Math.floor(random() * points.length)];
removeNearest(randomPoint, sizeOfHoles);
}
@@ -105,10 +111,11 @@ function uniformPointsWithGaps(origin, count, minDist, maxDist, numberOfHoles, s
* you may specify an optional `distRatio` parameter which will specify the area that the cluster
* covers as a fraction of the full area that points are created on. Defaults to 10.
*/
-function uniformPointsWithClusters(origin, count, minDist, maxDist, numberOfClusters, minClusterSize, maxClusterSize, distRatio){
+function uniformPointsWithClusters(
+ origin, count, minDist, maxDist, numberOfClusters, minClusterSize, maxClusterSize, distRatio) {
distRatio = distRatio || 10;
var points = uniformPoints(origin, count, minDist, maxDist);
- for(j=0; j<numberOfClusters; j++){
+ for (j = 0; j < numberOfClusters; j++) {
var randomPoint = points[Math.floor(random() * points.length)];
var clusterSize = (random() * (maxClusterSize - minClusterSize)) + minClusterSize;
uniformPoints(randomPoint, clusterSize, minDist / distRatio, maxDist / distRatio);
@@ -118,10 +125,10 @@ function uniformPointsWithClusters(origin, count, minDist, maxDist, numberOfClus
* Function used to create gaps in existing point field. Will remove the `number` nearest
* geo objects to the specified `point`.
*/
-function removeNearest(point, number){
+function removeNearest(point, number) {
var pointsToRemove = t.find({geo: {$geoNear: {$geometry: point['geo']}}}).limit(number);
var idsToRemove = [];
- while(pointsToRemove.hasNext()){
+ while (pointsToRemove.hasNext()) {
point = pointsToRemove.next();
idsToRemove.push(point['_id']);
}
@@ -129,34 +136,36 @@ function removeNearest(point, number){
t.remove({_id: {$in: idsToRemove}});
}
/*
- * Validates the ordering of the nearest results is the same no matter how many
+ * Validates the ordering of the nearest results is the same no matter how many
* geo objects are requested. This could fail if two points have the same dist
* from origin, because they may not be well-ordered. If we see strange failures,
* we should consider that.
*/
-function validateOrdering(query){
+function validateOrdering(query) {
var near10 = t.find(query).limit(10);
var near20 = t.find(query).limit(20);
var near30 = t.find(query).limit(30);
var near40 = t.find(query).limit(40);
- for(i=0;i<10;i++){
+ for (i = 0; i < 10; i++) {
assert(coordinateEqual(near10[i], near20[i]));
assert(coordinateEqual(near10[i], near30[i]));
assert(coordinateEqual(near10[i], near40[i]));
}
- for(i=0;i<20;i++){
+ for (i = 0; i < 20; i++) {
assert(coordinateEqual(near20[i], near30[i]));
assert(coordinateEqual(near20[i], near40[i]));
}
- for(i=0;i<30;i++){
+ for (i = 0; i < 30; i++) {
assert(coordinateEqual(near30[i], near40[i]));
}
}
-var query = {geo: {$geoNear: {$geometry: originGeo}}};
+var query = {
+ geo: {$geoNear: {$geometry: originGeo}}
+};
// Test a uniform distribution of 1000 points.
uniformPoints(origin, 1000, 0.5, 1.5);
@@ -199,10 +208,13 @@ t.ensureIndex({geo: "2dsphere"});
// Test a uniform near search with origin around the pole.
// Center point near pole.
-originGeo = {type: "Point", coordinates: [0.0, 89.0]};
+originGeo = {
+ type: "Point",
+ coordinates: [0.0, 89.0]
+};
origin = {
name: "origin",
- geo: originGeo
+ geo: originGeo
};
uniformPoints(origin, 50, 0.5, 1.5);
@@ -210,17 +222,21 @@ validateOrdering({geo: {$geoNear: {$geometry: originGeo}}});
print("Millis for uniform near pole:");
print(t.find({geo: {$geoNear: {$geometry: originGeo}}})
- .explain("executionStats").executionStats.executionTimeMillis);
+ .explain("executionStats")
+ .executionStats.executionTimeMillis);
assert.eq(t.find({geo: {$geoNear: {$geometry: originGeo}}}).itcount(), 50);
t.drop();
t.ensureIndex({geo: "2dsphere"});
// Center point near the meridian
-originGeo = {type: "Point", coordinates: [179.0, 0.0]};
+originGeo = {
+ type: "Point",
+ coordinates: [179.0, 0.0]
+};
origin = {
name: "origin",
- geo: originGeo
+ geo: originGeo
};
uniformPoints(origin, 50, 0.5, 1.5);
@@ -228,17 +244,21 @@ validateOrdering({geo: {$geoNear: {$geometry: originGeo}}});
print("Millis for uniform on meridian:");
print(t.find({geo: {$geoNear: {$geometry: originGeo}}})
- .explain("executionStats").executionStats.executionTimeMillis);
+ .explain("executionStats")
+ .executionStats.executionTimeMillis);
assert.eq(t.find({geo: {$geoNear: {$geometry: originGeo}}}).itcount(), 50);
t.drop();
t.ensureIndex({geo: "2dsphere"});
// Center point near the negative meridian
-originGeo = {type: "Point", coordinates: [-179.0, 0.0]};
+originGeo = {
+ type: "Point",
+ coordinates: [-179.0, 0.0]
+};
origin = {
name: "origin",
- geo: originGeo
+ geo: originGeo
};
uniformPoints(origin, 50, 0.5, 1.5);
@@ -246,13 +266,17 @@ validateOrdering({geo: {$near: {$geometry: originGeo}}});
print("Millis for uniform on negative meridian:");
print(t.find({geo: {$geoNear: {$geometry: originGeo}}})
- .explain("executionStats").executionStats.executionTimeMillis);
+ .explain("executionStats")
+ .executionStats.executionTimeMillis);
assert.eq(t.find({geo: {$near: {$geometry: originGeo}}}).itcount(), 50);
// Near search with points that are really far away.
t.drop();
t.ensureIndex({geo: "2dsphere"});
-originGeo = {type: "Point", coordinates: [0.0, 0.0]};
+originGeo = {
+ type: "Point",
+ coordinates: [0.0, 0.0]
+};
origin = {
name: "origin",
geo: originGeo
@@ -267,6 +291,7 @@ cur = t.find({geo: {$near: {$geometry: originGeo}}});
print("Near search on very distant points:");
print(t.find({geo: {$geoNear: {$geometry: originGeo}}})
- .explain("executionStats").executionStats.executionTimeMillis);
+ .explain("executionStats")
+ .executionStats.executionTimeMillis);
pt = cur.next();
assert(pt);
diff --git a/jstests/core/geo_s2near_equator_opposite.js b/jstests/core/geo_s2near_equator_opposite.js
index 8ee5d486d5e..13bbc776daa 100644
--- a/jstests/core/geo_s2near_equator_opposite.js
+++ b/jstests/core/geo_s2near_equator_opposite.js
@@ -14,12 +14,14 @@ t.ensureIndex({loc: '2dsphere'});
// upper bound for half of earth's circumference in meters
var dist = 40075000 / 2 + 1;
-var nearSphereCount = t.find({loc: {$nearSphere:
- {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}}}).itcount();
-var nearCount = t.find({loc: {$near:
- {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}}}).itcount();
-var geoNearResult = db.runCommand({geoNear: t.getName(), near:
- {type: 'Point', coordinates: [180, 0]}, spherical: true});
+var nearSphereCount = t.find({
+ loc: {$nearSphere: {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}}
+}).itcount();
+var nearCount =
+ t.find({loc: {$near: {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}}})
+ .itcount();
+var geoNearResult = db.runCommand(
+ {geoNear: t.getName(), near: {type: 'Point', coordinates: [180, 0]}, spherical: true});
print('nearSphere count = ' + nearSphereCount);
print('near count = ' + nearCount);
diff --git a/jstests/core/geo_s2nearcorrect.js b/jstests/core/geo_s2nearcorrect.js
index 9fdeb4aa6a3..54552a4bee5 100644
--- a/jstests/core/geo_s2nearcorrect.js
+++ b/jstests/core/geo_s2nearcorrect.js
@@ -5,8 +5,14 @@
t = db.geo_s2nearcorrect;
t.drop();
-longline = { "type" : "LineString", "coordinates": [ [0,0], [179, 89]]};
+longline = {
+ "type": "LineString",
+ "coordinates": [[0, 0], [179, 89]]
+};
t.insert({geo: longline});
t.ensureIndex({geo: "2dsphere"});
-origin = { "type" : "Point", "coordinates": [ 45, 45] };
-assert.eq(1, t.find({ "geo" : { "$near" : { "$geometry" : origin, $maxDistance: 20000000} } }).count());
+origin = {
+ "type": "Point",
+ "coordinates": [45, 45]
+};
+assert.eq(1, t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: 20000000}}}).count());
diff --git a/jstests/core/geo_s2nearwithin.js b/jstests/core/geo_s2nearwithin.js
index 1e5a20d5209..1bcec709643 100644
--- a/jstests/core/geo_s2nearwithin.js
+++ b/jstests/core/geo_s2nearwithin.js
@@ -9,33 +9,56 @@ for (var x = -points; x < points; x += 1) {
}
}
-origin = { "type" : "Point", "coordinates": [ 0, 0] };
+origin = {
+ "type": "Point",
+ "coordinates": [0, 0]
+};
-t.ensureIndex({ geo : "2dsphere" });
+t.ensureIndex({geo: "2dsphere"});
// Near requires an index, and 2dsphere is an index. Spherical isn't
// specified so this doesn't work.
-assert.commandFailed( db.runCommand({ geoNear: t.getName(), near: [0, 0],
- query: { geo: { $within: { $center: [[0, 0], 1] }}}}));
+assert.commandFailed(db.runCommand(
+ {geoNear: t.getName(), near: [0, 0], query: {geo: {$within: {$center: [[0, 0], 1]}}}}));
// Spherical is specified so this does work. Old style points are weird
// because you can use them with both $center and $centerSphere. Points are
// the only things we will do this conversion for.
-resNear = db.runCommand({geoNear : t.getName(), near: [0, 0], spherical: true,
- query: {geo: {$within: {$center: [[0, 0], 1]}}}});
+resNear = db.runCommand({
+ geoNear: t.getName(),
+ near: [0, 0],
+ spherical: true,
+ query: {geo: {$within: {$center: [[0, 0], 1]}}}
+});
assert.eq(resNear.results.length, 5);
-resNear = db.runCommand({geoNear : t.getName(), near: [0, 0], spherical: true,
- query: {geo: {$within: {$centerSphere: [[0, 0], Math.PI/180.0]}}}});
+resNear = db.runCommand({
+ geoNear: t.getName(),
+ near: [0, 0],
+ spherical: true,
+ query: {geo: {$within: {$centerSphere: [[0, 0], Math.PI / 180.0]}}}
+});
assert.eq(resNear.results.length, 5);
-resNear = db.runCommand({geoNear : t.getName(), near: [0, 0], spherical: true,
- query: {geo: {$within: {$centerSphere: [[0, 0], 0]}}}});
+resNear = db.runCommand({
+ geoNear: t.getName(),
+ near: [0, 0],
+ spherical: true,
+ query: {geo: {$within: {$centerSphere: [[0, 0], 0]}}}
+});
assert.eq(resNear.results.length, 1);
-resNear = db.runCommand({geoNear : t.getName(), near: [0, 0], spherical: true,
- query: {geo: {$within: {$centerSphere: [[1, 0], 0.5 * Math.PI/180.0]}}}});
+resNear = db.runCommand({
+ geoNear: t.getName(),
+ near: [0, 0],
+ spherical: true,
+ query: {geo: {$within: {$centerSphere: [[1, 0], 0.5 * Math.PI / 180.0]}}}
+});
assert.eq(resNear.results.length, 1);
-resNear = db.runCommand({geoNear : t.getName(), near: [0, 0], spherical: true,
- query: {geo: {$within: {$center: [[1, 0], 1.5]}}}});
+resNear = db.runCommand({
+ geoNear: t.getName(),
+ near: [0, 0],
+ spherical: true,
+ query: {geo: {$within: {$center: [[1, 0], 1.5]}}}
+});
assert.eq(resNear.results.length, 9);
diff --git a/jstests/core/geo_s2nongeoarray.js b/jstests/core/geo_s2nongeoarray.js
index 067c338faf3..8684706d168 100644
--- a/jstests/core/geo_s2nongeoarray.js
+++ b/jstests/core/geo_s2nongeoarray.js
@@ -2,14 +2,18 @@
// we find them with queries.
t = db.geo_s2nongeoarray;
-oldPoint = [40,5];
+oldPoint = [40, 5];
-var data = {geo: oldPoint, nonGeo: [123,456], otherNonGeo: [{b:[1,2]},{b:[3,4]}]};
+var data = {
+ geo: oldPoint,
+ nonGeo: [123, 456],
+ otherNonGeo: [{b: [1, 2]}, {b: [3, 4]}]
+};
t.drop();
assert.writeOK(t.insert(data));
-assert.commandWorked(t.ensureIndex({ otherNonGeo: 1 }));
-assert.eq(1, t.find({otherNonGeo: {b:[1,2]}}).itcount());
+assert.commandWorked(t.ensureIndex({otherNonGeo: 1}));
+assert.eq(1, t.find({otherNonGeo: {b: [1, 2]}}).itcount());
assert.eq(0, t.find({otherNonGeo: 1}).itcount());
assert.eq(1, t.find({'otherNonGeo.b': 1}).itcount());
diff --git a/jstests/core/geo_s2nonstring.js b/jstests/core/geo_s2nonstring.js
index 1f3258eeac3..43587f0c8e8 100755..100644
--- a/jstests/core/geo_s2nonstring.js
+++ b/jstests/core/geo_s2nonstring.js
@@ -2,21 +2,27 @@
t = db.geo_s2nonstring;
t.drop();
-t.ensureIndex( { geo:'2dsphere', x:1 } );
+t.ensureIndex({geo: '2dsphere', x: 1});
-t.save( { geo:{ type:'Point', coordinates:[ 0, 0 ] }, x:'a' } );
-t.save( { geo:{ type:'Point', coordinates:[ 0, 0 ] }, x:5 } );
+t.save({geo: {type: 'Point', coordinates: [0, 0]}, x: 'a'});
+t.save({geo: {type: 'Point', coordinates: [0, 0]}, x: 5});
t.drop();
-t.ensureIndex( { geo:'2dsphere', x:1 } );
+t.ensureIndex({geo: '2dsphere', x: 1});
-t.save( { geo:{ type:'Point', coordinates:[ 0, 0 ] }, x:'a' } );
-t.save( { geo:{ type:'Point', coordinates:[ 0, 0 ] } } );
+t.save({geo: {type: 'Point', coordinates: [0, 0]}, x: 'a'});
+t.save({geo: {type: 'Point', coordinates: [0, 0]}});
// Expect 1 match, where x is 'a'
-assert.eq( 1, t.count( { geo:{ $near:{ $geometry:{ type:'Point', coordinates:[ 0, 0 ] },
- $maxDistance: 20 } }, x:'a' } ) );
+assert.eq(1,
+ t.count({
+ geo: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 20}},
+ x: 'a'
+ }));
// Expect 1 match, where x matches null (missing matches null).
-assert.eq( 1, t.count( { geo:{ $near:{ $geometry:{ type:'Point', coordinates:[ 0, 0 ] },
- $maxDistance: 20 } }, x:null } ) );
+assert.eq(1,
+ t.count({
+ geo: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 20}},
+ x: null
+ }));
diff --git a/jstests/core/geo_s2nopoints.js b/jstests/core/geo_s2nopoints.js
index 903487c7008..0d2afdb1672 100644
--- a/jstests/core/geo_s2nopoints.js
+++ b/jstests/core/geo_s2nopoints.js
@@ -2,6 +2,7 @@
t = db.geo_s2nopoints;
t.drop();
-t.ensureIndex({loc: "2dsphere", x:1});
-assert.eq(0, t.count({loc: {$near: {$geometry: {type: 'Point', coordinates:[0,0]},
- $maxDistance: 10}}}));
+t.ensureIndex({loc: "2dsphere", x: 1});
+assert.eq(
+ 0,
+ t.count({loc: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 10}}}));
diff --git a/jstests/core/geo_s2oddshapes.js b/jstests/core/geo_s2oddshapes.js
index aa284bbe20e..6f14533c928 100644
--- a/jstests/core/geo_s2oddshapes.js
+++ b/jstests/core/geo_s2oddshapes.js
@@ -3,60 +3,51 @@
// rather wide if their latitude (or longitude) range is large.
var t = db.geo_s2oddshapes;
t.drop();
-t.ensureIndex( { geo : "2dsphere" } );
+t.ensureIndex({geo: "2dsphere"});
var testPoint = {
name: "origin",
- geo: {
- type: "Point",
- coordinates: [0.0, 0.0]
- }
+ geo: {type: "Point", coordinates: [0.0, 0.0]}
};
var testHorizLine = {
name: "horiz",
- geo: {
- type: "LineString",
- coordinates: [[-2.0, 10.0], [2.0, 10.0]]
- }
+ geo: {type: "LineString", coordinates: [[-2.0, 10.0], [2.0, 10.0]]}
};
var testVertLine = {
name: "vert",
- geo: {
- type: "LineString",
- coordinates: [[10.0, -2.0], [10.0, 2.0]]
- }
+ geo: {type: "LineString", coordinates: [[10.0, -2.0], [10.0, 2.0]]}
};
t.insert(testPoint);
t.insert(testHorizLine);
t.insert(testVertLine);
-//Test a poly that runs vertically all the way along the meridian.
+// Test a poly that runs vertically all the way along the meridian.
-var tallPoly = {type: "Polygon",
- coordinates: [
- [[1.0, 89.0], [-1.0, 89.0], [-1.0, -89.0], [1.0, -89.0], [1.0, 89.0]]
- ]};
-//We expect that the testPoint (at the origin) will be within this poly.
+var tallPoly = {
+ type: "Polygon",
+ coordinates: [[[1.0, 89.0], [-1.0, 89.0], [-1.0, -89.0], [1.0, -89.0], [1.0, 89.0]]]
+};
+// We expect that the testPoint (at the origin) will be within this poly.
var result = t.find({geo: {$within: {$geometry: tallPoly}}});
assert.eq(result.itcount(), 1);
var result = t.find({geo: {$within: {$geometry: tallPoly}}});
assert.eq(result[0].name, 'origin');
-//We expect that the testPoint, and the testHorizLine should geoIntersect
-//with this poly.
+// We expect that the testPoint, and the testHorizLine should geoIntersect
+// with this poly.
result = t.find({geo: {$geoIntersects: {$geometry: tallPoly}}});
assert.eq(result.itcount(), 2);
result = t.find({geo: {$geoIntersects: {$geometry: tallPoly}}});
-//Test a poly that runs horizontally along the equator.
+// Test a poly that runs horizontally along the equator.
-var longPoly = {type: "Polygon",
- coordinates: [
- [[89.0, 1.0], [-89.0, 1.0], [-89.0, -1.0], [89.0, -1.0], [89.0, 1.0]]
- ]};
+var longPoly = {
+ type: "Polygon",
+ coordinates: [[[89.0, 1.0], [-89.0, 1.0], [-89.0, -1.0], [89.0, -1.0], [89.0, 1.0]]]
+};
// Thanks to spherical geometry, this poly contains most of the hemisphere.
result = t.find({geo: {$within: {$geometry: longPoly}}});
@@ -64,36 +55,28 @@ assert.eq(result.itcount(), 3);
result = t.find({geo: {$geoIntersects: {$geometry: longPoly}}});
assert.eq(result.itcount(), 3);
-//Test a poly that is the size of half the earth.
+// Test a poly that is the size of half the earth.
t.drop();
-t.ensureIndex( { geo : "2dsphere" } );
+t.ensureIndex({geo: "2dsphere"});
var insidePoint = {
name: "inside",
- geo: {
- type: "Point",
- name: "inside",
- coordinates: [100.0, 0.0]
- }
+ geo: {type: "Point", name: "inside", coordinates: [100.0, 0.0]}
};
var outsidePoint = {
name: "inside",
- geo: {
- type: "Point",
- name: "inside",
- coordinates: [-100.0, 0.0]
- }
+ geo: {type: "Point", name: "inside", coordinates: [-100.0, 0.0]}
};
t.insert(insidePoint);
t.insert(outsidePoint);
-var largePoly = {type: "Polygon",
- coordinates: [
- [[0.0, -90.0], [0.0, 90.0], [180.0, 0], [0.0, -90.0]]
- ]};
+var largePoly = {
+ type: "Polygon",
+ coordinates: [[[0.0, -90.0], [0.0, 90.0], [180.0, 0], [0.0, -90.0]]]
+};
result = t.find({geo: {$within: {$geometry: largePoly}}});
assert.eq(result.itcount(), 1);
@@ -101,38 +84,31 @@ result = t.find({geo: {$within: {$geometry: largePoly}}});
var point = result[0];
assert.eq(point.name, 'inside');
-//Test a poly that is very small. A couple meters around.
+// Test a poly that is very small. A couple meters around.
t.drop();
-t.ensureIndex( { geo : "2dsphere" } );
+t.ensureIndex({geo: "2dsphere"});
insidePoint = {
name: "inside",
- geo: {
- type: "Point",
- name: "inside",
- coordinates: [0.01, 0.0]
- }};
+ geo: {type: "Point", name: "inside", coordinates: [0.01, 0.0]}
+};
outsidePoint = {
name: "inside",
- geo: {
- type: "Point",
- name: "inside",
- coordinates: [0.2, 0.0]
- }};
+ geo: {type: "Point", name: "inside", coordinates: [0.2, 0.0]}
+};
t.insert(insidePoint);
t.insert(outsidePoint);
-smallPoly = {type: "Polygon",
- coordinates: [
- [[0.0, -0.01], [0.015, -0.01], [0.015, 0.01], [0.0, 0.01], [0.0, -0.01]]
- ]};
+smallPoly = {
+ type: "Polygon",
+ coordinates: [[[0.0, -0.01], [0.015, -0.01], [0.015, 0.01], [0.0, 0.01], [0.0, -0.01]]]
+};
result = t.find({geo: {$within: {$geometry: smallPoly}}});
assert.eq(result.itcount(), 1);
result = t.find({geo: {$within: {$geometry: smallPoly}}});
point = result[0];
assert.eq(point.name, 'inside');
-
diff --git a/jstests/core/geo_s2ordering.js b/jstests/core/geo_s2ordering.js
index 84b78edecfb..026fdda62c6 100644
--- a/jstests/core/geo_s2ordering.js
+++ b/jstests/core/geo_s2ordering.js
@@ -16,20 +16,21 @@ function makepoints(needle) {
for (var x = -points; x < points; x += 1) {
for (var y = -points; y < points; y += 1) {
tag = x.toString() + "," + y.toString();
- bulk.insert({ nongeo: tag,
- geo: {
- type: "Point",
- coordinates: [lng + x/points, lat + y/points]}});
+ bulk.insert({
+ nongeo: tag,
+ geo: {type: "Point", coordinates: [lng + x / points, lat + y / points]}
+ });
}
}
- bulk.insert({ nongeo: needle, geo: { type: "Point", coordinates: [0,0] }});
+ bulk.insert({nongeo: needle, geo: {type: "Point", coordinates: [0, 0]}});
assert.writeOK(bulk.execute());
}
function runTest(index) {
t.ensureIndex(index);
var resultcount = 0;
- var cursor = t.find({nongeo: needle, geo: {$within: {$centerSphere: [[0,0], Math.PI/180.0]}}});
+ var cursor =
+ t.find({nongeo: needle, geo: {$within: {$centerSphere: [[0, 0], Math.PI / 180.0]}}});
var stats = cursor.explain("executionStats").executionStats;
t.dropIndex(index);
diff --git a/jstests/core/geo_s2overlappingpolys.js b/jstests/core/geo_s2overlappingpolys.js
index 819879d960d..485132039d5 100644
--- a/jstests/core/geo_s2overlappingpolys.js
+++ b/jstests/core/geo_s2overlappingpolys.js
@@ -1,57 +1,55 @@
var t = db.geo_s2overlappingpolys;
t.drop();
-t.ensureIndex( { geo : "2dsphere" } );
+t.ensureIndex({geo: "2dsphere"});
var minError = 0.8e-13;
-var canonPoly = {type: "Polygon",
- coordinates: [
- [[-1.0, -1.0], [1.0, -1.0], [1.0, 1.0], [-1.0, 1.0], [-1.0, -1.0]]
- ]};
+var canonPoly = {
+ type: "Polygon",
+ coordinates: [[[-1.0, -1.0], [1.0, -1.0], [1.0, 1.0], [-1.0, 1.0], [-1.0, -1.0]]]
+};
t.insert({geo: canonPoly});
-// Test 1: If a poly completely encloses the canonPoly, we expect the canonPoly
-// to be returned for both $within and $geoIntersect
+// Test 1: If a poly completely encloses the canonPoly, we expect the canonPoly
+// to be returned for both $within and $geoIntersect
-var outerPoly = {type: "Polygon",
- coordinates: [
- [[-2.0, -2.0], [2.0, -2.0], [2.0, 2.0], [-2.0, 2.0], [-2.0, -2.0]]
- ]};
+var outerPoly = {
+ type: "Polygon",
+ coordinates: [[[-2.0, -2.0], [2.0, -2.0], [2.0, 2.0], [-2.0, 2.0], [-2.0, -2.0]]]
+};
var result = t.find({geo: {$within: {$geometry: outerPoly}}});
assert.eq(result.itcount(), 1);
result = t.find({geo: {$geoIntersects: {$geometry: outerPoly}}});
assert.eq(result.itcount(), 1);
-
// Test 2: If a poly that covers half of the canonPoly, we expect that it should
// geoIntersect, but should not be within.
-var partialPoly = {type: "Polygon",
- coordinates: [
- [[-2.0, -2.0], [2.0, -2.0], [2.0, 0.0], [-2.0, 0.0], [-2.0, -2.0]]
- ]};
+var partialPoly = {
+ type: "Polygon",
+ coordinates: [[[-2.0, -2.0], [2.0, -2.0], [2.0, 0.0], [-2.0, 0.0], [-2.0, -2.0]]]
+};
-//Should not be within
+// Should not be within
result = t.find({geo: {$within: {$geometry: partialPoly}}});
assert.eq(result.itcount(), 0);
-//This should however count as a geoIntersect
+// This should however count as a geoIntersect
result = t.find({geo: {$geoIntersects: {$geometry: partialPoly}}});
assert.eq(result.itcount(), 1);
-
-// Test 3: Polygons that intersect at a point or an edge have undefined
-// behaviour in s2 The s2 library we're using appears to have
+// Test 3: Polygons that intersect at a point or an edge have undefined
+// behaviour in s2 The s2 library we're using appears to have
// the following behaviour.
// Case (a): Polygons that intersect at one point (not a vertex).
// behaviour: geoIntersects.
-var sharedPointPoly = {type: "Polygon",
- coordinates: [
- [[0.0, -2.0], [0.0, -1.0], [1.0, -2.0], [0.0, -2.0]]
- ]};
+var sharedPointPoly = {
+ type: "Polygon",
+ coordinates: [[[0.0, -2.0], [0.0, -1.0], [1.0, -2.0], [0.0, -2.0]]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: sharedPointPoly}}});
assert.eq(result.itcount(), 1);
@@ -59,34 +57,33 @@ assert.eq(result.itcount(), 1);
// Case (b): Polygons that intersect at one point (a vertex).
// behaviour: not geoIntersect
-var sharedVertexPoly = {type: "Polygon",
- coordinates: [
- [[0.0, -2.0], [1.0, -1.0], [1.0, -2.0], [0.0, -2.0]]
- ]};
+var sharedVertexPoly = {
+ type: "Polygon",
+ coordinates: [[[0.0, -2.0], [1.0, -1.0], [1.0, -2.0], [0.0, -2.0]]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: sharedVertexPoly}}});
assert.eq(result.itcount(), 0);
-// Case (c): Polygons that intesersect at one point that is very close to a
+// Case (c): Polygons that intesersect at one point that is very close to a
// vertex should have the same behaviour as Case (b).
-var almostSharedVertexPoly = {type: "Polygon",
- coordinates: [
- [[0.0, -2.0], [1.0 - minError, -1.0], [1.0, -2.0], [0.0, -2.0]]
- ]};
+var almostSharedVertexPoly = {
+ type: "Polygon",
+ coordinates: [[[0.0, -2.0], [1.0 - minError, -1.0], [1.0, -2.0], [0.0, -2.0]]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: almostSharedVertexPoly}}});
assert.eq(result.itcount(), 0);
-
-// Case (d): Polygons that intesersect at one point that is not quite as close
-// to a vertex should behave as though it were not a vertex, and should
+// Case (d): Polygons that intesersect at one point that is not quite as close
+// to a vertex should behave as though it were not a vertex, and should
// geoIntersect
-var notCloseEnoughSharedVertexPoly = {type: "Polygon",
- coordinates: [
- [[0.0, -2.0], [1.0 - (10 * minError), -1.0], [1.0, -2.0], [0.0, -2.0]]
- ]};
+var notCloseEnoughSharedVertexPoly = {
+ type: "Polygon",
+ coordinates: [[[0.0, -2.0], [1.0 - (10 * minError), -1.0], [1.0, -2.0], [0.0, -2.0]]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: notCloseEnoughSharedVertexPoly}}});
assert.eq(result.itcount(), 1);
@@ -94,40 +91,39 @@ assert.eq(result.itcount(), 1);
// Case (e): Polygons that come very close to having a point intersection
// on a non-vertex coordinate should intersect.
-var almostSharedPointPoly = {type: "Polygon",
- coordinates: [
- [[0.0, -2.0], [0.0, (-1.0 - minError)], [1.0, -2.0], [0.0, -2.0]]
- ]};
+var almostSharedPointPoly = {
+ type: "Polygon",
+ coordinates: [[[0.0, -2.0], [0.0, (-1.0 - minError)], [1.0, -2.0], [0.0, -2.0]]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: almostSharedPointPoly}}});
assert.eq(result.itcount(), 1);
-
// Case (f): If we increase the error a little, it should no longer act
// as though it's intersecting.
// NOTE: I think this error bound seems odd. Going to 0.000152297 will break this test.
// I've confirmed there is an error bound, but it's a lot larger than we experienced above.
var errorBound = 0.000152298;
-var notCloseEnoughSharedPointPoly = {type: "Polygon",
- coordinates: [
- [[0.0, -2.0], [0.0, -1.0 - errorBound], [1.0, -2.0], [0.0, -2.0]]
- ]};
+var notCloseEnoughSharedPointPoly = {
+ type: "Polygon",
+ coordinates: [[[0.0, -2.0], [0.0, -1.0 - errorBound], [1.0, -2.0], [0.0, -2.0]]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: notCloseEnoughSharedPointPoly}}});
assert.eq(result.itcount(), 0);
-/* Test 3: Importantly, polygons with shared edges have undefined intersection
- * under s2. Therefore these test serve more to make sure nothing changes than
+/* Test 3: Importantly, polygons with shared edges have undefined intersection
+ * under s2. Therefore these test serve more to make sure nothing changes than
* to confirm an expected behaviour.
*/
// Case 1: A polygon who shares an edge with another polygon, where the searching
// polygon's edge is fully covered by the canon polygon's edge.
// Result: No intersection.
-var fullyCoveredEdgePoly = {type: "Polygon",
- coordinates: [
- [[-2.0, -0.5], [-1.0, -0.5], [-1.0, 0.5], [-2.0, 0.5], [-2.0, -0.5]]
- ]};
+var fullyCoveredEdgePoly = {
+ type: "Polygon",
+ coordinates: [[[-2.0, -0.5], [-1.0, -0.5], [-1.0, 0.5], [-2.0, 0.5], [-2.0, -0.5]]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: fullyCoveredEdgePoly}}});
assert.eq(result.itcount(), 0);
@@ -135,10 +131,10 @@ assert.eq(result.itcount(), 0);
// Case 2: A polygon who shares an edge with another polygon, where the searching
// polygon's edge fully covers the canon polygon's edge.
// Result: Intersection.
-var coveringEdgePoly = {type: "Polygon",
- coordinates: [
- [[-2.0, -1.5], [-1.0, -1.5], [-1.0, 1.5], [-2.0, 1.5], [-2.0, -1.5]]
- ]};
+var coveringEdgePoly = {
+ type: "Polygon",
+ coordinates: [[[-2.0, -1.5], [-1.0, -1.5], [-1.0, 1.5], [-2.0, 1.5], [-2.0, -1.5]]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: coveringEdgePoly}}});
assert.eq(result.itcount(), 1);
@@ -146,21 +142,34 @@ assert.eq(result.itcount(), 1);
// Case 2a: same as Case 2, except pulled slightly away from the polygon.
// Result: Intersection.
// NOTE: Scales of errors?
-var closebyCoveringEdgePoly = {type: "Polygon",
- coordinates: [
- [[-2.0, -1.5], [-1.0 - (minError / 1000), -1.5], [-1.0 - (minError / 1000), 1.5], [-2.0, 1.5], [-2.0, -1.5]]
- ]};
+var closebyCoveringEdgePoly = {
+ type: "Polygon",
+ coordinates: [[
+ [-2.0, -1.5],
+ [-1.0 - (minError / 1000), -1.5],
+ [-1.0 - (minError / 1000), 1.5],
+ [-2.0, 1.5],
+ [-2.0, -1.5]
+ ]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: closebyCoveringEdgePoly}}});
assert.eq(result.itcount(), 1);
-// Case 2b: same as Case 4, except pulled slightly away from the polygon, so that it's not intersecting.
+// Case 2b: same as Case 4, except pulled slightly away from the polygon, so that it's not
+// intersecting.
// Result: No Intersection.
// NOTE: Scales of errors?
-var notCloseEnoughCoveringEdgePoly = {type: "Polygon",
- coordinates: [
- [[-2.0, -1.5], [-1.0 - (minError / 100), -1.5], [-1.0 - (minError / 100), 1.5], [-2.0, 1.5], [-2.0, -1.5]]
- ]};
+var notCloseEnoughCoveringEdgePoly = {
+ type: "Polygon",
+ coordinates: [[
+ [-2.0, -1.5],
+ [-1.0 - (minError / 100), -1.5],
+ [-1.0 - (minError / 100), 1.5],
+ [-2.0, 1.5],
+ [-2.0, -1.5]
+ ]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: notCloseEnoughCoveringEdgePoly}}});
assert.eq(result.itcount(), 0);
@@ -168,44 +177,60 @@ assert.eq(result.itcount(), 0);
// Case 3: A polygon who shares an edge with another polygon, where the searching
// polygon's edge partially covers by the canon polygon's edge.
// Result: No intersection.
-var partiallyCoveringEdgePoly = {type: "Polygon",
- coordinates: [
- [[-2.0, -1.5], [-1.0, -1.5], [-1.0, 0.5], [-2.0, 0.5], [-2.0, -1.5]]
- ]};
+var partiallyCoveringEdgePoly = {
+ type: "Polygon",
+ coordinates: [[[-2.0, -1.5], [-1.0, -1.5], [-1.0, 0.5], [-2.0, 0.5], [-2.0, -1.5]]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: partiallyCoveringEdgePoly}}});
assert.eq(result.itcount(), 0);
-
-//Polygons that intersect at three non-co-linear points should geoIntersect
-var sharedPointsPoly = {type: "Polygon",
- coordinates: [
- [[0.0, -3.0], [0.0, -1.0], [2.0, -2.0], [1.0, 0.0], [2.0, 2.0], [0.0, 1.0], [0.0, 3.0], [3.0, 3.0], [3.0, -3.0], [0.0, -3.0]]
- ]};
+// Polygons that intersect at three non-co-linear points should geoIntersect
+var sharedPointsPoly = {
+ type: "Polygon",
+ coordinates: [[
+ [0.0, -3.0],
+ [0.0, -1.0],
+ [2.0, -2.0],
+ [1.0, 0.0],
+ [2.0, 2.0],
+ [0.0, 1.0],
+ [0.0, 3.0],
+ [3.0, 3.0],
+ [3.0, -3.0],
+ [0.0, -3.0]
+ ]]
+};
result = t.find({geo: {$geoIntersects: {$geometry: sharedPointsPoly}}});
assert.eq(result.itcount(), 1);
-//If a polygon contains a hole, and another polygon is within that hole, it should not be within or intersect.
+// If a polygon contains a hole, and another polygon is within that hole, it should not be within or
+// intersect.
-var bigHolePoly = {type: "Polygon",
+var bigHolePoly = {
+ type: "Polygon",
coordinates: [
[[-3.0, -3.0], [3.0, -3.0], [3.0, 3.0], [-3.0, 3.0], [-3.0, -3.0]],
[[-2.0, -2.0], [2.0, -2.0], [2.0, 2.0], [-2.0, 2.0], [-2.0, -2.0]]
- ]};
+ ]
+};
result = t.find({geo: {$within: {$geometry: bigHolePoly}}});
assert.eq(result.itcount(), 0);
result = t.find({geo: {$geoIntersects: {$geometry: bigHolePoly}}});
assert.eq(result.itcount(), 0);
-// If a polygon has a hole, and another polygon is contained partially by that hole, it should be an intersection
+// If a polygon has a hole, and another polygon is contained partially by that hole, it should be an
+// intersection
// but not a within.
-var internalOverlapPoly = {type: "Polygon",
+var internalOverlapPoly = {
+ type: "Polygon",
coordinates: [
[[-3.0, -3.0], [3.0, -3.0], [3.0, 3.0], [-3.0, 3.0], [-3.0, -3.0]],
[[-2.0, 0.0], [2.0, 0.0], [2.0, 2.0], [-2.0, 2.0], [-2.0, 0.0]]
- ]};
+ ]
+};
result = t.find({geo: {$geoIntersects: {$geometry: internalOverlapPoly}}});
assert.eq(result.itcount(), 1);
diff --git a/jstests/core/geo_s2polywithholes.js b/jstests/core/geo_s2polywithholes.js
index 85aafccdb68..6ace711c718 100755..100644
--- a/jstests/core/geo_s2polywithholes.js
+++ b/jstests/core/geo_s2polywithholes.js
@@ -2,17 +2,26 @@ var t = db.geo_s2weirdpolys;
t.drop();
t.ensureIndex({geo: "2dsphere"});
-var centerPoint = {"type": "Point", "coordinates": [0.5, 0.5]};
-var edgePoint = {"type": "Point", "coordinates": [0, 0.5]};
-var cornerPoint = {"type": "Point", "coordinates": [0, 0]};
+var centerPoint = {
+ "type": "Point",
+ "coordinates": [0.5, 0.5]
+};
+var edgePoint = {
+ "type": "Point",
+ "coordinates": [0, 0.5]
+};
+var cornerPoint = {
+ "type": "Point",
+ "coordinates": [0, 0]
+};
-t.insert({geo : centerPoint});
-t.insert({geo : edgePoint});
-t.insert({geo : cornerPoint});
+t.insert({geo: centerPoint});
+t.insert({geo: edgePoint});
+t.insert({geo: cornerPoint});
-var polygonWithNoHole = {"type" : "Polygon", "coordinates": [
- [[0,0], [0,1], [1, 1], [1, 0], [0, 0]]
- ]
+var polygonWithNoHole = {
+ "type": "Polygon",
+ "coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
};
// Test 1: Sanity check. Expect all three points.
@@ -20,9 +29,11 @@ var sanityResult = t.find({geo: {$within: {$geometry: polygonWithNoHole}}});
assert.eq(sanityResult.itcount(), 3);
// Test 2: Polygon with a hole that isn't contained byt the poly shell.
-var polygonWithProtrudingHole = {"type" : "Polygon", "coordinates": [
- [[0,0], [0,1], [1, 1], [1, 0], [0, 0]],
- [[0.4,0.9], [0.4,1.1], [0.5, 1.1], [0.5, 0.9], [0.4, 0.9]]
+var polygonWithProtrudingHole = {
+ "type": "Polygon",
+ "coordinates": [
+ [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]],
+ [[0.4, 0.9], [0.4, 1.1], [0.5, 1.1], [0.5, 0.9], [0.4, 0.9]]
]
};
@@ -36,36 +47,44 @@ assert.throws(function() {
// Test 3: This test will confirm that a polygon with overlapping holes throws
// an error.
-var polyWithOverlappingHoles = {"type" : "Polygon", "coordinates": [
- [[0,0], [0,1], [1, 1], [1, 0], [0, 0]],
- [[0.2,0.6], [0.2,0.9], [0.6, 0.9], [0.6, 0.6], [0.2, 0.6]],
- [[0.5,0.4], [0.5,0.7], [0.8, 0.7], [0.8, 0.4], [0.5, 0.4]]
+var polyWithOverlappingHoles = {
+ "type": "Polygon",
+ "coordinates": [
+ [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]],
+ [[0.2, 0.6], [0.2, 0.9], [0.6, 0.9], [0.6, 0.6], [0.2, 0.6]],
+ [[0.5, 0.4], [0.5, 0.7], [0.8, 0.7], [0.8, 0.4], [0.5, 0.4]]
]
};
assert.writeError(t.insert({geo: polyWithOverlappingHoles}));
// Test 4: Only one nesting is allowed by GeoJSON.
-var polyWithDeepHole = {"type" : "Polygon", "coordinates": [
- [[0,0], [0,1], [1, 1], [1, 0], [0, 0]],
- [[0.1,0.1], [0.1,0.9], [0.9, 0.9], [0.9, 0.1], [0.1, 0.1]],
- [[0.2,0.2], [0.2,0.8], [0.8, 0.8], [0.8, 0.2], [0.2, 0.2]]
+var polyWithDeepHole = {
+ "type": "Polygon",
+ "coordinates": [
+ [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]],
+ [[0.1, 0.1], [0.1, 0.9], [0.9, 0.9], [0.9, 0.1], [0.1, 0.1]],
+ [[0.2, 0.2], [0.2, 0.8], [0.8, 0.8], [0.8, 0.2], [0.2, 0.2]]
]
};
assert.writeError(t.insert({geo: polyWithDeepHole}));
// Test 5: The first ring must be the exterior ring.
-var polyWithBiggerHole = {"type" : "Polygon", "coordinates": [
- [[0.1,0.1], [0.1,0.9], [0.9, 0.9], [0.9, 0.1], [0.1, 0.1]],
- [[0,0], [0,1], [1, 1], [1, 0], [0, 0]]
+var polyWithBiggerHole = {
+ "type": "Polygon",
+ "coordinates": [
+ [[0.1, 0.1], [0.1, 0.9], [0.9, 0.9], [0.9, 0.1], [0.1, 0.1]],
+ [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]
]
};
assert.writeError(t.insert({geo: polyWithBiggerHole}));
// Test 6: Holes cannot share more than one vertex with exterior loop
-var polySharedVertices = {"type" : "Polygon", "coordinates": [
- [[0,0], [0,1], [1, 1], [1, 0], [0, 0]],
- [[0,0], [0.1,0.9], [1, 1], [0.9, 0.1], [0, 0]]
+var polySharedVertices = {
+ "type": "Polygon",
+ "coordinates": [
+ [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]],
+ [[0, 0], [0.1, 0.9], [1, 1], [0.9, 0.1], [0, 0]]
]
};
assert.writeError(t.insert({geo: polySharedVertices}));
diff --git a/jstests/core/geo_s2selfintersectingpoly.js b/jstests/core/geo_s2selfintersectingpoly.js
index f34ea3a5ff1..236283ab8ac 100644
--- a/jstests/core/geo_s2selfintersectingpoly.js
+++ b/jstests/core/geo_s2selfintersectingpoly.js
@@ -2,10 +2,11 @@ var t = db.geo_s2selfintersectingpoly;
t.drop();
t.ensureIndex({geo: "2dsphere"});
-var intersectingPolygon = {"type": "Polygon", "coordinates": [
- [[0.0, 0.0], [0.0, 4.0], [-3.0, 2.0], [1.0, 2.0], [0.0, 0.0]]
-]};
+var intersectingPolygon = {
+ "type": "Polygon",
+ "coordinates": [[[0.0, 0.0], [0.0, 4.0], [-3.0, 2.0], [1.0, 2.0], [0.0, 0.0]]]
+};
/*
* Self intersecting polygons should cause a parse exception.
*/
-assert.writeError(t.insert({geo : intersectingPolygon}));
+assert.writeError(t.insert({geo: intersectingPolygon}));
diff --git a/jstests/core/geo_s2sparse.js b/jstests/core/geo_s2sparse.js
index e6454fbfbb7..ab3363b5860 100644
--- a/jstests/core/geo_s2sparse.js
+++ b/jstests/core/geo_s2sparse.js
@@ -3,9 +3,15 @@
var coll = db.geo_s2sparse;
-var point = { type: "Point", coordinates: [5, 5] };
+var point = {
+ type: "Point",
+ coordinates: [5, 5]
+};
-var indexSpec = { geo: "2dsphere", nonGeo: 1 };
+var indexSpec = {
+ geo: "2dsphere",
+ nonGeo: 1
+};
var indexName = 'test.geo_s2sparse.$geo_2dsphere_nonGeo_1';
@@ -20,7 +26,7 @@ coll.ensureIndex(indexSpec);
// Insert N documents with the geo field.
var N = 1000;
for (var i = 0; i < N; i++) {
- coll.insert({ geo: point, nonGeo: "point_"+i });
+ coll.insert({geo: point, nonGeo: "point_" + i});
}
// Expect N keys.
@@ -28,7 +34,7 @@ assert.eq(N, coll.validate().keysPerIndex[indexName]);
// Insert N documents without the geo field.
for (var i = 0; i < N; i++) {
- coll.insert({ wrongGeo: point, nonGeo: i});
+ coll.insert({wrongGeo: point, nonGeo: i});
}
// Still expect N keys as we didn't insert any geo stuff.
@@ -36,7 +42,7 @@ assert.eq(N, coll.validate().keysPerIndex[indexName]);
// Insert N documents with just the geo field.
for (var i = 0; i < N; i++) {
- coll.insert({ geo: point});
+ coll.insert({geo: point});
}
// Expect 2N keys.
@@ -44,10 +50,10 @@ assert.eq(N + N, coll.validate().keysPerIndex[indexName]);
// Add some "not geo" stuff.
for (var i = 0; i < N; i++) {
- coll.insert({ geo: null});
- coll.insert({ geo: []});
- coll.insert({ geo: undefined});
- coll.insert({ geo: {}});
+ coll.insert({geo: null});
+ coll.insert({geo: []});
+ coll.insert({geo: undefined});
+ coll.insert({geo: {}});
}
// Still expect 2N keys.
@@ -62,7 +68,7 @@ coll.ensureIndex(indexSpec, {"2dsphereIndexVersion": 1});
// Insert N documents with the geo field.
for (var i = 0; i < N; i++) {
- coll.insert({ geo: point, nonGeo: "point_"+i });
+ coll.insert({geo: point, nonGeo: "point_" + i});
}
// Expect N keys.
@@ -70,7 +76,7 @@ assert.eq(N, coll.validate().keysPerIndex[indexName]);
// Insert N documents without the geo field.
for (var i = 0; i < N; i++) {
- coll.insert({ wrongGeo: point, nonGeo: i});
+ coll.insert({wrongGeo: point, nonGeo: i});
}
// Expect N keys as it's a V1 index.
@@ -89,7 +95,7 @@ indexName = 'test.geo_s2sparse.$geo_2dsphere_otherGeo_2dsphere';
// Insert N documents with the first geo field.
var N = 1000;
for (var i = 0; i < N; i++) {
- coll.insert({ geo: point});
+ coll.insert({geo: point});
}
// Expect N keys.
@@ -98,7 +104,7 @@ assert.eq(N, coll.validate().keysPerIndex[indexName]);
// Insert N documents with the second geo field.
var N = 1000;
for (var i = 0; i < N; i++) {
- coll.insert({ otherGeo: point});
+ coll.insert({otherGeo: point});
}
// They get inserted too.
@@ -106,7 +112,7 @@ assert.eq(N + N, coll.validate().keysPerIndex[indexName]);
// Insert N documents with neither geo field.
for (var i = 0; i < N; i++) {
- coll.insert({ nonGeo: i});
+ coll.insert({nonGeo: i});
}
// Still expect 2N keys as the neither geo docs were omitted from the index.
diff --git a/jstests/core/geo_s2twofields.js b/jstests/core/geo_s2twofields.js
index 26c75b08bfb..1868287cf5b 100644
--- a/jstests/core/geo_s2twofields.js
+++ b/jstests/core/geo_s2twofields.js
@@ -13,8 +13,14 @@ function randomCoord(center, minDistDeg, maxDistDeg) {
return [center[0] + dx, center[1] + dy];
}
-var nyc = {type: "Point", coordinates: [-74.0064, 40.7142]};
-var miami = {type: "Point", coordinates: [-80.1303, 25.7903]};
+var nyc = {
+ type: "Point",
+ coordinates: [-74.0064, 40.7142]
+};
+var miami = {
+ type: "Point",
+ coordinates: [-80.1303, 25.7903]
+};
var maxPoints = 10000;
var degrees = 5;
@@ -23,19 +29,23 @@ for (var i = 0; i < maxPoints; ++i) {
var fromCoord = randomCoord(nyc.coordinates, 0, degrees);
var toCoord = randomCoord(miami.coordinates, 0, degrees);
- arr.push( { from: { type: "Point", coordinates: fromCoord },
- to: { type: "Point", coordinates: toCoord}} );
+ arr.push({
+ from: {type: "Point", coordinates: fromCoord},
+ to: {type: "Point", coordinates: toCoord}
+ });
}
-res = t.insert( arr );
+res = t.insert(arr);
assert.writeOK(res);
-assert.eq( t.count(), maxPoints );
+assert.eq(t.count(), maxPoints);
function semiRigorousTime(func) {
var lowestTime = func();
var iter = 2;
for (var i = 0; i < iter; ++i) {
var run = func();
- if (run < lowestTime) { lowestTime = run; }
+ if (run < lowestTime) {
+ lowestTime = run;
+ }
}
return lowestTime;
}
@@ -55,19 +65,31 @@ function timeWithoutAndWithAnIndex(index, query) {
var maxQueryRad = 0.5 * PI / 180.0;
// When we're not looking at ALL the data, anything indexed should beat not-indexed.
-var smallQuery = timeWithoutAndWithAnIndex({to: "2dsphere", from: "2dsphere"},
- {from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}}, to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}});
+var smallQuery =
+ timeWithoutAndWithAnIndex({to: "2dsphere", from: "2dsphere"},
+ {
+ from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
+ to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
+ });
print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]);
// assert(smallQuery[0] > smallQuery[1]);
// Let's just index one field.
-var smallQuery = timeWithoutAndWithAnIndex({to: "2dsphere"},
- {from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}}, to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}});
+var smallQuery =
+ timeWithoutAndWithAnIndex({to: "2dsphere"},
+ {
+ from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
+ to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
+ });
print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]);
// assert(smallQuery[0] > smallQuery[1]);
// And the other one.
-var smallQuery = timeWithoutAndWithAnIndex({from: "2dsphere"},
- {from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}}, to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}});
+var smallQuery =
+ timeWithoutAndWithAnIndex({from: "2dsphere"},
+ {
+ from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
+ to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
+ });
print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]);
// assert(smallQuery[0] > smallQuery[1]);
diff --git a/jstests/core/geo_s2validindex.js b/jstests/core/geo_s2validindex.js
index c6dd8be58d9..4c024d2d585 100644
--- a/jstests/core/geo_s2validindex.js
+++ b/jstests/core/geo_s2validindex.js
@@ -6,19 +6,19 @@ var coll = db.getCollection("twodspherevalid");
// Valid index
coll.drop();
-assert.commandWorked(coll.ensureIndex({geo : "2dsphere", other : 1}));
+assert.commandWorked(coll.ensureIndex({geo: "2dsphere", other: 1}));
// Valid index
coll.drop();
-assert.commandWorked(coll.ensureIndex({geo : "2dsphere", other : 1, geo2 : "2dsphere"}));
+assert.commandWorked(coll.ensureIndex({geo: "2dsphere", other: 1, geo2: "2dsphere"}));
// Invalid index, using hash with 2dsphere
coll.drop();
-assert.commandFailed(coll.ensureIndex({geo : "2dsphere", other : "hash"}));
+assert.commandFailed(coll.ensureIndex({geo: "2dsphere", other: "hash"}));
// Invalid index, using 2d with 2dsphere
coll.drop();
-assert.commandFailed(coll.ensureIndex({geo : "2dsphere", other : "2d"}));
+assert.commandFailed(coll.ensureIndex({geo: "2dsphere", other: "2d"}));
jsTest.log("Success!");
diff --git a/jstests/core/geo_s2within.js b/jstests/core/geo_s2within.js
index 77a9ed9ed3e..430e4f4dc07 100644
--- a/jstests/core/geo_s2within.js
+++ b/jstests/core/geo_s2within.js
@@ -3,34 +3,40 @@ t = db.geo_s2within;
t.drop();
t.ensureIndex({geo: "2dsphere"});
-somepoly = { "type" : "Polygon",
- "coordinates" : [ [ [40,5], [40,6], [41,6], [41,5], [40,5]]]};
+somepoly = {
+ "type": "Polygon",
+ "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]
+};
-t.insert({geo: { "type" : "LineString", "coordinates": [ [ 40.1, 5.1], [40.2, 5.2]]}});
+t.insert({geo: {"type": "LineString", "coordinates": [[40.1, 5.1], [40.2, 5.2]]}});
// This is only partially contained within the polygon.
-t.insert({geo: { "type" : "LineString", "coordinates": [ [ 40.1, 5.1], [42, 7]]}});
+t.insert({geo: {"type": "LineString", "coordinates": [[40.1, 5.1], [42, 7]]}});
-res = t.find({ "geo" : { "$within" : { "$geometry" : somepoly} } });
+res = t.find({"geo": {"$within": {"$geometry": somepoly}}});
assert.eq(res.itcount(), 1);
t.drop();
t.ensureIndex({geo: "2dsphere"});
-somepoly = { "type" : "Polygon",
- "coordinates" : [ [ [40,5], [40,8], [43,8], [43,5], [40,5]],
- [ [41,6], [42,6], [42,7], [41,7], [41,6]]]};
+somepoly = {
+ "type": "Polygon",
+ "coordinates": [
+ [[40, 5], [40, 8], [43, 8], [43, 5], [40, 5]],
+ [[41, 6], [42, 6], [42, 7], [41, 7], [41, 6]]
+ ]
+};
-t.insert({geo:{ "type" : "Point", "coordinates": [ 40, 5 ] }});
-res = t.find({ "geo" : { "$within" : { "$geometry" : somepoly} } });
+t.insert({geo: {"type": "Point", "coordinates": [40, 5]}});
+res = t.find({"geo": {"$within": {"$geometry": somepoly}}});
assert.eq(res.itcount(), 1);
// In the hole. Shouldn't find it.
-t.insert({geo:{ "type" : "Point", "coordinates": [ 41.1, 6.1 ] }});
-res = t.find({ "geo" : { "$within" : { "$geometry" : somepoly} } });
+t.insert({geo: {"type": "Point", "coordinates": [41.1, 6.1]}});
+res = t.find({"geo": {"$within": {"$geometry": somepoly}}});
assert.eq(res.itcount(), 1);
// Also in the hole.
-t.insert({geo: { "type" : "LineString", "coordinates": [ [ 41.1, 6.1], [41.2, 6.2]]}});
-res = t.find({ "geo" : { "$within" : { "$geometry" : somepoly} } });
+t.insert({geo: {"type": "LineString", "coordinates": [[41.1, 6.1], [41.2, 6.2]]}});
+res = t.find({"geo": {"$within": {"$geometry": somepoly}}});
assert.eq(res.itcount(), 1);
// Half-hole, half-not. Shouldn't be $within.
-t.insert({geo: { "type" : "LineString", "coordinates": [ [ 41.5, 6.5], [42.5, 7.5]]}});
-res = t.find({ "geo" : { "$within" : { "$geometry" : somepoly} } });
+t.insert({geo: {"type": "LineString", "coordinates": [[41.5, 6.5], [42.5, 7.5]]}});
+res = t.find({"geo": {"$within": {"$geometry": somepoly}}});
assert.eq(res.itcount(), 1);
diff --git a/jstests/core/geo_small_large.js b/jstests/core/geo_small_large.js
index e927e8d5402..549f00369a2 100644
--- a/jstests/core/geo_small_large.js
+++ b/jstests/core/geo_small_large.js
@@ -1,52 +1,50 @@
// SERVER-2386, general geo-indexing using very large and very small bounds
-load( "jstests/libs/geo_near_random.js" );
+load("jstests/libs/geo_near_random.js");
// Do some random tests (for near queries) with very large and small ranges
-var test = new GeoNearRandomTest( "geo_small_large" );
+var test = new GeoNearRandomTest("geo_small_large");
-bounds = { min : -Math.pow( 2, 34 ), max : Math.pow( 2, 34 ) };
+bounds = {
+ min: -Math.pow(2, 34),
+ max: Math.pow(2, 34)
+};
-test.insertPts( 50, bounds );
+test.insertPts(50, bounds);
-printjson( db["geo_small_large"].find().limit( 10 ).toArray() );
+printjson(db["geo_small_large"].find().limit(10).toArray());
-test.testPt( [ 0, 0 ] );
-test.testPt( test.mkPt( undefined, bounds ) );
-test.testPt( test.mkPt( undefined, bounds ) );
-test.testPt( test.mkPt( undefined, bounds ) );
-test.testPt( test.mkPt( undefined, bounds ) );
+test.testPt([0, 0]);
+test.testPt(test.mkPt(undefined, bounds));
+test.testPt(test.mkPt(undefined, bounds));
+test.testPt(test.mkPt(undefined, bounds));
+test.testPt(test.mkPt(undefined, bounds));
-test = new GeoNearRandomTest( "geo_small_large" );
+test = new GeoNearRandomTest("geo_small_large");
-bounds = { min : -Math.pow( 2, -34 ), max : Math.pow( 2, -34 ) };
+bounds = {
+ min: -Math.pow(2, -34),
+ max: Math.pow(2, -34)
+};
-test.insertPts( 50, bounds );
+test.insertPts(50, bounds);
-printjson( db["geo_small_large"].find().limit( 10 ).toArray() );
-
-test.testPt( [ 0, 0 ] );
-test.testPt( test.mkPt( undefined, bounds ) );
-test.testPt( test.mkPt( undefined, bounds ) );
-test.testPt( test.mkPt( undefined, bounds ) );
-test.testPt( test.mkPt( undefined, bounds ) );
+printjson(db["geo_small_large"].find().limit(10).toArray());
+test.testPt([0, 0]);
+test.testPt(test.mkPt(undefined, bounds));
+test.testPt(test.mkPt(undefined, bounds));
+test.testPt(test.mkPt(undefined, bounds));
+test.testPt(test.mkPt(undefined, bounds));
// Check that our box and circle queries also work
-var scales = [
- Math.pow( 2, 40 ),
- Math.pow( 2, -40 ),
- Math.pow(2, 2),
- Math.pow(3, -15),
- Math.pow(3, 15)
-];
-
-for ( var i = 0; i < scales.length; i++ ) {
+var scales = [Math.pow(2, 40), Math.pow(2, -40), Math.pow(2, 2), Math.pow(3, -15), Math.pow(3, 15)];
+for (var i = 0; i < scales.length; i++) {
var scale = scales[i];
- var eps = Math.pow( 2, -7 ) * scale;
+ var eps = Math.pow(2, -7) * scale;
var radius = 5 * scale;
var max = 10 * scale;
var min = -max;
@@ -55,52 +53,51 @@ for ( var i = 0; i < scales.length; i++ ) {
var t = db["geo_small_large"];
t.drop();
- t.ensureIndex( { p : "2d" }, { min : min, max : max, bits : bits });
+ t.ensureIndex({p: "2d"}, {min: min, max: max, bits: bits});
var outPoints = 0;
var inPoints = 0;
- printjson({ eps : eps, radius : radius, max : max, min : min, range : range, bits : bits });
+ printjson({eps: eps, radius: radius, max: max, min: min, range: range, bits: bits});
// Put a point slightly inside and outside our range
- for ( var j = 0; j < 2; j++ ) {
- var currRad = ( j % 2 == 0 ? radius + eps : radius - eps );
- var res = t.insert( { p : { x : currRad, y : 0 } } );
- print( res.toString() );
+ for (var j = 0; j < 2; j++) {
+ var currRad = (j % 2 == 0 ? radius + eps : radius - eps);
+ var res = t.insert({p: {x: currRad, y: 0}});
+ print(res.toString());
}
- printjson( t.find().toArray() );
+ printjson(t.find().toArray());
- assert.eq( t.count( { p : { $within : { $center : [[0, 0], radius ] } } } ), 1,
- "Incorrect center points found!" );
- assert.eq( t.count( { p : { $within : { $box : [ [ -radius, -radius ], [ radius, radius ] ] } } } ), 1,
- "Incorrect box points found!" );
+ assert.eq(
+ t.count({p: {$within: {$center: [[0, 0], radius]}}}), 1, "Incorrect center points found!");
+ assert.eq(t.count({p: {$within: {$box: [[-radius, -radius], [radius, radius]]}}}),
+ 1,
+ "Incorrect box points found!");
var shouldFind = [];
var randoms = [];
- for ( var j = 0; j < 2; j++ ) {
-
- var randX = Math.random(); // randoms[j].randX
- var randY = Math.random(); // randoms[j].randY
+ for (var j = 0; j < 2; j++) {
+ var randX = Math.random(); // randoms[j].randX
+ var randY = Math.random(); // randoms[j].randY
- randoms.push({ randX : randX, randY : randY });
+ randoms.push({randX: randX, randY: randY});
- var x = randX * ( range - eps ) + eps + min;
- var y = randY * ( range - eps ) + eps + min;
+ var x = randX * (range - eps) + eps + min;
+ var y = randY * (range - eps) + eps + min;
- t.insert( { p : [ x, y ] } );
+ t.insert({p: [x, y]});
- if ( x * x + y * y > radius * radius ){
+ if (x * x + y * y > radius * radius) {
// print( "out point ");
// printjson({ x : x, y : y })
outPoints++;
- }
- else{
+ } else {
// print( "in point ");
// printjson({ x : x, y : y })
inPoints++;
- shouldFind.push({ x : x, y : y, radius : Math.sqrt( x * x + y * y ) });
+ shouldFind.push({x: x, y: y, radius: Math.sqrt(x * x + y * y)});
}
}
@@ -138,21 +135,22 @@ for ( var i = 0; i < scales.length; i++ ) {
printDiff( shouldFind, didFind )
*/
- assert.eq( t.count( { p : { $within : { $center : [[0, 0], radius ] } } } ), 1 + inPoints,
- "Incorrect random center points found!\n" + tojson( randoms ) );
+ assert.eq(t.count({p: {$within: {$center: [[0, 0], radius]}}}),
+ 1 + inPoints,
+ "Incorrect random center points found!\n" + tojson(randoms));
print("Found " + inPoints + " points in and " + outPoints + " points out.");
- var found = t.find( { p : { $near : [0, 0], $maxDistance : radius } } ).toArray();
+ var found = t.find({p: {$near: [0, 0], $maxDistance: radius}}).toArray();
var dist = 0;
- for( var f = 0; f < found.length; f++ ){
+ for (var f = 0; f < found.length; f++) {
var x = found[f].p.x != undefined ? found[f].p.x : found[f].p[0];
var y = found[f].p.y != undefined ? found[f].p.y : found[f].p[1];
- print( "Dist: x : " + x + " y : " + y + " dist : " +
- Math.sqrt( x * x + y * y) + " radius : " + radius );
+ print("Dist: x : " + x + " y : " + y + " dist : " + Math.sqrt(x * x + y * y) +
+ " radius : " + radius);
}
- assert.eq( t.count( { p : { $near : [0, 0], $maxDistance : radius } } ), 1 + inPoints,
- "Incorrect random center points found near!\n" + tojson( randoms ) );
+ assert.eq(t.count({p: {$near: [0, 0], $maxDistance: radius}}),
+ 1 + inPoints,
+ "Incorrect random center points found near!\n" + tojson(randoms));
}
-
diff --git a/jstests/core/geo_sort1.js b/jstests/core/geo_sort1.js
index cd07345b587..b7a229bb8ae 100644
--- a/jstests/core/geo_sort1.js
+++ b/jstests/core/geo_sort1.js
@@ -2,21 +2,23 @@
t = db.geo_sort1;
t.drop();
-for ( x=0; x<10; x++ ){
- for ( y=0; y<10; y++ ){
- t.insert( { loc : [ x , y ] , foo : x * x * y } );
+for (x = 0; x < 10; x++) {
+ for (y = 0; y < 10; y++) {
+ t.insert({loc: [x, y], foo: x * x * y});
}
}
-t.ensureIndex( { loc : "2d" , foo : 1 } );
+t.ensureIndex({loc: "2d", foo: 1});
-q = t.find( { loc : { $near : [ 5 , 5 ] } , foo : { $gt : 20 } } );
-m = function(z){ return z.foo; };
+q = t.find({loc: {$near: [5, 5]}, foo: {$gt: 20}});
+m = function(z) {
+ return z.foo;
+};
-a = q.clone().map( m );
-b = q.clone().sort( { foo : 1 } ).map( m );
+a = q.clone().map(m);
+b = q.clone().sort({foo: 1}).map(m);
-assert.neq( a , b , "A" );
+assert.neq(a, b, "A");
a.sort();
b.sort();
-assert.eq( a , b , "B" );
+assert.eq(a, b, "B");
diff --git a/jstests/core/geo_uniqueDocs.js b/jstests/core/geo_uniqueDocs.js
index 23297bb1ec9..8c4e11fc82e 100644
--- a/jstests/core/geo_uniqueDocs.js
+++ b/jstests/core/geo_uniqueDocs.js
@@ -5,36 +5,40 @@ collName = 'geo_uniqueDocs_test';
t = db.geo_uniqueDocs_test;
t.drop();
-t.save( { locs : [ [0,2], [3,4]] } );
-t.save( { locs : [ [6,8], [10,10] ] } );
+t.save({locs: [[0, 2], [3, 4]]});
+t.save({locs: [[6, 8], [10, 10]]});
-t.ensureIndex( { locs : '2d' } );
+t.ensureIndex({locs: '2d'});
// geoNear tests
// uniqueDocs option is ignored.
-assert.eq(2, db.runCommand({geoNear:collName, near:[0,0]}).results.length);
-assert.eq(2, db.runCommand({geoNear:collName, near:[0,0], uniqueDocs:false}).results.length);
-assert.eq(2, db.runCommand({geoNear:collName, near:[0,0], uniqueDocs:true}).results.length);
-results = db.runCommand({geoNear:collName, near:[0,0], num:2}).results;
+assert.eq(2, db.runCommand({geoNear: collName, near: [0, 0]}).results.length);
+assert.eq(2, db.runCommand({geoNear: collName, near: [0, 0], uniqueDocs: false}).results.length);
+assert.eq(2, db.runCommand({geoNear: collName, near: [0, 0], uniqueDocs: true}).results.length);
+results = db.runCommand({geoNear: collName, near: [0, 0], num: 2}).results;
assert.eq(2, results.length);
assert.close(2, results[0].dis);
assert.close(10, results[1].dis);
-results = db.runCommand({geoNear:collName, near:[0,0], num:2, uniqueDocs:true}).results;
+results = db.runCommand({geoNear: collName, near: [0, 0], num: 2, uniqueDocs: true}).results;
assert.eq(2, results.length);
assert.close(2, results[0].dis);
assert.close(10, results[1].dis);
// $within tests
-assert.eq(2, t.find( {locs: {$within: {$box : [[0,0],[9,9]]}}}).itcount());
-assert.eq(2, t.find( {locs: {$within: {$box : [[0,0],[9,9]], $uniqueDocs : true}}}).itcount());
-assert.eq(2, t.find( {locs: {$within: {$box : [[0,0],[9,9]], $uniqueDocs : false}}}).itcount());
+assert.eq(2, t.find({locs: {$within: {$box: [[0, 0], [9, 9]]}}}).itcount());
+assert.eq(2, t.find({locs: {$within: {$box: [[0, 0], [9, 9]], $uniqueDocs: true}}}).itcount());
+assert.eq(2, t.find({locs: {$within: {$box: [[0, 0], [9, 9]], $uniqueDocs: false}}}).itcount());
-assert.eq(2, t.find( {locs: {$within: {$center : [[5,5],7], $uniqueDocs : true}}}).itcount());
-assert.eq(2, t.find( {locs: {$within: {$center : [[5,5],7], $uniqueDocs : false}}}).itcount());
+assert.eq(2, t.find({locs: {$within: {$center: [[5, 5], 7], $uniqueDocs: true}}}).itcount());
+assert.eq(2, t.find({locs: {$within: {$center: [[5, 5], 7], $uniqueDocs: false}}}).itcount());
-assert.eq(2, t.find( {locs: {$within: {$centerSphere : [[5,5],1], $uniqueDocs : true}}}).itcount());
-assert.eq(2, t.find( {locs: {$within: {$centerSphere : [[5,5],1], $uniqueDocs : false}}}).itcount());
+assert.eq(2, t.find({locs: {$within: {$centerSphere: [[5, 5], 1], $uniqueDocs: true}}}).itcount());
+assert.eq(2, t.find({locs: {$within: {$centerSphere: [[5, 5], 1], $uniqueDocs: false}}}).itcount());
-assert.eq(2, t.find( {locs: {$within: {$polygon : [[0,0],[0,9],[9,9]], $uniqueDocs : true}}}).itcount());
-assert.eq(2, t.find( {locs: {$within: {$polygon : [[0,0],[0,9],[9,9]], $uniqueDocs : false}}}).itcount());
+assert.eq(2,
+ t.find({locs: {$within: {$polygon: [[0, 0], [0, 9], [9, 9]], $uniqueDocs: true}}})
+ .itcount());
+assert.eq(2,
+ t.find({locs: {$within: {$polygon: [[0, 0], [0, 9], [9, 9]], $uniqueDocs: false}}})
+ .itcount());
diff --git a/jstests/core/geo_uniqueDocs2.js b/jstests/core/geo_uniqueDocs2.js
index 62a27d606e3..f6481b30f41 100644
--- a/jstests/core/geo_uniqueDocs2.js
+++ b/jstests/core/geo_uniqueDocs2.js
@@ -6,75 +6,90 @@ collName = 'jstests_geo_uniqueDocs2';
t = db[collName];
t.drop();
-t.save( {loc:[[20,30],[40,50]]} );
-t.ensureIndex( {loc:'2d'} );
+t.save({loc: [[20, 30], [40, 50]]});
+t.ensureIndex({loc: '2d'});
// Check exact matches of different locations.
-assert.eq( 1, t.count( { loc : [20,30] } ) );
-assert.eq( 1, t.count( { loc : [40,50] } ) );
+assert.eq(1, t.count({loc: [20, 30]}));
+assert.eq(1, t.count({loc: [40, 50]}));
// Check behavior for $near, where $uniqueDocs mode is unavailable.
-assert.eq( [t.findOne()], t.find( { loc: { $near: [50,50] } } ).toArray() );
+assert.eq([t.findOne()], t.find({loc: {$near: [50, 50]}}).toArray());
// Check correct number of matches for $within / $uniqueDocs.
// uniqueDocs ignored - does not affect results.
-assert.eq( 1, t.count( { loc : { $within : { $center : [[30, 30], 40] } } } ) );
-assert.eq( 1, t.count( { loc : { $within : { $center : [[30, 30], 40], $uniqueDocs : true } } } ) );
-assert.eq( 1, t.count( { loc : { $within : { $center : [[30, 30], 40], $uniqueDocs : false } } } ) );
+assert.eq(1, t.count({loc: {$within: {$center: [[30, 30], 40]}}}));
+assert.eq(1, t.count({loc: {$within: {$center: [[30, 30], 40], $uniqueDocs: true}}}));
+assert.eq(1, t.count({loc: {$within: {$center: [[30, 30], 40], $uniqueDocs: false}}}));
// For $within / $uniqueDocs, limit applies to docs.
-assert.eq( 1, t.find( { loc : { $within : { $center : [[30, 30], 40], $uniqueDocs : false } } } ).limit(1).itcount() );
+assert.eq(
+ 1, t.find({loc: {$within: {$center: [[30, 30], 40], $uniqueDocs: false}}}).limit(1).itcount());
// Now check a circle only containing one of the locs.
-assert.eq( 1, t.count( { loc : { $within : { $center : [[30, 30], 10] } } } ) );
-assert.eq( 1, t.count( { loc : { $within : { $center : [[30, 30], 10], $uniqueDocs : true } } } ) );
-assert.eq( 1, t.count( { loc : { $within : { $center : [[30, 30], 10], $uniqueDocs : false } } } ) );
+assert.eq(1, t.count({loc: {$within: {$center: [[30, 30], 10]}}}));
+assert.eq(1, t.count({loc: {$within: {$center: [[30, 30], 10], $uniqueDocs: true}}}));
+assert.eq(1, t.count({loc: {$within: {$center: [[30, 30], 10], $uniqueDocs: false}}}));
// Check number and character of results with geoNear / uniqueDocs / includeLocs.
-notUniqueNotInclude = db.runCommand( { geoNear : collName , near : [50,50], num : 10, uniqueDocs : false, includeLocs : false } );
-uniqueNotInclude = db.runCommand( { geoNear : collName , near : [50,50], num : 10, uniqueDocs : true, includeLocs : false } );
-notUniqueInclude = db.runCommand( { geoNear : collName , near : [50,50], num : 10, uniqueDocs : false, includeLocs : true } );
-uniqueInclude = db.runCommand( { geoNear : collName , near : [50,50], num : 10, uniqueDocs : true, includeLocs : true } );
+notUniqueNotInclude = db.runCommand(
+ {geoNear: collName, near: [50, 50], num: 10, uniqueDocs: false, includeLocs: false});
+uniqueNotInclude = db.runCommand(
+ {geoNear: collName, near: [50, 50], num: 10, uniqueDocs: true, includeLocs: false});
+notUniqueInclude = db.runCommand(
+ {geoNear: collName, near: [50, 50], num: 10, uniqueDocs: false, includeLocs: true});
+uniqueInclude = db.runCommand(
+ {geoNear: collName, near: [50, 50], num: 10, uniqueDocs: true, includeLocs: true});
// Check that only unique docs are returned.
-assert.eq( 1, notUniqueNotInclude.results.length );
-assert.eq( 1, uniqueNotInclude.results.length );
-assert.eq( 1, notUniqueInclude.results.length );
-assert.eq( 1, uniqueInclude.results.length );
+assert.eq(1, notUniqueNotInclude.results.length);
+assert.eq(1, uniqueNotInclude.results.length);
+assert.eq(1, notUniqueInclude.results.length);
+assert.eq(1, uniqueInclude.results.length);
// Check that locs are included.
-assert( !notUniqueNotInclude.results[0].loc );
-assert( !uniqueNotInclude.results[0].loc );
-assert( notUniqueInclude.results[0].loc );
-assert( uniqueInclude.results[0].loc );
+assert(!notUniqueNotInclude.results[0].loc);
+assert(!uniqueNotInclude.results[0].loc);
+assert(notUniqueInclude.results[0].loc);
+assert(uniqueInclude.results[0].loc);
// For geoNear / uniqueDocs, 'num' limit seems to apply to locs.
-assert.eq( 1, db.runCommand( { geoNear : collName , near : [50,50], num : 1, uniqueDocs : false, includeLocs : false } ).results.length );
+assert.eq(
+ 1,
+ db.runCommand(
+ {geoNear: collName, near: [50, 50], num: 1, uniqueDocs: false, includeLocs: false})
+ .results.length);
// Check locs returned in includeLocs mode.
t.remove({});
-objLocs = [{x:20,y:30,z:['loc1','loca']},{x:40,y:50,z:['loc2','locb']}];
-t.save( {loc:objLocs} );
-results = db.runCommand( { geoNear : collName , near : [50,50], num : 10, uniqueDocs : false, includeLocs : true } ).results;
-assert.contains( results[0].loc, objLocs );
+objLocs = [{x: 20, y: 30, z: ['loc1', 'loca']}, {x: 40, y: 50, z: ['loc2', 'locb']}];
+t.save({loc: objLocs});
+results =
+ db.runCommand(
+ {geoNear: collName, near: [50, 50], num: 10, uniqueDocs: false, includeLocs: true})
+ .results;
+assert.contains(results[0].loc, objLocs);
// Check locs returned in includeLocs mode, where locs are arrays.
t.remove({});
-arrLocs = [[20,30],[40,50]];
-t.save( {loc:arrLocs} );
-results = db.runCommand( { geoNear : collName , near : [50,50], num : 10, uniqueDocs : false, includeLocs : true } ).results;
+arrLocs = [[20, 30], [40, 50]];
+t.save({loc: arrLocs});
+results =
+ db.runCommand(
+ {geoNear: collName, near: [50, 50], num: 10, uniqueDocs: false, includeLocs: true})
+ .results;
// The original loc arrays are returned as objects.
expectedLocs = arrLocs;
-assert.contains( results[0].loc, expectedLocs );
+assert.contains(results[0].loc, expectedLocs);
// Test a large number of locations in the array.
t.drop();
arr = [];
-for( i = 0; i < 10000; ++i ) {
- arr.push( [10,10] );
+for (i = 0; i < 10000; ++i) {
+ arr.push([10, 10]);
}
-arr.push( [100,100] );
-t.save( {loc:arr} );
-t.ensureIndex( {loc:'2d'} );
-assert.eq( 1, t.count( { loc : { $within : { $center : [[99, 99], 5] } } } ) );
+arr.push([100, 100]);
+t.save({loc: arr});
+t.ensureIndex({loc: '2d'});
+assert.eq(1, t.count({loc: {$within: {$center: [[99, 99], 5]}}}));
diff --git a/jstests/core/geo_update.js b/jstests/core/geo_update.js
index 34305559039..ebe754680e9 100644
--- a/jstests/core/geo_update.js
+++ b/jstests/core/geo_update.js
@@ -4,34 +4,28 @@
var coll = db.testGeoUpdate;
coll.drop();
-coll.ensureIndex({ loc : "2d" });
+coll.ensureIndex({loc: "2d"});
// Test normal update
-print( "Updating..." );
+print("Updating...");
-coll.insert({ loc : [1.0, 2.0] });
+coll.insert({loc: [1.0, 2.0]});
-coll.update({ loc : { $near : [1.0, 2.0] } },
- { x : true, loc : [1.0, 2.0] });
+coll.update({loc: {$near: [1.0, 2.0]}}, {x: true, loc: [1.0, 2.0]});
// Test upsert
-print( "Upserting..." );
-
-coll.update({ loc : { $within : { $center : [[10, 20], 1] } } },
- { x : true },
- true);
-
-coll.update({ loc : { $near : [10.0, 20.0], $maxDistance : 1 } },
- { x : true },
- true);
-
-
-coll.update({ loc : { $near : [100, 100], $maxDistance : 1 } },
- { $set : { loc : [100, 100] }, $push : { people : "chris" } },
+print("Upserting...");
+
+coll.update({loc: {$within: {$center: [[10, 20], 1]}}}, {x: true}, true);
+
+coll.update({loc: {$near: [10.0, 20.0], $maxDistance: 1}}, {x: true}, true);
+
+coll.update({loc: {$near: [100, 100], $maxDistance: 1}},
+ {$set: {loc: [100, 100]}, $push: {people: "chris"}},
true);
-
-coll.update({ loc : { $near : [100, 100], $maxDistance : 1 } },
- { $set : { loc : [100, 100] }, $push : { people : "john" } },
+
+coll.update({loc: {$near: [100, 100], $maxDistance: 1}},
+ {$set: {loc: [100, 100]}, $push: {people: "john"}},
true);
-assert.eq( 4, coll.find().itcount() );
+assert.eq(4, coll.find().itcount());
diff --git a/jstests/core/geo_update1.js b/jstests/core/geo_update1.js
index c3d2623d3de..e966afa7ea9 100644
--- a/jstests/core/geo_update1.js
+++ b/jstests/core/geo_update1.js
@@ -2,35 +2,35 @@
t = db.geo_update1;
t.drop();
-for(var x = 0; x < 10; x++ ) {
- for(var y = 0; y < 10; y++ ) {
- t.insert({"loc": [x, y] , x : x , y : y , z : 1 });
- }
-}
-
-t.ensureIndex( { loc : "2d" } );
-
-function p(){
- print( "--------------" );
- for ( var y=0; y<10; y++ ){
- var c = t.find( { y : y } ).sort( { x : 1 } );
+for (var x = 0; x < 10; x++) {
+ for (var y = 0; y < 10; y++) {
+ t.insert({"loc": [x, y], x: x, y: y, z: 1});
+ }
+}
+
+t.ensureIndex({loc: "2d"});
+
+function p() {
+ print("--------------");
+ for (var y = 0; y < 10; y++) {
+ var c = t.find({y: y}).sort({x: 1});
var s = "";
- while ( c.hasNext() )
+ while (c.hasNext())
s += c.next().z + " ";
- print( s );
+ print(s);
}
- print( "--------------" );
+ print("--------------");
}
p();
-var res = t.update({ loc: { $within: { $center: [[ 5, 5 ], 2 ]}}}, { $inc: { z: 1 }}, false, true);
-assert.writeOK( res );
+var res = t.update({loc: {$within: {$center: [[5, 5], 2]}}}, {$inc: {z: 1}}, false, true);
+assert.writeOK(res);
p();
-assert.writeOK(t.update({}, {'$inc' : { 'z' : 1}}, false, true));
+assert.writeOK(t.update({}, {'$inc': {'z': 1}}, false, true));
p();
-res = t.update({ loc: { $within: { $center: [[ 5, 5 ], 2 ]}}}, { $inc: { z: 1 }}, false, true);
-assert.writeOK( res );
+res = t.update({loc: {$within: {$center: [[5, 5], 2]}}}, {$inc: {z: 1}}, false, true);
+assert.writeOK(res);
p();
diff --git a/jstests/core/geo_update2.js b/jstests/core/geo_update2.js
index f2f1b6cee4b..ffcf02617be 100644
--- a/jstests/core/geo_update2.js
+++ b/jstests/core/geo_update2.js
@@ -2,38 +2,35 @@
t = db.geo_update2;
t.drop();
-for(var x = 0; x < 10; x++ ) {
- for(var y = 0; y < 10; y++ ) {
- t.insert({"loc": [x, y] , x : x , y : y });
- }
-}
-
-t.ensureIndex( { loc : "2d" } );
-
-function p(){
- print( "--------------" );
- for ( var y=0; y<10; y++ ){
- var c = t.find( { y : y } ).sort( { x : 1 } );
+for (var x = 0; x < 10; x++) {
+ for (var y = 0; y < 10; y++) {
+ t.insert({"loc": [x, y], x: x, y: y});
+ }
+}
+
+t.ensureIndex({loc: "2d"});
+
+function p() {
+ print("--------------");
+ for (var y = 0; y < 10; y++) {
+ var c = t.find({y: y}).sort({x: 1});
var s = "";
- while ( c.hasNext() )
+ while (c.hasNext())
s += c.next().z + " ";
- print( s );
+ print(s);
}
- print( "--------------" );
+ print("--------------");
}
p();
-
-assert.writeOK(t.update({"loc" : {"$within" : {"$center" : [[5,5], 2]}}},
- {'$inc' : { 'z' : 1}}, false, true));
+assert.writeOK(
+ t.update({"loc": {"$within": {"$center": [[5, 5], 2]}}}, {'$inc': {'z': 1}}, false, true));
p();
-assert.writeOK(t.update({}, {'$inc' : { 'z' : 1}}, false, true));
+assert.writeOK(t.update({}, {'$inc': {'z': 1}}, false, true));
p();
-
-assert.writeOK(t.update({"loc" : {"$within" : {"$center" : [[5,5], 2]}}},
- {'$inc' : { 'z' : 1}}, false, true));
+assert.writeOK(
+ t.update({"loc": {"$within": {"$center": [[5, 5], 2]}}}, {'$inc': {'z': 1}}, false, true));
p();
-
diff --git a/jstests/core/geo_update_btree.js b/jstests/core/geo_update_btree.js
index 12a10c736b8..ea1025b10a9 100644
--- a/jstests/core/geo_update_btree.js
+++ b/jstests/core/geo_update_btree.js
@@ -1,31 +1,37 @@
// Tests whether the geospatial search is stable under btree updates
-var coll = db.getCollection( "jstests_geo_update_btree" );
+var coll = db.getCollection("jstests_geo_update_btree");
coll.drop();
-coll.ensureIndex( { loc : '2d' } );
+coll.ensureIndex({loc: '2d'});
-var big = new Array( 3000 ).toString();
+var big = new Array(3000).toString();
if (testingReplication) {
- coll.setWriteConcern({ w: 2 });
+ coll.setWriteConcern({w: 2});
}
Random.setRandomSeed();
var parallelInsert = startParallelShell(
- "Random.setRandomSeed();" +
- "for ( var i = 0; i < 1000; i++ ) {" +
+ "Random.setRandomSeed();" + "for ( var i = 0; i < 1000; i++ ) {" +
" var doc = { loc: [ Random.rand() * 180, Random.rand() * 180 ], v: '' };" +
- " db.jstests_geo_update_btree.insert(doc);" +
- "}");
+ " db.jstests_geo_update_btree.insert(doc);" + "}");
-for ( i = 0; i < 1000; i++ ) {
+for (i = 0; i < 1000; i++) {
coll.update(
- { loc : { $within : { $center : [ [ Random.rand() * 180, Random.rand() * 180 ], Random.rand() * 50 ] } } },
- { $set : { v : big } }, false, true );
-
- if( i % 10 == 0 ) print( i );
+ {
+ loc: {
+ $within:
+ {$center: [[Random.rand() * 180, Random.rand() * 180], Random.rand() * 50]}
+ }
+ },
+ {$set: {v: big}},
+ false,
+ true);
+
+ if (i % 10 == 0)
+ print(i);
}
parallelInsert();
diff --git a/jstests/core/geo_update_btree2.js b/jstests/core/geo_update_btree2.js
index b4ec059166a..de867bf8e14 100644
--- a/jstests/core/geo_update_btree2.js
+++ b/jstests/core/geo_update_btree2.js
@@ -11,56 +11,57 @@
// In order to expose the specific NON GUARANTEED isolation behavior this file tests
// we disable table scans to ensure that the new query system only looks at the 2d
// scan.
-assert.commandWorked( db._adminCommand( { setParameter:1, notablescan:true } ) );
+assert.commandWorked(db._adminCommand({setParameter: 1, notablescan: true}));
-var status = function( msg ){
- print( "\n\n###\n" + msg + "\n###\n\n" );
+var status = function(msg) {
+ print("\n\n###\n" + msg + "\n###\n\n");
};
-var coll = db.getCollection( "jstests_geo_update_btree2" );
+var coll = db.getCollection("jstests_geo_update_btree2");
coll.drop();
-coll.ensureIndex( { loc : '2d' } );
+coll.ensureIndex({loc: '2d'});
-status( "Inserting points..." );
+status("Inserting points...");
var numPoints = 10;
Random.setRandomSeed();
-for ( i = 0; i < numPoints; i++ ) {
- coll.insert( { _id : i, loc : [ Random.rand() * 180, Random.rand() * 180 ], i : i % 2 } );
+for (i = 0; i < numPoints; i++) {
+ coll.insert({_id: i, loc: [Random.rand() * 180, Random.rand() * 180], i: i % 2});
}
-status( "Starting long query..." );
+status("Starting long query...");
-var query = coll.find({ loc : { $within : { $box : [[-180, -180], [180, 180]] } } }).batchSize( 2 );
-var firstValues = [ query.next()._id, query.next()._id ];
-printjson( firstValues );
+var query = coll.find({loc: {$within: {$box: [[-180, -180], [180, 180]]}}}).batchSize(2);
+var firstValues = [query.next()._id, query.next()._id];
+printjson(firstValues);
-status( "Removing points not returned by query..." );
+status("Removing points not returned by query...");
var allQuery = coll.find();
var removeIds = [];
-while( allQuery.hasNext() ){
+while (allQuery.hasNext()) {
var id = allQuery.next()._id;
- if( firstValues.indexOf( id ) < 0 ){
- removeIds.push( id );
+ if (firstValues.indexOf(id) < 0) {
+ removeIds.push(id);
}
}
var updateIds = [];
-for( var i = 0, max = removeIds.length / 2; i < max; i++ ) updateIds.push( removeIds.pop() );
+for (var i = 0, max = removeIds.length / 2; i < max; i++)
+ updateIds.push(removeIds.pop());
-printjson( removeIds );
-coll.remove({ _id : { $in : removeIds } });
+printjson(removeIds);
+coll.remove({_id: {$in: removeIds}});
-status( "Updating points returned by query..." );
+status("Updating points returned by query...");
printjson(updateIds);
-var big = new Array( 3000 ).toString();
-for( var i = 0; i < updateIds.length; i++ )
- coll.update({ _id : updateIds[i] }, { $set : { data : big } });
+var big = new Array(3000).toString();
+for (var i = 0; i < updateIds.length; i++)
+ coll.update({_id: updateIds[i]}, {$set: {data: big}});
-status( "Counting final points..." );
+status("Counting final points...");
// It's not defined whether or not we return documents that are modified during a query. We
// shouldn't crash, but it's not defined how many results we get back. This test is modifying every
@@ -69,4 +70,4 @@ status( "Counting final points..." );
// assert.eq( ( numPoints - 2 ) / 2, query.itcount() )
query.itcount();
-assert.commandWorked( db._adminCommand( { setParameter:1, notablescan:false} ) );
+assert.commandWorked(db._adminCommand({setParameter: 1, notablescan: false}));
diff --git a/jstests/core/geo_update_dedup.js b/jstests/core/geo_update_dedup.js
index 3011c5c9cfe..b354f3ca7ae 100644
--- a/jstests/core/geo_update_dedup.js
+++ b/jstests/core/geo_update_dedup.js
@@ -7,9 +7,11 @@ var t = db.jstests_geo_update_dedup;
// 2d index with $near
t.drop();
t.ensureIndex({locs: "2d"});
-t.save({locs: [[49.999,49.999], [50.0,50.0], [50.001,50.001]]});
+t.save({locs: [[49.999, 49.999], [50.0, 50.0], [50.001, 50.001]]});
-var q = {locs: {$near: [50.0, 50.0]}};
+var q = {
+ locs: {$near: [50.0, 50.0]}
+};
assert.eq(1, t.find(q).itcount(), 'duplicates returned from query');
var res = t.update({locs: {$near: [50.0, 50.0]}}, {$inc: {touchCount: 1}}, false, true);
@@ -18,8 +20,8 @@ assert.eq(1, t.findOne().touchCount);
t.drop();
t.ensureIndex({locs: "2d"});
-t.save({locs: [{x:49.999,y:49.999}, {x:50.0,y:50.0}, {x:50.001,y:50.001}]});
-res = t.update({locs: {$near: {x:50.0, y:50.0}}}, {$inc: {touchCount: 1}});
+t.save({locs: [{x: 49.999, y: 49.999}, {x: 50.0, y: 50.0}, {x: 50.001, y: 50.001}]});
+res = t.update({locs: {$near: {x: 50.0, y: 50.0}}}, {$inc: {touchCount: 1}});
assert.eq(1, res.nMatched);
assert.eq(1, t.findOne().touchCount);
@@ -35,26 +37,31 @@ assert.eq(1, t.findOne().touchCount);
// 2dsphere index with $geoNear
t.drop();
t.ensureIndex({geo: "2dsphere"});
-var x = { "type" : "Polygon",
- "coordinates" : [[[49.999,49.999], [50.0,50.0], [50.001,50.001], [49.999,49.999]]]};
+var x = {
+ "type": "Polygon",
+ "coordinates": [[[49.999, 49.999], [50.0, 50.0], [50.001, 50.001], [49.999, 49.999]]]
+};
t.save({geo: x});
-res = t.update({geo: {$geoNear: {"type" : "Point", "coordinates" : [50.0, 50.0]}}},
- {$inc: {touchCount: 1}}, false, true);
+res = t.update({geo: {$geoNear: {"type": "Point", "coordinates": [50.0, 50.0]}}},
+ {$inc: {touchCount: 1}},
+ false,
+ true);
assert.eq(1, res.nMatched);
assert.eq(1, t.findOne().touchCount);
t.drop();
var locdata = [
- {geo: {type: "Point", coordinates: [49.999,49.999]}},
- {geo: {type: "Point", coordinates: [50.000,50.000]}},
- {geo: {type: "Point", coordinates: [50.001,50.001]}}
+ {geo: {type: "Point", coordinates: [49.999, 49.999]}},
+ {geo: {type: "Point", coordinates: [50.000, 50.000]}},
+ {geo: {type: "Point", coordinates: [50.001, 50.001]}}
];
t.save({locdata: locdata, count: 0});
t.ensureIndex({"locdata.geo": "2dsphere"});
-res = t.update({"locdata.geo": {$geoNear: {"type" : "Point", "coordinates" : [50.0, 50.0]}}},
- {$inc: {touchCount: 1}}, false, true);
+res = t.update({"locdata.geo": {$geoNear: {"type": "Point", "coordinates": [50.0, 50.0]}}},
+ {$inc: {touchCount: 1}},
+ false,
+ true);
assert.eq(1, res.nMatched);
assert.eq(1, t.findOne().touchCount);
-
diff --git a/jstests/core/geo_validate.js b/jstests/core/geo_validate.js
index 5b9957166c3..6d92e5736ce 100644
--- a/jstests/core/geo_validate.js
+++ b/jstests/core/geo_validate.js
@@ -5,95 +5,95 @@
var coll = db.geo_validate;
coll.drop();
-coll.ensureIndex({ geo : "2dsphere" });
+coll.ensureIndex({geo: "2dsphere"});
//
//
// Make sure we can't do a $within search with an invalid circular region
-assert.throws(function(){
- coll.findOne({ geo : { $within : { $center : [[0, 0], -1] } } });
+assert.throws(function() {
+ coll.findOne({geo: {$within: {$center: [[0, 0], -1]}}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $within : { $centerSphere : [[0, 0], -1] } } });
+assert.throws(function() {
+ coll.findOne({geo: {$within: {$centerSphere: [[0, 0], -1]}}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $within : { $center : [[0, 0], NaN] } } });
+assert.throws(function() {
+ coll.findOne({geo: {$within: {$center: [[0, 0], NaN]}}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $within : { $centerSphere : [[0, 0], NaN] } } });
+assert.throws(function() {
+ coll.findOne({geo: {$within: {$centerSphere: [[0, 0], NaN]}}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $within : { $center : [[0, 0], -Infinity] } } });
+assert.throws(function() {
+ coll.findOne({geo: {$within: {$center: [[0, 0], -Infinity]}}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $within : { $centerSphere : [[0, 0], -Infinity] } } });
+assert.throws(function() {
+ coll.findOne({geo: {$within: {$centerSphere: [[0, 0], -Infinity]}}});
});
//
//
// Make sure we can't do geo search with invalid point coordinates.
-assert.throws(function(){
- coll.findOne({ geo : { $within : { $center : [[NaN, 0], 1] } } });
+assert.throws(function() {
+ coll.findOne({geo: {$within: {$center: [[NaN, 0], 1]}}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $within : { $centerSphere : [[NaN, 0], 1] } } });
+assert.throws(function() {
+ coll.findOne({geo: {$within: {$centerSphere: [[NaN, 0], 1]}}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $within : { $center : [[Infinity, 0], 1] } } });
+assert.throws(function() {
+ coll.findOne({geo: {$within: {$center: [[Infinity, 0], 1]}}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $within : { $centerSphere : [[-Infinity, 0], 1] } } });
+assert.throws(function() {
+ coll.findOne({geo: {$within: {$centerSphere: [[-Infinity, 0], 1]}}});
});
//
//
// Make sure we can do a $within search with a zero-radius circular region
-assert.writeOK(coll.insert({ geo : [0, 0] }));
-assert.neq(null, coll.findOne({ geo : { $within : { $center : [[0, 0], 0] } } }));
-assert.neq(null, coll.findOne({ geo : { $within : { $centerSphere : [[0, 0], 0] } } }));
-assert.neq(null, coll.findOne({ geo : { $within : { $center : [[0, 0], Infinity] } } }));
-assert.neq(null, coll.findOne({ geo : { $within : { $centerSphere : [[0, 0], Infinity] } } }));
+assert.writeOK(coll.insert({geo: [0, 0]}));
+assert.neq(null, coll.findOne({geo: {$within: {$center: [[0, 0], 0]}}}));
+assert.neq(null, coll.findOne({geo: {$within: {$centerSphere: [[0, 0], 0]}}}));
+assert.neq(null, coll.findOne({geo: {$within: {$center: [[0, 0], Infinity]}}}));
+assert.neq(null, coll.findOne({geo: {$within: {$centerSphere: [[0, 0], Infinity]}}}));
//
//
// Make sure we can't do a $near search with an invalid circular region
-assert.throws(function(){
- coll.findOne({ geo : { $geoNear : [0, 0, -1] } });
+assert.throws(function() {
+ coll.findOne({geo: {$geoNear: [0, 0, -1]}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $geoNear : [0, 0], $maxDistance : -1 } });
+assert.throws(function() {
+ coll.findOne({geo: {$geoNear: [0, 0], $maxDistance: -1}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $geoNear : [0, 0, NaN] } });
+assert.throws(function() {
+ coll.findOne({geo: {$geoNear: [0, 0, NaN]}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $geoNear : [0, 0], $maxDistance : NaN } });
+assert.throws(function() {
+ coll.findOne({geo: {$geoNear: [0, 0], $maxDistance: NaN}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $geoNear : [0, 0, -Infinity] } });
+assert.throws(function() {
+ coll.findOne({geo: {$geoNear: [0, 0, -Infinity]}});
});
-assert.throws(function(){
- coll.findOne({ geo : { $geoNear : [0, 0], $maxDistance : -Infinity } });
+assert.throws(function() {
+ coll.findOne({geo: {$geoNear: [0, 0], $maxDistance: -Infinity}});
});
//
//
// Make sure we can't do a near search with a negative limit
-assert.commandFailed(db.runCommand({geoNear: coll.getName(),
- near: [0,0], spherical: true, num: -1}));
-assert.commandFailed(db.runCommand({geoNear: coll.getName(),
- near: [0,0], spherical: true, num: -Infinity}));
+assert.commandFailed(
+ db.runCommand({geoNear: coll.getName(), near: [0, 0], spherical: true, num: -1}));
+assert.commandFailed(
+ db.runCommand({geoNear: coll.getName(), near: [0, 0], spherical: true, num: -Infinity}));
// NaN is interpreted as limit 0
-assert.commandWorked(db.runCommand({geoNear: coll.getName(),
- near: [0,0], spherical: true, num: NaN}));
-
+assert.commandWorked(
+ db.runCommand({geoNear: coll.getName(), near: [0, 0], spherical: true, num: NaN}));
//
// SERVER-17241 Polygon has no loop
-assert.writeError(coll.insert({ geo : { type: 'Polygon', coordinates: [] } }));
+assert.writeError(coll.insert({geo: {type: 'Polygon', coordinates: []}}));
//
// SERVER-17486 Loop has less then 3 vertices.
assert.writeError(coll.insert({geo: {type: 'Polygon', coordinates: [[]]}}));
-assert.writeError(coll.insert({geo: {type: 'Polygon', coordinates: [[[0,0]]]}}));
-assert.writeError(coll.insert({geo: {type: 'Polygon', coordinates: [[[0,0], [0,0], [0,0], [0,0]]]}}));
+assert.writeError(coll.insert({geo: {type: 'Polygon', coordinates: [[[0, 0]]]}}));
+assert.writeError(
+ coll.insert({geo: {type: 'Polygon', coordinates: [[[0, 0], [0, 0], [0, 0], [0, 0]]]}}));
diff --git a/jstests/core/geo_withinquery.js b/jstests/core/geo_withinquery.js
index d60116c8838..3a71608ab6d 100644
--- a/jstests/core/geo_withinquery.js
+++ b/jstests/core/geo_withinquery.js
@@ -3,13 +3,17 @@ t = db.geo_withinquery;
t.drop();
num = 0;
-for ( x=0; x<=20; x++ ){
- for ( y=0; y<=20; y++ ){
- o = { _id : num++ , loc : [ x , y ] };
- t.save( o );
+for (x = 0; x <= 20; x++) {
+ for (y = 0; y <= 20; y++) {
+ o = {
+ _id: num++,
+ loc: [x, y]
+ };
+ t.save(o);
}
}
-assert.eq(21 * 21 - 1, t.find({ $and: [ {loc: {$ne:[0,0]}},
- {loc: {$within: {$box: [[0,0], [100,100]]}}},
- ]}).itcount(), "UHOH!");
+assert.eq(21 * 21 - 1,
+ t.find({$and: [{loc: {$ne: [0, 0]}}, {loc: {$within: {$box: [[0, 0], [100, 100]]}}}, ]})
+ .itcount(),
+ "UHOH!");
diff --git a/jstests/core/geoa.js b/jstests/core/geoa.js
index 036a5630550..cd1eeaf5aaa 100644
--- a/jstests/core/geoa.js
+++ b/jstests/core/geoa.js
@@ -2,11 +2,11 @@
t = db.geoa;
t.drop();
-t.save( { _id : 1 , a : { loc : [ 5 , 5 ] } } );
-t.save( { _id : 2 , a : { loc : [ 6 , 6 ] } } );
-t.save( { _id : 3 , a : { loc : [ 7 , 7 ] } } );
+t.save({_id: 1, a: {loc: [5, 5]}});
+t.save({_id: 2, a: {loc: [6, 6]}});
+t.save({_id: 3, a: {loc: [7, 7]}});
-t.ensureIndex( { "a.loc" : "2d" } );
+t.ensureIndex({"a.loc": "2d"});
-cur = t.find( { "a.loc" : { $near : [ 6 , 6 ] } } );
-assert.eq( 2 , cur.next()._id , "A1" );
+cur = t.find({"a.loc": {$near: [6, 6]}});
+assert.eq(2, cur.next()._id, "A1");
diff --git a/jstests/core/geob.js b/jstests/core/geob.js
index 0dcc2658ba2..b78eaa453df 100644
--- a/jstests/core/geob.js
+++ b/jstests/core/geob.js
@@ -1,10 +1,18 @@
var t = db.geob;
t.drop();
-var a = {p: [0, 0]};
-var b = {p: [1, 0]};
-var c = {p: [3, 4]};
-var d = {p: [0, 6]};
+var a = {
+ p: [0, 0]
+};
+var b = {
+ p: [1, 0]
+};
+var c = {
+ p: [3, 4]
+};
+var d = {
+ p: [0, 6]
+};
t.save(a);
t.save(b);
@@ -12,7 +20,7 @@ t.save(c);
t.save(d);
t.ensureIndex({p: "2d"});
-var res = t.runCommand("geoNear", {near: [0,0]});
+var res = t.runCommand("geoNear", {near: [0, 0]});
assert.close(3, res.stats.avgDistance, "A");
assert.close(0, res.results[0].dis, "B1");
@@ -27,7 +35,7 @@ assert.eq(c._id, res.results[2].obj._id, "D2");
assert.close(6, res.results[3].dis, "E1");
assert.eq(d._id, res.results[3].obj._id, "E2");
-res = t.runCommand("geoNear", {near: [0,0], distanceMultiplier: 2});
+res = t.runCommand("geoNear", {near: [0, 0], distanceMultiplier: 2});
assert.close(6, res.stats.avgDistance, "F");
assert.close(0, res.results[0].dis, "G");
assert.close(2, res.results[1].dis, "H");
diff --git a/jstests/core/geoc.js b/jstests/core/geoc.js
index 138b86c65c5..26e762e4978 100644
--- a/jstests/core/geoc.js
+++ b/jstests/core/geoc.js
@@ -1,24 +1,27 @@
t = db.geoc;
-t.drop();
+t.drop();
N = 1000;
-for (var i=0; i<N; i++) t.insert({loc:[100+Math.random(), 100+Math.random()], z:0});
-for (var i=0; i<N; i++) t.insert({loc:[0+Math.random(), 0+Math.random()], z:1});
-for (var i=0; i<N; i++) t.insert({loc:[-100+Math.random(), -100+Math.random()], z:2});
+for (var i = 0; i < N; i++)
+ t.insert({loc: [100 + Math.random(), 100 + Math.random()], z: 0});
+for (var i = 0; i < N; i++)
+ t.insert({loc: [0 + Math.random(), 0 + Math.random()], z: 1});
+for (var i = 0; i < N; i++)
+ t.insert({loc: [-100 + Math.random(), -100 + Math.random()], z: 2});
-t.ensureIndex({loc:'2d'});
+t.ensureIndex({loc: '2d'});
-function test( z , l ){
- assert.lt( 0 , t.find({loc:{$near:[100,100]}, z:z}).limit(l).itcount() , "z: " + z + " l: " + l );
+function test(z, l) {
+ assert.lt(
+ 0, t.find({loc: {$near: [100, 100]}, z: z}).limit(l).itcount(), "z: " + z + " l: " + l);
}
-test( 1 , 1 );
-test( 1 , 2 );
-test( 2 , 2 );
-test( 2 , 10 );
-test( 2 , 1000 );
-test( 2 , 100000 );
-test( 2 , 10000000 );
-
+test(1, 1);
+test(1, 2);
+test(2, 2);
+test(2, 10);
+test(2, 1000);
+test(2, 100000);
+test(2, 10000000);
diff --git a/jstests/core/geod.js b/jstests/core/geod.js
index 118f5021381..35844d0f914 100644
--- a/jstests/core/geod.js
+++ b/jstests/core/geod.js
@@ -1,14 +1,14 @@
-var t=db.geod;
-t.drop();
-t.save( { loc: [0,0] } );
-t.save( { loc: [0.5,0] } );
-t.ensureIndex({loc:"2d"});
+var t = db.geod;
+t.drop();
+t.save({loc: [0, 0]});
+t.save({loc: [0.5, 0]});
+t.ensureIndex({loc: "2d"});
// do a few geoNears with different maxDistances. The first iteration
// should match no points in the dataset.
dists = [.49, .51, 1.0];
-for (idx in dists){
- b=db.runCommand({geoNear:"geod", near:[1,0], num:2, maxDistance:dists[idx]});
- assert.eq(b.errmsg, undefined, "A"+idx);
- l=b.results.length;
- assert.eq(l, idx, "B"+idx);
+for (idx in dists) {
+ b = db.runCommand({geoNear: "geod", near: [1, 0], num: 2, maxDistance: dists[idx]});
+ assert.eq(b.errmsg, undefined, "A" + idx);
+ l = b.results.length;
+ assert.eq(l, idx, "B" + idx);
}
diff --git a/jstests/core/geoe.js b/jstests/core/geoe.js
index 22feb83ab1e..9568e13dc08 100644
--- a/jstests/core/geoe.js
+++ b/jstests/core/geoe.js
@@ -4,29 +4,28 @@
// the end of the btree and not reverse direction (leaving the rest of
// the search always looking at some random non-matching point).
-t=db.geo_box;
+t = db.geo_box;
t.drop();
-t.insert({"_id": 1, "geo" : [ 33, -11.1 ] });
-t.insert({"_id": 2, "geo" : [ -122, 33.3 ] });
-t.insert({"_id": 3, "geo" : [ -122, 33.4 ] });
-t.insert({"_id": 4, "geo" : [ -122.28, 37.67 ] });
-t.insert({"_id": 5, "geo" : [ -122.29, 37.68 ] });
-t.insert({"_id": 6, "geo" : [ -122.29, 37.67 ] });
-t.insert({"_id": 7, "geo" : [ -122.29, 37.67 ] });
-t.insert({"_id": 8, "geo" : [ -122.29, 37.68 ] });
-t.insert({"_id": 9, "geo" : [ -122.29, 37.68 ] });
-t.insert({"_id": 10, "geo" : [ -122.3, 37.67 ] });
-t.insert({"_id": 11, "geo" : [ -122.31, 37.67 ] });
-t.insert({"_id": 12, "geo" : [ -122.3, 37.66 ] });
-t.insert({"_id": 13, "geo" : [ -122.2435, 37.637072 ] });
-t.insert({"_id": 14, "geo" : [ -122.289505, 37.695774 ] });
+t.insert({"_id": 1, "geo": [33, -11.1]});
+t.insert({"_id": 2, "geo": [-122, 33.3]});
+t.insert({"_id": 3, "geo": [-122, 33.4]});
+t.insert({"_id": 4, "geo": [-122.28, 37.67]});
+t.insert({"_id": 5, "geo": [-122.29, 37.68]});
+t.insert({"_id": 6, "geo": [-122.29, 37.67]});
+t.insert({"_id": 7, "geo": [-122.29, 37.67]});
+t.insert({"_id": 8, "geo": [-122.29, 37.68]});
+t.insert({"_id": 9, "geo": [-122.29, 37.68]});
+t.insert({"_id": 10, "geo": [-122.3, 37.67]});
+t.insert({"_id": 11, "geo": [-122.31, 37.67]});
+t.insert({"_id": 12, "geo": [-122.3, 37.66]});
+t.insert({"_id": 13, "geo": [-122.2435, 37.637072]});
+t.insert({"_id": 14, "geo": [-122.289505, 37.695774]});
+t.ensureIndex({geo: "2d"});
-t.ensureIndex({ geo : "2d" });
-
-c=t.find({geo: {"$within": {"$box": [[-125.078461,36.494473], [-120.320648,38.905199]]} } });
+c = t.find({geo: {"$within": {"$box": [[-125.078461, 36.494473], [-120.320648, 38.905199]]}}});
assert.eq(11, c.count(), "A1");
-c=t.find({geo: {"$within": {"$box": [[-124.078461,36.494473], [-120.320648,38.905199]]} } });
+c = t.find({geo: {"$within": {"$box": [[-124.078461, 36.494473], [-120.320648, 38.905199]]}}});
assert.eq(11, c.count(), "B1");
diff --git a/jstests/core/geof.js b/jstests/core/geof.js
index 718c6e6593d..1d7f13eb881 100644
--- a/jstests/core/geof.js
+++ b/jstests/core/geof.js
@@ -2,18 +2,20 @@ t = db.geof;
t.drop();
// corners (dist ~0.98)
-t.insert({loc: [ 0.7, 0.7]});
-t.insert({loc: [ 0.7, -0.7]});
-t.insert({loc: [-0.7, 0.7]});
+t.insert({loc: [0.7, 0.7]});
+t.insert({loc: [0.7, -0.7]});
+t.insert({loc: [-0.7, 0.7]});
t.insert({loc: [-0.7, -0.7]});
// on x axis (dist == 0.9)
t.insert({loc: [-0.9, 0]});
t.insert({loc: [-0.9, 0]});
-t.ensureIndex( { loc : "2d" } );
+t.ensureIndex({loc: "2d"});
-t.find({loc: {$near: [0,0]}}).limit(2).forEach( function(o){
- //printjson(o);
- assert.lt(Geo.distance([0,0], o.loc), 0.95);
-});
+t.find({loc: {$near: [0, 0]}})
+ .limit(2)
+ .forEach(function(o) {
+ // printjson(o);
+ assert.lt(Geo.distance([0, 0], o.loc), 0.95);
+ });
diff --git a/jstests/core/geonear_cmd_input_validation.js b/jstests/core/geonear_cmd_input_validation.js
index 2a44391183b..ad3d56d240a 100644
--- a/jstests/core/geonear_cmd_input_validation.js
+++ b/jstests/core/geonear_cmd_input_validation.js
@@ -9,12 +9,8 @@ t.ensureIndex({loc: "2dsphere"});
// 2d index and minDistance.
// 2d index and GeoJSON
// 2dsphere index and spherical=false
-var indexTypes = ['2d', '2dsphere'],
- pointTypes = [
- {type: 'Point', coordinates: [0, 0]},
- [0, 0]],
- sphericalOptions = [true, false],
- optionNames = ['minDistance', 'maxDistance'],
+var indexTypes = ['2d', '2dsphere'], pointTypes = [{type: 'Point', coordinates: [0, 0]}, [0, 0]],
+ sphericalOptions = [true, false], optionNames = ['minDistance', 'maxDistance'],
badNumbers = [-1, undefined, 'foo'];
indexTypes.forEach(function(indexType) {
@@ -38,28 +34,20 @@ indexTypes.forEach(function(indexType) {
}
// Unsupported combinations should return errors.
- if (
- (indexType == '2d' && optionName == 'minDistance') ||
- (indexType == '2d' && !isLegacy) ||
- (indexType == '2dsphere' && !spherical)
- ) {
- assert.commandFailed(
- db.runCommand(makeCommand(1)),
- "geoNear with spherical=" + spherical + " and " + indexType
- + " index and " + pointDescription
- + " should've failed."
- );
+ if ((indexType == '2d' && optionName == 'minDistance') ||
+ (indexType == '2d' && !isLegacy) || (indexType == '2dsphere' && !spherical)) {
+ assert.commandFailed(db.runCommand(makeCommand(1)),
+ "geoNear with spherical=" + spherical + " and " +
+ indexType + " index and " + pointDescription +
+ " should've failed.");
// Stop processing this combination in the test matrix.
return;
}
// This is a supported combination. No error.
- assert.commandWorked(db.runCommand({
- geoNear: t.getName(),
- near: pointType,
- spherical: spherical
- }));
+ assert.commandWorked(
+ db.runCommand({geoNear: t.getName(), near: pointType, spherical: spherical}));
// No error with min/maxDistance 1.
db.runCommand(makeCommand(1));
@@ -71,28 +59,25 @@ indexTypes.forEach(function(indexType) {
}
// Try several bad values for min/maxDistance.
- badNumbers.concat(outOfRangeDistances).forEach(function(badDistance) {
+ badNumbers.concat(outOfRangeDistances)
+ .forEach(function(badDistance) {
- var msg = (
- "geoNear with spherical=" + spherical + " and "
- + pointDescription + " and " + indexType
- + " index should've failed with "
- + optionName + " " + badDistance);
+ var msg =
+ ("geoNear with spherical=" + spherical + " and " + pointDescription +
+ " and " + indexType + " index should've failed with " + optionName +
+ " " + badDistance);
- assert.commandFailed(
- db.runCommand(makeCommand(badDistance)),
- msg);
- });
+ assert.commandFailed(db.runCommand(makeCommand(badDistance)), msg);
+ });
// Bad values for limit / num.
['num', 'limit'].forEach(function(limitOptionName) {
[-1, 'foo'].forEach(function(badLimit) {
- var msg = (
- "geoNear with spherical=" + spherical + " and "
- + pointDescription + " and " + indexType
- + " index should've failed with '"
- + limitOptionName + "' " + badLimit);
+ var msg =
+ ("geoNear with spherical=" + spherical + " and " + pointDescription +
+ " and " + indexType + " index should've failed with '" +
+ limitOptionName + "' " + badLimit);
var command = makeCommand(1);
command[limitOptionName] = badLimit;
@@ -103,11 +88,9 @@ indexTypes.forEach(function(indexType) {
// Bad values for distanceMultiplier.
badNumbers.forEach(function(badNumber) {
- var msg = (
- "geoNear with spherical=" + spherical + " and "
- + pointDescription + " and " + indexType
- + " index should've failed with distanceMultiplier "
- + badNumber);
+ var msg = ("geoNear with spherical=" + spherical + " and " + pointDescription +
+ " and " + indexType +
+ " index should've failed with distanceMultiplier " + badNumber);
var command = makeCommand(1);
command['distanceMultiplier'] = badNumber;
diff --git a/jstests/core/getlog1.js b/jstests/core/getlog1.js
index cf59b8f8318..d19ba7cdde8 100644
--- a/jstests/core/getlog1.js
+++ b/jstests/core/getlog1.js
@@ -1,7 +1,7 @@
-// to run:
+// to run:
// ./mongo jstests/<this-file>
-contains = function(arr,obj) {
+contains = function(arr, obj) {
var i = arr.length;
while (i--) {
if (arr[i] === obj) {
@@ -11,17 +11,17 @@ contains = function(arr,obj) {
return false;
};
-var resp = db.adminCommand({getLog:"*"});
-assert( resp.ok == 1, "error executing getLog command" );
-assert( resp.names, "no names field" );
-assert( resp.names.length > 0, "names array is empty" );
-assert( contains(resp.names,"global") , "missing global category" );
-assert( !contains(resp.names,"butty") , "missing butty category" );
+var resp = db.adminCommand({getLog: "*"});
+assert(resp.ok == 1, "error executing getLog command");
+assert(resp.names, "no names field");
+assert(resp.names.length > 0, "names array is empty");
+assert(contains(resp.names, "global"), "missing global category");
+assert(!contains(resp.names, "butty"), "missing butty category");
-resp = db.adminCommand({getLog:"global"});
-assert( resp.ok == 1, "error executing getLog command" );
-assert( resp.log, "no log field" );
-assert( resp.log.length > 0 , "no log lines" );
+resp = db.adminCommand({getLog: "global"});
+assert(resp.ok == 1, "error executing getLog command");
+assert(resp.log, "no log field");
+assert(resp.log.length > 0, "no log lines");
// getLog value must be a string
-assert.commandFailed(db.adminCommand({ getLog: 21 }));
+assert.commandFailed(db.adminCommand({getLog: 21}));
diff --git a/jstests/core/getlog2.js b/jstests/core/getlog2.js
index 9bd217d3844..b6cf223b967 100644
--- a/jstests/core/getlog2.js
+++ b/jstests/core/getlog2.js
@@ -14,35 +14,49 @@ contains = function(arr, func) {
};
// test doesn't work when talking to mongos
-if(db.isMaster().msg != "isdbgrid") {
- // run a slow query
- glcol.save({ "SENTINEL": 1 });
- glcol.findOne({ "SENTINEL": 1, "$where": function() { sleep(1000); return true; } });
+if (db.isMaster().msg != "isdbgrid") {
+ // run a slow query
+ glcol.save({"SENTINEL": 1});
+ glcol.findOne({
+ "SENTINEL": 1,
+ "$where": function() {
+ sleep(1000);
+ return true;
+ }
+ });
- // run a slow update
- glcol.update({ "SENTINEL": 1, "$where": function() { sleep(1000); return true; } }, { "x": "x" });
+ // run a slow update
+ glcol.update(
+ {
+ "SENTINEL": 1,
+ "$where": function() {
+ sleep(1000);
+ return true;
+ }
+ },
+ {"x": "x"});
- var resp = db.adminCommand({getLog:"global"});
- assert( resp.ok == 1, "error executing getLog command" );
- assert( resp.log, "no log field" );
- assert( resp.log.length > 0 , "no log lines" );
+ var resp = db.adminCommand({getLog: "global"});
+ assert(resp.ok == 1, "error executing getLog command");
+ assert(resp.log, "no log field");
+ assert(resp.log.length > 0, "no log lines");
- // ensure that slow query is logged in detail
- assert( contains(resp.log, function(v) {
- print(v);
- var opString = db.getMongo().useReadCommands() ? " find " : " query ";
- var filterString = db.getMongo().useReadCommands() ? "filter:" : "query:";
- return v.indexOf(opString) != -1 && v.indexOf(filterString) != -1 &&
- v.indexOf("keysExamined:") != -1 &&
- v.indexOf("docsExamined:") != -1 &&
- v.indexOf("SENTINEL") != -1;
- }) );
+ // ensure that slow query is logged in detail
+ assert(contains(resp.log,
+ function(v) {
+ print(v);
+ var opString = db.getMongo().useReadCommands() ? " find " : " query ";
+ var filterString = db.getMongo().useReadCommands() ? "filter:" : "query:";
+ return v.indexOf(opString) != -1 && v.indexOf(filterString) != -1 &&
+ v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 &&
+ v.indexOf("SENTINEL") != -1;
+ }));
- // same, but for update
- assert( contains(resp.log, function(v) {
- return v.indexOf(" update ") != -1 && v.indexOf("query") != -1 &&
- v.indexOf("keysExamined:") != -1 &&
- v.indexOf("docsExamined:") != -1 &&
- v.indexOf("SENTINEL") != -1;
- }) );
+ // same, but for update
+ assert(contains(resp.log,
+ function(v) {
+ return v.indexOf(" update ") != -1 && v.indexOf("query") != -1 &&
+ v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 &&
+ v.indexOf("SENTINEL") != -1;
+ }));
}
diff --git a/jstests/core/getmore_invalidation.js b/jstests/core/getmore_invalidation.js
index 58104aaf95b..5c5d06deb1c 100644
--- a/jstests/core/getmore_invalidation.js
+++ b/jstests/core/getmore_invalidation.js
@@ -43,7 +43,8 @@
// Update the next matching doc so that it no longer matches.
assert.writeOK(t.update({_id: 3}, {$set: {a: "nomatch"}}));
- // Either the cursor should skip the result that no longer matches, or we should get back the old
+ // Either the cursor should skip the result that no longer matches, or we should get back the
+ // old
// version of the doc.
assert(!cursor.hasNext() || cursor.next()["a"] === "bar");
@@ -55,7 +56,7 @@
assert.writeOK(t.insert({a: 2, b: 3}));
assert.writeOK(t.insert({a: 2, b: 4}));
- cursor = t.find({a: {$in: [1,2]}}).sort({b: 1}).batchSize(2);
+ cursor = t.find({a: {$in: [1, 2]}}).sort({b: 1}).batchSize(2);
cursor.next();
cursor.next();
@@ -72,13 +73,14 @@
assert.writeOK(t.insert({a: 2, b: 3}));
assert.writeOK(t.insert({a: 2, b: 4}));
- cursor = t.find({a: {$in: [1,2]}}).sort({b: 1}).batchSize(2);
+ cursor = t.find({a: {$in: [1, 2]}}).sort({b: 1}).batchSize(2);
cursor.next();
cursor.next();
assert.writeOK(t.update({a: 2, b: 3}, {$set: {a: 6}}));
- // Either the cursor should skip the result that no longer matches, or we should get back the old
+ // Either the cursor should skip the result that no longer matches, or we should get back the
+ // old
// version of the doc.
assert(cursor.hasNext());
assert(cursor.next()["a"] === 2);
@@ -198,7 +200,7 @@
t.insert({a: 3, b: 3});
t.insert({a: 2, b: 1});
- cursor = t.find({a: {$in: [1,2,3]}}).sort({b: 1}).batchSize(2);
+ cursor = t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).batchSize(2);
cursor.next();
cursor.next();
@@ -215,7 +217,7 @@
t.insert({a: 3, b: 3});
t.insert({a: 2, b: 1});
- cursor = t.find({a: {$in: [1,2,3]}}).sort({b: 1}).batchSize(2);
+ cursor = t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).batchSize(2);
cursor.next();
cursor.next();
diff --git a/jstests/core/group1.js b/jstests/core/group1.js
index f59442cc6d7..6100ee94c70 100644
--- a/jstests/core/group1.js
+++ b/jstests/core/group1.js
@@ -1,116 +1,145 @@
t = db.group1;
t.drop();
-t.save( { n : 1 , a : 1 } );
-t.save( { n : 2 , a : 1 } );
-t.save( { n : 3 , a : 2 } );
-t.save( { n : 4 , a : 2 } );
-t.save( { n : 5 , a : 2 } );
-
-var p = { key : { a : true } ,
- reduce : function(obj,prev) { prev.count++; },
- initial: { count: 0 }
- };
-
-res = t.group( p );
-
-assert( res.length == 2 , "A" );
-assert( res[0].a == 1 , "B" );
-assert( res[0].count == 2 , "C" );
-assert( res[1].a == 2 , "D" );
-assert( res[1].count == 3 , "E" );
-
-assert.eq( res , t.groupcmd( p ) , "ZZ" );
-
-ret = t.groupcmd( { key : {} , reduce : p.reduce , initial : p.initial } );
-assert.eq( 1 , ret.length , "ZZ 2" );
-assert.eq( 5 , ret[0].count , "ZZ 3" );
-
-ret = t.groupcmd( { key : {} , reduce : function(obj,prev){ prev.sum += obj.n; } , initial : { sum : 0 } } );
-assert.eq( 1 , ret.length , "ZZ 4" );
-assert.eq( 15 , ret[0].sum , "ZZ 5" );
+t.save({n: 1, a: 1});
+t.save({n: 2, a: 1});
+t.save({n: 3, a: 2});
+t.save({n: 4, a: 2});
+t.save({n: 5, a: 2});
+
+var p = {
+ key: {a: true},
+ reduce: function(obj, prev) {
+ prev.count++;
+ },
+ initial: {count: 0}
+};
+
+res = t.group(p);
+
+assert(res.length == 2, "A");
+assert(res[0].a == 1, "B");
+assert(res[0].count == 2, "C");
+assert(res[1].a == 2, "D");
+assert(res[1].count == 3, "E");
+
+assert.eq(res, t.groupcmd(p), "ZZ");
+
+ret = t.groupcmd({key: {}, reduce: p.reduce, initial: p.initial});
+assert.eq(1, ret.length, "ZZ 2");
+assert.eq(5, ret[0].count, "ZZ 3");
+
+ret = t.groupcmd({
+ key: {},
+ reduce: function(obj, prev) {
+ prev.sum += obj.n;
+ },
+ initial: {sum: 0}
+});
+assert.eq(1, ret.length, "ZZ 4");
+assert.eq(15, ret[0].sum, "ZZ 5");
t.drop();
-t.save( { "a" : 2 } );
-t.save( { "b" : 5 } );
-t.save( { "a" : 1 } );
-t.save( { "a" : 2 } );
-
-c = {key: {a:1}, cond: {}, initial: {"count": 0}, reduce: function(obj, prev) { prev.count++; } };
+t.save({"a": 2});
+t.save({"b": 5});
+t.save({"a": 1});
+t.save({"a": 2});
-assert.eq( t.group( c ) , t.groupcmd( c ) , "ZZZZ" );
+c = {
+ key: {a: 1},
+ cond: {},
+ initial: {"count": 0},
+ reduce: function(obj, prev) {
+ prev.count++;
+ }
+};
+assert.eq(t.group(c), t.groupcmd(c), "ZZZZ");
t.drop();
-t.save( { name : { first : "a" , last : "A" } } );
-t.save( { name : { first : "b" , last : "B" } } );
-t.save( { name : { first : "a" , last : "A" } } );
-
+t.save({name: {first: "a", last: "A"}});
+t.save({name: {first: "b", last: "B"}});
+t.save({name: {first: "a", last: "A"}});
-p = { key : { 'name.first' : true } ,
- reduce : function(obj,prev) { prev.count++; },
- initial: { count: 0 }
- };
-
-res = t.group( p );
-assert.eq( 2 , res.length , "Z1" );
-assert.eq( "a" , res[0]['name.first'] , "Z2" );
-assert.eq( "b" , res[1]['name.first'] , "Z3" );
-assert.eq( 2 , res[0].count , "Z4" );
-assert.eq( 1 , res[1].count , "Z5" );
+p = {
+ key: {'name.first': true},
+ reduce: function(obj, prev) {
+ prev.count++;
+ },
+ initial: {count: 0}
+};
+
+res = t.group(p);
+assert.eq(2, res.length, "Z1");
+assert.eq("a", res[0]['name.first'], "Z2");
+assert.eq("b", res[1]['name.first'], "Z3");
+assert.eq(2, res[0].count, "Z4");
+assert.eq(1, res[1].count, "Z5");
// SERVER-15851 Test invalid user input.
p = {
- ns: "group1",
- key: {"name.first": true},
- $reduce: function(obj, prev){prev.count++;},
- initial: {count: 0},
- finalize: "abc"
- };
+ ns: "group1",
+ key: {"name.first": true},
+ $reduce: function(obj, prev) {
+ prev.count++;
+ },
+ initial: {count: 0},
+ finalize: "abc"
+};
assert.commandFailedWithCode(db.runCommand({group: p}),
ErrorCodes.JSInterpreterFailure,
"Illegal finalize function");
p = {
- ns: "group1",
- key: {"name.first": true},
- $reduce: function(obj, prev){prev.count++;},
- initial: {count: 0},
- finalize: function(obj){ob;}
- };
+ ns: "group1",
+ key: {"name.first": true},
+ $reduce: function(obj, prev) {
+ prev.count++;
+ },
+ initial: {count: 0},
+ finalize: function(obj) {
+ ob;
+ }
+};
assert.commandFailedWithCode(db.runCommand({group: p}),
ErrorCodes.JSInterpreterFailure,
"Illegal finalize function 2");
p = {
- ns: "group1",
- $keyf: "a" ,
- $reduce: function(obj, prev){prev.count++;},
- initial: {count: 0},
- finalize: function(obj){ob;}
- };
+ ns: "group1",
+ $keyf: "a",
+ $reduce: function(obj, prev) {
+ prev.count++;
+ },
+ initial: {count: 0},
+ finalize: function(obj) {
+ ob;
+ }
+};
assert.commandFailedWithCode(db.runCommand({group: p}),
ErrorCodes.JSInterpreterFailure,
"Illegal keyf function");
p = {
- ns: "group1",
- key: {"name.first": true},
- $reduce: "abc",
- initial: {count: 0}
- };
+ ns: "group1",
+ key: {"name.first": true},
+ $reduce: "abc",
+ initial: {count: 0}
+};
assert.commandFailedWithCode(db.runCommand({group: p}),
ErrorCodes.JSInterpreterFailure,
"Illegal reduce function");
p = {
- ns: "group1",
- key: {"name.first": true},
- $reduce: function(obj, pre){prev.count++;},
- initial: {count: 0}
- };
+ ns: "group1",
+ key: {"name.first": true},
+ $reduce: function(obj, pre) {
+ prev.count++;
+ },
+ initial: {count: 0}
+};
assert.commandFailedWithCode(db.runCommand({group: p}),
ErrorCodes.JSInterpreterFailure,
"Illegal reduce function 2");
diff --git a/jstests/core/group2.js b/jstests/core/group2.js
index b5566bce2c7..ada675f6f69 100644
--- a/jstests/core/group2.js
+++ b/jstests/core/group2.js
@@ -5,12 +5,13 @@ t.save({a: 2});
t.save({b: 5});
t.save({a: 1});
-cmd = { key: {a: 1},
- initial: {count: 0},
- reduce: function(obj, prev) {
- prev.count++;
- }
- };
+cmd = {
+ key: {a: 1},
+ initial: {count: 0},
+ reduce: function(obj, prev) {
+ prev.count++;
+ }
+};
result = t.group(cmd);
@@ -24,18 +25,19 @@ assert.eq(1, result[1].count, "G");
assert.eq(1, result[2].count, "H");
var keyFn = function(x) {
- return { a: 'a' in x ? x.a : null };
+ return {
+ a: 'a' in x ? x.a : null
+ };
};
delete cmd.key;
cmd["$keyf"] = keyFn;
-result2 = t.group( cmd );
-
-assert.eq( result , result2, "check result2" );
+result2 = t.group(cmd);
+assert.eq(result, result2, "check result2");
delete cmd.$keyf;
cmd["keyf"] = keyFn;
-result3 = t.group( cmd );
+result3 = t.group(cmd);
-assert.eq( result , result3, "check result3" );
+assert.eq(result, result3, "check result3");
diff --git a/jstests/core/group3.js b/jstests/core/group3.js
index d113b9d570f..134953b6649 100644
--- a/jstests/core/group3.js
+++ b/jstests/core/group3.js
@@ -6,20 +6,20 @@ t.save({a: 2});
t.save({a: 3});
t.save({a: 4});
-
-cmd = { initial: {count: 0, sum: 0},
- reduce: function(obj, prev) {
- prev.count++;
- prev.sum += obj.a;
- },
- finalize: function(obj) {
- if (obj.count){
- obj.avg = obj.sum / obj.count;
- }else{
- obj.avg = 0;
- }
- },
- };
+cmd = {
+ initial: {count: 0, sum: 0},
+ reduce: function(obj, prev) {
+ prev.count++;
+ prev.sum += obj.a;
+ },
+ finalize: function(obj) {
+ if (obj.count) {
+ obj.avg = obj.sum / obj.count;
+ } else {
+ obj.avg = 0;
+ }
+ },
+};
result1 = t.group(cmd);
@@ -28,11 +28,10 @@ assert.eq(10, result1[0].sum, "test1");
assert.eq(4, result1[0].count, "test1");
assert.eq(2.5, result1[0].avg, "test1");
-
cmd['finalize'] = function(obj) {
- if (obj.count){
+ if (obj.count) {
return obj.sum / obj.count;
- }else{
+ } else {
return 0;
}
};
diff --git a/jstests/core/group4.js b/jstests/core/group4.js
index 788d55c7962..2465274c027 100644
--- a/jstests/core/group4.js
+++ b/jstests/core/group4.js
@@ -2,44 +2,39 @@
t = db.group4;
t.drop();
-function test( c , n ){
+function test(c, n) {
var x = {};
- c.forEach(
- function(z){
- assert.eq( z.count , z.values.length , n + "\t" + tojson( z ) );
- }
- );
+ c.forEach(function(z) {
+ assert.eq(z.count, z.values.length, n + "\t" + tojson(z));
+ });
}
-t.insert({name:'bob',foo:1});
-t.insert({name:'bob',foo:2});
-t.insert({name:'alice',foo:1});
-t.insert({name:'alice',foo:3});
-t.insert({name:'fred',foo:3});
-t.insert({name:'fred',foo:4});
+t.insert({name: 'bob', foo: 1});
+t.insert({name: 'bob', foo: 2});
+t.insert({name: 'alice', foo: 1});
+t.insert({name: 'alice', foo: 3});
+t.insert({name: 'fred', foo: 3});
+t.insert({name: 'fred', foo: 4});
-x = t.group(
- {
- key: {foo:1},
- initial: {count:0,values:[]},
- reduce: function (obj, prev){
- prev.count++;
- prev.values.push(obj.name);
- }
- }
-);
-test( x , "A" );
+x = t.group({
+ key: {foo: 1},
+ initial: {count: 0, values: []},
+ reduce: function(obj, prev) {
+ prev.count++;
+ prev.values.push(obj.name);
+ }
+});
+test(x, "A");
-x = t.group(
- {
- key: {foo:1},
- initial: {count:0},
- reduce: function (obj, prev){
- if (!prev.values) {prev.values = [];}
- prev.count++;
- prev.values.push(obj.name);
+x = t.group({
+ key: {foo: 1},
+ initial: {count: 0},
+ reduce: function(obj, prev) {
+ if (!prev.values) {
+ prev.values = [];
}
+ prev.count++;
+ prev.values.push(obj.name);
}
-);
-test( x , "B" );
-
+});
+test(x, "B");
diff --git a/jstests/core/group5.js b/jstests/core/group5.js
index 5d13297bd4d..aa6283f73ff 100644
--- a/jstests/core/group5.js
+++ b/jstests/core/group5.js
@@ -3,36 +3,35 @@ t = db.group5;
t.drop();
// each group has groupnum+1 5 users
-for ( var group=0; group<10; group++ ){
- for ( var i=0; i<5+group; i++ ){
- t.save( { group : "group" + group , user : i } );
+for (var group = 0; group < 10; group++) {
+ for (var i = 0; i < 5 + group; i++) {
+ t.save({group: "group" + group, user: i});
}
}
-function c( group ){
- return t.group(
- {
- key : { group : 1 } ,
- q : { group : "group" + group } ,
- initial : { users : {} },
- reduce : function(obj,prev){
- prev.users[obj.user] = true; // add this user to the hash
- },
- finalize : function(x){
- var count = 0;
- for (var key in x.users){
- count++;
- }
-
- //replace user obj with count
- //count add new field and keep users
- x.users = count;
- return x;
+function c(group) {
+ return t.group({
+ key: {group: 1},
+ q: {group: "group" + group},
+ initial: {users: {}},
+ reduce: function(obj, prev) {
+ prev.users[obj.user] = true; // add this user to the hash
+ },
+ finalize: function(x) {
+ var count = 0;
+ for (var key in x.users) {
+ count++;
}
- })[0]; // returns array
+
+ // replace user obj with count
+ // count add new field and keep users
+ x.users = count;
+ return x;
+ }
+ })[0]; // returns array
}
-assert.eq( "group0" , c(0).group , "g0" );
-assert.eq( 5 , c(0).users , "g0 a" );
-assert.eq( "group5" , c(5).group , "g5" );
-assert.eq( 10 , c(5).users , "g5 a" );
+assert.eq("group0", c(0).group, "g0");
+assert.eq(5, c(0).users, "g0 a");
+assert.eq("group5", c(5).group, "g5");
+assert.eq(10, c(5).users, "g5 a");
diff --git a/jstests/core/group6.js b/jstests/core/group6.js
index b77a37a5d11..fff2fcaafd7 100644
--- a/jstests/core/group6.js
+++ b/jstests/core/group6.js
@@ -1,32 +1,40 @@
t = db.jstests_group6;
t.drop();
-for( i = 1; i <= 10; ++i ) {
- t.save( {i:new NumberLong( i ),y:1} );
+for (i = 1; i <= 10; ++i) {
+ t.save({i: new NumberLong(i), y: 1});
}
-assert.eq.automsg( "55", "t.group( {key:'y', reduce:function(doc,out){ out.i += doc.i; }, initial:{i:0} } )[ 0 ].i" );
+assert.eq.automsg(
+ "55",
+ "t.group( {key:'y', reduce:function(doc,out){ out.i += doc.i; }, initial:{i:0} } )[ 0 ].i");
t.drop();
-for( i = 1; i <= 10; ++i ) {
- if ( i % 2 == 0 ) {
- t.save( {i:new NumberLong( i ),y:1} );
+for (i = 1; i <= 10; ++i) {
+ if (i % 2 == 0) {
+ t.save({i: new NumberLong(i), y: 1});
} else {
- t.save( {i:i,y:1} );
+ t.save({i: i, y: 1});
}
}
-assert.eq.automsg( "55", "t.group( {key:'y', reduce:function(doc,out){ out.i += doc.i; }, initial:{i:0} } )[ 0 ].i" );
+assert.eq.automsg(
+ "55",
+ "t.group( {key:'y', reduce:function(doc,out){ out.i += doc.i; }, initial:{i:0} } )[ 0 ].i");
t.drop();
-for( i = 1; i <= 10; ++i ) {
- if ( i % 2 == 1 ) {
- t.save( {i:new NumberLong( i ),y:1} );
+for (i = 1; i <= 10; ++i) {
+ if (i % 2 == 1) {
+ t.save({i: new NumberLong(i), y: 1});
} else {
- t.save( {i:i,y:1} );
+ t.save({i: i, y: 1});
}
}
-assert.eq.automsg( "55", "t.group( {key:'y', reduce:function(doc,out){ out.i += doc.i; }, initial:{i:0} } )[ 0 ].i" );
+assert.eq.automsg(
+ "55",
+ "t.group( {key:'y', reduce:function(doc,out){ out.i += doc.i; }, initial:{i:0} } )[ 0 ].i");
-assert.eq.automsg( "NumberLong(10)", "t.group( {$reduce: function(doc, prev) { prev.count += 1; }, initial: {count: new NumberLong(0) }} )[ 0 ].count" ); \ No newline at end of file
+assert.eq.automsg(
+ "NumberLong(10)",
+ "t.group( {$reduce: function(doc, prev) { prev.count += 1; }, initial: {count: new NumberLong(0) }} )[ 0 ].count"); \ No newline at end of file
diff --git a/jstests/core/group7.js b/jstests/core/group7.js
index 1413000079c..6d6ef03e99d 100644
--- a/jstests/core/group7.js
+++ b/jstests/core/group7.js
@@ -3,28 +3,28 @@
t = db.jstests_group7;
t.drop();
-function checkForYield( docs, updates ) {
+function checkForYield(docs, updates) {
t.drop();
a = 0;
- for( var i = 0; i < docs; ++i ) {
- t.save( {a:a} );
+ for (var i = 0; i < docs; ++i) {
+ t.save({a: a});
}
// Iteratively update all a values atomically.
p = startParallelShell(
'for( a = 0; a < ' + updates + '; ++a ) {' +
- 'db.jstests_group7.update({ $atomic: true }, { $set: { a: a }}, false, true);' +
- '}' );
+ 'db.jstests_group7.update({ $atomic: true }, { $set: { a: a }}, false, true);' +
+ '}');
- for( var i = 0; i < updates; ++i ) {
+ for (var i = 0; i < updates; ++i) {
print("running group " + i + " of " + updates);
- ret = t.group({key:{a:1},reduce:function(){},initial:{}});
+ ret = t.group({key: {a: 1}, reduce: function() {}, initial: {}});
// Check if group sees more than one a value, indicating that it yielded.
- if ( ret.length > 1 ) {
+ if (ret.length > 1) {
p();
return true;
}
- printjson( ret );
+ printjson(ret);
}
p();
@@ -34,14 +34,14 @@ function checkForYield( docs, updates ) {
var yielded = false;
var docs = 1500;
var updates = 50;
-for( var j = 1; j <= 6; ++j ) {
+for (var j = 1; j <= 6; ++j) {
print("Iteration " + j + " docs = " + docs + " updates = " + updates);
- if ( checkForYield( docs, updates ) ) {
+ if (checkForYield(docs, updates)) {
yielded = true;
break;
}
- // Increase docs and updates to encourage yielding.
+ // Increase docs and updates to encourage yielding.
docs *= 2;
updates *= 2;
}
-assert( yielded );
+assert(yielded);
diff --git a/jstests/core/group8.js b/jstests/core/group8.js
index 14fd890f1e7..85c8248b992 100644
--- a/jstests/core/group8.js
+++ b/jstests/core/group8.js
@@ -9,16 +9,30 @@ assert.writeOK(coll.insert({a: 2, b: "x"}));
assert.writeOK(coll.insert({a: 3, b: "y"}));
// Test case when "count" and "keys" are both zero.
-result = coll.runCommand({group: {ns: coll.getName(), key: {a: 1}, cond: {b: "z"},
- $reduce: function(x, y) {}, initial: {}}});
+result = coll.runCommand({
+ group: {
+ ns: coll.getName(),
+ key: {a: 1},
+ cond: {b: "z"},
+ $reduce: function(x, y) {},
+ initial: {}
+ }
+});
assert.commandWorked(result);
assert.eq(result.count, 0);
assert.eq(result.keys, 0);
assert.eq(result.retval.length, 0);
// Test case when "count" and "keys" are both non-zero.
-result = coll.runCommand({group: {ns: coll.getName(), key: {a: 1}, cond: {b: "x"},
- $reduce: function(x, y) {}, initial: {}}});
+result = coll.runCommand({
+ group: {
+ ns: coll.getName(),
+ key: {a: 1},
+ cond: {b: "x"},
+ $reduce: function(x, y) {},
+ initial: {}
+ }
+});
assert.commandWorked(result);
assert.eq(result.count, 3);
assert.eq(result.keys, 2);
diff --git a/jstests/core/group_empty.js b/jstests/core/group_empty.js
index c1772f88b22..6f5637ac0df 100644
--- a/jstests/core/group_empty.js
+++ b/jstests/core/group_empty.js
@@ -2,11 +2,12 @@
t = db.group_empty;
t.drop();
-res1 = db.runCommand({group: {$reduce: function(){}, ns: 'group_empty', cond: {}, key: {}, initial: {count: 0}}});
-t.ensureIndex( { x : 1 } );
-res2 = db.runCommand({group: {$reduce: function(){}, ns: 'group_empty', cond: {}, key: {}, initial: {count: 0}}});
+res1 = db.runCommand(
+ {group: {$reduce: function() {}, ns: 'group_empty', cond: {}, key: {}, initial: {count: 0}}});
+t.ensureIndex({x: 1});
+res2 = db.runCommand(
+ {group: {$reduce: function() {}, ns: 'group_empty', cond: {}, key: {}, initial: {count: 0}}});
assert.docEq(res1.retval, res2.retval);
assert.eq(res1.keys, res2.keys);
assert.eq(res1.count, res2.count);
-
diff --git a/jstests/core/grow_hash_table.js b/jstests/core/grow_hash_table.js
index 0c782444a2d..b26baae31af 100644
--- a/jstests/core/grow_hash_table.js
+++ b/jstests/core/grow_hash_table.js
@@ -11,9 +11,13 @@ var testDB = db.getSiblingDB('grow_hash_table');
var doTest = function(count) {
print('Testing with count of ' + count);
testDB.dropDatabase();
- var id = { data: 1 };
- var doc = { _id: id };
- var projection = { };
+ var id = {
+ data: 1
+ };
+ var doc = {
+ _id: id
+ };
+ var projection = {};
// Create a document and a projection with fields r1, r2, r3 ...
for (var i = 1; i <= count; ++i) {
@@ -27,11 +31,10 @@ var doTest = function(count) {
// Try to read the document using a large projection
try {
- var findCount = testDB.collection.find({ _id: id }, projection).itcount();
+ var findCount = testDB.collection.find({_id: id}, projection).itcount();
assert(findCount == 1,
'Failed to find single stored document, find().itcount() == ' + findCount);
- }
- catch (e) {
+ } catch (e) {
testDB.dropDatabase();
doassert('Test FAILED! Caught exception ' + tojsononeline(e));
}
diff --git a/jstests/core/hashindex1.js b/jstests/core/hashindex1.js
index 449f5520b84..778e31d84b2 100644
--- a/jstests/core/hashindex1.js
+++ b/jstests/core/hashindex1.js
@@ -4,88 +4,95 @@ t.drop();
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
-//test non-single field hashed indexes don't get created (maybe change later)
-var badspec = {a : "hashed" , b : 1};
-t.ensureIndex( badspec );
-assert.eq( t.getIndexes().length , 1 , "only _id index should be created");
-
-//test unique index not created (maybe change later)
-var goodspec = {a : "hashed"};
-t.ensureIndex( goodspec , {"unique" : true});
-assert.eq( t.getIndexes().length , 1 , "unique index got created.");
-
-//now test that non-unique index does get created
+// test non-single field hashed indexes don't get created (maybe change later)
+var badspec = {
+ a: "hashed",
+ b: 1
+};
+t.ensureIndex(badspec);
+assert.eq(t.getIndexes().length, 1, "only _id index should be created");
+
+// test unique index not created (maybe change later)
+var goodspec = {
+ a: "hashed"
+};
+t.ensureIndex(goodspec, {"unique": true});
+assert.eq(t.getIndexes().length, 1, "unique index got created.");
+
+// now test that non-unique index does get created
t.ensureIndex(goodspec);
-assert.eq( t.getIndexes().length , 2 , "hashed index didn't get created");
+assert.eq(t.getIndexes().length, 2, "hashed index didn't get created");
-//test basic inserts
-for(i=0; i < 10; i++ ){
- t.insert( {a:i } );
+// test basic inserts
+for (i = 0; i < 10; i++) {
+ t.insert({a: i});
}
-assert.eq( t.find().count() , 10 , "basic insert didn't work");
-assert.eq( t.find().hint(goodspec).toArray().length , 10 , "basic insert didn't work");
-assert.eq( t.find({a : 3}).hint({_id : 1}).toArray()[0]._id ,
- t.find({a : 3}).hint(goodspec).toArray()[0]._id ,
- "hashindex lookup didn't work" );
-
-
-//make sure things with the same hash are not both returned
-t.insert( {a: 3.1} );
-assert.eq( t.find().count() , 11 , "additional insert didn't work");
-assert.eq( t.find({a : 3.1}).hint(goodspec).toArray().length , 1);
-assert.eq( t.find({a : 3}).hint(goodspec).toArray().length , 1);
-//test right obj is found
-assert.eq( t.find({a : 3.1}).hint(goodspec).toArray()[0].a , 3.1);
+assert.eq(t.find().count(), 10, "basic insert didn't work");
+assert.eq(t.find().hint(goodspec).toArray().length, 10, "basic insert didn't work");
+assert.eq(t.find({a: 3}).hint({_id: 1}).toArray()[0]._id,
+ t.find({a: 3}).hint(goodspec).toArray()[0]._id,
+ "hashindex lookup didn't work");
+
+// make sure things with the same hash are not both returned
+t.insert({a: 3.1});
+assert.eq(t.find().count(), 11, "additional insert didn't work");
+assert.eq(t.find({a: 3.1}).hint(goodspec).toArray().length, 1);
+assert.eq(t.find({a: 3}).hint(goodspec).toArray().length, 1);
+// test right obj is found
+assert.eq(t.find({a: 3.1}).hint(goodspec).toArray()[0].a, 3.1);
// Make sure we're using the hashed index.
-var explain = t.find({a : 1}).explain();
-assert( isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
+var explain = t.find({a: 1}).explain();
+assert(isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
// SERVER-12222
-//printjson( t.find({a : {$gte : 3 , $lte : 3}}).explain() )
-//assert.eq( t.find({a : {$gte : 3 , $lte : 3}}).explain().cursor ,
+// printjson( t.find({a : {$gte : 3 , $lte : 3}}).explain() )
+// assert.eq( t.find({a : {$gte : 3 , $lte : 3}}).explain().cursor ,
// cursorname ,
// "not using hashed cursor");
-var explain = t.find({c : 1}).explain();
-assert( !isIxscan(explain.queryPlanner.winningPlan), "using irrelevant hashed index");
+var explain = t.find({c: 1}).explain();
+assert(!isIxscan(explain.queryPlanner.winningPlan), "using irrelevant hashed index");
// Hash index used with a $in set membership predicate.
-var explain = t.find({a : {$in : [1,2]}}).explain();
+var explain = t.find({a: {$in: [1, 2]}}).explain();
printjson(explain);
-assert( isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
+assert(isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
// Hash index used with a singleton $and predicate conjunction.
-var explain = t.find({$and : [{a : 1}]}).explain();
-assert( isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
+var explain = t.find({$and: [{a: 1}]}).explain();
+assert(isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
// Hash index used with a non singleton $and predicate conjunction.
-var explain = t.find({$and : [{a : {$in : [1,2]}},{a : {$gt : 1}}]}).explain();
-assert( isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
+var explain = t.find({$and: [{a: {$in: [1, 2]}}, {a: {$gt: 1}}]}).explain();
+assert(isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
-//test creation of index based on hash of _id index
-var goodspec2 = {'_id' : "hashed"};
-t.ensureIndex( goodspec2 );
-assert.eq( t.getIndexes().length , 3 , "_id index didn't get created");
+// test creation of index based on hash of _id index
+var goodspec2 = {
+ '_id': "hashed"
+};
+t.ensureIndex(goodspec2);
+assert.eq(t.getIndexes().length, 3, "_id index didn't get created");
var newid = t.findOne()["_id"];
-assert.eq( t.find( {_id : newid} ).hint( {_id : 1} ).toArray()[0]._id ,
- t.find( {_id : newid} ).hint( goodspec2 ).toArray()[0]._id,
- "using hashed index and different index returns different docs");
-
-
-//test creation of sparse hashed index
-var sparseindex = {b : "hashed"};
-t.ensureIndex( sparseindex , {"sparse" : true});
-assert.eq( t.getIndexes().length , 4 , "sparse index didn't get created");
-
-//test sparse index has smaller total items on after inserts
-for(i=0; i < 10; i++ ){
- t.insert( {b : i} );
+assert.eq(t.find({_id: newid}).hint({_id: 1}).toArray()[0]._id,
+ t.find({_id: newid}).hint(goodspec2).toArray()[0]._id,
+ "using hashed index and different index returns different docs");
+
+// test creation of sparse hashed index
+var sparseindex = {
+ b: "hashed"
+};
+t.ensureIndex(sparseindex, {"sparse": true});
+assert.eq(t.getIndexes().length, 4, "sparse index didn't get created");
+
+// test sparse index has smaller total items on after inserts
+for (i = 0; i < 10; i++) {
+ t.insert({b: i});
}
var totalb = t.find().hint(sparseindex).toArray().length;
-assert.eq( totalb , 10 , "sparse index has wrong total");
+assert.eq(totalb, 10, "sparse index has wrong total");
-var total = t.find().hint({"_id" : 1}).toArray().length;
+var total = t.find().hint({"_id": 1}).toArray().length;
var totala = t.find().hint(goodspec).toArray().length;
-assert.eq(total , totala , "non-sparse index has wrong total");
-assert.lt(totalb , totala , "sparse index should have smaller total");
+assert.eq(total, totala, "non-sparse index has wrong total");
+assert.lt(totalb, totala, "sparse index should have smaller total");
diff --git a/jstests/core/hashtest1.js b/jstests/core/hashtest1.js
index d0307c2e59e..631aee522da 100644
--- a/jstests/core/hashtest1.js
+++ b/jstests/core/hashtest1.js
@@ -1,79 +1,80 @@
-//hashtest1.js
-//Simple tests to check hashing of various types
-//make sure that different numeric types hash to same thing, and other sanity checks
-
-var hash = function( v , seed ){
- if (seed)
- return db.runCommand({"_hashBSONElement" : v , "seed" : seed})["out"];
- else
- return db.runCommand({"_hashBSONElement" : v})["out"];
+// hashtest1.js
+// Simple tests to check hashing of various types
+// make sure that different numeric types hash to same thing, and other sanity checks
+
+var hash = function(v, seed) {
+ if (seed)
+ return db.runCommand({"_hashBSONElement": v, "seed": seed})["out"];
+ else
+ return db.runCommand({"_hashBSONElement": v})["out"];
};
-var oidHash = hash( ObjectId() );
-var oidHash2 = hash( ObjectId() );
-var oidHash3 = hash( ObjectId() );
-assert(! friendlyEqual( oidHash, oidHash2) , "ObjectIDs should hash to different things");
-assert(! friendlyEqual( oidHash, oidHash3) , "ObjectIDs should hash to different things");
-assert(! friendlyEqual( oidHash2, oidHash3) , "ObjectIDs should hash to different things");
-
-var intHash = hash( NumberInt(3) );
-var doubHash = hash( 3 );
-var doubHash2 = hash( 3.0 );
-var longHash = hash( NumberLong(3) );
-var fracHash = hash( NumberInt(3.5) );
-assert.eq( intHash , doubHash );
-assert.eq( intHash , doubHash2 );
-assert.eq( intHash , longHash );
-assert.eq( intHash , fracHash );
-
-var trueHash = hash( true );
-var falseHash = hash( false );
-assert(! friendlyEqual( trueHash, falseHash) , "true and false should hash to different things");
-
-var nullHash = hash( null );
-assert(! friendlyEqual( falseHash , nullHash ) , "false and null should hash to different things");
-
-var dateHash = hash( new Date() );
+var oidHash = hash(ObjectId());
+var oidHash2 = hash(ObjectId());
+var oidHash3 = hash(ObjectId());
+assert(!friendlyEqual(oidHash, oidHash2), "ObjectIDs should hash to different things");
+assert(!friendlyEqual(oidHash, oidHash3), "ObjectIDs should hash to different things");
+assert(!friendlyEqual(oidHash2, oidHash3), "ObjectIDs should hash to different things");
+
+var intHash = hash(NumberInt(3));
+var doubHash = hash(3);
+var doubHash2 = hash(3.0);
+var longHash = hash(NumberLong(3));
+var fracHash = hash(NumberInt(3.5));
+assert.eq(intHash, doubHash);
+assert.eq(intHash, doubHash2);
+assert.eq(intHash, longHash);
+assert.eq(intHash, fracHash);
+
+var trueHash = hash(true);
+var falseHash = hash(false);
+assert(!friendlyEqual(trueHash, falseHash), "true and false should hash to different things");
+
+var nullHash = hash(null);
+assert(!friendlyEqual(falseHash, nullHash), "false and null should hash to different things");
+
+var dateHash = hash(new Date());
// Sleep so we get a new date. Sleeping for 1 sometimes returns the same date, so 2
sleep(2);
-var isodateHash = hash( ISODate() );
-assert(! friendlyEqual( dateHash, isodateHash) , "different dates should hash to different things");
+var isodateHash = hash(ISODate());
+assert(!friendlyEqual(dateHash, isodateHash), "different dates should hash to different things");
-var stringHash = hash( "3" );
-assert(! friendlyEqual( intHash , stringHash ), "3 and \"3\" should hash to different things");
+var stringHash = hash("3");
+assert(!friendlyEqual(intHash, stringHash), "3 and \"3\" should hash to different things");
-var regExpHash = hash( RegExp("3") );
-assert(! friendlyEqual( stringHash , regExpHash) , "\"3\" and RegExp(3) should hash to different things");
+var regExpHash = hash(RegExp("3"));
+assert(!friendlyEqual(stringHash, regExpHash),
+ "\"3\" and RegExp(3) should hash to different things");
-var intHash4 = hash( 4 );
-assert(! friendlyEqual( intHash , intHash4 ), "3 and 4 should hash to different things");
+var intHash4 = hash(4);
+assert(!friendlyEqual(intHash, intHash4), "3 and 4 should hash to different things");
-var intHashSeeded = hash( 4 , 3 );
-assert(! friendlyEqual(intHash4 , intHashSeeded ), "different seeds should make different hashes");
+var intHashSeeded = hash(4, 3);
+assert(!friendlyEqual(intHash4, intHashSeeded), "different seeds should make different hashes");
-var minkeyHash = hash( MinKey );
-var maxkeyHash = hash( MaxKey );
-assert(! friendlyEqual(minkeyHash , maxkeyHash ), "minkey and maxkey should hash to different things");
+var minkeyHash = hash(MinKey);
+var maxkeyHash = hash(MaxKey);
+assert(!friendlyEqual(minkeyHash, maxkeyHash), "minkey and maxkey should hash to different things");
-var arrayHash = hash( [0,1.0,NumberLong(2)] );
-var arrayHash2 = hash( [0,NumberInt(1),2] );
-assert.eq( arrayHash , arrayHash2 , "didn't squash numeric types in array");
+var arrayHash = hash([0, 1.0, NumberLong(2)]);
+var arrayHash2 = hash([0, NumberInt(1), 2]);
+assert.eq(arrayHash, arrayHash2, "didn't squash numeric types in array");
-var objectHash = hash( {"0":0, "1" : NumberInt(1), "2" : 2} );
-assert(! friendlyEqual(objectHash , arrayHash2) , "arrays and sub-objects should hash to different things");
+var objectHash = hash({"0": 0, "1": NumberInt(1), "2": 2});
+assert(!friendlyEqual(objectHash, arrayHash2),
+ "arrays and sub-objects should hash to different things");
-var c = hash( {a : {}, b : 1} );
-var d = hash( {a : {b : 1}} );
-assert(! friendlyEqual( c , d ) , "hashing doesn't group sub-docs and fields correctly");
+var c = hash({a: {}, b: 1});
+var d = hash({a: {b: 1}});
+assert(!friendlyEqual(c, d), "hashing doesn't group sub-docs and fields correctly");
-var e = hash( {a : 3 , b : [NumberLong(3), {c : NumberInt(3)}]} );
-var f = hash( {a : NumberLong(3) , b : [NumberInt(3), {c : 3.0}]} );
-assert.eq( e , f , "recursive number squashing doesn't work");
+var e = hash({a: 3, b: [NumberLong(3), {c: NumberInt(3)}]});
+var f = hash({a: NumberLong(3), b: [NumberInt(3), {c: 3.0}]});
+assert.eq(e, f, "recursive number squashing doesn't work");
-var nanHash = hash( 0/0 );
-var zeroHash = hash( 0 );
-assert.eq( nanHash , zeroHash , "NaN and Zero should hash to the same thing");
+var nanHash = hash(0 / 0);
+var zeroHash = hash(0);
+assert.eq(nanHash, zeroHash, "NaN and Zero should hash to the same thing");
-
-//should also test that CodeWScope hashes correctly
-//but waiting for SERVER-3391 (CodeWScope support in shell)
+// should also test that CodeWScope hashes correctly
+// but waiting for SERVER-3391 (CodeWScope support in shell)
diff --git a/jstests/core/hint1.js b/jstests/core/hint1.js
index 1de06fd4e41..ddee0f369be 100644
--- a/jstests/core/hint1.js
+++ b/jstests/core/hint1.js
@@ -1,7 +1,16 @@
p = db.jstests_hint1;
p.drop();
-p.save( { ts: new Date( 1 ), cls: "entry", verticals: "alleyinsider", live: true } );
-p.ensureIndex( { ts: 1 } );
+p.save({ts: new Date(1), cls: "entry", verticals: "alleyinsider", live: true});
+p.ensureIndex({ts: 1});
-assert.eq(1, p.find({ live: true, ts: { $lt: new Date(1234119308272) }, cls: "entry", verticals: "alleyinsider" }).sort({ ts: -1 }).hint({ ts: 1 }).count());
+assert.eq(1,
+ p.find({
+ live: true,
+ ts: {$lt: new Date(1234119308272)},
+ cls: "entry",
+ verticals: "alleyinsider"
+ })
+ .sort({ts: -1})
+ .hint({ts: 1})
+ .count());
diff --git a/jstests/core/hostinfo.js b/jstests/core/hostinfo.js
index 16c3810b2c4..6d27b195f39 100644
--- a/jstests/core/hostinfo.js
+++ b/jstests/core/hostinfo.js
@@ -1,33 +1,33 @@
// SERVER-4615: Ensure hostInfo() command returns expected results on each platform
-assert.commandWorked( db.hostInfo() );
+assert.commandWorked(db.hostInfo());
var hostinfo = db.hostInfo();
// test for os-specific fields
if (hostinfo.os.type == "Windows") {
- assert.neq( hostinfo.os.name, "" || null, "Missing Windows os name" );
- assert.neq( hostinfo.os.version, "" || null, "Missing Windows version" );
+ assert.neq(hostinfo.os.name, "" || null, "Missing Windows os name");
+ assert.neq(hostinfo.os.version, "" || null, "Missing Windows version");
} else if (hostinfo.os.type == "Linux") {
- assert.neq( hostinfo.os.name, "" || null, "Missing Linux os/distro name" );
- assert.neq( hostinfo.os.version, "" || null, "Missing Lindows version" );
+ assert.neq(hostinfo.os.name, "" || null, "Missing Linux os/distro name");
+ assert.neq(hostinfo.os.version, "" || null, "Missing Lindows version");
} else if (hostinfo.os.type == "Darwin") {
- assert.neq( hostinfo.os.name, "" || null, "Missing Darwin os name" );
- assert.neq( hostinfo.os.version, "" || null, "Missing Darwin version" );
+ assert.neq(hostinfo.os.name, "" || null, "Missing Darwin os name");
+ assert.neq(hostinfo.os.version, "" || null, "Missing Darwin version");
} else if (hostinfo.os.type == "BSD") {
- assert.neq( hostinfo.os.name, "" || null, "Missing FreeBSD os name" );
- assert.neq( hostinfo.os.version, "" || null, "Missing FreeBSD version" );
+ assert.neq(hostinfo.os.name, "" || null, "Missing FreeBSD os name");
+ assert.neq(hostinfo.os.version, "" || null, "Missing FreeBSD version");
}
-// comment out this block for systems which have not implemented hostinfo.
+// comment out this block for systems which have not implemented hostinfo.
if (hostinfo.os.type != "") {
- assert.neq( hostinfo.system.hostname, "" || null, "Missing Hostname" );
- assert.neq( hostinfo.system.currentTime, "" || null, "Missing Current Time" );
- assert.neq( hostinfo.system.cpuAddrSize, "" || null || 0, "Missing CPU Address Size" );
- assert.neq( hostinfo.system.memSizeMB, "" || null, "Missing Memory Size" );
- assert.neq( hostinfo.system.numCores, "" || null || 0, "Missing Number of Cores" );
- assert.neq( hostinfo.system.cpuArch, "" || null, "Missing CPU Architecture" );
- assert.neq( hostinfo.system.numaEnabled, "" || null, "Missing NUMA flag" );
+ assert.neq(hostinfo.system.hostname, "" || null, "Missing Hostname");
+ assert.neq(hostinfo.system.currentTime, "" || null, "Missing Current Time");
+ assert.neq(hostinfo.system.cpuAddrSize, "" || null || 0, "Missing CPU Address Size");
+ assert.neq(hostinfo.system.memSizeMB, "" || null, "Missing Memory Size");
+ assert.neq(hostinfo.system.numCores, "" || null || 0, "Missing Number of Cores");
+ assert.neq(hostinfo.system.cpuArch, "" || null, "Missing CPU Architecture");
+ assert.neq(hostinfo.system.numaEnabled, "" || null, "Missing NUMA flag");
}
diff --git a/jstests/core/id1.js b/jstests/core/id1.js
index 7c40f206851..dedf9c449c5 100644
--- a/jstests/core/id1.js
+++ b/jstests/core/id1.js
@@ -2,15 +2,15 @@
t = db.id1;
t.drop();
-t.save( { _id : { a : 1 , b : 2 } , x : "a" } );
-t.save( { _id : { a : 1 , b : 2 } , x : "b" } );
-t.save( { _id : { a : 3 , b : 2 } , x : "c" } );
-t.save( { _id : { a : 4 , b : 2 } , x : "d" } );
-t.save( { _id : { a : 4 , b : 2 } , x : "e" } );
-t.save( { _id : { a : 2 , b : 2 } , x : "f" } );
+t.save({_id: {a: 1, b: 2}, x: "a"});
+t.save({_id: {a: 1, b: 2}, x: "b"});
+t.save({_id: {a: 3, b: 2}, x: "c"});
+t.save({_id: {a: 4, b: 2}, x: "d"});
+t.save({_id: {a: 4, b: 2}, x: "e"});
+t.save({_id: {a: 2, b: 2}, x: "f"});
-assert.eq( 4 , t.find().count() , "A" );
-assert.eq( "b" , t.findOne( { _id : { a : 1 , b : 2 } } ).x );
-assert.eq( "c" , t.findOne( { _id : { a : 3 , b : 2 } } ).x );
-assert.eq( "e" , t.findOne( { _id : { a : 4 , b : 2 } } ).x );
-assert.eq( "f" , t.findOne( { _id : { a : 2 , b : 2 } } ).x );
+assert.eq(4, t.find().count(), "A");
+assert.eq("b", t.findOne({_id: {a: 1, b: 2}}).x);
+assert.eq("c", t.findOne({_id: {a: 3, b: 2}}).x);
+assert.eq("e", t.findOne({_id: {a: 4, b: 2}}).x);
+assert.eq("f", t.findOne({_id: {a: 2, b: 2}}).x);
diff --git a/jstests/core/idhack.js b/jstests/core/idhack.js
index b6f1c5fc4de..292c2ed86b6 100644
--- a/jstests/core/idhack.js
+++ b/jstests/core/idhack.js
@@ -5,78 +5,83 @@ t.drop();
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
-t.insert( { _id : { x : 1 } , z : 1 } );
-t.insert( { _id : { x : 2 } , z : 2 } );
-t.insert( { _id : { x : 3 } , z : 3 } );
-t.insert( { _id : 1 , z : 4 } );
-t.insert( { _id : 2 , z : 5 } );
-t.insert( { _id : 3 , z : 6 } );
+t.insert({_id: {x: 1}, z: 1});
+t.insert({_id: {x: 2}, z: 2});
+t.insert({_id: {x: 3}, z: 3});
+t.insert({_id: 1, z: 4});
+t.insert({_id: 2, z: 5});
+t.insert({_id: 3, z: 6});
-assert.eq( 2 , t.findOne( { _id : { x : 2 } } ).z , "A1" );
-assert.eq( 2 , t.find( { _id : { $gte : 2 } } ).count() , "A2" );
-assert.eq( 2 , t.find( { _id : { $gte : 2 } } ).itcount() , "A3" );
+assert.eq(2, t.findOne({_id: {x: 2}}).z, "A1");
+assert.eq(2, t.find({_id: {$gte: 2}}).count(), "A2");
+assert.eq(2, t.find({_id: {$gte: 2}}).itcount(), "A3");
-t.update( { _id : { x : 2 } } , { $set : { z : 7 } } );
-assert.eq( 7 , t.findOne( { _id : { x : 2 } } ).z , "B1" );
+t.update({_id: {x: 2}}, {$set: {z: 7}});
+assert.eq(7, t.findOne({_id: {x: 2}}).z, "B1");
-t.update( { _id : { $gte : 2 } } , { $set : { z : 8 } } , false , true );
-assert.eq( 4 , t.findOne( { _id : 1 } ).z , "C1" );
-assert.eq( 8 , t.findOne( { _id : 2 } ).z , "C2" );
-assert.eq( 8 , t.findOne( { _id : 3 } ).z , "C3" );
+t.update({_id: {$gte: 2}}, {$set: {z: 8}}, false, true);
+assert.eq(4, t.findOne({_id: 1}).z, "C1");
+assert.eq(8, t.findOne({_id: 2}).z, "C2");
+assert.eq(8, t.findOne({_id: 3}).z, "C3");
// explain output should show that the ID hack was applied.
-var query = { _id : { x : 2 } };
-var explain = t.find( query ).explain( true );
-print( "explain for " + tojson( query , "" , true ) + " = " + tojson( explain ) );
-assert.eq( 1 , explain.executionStats.nReturned , "D1" );
-assert.eq( 1 , explain.executionStats.totalKeysExamined , "D2" );
-assert( isIdhack(explain.queryPlanner.winningPlan), "D3" );
+var query = {
+ _id: {x: 2}
+};
+var explain = t.find(query).explain(true);
+print("explain for " + tojson(query, "", true) + " = " + tojson(explain));
+assert.eq(1, explain.executionStats.nReturned, "D1");
+assert.eq(1, explain.executionStats.totalKeysExamined, "D2");
+assert(isIdhack(explain.queryPlanner.winningPlan), "D3");
// ID hack cannot be used with hint().
-t.ensureIndex( { _id : 1 , a : 1 } );
-var hintExplain = t.find( query ).hint( { _id : 1 , a : 1 } ).explain();
-print( "explain for hinted query = " + tojson( hintExplain ) );
-assert( !isIdhack(hintExplain.queryPlanner.winningPlan), "E1" );
+t.ensureIndex({_id: 1, a: 1});
+var hintExplain = t.find(query).hint({_id: 1, a: 1}).explain();
+print("explain for hinted query = " + tojson(hintExplain));
+assert(!isIdhack(hintExplain.queryPlanner.winningPlan), "E1");
// ID hack cannot be used with skip().
-var skipExplain = t.find( query ).skip(1).explain();
-print( "explain for skip query = " + tojson( skipExplain ) );
-assert( !isIdhack(skipExplain.queryPlanner.winningPlan), "F1" );
+var skipExplain = t.find(query).skip(1).explain();
+print("explain for skip query = " + tojson(skipExplain));
+assert(!isIdhack(skipExplain.queryPlanner.winningPlan), "F1");
// Covered query returning _id field only can be handled by ID hack.
-var coveredExplain = t.find( query, { _id : 1 } ).explain();
-print( "explain for covered query = " + tojson( coveredExplain ) );
-assert( isIdhack(coveredExplain.queryPlanner.winningPlan), "G1" );
+var coveredExplain = t.find(query, {_id: 1}).explain();
+print("explain for covered query = " + tojson(coveredExplain));
+assert(isIdhack(coveredExplain.queryPlanner.winningPlan), "G1");
// Check doc from covered ID hack query.
-assert.eq( { _id : { x: 2 } }, t.findOne( query, { _id : 1 } ), "G2" );
+assert.eq({_id: {x: 2}}, t.findOne(query, {_id: 1}), "G2");
//
// Non-covered projection for idhack.
//
t.drop();
-t.insert( { _id: 0, a: 0, b: [ { c: 1 }, { c: 2 } ] });
-t.insert( { _id: 1, a: 1, b: [ { c: 3 }, { c: 4 } ] });
+t.insert({_id: 0, a: 0, b: [{c: 1}, {c: 2}]});
+t.insert({_id: 1, a: 1, b: [{c: 3}, {c: 4}]});
// Simple inclusion.
-assert.eq( { _id: 1, a: 1 }, t.find( { _id: 1 }, { a: 1 } ).next() );
-assert.eq( { a: 1 }, t.find({ _id: 1 }, { _id: 0, a: 1 } ).next() );
-assert.eq( { _id: 0, a: 0 }, t.find( { _id: 0 }, { _id: 1, a: 1 } ).next() );
+assert.eq({_id: 1, a: 1}, t.find({_id: 1}, {a: 1}).next());
+assert.eq({a: 1}, t.find({_id: 1}, {_id: 0, a: 1}).next());
+assert.eq({_id: 0, a: 0}, t.find({_id: 0}, {_id: 1, a: 1}).next());
// Non-simple: exclusion.
-assert.eq( { _id: 1, a: 1 }, t.find( { _id: 1 }, { b: 0 } ).next() );
-assert.eq( { _id: 0, }, t.find( { _id: 0 }, { a: 0, b: 0 } ).next() );
+assert.eq({_id: 1, a: 1}, t.find({_id: 1}, {b: 0}).next());
+assert.eq(
+ {
+ _id: 0,
+ },
+ t.find({_id: 0}, {a: 0, b: 0}).next());
// Non-simple: dotted fields.
-assert.eq( { b: [ { c: 1 }, { c: 2 } ] }, t.find( { _id: 0 }, { _id: 0, "b.c": 1 } ).next() );
-assert.eq( { _id: 1 }, t.find( { _id: 1 }, { "foo.bar": 1 } ).next() );
+assert.eq({b: [{c: 1}, {c: 2}]}, t.find({_id: 0}, {_id: 0, "b.c": 1}).next());
+assert.eq({_id: 1}, t.find({_id: 1}, {"foo.bar": 1}).next());
// Non-simple: elemMatch projection.
-assert.eq( { _id: 1, b: [ { c: 4 } ] },
- t.find( { _id: 1 }, { b: { $elemMatch: { c: 4 } } } ).next() );
+assert.eq({_id: 1, b: [{c: 4}]}, t.find({_id: 1}, {b: {$elemMatch: {c: 4}}}).next());
// Non-simple: .returnKey().
-assert.eq( { _id: 1 }, t.find( { _id: 1 } ).returnKey().next() );
+assert.eq({_id: 1}, t.find({_id: 1}).returnKey().next());
// Non-simple: .returnKey() overrides other projections.
-assert.eq( { _id: 1 }, t.find( { _id: 1 }, { a: 1 } ).returnKey().next() );
+assert.eq({_id: 1}, t.find({_id: 1}, {a: 1}).returnKey().next());
diff --git a/jstests/core/in.js b/jstests/core/in.js
index da1313692e1..852f6bcbca4 100644
--- a/jstests/core/in.js
+++ b/jstests/core/in.js
@@ -2,23 +2,27 @@
t = db.in1;
t.drop();
-t.save( { a : 1 } );
-t.save( { a : 2 } );
+t.save({a: 1});
+t.save({a: 2});
// $in must take an array as argument: SERVER-7445
-assert.throws( function() { return t.find( { a : { $in : { x : 1 } } } ).itcount(); } );
-assert.throws( function() { return t.find( { a : { $in : 1 } } ).itcount(); } );
+assert.throws(function() {
+ return t.find({a: {$in: {x: 1}}}).itcount();
+});
+assert.throws(function() {
+ return t.find({a: {$in: 1}}).itcount();
+});
-assert.eq( 1 , t.find( { a : { $in : [ 1 ] } } ).itcount() , "A" );
-assert.eq( 1 , t.find( { a : { $in : [ 2 ] } } ).itcount() , "B" );
-assert.eq( 2 , t.find( { a : { $in : [ 1 , 2 ] } } ).itcount() , "C" );
+assert.eq(1, t.find({a: {$in: [1]}}).itcount(), "A");
+assert.eq(1, t.find({a: {$in: [2]}}).itcount(), "B");
+assert.eq(2, t.find({a: {$in: [1, 2]}}).itcount(), "C");
-t.ensureIndex( { a : 1 } );
+t.ensureIndex({a: 1});
-assert.eq( 1 , t.find( { a : { $in : [ 1 ] } } ).itcount(), "D" );
-assert.eq( 1 , t.find( { a : { $in : [ 2 ] } } ).itcount() , "E" );
-assert.eq( 2 , t.find( { a : { $in : [ 1 , 2 ] } } ).itcount() , "F" );
+assert.eq(1, t.find({a: {$in: [1]}}).itcount(), "D");
+assert.eq(1, t.find({a: {$in: [2]}}).itcount(), "E");
+assert.eq(2, t.find({a: {$in: [1, 2]}}).itcount(), "F");
-assert.eq( 0 , t.find( { a : { $in : [] } } ).itcount() , "G" );
+assert.eq(0, t.find({a: {$in: []}}).itcount(), "G");
-assert.eq( 1 , t.find( { a : { $gt: 1, $in : [ 2 ] } } ).itcount() , "H" );
+assert.eq(1, t.find({a: {$gt: 1, $in: [2]}}).itcount(), "H");
diff --git a/jstests/core/in2.js b/jstests/core/in2.js
index 66b90daa25a..ddcee67ccd0 100644
--- a/jstests/core/in2.js
+++ b/jstests/core/in2.js
@@ -1,33 +1,30 @@
t = db.in2;
-function go( name , index ){
-
+function go(name, index) {
t.drop();
-
- t.save( { a : 1 , b : 1 } );
- t.save( { a : 1 , b : 2 } );
- t.save( { a : 1 , b : 3 } );
-
- t.save( { a : 1 , b : 1 } );
- t.save( { a : 2 , b : 2 } );
- t.save( { a : 3 , b : 3 } );
-
- t.save( { a : 1 , b : 1 } );
- t.save( { a : 2 , b : 1 } );
- t.save( { a : 3 , b : 1 } );
-
- if ( index )
- t.ensureIndex( index );
-
- assert.eq( 7 , t.find( { a : { $in : [ 1 , 2 ] } } ).count() , name + " A" );
-
- assert.eq( 6 , t.find( { a : { $in : [ 1 , 2 ] } , b : { $in : [ 1 , 2 ] } } ).count() , name + " B" );
-}
-go( "no index" );
-go( "index on a" , { a : 1 } );
-go( "index on b" , { b : 1 } );
-go( "index on a&b" , { a : 1 , b : 1 } );
+ t.save({a: 1, b: 1});
+ t.save({a: 1, b: 2});
+ t.save({a: 1, b: 3});
+
+ t.save({a: 1, b: 1});
+ t.save({a: 2, b: 2});
+ t.save({a: 3, b: 3});
+
+ t.save({a: 1, b: 1});
+ t.save({a: 2, b: 1});
+ t.save({a: 3, b: 1});
+ if (index)
+ t.ensureIndex(index);
+
+ assert.eq(7, t.find({a: {$in: [1, 2]}}).count(), name + " A");
+
+ assert.eq(6, t.find({a: {$in: [1, 2]}, b: {$in: [1, 2]}}).count(), name + " B");
+}
+go("no index");
+go("index on a", {a: 1});
+go("index on b", {b: 1});
+go("index on a&b", {a: 1, b: 1});
diff --git a/jstests/core/in3.js b/jstests/core/in3.js
index 5e7e587629f..02680642939 100644
--- a/jstests/core/in3.js
+++ b/jstests/core/in3.js
@@ -1,23 +1,23 @@
// SERVER-2829 Test arrays matching themselves within a $in expression.
t = db.jstests_in8;
-t.drop();
+t.drop();
-t.save( {key: [1]} );
-t.save( {key: ['1']} );
-t.save( {key: [[2]]} );
+t.save({key: [1]});
+t.save({key: ['1']});
+t.save({key: [[2]]});
-function doTest() {
- assert.eq( 1, t.count( {key:[1]} ) );
- assert.eq( 1, t.count( {key:{$in:[[1]]}} ) );
- assert.eq( 1, t.count( {key:{$in:[[1]],$ne:[2]}} ) );
- assert.eq( 1, t.count( {key:{$in:[['1']],$type:2}} ) );
- assert.eq( 1, t.count( {key:['1']} ) );
- assert.eq( 1, t.count( {key:{$in:[['1']]}} ) );
- assert.eq( 1, t.count( {key:[2]} ) );
- assert.eq( 1, t.count( {key:{$in:[[2]]}} ) );
-}
+function doTest() {
+ assert.eq(1, t.count({key: [1]}));
+ assert.eq(1, t.count({key: {$in: [[1]]}}));
+ assert.eq(1, t.count({key: {$in: [[1]], $ne: [2]}}));
+ assert.eq(1, t.count({key: {$in: [['1']], $type: 2}}));
+ assert.eq(1, t.count({key: ['1']}));
+ assert.eq(1, t.count({key: {$in: [['1']]}}));
+ assert.eq(1, t.count({key: [2]}));
+ assert.eq(1, t.count({key: {$in: [[2]]}}));
+}
-doTest();
-t.ensureIndex( {key:1} );
+doTest();
+t.ensureIndex({key: 1});
doTest();
diff --git a/jstests/core/in4.js b/jstests/core/in4.js
index cbe28e2e2df..c2f47bf8ed4 100644
--- a/jstests/core/in4.js
+++ b/jstests/core/in4.js
@@ -1,24 +1,24 @@
// SERVER-2343 Test $in empty array matching.
t = db.jstests_in9;
-t.drop();
+t.drop();
function someData() {
t.remove({});
- t.save( {key: []} );
+ t.save({key: []});
}
function moreData() {
- someData();
- t.save( {key: [1]} );
- t.save( {key: ['1']} );
- t.save( {key: null} );
- t.save( {} );
+ someData();
+ t.save({key: [1]});
+ t.save({key: ['1']});
+ t.save({key: null});
+ t.save({});
}
function check() {
- assert.eq( 1, t.count( {key:[]} ) );
- assert.eq( 1, t.count( {key:{$in:[[]]}} ) );
+ assert.eq(1, t.count({key: []}));
+ assert.eq(1, t.count({key: {$in: [[]]}}));
}
function doTest() {
@@ -26,10 +26,10 @@ function doTest() {
check();
moreData();
check();
-}
+}
-doTest();
+doTest();
// SERVER-1943 not fixed yet
-t.ensureIndex( {key:1} );
+t.ensureIndex({key: 1});
doTest();
diff --git a/jstests/core/in5.js b/jstests/core/in5.js
index a966a6a187c..c56621c91f3 100644
--- a/jstests/core/in5.js
+++ b/jstests/core/in5.js
@@ -1,56 +1,58 @@
t = db.in5;
-function go( fn ){
+function go(fn) {
t.drop();
o = {};
- o[fn] = { a : 1 , b : 2 };
- t.insert( o );
+ o[fn] = {
+ a: 1,
+ b: 2
+ };
+ t.insert(o);
x = {};
- x[fn] = { a : 1 , b : 2 };
- assert.eq( 1 , t.find( x ).itcount() , "A1 - " + fn );
-
+ x[fn] = {
+ a: 1,
+ b: 2
+ };
+ assert.eq(1, t.find(x).itcount(), "A1 - " + fn);
y = {};
- y[fn] = { $in : [ { a : 1 , b : 2 } ] };
- assert.eq( 1 , t.find( y ).itcount() , "A2 - " + fn );
-
+ y[fn] = {
+ $in: [{a: 1, b: 2}]
+ };
+ assert.eq(1, t.find(y).itcount(), "A2 - " + fn);
z = {};
- z[fn+".a"] = 1;
- z[fn+".b"] = { $in : [ 2 ] };
- assert.eq( 1 , t.find( z ).itcount() , "A3 - " + fn ); // SERVER-1366
+ z[fn + ".a"] = 1;
+ z[fn + ".b"] = {
+ $in: [2]
+ };
+ assert.eq(1, t.find(z).itcount(), "A3 - " + fn); // SERVER-1366
-
i = {};
i[fn] = 1;
- t.ensureIndex( i );
+ t.ensureIndex(i);
+
+ assert.eq(1, t.find(x).itcount(), "B1 - " + fn);
+ assert.eq(1, t.find(y).itcount(), "B2 - " + fn);
+ assert.eq(1, t.find(z).itcount(), "B3 - " + fn); // SERVER-1366
- assert.eq( 1 , t.find( x ).itcount() , "B1 - " + fn );
- assert.eq( 1 , t.find( y ).itcount() , "B2 - " + fn );
- assert.eq( 1 , t.find( z ).itcount() , "B3 - " + fn ); // SERVER-1366
-
- t.dropIndex( i );
+ t.dropIndex(i);
- assert.eq( 1 , t.getIndexes().length , "T2" );
+ assert.eq(1, t.getIndexes().length, "T2");
i = {};
- i[fn + ".a" ] = 1;
- t.ensureIndex( i );
- assert.eq( 2 , t.getIndexes().length , "T3" );
+ i[fn + ".a"] = 1;
+ t.ensureIndex(i);
+ assert.eq(2, t.getIndexes().length, "T3");
- assert.eq( 1 , t.find( x ).itcount() , "C1 - " + fn );
- assert.eq( 1 , t.find( y ).itcount() , "C2 - " + fn );
- assert.eq( 1 , t.find( z ).itcount() , "C3 - " + fn ); // SERVER-1366
-
- t.dropIndex( i );
+ assert.eq(1, t.find(x).itcount(), "C1 - " + fn);
+ assert.eq(1, t.find(y).itcount(), "C2 - " + fn);
+ assert.eq(1, t.find(z).itcount(), "C3 - " + fn); // SERVER-1366
-
+ t.dropIndex(i);
}
-go( "x" );
-go( "_id" );
-
-
-
+go("x");
+go("_id");
diff --git a/jstests/core/in6.js b/jstests/core/in6.js
index f114d93442a..4ee06541b81 100644
--- a/jstests/core/in6.js
+++ b/jstests/core/in6.js
@@ -1,13 +1,13 @@
t = db.jstests_in6;
t.drop();
-t.save( {} );
+t.save({});
function doTest() {
- assert.eq.automsg( "1", "t.count( {i:null} )" );
- assert.eq.automsg( "1", "t.count( {i:{$in:[null]}} )" );
+ assert.eq.automsg("1", "t.count( {i:null} )");
+ assert.eq.automsg("1", "t.count( {i:{$in:[null]}} )");
}
doTest();
-t.ensureIndex( {i:1} );
+t.ensureIndex({i: 1});
doTest();
diff --git a/jstests/core/in7.js b/jstests/core/in7.js
index cf614ab994d..2f6c9e3ff1a 100644
--- a/jstests/core/in7.js
+++ b/jstests/core/in7.js
@@ -2,13 +2,21 @@
t = db.jstests_ina;
t.drop();
-t.save( {} );
+t.save({});
-assert.throws( function() { t.find( {a:{$in:[{$elemMatch:{b:1}}]}} ).itcount(); } );
-assert.throws( function() { t.find( {a:{$not:{$in:[{$elemMatch:{b:1}}]}}} ).itcount(); } );
+assert.throws(function() {
+ t.find({a: {$in: [{$elemMatch: {b: 1}}]}}).itcount();
+});
+assert.throws(function() {
+ t.find({a: {$not: {$in: [{$elemMatch: {b: 1}}]}}}).itcount();
+});
-assert.throws( function() { t.find( {a:{$nin:[{$elemMatch:{b:1}}]}} ).itcount(); } );
-assert.throws( function() { t.find( {a:{$not:{$nin:[{$elemMatch:{b:1}}]}}} ).itcount(); } );
+assert.throws(function() {
+ t.find({a: {$nin: [{$elemMatch: {b: 1}}]}}).itcount();
+});
+assert.throws(function() {
+ t.find({a: {$not: {$nin: [{$elemMatch: {b: 1}}]}}}).itcount();
+});
// NOTE Above we don't check cases like {b:2,$elemMatch:{b:3,4}} - generally
// we assume that the first key is $elemMatch if any key is, and validating
diff --git a/jstests/core/in8.js b/jstests/core/in8.js
index be2a696f7c3..a6bdc520926 100644
--- a/jstests/core/in8.js
+++ b/jstests/core/in8.js
@@ -3,16 +3,16 @@
t = db.jstests_inb;
t.drop();
-function checkResults( query ) {
- assert.eq( 4, t.count( query ) );
- assert.eq( 4, t.find( query ).itcount() );
+function checkResults(query) {
+ assert.eq(4, t.count(query));
+ assert.eq(4, t.find(query).itcount());
}
-t.ensureIndex( {x:1} );
-t.save( {x:'aa'} );
-t.save( {x:'ab'} );
-t.save( {x:'ac'} );
-t.save( {x:'ad'} );
+t.ensureIndex({x: 1});
+t.save({x: 'aa'});
+t.save({x: 'ab'});
+t.save({x: 'ac'});
+t.save({x: 'ad'});
-checkResults( {x:{$in:[/^a/,/^ab/]}} );
-checkResults( {x:{$in:[/^ab/,/^a/]}} );
+checkResults({x: {$in: [/^a/, /^ab/]}});
+checkResults({x: {$in: [/^ab/, /^a/]}});
diff --git a/jstests/core/inc-SERVER-7446.js b/jstests/core/inc-SERVER-7446.js
index 6f365e1f5e2..2503df21f87 100644
--- a/jstests/core/inc-SERVER-7446.js
+++ b/jstests/core/inc-SERVER-7446.js
@@ -2,38 +2,44 @@ var c = db.incSERVER7446;
// A 32 bit overflow spills to 64 bits
c.drop();
-c.save( { a: NumberInt( "2147483647" ) } );
-var updateResult = c.update( {}, { $inc:{ a:NumberInt( 1 ) } } );
+c.save({a: NumberInt("2147483647")});
+var updateResult = c.update({}, {$inc: {a: NumberInt(1)}});
assert.eq(1, updateResult.nMatched, "Object not modified");
var res = c.findOne();
-assert.eq(NumberLong, res.a.constructor,
+assert.eq(NumberLong,
+ res.a.constructor,
"NumberInt incremented beyond std::numeric_limits<in32_t>::max() not NumberLong");
-assert.eq(NumberLong("2147483648"), res.a,
+assert.eq(NumberLong("2147483648"),
+ res.a,
"NumberInt incremented beyond std::numeric_limits<in32_t>::max() has wrong value");
// A 32 bit underflow spills to 64 bits
c.drop();
-c.save( { a: NumberInt( "-2147483648" ) } );
-updateResult = c.update( {}, { $inc:{ a:NumberInt( -1 ) } } );
+c.save({a: NumberInt("-2147483648")});
+updateResult = c.update({}, {$inc: {a: NumberInt(-1)}});
assert.eq(1, updateResult.nMatched, "Object not modified");
res = c.findOne();
-assert.eq(NumberLong, res.a.constructor,
+assert.eq(NumberLong,
+ res.a.constructor,
"NumberInt decremented beyond std::numeric_limits<in32_t>::min() not NumberLong");
-assert.eq(NumberLong("-2147483649"), res.a,
+assert.eq(NumberLong("-2147483649"),
+ res.a,
"NumberInt decremented beyond std::numeric_limits<in32_t>::min() has wrong value");
// A 64 bit overflow is an error
c.drop();
-c.save( { a: NumberLong( "9223372036854775807" ) } );
-updateResult = c.update( {}, { $inc:{ a:NumberInt( 1 ) } } );
-assert.eq(0, updateResult.nMatched,
- "Did not fail to increment a NumberLong past std::numeric_limits<int64_t>::max()");
+c.save({a: NumberLong("9223372036854775807")});
+updateResult = c.update({}, {$inc: {a: NumberInt(1)}});
+assert.eq(0,
+ updateResult.nMatched,
+ "Did not fail to increment a NumberLong past std::numeric_limits<int64_t>::max()");
// A 64 bit underflow is an error
c.drop();
-c.save( { a: NumberLong( "-9223372036854775808" ) } );
-updateResult = c.update( {}, { $inc:{ a:NumberInt( -1 ) } } );
-assert.eq(0, updateResult.nMatched,
- "Did not fail to decrement a NumberLong past std::numeric_limits<int64_t>::min()");
+c.save({a: NumberLong("-9223372036854775808")});
+updateResult = c.update({}, {$inc: {a: NumberInt(-1)}});
+assert.eq(0,
+ updateResult.nMatched,
+ "Did not fail to decrement a NumberLong past std::numeric_limits<int64_t>::min()");
c.drop();
diff --git a/jstests/core/inc1.js b/jstests/core/inc1.js
index 027f307a476..75192ebe013 100644
--- a/jstests/core/inc1.js
+++ b/jstests/core/inc1.js
@@ -2,31 +2,30 @@
t = db.inc1;
t.drop();
-function test( num , name ){
- assert.eq( 1 , t.count() , name + " count" );
- assert.eq( num , t.findOne().x , name + " value" );
+function test(num, name) {
+ assert.eq(1, t.count(), name + " count");
+ assert.eq(num, t.findOne().x, name + " value");
}
-t.save( { _id : 1 , x : 1 } );
-test( 1 , "A" );
+t.save({_id: 1, x: 1});
+test(1, "A");
-t.update( { _id : 1 } , { $inc : { x : 1 } } );
-test( 2 , "B" );
+t.update({_id: 1}, {$inc: {x: 1}});
+test(2, "B");
-t.update( { _id : 1 } , { $inc : { x : 1 } } );
-test( 3 , "C" );
+t.update({_id: 1}, {$inc: {x: 1}});
+test(3, "C");
-t.update( { _id : 2 } , { $inc : { x : 1 } } );
-test( 3 , "D" );
+t.update({_id: 2}, {$inc: {x: 1}});
+test(3, "D");
-t.update( { _id : 1 } , { $inc : { x : 2 } } );
-test( 5 , "E" );
+t.update({_id: 1}, {$inc: {x: 2}});
+test(5, "E");
-t.update( { _id : 1 } , { $inc : { x : -1 } } );
-test( 4 , "F" );
+t.update({_id: 1}, {$inc: {x: -1}});
+test(4, "F");
-t.ensureIndex( { x : 1 } );
-
-t.update( { _id : 1 } , { $inc : { x : 1 } } );
-test( 5 , "G" );
+t.ensureIndex({x: 1});
+t.update({_id: 1}, {$inc: {x: 1}});
+test(5, "G");
diff --git a/jstests/core/inc2.js b/jstests/core/inc2.js
index 3bcc5146f75..6ff4842e254 100644
--- a/jstests/core/inc2.js
+++ b/jstests/core/inc2.js
@@ -2,21 +2,23 @@
t = db.inc2;
t.drop();
-t.save( { _id : 1 , x : 1 } );
-t.save( { _id : 2 , x : 2 } );
-t.save( { _id : 3 , x : 3 } );
+t.save({_id: 1, x: 1});
+t.save({_id: 2, x: 2});
+t.save({_id: 3, x: 3});
-function order(){
- return t.find().sort( { x : 1 } ).map( function(z){ return z._id; } );
+function order() {
+ return t.find().sort({x: 1}).map(function(z) {
+ return z._id;
+ });
}
-assert.eq( "1,2,3" , order() , "A" );
+assert.eq("1,2,3", order(), "A");
-t.update( { _id : 1 } , { $inc : { x : 4 } } );
-assert.eq( "2,3,1" , order() , "B" );
+t.update({_id: 1}, {$inc: {x: 4}});
+assert.eq("2,3,1", order(), "B");
-t.ensureIndex( { x : 1 } );
-assert.eq( "2,3,1" , order() , "C" );
+t.ensureIndex({x: 1});
+assert.eq("2,3,1", order(), "C");
-t.update( { _id : 3 } , { $inc : { x : 4 } } );
-assert.eq( "2,1,3" , order() , "D" );
+t.update({_id: 3}, {$inc: {x: 4}});
+assert.eq("2,1,3", order(), "D");
diff --git a/jstests/core/inc3.js b/jstests/core/inc3.js
index 6f10ad2b27b..b69a77c64cd 100644
--- a/jstests/core/inc3.js
+++ b/jstests/core/inc3.js
@@ -2,15 +2,13 @@
t = db.inc3;
t.drop();
-t.save( { _id : 1 , z : 1 , a : 1 } );
-t.update( {} , { $inc : { z : 1 , a : 1 } } );
-t.update( {} , { $inc : { a : 1 , z : 1 } } );
-assert.eq( { _id : 1 , z : 3 , a : 3 } , t.findOne() , "A" );
-
+t.save({_id: 1, z: 1, a: 1});
+t.update({}, {$inc: {z: 1, a: 1}});
+t.update({}, {$inc: {a: 1, z: 1}});
+assert.eq({_id: 1, z: 3, a: 3}, t.findOne(), "A");
t.drop();
-t.save( { _id : 1 , a : 1 , z : 1 } );
-t.update( {} , { $inc : { z : 1 , a : 1 } } );
-t.update( {} , { $inc : { a : 1 , z : 1 } } );
-assert.eq( { _id : 1 , a : 3 , z : 3 } , t.findOne() , "B" );
-
+t.save({_id: 1, a: 1, z: 1});
+t.update({}, {$inc: {z: 1, a: 1}});
+t.update({}, {$inc: {a: 1, z: 1}});
+assert.eq({_id: 1, a: 3, z: 3}, t.findOne(), "B");
diff --git a/jstests/core/index1.js b/jstests/core/index1.js
index 64bbfa8732b..1bcc23be135 100644
--- a/jstests/core/index1.js
+++ b/jstests/core/index1.js
@@ -1,24 +1,30 @@
t = db.embeddedIndexTest;
-t.remove( {} );
+t.remove({});
-o = { name : "foo" , z : { a : 17 , b : 4} };
-t.save( o );
+o = {
+ name: "foo",
+ z: {a: 17, b: 4}
+};
+t.save(o);
-assert( t.findOne().z.a == 17 );
-assert( t.findOne( { z : { a : 17 } } ) == null);
+assert(t.findOne().z.a == 17);
+assert(t.findOne({z: {a: 17}}) == null);
-t.ensureIndex( { "z.a" : 1 } );
+t.ensureIndex({"z.a": 1});
-assert( t.findOne().z.a == 17 );
-assert( t.findOne( { z : { a : 17 } } ) == null);
+assert(t.findOne().z.a == 17);
+assert(t.findOne({z: {a: 17}}) == null);
-o = { name : "bar" , z : { a : 18 } };
-t.save( o );
+o = {
+ name: "bar",
+ z: {a: 18}
+};
+t.save(o);
-assert.eq.automsg( "2", "t.find().length()" );
-assert.eq.automsg( "2", "t.find().sort( { 'z.a' : 1 } ).length()" );
-assert.eq.automsg( "2", "t.find().sort( { 'z.a' : -1 } ).length()" );
+assert.eq.automsg("2", "t.find().length()");
+assert.eq.automsg("2", "t.find().sort( { 'z.a' : 1 } ).length()");
+assert.eq.automsg("2", "t.find().sort( { 'z.a' : -1 } ).length()");
assert(t.validate().valid);
diff --git a/jstests/core/index13.js b/jstests/core/index13.js
index 21105166037..920061dd6f5 100644
--- a/jstests/core/index13.js
+++ b/jstests/core/index13.js
@@ -19,129 +19,151 @@
t = db.jstests_index13;
t.drop();
-function assertConsistentResults( query ) {
- assert.eq( t.find( query ).hint( { $natural:1 } ).sort( { _id:1 } ).toArray(),
- t.find( query ).hint( index ).sort( { _id:1 } ).toArray() );
+function assertConsistentResults(query) {
+ assert.eq(t.find(query).hint({$natural: 1}).sort({_id: 1}).toArray(),
+ t.find(query).hint(index).sort({_id: 1}).toArray());
}
-function assertResults( query ) {
- explain = t.find( query ).hint( index ).explain();
+function assertResults(query) {
+ explain = t.find(query).hint(index).explain();
// printjson( explain ); // debug
- assertConsistentResults( query );
+ assertConsistentResults(query);
}
// Cases with single dotted index fied names.
-index = { 'a.b':1, 'a.c':1 };
-t.ensureIndex( index );
-t.save( { a:[ { b:1 }, { c:1 } ] } );
-t.save( { a:[ { b:1, c:1 } ] } );
-assert.eq( 2, t.count() );
+index = {
+ 'a.b': 1,
+ 'a.c': 1
+};
+t.ensureIndex(index);
+t.save({a: [{b: 1}, {c: 1}]});
+t.save({a: [{b: 1, c: 1}]});
+assert.eq(2, t.count());
// Without $elemMatch.
-assertResults( { 'a.b':1, 'a.c':1 } );
+assertResults({'a.b': 1, 'a.c': 1});
// With $elemMatch.
-assertResults( { a:{ $elemMatch:{ b:1, c:1 } } } );
+assertResults({a: {$elemMatch: {b: 1, c: 1}}});
// Without shared $elemMatch.
-assertResults( { 'a.b':1, a:{ $elemMatch:{ c:1 } } } );
+assertResults({'a.b': 1, a: {$elemMatch: {c: 1}}});
// Two different $elemMatch expressions.
-assertResults( { $and:[ { a:{ $elemMatch:{ b:1 } } },
- { a:{ $elemMatch:{ c:1 } } } ] } );
-
+assertResults({$and: [{a: {$elemMatch: {b: 1}}}, {a: {$elemMatch: {c: 1}}}]});
// Cases relating to parse order and inclusion of intersected ranges.
-assertResults( { 'a.b':1, a:{ $elemMatch:{ b:{ $gt:0 }, c:1 } } } );
-assertResults( { a:{ $elemMatch:{ b:1, c:1 } }, 'a.b':1 } );
-assertResults( { 'a.c':1, a:{ $elemMatch:{ b:1, c:1 } } } );
-assertResults( { a:{ $elemMatch:{ b:1, c:1 } }, 'a.b':{ $gt:0 } } );
+assertResults({'a.b': 1, a: {$elemMatch: {b: {$gt: 0}, c: 1}}});
+assertResults({a: {$elemMatch: {b: 1, c: 1}}, 'a.b': 1});
+assertResults({'a.c': 1, a: {$elemMatch: {b: 1, c: 1}}});
+assertResults({a: {$elemMatch: {b: 1, c: 1}}, 'a.b': {$gt: 0}});
// Cases with $elemMatch on multiple fields.
t.remove({});
-index = { 'a.b':1, 'a.c':1, 'd.e':1, 'd.f':1 };
-t.ensureIndex( index );
-t.insert( { a:[ { b:1 }, { c:1 } ], d: { e:1, f:1 } } );
-t.insert( { a:[ { b:1, c:1 } ], d: { e:1, f:1 } } );
-t.insert( { a:{ b:1, c:1 }, d:[ { e:1, f:1 } ] } );
-t.insert( { a:{ b:1, c:1 }, d:[ { e:1 }, { f:1 } ] } );
-
-assert.eq( 4, t.count() );
+index = {
+ 'a.b': 1,
+ 'a.c': 1,
+ 'd.e': 1,
+ 'd.f': 1
+};
+t.ensureIndex(index);
+t.insert({a: [{b: 1}, {c: 1}], d: {e: 1, f: 1}});
+t.insert({a: [{b: 1, c: 1}], d: {e: 1, f: 1}});
+t.insert({a: {b: 1, c: 1}, d: [{e: 1, f: 1}]});
+t.insert({a: {b: 1, c: 1}, d: [{e: 1}, {f: 1}]});
+
+assert.eq(4, t.count());
// Without $elemMatch.
-assertResults( { 'a.b':1, 'a.c':1, 'd.e':1, 'd.f':1 } );
+assertResults({'a.b': 1, 'a.c': 1, 'd.e': 1, 'd.f': 1});
// With $elemMatch.
-assertResults( { a:{ $elemMatch:{ b:1, c:1 } }, 'd': { $elemMatch:{ e:1, f:1 } } } );
-assertResults( { a:{ $elemMatch:{ b:1, c:1 } }, 'd.e': 1, 'd.f' : 1 } );
-assertResults( { 'a.b': 1, 'a.c' : 1, 'd': { $elemMatch:{ e:1, f:1 } } } );
-
+assertResults({a: {$elemMatch: {b: 1, c: 1}}, 'd': {$elemMatch: {e: 1, f: 1}}});
+assertResults({a: {$elemMatch: {b: 1, c: 1}}, 'd.e': 1, 'd.f': 1});
+assertResults({'a.b': 1, 'a.c': 1, 'd': {$elemMatch: {e: 1, f: 1}}});
// Cases with nested $elemMatch.
t.remove({});
-index = { 'a.b.c':1, 'a.b.d' :1 };
-t.ensureIndex( index );
-t.insert( { a:[ { b: [ { c : 1, d : 1 } ] } ] } ) ;
-t.insert( { a:[ { b: [ { c : 1 } , { d : 1 } ] } ] } ) ;
-assert.eq( 2, t.count() );
+index = {
+ 'a.b.c': 1,
+ 'a.b.d': 1
+};
+t.ensureIndex(index);
+t.insert({a: [{b: [{c: 1, d: 1}]}]});
+t.insert({a: [{b: [{c: 1}, {d: 1}]}]});
+assert.eq(2, t.count());
// Without $elemMatch.
-assertResults( { 'a.b.c':1, 'a.b.d':1 } );
+assertResults({'a.b.c': 1, 'a.b.d': 1});
// With $elemMatch.
-assertResults( { "a" : { $elemMatch : { "b" : { $elemMatch : { c : 1, d : 1 } } } } } );
+assertResults({"a": {$elemMatch: {"b": {$elemMatch: {c: 1, d: 1}}}}});
// Cases with double dotted index field names.
t.drop();
-index = { 'a.b.x':1, 'a.b.y':1 };
-t.ensureIndex( index );
-t.save( { a:{ b:{ x:1, y:1 } } } );
-t.save( { a:[ { b:{ x:1 } }, { b:{ y:1 } } ] } );
-t.save( { a:[ { b:[ { x:1 }, { y:1 } ] } ] } );
-t.save( { a:[ { b:[ { x:1, y:1 } ] } ] } );
-assert.eq( 4, t.count() );
+index = {
+ 'a.b.x': 1,
+ 'a.b.y': 1
+};
+t.ensureIndex(index);
+t.save({a: {b: {x: 1, y: 1}}});
+t.save({a: [{b: {x: 1}}, {b: {y: 1}}]});
+t.save({a: [{b: [{x: 1}, {y: 1}]}]});
+t.save({a: [{b: [{x: 1, y: 1}]}]});
+assert.eq(4, t.count());
// No $elemMatch.
-assertResults( { 'a.b.x':1, 'a.b.y':1 } );
+assertResults({'a.b.x': 1, 'a.b.y': 1});
// $elemMatch with dotted children.
-assertResults( { a:{ $elemMatch:{ 'b.x':1, 'b.y':1 } } } );
+assertResults({a: {$elemMatch: {'b.x': 1, 'b.y': 1}}});
// $elemMatch with undotted children.
-assertResults( { 'a.b':{ $elemMatch:{ x:1, y:1 } } } );
+assertResults({'a.b': {$elemMatch: {x: 1, y: 1}}});
// Cases where a field is indexed along with its children.
t.dropIndexes();
-index = { 'a':1, 'a.b.x':1, 'a.b.y':1 };
-t.ensureIndex( index );
+index = {
+ 'a': 1,
+ 'a.b.x': 1,
+ 'a.b.y': 1
+};
+t.ensureIndex(index);
// With $ne.
-assertResults( { a:{ $ne:4 }, 'a.b':{ $elemMatch:{ x:1, y:1 } } } );
+assertResults({a: {$ne: 4}, 'a.b': {$elemMatch: {x: 1, y: 1}}});
// No constraint on a prior parent field.
-assertResults( { 'a.b':{ $elemMatch:{ x:1, y:1 } } } );
+assertResults({'a.b': {$elemMatch: {x: 1, y: 1}}});
// Cases with double dotted index field names branching to different fields at each dot.
t.drop();
-index = { 'a.b.c':1, 'a.e.f':1, 'a.b.d':1, 'a.e.g':1 };
-t.ensureIndex( index );
-t.save( { a:{ b:{ c:1, d:1 }, e:{ f:1, g:1 } } } );
-t.save( { a:[ { b:{ c:1 }, e:{ f:1 } }, { b:{ d:1 }, e:{ g:1 } } ] } );
-t.save( { a:[ { b:{ c:1 } }, { e:{ f:1 } }, { b:{ d:1 } }, { e:{ g:1 } } ] } );
-t.save( { a:[ { b:[ { c:1 }, { d:1 } ] }, { e:[ { f:1 }, { g:1 } ] } ] } );
-t.save( { a:[ { b:[ { c:[ 1 ] }, { d:[ 1 ] } ] }, { e:[ { f:[ 1 ] }, { g:[ 1 ] } ] } ] } );
-t.save( { a:[ { b:[ { c:1, d:1 } ] }, { e:[ { f:1 }, { g:1 } ] } ] } );
-t.save( { a:[ { b:[ { c:1, d:1 } ] }, { e:[ { f:1, g:1 } ] } ] } );
-assert.eq( 7, t.count() );
+index = {
+ 'a.b.c': 1,
+ 'a.e.f': 1,
+ 'a.b.d': 1,
+ 'a.e.g': 1
+};
+t.ensureIndex(index);
+t.save({a: {b: {c: 1, d: 1}, e: {f: 1, g: 1}}});
+t.save({a: [{b: {c: 1}, e: {f: 1}}, {b: {d: 1}, e: {g: 1}}]});
+t.save({a: [{b: {c: 1}}, {e: {f: 1}}, {b: {d: 1}}, {e: {g: 1}}]});
+t.save({a: [{b: [{c: 1}, {d: 1}]}, {e: [{f: 1}, {g: 1}]}]});
+t.save({a: [{b: [{c: [1]}, {d: [1]}]}, {e: [{f: [1]}, {g: [1]}]}]});
+t.save({a: [{b: [{c: 1, d: 1}]}, {e: [{f: 1}, {g: 1}]}]});
+t.save({a: [{b: [{c: 1, d: 1}]}, {e: [{f: 1, g: 1}]}]});
+assert.eq(7, t.count());
// Constraint on a prior cousin field.
-assertResults( { 'a.b':{ $elemMatch:{ c:1, d:1 } },
- 'a.e':{ $elemMatch:{ f:1, g:1 } } } );
+assertResults({'a.b': {$elemMatch: {c: 1, d: 1}}, 'a.e': {$elemMatch: {f: 1, g: 1}}});
// Different constraint on a prior cousin field.
-assertResults( { 'a.b':{ $elemMatch:{ d:1 } },
- 'a.e':{ $elemMatch:{ f:1, g:1 } } } );
-
+assertResults({'a.b': {$elemMatch: {d: 1}}, 'a.e': {$elemMatch: {f: 1, g: 1}}});
// Cases with double dotted index field names branching to different fields at each dot, and the
// same field name strings after the second dot.
t.drop();
-index = { 'a.b.c':1, 'a.e.c':1, 'a.b.d':1, 'a.e.d':1 };
-t.ensureIndex( index );
-t.save( { a:[ { b:[ { c:1, d:1 } ] }, { e:[ { c:1, d:1 } ] } ] } );
-assert.eq( 1, t.count() );
+index = {
+ 'a.b.c': 1,
+ 'a.e.c': 1,
+ 'a.b.d': 1,
+ 'a.e.d': 1
+};
+t.ensureIndex(index);
+t.save({a: [{b: [{c: 1, d: 1}]}, {e: [{c: 1, d: 1}]}]});
+assert.eq(1, t.count());
// Constraint on a prior cousin field with the same field names.
-assertResults( { 'a.b':{ $elemMatch:{ c:1, d:1 } }, 'a.e':{ $elemMatch:{ c:1, d:1 } } } );
+assertResults({'a.b': {$elemMatch: {c: 1, d: 1}}, 'a.e': {$elemMatch: {c: 1, d: 1}}});
diff --git a/jstests/core/index2.js b/jstests/core/index2.js
index b54abcaa792..11ef4e68caa 100644
--- a/jstests/core/index2.js
+++ b/jstests/core/index2.js
@@ -4,37 +4,49 @@
t = db.embeddedIndexTest2;
t.drop();
-assert( t.findOne() == null );
-
-o = { name : "foo" , z : { a : 17 } };
-p = { name : "foo" , z : { a : 17 } };
-q = { name : "barrr" , z : { a : 18 } };
-r = { name : "barrr" , z : { k : "zzz", L:[1,2] } };
-
-t.save( o );
-
-assert( t.findOne().z.a == 17 );
-
-t.save( p );
-t.save( q );
-
-assert( t.findOne({z:{a:17}}).z.a==17 );
-assert( t.find({z:{a:17}}).length() == 2 );
-assert( t.find({z:{a:18}}).length() == 1 );
-
-t.save( r );
-
-assert( t.findOne({z:{a:17}}).z.a==17 );
-assert( t.find({z:{a:17}}).length() == 2 );
-assert( t.find({z:{a:18}}).length() == 1 );
-
-t.ensureIndex( { z : 1 } );
-
-assert( t.findOne({z:{a:17}}).z.a==17 );
-assert( t.find({z:{a:17}}).length() == 2 );
-assert( t.find({z:{a:18}}).length() == 1 );
-
-assert( t.find().sort( { z : 1 } ).length() == 4 );
-assert( t.find().sort( { z : -1 } ).length() == 4 );
+assert(t.findOne() == null);
+
+o = {
+ name: "foo",
+ z: {a: 17}
+};
+p = {
+ name: "foo",
+ z: {a: 17}
+};
+q = {
+ name: "barrr",
+ z: {a: 18}
+};
+r = {
+ name: "barrr",
+ z: {k: "zzz", L: [1, 2]}
+};
+
+t.save(o);
+
+assert(t.findOne().z.a == 17);
+
+t.save(p);
+t.save(q);
+
+assert(t.findOne({z: {a: 17}}).z.a == 17);
+assert(t.find({z: {a: 17}}).length() == 2);
+assert(t.find({z: {a: 18}}).length() == 1);
+
+t.save(r);
+
+assert(t.findOne({z: {a: 17}}).z.a == 17);
+assert(t.find({z: {a: 17}}).length() == 2);
+assert(t.find({z: {a: 18}}).length() == 1);
+
+t.ensureIndex({z: 1});
+
+assert(t.findOne({z: {a: 17}}).z.a == 17);
+assert(t.find({z: {a: 17}}).length() == 2);
+assert(t.find({z: {a: 18}}).length() == 1);
+
+assert(t.find().sort({z: 1}).length() == 4);
+assert(t.find().sort({z: -1}).length() == 4);
assert(t.validate().valid);
diff --git a/jstests/core/index3.js b/jstests/core/index3.js
index 80139460cb4..e908f1fe2c9 100644
--- a/jstests/core/index3.js
+++ b/jstests/core/index3.js
@@ -3,14 +3,14 @@
t = db.index3;
t.drop();
-assert( t.getIndexes().length == 0 );
+assert(t.getIndexes().length == 0);
-t.ensureIndex( { name : 1 } );
+t.ensureIndex({name: 1});
-t.save( { name : "a" } );
+t.save({name: "a"});
-t.ensureIndex( { name : 1 } );
+t.ensureIndex({name: 1});
-assert( t.getIndexes().length == 2 );
+assert(t.getIndexes().length == 2);
assert(t.validate().valid);
diff --git a/jstests/core/index4.js b/jstests/core/index4.js
index ee8c59fa37c..1c96ded434a 100644
--- a/jstests/core/index4.js
+++ b/jstests/core/index4.js
@@ -1,33 +1,22 @@
// index4.js
-
t = db.index4;
t.drop();
-t.save( { name : "alleyinsider" ,
- instances : [
- { pool : "prod1" } ,
- { pool : "dev1" }
- ]
- } );
-
-t.save( { name : "clusterstock" ,
- instances : [
- { pool : "dev1" }
- ]
- } );
+t.save({name: "alleyinsider", instances: [{pool: "prod1"}, {pool: "dev1"}]});
+t.save({name: "clusterstock", instances: [{pool: "dev1"}]});
// this should fail, not allowed -- we confirm that.
-t.ensureIndex( { instances : { pool : 1 } } );
-assert.eq( 1, t.getIndexes().length, "no indexes other than _id should be here yet");
+t.ensureIndex({instances: {pool: 1}});
+assert.eq(1, t.getIndexes().length, "no indexes other than _id should be here yet");
-t.ensureIndex( { "instances.pool" : 1 } );
+t.ensureIndex({"instances.pool": 1});
-sleep( 10 );
+sleep(10);
-a = t.find( { instances : { pool : "prod1" } } );
-assert( a.length() == 1, "len1" );
-assert( a[0].name == "alleyinsider", "alley" );
+a = t.find({instances: {pool: "prod1"}});
+assert(a.length() == 1, "len1");
+assert(a[0].name == "alleyinsider", "alley");
-assert(t.validate().valid, "valid" );
+assert(t.validate().valid, "valid");
diff --git a/jstests/core/index5.js b/jstests/core/index5.js
index 841ac12ed45..11cfa1882e4 100644
--- a/jstests/core/index5.js
+++ b/jstests/core/index5.js
@@ -1,24 +1,24 @@
// index5.js - test reverse direction index
function validate() {
- assert.eq( 2, t.find().count() );
- f = t.find().sort( { a: 1 } );
- assert.eq( 2, t.count() );
- assert.eq( 1, f[ 0 ].a );
- assert.eq( 2, f[ 1 ].a );
- r = t.find().sort( { a: -1 } );
- assert.eq( 2, r.count() );
- assert.eq( 2, r[ 0 ].a );
- assert.eq( 1, r[ 1 ].a );
+ assert.eq(2, t.find().count());
+ f = t.find().sort({a: 1});
+ assert.eq(2, t.count());
+ assert.eq(1, f[0].a);
+ assert.eq(2, f[1].a);
+ r = t.find().sort({a: -1});
+ assert.eq(2, r.count());
+ assert.eq(2, r[0].a);
+ assert.eq(1, r[1].a);
}
t = db.index5;
t.drop();
-t.save( { a: 1 } );
-t.save( { a: 2 } );
+t.save({a: 1});
+t.save({a: 2});
validate();
-t.ensureIndex( { a: -1 } );
+t.ensureIndex({a: -1});
validate();
diff --git a/jstests/core/index6.js b/jstests/core/index6.js
index 8dbd8f74fcf..9adef9bf366 100644
--- a/jstests/core/index6.js
+++ b/jstests/core/index6.js
@@ -3,6 +3,6 @@
r = db.ed.db.index6;
r.drop();
-r.save( { comments : [ { name : "eliot", foo : 1 } ] } );
-r.ensureIndex( { "comments.name": 1 } );
-assert( r.findOne( { "comments.name": "eliot" } ) );
+r.save({comments: [{name: "eliot", foo: 1}]});
+r.ensureIndex({"comments.name": 1});
+assert(r.findOne({"comments.name": "eliot"}));
diff --git a/jstests/core/index8.js b/jstests/core/index8.js
index 7b41da5ce50..6773f2d29f5 100644
--- a/jstests/core/index8.js
+++ b/jstests/core/index8.js
@@ -3,60 +3,59 @@
t = db.jstests_index8;
t.drop();
-t.ensureIndex( { a: 1 } );
-t.ensureIndex( { b: 1 }, true );
-t.ensureIndex( { c: 1 }, [ false, "cIndex" ] );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1}, true);
+t.ensureIndex({c: 1}, [false, "cIndex"]);
-checkIndexes = function( num ) {
+checkIndexes = function(num) {
var indexes = t.getIndexes();
- assert.eq( 4, indexes.length );
+ assert.eq(4, indexes.length);
var start = 0;
- if ( indexes[0].name == "_id_" )
+ if (indexes[0].name == "_id_")
start = 1;
- assert( !indexes[ start ].unique , "A" + num );
- assert( indexes[ start + 1 ].unique , "B" + num + " " + tojson( indexes[start+1] ) );
- assert( !indexes[ start + 2 ].unique , "C" + num );
- assert.eq( "cIndex", indexes[ start + 2 ].name , "D" + num );
+ assert(!indexes[start].unique, "A" + num);
+ assert(indexes[start + 1].unique, "B" + num + " " + tojson(indexes[start + 1]));
+ assert(!indexes[start + 2].unique, "C" + num);
+ assert.eq("cIndex", indexes[start + 2].name, "D" + num);
};
-checkIndexes( 1 );
+checkIndexes(1);
t.reIndex();
-checkIndexes( 2 );
+checkIndexes(2);
-t.save( { a: 2, b: 1 } );
-t.save( { a: 2 } );
-assert.eq( 2, t.find().count() );
+t.save({a: 2, b: 1});
+t.save({a: 2});
+assert.eq(2, t.find().count());
-t.save( { b: 4 } );
-t.save( { b: 4 } );
-assert.eq( 3, t.find().count() );
-assert.eq( 3, t.find().hint( {c:1} ).toArray().length );
-assert.eq( 3, t.find().hint( {b:1} ).toArray().length );
-assert.eq( 3, t.find().hint( {a:1} ).toArray().length );
+t.save({b: 4});
+t.save({b: 4});
+assert.eq(3, t.find().count());
+assert.eq(3, t.find().hint({c: 1}).toArray().length);
+assert.eq(3, t.find().hint({b: 1}).toArray().length);
+assert.eq(3, t.find().hint({a: 1}).toArray().length);
t.drop();
-t.ensureIndex( { a: 1, b: -1 }, true );
-t.save( { a: 2, b: 3 } );
-t.save( { a: 2, b: 3 } );
-t.save( { a: 2, b: 4 } );
-t.save( { a: 1, b: 3 } );
-assert.eq( 3, t.find().count() );
+t.ensureIndex({a: 1, b: -1}, true);
+t.save({a: 2, b: 3});
+t.save({a: 2, b: 3});
+t.save({a: 2, b: 4});
+t.save({a: 1, b: 3});
+assert.eq(3, t.find().count());
t.drop();
-t.ensureIndex( { a: 1 }, true );
-t.save( { a: [ 2, 3 ] } );
-t.save( { a: 2 } );
-assert.eq( 1, t.find().count() );
+t.ensureIndex({a: 1}, true);
+t.save({a: [2, 3]});
+t.save({a: 2});
+assert.eq(1, t.find().count());
t.drop();
-t.ensureIndex( { a: 1 }, true );
-t.save( { a: 2 } );
-t.save( { a: [ 1, 2, 3 ] } );
-t.save( { a: [ 3, 2, 1 ] } );
-assert.eq( 1, t.find().sort( { a: 1 } ).hint( { a: 1 } ).toArray().length );
-assert.eq( 1, t.find().sort( { a: -1 } ).hint( { a: 1 } ).toArray().length );
-
-assert.eq( t._indexSpec( { x : 1 } , true ) , t._indexSpec( { x : 1 } , [ true ] ) , "spec 1" );
-assert.eq( t._indexSpec( { x : 1 } , "eliot" ) , t._indexSpec( { x : 1 } , [ "eliot" ] ) , "spec 2" );
-
+t.ensureIndex({a: 1}, true);
+t.save({a: 2});
+t.save({a: [1, 2, 3]});
+t.save({a: [3, 2, 1]});
+assert.eq(1, t.find().sort({a: 1}).hint({a: 1}).toArray().length);
+assert.eq(1, t.find().sort({a: -1}).hint({a: 1}).toArray().length);
+
+assert.eq(t._indexSpec({x: 1}, true), t._indexSpec({x: 1}, [true]), "spec 1");
+assert.eq(t._indexSpec({x: 1}, "eliot"), t._indexSpec({x: 1}, ["eliot"]), "spec 2");
diff --git a/jstests/core/index9.js b/jstests/core/index9.js
index 8fee4a35ca0..5f31dc978aa 100644
--- a/jstests/core/index9.js
+++ b/jstests/core/index9.js
@@ -1,25 +1,25 @@
t = db.jstests_index9;
t.drop();
-db.createCollection( "jstests_index9" );
-assert.eq( 1, t.getIndexes().length, "There should be 1 index with default collection" );
+db.createCollection("jstests_index9");
+assert.eq(1, t.getIndexes().length, "There should be 1 index with default collection");
t.drop();
-db.createCollection( "jstests_index9", {autoIndexId: true} );
-assert.eq( 1, t.getIndexes().length, "There should be 1 index if autoIndexId: true" );
+db.createCollection("jstests_index9", {autoIndexId: true});
+assert.eq(1, t.getIndexes().length, "There should be 1 index if autoIndexId: true");
t.drop();
-db.createCollection( "jstests_index9", {autoIndexId:false} );
-assert.eq( 0, t.getIndexes().length, "There should be 0 index if autoIndexId: false" );
-t.createIndex( { _id:1 } );
-assert.eq( 1, t.getIndexes().length );
-t.createIndex( { _id:1 } );
-assert.eq( 1, t.getIndexes().length );
+db.createCollection("jstests_index9", {autoIndexId: false});
+assert.eq(0, t.getIndexes().length, "There should be 0 index if autoIndexId: false");
+t.createIndex({_id: 1});
+assert.eq(1, t.getIndexes().length);
+t.createIndex({_id: 1});
+assert.eq(1, t.getIndexes().length);
t.drop();
-t.createIndex( { _id:1 } );
-assert.eq( 1, t.getIndexes().length );
+t.createIndex({_id: 1});
+assert.eq(1, t.getIndexes().length);
t.drop();
-t.save( {a:1} );
-t.createIndex( { _id:1 } );
-assert.eq( 1, t.getIndexes().length );
+t.save({a: 1});
+t.createIndex({_id: 1});
+assert.eq(1, t.getIndexes().length);
diff --git a/jstests/core/indexOtherNamespace.js b/jstests/core/indexOtherNamespace.js
index a94cff5d51b..f5919f721e8 100644
--- a/jstests/core/indexOtherNamespace.js
+++ b/jstests/core/indexOtherNamespace.js
@@ -6,14 +6,14 @@ load("jstests/libs/analyze_plan.js");
var otherDB = db.getSiblingDB("indexOtherNS");
otherDB.dropDatabase();
-otherDB.foo.insert({a:1});
+otherDB.foo.insert({a: 1});
assert.eq(1, otherDB.foo.getIndexes().length);
-assert(isCollscan(otherDB.foo.find({a:1}).explain().queryPlanner.winningPlan));
+assert(isCollscan(otherDB.foo.find({a: 1}).explain().queryPlanner.winningPlan));
-assert.writeError(otherDB.randomNS.system.indexes.insert({ ns: "indexOtherNS.foo",
- key: { a: 1 }, name: "a_1"}));
+assert.writeError(
+ otherDB.randomNS.system.indexes.insert({ns: "indexOtherNS.foo", key: {a: 1}, name: "a_1"}));
// Assert that index didn't actually get built
assert.eq(1, otherDB.foo.getIndexes().length);
-assert(isCollscan(otherDB.foo.find({a:1}).explain().queryPlanner.winningPlan));
+assert(isCollscan(otherDB.foo.find({a: 1}).explain().queryPlanner.winningPlan));
otherDB.dropDatabase();
diff --git a/jstests/core/index_arr1.js b/jstests/core/index_arr1.js
index 0878e19aa22..ba821bd3730 100644
--- a/jstests/core/index_arr1.js
+++ b/jstests/core/index_arr1.js
@@ -1,17 +1,17 @@
t = db.index_arr1;
t.drop();
-t.insert( { _id : 1 , a : 5 , b : [ { x : 1 } ] } );
-t.insert( { _id : 2 , a : 5 , b : [] } );
-t.insert( { _id : 3 , a : 5 } );
+t.insert({_id: 1, a: 5, b: [{x: 1}]});
+t.insert({_id: 2, a: 5, b: []});
+t.insert({_id: 3, a: 5});
-assert.eq( 3 , t.find( { a : 5 } ).itcount() , "A1" );
+assert.eq(3, t.find({a: 5}).itcount(), "A1");
-t.ensureIndex( { a : 1 , "b.x" : 1 } );
+t.ensureIndex({a: 1, "b.x": 1});
-assert.eq( 3 , t.find( { a : 5 } ).itcount() , "A2" ); // SERVER-1082
+assert.eq(3, t.find({a: 5}).itcount(), "A2"); // SERVER-1082
-assert.eq( 2 , t.getIndexes().length , "B1" );
-t.insert( { _id : 4 , a : 5 , b : [] } );
-t.ensureIndex( { a : 1 , "b.a" : 1 , "b.c" : 1 } );
-assert.eq( 3 , t.getIndexes().length , "B2" );
+assert.eq(2, t.getIndexes().length, "B1");
+t.insert({_id: 4, a: 5, b: []});
+t.ensureIndex({a: 1, "b.a": 1, "b.c": 1});
+assert.eq(3, t.getIndexes().length, "B2");
diff --git a/jstests/core/index_arr2.js b/jstests/core/index_arr2.js
index 78c480719dc..952be73ff13 100644
--- a/jstests/core/index_arr2.js
+++ b/jstests/core/index_arr2.js
@@ -3,49 +3,45 @@ M = 5;
t = db.jstests_arr2;
-function test( withIndex ){
+function test(withIndex) {
t.drop();
-
+
// insert a bunch of items to force queries to use the index.
newObject = {
- _id : 1,
- a : [
- { b : { c : 1 } }
- ]
+ _id: 1,
+ a: [{b: {c: 1}}]
};
-
+
now = (new Date()).getTime() / 1000;
- for (created = now - NUM; created <= now; created++ ) {
+ for (created = now - NUM; created <= now; created++) {
newObject['created'] = created;
t.insert(newObject);
- newObject['_id'] ++;
+ newObject['_id']++;
}
-
+
// change the last M items.
query = {
- 'created' : { '$gte' : now - M }
+ 'created': {'$gte': now - M}
};
-
- Z = t.find( query ).count();
-
- if ( withIndex ){
- //t.ensureIndex( { 'a.b.c' : 1, 'created' : -1 } )
- //t.ensureIndex( { created : -1 } )
- t.ensureIndex( { 'a.b.c' : 1 } , { name : "x" } );
+
+ Z = t.find(query).count();
+
+ if (withIndex) {
+ // t.ensureIndex( { 'a.b.c' : 1, 'created' : -1 } )
+ // t.ensureIndex( { created : -1 } )
+ t.ensureIndex({'a.b.c': 1}, {name: "x"});
}
-
- var res = t.update(query, { '$set' : { "a.0.b.c" : 0 } } , false , true );
- assert.eq( Z, res.nMatched, "num updated withIndex:" + withIndex );
-
+
+ var res = t.update(query, {'$set': {"a.0.b.c": 0}}, false, true);
+ assert.eq(Z, res.nMatched, "num updated withIndex:" + withIndex);
+
// now see how many were actually updated.
query['a.b.c'] = 0;
-
+
count = t.count(query);
- assert.eq( Z , count , "count after withIndex:" + withIndex );
+ assert.eq(Z, count, "count after withIndex:" + withIndex);
}
-test( false );
-test( true );
-
-
+test(false);
+test(true);
diff --git a/jstests/core/index_big1.js b/jstests/core/index_big1.js
index eb4df5d5100..8f600fa37bc 100644
--- a/jstests/core/index_big1.js
+++ b/jstests/core/index_big1.js
@@ -7,32 +7,33 @@ t.drop();
var s = "";
-t.ensureIndex( { a : 1 , x : 1 } );
+t.ensureIndex({a: 1, x: 1});
var bulk = t.initializeUnorderedBulkOp();
-for ( i=0; i<N; i++ ) {
- bulk.insert( { a : i + .5 , x : s } );
+for (i = 0; i < N; i++) {
+ bulk.insert({a: i + .5, x: s});
s += "x";
}
-assert.throws( function() { bulk.execute(); } );
+assert.throws(function() {
+ bulk.execute();
+});
-assert.eq( 2 , t.getIndexes().length );
+assert.eq(2, t.getIndexes().length);
flip = -1;
-for ( i=0; i<N; i++ ) {
- var c = t.find( { a : i + .5 } ).count();
- if ( c == 1 ) {
- assert.eq( -1 , flip , "flipping : " + i );
- }
- else {
- if ( flip == -1 ) {
+for (i = 0; i < N; i++) {
+ var c = t.find({a: i + .5}).count();
+ if (c == 1) {
+ assert.eq(-1, flip, "flipping : " + i);
+ } else {
+ if (flip == -1) {
flip = i;
}
}
}
-//print(flip);
-//print(flip/1024);
+// print(flip);
+// print(flip/1024);
-assert.eq( /*v0 index : 797*/1002, flip , "flip changed" );
+assert.eq(/*v0 index : 797*/ 1002, flip, "flip changed");
diff --git a/jstests/core/index_bigkeys.js b/jstests/core/index_bigkeys.js
index 4b692ce1b19..564ddde2a5e 100755..100644
--- a/jstests/core/index_bigkeys.js
+++ b/jstests/core/index_bigkeys.js
@@ -5,20 +5,19 @@ var keys = [];
var str = "aaaabbbbccccddddeeeeffffgggghhhh";
-while ( str.length < 20000 ) {
- keys.push( str );
+while (str.length < 20000) {
+ keys.push(str);
str = str + str;
}
-function doInsert( order ) {
+function doInsert(order) {
if (order == 1) {
for (var i = 0; i < 10; i++) {
- t.insert({ _id: i, k: keys[i] });
+ t.insert({_id: i, k: keys[i]});
}
- }
- else {
+ } else {
for (var i = 9; i >= 0; i--) {
- t.insert({ _id: i, k: keys[i] });
+ t.insert({_id: i, k: keys[i]});
}
}
}
@@ -27,33 +26,33 @@ var expect = null;
function check() {
assert(t.validate().valid);
- assert.eq( 5, t.count() );
+ assert.eq(5, t.count());
- var c = t.find({ k: /^a/ }).count();
- assert.eq( 5, c );
+ var c = t.find({k: /^a/}).count();
+ assert.eq(5, c);
}
-function runTest( order ) {
+function runTest(order) {
t.drop();
- t.ensureIndex({ k: 1 });
- doInsert( order );
- check(); // check incremental addition
+ t.ensureIndex({k: 1});
+ doInsert(order);
+ check(); // check incremental addition
t.reIndex();
- check(); // check bottom up
+ check(); // check bottom up
t.drop();
- doInsert( order );
- assert.eq( 1, t.getIndexes().length );
- t.ensureIndex({ k: 1 });
- assert.eq( 1, t.getIndexes().length );
+ doInsert(order);
+ assert.eq(1, t.getIndexes().length);
+ t.ensureIndex({k: 1});
+ assert.eq(1, t.getIndexes().length);
t.drop();
- doInsert( order );
- assert.eq( 1, t.getIndexes().length );
- t.ensureIndex({ k: 1 }, { background: true });
- assert.eq( 1, t.getIndexes().length );
+ doInsert(order);
+ assert.eq(1, t.getIndexes().length);
+ t.ensureIndex({k: 1}, {background: true});
+ assert.eq(1, t.getIndexes().length);
}
-runTest( 1 );
-runTest( 2 );
+runTest(1);
+runTest(2);
diff --git a/jstests/core/index_bigkeys_nofail.js b/jstests/core/index_bigkeys_nofail.js
index 417470d7f04..10d8a0791c0 100644
--- a/jstests/core/index_bigkeys_nofail.js
+++ b/jstests/core/index_bigkeys_nofail.js
@@ -2,50 +2,50 @@
(function() {
"use strict";
- var t=db.index_bigkeys_nofail;
+ var t = db.index_bigkeys_nofail;
t.drop();
- var res=db.getSiblingDB('admin').runCommand( { setParameter: 1, failIndexKeyTooLong: true } );
- var was=res.was;
+ var res = db.getSiblingDB('admin').runCommand({setParameter: 1, failIndexKeyTooLong: true});
+ var was = res.was;
assert.commandWorked(res);
var x = new Array(1025).join('x');
- assert.commandWorked(t.ensureIndex({name:1}));
- assert.writeError(t.insert({name:x}));
- assert.commandWorked(t.dropIndex({name:1}));
- assert.writeOK(t.insert({name:x}));
- assert.commandFailed(t.ensureIndex({name:1}));
+ assert.commandWorked(t.ensureIndex({name: 1}));
+ assert.writeError(t.insert({name: x}));
+ assert.commandWorked(t.dropIndex({name: 1}));
+ assert.writeOK(t.insert({name: x}));
+ assert.commandFailed(t.ensureIndex({name: 1}));
t.drop();
- db.getSiblingDB('admin').runCommand( { setParameter: 1, failIndexKeyTooLong: false } );
+ db.getSiblingDB('admin').runCommand({setParameter: 1, failIndexKeyTooLong: false});
// inserts
- assert.writeOK(t.insert({_id: 1, name:x}));
- assert.commandWorked(t.ensureIndex({name:1}));
- assert.writeOK(t.insert({_id: 2, name:x}));
- assert.writeOK(t.insert({_id: 3, name:x}));
+ assert.writeOK(t.insert({_id: 1, name: x}));
+ assert.commandWorked(t.ensureIndex({name: 1}));
+ assert.writeOK(t.insert({_id: 2, name: x}));
+ assert.writeOK(t.insert({_id: 3, name: x}));
assert.eq(t.count(), 3);
// updates (smaller and larger)
- assert.writeOK(t.update({_id: 1}, {$set:{name:'short'}}));
- assert.writeOK(t.update({_id: 1}, {$set:{name: x}}));
- assert.writeOK(t.update({_id: 1}, {$set:{name: x + 'even longer'}}));
+ assert.writeOK(t.update({_id: 1}, {$set: {name: 'short'}}));
+ assert.writeOK(t.update({_id: 1}, {$set: {name: x}}));
+ assert.writeOK(t.update({_id: 1}, {$set: {name: x + 'even longer'}}));
// remove
assert.writeOK(t.remove({_id: 1}));
assert.eq(t.count(), 2);
- db.getSiblingDB('admin').runCommand( { setParameter: 1, failIndexKeyTooLong: true } );
+ db.getSiblingDB('admin').runCommand({setParameter: 1, failIndexKeyTooLong: true});
// can still delete even if key is oversized
assert.writeOK(t.remove({_id: 2}));
assert.eq(t.count(), 1);
// can still update to shorter, but not longer name.
- assert.writeError(t.update({_id: 3}, {$set:{name: x + 'even longer'}}));
- assert.writeOK(t.update({_id: 3}, {$set:{name:'short'}}));
- assert.writeError(t.update({_id: 3}, {$set:{name: x}}));
+ assert.writeError(t.update({_id: 3}, {$set: {name: x + 'even longer'}}));
+ assert.writeOK(t.update({_id: 3}, {$set: {name: 'short'}}));
+ assert.writeError(t.update({_id: 3}, {$set: {name: x}}));
- db.getSiblingDB('admin').runCommand( { setParameter: 1, failIndexKeyTooLong: was } );
+ db.getSiblingDB('admin').runCommand({setParameter: 1, failIndexKeyTooLong: was});
// Explicitly drop the collection to avoid failures in post-test hooks that run dbHash and
// validate commands.
diff --git a/jstests/core/index_bigkeys_update.js b/jstests/core/index_bigkeys_update.js
index 6bdaf033542..a3074bfdfdd 100644
--- a/jstests/core/index_bigkeys_update.js
+++ b/jstests/core/index_bigkeys_update.js
@@ -1,18 +1,18 @@
bigString = "";
-while ( bigString.length < 16000 )
+while (bigString.length < 16000)
bigString += ".";
t = db.index_bigkeys_update;
t.drop();
-t.insert( { _id : 0, x : "asd" } );
-t.ensureIndex( { x : 1 } );
+t.insert({_id: 0, x: "asd"});
+t.ensureIndex({x: 1});
-assert.eq( 1, t.count() );
+assert.eq(1, t.count());
-assert.writeError(t.update( {} , { $set : { x : bigString } } ));
+assert.writeError(t.update({}, {$set: {x: bigString}}));
-assert.eq( 1, t.count() );
-assert.eq( "asd", t.findOne().x ); // make sure doc is the old version
-assert.eq( "asd", t.findOne( { _id : 0 } ).x ); // make sure doc is the old version
+assert.eq(1, t.count());
+assert.eq("asd", t.findOne().x); // make sure doc is the old version
+assert.eq("asd", t.findOne({_id: 0}).x); // make sure doc is the old version
diff --git a/jstests/core/index_bigkeys_validation.js b/jstests/core/index_bigkeys_validation.js
index ef29b07ecc7..98c80aa081a 100644
--- a/jstests/core/index_bigkeys_validation.js
+++ b/jstests/core/index_bigkeys_validation.js
@@ -6,7 +6,7 @@
var coll = db.longindex;
coll.drop();
- var longVal = new Array(1025).join('x'); // Keys >= 1024 bytes cannot be indexed.
+ var longVal = new Array(1025).join('x'); // Keys >= 1024 bytes cannot be indexed.
assert.commandWorked(db.adminCommand({setParameter: 1, failIndexKeyTooLong: false}));
diff --git a/jstests/core/index_check2.js b/jstests/core/index_check2.js
index 8ebd13c850c..f8590229d39 100644
--- a/jstests/core/index_check2.js
+++ b/jstests/core/index_check2.js
@@ -5,38 +5,44 @@ t.drop();
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
-for ( var i=0; i<1000; i++ ){
+for (var i = 0; i < 1000; i++) {
var a = [];
- for ( var j=1; j<5; j++ ){
- a.push( "tag" + ( i * j % 50 ));
+ for (var j = 1; j < 5; j++) {
+ a.push("tag" + (i * j % 50));
}
- t.save( { num : i , tags : a } );
+ t.save({num: i, tags: a});
}
-q1 = { tags : "tag6" };
-q2 = { tags : "tag12" };
-q3 = { tags : { $all : [ "tag6" , "tag12" ] } };
+q1 = {
+ tags: "tag6"
+};
+q2 = {
+ tags: "tag12"
+};
+q3 = {
+ tags: {$all: ["tag6", "tag12"]}
+};
-assert.eq( 120 , t.find( q1 ).itcount() , "q1 a");
-assert.eq( 120 , t.find( q2 ).itcount() , "q2 a" );
-assert.eq( 60 , t.find( q3 ).itcount() , "q3 a");
+assert.eq(120, t.find(q1).itcount(), "q1 a");
+assert.eq(120, t.find(q2).itcount(), "q2 a");
+assert.eq(60, t.find(q3).itcount(), "q3 a");
-t.ensureIndex( { tags : 1 } );
+t.ensureIndex({tags: 1});
-assert.eq( 120 , t.find( q1 ).itcount() , "q1 a");
-assert.eq( 120 , t.find( q2 ).itcount() , "q2 a" );
-assert.eq( 60 , t.find( q3 ).itcount() , "q3 a");
+assert.eq(120, t.find(q1).itcount(), "q1 a");
+assert.eq(120, t.find(q2).itcount(), "q2 a");
+assert.eq(60, t.find(q3).itcount(), "q3 a");
// We expect these queries to use index scans over { tags: 1 }.
-assert( isIxscan(t.find(q1).explain().queryPlanner.winningPlan) , "e1" );
-assert( isIxscan(t.find(q2).explain().queryPlanner.winningPlan) , "e2" );
-assert( isIxscan(t.find(q3).explain().queryPlanner.winningPlan) , "e3" );
+assert(isIxscan(t.find(q1).explain().queryPlanner.winningPlan), "e1");
+assert(isIxscan(t.find(q2).explain().queryPlanner.winningPlan), "e2");
+assert(isIxscan(t.find(q3).explain().queryPlanner.winningPlan), "e3");
scanned1 = t.find(q1).explain("executionStats").executionStats.totalKeysExamined;
scanned2 = t.find(q2).explain("executionStats").executionStats.totalKeysExamined;
scanned3 = t.find(q3).explain("executionStats").executionStats.totalKeysExamined;
-//print( "scanned1: " + scanned1 + " scanned2: " + scanned2 + " scanned3: " + scanned3 );
+// print( "scanned1: " + scanned1 + " scanned2: " + scanned2 + " scanned3: " + scanned3 );
// $all should just iterate either of the words
-assert( scanned3 <= Math.max( scanned1 , scanned2 ) , "$all makes query optimizer not work well" );
+assert(scanned3 <= Math.max(scanned1, scanned2), "$all makes query optimizer not work well");
diff --git a/jstests/core/index_check3.js b/jstests/core/index_check3.js
index 78135ff30ca..2c07ae6d50a 100644
--- a/jstests/core/index_check3.js
+++ b/jstests/core/index_check3.js
@@ -3,63 +3,62 @@
t = db.index_check3;
t.drop();
+t.save({a: 1});
+t.save({a: 2});
+t.save({a: 3});
+t.save({a: "z"});
+assert.eq(1, t.find({a: {$lt: 2}}).itcount(), "A");
+assert.eq(1, t.find({a: {$gt: 2}}).itcount(), "B");
-t.save( { a : 1 } );
-t.save( { a : 2 } );
-t.save( { a : 3 } );
-t.save( { a : "z" } );
+t.ensureIndex({a: 1});
-assert.eq( 1 , t.find( { a : { $lt : 2 } } ).itcount() , "A" );
-assert.eq( 1 , t.find( { a : { $gt : 2 } } ).itcount() , "B" );
-
-t.ensureIndex( { a : 1 } );
-
-assert.eq( 1 , t.find( { a : { $lt : 2 } } ).itcount() , "C" );
-assert.eq( 1 , t.find( { a : { $gt : 2 } } ).itcount() , "D" );
+assert.eq(1, t.find({a: {$lt: 2}}).itcount(), "C");
+assert.eq(1, t.find({a: {$gt: 2}}).itcount(), "D");
t.drop();
-for ( var i=0; i<100; i++ ){
- var o = { i : i };
- if ( i % 2 == 0 )
+for (var i = 0; i < 100; i++) {
+ var o = {
+ i: i
+ };
+ if (i % 2 == 0)
o.foo = i;
- t.save( o );
+ t.save(o);
}
-t.ensureIndex( { foo : 1 } );
-
-var explain = t.find( { foo : { $lt : 50 } } ).explain("executionStats");
-assert.gt( 30 , explain.executionStats.totalKeysExamined , "lt" );
-var explain = t.find( { foo : { $gt : 50 } } ).explain("executionStats");
-assert.gt( 30 , explain.executionStats.totalKeysExamined , "gt" );
+t.ensureIndex({foo: 1});
+var explain = t.find({foo: {$lt: 50}}).explain("executionStats");
+assert.gt(30, explain.executionStats.totalKeysExamined, "lt");
+var explain = t.find({foo: {$gt: 50}}).explain("executionStats");
+assert.gt(30, explain.executionStats.totalKeysExamined, "gt");
t.drop();
-t.save( {i:'a'} );
-for( var i=0; i < 10; ++i ) {
- t.save( {} );
+t.save({i: 'a'});
+for (var i = 0; i < 10; ++i) {
+ t.save({});
}
-t.ensureIndex( { i : 1 } );
+t.ensureIndex({i: 1});
-var explain = t.find( { i : { $lte : 'a' } } ).explain("executionStats");
-assert.gt( 3 , explain.executionStats.totalKeysExamined , "lte" );
-//printjson( t.find( { i : { $gte : 'a' } } ).explain() );
+var explain = t.find({i: {$lte: 'a'}}).explain("executionStats");
+assert.gt(3, explain.executionStats.totalKeysExamined, "lte");
+// printjson( t.find( { i : { $gte : 'a' } } ).explain() );
// bug SERVER-99
-var explain = t.find( { i : { $gte : 'a' } } ).explain("executionStats");
-assert.gt( 3 , explain.executionStats.totalKeysExamined , "gte" );
-assert.eq( 1 , t.find( { i : { $gte : 'a' } } ).count() , "gte a" );
-assert.eq( 1 , t.find( { i : { $gte : 'a' } } ).itcount() , "gte b" );
-assert.eq( 1 , t.find( { i : { $gte : 'a' } } ).sort( { i : 1 } ).count() , "gte c" );
-assert.eq( 1 , t.find( { i : { $gte : 'a' } } ).sort( { i : 1 } ).itcount() , "gte d" );
-
-t.save( { i : "b" } );
-
-var explain = t.find( { i : { $gte : 'a' } } ).explain("executionStats");
-assert.gt( 3 , explain.executionStats.totalKeysExamined , "gte" );
-assert.eq( 2 , t.find( { i : { $gte : 'a' } } ).count() , "gte a2" );
-assert.eq( 2 , t.find( { i : { $gte : 'a' } } ).itcount() , "gte b2" );
-assert.eq( 2 , t.find( { i : { $gte : 'a' , $lt : MaxKey } } ).itcount() , "gte c2" );
-assert.eq( 2 , t.find( { i : { $gte : 'a' , $lt : MaxKey } } ).sort( { i : -1 } ).itcount() , "gte d2" );
-assert.eq( 2 , t.find( { i : { $gte : 'a' , $lt : MaxKey } } ).sort( { i : 1 } ).itcount() , "gte e2" );
+var explain = t.find({i: {$gte: 'a'}}).explain("executionStats");
+assert.gt(3, explain.executionStats.totalKeysExamined, "gte");
+assert.eq(1, t.find({i: {$gte: 'a'}}).count(), "gte a");
+assert.eq(1, t.find({i: {$gte: 'a'}}).itcount(), "gte b");
+assert.eq(1, t.find({i: {$gte: 'a'}}).sort({i: 1}).count(), "gte c");
+assert.eq(1, t.find({i: {$gte: 'a'}}).sort({i: 1}).itcount(), "gte d");
+
+t.save({i: "b"});
+
+var explain = t.find({i: {$gte: 'a'}}).explain("executionStats");
+assert.gt(3, explain.executionStats.totalKeysExamined, "gte");
+assert.eq(2, t.find({i: {$gte: 'a'}}).count(), "gte a2");
+assert.eq(2, t.find({i: {$gte: 'a'}}).itcount(), "gte b2");
+assert.eq(2, t.find({i: {$gte: 'a', $lt: MaxKey}}).itcount(), "gte c2");
+assert.eq(2, t.find({i: {$gte: 'a', $lt: MaxKey}}).sort({i: -1}).itcount(), "gte d2");
+assert.eq(2, t.find({i: {$gte: 'a', $lt: MaxKey}}).sort({i: 1}).itcount(), "gte e2");
diff --git a/jstests/core/index_check5.js b/jstests/core/index_check5.js
index f6b48448f63..2a3e73f9e8d 100644
--- a/jstests/core/index_check5.js
+++ b/jstests/core/index_check5.js
@@ -2,16 +2,17 @@
t = db.index_check5;
t.drop();
-t.save( { "name" : "Player1" ,
- "scores" : [{"level" : 1 , "score" : 100},
- {"level" : 2 , "score" : 50}],
- "total" : 150 } );
-t.save( { "name" : "Player2" ,
- "total" : 90 ,
- "scores" : [ {"level" : 1 , "score" : 90},
- {"level" : 2 , "score" : 0} ]
- } );
+t.save({
+ "name": "Player1",
+ "scores": [{"level": 1, "score": 100}, {"level": 2, "score": 50}],
+ "total": 150
+});
+t.save({
+ "name": "Player2",
+ "total": 90,
+ "scores": [{"level": 1, "score": 90}, {"level": 2, "score": 0}]
+});
-assert.eq( 2 , t.find( { "scores.level": 2, "scores.score": {$gt:30} } ).itcount() , "A" );
-t.ensureIndex( { "scores.level" : 1 , "scores.score" : 1 } );
-assert.eq( 2 , t.find( { "scores.level": 2, "scores.score": {$gt:30} } ).itcount() , "B" );
+assert.eq(2, t.find({"scores.level": 2, "scores.score": {$gt: 30}}).itcount(), "A");
+t.ensureIndex({"scores.level": 1, "scores.score": 1});
+assert.eq(2, t.find({"scores.level": 2, "scores.score": {$gt: 30}}).itcount(), "B");
diff --git a/jstests/core/index_check6.js b/jstests/core/index_check6.js
index a3b0e51ded2..4baeced8fb9 100644
--- a/jstests/core/index_check6.js
+++ b/jstests/core/index_check6.js
@@ -7,89 +7,98 @@ function keysExamined(query, hint) {
return explain.executionStats.totalKeysExamined;
}
-t.ensureIndex( { age : 1 , rating : 1 } );
+t.ensureIndex({age: 1, rating: 1});
-for ( var age=10; age<50; age++ ){
- for ( var rating=0; rating<10; rating++ ){
- t.save( { age : age , rating : rating } );
+for (var age = 10; age < 50; age++) {
+ for (var rating = 0; rating < 10; rating++) {
+ t.save({age: age, rating: rating});
}
}
-assert.eq( 10 , keysExamined( { age : 30 }, {} ) , "A" );
-assert.eq( 20 , keysExamined( { age : { $gte : 29 , $lte : 30 } }, {} ) , "B" );
-assert.eq( 19 , keysExamined( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,9] } },
- {age:1,rating:1} ) , "C1" );
-assert.eq( 24 , keysExamined( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,8] } },
- {age:1,rating:1} ) , "C2" );
-assert.eq( 29 , keysExamined( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [1,8] } },
- {age:1,rating:1} ) , "C3" );
+assert.eq(10, keysExamined({age: 30}, {}), "A");
+assert.eq(20, keysExamined({age: {$gte: 29, $lte: 30}}, {}), "B");
+assert.eq(19,
+ keysExamined({age: {$gte: 25, $lte: 30}, rating: {$in: [0, 9]}}, {age: 1, rating: 1}),
+ "C1");
+assert.eq(24,
+ keysExamined({age: {$gte: 25, $lte: 30}, rating: {$in: [0, 8]}}, {age: 1, rating: 1}),
+ "C2");
+assert.eq(29,
+ keysExamined({age: {$gte: 25, $lte: 30}, rating: {$in: [1, 8]}}, {age: 1, rating: 1}),
+ "C3");
-assert.eq( 5 , keysExamined( { age : { $gte : 29 , $lte : 30 } , rating : 5 },
- {age:1,rating:1} ) , "C" ); // SERVER-371
-assert.eq( 7 , keysExamined( { age : { $gte : 29 , $lte : 30 } , rating : { $gte : 4 , $lte : 5 } },
- {age:1,rating:1} ) , "D" ); // SERVER-371
+assert.eq(5,
+ keysExamined({age: {$gte: 29, $lte: 30}, rating: 5}, {age: 1, rating: 1}),
+ "C"); // SERVER-371
+assert.eq(7,
+ keysExamined({age: {$gte: 29, $lte: 30}, rating: {$gte: 4, $lte: 5}},
+ {age: 1, rating: 1}),
+ "D"); // SERVER-371
-assert.eq.automsg( "2", "t.find( { age:30, rating:{ $gte:4, $lte:5} } )" +
- ".explain('executionStats')" +
- ".executionStats.totalKeysExamined" );
+assert.eq.automsg("2",
+ "t.find( { age:30, rating:{ $gte:4, $lte:5} } )" + ".explain('executionStats')" +
+ ".executionStats.totalKeysExamined");
t.drop();
-for ( var a=1; a<10; a++ ){
- for ( var b=0; b<10; b++ ){
- for ( var c=0; c<10; c++ ) {
- t.save( { a:a, b:b, c:c } );
+for (var a = 1; a < 10; a++) {
+ for (var b = 0; b < 10; b++) {
+ for (var c = 0; c < 10; c++) {
+ t.save({a: a, b: b, c: c});
}
}
}
-function doQuery( count, query, sort, index ) {
- var explain = t.find( query ).hint( index ).sort( sort ).explain("executionStats");
+function doQuery(count, query, sort, index) {
+ var explain = t.find(query).hint(index).sort(sort).explain("executionStats");
var nscanned = explain.executionStats.totalKeysExamined;
assert(Math.abs(count - nscanned) <= 2);
}
-function doTest( sort, index ) {
- doQuery( 1, { a:5, b:5, c:5 }, sort, index );
- doQuery( 2, { a:5, b:5, c:{$gte:5,$lte:6} }, sort, index );
- doQuery( 1, { a:5, b:5, c:{$gte:5.5,$lte:6} }, sort, index );
- doQuery( 1, { a:5, b:5, c:{$gte:5,$lte:5.5} }, sort, index );
- doQuery( 3, { a:5, b:5, c:{$gte:5,$lte:7} }, sort, index );
- doQuery( 4, { a:5, b:{$gte:5,$lte:6}, c:5 }, sort, index );
- if ( sort.b > 0 ) {
- doQuery( 3, { a:5, b:{$gte:5.5,$lte:6}, c:5 }, sort, index );
- doQuery( 3, { a:5, b:{$gte:5,$lte:5.5}, c:5 }, sort, index );
+function doTest(sort, index) {
+ doQuery(1, {a: 5, b: 5, c: 5}, sort, index);
+ doQuery(2, {a: 5, b: 5, c: {$gte: 5, $lte: 6}}, sort, index);
+ doQuery(1, {a: 5, b: 5, c: {$gte: 5.5, $lte: 6}}, sort, index);
+ doQuery(1, {a: 5, b: 5, c: {$gte: 5, $lte: 5.5}}, sort, index);
+ doQuery(3, {a: 5, b: 5, c: {$gte: 5, $lte: 7}}, sort, index);
+ doQuery(4, {a: 5, b: {$gte: 5, $lte: 6}, c: 5}, sort, index);
+ if (sort.b > 0) {
+ doQuery(3, {a: 5, b: {$gte: 5.5, $lte: 6}, c: 5}, sort, index);
+ doQuery(3, {a: 5, b: {$gte: 5, $lte: 5.5}, c: 5}, sort, index);
} else {
- doQuery( 3, { a:5, b:{$gte:5.5,$lte:6}, c:5 }, sort, index );
- doQuery( 3, { a:5, b:{$gte:5,$lte:5.5}, c:5 }, sort, index );
+ doQuery(3, {a: 5, b: {$gte: 5.5, $lte: 6}, c: 5}, sort, index);
+ doQuery(3, {a: 5, b: {$gte: 5, $lte: 5.5}, c: 5}, sort, index);
}
- doQuery( 8, { a:5, b:{$gte:5,$lte:7}, c:5 }, sort, index );
- doQuery( 5, { a:{$gte:5,$lte:6}, b:5, c:5 }, sort, index );
- if ( sort.a > 0 ) {
- doQuery( 3, { a:{$gte:5.5,$lte:6}, b:5, c:5 }, sort, index );
- doQuery( 3, { a:{$gte:5,$lte:5.5}, b:5, c:5 }, sort, index );
- doQuery( 3, { a:{$gte:5.5,$lte:6}, b:5, c:{$gte:5,$lte:6} }, sort, index );
+ doQuery(8, {a: 5, b: {$gte: 5, $lte: 7}, c: 5}, sort, index);
+ doQuery(5, {a: {$gte: 5, $lte: 6}, b: 5, c: 5}, sort, index);
+ if (sort.a > 0) {
+ doQuery(3, {a: {$gte: 5.5, $lte: 6}, b: 5, c: 5}, sort, index);
+ doQuery(3, {a: {$gte: 5, $lte: 5.5}, b: 5, c: 5}, sort, index);
+ doQuery(3, {a: {$gte: 5.5, $lte: 6}, b: 5, c: {$gte: 5, $lte: 6}}, sort, index);
} else {
- doQuery( 3, { a:{$gte:5.5,$lte:6}, b:5, c:5 }, sort, index );
- doQuery( 3, { a:{$gte:5,$lte:5.5}, b:5, c:5 }, sort, index );
- doQuery( 4, { a:{$gte:5.5,$lte:6}, b:5, c:{$gte:5,$lte:6} }, sort, index );
+ doQuery(3, {a: {$gte: 5.5, $lte: 6}, b: 5, c: 5}, sort, index);
+ doQuery(3, {a: {$gte: 5, $lte: 5.5}, b: 5, c: 5}, sort, index);
+ doQuery(4, {a: {$gte: 5.5, $lte: 6}, b: 5, c: {$gte: 5, $lte: 6}}, sort, index);
}
- doQuery( 8, { a:{$gte:5,$lte:7}, b:5, c:5 }, sort, index );
- doQuery( 7, { a:{$gte:5,$lte:6}, b:5, c:{$gte:5,$lte:6} }, sort, index );
- doQuery( 7, { a:5, b:{$gte:5,$lte:6}, c:{$gte:5,$lte:6} }, sort, index );
- doQuery( 11, { a:{$gte:5,$lte:6}, b:{$gte:5,$lte:6}, c:5 }, sort, index );
- doQuery( 15, { a:{$gte:5,$lte:6}, b:{$gte:5,$lte:6}, c:{$gte:5,$lte:6} }, sort, index );
+ doQuery(8, {a: {$gte: 5, $lte: 7}, b: 5, c: 5}, sort, index);
+ doQuery(7, {a: {$gte: 5, $lte: 6}, b: 5, c: {$gte: 5, $lte: 6}}, sort, index);
+ doQuery(7, {a: 5, b: {$gte: 5, $lte: 6}, c: {$gte: 5, $lte: 6}}, sort, index);
+ doQuery(11, {a: {$gte: 5, $lte: 6}, b: {$gte: 5, $lte: 6}, c: 5}, sort, index);
+ doQuery(15, {a: {$gte: 5, $lte: 6}, b: {$gte: 5, $lte: 6}, c: {$gte: 5, $lte: 6}}, sort, index);
}
-for ( var a = -1; a <= 1; a += 2 ) {
- for( var b = -1; b <= 1; b += 2 ) {
- for( var c = -1; c <= 1; c += 2 ) {
+for (var a = -1; a <= 1; a += 2) {
+ for (var b = -1; b <= 1; b += 2) {
+ for (var c = -1; c <= 1; c += 2) {
t.dropIndexes();
- var spec = {a:a,b:b,c:c};
- t.ensureIndex( spec );
- doTest( spec, spec );
- doTest( {a:-a,b:-b,c:-c}, spec );
+ var spec = {
+ a: a,
+ b: b,
+ c: c
+ };
+ t.ensureIndex(spec);
+ doTest(spec, spec);
+ doTest({a: -a, b: -b, c: -c}, spec);
}
}
}
-
diff --git a/jstests/core/index_check7.js b/jstests/core/index_check7.js
index f8020d76143..fda248db467 100644
--- a/jstests/core/index_check7.js
+++ b/jstests/core/index_check7.js
@@ -2,14 +2,13 @@
t = db.index_check7;
t.drop();
-for ( var i=0; i<100; i++ )
- t.save( { x : i } );
+for (var i = 0; i < 100; i++)
+ t.save({x: i});
-t.ensureIndex( { x : 1 } );
-assert.eq( 1 , t.find( { x : 27 } ).explain(true).executionStats.totalKeysExamined , "A" );
+t.ensureIndex({x: 1});
+assert.eq(1, t.find({x: 27}).explain(true).executionStats.totalKeysExamined, "A");
-t.ensureIndex( { x : -1 } );
-assert.eq( 1 , t.find( { x : 27 } ).explain(true).executionStats.totalKeysExamined , "B" );
+t.ensureIndex({x: -1});
+assert.eq(1, t.find({x: 27}).explain(true).executionStats.totalKeysExamined, "B");
-assert.eq( 40 , t.find( { x : { $gt : 59 } } ).explain(true)
- .executionStats.totalKeysExamined , "C" );
+assert.eq(40, t.find({x: {$gt: 59}}).explain(true).executionStats.totalKeysExamined, "C");
diff --git a/jstests/core/index_create_too_many.js b/jstests/core/index_create_too_many.js
index add81a86703..44d5016a7cf 100644
--- a/jstests/core/index_create_too_many.js
+++ b/jstests/core/index_create_too_many.js
@@ -6,7 +6,10 @@ coll.drop();
// create 62 indexes, which leaves us with 63 indexes total (+1 for the _id index)
for (var i = 0; i < 62; i++) {
var name = 'i' + i;
- var spec = {key: {}, name: name};
+ var spec = {
+ key: {},
+ name: name
+ };
spec.key[name] = 1;
var res = coll.runCommand('createIndexes', {indexes: [spec]});
@@ -14,12 +17,8 @@ for (var i = 0; i < 62; i++) {
}
// attempt to add 2 more indexes to push over the limit (64).
-var newSpecs = [
- {key: {i62: 1 }, name: 'i62'},
- {key: {i63: 1 }, name: 'i63'}
-];
+var newSpecs = [{key: {i62: 1}, name: 'i62'}, {key: {i63: 1}, name: 'i63'}];
var res = coll.runCommand('createIndexes', {indexes: newSpecs});
assert.commandFailed(res, tojson(res));
-assert.eq(res.code, 67); // CannotCreateIndex
-
+assert.eq(res.code, 67); // CannotCreateIndex
diff --git a/jstests/core/index_create_with_nul_in_name.js b/jstests/core/index_create_with_nul_in_name.js
index 3c84460ddc4..9134649c086 100644
--- a/jstests/core/index_create_with_nul_in_name.js
+++ b/jstests/core/index_create_with_nul_in_name.js
@@ -6,9 +6,13 @@
var coll = db.create_index_with_nul_in_name;
coll.drop();
- var idx = {key: {'a': 1}, name: 'foo\0bar', ns: coll.getFullName()};
+ var idx = {
+ key: {'a': 1},
+ name: 'foo\0bar',
+ ns: coll.getFullName()
+ };
var res = coll.runCommand('createIndexes', {indexes: [idx]});
assert.commandFailed(res, tojson(res));
- assert.eq(res.code, 67); // CannotCreateIndex
+ assert.eq(res.code, 67); // CannotCreateIndex
}());
diff --git a/jstests/core/index_diag.js b/jstests/core/index_diag.js
index edb86e841e1..3e25bf2a1eb 100644
--- a/jstests/core/index_diag.js
+++ b/jstests/core/index_diag.js
@@ -2,43 +2,46 @@
t = db.index_diag;
t.drop();
-t.ensureIndex( { x : 1 } );
+t.ensureIndex({x: 1});
all = [];
ids = [];
xs = [];
-function r( a ){
+function r(a) {
var n = [];
- for ( var x=a.length-1; x>=0; x-- )
- n.push( a[x] );
+ for (var x = a.length - 1; x >= 0; x--)
+ n.push(a[x]);
return n;
}
-for ( i=1; i<4; i++ ){
- o = { _id : i , x : -i };
- t.insert( o );
- all.push( o );
- ids.push( { _id : i } );
- xs.push( { x : -i } );
+for (i = 1; i < 4; i++) {
+ o = {
+ _id: i,
+ x: -i
+ };
+ t.insert(o);
+ all.push(o);
+ ids.push({_id: i});
+ xs.push({x: -i});
}
-assert.eq( all , t.find().sort( { _id : 1 } ).toArray() , "A1" );
-assert.eq( r( all ) , t.find().sort( { _id : -1 } ).toArray() , "A2" );
+assert.eq(all, t.find().sort({_id: 1}).toArray(), "A1");
+assert.eq(r(all), t.find().sort({_id: -1}).toArray(), "A2");
-assert.eq( all , t.find().sort( { x : -1 } ).toArray() , "A3" );
-assert.eq( r( all ) , t.find().sort( { x : 1 } ).toArray() , "A4" );
+assert.eq(all, t.find().sort({x: -1}).toArray(), "A3");
+assert.eq(r(all), t.find().sort({x: 1}).toArray(), "A4");
-assert.eq( ids , t.find().sort( { _id : 1 } ).returnKey().toArray() , "B1" );
-assert.eq( r( ids ) , t.find().sort( { _id : -1 } ).returnKey().toArray() , "B2" );
-assert.eq( xs , t.find().sort( { x : -1 } ).returnKey().toArray() , "B3" );
-assert.eq( r( xs ) , t.find().sort( { x : 1 } ).returnKey().toArray() , "B4" );
+assert.eq(ids, t.find().sort({_id: 1}).returnKey().toArray(), "B1");
+assert.eq(r(ids), t.find().sort({_id: -1}).returnKey().toArray(), "B2");
+assert.eq(xs, t.find().sort({x: -1}).returnKey().toArray(), "B3");
+assert.eq(r(xs), t.find().sort({x: 1}).returnKey().toArray(), "B4");
-assert.eq( r( xs ) , t.find().hint( { x : 1 } ).returnKey().toArray() , "B4" );
+assert.eq(r(xs), t.find().hint({x: 1}).returnKey().toArray(), "B4");
// SERVER-4981
-t.ensureIndex( { _id : 1 , x : 1 } );
-assert.eq( all , t.find().hint( { _id : 1 , x : 1 } ).returnKey().toArray() );
-assert.eq( r( all ) , t.find().hint( { _id : 1 , x : 1 } ).sort( { x : 1 } ).returnKey().toArray() );
+t.ensureIndex({_id: 1, x: 1});
+assert.eq(all, t.find().hint({_id: 1, x: 1}).returnKey().toArray());
+assert.eq(r(all), t.find().hint({_id: 1, x: 1}).sort({x: 1}).returnKey().toArray());
-assert.eq( [ {} , {} , {} ], t.find().hint( { $natural : 1 } ).returnKey().toArray() );
+assert.eq([{}, {}, {}], t.find().hint({$natural: 1}).returnKey().toArray());
diff --git a/jstests/core/index_dropdups_ignore.js b/jstests/core/index_dropdups_ignore.js
index d1ab12f3b2c..3622d800b01 100644
--- a/jstests/core/index_dropdups_ignore.js
+++ b/jstests/core/index_dropdups_ignore.js
@@ -3,17 +3,17 @@
var t = db.index_dropdups_ignore;
t.drop();
-t.insert({_id:1, a: 'dup'});
-t.insert({_id:2, a: 'dup'});
+t.insert({_id: 1, a: 'dup'});
+t.insert({_id: 2, a: 'dup'});
// Should fail with a dup-key error even though dropDups is true;
-var res = t.ensureIndex({a:1}, {unique: true, dropDups:true});
+var res = t.ensureIndex({a: 1}, {unique: true, dropDups: true});
assert.commandFailed(res);
assert.eq(res.code, 11000, tojson(res));
// Succeeds with the dup manually removed.
-t.remove({_id:2});
-var res = t.ensureIndex({a:1}, {unique: true, dropDups:true});
+t.remove({_id: 2});
+var res = t.ensureIndex({a: 1}, {unique: true, dropDups: true});
assert.commandWorked(res);
// The spec should have been stripped of the dropDups option.
diff --git a/jstests/core/index_elemmatch1.js b/jstests/core/index_elemmatch1.js
index fb3bd5e76b4..710db37b09e 100644
--- a/jstests/core/index_elemmatch1.js
+++ b/jstests/core/index_elemmatch1.js
@@ -5,37 +5,42 @@ t.drop();
x = 0;
y = 0;
var bulk = t.initializeUnorderedBulkOp();
-for ( a=0; a<100; a++ ){
- for ( b=0; b<100; b++ ){
- bulk.insert( { a : a , b : b % 10 , arr : [ { x : x++ % 10 , y : y++ % 10 } ] } );
+for (a = 0; a < 100; a++) {
+ for (b = 0; b < 100; b++) {
+ bulk.insert({a: a, b: b % 10, arr: [{x: x++ % 10, y: y++ % 10}]});
}
}
assert.writeOK(bulk.execute());
-t.ensureIndex( { a : 1 , b : 1 } );
-t.ensureIndex( { "arr.x" : 1 , a : 1 } );
+t.ensureIndex({a: 1, b: 1});
+t.ensureIndex({"arr.x": 1, a: 1});
-assert.eq( 100 , t.find( { a : 55 } ).itcount() , "A1" );
-assert.eq( 10 , t.find( { a : 55 , b : 7 } ).itcount() , "A2" );
+assert.eq(100, t.find({a: 55}).itcount(), "A1");
+assert.eq(10, t.find({a: 55, b: 7}).itcount(), "A2");
-q = { a : 55 , b : { $in : [ 1 , 5 , 8 ] } };
-assert.eq( 30 , t.find( q ).itcount() , "A3" );
+q = {
+ a: 55,
+ b: {$in: [1, 5, 8]}
+};
+assert.eq(30, t.find(q).itcount(), "A3");
-q.arr = { $elemMatch : { x : 5 , y : 5 } };
-assert.eq( 10 , t.find( q ).itcount() , "A4" );
+q.arr = {
+ $elemMatch: {x: 5, y: 5}
+};
+assert.eq(10, t.find(q).itcount(), "A4");
-function nscannedForCursor( explain, cursor ) {
+function nscannedForCursor(explain, cursor) {
plans = explain.allPlans;
- for( i in plans ) {
- if ( plans[ i ].cursor == cursor ) {
- return plans[ i ].nscanned;
+ for (i in plans) {
+ if (plans[i].cursor == cursor) {
+ return plans[i].nscanned;
}
}
return -1;
}
-var explain = t.find(q).hint( { "arr.x" : 1 , a : 1 } ).explain("executionStats");
-assert.eq( t.find(q).itcount(), explain.executionStats.totalKeysExamined );
+var explain = t.find(q).hint({"arr.x": 1, a: 1}).explain("executionStats");
+assert.eq(t.find(q).itcount(), explain.executionStats.totalKeysExamined);
printjson(t.find(q).explain());
print("Num results:");
diff --git a/jstests/core/index_filter_commands.js b/jstests/core/index_filter_commands.js
index b422dfffcd5..027731e97cf 100644
--- a/jstests/core/index_filter_commands.js
+++ b/jstests/core/index_filter_commands.js
@@ -1,6 +1,6 @@
/**
* Index Filter commands
- *
+ *
* Commands:
* - planCacheListFilters
* Displays index filters for all query shapes in a collection.
@@ -20,7 +20,7 @@
* cache state. We would do this with the planCacheListPlans command
* on the same query shape with the index filters.
*
- */
+ */
var t = db.jstests_index_filter_commands;
@@ -36,16 +36,32 @@ t.save({a: 1, b: 1});
// Add 2 indexes.
// 1st index is more efficient.
// 2nd and 3rd indexes will be used to test index filters.
-var indexA1 = {a: 1};
-var indexA1B1 = {a: 1, b: 1};
-var indexA1C1 = {a: 1, c: 1};
+var indexA1 = {
+ a: 1
+};
+var indexA1B1 = {
+ a: 1,
+ b: 1
+};
+var indexA1C1 = {
+ a: 1,
+ c: 1
+};
t.ensureIndex(indexA1);
t.ensureIndex(indexA1B1);
t.ensureIndex(indexA1C1);
-var queryA1 = {a: 1, b: 1};
-var projectionA1 = {_id: 0, a: 1};
-var sortA1 = {a: -1};
+var queryA1 = {
+ a: 1,
+ b: 1
+};
+var projectionA1 = {
+ _id: 0,
+ a: 1
+};
+var sortA1 = {
+ a: -1
+};
//
// Tests for planCacheListFilters, planCacheClearFilters, planCacheSetFilter
@@ -61,7 +77,6 @@ function getFilters(collection) {
assert.commandWorked(res, 'planCacheListFilters failed');
assert(res.hasOwnProperty('filters'), 'filters missing from planCacheListFilters result');
return res.filters;
-
}
// If query shape is in plan cache,
@@ -76,8 +91,8 @@ function planCacheContains(shape) {
function getPlans(shape) {
var res = t.runCommand('planCacheListPlans', shape);
assert.commandWorked(res, 'planCacheListPlans(' + tojson(shape, '', true) + ' failed');
- assert(res.hasOwnProperty('plans'), 'plans missing from planCacheListPlans(' +
- tojson(shape, '', true) + ') result');
+ assert(res.hasOwnProperty('plans'),
+ 'plans missing from planCacheListPlans(' + tojson(shape, '', true) + ') result');
return res.plans;
}
@@ -85,7 +100,8 @@ function getPlans(shape) {
// will return empty results.
var missingCollection = db.jstests_index_filter_commands_missing;
missingCollection.drop();
-assert.eq(0, getFilters(missingCollection),
+assert.eq(0,
+ getFilters(missingCollection),
'planCacheListFilters should return empty array on non-existent collection');
// Retrieve index filters from an empty test collection.
@@ -94,21 +110,31 @@ assert.eq(0, filters.length, 'unexpected number of index filters in planCacheLis
// Check details of winning plan in plan cache before setting index filter.
assert.eq(1, t.find(queryA1, projectionA1).sort(sortA1).itcount(), 'unexpected document count');
-var shape = {query: queryA1, sort: sortA1, projection: projectionA1};
+var shape = {
+ query: queryA1,
+ sort: sortA1,
+ projection: projectionA1
+};
var planBeforeSetFilter = getPlans(shape)[0];
print('Winning plan (before setting index filters) = ' + tojson(planBeforeSetFilter));
// Check filterSet field in plan details
-assert.eq(false, planBeforeSetFilter.filterSet, 'missing or invalid filterSet field in plan details');
+assert.eq(false,
+ planBeforeSetFilter.filterSet,
+ 'missing or invalid filterSet field in plan details');
// Adding index filters to a non-existent collection should be an error.
-assert.commandFailed(missingCollection.runCommand('planCacheSetFilter',
- {query: queryA1, sort: sortA1, projection: projectionA1, indexes: [indexA1B1, indexA1C1]}));
+assert.commandFailed(missingCollection.runCommand(
+ 'planCacheSetFilter',
+ {query: queryA1, sort: sortA1, projection: projectionA1, indexes: [indexA1B1, indexA1C1]}));
// Add index filters for simple query.
-assert.commandWorked(t.runCommand('planCacheSetFilter',
+assert.commandWorked(t.runCommand(
+ 'planCacheSetFilter',
{query: queryA1, sort: sortA1, projection: projectionA1, indexes: [indexA1B1, indexA1C1]}));
filters = getFilters();
-assert.eq(1, filters.length, 'no change in query settings after successfully setting index filters');
+assert.eq(1,
+ filters.length,
+ 'no change in query settings after successfully setting index filters');
assert.eq(queryA1, filters[0].query, 'unexpected query in filters');
assert.eq(sortA1, filters[0].sort, 'unexpected sort in filters');
assert.eq(projectionA1, filters[0].projection, 'unexpected projection in filters');
@@ -154,23 +180,33 @@ if (db.isMaster().msg !== "isdbgrid") {
// No filter.
t.getPlanCache().clear();
assert.eq(false, t.find({z: 1}).explain('queryPlanner').queryPlanner.indexFilterSet);
- assert.eq(false, t.find(queryA1, projectionA1).sort(sortA1)
- .explain('queryPlanner').queryPlanner.indexFilterSet);
+ assert.eq(false,
+ t.find(queryA1, projectionA1)
+ .sort(sortA1)
+ .explain('queryPlanner')
+ .queryPlanner.indexFilterSet);
// With one filter set.
assert.commandWorked(t.runCommand('planCacheSetFilter', {query: {z: 1}, indexes: [{z: 1}]}));
assert.eq(true, t.find({z: 1}).explain('queryPlanner').queryPlanner.indexFilterSet);
- assert.eq(false, t.find(queryA1, projectionA1).sort(sortA1)
- .explain('queryPlanner').queryPlanner.indexFilterSet);
+ assert.eq(false,
+ t.find(queryA1, projectionA1)
+ .sort(sortA1)
+ .explain('queryPlanner')
+ .queryPlanner.indexFilterSet);
// With two filters set.
- assert.commandWorked(t.runCommand('planCacheSetFilter', {
- query: queryA1,
- projection: projectionA1,
- sort: sortA1,
- indexes: [indexA1B1, indexA1C1]
- }));
+ assert.commandWorked(t.runCommand('planCacheSetFilter',
+ {
+ query: queryA1,
+ projection: projectionA1,
+ sort: sortA1,
+ indexes: [indexA1B1, indexA1C1]
+ }));
assert.eq(true, t.find({z: 1}).explain('queryPlanner').queryPlanner.indexFilterSet);
- assert.eq(true, t.find(queryA1, projectionA1).sort(sortA1)
- .explain('queryPlanner').queryPlanner.indexFilterSet);
+ assert.eq(true,
+ t.find(queryA1, projectionA1)
+ .sort(sortA1)
+ .explain('queryPlanner')
+ .queryPlanner.indexFilterSet);
}
diff --git a/jstests/core/index_many.js b/jstests/core/index_many.js
index a9eddbb32f1..142c9bbc4a5 100644
--- a/jstests/core/index_many.js
+++ b/jstests/core/index_many.js
@@ -3,12 +3,11 @@
t = db.many;
function f() {
-
t.drop();
db.many2.drop();
- t.save({ x: 9, y : 99 });
- t.save({ x: 19, y : 99 });
+ t.save({x: 9, y: 99});
+ t.save({x: 19, y: 99});
x = 2;
var lastErr = null;
@@ -16,9 +15,13 @@ function f() {
patt = {};
patt[x] = 1;
if (x == 20)
- patt = { x: 1 };
+ patt = {
+ x: 1
+ };
if (x == 64)
- patt = { y: 1 };
+ patt = {
+ y: 1
+ };
lastErr = t.ensureIndex(patt);
x++;
}
@@ -33,17 +36,16 @@ function f() {
}
assert(lim == 64, "not 64 indexes");
- assert(t.find({ x: 9 }).length() == 1, "b");
+ assert(t.find({x: 9}).length() == 1, "b");
- assert(t.find({ y: 99 }).length() == 2, "y idx");
+ assert(t.find({y: 99}).length() == 2, "y idx");
/* check that renamecollection remaps all the indexes right */
assert(t.renameCollection("many2").ok, "rename failed");
- assert(t.find({ x: 9 }).length() == 0, "many2a");
- assert(db.many2.find({ x: 9 }).length() == 1, "many2b");
- assert(t.find({ y: 99 }).length() == 0, "many2c");
- assert(db.many2.find({ y: 99 }).length() == 2, "many2d");
-
+ assert(t.find({x: 9}).length() == 0, "many2a");
+ assert(db.many2.find({x: 9}).length() == 1, "many2b");
+ assert(t.find({y: 99}).length() == 0, "many2c");
+ assert(db.many2.find({y: 99}).length() == 2, "many2d");
}
f();
diff --git a/jstests/core/index_many2.js b/jstests/core/index_many2.js
index ac265e5cf6e..87e99898b1f 100644
--- a/jstests/core/index_many2.js
+++ b/jstests/core/index_many2.js
@@ -2,30 +2,29 @@
t = db.index_many2;
t.drop();
-t.save( { x : 1 } );
+t.save({x: 1});
-assert.eq( 1 , t.getIndexKeys().length , "A1" );
+assert.eq(1, t.getIndexKeys().length, "A1");
-function make( n ){
+function make(n) {
var x = {};
- x["x"+n] = 1;
+ x["x" + n] = 1;
return x;
}
-for ( i=1; i<1000; i++ ){
- t.ensureIndex( make(i) );
+for (i = 1; i < 1000; i++) {
+ t.ensureIndex(make(i));
}
-assert.eq( 64 , t.getIndexKeys().length , "A2" );
-
+assert.eq(64, t.getIndexKeys().length, "A2");
num = t.getIndexKeys().length;
-t.dropIndex( make(num-1) );
-assert.eq( num - 1 , t.getIndexKeys().length , "B0" );
+t.dropIndex(make(num - 1));
+assert.eq(num - 1, t.getIndexKeys().length, "B0");
-t.ensureIndex( { z : 1 } );
-assert.eq( num , t.getIndexKeys().length , "B1" );
+t.ensureIndex({z: 1});
+assert.eq(num, t.getIndexKeys().length, "B1");
-t.dropIndex( "*" );
-assert.eq( 1 , t.getIndexKeys().length , "C1" );
+t.dropIndex("*");
+assert.eq(1, t.getIndexKeys().length, "C1");
diff --git a/jstests/core/index_partial_create_drop.js b/jstests/core/index_partial_create_drop.js
index d997f0fcfa3..34693ec9ae9 100644
--- a/jstests/core/index_partial_create_drop.js
+++ b/jstests/core/index_partial_create_drop.js
@@ -5,13 +5,12 @@
var isMongos = (db.runCommand("isMaster").msg === "isdbgrid");
var coll = db.index_partial_create_drop;
- var getNumKeys = function (idxName) {
+ var getNumKeys = function(idxName) {
var res = assert.commandWorked(coll.validate(true));
var kpi;
if (isMongos) {
kpi = res.raw[Object.getOwnPropertyNames(res.raw)[0]].keysPerIndex;
- }
- else {
+ } else {
kpi = res.keysPerIndex;
}
return kpi[coll.getFullName() + ".$" + idxName];
@@ -24,10 +23,12 @@
assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {x: {$asdasd: 3}}}));
assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {$and: 5}}));
assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {x: /abc/}}));
- assert.commandFailed(coll.ensureIndex({x: 1},
- {partialFilterExpression: {$and: [
- {$and: [{x: {$lt: 2}}, {x: {$gt: 0}}]},
- {x: {$exists: true}}]}}));
+ assert.commandFailed(coll.ensureIndex(
+ {x: 1},
+ {
+ partialFilterExpression:
+ {$and: [{$and: [{x: {$lt: 2}}, {x: {$gt: 0}}]}, {x: {$exists: true}}]}
+ }));
for (var i = 0; i < 10; i++) {
assert.writeOK(coll.insert({x: i, a: i}));
@@ -40,8 +41,8 @@
assert.eq(1, coll.getIndexes().length);
// Create partial index in background.
- assert.commandWorked(coll.ensureIndex({x: 1}, {background: true,
- partialFilterExpression: {a: {$lt: 5}}}));
+ assert.commandWorked(
+ coll.ensureIndex({x: 1}, {background: true, partialFilterExpression: {a: {$lt: 5}}}));
assert.eq(5, getNumKeys("x_1"));
assert.commandWorked(coll.dropIndex({x: 1}));
assert.eq(1, coll.getIndexes().length);
@@ -55,8 +56,8 @@
// Partial indexes can't also be sparse indexes.
assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: true}));
assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: 1}));
- assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1},
- sparse: false}));
+ assert.commandWorked(
+ coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}, sparse: false}));
assert.eq(2, coll.getIndexes().length);
assert.commandWorked(coll.dropIndex({x: 1}));
assert.eq(1, coll.getIndexes().length);
diff --git a/jstests/core/index_partial_read_ops.js b/jstests/core/index_partial_read_ops.js
index ef5c30d25a6..42f77486c17 100644
--- a/jstests/core/index_partial_read_ops.js
+++ b/jstests/core/index_partial_read_ops.js
@@ -10,8 +10,8 @@ load("jstests/libs/analyze_plan.js");
coll.drop();
assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: {$lte: 1.5}}}));
- assert.writeOK(coll.insert({x: 5, a: 2})); // Not in index.
- assert.writeOK(coll.insert({x: 6, a: 1})); // In index.
+ assert.writeOK(coll.insert({x: 5, a: 2})); // Not in index.
+ assert.writeOK(coll.insert({x: 6, a: 1})); // In index.
//
// Verify basic functionality with find().
@@ -65,14 +65,14 @@ load("jstests/libs/analyze_plan.js");
//
// findAndModify operation that should use index.
- explain = coll.explain('executionStats').findAndModify({query: {x: {$gt: 1}, a: 1},
- update: {$inc: {x: 1}}});
+ explain = coll.explain('executionStats')
+ .findAndModify({query: {x: {$gt: 1}, a: 1}, update: {$inc: {x: 1}}});
assert.eq(1, explain.executionStats.nReturned);
assert(isIxscan(explain.queryPlanner.winningPlan));
// findAndModify operation that should not use index.
- explain = coll.explain('executionStats').findAndModify({query: {x: {$gt: 1}, a: 2},
- update: {$inc: {x: 1}}});
+ explain = coll.explain('executionStats')
+ .findAndModify({query: {x: {$gt: 1}, a: 2}, update: {$inc: {x: 1}}});
assert.eq(1, explain.executionStats.nReturned);
assert(isCollscan(explain.queryPlanner.winningPlan));
})();
diff --git a/jstests/core/index_partial_write_ops.js b/jstests/core/index_partial_write_ops.js
index 92d3720d07e..b962347a26d 100644
--- a/jstests/core/index_partial_write_ops.js
+++ b/jstests/core/index_partial_write_ops.js
@@ -5,13 +5,12 @@
var isMongos = (db.runCommand("isMaster").msg === "isdbgrid");
var coll = db.index_partial_write_ops;
- var getNumKeys = function (idxName) {
+ var getNumKeys = function(idxName) {
var res = assert.commandWorked(coll.validate(true));
var kpi;
if (isMongos) {
kpi = res.raw[Object.getOwnPropertyNames(res.raw)[0]].keysPerIndex;
- }
- else {
+ } else {
kpi = res.keysPerIndex;
}
return kpi[coll.getFullName() + ".$" + idxName];
@@ -22,8 +21,8 @@
// Create partial index.
assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}}));
- assert.writeOK(coll.insert({_id: 1, x: 5, a: 2, b: 1})); // Not in index.
- assert.writeOK(coll.insert({_id: 2, x: 6, a: 1, b: 1})); // In index.
+ assert.writeOK(coll.insert({_id: 1, x: 5, a: 2, b: 1})); // Not in index.
+ assert.writeOK(coll.insert({_id: 2, x: 6, a: 1, b: 1})); // In index.
assert.eq(1, getNumKeys("x_1"));
diff --git a/jstests/core/index_plugins.js b/jstests/core/index_plugins.js
index d7271217e2f..f32e1e4345a 100644
--- a/jstests/core/index_plugins.js
+++ b/jstests/core/index_plugins.js
@@ -14,9 +14,9 @@ coll.dropIndexes();
assert.commandWorked(coll.ensureIndex({a: "text"}));
coll.dropIndexes();
-assert.commandFailed(coll.ensureIndex({a: "geoHaystack"}, {bucketSize: 1})); // compound required
+assert.commandFailed(coll.ensureIndex({a: "geoHaystack"}, {bucketSize: 1})); // compound required
-// Test compounding special index types with an ascending index.
+// Test compounding special index types with an ascending index.
assert.commandWorked(coll.ensureIndex({a: "2dsphere", b: 1}));
coll.dropIndexes();
@@ -30,14 +30,14 @@ coll.dropIndexes();
assert.commandWorked(coll.ensureIndex({a: "2d", b: 1}));
coll.dropIndexes();
-assert.commandFailed(coll.ensureIndex({a: 1, b: "2d"})); // unsupported
+assert.commandFailed(coll.ensureIndex({a: 1, b: "2d"})); // unsupported
assert.commandWorked(coll.ensureIndex({a: "geoHaystack", b: 1}, {bucketSize: 1}));
coll.dropIndexes();
-assert.commandFailed(coll.ensureIndex({a: 1, b: "geoHaystack"}, {bucketSize: 1})); // unsupported
+assert.commandFailed(coll.ensureIndex({a: 1, b: "geoHaystack"}, {bucketSize: 1})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: "hashed", b: 1})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: 1, b: "hashed"})); // unsupported
+assert.commandFailed(coll.ensureIndex({a: "hashed", b: 1})); // unsupported
+assert.commandFailed(coll.ensureIndex({a: 1, b: "hashed"})); // unsupported
// Test compound index where multiple fields have same special index type.
@@ -46,17 +46,17 @@ coll.dropIndexes();
assert.commandWorked(coll.ensureIndex({a: "text", b: "text"}));
coll.dropIndexes();
-assert.commandFailed(coll.ensureIndex({a: "2d", b: "2d"})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: "geoHaystack", b: "geoHaystack"}, // unsupported
+assert.commandFailed(coll.ensureIndex({a: "2d", b: "2d"})); // unsupported
+assert.commandFailed(coll.ensureIndex({a: "geoHaystack", b: "geoHaystack"}, // unsupported
{bucketSize: 1}));
-assert.commandFailed(coll.ensureIndex({a: "hashed", b: "hashed"})); // unsupported
+assert.commandFailed(coll.ensureIndex({a: "hashed", b: "hashed"})); // unsupported
// Test compounding different special index types with each other.
-assert.commandFailed(coll.ensureIndex({a: "2d", b: "hashed"})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: "hashed", b: "2dsphere"})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: "2dsphere", b: "text"})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: "text", b: "geoHaystack"})); // unsupported
-assert.commandFailed(coll.ensureIndex({a: "geoHaystack", b: "2d"}, // unsupported
+assert.commandFailed(coll.ensureIndex({a: "2d", b: "hashed"})); // unsupported
+assert.commandFailed(coll.ensureIndex({a: "hashed", b: "2dsphere"})); // unsupported
+assert.commandFailed(coll.ensureIndex({a: "2dsphere", b: "text"})); // unsupported
+assert.commandFailed(coll.ensureIndex({a: "text", b: "geoHaystack"})); // unsupported
+assert.commandFailed(coll.ensureIndex({a: "geoHaystack", b: "2d"}, // unsupported
{bucketSize: 1}));
diff --git a/jstests/core/index_sparse1.js b/jstests/core/index_sparse1.js
index 950c8a8d797..d71c3c1dbfe 100644
--- a/jstests/core/index_sparse1.js
+++ b/jstests/core/index_sparse1.js
@@ -2,44 +2,40 @@
t = db.index_sparse1;
t.drop();
-t.insert( { _id : 1 , x : 1 } );
-t.insert( { _id : 2 , x : 2 } );
-t.insert( { _id : 3 , x : 2 } );
-t.insert( { _id : 4 } );
-t.insert( { _id : 5 } );
-
-assert.eq( 5 , t.count() , "A1" );
-assert.eq( 5 , t.find().sort( { x : 1 } ).itcount() , "A2" );
-
-t.ensureIndex( { x : 1 } );
-assert.eq( 2 , t.getIndexes().length , "B1" );
-assert.eq( 5 , t.find().sort( { x : 1 } ).itcount() , "B2" );
-t.dropIndex( { x : 1 } );
-assert.eq( 1 , t.getIndexes().length , "B3" );
-
-t.ensureIndex( { x : 1 } , { sparse : 1 } );
-assert.eq( 2 , t.getIndexes().length , "C1" );
-assert.eq( 5 , t.find().sort( { x : 1 } ).itcount() , "C2" );
-t.dropIndex( { x : 1 } );
-assert.eq( 1 , t.getIndexes().length , "C3" );
+t.insert({_id: 1, x: 1});
+t.insert({_id: 2, x: 2});
+t.insert({_id: 3, x: 2});
+t.insert({_id: 4});
+t.insert({_id: 5});
+
+assert.eq(5, t.count(), "A1");
+assert.eq(5, t.find().sort({x: 1}).itcount(), "A2");
+
+t.ensureIndex({x: 1});
+assert.eq(2, t.getIndexes().length, "B1");
+assert.eq(5, t.find().sort({x: 1}).itcount(), "B2");
+t.dropIndex({x: 1});
+assert.eq(1, t.getIndexes().length, "B3");
+
+t.ensureIndex({x: 1}, {sparse: 1});
+assert.eq(2, t.getIndexes().length, "C1");
+assert.eq(5, t.find().sort({x: 1}).itcount(), "C2");
+t.dropIndex({x: 1});
+assert.eq(1, t.getIndexes().length, "C3");
// -- sparse & unique
-t.remove( { _id : 2 } );
+t.remove({_id: 2});
-// test that we can't create a unique index without sparse
-assert.commandFailed( t.ensureIndex( { x : 1 } , { unique : 1 } ));
-assert.eq( 1 , t.getIndexes().length , "D2" );
-
-
-t.ensureIndex( { x : 1 } , { unique : 1 , sparse : 1 } );
-assert.eq( 2 , t.getIndexes().length , "E1" );
-t.dropIndex( { x : 1 } );
-assert.eq( 1 , t.getIndexes().length , "E3" );
-
-
-t.insert( { _id : 2 , x : 2 } );
-t.ensureIndex( { x : 1 } , { unique : 1 , sparse : 1 } );
-assert.eq( 1 , t.getIndexes().length , "F1" );
+// test that we can't create a unique index without sparse
+assert.commandFailed(t.ensureIndex({x: 1}, {unique: 1}));
+assert.eq(1, t.getIndexes().length, "D2");
+t.ensureIndex({x: 1}, {unique: 1, sparse: 1});
+assert.eq(2, t.getIndexes().length, "E1");
+t.dropIndex({x: 1});
+assert.eq(1, t.getIndexes().length, "E3");
+t.insert({_id: 2, x: 2});
+t.ensureIndex({x: 1}, {unique: 1, sparse: 1});
+assert.eq(1, t.getIndexes().length, "F1");
diff --git a/jstests/core/index_sparse2.js b/jstests/core/index_sparse2.js
index 4d5c5b84cd9..702bb8cd5f8 100644
--- a/jstests/core/index_sparse2.js
+++ b/jstests/core/index_sparse2.js
@@ -1,23 +1,20 @@
t = db.index_sparse2;
t.drop();
-t.insert( { _id : 1 , x : 1 , y : 1 } );
-t.insert( { _id : 2 , x : 2 } );
-t.insert( { _id : 3 } );
-
-t.ensureIndex( { x : 1 , y : 1 } );
-assert.eq( 2 , t.getIndexes().length , "A1" );
-assert.eq( 3 , t.find().sort( { x : 1 , y : 1 } ).count() , "A2 count()" );
-assert.eq( 3 , t.find().sort( { x : 1 , y : 1 } ).itcount() , "A2 itcount()" );
-t.dropIndex( { x : 1 , y : 1 } );
-assert.eq( 1 , t.getIndexes().length , "A3" );
-
-t.ensureIndex( { x : 1 , y : 1 } , { sparse : 1 } );
-assert.eq( 2 , t.getIndexes().length , "B1" );
-assert.eq( 3 , t.find().sort( { x : 1 , y : 1 } ).count() , "B2 count()" );
-assert.eq( 3 , t.find().sort( { x : 1 , y : 1 } ).itcount() , "B2 itcount()" );
-t.dropIndex( { x : 1 , y : 1 } );
-assert.eq( 1 , t.getIndexes().length , "B3" );
-
+t.insert({_id: 1, x: 1, y: 1});
+t.insert({_id: 2, x: 2});
+t.insert({_id: 3});
+t.ensureIndex({x: 1, y: 1});
+assert.eq(2, t.getIndexes().length, "A1");
+assert.eq(3, t.find().sort({x: 1, y: 1}).count(), "A2 count()");
+assert.eq(3, t.find().sort({x: 1, y: 1}).itcount(), "A2 itcount()");
+t.dropIndex({x: 1, y: 1});
+assert.eq(1, t.getIndexes().length, "A3");
+t.ensureIndex({x: 1, y: 1}, {sparse: 1});
+assert.eq(2, t.getIndexes().length, "B1");
+assert.eq(3, t.find().sort({x: 1, y: 1}).count(), "B2 count()");
+assert.eq(3, t.find().sort({x: 1, y: 1}).itcount(), "B2 itcount()");
+t.dropIndex({x: 1, y: 1});
+assert.eq(1, t.getIndexes().length, "B3");
diff --git a/jstests/core/index_stats.js b/jstests/core/index_stats.js
index fc4aca2a77f..7db4559210c 100644
--- a/jstests/core/index_stats.js
+++ b/jstests/core/index_stats.js
@@ -78,10 +78,8 @@
//
// Confirm index stats tick on findAndModify() update.
//
- var res = db.runCommand({findAndModify: colName,
- query: {a: 1},
- update: {$set: {d: 1}},
- 'new': true});
+ var res = db.runCommand(
+ {findAndModify: colName, query: {a: 1}, update: {$set: {d: 1}}, 'new': true});
assert.commandWorked(res);
countA++;
assert.eq(countA, getUsageCount("a_1"));
@@ -89,9 +87,7 @@
//
// Confirm index stats tick on findAndModify() delete.
//
- res = db.runCommand({findAndModify: colName,
- query: {a: 2},
- remove: true});
+ res = db.runCommand({findAndModify: colName, query: {a: 2}, remove: true});
assert.commandWorked(res);
countA++;
assert.eq(countA, getUsageCount("a_1"));
@@ -112,8 +108,7 @@
var name = indexNameList[i];
if (name === "a_1") {
countA++;
- }
- else {
+ } else {
assert(name === "b_1_c_1");
countB++;
}
@@ -137,11 +132,15 @@
//
// Confirm index stats tick on group().
//
- res = db.runCommand({group: {ns: colName,
- key: {b: 1, c: 1},
- cond: {b: {$gt: 0}},
- $reduce: function(curr, result) {},
- initial: {}}});
+ res = db.runCommand({
+ group: {
+ ns: colName,
+ key: {b: 1, c: 1},
+ cond: {b: {$gt: 0}},
+ $reduce: function(curr, result) {},
+ initial: {}
+ }
+ });
assert.commandWorked(res);
countB++;
assert.eq(countB, getUsageCount("b_1_c_1"));
@@ -149,8 +148,7 @@
//
// Confirm index stats tick on aggregate w/ match.
//
- res = db.runCommand({aggregate: colName,
- pipeline: [{$match: {b: 1}}]});
+ res = db.runCommand({aggregate: colName, pipeline: [{$match: {b: 1}}]});
assert.commandWorked(res);
countB++;
assert.eq(countB, getUsageCount("b_1_c_1"));
@@ -158,11 +156,17 @@
//
// Confirm index stats tick on mapReduce with query.
//
- res = db.runCommand({mapReduce: colName,
- map: function() {emit(this.b, this.c);},
- reduce: function(key, val) {return val;},
- query: {b: 2},
- out: {inline: true}});
+ res = db.runCommand({
+ mapReduce: colName,
+ map: function() {
+ emit(this.b, this.c);
+ },
+ reduce: function(key, val) {
+ return val;
+ },
+ query: {b: 2},
+ out: {inline: true}
+ });
assert.commandWorked(res);
countB++;
assert.eq(countB, getUsageCount("b_1_c_1"));
@@ -206,5 +210,7 @@
//
// Confirm that retrieval fails if $indexStats is not in the first pipeline position.
//
- assert.throws(function() { col.aggregate([{$match: {}}, {$indexStats: {}}]); });
+ assert.throws(function() {
+ col.aggregate([{$match: {}}, {$indexStats: {}}]);
+ });
})();
diff --git a/jstests/core/indexa.js b/jstests/core/indexa.js
index 73da14fc8bc..be86c9acf54 100644
--- a/jstests/core/indexa.js
+++ b/jstests/core/indexa.js
@@ -1,22 +1,23 @@
-// unique index constraint test for updates
+// unique index constraint test for updates
// case where object doesn't grow tested here
t = db.indexa;
t.drop();
-t.ensureIndex( { x:1 }, true );
+t.ensureIndex({x: 1}, true);
-t.insert( { 'x':'A' } );
-t.insert( { 'x':'B' } );
-t.insert( { 'x':'A' } );
+t.insert({'x': 'A'});
+t.insert({'x': 'B'});
+t.insert({'x': 'A'});
-assert.eq( 2 , t.count() , "indexa 1" );
+assert.eq(2, t.count(), "indexa 1");
-t.update( {x:'B'}, { x:'A' } );
+t.update({x: 'B'}, {x: 'A'});
a = t.find().toArray();
-u = Array.unique( a.map( function(z){ return z.x; } ) );
-assert.eq( 2 , t.count() , "indexa 2" );
-
-assert( a.length == u.length , "unique index update is broken" );
+u = Array.unique(a.map(function(z) {
+ return z.x;
+}));
+assert.eq(2, t.count(), "indexa 2");
+assert(a.length == u.length, "unique index update is broken");
diff --git a/jstests/core/indexapi.js b/jstests/core/indexapi.js
index 911e58e980c..2df9709171d 100644
--- a/jstests/core/indexapi.js
+++ b/jstests/core/indexapi.js
@@ -2,41 +2,45 @@
t = db.indexapi;
t.drop();
-key = { x : 1 };
+key = {
+ x: 1
+};
-c = { ns : t._fullName , key : key , name : t._genIndexName( key ) };
-assert.eq( c , t._indexSpec( { x : 1 } ) , "A" );
+c = {
+ ns: t._fullName,
+ key: key,
+ name: t._genIndexName(key)
+};
+assert.eq(c, t._indexSpec({x: 1}), "A");
c.name = "bob";
-assert.eq( c , t._indexSpec( { x : 1 } , "bob" ) , "B" );
+assert.eq(c, t._indexSpec({x: 1}, "bob"), "B");
-c.name = t._genIndexName( key );
-assert.eq( c , t._indexSpec( { x : 1 } ) , "C" );
+c.name = t._genIndexName(key);
+assert.eq(c, t._indexSpec({x: 1}), "C");
c.unique = true;
-assert.eq( c , t._indexSpec( { x : 1 } , true ) , "D" );
-assert.eq( c , t._indexSpec( { x : 1 } , [ true ] ) , "E" );
-assert.eq( c , t._indexSpec( { x : 1 } , { unique : true } ) , "F" );
+assert.eq(c, t._indexSpec({x: 1}, true), "D");
+assert.eq(c, t._indexSpec({x: 1}, [true]), "E");
+assert.eq(c, t._indexSpec({x: 1}, {unique: true}), "F");
c.dropDups = true;
-assert.eq( c , t._indexSpec( { x : 1 } , [ true , true ] ) , "G" );
-assert.eq( c , t._indexSpec( { x : 1 } , { unique : true , dropDups : true } ) , "F" );
+assert.eq(c, t._indexSpec({x: 1}, [true, true]), "G");
+assert.eq(c, t._indexSpec({x: 1}, {unique: true, dropDups: true}), "F");
-t.ensureIndex( { x : 1 } , { unique : true } );
+t.ensureIndex({x: 1}, {unique: true});
idx = t.getIndexes();
-assert.eq( 2 , idx.length , "M1" );
-assert.eq( key , idx[1].key , "M2" );
-assert( idx[1].unique , "M3" );
+assert.eq(2, idx.length, "M1");
+assert.eq(key, idx[1].key, "M2");
+assert(idx[1].unique, "M3");
t.drop();
-t.ensureIndex( { x : 1 } , { unique : 1 } );
+t.ensureIndex({x: 1}, {unique: 1});
idx = t.getIndexes();
-assert.eq( 2 , idx.length , "M1" );
-assert.eq( key , idx[1].key , "M2" );
-assert( idx[1].unique , "M3" );
-//printjson( idx );
+assert.eq(2, idx.length, "M1");
+assert.eq(key, idx[1].key, "M2");
+assert(idx[1].unique, "M3");
+// printjson( idx );
// Test that attempting to create index in an invalid namespace fails.
-assert.writeError(db.system.indexes.insert( { ns : "test" , key : { x : 1 } , name : "x" } ));
-
-
+assert.writeError(db.system.indexes.insert({ns: "test", key: {x: 1}, name: "x"}));
diff --git a/jstests/core/indexb.js b/jstests/core/indexb.js
index d7d2e8c9f05..1262c621ba9 100644
--- a/jstests/core/indexb.js
+++ b/jstests/core/indexb.js
@@ -1,29 +1,28 @@
// unique index test for a case where the object grows
// and must move
-// see indexa.js for the test case for an update with dup id check
+// see indexa.js for the test case for an update with dup id check
// when it doesn't move
-
t = db.indexb;
t.drop();
-t.ensureIndex({a:1},true);
+t.ensureIndex({a: 1}, true);
-t.insert({a:1});
+t.insert({a: 1});
-x = { a : 2 };
+x = {
+ a: 2
+};
t.save(x);
{
+ assert(t.count() == 2, "count wrong B");
- assert( t.count() == 2, "count wrong B");
-
- x.a = 1;
- x.filler = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
- t.save(x); // should fail, not unique.
-
- assert( t.count() == 2,"count wrong" );
- assert( t.find({a:1}).count() == 1,"bfail1" );
- assert( t.find({a:2}).count() == 1,"bfail2" );
+ x.a = 1;
+ x.filler = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+ t.save(x); // should fail, not unique.
+ assert(t.count() == 2, "count wrong");
+ assert(t.find({a: 1}).count() == 1, "bfail1");
+ assert(t.find({a: 2}).count() == 1, "bfail2");
}
diff --git a/jstests/core/indexc.js b/jstests/core/indexc.js
index b099e2d2823..8a4591bbd9a 100644
--- a/jstests/core/indexc.js
+++ b/jstests/core/indexc.js
@@ -2,19 +2,19 @@
t = db.indexc;
t.drop();
-for ( var i=1; i<100; i++ ){
- var d = new Date( ( new Date() ).getTime() + i );
- t.save( { a : i , ts : d , cats : [ i , i + 1 , i + 2 ] } );
- if ( i == 51 )
+for (var i = 1; i < 100; i++) {
+ var d = new Date((new Date()).getTime() + i);
+ t.save({a: i, ts: d, cats: [i, i + 1, i + 2]});
+ if (i == 51)
mid = d;
}
-assert.eq( 50 , t.find( { ts : { $lt : mid } } ).itcount() , "A" );
-assert.eq( 50 , t.find( { ts : { $lt : mid } } ).sort( { ts : 1 } ).itcount() , "B" );
+assert.eq(50, t.find({ts: {$lt: mid}}).itcount(), "A");
+assert.eq(50, t.find({ts: {$lt: mid}}).sort({ts: 1}).itcount(), "B");
-t.ensureIndex( { ts : 1 , cats : 1 } );
-t.ensureIndex( { cats : 1 } );
+t.ensureIndex({ts: 1, cats: 1});
+t.ensureIndex({cats: 1});
// multi-key bug was firing here (related to getsetdup()):
-assert.eq( 50 , t.find( { ts : { $lt : mid } } ).itcount() , "C" );
-assert.eq( 50 , t.find( { ts : { $lt : mid } } ).sort( { ts : 1 } ).itcount() , "D" );
+assert.eq(50, t.find({ts: {$lt: mid}}).itcount(), "C");
+assert.eq(50, t.find({ts: {$lt: mid}}).sort({ts: 1}).itcount(), "D");
diff --git a/jstests/core/indexd.js b/jstests/core/indexd.js
index 33246ad9812..31281de7ffa 100644
--- a/jstests/core/indexd.js
+++ b/jstests/core/indexd.js
@@ -2,9 +2,11 @@
t = db.indexd;
t.drop();
-t.save( { a : 1 } );
-t.ensureIndex( { a : 1 } );
-assert.throws( function(){ db.indexd.$_id_.drop(); } );
-assert( t.drop() );
+t.save({a: 1});
+t.ensureIndex({a: 1});
+assert.throws(function() {
+ db.indexd.$_id_.drop();
+});
+assert(t.drop());
-//db.indexd.$_id_.remove({});
+// db.indexd.$_id_.remove({});
diff --git a/jstests/core/indexe.js b/jstests/core/indexe.js
index e84322c6510..55e256b9df9 100644
--- a/jstests/core/indexe.js
+++ b/jstests/core/indexe.js
@@ -4,19 +4,19 @@ t.drop();
var num = 1000;
-for ( i=0; i<num; i++){
- t.insert( { a : "b" } );
+for (i = 0; i < num; i++) {
+ t.insert({a: "b"});
}
-assert.eq( num , t.find().count() ,"A1" );
-assert.eq( num , t.find( { a : "b" } ).count() , "B1" );
-assert.eq( num , t.find( { a : "b" } ).itcount() , "C1" );
+assert.eq(num, t.find().count(), "A1");
+assert.eq(num, t.find({a: "b"}).count(), "B1");
+assert.eq(num, t.find({a: "b"}).itcount(), "C1");
-t.ensureIndex( { a : 1 } );
+t.ensureIndex({a: 1});
-assert.eq( num , t.find().count() ,"A2" );
-assert.eq( num , t.find().sort( { a : 1 } ).count() , "A2a" );
-assert.eq( num , t.find( { a : "b" } ).count() , "B2" );
-assert.eq( num , t.find( { a : "b" } ).itcount() , "C3" );
+assert.eq(num, t.find().count(), "A2");
+assert.eq(num, t.find().sort({a: 1}).count(), "A2a");
+assert.eq(num, t.find({a: "b"}).count(), "B2");
+assert.eq(num, t.find({a: "b"}).itcount(), "C3");
t.drop();
diff --git a/jstests/core/indexes_on_indexes.js b/jstests/core/indexes_on_indexes.js
index df42c5161e5..3ebbf1d2f9c 100644
--- a/jstests/core/indexes_on_indexes.js
+++ b/jstests/core/indexes_on_indexes.js
@@ -8,20 +8,18 @@
assert.eq(t.system.indexes.getIndexes().length, 0);
print("trying via ensureIndex");
- assert.commandFailed(t.system.indexes.ensureIndex({_id:1}));
+ assert.commandFailed(t.system.indexes.ensureIndex({_id: 1}));
printjson(t.system.indexes.getIndexes());
assert.eq(t.system.indexes.getIndexes().length, 0);
print("trying via createIndex");
- assert.throws(t.system.indexes.createIndex({_id:1}));
+ assert.throws(t.system.indexes.createIndex({_id: 1}));
printjson(t.system.indexes.getIndexes());
assert.eq(t.system.indexes.getIndexes().length, 0);
print("trying via direct insertion");
- assert.throws(t.system.indexes.insert({ v:1,
- key:{_id:1},
- ns: "indexes_on_indexes.system.indexes",
- name:"wontwork"}));
+ assert.throws(t.system.indexes.insert(
+ {v: 1, key: {_id: 1}, ns: "indexes_on_indexes.system.indexes", name: "wontwork"}));
printjson(t.system.indexes.getIndexes());
assert.eq(t.system.indexes.getIndexes().length, 0);
}());
diff --git a/jstests/core/indexf.js b/jstests/core/indexf.js
index 3ad222bcbb8..5a126b3e22f 100644
--- a/jstests/core/indexf.js
+++ b/jstests/core/indexf.js
@@ -2,12 +2,12 @@
t = db.indexf;
t.drop();
-t.ensureIndex( { x : 1 } );
+t.ensureIndex({x: 1});
-t.save( { x : 2 } );
-t.save( { y : 3 } );
-t.save( { x : 4 } );
+t.save({x: 2});
+t.save({y: 3});
+t.save({x: 4});
-assert.eq( 2 , t.findOne( { x : 2 } ).x , "A1" );
-assert.eq( 3 , t.findOne( { x : null } ).y , "A2" );
-assert.eq( 4 , t.findOne( { x : 4 } ).x , "A3" );
+assert.eq(2, t.findOne({x: 2}).x, "A1");
+assert.eq(3, t.findOne({x: null}).y, "A2");
+assert.eq(4, t.findOne({x: 4}).x, "A3");
diff --git a/jstests/core/indexg.js b/jstests/core/indexg.js
index f4b2e0cada1..3cafede4cff 100644
--- a/jstests/core/indexg.js
+++ b/jstests/core/indexg.js
@@ -1,13 +1,13 @@
f = db.jstests_indexg;
f.drop();
-f.save( { list: [1, 2] } );
-f.save( { list: [1, 3] } );
+f.save({list: [1, 2]});
+f.save({list: [1, 3]});
doit = function() {
- assert.eq( 1, f.count( { list: { $in: [1], $ne: 3 } } ) );
- assert.eq( 1, f.count( { list: { $in: [1], $not:{$in: [3] } } } ) );
+ assert.eq(1, f.count({list: {$in: [1], $ne: 3}}));
+ assert.eq(1, f.count({list: {$in: [1], $not: {$in: [3]}}}));
};
doit();
-f.ensureIndex( { list: 1 } );
+f.ensureIndex({list: 1});
doit(); \ No newline at end of file
diff --git a/jstests/core/indexj.js b/jstests/core/indexj.js
index 7208abfdea6..01754466a3a 100644
--- a/jstests/core/indexj.js
+++ b/jstests/core/indexj.js
@@ -14,42 +14,42 @@ function keysExamined(query, hint, sort) {
return explain.executionStats.totalKeysExamined;
}
-t.ensureIndex( {a:1} );
-t.save( {a:5} );
-assert.eq( 0, keysExamined( { a: { $gt:4, $lt:5 } } ), "A" );
+t.ensureIndex({a: 1});
+t.save({a: 5});
+assert.eq(0, keysExamined({a: {$gt: 4, $lt: 5}}), "A");
t.drop();
-t.ensureIndex( {a:1} );
-t.save( {a:4} );
-assert.eq( 0, keysExamined( { a: { $gt:4, $lt:5 } } ), "B" );
+t.ensureIndex({a: 1});
+t.save({a: 4});
+assert.eq(0, keysExamined({a: {$gt: 4, $lt: 5}}), "B");
-t.save( {a:5} );
-assert.eq( 0, keysExamined( { a: { $gt:4, $lt:5 } } ), "D" );
+t.save({a: 5});
+assert.eq(0, keysExamined({a: {$gt: 4, $lt: 5}}), "D");
-t.save( {a:4} );
-assert.eq( 0, keysExamined( { a: { $gt:4, $lt:5 } } ), "C" );
+t.save({a: 4});
+assert.eq(0, keysExamined({a: {$gt: 4, $lt: 5}}), "C");
-t.save( {a:5} );
-assert.eq( 0, keysExamined( { a: { $gt:4, $lt:5 } } ), "D" );
+t.save({a: 5});
+assert.eq(0, keysExamined({a: {$gt: 4, $lt: 5}}), "D");
t.drop();
-t.ensureIndex( {a:1,b:1} );
-t.save( { a:1,b:1 } );
-t.save( { a:1,b:2 } );
-t.save( { a:2,b:1 } );
-t.save( { a:2,b:2 } );
-
-assert.eq( 3, keysExamined( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1} ) );
-assert.eq( 3, keysExamined( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1}, {a:-1,b:-1} ) );
-
-t.save( {a:1,b:1} );
-t.save( {a:1,b:1} );
-assert.eq( 3, keysExamined( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1} ) );
-assert.eq( 3, keysExamined( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1} ) );
-assert.eq( 3, keysExamined( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1}, {a:-1,b:-1} ) );
-
-assert.eq( 2, keysExamined( { a:{$in:[1,1.9]}, b:{$gt:1,$lt:2} }, {a:1,b:1} ) );
-assert.eq( 2, keysExamined( { a:{$in:[1.1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1}, {a:-1,b:-1} ) );
-
-t.save( { a:1,b:1.5} );
-assert.eq( 4, keysExamined( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} }, {a:1,b:1} ), "F" );
+t.ensureIndex({a: 1, b: 1});
+t.save({a: 1, b: 1});
+t.save({a: 1, b: 2});
+t.save({a: 2, b: 1});
+t.save({a: 2, b: 2});
+
+assert.eq(3, keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1}));
+assert.eq(3, keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1}, {a: -1, b: -1}));
+
+t.save({a: 1, b: 1});
+t.save({a: 1, b: 1});
+assert.eq(3, keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1}));
+assert.eq(3, keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1}));
+assert.eq(3, keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1}, {a: -1, b: -1}));
+
+assert.eq(2, keysExamined({a: {$in: [1, 1.9]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1}));
+assert.eq(2, keysExamined({a: {$in: [1.1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1}, {a: -1, b: -1}));
+
+t.save({a: 1, b: 1.5});
+assert.eq(4, keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1}), "F");
diff --git a/jstests/core/indexl.js b/jstests/core/indexl.js
index 3a5d0275887..c83638607d4 100644
--- a/jstests/core/indexl.js
+++ b/jstests/core/indexl.js
@@ -3,25 +3,25 @@
t = db.jstests_indexl;
function test(t) {
- t.save( {a:[1,2]} );
- assert.eq( 1, t.count( {a:{$all:[1],$in:[2]}} ) );
- assert.eq( 1, t.count( {a:{$all:[2],$in:[1]}} ) );
- assert.eq( 1, t.count( {a:{$in:[2],$all:[1]}} ) );
- assert.eq( 1, t.count( {a:{$in:[1],$all:[2]}} ) );
- assert.eq( 1, t.count( {a:{$all:[1],$in:[2]}} ) );
- t.save({a:[3,4]});
- t.save({a:[2,3]});
- t.save({a:[1,2,3,4]});
- assert.eq( 2, t.count( {a:{$in:[2],$all:[1]}} ) );
- assert.eq( 1, t.count( {a:{$in:[3],$all:[1,2]}} ) );
- assert.eq( 1, t.count( {a:{$in:[1],$all:[3]}} ) );
- assert.eq( 2, t.count( {a:{$in:[2,3],$all:[1]}} ) );
- assert.eq( 1, t.count( {a:{$in:[4],$all:[2,3]}} ) );
- assert.eq( 3, t.count( {a:{$in:[1,3],$all:[2]}} ) );
+ t.save({a: [1, 2]});
+ assert.eq(1, t.count({a: {$all: [1], $in: [2]}}));
+ assert.eq(1, t.count({a: {$all: [2], $in: [1]}}));
+ assert.eq(1, t.count({a: {$in: [2], $all: [1]}}));
+ assert.eq(1, t.count({a: {$in: [1], $all: [2]}}));
+ assert.eq(1, t.count({a: {$all: [1], $in: [2]}}));
+ t.save({a: [3, 4]});
+ t.save({a: [2, 3]});
+ t.save({a: [1, 2, 3, 4]});
+ assert.eq(2, t.count({a: {$in: [2], $all: [1]}}));
+ assert.eq(1, t.count({a: {$in: [3], $all: [1, 2]}}));
+ assert.eq(1, t.count({a: {$in: [1], $all: [3]}}));
+ assert.eq(2, t.count({a: {$in: [2, 3], $all: [1]}}));
+ assert.eq(1, t.count({a: {$in: [4], $all: [2, 3]}}));
+ assert.eq(3, t.count({a: {$in: [1, 3], $all: [2]}}));
}
t.drop();
test(t);
t.drop();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
test(t); \ No newline at end of file
diff --git a/jstests/core/indexm.js b/jstests/core/indexm.js
index 1c6f1c7baac..820bedd7baa 100644
--- a/jstests/core/indexm.js
+++ b/jstests/core/indexm.js
@@ -3,27 +3,22 @@
t = db.jstests_indexm;
t.drop();
-t.save( { a : [ { x : 1 } , { x : 2 } , { x : 3 } , { x : 4 } ] } );
+t.save({a: [{x: 1}, {x: 2}, {x: 3}, {x: 4}]});
-function test(){
- assert.eq( 1, t.count(
- {
- a : { x : 1 } ,
- "$or" : [ { a : { x : 2 } } , { a : { x : 3 } } ]
- }
- ) );
-}
+function test() {
+ assert.eq(1, t.count({a: {x: 1}, "$or": [{a: {x: 2}}, {a: {x: 3}}]}));
+}
// The first find will return a result since there isn't an index.
-test();
+test();
// Now create an index.
-t.ensureIndex({"a":1});
+t.ensureIndex({"a": 1});
test();
// Now create a different index.
t.dropIndexes();
-t.ensureIndex({"a.x":1});
+t.ensureIndex({"a.x": 1});
test();
// Drop the indexes.
diff --git a/jstests/core/indexn.js b/jstests/core/indexn.js
index 66a45d88836..416f0ec31b2 100644
--- a/jstests/core/indexn.js
+++ b/jstests/core/indexn.js
@@ -4,26 +4,26 @@
t = db.jstests_indexn;
t.drop();
-t.save( {a:1,b:[1,2]} );
+t.save({a: 1, b: [1, 2]});
-t.ensureIndex( {a:1} );
-t.ensureIndex( {b:1} );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
// {a:1} is a single key index, so no matches are possible for this query
-assert.eq( 0, t.count( {a:{$gt:5,$lt:0}} ) );
+assert.eq(0, t.count({a: {$gt: 5, $lt: 0}}));
-assert.eq( 0, t.count( {a:{$gt:5,$lt:0},b:2} ) );
+assert.eq(0, t.count({a: {$gt: 5, $lt: 0}, b: 2}));
-assert.eq( 0, t.count( {a:{$gt:5,$lt:0},b:{$gt:0,$lt:5}} ) );
+assert.eq(0, t.count({a: {$gt: 5, $lt: 0}, b: {$gt: 0, $lt: 5}}));
// One clause of an $or is an "impossible match"
-printjson( t.find( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ).explain() );
-assert.eq( 1, t.count( {$or:[{a:{$gt:5,$lt:0}},{a:1}]} ) );
+printjson(t.find({$or: [{a: {$gt: 5, $lt: 0}}, {a: 1}]}).explain());
+assert.eq(1, t.count({$or: [{a: {$gt: 5, $lt: 0}}, {a: 1}]}));
// One clause of an $or is an "impossible match"; original order of the $or
// does not matter.
-printjson( t.find( {$or:[{a:1},{a:{$gt:5,$lt:0}}]} ).explain() );
-assert.eq( 1, t.count( {$or:[{a:1},{a:{$gt:5,$lt:0}}]} ) );
+printjson(t.find({$or: [{a: 1}, {a: {$gt: 5, $lt: 0}}]}).explain());
+assert.eq(1, t.count({$or: [{a: 1}, {a: {$gt: 5, $lt: 0}}]}));
-t.save( {a:2} );
-assert.eq( 2, t.count( {$or:[{a:1},{a:{$gt:5,$lt:0}},{a:2}]} ) );
+t.save({a: 2});
+assert.eq(2, t.count({$or: [{a: 1}, {a: {$gt: 5, $lt: 0}}, {a: 2}]}));
diff --git a/jstests/core/indexp.js b/jstests/core/indexp.js
index c2a6866fc12..0111f0cca35 100644
--- a/jstests/core/indexp.js
+++ b/jstests/core/indexp.js
@@ -6,18 +6,18 @@
var coll = db.jstests_indexp;
// Empty field checks.
-assert.commandFailed(coll.ensureIndex({ 'a..b': 1 }));
-assert.commandFailed(coll.ensureIndex({ '.a': 1 }));
-assert.commandFailed(coll.ensureIndex({ 'a.': 1 }));
-assert.commandFailed(coll.ensureIndex({ '.': 1 }));
-assert.commandFailed(coll.ensureIndex({ '': 1 }));
-assert.commandWorked(coll.ensureIndex({ 'a.b': 1 }));
+assert.commandFailed(coll.ensureIndex({'a..b': 1}));
+assert.commandFailed(coll.ensureIndex({'.a': 1}));
+assert.commandFailed(coll.ensureIndex({'a.': 1}));
+assert.commandFailed(coll.ensureIndex({'.': 1}));
+assert.commandFailed(coll.ensureIndex({'': 1}));
+assert.commandWorked(coll.ensureIndex({'a.b': 1}));
// '$'-prefixed field checks.
-assert.commandFailed(coll.ensureIndex({ '$a': 1 }));
-assert.commandFailed(coll.ensureIndex({ 'a.$b': 1 }));
-assert.commandFailed(coll.ensureIndex({ '$db': 1 }));
-assert.commandWorked(coll.ensureIndex({ 'a$ap': 1 })); // $ in middle is ok
-assert.commandWorked(coll.ensureIndex({ 'a.$id': 1 })); // $id/$db/$ref are execptions
+assert.commandFailed(coll.ensureIndex({'$a': 1}));
+assert.commandFailed(coll.ensureIndex({'a.$b': 1}));
+assert.commandFailed(coll.ensureIndex({'$db': 1}));
+assert.commandWorked(coll.ensureIndex({'a$ap': 1})); // $ in middle is ok
+assert.commandWorked(coll.ensureIndex({'a.$id': 1})); // $id/$db/$ref are execptions
coll.dropIndexes();
diff --git a/jstests/core/indexr.js b/jstests/core/indexr.js
index 1f7b75bbcf7..d242ad87316 100644
--- a/jstests/core/indexr.js
+++ b/jstests/core/indexr.js
@@ -4,32 +4,32 @@ t = db.jstests_indexr;
t.drop();
// Check without indexes.
-t.save( { a: [ { b: 3, c: 6 }, { b: 1, c: 1 } ] } );
-assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
-assert.eq( 1, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) );
+t.save({a: [{b: 3, c: 6}, {b: 1, c: 1}]});
+assert.eq(1, t.count({'a.b': {$gt: 2}, 'a.c': {$lt: 4}}));
+assert.eq(1, t.count({a: {b: 3, c: 6}, 'a.c': {$lt: 4}}));
// Check with single key indexes.
t.remove({});
-t.ensureIndex( {'a.b':1,'a.c':1} );
-t.ensureIndex( {a:1,'a.c':1} );
-assert.eq( 0, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
-assert.eq( 0, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) );
+t.ensureIndex({'a.b': 1, 'a.c': 1});
+t.ensureIndex({a: 1, 'a.c': 1});
+assert.eq(0, t.count({'a.b': {$gt: 2}, 'a.c': {$lt: 4}}));
+assert.eq(0, t.count({a: {b: 3, c: 6}, 'a.c': {$lt: 4}}));
-t.save( { a: { b: 3, c: 3 } } );
-assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
-assert.eq( 1, t.count( { a:{ b:3, c:3 }, 'a.c': { $lt:4 } } ) );
+t.save({a: {b: 3, c: 3}});
+assert.eq(1, t.count({'a.b': {$gt: 2}, 'a.c': {$lt: 4}}));
+assert.eq(1, t.count({a: {b: 3, c: 3}, 'a.c': {$lt: 4}}));
// Check with multikey indexes.
t.remove({});
-t.save( { a: [ { b: 3, c: 6 }, { b: 1, c: 1 } ] } );
+t.save({a: [{b: 3, c: 6}, {b: 1, c: 1}]});
-assert.eq( 1, t.count( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ) );
-assert.eq( 1, t.count( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ) );
+assert.eq(1, t.count({'a.b': {$gt: 2}, 'a.c': {$lt: 4}}));
+assert.eq(1, t.count({a: {b: 3, c: 6}, 'a.c': {$lt: 4}}));
// Check reverse direction.
-assert.eq( 1, t.find( { 'a.b':{ $gt:2 }, 'a.c': { $lt:4 } } ).sort( {'a.b':-1} ).itcount() );
-assert.eq( 1, t.find( { a:{ b:3, c:6 }, 'a.c': { $lt:4 } } ).sort( {a:-1} ).itcount() );
+assert.eq(1, t.find({'a.b': {$gt: 2}, 'a.c': {$lt: 4}}).sort({'a.b': -1}).itcount());
+assert.eq(1, t.find({a: {b: 3, c: 6}, 'a.c': {$lt: 4}}).sort({a: -1}).itcount());
// Check second field is constrained if first is not.
-assert.eq( 1, t.find( { 'a.c': { $lt:4 } } ).hint( {'a.b':1,'a.c':1} ).itcount() );
-assert.eq( 1, t.find( { 'a.c': { $lt:4 } } ).hint( {a:1,'a.c':1} ).itcount() );
+assert.eq(1, t.find({'a.c': {$lt: 4}}).hint({'a.b': 1, 'a.c': 1}).itcount());
+assert.eq(1, t.find({'a.c': {$lt: 4}}).hint({a: 1, 'a.c': 1}).itcount());
diff --git a/jstests/core/indexs.js b/jstests/core/indexs.js
index 0b7bfe412c4..2fc9724a590 100644
--- a/jstests/core/indexs.js
+++ b/jstests/core/indexs.js
@@ -1,18 +1,19 @@
-// Test index key generation issue with parent and nested fields in same index and array containing subobject SERVER-3005.
+// Test index key generation issue with parent and nested fields in same index and array containing
+// subobject SERVER-3005.
t = db.jstests_indexs;
t.drop();
-t.ensureIndex( {a:1} );
-t.save( { a: [ { b: 3 } ] } );
-assert.eq( 1, t.count( { a:{ b:3 } } ) );
+t.ensureIndex({a: 1});
+t.save({a: [{b: 3}]});
+assert.eq(1, t.count({a: {b: 3}}));
t.drop();
-t.ensureIndex( {a:1,'a.b':1} );
-t.save( { a: { b: 3 } } );
-assert.eq( 1, t.count( { a:{ b:3 } } ) );
+t.ensureIndex({a: 1, 'a.b': 1});
+t.save({a: {b: 3}});
+assert.eq(1, t.count({a: {b: 3}}));
t.drop();
-t.ensureIndex( {a:1,'a.b':1} );
-t.save( { a: [ { b: 3 } ] } );
-assert.eq( 1, t.count( { a:{ b:3 } } ) );
+t.ensureIndex({a: 1, 'a.b': 1});
+t.save({a: [{b: 3}]});
+assert.eq(1, t.count({a: {b: 3}}));
diff --git a/jstests/core/indext.js b/jstests/core/indext.js
index 134e81acdeb..163a3e4a8a3 100644
--- a/jstests/core/indext.js
+++ b/jstests/core/indext.js
@@ -3,16 +3,16 @@
t = db.jstests_indext;
t.drop();
-t.ensureIndex( {'a.b':1}, {sparse:true} );
-t.save( {a:[]} );
-t.save( {a:1} );
-assert.eq( 0, t.find().hint( {'a.b':1} ).itcount() );
+t.ensureIndex({'a.b': 1}, {sparse: true});
+t.save({a: []});
+t.save({a: 1});
+assert.eq(0, t.find().hint({'a.b': 1}).itcount());
-t.ensureIndex( {'a.b':1,'a.c':1}, {sparse:true} );
-t.save( {a:[]} );
-t.save( {a:1} );
-assert.eq( 0, t.find().hint( {'a.b':1,'a.c':1} ).itcount() );
+t.ensureIndex({'a.b': 1, 'a.c': 1}, {sparse: true});
+t.save({a: []});
+t.save({a: 1});
+assert.eq(0, t.find().hint({'a.b': 1, 'a.c': 1}).itcount());
-t.save( {a:[{b:1}]} );
-t.save( {a:1} );
-assert.eq( 1, t.find().hint( {'a.b':1,'a.c':1} ).itcount() );
+t.save({a: [{b: 1}]});
+t.save({a: 1});
+assert.eq(1, t.find().hint({'a.b': 1, 'a.c': 1}).itcount());
diff --git a/jstests/core/indexu.js b/jstests/core/indexu.js
index d1ef13d0468..923356bf79b 100644
--- a/jstests/core/indexu.js
+++ b/jstests/core/indexu.js
@@ -4,105 +4,112 @@
t = db.jstests_indexu;
t.drop();
-var dupDoc = {a:[{'0':1}]}; // There are two 'a.0' fields in this doc.
-var dupDoc2 = {a:[{'1':1},'c']};
-var noDupDoc = {a:[{'1':1}]};
+var dupDoc = {
+ a: [{'0': 1}]
+}; // There are two 'a.0' fields in this doc.
+var dupDoc2 = {
+ a: [{'1': 1}, 'c']
+};
+var noDupDoc = {
+ a: [{'1': 1}]
+};
// Test that we can't index dupDoc.
-assert.writeOK( t.save( dupDoc ));
-assert.commandFailed(t.ensureIndex( {'a.0':1} ));
+assert.writeOK(t.save(dupDoc));
+assert.commandFailed(t.ensureIndex({'a.0': 1}));
t.remove({});
-assert.commandWorked(t.ensureIndex( {'a.0':1} ));
-assert.writeError( t.save( dupDoc ));
+assert.commandWorked(t.ensureIndex({'a.0': 1}));
+assert.writeError(t.save(dupDoc));
// Test that we can't index dupDoc2.
t.drop();
-assert.writeOK(t.save( dupDoc2 ));
-assert.commandFailed(t.ensureIndex( {'a.1':1} ));
+assert.writeOK(t.save(dupDoc2));
+assert.commandFailed(t.ensureIndex({'a.1': 1}));
t.remove({});
-assert.commandWorked(t.ensureIndex( {'a.1':1} ));
-assert.writeError(t.save( dupDoc2 ));
+assert.commandWorked(t.ensureIndex({'a.1': 1}));
+assert.writeError(t.save(dupDoc2));
// Test that we can index dupDoc with a different index.
t.drop();
-t.ensureIndex( {'a.b':1} );
-assert.writeOK(t.save( dupDoc ));
+t.ensureIndex({'a.b': 1});
+assert.writeOK(t.save(dupDoc));
// Test number field starting with hyphen.
t.drop();
-t.ensureIndex( {'a.-1':1} );
-assert.writeOK(t.save( {a:[{'-1':1}]} ));
+t.ensureIndex({'a.-1': 1});
+assert.writeOK(t.save({a: [{'-1': 1}]}));
// Test number field starting with zero.
t.drop();
-t.ensureIndex( {'a.00':1} );
-assert.writeOK( t.save( {a:[{'00':1}]} ));
+t.ensureIndex({'a.00': 1});
+assert.writeOK(t.save({a: [{'00': 1}]}));
// Test multiple array indexes
t.drop();
-t.ensureIndex( {'a.0':1,'a.1':1} );
-assert.writeOK( t.save( {a:[{'1':1}]} ));
-assert.writeError( t.save( {a:[{'1':1},4]} ));
+t.ensureIndex({'a.0': 1, 'a.1': 1});
+assert.writeOK(t.save({a: [{'1': 1}]}));
+assert.writeError(t.save({a: [{'1': 1}, 4]}));
// Test that we can index noDupDoc.
t.drop();
-t.save( noDupDoc );
-assert.commandWorked(t.ensureIndex( {'a.0':1} ));
-assert.commandWorked(t.ensureIndex( {'a.1':1} ));
+t.save(noDupDoc);
+assert.commandWorked(t.ensureIndex({'a.0': 1}));
+assert.commandWorked(t.ensureIndex({'a.1': 1}));
t.drop();
-t.ensureIndex( {'a.0':1} );
-t.ensureIndex( {'a.1':1} );
-assert.writeOK(t.save( noDupDoc ));
+t.ensureIndex({'a.0': 1});
+t.ensureIndex({'a.1': 1});
+assert.writeOK(t.save(noDupDoc));
// Test that we can query noDupDoc.
-assert.eq( 1, t.find( {'a.1':1} ).hint( {'a.1':1} ).itcount() );
-assert.eq( 1, t.find( {'a.1':1} ).hint( {$natural:1} ).itcount() );
-assert.eq( 1, t.find( {'a.0':{'1':1}} ).hint( {'a.0':1} ).itcount() );
-assert.eq( 1, t.find( {'a.0':{'1':1}} ).hint( {$natural:1} ).itcount() );
+assert.eq(1, t.find({'a.1': 1}).hint({'a.1': 1}).itcount());
+assert.eq(1, t.find({'a.1': 1}).hint({$natural: 1}).itcount());
+assert.eq(1, t.find({'a.0': {'1': 1}}).hint({'a.0': 1}).itcount());
+assert.eq(1, t.find({'a.0': {'1': 1}}).hint({$natural: 1}).itcount());
// Check multiple nested array fields.
t.drop();
-t.save( {a:[[1]]} );
-assert.commandWorked(t.ensureIndex( {'a.0.0':1} ));
-assert.eq( 1, t.find( {'a.0.0':1} ).hint( {$natural:1} ).itcount() );
-assert.eq( 1, t.find( {'a.0.0':1} ).hint( {'a.0.0':1} ).itcount() );
+t.save({a: [[1]]});
+assert.commandWorked(t.ensureIndex({'a.0.0': 1}));
+assert.eq(1, t.find({'a.0.0': 1}).hint({$natural: 1}).itcount());
+assert.eq(1, t.find({'a.0.0': 1}).hint({'a.0.0': 1}).itcount());
-// Check where there is a duplicate for a partially addressed field but not for a fully addressed field.
+// Check where there is a duplicate for a partially addressed field but not for a fully addressed
+// field.
t.drop();
-t.save( {a:[[1],{'0':1}]} );
-assert.commandFailed(t.ensureIndex( {'a.0.0':1} ));
+t.save({a: [[1], {'0': 1}]});
+assert.commandFailed(t.ensureIndex({'a.0.0': 1}));
// Check where there is a duplicate for a fully addressed field.
t.drop();
-assert.writeOK( t.save( {a:[[1],{'0':[1]}]} ));
-assert.commandFailed(t.ensureIndex( {'a.0.0':1} ));
+assert.writeOK(t.save({a: [[1], {'0': [1]}]}));
+assert.commandFailed(t.ensureIndex({'a.0.0': 1}));
// Two ways of addressing parse to an array.
t.drop();
-t.save( {a:[{'0':1}]} );
-assert.commandFailed(t.ensureIndex( {'a.0.0':1} ));
+t.save({a: [{'0': 1}]});
+assert.commandFailed(t.ensureIndex({'a.0.0': 1}));
// Test several key depths - with same arrays being found.
t.drop();
-t.save( {a:[{'0':[{'0':1}]}]} );
-assert.commandFailed(t.ensureIndex( {'a.0.0.0.0.0.0':1} ));
-assert.commandFailed(t.ensureIndex( {'a.0.0.0.0.0':1} ));
-assert.commandFailed(t.ensureIndex( {'a.0.0.0.0':1} ));
-assert.commandFailed(t.ensureIndex( {'a.0.0.0':1} ));
-assert.commandFailed(t.ensureIndex( {'a.0.0':1} ));
-assert.commandFailed(t.ensureIndex( {'a.0':1} ));
-assert.commandWorked(t.ensureIndex( {'a':1} ));
+t.save({a: [{'0': [{'0': 1}]}]});
+assert.commandFailed(t.ensureIndex({'a.0.0.0.0.0.0': 1}));
+assert.commandFailed(t.ensureIndex({'a.0.0.0.0.0': 1}));
+assert.commandFailed(t.ensureIndex({'a.0.0.0.0': 1}));
+assert.commandFailed(t.ensureIndex({'a.0.0.0': 1}));
+assert.commandFailed(t.ensureIndex({'a.0.0': 1}));
+assert.commandFailed(t.ensureIndex({'a.0': 1}));
+assert.commandWorked(t.ensureIndex({'a': 1}));
// Two prefixes extract docs, but one terminates extraction before array.
t.drop();
-t.save( {a:[{'0':{'c':[]}}]} );
-assert.commandFailed(t.ensureIndex( {'a.0.c':1} ));
+t.save({a: [{'0': {'c': []}}]});
+assert.commandFailed(t.ensureIndex({'a.0.c': 1}));
t.drop();
-t.save( {a:[[{'b':1}]]} );
-assert.eq( 1, t.find( {'a.0.b':1} ).itcount() );
-t.ensureIndex( {'a.0.b':1} );
-assert.eq( 1, t.find( {'a.0.b':1} ).itcount() );
+t.save({a: [[{'b': 1}]]});
+assert.eq(1, t.find({'a.0.b': 1}).itcount());
+t.ensureIndex({'a.0.b': 1});
+assert.eq(1, t.find({'a.0.b': 1}).itcount());
diff --git a/jstests/core/indexv.js b/jstests/core/indexv.js
index 95074da7333..638de158aa4 100644
--- a/jstests/core/indexv.js
+++ b/jstests/core/indexv.js
@@ -3,16 +3,16 @@
t = db.jstests_indexv;
t.drop();
-t.ensureIndex( {'a.b':1} );
+t.ensureIndex({'a.b': 1});
-t.save( {a:[{},{b:1}]} );
-var e = t.find( {'a.b':null} ).explain("executionStats");
-assert.eq( 1, e.executionStats.nReturned );
-assert.eq( 1, e.executionStats.totalKeysExamined );
+t.save({a: [{}, {b: 1}]});
+var e = t.find({'a.b': null}).explain("executionStats");
+assert.eq(1, e.executionStats.nReturned);
+assert.eq(1, e.executionStats.totalKeysExamined);
t.drop();
-t.ensureIndex( {'a.b.c':1} );
-t.save( {a:[{b:[]},{b:{c:1}}]} );
-var e = t.find( {'a.b.c':null} ).explain("executionStats");
-assert.eq( 0, e.executionStats.nReturned );
-assert.eq( 1, e.executionStats.totalKeysExamined );
+t.ensureIndex({'a.b.c': 1});
+t.save({a: [{b: []}, {b: {c: 1}}]});
+var e = t.find({'a.b.c': null}).explain("executionStats");
+assert.eq(0, e.executionStats.nReturned);
+assert.eq(1, e.executionStats.totalKeysExamined);
diff --git a/jstests/core/insert1.js b/jstests/core/insert1.js
index ff53e2b2bd7..0f4f6977a1a 100644
--- a/jstests/core/insert1.js
+++ b/jstests/core/insert1.js
@@ -1,14 +1,19 @@
t = db.insert1;
t.drop();
-var o = {a:1};
+var o = {
+ a: 1
+};
t.insert(o);
var doc = t.findOne();
assert.eq(1, doc.a);
assert(doc._id != null, tojson(doc));
t.drop();
-o = {a:2, _id:new ObjectId()};
+o = {
+ a: 2,
+ _id: new ObjectId()
+};
var id = o._id;
t.insert(o);
doc = t.findOne();
@@ -16,7 +21,10 @@ assert.eq(2, doc.a);
assert.eq(id, doc._id);
t.drop();
-o = {a:3, _id:"asdf"};
+o = {
+ a: 3,
+ _id: "asdf"
+};
id = o._id;
t.insert(o);
doc = t.findOne();
@@ -24,7 +32,10 @@ assert.eq(3, doc.a);
assert.eq(id, doc._id);
t.drop();
-o = {a:4, _id:null};
+o = {
+ a: 4,
+ _id: null
+};
t.insert(o);
doc = t.findOne();
assert.eq(4, doc.a);
@@ -33,8 +44,10 @@ assert.eq(null, doc._id, tojson(doc));
t.drop();
var toInsert = [];
var count = 100 * 1000;
-for (i = 0; i < count; ++i) { toInsert.push({_id: i, a: 5}); }
+for (i = 0; i < count; ++i) {
+ toInsert.push({_id: i, a: 5});
+}
assert.writeOK(t.insert(toInsert));
-doc = t.findOne({_id:1});
+doc = t.findOne({_id: 1});
assert.eq(5, doc.a);
assert.eq(count, t.count(), "bad count");
diff --git a/jstests/core/insert2.js b/jstests/core/insert2.js
index 4d5de35bb36..f01fd153d0c 100644
--- a/jstests/core/insert2.js
+++ b/jstests/core/insert2.js
@@ -7,8 +7,8 @@ conn.forceWriteMode(db.getMongo().writeMode());
t = conn.getDB(db.getName()).insert2;
t.drop();
-assert.isnull( t.findOne() , "A" );
-assert.writeError(t.insert( { z : 1 , $inc : { x : 1 } } , 0, true ));
-assert.isnull( t.findOne() , "B" );
+assert.isnull(t.findOne(), "A");
+assert.writeError(t.insert({z: 1, $inc: {x: 1}}, 0, true));
+assert.isnull(t.findOne(), "B");
// Collection should not exist
-assert.commandFailed( t.stats() );
+assert.commandFailed(t.stats());
diff --git a/jstests/core/insert_id_undefined.js b/jstests/core/insert_id_undefined.js
index d6b9008fbdf..874d6c0228c 100644
--- a/jstests/core/insert_id_undefined.js
+++ b/jstests/core/insert_id_undefined.js
@@ -1,7 +1,7 @@
// ensure a document with _id undefined cannot be saved
t = db.insert_id_undefined;
t.drop();
-t.insert({_id:undefined});
+t.insert({_id: undefined});
assert.eq(t.count(), 0);
// Make sure the collection was not created
-assert.commandFailed( t.stats() );
+assert.commandFailed(t.stats());
diff --git a/jstests/core/insert_illegal_doc.js b/jstests/core/insert_illegal_doc.js
index 0d92653d3a0..97edeba4f17 100644
--- a/jstests/core/insert_illegal_doc.js
+++ b/jstests/core/insert_illegal_doc.js
@@ -20,7 +20,7 @@ assert.eq(0, coll.find().itcount(), "should not be a doc");
// test update
res = coll.insert({_id: 1});
assert.writeOK(res, "insert failed");
-res = coll.update({_id: 1}, {$set : { a : [1, 2, 3], b: [4, 5, 6]}});
+res = coll.update({_id: 1}, {$set: {a: [1, 2, 3], b: [4, 5, 6]}});
assert.writeError(res);
assert.eq(res.getWriteError().code, 10088);
assert.eq(undefined, coll.findOne().a, "update should have failed");
diff --git a/jstests/core/insert_long_index_key.js b/jstests/core/insert_long_index_key.js
index 6379c36fb4a..934b51b0369 100644
--- a/jstests/core/insert_long_index_key.js
+++ b/jstests/core/insert_long_index_key.js
@@ -2,9 +2,9 @@ t = db.insert_long_index_key;
t.drop();
var s = new Array(2000).toString();
-t.ensureIndex( { x : 1 } );
+t.ensureIndex({x: 1});
-t.insert({ x: 1 });
-t.insert({ x: s });
+t.insert({x: 1});
+t.insert({x: s});
-assert.eq( 1, t.count() );
+assert.eq(1, t.count());
diff --git a/jstests/core/invalid_db_name.js b/jstests/core/invalid_db_name.js
index 58cabab7c00..4b9fb7ba895 100644
--- a/jstests/core/invalid_db_name.js
+++ b/jstests/core/invalid_db_name.js
@@ -5,20 +5,21 @@
// This is a hack to bypass invalid database name checking by the DB constructor
invalidDB._name = "Invalid DB Name";
- var doWrite = function() { return invalidDB.coll.insert({x: 1}); };
+ var doWrite = function() {
+ return invalidDB.coll.insert({x: 1});
+ };
// This will return a $err style error message if we use OP_INSERT, but a
// {ok: 0, errmsg: "...", code: ...} style response if we use write commands.
if (db.getMongo().writeMode() == "compatibility") {
assert.throws(doWrite);
- }
- else {
+ } else {
assert.writeError(doWrite());
}
// Ensure that no database was created
- var dbList = db.getSiblingDB('admin').runCommand({ listDatabases : 1 }).databases;
- dbList.forEach(function (dbInfo) {
+ var dbList = db.getSiblingDB('admin').runCommand({listDatabases: 1}).databases;
+ dbList.forEach(function(dbInfo) {
assert.neq('Invalid DB Name', dbInfo.name, 'database with invalid name was created');
});
}());
diff --git a/jstests/core/ismaster.js b/jstests/core/ismaster.js
index cae8c848044..d920e20383b 100644
--- a/jstests/core/ismaster.js
+++ b/jstests/core/ismaster.js
@@ -1,23 +1,32 @@
var res = db.isMaster();
// check that the fields that should be there are there and have proper values
-assert( res.maxBsonObjectSize &&
- isNumber(res.maxBsonObjectSize) &&
- res.maxBsonObjectSize > 0, "maxBsonObjectSize possibly missing:" + tojson(res));
-assert( res.maxMessageSizeBytes &&
- isNumber(res.maxMessageSizeBytes) &&
- res.maxBsonObjectSize > 0, "maxMessageSizeBytes possibly missing:" + tojson(res));
-assert( res.maxWriteBatchSize &&
- isNumber(res.maxWriteBatchSize) &&
- res.maxWriteBatchSize > 0, "maxWriteBatchSize possibly missing:" + tojson(res));
+assert(res.maxBsonObjectSize && isNumber(res.maxBsonObjectSize) && res.maxBsonObjectSize > 0,
+ "maxBsonObjectSize possibly missing:" + tojson(res));
+assert(res.maxMessageSizeBytes && isNumber(res.maxMessageSizeBytes) && res.maxBsonObjectSize > 0,
+ "maxMessageSizeBytes possibly missing:" + tojson(res));
+assert(res.maxWriteBatchSize && isNumber(res.maxWriteBatchSize) && res.maxWriteBatchSize > 0,
+ "maxWriteBatchSize possibly missing:" + tojson(res));
assert(res.ismaster, "ismaster missing or false:" + tojson(res));
assert(res.localTime, "localTime possibly missing:" + tojson(res));
-
if (!testingReplication) {
var badFields = [];
- var unwantedReplSetFields = ["setName", "setVersion", "secondary", "hosts", "passives",
- "arbiters", "primary", "aribterOnly", "passive",
- "slaveDelay", "hidden", "tags", "buildIndexes", "me"];
+ var unwantedReplSetFields = [
+ "setName",
+ "setVersion",
+ "secondary",
+ "hosts",
+ "passives",
+ "arbiters",
+ "primary",
+ "aribterOnly",
+ "passive",
+ "slaveDelay",
+ "hidden",
+ "tags",
+ "buildIndexes",
+ "me"
+ ];
// check that the fields that shouldn't be there are not there
for (field in res) {
if (!res.hasOwnProperty(field)) {
@@ -27,6 +36,6 @@ if (!testingReplication) {
badFields.push(field);
}
}
- assert(badFields.length === 0, "\nthe result:\n" + tojson(res)
- + "\ncontained fields it shouldn't have: " + badFields);
+ assert(badFields.length === 0,
+ "\nthe result:\n" + tojson(res) + "\ncontained fields it shouldn't have: " + badFields);
}
diff --git a/jstests/core/js1.js b/jstests/core/js1.js
index 240d9f82fbb..89910f4bd23 100644
--- a/jstests/core/js1.js
+++ b/jstests/core/js1.js
@@ -1,12 +1,22 @@
t = db.jstests_js1;
-t.remove( {} );
+t.remove({});
-t.save( { z : 1 } );
-t.save( { z : 2 } );
-assert( 2 == t.find().length() );
-assert( 2 == t.find( { $where : function(){ return 1; } } ).length() );
-assert( 1 == t.find( { $where : function(){ return obj.z == 2; } } ).length() );
+t.save({z: 1});
+t.save({z: 2});
+assert(2 == t.find().length());
+assert(2 ==
+ t.find({
+ $where: function() {
+ return 1;
+ }
+ }).length());
+assert(1 ==
+ t.find({
+ $where: function() {
+ return obj.z == 2;
+ }
+ }).length());
assert(t.validate().valid);
diff --git a/jstests/core/js2.js b/jstests/core/js2.js
index 54c919ac1ba..9dfb5c0b091 100644
--- a/jstests/core/js2.js
+++ b/jstests/core/js2.js
@@ -1,23 +1,23 @@
t = db.jstests_js2;
-t.remove( {} );
+t.remove({});
t2 = db.jstests_js2_2;
-t2.remove( {} );
+t2.remove({});
-assert.eq( 0 , t2.find().length() , "A" );
+assert.eq(0, t2.find().length(), "A");
-t.save( { z : 1 } );
-t.save( { z : 2 } );
-assert.throws( function(){
- t.find( { $where :
- function(){
- db.jstests_js2_2.save( { y : 1 } );
- return 1;
- }
- } ).forEach( printjson );
-} , null , "can't save from $where" );
+t.save({z: 1});
+t.save({z: 2});
+assert.throws(function() {
+ t.find({
+ $where: function() {
+ db.jstests_js2_2.save({y: 1});
+ return 1;
+ }
+ }).forEach(printjson);
+}, null, "can't save from $where");
-assert.eq( 0 , t2.find().length() , "B" );
+assert.eq(0, t2.find().length(), "B");
-assert(t.validate().valid , "E");
+assert(t.validate().valid, "E");
diff --git a/jstests/core/js3.js b/jstests/core/js3.js
index 97ed0bfb834..36d16051135 100644
--- a/jstests/core/js3.js
+++ b/jstests/core/js3.js
@@ -1,76 +1,88 @@
t = db.jstests_js3;
-debug = function( s ){
- //printjson( s );
+debug = function(s) {
+ // printjson( s );
};
-for( z = 0; z < 2; z++ ) {
+for (z = 0; z < 2; z++) {
debug(z);
-
+
t.drop();
-
- if( z > 0 ) {
- t.ensureIndex({_id:1});
- t.ensureIndex({i:1});
+
+ if (z > 0) {
+ t.ensureIndex({_id: 1});
+ t.ensureIndex({i: 1});
}
-
- for( i = 0; i < 1000; i++ )
- t.save( { i:i, z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } );
-
- assert( 33 == db.dbEval(function() { return 33; } ) );
-
- db.dbEval( function() { db.jstests_js3.save({i:-1, z:"server side"}); } );
-
- assert( t.findOne({i:-1}) );
-
- assert( 2 == t.find( { $where :
- function(){
- return obj.i == 7 || obj.i == 8;
- }
- } ).length() );
-
-
+
+ for (i = 0; i < 1000; i++)
+ t.save({
+ i: i,
+ z:
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ });
+
+ assert(33 ==
+ db.dbEval(function() {
+ return 33;
+ }));
+
+ db.dbEval(function() {
+ db.jstests_js3.save({i: -1, z: "server side"});
+ });
+
+ assert(t.findOne({i: -1}));
+
+ assert(2 ==
+ t.find({
+ $where: function() {
+ return obj.i == 7 || obj.i == 8;
+ }
+ }).length());
+
// NPE test
var ok = false;
try {
- var x = t.find( { $where :
- function(){
- asdf.asdf.f.s.s();
- }
- } );
- debug( x.length() );
- debug( tojson( x ) );
- }
- catch(e) {
- ok = true;
+ var x = t.find({
+ $where: function() {
+ asdf.asdf.f.s.s();
+ }
+ });
+ debug(x.length());
+ debug(tojson(x));
+ } catch (e) {
+ ok = true;
}
- debug( ok );
+ debug(ok);
assert(ok);
-
- t.ensureIndex({z:1});
- t.ensureIndex({q:1});
-
- debug( "before indexed find" );
-
- arr = t.find( { $where :
- function(){
- return obj.i == 7 || obj.i == 8;
- }
- } ).toArray();
- debug( arr );
- assert.eq( 2, arr.length );
-
- debug( "after indexed find" );
-
- for( i = 1000; i < 2000; i++ )
- t.save( { i:i, z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } );
-
- assert( t.find().count() == 2001 );
-
- assert( t.validate().valid );
-
- debug( "done iter" );
+
+ t.ensureIndex({z: 1});
+ t.ensureIndex({q: 1});
+
+ debug("before indexed find");
+
+ arr = t.find({
+ $where: function() {
+ return obj.i == 7 || obj.i == 8;
+ }
+ }).toArray();
+ debug(arr);
+ assert.eq(2, arr.length);
+
+ debug("after indexed find");
+
+ for (i = 1000; i < 2000; i++)
+ t.save({
+ i: i,
+ z:
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ });
+
+ assert(t.find().count() == 2001);
+
+ assert(t.validate().valid);
+
+ debug("done iter");
}
t.drop(); \ No newline at end of file
diff --git a/jstests/core/js4.js b/jstests/core/js4.js
index 3afa4720dd6..51a85182866 100644
--- a/jstests/core/js4.js
+++ b/jstests/core/js4.js
@@ -1,49 +1,50 @@
t = db.jstests_js4;
t.drop();
-real = { a : 1 ,
- b : "abc" ,
- c : /abc/i ,
- d : new Date(111911100111) ,
- e : null ,
- f : true
- };
-
-t.save( real );
-
-assert.eq( "/abc/i" , real.c.toString() , "regex 1" );
-
-var cursor = t.find( { $where :
- function(){
- fullObject;
- assert.eq( 7 , Object.keySet( obj ).length , "A" );
- assert.eq( 1 , obj.a , "B" );
- assert.eq( "abc" , obj.b , "C" );
- assert.eq( "/abc/i" , obj.c.toString() , "D" );
- assert.eq( 111911100111 , obj.d.getTime() , "E" );
- assert( obj.f , "F" );
- assert( ! obj.e , "G" );
-
- return true;
- }
- } );
-assert.eq( 1 , cursor.toArray().length );
-assert.eq( "abc" , cursor[0].b );
+real = {
+ a: 1,
+ b: "abc",
+ c: /abc/i,
+ d: new Date(111911100111),
+ e: null,
+ f: true
+};
+
+t.save(real);
+
+assert.eq("/abc/i", real.c.toString(), "regex 1");
+
+var cursor = t.find({
+ $where: function() {
+ fullObject;
+ assert.eq(7, Object.keySet(obj).length, "A");
+ assert.eq(1, obj.a, "B");
+ assert.eq("abc", obj.b, "C");
+ assert.eq("/abc/i", obj.c.toString(), "D");
+ assert.eq(111911100111, obj.d.getTime(), "E");
+ assert(obj.f, "F");
+ assert(!obj.e, "G");
+
+ return true;
+ }
+});
+assert.eq(1, cursor.toArray().length);
+assert.eq("abc", cursor[0].b);
// ---
t.drop();
-t.save( { a : 2 , b : { c : 7 , d : "d is good" } } );
-var cursor = t.find( { $where :
- function(){
- fullObject;
- assert.eq( 3 , Object.keySet( obj ).length );
- assert.eq( 2 , obj.a );
- assert.eq( 7 , obj.b.c );
- assert.eq( "d is good" , obj.b.d );
- return true;
- }
- } );
-assert.eq( 1 , cursor.toArray().length );
+t.save({a: 2, b: {c: 7, d: "d is good"}});
+var cursor = t.find({
+ $where: function() {
+ fullObject;
+ assert.eq(3, Object.keySet(obj).length);
+ assert.eq(2, obj.a);
+ assert.eq(7, obj.b.c);
+ assert.eq("d is good", obj.b.d);
+ return true;
+ }
+});
+assert.eq(1, cursor.toArray().length);
assert(t.validate().valid);
diff --git a/jstests/core/js5.js b/jstests/core/js5.js
index 8fa45a0afe3..c02b451de42 100644
--- a/jstests/core/js5.js
+++ b/jstests/core/js5.js
@@ -2,9 +2,9 @@
t = db.jstests_js5;
t.drop();
-t.save( { a : 1 } );
-t.save( { a : 2 } );
+t.save({a: 1});
+t.save({a: 2});
-assert.eq( 2 , t.find( { "$where" : "this.a" } ).count() , "A" );
-assert.eq( 0 , t.find( { "$where" : "this.b" } ).count() , "B" );
-assert.eq( 0 , t.find( { "$where" : "this.b > 45" } ).count() , "C" );
+assert.eq(2, t.find({"$where": "this.a"}).count(), "A");
+assert.eq(0, t.find({"$where": "this.b"}).count(), "B");
+assert.eq(0, t.find({"$where": "this.b > 45"}).count(), "C");
diff --git a/jstests/core/js7.js b/jstests/core/js7.js
index d12e207379e..aeaec66ff47 100644
--- a/jstests/core/js7.js
+++ b/jstests/core/js7.js
@@ -1,5 +1,7 @@
t = db.jstests_js7;
t.drop();
-assert.eq( 17 , db.eval( function( foo ){ return foo; } , 17 ) );
-
+assert.eq(17,
+ db.eval(function(foo) {
+ return foo;
+ }, 17));
diff --git a/jstests/core/js8.js b/jstests/core/js8.js
index da2dcc619cd..15b7ff7d7af 100644
--- a/jstests/core/js8.js
+++ b/jstests/core/js8.js
@@ -1,14 +1,35 @@
t = db.jstests_js8;
t.drop();
-t.save( { a : 1 , b : [ 2 , 3 , 4 ] } );
-
-assert.eq( 1 , t.find().length() , "A" );
-assert.eq( 1 , t.find( function(){ return this.a == 1; } ).length() , "B" );
-assert.eq( 1 , t.find( function(){ if ( ! this.b.length ) return true; return this.b.length == 3; } ).length() , "B2" );
-assert.eq( 1 , t.find( function(){ return this.b[0] == 2; } ).length() , "C" );
-assert.eq( 0 , t.find( function(){ return this.b[0] == 3; } ).length() , "D" );
-assert.eq( 1 , t.find( function(){ return this.b[1] == 3; } ).length() , "E" );
+t.save({a: 1, b: [2, 3, 4]});
+assert.eq(1, t.find().length(), "A");
+assert.eq(1,
+ t.find(function() {
+ return this.a == 1;
+ }).length(),
+ "B");
+assert.eq(1,
+ t.find(function() {
+ if (!this.b.length)
+ return true;
+ return this.b.length == 3;
+ }).length(),
+ "B2");
+assert.eq(1,
+ t.find(function() {
+ return this.b[0] == 2;
+ }).length(),
+ "C");
+assert.eq(0,
+ t.find(function() {
+ return this.b[0] == 3;
+ }).length(),
+ "D");
+assert.eq(1,
+ t.find(function() {
+ return this.b[1] == 3;
+ }).length(),
+ "E");
assert(t.validate().valid);
diff --git a/jstests/core/js9.js b/jstests/core/js9.js
index 286adb9a1a4..b29a31afdc4 100644
--- a/jstests/core/js9.js
+++ b/jstests/core/js9.js
@@ -1,24 +1,17 @@
c = db.jstests_js9;
c.drop();
-c.save( { a : 1 } );
-c.save( { a : 2 } );
+c.save({a: 1});
+c.save({a: 2});
+assert.eq(2, c.find().length());
+assert.eq(2, c.find().count());
-assert.eq( 2 , c.find().length() );
-assert.eq( 2 , c.find().count() );
-
-
-assert.eq( 2 ,
- db.eval(
- function(){
- num = 0;
- db.jstests_js9.find().forEach(
- function(z){
- num++;
- }
- );
- return num;
- }
- )
- );
+assert.eq(2,
+ db.eval(function() {
+ num = 0;
+ db.jstests_js9.find().forEach(function(z) {
+ num++;
+ });
+ return num;
+ }));
diff --git a/jstests/core/json1.js b/jstests/core/json1.js
index d4bb8435086..d502a683e06 100644
--- a/jstests/core/json1.js
+++ b/jstests/core/json1.js
@@ -1,28 +1,57 @@
-x = { quotes:"a\"b" , nulls:null };
-eval( "y = " + tojson( x ) );
-assert.eq( tojson( x ) , tojson( y ) , "A" );
-assert.eq( typeof( x.nulls ) , typeof( y.nulls ) , "B" );
+x = {
+ quotes: "a\"b",
+ nulls: null
+};
+eval("y = " + tojson(x));
+assert.eq(tojson(x), tojson(y), "A");
+assert.eq(typeof(x.nulls), typeof(y.nulls), "B");
// each type is parsed properly
-x = {"x" : null, "y" : true, "z" : 123, "w" : "foo", "a": undefined};
-assert.eq(tojson(x,"",false), '{\n\t"x" : null,\n\t"y" : true,\n\t"z" : 123,\n\t"w" : "foo",\n\t"a" : undefined\n}' , "C" );
+x = {
+ "x": null,
+ "y": true,
+ "z": 123,
+ "w": "foo",
+ "a": undefined
+};
+assert.eq(tojson(x, "", false),
+ '{\n\t"x" : null,\n\t"y" : true,\n\t"z" : 123,\n\t"w" : "foo",\n\t"a" : undefined\n}',
+ "C");
-x = {"x" : [], "y" : {}};
-assert.eq(tojson(x,"",false), '{\n\t"x" : [ ],\n\t"y" : {\n\t\t\n\t}\n}' , "D" );
+x = {
+ "x": [],
+ "y": {}
+};
+assert.eq(tojson(x, "", false), '{\n\t"x" : [ ],\n\t"y" : {\n\t\t\n\t}\n}', "D");
// nested
-x = {"x" : [{"x" : [1,2,[]], "z" : "ok", "y" : [[]]}, {"foo" : "bar"}], "y" : null};
-assert.eq(tojson(x), '{\n\t"x" : [\n\t\t{\n\t\t\t"x" : [\n\t\t\t\t1,\n\t\t\t\t2,\n\t\t\t\t[ ]\n\t\t\t],\n\t\t\t"z" : "ok",\n\t\t\t"y" : [\n\t\t\t\t[ ]\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t"foo" : "bar"\n\t\t}\n\t],\n\t"y" : null\n}' , "E" );
+x = {
+ "x": [{"x": [1, 2, []], "z": "ok", "y": [[]]}, {"foo": "bar"}],
+ "y": null
+};
+assert.eq(
+ tojson(x),
+ '{\n\t"x" : [\n\t\t{\n\t\t\t"x" : [\n\t\t\t\t1,\n\t\t\t\t2,\n\t\t\t\t[ ]\n\t\t\t],\n\t\t\t"z" : "ok",\n\t\t\t"y" : [\n\t\t\t\t[ ]\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t"foo" : "bar"\n\t\t}\n\t],\n\t"y" : null\n}',
+ "E");
// special types
-x = {"x" : ObjectId("4ad35a73d2e34eb4fc43579a"), 'z' : /xd?/ig};
-assert.eq(tojson(x,"",false), '{\n\t"x" : ObjectId("4ad35a73d2e34eb4fc43579a"),\n\t"z" : /xd?/gi\n}' , "F" );
+x = {
+ "x": ObjectId("4ad35a73d2e34eb4fc43579a"),
+ 'z': /xd?/ig
+};
+assert.eq(tojson(x, "", false),
+ '{\n\t"x" : ObjectId("4ad35a73d2e34eb4fc43579a"),\n\t"z" : /xd?/gi\n}',
+ "F");
// Timestamp type
-x = {"x" : Timestamp()};
-assert.eq(tojson(x,"",false), '{\n\t"x" : Timestamp(0, 0)\n}' , "G");
+x = {
+ "x": Timestamp()
+};
+assert.eq(tojson(x, "", false), '{\n\t"x" : Timestamp(0, 0)\n}', "G");
// Timestamp type, second
-x = {"x" : Timestamp(10,2)};
-assert.eq(tojson(x,"",false), '{\n\t"x" : Timestamp(10, 2)\n}' , "H");
+x = {
+ "x": Timestamp(10, 2)
+};
+assert.eq(tojson(x, "", false), '{\n\t"x" : Timestamp(10, 2)\n}', "H");
diff --git a/jstests/core/kill_cursors.js b/jstests/core/kill_cursors.js
index 60dc5e2d3e4..0433e29609b 100644
--- a/jstests/core/kill_cursors.js
+++ b/jstests/core/kill_cursors.js
@@ -14,31 +14,22 @@
}
// killCursors command should fail if the collection name is not a string.
- cmdRes = db.runCommand({
- killCursors: {foo: "bad collection param"},
- cursors: [NumberLong(123), NumberLong(456)]
- });
+ cmdRes = db.runCommand(
+ {killCursors: {foo: "bad collection param"}, cursors: [NumberLong(123), NumberLong(456)]});
assert.commandFailedWithCode(cmdRes, ErrorCodes.FailedToParse);
// killCursors command should fail if the cursors parameter is not an array.
- cmdRes = db.runCommand({
- killCursors: coll.getName(),
- cursors: {a: NumberLong(123), b: NumberLong(456)}
- });
+ cmdRes = db.runCommand(
+ {killCursors: coll.getName(), cursors: {a: NumberLong(123), b: NumberLong(456)}});
assert.commandFailedWithCode(cmdRes, ErrorCodes.FailedToParse);
// killCursors command should fail if the cursors parameter is an empty array.
- cmdRes = db.runCommand({
- killCursors: coll.getName(),
- cursors: []
- });
+ cmdRes = db.runCommand({killCursors: coll.getName(), cursors: []});
assert.commandFailedWithCode(cmdRes, ErrorCodes.BadValue);
// killCursors command should report cursors as not found if the collection does not exist.
- cmdRes = db.runCommand({
- killCursors: "non-existent-collection",
- cursors: [NumberLong(123), NumberLong(456)]
- });
+ cmdRes = db.runCommand(
+ {killCursors: "non-existent-collection", cursors: [NumberLong(123), NumberLong(456)]});
assert.commandWorked(cmdRes);
assert.eq(cmdRes.cursorsKilled, []);
assert.eq(cmdRes.cursorsNotFound, [NumberLong(123), NumberLong(456)]);
@@ -46,10 +37,8 @@
assert.eq(cmdRes.cursorsUnknown, []);
// killCursors command should report non-existent cursors as "not found".
- cmdRes = db.runCommand({
- killCursors: coll.getName(),
- cursors: [NumberLong(123), NumberLong(456)]
- });
+ cmdRes =
+ db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), NumberLong(456)]});
assert.commandWorked(cmdRes);
assert.eq(cmdRes.cursorsKilled, []);
assert.eq(cmdRes.cursorsNotFound, [NumberLong(123), NumberLong(456)]);
@@ -62,10 +51,7 @@
cursorId = cmdRes.cursor.id;
assert.neq(cursorId, NumberLong(0));
- cmdRes = db.runCommand({
- killCursors: coll.getName(),
- cursors: [NumberLong(123), cursorId]
- });
+ cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), cursorId]});
assert.commandWorked(cmdRes);
assert.eq(cmdRes.cursorsKilled, [cursorId]);
assert.eq(cmdRes.cursorsNotFound, [NumberLong(123)]);
@@ -78,10 +64,7 @@
cursorId = cmdRes.cursor.id;
assert.neq(cursorId, NumberLong(0));
- cmdRes = db.runCommand({
- killCursors: coll.getName(),
- cursors: [NumberLong(123), cursorId]
- });
+ cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), cursorId]});
assert.commandWorked(cmdRes);
assert.eq(cmdRes.cursorsKilled, [cursorId]);
assert.eq(cmdRes.cursorsNotFound, [NumberLong(123)]);
@@ -95,10 +78,8 @@
var cleanup;
try {
// Enable a failpoint to ensure that the cursor remains pinned.
- assert.commandWorked(db.adminCommand({
- configureFailPoint: failpointName,
- mode: "alwaysOn"
- }));
+ assert.commandWorked(
+ db.adminCommand({configureFailPoint: failpointName, mode: "alwaysOn"}));
cmdRes = db.runCommand({find: coll.getName(), batchSize: 2});
assert.commandWorked(cmdRes);
@@ -110,8 +91,8 @@
var isMongos = (cmdRes.msg === "isdbgrid");
// Pin the cursor during a getMore.
- var code = 'db.runCommand({getMore: ' + cursorId.toString() +
- ', collection: "' + coll.getName() + '"});';
+ var code = 'db.runCommand({getMore: ' + cursorId.toString() + ', collection: "' +
+ coll.getName() + '"});';
cleanup = startParallelShell(code);
// Sleep to make it more likely that the cursor will be pinned.
@@ -122,10 +103,7 @@
//
// Currently, pinned cursors that are targeted by a killCursors operation are kept alive on
// mongod but are killed on mongos (see SERVER-21710).
- cmdRes = db.runCommand({
- killCursors: coll.getName(),
- cursors: [NumberLong(123), cursorId]
- });
+ cmdRes = db.runCommand({killCursors: coll.getName(), cursors: [NumberLong(123), cursorId]});
assert.commandWorked(cmdRes);
assert.eq(cmdRes.cursorsNotFound, [NumberLong(123)]);
assert.eq(cmdRes.cursorsUnknown, []);
@@ -133,15 +111,13 @@
if (isMongos) {
assert.eq(cmdRes.cursorsKilled, [cursorId]);
assert.eq(cmdRes.cursorsAlive, []);
- }
- else {
+ } else {
// If the cursor has already been pinned it will be left alive; otherwise it will be
// killed.
if (cmdRes.cursorsAlive.length === 1) {
assert.eq(cmdRes.cursorsKilled, []);
assert.eq(cmdRes.cursorsAlive, [cursorId]);
- }
- else {
+ } else {
assert.eq(cmdRes.cursorsKilled, [cursorId]);
assert.eq(cmdRes.cursorsAlive, []);
}
diff --git a/jstests/core/killop.js b/jstests/core/killop.js
index f367d3dec33..66476ec10f4 100644
--- a/jstests/core/killop.js
+++ b/jstests/core/killop.js
@@ -15,7 +15,7 @@
t = db.jstests_killop;
t.drop();
-t.save( {x:1} );
+t.save({x: 1});
/**
* This function filters for the operations that we're looking for, based on their state and
@@ -24,42 +24,53 @@ t.save( {x:1} );
function ops() {
p = db.currentOp().inprog;
ids = [];
- for ( var i in p ) {
- var o = p[ i ];
- // We *can't* check for ns, b/c it's not guaranteed to be there unless the query is active, which
- // it may not be in our polling cycle - particularly b/c we sleep every second in both the query and
+ for (var i in p) {
+ var o = p[i];
+ // We *can't* check for ns, b/c it's not guaranteed to be there unless the query is active,
+ // which
+ // it may not be in our polling cycle - particularly b/c we sleep every second in both the
+ // query and
// the assert
- if ( ( o.active || o.waitingForLock ) && o.query && o.query.query && o.query.query.$where && o.query.count == "jstests_killop" ) {
- ids.push( o.opid );
+ if ((o.active || o.waitingForLock) && o.query && o.query.query && o.query.query.$where &&
+ o.query.count == "jstests_killop") {
+ ids.push(o.opid);
}
}
return ids;
}
jsTestLog("Starting long-running $where operation");
-var s1 = startParallelShell(
- "db.jstests_killop.count( { $where: function() { while( 1 ) { ; } } } )" );
-var s2 = startParallelShell(
- "db.jstests_killop.count( { $where: function() { while( 1 ) { ; } } } )" );
+var s1 =
+ startParallelShell("db.jstests_killop.count( { $where: function() { while( 1 ) { ; } } } )");
+var s2 =
+ startParallelShell("db.jstests_killop.count( { $where: function() { while( 1 ) { ; } } } )");
jsTestLog("Finding ops in currentOp() output");
o = [];
-assert.soon(function() { o = ops(); return o.length == 2; },
- { toString: function () { return tojson(db.currentOp().inprog); } },
- 10000);
+assert.soon(
+ function() {
+ o = ops();
+ return o.length == 2;
+ },
+ {
+ toString: function() {
+ return tojson(db.currentOp().inprog);
+ }
+ },
+ 10000);
start = new Date();
jsTestLog("Killing ops");
-db.killOp( o[ 0 ] );
-db.killOp( o[ 1 ] );
+db.killOp(o[0]);
+db.killOp(o[1]);
jsTestLog("Waiting for ops to terminate");
[s1, s2].forEach(function(awaitShell) {
var exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(0, exitCode,
- "expected shell to exit abnormally due to JS execution being terminated");
+ assert.neq(
+ 0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
});
// don't want to pass if timeout killed the js function.
var end = new Date();
var diff = end - start;
-assert.lt( diff, 30000, "Start: " + start + "; end: " + end + "; diff: " + diff); \ No newline at end of file
+assert.lt(diff, 30000, "Start: " + start + "; end: " + end + "; diff: " + diff); \ No newline at end of file
diff --git a/jstests/core/list_collections1.js b/jstests/core/list_collections1.js
index aaf47b50099..04acb82290b 100644
--- a/jstests/core/list_collections1.js
+++ b/jstests/core/list_collections1.js
@@ -6,7 +6,7 @@
// listCollections output, but rather tests for existence or absence of particular collections in
// listCollections output.
-(function () {
+(function() {
"use strict";
var mydb = db.getSiblingDB("list_collections1");
@@ -25,7 +25,9 @@
assert.eq('object', typeof(res.cursor));
assert.eq(0, res.cursor.id);
assert.eq('string', typeof(res.cursor.ns));
- collObj = res.cursor.firstBatch.filter(function(c) { return c.name === "foo"; })[0];
+ collObj = res.cursor.firstBatch.filter(function(c) {
+ return c.name === "foo";
+ })[0];
assert(collObj);
assert.eq('object', typeof(collObj.options));
@@ -34,9 +36,8 @@
//
var getListCollectionsCursor = function(options, subsequentBatchSize) {
- return new DBCommandCursor(mydb.getMongo(),
- mydb.runCommand("listCollections", options),
- subsequentBatchSize);
+ return new DBCommandCursor(
+ mydb.getMongo(), mydb.runCommand("listCollections", options), subsequentBatchSize);
};
var cursorCountMatching = function(cursor, pred) {
@@ -45,8 +46,11 @@
assert.commandWorked(mydb.dropDatabase());
assert.commandWorked(mydb.createCollection("foo"));
- assert.eq(1, cursorCountMatching(getListCollectionsCursor(),
- function(c) { return c.name === "foo"; }));
+ assert.eq(1,
+ cursorCountMatching(getListCollectionsCursor(),
+ function(c) {
+ return c.name === "foo";
+ }));
//
// Test that the collection metadata object is returned correctly.
@@ -55,12 +59,16 @@
assert.commandWorked(mydb.dropDatabase());
assert.commandWorked(mydb.createCollection("foo"));
assert.commandWorked(mydb.createCollection("bar", {temp: true}));
- assert.eq(1, cursorCountMatching(getListCollectionsCursor(),
- function(c) { return c.name === "foo" &&
- c.options.temp === undefined; }));
- assert.eq(1, cursorCountMatching(getListCollectionsCursor(),
- function(c) { return c.name === "bar" &&
- c.options.temp === true; }));
+ assert.eq(1,
+ cursorCountMatching(getListCollectionsCursor(),
+ function(c) {
+ return c.name === "foo" && c.options.temp === undefined;
+ }));
+ assert.eq(1,
+ cursorCountMatching(getListCollectionsCursor(),
+ function(c) {
+ return c.name === "bar" && c.options.temp === true;
+ }));
//
// Test basic usage of "filter" option.
@@ -69,23 +77,29 @@
assert.commandWorked(mydb.dropDatabase());
assert.commandWorked(mydb.createCollection("foo"));
assert.commandWorked(mydb.createCollection("bar", {temp: true}));
- assert.eq(2, cursorCountMatching(getListCollectionsCursor({filter: {}}),
- function(c) { return c.name === "foo" ||
- c.name === "bar"; }));
+ assert.eq(2,
+ cursorCountMatching(getListCollectionsCursor({filter: {}}),
+ function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
assert.eq(2, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount());
assert.eq(1, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount());
assert.eq(1, getListCollectionsCursor({filter: {"options.temp": true}}).itcount());
mydb.foo.drop();
- assert.eq(1, cursorCountMatching(getListCollectionsCursor({filter: {}}),
- function(c) { return c.name === "foo" ||
- c.name === "bar"; }));
+ assert.eq(1,
+ cursorCountMatching(getListCollectionsCursor({filter: {}}),
+ function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
assert.eq(1, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount());
assert.eq(0, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount());
assert.eq(1, getListCollectionsCursor({filter: {"options.temp": true}}).itcount());
mydb.bar.drop();
- assert.eq(0, cursorCountMatching(getListCollectionsCursor({filter: {}}),
- function(c) { return c.name === "foo" ||
- c.name === "bar"; }));
+ assert.eq(0,
+ cursorCountMatching(getListCollectionsCursor({filter: {}}),
+ function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
assert.eq(0, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount());
assert.eq(0, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount());
assert.eq(0, getListCollectionsCursor({filter: {"options.temp": true}}).itcount());
@@ -94,10 +108,18 @@
// Test for invalid values of "filter".
//
- assert.throws(function() { getListCollectionsCursor({filter: {$invalid: 1}}); });
- assert.throws(function() { getListCollectionsCursor({filter: 0}); });
- assert.throws(function() { getListCollectionsCursor({filter: 'x'}); });
- assert.throws(function() { getListCollectionsCursor({filter: []}); });
+ assert.throws(function() {
+ getListCollectionsCursor({filter: {$invalid: 1}});
+ });
+ assert.throws(function() {
+ getListCollectionsCursor({filter: 0});
+ });
+ assert.throws(function() {
+ getListCollectionsCursor({filter: 'x'});
+ });
+ assert.throws(function() {
+ getListCollectionsCursor({filter: []});
+ });
//
// Test basic usage of "cursor.batchSize" option.
@@ -108,32 +130,50 @@
assert.commandWorked(mydb.createCollection("bar"));
cursor = getListCollectionsCursor({cursor: {batchSize: 2}});
assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(2, cursorCountMatching(cursor, function(c) { return c.name === "foo" ||
- c.name === "bar"; }));
+ assert.eq(2,
+ cursorCountMatching(cursor,
+ function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
cursor = getListCollectionsCursor({cursor: {batchSize: 1}});
assert.eq(1, cursor.objsLeftInBatch());
- assert.eq(2, cursorCountMatching(cursor, function(c) { return c.name === "foo" ||
- c.name === "bar"; }));
+ assert.eq(2,
+ cursorCountMatching(cursor,
+ function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
cursor = getListCollectionsCursor({cursor: {batchSize: 0}});
assert.eq(0, cursor.objsLeftInBatch());
- assert.eq(2, cursorCountMatching(cursor, function(c) { return c.name === "foo" ||
- c.name === "bar"; }));
+ assert.eq(2,
+ cursorCountMatching(cursor,
+ function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
cursor = getListCollectionsCursor({cursor: {batchSize: NumberInt(2)}});
assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(2, cursorCountMatching(cursor, function(c) { return c.name === "foo" ||
- c.name === "bar"; }));
+ assert.eq(2,
+ cursorCountMatching(cursor,
+ function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
cursor = getListCollectionsCursor({cursor: {batchSize: NumberLong(2)}});
assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(2, cursorCountMatching(cursor, function(c) { return c.name === "foo" ||
- c.name === "bar"; }));
+ assert.eq(2,
+ cursorCountMatching(cursor,
+ function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
// Test a large batch size, and assert that at least 2 results are returned in the initial
// batch.
cursor = getListCollectionsCursor({cursor: {batchSize: Math.pow(2, 62)}});
assert.lte(2, cursor.objsLeftInBatch());
- assert.eq(2, cursorCountMatching(cursor, function(c) { return c.name === "foo" ||
- c.name === "bar"; }));
+ assert.eq(2,
+ cursorCountMatching(cursor,
+ function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
// Ensure that the server accepts an empty object for "cursor". This is equivalent to not
// specifying "cursor" at all.
@@ -141,21 +181,40 @@
// We do not test for objsLeftInBatch() here, since the default batch size for this command
// is not specified.
cursor = getListCollectionsCursor({cursor: {}});
- assert.eq(2, cursorCountMatching(cursor, function(c) { return c.name === "foo" ||
- c.name === "bar"; }));
+ assert.eq(2,
+ cursorCountMatching(cursor,
+ function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
//
// Test for invalid values of "cursor" and "cursor.batchSize".
//
- assert.throws(function() { getListCollectionsCursor({cursor: 0}); });
- assert.throws(function() { getListCollectionsCursor({cursor: 'x'}); });
- assert.throws(function() { getListCollectionsCursor({cursor: []}); });
- assert.throws(function() { getListCollectionsCursor({cursor: {foo: 1}}); });
- assert.throws(function() { getListCollectionsCursor({cursor: {batchSize: -1}}); });
- assert.throws(function() { getListCollectionsCursor({cursor: {batchSize: 'x'}}); });
- assert.throws(function() { getListCollectionsCursor({cursor: {batchSize: {}}}); });
- assert.throws(function() { getListCollectionsCursor({cursor: {batchSize: 2, foo: 1}}); });
+ assert.throws(function() {
+ getListCollectionsCursor({cursor: 0});
+ });
+ assert.throws(function() {
+ getListCollectionsCursor({cursor: 'x'});
+ });
+ assert.throws(function() {
+ getListCollectionsCursor({cursor: []});
+ });
+ assert.throws(function() {
+ getListCollectionsCursor({cursor: {foo: 1}});
+ });
+ assert.throws(function() {
+ getListCollectionsCursor({cursor: {batchSize: -1}});
+ });
+ assert.throws(function() {
+ getListCollectionsCursor({cursor: {batchSize: 'x'}});
+ });
+ assert.throws(function() {
+ getListCollectionsCursor({cursor: {batchSize: {}}});
+ });
+ assert.throws(function() {
+ getListCollectionsCursor({cursor: {batchSize: 2, foo: 1}});
+ });
//
// Test more than 2 batches of results.
@@ -186,7 +245,11 @@
assert.commandWorked(mydb.dropDatabase());
cursor = getListCollectionsCursor();
- assert.eq(0, cursorCountMatching(cursor, function(c) { return c.name === "foo"; }));
+ assert.eq(0,
+ cursorCountMatching(cursor,
+ function(c) {
+ return c.name === "foo";
+ }));
//
// Test on empty database.
@@ -196,7 +259,11 @@
assert.commandWorked(mydb.createCollection("foo"));
mydb.foo.drop();
cursor = getListCollectionsCursor();
- assert.eq(0, cursorCountMatching(cursor, function(c) { return c.name === "foo"; }));
+ assert.eq(0,
+ cursorCountMatching(cursor,
+ function(c) {
+ return c.name === "foo";
+ }));
//
// Test killCursors against a listCollections cursor.
@@ -211,7 +278,9 @@
res = mydb.runCommand("listCollections", {cursor: {batchSize: 0}});
cursor = new DBCommandCursor(mydb.getMongo(), res, 2);
cursor = null;
- gc(); // Shell will send a killCursors message when cleaning up underlying cursor.
+ gc(); // Shell will send a killCursors message when cleaning up underlying cursor.
cursor = new DBCommandCursor(mydb.getMongo(), res, 2);
- assert.throws(function() { cursor.hasNext(); });
+ assert.throws(function() {
+ cursor.hasNext();
+ });
}());
diff --git a/jstests/core/list_collections_filter.js b/jstests/core/list_collections_filter.js
index 39dd6da235c..4b5c42bbc78 100644
--- a/jstests/core/list_collections_filter.js
+++ b/jstests/core/list_collections_filter.js
@@ -67,16 +67,10 @@
// Filter with $and and $in.
testListCollections({name: {$in: ["lists", /.*_sets$/]}, options: {}},
["lists", "ordered_sets", "unordered_sets"]);
- testListCollections({$and: [
- {name: {$in: ["lists", /.*_sets$/]}},
- {name: "lists"},
- {options: {}},
- ]},
- ["lists"]);
- testListCollections({$and: [
- {name: {$in: ["lists", /.*_sets$/]}},
- {name: "non-existent"},
- {options: {}},
- ]},
- []);
+ testListCollections(
+ {$and: [{name: {$in: ["lists", /.*_sets$/]}}, {name: "lists"}, {options: {}}, ]},
+ ["lists"]);
+ testListCollections(
+ {$and: [{name: {$in: ["lists", /.*_sets$/]}}, {name: "non-existent"}, {options: {}}, ]},
+ []);
}());
diff --git a/jstests/core/list_indexes.js b/jstests/core/list_indexes.js
index db3f895bc20..520406be59f 100644
--- a/jstests/core/list_indexes.js
+++ b/jstests/core/list_indexes.js
@@ -1,6 +1,6 @@
// Basic functional tests for the listIndexes command.
-(function () {
+(function() {
"use strict";
var coll = db.list_indexes1;
@@ -27,17 +27,20 @@
//
var getListIndexesCursor = function(coll, options, subsequentBatchSize) {
- return new DBCommandCursor(coll.getDB().getMongo(),
- coll.runCommand("listIndexes", options),
- subsequentBatchSize);
+ return new DBCommandCursor(
+ coll.getDB().getMongo(), coll.runCommand("listIndexes", options), subsequentBatchSize);
};
var cursorGetIndexSpecs = function(cursor) {
- return cursor.toArray().sort(function(a, b) { return a.name > b.name; });
+ return cursor.toArray().sort(function(a, b) {
+ return a.name > b.name;
+ });
};
var cursorGetIndexNames = function(cursor) {
- return cursorGetIndexSpecs(cursor).map(function(spec) { return spec.name; });
+ return cursorGetIndexSpecs(cursor).map(function(spec) {
+ return spec.name;
+ });
};
coll.drop();
@@ -162,7 +165,9 @@
res = coll.runCommand("listIndexes", {cursor: {batchSize: 0}});
cursor = new DBCommandCursor(coll.getDB().getMongo(), res, 2);
cursor = null;
- gc(); // Shell will send a killCursors message when cleaning up underlying cursor.
+ gc(); // Shell will send a killCursors message when cleaning up underlying cursor.
cursor = new DBCommandCursor(coll.getDB().getMongo(), res, 2);
- assert.throws(function() { cursor.hasNext(); });
+ assert.throws(function() {
+ cursor.hasNext();
+ });
}());
diff --git a/jstests/core/list_indexes_invalid.js b/jstests/core/list_indexes_invalid.js
index 5db1077aecb..0e9c5ffa88c 100644
--- a/jstests/core/list_indexes_invalid.js
+++ b/jstests/core/list_indexes_invalid.js
@@ -6,11 +6,27 @@ coll.drop();
assert.commandWorked(coll.getDB().createCollection(coll.getName()));
assert.commandWorked(coll.ensureIndex({a: 1}, {unique: true}));
-assert.throws(function() { getListIndexesCursor(coll, {cursor: 0}); });
-assert.throws(function() { getListIndexesCursor(coll, {cursor: 'x'}); });
-assert.throws(function() { getListIndexesCursor(coll, {cursor: []}); });
-assert.throws(function() { getListIndexesCursor(coll, {cursor: {foo: 1}}); });
-assert.throws(function() { getListIndexesCursor(coll, {cursor: {batchSize: -1}}); });
-assert.throws(function() { getListIndexesCursor(coll, {cursor: {batchSize: 'x'}}); });
-assert.throws(function() { getListIndexesCursor(coll, {cursor: {batchSize: {}}}); });
-assert.throws(function() { getListIndexesCursor(coll, {cursor: {batchSize: 2, foo: 1}}); });
+assert.throws(function() {
+ getListIndexesCursor(coll, {cursor: 0});
+});
+assert.throws(function() {
+ getListIndexesCursor(coll, {cursor: 'x'});
+});
+assert.throws(function() {
+ getListIndexesCursor(coll, {cursor: []});
+});
+assert.throws(function() {
+ getListIndexesCursor(coll, {cursor: {foo: 1}});
+});
+assert.throws(function() {
+ getListIndexesCursor(coll, {cursor: {batchSize: -1}});
+});
+assert.throws(function() {
+ getListIndexesCursor(coll, {cursor: {batchSize: 'x'}});
+});
+assert.throws(function() {
+ getListIndexesCursor(coll, {cursor: {batchSize: {}}});
+});
+assert.throws(function() {
+ getListIndexesCursor(coll, {cursor: {batchSize: 2, foo: 1}});
+});
diff --git a/jstests/core/loadserverscripts.js b/jstests/core/loadserverscripts.js
index 4288ae973a0..daf87b2475b 100644
--- a/jstests/core/loadserverscripts.js
+++ b/jstests/core/loadserverscripts.js
@@ -15,48 +15,50 @@ x = testdb.system.js.findOne();
assert.isnull(x, "Test for empty collection");
// User functions should not be defined yet
-assert.eq( typeof myfunc, "undefined", "Checking that myfunc() is undefined" );
-assert.eq( typeof myfunc2, "undefined", "Checking that myfunc2() is undefined" );
+assert.eq(typeof myfunc, "undefined", "Checking that myfunc() is undefined");
+assert.eq(typeof myfunc2, "undefined", "Checking that myfunc2() is undefined");
// Insert a function in the context of this process: make sure it's in the collection
-testdb.system.js.insert( { _id: "myfunc", "value": function(){ return "myfunc"; } } );
-testdb.system.js.insert( { _id: "mystring", "value": "var root = this;" } );
-testdb.system.js.insert( { _id: "changeme", "value": false });
+testdb.system.js.insert({
+ _id: "myfunc",
+ "value": function() {
+ return "myfunc";
+ }
+});
+testdb.system.js.insert({_id: "mystring", "value": "var root = this;"});
+testdb.system.js.insert({_id: "changeme", "value": false});
x = testdb.system.js.count();
-assert.eq( x, 3, "Should now be one function in the system.js collection");
+assert.eq(x, 3, "Should now be one function in the system.js collection");
// Set a global variable that will be over-written
var changeme = true;
// Load that function
testdb.loadServerScripts();
-assert.eq( typeof myfunc, "function", "Checking that myfunc() loaded correctly" );
-assert.eq( typeof mystring, "string", "Checking that mystring round-tripped correctly" );
-assert.eq( changeme, false, "Checking that global var was overwritten" );
+assert.eq(typeof myfunc, "function", "Checking that myfunc() loaded correctly");
+assert.eq(typeof mystring, "string", "Checking that mystring round-tripped correctly");
+assert.eq(changeme, false, "Checking that global var was overwritten");
// Make sure it works
x = myfunc();
assert.eq(x, "myfunc", "Checking that myfunc() returns the correct value");
// Insert value into collection from another process
-var coproc = startParallelShell(
- 'db.getSisterDB("loadserverscripts").system.js.insert' +
- ' ( {_id: "myfunc2", "value": function(){ return "myfunc2"; } } );'
- );
+var coproc =
+ startParallelShell('db.getSisterDB("loadserverscripts").system.js.insert' +
+ ' ( {_id: "myfunc2", "value": function(){ return "myfunc2"; } } );');
// wait for results
coproc();
// Make sure the collection's been updated
x = testdb.system.js.count();
-assert.eq( x, 4, "Should now be two functions in the system.js collection");
-
+assert.eq(x, 4, "Should now be two functions in the system.js collection");
// Load the new functions: test them as above
testdb.loadServerScripts();
-assert.eq( typeof myfunc2, "function", "Checking that myfunc2() loaded correctly" );
+assert.eq(typeof myfunc2, "function", "Checking that myfunc2() loaded correctly");
x = myfunc2();
assert.eq(x, "myfunc2", "Checking that myfunc2() returns the correct value");
jsTest.log("completed test of db.loadServerScripts()");
-
diff --git a/jstests/core/loglong.js b/jstests/core/loglong.js
index 0a8889c2b25..0447d915d10 100644
--- a/jstests/core/loglong.js
+++ b/jstests/core/loglong.js
@@ -4,26 +4,28 @@
t = db.loglong;
t.drop();
-t.insert( { x : 1 } );
+t.insert({x: 1});
n = 0;
-query = { x : [] };
-while ( Object.bsonsize( query ) < 30000 ) {
- query.x.push( n++ );
+query = {
+ x: []
+};
+while (Object.bsonsize(query) < 30000) {
+ query.x.push(n++);
}
-before = db.adminCommand( { setParameter : 1 , logLevel : 1 } );
+before = db.adminCommand({setParameter: 1, logLevel: 1});
-t.findOne( query );
+t.findOne(query);
-x = db.adminCommand( { setParameter : 1 , logLevel : before.was } );
-assert.eq( 1 , x.was , tojson( x ) );
+x = db.adminCommand({setParameter: 1, logLevel: before.was});
+assert.eq(1, x.was, tojson(x));
-log = db.adminCommand( { getLog : "global" } ).log;
+log = db.adminCommand({getLog: "global"}).log;
found = false;
-for ( i=log.length - 1; i>= 0; i-- ) {
- if ( log[i].indexOf( "warning: log line attempted (16kB)" ) >= 0 ) {
+for (i = log.length - 1; i >= 0; i--) {
+ if (log[i].indexOf("warning: log line attempted (16kB)") >= 0) {
found = true;
break;
}
diff --git a/jstests/core/logprocessdetails.js b/jstests/core/logprocessdetails.js
index c53655843e1..1ff4fff1112 100644
--- a/jstests/core/logprocessdetails.js
+++ b/jstests/core/logprocessdetails.js
@@ -7,9 +7,9 @@
* Returns true if regex matches a string in the array
*/
doesLogMatchRegex = function(logArray, regex) {
- for (var i = (logArray.length - 1); i >= 0; i--){
+ for (var i = (logArray.length - 1); i >= 0; i--) {
var regexInLine = regex.exec(logArray[i]);
- if (regexInLine != null){
+ if (regexInLine != null) {
return true;
}
}
@@ -17,18 +17,18 @@ doesLogMatchRegex = function(logArray, regex) {
};
doTest = function() {
- var log = db.adminCommand({ getLog: 'global'});
- //this regex will need to change if output changes
+ var log = db.adminCommand({getLog: 'global'});
+ // this regex will need to change if output changes
var re = new RegExp(".*conn.*options.*");
assert.neq(null, log);
var lineCount = log.totalLinesWritten;
assert.neq(0, lineCount);
- var result = db.adminCommand({ logRotate: 1});
+ var result = db.adminCommand({logRotate: 1});
assert.eq(1, result.ok);
- var log2 = db.adminCommand({ getLog: 'global'});
+ var log2 = db.adminCommand({getLog: 'global'});
assert.neq(null, log2);
assert.gte(log2.totalLinesWritten, lineCount);
diff --git a/jstests/core/long_index_rename.js b/jstests/core/long_index_rename.js
index 27517ac8e5a..df3397bbb46 100644
--- a/jstests/core/long_index_rename.js
+++ b/jstests/core/long_index_rename.js
@@ -7,11 +7,15 @@ t = db.long_index_rename;
t.drop();
for (i = 1; i < 10; i++) {
- t.save({a:i});
+ t.save({a: i});
}
-t.createIndex({a:1}, {name: "aaa"});
-var result = t.createIndex({a:1}, {name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"});
-assert( !result.ok );
-assert( result.errmsg.indexOf( "too long" ) >= 0 );
+t.createIndex({a: 1}, {name: "aaa"});
+var result = t.createIndex(
+ {a: 1},
+ {
+ name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ });
+assert(!result.ok);
+assert(result.errmsg.indexOf("too long") >= 0);
diff --git a/jstests/core/map1.js b/jstests/core/map1.js
index 5697e41f32c..ea2dec5db69 100644
--- a/jstests/core/map1.js
+++ b/jstests/core/map1.js
@@ -1,24 +1,22 @@
-function basic1( key , lookup , shouldFail){
+function basic1(key, lookup, shouldFail) {
var m = new Map();
- m.put( key , 17 );
-
- var out = m.get( lookup || key );
+ m.put(key, 17);
- if ( ! shouldFail ){
- assert.eq( 17 , out , "basic1 missing: " + tojson( key ) );
- }
- else {
- assert.isnull( out , "basic1 not missing: " + tojson( key ) );
- }
+ var out = m.get(lookup || key);
+ if (!shouldFail) {
+ assert.eq(17, out, "basic1 missing: " + tojson(key));
+ } else {
+ assert.isnull(out, "basic1 not missing: " + tojson(key));
+ }
}
-basic1( 6 );
-basic1( new Date() );
-basic1( "eliot" );
-basic1( { a : 1 } );
-basic1( { a : 1 , b : 1 } );
-basic1( { a : 1 } , { b : 1 } , true );
-basic1( { a : 1 , b : 1 } , { b : 1 , a : 1 } , true );
-basic1( { a : 1 } , { a : 2 } , true );
+basic1(6);
+basic1(new Date());
+basic1("eliot");
+basic1({a: 1});
+basic1({a: 1, b: 1});
+basic1({a: 1}, {b: 1}, true);
+basic1({a: 1, b: 1}, {b: 1, a: 1}, true);
+basic1({a: 1}, {a: 2}, true);
diff --git a/jstests/core/max_doc_size.js b/jstests/core/max_doc_size.js
index 509d0b4b2ea..03deeafb307 100644
--- a/jstests/core/max_doc_size.js
+++ b/jstests/core/max_doc_size.js
@@ -1,36 +1,32 @@
var maxBsonObjectSize = db.isMaster().maxBsonObjectSize;
-var docOverhead = Object.bsonsize({ _id: new ObjectId(), x: '' });
+var docOverhead = Object.bsonsize({_id: new ObjectId(), x: ''});
var maxStrSize = maxBsonObjectSize - docOverhead;
-
var maxStr = 'a';
-while (maxStr.length < maxStrSize) maxStr += 'a';
+while (maxStr.length < maxStrSize)
+ maxStr += 'a';
var coll = db.max_doc_size;
coll.drop();
-var res = db.runCommand({ insert: coll.getName(),
- documents: [{ _id: new ObjectId(), x: maxStr }] });
+var res = db.runCommand({insert: coll.getName(), documents: [{_id: new ObjectId(), x: maxStr}]});
assert(res.ok);
assert.eq(null, res.writeErrors);
coll.drop();
-res = db.runCommand({ update: coll.getName(),
- ordered: true,
- updates: [{ q: { a: 1 },
- u: { _id: new ObjectId(), x: maxStr },
- upsert: true
- }]});
+res = db.runCommand({
+ update: coll.getName(),
+ ordered: true,
+ updates: [{q: {a: 1}, u: {_id: new ObjectId(), x: maxStr}, upsert: true}]
+});
assert(res.ok);
assert.eq(null, res.writeErrors);
coll.drop();
var id = new ObjectId();
-coll.insert({ _id: id });
-res = db.runCommand({ update: coll.getName(),
- ordered: true,
- updates: [{ q: { _id: id },
- u: { $set: { x: maxStr }}}] });
+coll.insert({_id: id});
+res = db.runCommand(
+ {update: coll.getName(), ordered: true, updates: [{q: {_id: id}, u: {$set: {x: maxStr}}}]});
assert(res.ok);
assert.eq(null, res.writeErrors);
@@ -41,28 +37,26 @@ assert.eq(null, res.writeErrors);
var overBigStr = maxStr + 'a';
coll.drop();
-res = db.runCommand({ insert: coll.getName(),
- documents: [{ _id: new ObjectId(), x: overBigStr }] });
+res = db.runCommand({insert: coll.getName(), documents: [{_id: new ObjectId(), x: overBigStr}]});
assert(res.ok);
assert.neq(null, res.writeErrors);
coll.drop();
-res = db.runCommand({ update: coll.getName(),
- ordered: true,
- updates: [{ q: { a: 1 },
- u: { _id: new ObjectId(), x: overBigStr },
- upsert: true
- }]});
+res = db.runCommand({
+ update: coll.getName(),
+ ordered: true,
+ updates: [{q: {a: 1}, u: {_id: new ObjectId(), x: overBigStr}, upsert: true}]
+});
assert(res.ok);
assert.neq(null, res.writeErrors);
coll.drop();
id = new ObjectId();
-coll.insert({ _id: id });
-res = db.runCommand({ update: coll.getName(),
- ordered: true,
- updates: [{ q: { _id: id },
- u: { $set: { x: overBigStr }}}] });
+coll.insert({_id: id});
+res = db.runCommand({
+ update: coll.getName(),
+ ordered: true,
+ updates: [{q: {_id: id}, u: {$set: {x: overBigStr}}}]
+});
assert(res.ok);
assert.neq(null, res.writeErrors);
-
diff --git a/jstests/core/max_time_ms.js b/jstests/core/max_time_ms.js
index 9c2797ca12b..e70ae8cb0fe 100644
--- a/jstests/core/max_time_ms.js
+++ b/jstests/core/max_time_ms.js
@@ -1,7 +1,7 @@
// Tests query/command option $maxTimeMS.
var t = db.max_time_ms;
-var exceededTimeLimit = 50; // ErrorCodes::ExceededTimeLimit
+var exceededTimeLimit = 50; // ErrorCodes::ExceededTimeLimit
var cursor;
var res;
@@ -10,10 +10,17 @@ var res;
//
t.drop();
-t.insert([{},{},{}]);
-cursor = t.find({$where: function() { sleep(100); return true; }});
+t.insert([{}, {}, {}]);
+cursor = t.find({
+ $where: function() {
+ sleep(100);
+ return true;
+ }
+});
cursor.maxTimeMS(100);
-assert.throws(function() { cursor.itcount(); }, [], "expected query to abort due to time limit");
+assert.throws(function() {
+ cursor.itcount();
+}, [], "expected query to abort due to time limit");
//
// Simple negative test for query: a ~300ms query with a 10s time limit should not hit the time
@@ -21,12 +28,17 @@ assert.throws(function() { cursor.itcount(); }, [], "expected query to abort due
//
t.drop();
-t.insert([{},{},{}]);
-cursor = t.find({$where: function() { sleep(100); return true; }});
-cursor.maxTimeMS(10*1000);
-assert.doesNotThrow(function() { cursor.itcount(); },
- [],
- "expected query to not hit the time limit");
+t.insert([{}, {}, {}]);
+cursor = t.find({
+ $where: function() {
+ sleep(100);
+ return true;
+ }
+});
+cursor.maxTimeMS(10 * 1000);
+assert.doesNotThrow(function() {
+ cursor.itcount();
+}, [], "expected query to not hit the time limit");
//
// Simple positive test for getmore:
@@ -36,22 +48,28 @@ assert.doesNotThrow(function() { cursor.itcount(); },
//
t.drop();
-t.insert([{},{},{}]); // fast batch
-t.insert([{slow: true},{slow: true},{slow: true}]); // slow batch
-cursor = t.find({$where: function() {
- if (this.slow) {
- sleep(5*1000);
+t.insert([{}, {}, {}]); // fast batch
+t.insert([{slow: true}, {slow: true}, {slow: true}]); // slow batch
+cursor = t.find({
+ $where: function() {
+ if (this.slow) {
+ sleep(5 * 1000);
+ }
+ return true;
}
- return true;
-}});
+});
cursor.batchSize(3);
cursor.maxTimeMS(1000);
-assert.doesNotThrow(function() { cursor.next(); cursor.next(); cursor.next(); },
- [],
- "expected batch 1 (query) to not hit the time limit");
-assert.throws(function() { cursor.next(); cursor.next(); cursor.next(); },
- [],
- "expected batch 2 (getmore) to abort due to time limit");
+assert.doesNotThrow(function() {
+ cursor.next();
+ cursor.next();
+ cursor.next();
+}, [], "expected batch 1 (query) to not hit the time limit");
+assert.throws(function() {
+ cursor.next();
+ cursor.next();
+ cursor.next();
+}, [], "expected batch 2 (getmore) to abort due to time limit");
//
// Simple negative test for getmore:
@@ -61,22 +79,28 @@ assert.throws(function() { cursor.next(); cursor.next(); cursor.next(); },
//
t.drop();
-t.insert([{},{},{}]); // fast batch
-t.insert([{},{},{slow: true}]); // slow batch
-cursor = t.find({$where: function() {
- if (this.slow) {
- sleep(2*1000);
+t.insert([{}, {}, {}]); // fast batch
+t.insert([{}, {}, {slow: true}]); // slow batch
+cursor = t.find({
+ $where: function() {
+ if (this.slow) {
+ sleep(2 * 1000);
+ }
+ return true;
}
- return true;
-}});
+});
cursor.batchSize(3);
-cursor.maxTimeMS(10*1000);
-assert.doesNotThrow(function() { cursor.next(); cursor.next(); cursor.next(); },
- [],
- "expected batch 1 (query) to not hit the time limit");
-assert.doesNotThrow(function() { cursor.next(); cursor.next(); cursor.next(); },
- [],
- "expected batch 2 (getmore) to not hit the time limit");
+cursor.maxTimeMS(10 * 1000);
+assert.doesNotThrow(function() {
+ cursor.next();
+ cursor.next();
+ cursor.next();
+}, [], "expected batch 1 (query) to not hit the time limit");
+assert.doesNotThrow(function() {
+ cursor.next();
+ cursor.next();
+ cursor.next();
+}, [], "expected batch 2 (getmore) to not hit the time limit");
//
// Many-batch positive test for getmore:
@@ -85,18 +109,22 @@ assert.doesNotThrow(function() { cursor.next(); cursor.next(); cursor.next(); },
//
t.drop();
-for (var i=0; i<5; i++) {
- t.insert([{},{},{slow:true}]);
+for (var i = 0; i < 5; i++) {
+ t.insert([{}, {}, {slow: true}]);
}
-cursor = t.find({$where: function() {
- if (this.slow) {
- sleep(2*1000);
+cursor = t.find({
+ $where: function() {
+ if (this.slow) {
+ sleep(2 * 1000);
+ }
+ return true;
}
- return true;
-}});
+});
cursor.batchSize(3);
-cursor.maxTimeMS(6*1000);
-assert.throws(function() { cursor.itcount(); }, [], "expected find() to abort due to time limit");
+cursor.maxTimeMS(6 * 1000);
+assert.throws(function() {
+ cursor.itcount();
+}, [], "expected find() to abort due to time limit");
//
// Many-batch negative test for getmore:
@@ -105,20 +133,22 @@ assert.throws(function() { cursor.itcount(); }, [], "expected find() to abort du
//
t.drop();
-for (var i=0; i<5; i++) {
- t.insert([{},{},{slow:true}]);
+for (var i = 0; i < 5; i++) {
+ t.insert([{}, {}, {slow: true}]);
}
-cursor = t.find({$where: function() {
- if (this.slow) {
- sleep(2*1000);
+cursor = t.find({
+ $where: function() {
+ if (this.slow) {
+ sleep(2 * 1000);
+ }
+ return true;
}
- return true;
-}});
+});
cursor.batchSize(3);
-cursor.maxTimeMS(20*1000);
-assert.doesNotThrow(function() { cursor.itcount(); },
- [],
- "expected find() to not hit the time limit");
+cursor.maxTimeMS(20 * 1000);
+assert.doesNotThrow(function() {
+ cursor.itcount();
+}, [], "expected find() to not hit the time limit");
//
// Simple positive test for commands: a ~300ms command with a 100ms time limit should be aborted.
@@ -135,7 +165,7 @@ assert(res.ok == 0 && res.code == exceededTimeLimit,
//
t.drop();
-res = t.getDB().adminCommand({sleep: 1, millis: 300, maxTimeMS: 10*1000});
+res = t.getDB().adminCommand({sleep: 1, millis: 300, maxTimeMS: 10 * 1000});
assert(res.ok == 1,
"expected sleep command to not hit the time limit, ok=" + res.ok + ", code=" + res.code);
@@ -148,45 +178,81 @@ t.insert({});
// Verify lower boundary for acceptable input (0 is acceptable, 1 isn't).
-assert.doesNotThrow.automsg(function() { t.find().maxTimeMS(0).itcount(); });
-assert.doesNotThrow.automsg(function() { t.find().maxTimeMS(NumberInt(0)).itcount(); });
-assert.doesNotThrow.automsg(function() { t.find().maxTimeMS(NumberLong(0)).itcount(); });
+assert.doesNotThrow.automsg(function() {
+ t.find().maxTimeMS(0).itcount();
+});
+assert.doesNotThrow.automsg(function() {
+ t.find().maxTimeMS(NumberInt(0)).itcount();
+});
+assert.doesNotThrow.automsg(function() {
+ t.find().maxTimeMS(NumberLong(0)).itcount();
+});
assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: 0}).ok);
assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(0)}).ok);
assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(0)}).ok);
-assert.throws.automsg(function() { t.find().maxTimeMS(-1).itcount(); });
-assert.throws.automsg(function() { t.find().maxTimeMS(NumberInt(-1)).itcount(); });
-assert.throws.automsg(function() { t.find().maxTimeMS(NumberLong(-1)).itcount(); });
+assert.throws.automsg(function() {
+ t.find().maxTimeMS(-1).itcount();
+});
+assert.throws.automsg(function() {
+ t.find().maxTimeMS(NumberInt(-1)).itcount();
+});
+assert.throws.automsg(function() {
+ t.find().maxTimeMS(NumberLong(-1)).itcount();
+});
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: -1}).ok);
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(-1)}).ok);
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(-1)}).ok);
// Verify upper boundary for acceptable input (2^31-1 is acceptable, 2^31 isn't).
-var maxValue = Math.pow(2,31)-1;
+var maxValue = Math.pow(2, 31) - 1;
-assert.doesNotThrow.automsg(function() { t.find().maxTimeMS(maxValue).itcount(); });
-assert.doesNotThrow.automsg(function() { t.find().maxTimeMS(NumberInt(maxValue)).itcount(); });
-assert.doesNotThrow.automsg(function() { t.find().maxTimeMS(NumberLong(maxValue)).itcount(); });
+assert.doesNotThrow.automsg(function() {
+ t.find().maxTimeMS(maxValue).itcount();
+});
+assert.doesNotThrow.automsg(function() {
+ t.find().maxTimeMS(NumberInt(maxValue)).itcount();
+});
+assert.doesNotThrow.automsg(function() {
+ t.find().maxTimeMS(NumberLong(maxValue)).itcount();
+});
assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: maxValue}).ok);
assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(maxValue)}).ok);
assert.eq(1, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(maxValue)}).ok);
-assert.throws.automsg(function() { t.find().maxTimeMS(maxValue+1).itcount(); });
-assert.throws.automsg(function() { t.find().maxTimeMS(NumberInt(maxValue+1)).itcount(); });
-assert.throws.automsg(function() { t.find().maxTimeMS(NumberLong(maxValue+1)).itcount(); });
-assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: maxValue+1}).ok);
-assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(maxValue+1)}).ok);
-assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(maxValue+1)}).ok);
+assert.throws.automsg(function() {
+ t.find().maxTimeMS(maxValue + 1).itcount();
+});
+assert.throws.automsg(function() {
+ t.find().maxTimeMS(NumberInt(maxValue + 1)).itcount();
+});
+assert.throws.automsg(function() {
+ t.find().maxTimeMS(NumberLong(maxValue + 1)).itcount();
+});
+assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: maxValue + 1}).ok);
+assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberInt(maxValue + 1)}).ok);
+assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: NumberLong(maxValue + 1)}).ok);
// Verify invalid values are rejected.
-assert.throws.automsg(function() { t.find().maxTimeMS(0.1).itcount(); });
-assert.throws.automsg(function() { t.find().maxTimeMS(-0.1).itcount(); });
-assert.throws.automsg(function() { t.find().maxTimeMS().itcount(); });
-assert.throws.automsg(function() { t.find().maxTimeMS("").itcount(); });
-assert.throws.automsg(function() { t.find().maxTimeMS(true).itcount(); });
-assert.throws.automsg(function() { t.find().maxTimeMS({}).itcount(); });
+assert.throws.automsg(function() {
+ t.find().maxTimeMS(0.1).itcount();
+});
+assert.throws.automsg(function() {
+ t.find().maxTimeMS(-0.1).itcount();
+});
+assert.throws.automsg(function() {
+ t.find().maxTimeMS().itcount();
+});
+assert.throws.automsg(function() {
+ t.find().maxTimeMS("").itcount();
+});
+assert.throws.automsg(function() {
+ t.find().maxTimeMS(true).itcount();
+});
+assert.throws.automsg(function() {
+ t.find().maxTimeMS({}).itcount();
+});
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: 0.1}).ok);
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: -0.1}).ok);
assert.eq(0, t.getDB().runCommand({ping: 1, maxTimeMS: undefined}).ok);
@@ -210,9 +276,9 @@ assert.eq(0, cursor.next().ok);
// TODO: rewrite to use runCommandWithMetadata when we have a shell helper so that
// we can test server side validation.
assert.throws(function() {
- cursor = t.getDB().$cmd.find({ping: 1}).limit(-1).maxTimeMS(0);
- cursor._ensureSpecial();
- cursor.next();
+ cursor = t.getDB().$cmd.find({ping: 1}).limit(-1).maxTimeMS(0);
+ cursor._ensureSpecial();
+ cursor.next();
});
//
@@ -221,77 +287,91 @@ assert.throws(function() {
// maxTimeAlwaysTimeOut positive test for command.
t.drop();
-assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut",
- mode: "alwaysOn"}).ok);
-res = t.getDB().runCommand({ping: 1, maxTimeMS: 10*1000});
+assert.eq(
+ 1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}).ok);
+res = t.getDB().runCommand({ping: 1, maxTimeMS: 10 * 1000});
assert(res.ok == 0 && res.code == exceededTimeLimit,
- "expected command to trigger maxTimeAlwaysTimeOut fail point, ok=" + res.ok + ", code="
- + res.code);
+ "expected command to trigger maxTimeAlwaysTimeOut fail point, ok=" + res.ok + ", code=" +
+ res.code);
assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}).ok);
// maxTimeNeverTimeOut positive test for command.
t.drop();
-assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut",
- mode: "alwaysOn"}).ok);
+assert.eq(1,
+ t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}).ok);
res = t.getDB().adminCommand({sleep: 1, millis: 300, maxTimeMS: 100});
assert(res.ok == 1,
- "expected command to trigger maxTimeNeverTimeOut fail point, ok=" + res.ok + ", code="
- + res.code);
+ "expected command to trigger maxTimeNeverTimeOut fail point, ok=" + res.ok + ", code=" +
+ res.code);
assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}).ok);
// maxTimeAlwaysTimeOut positive test for query.
t.drop();
-assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut",
- mode: "alwaysOn"}).ok);
-assert.throws(function() { t.find().maxTimeMS(10*1000).itcount(); },
- [],
- "expected query to trigger maxTimeAlwaysTimeOut fail point");
+assert.eq(
+ 1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}).ok);
+assert.throws(function() {
+ t.find().maxTimeMS(10 * 1000).itcount();
+}, [], "expected query to trigger maxTimeAlwaysTimeOut fail point");
assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}).ok);
// maxTimeNeverTimeOut positive test for query.
-assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut",
- mode: "alwaysOn"}).ok);
+assert.eq(1,
+ t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}).ok);
t.drop();
-t.insert([{},{},{}]);
-cursor = t.find({$where: function() { sleep(100); return true; }});
+t.insert([{}, {}, {}]);
+cursor = t.find({
+ $where: function() {
+ sleep(100);
+ return true;
+ }
+});
cursor.maxTimeMS(100);
-assert.doesNotThrow(function() { cursor.itcount(); },
- [],
- "expected query to trigger maxTimeNeverTimeOut fail point");
+assert.doesNotThrow(function() {
+ cursor.itcount();
+}, [], "expected query to trigger maxTimeNeverTimeOut fail point");
assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}).ok);
// maxTimeAlwaysTimeOut positive test for getmore.
t.drop();
-t.insert([{},{},{}]);
-cursor = t.find().maxTimeMS(10*1000).batchSize(2);
-assert.doesNotThrow.automsg(function() { cursor.next(); cursor.next(); });
-assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut",
- mode: "alwaysOn"}).ok);
-assert.throws(function() { cursor.next(); },
- [],
- "expected getmore to trigger maxTimeAlwaysTimeOut fail point");
+t.insert([{}, {}, {}]);
+cursor = t.find().maxTimeMS(10 * 1000).batchSize(2);
+assert.doesNotThrow.automsg(function() {
+ cursor.next();
+ cursor.next();
+});
+assert.eq(
+ 1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}).ok);
+assert.throws(function() {
+ cursor.next();
+}, [], "expected getmore to trigger maxTimeAlwaysTimeOut fail point");
assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}).ok);
// maxTimeNeverTimeOut positive test for getmore.
t.drop();
-t.insert([{},{},{}]); // fast batch
-t.insert([{slow: true},{slow: true},{slow: true}]); // slow batch
-cursor = t.find({$where: function() {
- if (this.slow) {
- sleep(2*1000);
+t.insert([{}, {}, {}]); // fast batch
+t.insert([{slow: true}, {slow: true}, {slow: true}]); // slow batch
+cursor = t.find({
+ $where: function() {
+ if (this.slow) {
+ sleep(2 * 1000);
+ }
+ return true;
}
- return true;
-}});
+});
cursor.batchSize(3);
-cursor.maxTimeMS(2*1000);
-assert.doesNotThrow(function() { cursor.next(); cursor.next(); cursor.next(); },
- [],
- "expected batch 1 (query) to not hit the time limit");
-assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut",
- mode: "alwaysOn"}).ok);
-assert.doesNotThrow(function() { cursor.next(); cursor.next(); cursor.next(); },
- [],
- "expected batch 2 (getmore) to trigger maxTimeNeverTimeOut fail point");
+cursor.maxTimeMS(2 * 1000);
+assert.doesNotThrow(function() {
+ cursor.next();
+ cursor.next();
+ cursor.next();
+}, [], "expected batch 1 (query) to not hit the time limit");
+assert.eq(1,
+ t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}).ok);
+assert.doesNotThrow(function() {
+ cursor.next();
+ cursor.next();
+ cursor.next();
+}, [], "expected batch 2 (getmore) to trigger maxTimeNeverTimeOut fail point");
assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}).ok);
//
@@ -299,33 +379,36 @@ assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut",
//
// "aggregate" command.
-res = t.runCommand("aggregate", {pipeline: [], maxTimeMS: 60*1000});
+res = t.runCommand("aggregate", {pipeline: [], maxTimeMS: 60 * 1000});
assert(res.ok == 1,
"expected aggregate with maxtime to succeed, ok=" + res.ok + ", code=" + res.code);
// "collMod" command.
-res = t.runCommand("collMod", {usePowerOf2Sizes: true, maxTimeMS: 60*1000});
+res = t.runCommand("collMod", {usePowerOf2Sizes: true, maxTimeMS: 60 * 1000});
assert(res.ok == 1,
"expected collmod with maxtime to succeed, ok=" + res.ok + ", code=" + res.code);
//
// Test maxTimeMS for parallelCollectionScan
//
-res = t.runCommand({parallelCollectionScan: t.getName(), numCursors: 1, maxTimeMS: 60*1000});
+res = t.runCommand({parallelCollectionScan: t.getName(), numCursors: 1, maxTimeMS: 60 * 1000});
assert.commandWorked(res);
-var cursor = new DBCommandCursor( t.getDB().getMongo(), res.cursors[0], 5 );
-assert.commandWorked(t.getDB().adminCommand({
- configureFailPoint: "maxTimeAlwaysTimeOut",
- mode: "alwaysOn"
-}));
-assert.throws(function() { cursor.itcount(); }, [], "expected query to abort due to time limit");
-assert.commandWorked(t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
+var cursor = new DBCommandCursor(t.getDB().getMongo(), res.cursors[0], 5);
+assert.commandWorked(
+ t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
+assert.throws(function() {
+ cursor.itcount();
+}, [], "expected query to abort due to time limit");
+assert.commandWorked(
+ t.getDB().adminCommand({configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
//
// test count shell helper SERVER-13334
//
t.drop();
-assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut",
- mode: "alwaysOn"}).ok);
-assert.doesNotThrow(function() { t.find({}).maxTimeMS(1).count(); });
+assert.eq(1,
+ t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "alwaysOn"}).ok);
+assert.doesNotThrow(function() {
+ t.find({}).maxTimeMS(1).count();
+});
assert.eq(1, t.getDB().adminCommand({configureFailPoint: "maxTimeNeverTimeOut", mode: "off"}).ok);
diff --git a/jstests/core/maxscan.js b/jstests/core/maxscan.js
index ab490f51de0..a862ed426a0 100644
--- a/jstests/core/maxscan.js
+++ b/jstests/core/maxscan.js
@@ -3,16 +3,16 @@ t = db.maxscan;
t.drop();
N = 100;
-for ( i=0; i<N; i++ ){
- t.insert( { _id : i , x : i % 10 } );
+for (i = 0; i < N; i++) {
+ t.insert({_id: i, x: i % 10});
}
-assert.eq( N , t.find().itcount() , "A" );
-assert.eq( 50 , t.find().maxScan(50).itcount() , "B" );
+assert.eq(N, t.find().itcount(), "A");
+assert.eq(50, t.find().maxScan(50).itcount(), "B");
-assert.eq( 10 , t.find( { x : 2 } ).itcount() , "C" );
-assert.eq( 5 , t.find( { x : 2 } ).maxScan(50).itcount() , "D" );
+assert.eq(10, t.find({x: 2}).itcount(), "C");
+assert.eq(5, t.find({x: 2}).maxScan(50).itcount(), "D");
t.ensureIndex({x: 1});
-assert.eq( 10, t.find( { x : 2 } ).hint({x:1}).maxScan(N).itcount() , "E" );
-assert.eq( 0, t.find( { x : 2 } ).hint({x:1}).maxScan(1).itcount() , "E" );
+assert.eq(10, t.find({x: 2}).hint({x: 1}).maxScan(N).itcount(), "E");
+assert.eq(0, t.find({x: 2}).hint({x: 1}).maxScan(1).itcount(), "E");
diff --git a/jstests/core/minmax.js b/jstests/core/minmax.js
index d6dc7cc1034..670c7d2f8b2 100644
--- a/jstests/core/minmax.js
+++ b/jstests/core/minmax.js
@@ -1,56 +1,66 @@
// test min / max query parameters
addData = function() {
- t.save( { a: 1, b: 1 } );
- t.save( { a: 1, b: 2 } );
- t.save( { a: 2, b: 1 } );
- t.save( { a: 2, b: 2 } );
+ t.save({a: 1, b: 1});
+ t.save({a: 1, b: 2});
+ t.save({a: 2, b: 1});
+ t.save({a: 2, b: 2});
};
t = db.jstests_minmax;
t.drop();
-t.ensureIndex( { a: 1, b: 1 } );
+t.ensureIndex({a: 1, b: 1});
addData();
-printjson( t.find().min( { a: 1, b: 2 } ).max( { a: 2, b: 1 } ).toArray() );
-assert.eq( 1, t.find().min( { a: 1, b: 2 } ).max( { a: 2, b: 1 } ).toArray().length );
-assert.eq( 2, t.find().min( { a: 1, b: 2 } ).max( { a: 2, b: 1.5 } ).toArray().length );
-assert.eq( 2, t.find().min( { a: 1, b: 2 } ).max( { a: 2, b: 2 } ).toArray().length );
+printjson(t.find().min({a: 1, b: 2}).max({a: 2, b: 1}).toArray());
+assert.eq(1, t.find().min({a: 1, b: 2}).max({a: 2, b: 1}).toArray().length);
+assert.eq(2, t.find().min({a: 1, b: 2}).max({a: 2, b: 1.5}).toArray().length);
+assert.eq(2, t.find().min({a: 1, b: 2}).max({a: 2, b: 2}).toArray().length);
// just one bound
-assert.eq( 3, t.find().min( { a: 1, b: 2 } ).toArray().length );
-assert.eq( 3, t.find().max( { a: 2, b: 1.5 } ).toArray().length );
-assert.eq( 3, t.find().min( { a: 1, b: 2 } ).hint( { a: 1, b: 1 } ).toArray().length );
-assert.eq( 3, t.find().max( { a: 2, b: 1.5 } ).hint( { a: 1, b: 1 } ).toArray().length );
+assert.eq(3, t.find().min({a: 1, b: 2}).toArray().length);
+assert.eq(3, t.find().max({a: 2, b: 1.5}).toArray().length);
+assert.eq(3, t.find().min({a: 1, b: 2}).hint({a: 1, b: 1}).toArray().length);
+assert.eq(3, t.find().max({a: 2, b: 1.5}).hint({a: 1, b: 1}).toArray().length);
t.drop();
-t.ensureIndex( { a: 1, b: -1 } );
+t.ensureIndex({a: 1, b: -1});
addData();
-assert.eq( 4, t.find().min( { a: 1, b: 2 } ).toArray().length );
-assert.eq( 4, t.find().max( { a: 2, b: 0.5 } ).toArray().length );
-assert.eq( 1, t.find().min( { a: 2, b: 1 } ).toArray().length );
-assert.eq( 1, t.find().max( { a: 1, b: 1.5 } ).toArray().length );
-assert.eq( 4, t.find().min( { a: 1, b: 2 } ).hint( { a: 1, b: -1 } ).toArray().length );
-assert.eq( 4, t.find().max( { a: 2, b: 0.5 } ).hint( { a: 1, b: -1 } ).toArray().length );
-assert.eq( 1, t.find().min( { a: 2, b: 1 } ).hint( { a: 1, b: -1 } ).toArray().length );
-assert.eq( 1, t.find().max( { a: 1, b: 1.5 } ).hint( { a: 1, b: -1 } ).toArray().length );
+assert.eq(4, t.find().min({a: 1, b: 2}).toArray().length);
+assert.eq(4, t.find().max({a: 2, b: 0.5}).toArray().length);
+assert.eq(1, t.find().min({a: 2, b: 1}).toArray().length);
+assert.eq(1, t.find().max({a: 1, b: 1.5}).toArray().length);
+assert.eq(4, t.find().min({a: 1, b: 2}).hint({a: 1, b: -1}).toArray().length);
+assert.eq(4, t.find().max({a: 2, b: 0.5}).hint({a: 1, b: -1}).toArray().length);
+assert.eq(1, t.find().min({a: 2, b: 1}).hint({a: 1, b: -1}).toArray().length);
+assert.eq(1, t.find().max({a: 1, b: 1.5}).hint({a: 1, b: -1}).toArray().length);
// hint doesn't match
-assert.throws( function() { t.find().min( { a: 1 } ).hint( { a: 1, b: -1 } ).toArray(); } );
-assert.throws( function() { t.find().min( { a: 1, b: 1 } ).max( { a: 1 } ).hint( { a: 1, b: -1 } ).toArray(); } );
-assert.throws( function() { t.find().min( { b: 1 } ).max( { a: 1, b: 2 } ).hint( { a: 1, b: -1 } ).toArray(); } );
-assert.throws( function() { t.find().min( { a: 1 } ).hint( { $natural: 1 } ).toArray(); } );
-assert.throws( function() { t.find().max( { a: 1 } ).hint( { $natural: 1 } ).toArray(); } );
+assert.throws(function() {
+ t.find().min({a: 1}).hint({a: 1, b: -1}).toArray();
+});
+assert.throws(function() {
+ t.find().min({a: 1, b: 1}).max({a: 1}).hint({a: 1, b: -1}).toArray();
+});
+assert.throws(function() {
+ t.find().min({b: 1}).max({a: 1, b: 2}).hint({a: 1, b: -1}).toArray();
+});
+assert.throws(function() {
+ t.find().min({a: 1}).hint({$natural: 1}).toArray();
+});
+assert.throws(function() {
+ t.find().max({a: 1}).hint({$natural: 1}).toArray();
+});
// Reverse direction scan of the a:1 index between a:6 (inclusive) and a:3 (exclusive).
t.drop();
-t.ensureIndex( { a:1 } );
-for( i = 0; i < 10; ++i ) {
- t.save( { _id:i, a:i } );
+t.ensureIndex({a: 1});
+for (i = 0; i < 10; ++i) {
+ t.save({_id: i, a: i});
}
-if ( 0 ) { // SERVER-3766
-reverseResult = t.find().min( { a:6 } ).max( { a:3 } ).sort( { a:-1 } ).hint( { a:1 } ).toArray();
-assert.eq( [ { _id:6, a:6 }, { _id:5, a:5 }, { _id:4, a:4 } ], reverseResult );
+if (0) { // SERVER-3766
+ reverseResult = t.find().min({a: 6}).max({a: 3}).sort({a: -1}).hint({a: 1}).toArray();
+ assert.eq([{_id: 6, a: 6}, {_id: 5, a: 5}, {_id: 4, a: 4}], reverseResult);
}
//
diff --git a/jstests/core/minmax_edge.js b/jstests/core/minmax_edge.js
index a22367cc2a9..abd39724a80 100644
--- a/jstests/core/minmax_edge.js
+++ b/jstests/core/minmax_edge.js
@@ -10,15 +10,15 @@ var t = db.minmax_edge;
* Results is the cursor toArray, expectedIds is a list of _ids
*/
function verifyResultIds(results, expectedIds) {
- //check they are the same length
+ // check they are the same length
assert.eq(results.length, expectedIds.length);
- function compare(a,b) {
- if (a._id < b._id)
- return -1;
- if (a._id > b._id)
- return 1;
- return 0;
+ function compare(a, b) {
+ if (a._id < b._id)
+ return -1;
+ if (a._id > b._id)
+ return 1;
+ return 0;
}
results.sort(compare);
@@ -62,22 +62,22 @@ reset(t);
assert.commandWorked(t.ensureIndex({a: 1}));
verifyMin({a: Infinity}, []);
-verifyMax({a: Infinity}, [0,1,2,3,4,5,6,7,8]);
+verifyMax({a: Infinity}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-verifyMin({a: -Infinity}, [0,1,2,3,4,5,6,7,8]);
+verifyMin({a: -Infinity}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyMax({a: -Infinity}, []);
// NaN < all ints.
-verifyMin({a: NaN}, [0,1,2,3,4,5,6,7,8]);
+verifyMin({a: NaN}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyMax({a: NaN}, []);
// {a: 1} > all ints.
verifyMin({a: {a: 1}}, []);
-verifyMax({a: {a: 1}}, [0,1,2,3,4,5,6,7,8]);
+verifyMax({a: {a: 1}}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
// 'a' > all ints.
verifyMin({a: 'a'}, []);
-verifyMax({a: 'a'}, [0,1,2,3,4,5,6,7,8]);
+verifyMax({a: 'a'}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyResultIds(t.find().min({a: 4}).max({a: 4}).toArray(), []);
@@ -86,64 +86,64 @@ reset(t);
assert.commandWorked(t.ensureIndex({a: 1, b: -1}));
// Same as single-key index assertions, with b field present.
-verifyMin({a: NaN, b: 1}, [0,1,2,3,4,5,6,7,8]);
+verifyMin({a: NaN, b: 1}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyMax({a: NaN, b: 1}, []);
verifyMin({a: Infinity, b: 1}, []);
-verifyMax({a: Infinity, b: 1}, [0,1,2,3,4,5,6,7,8]);
+verifyMax({a: Infinity, b: 1}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-verifyMin({a: -Infinity, b: 1}, [0,1,2,3,4,5,6,7,8]);
+verifyMin({a: -Infinity, b: 1}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyMax({a: -Infinity, b: 1}, []);
verifyMin({a: {a: 1}, b: 1}, []);
-verifyMax({a: {a: 1}, b: 1}, [0,1,2,3,4,5,6,7,8]);
+verifyMax({a: {a: 1}, b: 1}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyMin({a: 'a', b: 1}, []);
-verifyMax({a: 'a', b: 1}, [0,1,2,3,4,5,6,7,8]);
+verifyMax({a: 'a', b: 1}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyResultIds(t.find().min({a: 4, b: 1}).max({a: 4, b: 1}).toArray(), []);
// Edge cases on b values
-verifyMin({a: 1, b: Infinity}, [0,1,2,3,4,5,6,7,8]);
-verifyMin({a: 2, b: Infinity}, [3,4,5,6,7,8]);
-verifyMin({a: 3, b: Infinity}, [6,7,8]);
+verifyMin({a: 1, b: Infinity}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMin({a: 2, b: Infinity}, [3, 4, 5, 6, 7, 8]);
+verifyMin({a: 3, b: Infinity}, [6, 7, 8]);
verifyMax({a: 1, b: Infinity}, []);
-verifyMax({a: 2, b: Infinity}, [0,1,2]);
-verifyMax({a: 3, b: Infinity}, [0,1,2,3,4,5]);
+verifyMax({a: 2, b: Infinity}, [0, 1, 2]);
+verifyMax({a: 3, b: Infinity}, [0, 1, 2, 3, 4, 5]);
-verifyMin({a: 1, b: -Infinity}, [3,4,5,6,7,8]);
-verifyMin({a: 2, b: -Infinity}, [6,7,8]);
+verifyMin({a: 1, b: -Infinity}, [3, 4, 5, 6, 7, 8]);
+verifyMin({a: 2, b: -Infinity}, [6, 7, 8]);
verifyMin({a: 3, b: -Infinity}, []);
-verifyMax({a: 1, b: -Infinity}, [0,1,2]);
-verifyMax({a: 2, b: -Infinity}, [0,1,2,3,4,5]);
-verifyMax({a: 3, b: -Infinity}, [0,1,2,3,4,5,6,7,8]);
+verifyMax({a: 1, b: -Infinity}, [0, 1, 2]);
+verifyMax({a: 2, b: -Infinity}, [0, 1, 2, 3, 4, 5]);
+verifyMax({a: 3, b: -Infinity}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-verifyMin({a: 2, b: NaN}, [6,7,8]);
-verifyMax({a: 2, b: NaN}, [0,1,2,3,4,5]);
+verifyMin({a: 2, b: NaN}, [6, 7, 8]);
+verifyMax({a: 2, b: NaN}, [0, 1, 2, 3, 4, 5]);
-verifyMin({a: 2, b: {b: 1}}, [3,4,5,6,7,8]);
-verifyMax({a: 2, b: {b: 1}}, [0,1,2]);
+verifyMin({a: 2, b: {b: 1}}, [3, 4, 5, 6, 7, 8]);
+verifyMax({a: 2, b: {b: 1}}, [0, 1, 2]);
-verifyMin({a: 2, b: 'b'}, [3,4,5,6,7,8]);
-verifyMax({a: 2, b: 'b'}, [0,1,2]);
+verifyMin({a: 2, b: 'b'}, [3, 4, 5, 6, 7, 8]);
+verifyMax({a: 2, b: 'b'}, [0, 1, 2]);
// Test descending index.
reset(t);
t.ensureIndex({a: -1});
verifyMin({a: NaN}, []);
-verifyMax({a: NaN}, [0,1,2,3,4,5,6,7,8]);
+verifyMax({a: NaN}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-verifyMin({a: Infinity}, [0,1,2,3,4,5,6,7,8]);
+verifyMin({a: Infinity}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyMax({a: Infinity}, []);
verifyMin({a: -Infinity}, []);
-verifyMax({a: -Infinity}, [0,1,2,3,4,5,6,7,8]);
+verifyMax({a: -Infinity}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-verifyMin({a: {a: 1}}, [0,1,2,3,4,5,6,7,8]);
+verifyMin({a: {a: 1}}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyMax({a: {a: 1}}, []);
-verifyMin({a: 'a'}, [0,1,2,3,4,5,6,7,8]);
+verifyMin({a: 'a'}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyMax({a: 'a'}, []);
verifyResultIds(t.find().min({a: 4}).max({a: 4}).toArray(), []);
@@ -154,43 +154,43 @@ t.ensureIndex({a: -1, b: -1});
// Same as single-key index assertions, with b field present.
verifyMin({a: NaN, b: 1}, []);
-verifyMax({a: NaN, b: 1}, [0,1,2,3,4,5,6,7,8]);
+verifyMax({a: NaN, b: 1}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-verifyMin({a: Infinity, b: 1}, [0,1,2,3,4,5,6,7,8]);
+verifyMin({a: Infinity, b: 1}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyMax({a: Infinity, b: 1}, []);
verifyMin({a: -Infinity, b: 1}, []);
-verifyMax({a: -Infinity, b: 1}, [0,1,2,3,4,5,6,7,8]);
+verifyMax({a: -Infinity, b: 1}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
-verifyMin({a: {a: 1}, b: 1}, [0,1,2,3,4,5,6,7,8]);
+verifyMin({a: {a: 1}, b: 1}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyMax({a: {a: 1}, b: 1}, []);
-verifyMin({a: 'a', b: 1}, [0,1,2,3,4,5,6,7,8]);
+verifyMin({a: 'a', b: 1}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
verifyMax({a: 'a', b: 1}, []);
// Edge cases on b values.
-verifyMin({a: 1, b: Infinity}, [0,1,2]);
-verifyMin({a: 2, b: Infinity}, [0,1,2,3,4,5]);
-verifyMin({a: 3, b: Infinity}, [0,1,2,3,4,5,6,7,8]);
-verifyMax({a: 1, b: Infinity}, [3,4,5,6,7,8]);
-verifyMax({a: 2, b: Infinity}, [6,7,8]);
+verifyMin({a: 1, b: Infinity}, [0, 1, 2]);
+verifyMin({a: 2, b: Infinity}, [0, 1, 2, 3, 4, 5]);
+verifyMin({a: 3, b: Infinity}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: 1, b: Infinity}, [3, 4, 5, 6, 7, 8]);
+verifyMax({a: 2, b: Infinity}, [6, 7, 8]);
verifyMax({a: 3, b: Infinity}, []);
verifyMin({a: 1, b: -Infinity}, []);
-verifyMin({a: 2, b: -Infinity}, [0,1,2]);
-verifyMin({a: 3, b: -Infinity}, [0,1,2,3,4,5]);
-verifyMax({a: 1, b: -Infinity}, [0,1,2,3,4,5,6,7,8]);
-verifyMax({a: 2, b: -Infinity}, [3,4,5,6,7,8]);
-verifyMax({a: 3, b: -Infinity}, [6,7,8]);
+verifyMin({a: 2, b: -Infinity}, [0, 1, 2]);
+verifyMin({a: 3, b: -Infinity}, [0, 1, 2, 3, 4, 5]);
+verifyMax({a: 1, b: -Infinity}, [0, 1, 2, 3, 4, 5, 6, 7, 8]);
+verifyMax({a: 2, b: -Infinity}, [3, 4, 5, 6, 7, 8]);
+verifyMax({a: 3, b: -Infinity}, [6, 7, 8]);
-verifyMin({a: 2, b: NaN}, [0,1,2]);
-verifyMax({a: 2, b: NaN}, [3,4,5,6,7,8]);
+verifyMin({a: 2, b: NaN}, [0, 1, 2]);
+verifyMax({a: 2, b: NaN}, [3, 4, 5, 6, 7, 8]);
-verifyMin({a: 2, b: {b: 1}}, [3,4,5,6,7,8]);
-verifyMax({a: 2, b: {b: 1}}, [0,1,2]);
+verifyMin({a: 2, b: {b: 1}}, [3, 4, 5, 6, 7, 8]);
+verifyMax({a: 2, b: {b: 1}}, [0, 1, 2]);
-verifyMin({a: 2, b: 'b'}, [3,4,5,6,7,8]);
-verifyMax({a: 2, b: 'b'}, [0,1,2]);
+verifyMin({a: 2, b: 'b'}, [3, 4, 5, 6, 7, 8]);
+verifyMax({a: 2, b: 'b'}, [0, 1, 2]);
// Now a couple cases with an extra compound index.
t.drop();
@@ -205,27 +205,27 @@ t.insert({_id: 5, a: 2, b: 'b', c: 2});
t.insert({_id: 6, a: 2, b: 'a', c: 1});
t.insert({_id: 7, a: 2, b: 'a', c: 2});
-verifyMin({a: 1, b: 'a', c: 1}, [2,3,4,5,6,7]);
+verifyMin({a: 1, b: 'a', c: 1}, [2, 3, 4, 5, 6, 7]);
verifyMin({a: 2, b: 'a', c: 2}, [7]);
-verifyMax({a: 1, b: 'a', c: 1}, [0,1]);
-verifyMax({a: 2, b: 'a', c: 2}, [0,1,2,3,4,5,6]);
+verifyMax({a: 1, b: 'a', c: 1}, [0, 1]);
+verifyMax({a: 2, b: 'a', c: 2}, [0, 1, 2, 3, 4, 5, 6]);
verifyMin({a: Infinity, b: 'a', c: 2}, []);
-verifyMax({a: Infinity, b: 'a', c: 2}, [0,1,2,3,4,5,6,7]);
+verifyMax({a: Infinity, b: 'a', c: 2}, [0, 1, 2, 3, 4, 5, 6, 7]);
-verifyMin({a: -Infinity, b: 'a', c: 2}, [0,1,2,3,4,5,6,7]);
+verifyMin({a: -Infinity, b: 'a', c: 2}, [0, 1, 2, 3, 4, 5, 6, 7]);
verifyMax({a: -Infinity, b: 'a', c: 2}, []);
// 'a' > Infinity, actually.
-verifyMin({a: 1, b: Infinity, c: 2}, [4,5,6,7]);
-verifyMax({a: 1, b: Infinity, c: 2}, [0,1,2,3]);
+verifyMin({a: 1, b: Infinity, c: 2}, [4, 5, 6, 7]);
+verifyMax({a: 1, b: Infinity, c: 2}, [0, 1, 2, 3]);
// Also, 'a' > -Infinity.
-verifyMin({a: 1, b: -Infinity, c: 2}, [4,5,6,7]);
-verifyMax({a: 1, b: -Infinity, c: 2}, [0,1,2,3]);
+verifyMin({a: 1, b: -Infinity, c: 2}, [4, 5, 6, 7]);
+verifyMax({a: 1, b: -Infinity, c: 2}, [0, 1, 2, 3]);
-verifyMin({a: 1, b: 'a', c: Infinity}, [4,5,6,7]);
-verifyMax({a: 1, b: 'a', c: Infinity}, [0,1,2,3]);
+verifyMin({a: 1, b: 'a', c: Infinity}, [4, 5, 6, 7]);
+verifyMax({a: 1, b: 'a', c: Infinity}, [0, 1, 2, 3]);
-verifyMin({a: 1, b: 'a', c: -Infinity}, [2,3,4,5,6,7]);
-verifyMax({a: 1, b: 'a', c: -Infinity}, [0,1]);
+verifyMin({a: 1, b: 'a', c: -Infinity}, [2, 3, 4, 5, 6, 7]);
+verifyMax({a: 1, b: 'a', c: -Infinity}, [0, 1]);
diff --git a/jstests/core/mod1.js b/jstests/core/mod1.js
index 834084e9301..11be6b1b293 100644
--- a/jstests/core/mod1.js
+++ b/jstests/core/mod1.js
@@ -2,26 +2,28 @@
t = db.mod1;
t.drop();
-t.save( { a : 1 } );
-t.save( { a : 2 } );
-t.save( { a : 11 } );
-t.save( { a : 20 } );
-t.save( { a : "asd" } );
-t.save( { a : "adasdas" } );
+t.save({a: 1});
+t.save({a: 2});
+t.save({a: 11});
+t.save({a: 20});
+t.save({a: "asd"});
+t.save({a: "adasdas"});
-assert.eq( 2 , t.find( "this.a % 10 == 1" ).itcount() , "A1" );
-assert.eq( 2 , t.find( { a : { $mod : [ 10 , 1 ] } } ).itcount() , "A2" );
-assert.eq( 0 , t.find( { a : { $mod : [ 10 , 1 ] } } ).explain("executionStats")
- .executionStats.totalKeysExamined , "A3" );
+assert.eq(2, t.find("this.a % 10 == 1").itcount(), "A1");
+assert.eq(2, t.find({a: {$mod: [10, 1]}}).itcount(), "A2");
+assert.eq(0,
+ t.find({a: {$mod: [10, 1]}}).explain("executionStats").executionStats.totalKeysExamined,
+ "A3");
-t.ensureIndex( { a : 1 } );
+t.ensureIndex({a: 1});
-assert.eq( 2 , t.find( "this.a % 10 == 1" ).itcount() , "B1" );
-assert.eq( 2 , t.find( { a : { $mod : [ 10 , 1 ] } } ).itcount() , "B2" );
+assert.eq(2, t.find("this.a % 10 == 1").itcount(), "B1");
+assert.eq(2, t.find({a: {$mod: [10, 1]}}).itcount(), "B2");
-assert.eq( 1 , t.find( "this.a % 10 == 0" ).itcount() , "B3" );
-assert.eq( 1 , t.find( { a : { $mod : [ 10 , 0 ] } } ).itcount() , "B4" );
-assert.eq( 4 , t.find( { a : { $mod : [ 10 , 1 ] } } ).explain("executionStats")
- .executionStats.totalKeysExamined, "B5" );
+assert.eq(1, t.find("this.a % 10 == 0").itcount(), "B3");
+assert.eq(1, t.find({a: {$mod: [10, 0]}}).itcount(), "B4");
+assert.eq(4,
+ t.find({a: {$mod: [10, 1]}}).explain("executionStats").executionStats.totalKeysExamined,
+ "B5");
-assert.eq( 1, t.find( { a: { $gt: 5, $mod : [ 10, 1 ] } } ).itcount() );
+assert.eq(1, t.find({a: {$gt: 5, $mod: [10, 1]}}).itcount());
diff --git a/jstests/core/mr1.js b/jstests/core/mr1.js
index 2d64a1375d7..0225ab3cb62 100644
--- a/jstests/core/mr1.js
+++ b/jstests/core/mr1.js
@@ -2,182 +2,199 @@
t = db.mr1;
t.drop();
-t.save( { x : 1 , tags : [ "a" , "b" ] } );
-t.save( { x : 2 , tags : [ "b" , "c" ] } );
-t.save( { x : 3 , tags : [ "c" , "a" ] } );
-t.save( { x : 4 , tags : [ "b" , "c" ] } );
+t.save({x: 1, tags: ["a", "b"]});
+t.save({x: 2, tags: ["b", "c"]});
+t.save({x: 3, tags: ["c", "a"]});
+t.save({x: 4, tags: ["b", "c"]});
emit = printjson;
-function d( x ){
- printjson( x );
+function d(x) {
+ printjson(x);
}
ks = "_id";
-if ( db.version() == "1.1.1" )
+if (db.version() == "1.1.1")
ks = "key";
-
-m = function(){
- this.tags.forEach(
- function(z){
- emit( z , { count : 1 } );
- }
- );
+m = function() {
+ this.tags.forEach(function(z) {
+ emit(z, {count: 1});
+ });
};
-m2 = function(){
- for ( var i=0; i<this.tags.length; i++ ){
- emit( this.tags[i] , 1 );
+m2 = function() {
+ for (var i = 0; i < this.tags.length; i++) {
+ emit(this.tags[i], 1);
}
};
-
-r = function( key , values ){
+r = function(key, values) {
var total = 0;
- for ( var i=0; i<values.length; i++ ){
+ for (var i = 0; i < values.length; i++) {
total += values[i].count;
}
- return { count : total };
+ return {
+ count: total
+ };
};
-r2 = function( key , values ){
+r2 = function(key, values) {
var total = 0;
- for ( var i=0; i<values.length; i++ ){
+ for (var i = 0; i < values.length; i++) {
total += values[i];
}
return total;
};
-res = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , out : "mr1_out" } );
-d( res );
-if ( ks == "_id" ) assert( res.ok , "not ok" );
-assert.eq( 4 , res.counts.input , "A" );
+res = db.runCommand({mapreduce: "mr1", map: m, reduce: r, out: "mr1_out"});
+d(res);
+if (ks == "_id")
+ assert(res.ok, "not ok");
+assert.eq(4, res.counts.input, "A");
x = db[res.result];
-assert.eq( 3 , x.find().count() , "B" );
-x.find().forEach( d );
+assert.eq(3, x.find().count(), "B");
+x.find().forEach(d);
z = {};
-x.find().forEach( function(a){ z[a[ks]] = a.value.count; } );
-d( z );
-assert.eq( 3 , Object.keySet( z ).length , "C" );
-assert.eq( 2 , z.a , "D" );
-assert.eq( 3 , z.b , "E" );
-assert.eq( 3 , z.c , "F" );
+x.find().forEach(function(a) {
+ z[a[ks]] = a.value.count;
+});
+d(z);
+assert.eq(3, Object.keySet(z).length, "C");
+assert.eq(2, z.a, "D");
+assert.eq(3, z.b, "E");
+assert.eq(3, z.c, "F");
x.drop();
-res = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , query : { x : { "$gt" : 2 } } , out : "mr1_out" } );
-d( res );
-assert.eq( 2 , res.counts.input , "B" );
+res = db.runCommand({mapreduce: "mr1", map: m, reduce: r, query: {x: {"$gt": 2}}, out: "mr1_out"});
+d(res);
+assert.eq(2, res.counts.input, "B");
x = db[res.result];
z = {};
-x.find().forEach( function(a){ z[a[ks]] = a.value.count; } );
-assert.eq( 1 , z.a , "C1" );
-assert.eq( 1 , z.b , "C2" );
-assert.eq( 2 , z.c , "C3" );
+x.find().forEach(function(a) {
+ z[a[ks]] = a.value.count;
+});
+assert.eq(1, z.a, "C1");
+assert.eq(1, z.b, "C2");
+assert.eq(2, z.c, "C3");
x.drop();
-res = db.runCommand( { mapreduce : "mr1" , map : m2 , reduce : r2 , query : { x : { "$gt" : 2 } } , out : "mr1_out" } );
-d( res );
-assert.eq( 2 , res.counts.input , "B" );
+res =
+ db.runCommand({mapreduce: "mr1", map: m2, reduce: r2, query: {x: {"$gt": 2}}, out: "mr1_out"});
+d(res);
+assert.eq(2, res.counts.input, "B");
x = db[res.result];
z = {};
-x.find().forEach( function(a){ z[a[ks]] = a.value; } );
-assert.eq( 1 , z.a , "C1z" );
-assert.eq( 1 , z.b , "C2z" );
-assert.eq( 2 , z.c , "C3z" );
+x.find().forEach(function(a) {
+ z[a[ks]] = a.value;
+});
+assert.eq(1, z.a, "C1z");
+assert.eq(1, z.b, "C2z");
+assert.eq(2, z.c, "C3z");
x.drop();
-res = db.runCommand( { mapreduce : "mr1" , out : "mr1_foo" , map : m , reduce : r , query : { x : { "$gt" : 2 } } } );
-d( res );
-assert.eq( 2 , res.counts.input , "B2" );
-assert.eq( "mr1_foo" , res.result , "B2-c" );
+res = db.runCommand({mapreduce: "mr1", out: "mr1_foo", map: m, reduce: r, query: {x: {"$gt": 2}}});
+d(res);
+assert.eq(2, res.counts.input, "B2");
+assert.eq("mr1_foo", res.result, "B2-c");
x = db[res.result];
z = {};
-x.find().forEach( function(a){ z[a[ks]] = a.value.count; } );
-assert.eq( 1 , z.a , "C1a" );
-assert.eq( 1 , z.b , "C2a" );
-assert.eq( 2 , z.c , "C3a" );
+x.find().forEach(function(a) {
+ z[a[ks]] = a.value.count;
+});
+assert.eq(1, z.a, "C1a");
+assert.eq(1, z.b, "C2a");
+assert.eq(2, z.c, "C3a");
x.drop();
-for ( i=5; i<1000; i++ ){
- t.save( { x : i , tags : [ "b" , "d" ] } );
+for (i = 5; i < 1000; i++) {
+ t.save({x: i, tags: ["b", "d"]});
}
-res = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , out : "mr1_out" } );
-d( res );
-assert.eq( 999 , res.counts.input , "Z1" );
+res = db.runCommand({mapreduce: "mr1", map: m, reduce: r, out: "mr1_out"});
+d(res);
+assert.eq(999, res.counts.input, "Z1");
x = db[res.result];
-x.find().forEach( d );
-assert.eq( 4 , x.find().count() , "Z2" );
-assert.eq( "a,b,c,d" , x.distinct( ks ) , "Z3" );
+x.find().forEach(d);
+assert.eq(4, x.find().count(), "Z2");
+assert.eq("a,b,c,d", x.distinct(ks), "Z3");
-function getk( k ){
+function getk(k) {
var o = {};
o[ks] = k;
- return x.findOne( o );
+ return x.findOne(o);
}
-assert.eq( 2 , getk( "a" ).value.count , "ZA" );
-assert.eq( 998 , getk( "b" ).value.count , "ZB" );
-assert.eq( 3 , getk( "c" ).value.count , "ZC" );
-assert.eq( 995 , getk( "d" ).value.count , "ZD" );
+assert.eq(2, getk("a").value.count, "ZA");
+assert.eq(998, getk("b").value.count, "ZB");
+assert.eq(3, getk("c").value.count, "ZC");
+assert.eq(995, getk("d").value.count, "ZD");
x.drop();
-if ( true ){
- printjson( db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , verbose : true , out : "mr1_out" } ) );
+if (true) {
+ printjson(db.runCommand({mapreduce: "mr1", map: m, reduce: r, verbose: true, out: "mr1_out"}));
}
-print( "t1: " + Date.timeFunc(
- function(){
- var out = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , out : "mr1_out" } );
- if ( ks == "_id" ) assert( out.ok , "XXX : " + tojson( out ) );
- db[out.result].drop();
- } , 10 ) + " (~500 on 2.8ghz) - itcount: " + Date.timeFunc( function(){ db.mr1.find().itcount(); } , 10 ) );
-
-
+print("t1: " +
+ Date.timeFunc(
+ function() {
+ var out = db.runCommand({mapreduce: "mr1", map: m, reduce: r, out: "mr1_out"});
+ if (ks == "_id")
+ assert(out.ok, "XXX : " + tojson(out));
+ db[out.result].drop();
+ },
+ 10) +
+ " (~500 on 2.8ghz) - itcount: " +
+ Date.timeFunc(function() {
+ db.mr1.find().itcount();
+ }, 10));
// test doesn't exist
-res = db.runCommand( { mapreduce : "lasjdlasjdlasjdjasldjalsdj12e" , map : m , reduce : r , out : "mr1_out" } );
-assert( ! res.ok , "should be not ok" );
+res =
+ db.runCommand({mapreduce: "lasjdlasjdlasjdjasldjalsdj12e", map: m, reduce: r, out: "mr1_out"});
+assert(!res.ok, "should be not ok");
-if ( true ){
+if (true) {
correct = {};
-
- for ( i=0; i<20000; i++ ){
+
+ for (i = 0; i < 20000; i++) {
k = "Z" + i % 10000;
- if ( correct[k] )
+ if (correct[k])
correct[k]++;
else
correct[k] = 1;
- t.save( { x : i , tags : [ k ] } );
+ t.save({x: i, tags: [k]});
}
-
- res = db.runCommand( { mapreduce : "mr1" , out : "mr1_foo" , map : m , reduce : r } );
- d( res );
- print( "t2: " + res.timeMillis + " (~3500 on 2.8ghz) - itcount: " + Date.timeFunc( function(){ db.mr1.find().itcount(); } ) );
+
+ res = db.runCommand({mapreduce: "mr1", out: "mr1_foo", map: m, reduce: r});
+ d(res);
+ print("t2: " + res.timeMillis + " (~3500 on 2.8ghz) - itcount: " +
+ Date.timeFunc(function() {
+ db.mr1.find().itcount();
+ }));
x = db[res.result];
z = {};
- x.find().forEach( function(a){ z[a[ks]] = a.value.count; } );
- for ( zz in z ){
- if ( zz.indexOf( "Z" ) == 0 ){
- assert.eq( correct[zz] , z[zz] , "ZZ : " + zz );
+ x.find().forEach(function(a) {
+ z[a[ks]] = a.value.count;
+ });
+ for (zz in z) {
+ if (zz.indexOf("Z") == 0) {
+ assert.eq(correct[zz], z[zz], "ZZ : " + zz);
}
}
x.drop();
-
- res = db.runCommand( { mapreduce : "mr1" , out : "mr1_out" , map : m2 , reduce : r2 } );
- d(res);
- print( "t3: " + res.timeMillis + " (~3500 on 2.8ghz)" );
- res = db.runCommand( { mapreduce : "mr1" , map : m2 , reduce : r2 , out : { inline : true } } );
- print( "t4: " + res.timeMillis );
+ res = db.runCommand({mapreduce: "mr1", out: "mr1_out", map: m2, reduce: r2});
+ d(res);
+ print("t3: " + res.timeMillis + " (~3500 on 2.8ghz)");
+ res = db.runCommand({mapreduce: "mr1", map: m2, reduce: r2, out: {inline: true}});
+ print("t4: " + res.timeMillis);
}
-
-res = db.runCommand( { mapreduce : "mr1" , map : m , reduce : r , out : "mr1_out" } );
-assert( res.ok , "should be ok" );
+res = db.runCommand({mapreduce: "mr1", map: m, reduce: r, out: "mr1_out"});
+assert(res.ok, "should be ok");
t.drop();
t1 = db.mr1_out;
diff --git a/jstests/core/mr2.js b/jstests/core/mr2.js
index 21091c591b1..c13ff447970 100644
--- a/jstests/core/mr2.js
+++ b/jstests/core/mr2.js
@@ -3,83 +3,79 @@
t = db.mr2;
t.drop();
-t.save( { comments : [ { who : "a" , txt : "asdasdasd" } ,
- { who : "b" , txt : "asdasdasdasdasdasdas" } ] } );
+t.save({comments: [{who: "a", txt: "asdasdasd"}, {who: "b", txt: "asdasdasdasdasdasdas"}]});
-t.save( { comments : [ { who : "b" , txt : "asdasdasdaaa" } ,
- { who : "c" , txt : "asdasdasdaasdasdas" } ] } );
+t.save({comments: [{who: "b", txt: "asdasdasdaaa"}, {who: "c", txt: "asdasdasdaasdasdas"}]});
-
-
-function m(){
- for ( var i=0; i<this.comments.length; i++ ){
+function m() {
+ for (var i = 0; i < this.comments.length; i++) {
var c = this.comments[i];
- emit( c.who , { totalSize : c.txt.length , num : 1 } );
+ emit(c.who, {totalSize: c.txt.length, num: 1});
}
}
-function r( who , values ){
- var n = { totalSize : 0 , num : 0 };
- for ( var i=0; i<values.length; i++ ){
+function r(who, values) {
+ var n = {
+ totalSize: 0,
+ num: 0
+ };
+ for (var i = 0; i < values.length; i++) {
n.totalSize += values[i].totalSize;
n.num += values[i].num;
}
return n;
}
-function reformat( r ){
+function reformat(r) {
var x = {};
var cursor;
- if ( r.results )
+ if (r.results)
cursor = r.results;
else
cursor = r.find();
- cursor.forEach(
- function(z){
- x[z._id] = z.value;
- }
- );
+ cursor.forEach(function(z) {
+ x[z._id] = z.value;
+ });
return x;
}
-function f( who , res ){
+function f(who, res) {
res.avg = res.totalSize / res.num;
return res;
}
-res = t.mapReduce( m , r , { finalize : f , out : "mr2_out" } );
-printjson( res );
-x = reformat( res );
-assert.eq( 9 , x.a.avg , "A1" );
-assert.eq( 16 , x.b.avg , "A2" );
-assert.eq( 18 , x.c.avg , "A3" );
+res = t.mapReduce(m, r, {finalize: f, out: "mr2_out"});
+printjson(res);
+x = reformat(res);
+assert.eq(9, x.a.avg, "A1");
+assert.eq(16, x.b.avg, "A2");
+assert.eq(18, x.c.avg, "A3");
res.drop();
// inline does needs to exist - so set it to false to make sure the code is just checking for
// existence
-res = t.mapReduce( m , r , { finalize : f , out : { inline : 0 } } );
-printjson( res );
-x = reformat( res );
-assert.eq( 9 , x.a.avg , "B1" );
-assert.eq( 16 , x.b.avg , "B2" );
-assert.eq( 18 , x.c.avg , "B3" );
+res = t.mapReduce(m, r, {finalize: f, out: {inline: 0}});
+printjson(res);
+x = reformat(res);
+assert.eq(9, x.a.avg, "B1");
+assert.eq(16, x.b.avg, "B2");
+assert.eq(18, x.c.avg, "B3");
res.drop();
-assert( ! ( "result" in res ) , "B4" );
+assert(!("result" in res), "B4");
-res = t.mapReduce( m , r , { finalize : f , out : "mr2_out", jsMode: true } );
-printjson( res );
-x = reformat( res );
-assert.eq( 9 , x.a.avg , "A1" );
-assert.eq( 16 , x.b.avg , "A2" );
-assert.eq( 18 , x.c.avg , "A3" );
+res = t.mapReduce(m, r, {finalize: f, out: "mr2_out", jsMode: true});
+printjson(res);
+x = reformat(res);
+assert.eq(9, x.a.avg, "A1");
+assert.eq(16, x.b.avg, "A2");
+assert.eq(18, x.c.avg, "A3");
res.drop();
-res = t.mapReduce( m , r , { finalize : f , out : { inline : 5 }, jsMode: true } );
-printjson( res );
-x = reformat( res );
-assert.eq( 9 , x.a.avg , "B1" );
-assert.eq( 16 , x.b.avg , "B2" );
-assert.eq( 18 , x.c.avg , "B3" );
+res = t.mapReduce(m, r, {finalize: f, out: {inline: 5}, jsMode: true});
+printjson(res);
+x = reformat(res);
+assert.eq(9, x.a.avg, "B1");
+assert.eq(16, x.b.avg, "B2");
+assert.eq(18, x.c.avg, "B3");
res.drop();
-assert( ! ( "result" in res ) , "B4" );
-
+assert(!("result" in res), "B4");
diff --git a/jstests/core/mr3.js b/jstests/core/mr3.js
index 48b38e430f0..a2cee1f2d8f 100644
--- a/jstests/core/mr3.js
+++ b/jstests/core/mr3.js
@@ -2,46 +2,46 @@
t = db.mr3;
t.drop();
-t.save( { x : 1 , tags : [ "a" , "b" ] } );
-t.save( { x : 2 , tags : [ "b" , "c" ] } );
-t.save( { x : 3 , tags : [ "c" , "a" ] } );
-t.save( { x : 4 , tags : [ "b" , "c" ] } );
+t.save({x: 1, tags: ["a", "b"]});
+t.save({x: 2, tags: ["b", "c"]});
+t.save({x: 3, tags: ["c", "a"]});
+t.save({x: 4, tags: ["b", "c"]});
-m = function( n , x ){
+m = function(n, x) {
x = x || 1;
- this.tags.forEach(
- function(z){
- for ( var i=0; i<x; i++ )
- emit( z , { count : n || 1 } );
- }
- );
+ this.tags.forEach(function(z) {
+ for (var i = 0; i < x; i++)
+ emit(z, {count: n || 1});
+ });
};
-r = function( key , values ){
+r = function(key, values) {
var total = 0;
- for ( var i=0; i<values.length; i++ ){
+ for (var i = 0; i < values.length; i++) {
total += values[i].count;
}
- return { count : total };
+ return {
+ count: total
+ };
};
-res = t.mapReduce( m , r , { out : "mr3_out" } );
+res = t.mapReduce(m, r, {out: "mr3_out"});
z = res.convertToSingleObject();
-assert.eq( 3 , Object.keySet( z ).length , "A1" );
-assert.eq( 2 , z.a.count , "A2" );
-assert.eq( 3 , z.b.count , "A3" );
-assert.eq( 3 , z.c.count , "A4" );
+assert.eq(3, Object.keySet(z).length, "A1");
+assert.eq(2, z.a.count, "A2");
+assert.eq(3, z.b.count, "A3");
+assert.eq(3, z.c.count, "A4");
res.drop();
-res = t.mapReduce( m , r , { out : "mr3_out" , mapparams : [ 2 , 2 ] } );
+res = t.mapReduce(m, r, {out: "mr3_out", mapparams: [2, 2]});
z = res.convertToSingleObject();
-assert.eq( 3 , Object.keySet( z ).length , "B1" );
-assert.eq( 8 , z.a.count , "B2" );
-assert.eq( 12 , z.b.count , "B3" );
-assert.eq( 12 , z.c.count , "B4" );
+assert.eq(3, Object.keySet(z).length, "B1");
+assert.eq(8, z.a.count, "B2");
+assert.eq(12, z.b.count, "B3");
+assert.eq(12, z.c.count, "B4");
res.drop();
@@ -49,25 +49,28 @@ res.drop();
realm = m;
-m = function(){
- emit( this._id , 1 );
+m = function() {
+ emit(this._id, 1);
};
-res = t.mapReduce( m , r , { out : "mr3_out" } );
+res = t.mapReduce(m, r, {out: "mr3_out"});
res.drop();
-m = function(){
- emit( this._id , this.xzz.a );
+m = function() {
+ emit(this._id, this.xzz.a);
};
before = db.getCollectionNames().length;
-assert.throws( function(){ t.mapReduce( m , r , { out : "mr3_out" } ); } );
-assert.eq( before , db.getCollectionNames().length , "after throw crap" );
-
+assert.throws(function() {
+ t.mapReduce(m, r, {out: "mr3_out"});
+});
+assert.eq(before, db.getCollectionNames().length, "after throw crap");
m = realm;
-r = function( k , v ){
+r = function(k, v) {
return v.x.x.x;
};
before = db.getCollectionNames().length;
-assert.throws( function(){ t.mapReduce( m , r , "mr3_out" ); } );
-assert.eq( before , db.getCollectionNames().length , "after throw crap" );
+assert.throws(function() {
+ t.mapReduce(m, r, "mr3_out");
+});
+assert.eq(before, db.getCollectionNames().length, "after throw crap");
diff --git a/jstests/core/mr4.js b/jstests/core/mr4.js
index 2e989c19bff..ae5e11528af 100644
--- a/jstests/core/mr4.js
+++ b/jstests/core/mr4.js
@@ -2,44 +2,43 @@
t = db.mr4;
t.drop();
-t.save( { x : 1 , tags : [ "a" , "b" ] } );
-t.save( { x : 2 , tags : [ "b" , "c" ] } );
-t.save( { x : 3 , tags : [ "c" , "a" ] } );
-t.save( { x : 4 , tags : [ "b" , "c" ] } );
-
-m = function(){
- this.tags.forEach(
- function(z){
- emit( z , { count : xx } );
- }
- );
+t.save({x: 1, tags: ["a", "b"]});
+t.save({x: 2, tags: ["b", "c"]});
+t.save({x: 3, tags: ["c", "a"]});
+t.save({x: 4, tags: ["b", "c"]});
+
+m = function() {
+ this.tags.forEach(function(z) {
+ emit(z, {count: xx});
+ });
};
-r = function( key , values ){
+r = function(key, values) {
var total = 0;
- for ( var i=0; i<values.length; i++ ){
+ for (var i = 0; i < values.length; i++) {
total += values[i].count;
}
- return { count : total };
+ return {
+ count: total
+ };
};
-res = t.mapReduce( m , r , { out : "mr4_out" , scope : { xx : 1 } } );
+res = t.mapReduce(m, r, {out: "mr4_out", scope: {xx: 1}});
z = res.convertToSingleObject();
-assert.eq( 3 , Object.keySet( z ).length , "A1" );
-assert.eq( 2 , z.a.count , "A2" );
-assert.eq( 3 , z.b.count , "A3" );
-assert.eq( 3 , z.c.count , "A4" );
+assert.eq(3, Object.keySet(z).length, "A1");
+assert.eq(2, z.a.count, "A2");
+assert.eq(3, z.b.count, "A3");
+assert.eq(3, z.c.count, "A4");
res.drop();
-
-res = t.mapReduce( m , r , { scope : { xx : 2 } , out : "mr4_out" } );
+res = t.mapReduce(m, r, {scope: {xx: 2}, out: "mr4_out"});
z = res.convertToSingleObject();
-assert.eq( 3 , Object.keySet( z ).length , "A1" );
-assert.eq( 4 , z.a.count , "A2" );
-assert.eq( 6 , z.b.count , "A3" );
-assert.eq( 6 , z.c.count , "A4" );
+assert.eq(3, Object.keySet(z).length, "A1");
+assert.eq(4, z.a.count, "A2");
+assert.eq(6, z.b.count, "A3");
+assert.eq(6, z.c.count, "A4");
res.drop();
diff --git a/jstests/core/mr5.js b/jstests/core/mr5.js
index 786ef2cb8bf..537625e954b 100644
--- a/jstests/core/mr5.js
+++ b/jstests/core/mr5.js
@@ -2,57 +2,58 @@
t = db.mr5;
t.drop();
-t.save( { "partner" : 1, "visits" : 9 } );
-t.save( { "partner" : 2, "visits" : 9 } );
-t.save( { "partner" : 1, "visits" : 11 } );
-t.save( { "partner" : 1, "visits" : 30 } );
-t.save( { "partner" : 2, "visits" : 41 } );
-t.save( { "partner" : 2, "visits" : 41 } );
-
-m = function(){
- emit( this.partner , { stats : [ this.visits ] } );
+t.save({"partner": 1, "visits": 9});
+t.save({"partner": 2, "visits": 9});
+t.save({"partner": 1, "visits": 11});
+t.save({"partner": 1, "visits": 30});
+t.save({"partner": 2, "visits": 41});
+t.save({"partner": 2, "visits": 41});
+
+m = function() {
+ emit(this.partner, {stats: [this.visits]});
};
-r = function( k , v ){
+r = function(k, v) {
var stats = [];
var total = 0;
- for ( var i=0; i<v.length; i++ ){
- for ( var j in v[i].stats ) {
- stats.push( v[i].stats[j] );
+ for (var i = 0; i < v.length; i++) {
+ for (var j in v[i].stats) {
+ stats.push(v[i].stats[j]);
total += v[i].stats[j];
}
}
- return { stats : stats , total : total };
+ return {
+ stats: stats,
+ total: total
+ };
};
-res = t.mapReduce( m , r , { out : "mr5_out" , scope : { xx : 1 } } );
-//res.find().forEach( printjson )
+res = t.mapReduce(m, r, {out: "mr5_out", scope: {xx: 1}});
+// res.find().forEach( printjson )
z = res.convertToSingleObject();
-assert.eq( 2 , Object.keySet( z ).length , "A1" );
-assert.eq( [ 9 , 11 , 30 ] , z["1"].stats , "A2" );
-assert.eq( [ 9 , 41 , 41 ] , z["2"].stats , "A3" );
-
+assert.eq(2, Object.keySet(z).length, "A1");
+assert.eq([9, 11, 30], z["1"].stats, "A2");
+assert.eq([9, 41, 41], z["2"].stats, "A3");
res.drop();
-m = function(){
+m = function() {
var x = "partner";
var y = "visits";
- emit( this[x] , { stats : [ this[y] ] } );
+ emit(this [x],
+ {
+ stats:
+ [this[y]]
+ });
};
-
-
-res = t.mapReduce( m , r , { out : "mr5_out" , scope : { xx : 1 } } );
-//res.find().forEach( printjson )
+res = t.mapReduce(m, r, {out: "mr5_out", scope: {xx: 1}});
+// res.find().forEach( printjson )
z = res.convertToSingleObject();
-assert.eq( 2 , Object.keySet( z ).length , "B1" );
-assert.eq( [ 9 , 11 , 30 ] , z["1"].stats , "B2" );
-assert.eq( [ 9 , 41 , 41 ] , z["2"].stats , "B3" );
-
+assert.eq(2, Object.keySet(z).length, "B1");
+assert.eq([9, 11, 30], z["1"].stats, "B2");
+assert.eq([9, 41, 41], z["2"].stats, "B3");
res.drop();
-
-
diff --git a/jstests/core/mr_bigobject.js b/jstests/core/mr_bigobject.js
index b7bfed4e4ab..d87b2af4cdc 100644
--- a/jstests/core/mr_bigobject.js
+++ b/jstests/core/mr_bigobject.js
@@ -4,35 +4,36 @@ t.drop();
var large = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
var s = large;
-while ( s.length < ( 6 * 1024 * 1024 ) ){
+while (s.length < (6 * 1024 * 1024)) {
s += large;
}
-for ( i=0; i<5; i++ )
- t.insert( { _id : i , s : s } );
+for (i = 0; i < 5; i++)
+ t.insert({_id: i, s: s});
-m = function(){
- emit( 1 , this.s + this.s );
+m = function() {
+ emit(1, this.s + this.s);
};
-r = function( k , v ){
+r = function(k, v) {
return 1;
};
-assert.throws( function(){ r = t.mapReduce( m , r , "mr_bigobject_out" ); } , null , "emit should fail" );
+assert.throws(function() {
+ r = t.mapReduce(m, r, "mr_bigobject_out");
+}, null, "emit should fail");
-
-m = function(){
- emit( 1 , this.s );
+m = function() {
+ emit(1, this.s);
};
-assert.eq( { 1 : 1 } , t.mapReduce( m , r , "mr_bigobject_out" ).convertToSingleObject() , "A1" );
+assert.eq({1: 1}, t.mapReduce(m, r, "mr_bigobject_out").convertToSingleObject(), "A1");
-r = function( k , v ){
+r = function(k, v) {
total = 0;
- for ( var i=0; i<v.length; i++ ){
+ for (var i = 0; i < v.length; i++) {
var x = v[i];
- if ( typeof( x ) == "number" )
+ if (typeof(x) == "number")
total += x;
else
total += x.length;
@@ -40,6 +41,8 @@ r = function( k , v ){
return total;
};
-assert.eq( { 1 : t.count() * s.length } , t.mapReduce( m , r , "mr_bigobject_out" ).convertToSingleObject() , "A1" );
+assert.eq({1: t.count() * s.length},
+ t.mapReduce(m, r, "mr_bigobject_out").convertToSingleObject(),
+ "A1");
t.drop();
diff --git a/jstests/core/mr_bigobject_replace.js b/jstests/core/mr_bigobject_replace.js
index dbed4664e7a..28a295c1b2e 100644
--- a/jstests/core/mr_bigobject_replace.js
+++ b/jstests/core/mr_bigobject_replace.js
@@ -14,7 +14,10 @@
// Returns a document of the form { _id: ObjectId(...), value: '...' } with the specified
// 'targetSize' in bytes.
function makeDocWithSize(targetSize) {
- var doc = {_id: new ObjectId(), value: ''};
+ var doc = {
+ _id: new ObjectId(),
+ value: ''
+ };
var size = Object.bsonsize(doc);
assert.gte(targetSize, size);
@@ -38,17 +41,25 @@
// Insert a document so the mapper gets run.
assert.writeOK(db.input.insert({}));
- var res = db.runCommand(Object.extend({
- mapReduce: "input",
- map: mapper,
- out: {replace: "mr_bigobject_replace"},
- }, testOptions));
+ var res = db.runCommand(Object.extend(
+ {
+ mapReduce: "input",
+ map: mapper,
+ out: {replace: "mr_bigobject_replace"},
+ },
+ testOptions));
assert.commandFailed(res, "creating a document larger than 16MB didn't fail");
- assert.lte(0, res.errmsg.indexOf("object to insert too large"),
+ assert.lte(0,
+ res.errmsg.indexOf("object to insert too large"),
"map-reduce command failed for a reason other than inserting a large document");
}
runTest({reduce: createBigDocument});
- runTest({reduce: function() { return 1; }, finalize: createBigDocument});
+ runTest({
+ reduce: function() {
+ return 1;
+ },
+ finalize: createBigDocument
+ });
})();
diff --git a/jstests/core/mr_comments.js b/jstests/core/mr_comments.js
index 406ddb40a45..503bded9bd0 100644
--- a/jstests/core/mr_comments.js
+++ b/jstests/core/mr_comments.js
@@ -2,27 +2,27 @@
t = db.mr_comments;
t.drop();
-t.insert( { foo : 1 } );
-t.insert( { foo : 1 } );
-t.insert( { foo : 2 } );
+t.insert({foo: 1});
+t.insert({foo: 1});
+t.insert({foo: 2});
-res = db.runCommand(
- { mapreduce : "mr_comments",
- map : "// This will fail\n\n // Emit some stuff\n emit(this.foo, 1)\n",
- reduce : function(key, values){
- return Array.sum(values);
- },
- out: "mr_comments_out"
- });
-assert.eq( 3 , res.counts.emit );
+res = db.runCommand({
+ mapreduce: "mr_comments",
+ map: "// This will fail\n\n // Emit some stuff\n emit(this.foo, 1)\n",
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: "mr_comments_out"
+});
+assert.eq(3, res.counts.emit);
-res = db.runCommand(
- { mapreduce : "mr_comments",
- map : "// This will fail\nfunction(){\n // Emit some stuff\n emit(this.foo, 1)\n}\n",
- reduce : function(key, values){
- return Array.sum(values);
- },
- out: "mr_comments_out"
- });
+res = db.runCommand({
+ mapreduce: "mr_comments",
+ map: "// This will fail\nfunction(){\n // Emit some stuff\n emit(this.foo, 1)\n}\n",
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: "mr_comments_out"
+});
-assert.eq( 3 , res.counts.emit );
+assert.eq(3, res.counts.emit);
diff --git a/jstests/core/mr_errorhandling.js b/jstests/core/mr_errorhandling.js
index 1bd94bbd56e..280d6e76891 100644
--- a/jstests/core/mr_errorhandling.js
+++ b/jstests/core/mr_errorhandling.js
@@ -2,48 +2,49 @@
t = db.mr_errorhandling;
t.drop();
-t.save( { a : [ 1 , 2 , 3 ] } );
-t.save( { a : [ 2 , 3 , 4 ] } );
+t.save({a: [1, 2, 3]});
+t.save({a: [2, 3, 4]});
-m_good = function(){
- for ( var i=0; i<this.a.length; i++ ){
- emit( this.a[i] , 1 );
+m_good = function() {
+ for (var i = 0; i < this.a.length; i++) {
+ emit(this.a[i], 1);
}
};
-m_bad = function(){
- for ( var i=0; i<this.a.length; i++ ){
- emit( this.a[i] );
+m_bad = function() {
+ for (var i = 0; i < this.a.length; i++) {
+ emit(this.a[i]);
}
};
-r = function( k , v ){
+r = function(k, v) {
var total = 0;
- for ( var i=0; i<v.length; i++ )
+ for (var i = 0; i < v.length; i++)
total += v[i];
return total;
};
-res = t.mapReduce( m_good , r , "mr_errorhandling_out" );
-assert.eq( { 1 : 1 , 2 : 2 , 3 : 2 , 4 : 1 } , res.convertToSingleObject() , "A" );
+res = t.mapReduce(m_good, r, "mr_errorhandling_out");
+assert.eq({1: 1, 2: 2, 3: 2, 4: 1}, res.convertToSingleObject(), "A");
res.drop();
res = null;
theerror = null;
try {
- res = t.mapReduce( m_bad , r , "mr_errorhandling_out" );
-}
-catch ( e ){
+ res = t.mapReduce(m_bad, r, "mr_errorhandling_out");
+} catch (e) {
theerror = e.toString();
}
-assert.isnull( res , "B1" );
-assert( theerror , "B2" );
-assert( theerror.indexOf( "emit" ) >= 0 , "B3" );
+assert.isnull(res, "B1");
+assert(theerror, "B2");
+assert(theerror.indexOf("emit") >= 0, "B3");
// test things are still in an ok state
-res = t.mapReduce( m_good , r , "mr_errorhandling_out" );
-assert.eq( { 1 : 1 , 2 : 2 , 3 : 2 , 4 : 1 } , res.convertToSingleObject() , "A" );
+res = t.mapReduce(m_good, r, "mr_errorhandling_out");
+assert.eq({1: 1, 2: 2, 3: 2, 4: 1}, res.convertToSingleObject(), "A");
res.drop();
-assert.throws( function(){ t.mapReduce( m_good , r , { out : "xxx" , query : "foo" } ); } );
+assert.throws(function() {
+ t.mapReduce(m_good, r, {out: "xxx", query: "foo"});
+});
diff --git a/jstests/core/mr_index.js b/jstests/core/mr_index.js
index fd650852871..796dbe5c562 100644
--- a/jstests/core/mr_index.js
+++ b/jstests/core/mr_index.js
@@ -6,34 +6,34 @@ outName = "mr_index_out";
out = db[outName];
out.drop();
-t.insert( { tags : [ 1 ] } );
-t.insert( { tags : [ 1 , 2 ] } );
-t.insert( { tags : [ 1 , 2 , 3 ] } );
-t.insert( { tags : [ 3 ] } );
-t.insert( { tags : [ 2 , 3 ] } );
-t.insert( { tags : [ 2 , 3 ] } );
-t.insert( { tags : [ 1 , 2 ] } );
-
-m = function(){
- for ( i=0; i<this.tags.length; i++ )
- emit( this.tags[i] , 1 );
+t.insert({tags: [1]});
+t.insert({tags: [1, 2]});
+t.insert({tags: [1, 2, 3]});
+t.insert({tags: [3]});
+t.insert({tags: [2, 3]});
+t.insert({tags: [2, 3]});
+t.insert({tags: [1, 2]});
+
+m = function() {
+ for (i = 0; i < this.tags.length; i++)
+ emit(this.tags[i], 1);
};
-r = function( k , vs ){
- return Array.sum( vs );
+r = function(k, vs) {
+ return Array.sum(vs);
};
-ex = function(){
- return out.find().sort( { value : 1 } ).explain("executionStats");
+ex = function() {
+ return out.find().sort({value: 1}).explain("executionStats");
};
-res = t.mapReduce( m , r , { out : outName } );
+res = t.mapReduce(m, r, {out: outName});
-assert.eq( 3 , ex().executionStats.nReturned , "A1" );
-out.ensureIndex( { value : 1 } );
-assert.eq( 3 , ex().executionStats.nReturned , "A2" );
+assert.eq(3, ex().executionStats.nReturned, "A1");
+out.ensureIndex({value: 1});
+assert.eq(3, ex().executionStats.nReturned, "A2");
-res = t.mapReduce( m , r , { out : outName } );
+res = t.mapReduce(m, r, {out: outName});
-assert.eq( 3 , ex().executionStats.nReturned , "B1" );
+assert.eq(3, ex().executionStats.nReturned, "B1");
res.drop();
diff --git a/jstests/core/mr_index2.js b/jstests/core/mr_index2.js
index 3255f5507f4..99741fedcbd 100644
--- a/jstests/core/mr_index2.js
+++ b/jstests/core/mr_index2.js
@@ -2,21 +2,24 @@
t = db.mr_index2;
t.drop();
-t.save( { arr : [1, 2] } );
+t.save({arr: [1, 2]});
-map = function() { emit(this._id, 1); };
-reduce = function(k,vals) { return Array.sum( vals ); };
+map = function() {
+ emit(this._id, 1);
+};
+reduce = function(k, vals) {
+ return Array.sum(vals);
+};
-res = t.mapReduce(map,reduce, { out : "mr_index2_out" , query : {} });
-assert.eq( 1 ,res.counts.input , "A" );
+res = t.mapReduce(map, reduce, {out: "mr_index2_out", query: {}});
+assert.eq(1, res.counts.input, "A");
res.drop();
-res = t.mapReduce(map,reduce, { out : "mr_index2_out" , query : { arr: {$gte:0} } });
-assert.eq( 1 ,res.counts.input , "B" );
+res = t.mapReduce(map, reduce, {out: "mr_index2_out", query: {arr: {$gte: 0}}});
+assert.eq(1, res.counts.input, "B");
res.drop();
-t.ensureIndex({arr:1});
-res = t.mapReduce(map,reduce, { out : "mr_index2_out" , query : { arr: {$gte:0} } });
-assert.eq( 1 ,res.counts.input , "C" );
+t.ensureIndex({arr: 1});
+res = t.mapReduce(map, reduce, {out: "mr_index2_out", query: {arr: {$gte: 0}}});
+assert.eq(1, res.counts.input, "C");
res.drop();
-
diff --git a/jstests/core/mr_index3.js b/jstests/core/mr_index3.js
index d667a844ec9..bac61cb6bc1 100644
--- a/jstests/core/mr_index3.js
+++ b/jstests/core/mr_index3.js
@@ -1,50 +1,61 @@
t = db.mr_index3;
-t.drop();
-
-t.insert( { _id : 1, name : 'name1', tags : ['dog', 'cat'] } );
-t.insert( { _id : 2, name : 'name2', tags : ['cat'] } );
-t.insert( { _id : 3, name : 'name3', tags : ['mouse', 'cat', 'dog'] } );
-t.insert( { _id : 4, name : 'name4', tags : [] } );
-
-m = function(){
- for ( var i=0; i<this.tags.length; i++ )
- emit( this.tags[i] , 1 );
-};
-
-r = function( key , values ){
- return Array.sum( values );
-};
-
-a1 = db.runCommand({ mapreduce : 'mr_index3', map : m, reduce : r , out : { inline : true } } ).results;
-a2 = db.runCommand({ mapreduce : 'mr_index3', map : m, reduce : r, query: {name : 'name1'} , out : { inline : true }}).results;
-a3 = db.runCommand({ mapreduce : 'mr_index3', map : m, reduce : r, query: {name : {$gt:'name'} } , out : { inline : true }}).results;
-
-assert.eq( [
- {
- "_id" : "cat",
- "value" : 3
- },
- {
- "_id" : "dog",
- "value" : 2
- },
- {
- "_id" : "mouse",
- "value" : 1
- }
-] , a1 , "A1" );
-assert.eq( [ { "_id" : "cat", "value" : 1 }, { "_id" : "dog", "value" : 1 } ] , a2 , "A2" );
-assert.eq( a1 , a3 , "A3" );
-
-t.ensureIndex({name:1, tags:1});
-
-b1 = db.runCommand({ mapreduce : 'mr_index3', map : m, reduce : r , out : { inline : true } } ).results;
-b2 = db.runCommand({ mapreduce : 'mr_index3', map : m, reduce : r, query: {name : 'name1'} , out : { inline : true }}).results;
-b3 = db.runCommand({ mapreduce : 'mr_index3', map : m, reduce : r, query: {name : {$gt:'name'} } , out : { inline : true }}).results;
-
-assert.eq( a1 , b1 , "AB1" );
-assert.eq( a2 , b2 , "AB2" );
-assert.eq( a3 , b3 , "AB3" );
-
-
+t.drop();
+
+t.insert({_id: 1, name: 'name1', tags: ['dog', 'cat']});
+t.insert({_id: 2, name: 'name2', tags: ['cat']});
+t.insert({_id: 3, name: 'name3', tags: ['mouse', 'cat', 'dog']});
+t.insert({_id: 4, name: 'name4', tags: []});
+
+m = function() {
+ for (var i = 0; i < this.tags.length; i++)
+ emit(this.tags[i], 1);
+};
+
+r = function(key, values) {
+ return Array.sum(values);
+};
+
+a1 = db.runCommand({mapreduce: 'mr_index3', map: m, reduce: r, out: {inline: true}}).results;
+a2 = db.runCommand({
+ mapreduce: 'mr_index3',
+ map: m,
+ reduce: r,
+ query: {name: 'name1'},
+ out: {inline: true}
+}).results;
+a3 = db.runCommand({
+ mapreduce: 'mr_index3',
+ map: m,
+ reduce: r,
+ query: {name: {$gt: 'name'}},
+ out: {inline: true}
+}).results;
+
+assert.eq([{"_id": "cat", "value": 3}, {"_id": "dog", "value": 2}, {"_id": "mouse", "value": 1}],
+ a1,
+ "A1");
+assert.eq([{"_id": "cat", "value": 1}, {"_id": "dog", "value": 1}], a2, "A2");
+assert.eq(a1, a3, "A3");
+
+t.ensureIndex({name: 1, tags: 1});
+
+b1 = db.runCommand({mapreduce: 'mr_index3', map: m, reduce: r, out: {inline: true}}).results;
+b2 = db.runCommand({
+ mapreduce: 'mr_index3',
+ map: m,
+ reduce: r,
+ query: {name: 'name1'},
+ out: {inline: true}
+}).results;
+b3 = db.runCommand({
+ mapreduce: 'mr_index3',
+ map: m,
+ reduce: r,
+ query: {name: {$gt: 'name'}},
+ out: {inline: true}
+}).results;
+
+assert.eq(a1, b1, "AB1");
+assert.eq(a2, b2, "AB2");
+assert.eq(a3, b3, "AB3");
diff --git a/jstests/core/mr_killop.js b/jstests/core/mr_killop.js
index 3f9cf52052d..c4d8b666f11 100644
--- a/jstests/core/mr_killop.js
+++ b/jstests/core/mr_killop.js
@@ -5,32 +5,26 @@ t.drop();
t2 = db.jstests_mr_killop_out;
t2.drop();
-function debug( x ) {
-// printjson( x );
+function debug(x) {
+ // printjson( x );
}
/** @return op code for map reduce op created by spawned shell, or that op's child */
-function op( childLoop ) {
+function op(childLoop) {
p = db.currentOp().inprog;
- debug( p );
- for ( var i in p ) {
- var o = p[ i ];
+ debug(p);
+ for (var i in p) {
+ var o = p[i];
// Identify a map/reduce or where distinct operation by its collection, whether or not
// it is currently active.
- if ( childLoop ) {
- if ( ( o.active || o.waitingForLock ) &&
- o.query &&
- o.query.query &&
- o.query.query.$where &&
- o.query.distinct == "jstests_mr_killop" ) {
+ if (childLoop) {
+ if ((o.active || o.waitingForLock) && o.query && o.query.query &&
+ o.query.query.$where && o.query.distinct == "jstests_mr_killop") {
return o.opid;
}
- }
- else {
- if ( ( o.active || o.waitingForLock ) &&
- o.query &&
- o.query.mapreduce &&
- o.query.mapreduce == "jstests_mr_killop" ) {
+ } else {
+ if ((o.active || o.waitingForLock) && o.query && o.query.mapreduce &&
+ o.query.mapreduce == "jstests_mr_killop") {
return o.opid;
}
}
@@ -46,113 +40,139 @@ function op( childLoop ) {
* This is necessay for a child distinct $where of a map reduce op because child
* ops currently mask parent ops in currentOp.
*/
-function testOne( map, reduce, finalize, scope, childLoop, wait ) {
- debug( "testOne - map = " + tojson( map ) + "; reduce = " + tojson( reduce ) +
- "; finalize = " + tojson( finalize ) + "; scope = " + tojson( scope ) +
- "; childLoop = " + childLoop + "; wait = " + wait );
-
+function testOne(map, reduce, finalize, scope, childLoop, wait) {
+ debug("testOne - map = " + tojson(map) + "; reduce = " + tojson(reduce) + "; finalize = " +
+ tojson(finalize) + "; scope = " + tojson(scope) + "; childLoop = " + childLoop +
+ "; wait = " + wait);
+
t.drop();
t2.drop();
// Ensure we have 2 documents for the reduce to run
- t.save( {a:1} );
- t.save( {a:1} );
+ t.save({a: 1});
+ t.save({a: 1});
spec = {
- mapreduce:"jstests_mr_killop",
- out:"jstests_mr_killop_out",
+ mapreduce: "jstests_mr_killop",
+ out: "jstests_mr_killop_out",
map: map,
reduce: reduce
};
- if ( finalize ) {
- spec[ "finalize" ] = finalize;
+ if (finalize) {
+ spec["finalize"] = finalize;
}
- if ( scope ) {
- spec[ "scope" ] = scope;
+ if (scope) {
+ spec["scope"] = scope;
}
// Windows shell strips all double quotes from command line, so use
// single quotes.
- stringifiedSpec = tojson( spec ).toString().replace( /\n/g, ' ' ).replace( /\"/g, "\'" );
-
+ stringifiedSpec = tojson(spec).toString().replace(/\n/g, ' ').replace(/\"/g, "\'");
+
// The assert below won't be caught by this test script, but it will cause error messages
// to be printed.
- var awaitShell = startParallelShell( "assert.commandWorked( db.runCommand( " +
- stringifiedSpec + " ) );" );
-
- if ( wait ) {
- sleep( 2000 );
+ var awaitShell =
+ startParallelShell("assert.commandWorked( db.runCommand( " + stringifiedSpec + " ) );");
+
+ if (wait) {
+ sleep(2000);
}
-
+
o = null;
- assert.soon( function() { o = op( childLoop ); return o != -1; } );
+ assert.soon(function() {
+ o = op(childLoop);
+ return o != -1;
+ });
+
+ res = db.killOp(o);
+ debug("did kill : " + tojson(res));
- res = db.killOp( o );
- debug( "did kill : " + tojson( res ) );
-
// When the map reduce op is killed, the spawned shell will exit
var exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(0, exitCode,
+ assert.neq(0,
+ exitCode,
"expected shell to exit abnormally due to map-reduce execution being terminated");
- debug( "parallel shell completed" );
-
- assert.eq( -1, op( childLoop ) );
+ debug("parallel shell completed");
+
+ assert.eq(-1, op(childLoop));
}
/** Test using wait and non wait modes */
-function test( map, reduce, finalize, scope, childLoop ) {
- debug( " Non wait mode" );
- testOne( map, reduce, finalize, scope, childLoop, false );
+function test(map, reduce, finalize, scope, childLoop) {
+ debug(" Non wait mode");
+ testOne(map, reduce, finalize, scope, childLoop, false);
- debug( " Wait mode" );
- testOne( map, reduce, finalize, scope, childLoop, true );
+ debug(" Wait mode");
+ testOne(map, reduce, finalize, scope, childLoop, true);
}
/** Test looping in map and reduce functions */
-function runMRTests( loop, childLoop ) {
- debug( " Running MR test - loop map function. no scope " );
- test( loop, // map
- function( k, v ) { return v[ 0 ]; }, // reduce
- null, // finalize
- null, // scope
- childLoop );
-
- debug( " Running MR test - loop reduce function " );
- test( function() { emit( this.a, 1 ); }, // map
- loop, // reduce
- null, // finalize
- null, // scope
- childLoop );
-
- debug( " Running finalization test - loop map function. with scope " );
- test( function() { loop(); }, // map
- function( k, v ) { return v[ 0 ]; }, // reduce
- null, // finalize
- { loop: loop }, // scope
- childLoop );
+function runMRTests(loop, childLoop) {
+ debug(" Running MR test - loop map function. no scope ");
+ test(loop, // map
+ function(k, v) {
+ return v[0];
+ }, // reduce
+ null, // finalize
+ null, // scope
+ childLoop);
+
+ debug(" Running MR test - loop reduce function ");
+ test(
+ function() {
+ emit(this.a, 1);
+ }, // map
+ loop, // reduce
+ null, // finalize
+ null, // scope
+ childLoop);
+
+ debug(" Running finalization test - loop map function. with scope ");
+ test(
+ function() {
+ loop();
+ }, // map
+ function(k, v) {
+ return v[0];
+ }, // reduce
+ null, // finalize
+ {loop: loop}, // scope
+ childLoop);
}
/** Test looping in finalize function */
-function runFinalizeTests( loop, childLoop ) {
- debug( " Running finalization test - no scope " );
- test( function() { emit( this.a, 1 ); }, // map
- function( k, v ) { return v[ 0 ]; }, // reduce
- loop, // finalize
- null, // scope
- childLoop );
-
- debug( " Running finalization test - with scope " );
- test( function() { emit( this.a, 1 ); }, // map
- function( k, v ) { return v[ 0 ]; }, // reduce
- function( a, b ) { loop(); }, // finalize
- { loop: loop }, // scope
- childLoop );
+function runFinalizeTests(loop, childLoop) {
+ debug(" Running finalization test - no scope ");
+ test(
+ function() {
+ emit(this.a, 1);
+ }, // map
+ function(k, v) {
+ return v[0];
+ }, // reduce
+ loop, // finalize
+ null, // scope
+ childLoop);
+
+ debug(" Running finalization test - with scope ");
+ test(
+ function() {
+ emit(this.a, 1);
+ }, // map
+ function(k, v) {
+ return v[0];
+ }, // reduce
+ function(a, b) {
+ loop();
+ }, // finalize
+ {loop: loop}, // scope
+ childLoop);
}
// Run inside server. No access to debug().
var loop = function() {
- while( 1 ) {
- sleep( 1000 );
+ while (1) {
+ sleep(1000);
}
};
-runMRTests( loop, false );
-runFinalizeTests( loop, false );
+runMRTests(loop, false);
+runFinalizeTests(loop, false);
diff --git a/jstests/core/mr_merge.js b/jstests/core/mr_merge.js
index 83d00f39392..92490cdd6fd 100644
--- a/jstests/core/mr_merge.js
+++ b/jstests/core/mr_merge.js
@@ -2,59 +2,66 @@
t = db.mr_merge;
t.drop();
-t.insert( { a : [ 1 , 2 ] } );
-t.insert( { a : [ 2 , 3 ] } );
-t.insert( { a : [ 3 , 4 ] } );
+t.insert({a: [1, 2]});
+t.insert({a: [2, 3]});
+t.insert({a: [3, 4]});
outName = "mr_merge_out";
out = db[outName];
out.drop();
-m = function(){ for (i=0; i<this.a.length; i++ ) emit( this.a[i] , 1 ); };
-r = function(k,vs){ return Array.sum( vs ); };
+m = function() {
+ for (i = 0; i < this.a.length; i++)
+ emit(this.a[i], 1);
+};
+r = function(k, vs) {
+ return Array.sum(vs);
+};
-function tos( o ){
+function tos(o) {
var s = "";
- for ( var i=0; i<100; i++ ){
- if ( o[i] )
+ for (var i = 0; i < 100; i++) {
+ if (o[i])
s += i + "_" + o[i];
}
return s;
}
+res = t.mapReduce(m, r, {out: outName});
-res = t.mapReduce( m , r , { out : outName } );
+expected = {
+ "1": 1,
+ "2": 2,
+ "3": 2,
+ "4": 1
+};
+assert.eq(tos(expected), tos(res.convertToSingleObject()), "A");
-
-expected = { "1" : 1 , "2" : 2 , "3" : 2 , "4" : 1 };
-assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "A" );
-
-t.insert( { a : [ 4 , 5 ] } );
-out.insert( { _id : 10 , value : "5" } );
-res = t.mapReduce( m , r , { out : outName } );
+t.insert({a: [4, 5]});
+out.insert({_id: 10, value: "5"});
+res = t.mapReduce(m, r, {out: outName});
expected["4"]++;
expected["5"] = 1;
-assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "B" );
+assert.eq(tos(expected), tos(res.convertToSingleObject()), "B");
-t.insert( { a : [ 5 , 6 ] } );
-out.insert( { _id : 10 , value : "5" } );
-res = t.mapReduce( m , r , { out : { merge : outName } } );
+t.insert({a: [5, 6]});
+out.insert({_id: 10, value: "5"});
+res = t.mapReduce(m, r, {out: {merge: outName}});
expected["5"]++;
expected["10"] = 5;
expected["6"] = 1;
-assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "C" );
+assert.eq(tos(expected), tos(res.convertToSingleObject()), "C");
// test that the nonAtomic output gives valid result
-t.insert( { a : [ 6 , 7 ] } );
-out.insert( { _id : 20 , value : "10" } );
-res = t.mapReduce( m , r , { out : { merge : outName, nonAtomic: true } } );
+t.insert({a: [6, 7]});
+out.insert({_id: 20, value: "10"});
+res = t.mapReduce(m, r, {out: {merge: outName, nonAtomic: true}});
expected["6"]++;
expected["20"] = 10;
expected["7"] = 1;
-assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "D" );
-
+assert.eq(tos(expected), tos(res.convertToSingleObject()), "D");
diff --git a/jstests/core/mr_merge2.js b/jstests/core/mr_merge2.js
index a4835e4397a..e324c124f29 100644
--- a/jstests/core/mr_merge2.js
+++ b/jstests/core/mr_merge2.js
@@ -2,36 +2,46 @@
t = db.mr_merge2;
t.drop();
-t.insert( { a : [ 1 , 2 ] } );
-t.insert( { a : [ 2 , 3 ] } );
-t.insert( { a : [ 3 , 4 ] } );
+t.insert({a: [1, 2]});
+t.insert({a: [2, 3]});
+t.insert({a: [3, 4]});
outName = "mr_merge2_out";
out = db[outName];
out.drop();
-m = function(){ for (i=0; i<this.a.length; i++ ) emit( this.a[i] , 1 ); };
-r = function(k,vs){ return Array.sum( vs ); };
+m = function() {
+ for (i = 0; i < this.a.length; i++)
+ emit(this.a[i], 1);
+};
+r = function(k, vs) {
+ return Array.sum(vs);
+};
-function tos( o ){
+function tos(o) {
var s = "";
- for ( var i=0; i<100; i++ ){
- if ( o[i] )
+ for (var i = 0; i < 100; i++) {
+ if (o[i])
s += i + "_" + o[i] + "|";
}
return s;
}
-
-outOptions = { out : { merge : outName } };
-
-res = t.mapReduce( m , r , outOptions );
-expected = { "1" : 1 , "2" : 2 , "3" : 2 , "4" : 1 };
-assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "A" );
-
-t.insert( { a : [ 4 , 5 ] } );
-res = t.mapReduce( m , r , outOptions );
+outOptions = {
+ out: {merge: outName}
+};
+
+res = t.mapReduce(m, r, outOptions);
+expected = {
+ "1": 1,
+ "2": 2,
+ "3": 2,
+ "4": 1
+};
+assert.eq(tos(expected), tos(res.convertToSingleObject()), "A");
+
+t.insert({a: [4, 5]});
+res = t.mapReduce(m, r, outOptions);
expected["4"]++;
expected["5"] = 1;
-assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "B" );
-
+assert.eq(tos(expected), tos(res.convertToSingleObject()), "B");
diff --git a/jstests/core/mr_mutable_properties.js b/jstests/core/mr_mutable_properties.js
index e46f35b5079..12c52385275 100644
--- a/jstests/core/mr_mutable_properties.js
+++ b/jstests/core/mr_mutable_properties.js
@@ -4,40 +4,56 @@
var collection = db.mrMutableReceiver;
collection.drop();
-collection.insert({a:1});
+collection.insert({a: 1});
var map = function() {
// set property on receiver
- this.feed = {beef:1};
+ this.feed = {
+ beef: 1
+ };
// modify property on receiever
- this.a = {cake:1};
+ this.a = {
+ cake: 1
+ };
emit(this._id, this.feed);
emit(this._id, this.a);
};
var reduce = function(key, values) {
// set property on receiver
- this.feed = {beat:1};
+ this.feed = {
+ beat: 1
+ };
// set property on key arg
- key.fed = {mochi:1};
+ key.fed = {
+ mochi: 1
+ };
// push properties onto values array arg
values.push(this.feed);
values.push(key.fed);
// modify each value in the (modified) array arg
- values.forEach(function(val) { val.mod = 1; });
- return {food:values};
+ values.forEach(function(val) {
+ val.mod = 1;
+ });
+ return {
+ food: values
+ };
};
var finalize = function(key, values) {
// set property on receiver
- this.feed = {ice:1};
+ this.feed = {
+ ice: 1
+ };
// set property on key arg
- key.fed = {cream:1};
+ key.fed = {
+ cream: 1
+ };
// push properties onto values array arg
printjson(values);
@@ -45,7 +61,9 @@ var finalize = function(key, values) {
values.food.push(key.fed);
// modify each value in the (modified) array arg
- values.food.forEach(function(val) { val.mod = 1; });
+ values.food.forEach(function(val) {
+ val.mod = 1;
+ });
return values;
};
@@ -59,4 +77,6 @@ assert.eq(mr.results[0].value.food[2].beat, 1);
assert.eq(mr.results[0].value.food[3].mochi, 1);
assert.eq(mr.results[0].value.food[4].ice, 1);
assert.eq(mr.results[0].value.food[5].cream, 1);
-mr.results[0].value.food.forEach(function(val) { assert.eq(val.mod, 1); });
+mr.results[0].value.food.forEach(function(val) {
+ assert.eq(val.mod, 1);
+});
diff --git a/jstests/core/mr_optim.js b/jstests/core/mr_optim.js
index 65550d1c841..7437753ca67 100644
--- a/jstests/core/mr_optim.js
+++ b/jstests/core/mr_optim.js
@@ -4,43 +4,41 @@ t = db.mr_optim;
t.drop();
for (var i = 0; i < 1000; ++i) {
- t.save( {a: Math.random(1000), b: Math.random(10000)} );
+ t.save({a: Math.random(1000), b: Math.random(10000)});
}
-function m(){
+function m() {
emit(this._id, 13);
}
-function r( key , values ){
+function r(key, values) {
return "bad";
}
-function reformat( r ){
+function reformat(r) {
var x = {};
var cursor;
- if ( r.results )
+ if (r.results)
cursor = r.results;
else
cursor = r.find();
- cursor.forEach(
- function(z){
- x[z._id] = z.value;
- }
- );
+ cursor.forEach(function(z) {
+ x[z._id] = z.value;
+ });
return x;
}
-res = t.mapReduce( m , r , { out : "mr_optim_out" } );
-printjson( res );
-x = reformat( res );
+res = t.mapReduce(m, r, {out: "mr_optim_out"});
+printjson(res);
+x = reformat(res);
for (var key in x) {
assert.eq(x[key], 13, "value is not equal to original, maybe reduce has run");
}
res.drop();
-res = t.mapReduce( m , r , { out : { inline : 1 } } );
-//printjson( res )
-x2 = reformat( res );
+res = t.mapReduce(m, r, {out: {inline: 1}});
+// printjson( res )
+x2 = reformat(res);
res.drop();
assert.eq(x, x2, "object from inline and collection are not equal");
diff --git a/jstests/core/mr_outreduce.js b/jstests/core/mr_outreduce.js
index d380d169751..d23b0714a51 100644
--- a/jstests/core/mr_outreduce.js
+++ b/jstests/core/mr_outreduce.js
@@ -2,48 +2,55 @@
t = db.mr_outreduce;
t.drop();
-t.insert( { _id : 1 , a : [ 1 , 2 ] } );
-t.insert( { _id : 2 , a : [ 2 , 3 ] } );
-t.insert( { _id : 3 , a : [ 3 , 4 ] } );
+t.insert({_id: 1, a: [1, 2]});
+t.insert({_id: 2, a: [2, 3]});
+t.insert({_id: 3, a: [3, 4]});
outName = "mr_outreduce_out";
out = db[outName];
out.drop();
-m = function(){ for (i=0; i<this.a.length; i++ ) emit( this.a[i] , 1 ); };
-r = function(k,vs){ return Array.sum( vs ); };
+m = function() {
+ for (i = 0; i < this.a.length; i++)
+ emit(this.a[i], 1);
+};
+r = function(k, vs) {
+ return Array.sum(vs);
+};
-function tos( o ){
+function tos(o) {
var s = "";
- for ( var i=0; i<100; i++ ){
- if ( o[i] )
+ for (var i = 0; i < 100; i++) {
+ if (o[i])
s += i + "_" + o[i] + "|";
}
return s;
}
+res = t.mapReduce(m, r, {out: outName});
-res = t.mapReduce( m , r , { out : outName } );
+expected = {
+ "1": 1,
+ "2": 2,
+ "3": 2,
+ "4": 1
+};
+assert.eq(tos(expected), tos(res.convertToSingleObject()), "A");
-
-expected = { "1" : 1 , "2" : 2 , "3" : 2 , "4" : 1 };
-assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "A" );
-
-t.insert( { _id : 4 , a : [ 4 , 5 ] } );
-out.insert( { _id : 10 , value : "5" } ); // this is a sentinal to make sure it wasn't killed
-res = t.mapReduce( m , r , { out : { reduce : outName } , query : { _id : { $gt : 3 } } } );
+t.insert({_id: 4, a: [4, 5]});
+out.insert({_id: 10, value: "5"});  // this is a sentinel to make sure it wasn't killed
+res = t.mapReduce(m, r, {out: {reduce: outName}, query: {_id: {$gt: 3}}});
expected["4"]++;
expected["5"] = 1;
expected["10"] = 5;
-assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "B" );
+assert.eq(tos(expected), tos(res.convertToSingleObject()), "B");
-t.insert( { _id : 5 , a : [ 5 , 6 ] } );
-out.insert( { _id : 20 , value : "10" } ); // this is a sentinal to make sure it wasn't killed
-res = t.mapReduce( m , r , { out : { reduce : outName, nonAtomic: true } , query : { _id : { $gt : 4 } } } );
+t.insert({_id: 5, a: [5, 6]});
+out.insert({_id: 20, value: "10"});  // this is a sentinel to make sure it wasn't killed
+res = t.mapReduce(m, r, {out: {reduce: outName, nonAtomic: true}, query: {_id: {$gt: 4}}});
expected["5"]++;
expected["6"] = 1;
expected["20"] = 10;
-assert.eq( tos( expected ) , tos( res.convertToSingleObject() ) , "C" );
-
+assert.eq(tos(expected), tos(res.convertToSingleObject()), "C");
diff --git a/jstests/core/mr_outreduce2.js b/jstests/core/mr_outreduce2.js
index 45ec9be7ee2..a08b6b51527 100644
--- a/jstests/core/mr_outreduce2.js
+++ b/jstests/core/mr_outreduce2.js
@@ -7,21 +7,24 @@ t.drop();
db[out].drop();
-t.insert( { _id : 1 , x : 1 } );
-t.insert( { _id : 2 , x : 1 } );
-t.insert( { _id : 3 , x : 2 } );
+t.insert({_id: 1, x: 1});
+t.insert({_id: 2, x: 1});
+t.insert({_id: 3, x: 2});
-m = function(){ emit( this.x , 1 ); };
-r = function(k,v){ return Array.sum( v ); };
+m = function() {
+ emit(this.x, 1);
+};
+r = function(k, v) {
+ return Array.sum(v);
+};
-res = t.mapReduce( m , r , { out : { reduce : out } , query : { _id : { $gt : 0 } } } );
+res = t.mapReduce(m, r, {out: {reduce: out}, query: {_id: {$gt: 0}}});
-assert.eq( 2 , db[out].findOne( { _id : 1 } ).value , "A1" );
-assert.eq( 1 , db[out].findOne( { _id : 2 } ).value , "A2" );
+assert.eq(2, db[out].findOne({_id: 1}).value, "A1");
+assert.eq(1, db[out].findOne({_id: 2}).value, "A2");
+t.insert({_id: 4, x: 2});
+res = t.mapReduce(m, r, {out: {reduce: out}, query: {_id: {$gt: 3}}, finalize: null});
-t.insert( { _id : 4 , x : 2 } );
-res = t.mapReduce( m , r , { out : { reduce : out } , query : { _id : { $gt : 3 } } , finalize : null } );
-
-assert.eq( 2 , db[out].findOne( { _id : 1 } ).value , "B1" );
-assert.eq( 2 , db[out].findOne( { _id : 2 } ).value , "B2" );
+assert.eq(2, db[out].findOne({_id: 1}).value, "B1");
+assert.eq(2, db[out].findOne({_id: 2}).value, "B2");
diff --git a/jstests/core/mr_replaceIntoDB.js b/jstests/core/mr_replaceIntoDB.js
index 34c208255ee..6ffc8546c2d 100644
--- a/jstests/core/mr_replaceIntoDB.js
+++ b/jstests/core/mr_replaceIntoDB.js
@@ -2,44 +2,52 @@
t = db.mr_replace;
t.drop();
-t.insert( { a : [ 1 , 2 ] } );
-t.insert( { a : [ 2 , 3 ] } );
-t.insert( { a : [ 3 , 4 ] } );
+t.insert({a: [1, 2]});
+t.insert({a: [2, 3]});
+t.insert({a: [3, 4]});
outCollStr = "mr_replace_col";
outDbStr = "mr_db";
-m = function(){ for (i=0; i<this.a.length; i++ ) emit( this.a[i] , 1 ); };
-r = function(k,vs){ return Array.sum( vs ); };
+m = function() {
+ for (i = 0; i < this.a.length; i++)
+ emit(this.a[i], 1);
+};
+r = function(k, vs) {
+ return Array.sum(vs);
+};
-function tos( o ){
+function tos(o) {
var s = "";
- for ( var i=0; i<100; i++ ){
- if ( o[i] )
+ for (var i = 0; i < 100; i++) {
+ if (o[i])
s += i + "_" + o[i];
}
return s;
}
print("Testing mr replace into other DB");
-res = t.mapReduce( m , r , { out : { replace: outCollStr, db: outDbStr } } );
-printjson( res );
-expected = { "1" : 1 , "2" : 2 , "3" : 2 , "4" : 1 };
+res = t.mapReduce(m, r, {out: {replace: outCollStr, db: outDbStr}});
+printjson(res);
+expected = {
+ "1": 1,
+ "2": 2,
+ "3": 2,
+ "4": 1
+};
outDb = db.getMongo().getDB(outDbStr);
outColl = outDb[outCollStr];
-str = tos( outColl.convertToSingleObject("value") );
+str = tos(outColl.convertToSingleObject("value"));
print("Received result: " + str);
-assert.eq( tos( expected ) , str , "A Received wrong result " + str );
+assert.eq(tos(expected), str, "A Received wrong result " + str);
print("checking result field");
assert.eq(res.result.collection, outCollStr, "B1 Wrong collection " + res.result.collection);
assert.eq(res.result.db, outDbStr, "B2 Wrong db " + res.result.db);
print("Replace again and check");
-outColl.save({_id: "5", value : 1});
-t.mapReduce( m , r , { out : { replace: outCollStr, db: outDbStr } } );
-str = tos( outColl.convertToSingleObject("value") );
+outColl.save({_id: "5", value: 1});
+t.mapReduce(m, r, {out: {replace: outCollStr, db: outDbStr}});
+str = tos(outColl.convertToSingleObject("value"));
print("Received result: " + str);
-assert.eq( tos( expected ) , str , "C1 Received wrong result " + str );
-
-
+assert.eq(tos(expected), str, "C1 Received wrong result " + str);
diff --git a/jstests/core/mr_sort.js b/jstests/core/mr_sort.js
index 8d0ba96ad82..b90ad3f6bf5 100644
--- a/jstests/core/mr_sort.js
+++ b/jstests/core/mr_sort.js
@@ -2,43 +2,38 @@
t = db.mr_sort;
t.drop();
-t.ensureIndex( { x : 1 } );
-
-t.insert( { x : 1 } );
-t.insert( { x : 10 } );
-t.insert( { x : 2 } );
-t.insert( { x : 9 } );
-t.insert( { x : 3 } );
-t.insert( { x : 8 } );
-t.insert( { x : 4 } );
-t.insert( { x : 7 } );
-t.insert( { x : 5 } );
-t.insert( { x : 6 } );
-
-m = function(){
- emit( "a" , this.x );
+t.ensureIndex({x: 1});
+
+t.insert({x: 1});
+t.insert({x: 10});
+t.insert({x: 2});
+t.insert({x: 9});
+t.insert({x: 3});
+t.insert({x: 8});
+t.insert({x: 4});
+t.insert({x: 7});
+t.insert({x: 5});
+t.insert({x: 6});
+
+m = function() {
+ emit("a", this.x);
};
-r = function( k , v ){
- return Array.sum( v );
+r = function(k, v) {
+ return Array.sum(v);
};
-
-res = t.mapReduce( m , r , "mr_sort_out " );
+res = t.mapReduce(m, r, "mr_sort_out ");
x = res.convertToSingleObject();
res.drop();
-assert.eq( { "a" : 55 } , x , "A1" );
+assert.eq({"a": 55}, x, "A1");
-res = t.mapReduce( m , r , { out : "mr_sort_out" , query : { x : { $lt : 3 } } } );
+res = t.mapReduce(m, r, {out: "mr_sort_out", query: {x: {$lt: 3}}});
x = res.convertToSingleObject();
res.drop();
-assert.eq( { "a" : 3 } , x , "A2" );
+assert.eq({"a": 3}, x, "A2");
-res = t.mapReduce( m , r , { out : "mr_sort_out" , sort : { x : 1 } , limit : 2 } );
+res = t.mapReduce(m, r, {out: "mr_sort_out", sort: {x: 1}, limit: 2});
x = res.convertToSingleObject();
res.drop();
-assert.eq( { "a" : 3 } , x , "A3" );
-
-
-
-
+assert.eq({"a": 3}, x, "A3");
diff --git a/jstests/core/mr_stored.js b/jstests/core/mr_stored.js
index 3403411ea70..63fa301e66d 100644
--- a/jstests/core/mr_stored.js
+++ b/jstests/core/mr_stored.js
@@ -2,65 +2,77 @@
t = db.mr_stored;
t.drop();
-t.save( { "partner" : 1, "visits" : 9 } );
-t.save( { "partner" : 2, "visits" : 9 } );
-t.save( { "partner" : 1, "visits" : 11 } );
-t.save( { "partner" : 1, "visits" : 30 } );
-t.save( { "partner" : 2, "visits" : 41 } );
-t.save( { "partner" : 2, "visits" : 41 } );
+t.save({"partner": 1, "visits": 9});
+t.save({"partner": 2, "visits": 9});
+t.save({"partner": 1, "visits": 11});
+t.save({"partner": 1, "visits": 30});
+t.save({"partner": 2, "visits": 41});
+t.save({"partner": 2, "visits": 41});
-m = function(obj){
- emit( obj.partner , { stats : [ obj.visits ] } );
+m = function(obj) {
+ emit(obj.partner, {stats: [obj.visits]});
};
-r = function( k , v ){
+r = function(k, v) {
var stats = [];
var total = 0;
- for ( var i=0; i<v.length; i++ ){
- for ( var j in v[i].stats ) {
- stats.push( v[i].stats[j] );
+ for (var i = 0; i < v.length; i++) {
+ for (var j in v[i].stats) {
+ stats.push(v[i].stats[j]);
total += v[i].stats[j];
}
}
- return { stats : stats , total : total };
+ return {
+ stats: stats,
+ total: total
+ };
};
// Test that map reduce works with stored javascript
-db.system.js.save( { _id : "mr_stored_map" , value : m } );
-db.system.js.save( { _id : "mr_stored_reduce" , value : r } );
+db.system.js.save({_id: "mr_stored_map", value: m});
+db.system.js.save({_id: "mr_stored_reduce", value: r});
-res = t.mapReduce( function () { mr_stored_map(this); } ,
- function ( k , v ) { return mr_stored_reduce( k , v ); } ,
- { out : "mr_stored_out" , scope : { xx : 1 } } );
-//res.find().forEach( printjson )
+res = t.mapReduce(
+ function() {
+ mr_stored_map(this);
+ },
+ function(k, v) {
+ return mr_stored_reduce(k, v);
+ },
+ {out: "mr_stored_out", scope: {xx: 1}});
+// res.find().forEach( printjson )
z = res.convertToSingleObject();
-assert.eq( 2 , Object.keySet( z ).length , "A1" );
-assert.eq( [ 9 , 11 , 30 ] , z["1"].stats , "A2" );
-assert.eq( [ 9 , 41 , 41 ] , z["2"].stats , "A3" );
-
+assert.eq(2, Object.keySet(z).length, "A1");
+assert.eq([9, 11, 30], z["1"].stats, "A2");
+assert.eq([9, 41, 41], z["2"].stats, "A3");
res.drop();
-m = function(obj){
+m = function(obj) {
var x = "partner";
var y = "visits";
- emit( obj[x] , { stats : [ obj[y] ] } );
+ emit(obj[x], {stats: [obj[y]]});
};
-db.system.js.save( { _id : "mr_stored_map" , value : m } );
+db.system.js.save({_id: "mr_stored_map", value: m});
-res = t.mapReduce( function () { mr_stored_map(this); } ,
- function ( k , v ) { return mr_stored_reduce( k , v ); } ,
- { out : "mr_stored_out" , scope : { xx : 1 } } );
-//res.find().forEach( printjson )
+res = t.mapReduce(
+ function() {
+ mr_stored_map(this);
+ },
+ function(k, v) {
+ return mr_stored_reduce(k, v);
+ },
+ {out: "mr_stored_out", scope: {xx: 1}});
+// res.find().forEach( printjson )
z = res.convertToSingleObject();
-assert.eq( 2 , Object.keySet( z ).length , "B1" );
-assert.eq( [ 9 , 11 , 30 ] , z["1"].stats , "B2" );
-assert.eq( [ 9 , 41 , 41 ] , z["2"].stats , "B3" );
+assert.eq(2, Object.keySet(z).length, "B1");
+assert.eq([9, 11, 30], z["1"].stats, "B2");
+assert.eq([9, 41, 41], z["2"].stats, "B3");
-db.system.js.remove( { _id : "mr_stored_map" } );
-db.system.js.remove( { _id : "mr_stored_reduce" } );
+db.system.js.remove({_id: "mr_stored_map"});
+db.system.js.remove({_id: "mr_stored_reduce"});
res.drop();
diff --git a/jstests/core/mr_undef.js b/jstests/core/mr_undef.js
index 1bf89e3acc2..de3b61543d7 100644
--- a/jstests/core/mr_undef.js
+++ b/jstests/core/mr_undef.js
@@ -6,17 +6,23 @@ outname = "mr_undef_out";
out = db[outname];
out.drop();
-t.insert({x : 0});
+t.insert({x: 0});
-var m = function() { emit(this.mod, this.x); };
-var r = function(k,v) { total = 0; for(i in v) { total+= v[i]; } return total; };
+var m = function() {
+ emit(this.mod, this.x);
+};
+var r = function(k, v) {
+ total = 0;
+ for (i in v) {
+ total += v[i];
+ }
+ return total;
+};
-res = t.mapReduce(m, r, {out : outname } );
+res = t.mapReduce(m, r, {out: outname});
-assert.eq( 0 , out.find( { _id : { $type : 6 } } ).itcount() , "A1" );
-assert.eq( 1 , out.find( { _id : { $type : 10 } } ).itcount() , "A2" );
+assert.eq(0, out.find({_id: {$type: 6}}).itcount(), "A1");
+assert.eq(1, out.find({_id: {$type: 10}}).itcount(), "A2");
x = out.findOne();
-assert.eq( x , out.findOne( { _id : x["_id"] } ) , "A3" );
-
-
+assert.eq(x, out.findOne({_id: x["_id"]}), "A3");
diff --git a/jstests/core/multi.js b/jstests/core/multi.js
index eb6cad348cd..c7853b18f25 100644
--- a/jstests/core/multi.js
+++ b/jstests/core/multi.js
@@ -1,24 +1,24 @@
t = db.jstests_multi;
t.drop();
-t.ensureIndex( { a: 1 } );
-t.save( { a: [ 1, 2 ] } );
-assert.eq( 1, t.find( { a: { $gt: 0 } } ).count() , "A" );
-assert.eq( 1, t.find( { a: { $gt: 0 } } ).toArray().length , "B" );
+t.ensureIndex({a: 1});
+t.save({a: [1, 2]});
+assert.eq(1, t.find({a: {$gt: 0}}).count(), "A");
+assert.eq(1, t.find({a: {$gt: 0}}).toArray().length, "B");
t.drop();
-t.save( { a: [ [ [ 1 ] ] ] } );
-assert.eq( 0, t.find( { a:1 } ).count() , "C" );
-assert.eq( 0, t.find( { a: [ 1 ] } ).count() , "D" );
-assert.eq( 1, t.find( { a: [ [ 1 ] ] } ).count() , "E" );
-assert.eq( 1, t.find( { a: [ [ [ 1 ] ] ] } ).count() , "F" );
+t.save({a: [[[1]]]});
+assert.eq(0, t.find({a: 1}).count(), "C");
+assert.eq(0, t.find({a: [1]}).count(), "D");
+assert.eq(1, t.find({a: [[1]]}).count(), "E");
+assert.eq(1, t.find({a: [[[1]]]}).count(), "F");
t.drop();
-t.save( { a: [ 1, 2 ] } );
-assert.eq( 0, t.find( { a: { $ne: 1 } } ).count() , "G" );
+t.save({a: [1, 2]});
+assert.eq(0, t.find({a: {$ne: 1}}).count(), "G");
t.drop();
-t.save( { a: [ { b: 1 }, { b: 2 } ] } );
-assert.eq( 0, t.find( { 'a.b': { $ne: 1 } } ).count() , "H" );
+t.save({a: [{b: 1}, {b: 2}]});
+assert.eq(0, t.find({'a.b': {$ne: 1}}).count(), "H");
// TODO - run same tests with an index on a
diff --git a/jstests/core/multi2.js b/jstests/core/multi2.js
index 7c72722fd34..d5111c31913 100644
--- a/jstests/core/multi2.js
+++ b/jstests/core/multi2.js
@@ -2,22 +2,20 @@
t = db.multi2;
t.drop();
-t.save( { x : 1 , a : [ 1 ] } );
-t.save( { x : 1 , a : [] } );
-t.save( { x : 1 , a : null } );
-t.save( {} );
+t.save({x: 1, a: [1]});
+t.save({x: 1, a: []});
+t.save({x: 1, a: null});
+t.save({});
-assert.eq( 3 , t.find( { x : 1 } ).count() , "A" );
-
-t.ensureIndex( { x : 1 } );
-assert.eq( 3 , t.find( { x : 1 } ).count() , "B" );
-assert.eq( 4 , t.find().sort( { x : 1 , a : 1 } ).count() , "s1" );
-assert.eq( 1 , t.find( { x : 1 , a : null } ).count() , "B2" );
-
-t.dropIndex( { x : 1 } );
-t.ensureIndex( { x : 1 , a : 1 } );
-assert.eq( 3 , t.find( { x : 1 } ).count() , "C" ); // SERVER-279
-assert.eq( 4 , t.find().sort( { x : 1 , a : 1 } ).count() , "s2" );
-assert.eq( 1 , t.find( { x : 1 , a : null } ).count() , "C2" );
+assert.eq(3, t.find({x: 1}).count(), "A");
+t.ensureIndex({x: 1});
+assert.eq(3, t.find({x: 1}).count(), "B");
+assert.eq(4, t.find().sort({x: 1, a: 1}).count(), "s1");
+assert.eq(1, t.find({x: 1, a: null}).count(), "B2");
+t.dropIndex({x: 1});
+t.ensureIndex({x: 1, a: 1});
+assert.eq(3, t.find({x: 1}).count(), "C"); // SERVER-279
+assert.eq(4, t.find().sort({x: 1, a: 1}).count(), "s2");
+assert.eq(1, t.find({x: 1, a: null}).count(), "C2");
diff --git a/jstests/core/multikey_geonear.js b/jstests/core/multikey_geonear.js
index 7f5bbe3f75f..6d796cb62ff 100644
--- a/jstests/core/multikey_geonear.js
+++ b/jstests/core/multikey_geonear.js
@@ -49,15 +49,15 @@ t.insert({_id: 0, a: [{b: 0}, {c: {type: "Point", coordinates: [0, 0]}}]});
t.insert({_id: 1, a: [{b: 1}, {c: {type: "Point", coordinates: [1, 1]}}]});
t.insert({_id: 2, a: [{b: 2}, {c: {type: "Point", coordinates: [2, 2]}}]});
-cursor = t.find({"a.b": {$gte: 0}, "a.c": {$near:
- {$geometry: {type: "Point", coordinates: [2, 2]}}}});
+cursor =
+ t.find({"a.b": {$gte: 0}, "a.c": {$near: {$geometry: {type: "Point", coordinates: [2, 2]}}}});
checkResults(cursor);
// Double check that we're not intersecting bounds. Doing so should cause us to
// miss the result here.
t.insert({_id: 3, a: [{b: 10}, {b: -1}, {c: {type: "Point", coordinates: [0, 0]}}]});
-cursor = t.find({"a.b": {$lt: 0, $gt: 9}, "a.c": {$near:
- {$geometry: {type: "Point", coordinates: [0, 0]}}}});
+cursor = t.find(
+ {"a.b": {$lt: 0, $gt: 9}, "a.c": {$near: {$geometry: {type: "Point", coordinates: [0, 0]}}}});
assert.eq(3, cursor.next()["_id"]);
assert(!cursor.hasNext());
diff --git a/jstests/core/ne1.js b/jstests/core/ne1.js
index e1c5656b5c8..ba0bf7a3de7 100644
--- a/jstests/core/ne1.js
+++ b/jstests/core/ne1.js
@@ -2,10 +2,10 @@
t = db.ne1;
t.drop();
-t.save( { x : 1 } );
-t.save( { x : 2 } );
-t.save( { x : 3 } );
+t.save({x: 1});
+t.save({x: 2});
+t.save({x: 3});
-assert.eq( 2 , t.find( { x : { $ne : 2 } } ).itcount() , "A" );
-t.ensureIndex( { x : 1 } );
-assert.eq( 2 , t.find( { x : { $ne : 2 } } ).itcount() , "B" );
+assert.eq(2, t.find({x: {$ne: 2}}).itcount(), "A");
+t.ensureIndex({x: 1});
+assert.eq(2, t.find({x: {$ne: 2}}).itcount(), "B");
diff --git a/jstests/core/ne2.js b/jstests/core/ne2.js
index b0960d69cfa..8814688a45b 100644
--- a/jstests/core/ne2.js
+++ b/jstests/core/ne2.js
@@ -2,15 +2,15 @@
t = db.jstests_ne2;
t.drop();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
-t.save( { a:-0.5 } );
-t.save( { a:0 } );
-t.save( { a:0 } );
-t.save( { a:0.5 } );
+t.save({a: -0.5});
+t.save({a: 0});
+t.save({a: 0});
+t.save({a: 0.5});
-e = t.find( { a: { $ne: 0 } } ).explain( true );
-assert.eq( 2, e.executionStats.nReturned, 'A' );
+e = t.find({a: {$ne: 0}}).explain(true);
+assert.eq(2, e.executionStats.nReturned, 'A');
-e = t.find( { a: { $gt: -1, $lt: 1, $ne: 0 } } ).explain( true );
-assert.eq( 2, e.executionStats.nReturned, 'B' );
+e = t.find({a: {$gt: -1, $lt: 1, $ne: 0}}).explain(true);
+assert.eq(2, e.executionStats.nReturned, 'B');
diff --git a/jstests/core/ne3.js b/jstests/core/ne3.js
index 3260fd3c40f..5c38858c019 100644
--- a/jstests/core/ne3.js
+++ b/jstests/core/ne3.js
@@ -3,10 +3,20 @@
t = db.jstests_ne3;
t.drop();
-assert.throws( function() { t.findOne( { t: { $ne: /a/ } } ); } );
-assert.throws( function() { t.findOne( { t: { $gt: /a/ } } ); } );
-assert.throws( function() { t.findOne( { t: { $gte: /a/ } } ); } );
-assert.throws( function() { t.findOne( { t: { $lt: /a/ } } ); } );
-assert.throws( function() { t.findOne( { t: { $lte: /a/ } } ); } );
+assert.throws(function() {
+ t.findOne({t: {$ne: /a/}});
+});
+assert.throws(function() {
+ t.findOne({t: {$gt: /a/}});
+});
+assert.throws(function() {
+ t.findOne({t: {$gte: /a/}});
+});
+assert.throws(function() {
+ t.findOne({t: {$lt: /a/}});
+});
+assert.throws(function() {
+ t.findOne({t: {$lte: /a/}});
+});
-assert.eq( 0, t.count( { t: { $in: [ /a/ ] } } ) );
+assert.eq(0, t.count({t: {$in: [/a/]}}));
diff --git a/jstests/core/nestedarr1.js b/jstests/core/nestedarr1.js
index b3bc9b73156..98ddc2193ea 100644
--- a/jstests/core/nestedarr1.js
+++ b/jstests/core/nestedarr1.js
@@ -1,30 +1,33 @@
// make sure that we don't crash on large nested arrays but correctly do not index them
// SERVER-5127, SERVER-5036
-function makeNestArr(depth){
- if(depth == 1){
- return {a : [depth]};
- }
- else{
- return {a : [makeNestArr(depth - 1)] };
+function makeNestArr(depth) {
+ if (depth == 1) {
+ return {
+ a: [depth]
+ };
+ } else {
+ return {
+ a: [makeNestArr(depth - 1)]
+ };
}
}
t = db.arrNestTest;
t.drop();
-t.ensureIndex({a:1});
+t.ensureIndex({a: 1});
n = 1;
-while ( true ) {
+while (true) {
var before = t.count();
- t.insert( { _id : n, a : makeNestArr(n) } );
+ t.insert({_id: n, a: makeNestArr(n)});
var after = t.count();
- if ( before == after )
+ if (before == after)
break;
n++;
}
-assert( n > 30, "not enough n: " + n );
+assert(n > 30, "not enough n: " + n);
-assert.eq( t.count(), t.find( { _id : { $gt : 0 } } ).hint( { a : 1 } ).itcount() );
+assert.eq(t.count(), t.find({_id: {$gt: 0}}).hint({a: 1}).itcount());
diff --git a/jstests/core/nestedobj1.js b/jstests/core/nestedobj1.js
index 45ef0c530d4..97b9460da6f 100644
--- a/jstests/core/nestedobj1.js
+++ b/jstests/core/nestedobj1.js
@@ -1,10 +1,14 @@
-//SERVER-5127, SERVER-5036
+// SERVER-5127, SERVER-5036
-function makeNestObj(depth){
- toret = { a : 1};
+function makeNestObj(depth) {
+ toret = {
+ a: 1
+ };
- for(i = 1; i < depth; i++){
- toret = {a : toret};
+ for (i = 1; i < depth; i++) {
+ toret = {
+ a: toret
+ };
}
return toret;
@@ -13,18 +17,18 @@ function makeNestObj(depth){
t = db.objNestTest;
t.drop();
-t.ensureIndex({a:1});
+t.ensureIndex({a: 1});
n = 1;
-while ( true ) {
+while (true) {
var before = t.count();
- t.insert( { _id : n, a : makeNestObj(n) } );
+ t.insert({_id: n, a: makeNestObj(n)});
var after = t.count();
- if ( before == after )
+ if (before == after)
break;
n++;
}
-assert( n > 30, "not enough n: " + n );
+assert(n > 30, "not enough n: " + n);
-assert.eq( t.count(), t.find( { _id : { $gt : 0 } } ).hint( { a : 1 } ).itcount() );
+assert.eq(t.count(), t.find({_id: {$gt: 0}}).hint({a: 1}).itcount());
diff --git a/jstests/core/nin.js b/jstests/core/nin.js
index 7a25afd382e..d6cd78ee7a4 100644
--- a/jstests/core/nin.js
+++ b/jstests/core/nin.js
@@ -1,58 +1,64 @@
t = db.jstests_nin;
t.drop();
-function checkEqual( name , key , value ){
+function checkEqual(name, key, value) {
var o = {};
- o[key] = { $in : [ value ] };
- var i = t.find( o ).count();
- o[key] = { $nin : [ value ] };
- var n = t.find( o ).count();
-
- assert.eq( t.find().count() , i + n ,
- "checkEqual " + name + " $in + $nin != total | " + i + " + " + n + " != " + t.find().count() );
+ o[key] = {
+ $in: [value]
+ };
+ var i = t.find(o).count();
+ o[key] = {
+ $nin: [value]
+ };
+ var n = t.find(o).count();
+
+ assert.eq(t.find().count(),
+ i + n,
+ "checkEqual " + name + " $in + $nin != total | " + i + " + " + n + " != " +
+ t.find().count());
}
-doTest = function( n ) {
-
- t.save( { a:[ 1,2,3 ] } );
- t.save( { a:[ 1,2,4 ] } );
- t.save( { a:[ 1,8,5 ] } );
- t.save( { a:[ 1,8,6 ] } );
- t.save( { a:[ 1,9,7 ] } );
-
- assert.eq( 5, t.find( { a: { $nin: [ 10 ] } } ).count() , n + " A" );
- assert.eq( 0, t.find( { a: { $ne: 1 } } ).count() , n + " B" );
- assert.eq( 0, t.find( { a: { $nin: [ 1 ] } } ).count() , n + " C" );
- assert.eq( 0, t.find( { a: { $nin: [ 1, 2 ] } } ).count() , n + " D" );
- assert.eq( 3, t.find( { a: { $nin: [ 2 ] } } ).count() , n + " E" );
- assert.eq( 3, t.find( { a: { $nin: [ 8 ] } } ).count() , n + " F" );
- assert.eq( 4, t.find( { a: { $nin: [ 9 ] } } ).count() , n + " G" );
- assert.eq( 4, t.find( { a: { $nin: [ 3 ] } } ).count() , n + " H" );
- assert.eq( 3, t.find( { a: { $nin: [ 2, 3 ] } } ).count() , n + " I" );
- assert.eq( 1, t.find( { a: { $ne: 8, $nin: [ 2, 3 ] } } ).count() , n + " I2" );
-
- checkEqual( n + " A" , "a" , 5 );
-
- t.save( { a: [ 2, 2 ] } );
- assert.eq( 3, t.find( { a: { $nin: [ 2, 2 ] } } ).count() , n + " J" );
-
- t.save( { a: [ [ 2 ] ] } );
- assert.eq( 4, t.find( { a: { $nin: [ 2 ] } } ).count() , n + " K" );
-
- t.save( { a: [ { b: [ 10, 11 ] }, 11 ] } );
- checkEqual( n + " B" , "a" , 5 );
- checkEqual( n + " C" , "a.b" , 5 );
-
- assert.eq( 7, t.find( { 'a.b': { $nin: [ 10 ] } } ).count() , n + " L" );
- assert.eq( 7, t.find( { 'a.b': { $nin: [ [ 10, 11 ] ] } } ).count() , n + " M" );
- assert.eq( 7, t.find( { a: { $nin: [ 11 ] } } ).count() , n + " N" );
-
- t.save( { a: { b: [ 20, 30 ] } } );
- assert.eq( 1, t.find( { 'a.b': { $all: [ 20 ] } } ).count() , n + " O" );
- assert.eq( 1, t.find( { 'a.b': { $all: [ 20, 30 ] } } ).count() , n + " P" );
+doTest = function(n) {
+
+ t.save({a: [1, 2, 3]});
+ t.save({a: [1, 2, 4]});
+ t.save({a: [1, 8, 5]});
+ t.save({a: [1, 8, 6]});
+ t.save({a: [1, 9, 7]});
+
+ assert.eq(5, t.find({a: {$nin: [10]}}).count(), n + " A");
+ assert.eq(0, t.find({a: {$ne: 1}}).count(), n + " B");
+ assert.eq(0, t.find({a: {$nin: [1]}}).count(), n + " C");
+ assert.eq(0, t.find({a: {$nin: [1, 2]}}).count(), n + " D");
+ assert.eq(3, t.find({a: {$nin: [2]}}).count(), n + " E");
+ assert.eq(3, t.find({a: {$nin: [8]}}).count(), n + " F");
+ assert.eq(4, t.find({a: {$nin: [9]}}).count(), n + " G");
+ assert.eq(4, t.find({a: {$nin: [3]}}).count(), n + " H");
+ assert.eq(3, t.find({a: {$nin: [2, 3]}}).count(), n + " I");
+ assert.eq(1, t.find({a: {$ne: 8, $nin: [2, 3]}}).count(), n + " I2");
+
+ checkEqual(n + " A", "a", 5);
+
+ t.save({a: [2, 2]});
+ assert.eq(3, t.find({a: {$nin: [2, 2]}}).count(), n + " J");
+
+ t.save({a: [[2]]});
+ assert.eq(4, t.find({a: {$nin: [2]}}).count(), n + " K");
+
+ t.save({a: [{b: [10, 11]}, 11]});
+ checkEqual(n + " B", "a", 5);
+ checkEqual(n + " C", "a.b", 5);
+
+ assert.eq(7, t.find({'a.b': {$nin: [10]}}).count(), n + " L");
+ assert.eq(7, t.find({'a.b': {$nin: [[10, 11]]}}).count(), n + " M");
+ assert.eq(7, t.find({a: {$nin: [11]}}).count(), n + " N");
+
+ t.save({a: {b: [20, 30]}});
+ assert.eq(1, t.find({'a.b': {$all: [20]}}).count(), n + " O");
+ assert.eq(1, t.find({'a.b': {$all: [20, 30]}}).count(), n + " P");
};
-doTest( "no index" );
+doTest("no index");
t.drop();
-t.ensureIndex( {a:1} );
-doTest( "with index" );
+t.ensureIndex({a: 1});
+doTest("with index");
diff --git a/jstests/core/nin2.js b/jstests/core/nin2.js
index afdbb0494da..41996c1f4bb 100644
--- a/jstests/core/nin2.js
+++ b/jstests/core/nin2.js
@@ -4,64 +4,64 @@ t = db.jstests_nin2;
t.drop();
// Check various operator types.
-function checkOperators( array, inMatches ) {
+function checkOperators(array, inMatches) {
inCount = inMatches ? 1 : 0;
notInCount = 1 - inCount;
- assert.eq( inCount, t.count( {foo:{$in:array}} ) );
- assert.eq( notInCount, t.count( {foo:{$not:{$in:array}}} ) );
- assert.eq( notInCount, t.count( {foo:{$nin:array}} ) );
- assert.eq( inCount, t.count( {foo:{$not:{$nin:array}}} ) );
+ assert.eq(inCount, t.count({foo: {$in: array}}));
+ assert.eq(notInCount, t.count({foo: {$not: {$in: array}}}));
+ assert.eq(notInCount, t.count({foo: {$nin: array}}));
+ assert.eq(inCount, t.count({foo: {$not: {$nin: array}}}));
}
t.save({});
-assert.eq( 1, t.count( {foo:null} ) );
-assert.eq( 0, t.count( {foo:{$ne:null}} ) );
-assert.eq( 0, t.count( {foo:1} ) );
+assert.eq(1, t.count({foo: null}));
+assert.eq(0, t.count({foo: {$ne: null}}));
+assert.eq(0, t.count({foo: 1}));
// Check matching null against missing field.
-checkOperators( [null], true );
-checkOperators( [null,1], true );
-checkOperators( [1,null], true );
+checkOperators([null], true);
+checkOperators([null, 1], true);
+checkOperators([1, null], true);
t.remove({});
-t.save({foo:null});
+t.save({foo: null});
-assert.eq( 1, t.count( {foo:null} ) );
-assert.eq( 0, t.count( {foo:{$ne:null}} ) );
-assert.eq( 0, t.count( {foo:1} ) );
+assert.eq(1, t.count({foo: null}));
+assert.eq(0, t.count({foo: {$ne: null}}));
+assert.eq(0, t.count({foo: 1}));
// Check matching empty set.
-checkOperators( [], false );
+checkOperators([], false);
// Check matching null against missing null field.
-checkOperators( [null], true );
-checkOperators( [null,1], true );
-checkOperators( [1,null], true );
+checkOperators([null], true);
+checkOperators([null, 1], true);
+checkOperators([1, null], true);
t.remove({});
-t.save({foo:1});
+t.save({foo: 1});
-assert.eq( 0, t.count( {foo:null} ) );
-assert.eq( 1, t.count( {foo:{$ne:null}} ) );
-assert.eq( 1, t.count( {foo:1} ) );
+assert.eq(0, t.count({foo: null}));
+assert.eq(1, t.count({foo: {$ne: null}}));
+assert.eq(1, t.count({foo: 1}));
// Check matching null against 1.
-checkOperators( [null], false );
-checkOperators( [null,1], true );
-checkOperators( [1,null], true );
+checkOperators([null], false);
+checkOperators([null, 1], true);
+checkOperators([1, null], true);
t.remove({});
-t.save( {foo:[0,1]} );
+t.save({foo: [0, 1]});
// Check exact match of embedded array.
-checkOperators( [[0,1]], true );
+checkOperators([[0, 1]], true);
t.remove({});
-t.save( {foo:[]} );
+t.save({foo: []});
// Check exact match of embedded empty array.
-checkOperators( [[]], true );
+checkOperators([[]], true);
t.remove({});
-t.save( {foo:'foo'} );
+t.save({foo: 'foo'});
// Check regex match.
-checkOperators( [/o/], true );
+checkOperators([/o/], true);
diff --git a/jstests/core/no_db_created.js b/jstests/core/no_db_created.js
index 3c10dbc5772..3491914d470 100644
--- a/jstests/core/no_db_created.js
+++ b/jstests/core/no_db_created.js
@@ -1,15 +1,14 @@
// checks that operations do not create a database
-(function() {
+(function() {
"use strict";
var adminDB = db.getSiblingDB("admin");
var noDB = function(db) {
var dbName = db.getName();
var dbsRes = assert.commandWorked(adminDB.runCommand("listDatabases"));
dbsRes.databases.forEach(function(e) {
- assert.neq(dbName,
- e.name,
- "Found db which shouldn't exist:" + dbName + "; " + tojson(dbsRes));
+ assert.neq(
+ dbName, e.name, "Found db which shouldn't exist:" + dbName + "; " + tojson(dbsRes));
});
};
var mydb = db.getSiblingDB("neverCreated");
@@ -19,7 +18,7 @@
var coll = mydb.fake;
// force:true is for replset passthroughs
- assert.commandFailed(coll.runCommand("compact", {force:true}));
+ assert.commandFailed(coll.runCommand("compact", {force: true}));
noDB(mydb);
assert.writeOK(coll.insert({}));
mydb.dropDatabase();
@@ -29,7 +28,7 @@
assert.writeOK(coll.insert({}));
mydb.dropDatabase();
- assert.commandFailed(coll.runCommand("collMod", {expireAfterSeconds:1}));
+ assert.commandFailed(coll.runCommand("collMod", {expireAfterSeconds: 1}));
noDB(mydb);
assert.writeOK(coll.insert({}));
mydb.dropDatabase();
diff --git a/jstests/core/not1.js b/jstests/core/not1.js
index 6ff509f8d80..576c5817940 100644
--- a/jstests/core/not1.js
+++ b/jstests/core/not1.js
@@ -2,19 +2,18 @@
t = db.not1;
t.drop();
-
-t.insert({a:1});
-t.insert({a:2});
+t.insert({a: 1});
+t.insert({a: 2});
t.insert({});
-function test( name ){
- assert.eq( 3 , t.find().count() , name + "A" );
- assert.eq( 1 , t.find( { a : 1 } ).count() , name + "B" );
- assert.eq( 2 , t.find( { a : { $ne : 1 } } ).count() , name + "C" ); // SERVER-198
- assert.eq( 1 , t.find({a:{$in:[1]}}).count() , name + "D" );
- assert.eq( 2 , t.find({a:{$nin:[1]}}).count() , name + "E" ); // SERVER-198
+function test(name) {
+ assert.eq(3, t.find().count(), name + "A");
+ assert.eq(1, t.find({a: 1}).count(), name + "B");
+ assert.eq(2, t.find({a: {$ne: 1}}).count(), name + "C"); // SERVER-198
+ assert.eq(1, t.find({a: {$in: [1]}}).count(), name + "D");
+ assert.eq(2, t.find({a: {$nin: [1]}}).count(), name + "E"); // SERVER-198
}
-test( "no index" );
-t.ensureIndex( { a : 1 } );
-test( "with index" );
+test("no index");
+t.ensureIndex({a: 1});
+test("with index");
diff --git a/jstests/core/not2.js b/jstests/core/not2.js
index 43dbfb20e3e..98eb19cee6f 100644
--- a/jstests/core/not2.js
+++ b/jstests/core/not2.js
@@ -1,84 +1,84 @@
t = db.jstests_not2;
t.drop();
-check = function( query, expected, size ) {
- if ( size == null ) {
+check = function(query, expected, size) {
+ if (size == null) {
size = 1;
}
- assert.eq( size, t.find( query ).itcount(), tojson( query ) );
- if ( size > 0 ) {
- assert.eq( expected, t.findOne( query ).i, tojson( query ) );
+ assert.eq(size, t.find(query).itcount(), tojson(query));
+ if (size > 0) {
+ assert.eq(expected, t.findOne(query).i, tojson(query));
}
};
-fail = function( query ) {
+fail = function(query) {
try {
- t.find( query ).itcount();
- assert( false, tojson( query ) );
- } catch ( e ) {
+ t.find(query).itcount();
+ assert(false, tojson(query));
+ } catch (e) {
// expected
}
};
doTest = function() {
-t.remove( {} );
-
-t.save( {i:"a"} );
-t.save( {i:"b"} );
+ t.remove({});
-fail( {i:{$not:"a"}} );
-// SERVER-12735: We currently do not handle double negatives
-// during query canonicalization.
-//fail( {i:{$not:{$not:"a"}}} );
-//fail( {i:{$not:{$not:{$gt:"a"}}}} );
-fail( {i:{$not:{$ref:"foo"}}} );
-fail( {i:{$not:{}}} );
-check( {i:{$gt:"a"}}, "b" );
-check( {i:{$not:{$gt:"a"}}}, "a" );
-check( {i:{$not:{$ne:"a"}}}, "a" );
-check( {i:{$not:{$gte:"b"}}}, "a" );
-check( {i:{$exists:true}}, "a", 2 );
-check( {i:{$not:{$exists:true}}}, "", 0 );
-check( {j:{$not:{$exists:false}}}, "", 0 );
-check( {j:{$not:{$exists:true}}}, "a", 2 );
-check( {i:{$not:{$in:["a"]}}}, "b" );
-check( {i:{$not:{$in:["a", "b"]}}}, "", 0 );
-check( {i:{$not:{$in:["g"]}}}, "a", 2 );
-check( {i:{$not:{$nin:["a"]}}}, "a" );
-check( {i:{$not:/a/}}, "b" );
-check( {i:{$not:/(a|b)/}}, "", 0 );
-check( {i:{$not:/a/,$regex:"a"}}, "", 0 );
-check( {i:{$not:/aa/}}, "a", 2 );
-fail( {i:{$not:{$regex:"a"}}} );
-fail( {i:{$not:{$options:"a"}}} );
-check( {i:{$type:2}}, "a", 2 );
-check( {i:{$not:{$type:1}}}, "a", 2 );
-check( {i:{$not:{$type:2}}}, "", 0 );
+ t.save({i: "a"});
+ t.save({i: "b"});
-t.remove( {} );
-t.save( {i:1} );
-check( {i:{$not:{$mod:[5,1]}}}, null, 0 );
-check( {i:{$mod:[5,2]}}, null, 0 );
-check( {i:{$not:{$mod:[5,2]}}}, 1, 1 );
+ fail({i: {$not: "a"}});
+ // SERVER-12735: We currently do not handle double negatives
+ // during query canonicalization.
+ // fail( {i:{$not:{$not:"a"}}} );
+ // fail( {i:{$not:{$not:{$gt:"a"}}}} );
+ fail({i: {$not: {$ref: "foo"}}});
+ fail({i: {$not: {}}});
+ check({i: {$gt: "a"}}, "b");
+ check({i: {$not: {$gt: "a"}}}, "a");
+ check({i: {$not: {$ne: "a"}}}, "a");
+ check({i: {$not: {$gte: "b"}}}, "a");
+ check({i: {$exists: true}}, "a", 2);
+ check({i: {$not: {$exists: true}}}, "", 0);
+ check({j: {$not: {$exists: false}}}, "", 0);
+ check({j: {$not: {$exists: true}}}, "a", 2);
+ check({i: {$not: {$in: ["a"]}}}, "b");
+ check({i: {$not: {$in: ["a", "b"]}}}, "", 0);
+ check({i: {$not: {$in: ["g"]}}}, "a", 2);
+ check({i: {$not: {$nin: ["a"]}}}, "a");
+ check({i: {$not: /a/}}, "b");
+ check({i: {$not: /(a|b)/}}, "", 0);
+ check({i: {$not: /a/, $regex: "a"}}, "", 0);
+ check({i: {$not: /aa/}}, "a", 2);
+ fail({i: {$not: {$regex: "a"}}});
+ fail({i: {$not: {$options: "a"}}});
+ check({i: {$type: 2}}, "a", 2);
+ check({i: {$not: {$type: 1}}}, "a", 2);
+ check({i: {$not: {$type: 2}}}, "", 0);
-t.remove( {} );
-t.save( {i:["a","b"]} );
-check( {i:{$not:{$size:2}}}, null, 0 );
-check( {i:{$not:{$size:3}}}, ["a","b"] );
-check( {i:{$not:{$gt:"a"}}}, null, 0 );
-check( {i:{$not:{$gt:"c"}}}, ["a","b"] );
-check( {i:{$not:{$all:["a","b"]}}}, null, 0 );
-check( {i:{$not:{$all:["c"]}}}, ["a","b"] );
+ t.remove({});
+ t.save({i: 1});
+ check({i: {$not: {$mod: [5, 1]}}}, null, 0);
+ check({i: {$mod: [5, 2]}}, null, 0);
+ check({i: {$not: {$mod: [5, 2]}}}, 1, 1);
-t.remove( {} );
-t.save( {i:[{j:"a"}]} );
-t.save( {i:[{j:"b"}]} );
-check( {i:{$not:{$elemMatch:{j:"a"}}}}, [{j:"b"}] );
-check( {i:{$not:{$elemMatch:{j:"f"}}}}, [{j:"a"}], 2 );
+ t.remove({});
+ t.save({i: ["a", "b"]});
+ check({i: {$not: {$size: 2}}}, null, 0);
+ check({i: {$not: {$size: 3}}}, ["a", "b"]);
+ check({i: {$not: {$gt: "a"}}}, null, 0);
+ check({i: {$not: {$gt: "c"}}}, ["a", "b"]);
+ check({i: {$not: {$all: ["a", "b"]}}}, null, 0);
+ check({i: {$not: {$all: ["c"]}}}, ["a", "b"]);
+
+ t.remove({});
+ t.save({i: [{j: "a"}]});
+ t.save({i: [{j: "b"}]});
+ check({i: {$not: {$elemMatch: {j: "a"}}}}, [{j: "b"}]);
+ check({i: {$not: {$elemMatch: {j: "f"}}}}, [{j: "a"}], 2);
};
doTest();
-t.ensureIndex( {i:1} );
+t.ensureIndex({i: 1});
doTest();
diff --git a/jstests/core/not3.js b/jstests/core/not3.js
index 9f3014f2c1a..9699f3838d1 100644
--- a/jstests/core/not3.js
+++ b/jstests/core/not3.js
@@ -9,11 +9,15 @@ t.save({_id: 0, arr: [1, 2, 3]});
t.save({_id: 1, arr: [10, 11]});
// Case 1: simple $ne over array field.
-var case1 = {arr: {$ne: 3}};
+var case1 = {
+ arr: {$ne: 3}
+};
assert.eq(1, t.find(case1).itcount(), "Case 1: wrong number of results");
assert.eq(1, t.findOne(case1)._id, "Case 1: wrong _id");
// Case 2: simple $not over array field.
-var case2 = {arr: {$not: {$gt: 6}}};
+var case2 = {
+ arr: {$not: {$gt: 6}}
+};
assert.eq(1, t.find(case2).itcount(), "Case 2: wrong number of results");
assert.eq(0, t.findOne(case2)._id, "Case 2: wrong _id");
diff --git a/jstests/core/notablescan.js b/jstests/core/notablescan.js
index f2ca68d2912..80306c08cf2 100644
--- a/jstests/core/notablescan.js
+++ b/jstests/core/notablescan.js
@@ -4,28 +4,38 @@ t = db.test_notablescan;
t.drop();
try {
- assert.commandWorked( db._adminCommand( { setParameter:1, notablescan:true } ) );
+ assert.commandWorked(db._adminCommand({setParameter: 1, notablescan: true}));
// commented lines are SERVER-2222
- if ( 0 ) { // SERVER-2222
- assert.throws( function() { t.find( {a:1} ).toArray(); } );
+ if (0) { // SERVER-2222
+ assert.throws(function() {
+ t.find({a: 1}).toArray();
+ });
}
- t.save( {a:1} );
- if ( 0 ) { // SERVER-2222
- assert.throws( function() { t.count( {a:1} ); } );
- assert.throws( function() { t.find( {} ).toArray(); } );
+ t.save({a: 1});
+ if (0) { // SERVER-2222
+ assert.throws(function() {
+ t.count({a: 1});
+ });
+ assert.throws(function() {
+ t.find({}).toArray();
+ });
}
- assert.eq( 1, t.find( {} ).itcount() ); // SERVER-274
- assert.throws( function() { t.find( {a:1} ).toArray(); } );
- assert.throws( function() { t.find( {a:1} ).hint( {$natural:1} ).toArray(); } );
- t.ensureIndex( {a:1} );
- assert.eq( 0, t.find( {a:1,b:1} ).itcount() );
- assert.eq( 1, t.find( {a:1,b:null} ).itcount() );
+ assert.eq(1, t.find({}).itcount()); // SERVER-274
+ assert.throws(function() {
+ t.find({a: 1}).toArray();
+ });
+ assert.throws(function() {
+ t.find({a: 1}).hint({$natural: 1}).toArray();
+ });
+ t.ensureIndex({a: 1});
+ assert.eq(0, t.find({a: 1, b: 1}).itcount());
+ assert.eq(1, t.find({a: 1, b: null}).itcount());
// SERVER-4327
- assert.eq( 0, t.find( {a:{$in:[]}} ).itcount() );
- assert.eq( 0, t.find( {a:{$in:[]},b:0} ).itcount() );
+ assert.eq(0, t.find({a: {$in: []}}).itcount());
+ assert.eq(0, t.find({a: {$in: []}, b: 0}).itcount());
} finally {
// We assume notablescan was false before this test started and restore that
// expected value.
- assert.commandWorked( db._adminCommand( { setParameter:1, notablescan:false } ) );
+ assert.commandWorked(db._adminCommand({setParameter: 1, notablescan: false}));
}
diff --git a/jstests/core/ns_length.js b/jstests/core/ns_length.js
index 20825818174..17e5cbc0e25 100644
--- a/jstests/core/ns_length.js
+++ b/jstests/core/ns_length.js
@@ -5,14 +5,14 @@ var maxNsLength = 127;
var maxNsCollectionLength = 120;
var myDb = db.getSiblingDB("ns_length");
-myDb.dropDatabase(); // start empty
+myDb.dropDatabase(); // start empty
function mkStr(length) {
s = "";
while (s.length < length) {
s += "x";
}
- return s;
+ return s;
}
function canMakeCollectionWithName(name) {
@@ -36,7 +36,7 @@ function canMakeCollectionWithName(name) {
}
function canMakeIndexWithName(collection, name) {
- var success = collection.ensureIndex({x:1}, {name: name}).ok;
+ var success = collection.ensureIndex({x: 1}, {name: name}).ok;
if (success) {
assert.commandWorked(collection.dropIndex(name));
}
@@ -80,12 +80,11 @@ for (var i = maxCollectionNameLength - 3; i <= maxCollectionNameLength + 3; i++)
}
// test renaming collections with the destination around the name limit due to long indexe names
-myDb.from.ensureIndex({a:1}, {name: mkStr(100)});
-var indexNsNameOverhead = (myDb.getName() + "..$").length + 100; // index ns name - collection name
+myDb.from.ensureIndex({a: 1}, {name: mkStr(100)});
+var indexNsNameOverhead = (myDb.getName() + "..$").length + 100; // index ns name - collection name
var maxCollectionNameWithIndex = maxNsLength - indexNsNameOverhead;
for (var i = maxCollectionNameWithIndex - 3; i <= maxCollectionNameWithIndex + 3; i++) {
assert.eq(canRenameCollection("from", mkStr(i)),
i <= maxCollectionNameWithIndex,
"index ns name length = " + (indexNsNameOverhead + i));
}
-
diff --git a/jstests/core/null.js b/jstests/core/null.js
index f4bdeb44a4d..b5508c689d9 100644
--- a/jstests/core/null.js
+++ b/jstests/core/null.js
@@ -2,25 +2,25 @@
t = db.null1;
t.drop();
-t.save( { x : 1 } );
-t.save( { x : null } );
+t.save({x: 1});
+t.save({x: null});
-assert.eq( 1 , t.find( { x : null } ).count() , "A" );
-assert.eq( 1 , t.find( { x : { $ne : null } } ).count() , "B" );
+assert.eq(1, t.find({x: null}).count(), "A");
+assert.eq(1, t.find({x: {$ne: null}}).count(), "B");
-t.ensureIndex( { x : 1 } );
+t.ensureIndex({x: 1});
-assert.eq( 1 , t.find( { x : null } ).count() , "C" );
-assert.eq( 1 , t.find( { x : { $ne : null } } ).count() , "D" );
+assert.eq(1, t.find({x: null}).count(), "C");
+assert.eq(1, t.find({x: {$ne: null}}).count(), "D");
// -----
-assert.eq( 2, t.find( { y : null } ).count(), "E" );
+assert.eq(2, t.find({y: null}).count(), "E");
-t.ensureIndex( { y : 1 } );
-assert.eq( 2, t.find( { y : null } ).count(), "E" );
+t.ensureIndex({y: 1});
+assert.eq(2, t.find({y: null}).count(), "E");
-t.dropIndex( { y : 1 } );
+t.dropIndex({y: 1});
-t.ensureIndex( { y : 1 }, { sparse : true } );
-assert.eq( 2, t.find( { y : null } ).count(), "E" );
+t.ensureIndex({y: 1}, {sparse: true});
+assert.eq(2, t.find({y: null}).count(), "E");
diff --git a/jstests/core/null2.js b/jstests/core/null2.js
index 841c26ac22f..0e8173bf874 100644
--- a/jstests/core/null2.js
+++ b/jstests/core/null2.js
@@ -2,44 +2,38 @@
t = db.null2;
t.drop();
-t.insert( { _id : 1, a : [ { b : 5 } ] } );
-t.insert( { _id : 2, a : [ {} ] } );
-t.insert( { _id : 3, a : [] } );
-t.insert( { _id : 4, a : [ {}, { b : 5 } ] } );
-t.insert( { _id : 5, a : [ 5, { b : 5 } ] } );
-
-function doQuery( query ) {
- printjson( query );
- t.find( query ).forEach(
- function(z) {
- print( "\t" + tojson(z) );
- }
- );
- return t.find( query ).count();
+t.insert({_id: 1, a: [{b: 5}]});
+t.insert({_id: 2, a: [{}]});
+t.insert({_id: 3, a: []});
+t.insert({_id: 4, a: [{}, {b: 5}]});
+t.insert({_id: 5, a: [5, {b: 5}]});
+
+function doQuery(query) {
+ printjson(query);
+ t.find(query).forEach(function(z) {
+ print("\t" + tojson(z));
+ });
+ return t.find(query).count();
}
-function getIds( query ) {
+function getIds(query) {
var ids = [];
- t.find( query ).forEach(
- function(z) {
- ids.push( z._id );
- }
- );
+ t.find(query).forEach(function(z) {
+ ids.push(z._id);
+ });
return ids;
}
-theQueries = [ { "a.b" : null }, { "a.b" : { $in : [ null ] } } ];
+theQueries = [{"a.b": null}, {"a.b": {$in: [null]}}];
-for ( var i=0; i < theQueries.length; i++ ) {
- assert.eq( 2, doQuery( theQueries[i] ) );
- assert.eq( [2,4], getIds( theQueries[i] ) );
+for (var i = 0; i < theQueries.length; i++) {
+ assert.eq(2, doQuery(theQueries[i]));
+ assert.eq([2, 4], getIds(theQueries[i]));
}
-t.ensureIndex( { "a.b" : 1 } );
+t.ensureIndex({"a.b": 1});
-for ( var i=0; i < theQueries.length; i++ ) {
- assert.eq( 2, doQuery( theQueries[i] ) );
- assert.eq( [2,4], getIds( theQueries[i] ) );
+for (var i = 0; i < theQueries.length; i++) {
+ assert.eq(2, doQuery(theQueries[i]));
+ assert.eq([2, 4], getIds(theQueries[i]));
}
-
-
diff --git a/jstests/core/null_field_name.js b/jstests/core/null_field_name.js
index 7fa14b0a1bc..f51e2fbff6a 100644
--- a/jstests/core/null_field_name.js
+++ b/jstests/core/null_field_name.js
@@ -1,8 +1,8 @@
// SERVER-10313: Test that null char in field name causes an error when converting to bson
-assert.throws( function () { Object.bsonsize({"a\0":1}); },
- null,
- "null char in field name");
+assert.throws(function() {
+ Object.bsonsize({"a\0": 1});
+}, null, "null char in field name");
-assert.throws( function () { Object.bsonsize({"\0asdf":1}); },
- null,
- "null char in field name"); \ No newline at end of file
+assert.throws(function() {
+ Object.bsonsize({"\0asdf": 1});
+}, null, "null char in field name"); \ No newline at end of file
diff --git a/jstests/core/numberint.js b/jstests/core/numberint.js
index f786a515c29..55c923aea79 100644
--- a/jstests/core/numberint.js
+++ b/jstests/core/numberint.js
@@ -1,77 +1,79 @@
-assert.eq.automsg( "0", "new NumberInt()" );
-
-n = new NumberInt( 4 );
-assert.eq.automsg( "4", "n" );
-assert.eq.automsg( "4", "n.toNumber()" );
-assert.eq.automsg( "8", "n + 4" );
-assert.eq.automsg( "'NumberInt(4)'", "n.toString()" );
-assert.eq.automsg( "'NumberInt(4)'", "tojson( n )" );
+assert.eq.automsg("0", "new NumberInt()");
+
+n = new NumberInt(4);
+assert.eq.automsg("4", "n");
+assert.eq.automsg("4", "n.toNumber()");
+assert.eq.automsg("8", "n + 4");
+assert.eq.automsg("'NumberInt(4)'", "n.toString()");
+assert.eq.automsg("'NumberInt(4)'", "tojson( n )");
a = {};
a.a = n;
-p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberInt(4) }'", "p" );
-
-assert.eq.automsg( "NumberInt(4 )", "eval( tojson( NumberInt( 4 ) ) )" );
-assert.eq.automsg( "a", "eval( tojson( a ) )" );
-
-n = new NumberInt( -4 );
-assert.eq.automsg( "-4", "n" );
-assert.eq.automsg( "-4", "n.toNumber()" );
-assert.eq.automsg( "0", "n + 4" );
-assert.eq.automsg( "'NumberInt(-4)'", "n.toString()" );
-assert.eq.automsg( "'NumberInt(-4)'", "tojson( n )" );
+p = tojson(a);
+assert.eq.automsg("'{ \"a\" : NumberInt(4) }'", "p");
+
+assert.eq.automsg("NumberInt(4 )", "eval( tojson( NumberInt( 4 ) ) )");
+assert.eq.automsg("a", "eval( tojson( a ) )");
+
+n = new NumberInt(-4);
+assert.eq.automsg("-4", "n");
+assert.eq.automsg("-4", "n.toNumber()");
+assert.eq.automsg("0", "n + 4");
+assert.eq.automsg("'NumberInt(-4)'", "n.toString()");
+assert.eq.automsg("'NumberInt(-4)'", "tojson( n )");
a = {};
a.a = n;
-p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberInt(-4) }'", "p" );
+p = tojson(a);
+assert.eq.automsg("'{ \"a\" : NumberInt(-4) }'", "p");
-n = new NumberInt( "11111" );
-assert.eq.automsg( "'NumberInt(11111)'", "n.toString()" );
-assert.eq.automsg( "'NumberInt(11111)'", "tojson( n )" );
+n = new NumberInt("11111");
+assert.eq.automsg("'NumberInt(11111)'", "n.toString()");
+assert.eq.automsg("'NumberInt(11111)'", "tojson( n )");
a = {};
a.a = n;
-p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberInt(11111) }'", "p" );
+p = tojson(a);
+assert.eq.automsg("'{ \"a\" : NumberInt(11111) }'", "p");
-assert.eq.automsg( "NumberInt('11111' )", "eval( tojson( NumberInt( '11111' ) ) )" );
-assert.eq.automsg( "a", "eval( tojson( a ) )" );
+assert.eq.automsg("NumberInt('11111' )", "eval( tojson( NumberInt( '11111' ) ) )");
+assert.eq.automsg("a", "eval( tojson( a ) )");
-n = new NumberInt( "-11111" );
-assert.eq.automsg( "-11111", "n.toNumber()" );
-assert.eq.automsg( "-11107", "n + 4" );
-assert.eq.automsg( "'NumberInt(-11111)'", "n.toString()" );
-assert.eq.automsg( "'NumberInt(-11111)'", "tojson( n )" );
+n = new NumberInt("-11111");
+assert.eq.automsg("-11111", "n.toNumber()");
+assert.eq.automsg("-11107", "n + 4");
+assert.eq.automsg("'NumberInt(-11111)'", "n.toString()");
+assert.eq.automsg("'NumberInt(-11111)'", "tojson( n )");
a = {};
a.a = n;
-p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberInt(-11111) }'", "p" );
+p = tojson(a);
+assert.eq.automsg("'{ \"a\" : NumberInt(-11111) }'", "p");
// parsing: SpiderMonkey evaluates non-numeric strings as 0, which is not bad
-//assert.throws.automsg( function() { new NumberInt( "" ); } );
-//assert.throws.automsg( function() { new NumberInt( "y" ); } );
+// assert.throws.automsg( function() { new NumberInt( "" ); } );
+// assert.throws.automsg( function() { new NumberInt( "y" ); } );
// eq
-assert.eq( { x : 5 } , { x : new NumberInt( "5" ) } );
+assert.eq({x: 5}, {x: new NumberInt("5")});
-assert( 5 == NumberInt( 5 ) , "eq" );
-assert( 5 < NumberInt( 6 ) , "lt" );
-assert( 5 > NumberInt( 4 ) , "lt" );
-assert( NumberInt( 1 ) , "to bool a" );
+assert(5 == NumberInt(5), "eq");
+assert(5 < NumberInt(6), "lt");
+assert(5 > NumberInt(4), "lt");
+assert(NumberInt(1), "to bool a");
// objects are always considered thruthy
-//assert( ! NumberInt( 0 ) , "to bool b" );
+// assert( ! NumberInt( 0 ) , "to bool b" );
// create doc with int value in db
-t = db.getCollection( "numberint" );
+t = db.getCollection("numberint");
t.drop();
-o = { a : NumberInt(42) };
-t.save( o );
+o = {
+ a: NumberInt(42)
+};
+t.save(o);
-assert.eq( 42 , t.findOne().a , "save doc 1" );
-assert.eq( 1 , t.find({a: {$type: 16}}).count() , "save doc 2" );
-assert.eq( 0 , t.find({a: {$type: 1}}).count() , "save doc 3" );
+assert.eq(42, t.findOne().a, "save doc 1");
+assert.eq(1, t.find({a: {$type: 16}}).count(), "save doc 2");
+assert.eq(0, t.find({a: {$type: 1}}).count(), "save doc 3");
// roundtripping
mod = t.findOne({a: 42});
@@ -79,14 +81,12 @@ mod.a += 10;
mod.b = "foo";
delete mod._id;
t.save(mod);
-assert.eq( 2 , t.find({a: {$type: 16}}).count() , "roundtrip 1" );
-assert.eq( 0 , t.find({a: {$type: 1}}).count() , "roundtrip 2" );
-assert.eq( 1 , t.find({a: 52}).count() , "roundtrip 3" );
+assert.eq(2, t.find({a: {$type: 16}}).count(), "roundtrip 1");
+assert.eq(0, t.find({a: {$type: 1}}).count(), "roundtrip 2");
+assert.eq(1, t.find({a: 52}).count(), "roundtrip 3");
// save regular number
t.save({a: 42});
-assert.eq( 2 , t.find({a: {$type: 16}}).count() , "normal 1" );
-assert.eq( 1 , t.find({a: {$type: 1}}).count() , "normal 2" );
-assert.eq( 2 , t.find({a: 42}).count() , "normal 3" );
-
-
+assert.eq(2, t.find({a: {$type: 16}}).count(), "normal 1");
+assert.eq(1, t.find({a: {$type: 1}}).count(), "normal 2");
+assert.eq(2, t.find({a: 42}).count(), "normal 3");
diff --git a/jstests/core/numberlong.js b/jstests/core/numberlong.js
index adda61f8acb..884a301440e 100644
--- a/jstests/core/numberlong.js
+++ b/jstests/core/numberlong.js
@@ -1,93 +1,102 @@
-assert.eq.automsg( "0", "new NumberLong()" );
-
-n = new NumberLong( 4 );
-assert.eq.automsg( "4", "n" );
-assert.eq.automsg( "4", "n.toNumber()" );
-assert.eq.automsg( "8", "n + 4" );
-assert.eq.automsg( "'NumberLong(4)'", "n.toString()" );
-assert.eq.automsg( "'NumberLong(4)'", "tojson( n )" );
+assert.eq.automsg("0", "new NumberLong()");
+
+n = new NumberLong(4);
+assert.eq.automsg("4", "n");
+assert.eq.automsg("4", "n.toNumber()");
+assert.eq.automsg("8", "n + 4");
+assert.eq.automsg("'NumberLong(4)'", "n.toString()");
+assert.eq.automsg("'NumberLong(4)'", "tojson( n )");
a = {};
a.a = n;
-p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong(4) }'", "p" );
-
-assert.eq.automsg( "NumberLong(4 )", "eval( tojson( NumberLong( 4 ) ) )" );
-assert.eq.automsg( "a", "eval( tojson( a ) )" );
-
-n = new NumberLong( -4 );
-assert.eq.automsg( "-4", "n" );
-assert.eq.automsg( "-4", "n.toNumber()" );
-assert.eq.automsg( "0", "n + 4" );
-assert.eq.automsg( "'NumberLong(-4)'", "n.toString()" );
-assert.eq.automsg( "'NumberLong(-4)'", "tojson( n )" );
+p = tojson(a);
+assert.eq.automsg("'{ \"a\" : NumberLong(4) }'", "p");
+
+assert.eq.automsg("NumberLong(4 )", "eval( tojson( NumberLong( 4 ) ) )");
+assert.eq.automsg("a", "eval( tojson( a ) )");
+
+n = new NumberLong(-4);
+assert.eq.automsg("-4", "n");
+assert.eq.automsg("-4", "n.toNumber()");
+assert.eq.automsg("0", "n + 4");
+assert.eq.automsg("'NumberLong(-4)'", "n.toString()");
+assert.eq.automsg("'NumberLong(-4)'", "tojson( n )");
a = {};
a.a = n;
-p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong(-4) }'", "p" );
+p = tojson(a);
+assert.eq.automsg("'{ \"a\" : NumberLong(-4) }'", "p");
// double
-n = new NumberLong(4294967296); // 2^32
-assert.eq.automsg( "4294967296", "n" );
-assert.eq.automsg( "4294967296", "n.toNumber()" );
-assert.eq.automsg( "4294967295", "n - 1" );
-assert.eq.automsg( "'NumberLong(\"4294967296\")'", "n.toString()" );
-assert.eq.automsg( "'NumberLong(\"4294967296\")'", "tojson( n )" );
-assert.eq.automsg( "4294967296", "n.floatApprox" );
-assert.eq.automsg( "", "n.top" );
-assert.eq.automsg( "", "n.bottom" );
+n = new NumberLong(4294967296); // 2^32
+assert.eq.automsg("4294967296", "n");
+assert.eq.automsg("4294967296", "n.toNumber()");
+assert.eq.automsg("4294967295", "n - 1");
+assert.eq.automsg("'NumberLong(\"4294967296\")'", "n.toString()");
+assert.eq.automsg("'NumberLong(\"4294967296\")'", "tojson( n )");
+assert.eq.automsg("4294967296", "n.floatApprox");
+assert.eq.automsg("", "n.top");
+assert.eq.automsg("", "n.bottom");
a = {};
a.a = n;
-p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong(\"4294967296\") }'", "p" );
+p = tojson(a);
+assert.eq.automsg("'{ \"a\" : NumberLong(\"4294967296\") }'", "p");
// too big to fit in double
-n = new NumberLong( "11111111111111111" );
-assert.eq.automsg( "11111111111111112", "n.toNumber()" );
-assert.eq.automsg( "11111111111111116", "n + 4" );
-assert.eq.automsg( "'NumberLong(\"11111111111111111\")'", "n.toString()" );
-assert.eq.automsg( "'NumberLong(\"11111111111111111\")'", "tojson( n )" );
+n = new NumberLong("11111111111111111");
+assert.eq.automsg("11111111111111112", "n.toNumber()");
+assert.eq.automsg("11111111111111116", "n + 4");
+assert.eq.automsg("'NumberLong(\"11111111111111111\")'", "n.toString()");
+assert.eq.automsg("'NumberLong(\"11111111111111111\")'", "tojson( n )");
a = {};
a.a = n;
-p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong(\"11111111111111111\") }'", "p" );
-
-assert.eq.automsg( "NumberLong('11111111111111111' )", "eval( tojson( NumberLong( '11111111111111111' ) ) )" );
-assert.eq.automsg( "a", "eval( tojson( a ) )" );
-
-n = new NumberLong( "-11111111111111111" );
-assert.eq.automsg( "-11111111111111112", "n.toNumber()" );
-assert.eq.automsg( "-11111111111111108", "n + 4" );
-assert.eq.automsg( "'NumberLong(\"-11111111111111111\")'", "n.toString()" );
-assert.eq.automsg( "'NumberLong(\"-11111111111111111\")'", "tojson( n )" );
-assert.eq.automsg( "-11111111111111112", "n.floatApprox" );
-assert.eq.automsg( "4292380288", "n.top" );
-assert.eq.automsg( "3643379257", "n.bottom" );
+p = tojson(a);
+assert.eq.automsg("'{ \"a\" : NumberLong(\"11111111111111111\") }'", "p");
+
+assert.eq.automsg("NumberLong('11111111111111111' )",
+ "eval( tojson( NumberLong( '11111111111111111' ) ) )");
+assert.eq.automsg("a", "eval( tojson( a ) )");
+
+n = new NumberLong("-11111111111111111");
+assert.eq.automsg("-11111111111111112", "n.toNumber()");
+assert.eq.automsg("-11111111111111108", "n + 4");
+assert.eq.automsg("'NumberLong(\"-11111111111111111\")'", "n.toString()");
+assert.eq.automsg("'NumberLong(\"-11111111111111111\")'", "tojson( n )");
+assert.eq.automsg("-11111111111111112", "n.floatApprox");
+assert.eq.automsg("4292380288", "n.top");
+assert.eq.automsg("3643379257", "n.bottom");
a = {};
a.a = n;
-p = tojson( a );
-assert.eq.automsg( "'{ \"a\" : NumberLong(\"-11111111111111111\") }'", "p" );
+p = tojson(a);
+assert.eq.automsg("'{ \"a\" : NumberLong(\"-11111111111111111\") }'", "p");
-n = new NumberLong( "9223372036854775807" );
-assert.eq.automsg( "9223372036854775807", "n.floatApprox" );
-assert.eq.automsg( "2147483647", "n.top" );
-assert.eq.automsg( "4294967295", "n.bottom" );
+n = new NumberLong("9223372036854775807");
+assert.eq.automsg("9223372036854775807", "n.floatApprox");
+assert.eq.automsg("2147483647", "n.top");
+assert.eq.automsg("4294967295", "n.bottom");
-n = new NumberLong( 9223372036854775807, 2147483647, 4294967295 );
-assert.eq.automsg( "9223372036854775807", "n.floatApprox" );
-assert.eq.automsg( "2147483647", "n.top" );
-assert.eq.automsg( "4294967295", "n.bottom" );
+n = new NumberLong(9223372036854775807, 2147483647, 4294967295);
+assert.eq.automsg("9223372036854775807", "n.floatApprox");
+assert.eq.automsg("2147483647", "n.top");
+assert.eq.automsg("4294967295", "n.bottom");
// parsing
-assert.throws.automsg( function() { new NumberLong( "" ); } );
-assert.throws.automsg( function() { new NumberLong( "y" ); } );
-assert.throws.automsg( function() { new NumberLong( "11111111111111111111" ); } );
+assert.throws.automsg(function() {
+ new NumberLong("");
+});
+assert.throws.automsg(function() {
+ new NumberLong("y");
+});
+assert.throws.automsg(function() {
+ new NumberLong("11111111111111111111");
+});
// create NumberLong from NumberInt (SERVER-9973)
-assert.doesNotThrow.automsg( function() { new NumberLong(NumberInt(1)); } );
+assert.doesNotThrow.automsg(function() {
+ new NumberLong(NumberInt(1));
+});
// check that creating a NumberLong from a NumberLong bigger than a double doesn't
// get a truncated value (SERVER-9973)
-n = new NumberLong(NumberLong( "11111111111111111" ));
+n = new NumberLong(NumberLong("11111111111111111"));
assert.eq.automsg("n.toString()", "'NumberLong(\"11111111111111111\")'");
//
@@ -112,10 +121,24 @@ assert.eq(left.compare(left), 0);
assert.eq(right.compare(right), 0);
// Bad input to .compare().
-assert.throws(function() { NumberLong("0").compare(); });
-assert.throws(function() { NumberLong("0").compare(null); });
-assert.throws(function() { NumberLong("0").compare(undefined); });
-assert.throws(function() { NumberLong("0").compare(3); });
-assert.throws(function() { NumberLong("0").compare("foo"); });
-assert.throws(function() { NumberLong("0").compare(NumberLong("0"), 3); });
-assert.throws(function() { NumberLong("0").compare({'replSet2Members': 6}); });
+assert.throws(function() {
+ NumberLong("0").compare();
+});
+assert.throws(function() {
+ NumberLong("0").compare(null);
+});
+assert.throws(function() {
+ NumberLong("0").compare(undefined);
+});
+assert.throws(function() {
+ NumberLong("0").compare(3);
+});
+assert.throws(function() {
+ NumberLong("0").compare("foo");
+});
+assert.throws(function() {
+ NumberLong("0").compare(NumberLong("0"), 3);
+});
+assert.throws(function() {
+ NumberLong("0").compare({'replSet2Members': 6});
+});
diff --git a/jstests/core/numberlong2.js b/jstests/core/numberlong2.js
index c730345f307..59ca199259f 100644
--- a/jstests/core/numberlong2.js
+++ b/jstests/core/numberlong2.js
@@ -3,13 +3,13 @@
t = db.jstests_numberlong2;
t.drop();
-t.ensureIndex( {x:1} );
+t.ensureIndex({x: 1});
function chk(longNum) {
t.remove({});
- t.save({ x: longNum });
- assert.eq(longNum, t.find().hint({ x: 1 }).next().x);
- assert.eq(longNum, t.find({}, { _id: 0, x: 1 }).hint({ x: 1 }).next().x);
+ t.save({x: longNum});
+ assert.eq(longNum, t.find().hint({x: 1}).next().x);
+ assert.eq(longNum, t.find({}, {_id: 0, x: 1}).hint({x: 1}).next().x);
}
chk(NumberLong("1123539983311657217"));
@@ -21,8 +21,9 @@ chk(NumberLong("4503599627370497"));
t.remove({});
s = "11235399833116571";
-for( i = 99; i >= 0; --i ) {
- t.save( {x:NumberLong( s + i )} );
+for (i = 99; i >= 0; --i) {
+ t.save({x: NumberLong(s + i)});
}
-assert.eq( t.find().sort( {x:1} ).hint( {$natural:1} ).toArray(), t.find().sort( {x:1} ).hint( {x:1} ).toArray() );
+assert.eq(t.find().sort({x: 1}).hint({$natural: 1}).toArray(),
+ t.find().sort({x: 1}).hint({x: 1}).toArray());
diff --git a/jstests/core/numberlong3.js b/jstests/core/numberlong3.js
index b8a8c9c468e..b62d1865ff4 100644
--- a/jstests/core/numberlong3.js
+++ b/jstests/core/numberlong3.js
@@ -4,22 +4,24 @@ t = db.jstests_numberlong3;
t.drop();
s = "11235399833116571";
-for( i = 10; i >= 0; --i ) {
- n = NumberLong( s + i );
- t.save( {x:n} );
- if ( 0 ) { // SERVER-3719
- t.save( {x:n.floatApprox} );
+for (i = 10; i >= 0; --i) {
+ n = NumberLong(s + i);
+ t.save({x: n});
+ if (0) { // SERVER-3719
+ t.save({x: n.floatApprox});
}
}
-ret = t.find().sort({x:1}).toArray().filter( function( x ) { return typeof( x.x.floatApprox ) != 'undefined'; } );
+ret = t.find().sort({x: 1}).toArray().filter(function(x) {
+ return typeof(x.x.floatApprox) != 'undefined';
+});
-//printjson( ret );
+// printjson( ret );
-for( i = 1; i < ret.length; ++i ) {
- first = ret[i-1].x.toString();
+for (i = 1; i < ret.length; ++i) {
+ first = ret[i - 1].x.toString();
second = ret[i].x.toString();
- if ( first.length == second.length ) {
- assert.lte( ret[i-1].x.toString(), ret[i].x.toString() );
+ if (first.length == second.length) {
+ assert.lte(ret[i - 1].x.toString(), ret[i].x.toString());
}
}
diff --git a/jstests/core/numberlong4.js b/jstests/core/numberlong4.js
index 0924931efaf..f81a9599e21 100644
--- a/jstests/core/numberlong4.js
+++ b/jstests/core/numberlong4.js
@@ -1,21 +1,21 @@
-// Test handling of comparison between long longs and their double approximations in btrees - SERVER-3719.
+// Test handling of comparison between long longs and their double approximations in btrees -
+// SERVER-3719.
t = db.jstests_numberlong4;
t.drop();
-if ( 0 ) { // SERVER-3719
+if (0) { // SERVER-3719
-t.ensureIndex({x:1});
+ t.ensureIndex({x: 1});
-Random.setRandomSeed();
+ Random.setRandomSeed();
-s = "11235399833116571";
-for( i = 0; i < 10000; ++i ) {
- n = NumberLong( s + Random.randInt( 10 ) );
- t.insert( { x: ( Random.randInt( 2 ) ? n : n.floatApprox ) } );
-}
-
-// If this does not return, there is a problem with index structure.
-t.find().hint({x:1}).itcount();
+ s = "11235399833116571";
+ for (i = 0; i < 10000; ++i) {
+ n = NumberLong(s + Random.randInt(10));
+ t.insert({x: (Random.randInt(2) ? n : n.floatApprox)});
+ }
+ // If this does not return, there is a problem with index structure.
+ t.find().hint({x: 1}).itcount();
}
diff --git a/jstests/core/objid1.js b/jstests/core/objid1.js
index f1a9fbe0fbd..d08089c26db 100644
--- a/jstests/core/objid1.js
+++ b/jstests/core/objid1.js
@@ -2,15 +2,17 @@ t = db.objid1;
t.drop();
b = new ObjectId();
-assert( b.str , "A" );
+assert(b.str, "A");
-a = new ObjectId( b.str );
-assert.eq( a.str , b.str , "B" );
+a = new ObjectId(b.str);
+assert.eq(a.str, b.str, "B");
-t.save( { a : a } );
-assert( t.findOne().a.isObjectId , "C" );
-assert.eq( a.str , t.findOne().a.str , "D" );
+t.save({a: a});
+assert(t.findOne().a.isObjectId, "C");
+assert.eq(a.str, t.findOne().a.str, "D");
-x = { a : new ObjectId() };
-eval( " y = " + tojson( x ) );
-assert.eq( x.a.str , y.a.str , "E" );
+x = {
+ a: new ObjectId()
+};
+eval(" y = " + tojson(x));
+assert.eq(x.a.str, y.a.str, "E");
diff --git a/jstests/core/objid2.js b/jstests/core/objid2.js
index 0805dffced7..247843b587b 100644
--- a/jstests/core/objid2.js
+++ b/jstests/core/objid2.js
@@ -1,7 +1,7 @@
t = db.objid2;
t.drop();
-t.save( { _id : 517 , a : "hello" } );
+t.save({_id: 517, a: "hello"});
-assert.eq( t.findOne().a , "hello" );
-assert.eq( t.findOne()._id , 517 );
+assert.eq(t.findOne().a, "hello");
+assert.eq(t.findOne()._id, 517);
diff --git a/jstests/core/objid3.js b/jstests/core/objid3.js
index ddf20d9af27..12d45530e52 100644
--- a/jstests/core/objid3.js
+++ b/jstests/core/objid3.js
@@ -1,9 +1,8 @@
t = db.objid3;
t.drop();
-t.save( { a : "bob" , _id : 517 } );
-for ( var k in t.findOne() ){
- assert.eq( k , "_id" , "keys out of order" );
+t.save({a: "bob", _id: 517});
+for (var k in t.findOne()) {
+ assert.eq(k, "_id", "keys out of order");
break;
}
-
diff --git a/jstests/core/objid4.js b/jstests/core/objid4.js
index 1ae55a558f2..7513e077029 100644
--- a/jstests/core/objid4.js
+++ b/jstests/core/objid4.js
@@ -1,16 +1,19 @@
-
o = new ObjectId();
-assert( o.str );
+assert(o.str);
-a = new ObjectId( o.str );
-assert.eq( o.str , a.str );
-assert.eq( a.str , a.str.toString() );
+a = new ObjectId(o.str);
+assert.eq(o.str, a.str);
+assert.eq(a.str, a.str.toString());
-b = ObjectId( o.str );
-assert.eq( o.str , b.str );
-assert.eq( b.str , b.str.toString() );
+b = ObjectId(o.str);
+assert.eq(o.str, b.str);
+assert.eq(b.str, b.str.toString());
-assert.throws( function(z){ return new ObjectId( "a" ); } );
-assert.throws( function(z){ return new ObjectId( "12345678901234567890123z" ); } );
+assert.throws(function(z) {
+ return new ObjectId("a");
+});
+assert.throws(function(z) {
+ return new ObjectId("12345678901234567890123z");
+});
diff --git a/jstests/core/objid5.js b/jstests/core/objid5.js
index 6189032df6e..5b3917727e9 100644
--- a/jstests/core/objid5.js
+++ b/jstests/core/objid5.js
@@ -2,18 +2,18 @@
t = db.objid5;
t.drop();
-t.save( { _id : 5.5 } );
-assert.eq( 18 , Object.bsonsize( t.findOne() ) , "A" );
+t.save({_id: 5.5});
+assert.eq(18, Object.bsonsize(t.findOne()), "A");
-x = db.runCommand( { features : 1 } );
-y = db.runCommand( { features : 1 , oidReset : 1 } );
+x = db.runCommand({features: 1});
+y = db.runCommand({features: 1, oidReset: 1});
-if( !x.ok )
+if (!x.ok)
print("x: " + tojson(x));
-assert( x.oidMachine , "B1" );
-assert.neq( x.oidMachine , y.oidMachine , "B2" );
-assert.eq( x.oidMachine , y.oidMachineOld , "B3" );
+assert(x.oidMachine, "B1");
+assert.neq(x.oidMachine, y.oidMachine, "B2");
+assert.eq(x.oidMachine, y.oidMachineOld, "B3");
-assert.eq( 18 , Object.bsonsize( { _id : 7.7 } ) , "C1" );
-assert.eq( 0 , Object.bsonsize( null ) , "C2" );
+assert.eq(18, Object.bsonsize({_id: 7.7}), "C1");
+assert.eq(0, Object.bsonsize(null), "C2");
diff --git a/jstests/core/objid7.js b/jstests/core/objid7.js
index 520289c8c37..4c3505f8965 100644
--- a/jstests/core/objid7.js
+++ b/jstests/core/objid7.js
@@ -1,13 +1,12 @@
-a = new ObjectId( "4c1a478603eba73620000000" );
-b = new ObjectId( "4c1a478603eba73620000000" );
+a = new ObjectId("4c1a478603eba73620000000");
+b = new ObjectId("4c1a478603eba73620000000");
c = new ObjectId();
-assert.eq( a.toString() , b.toString() , "A" );
-assert.eq( a.toString() , "ObjectId(\"4c1a478603eba73620000000\")" , "B" );
+assert.eq(a.toString(), b.toString(), "A");
+assert.eq(a.toString(), "ObjectId(\"4c1a478603eba73620000000\")", "B");
-assert( a.equals( b ) , "C" );
-
-assert.neq( a.toString() , c.toString() , "D" );
-assert( ! a.equals( c ) , "E" );
+assert(a.equals(b), "C");
+assert.neq(a.toString(), c.toString(), "D");
+assert(!a.equals(c), "E");
diff --git a/jstests/core/opcounters_active.js b/jstests/core/opcounters_active.js
index 56330cbbebc..c184eab3d1d 100644
--- a/jstests/core/opcounters_active.js
+++ b/jstests/core/opcounters_active.js
@@ -2,51 +2,32 @@
(function() {
"use strict";
- //Test the getActiveCommands function
- //Should remove the listCollections section but keep the rest
+ // Test the getActiveCommands function
+ // Should remove the listCollections section but keep the rest
var testInput = {
- "isMaster" : {
- "failed" : NumberLong(0),
- "total" : NumberLong(3)
- },
- "mapreduce" : {
- "shardedfinish" : {
- "failed" : NumberLong(0),
- "total" : NumberLong(1)
- }
- },
- "listCollections" : {
- "failed" : NumberLong(0),
- "total" : NumberLong(0)
- }
+ "isMaster": {"failed": NumberLong(0), "total": NumberLong(3)},
+ "mapreduce": {"shardedfinish": {"failed": NumberLong(0), "total": NumberLong(1)}},
+ "listCollections": {"failed": NumberLong(0), "total": NumberLong(0)}
};
var testExpected = {
- "isMaster" : {
- "failed" : NumberLong(0),
- "total" : NumberLong(3)
- },
- "mapreduce" : {
- "shardedfinish" : {
- "failed" : NumberLong(0),
- "total" : NumberLong(1)
- }
- }
+ "isMaster": {"failed": NumberLong(0), "total": NumberLong(3)},
+ "mapreduce": {"shardedfinish": {"failed": NumberLong(0), "total": NumberLong(1)}}
};
var testResult = getActiveCommands(testInput);
assert.eq(testResult, testExpected, "getActiveCommands did not return the expected result");
- //Test that the serverstatus helper works
+ // Test that the serverstatus helper works
var result = db.serverStatus();
assert.neq(undefined, result, result);
- //Test that the metrics tree returns
+ // Test that the metrics tree returns
assert.neq(undefined, result.metrics, result);
- //Test that the metrics.commands tree returns
+ // Test that the metrics.commands tree returns
assert.neq(undefined, result.metrics.commands, result);
- //Test that the metrics.commands.serverStatus value is non-zero
+ // Test that the metrics.commands.serverStatus value is non-zero
assert.neq(0, result.metrics.commands.serverStatus.total, result);
- //Test that the command returns successfully when no metrics tree is present
- var result = db.serverStatus({"metrics":0});
+ // Test that the command returns successfully when no metrics tree is present
+ var result = db.serverStatus({"metrics": 0});
assert.eq(undefined, result.metrics, result);
}()); \ No newline at end of file
diff --git a/jstests/core/opcounters_write_cmd.js b/jstests/core/opcounters_write_cmd.js
index 660b82bd501..21e42be16b5 100644
--- a/jstests/core/opcounters_write_cmd.js
+++ b/jstests/core/opcounters_write_cmd.js
@@ -27,35 +27,34 @@ t.drop();
// Single insert, no error.
opCounters = newdb.serverStatus().opcounters;
-res = t.insert({_id:0});
+res = t.insert({_id: 0});
assert.writeOK(res);
assert.eq(opCounters.insert + 1, newdb.serverStatus().opcounters.insert);
// Bulk insert, no error.
opCounters = newdb.serverStatus().opcounters;
-res = t.insert([{_id:1},{_id:2}]);
+res = t.insert([{_id: 1}, {_id: 2}]);
assert.writeOK(res);
assert.eq(opCounters.insert + 2, newdb.serverStatus().opcounters.insert);
-
// Test is not run when in compatibility mode as errors are not counted
-if (t.getMongo().writeMode() != "compatibility"){
+if (t.getMongo().writeMode() != "compatibility") {
// Single insert, with error.
opCounters = newdb.serverStatus().opcounters;
- res = t.insert({_id:0});
+ res = t.insert({_id: 0});
assert.writeError(res);
assert.eq(opCounters.insert + 1, newdb.serverStatus().opcounters.insert);
// Bulk insert, with error, ordered.
opCounters = newdb.serverStatus().opcounters;
- res = t.insert([{_id:3},{_id:3},{_id:4}]);
+ res = t.insert([{_id: 3}, {_id: 3}, {_id: 4}]);
assert.writeError(res);
assert.eq(opCounters.insert + 2, newdb.serverStatus().opcounters.insert);
// Bulk insert, with error, unordered.
var continueOnErrorFlag = 1;
opCounters = newdb.serverStatus().opcounters;
- res = t.insert([{_id:5},{_id:5},{_id:6}], continueOnErrorFlag);
+ res = t.insert([{_id: 5}, {_id: 5}, {_id: 6}], continueOnErrorFlag);
assert.writeError(res);
assert.eq(opCounters.insert + 3, newdb.serverStatus().opcounters.insert);
}
@@ -64,17 +63,17 @@ if (t.getMongo().writeMode() != "compatibility"){
//
t.drop();
-t.insert({_id:0});
+t.insert({_id: 0});
// Update, no error.
opCounters = newdb.serverStatus().opcounters;
-res = t.update({_id:0}, {$set:{a:1}});
+res = t.update({_id: 0}, {$set: {a: 1}});
assert.writeOK(res);
assert.eq(opCounters.update + 1, newdb.serverStatus().opcounters.update);
// Update, with error.
opCounters = newdb.serverStatus().opcounters;
-res = t.update({_id:0}, {$set:{_id:1}});
+res = t.update({_id: 0}, {$set: {_id: 1}});
assert.writeError(res);
assert.eq(opCounters.update + 1, newdb.serverStatus().opcounters.update);
@@ -83,17 +82,17 @@ assert.eq(opCounters.update + 1, newdb.serverStatus().opcounters.update);
//
t.drop();
-t.insert([{_id:0},{_id:1}]);
+t.insert([{_id: 0}, {_id: 1}]);
// Delete, no error.
opCounters = newdb.serverStatus().opcounters;
-res = t.remove({_id:0});
+res = t.remove({_id: 0});
assert.writeOK(res);
assert.eq(opCounters.delete + 1, newdb.serverStatus().opcounters.delete);
// Delete, with error.
opCounters = newdb.serverStatus().opcounters;
-res = t.remove({_id:{$invalidOp:1}});
+res = t.remove({_id: {$invalidOp: 1}});
assert.writeError(res);
assert.eq(opCounters.delete + 1, newdb.serverStatus().opcounters.delete);
@@ -104,7 +103,7 @@ assert.eq(opCounters.delete + 1, newdb.serverStatus().opcounters.delete);
//
t.drop();
-t.insert({_id:0});
+t.insert({_id: 0});
// Query, no error.
opCounters = newdb.serverStatus().opcounters;
@@ -113,7 +112,9 @@ assert.eq(opCounters.query + 1, newdb.serverStatus().opcounters.query);
// Query, with error.
opCounters = newdb.serverStatus().opcounters;
-assert.throws(function() { t.findOne({_id:{$invalidOp:1}}); });
+assert.throws(function() {
+ t.findOne({_id: {$invalidOp: 1}});
+});
assert.eq(opCounters.query + 1, newdb.serverStatus().opcounters.query);
//
@@ -123,11 +124,11 @@ assert.eq(opCounters.query + 1, newdb.serverStatus().opcounters.query);
//
t.drop();
-t.insert([{_id:0},{_id:1},{_id:2}]);
+t.insert([{_id: 0}, {_id: 1}, {_id: 2}]);
// Getmore, no error.
opCounters = newdb.serverStatus().opcounters;
-t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore
+t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore
assert.eq(opCounters.query + 1, newdb.serverStatus().opcounters.query);
assert.eq(opCounters.getmore + 1, newdb.serverStatus().opcounters.getmore);
@@ -142,41 +143,46 @@ assert.eq(opCounters.getmore + 1, newdb.serverStatus().opcounters.getmore);
//
t.drop();
-t.insert({_id:0});
+t.insert({_id: 0});
// Command, recognized, no error.
serverStatus = newdb.runCommand({serverStatus: 1});
opCounters = serverStatus.opcounters;
metricsObj = serverStatus.metrics.commands;
-assert.eq(opCounters.command + 1, newdb.serverStatus().opcounters.command); // "serverStatus" counted
+assert.eq(opCounters.command + 1,
+ newdb.serverStatus().opcounters.command); // "serverStatus" counted
// Count this and the last run of "serverStatus"
assert.eq(metricsObj.serverStatus.total + 2,
- newdb.serverStatus().metrics.commands.serverStatus.total,
- "total ServerStatus command counter did not increment"); // "serverStatus" counted
-assert.eq(metricsObj.serverStatus.failed,
- newdb.serverStatus().metrics.commands.serverStatus.failed,
- "failed ServerStatus command counter incremented!"); // "serverStatus" counted
+ newdb.serverStatus().metrics.commands.serverStatus.total,
+ "total ServerStatus command counter did not increment"); // "serverStatus" counted
+assert.eq(metricsObj.serverStatus.failed,
+ newdb.serverStatus().metrics.commands.serverStatus.failed,
+ "failed ServerStatus command counter incremented!"); // "serverStatus" counted
// Command, recognized, with error.
-countVal = { "total" : 0, "failed" : 0 };
+countVal = {
+ "total": 0,
+ "failed": 0
+};
if (metricsObj.count != null) {
countVal = metricsObj.count;
}
-res = t.runCommand("count", {query:{$invalidOp:1}}); // "count command" counted
+res = t.runCommand("count", {query: {$invalidOp: 1}}); // "count command" counted
assert.eq(0, res.ok);
assert.eq(opCounters.command + 5,
- newdb.serverStatus().opcounters.command); // "serverStatus", "count" counted
+ newdb.serverStatus().opcounters.command); // "serverStatus", "count" counted
-assert.eq(countVal.total +1,
- newdb.serverStatus().metrics.commands.count.total,
- "total count command counter did not incremented"); // "serverStatus", "count" counted
-assert.eq(countVal.failed + 1,
- newdb.serverStatus().metrics.commands.count.failed,
- "failed count command counter did not increment"); // "serverStatus", "count" counted
+assert.eq(countVal.total + 1,
+ newdb.serverStatus().metrics.commands.count.total,
+ "total count command counter did not incremented"); // "serverStatus", "count" counted
+assert.eq(countVal.failed + 1,
+ newdb.serverStatus().metrics.commands.count.failed,
+ "failed count command counter did not increment"); // "serverStatus", "count" counted
// Command, unrecognized.
res = t.runCommand("invalid");
assert.eq(0, res.ok);
-assert.eq(opCounters.command + 8, newdb.serverStatus().opcounters.command); // "serverStatus" counted
+assert.eq(opCounters.command + 8,
+ newdb.serverStatus().opcounters.command); // "serverStatus" counted
assert.eq(null, newdb.serverStatus().metrics.commands.invalid);
assert.eq(metricsObj['<UNKNOWN>'] + 1, newdb.serverStatus().metrics.commands['<UNKNOWN>']);
diff --git a/jstests/core/or1.js b/jstests/core/or1.js
index cc6d7aa37e8..0552524eb4c 100644
--- a/jstests/core/or1.js
+++ b/jstests/core/or1.js
@@ -1,43 +1,60 @@
t = db.jstests_or1;
t.drop();
-checkArrs = function( a, b ) {
- assert.eq( a.length, b.length );
+checkArrs = function(a, b) {
+ assert.eq(a.length, b.length);
aStr = [];
bStr = [];
- a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
- b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
- for ( i = 0; i < aStr.length; ++i ) {
- assert.neq( -1, bStr.indexOf( aStr[ i ] ) );
+ a.forEach(function(x) {
+ aStr.push(tojson(x));
+ });
+ b.forEach(function(x) {
+ bStr.push(tojson(x));
+ });
+ for (i = 0; i < aStr.length; ++i) {
+ assert.neq(-1, bStr.indexOf(aStr[i]));
}
};
doTest = function() {
-t.save( {_id:0,a:1} );
-t.save( {_id:1,a:2} );
-t.save( {_id:2,b:1} );
-t.save( {_id:3,b:2} );
-t.save( {_id:4,a:1,b:1} );
-t.save( {_id:5,a:1,b:2} );
-t.save( {_id:6,a:2,b:1} );
-t.save( {_id:7,a:2,b:2} );
+ t.save({_id: 0, a: 1});
+ t.save({_id: 1, a: 2});
+ t.save({_id: 2, b: 1});
+ t.save({_id: 3, b: 2});
+ t.save({_id: 4, a: 1, b: 1});
+ t.save({_id: 5, a: 1, b: 2});
+ t.save({_id: 6, a: 2, b: 1});
+ t.save({_id: 7, a: 2, b: 2});
-assert.throws( function() { t.find( { $or:"a" } ).toArray(); } );
-assert.throws( function() { t.find( { $or:[] } ).toArray(); } );
-assert.throws( function() { t.find( { $or:[ "a" ] } ).toArray(); } );
+ assert.throws(function() {
+ t.find({$or: "a"}).toArray();
+ });
+ assert.throws(function() {
+ t.find({$or: []}).toArray();
+ });
+ assert.throws(function() {
+ t.find({$or: ["a"]}).toArray();
+ });
-a1 = t.find( { $or: [ { a : 1 } ] } ).toArray();
-checkArrs( [ { _id:0, a:1 }, { _id:4, a:1, b:1 }, { _id:5, a:1, b:2 } ], a1 );
+ a1 = t.find({$or: [{a: 1}]}).toArray();
+ checkArrs([{_id: 0, a: 1}, {_id: 4, a: 1, b: 1}, {_id: 5, a: 1, b: 2}], a1);
-a1b2 = t.find( { $or: [ { a : 1 }, { b : 2 } ] } ).toArray();
-checkArrs( [ { _id:0, a:1 }, { _id:3, b:2 }, { _id:4, a:1, b:1 }, { _id:5, a:1, b:2 }, { _id:7, a:2, b:2 } ], a1b2 );
+ a1b2 = t.find({$or: [{a: 1}, {b: 2}]}).toArray();
+ checkArrs([
+ {_id: 0, a: 1},
+ {_id: 3, b: 2},
+ {_id: 4, a: 1, b: 1},
+ {_id: 5, a: 1, b: 2},
+ {_id: 7, a: 2, b: 2}
+ ],
+ a1b2);
-t.drop();
-t.save( {a:[0,1],b:[0,1]} );
-assert.eq( 1, t.find( { $or: [ { a: {$in:[0,1]}} ] } ).toArray().length );
-assert.eq( 1, t.find( { $or: [ { b: {$in:[0,1]}} ] } ).toArray().length );
-assert.eq( 1, t.find( { $or: [ { a: {$in:[0,1]}}, { b: {$in:[0,1]}} ] } ).toArray().length );
+ t.drop();
+ t.save({a: [0, 1], b: [0, 1]});
+ assert.eq(1, t.find({$or: [{a: {$in: [0, 1]}}]}).toArray().length);
+ assert.eq(1, t.find({$or: [{b: {$in: [0, 1]}}]}).toArray().length);
+ assert.eq(1, t.find({$or: [{a: {$in: [0, 1]}}, {b: {$in: [0, 1]}}]}).toArray().length);
};
@@ -45,13 +62,13 @@ doTest();
// not part of SERVER-1003, but good check for subseq. implementations
t.drop();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
doTest();
t.drop();
-t.ensureIndex( {b:1} );
+t.ensureIndex({b: 1});
doTest();
t.drop();
-t.ensureIndex( {a:1,b:1} );
+t.ensureIndex({a: 1, b: 1});
doTest();
diff --git a/jstests/core/or2.js b/jstests/core/or2.js
index 11cfc44ff7f..2624c213fad 100644
--- a/jstests/core/or2.js
+++ b/jstests/core/or2.js
@@ -4,47 +4,58 @@ t.drop();
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
-checkArrs = function( a, b ) {
- assert.eq( a.length, b.length );
+checkArrs = function(a, b) {
+ assert.eq(a.length, b.length);
aStr = [];
bStr = [];
- a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
- b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
- for ( i = 0; i < aStr.length; ++i ) {
- assert.neq( -1, bStr.indexOf( aStr[ i ] ) );
+ a.forEach(function(x) {
+ aStr.push(tojson(x));
+ });
+ b.forEach(function(x) {
+ bStr.push(tojson(x));
+ });
+ for (i = 0; i < aStr.length; ++i) {
+ assert.neq(-1, bStr.indexOf(aStr[i]));
}
};
-doTest = function( index ) {
- if ( index == null ) {
+doTest = function(index) {
+ if (index == null) {
index = true;
}
- t.save( {_id:0,x:0,a:1} );
- t.save( {_id:1,x:0,a:2} );
- t.save( {_id:2,x:0,b:1} );
- t.save( {_id:3,x:0,b:2} );
- t.save( {_id:4,x:1,a:1,b:1} );
- t.save( {_id:5,x:1,a:1,b:2} );
- t.save( {_id:6,x:1,a:2,b:1} );
- t.save( {_id:7,x:1,a:2,b:2} );
+ t.save({_id: 0, x: 0, a: 1});
+ t.save({_id: 1, x: 0, a: 2});
+ t.save({_id: 2, x: 0, b: 1});
+ t.save({_id: 3, x: 0, b: 2});
+ t.save({_id: 4, x: 1, a: 1, b: 1});
+ t.save({_id: 5, x: 1, a: 1, b: 2});
+ t.save({_id: 6, x: 1, a: 2, b: 1});
+ t.save({_id: 7, x: 1, a: 2, b: 2});
- assert.throws( function() { t.find( { x:0,$or:"a" } ).toArray(); } );
- assert.throws( function() { t.find( { x:0,$or:[] } ).toArray(); } );
- assert.throws( function() { t.find( { x:0,$or:[ "a" ] } ).toArray(); } );
+ assert.throws(function() {
+ t.find({x: 0, $or: "a"}).toArray();
+ });
+ assert.throws(function() {
+ t.find({x: 0, $or: []}).toArray();
+ });
+ assert.throws(function() {
+ t.find({x: 0, $or: ["a"]}).toArray();
+ });
- a1 = t.find( { x:0, $or: [ { a : 1 } ] } ).toArray();
- checkArrs( [ { _id:0, x:0, a:1 } ], a1 );
- if ( index ) {
- var explain = t.find( { x:0,$or: [ { a : 1 } ] } ).explain();
- assert( isIxscan(explain.queryPlanner.winningPlan) );
+ a1 = t.find({x: 0, $or: [{a: 1}]}).toArray();
+ checkArrs([{_id: 0, x: 0, a: 1}], a1);
+ if (index) {
+ var explain = t.find({x: 0, $or: [{a: 1}]}).explain();
+ assert(isIxscan(explain.queryPlanner.winningPlan));
}
- a1b2 = t.find( { x:1, $or: [ { a : 1 }, { b : 2 } ] } ).toArray();
- checkArrs( [ { _id:4, x:1, a:1, b:1 }, { _id:5, x:1, a:1, b:2 }, { _id:7, x:1, a:2, b:2 } ], a1b2 );
- if ( index ) {
- var explain = t.find( { x:0,$or: [ { a : 1 } ] } ).explain();
- assert( isIxscan(explain.queryPlanner.winningPlan) );
+ a1b2 = t.find({x: 1, $or: [{a: 1}, {b: 2}]}).toArray();
+ checkArrs([{_id: 4, x: 1, a: 1, b: 1}, {_id: 5, x: 1, a: 1, b: 2}, {_id: 7, x: 1, a: 2, b: 2}],
+ a1b2);
+ if (index) {
+ var explain = t.find({x: 0, $or: [{a: 1}]}).explain();
+ assert(isIxscan(explain.queryPlanner.winningPlan));
}
/*
@@ -56,19 +67,19 @@ doTest = function( index ) {
*/
};
-doTest( false );
+doTest(false);
-t.ensureIndex( { x:1 } );
+t.ensureIndex({x: 1});
doTest();
t.drop();
-t.ensureIndex( { x:1,a:1 } );
+t.ensureIndex({x: 1, a: 1});
doTest();
t.drop();
-t.ensureIndex( {x:1,b:1} );
+t.ensureIndex({x: 1, b: 1});
doTest();
t.drop();
-t.ensureIndex( {x:1,a:1,b:1} );
+t.ensureIndex({x: 1, a: 1, b: 1});
doTest();
diff --git a/jstests/core/or3.js b/jstests/core/or3.js
index 1ca0ac29d80..50434965bae 100644
--- a/jstests/core/or3.js
+++ b/jstests/core/or3.js
@@ -4,63 +4,73 @@ t.drop();
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
-checkArrs = function( a, b ) {
- assert.eq( a.length, b.length );
+checkArrs = function(a, b) {
+ assert.eq(a.length, b.length);
aStr = [];
bStr = [];
- a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
- b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
- for ( i = 0; i < aStr.length; ++i ) {
- assert.neq( -1, bStr.indexOf( aStr[ i ] ) );
+ a.forEach(function(x) {
+ aStr.push(tojson(x));
+ });
+ b.forEach(function(x) {
+ bStr.push(tojson(x));
+ });
+ for (i = 0; i < aStr.length; ++i) {
+ assert.neq(-1, bStr.indexOf(aStr[i]));
}
};
-doTest = function( index ) {
- if ( index == null ) {
+doTest = function(index) {
+ if (index == null) {
index = true;
}
-
- t.save( {_id:0,x:0,a:1} );
- t.save( {_id:1,x:0,a:2} );
- t.save( {_id:2,x:0,b:1} );
- t.save( {_id:3,x:0,b:2} );
- t.save( {_id:4,x:1,a:1,b:1} );
- t.save( {_id:5,x:1,a:1,b:2} );
- t.save( {_id:6,x:1,a:2,b:1} );
- t.save( {_id:7,x:1,a:2,b:2} );
-
- assert.throws( function() { t.find( { x:0,$nor:"a" } ).toArray(); } );
- assert.throws( function() { t.find( { x:0,$nor:[] } ).toArray(); } );
- assert.throws( function() { t.find( { x:0,$nor:[ "a" ] } ).toArray(); } );
- an1 = t.find( { $nor: [ { a : 1 } ] } ).toArray();
- checkArrs( t.find( {a:{$ne:1}} ).toArray(), an1 );
-
- an1bn2 = t.find( { x:1, $nor: [ { a : 1 }, { b : 2 } ] } ).toArray();
- checkArrs( [ { _id:6, x:1, a:2, b:1 } ], an1bn2 );
- checkArrs( t.find( { x:1, a:{$ne:1}, b:{$ne:2} } ).toArray(), an1bn2 );
- if ( index ) {
- var explain = t.find( { x:1, $nor: [ { a : 1 }, { b : 2 } ] } ).explain();
- assert( isIxscan(explain.queryPlanner.winningPlan) );
+ t.save({_id: 0, x: 0, a: 1});
+ t.save({_id: 1, x: 0, a: 2});
+ t.save({_id: 2, x: 0, b: 1});
+ t.save({_id: 3, x: 0, b: 2});
+ t.save({_id: 4, x: 1, a: 1, b: 1});
+ t.save({_id: 5, x: 1, a: 1, b: 2});
+ t.save({_id: 6, x: 1, a: 2, b: 1});
+ t.save({_id: 7, x: 1, a: 2, b: 2});
+
+ assert.throws(function() {
+ t.find({x: 0, $nor: "a"}).toArray();
+ });
+ assert.throws(function() {
+ t.find({x: 0, $nor: []}).toArray();
+ });
+ assert.throws(function() {
+ t.find({x: 0, $nor: ["a"]}).toArray();
+ });
+
+ an1 = t.find({$nor: [{a: 1}]}).toArray();
+ checkArrs(t.find({a: {$ne: 1}}).toArray(), an1);
+
+ an1bn2 = t.find({x: 1, $nor: [{a: 1}, {b: 2}]}).toArray();
+ checkArrs([{_id: 6, x: 1, a: 2, b: 1}], an1bn2);
+ checkArrs(t.find({x: 1, a: {$ne: 1}, b: {$ne: 2}}).toArray(), an1bn2);
+ if (index) {
+ var explain = t.find({x: 1, $nor: [{a: 1}, {b: 2}]}).explain();
+ assert(isIxscan(explain.queryPlanner.winningPlan));
}
-
- an1b2 = t.find( { $nor: [ { a : 1 } ], $or: [ { b : 2 } ] } ).toArray();
- checkArrs( t.find( {a:{$ne:1},b:2} ).toArray(), an1b2 );
+
+ an1b2 = t.find({$nor: [{a: 1}], $or: [{b: 2}]}).toArray();
+ checkArrs(t.find({a: {$ne: 1}, b: 2}).toArray(), an1b2);
};
-doTest( false );
+doTest(false);
-t.ensureIndex( { x:1 } );
+t.ensureIndex({x: 1});
doTest();
t.drop();
-t.ensureIndex( { x:1,a:1 } );
+t.ensureIndex({x: 1, a: 1});
doTest();
t.drop();
-t.ensureIndex( {x:1,b:1} );
+t.ensureIndex({x: 1, b: 1});
doTest();
t.drop();
-t.ensureIndex( {x:1,a:1,b:1} );
+t.ensureIndex({x: 1, a: 1, b: 1});
doTest();
diff --git a/jstests/core/or4.js b/jstests/core/or4.js
index a02150ff1d0..6053295a7d1 100644
--- a/jstests/core/or4.js
+++ b/jstests/core/or4.js
@@ -1,82 +1,90 @@
t = db.jstests_or4;
t.drop();
-checkArrs = function( a, b ) {
+checkArrs = function(a, b) {
m = "[" + a + "] != [" + b + "]";
- a = eval( a );
- b = eval( b );
- assert.eq( a.length, b.length, m );
+ a = eval(a);
+ b = eval(b);
+ assert.eq(a.length, b.length, m);
aStr = [];
bStr = [];
- a.forEach( function( x ) { aStr.push( tojson( x ) ); } );
- b.forEach( function( x ) { bStr.push( tojson( x ) ); } );
- for ( i = 0; i < aStr.length; ++i ) {
- assert( -1 != bStr.indexOf( aStr[ i ] ), m );
+ a.forEach(function(x) {
+ aStr.push(tojson(x));
+ });
+ b.forEach(function(x) {
+ bStr.push(tojson(x));
+ });
+ for (i = 0; i < aStr.length; ++i) {
+ assert(-1 != bStr.indexOf(aStr[i]), m);
}
};
-t.ensureIndex( {a:1} );
-t.ensureIndex( {b:1} );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
-t.save( {a:2} );
-t.save( {b:3} );
-t.save( {b:3} );
-t.save( {a:2,b:3} );
+t.save({a: 2});
+t.save({b: 3});
+t.save({b: 3});
+t.save({a: 2, b: 3});
-assert.eq.automsg( "4", "t.count( {$or:[{a:2},{b:3}]} )" );
-assert.eq.automsg( "2", "t.count( {$or:[{a:2},{a:2}]} )" );
+assert.eq.automsg("4", "t.count( {$or:[{a:2},{b:3}]} )");
+assert.eq.automsg("2", "t.count( {$or:[{a:2},{a:2}]} )");
-assert.eq.automsg( "2", "t.find( {} ).skip( 2 ).count( true )" );
-assert.eq.automsg( "2", "t.find( {$or:[{a:2},{b:3}]} ).skip( 2 ).count( true )" );
-assert.eq.automsg( "1", "t.find( {$or:[{a:2},{b:3}]} ).skip( 3 ).count( true )" );
+assert.eq.automsg("2", "t.find( {} ).skip( 2 ).count( true )");
+assert.eq.automsg("2", "t.find( {$or:[{a:2},{b:3}]} ).skip( 2 ).count( true )");
+assert.eq.automsg("1", "t.find( {$or:[{a:2},{b:3}]} ).skip( 3 ).count( true )");
-assert.eq.automsg( "2", "t.find( {} ).limit( 2 ).count( true )" );
-assert.eq.automsg( "1", "t.find( {$or:[{a:2},{b:3}]} ).limit( 1 ).count( true )" );
-assert.eq.automsg( "2", "t.find( {$or:[{a:2},{b:3}]} ).limit( 2 ).count( true )" );
-assert.eq.automsg( "3", "t.find( {$or:[{a:2},{b:3}]} ).limit( 3 ).count( true )" );
-assert.eq.automsg( "4", "t.find( {$or:[{a:2},{b:3}]} ).limit( 4 ).count( true )" );
+assert.eq.automsg("2", "t.find( {} ).limit( 2 ).count( true )");
+assert.eq.automsg("1", "t.find( {$or:[{a:2},{b:3}]} ).limit( 1 ).count( true )");
+assert.eq.automsg("2", "t.find( {$or:[{a:2},{b:3}]} ).limit( 2 ).count( true )");
+assert.eq.automsg("3", "t.find( {$or:[{a:2},{b:3}]} ).limit( 3 ).count( true )");
+assert.eq.automsg("4", "t.find( {$or:[{a:2},{b:3}]} ).limit( 4 ).count( true )");
-t.remove({ $or: [{ a: 2 }, { b: 3}] });
-assert.eq.automsg( "0", "t.count()" );
+t.remove({$or: [{a: 2}, {b: 3}]});
+assert.eq.automsg("0", "t.count()");
-t.save( {b:3} );
-t.remove({ $or: [{ a: 2 }, { b: 3}] });
-assert.eq.automsg( "0", "t.count()" );
+t.save({b: 3});
+t.remove({$or: [{a: 2}, {b: 3}]});
+assert.eq.automsg("0", "t.count()");
-t.save( {a:2} );
-t.save( {b:3} );
-t.save( {a:2,b:3} );
+t.save({a: 2});
+t.save({b: 3});
+t.save({a: 2, b: 3});
-t.update( {$or:[{a:2},{b:3}]}, {$set:{z:1}}, false, true );
-assert.eq.automsg( "3", "t.count( {z:1} )" );
+t.update({$or: [{a: 2}, {b: 3}]}, {$set: {z: 1}}, false, true);
+assert.eq.automsg("3", "t.count( {z:1} )");
-assert.eq.automsg( "3", "t.find( {$or:[{a:2},{b:3}]} ).toArray().length" );
-checkArrs( "t.find().toArray()", "t.find( {$or:[{a:2},{b:3}]} ).toArray()" );
-assert.eq.automsg( "2", "t.find( {$or:[{a:2},{b:3}]} ).skip(1).toArray().length" );
+assert.eq.automsg("3", "t.find( {$or:[{a:2},{b:3}]} ).toArray().length");
+checkArrs("t.find().toArray()", "t.find( {$or:[{a:2},{b:3}]} ).toArray()");
+assert.eq.automsg("2", "t.find( {$or:[{a:2},{b:3}]} ).skip(1).toArray().length");
-assert.eq.automsg( "3", "t.find( {$or:[{a:2},{b:3}]} ).batchSize( 2 ).toArray().length" );
+assert.eq.automsg("3", "t.find( {$or:[{a:2},{b:3}]} ).batchSize( 2 ).toArray().length");
-t.save( {a:1} );
-t.save( {b:4} );
-t.save( {a:2} );
+t.save({a: 1});
+t.save({b: 4});
+t.save({a: 2});
-assert.eq.automsg( "4", "t.find( {$or:[{a:2},{b:3}]} ).batchSize( 2 ).toArray().length" );
-assert.eq.automsg( "4", "t.find( {$or:[{a:2},{b:3}]} ).snapshot().toArray().length" );
+assert.eq.automsg("4", "t.find( {$or:[{a:2},{b:3}]} ).batchSize( 2 ).toArray().length");
+assert.eq.automsg("4", "t.find( {$or:[{a:2},{b:3}]} ).snapshot().toArray().length");
-t.save( {a:1,b:3} );
-assert.eq.automsg( "4", "t.find( {$or:[{a:2},{b:3}]} ).limit(4).toArray().length" );
+t.save({a: 1, b: 3});
+assert.eq.automsg("4", "t.find( {$or:[{a:2},{b:3}]} ).limit(4).toArray().length");
-assert.eq.automsg( "[1,2]", "Array.sort( t.distinct( 'a', {$or:[{a:2},{b:3}]} ) )" );
+assert.eq.automsg("[1,2]", "Array.sort( t.distinct( 'a', {$or:[{a:2},{b:3}]} ) )");
-assert.eq.automsg( "[{a:2},{a:null},{a:1}]", "t.group( {key:{a:1}, cond:{$or:[{a:2},{b:3}]}, reduce:function( x, y ) { }, initial:{} } )" );
-assert.eq.automsg( "5", "t.mapReduce( function() { emit( 'a', this.a ); }, function( key, vals ) { return vals.length; }, {out:{inline:true},query:{$or:[{a:2},{b:3}]}} ).counts.input" );
+assert.eq.automsg(
+ "[{a:2},{a:null},{a:1}]",
+ "t.group( {key:{a:1}, cond:{$or:[{a:2},{b:3}]}, reduce:function( x, y ) { }, initial:{} } )");
+assert.eq.automsg(
+ "5",
+ "t.mapReduce( function() { emit( 'a', this.a ); }, function( key, vals ) { return vals.length; }, {out:{inline:true},query:{$or:[{a:2},{b:3}]}} ).counts.input");
-t.remove( {} );
+t.remove({});
-t.save( {a:[1,2]} );
-assert.eq.automsg( "1", "t.find( {$or:[{a:1},{a:2}]} ).toArray().length" );
-assert.eq.automsg( "1", "t.count( {$or:[{a:1},{a:2}]} )" );
-assert.eq.automsg( "1", "t.find( {$or:[{a:2},{a:1}]} ).toArray().length" );
-assert.eq.automsg( "1", "t.count( {$or:[{a:2},{a:1}]} )" );
+t.save({a: [1, 2]});
+assert.eq.automsg("1", "t.find( {$or:[{a:1},{a:2}]} ).toArray().length");
+assert.eq.automsg("1", "t.count( {$or:[{a:1},{a:2}]} )");
+assert.eq.automsg("1", "t.find( {$or:[{a:2},{a:1}]} ).toArray().length");
+assert.eq.automsg("1", "t.count( {$or:[{a:2},{a:1}]} )");
t.remove({});
diff --git a/jstests/core/or5.js b/jstests/core/or5.js
index 8d9d8802860..dd32c1c3c50 100644
--- a/jstests/core/or5.js
+++ b/jstests/core/or5.js
@@ -1,63 +1,65 @@
t = db.jstests_or5;
t.drop();
-t.ensureIndex( {a:1} );
-t.ensureIndex( {b:1} );
-
-t.ensureIndex( {c:1} );
-
-t.save( {a:2} );
-t.save( {b:3} );
-t.save( {c:4} );
-t.save( {a:2,b:3} );
-t.save( {a:2,c:4} );
-t.save( {b:3,c:4} );
-t.save( {a:2,b:3,c:4} );
-
-assert.eq.automsg( "7", "t.count( {$or:[{a:2},{b:3},{c:4}]} )" );
-assert.eq.automsg( "6", "t.count( {$or:[{a:6},{b:3},{c:4}]} )" );
-assert.eq.automsg( "6", "t.count( {$or:[{a:2},{b:6},{c:4}]} )" );
-assert.eq.automsg( "6", "t.count( {$or:[{a:2},{b:3},{c:6}]} )" );
-
-assert.eq.automsg( "7", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).toArray().length" );
-assert.eq.automsg( "6", "t.find( {$or:[{a:6},{b:3},{c:4}]} ).toArray().length" );
-assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:6},{c:4}]} ).toArray().length" );
-assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:3},{c:6}]} ).toArray().length" );
-
-for( i = 2; i <= 7; ++i ) {
-assert.eq.automsg( "7", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( i ).toArray().length" );
-assert.eq.automsg( "6", "t.find( {$or:[{a:6},{b:3},{c:4}]} ).batchSize( i ).toArray().length" );
-assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:6},{c:4}]} ).batchSize( i ).toArray().length" );
-assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:3},{c:6}]} ).batchSize( i ).toArray().length" );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
+
+t.ensureIndex({c: 1});
+
+t.save({a: 2});
+t.save({b: 3});
+t.save({c: 4});
+t.save({a: 2, b: 3});
+t.save({a: 2, c: 4});
+t.save({b: 3, c: 4});
+t.save({a: 2, b: 3, c: 4});
+
+assert.eq.automsg("7", "t.count( {$or:[{a:2},{b:3},{c:4}]} )");
+assert.eq.automsg("6", "t.count( {$or:[{a:6},{b:3},{c:4}]} )");
+assert.eq.automsg("6", "t.count( {$or:[{a:2},{b:6},{c:4}]} )");
+assert.eq.automsg("6", "t.count( {$or:[{a:2},{b:3},{c:6}]} )");
+
+assert.eq.automsg("7", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).toArray().length");
+assert.eq.automsg("6", "t.find( {$or:[{a:6},{b:3},{c:4}]} ).toArray().length");
+assert.eq.automsg("6", "t.find( {$or:[{a:2},{b:6},{c:4}]} ).toArray().length");
+assert.eq.automsg("6", "t.find( {$or:[{a:2},{b:3},{c:6}]} ).toArray().length");
+
+for (i = 2; i <= 7; ++i) {
+ assert.eq.automsg("7", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( i ).toArray().length");
+ assert.eq.automsg("6", "t.find( {$or:[{a:6},{b:3},{c:4}]} ).batchSize( i ).toArray().length");
+ assert.eq.automsg("6", "t.find( {$or:[{a:2},{b:6},{c:4}]} ).batchSize( i ).toArray().length");
+ assert.eq.automsg("6", "t.find( {$or:[{a:2},{b:3},{c:6}]} ).batchSize( i ).toArray().length");
}
-t.ensureIndex( {z:"2d"} );
+t.ensureIndex({z: "2d"});
-assert.throws.automsg( function() { return t.find( {$or:[{z:{$near:[50,50]}},{a:2}]} ).toArray(); } );
+assert.throws.automsg(function() {
+ return t.find({$or: [{z: {$near: [50, 50]}}, {a: 2}]}).toArray();
+});
function reset() {
t.drop();
-
- t.ensureIndex( {a:1} );
- t.ensureIndex( {b:1} );
- t.ensureIndex( {c:1} );
-
- t.save( {a:2} );
- t.save( {a:2} );
- t.save( {b:3} );
- t.save( {b:3} );
- t.save( {c:4} );
- t.save( {c:4} );
+
+ t.ensureIndex({a: 1});
+ t.ensureIndex({b: 1});
+ t.ensureIndex({c: 1});
+
+ t.save({a: 2});
+ t.save({a: 2});
+ t.save({b: 3});
+ t.save({b: 3});
+ t.save({c: 4});
+ t.save({c: 4});
}
reset();
-assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( 1 ).itcount()" );
-assert.eq.automsg( "6", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( 2 ).itcount()" );
+assert.eq.automsg("6", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( 1 ).itcount()");
+assert.eq.automsg("6", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( 2 ).itcount()");
t.drop();
-t.save( {a:[1,2]} );
-assert.eq.automsg( "1", "t.find( {$or:[{a:[1,2]}]} ).itcount()" );
-assert.eq.automsg( "1", "t.find( {$or:[{a:{$all:[1,2]}}]} ).itcount()" );
-assert.eq.automsg( "0", "t.find( {$or:[{a:{$all:[1,3]}}]} ).itcount()" );
+t.save({a: [1, 2]});
+assert.eq.automsg("1", "t.find( {$or:[{a:[1,2]}]} ).itcount()");
+assert.eq.automsg("1", "t.find( {$or:[{a:{$all:[1,2]}}]} ).itcount()");
+assert.eq.automsg("0", "t.find( {$or:[{a:{$all:[1,3]}}]} ).itcount()");
diff --git a/jstests/core/or7.js b/jstests/core/or7.js
index 916158047d8..49fd936d7eb 100644
--- a/jstests/core/or7.js
+++ b/jstests/core/or7.js
@@ -1,41 +1,41 @@
t = db.jstests_or7;
t.drop();
-t.ensureIndex( {a:1} );
-t.save( {a:2} );
+t.ensureIndex({a: 1});
+t.save({a: 2});
-assert.eq.automsg( "1", "t.count( {$or:[{a:{$in:[1,3]}},{a:2}]} )" );
+assert.eq.automsg("1", "t.count( {$or:[{a:{$in:[1,3]}},{a:2}]} )");
-//SERVER-1201 ...
+// SERVER-1201 ...
t.remove({});
-t.save( {a:"aa"} );
-t.save( {a:"ab"} );
-t.save( {a:"ad"} );
+t.save({a: "aa"});
+t.save({a: "ab"});
+t.save({a: "ad"});
-assert.eq.automsg( "3", "t.count( {$or:[{a:/^ab/},{a:/^a/}]} )" );
+assert.eq.automsg("3", "t.count( {$or:[{a:/^ab/},{a:/^a/}]} )");
t.remove({});
-t.save( {a:"aa"} );
-t.save( {a:"ad"} );
+t.save({a: "aa"});
+t.save({a: "ad"});
-assert.eq.automsg( "2", "t.count( {$or:[{a:/^ab/},{a:/^a/}]} )" );
+assert.eq.automsg("2", "t.count( {$or:[{a:/^ab/},{a:/^a/}]} )");
t.remove({});
-t.save( {a:"aa"} );
-t.save( {a:"ac"} );
+t.save({a: "aa"});
+t.save({a: "ac"});
-assert.eq.automsg( "2", "t.count( {$or:[{a:/^ab/},{a:/^a/}]} )" );
+assert.eq.automsg("2", "t.count( {$or:[{a:/^ab/},{a:/^a/}]} )");
-assert.eq.automsg( "2", "t.count( {$or:[{a:/^ab/},{a:/^a/}]} )" );
+assert.eq.automsg("2", "t.count( {$or:[{a:/^ab/},{a:/^a/}]} )");
-t.save( {a:"ab"} );
-assert.eq.automsg( "3", "t.count( {$or:[{a:{$in:[/^ab/],$gte:'abc'}},{a:/^a/}]} )" );
+t.save({a: "ab"});
+assert.eq.automsg("3", "t.count( {$or:[{a:{$in:[/^ab/],$gte:'abc'}},{a:/^a/}]} )");
t.remove({});
-t.save( {a:"a"} );
-t.save( {a:"b"} );
-assert.eq.automsg( "2", "t.count( {$or:[{a:{$gt:'a',$lt:'b'}},{a:{$gte:'a',$lte:'b'}}]} )" );
+t.save({a: "a"});
+t.save({a: "b"});
+assert.eq.automsg("2", "t.count( {$or:[{a:{$gt:'a',$lt:'b'}},{a:{$gte:'a',$lte:'b'}}]} )");
diff --git a/jstests/core/or8.js b/jstests/core/or8.js
index 40d5b38cede..c778238b96e 100644
--- a/jstests/core/or8.js
+++ b/jstests/core/or8.js
@@ -3,26 +3,26 @@
t = db.jstests_or8;
t.drop();
-t.find({ "$or": [ { "PropA": { "$lt": "b" } }, { "PropA": { "$lt": "b", "$gt": "a" } } ] }).toArray();
+t.find({"$or": [{"PropA": {"$lt": "b"}}, {"PropA": {"$lt": "b", "$gt": "a"}}]}).toArray();
// empty $in
-t.save( {a:1} );
-t.save( {a:3} );
-t.ensureIndex( {a:1} );
-t.find({ $or: [ { a: {$in:[]} } ] } ).toArray();
-assert.eq.automsg( "2", "t.find({ $or: [ { a: {$in:[]} }, {a:1}, {a:3} ] } ).toArray().length" );
-assert.eq.automsg( "2", "t.find({ $or: [ {a:1}, { a: {$in:[]} }, {a:3} ] } ).toArray().length" );
-assert.eq.automsg( "2", "t.find({ $or: [ {a:1}, {a:3}, { a: {$in:[]} } ] } ).toArray().length" );
+t.save({a: 1});
+t.save({a: 3});
+t.ensureIndex({a: 1});
+t.find({$or: [{a: {$in: []}}]}).toArray();
+assert.eq.automsg("2", "t.find({ $or: [ { a: {$in:[]} }, {a:1}, {a:3} ] } ).toArray().length");
+assert.eq.automsg("2", "t.find({ $or: [ {a:1}, { a: {$in:[]} }, {a:3} ] } ).toArray().length");
+assert.eq.automsg("2", "t.find({ $or: [ {a:1}, {a:3}, { a: {$in:[]} } ] } ).toArray().length");
// nested negate field
t.drop();
-t.save( {a:{b:1,c:1}} );
-t.ensureIndex( { 'a.b':1 } );
-t.ensureIndex( { 'a.c':1 } );
-assert.eq( 1, t.find( {$or: [ { 'a.b':1 }, { 'a.c':1 } ] } ).itcount() );
+t.save({a: {b: 1, c: 1}});
+t.ensureIndex({'a.b': 1});
+t.ensureIndex({'a.c': 1});
+assert.eq(1, t.find({$or: [{'a.b': 1}, {'a.c': 1}]}).itcount());
t.remove({});
-t.save( {a:[{b:1,c:1},{b:2,c:1}]} );
-assert.eq( 1, t.find( {$or: [ { 'a.b':2 }, { 'a.c':1 } ] } ).itcount() );
+t.save({a: [{b: 1, c: 1}, {b: 2, c: 1}]});
+assert.eq(1, t.find({$or: [{'a.b': 2}, {'a.c': 1}]}).itcount());
diff --git a/jstests/core/or9.js b/jstests/core/or9.js
index c76c5407b6f..d203d3d5f4d 100644
--- a/jstests/core/or9.js
+++ b/jstests/core/or9.js
@@ -3,56 +3,56 @@
t = db.jstests_or9;
t.drop();
-t.ensureIndex( {a:1,b:1} );
+t.ensureIndex({a: 1, b: 1});
-t.save( {a:2,b:2} );
+t.save({a: 2, b: 2});
-function check( a, q ) {
+function check(a, q) {
count = a;
query = q;
- assert.eq.automsg( "count", "t.count( query )" );
+ assert.eq.automsg("count", "t.count( query )");
}
// SERVER-12594: there are two clauses in this case, because we do
// not yet collapse OR of ANDs to a single ixscan.
-check( 1, { $or: [ { a: { $gte:1,$lte:3 } }, { a: 2 } ] } );
+check(1, {$or: [{a: {$gte: 1, $lte: 3}}, {a: 2}]});
-check( 1, { $or: [ { a: { $gt:2,$lte:3 } }, { a: 2 } ] } );
+check(1, {$or: [{a: {$gt: 2, $lte: 3}}, {a: 2}]});
-check( 1, { $or: [ { b: { $gte:1,$lte:3 } }, { b: 2 } ] } );
-check( 1, { $or: [ { b: { $gte:2,$lte:3 } }, { b: 2 } ] } );
-check( 1, { $or: [ { b: { $gt:2,$lte:3 } }, { b: 2 } ] } );
+check(1, {$or: [{b: {$gte: 1, $lte: 3}}, {b: 2}]});
+check(1, {$or: [{b: {$gte: 2, $lte: 3}}, {b: 2}]});
+check(1, {$or: [{b: {$gt: 2, $lte: 3}}, {b: 2}]});
// SERVER-12594: there are two clauses in this case, because we do
// not yet collapse OR of ANDs to a single ixscan.
-check( 1, { $or: [ { a: { $gte:1,$lte:3 } }, { a: 2, b: 2 } ] } );
+check(1, {$or: [{a: {$gte: 1, $lte: 3}}, {a: 2, b: 2}]});
-check( 1, { $or: [ { a: { $gte:1,$lte:3 }, b:3 }, { a: 2 } ] } );
+check(1, {$or: [{a: {$gte: 1, $lte: 3}, b: 3}, {a: 2}]});
-check( 1, { $or: [ { b: { $gte:1,$lte:3 } }, { b: 2, a: 2 } ] } );
+check(1, {$or: [{b: {$gte: 1, $lte: 3}}, {b: 2, a: 2}]});
-check( 1, { $or: [ { b: { $gte:1,$lte:3 }, a:3 }, { b: 2 } ] } );
+check(1, {$or: [{b: {$gte: 1, $lte: 3}, a: 3}, {b: 2}]});
-check( 1, { $or: [ { a: { $gte:1,$lte:3 }, b: 3 }, { a: 2, b: 2 } ] } );
-check( 1, { $or: [ { a: { $gte:2,$lte:3 }, b: 3 }, { a: 2, b: 2 } ] } );
+check(1, {$or: [{a: {$gte: 1, $lte: 3}, b: 3}, {a: 2, b: 2}]});
+check(1, {$or: [{a: {$gte: 2, $lte: 3}, b: 3}, {a: 2, b: 2}]});
// SERVER-12594: there are two clauses in this case, because we do
// not yet collapse OR of ANDs to a single ixscan.
-check( 1, { $or: [ { a: { $gte:1,$lte:3 }, b: 2 }, { a: 2, b: 2 } ] } );
+check(1, {$or: [{a: {$gte: 1, $lte: 3}, b: 2}, {a: 2, b: 2}]});
-check( 1, { $or: [ { b: { $gte:1,$lte:3 }, a: 3 }, { a: 2, b: 2 } ] } );
-check( 1, { $or: [ { b: { $gte:2,$lte:3 }, a: 3 }, { a: 2, b: 2 } ] } );
+check(1, {$or: [{b: {$gte: 1, $lte: 3}, a: 3}, {a: 2, b: 2}]});
+check(1, {$or: [{b: {$gte: 2, $lte: 3}, a: 3}, {a: 2, b: 2}]});
// SERVER-12594: there are two clauses in this case, because we do
// not yet collapse OR of ANDs to a single ixscan.
-check( 1, { $or: [ { b: { $gte:1,$lte:3 }, a: 2 }, { a: 2, b: 2 } ] } );
+check(1, {$or: [{b: {$gte: 1, $lte: 3}, a: 2}, {a: 2, b: 2}]});
t.remove({});
-t.save( {a:1,b:5} );
-t.save( {a:5,b:1} );
+t.save({a: 1, b: 5});
+t.save({a: 5, b: 1});
// SERVER-12594: there are two clauses in the case below, because we do
// not yet collapse OR of ANDs to a single ixscan.
-check( 2, { $or: [ { a: { $in:[1,5] }, b: { $in:[1,5] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } );
+check(2, {$or: [{a: {$in: [1, 5]}, b: {$in: [1, 5]}}, {a: {$in: [1, 5]}, b: {$in: [1, 5]}}]});
-check( 2, { $or: [ { a: { $in:[1] }, b: { $in:[1,5] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } );
-check( 2, { $or: [ { a: { $in:[1] }, b: { $in:[1] } }, { a: { $in:[1,5] }, b: { $in:[1,5] } } ] } );
+check(2, {$or: [{a: {$in: [1]}, b: {$in: [1, 5]}}, {a: {$in: [1, 5]}, b: {$in: [1, 5]}}]});
+check(2, {$or: [{a: {$in: [1]}, b: {$in: [1]}}, {a: {$in: [1, 5]}, b: {$in: [1, 5]}}]});
diff --git a/jstests/core/or_inexact.js b/jstests/core/or_inexact.js
index 722d47ee05a..8c9db1cc7ba 100644
--- a/jstests/core/or_inexact.js
+++ b/jstests/core/or_inexact.js
@@ -38,8 +38,8 @@ t.insert({_id: 0, names: ["thomas", "alexandra"]});
t.insert({_id: 1, names: "frank"});
t.insert({_id: 2, names: "alice"});
t.insert({_id: 3, names: ["dave"]});
-cursor = t.find({$or: [{names: "frank"}, {names: /^al(ice|ex)/},
- {names: {$elemMatch: {$eq: "thomas"}}}]});
+cursor = t.find(
+ {$or: [{names: "frank"}, {names: /^al(ice|ex)/}, {names: {$elemMatch: {$eq: "thomas"}}}]});
assert.eq(3, cursor.itcount(), "case 3");
// Case 4: Two INEXACT_FETCH.
@@ -48,8 +48,8 @@ t.ensureIndex({names: 1});
t.insert({_id: 0, names: ["thomas", "alexandra"]});
t.insert({_id: 1, names: ["frank", "alice"]});
t.insert({_id: 2, names: "frank"});
-cursor = t.find({$or: [{names: {$elemMatch: {$eq: "alexandra"}}},
- {names: {$elemMatch: {$eq: "frank"}}}]});
+cursor = t.find(
+ {$or: [{names: {$elemMatch: {$eq: "alexandra"}}}, {names: {$elemMatch: {$eq: "frank"}}}]});
assert.eq(2, cursor.itcount(), "case 4");
// Case 5: Two indices. One has EXACT and INEXACT_COVERED. The other
@@ -62,8 +62,8 @@ t.insert({_id: 1, first: "john", last: "doe"});
t.insert({_id: 2, first: "dave", last: "st"});
t.insert({_id: 3, first: ["dave", "david"], last: "pasette"});
t.insert({_id: 4, first: "joanna", last: ["smith", "doe"]});
-cursor = t.find({$or: [{first: "frank"}, {last: {$elemMatch: {$eq: "doe"}}},
- {first: /david/}, {last: "st"}]});
+cursor = t.find(
+ {$or: [{first: "frank"}, {last: {$elemMatch: {$eq: "doe"}}}, {first: /david/}, {last: "st"}]});
assert.eq(4, cursor.itcount(), "case 5");
// Case 6: Multikey with only EXACT predicates.
@@ -116,12 +116,32 @@ t.drop();
t.ensureIndex({pre: 1, loc: "2dsphere"});
t.insert({_id: 0, pre: 3, loc: {type: "Point", coordinates: [40, 5]}});
t.insert({_id: 1, pre: 4, loc: {type: "Point", coordinates: [0, 0]}});
-cursor = t.find({$or: [{pre: 3, loc: {$geoWithin: {$geometry:
- {type: "Polygon",
- coordinates: [[[39,4], [41,4], [41,6], [39,6], [39,4]]]}}}},
- {pre: 4, loc: {$geoWithin: {$geometry:
- {type: "Polygon",
- coordinates: [[[-1,-1], [1,-1], [1,1], [-1,1], [-1,-1]]]}}}}]});
+cursor = t.find({
+ $or: [
+ {
+ pre: 3,
+ loc: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]]
+ }
+ }
+ }
+ },
+ {
+ pre: 4,
+ loc: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]]
+ }
+ }
+ }
+ }
+ ]
+});
assert.eq(2, cursor.itcount(), "case 11");
// Case 12: GEO with non-geo, same index, 2d.
@@ -129,12 +149,32 @@ t.drop();
t.ensureIndex({pre: 1, loc: "2d"});
t.insert({_id: 0, pre: 3, loc: {type: "Point", coordinates: [40, 5]}});
t.insert({_id: 1, pre: 4, loc: {type: "Point", coordinates: [0, 0]}});
-cursor = t.find({$or: [{pre: 3, loc: {$geoWithin: {$geometry:
- {type: "Polygon",
- coordinates: [[[39,4], [41,4], [41,6], [39,6], [39,4]]]}}}},
- {pre: 4, loc: {$geoWithin: {$geometry:
- {type: "Polygon",
- coordinates: [[[-1,-1], [1,-1], [1,1], [-1,1], [-1,-1]]]}}}}]});
+cursor = t.find({
+ $or: [
+ {
+ pre: 3,
+ loc: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]]
+ }
+ }
+ }
+ },
+ {
+ pre: 4,
+ loc: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]]
+ }
+ }
+ }
+ }
+ ]
+});
assert.eq(2, cursor.itcount(), "case 12");
// Case 13: $elemMatch object.
@@ -142,8 +182,7 @@ t.drop();
t.ensureIndex({"a.b": 1});
t.insert({_id: 0, a: [{b: 1}, {b: 2}]});
t.insert({_id: 1, a: [{b: 3}, {b: 4}]});
-cursor = t.find({$or: [{a: {$elemMatch: {b: {$lte: 1}}}},
- {a: {$elemMatch: {b: {$gte: 4}}}}]});
+cursor = t.find({$or: [{a: {$elemMatch: {b: {$lte: 1}}}}, {a: {$elemMatch: {b: {$gte: 4}}}}]});
assert.eq(2, cursor.itcount(), "case 13");
// Case 14: $elemMatch object, below an AND.
@@ -151,8 +190,8 @@ t.drop();
t.ensureIndex({"a.b": 1});
t.insert({_id: 0, a: [{b: 1}, {b: 2}]});
t.insert({_id: 1, a: [{b: 2}, {b: 4}]});
-cursor = t.find({"a.b": 2, $or: [{a: {$elemMatch: {b: {$lte: 1}}}},
- {a: {$elemMatch: {b: {$gte: 4}}}}]});
+cursor = t.find(
+ {"a.b": 2, $or: [{a: {$elemMatch: {b: {$lte: 1}}}}, {a: {$elemMatch: {b: {$gte: 4}}}}]});
assert.eq(2, cursor.itcount(), "case 14");
// Case 15: $or below $elemMatch.
@@ -196,8 +235,7 @@ t.ensureIndex({name: 1});
t.insert({_id: 0, name: "thomas"});
t.insert({_id: 1, name: "alexandra"});
t.insert({_id: 2});
-cursor = t.find({$or: [{name: {$in: ["thomas", /^alexand(er|ra)/]}},
- {name: {$exists: false}}]});
+cursor = t.find({$or: [{name: {$in: ["thomas", /^alexand(er|ra)/]}}, {name: {$exists: false}}]});
assert.eq(3, cursor.itcount(), "case 19");
// Case 20: $in with EXACT, INEXACT_COVERED, and INEXACT_FETCH, two indices.
@@ -209,8 +247,8 @@ t.insert({_id: 1, a: "z", b: "z"});
t.insert({_id: 2});
t.insert({_id: 3, a: "w", b: "x"});
t.insert({_id: 4, a: "l", b: "p"});
-cursor = t.find({$or: [{a: {$in: [/z/, /x/]}}, {a: "w"},
- {b: {$exists: false}}, {b: {$in: ["p"]}}]});
+cursor =
+ t.find({$or: [{a: {$in: [/z/, /x/]}}, {a: "w"}, {b: {$exists: false}}, {b: {$in: ["p"]}}]});
assert.eq(5, cursor.itcount(), "case 19");
// Case 21: two $geoWithin that collapse to a single GEO index scan.
@@ -218,10 +256,28 @@ t.drop();
t.ensureIndex({loc: "2dsphere"});
t.insert({_id: 0, loc: {type: "Point", coordinates: [40, 5]}});
t.insert({_id: 1, loc: {type: "Point", coordinates: [0, 0]}});
-cursor = t.find({$or: [{loc: {$geoWithin: {$geometry:
- {type: "Polygon",
- coordinates: [[[39,4], [41,4], [41,6], [39,6], [39,4]]]}}}},
- {loc: {$geoWithin: {$geometry:
- {type: "Polygon",
- coordinates: [[[-1,-1], [1,-1], [1,1], [-1,1], [-1,-1]]]}}}}]});
+cursor = t.find({
+ $or: [
+ {
+ loc: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[39, 4], [41, 4], [41, 6], [39, 6], [39, 4]]]
+ }
+ }
+ }
+ },
+ {
+ loc: {
+ $geoWithin: {
+ $geometry: {
+ type: "Polygon",
+ coordinates: [[[-1, -1], [1, -1], [1, 1], [-1, 1], [-1, -1]]]
+ }
+ }
+ }
+ }
+ ]
+});
assert.eq(2, cursor.itcount(), "case 21");
diff --git a/jstests/core/ora.js b/jstests/core/ora.js
index 67af4c191ec..f50f0d13027 100644
--- a/jstests/core/ora.js
+++ b/jstests/core/ora.js
@@ -6,12 +6,16 @@ for (var i = 0; i < 10; i += 1) {
t.save({x: i, y: 10 - i});
}
assert.eq.automsg("1", "t.find({$or: [{$where: 'this.x === 2'}]}).count()");
-assert.eq.automsg("2", "t.find({$or: [{$where: 'this.x === 2'}, {$where: 'this.y === 2'}]}).count()");
-assert.eq.automsg("1", "t.find({$or: [{$where: 'this.x === 2'}, {$where: 'this.y === 8'}]}).count()");
+assert.eq.automsg("2",
+ "t.find({$or: [{$where: 'this.x === 2'}, {$where: 'this.y === 2'}]}).count()");
+assert.eq.automsg("1",
+ "t.find({$or: [{$where: 'this.x === 2'}, {$where: 'this.y === 8'}]}).count()");
assert.eq.automsg("10", "t.find({$or: [{$where: 'this.x === 2'}, {x: {$ne: 2}}]}).count()");
// geo
t.drop();
t.ensureIndex({loc: "2d"});
-assert.throws(function () {t.find({$or: [{loc: {$near: [11, 11]}}]}).limit(1).next()['_id'];});
+assert.throws(function() {
+ t.find({$or: [{loc: {$near: [11, 11]}}]}).limit(1).next()['_id'];
+});
diff --git a/jstests/core/orb.js b/jstests/core/orb.js
index a4abdeecabf..345ac92d26e 100644
--- a/jstests/core/orb.js
+++ b/jstests/core/orb.js
@@ -3,15 +3,16 @@
var t = db.jstests_orb;
t.drop();
-t.save( {a:1} );
-t.ensureIndex( {a:-1} );
+t.save({a: 1});
+t.ensureIndex({a: -1});
-assert.eq.automsg( "1", "t.count( {$or: [ { a: { $gt:0,$lt:2 } }, { a: { $gt:-1,$lt:3 } } ] } )" );
+assert.eq.automsg("1", "t.count( {$or: [ { a: { $gt:0,$lt:2 } }, { a: { $gt:-1,$lt:3 } } ] } )");
t.drop();
-t.save( {a:1,b:1} );
-t.ensureIndex( {a:1,b:-1} );
+t.save({a: 1, b: 1});
+t.ensureIndex({a: 1, b: -1});
-assert.eq.automsg( "1", "t.count( {$or: [ { a: { $gt:0,$lt:2 } }, { a: { $gt:-1,$lt:3 } } ] } )" );
-assert.eq.automsg( "1", "t.count( {$or: [ { a:1, b: { $gt:0,$lt:2 } }, { a:1, b: { $gt:-1,$lt:3 } } ] } )" ); \ No newline at end of file
+assert.eq.automsg("1", "t.count( {$or: [ { a: { $gt:0,$lt:2 } }, { a: { $gt:-1,$lt:3 } } ] } )");
+assert.eq.automsg(
+ "1", "t.count( {$or: [ { a:1, b: { $gt:0,$lt:2 } }, { a:1, b: { $gt:-1,$lt:3 } } ] } )"); \ No newline at end of file
diff --git a/jstests/core/orc.js b/jstests/core/orc.js
index dec6a7b920d..7d686972898 100644
--- a/jstests/core/orc.js
+++ b/jstests/core/orc.js
@@ -2,28 +2,50 @@
t = db.jstests_orc;
t.drop();
-// The goal here will be to ensure the full range of valid values is scanned for each or clause, in order to ensure that
-// duplicates are eliminated properly in the cases below when field range elimination is not employed. The deduplication
-// of interest will occur on field a. The range specifications for fields b and c are such that (in the current
-// implementation) field range elimination will not occur between the or clauses, meaning that the full range of valid values
+// The goal here will be to ensure the full range of valid values is scanned for each or clause, in
+// order to ensure that
+// duplicates are eliminated properly in the cases below when field range elimination is not
+// employed. The deduplication
+// of interest will occur on field a. The range specifications for fields b and c are such that (in
+// the current
+// implementation) field range elimination will not occur between the or clauses, meaning that the
+// full range of valid values
// will be scanned for each clause and deduplication will be forced.
-// NOTE This test uses some tricks to avoid or range elimination, but in future implementations these tricks may not apply.
-// Perhaps it would be worthwhile to create a mode where range elimination is disabled so it will be possible to write a more
+// NOTE This test uses some tricks to avoid or range elimination, but in future implementations
+// these tricks may not apply.
+// Perhaps it would be worthwhile to create a mode where range elimination is disabled so it will be
+// possible to write a more
// robust test.
-t.ensureIndex( {a:-1,b:1,c:1} );
+t.ensureIndex({a: -1, b: 1, c: 1});
// sanity test
-t.save( {a:null,b:4,c:4} );
-assert.eq( 1, t.count( {$or:[{a:null,b:{$gte:0,$lte:5},c:{$gte:0,$lte:5}},{a:null,b:{$gte:3,$lte:8},c:{$gte:3,$lte:8}}]} ) );
+t.save({a: null, b: 4, c: 4});
+assert.eq(1,
+ t.count({
+ $or: [
+ {a: null, b: {$gte: 0, $lte: 5}, c: {$gte: 0, $lte: 5}},
+ {a: null, b: {$gte: 3, $lte: 8}, c: {$gte: 3, $lte: 8}}
+ ]
+ }));
// from here on is SERVER-2245
t.remove({});
-t.save( {b:4,c:4} );
-assert.eq( 1, t.count( {$or:[{a:null,b:{$gte:0,$lte:5},c:{$gte:0,$lte:5}},{a:null,b:{$gte:3,$lte:8},c:{$gte:3,$lte:8}}]} ) );
+t.save({b: 4, c: 4});
+assert.eq(1,
+ t.count({
+ $or: [
+ {a: null, b: {$gte: 0, $lte: 5}, c: {$gte: 0, $lte: 5}},
+ {a: null, b: {$gte: 3, $lte: 8}, c: {$gte: 3, $lte: 8}}
+ ]
+ }));
-//t.remove({});
-//t.save( {a:[],b:4,c:4} );
-//printjson( t.find( {$or:[{a:[],b:{$gte:0,$lte:5},c:{$gte:0,$lte:5}},{a:[],b:{$gte:3,$lte:8},c:{$gte:3,$lte:8}}]} ).explain() );
-//assert.eq( 1, t.count( {$or:[{a:[],b:{$gte:0,$lte:5},c:{$gte:0,$lte:5}},{a:[],b:{$gte:3,$lte:8},c:{$gte:3,$lte:8}}]} ) );
+// t.remove({});
+// t.save( {a:[],b:4,c:4} );
+// printjson( t.find(
+// {$or:[{a:[],b:{$gte:0,$lte:5},c:{$gte:0,$lte:5}},{a:[],b:{$gte:3,$lte:8},c:{$gte:3,$lte:8}}]}
+// ).explain() );
+// assert.eq( 1, t.count(
+// {$or:[{a:[],b:{$gte:0,$lte:5},c:{$gte:0,$lte:5}},{a:[],b:{$gte:3,$lte:8},c:{$gte:3,$lte:8}}]} )
+// );
diff --git a/jstests/core/ord.js b/jstests/core/ord.js
index df47c405146..46607b539e8 100644
--- a/jstests/core/ord.js
+++ b/jstests/core/ord.js
@@ -1,6 +1,6 @@
// check that we don't crash if an index used by an earlier or clause is dropped
-// Dropping an index kills all cursors on the indexed namespace, not just those
+// Dropping an index kills all cursors on the indexed namespace, not just those
// cursors using the dropped index. This test is to serve as a reminder that
// the $or implementation may need minor adjustments (memory ownership) if this
// behavior is changed.
@@ -8,27 +8,27 @@
t = db.jstests_ord;
t.drop();
-t.ensureIndex( {a:1} );
-t.ensureIndex( {b:1} );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
-for( i = 0; i < 80; ++i ) {
- t.save( {a:1} );
+for (i = 0; i < 80; ++i) {
+ t.save({a: 1});
}
-for( i = 0; i < 100; ++i ) {
- t.save( {b:1} );
+for (i = 0; i < 100; ++i) {
+ t.save({b: 1});
}
-c = t.find( { $or: [ {a:1}, {b:1} ] } ).batchSize( 100 );
-for( i = 0; i < 90; ++i ) {
+c = t.find({$or: [{a: 1}, {b: 1}]}).batchSize(100);
+for (i = 0; i < 90; ++i) {
c.next();
}
// At this point, our initial query has ended and there is a client cursor waiting
// to read additional documents from index {b:1}. Deduping is performed against
// the index key {a:1}.
-t.dropIndex( {a:1} );
+t.dropIndex({a: 1});
-// Dropping an index kills all cursors on the indexed namespace, not just those
+// Dropping an index kills all cursors on the indexed namespace, not just those
// cursors using the dropped index.
-assert.throws( c.next() );
+assert.throws(c.next());
diff --git a/jstests/core/ore.js b/jstests/core/ore.js
index 93538f8b46b..959f43f5101 100644
--- a/jstests/core/ore.js
+++ b/jstests/core/ore.js
@@ -4,10 +4,10 @@
t = db.jstests_ore;
t.drop();
-t.ensureIndex( {a:-1} );
-t.ensureIndex( {b:1} );
+t.ensureIndex({a: -1});
+t.ensureIndex({b: 1});
-t.save( {a:1,b:1} );
-t.save( {a:2,b:1} );
+t.save({a: 1, b: 1});
+t.save({a: 2, b: 1});
-assert.eq( 2, t.count( {$or:[{a:{$in:[1,2]}},{b:1}]} ) );
+assert.eq(2, t.count({$or: [{a: {$in: [1, 2]}}, {b: 1}]}));
diff --git a/jstests/core/orf.js b/jstests/core/orf.js
index bae8c61f89a..5d58e59c74f 100644
--- a/jstests/core/orf.js
+++ b/jstests/core/orf.js
@@ -5,18 +5,20 @@ t.drop();
var a = [];
var expectBounds = [];
-for( var i = 0; i < 200; ++i ) {
- a.push( {_id:i} );
+for (var i = 0; i < 200; ++i) {
+ a.push({_id: i});
expectBounds.push([i, i]);
}
-a.forEach( function( x ) { t.save( x ); } );
+a.forEach(function(x) {
+ t.save(x);
+});
// This $or query is answered as an index scan over
// a series of _id index point intervals.
-explain = t.find( {$or:a} ).hint( {_id: 1} ).explain( true );
-printjson( explain );
-assert.eq( 200, explain.executionStats.nReturned, 'n' );
-assert.eq( 200, explain.executionStats.totalKeysExamined, 'keys examined' );
-assert.eq( 200, explain.executionStats.totalDocsExamined, 'docs examined' );
+explain = t.find({$or: a}).hint({_id: 1}).explain(true);
+printjson(explain);
+assert.eq(200, explain.executionStats.nReturned, 'n');
+assert.eq(200, explain.executionStats.totalKeysExamined, 'keys examined');
+assert.eq(200, explain.executionStats.totalDocsExamined, 'docs examined');
-assert.eq( 200, t.count( {$or:a} ) );
+assert.eq(200, t.count({$or: a}));
diff --git a/jstests/core/org.js b/jstests/core/org.js
index 19239f96c10..4bc4a813b4c 100644
--- a/jstests/core/org.js
+++ b/jstests/core/org.js
@@ -3,17 +3,17 @@
t = db.jstests_org;
t.drop();
-t.ensureIndex( {a:1}, {sparse:true} );
-t.ensureIndex( {b:1} );
+t.ensureIndex({a: 1}, {sparse: true});
+t.ensureIndex({b: 1});
t.remove({});
-t.save( {a:1,b:2} );
-assert.eq( 1, t.count( {$or:[{a:1},{b:2}]} ) );
+t.save({a: 1, b: 2});
+assert.eq(1, t.count({$or: [{a: 1}, {b: 2}]}));
t.remove({});
-t.save( {a:null,b:2} );
-assert.eq( 1, t.count( {$or:[{a:null},{b:2}]} ) );
+t.save({a: null, b: 2});
+assert.eq(1, t.count({$or: [{a: null}, {b: 2}]}));
t.remove({});
-t.save( {b:2} );
-assert.eq( 1, t.count( {$or:[{a:null},{b:2}]} ) );
+t.save({b: 2});
+assert.eq(1, t.count({$or: [{a: null}, {b: 2}]}));
diff --git a/jstests/core/orh.js b/jstests/core/orh.js
index 5fb845fd01c..357bb1ea5c3 100644
--- a/jstests/core/orh.js
+++ b/jstests/core/orh.js
@@ -3,15 +3,15 @@
t = db.jstests_orh;
t.drop();
-t.ensureIndex( {a:1}, {sparse:true} );
-t.ensureIndex( {b:1,a:1} );
+t.ensureIndex({a: 1}, {sparse: true});
+t.ensureIndex({b: 1, a: 1});
t.remove({});
-t.save( {b:2} );
-assert.eq( 1, t.count( {a:null} ) );
-assert.eq( 1, t.count( {b:2,a:null} ) );
+t.save({b: 2});
+assert.eq(1, t.count({a: null}));
+assert.eq(1, t.count({b: 2, a: null}));
-assert.eq( 1, t.count( {$or:[{b:2,a:null},{a:null}]} ) );
+assert.eq(1, t.count({$or: [{b: 2, a: null}, {a: null}]}));
// Is this desired?
-assert.eq( 1, t.count( {$or:[{a:null},{b:2,a:null}]} ) );
+assert.eq(1, t.count({$or: [{a: null}, {b: 2, a: null}]}));
diff --git a/jstests/core/orj.js b/jstests/core/orj.js
index fa234f36cb5..683f45253a8 100644
--- a/jstests/core/orj.js
+++ b/jstests/core/orj.js
@@ -3,119 +3,159 @@
t = db.jstests_orj;
t.drop();
-t.save( {a:1,b:2} );
+t.save({a: 1, b: 2});
function check() {
-
-assert.throws( function() { t.find( { x:0,$or:"a" } ).toArray(); } );
-assert.throws( function() { t.find( { x:0,$or:[] } ).toArray(); } );
-assert.throws( function() { t.find( { x:0,$or:[ "a" ] } ).toArray(); } );
-
-assert.throws( function() { t.find( { x:0,$or:[{$or:"a"}] } ).toArray(); } );
-assert.throws( function() { t.find( { x:0,$or:[{$or:[]}] } ).toArray(); } );
-assert.throws( function() { t.find( { x:0,$or:[{$or:[ "a" ]}] } ).toArray(); } );
-
-assert.throws( function() { t.find( { x:0,$nor:"a" } ).toArray(); } );
-assert.throws( function() { t.find( { x:0,$nor:[] } ).toArray(); } );
-assert.throws( function() { t.find( { x:0,$nor:[ "a" ] } ).toArray(); } );
-
-assert.throws( function() { t.find( { x:0,$nor:[{$nor:"a"}] } ).toArray(); } );
-assert.throws( function() { t.find( { x:0,$nor:[{$nor:[]}] } ).toArray(); } );
-assert.throws( function() { t.find( { x:0,$nor:[{$nor:[ "a" ]}] } ).toArray(); } );
-
-assert.eq( 1, t.find( {a:1,b:2} ).itcount() );
-
-assert.eq( 1, t.find( {a:1,$or:[{b:2}]} ).itcount() );
-assert.eq( 0, t.find( {a:1,$or:[{b:3}]} ).itcount() );
-
-assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).itcount() );
-assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).itcount() );
-assert.eq( 0, t.find( {a:1,$or:[{$or:[{b:3}]}]} ).itcount() );
-
-assert.eq( 1, t.find( {$or:[{$or:[{a:2},{b:2}]}]} ).itcount() );
-assert.eq( 1, t.find( {$or:[{a:2},{$or:[{b:2}]}]} ).itcount() );
-assert.eq( 1, t.find( {$or:[{a:1},{$or:[{b:3}]}]} ).itcount() );
-
-assert.eq( 1, t.find( {$or:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:4}]}]} ).itcount() );
-assert.eq( 1, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:2},{b:4}]}]} ).itcount() );
-assert.eq( 0, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:3},{b:4}]}]} ).itcount() );
-
-assert.eq( 1, t.find( {a:1,$and:[{$or:[{$or:[{b:2}]}]}]} ).itcount() );
-assert.eq( 0, t.find( {a:1,$and:[{$or:[{$or:[{b:3}]}]}]} ).itcount() );
-
-assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:1},{b:2}]}]} ).itcount() );
-assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$or:[{b:1},{b:2}]}]} ).itcount() );
-assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:1}]}]} ).itcount() );
-
-assert.eq( 0, t.find( {$and:[{$nor:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).itcount() );
-assert.eq( 0, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).itcount() );
-assert.eq( 1, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:3},{b:1}]}]} ).itcount() );
-
-assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).itcount() );
-assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).itcount() );
-assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).itcount() );
-
+ assert.throws(function() {
+ t.find({x: 0, $or: "a"}).toArray();
+ });
+ assert.throws(function() {
+ t.find({x: 0, $or: []}).toArray();
+ });
+ assert.throws(function() {
+ t.find({x: 0, $or: ["a"]}).toArray();
+ });
+
+ assert.throws(function() {
+ t.find({x: 0, $or: [{$or: "a"}]}).toArray();
+ });
+ assert.throws(function() {
+ t.find({x: 0, $or: [{$or: []}]}).toArray();
+ });
+ assert.throws(function() {
+ t.find({x: 0, $or: [{$or: ["a"]}]}).toArray();
+ });
+
+ assert.throws(function() {
+ t.find({x: 0, $nor: "a"}).toArray();
+ });
+ assert.throws(function() {
+ t.find({x: 0, $nor: []}).toArray();
+ });
+ assert.throws(function() {
+ t.find({x: 0, $nor: ["a"]}).toArray();
+ });
+
+ assert.throws(function() {
+ t.find({x: 0, $nor: [{$nor: "a"}]}).toArray();
+ });
+ assert.throws(function() {
+ t.find({x: 0, $nor: [{$nor: []}]}).toArray();
+ });
+ assert.throws(function() {
+ t.find({x: 0, $nor: [{$nor: ["a"]}]}).toArray();
+ });
+
+ assert.eq(1, t.find({a: 1, b: 2}).itcount());
+
+ assert.eq(1, t.find({a: 1, $or: [{b: 2}]}).itcount());
+ assert.eq(0, t.find({a: 1, $or: [{b: 3}]}).itcount());
+
+ assert.eq(1, t.find({a: 1, $or: [{$or: [{b: 2}]}]}).itcount());
+ assert.eq(1, t.find({a: 1, $or: [{$or: [{b: 2}]}]}).itcount());
+ assert.eq(0, t.find({a: 1, $or: [{$or: [{b: 3}]}]}).itcount());
+
+ assert.eq(1, t.find({$or: [{$or: [{a: 2}, {b: 2}]}]}).itcount());
+ assert.eq(1, t.find({$or: [{a: 2}, {$or: [{b: 2}]}]}).itcount());
+ assert.eq(1, t.find({$or: [{a: 1}, {$or: [{b: 3}]}]}).itcount());
+
+ assert.eq(1, t.find({$or: [{$or: [{a: 1}, {a: 2}]}, {$or: [{b: 3}, {b: 4}]}]}).itcount());
+ assert.eq(1, t.find({$or: [{$or: [{a: 0}, {a: 2}]}, {$or: [{b: 2}, {b: 4}]}]}).itcount());
+ assert.eq(0, t.find({$or: [{$or: [{a: 0}, {a: 2}]}, {$or: [{b: 3}, {b: 4}]}]}).itcount());
+
+ assert.eq(1, t.find({a: 1, $and: [{$or: [{$or: [{b: 2}]}]}]}).itcount());
+ assert.eq(0, t.find({a: 1, $and: [{$or: [{$or: [{b: 3}]}]}]}).itcount());
+
+ assert.eq(1, t.find({$and: [{$or: [{a: 1}, {a: 2}]}, {$or: [{b: 1}, {b: 2}]}]}).itcount());
+ assert.eq(0, t.find({$and: [{$or: [{a: 3}, {a: 2}]}, {$or: [{b: 1}, {b: 2}]}]}).itcount());
+ assert.eq(0, t.find({$and: [{$or: [{a: 1}, {a: 2}]}, {$or: [{b: 3}, {b: 1}]}]}).itcount());
+
+ assert.eq(0, t.find({$and: [{$nor: [{a: 1}, {a: 2}]}, {$nor: [{b: 1}, {b: 2}]}]}).itcount());
+ assert.eq(0, t.find({$and: [{$nor: [{a: 3}, {a: 2}]}, {$nor: [{b: 1}, {b: 2}]}]}).itcount());
+ assert.eq(1, t.find({$and: [{$nor: [{a: 3}, {a: 2}]}, {$nor: [{b: 3}, {b: 1}]}]}).itcount());
+
+ assert.eq(1, t.find({$and: [{$or: [{a: 1}, {a: 2}]}, {$nor: [{b: 1}, {b: 3}]}]}).itcount());
+ assert.eq(0, t.find({$and: [{$or: [{a: 3}, {a: 2}]}, {$nor: [{b: 1}, {b: 3}]}]}).itcount());
+ assert.eq(0, t.find({$and: [{$or: [{a: 1}, {a: 2}]}, {$nor: [{b: 1}, {b: 2}]}]}).itcount());
}
check();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
check();
t.dropIndexes();
-t.ensureIndex( {b:1} );
+t.ensureIndex({b: 1});
check();
t.dropIndexes();
-t.ensureIndex( {a:1} );
-t.ensureIndex( {b:1} );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
check();
t.dropIndexes();
-t.ensureIndex( {a:1,b:1} );
+t.ensureIndex({a: 1, b: 1});
check();
t.dropIndexes();
-t.ensureIndex( {a:1} );
-t.ensureIndex( {b:1} );
-t.ensureIndex( {a:1,b:1} );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
+t.ensureIndex({a: 1, b: 1});
check();
-function checkHinted( hint ) {
- assert.eq( 1, t.find( {a:1,b:2} ).hint( hint ).itcount() );
-
- assert.eq( 1, t.find( {a:1,$or:[{b:2}]} ).hint( hint ).itcount() );
- assert.eq( 0, t.find( {a:1,$or:[{b:3}]} ).hint( hint ).itcount() );
-
- assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).hint( hint ).itcount() );
- assert.eq( 1, t.find( {a:1,$or:[{$or:[{b:2}]}]} ).hint( hint ).itcount() );
- assert.eq( 0, t.find( {a:1,$or:[{$or:[{b:3}]}]} ).hint( hint ).itcount() );
-
- assert.eq( 1, t.find( {$or:[{$or:[{a:2},{b:2}]}]} ).hint( hint ).itcount() );
- assert.eq( 1, t.find( {$or:[{a:2},{$or:[{b:2}]}]} ).hint( hint ).itcount() );
- assert.eq( 1, t.find( {$or:[{a:1},{$or:[{b:3}]}]} ).hint( hint ).itcount() );
-
- assert.eq( 1, t.find( {$or:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:4}]}]} ).hint( hint ).itcount() );
- assert.eq( 1, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:2},{b:4}]}]} ).hint( hint ).itcount() );
- assert.eq( 0, t.find( {$or:[{$or:[{a:0},{a:2}]},{$or:[{b:3},{b:4}]}]} ).hint( hint ).itcount() );
-
- assert.eq( 1, t.find( {a:1,$and:[{$or:[{$or:[{b:2}]}]}]} ).hint( hint ).itcount() );
- assert.eq( 0, t.find( {a:1,$and:[{$or:[{$or:[{b:3}]}]}]} ).hint( hint ).itcount() );
-
- assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
- assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$or:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
- assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$or:[{b:3},{b:1}]}]} ).hint( hint ).itcount() );
-
- assert.eq( 0, t.find( {$and:[{$nor:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
- assert.eq( 0, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
- assert.eq( 1, t.find( {$and:[{$nor:[{a:3},{a:2}]},{$nor:[{b:3},{b:1}]}]} ).hint( hint ).itcount() );
-
- assert.eq( 1, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).hint( hint ).itcount() );
- assert.eq( 0, t.find( {$and:[{$or:[{a:3},{a:2}]},{$nor:[{b:1},{b:3}]}]} ).hint( hint ).itcount() );
- assert.eq( 0, t.find( {$and:[{$or:[{a:1},{a:2}]},{$nor:[{b:1},{b:2}]}]} ).hint( hint ).itcount() );
+function checkHinted(hint) {
+ assert.eq(1, t.find({a: 1, b: 2}).hint(hint).itcount());
+
+ assert.eq(1, t.find({a: 1, $or: [{b: 2}]}).hint(hint).itcount());
+ assert.eq(0, t.find({a: 1, $or: [{b: 3}]}).hint(hint).itcount());
+
+ assert.eq(1, t.find({a: 1, $or: [{$or: [{b: 2}]}]}).hint(hint).itcount());
+ assert.eq(1, t.find({a: 1, $or: [{$or: [{b: 2}]}]}).hint(hint).itcount());
+ assert.eq(0, t.find({a: 1, $or: [{$or: [{b: 3}]}]}).hint(hint).itcount());
+
+ assert.eq(1, t.find({$or: [{$or: [{a: 2}, {b: 2}]}]}).hint(hint).itcount());
+ assert.eq(1, t.find({$or: [{a: 2}, {$or: [{b: 2}]}]}).hint(hint).itcount());
+ assert.eq(1, t.find({$or: [{a: 1}, {$or: [{b: 3}]}]}).hint(hint).itcount());
+
+ assert.eq(
+ 1, t.find({$or: [{$or: [{a: 1}, {a: 2}]}, {$or: [{b: 3}, {b: 4}]}]}).hint(hint).itcount());
+ assert.eq(
+ 1, t.find({$or: [{$or: [{a: 0}, {a: 2}]}, {$or: [{b: 2}, {b: 4}]}]}).hint(hint).itcount());
+ assert.eq(
+ 0, t.find({$or: [{$or: [{a: 0}, {a: 2}]}, {$or: [{b: 3}, {b: 4}]}]}).hint(hint).itcount());
+
+ assert.eq(1, t.find({a: 1, $and: [{$or: [{$or: [{b: 2}]}]}]}).hint(hint).itcount());
+ assert.eq(0, t.find({a: 1, $and: [{$or: [{$or: [{b: 3}]}]}]}).hint(hint).itcount());
+
+ assert.eq(
+ 1, t.find({$and: [{$or: [{a: 1}, {a: 2}]}, {$or: [{b: 1}, {b: 2}]}]}).hint(hint).itcount());
+ assert.eq(
+ 0, t.find({$and: [{$or: [{a: 3}, {a: 2}]}, {$or: [{b: 1}, {b: 2}]}]}).hint(hint).itcount());
+ assert.eq(
+ 0, t.find({$and: [{$or: [{a: 1}, {a: 2}]}, {$or: [{b: 3}, {b: 1}]}]}).hint(hint).itcount());
+
+ assert.eq(
+ 0,
+ t.find({$and: [{$nor: [{a: 1}, {a: 2}]}, {$nor: [{b: 1}, {b: 2}]}]}).hint(hint).itcount());
+ assert.eq(
+ 0,
+ t.find({$and: [{$nor: [{a: 3}, {a: 2}]}, {$nor: [{b: 1}, {b: 2}]}]}).hint(hint).itcount());
+ assert.eq(
+ 1,
+ t.find({$and: [{$nor: [{a: 3}, {a: 2}]}, {$nor: [{b: 3}, {b: 1}]}]}).hint(hint).itcount());
+
+ assert.eq(
+ 1,
+ t.find({$and: [{$or: [{a: 1}, {a: 2}]}, {$nor: [{b: 1}, {b: 3}]}]}).hint(hint).itcount());
+ assert.eq(
+ 0,
+ t.find({$and: [{$or: [{a: 3}, {a: 2}]}, {$nor: [{b: 1}, {b: 3}]}]}).hint(hint).itcount());
+ assert.eq(
+ 0,
+ t.find({$and: [{$or: [{a: 1}, {a: 2}]}, {$nor: [{b: 1}, {b: 2}]}]}).hint(hint).itcount());
}
-checkHinted( {$natural:1} );
-checkHinted( {a:1} );
-checkHinted( {b:1} );
-checkHinted( {a:1,b:1} ); \ No newline at end of file
+checkHinted({$natural: 1});
+checkHinted({a: 1});
+checkHinted({b: 1});
+checkHinted({a: 1, b: 1}); \ No newline at end of file
diff --git a/jstests/core/ork.js b/jstests/core/ork.js
index d6d40161e69..f367b6b4bad 100644
--- a/jstests/core/ork.js
+++ b/jstests/core/ork.js
@@ -3,9 +3,21 @@
t = db.jstests_ork;
t.drop();
-t.ensureIndex( {a:1} );
-t.save( {a:[1,2],b:5} );
-t.save( {a:[2,4],b:5} );
+t.ensureIndex({a: 1});
+t.save({a: [1, 2], b: 5});
+t.save({a: [2, 4], b: 5});
-assert.eq( 2, t.find( {$or:[{a:1,$and:[{$or:[{a:2},{a:3}]},{$or:[{b:5}]}]},{a:2,$or:[{a:3},{a:4}]}]} ).itcount() );
-assert.eq( 1, t.find( {$or:[{a:1,$and:[{$or:[{a:2},{a:3}]},{$or:[{b:6}]}]},{a:2,$or:[{a:3},{a:4}]}]} ).itcount() );
+assert.eq(2,
+ t.find({
+ $or: [
+ {a: 1, $and: [{$or: [{a: 2}, {a: 3}]}, {$or: [{b: 5}]}]},
+ {a: 2, $or: [{a: 3}, {a: 4}]}
+ ]
+ }).itcount());
+assert.eq(1,
+ t.find({
+ $or: [
+ {a: 1, $and: [{$or: [{a: 2}, {a: 3}]}, {$or: [{b: 6}]}]},
+ {a: 2, $or: [{a: 3}, {a: 4}]}
+ ]
+ }).itcount());
diff --git a/jstests/core/oro.js b/jstests/core/oro.js
index d93bfa6dd09..be8a99e7e35 100644
--- a/jstests/core/oro.js
+++ b/jstests/core/oro.js
@@ -4,24 +4,25 @@ t = db.jstests_oro;
t.drop();
orClauses = [];
-for( idxKey = 'a'; idxKey <= 'aaaaaaaaaa'; idxKey += 'a' ) {
+for (idxKey = 'a'; idxKey <= 'aaaaaaaaaa'; idxKey += 'a') {
idx = {};
- idx[ idxKey ] = 1;
- t.ensureIndex( idx );
- for( i = 0; i < 200; ++i ) {
- t.insert( idx );
+ idx[idxKey] = 1;
+ t.ensureIndex(idx);
+ for (i = 0; i < 200; ++i) {
+ t.insert(idx);
}
- orClauses.push( idx );
+ orClauses.push(idx);
}
-printjson( t.find({$or:orClauses}).explain() );
-c = t.find({$or:orClauses}).batchSize( 100 );
+printjson(t.find({$or: orClauses}).explain());
+c = t.find({$or: orClauses}).batchSize(100);
count = 0;
-while( c.hasNext() ) {
- for( i = 0; i < 50 && c.hasNext(); ++i, c.next(), ++count );
+while (c.hasNext()) {
+ for (i = 0; i < 50 && c.hasNext(); ++i, c.next(), ++count)
+ ;
// Interleave with another operation.
t.stats();
}
-assert.eq( 10 * 200, count );
+assert.eq(10 * 200, count);
diff --git a/jstests/core/orp.js b/jstests/core/orp.js
index 18abdfbc63a..a706d6f4c1a 100644
--- a/jstests/core/orp.js
+++ b/jstests/core/orp.js
@@ -4,40 +4,39 @@
t = db.jstests_orp;
t.drop();
-t.ensureIndex( { a:1 } );
-t.ensureIndex( { b:1 } );
-t.ensureIndex( { c:1 } );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
+t.ensureIndex({c: 1});
-for( i = 0; i < 200; ++i ) {
- t.save( { a:1, b:1 } );
+for (i = 0; i < 200; ++i) {
+ t.save({a: 1, b: 1});
}
// Deduping results from the previous clause.
-assert.eq( 200, t.count( { $or:[ { a:1 }, { b:1 } ] } ) );
+assert.eq(200, t.count({$or: [{a: 1}, {b: 1}]}));
// Deduping results from a prior clause.
-assert.eq( 200, t.count( { $or:[ { a:1 }, { c:1 }, { b:1 } ] } ) );
-t.save( { c:1 } );
-assert.eq( 201, t.count( { $or:[ { a:1 }, { c:1 }, { b:1 } ] } ) );
+assert.eq(200, t.count({$or: [{a: 1}, {c: 1}, {b: 1}]}));
+t.save({c: 1});
+assert.eq(201, t.count({$or: [{a: 1}, {c: 1}, {b: 1}]}));
// Deduping results that would normally be index only matches on overlapping and double scanned $or
// field regions.
t.drop();
-t.ensureIndex( { a:1, b:1 } );
-for( i = 0; i < 16; ++i ) {
- for( j = 0; j < 16; ++j ) {
- t.save( { a:i, b:j } );
+t.ensureIndex({a: 1, b: 1});
+for (i = 0; i < 16; ++i) {
+ for (j = 0; j < 16; ++j) {
+ t.save({a: i, b: j});
}
}
-assert.eq( 16 * 16,
- t.count( { $or:[ { a:{ $gte:0 }, b:{ $gte:0 } }, { a:{ $lte:16 }, b:{ $lte:16 } } ] } ) );
+assert.eq(16 * 16, t.count({$or: [{a: {$gte: 0}, b: {$gte: 0}}, {a: {$lte: 16}, b: {$lte: 16}}]}));
// Deduping results from a clause that completed before the multi cursor takeover.
t.drop();
-t.ensureIndex( { a:1 } );
-t.ensureIndex( { b:1 } );
-t.save( { a:1,b:200 } );
-for( i = 0; i < 200; ++i ) {
- t.save( { b:i } );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
+t.save({a: 1, b: 200});
+for (i = 0; i < 200; ++i) {
+ t.save({b: i});
}
-assert.eq( 201, t.count( { $or:[ { a:1 }, { b:{ $gte:0 } } ] } ) );
+assert.eq(201, t.count({$or: [{a: 1}, {b: {$gte: 0}}]}));
diff --git a/jstests/core/plan_cache_clear.js b/jstests/core/plan_cache_clear.js
index 264c2885f48..8f9cf0ea302 100644
--- a/jstests/core/plan_cache_clear.js
+++ b/jstests/core/plan_cache_clear.js
@@ -23,7 +23,7 @@ t.save({a: 2, b: 2});
// We need two indices so that the MultiPlanRunner is executed.
t.ensureIndex({a: 1});
-t.ensureIndex({a: 1, b:1});
+t.ensureIndex({a: 1, b: 1});
// Run a query so that an entry is inserted into the cache.
assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
diff --git a/jstests/core/plan_cache_list_plans.js b/jstests/core/plan_cache_list_plans.js
index 038bcd949eb..b4be4ad46c4 100644
--- a/jstests/core/plan_cache_list_plans.js
+++ b/jstests/core/plan_cache_list_plans.js
@@ -5,11 +5,15 @@ t.drop();
// Utility function to list plans for a query.
function getPlans(query, sort, projection) {
- var key = {query: query, sort: sort, projection: projection};
+ var key = {
+ query: query,
+ sort: sort,
+ projection: projection
+ };
var res = t.runCommand('planCacheListPlans', key);
assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed');
- assert(res.hasOwnProperty('plans'), 'plans missing from planCacheListPlans(' +
- tojson(key, '', true) + ') result');
+ assert(res.hasOwnProperty('plans'),
+ 'plans missing from planCacheListPlans(' + tojson(key, '', true) + ') result');
return res.plans;
}
@@ -20,14 +24,16 @@ t.save({a: 2, b: 2});
// We need two indices so that the MultiPlanRunner is executed.
t.ensureIndex({a: 1});
-t.ensureIndex({a: 1, b:1});
+t.ensureIndex({a: 1, b: 1});
// Invalid key should be an error.
-assert.eq(0, getPlans({unknownfield: 1}, {}, {}),
+assert.eq(0,
+ getPlans({unknownfield: 1}, {}, {}),
'planCacheListPlans should return empty results on unknown query shape');
// Create a cache entry.
-assert.eq(1, t.find({a: 1, b: 1}, {_id: 0, a: 1}).sort({a: -1}).itcount(),
+assert.eq(1,
+ t.find({a: 1, b: 1}, {_id: 0, a: 1}).sort({a: -1}).itcount(),
'unexpected document count');
// Retrieve plans for valid cache entry.
@@ -65,10 +71,10 @@ for (var i = 0; i < plans.length; i++) {
print('plan ' + i + ': ' + tojson(plans[i]));
assert.gt(plans[i].reason.score, 0, 'plan ' + i + ' score is invalid');
if (i > 0) {
- assert.lte(plans[i].reason.score, plans[i-1].reason.score,
+ assert.lte(plans[i].reason.score,
+ plans[i - 1].reason.score,
'plans not sorted by score in descending order. ' +
- 'plan ' + i + ' has a score that is greater than that of the previous plan');
+ 'plan ' + i + ' has a score that is greater than that of the previous plan');
}
assert(plans[i].reason.stats.hasOwnProperty('stage'), 'no stats inserted for plan ' + i);
}
-
diff --git a/jstests/core/plan_cache_list_shapes.js b/jstests/core/plan_cache_list_shapes.js
index c22d7caa2e4..4711940870d 100644
--- a/jstests/core/plan_cache_list_shapes.js
+++ b/jstests/core/plan_cache_list_shapes.js
@@ -20,7 +20,8 @@ function getShapes(collection) {
// and should return an empty array of query shapes.
var missingCollection = db.jstests_query_cache_missing;
missingCollection.drop();
-assert.eq(0, getShapes(missingCollection).length,
+assert.eq(0,
+ getShapes(missingCollection).length,
'planCacheListQueryShapes should return empty array on non-existent collection');
t.save({a: 1, b: 1});
@@ -33,7 +34,8 @@ t.ensureIndex({a: 1});
t.ensureIndex({a: 1, b: 1});
// Run a query.
-assert.eq(1, t.find({a: 1, b: 1}, {_id: 1, a: 1}).sort({a: -1}).itcount(),
+assert.eq(1,
+ t.find({a: 1, b: 1}, {_id: 1, a: 1}).sort({a: -1}).itcount(),
'unexpected document count');
// We now expect the two indices to be compared and a cache entry to exist.
@@ -41,7 +43,8 @@ assert.eq(1, t.find({a: 1, b: 1}, {_id: 1, a: 1}).sort({a: -1}).itcount(),
// Number of shapes should match queries executed by multi-plan runner.
var shapes = getShapes();
assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
-assert.eq({query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 1, a: 1}}, shapes[0],
+assert.eq({query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 1, a: 1}},
+ shapes[0],
'unexpected query shape returned from planCacheListQueryShapes');
// Running a different query shape should cause another entry to be cached.
diff --git a/jstests/core/plan_cache_shell_helpers.js b/jstests/core/plan_cache_shell_helpers.js
index c22e0e451eb..a61421afc7b 100644
--- a/jstests/core/plan_cache_shell_helpers.js
+++ b/jstests/core/plan_cache_shell_helpers.js
@@ -16,26 +16,38 @@ function getShapes(collection) {
}
// Utility function to list plans for a query.
function getPlans(query, sort, projection) {
- var key = {query: query, sort: sort, projection: projection};
+ var key = {
+ query: query,
+ sort: sort,
+ projection: projection
+ };
var res = t.runCommand('planCacheListPlans', key);
assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed');
- assert(res.hasOwnProperty('plans'), 'plans missing from planCacheListPlans(' +
- tojson(key, '', true) + ') result');
+ assert(res.hasOwnProperty('plans'),
+ 'plans missing from planCacheListPlans(' + tojson(key, '', true) + ') result');
return res.plans;
}
// Add data an indices.
var n = 200;
for (var i = 0; i < n; i++) {
- t.save({a:i, b: -1, c: 1});
+ t.save({a: i, b: -1, c: 1});
}
t.ensureIndex({a: 1});
t.ensureIndex({b: 1});
// Populate plan cache.
-var queryB = {a: {$gte: 199}, b: -1};
-var projectionB = {_id: 0, b: 1};
-var sortC = {c: -1};
+var queryB = {
+ a: {$gte: 199},
+ b: -1
+};
+var projectionB = {
+ _id: 0,
+ b: 1
+};
+var sortC = {
+ c: -1
+};
assert.eq(1, t.find(queryB, projectionB).sort(sortC).itcount(), 'unexpected document count');
assert.eq(1, t.find(queryB, projectionB).itcount(), 'unexpected document count');
assert.eq(1, t.find(queryB).sort(sortC).itcount(), 'unexpected document count');
@@ -68,10 +80,12 @@ print(planCache);
var missingCollection = db.jstests_plan_cache_missing;
missingCollection.drop();
// should return empty array on non-existent collection.
-assert.eq(0, missingCollection.getPlanCache().listQueryShapes().length,
+assert.eq(0,
+ missingCollection.getPlanCache().listQueryShapes().length,
'collection.getPlanCache().listQueryShapes() should return empty results ' +
- 'on non-existent collection');
-assert.eq(getShapes(), planCache.listQueryShapes(),
+ 'on non-existent collection');
+assert.eq(getShapes(),
+ planCache.listQueryShapes(),
'unexpected collection.getPlanCache().listQueryShapes() shell helper result');
//
@@ -79,21 +93,27 @@ assert.eq(getShapes(), planCache.listQueryShapes(),
//
// should return empty array on non-existent query shape.
-assert.eq(0, planCache.getPlansByQuery({unknownfield: 1}).length,
+assert.eq(0,
+ planCache.getPlansByQuery({unknownfield: 1}).length,
'collection.getPlanCache().getPlansByQuery() should return empty results ' +
- 'on non-existent collection');
+ 'on non-existent collection');
// should error on missing required field query.
-assert.throws(function() { planCache.getPlansByQuery(); });
+assert.throws(function() {
+ planCache.getPlansByQuery();
+});
// Invoke with various permutations of required (query) and optional (projection, sort) arguments.
-assert.eq(getPlans(queryB, sortC, projectionB), planCache.getPlansByQuery(queryB, projectionB,
- sortC),
+assert.eq(getPlans(queryB, sortC, projectionB),
+ planCache.getPlansByQuery(queryB, projectionB, sortC),
'plans from collection.getPlanCache().getPlansByQuery() different from command result');
-assert.eq(getPlans(queryB, {}, projectionB), planCache.getPlansByQuery(queryB, projectionB),
+assert.eq(getPlans(queryB, {}, projectionB),
+ planCache.getPlansByQuery(queryB, projectionB),
'plans from collection.getPlanCache().getPlansByQuery() different from command result');
-assert.eq(getPlans(queryB, sortC, {}), planCache.getPlansByQuery(queryB, undefined, sortC),
+assert.eq(getPlans(queryB, sortC, {}),
+ planCache.getPlansByQuery(queryB, undefined, sortC),
'plans from collection.getPlanCache().getPlansByQuery() different from command result');
-assert.eq(getPlans(queryB, {}, {}), planCache.getPlansByQuery(queryB),
+assert.eq(getPlans(queryB, {}, {}),
+ planCache.getPlansByQuery(queryB),
'plans from collection.getPlanCache().getPlansByQuery() different from command result');
// getPlansByQuery() will also accept a single argument with the query shape object
@@ -104,7 +124,11 @@ assert.eq(getPlans(queryB, {}, {}), planCache.getPlansByQuery(queryB),
// projection: <projection>,
// sort: <sort>
// }
-var shapeB = {query: queryB, projection: projectionB, sort: sortC};
+var shapeB = {
+ query: queryB,
+ projection: projectionB,
+ sort: sortC
+};
assert.eq(getPlans(queryB, sortC, projectionB),
planCache.getPlansByQuery(shapeB),
'collection.getPlanCache().getPlansByQuery() did not accept query shape object');
@@ -113,16 +137,16 @@ assert.eq(getPlans(queryB, sortC, projectionB),
// The entire invalid query shape object will be passed to the command
// as the 'query' component which will result in the server returning an empty
// array of plans.
-assert.eq(0, planCache.getPlansByQuery({query: queryB}).length,
+assert.eq(0,
+ planCache.getPlansByQuery({query: queryB}).length,
'collection.getPlanCache.getPlansByQuery should return empty results on ' +
- 'incomplete query shape');
-assert.eq(0, planCache.getPlansByQuery({query: queryB, sort: sortC,
- projection: projectionB,
- unknown_field: 1}).length,
+ 'incomplete query shape');
+assert.eq(0,
+ planCache.getPlansByQuery(
+ {query: queryB, sort: sortC, projection: projectionB, unknown_field: 1})
+ .length,
'collection.getPlanCache.getPlansByQuery should return empty results on ' +
- 'invalid query shape');
-
-
+ 'invalid query shape');
//
// collection.getPlanCache().clearPlansByQuery
@@ -131,19 +155,24 @@ assert.eq(0, planCache.getPlansByQuery({query: queryB, sort: sortC,
// should not error on non-existent query shape.
planCache.clearPlansByQuery({unknownfield: 1});
// should error on missing required field query.
-assert.throws(function() { planCache.clearPlansByQuery(); });
+assert.throws(function() {
+ planCache.clearPlansByQuery();
+});
// Invoke with various permutations of required (query) and optional (projection, sort) arguments.
planCache.clearPlansByQuery(queryB, projectionB);
-assert.eq(3, getShapes().length,
+assert.eq(3,
+ getShapes().length,
'query shape not dropped after running collection.getPlanCache().clearPlansByQuery()');
planCache.clearPlansByQuery(queryB, undefined, sortC);
-assert.eq(2, getShapes().length,
+assert.eq(2,
+ getShapes().length,
'query shape not dropped after running collection.getPlanCache().clearPlansByQuery()');
planCache.clearPlansByQuery(queryB);
-assert.eq(1, getShapes().length,
+assert.eq(1,
+ getShapes().length,
'query shape not dropped after running collection.getPlanCache().clearPlansByQuery()');
planCache.clear();
@@ -163,15 +192,14 @@ assert.eq(1, t.find(queryB).sort(sortC).itcount(), 'unexpected document count');
// Clear using query shape object.
planCache.clearPlansByQuery({query: queryB, projection: {}, sort: sortC});
-assert.eq(0, getShapes().length,
+assert.eq(0,
+ getShapes().length,
'collection.getPlanCache().clearPlansByQuery() did not accept query shape object');
// Should not error on missing or extra fields in query shape object.
planCache.clearPlansByQuery({query: queryB});
-planCache.clearPlansByQuery({query: queryB, sort: sortC, projection: projectionB,
- unknown_field: 1});
-
-
+planCache.clearPlansByQuery(
+ {query: queryB, sort: sortC, projection: projectionB, unknown_field: 1});
//
// collection.getPlanCache().clear
diff --git a/jstests/core/pop_server_13516.js b/jstests/core/pop_server_13516.js
index 231889b7a7d..8d0bacbb3e5 100644
--- a/jstests/core/pop_server_13516.js
+++ b/jstests/core/pop_server_13516.js
@@ -5,8 +5,8 @@ t.drop();
var id = NumberInt(0);
var object = {
- _id : id,
- data : []
+ _id: id,
+ data: []
};
for (var i = 0; i < 4096; i++) {
@@ -14,7 +14,7 @@ for (var i = 0; i < 4096; i++) {
}
t.insert(object);
-t.update({ _id : id}, { $pop : { data : -1 } });
+t.update({_id: id}, {$pop: {data: -1}});
var modified = t.findOne();
assert.eq(4095, modified.data.length);
diff --git a/jstests/core/profile1.js b/jstests/core/profile1.js
index 4fe9993116f..67f4a53d2b9 100644
--- a/jstests/core/profile1.js
+++ b/jstests/core/profile1.js
@@ -2,13 +2,13 @@
"use strict";
function profileCursor(query) {
query = query || {};
- Object.extend(query, {user:username + "@" + db.getName()});
+ Object.extend(query, {user: username + "@" + db.getName()});
return db.system.profile.find(query);
}
function getProfileAString() {
var s = "\n";
- profileCursor().forEach(function(z){
+ profileCursor().forEach(function(z) {
s += tojson(z) + " ,\n";
});
return s;
@@ -17,7 +17,7 @@
function resetProfile(level, slowms) {
db.setProfilingLevel(0);
db.system.profile.drop();
- db.setProfilingLevel(level,slowms);
+ db.setProfilingLevel(level, slowms);
}
// special db so that it can be run in parallel tests
@@ -29,7 +29,6 @@
db.dropDatabase();
try {
-
db.createUser({user: username, pwd: "password", roles: jsTest.basicUserRoles});
db.auth(username, "password");
@@ -63,7 +62,7 @@
assert.eq(2, profileItems.length, "E2 -- " + msg);
// Make sure we can't drop if profiling is still on
- assert.throws(function(z){
+ assert.throws(function(z) {
db.getCollection("system.profile").drop();
});
@@ -88,8 +87,12 @@
resetProfile(2);
db.profile1.drop();
- var q = {_id: 5};
- var u = {$inc: {x: 1}};
+ var q = {
+ _id: 5
+ };
+ var u = {
+ $inc: {x: 1}
+ };
db.profile1.update(q, u);
var r = profileCursor({ns: db.profile1.getFullName()}).sort({$natural: -1})[0];
assert.eq(q, r.query, "Y1: " + tojson(r));
diff --git a/jstests/core/profile2.js b/jstests/core/profile2.js
index 836fbce8f11..bb1605abd1e 100644
--- a/jstests/core/profile2.js
+++ b/jstests/core/profile2.js
@@ -10,7 +10,7 @@ assert.commandWorked(coll.getDB().runCommand({profile: 2}));
var str = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
var hugeStr = str;
-while (hugeStr.length < 2*1024*1024){
+while (hugeStr.length < 2 * 1024 * 1024) {
hugeStr += str;
}
@@ -39,7 +39,7 @@ assert(result.hasOwnProperty('ns'));
assert(result.hasOwnProperty('millis'));
assert(result.hasOwnProperty('query'));
assert.eq('string', typeof(result.query));
-assert(result.query.match(/^{ a: "a+\.\.\." }$/)); // String value is truncated.
+assert(result.query.match(/^{ a: "a+\.\.\." }$/)); // String value is truncated.
assert.commandWorked(coll.getDB().runCommand({profile: 0}));
coll.getDB().system.profile.drop();
@@ -54,7 +54,7 @@ assert(result.hasOwnProperty('ns'));
assert(result.hasOwnProperty('millis'));
assert(result.hasOwnProperty('updateobj'));
assert.eq('string', typeof(result.updateobj));
-assert(result.updateobj.match(/^{ a: "a+\.\.\." }$/)); // String value is truncated.
+assert(result.updateobj.match(/^{ a: "a+\.\.\." }$/)); // String value is truncated.
assert.commandWorked(coll.getDB().runCommand({profile: 0}));
coll.getDB().system.profile.drop();
diff --git a/jstests/core/profile3.js b/jstests/core/profile3.js
index 96b2314ab87..3a22bd1f952 100644
--- a/jstests/core/profile3.js
+++ b/jstests/core/profile3.js
@@ -7,50 +7,47 @@ db.dropAllUsers();
t = db.profile3;
t.drop();
-profileCursor = function( query ) {
- print( "----" );
+profileCursor = function(query) {
+ print("----");
query = query || {};
- Object.extend( query, { user: username + "@" + db.getName() } );
- return db.system.profile.find( query );
+ Object.extend(query, {user: username + "@" + db.getName()});
+ return db.system.profile.find(query);
};
try {
username = "jstests_profile3_user";
db.createUser({user: username, pwd: "password", roles: jsTest.basicUserRoles});
- db.auth( username, "password" );
-
+ db.auth(username, "password");
+
db.setProfilingLevel(0);
db.system.profile.drop();
- assert.eq( 0 , profileCursor().count() );
-
+ assert.eq(0, profileCursor().count());
+
db.setProfilingLevel(2);
-
+
db.createCollection(t.getName());
- t.insert( { x : 1 } );
- t.findOne( { x : 1 } );
- t.find( { x : 1 } ).count();
- t.update( { x : 1 }, {$inc:{a:1}, $set: {big: Array(128).toString()}} );
- t.update( { x : 1 }, {$inc:{a:1}} );
- t.update( { x : 0 }, {$inc:{a:1}} );
-
- profileCursor().forEach( printjson );
+ t.insert({x: 1});
+ t.findOne({x: 1});
+ t.find({x: 1}).count();
+ t.update({x: 1}, {$inc: {a: 1}, $set: {big: Array(128).toString()}});
+ t.update({x: 1}, {$inc: {a: 1}});
+ t.update({x: 0}, {$inc: {a: 1}});
- db.setProfilingLevel(0);
+ profileCursor().forEach(printjson);
+ db.setProfilingLevel(0);
- assert.eq(profileCursor({nMatched: {$exists:1}}).count(), 3);
+ assert.eq(profileCursor({nMatched: {$exists: 1}}).count(), 3);
assert.eq(profileCursor({nMatched: 1}).count(), 2);
assert.eq(profileCursor({nMatched: 0}).count(), 1);
- if ( db.serverStatus().storageEngine.name == "mmapv1" ) {
- assert.eq(profileCursor({nmoved: 1}).count(), 1 );
+ if (db.serverStatus().storageEngine.name == "mmapv1") {
+ assert.eq(profileCursor({nmoved: 1}).count(), 1);
}
db.system.profile.drop();
-}
-finally {
+} finally {
db.setProfilingLevel(0);
db = stddb;
}
-
diff --git a/jstests/core/profile4.js b/jstests/core/profile4.js
index 8dd722f4406..05c96260a88 100644
--- a/jstests/core/profile4.js
+++ b/jstests/core/profile4.js
@@ -30,7 +30,7 @@ try {
// Clear the profiling collection.
db.setProfilingLevel(0);
db.system.profile.drop();
- assert.eq(0 , profileCursor().count());
+ assert.eq(0, profileCursor().count());
// Enable profiling. It will be disabled again at the end of the test, or if the test fails.
db.setProfilingLevel(2);
@@ -93,7 +93,9 @@ try {
// For queries with a lot of stats data, the execution stats in the profile is replaced by
// the plan summary.
var orClauses = 32;
- var bigOrQuery = { $or: [] };
+ var bigOrQuery = {
+ $or: []
+ };
for (var i = 0; i < orClauses; ++i) {
var indexSpec = {};
indexSpec["a" + i] = 1;
@@ -107,15 +109,15 @@ try {
// Confirm "cursorExhausted" not set when cursor is open.
coll.drop();
coll.insert([{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}]);
- coll.find().batchSize(2).next(); // Query performed leaving open cursor
+ coll.find().batchSize(2).next(); // Query performed leaving open cursor
lastOp = getLastOp();
assert.eq(lastOp.op, "query");
assert(!("cursorExhausted" in lastOp));
var cursor = coll.find().batchSize(2);
- cursor.next(); // Perform initial query and consume first of 2 docs returned.
- cursor.next(); // Consume second of 2 docs from initial query.
- cursor.next(); // getMore performed, leaving open cursor.
+ cursor.next(); // Perform initial query and consume first of 2 docs returned.
+ cursor.next(); // Consume second of 2 docs from initial query.
+ cursor.next(); // getMore performed, leaving open cursor.
lastOp = getLastOp();
assert.eq(lastOp.op, "getmore");
assert(!("cursorExhausted" in lastOp));
@@ -235,12 +237,9 @@ try {
assert.eq(lastOp.ndeleted, 1);
// Update with {upsert: true} as findAndModify.
- assert.eq({_id: 2, a: 2, b: 1}, coll.findAndModify({
- query: {_id: 2, a: 2},
- update: {$inc: {b: 1}},
- upsert: true,
- new: true
- }));
+ assert.eq({_id: 2, a: 2, b: 1},
+ coll.findAndModify(
+ {query: {_id: 2, a: 2}, update: {$inc: {b: 1}}, upsert: true, new: true}));
lastOp = getLastOp();
assert.eq(lastOp.op, "command");
assert.eq(lastOp.ns, coll.getFullName());
@@ -256,10 +255,7 @@ try {
assert.eq(lastOp.upsert, true);
// Idhack update as findAndModify.
- assert.eq({_id: 2, a: 2, b: 1}, coll.findAndModify({
- query: {_id: 2},
- update: {$inc: {b: 1}}
- }));
+ assert.eq({_id: 2, a: 2, b: 1}, coll.findAndModify({query: {_id: 2}, update: {$inc: {b: 1}}}));
lastOp = getLastOp();
assert.eq(lastOp.keysExamined, 1);
assert.eq(lastOp.docsExamined, 1);
@@ -267,11 +263,8 @@ try {
assert.eq(lastOp.nModified, 1);
// Update as findAndModify with projection.
- assert.eq({a: 2}, coll.findAndModify({
- query: {a: 2},
- update: {$inc: {b: 1}},
- fields: {_id: 0, a: 1}
- }));
+ assert.eq({a: 2},
+ coll.findAndModify({query: {a: 2}, update: {$inc: {b: 1}}, fields: {_id: 0, a: 1}}));
lastOp = getLastOp();
assert.eq(lastOp.op, "command");
assert.eq(lastOp.ns, coll.getFullName());
@@ -285,11 +278,7 @@ try {
assert.eq(lastOp.nModified, 1);
// Delete as findAndModify with projection.
- assert.eq({a: 2}, coll.findAndModify({
- query: {a: 2},
- remove: true,
- fields: {_id: 0, a: 1}
- }));
+ assert.eq({a: 2}, coll.findAndModify({query: {a: 2}, remove: true, fields: {_id: 0, a: 1}}));
lastOp = getLastOp();
assert.eq(lastOp.op, "command");
assert.eq(lastOp.ns, coll.getFullName());
@@ -308,7 +297,8 @@ try {
// Update
coll.update({a: 2}, {$inc: {b: 1}});
lastOp = getLastOp();
- assert.eq(lastOp.op, "update"); assert.eq(lastOp.ns, coll.getFullName());
+ assert.eq(lastOp.op, "update");
+ assert.eq(lastOp.ns, coll.getFullName());
assert.eq(lastOp.query, {a: 2});
assert.eq(lastOp.updateobj, {$inc: {b: 1}});
assert.eq(lastOp.keysExamined, 0);
@@ -331,8 +321,7 @@ try {
db.setProfilingLevel(0);
db.system.profile.drop();
-}
-finally {
+} finally {
db.setProfilingLevel(0);
db = stddb;
}
diff --git a/jstests/core/profile5.js b/jstests/core/profile5.js
index 11b4bbe7d59..d507b864906 100644
--- a/jstests/core/profile5.js
+++ b/jstests/core/profile5.js
@@ -20,7 +20,8 @@ t.update({x: {$gt: 3}}, {$set: {y: true}}, {multi: true});
printjson(t.find().toArray());
-assert.eq(1, db.system.profile.count({op: "update"}),
+assert.eq(1,
+ db.system.profile.count({op: "update"}),
"expected exactly one update op in system.profile");
var prof = db.system.profile.findOne({op: "update"});
printjson(prof);
diff --git a/jstests/core/profile_no_such_db.js b/jstests/core/profile_no_such_db.js
index e11d93ca66c..51e70f4dc5d 100644
--- a/jstests/core/profile_no_such_db.js
+++ b/jstests/core/profile_no_such_db.js
@@ -1,38 +1,40 @@
// Test that reading the profiling level doesn't create databases, but setting it does.
-(function (db) {
-'use strict';
+(function(db) {
+ 'use strict';
-function dbExists() {
- return Array.contains(db.getMongo().getDBNames(), db.getName());
-}
+ function dbExists() {
+ return Array.contains(db.getMongo().getDBNames(), db.getName());
+ }
-db = db.getSiblingDB('profile_no_such_db'); // Note: changes db argument not global var.
-assert.commandWorked(db.dropDatabase());
-assert(!dbExists());
-
-// Reading the profiling level shouldn't create the database.
-var defaultProfilingLevel = db.getProfilingLevel();
-assert(!dbExists());
-
-// This test assumes that the default profiling level hasn't been changed.
-assert.eq(defaultProfilingLevel, 0);
-
-[0,1,2].forEach(function(level) {
- jsTest.log('Testing profiling level ' + level);
-
- // Setting the profiling level creates the database.
- // Note: in storage engines other than MMAPv1 setting the profiling level to 0 puts the database
- // in a weird state where it exists internally, but doesn't show up in listDatabases, and won't
- // exist if you restart the server.
- var res = db.setProfilingLevel(level);
- assert.eq(res.was, defaultProfilingLevel);
- assert(dbExists() || level == 0);
- assert.eq(db.getProfilingLevel(), level);
-
- // Dropping the db reverts the profiling level to the default.
+ db = db.getSiblingDB('profile_no_such_db'); // Note: changes db argument not global var.
assert.commandWorked(db.dropDatabase());
- assert.eq(db.getProfilingLevel(), defaultProfilingLevel);
assert(!dbExists());
-});
+
+ // Reading the profiling level shouldn't create the database.
+ var defaultProfilingLevel = db.getProfilingLevel();
+ assert(!dbExists());
+
+ // This test assumes that the default profiling level hasn't been changed.
+ assert.eq(defaultProfilingLevel, 0);
+
+ [0, 1, 2].forEach(function(level) {
+ jsTest.log('Testing profiling level ' + level);
+
+ // Setting the profiling level creates the database.
+ // Note: in storage engines other than MMAPv1 setting the profiling level to 0 puts the
+ // database
+ // in a weird state where it exists internally, but doesn't show up in listDatabases, and
+ // won't
+ // exist if you restart the server.
+ var res = db.setProfilingLevel(level);
+ assert.eq(res.was, defaultProfilingLevel);
+ assert(dbExists() || level == 0);
+ assert.eq(db.getProfilingLevel(), level);
+
+ // Dropping the db reverts the profiling level to the default.
+ assert.commandWorked(db.dropDatabase());
+ assert.eq(db.getProfilingLevel(), defaultProfilingLevel);
+ assert(!dbExists());
+ });
}(db));
diff --git a/jstests/core/proj_key1.js b/jstests/core/proj_key1.js
index 264c941192f..5ef6be3f51b 100644
--- a/jstests/core/proj_key1.js
+++ b/jstests/core/proj_key1.js
@@ -4,14 +4,15 @@ t.drop();
as = [];
-for ( i=0; i<10; i++ ){
- as.push( { a : i } );
- t.insert( { a : i , b : i } );
+for (i = 0; i < 10; i++) {
+ as.push({a: i});
+ t.insert({a: i, b: i});
}
-t.ensureIndex( { a : 1 } );
+t.ensureIndex({a: 1});
-// assert( t.find( {} , { a : 1 , _id : 0 } ).explain().indexOnly , "A4" ); // TODO: need to modify query optimier SERVER-2109
+// assert( t.find( {} , { a : 1 , _id : 0 } ).explain().indexOnly , "A4" ); // TODO: need to modify
+// query optimier SERVER-2109
-assert.eq( as , t.find( { a : { $gte : 0 } } , { a : 1 , _id : 0 } ).toArray() , "B1" );
-assert.eq( as , t.find( { a : { $gte : 0 } } , { a : 1 , _id : 0 } ).batchSize(2).toArray() , "B1" );
+assert.eq(as, t.find({a: {$gte: 0}}, {a: 1, _id: 0}).toArray(), "B1");
+assert.eq(as, t.find({a: {$gte: 0}}, {a: 1, _id: 0}).batchSize(2).toArray(), "B1");
diff --git a/jstests/core/pull.js b/jstests/core/pull.js
index 392d8bec227..d2d03e85144 100644
--- a/jstests/core/pull.js
+++ b/jstests/core/pull.js
@@ -1,33 +1,33 @@
t = db.jstests_pull;
t.drop();
-t.save( { a: [ 1, 2, 3 ] } );
-t.update( {}, { $pull: { a: 2 } } );
-t.update( {}, { $pull: { a: 6 } } );
-assert.eq( [ 1, 3 ], t.findOne().a );
+t.save({a: [1, 2, 3]});
+t.update({}, {$pull: {a: 2}});
+t.update({}, {$pull: {a: 6}});
+assert.eq([1, 3], t.findOne().a);
t.drop();
-t.save( { a: [ 1, 2, 3 ] } );
-t.update( {}, { $pull: { a: 2 } } );
-t.update( {}, { $pull: { a: 2 } } );
-assert.eq( [ 1, 3 ], t.findOne().a );
+t.save({a: [1, 2, 3]});
+t.update({}, {$pull: {a: 2}});
+t.update({}, {$pull: {a: 2}});
+assert.eq([1, 3], t.findOne().a);
t.drop();
-t.save( { a: [ 2 ] } );
-t.update( {}, { $pull: { a: 2 } } );
-t.update( {}, { $pull: { a: 6 } } );
-assert.eq( [], t.findOne().a );
+t.save({a: [2]});
+t.update({}, {$pull: {a: 2}});
+t.update({}, {$pull: {a: 6}});
+assert.eq([], t.findOne().a);
// SERVER-6047: $pull creates empty nested docs for dotted fields
// that don't exist.
t.drop();
-t.save({ m : 1 } );
-t.update( { m : 1 }, { $pull : { 'a.b' : [ 1 ] } } );
-assert( ('a' in t.findOne()) == false );
+t.save({m: 1});
+t.update({m: 1}, {$pull: {'a.b': [1]}});
+assert(('a' in t.findOne()) == false);
// Non-obvious bit: the implementation of non-in-place update
// might do different things depending on whether the "new" field
// comes before or after existing fields in the document.
// So for now it's worth testing that too. Sorry, future; blame the past.
-t.update( { m : 1 }, { $pull : { 'x.y' : [ 1 ] } } );
-assert( ('z' in t.findOne()) == false );
-// End SERVER-6047
+t.update({m: 1}, {$pull: {'x.y': [1]}});
+assert(('z' in t.findOne()) == false);
+// End SERVER-6047
diff --git a/jstests/core/pull2.js b/jstests/core/pull2.js
index b5a4f8f9870..861d5164c03 100644
--- a/jstests/core/pull2.js
+++ b/jstests/core/pull2.js
@@ -2,30 +2,32 @@
t = db.pull2;
t.drop();
-t.save( { a : [ { x : 1 } , { x : 1 , b : 2 } ] } );
-assert.eq( 2 , t.findOne().a.length , "A" );
+t.save({a: [{x: 1}, {x: 1, b: 2}]});
+assert.eq(2, t.findOne().a.length, "A");
-t.update( {} , { $pull : { a : { x : 1 } } } );
-assert.eq( 0 , t.findOne().a.length , "B" );
+t.update({}, {$pull: {a: {x: 1}}});
+assert.eq(0, t.findOne().a.length, "B");
-assert.eq( 1 , t.find().count() , "C1" );
+assert.eq(1, t.find().count(), "C1");
-t.update( {} , { $push : { a : { x : 1 } } } );
-t.update( {} , { $push : { a : { x : 1 , b : 2 } } } );
-assert.eq( 2 , t.findOne().a.length , "C" );
+t.update({}, {$push: {a: {x: 1}}});
+t.update({}, {$push: {a: {x: 1, b: 2}}});
+assert.eq(2, t.findOne().a.length, "C");
-t.update( {} , { $pullAll : { a : [ { x : 1 } ] } } );
-assert.eq( 1 , t.findOne().a.length , "D" );
+t.update({}, {$pullAll: {a: [{x: 1}]}});
+assert.eq(1, t.findOne().a.length, "D");
-t.update( {} , { $push : { a : { x : 2 , b : 2 } } } );
-t.update( {} , { $push : { a : { x : 3 , b : 2 } } } );
-t.update( {} , { $push : { a : { x : 4 , b : 2 } } } );
-assert.eq( 4 , t.findOne().a.length , "E" );
+t.update({}, {$push: {a: {x: 2, b: 2}}});
+t.update({}, {$push: {a: {x: 3, b: 2}}});
+t.update({}, {$push: {a: {x: 4, b: 2}}});
+assert.eq(4, t.findOne().a.length, "E");
-assert.eq( 1 , t.find().count() , "C2" );
-
-
-t.update( {} , { $pull : { a : { x : { $lt : 3 } } } } );
-assert.eq( 2 , t.findOne().a.length , "F" );
-assert.eq( [ 3 , 4 ] , t.findOne().a.map( function(z){ return z.x; } ) , "G" );
+assert.eq(1, t.find().count(), "C2");
+t.update({}, {$pull: {a: {x: {$lt: 3}}}});
+assert.eq(2, t.findOne().a.length, "F");
+assert.eq([3, 4],
+ t.findOne().a.map(function(z) {
+ return z.x;
+ }),
+ "G");
diff --git a/jstests/core/pull_or.js b/jstests/core/pull_or.js
index 905c7a87060..a91d88c2c18 100644
--- a/jstests/core/pull_or.js
+++ b/jstests/core/pull_or.js
@@ -2,20 +2,19 @@
t = db.pull_or;
t.drop();
-doc = { _id : 1 , a : { b : [ { x : 1 },
- { y : 'y' },
- { x : 2 },
- { z : 'z' } ] } };
+doc = {
+ _id: 1,
+ a: {b: [{x: 1}, {y: 'y'}, {x: 2}, {z: 'z'}]}
+};
-t.insert( doc );
+t.insert(doc);
-t.update({}, { $pull : { 'a.b' : { 'y' : { $exists : true } } } } );
+t.update({}, {$pull: {'a.b': {'y': {$exists: true}}}});
-assert.eq( [ { x : 1 }, { x : 2 }, { z : 'z' } ], t.findOne().a.b );
+assert.eq([{x: 1}, {x: 2}, {z: 'z'}], t.findOne().a.b);
t.drop();
-t.insert( doc );
-t.update({}, { $pull : { 'a.b' : { $or : [ { 'y' : { $exists : true } },
- { 'z' : { $exists : true } } ] } } } );
+t.insert(doc);
+t.update({}, {$pull: {'a.b': {$or: [{'y': {$exists: true}}, {'z': {$exists: true}}]}}});
-assert.eq( [ { x : 1 }, { x : 2 } ], t.findOne().a.b );
+assert.eq([{x: 1}, {x: 2}], t.findOne().a.b);
diff --git a/jstests/core/pull_remove1.js b/jstests/core/pull_remove1.js
index 90460eb2d6e..926dcbf9575 100644
--- a/jstests/core/pull_remove1.js
+++ b/jstests/core/pull_remove1.js
@@ -2,13 +2,17 @@
t = db.pull_remove1;
t.drop();
-o = { _id : 1 , a : [ 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 ] };
-t.insert( o );
+o = {
+ _id: 1,
+ a: [1, 2, 3, 4, 5, 6, 7, 8]
+};
+t.insert(o);
-assert.eq( o , t.findOne() , "A1" );
+assert.eq(o, t.findOne(), "A1");
-o.a = o.a.filter( function(z){ return z >= 6; } );
-t.update( {} , { $pull : { a : { $lt : 6 } } } );
-
-assert.eq( o.a , t.findOne().a , "A2" );
+o.a = o.a.filter(function(z) {
+ return z >= 6;
+});
+t.update({}, {$pull: {a: {$lt: 6}}});
+assert.eq(o.a, t.findOne().a, "A2");
diff --git a/jstests/core/pullall.js b/jstests/core/pullall.js
index e66e8b10d35..2925a45623f 100644
--- a/jstests/core/pullall.js
+++ b/jstests/core/pullall.js
@@ -1,31 +1,31 @@
t = db.jstests_pullall;
t.drop();
-t.save( { a: [ 1, 2, 3 ] } );
-t.update( {}, { $pullAll: { a: [ 3 ] } } );
-assert.eq( [ 1, 2 ], t.findOne().a );
-t.update( {}, { $pullAll: { a: [ 3 ] } } );
-assert.eq( [ 1, 2 ], t.findOne().a );
+t.save({a: [1, 2, 3]});
+t.update({}, {$pullAll: {a: [3]}});
+assert.eq([1, 2], t.findOne().a);
+t.update({}, {$pullAll: {a: [3]}});
+assert.eq([1, 2], t.findOne().a);
t.drop();
-t.save( { a: [ 1, 2, 3 ] } );
-t.update( {}, { $pullAll: { a: [ 2, 3 ] } } );
-assert.eq( [ 1 ], t.findOne().a );
-t.update( {}, { $pullAll: { a: [] } } );
-assert.eq( [ 1 ], t.findOne().a );
-t.update( {}, { $pullAll: { a: [ 1, 5 ] } } );
-assert.eq( [], t.findOne().a );
+t.save({a: [1, 2, 3]});
+t.update({}, {$pullAll: {a: [2, 3]}});
+assert.eq([1], t.findOne().a);
+t.update({}, {$pullAll: {a: []}});
+assert.eq([1], t.findOne().a);
+t.update({}, {$pullAll: {a: [1, 5]}});
+assert.eq([], t.findOne().a);
// SERVER-6047: $pullAll creates empty nested docs for dotted fields
// that don't exist.
t.drop();
-t.save({ m : 1 } );
-t.update( { m : 1 }, { $pullAll : { 'a.b' : [ 1 ] } } );
-assert( ('a' in t.findOne()) == false );
+t.save({m: 1});
+t.update({m: 1}, {$pullAll: {'a.b': [1]}});
+assert(('a' in t.findOne()) == false);
// Non-obvious bit: the implementation of non-in-place update
// might do different things depending on whether the "new" field
// comes before or after existing fields in the document.
// So for now it's worth testing that too. Sorry, future; blame the past.
-t.update( { m : 1 }, { $pullAll : { 'x.y' : [ 1 ] } } );
-assert( ('z' in t.findOne()) == false );
+t.update({m: 1}, {$pullAll: {'x.y': [1]}});
+assert(('z' in t.findOne()) == false);
// End SERVER-6047
diff --git a/jstests/core/pullall2.js b/jstests/core/pullall2.js
index c97c4e43aba..92d835c0dab 100644
--- a/jstests/core/pullall2.js
+++ b/jstests/core/pullall2.js
@@ -2,19 +2,28 @@
t = db.pullall2;
t.drop();
-o = { _id : 1 , a : [] };
-for ( i=0; i<5; i++ )
- o.a.push( { x : i , y : i } );
+o = {
+ _id: 1,
+ a: []
+};
+for (i = 0; i < 5; i++)
+ o.a.push({x: i, y: i});
-t.insert( o );
+t.insert(o);
-assert.eq( o , t.findOne() , "A" );
+assert.eq(o, t.findOne(), "A");
-t.update( {} , { $pull : { a : { x : 3 } } } );
-o.a = o.a.filter( function(z){ return z.x != 3; } );
-assert.eq( o , t.findOne() , "B" );
+t.update({}, {$pull: {a: {x: 3}}});
+o.a = o.a.filter(function(z) {
+ return z.x != 3;
+});
+assert.eq(o, t.findOne(), "B");
-t.update( {} , { $pull : { a : { x : { $in : [ 1 , 4 ] } } } } );
-o.a = o.a.filter( function(z){ return z.x != 1; } );
-o.a = o.a.filter( function(z){ return z.x != 4; } );
-assert.eq( o , t.findOne() , "C" );
+t.update({}, {$pull: {a: {x: {$in: [1, 4]}}}});
+o.a = o.a.filter(function(z) {
+ return z.x != 1;
+});
+o.a = o.a.filter(function(z) {
+ return z.x != 4;
+});
+assert.eq(o, t.findOne(), "C");
diff --git a/jstests/core/push.js b/jstests/core/push.js
index cefccbb79d4..50ff92cc2cd 100644
--- a/jstests/core/push.js
+++ b/jstests/core/push.js
@@ -3,53 +3,50 @@ var res;
t = db.push;
t.drop();
-t.save( { _id : 2 , a : [ 1 ] } );
-t.update( { _id : 2 } , { $push : { a : 2 } } );
-assert.eq( "1,2" , t.findOne().a.toString() , "A" );
-t.update( { _id : 2 } , { $push : { a : 3 } } );
-assert.eq( "1,2,3" , t.findOne().a.toString() , "B" );
+t.save({_id: 2, a: [1]});
+t.update({_id: 2}, {$push: {a: 2}});
+assert.eq("1,2", t.findOne().a.toString(), "A");
+t.update({_id: 2}, {$push: {a: 3}});
+assert.eq("1,2,3", t.findOne().a.toString(), "B");
-t.update( { _id : 2 } , { $pop : { a : 1 } } );
-assert.eq( "1,2" , t.findOne().a.toString() , "C" );
-t.update( { _id : 2 } , { $pop : { a : -1 } } );
-assert.eq( "2" , t.findOne().a.toString() , "D" );
+t.update({_id: 2}, {$pop: {a: 1}});
+assert.eq("1,2", t.findOne().a.toString(), "C");
+t.update({_id: 2}, {$pop: {a: -1}});
+assert.eq("2", t.findOne().a.toString(), "D");
+t.update({_id: 2}, {$push: {a: 3}});
+t.update({_id: 2}, {$push: {a: 4}});
+t.update({_id: 2}, {$push: {a: 5}});
+assert.eq("2,3,4,5", t.findOne().a.toString(), "E1");
-t.update( { _id : 2 } , { $push : { a : 3 } } );
-t.update( { _id : 2 } , { $push : { a : 4 } } );
-t.update( { _id : 2 } , { $push : { a : 5 } } );
-assert.eq( "2,3,4,5" , t.findOne().a.toString() , "E1" );
+t.update({_id: 2}, {$pop: {a: -1}});
+assert.eq("3,4,5", t.findOne().a.toString(), "E2");
-t.update( { _id : 2 } , { $pop : { a : -1 } } );
-assert.eq( "3,4,5" , t.findOne().a.toString() , "E2" );
+t.update({_id: 2}, {$pop: {a: -1}});
+assert.eq("4,5", t.findOne().a.toString(), "E3");
-t.update( { _id : 2 } , { $pop : { a : -1 } } );
-assert.eq( "4,5" , t.findOne().a.toString() , "E3" );
+res = t.update({_id: 2}, {$pop: {a: -1}});
+assert.writeOK(res);
+assert.eq("5", t.findOne().a.toString(), "E4");
-res = t.update( { _id : 2 } , { $pop : { a : -1 } } );
-assert.writeOK( res );
-assert.eq( "5" , t.findOne().a.toString() , "E4" );
+res = t.update({_id: 2}, {$pop: {a: -1}});
+assert.writeOK(res);
+assert.eq("", t.findOne().a.toString(), "E5");
+res = t.update({_id: 2}, {$pop: {a: -1}});
+assert.writeOK(res);
+assert.eq("", t.findOne().a.toString(), "E6");
-res = t.update( { _id : 2 } , { $pop : { a : -1 } } );
-assert.writeOK( res );
-assert.eq( "" , t.findOne().a.toString() , "E5" );
+res = t.update({_id: 2}, {$pop: {a: -1}});
+assert.writeOK(res);
+assert.eq("", t.findOne().a.toString(), "E7");
-res = t.update( { _id : 2 } , { $pop : { a : -1 } } );
-assert.writeOK( res );
-assert.eq( "" , t.findOne().a.toString() , "E6" );
+res = t.update({_id: 2}, {$pop: {a: 1}});
+assert.writeOK(res);
+assert.eq("", t.findOne().a.toString(), "E8");
-res = t.update( { _id : 2 } , { $pop : { a : -1 } } );
-assert.writeOK( res );
-assert.eq( "" , t.findOne().a.toString() , "E7" );
-
-res = t.update( { _id : 2 } , { $pop : { a : 1 } } );
-assert.writeOK( res );
-assert.eq( "" , t.findOne().a.toString() , "E8" );
-
-res = t.update( { _id : 2 } , { $pop : { b : -1 } } );
-assert.writeOK( res );
-
-res = t.update( { _id : 2 } , { $pop : { b : 1 } } );
-assert.writeOK( res );
+res = t.update({_id: 2}, {$pop: {b: -1}});
+assert.writeOK(res);
+res = t.update({_id: 2}, {$pop: {b: 1}});
+assert.writeOK(res);
diff --git a/jstests/core/push2.js b/jstests/core/push2.js
index 5161cc373b6..431e86f596a 100644
--- a/jstests/core/push2.js
+++ b/jstests/core/push2.js
@@ -2,20 +2,20 @@
t = db.push2;
t.drop();
-t.save( { _id : 1 , a : [] } );
+t.save({_id: 1, a: []});
s = new Array(700000).toString();
gotError = null;
-for ( x=0; x<100; x++ ){
- print (x + " pushes");
- var res = t.update( {} , { $push : { a : s } } );
+for (x = 0; x < 100; x++) {
+ print(x + " pushes");
+ var res = t.update({}, {$push: {a: s}});
gotError = res.hasWriteError();
- if ( gotError )
+ if (gotError)
break;
}
-assert( gotError , "should have gotten error" );
+assert(gotError, "should have gotten error");
t.drop();
diff --git a/jstests/core/push_sort.js b/jstests/core/push_sort.js
index 9bbf5447ae6..0e407d969ba 100644
--- a/jstests/core/push_sort.js
+++ b/jstests/core/push_sort.js
@@ -11,93 +11,94 @@ t.drop();
//
// $slice amount is too large to kick in.
-t.save( { _id: 1, x: [ {a:1}, {a:2} ] } );
-t.update( {_id:1}, { $push: { x: { $each: [ {a:3} ], $slice:-5, $sort: {a:1} } } } );
-assert.eq( [{a:1}, {a:2}, {a:3}] , t.findOne( {_id:1} ).x );
+t.save({_id: 1, x: [{a: 1}, {a: 2}]});
+t.update({_id: 1}, {$push: {x: {$each: [{a: 3}], $slice: -5, $sort: {a: 1}}}});
+assert.eq([{a: 1}, {a: 2}, {a: 3}], t.findOne({_id: 1}).x);
// $slice amount kicks in using values of both the base doc and of the $each clause.
-t.save({ _id: 2, x: [ {a:1}, {a:3} ] } );
-t.update( {_id:2}, { $push: { x: { $each: [ {a:2} ], $slice:-2, $sort: {a:1} } } } );
-assert.eq( [{a:2}, {a:3}], t.findOne( {_id:2} ).x );
+t.save({_id: 2, x: [{a: 1}, {a: 3}]});
+t.update({_id: 2}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: {a: 1}}}});
+assert.eq([{a: 2}, {a: 3}], t.findOne({_id: 2}).x);
// $sort is descending and $slice is too large to kick in.
-t.save({ _id: 3, x: [ {a:1}, {a:3} ] } );
-t.update( {_id:3}, { $push: { x: { $each: [ {a:2} ], $slice:-5, $sort: {a:-1} } } } );
-assert.eq( [{a:3}, {a:2}, {a:1}], t.findOne( {_id:3} ).x );
+t.save({_id: 3, x: [{a: 1}, {a: 3}]});
+t.update({_id: 3}, {$push: {x: {$each: [{a: 2}], $slice: -5, $sort: {a: -1}}}});
+assert.eq([{a: 3}, {a: 2}, {a: 1}], t.findOne({_id: 3}).x);
// $sort is descending and $slice kicks in using values of both the base doc and of
// the $each clause.
-t.save({ _id: 4, x: [ {a:1}, {a:3} ] } );
-t.update( {_id:4}, { $push: { x: { $each: [ {a:2} ], $slice:-2, $sort: {a:-1} } } } );
-assert.eq( [{a:2}, {a:1}], t.findOne( {_id:4} ).x );
+t.save({_id: 4, x: [{a: 1}, {a: 3}]});
+t.update({_id: 4}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: {a: -1}}}});
+assert.eq([{a: 2}, {a: 1}], t.findOne({_id: 4}).x);
// $sort over only a portion of the array's elements objects and #slice kicking in
// using values of both the base doc and of the $each clause.
-t.save({ _id: 5, x: [ {a:1,b:2}, {a:3,b:1} ] } );
-t.update( {_id:5}, { $push: { x: { $each: [ {a:2,b:3} ], $slice:-2, $sort: {b:1} } } } );
-assert.eq( [{a:1, b:2}, {a:2,b:3}], t.findOne( {_id:5} ).x );
+t.save({_id: 5, x: [{a: 1, b: 2}, {a: 3, b: 1}]});
+t.update({_id: 5}, {$push: {x: {$each: [{a: 2, b: 3}], $slice: -2, $sort: {b: 1}}}});
+assert.eq([{a: 1, b: 2}, {a: 2, b: 3}], t.findOne({_id: 5}).x);
// $sort over an array of nested objects and $slice too large to kick in.
-t.save({ _id: 6, x: [ {a:{b:2}}, {a:{b:1}} ] } );
-t.update( {_id:6}, { $push: { x: { $each: [ {a:{b:3}} ], $slice:-5, $sort: {'a.b':1} } } } );
-assert.eq( [{a:{b:1}}, {a:{b:2}}, {a:{b:3}}], t.findOne( {_id:6} ).x );
+t.save({_id: 6, x: [{a: {b: 2}}, {a: {b: 1}}]});
+t.update({_id: 6}, {$push: {x: {$each: [{a: {b: 3}}], $slice: -5, $sort: {'a.b': 1}}}});
+assert.eq([{a: {b: 1}}, {a: {b: 2}}, {a: {b: 3}}], t.findOne({_id: 6}).x);
// $sort over an array of nested objects and $slice kicking in using values of both the
// base doc and of the $each clause.
-t.save({ _id: 7, x: [ {a:{b:2}}, {a:{b:1}} ] } );
-t.update( {_id:7}, { $push: { x: { $each: [ {a:{b:3}} ], $slice:-2, $sort: {'a.b':1} } } } );
-assert.eq( [{a:{b:2}}, {a:{b:3}}], t.findOne( {_id:7} ).x );
+t.save({_id: 7, x: [{a: {b: 2}}, {a: {b: 1}}]});
+t.update({_id: 7}, {$push: {x: {$each: [{a: {b: 3}}], $slice: -2, $sort: {'a.b': 1}}}});
+assert.eq([{a: {b: 2}}, {a: {b: 3}}], t.findOne({_id: 7}).x);
//
// Invalid Cases
//
// $push with $sort should not push a "$sort" field
-var doc8 = {_id: 8, x: [{a:1}, {a:2}]};
+var doc8 = {
+ _id: 8,
+ x: [{a: 1}, {a: 2}]
+};
t.save(doc8);
var res = t.update({_id: 8}, {$push: {x: {$sort: {a: -1}}}});
assert.writeError(res);
-assert.docEq(t.findOne({_id:8}), doc8); //ensure doc was not changed
+assert.docEq(t.findOne({_id: 8}), doc8); // ensure doc was not changed
-t.save({ _id: 100, x: [ {a:1} ] } );
+t.save({_id: 100, x: [{a: 1}]});
// For now, elements of the $each vector need to be objects. In here, '2' is an invalide $each.
-assert.throws( t.update( {_id:100}, { $push: { x: { $each: [ 2 ], $slice:-2, $sort:{a:1} } } } ) );
+assert.throws(t.update({_id: 100}, {$push: {x: {$each: [2], $slice: -2, $sort: {a: 1}}}}));
// For the same reason as above, '1' is an invalid $each element.
-assert.throws( t.update( {_id:100}, { $push: { x: { $each: [{a:2},1], $slice:-2, $sort:{a:1} } } }));
+assert.throws(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}, 1], $slice: -2, $sort: {a: 1}}}}));
// The sort key pattern cannot be empty.
-assert.throws( t.update( {_id:100}, { $push: { x: { $each: [{a:2}], $slice:-2, $sort:{} } } } ) );
+assert.throws(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: {}}}}));
// For now, we do not support positive $slice's (ie, trimming from the array's front).
-assert.throws( t.update( {_id:100}, { $push: { x: { $each: [{a:2}], $slice:2, $sort: {a:1} } } }));
+assert.throws(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: 2, $sort: {a: 1}}}}));
// A $slice cannot be a fractional value.
-assert.throws( t.update( {_id:100}, { $push: { x: { $each: [{a:2}], $slice:-2.1, $sort: {a:1} } }}));
+assert.throws(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2.1, $sort: {a: 1}}}}));
// The sort key pattern's value must be either 1 or -1. In here, {a:-2} is an invalid value.
-assert.throws( t.update( {_id:100}, { $push: { x: { $each: [{a:2}], $slice:-2, $sort: {a:-2} } } }));
+assert.throws(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: {a: -2}}}}));
// For now, we are not supporting sorting of basic elements (non-object, non-arrays). In here,
// the $sort clause would need to have a key pattern value rather than 1.
-assert.throws( t.update( {_id:100}, { $push: { x: { $each: [{a:2}], $slice:-2, $sort: 1 } } } ) );
+assert.throws(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: 1}}}));
// The key pattern 'a.' is an invalid value for $sort.
-assert.throws( t.update( {_id:100}, { $push: { x: { $each: [{a:2}], $slice:-2, $sort: {'a.':1} }}}));
+assert.throws(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: {'a.': 1}}}}));
// An empty key pattern is not a valid $sort value.
-assert.throws( t.update( {_id:100}, { $push: { x: { $each: [{a:2}], $slice:-2, $sort: {'':1} } } }));
+assert.throws(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: {'': 1}}}}));
// If a $slice is used, the only other $sort clause that's accepted is $sort. In here, $xxx
// is not a valid clause.
-assert.throws( t.update( {_id:100}, { $push: { x: { $each: [{a:2}], $slice:-2, $xxx: {s:1} } } } ) );
+assert.throws(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2, $xxx: {s: 1}}}}));
t.remove({});
// Ensure that existing values are validated in the array as objects during a $sort with $each,
// not only the elements in the $each array.
-t.save({ _id: 100, x: [ 1, "foo" ] } );
-assert.throws(t.update(
- {_id: 100},
- { $push: { x: { $each: [{a:2}], $slice:-2, $sort: {a:1} } } } ) );
+t.save({_id: 100, x: [1, "foo"]});
+assert.throws(t.update({_id: 100}, {$push: {x: {$each: [{a: 2}], $slice: -2, $sort: {a: 1}}}}));
diff --git a/jstests/core/pushall.js b/jstests/core/pushall.js
index 736d3a3fc14..e9d388e6f6f 100644
--- a/jstests/core/pushall.js
+++ b/jstests/core/pushall.js
@@ -1,18 +1,18 @@
t = db.jstests_pushall;
t.drop();
-t.save( { _id: 1, a: [ 1, 2, 3 ] } );
-t.update( { _id: 1 }, { $pushAll: { a: [ 4 ] } } );
-assert.eq( [ 1, 2, 3, 4 ], t.findOne( { _id: 1 } ).a );
-t.update( {}, { $pushAll: { a: [ 4 ] } } );
-assert.eq( [ 1, 2, 3, 4, 4 ], t.findOne( { _id: 1 } ).a );
+t.save({_id: 1, a: [1, 2, 3]});
+t.update({_id: 1}, {$pushAll: {a: [4]}});
+assert.eq([1, 2, 3, 4], t.findOne({_id: 1}).a);
+t.update({}, {$pushAll: {a: [4]}});
+assert.eq([1, 2, 3, 4, 4], t.findOne({_id: 1}).a);
-t.save( { _id: 2, a: [ 1, 2, 3 ] } );
-t.update( { _id: 2 }, { $pushAll: { a: [ 4, 5 ] } } );
-assert.eq( [ 1, 2, 3, 4, 5 ], t.findOne( { _id: 2 } ).a );
-t.update( { _id: 2 }, { $pushAll: { a: [] } } );
-assert.eq( [ 1, 2, 3, 4, 5 ], t.findOne( { _id: 2 } ).a );
+t.save({_id: 2, a: [1, 2, 3]});
+t.update({_id: 2}, {$pushAll: {a: [4, 5]}});
+assert.eq([1, 2, 3, 4, 5], t.findOne({_id: 2}).a);
+t.update({_id: 2}, {$pushAll: {a: []}});
+assert.eq([1, 2, 3, 4, 5], t.findOne({_id: 2}).a);
-t.save( { _id: 3 } );
-t.update( { _id: 3 }, { $pushAll: { a: [ 1, 2 ] } } );
-assert.eq( [ 1, 2 ], t.findOne( { _id: 3 } ).a );
+t.save({_id: 3});
+t.update({_id: 3}, {$pushAll: {a: [1, 2]}});
+assert.eq([1, 2], t.findOne({_id: 3}).a);
diff --git a/jstests/core/query1.js b/jstests/core/query1.js
index 78d70eb55fb..9936ce18e92 100644
--- a/jstests/core/query1.js
+++ b/jstests/core/query1.js
@@ -2,25 +2,23 @@
t = db.query1;
t.drop();
-t.save( { num : 1 } );
-t.save( { num : 3 } );
-t.save( { num : 4 } );
+t.save({num: 1});
+t.save({num: 3});
+t.save({num: 4});
num = 0;
total = 0;
-t.find().forEach(
- function(z){
- num++;
- total += z.num;
- }
-);
+t.find().forEach(function(z) {
+ num++;
+ total += z.num;
+});
-assert.eq( num , 3 , "num" );
-assert.eq( total , 8 , "total" );
+assert.eq(num, 3, "num");
+assert.eq(total, 8, "total");
-assert.eq( 3 , t.find().comment("this is a test").itcount() , "B1" );
-assert.eq( 3 , t.find().comment("this is a test").count() , "B2" );
+assert.eq(3, t.find().comment("this is a test").itcount(), "B1");
+assert.eq(3, t.find().comment("this is a test").count(), "B2");
-assert.eq( 3 , t.find().comment("yo ho ho").itcount() , "C1" );
-assert.eq( 3 , t.find().comment("this is a test").count() , "C2" );
+assert.eq(3, t.find().comment("yo ho ho").itcount(), "C1");
+assert.eq(3, t.find().comment("this is a test").count(), "C2");
diff --git a/jstests/core/queryoptimizer3.js b/jstests/core/queryoptimizer3.js
index 8cace47dc66..4bc3754ff7a 100644
--- a/jstests/core/queryoptimizer3.js
+++ b/jstests/core/queryoptimizer3.js
@@ -3,35 +3,31 @@
t = db.jstests_queryoptimizer3;
t.drop();
-p = startParallelShell( 'for( i = 0; i < 400; ++i ) { sleep( 50 ); db.jstests_queryoptimizer3.drop(); }' );
+p = startParallelShell(
+ 'for( i = 0; i < 400; ++i ) { sleep( 50 ); db.jstests_queryoptimizer3.drop(); }');
-for( i = 0; i < 100; ++i ) {
+for (i = 0; i < 100; ++i) {
t.drop();
- t.ensureIndex({a:1});
- t.ensureIndex({b:1});
- for( j = 0; j < 100; ++j ) {
- t.save({a:j,b:j});
+ t.ensureIndex({a: 1});
+ t.ensureIndex({b: 1});
+ for (j = 0; j < 100; ++j) {
+ t.save({a: j, b: j});
}
try {
m = i % 5;
- if ( m == 0 ) {
- t.count({a:{$gte:0},b:{$gte:0}});
+ if (m == 0) {
+ t.count({a: {$gte: 0}, b: {$gte: 0}});
+ } else if (m == 1) {
+ t.find({a: {$gte: 0}, b: {$gte: 0}}).itcount();
+ } else if (m == 2) {
+ t.remove({a: {$gte: 0}, b: {$gte: 0}});
+ } else if (m == 3) {
+ t.update({a: {$gte: 0}, b: {$gte: 0}}, {});
+ } else if (m == 4) {
+ t.distinct('x', {a: {$gte: 0}, b: {$gte: 0}});
}
- else if ( m == 1 ) {
- t.find({a:{$gte:0},b:{$gte:0}}).itcount();
- }
- else if ( m == 2 ) {
- t.remove({a:{$gte:0},b:{$gte:0}});
- }
- else if ( m == 3 ) {
- t.update({a:{$gte:0},b:{$gte:0}},{});
- }
- else if ( m == 4 ) {
- t.distinct('x',{a:{$gte:0},b:{$gte:0}});
- }
- }
- catch (e) {
+ } catch (e) {
print("Op killed during yield: " + e.message);
}
}
diff --git a/jstests/core/queryoptimizer6.js b/jstests/core/queryoptimizer6.js
index 32efccbdb0b..8e00772aa4e 100644
--- a/jstests/core/queryoptimizer6.js
+++ b/jstests/core/queryoptimizer6.js
@@ -3,7 +3,7 @@
t = db.jstests_queryoptimizer6;
t.drop();
-t.save( {a:1} );
+t.save({a: 1});
// There is a bug in the 2.4.x indexing where the first query below returns 0 results with this
// index, but 1 result without it.
@@ -11,6 +11,6 @@ t.save( {a:1} );
// t.ensureIndex( {b:1}, {sparse:true} );
// The sparse index will be used, and recorded for this query pattern.
-assert.eq( 1, t.find( {a:1,b:{$ne:1}} ).itcount() );
+assert.eq(1, t.find({a: 1, b: {$ne: 1}}).itcount());
// The query pattern should be different, and the sparse index should not be used.
-assert.eq( 1, t.find( {a:1} ).itcount() );
+assert.eq(1, t.find({a: 1}).itcount());
diff --git a/jstests/core/queryoptimizera.js b/jstests/core/queryoptimizera.js
index d705bf580ea..80036cd78c0 100644
--- a/jstests/core/queryoptimizera.js
+++ b/jstests/core/queryoptimizera.js
@@ -2,13 +2,13 @@
// constraint is printed at appropriate times. SERVER-5353
function numWarnings() {
- logs = db.adminCommand( { getLog:"global" } ).log;
+ logs = db.adminCommand({getLog: "global"}).log;
ret = 0;
- logs.forEach( function( x ) {
- if ( x.match( warningMatchRegexp ) ) {
- ++ret;
- }
- } );
+ logs.forEach(function(x) {
+ if (x.match(warningMatchRegexp)) {
+ ++ret;
+ }
+ });
return ret;
}
@@ -17,30 +17,30 @@ collectionNameIndex = 0;
// Generate a collection name not already present in the log.
do {
testCollectionName = 'jstests_queryoptimizera__' + collectionNameIndex++;
- warningMatchString = 'unindexed _id query on capped collection.*collection: test.' +
- testCollectionName;
- warningMatchRegexp = new RegExp( warningMatchString );
-
-} while( numWarnings() > 0 );
+ warningMatchString =
+ 'unindexed _id query on capped collection.*collection: test.' + testCollectionName;
+ warningMatchRegexp = new RegExp(warningMatchString);
-t = db[ testCollectionName ];
+} while (numWarnings() > 0);
+
+t = db[testCollectionName];
t.drop();
notCappedCollectionName = testCollectionName + '_notCapped';
-notCapped = db[ notCappedCollectionName ];
+notCapped = db[notCappedCollectionName];
notCapped.drop();
-db.createCollection( testCollectionName, { capped:true, size:1000 } );
-db.createCollection( notCappedCollectionName, { autoIndexId:false } );
+db.createCollection(testCollectionName, {capped: true, size: 1000});
+db.createCollection(notCappedCollectionName, {autoIndexId: false});
-t.insert( {} );
-notCapped.insert( {} );
+t.insert({});
+notCapped.insert({});
oldNumWarnings = 0;
function assertNoNewWarnings() {
- assert.eq( oldNumWarnings, numWarnings() );
+ assert.eq(oldNumWarnings, numWarnings());
}
function assertNewWarning() {
@@ -48,45 +48,45 @@ function assertNewWarning() {
// Ensure that newNumWarnings > oldNumWarnings. It's not safe to test that oldNumWarnings + 1
// == newNumWarnings, because a (simulated) page fault exception may cause multiple messages to
// be logged instead of only one.
- assert.lt( oldNumWarnings, newNumWarnings );
+ assert.lt(oldNumWarnings, newNumWarnings);
oldNumWarnings = newNumWarnings;
}
// Simple _id query
-t.find( { _id:0 } ).itcount();
+t.find({_id: 0}).itcount();
assertNoNewWarnings();
// Simple _id query without an _id index, on a non capped collection.
-notCapped.find( { _id:0 } ).itcount();
+notCapped.find({_id: 0}).itcount();
assertNoNewWarnings();
// A multi field query, including _id.
-t.find( { _id:0, a:0 } ).itcount();
+t.find({_id: 0, a: 0}).itcount();
assertNoNewWarnings();
// An unsatisfiable query.
-t.find( { _id:0, a:{$in:[]} } ).itcount();
+t.find({_id: 0, a: {$in: []}}).itcount();
assertNoNewWarnings();
// An hinted query.
-t.find( { _id:0 } ).hint( { $natural:1 } ).itcount();
+t.find({_id: 0}).hint({$natural: 1}).itcount();
assertNoNewWarnings();
// Retry a multi field query.
-t.find( { _id:0, a:0 } ).itcount();
+t.find({_id: 0, a: 0}).itcount();
assertNoNewWarnings();
// Warnings should not be printed when an index is added on _id.
-t.ensureIndex( { _id:1 } );
+t.ensureIndex({_id: 1});
-t.find( { _id:0 } ).itcount();
+t.find({_id: 0}).itcount();
assertNoNewWarnings();
-t.find( { _id:0, a:0 } ).itcount();
+t.find({_id: 0, a: 0}).itcount();
assertNoNewWarnings();
-t.find( { _id:0, a:0 } ).itcount();
+t.find({_id: 0, a: 0}).itcount();
assertNoNewWarnings();
-t.drop(); // cleanup
+t.drop(); // cleanup
notCapped.drop(); \ No newline at end of file
diff --git a/jstests/core/read_after_optime.js b/jstests/core/read_after_optime.js
index 4f8d601facf..08275791bde 100644
--- a/jstests/core/read_after_optime.js
+++ b/jstests/core/read_after_optime.js
@@ -1,22 +1,16 @@
// Test that attempting to read after optime fails if replication is not enabled.
(function() {
-"use strict";
+ "use strict";
-var currentTime = new Date();
+ var currentTime = new Date();
-var futureOpTime = new Timestamp((currentTime / 1000 + 3600), 0);
+ var futureOpTime = new Timestamp((currentTime / 1000 + 3600), 0);
-var res = assert.commandFailed(db.runCommand({
- find: 'user',
- filter: { x: 1 },
- readConcern: {
- afterOpTime: { ts: futureOpTime, t: 0 }
- }
-}));
+ var res = assert.commandFailed(db.runCommand(
+ {find: 'user', filter: {x: 1}, readConcern: {afterOpTime: {ts: futureOpTime, t: 0}}}));
-assert.eq(123, res.code); // ErrorCodes::NotAReplicaSet
-assert.eq(null, res.waitedMS);
+ assert.eq(123, res.code); // ErrorCodes::NotAReplicaSet
+ assert.eq(null, res.waitedMS);
})();
-
diff --git a/jstests/core/recursion.js b/jstests/core/recursion.js
index dc7b51be845..926250be20d 100644
--- a/jstests/core/recursion.js
+++ b/jstests/core/recursion.js
@@ -1,7 +1,7 @@
// Basic tests for a form of stack recursion that's been shown to cause C++
// side stack overflows in the past. See SERVER-19614.
-(function () {
+(function() {
"use strict";
db.recursion.drop();
@@ -14,7 +14,7 @@
// Make sure db.eval doesn't blow up
function dbEvalRecursion() {
- db.eval(function () {
+ db.eval(function() {
function recursion() {
recursion.apply();
}
@@ -25,17 +25,17 @@
// Make sure mapReduce doesn't blow up
function mapReduceRecursion() {
- db.recursion.mapReduce(function(){
- (function recursion(){
- recursion.apply();
- })();
- }, function(){
- }, {
- out: 'inline'
- });
+ db.recursion.mapReduce(
+ function() {
+ (function recursion() {
+ recursion.apply();
+ })();
+ },
+ function() {},
+ {out: 'inline'});
}
db.recursion.insert({});
- assert.commandFailedWithCode(
- assert.throws(mapReduceRecursion), ErrorCodes.JSInterpreterFailure);
+ assert.commandFailedWithCode(assert.throws(mapReduceRecursion),
+ ErrorCodes.JSInterpreterFailure);
}());
diff --git a/jstests/core/ref.js b/jstests/core/ref.js
index f519d796bad..02c4cb92a07 100644
--- a/jstests/core/ref.js
+++ b/jstests/core/ref.js
@@ -1,29 +1,32 @@
-// to run:
+// to run:
// ./mongo jstests/ref.js
db.otherthings.drop();
db.things.drop();
-var other = { s : "other thing", n : 1};
+var other = {
+ s: "other thing",
+ n: 1
+};
db.otherthings.save(other);
-db.things.save( { name : "abc" } );
+db.things.save({name: "abc"});
x = db.things.findOne();
-x.o = new DBPointer( "otherthings" , other._id );
+x.o = new DBPointer("otherthings", other._id);
db.things.save(x);
-assert( db.things.findOne().o.fetch().n == 1, "dbref broken 2" );
+assert(db.things.findOne().o.fetch().n == 1, "dbref broken 2");
other.n++;
db.otherthings.save(other);
-assert( db.things.findOne().o.fetch().n == 2, "dbrefs broken" );
+assert(db.things.findOne().o.fetch().n == 2, "dbrefs broken");
db.getSiblingDB("otherdb").dropDatabase();
var objid = new ObjectId();
-db.getSiblingDB("otherdb").getCollection("othercoll").insert({_id:objid, field:"value"});
-var subdoc = db.getSiblingDB("otherdb").getCollection("othercoll").findOne({_id:objid});
+db.getSiblingDB("otherdb").getCollection("othercoll").insert({_id: objid, field: "value"});
+var subdoc = db.getSiblingDB("otherdb").getCollection("othercoll").findOne({_id: objid});
db.mycoll.drop();
-db.mycoll.insert({_id:"asdf", asdf:new DBRef("othercoll", objid, "otherdb")});
-var doc = db.mycoll.findOne({_id:"asdf"}, {_id:0, asdf:1});
+db.mycoll.insert({_id: "asdf", asdf: new DBRef("othercoll", objid, "otherdb")});
+var doc = db.mycoll.findOne({_id: "asdf"}, {_id: 0, asdf: 1});
assert.eq(tojson(doc.asdf.fetch()), tojson(subdoc), "otherdb dbref");
diff --git a/jstests/core/ref2.js b/jstests/core/ref2.js
index 29640cd5da0..d0c21da257f 100644
--- a/jstests/core/ref2.js
+++ b/jstests/core/ref2.js
@@ -2,13 +2,18 @@
t = db.ref2;
t.drop();
-a = { $ref : "foo" , $id : 1 };
-b = { $ref : "foo" , $id : 2 };
+a = {
+ $ref: "foo",
+ $id: 1
+};
+b = {
+ $ref: "foo",
+ $id: 2
+};
+t.save({name: "a", r: a});
+t.save({name: "b", r: b});
-t.save( { name : "a" , r : a } );
-t.save( { name : "b" , r : b } );
-
-assert.eq( 2 , t.find().count() , "A" );
-assert.eq( 1 , t.find( { r : a } ).count() , "B" );
-assert.eq( 1 , t.find( { r : b } ).count() , "C" );
+assert.eq(2, t.find().count(), "A");
+assert.eq(1, t.find({r: a}).count(), "B");
+assert.eq(1, t.find({r: b}).count(), "C");
diff --git a/jstests/core/ref3.js b/jstests/core/ref3.js
index 14037ee4cc8..929e4152daf 100644
--- a/jstests/core/ref3.js
+++ b/jstests/core/ref3.js
@@ -1,19 +1,22 @@
-// to run:
+// to run:
// ./mongo jstests/ref3.js
db.otherthings3.drop();
db.things3.drop();
-var other = { s : "other thing", n : 1};
+var other = {
+ s: "other thing",
+ n: 1
+};
db.otherthings3.save(other);
-db.things3.save( { name : "abc" } );
+db.things3.save({name: "abc"});
x = db.things3.findOne();
-x.o = new DBRef( "otherthings3" , other._id );
+x.o = new DBRef("otherthings3", other._id);
db.things3.save(x);
-assert( db.things3.findOne().o.fetch().n == 1, "dbref broken 2" );
+assert(db.things3.findOne().o.fetch().n == 1, "dbref broken 2");
other.n++;
db.otherthings3.save(other);
-assert( db.things3.findOne().o.fetch().n == 2, "dbrefs broken" );
+assert(db.things3.findOne().o.fetch().n == 2, "dbrefs broken");
diff --git a/jstests/core/ref4.js b/jstests/core/ref4.js
index 1c105ef2795..07796d1e96a 100644
--- a/jstests/core/ref4.js
+++ b/jstests/core/ref4.js
@@ -5,16 +5,18 @@ b = db.ref4b;
a.drop();
b.drop();
-var other = { s : "other thing", n : 17 };
+var other = {
+ s: "other thing",
+ n: 17
+};
b.save(other);
-a.save( { name : "abc" , others : [ new DBRef( "ref4b" , other._id ) , new DBPointer( "ref4b" , other._id ) ] } );
-assert( a.findOne().others[0].fetch().n == 17 , "dbref broken 1" );
+a.save({name: "abc", others: [new DBRef("ref4b", other._id), new DBPointer("ref4b", other._id)]});
+assert(a.findOne().others[0].fetch().n == 17, "dbref broken 1");
-x = Array.fetchRefs( a.findOne().others );
-assert.eq( 2 , x.length , "A" );
-assert.eq( 17 , x[0].n , "B" );
-assert.eq( 17 , x[1].n , "C" );
+x = Array.fetchRefs(a.findOne().others);
+assert.eq(2, x.length, "A");
+assert.eq(17, x[0].n, "B");
+assert.eq(17, x[1].n, "C");
-
-assert.eq( 0 , Array.fetchRefs( a.findOne().others , "z" ).length , "D" );
+assert.eq(0, Array.fetchRefs(a.findOne().others, "z").length, "D");
diff --git a/jstests/core/regex.js b/jstests/core/regex.js
index f431d506ea6..235c1936885 100644
--- a/jstests/core/regex.js
+++ b/jstests/core/regex.js
@@ -1,24 +1,24 @@
t = db.jstests_regex;
t.drop();
-t.save( { a: "bcd" } );
-assert.eq( 1, t.count( { a: /b/ } ) , "A" );
-assert.eq( 1, t.count( { a: /bc/ } ) , "B" );
-assert.eq( 1, t.count( { a: /bcd/ } ) , "C" );
-assert.eq( 0, t.count( { a: /bcde/ } ) , "D" );
+t.save({a: "bcd"});
+assert.eq(1, t.count({a: /b/}), "A");
+assert.eq(1, t.count({a: /bc/}), "B");
+assert.eq(1, t.count({a: /bcd/}), "C");
+assert.eq(0, t.count({a: /bcde/}), "D");
t.drop();
-t.save( { a: { b: "cde" } } );
-assert.eq( 1, t.count( { 'a.b': /de/ } ) , "E" );
+t.save({a: {b: "cde"}});
+assert.eq(1, t.count({'a.b': /de/}), "E");
t.drop();
-t.save( { a: { b: [ "cde" ] } } );
-assert.eq( 1, t.count( { 'a.b': /de/ } ) , "F" );
+t.save({a: {b: ["cde"]}});
+assert.eq(1, t.count({'a.b': /de/}), "F");
t.drop();
-t.save( { a: [ { b: "cde" } ] } );
-assert.eq( 1, t.count( { 'a.b': /de/ } ) , "G" );
+t.save({a: [{b: "cde"}]});
+assert.eq(1, t.count({'a.b': /de/}), "G");
t.drop();
-t.save( { a: [ { b: [ "cde" ] } ] } );
-assert.eq( 1, t.count( { 'a.b': /de/ } ) , "H" );
+t.save({a: [{b: ["cde"]}]});
+assert.eq(1, t.count({'a.b': /de/}), "H");
diff --git a/jstests/core/regex2.js b/jstests/core/regex2.js
index 51e24ae420a..80dec55f184 100644
--- a/jstests/core/regex2.js
+++ b/jstests/core/regex2.js
@@ -2,69 +2,68 @@
t = db.regex2;
t.drop();
-t.save( { a : "test" } );
-t.save( { a : "Test" } );
-
-assert.eq( 2 , t.find().count() , "A" );
-assert.eq( 1 , t.find( { a : "Test" } ).count() , "B" );
-assert.eq( 1 , t.find( { a : "test" } ).count() , "C" );
-assert.eq( 1 , t.find( { a : /Test/ } ).count() , "D" );
-assert.eq( 1 , t.find( { a : /test/ } ).count() , "E" );
-assert.eq( 2 , t.find( { a : /test/i } ).count() , "F" );
+t.save({a: "test"});
+t.save({a: "Test"});
+assert.eq(2, t.find().count(), "A");
+assert.eq(1, t.find({a: "Test"}).count(), "B");
+assert.eq(1, t.find({a: "test"}).count(), "C");
+assert.eq(1, t.find({a: /Test/}).count(), "D");
+assert.eq(1, t.find({a: /test/}).count(), "E");
+assert.eq(2, t.find({a: /test/i}).count(), "F");
t.drop();
a = "\u0442\u0435\u0441\u0442";
b = "\u0422\u0435\u0441\u0442";
-assert( ( new RegExp( a ) ).test( a ) , "B 1" );
-assert( ! ( new RegExp( a ) ).test( b ) , "B 2" );
-assert( ( new RegExp( a , "i" ) ).test( b ) , "B 3 " );
-
-t.save( { a : a } );
-t.save( { a : b } );
+assert((new RegExp(a)).test(a), "B 1");
+assert(!(new RegExp(a)).test(b), "B 2");
+assert((new RegExp(a, "i")).test(b), "B 3 ");
+t.save({a: a});
+t.save({a: b});
-assert.eq( 2 , t.find().count() , "C A" );
-assert.eq( 1 , t.find( { a : a } ).count() , "C B" );
-assert.eq( 1 , t.find( { a : b } ).count() , "C C" );
-assert.eq( 1 , t.find( { a : new RegExp( a ) } ).count() , "C D" );
-assert.eq( 1 , t.find( { a : new RegExp( b ) } ).count() , "C E" );
-assert.eq( 2 , t.find( { a : new RegExp( a , "i" ) } ).count() , "C F is spidermonkey built with UTF-8 support?" );
-
+assert.eq(2, t.find().count(), "C A");
+assert.eq(1, t.find({a: a}).count(), "C B");
+assert.eq(1, t.find({a: b}).count(), "C C");
+assert.eq(1, t.find({a: new RegExp(a)}).count(), "C D");
+assert.eq(1, t.find({a: new RegExp(b)}).count(), "C E");
+assert.eq(2,
+ t.find({a: new RegExp(a, "i")}).count(),
+ "C F is spidermonkey built with UTF-8 support?");
// same tests as above but using {$regex: "a|b", $options: "imx"} syntax.
t.drop();
-t.save( { a : "test" } );
-t.save( { a : "Test" } );
-
-assert.eq( 2 , t.find().count() , "obj A" );
-assert.eq( 1 , t.find( { a : {$regex:"Test"} } ).count() , "obj D" );
-assert.eq( 1 , t.find( { a : {$regex:"test"} } ).count() , "obj E" );
-assert.eq( 2 , t.find( { a : {$regex:"test", $options:"i"} } ).count() , "obj F" );
-assert.eq( 2 , t.find( { a : {$options:"i", $regex:"test"} } ).count() , "obj F rev" ); // both orders should work
+t.save({a: "test"});
+t.save({a: "Test"});
+assert.eq(2, t.find().count(), "obj A");
+assert.eq(1, t.find({a: {$regex: "Test"}}).count(), "obj D");
+assert.eq(1, t.find({a: {$regex: "test"}}).count(), "obj E");
+assert.eq(2, t.find({a: {$regex: "test", $options: "i"}}).count(), "obj F");
+assert.eq(2,
+ t.find({a: {$options: "i", $regex: "test"}}).count(),
+ "obj F rev"); // both orders should work
t.drop();
a = "\u0442\u0435\u0441\u0442";
b = "\u0422\u0435\u0441\u0442";
-t.save( { a : a } );
-t.save( { a : b } );
+t.save({a: a});
+t.save({a: b});
-
-assert.eq( 1 , t.find( { a : {$regex: a} } ).count() , "obj C D" );
-assert.eq( 1 , t.find( { a : {$regex: b} } ).count() , "obj C E" );
-assert.eq( 2 , t.find( { a : {$regex: a , $options: "i" } } ).count() , "obj C F is spidermonkey built with UTF-8 support?" );
+assert.eq(1, t.find({a: {$regex: a}}).count(), "obj C D");
+assert.eq(1, t.find({a: {$regex: b}}).count(), "obj C E");
+assert.eq(2,
+ t.find({a: {$regex: a, $options: "i"}}).count(),
+ "obj C F is spidermonkey built with UTF-8 support?");
// Test s (DOT_ALL) option. Not supported with /regex/opts syntax
t.drop();
-t.save({a:'1 2'});
-t.save({a:'1\n2'});
-assert.eq( 1 , t.find( { a : {$regex: '1.*2'} } ).count() );
-assert.eq( 2 , t.find( { a : {$regex: '1.*2', $options: 's'} } ).count() );
-
-
+t.save({a: '1 2'});
+t.save({a: '1\n2'});
+assert.eq(1, t.find({a: {$regex: '1.*2'}}).count());
+assert.eq(2, t.find({a: {$regex: '1.*2', $options: 's'}}).count());
diff --git a/jstests/core/regex3.js b/jstests/core/regex3.js
index 747fbf4d8a8..bc1623cecea 100644
--- a/jstests/core/regex3.js
+++ b/jstests/core/regex3.js
@@ -2,38 +2,39 @@
t = db.regex3;
t.drop();
-t.save( { name : "eliot" } );
-t.save( { name : "emily" } );
-t.save( { name : "bob" } );
-t.save( { name : "aaron" } );
-
-assert.eq( 2 , t.find( { name : /^e.*/ } ).itcount() , "no index count" );
-assert.eq( 4 , t.find( { name : /^e.*/ } ).explain(true).executionStats.totalDocsExamined ,
- "no index explain" );
-t.ensureIndex( { name : 1 } );
-assert.eq( 2 , t.find( { name : /^e.*/ } ).itcount() , "index count" );
-assert.eq( 2 , t.find( { name : /^e.*/ } ).explain(true).executionStats.totalKeysExamined ,
- "index explain" ); // SERVER-239
+t.save({name: "eliot"});
+t.save({name: "emily"});
+t.save({name: "bob"});
+t.save({name: "aaron"});
+
+assert.eq(2, t.find({name: /^e.*/}).itcount(), "no index count");
+assert.eq(4,
+ t.find({name: /^e.*/}).explain(true).executionStats.totalDocsExamined,
+ "no index explain");
+t.ensureIndex({name: 1});
+assert.eq(2, t.find({name: /^e.*/}).itcount(), "index count");
+assert.eq(2,
+ t.find({name: /^e.*/}).explain(true).executionStats.totalKeysExamined,
+ "index explain"); // SERVER-239
t.drop();
-t.save( { name : "aa" } );
-t.save( { name : "ab" } );
-t.save( { name : "ac" } );
-t.save( { name : "c" } );
+t.save({name: "aa"});
+t.save({name: "ab"});
+t.save({name: "ac"});
+t.save({name: "c"});
-assert.eq( 3 , t.find( { name : /^aa*/ } ).itcount() , "B ni" );
-t.ensureIndex( { name : 1 } );
-assert.eq( 3 , t.find( { name : /^aa*/ } ).itcount() , "B i 1" );
-assert.eq( 4 , t.find( { name : /^aa*/ } ).explain(true).executionStats.totalKeysExamined ,
- "B i 1 e" );
+assert.eq(3, t.find({name: /^aa*/}).itcount(), "B ni");
+t.ensureIndex({name: 1});
+assert.eq(3, t.find({name: /^aa*/}).itcount(), "B i 1");
+assert.eq(4, t.find({name: /^aa*/}).explain(true).executionStats.totalKeysExamined, "B i 1 e");
-assert.eq( 2 , t.find( { name : /^a[ab]/ } ).itcount() , "B i 2" );
-assert.eq( 2 , t.find( { name : /^a[bc]/ } ).itcount() , "B i 3" );
+assert.eq(2, t.find({name: /^a[ab]/}).itcount(), "B i 2");
+assert.eq(2, t.find({name: /^a[bc]/}).itcount(), "B i 3");
t.drop();
-t.save( { name: "" } );
-assert.eq( 1, t.find( { name: /^a?/ } ).itcount() , "C 1" );
-t.ensureIndex( { name: 1 } );
-assert.eq( 1, t.find( { name: /^a?/ } ).itcount(), "C 2");
+t.save({name: ""});
+assert.eq(1, t.find({name: /^a?/}).itcount(), "C 1");
+t.ensureIndex({name: 1});
+assert.eq(1, t.find({name: /^a?/}).itcount(), "C 2");
diff --git a/jstests/core/regex4.js b/jstests/core/regex4.js
index ed5e76331e0..112375e2e09 100644
--- a/jstests/core/regex4.js
+++ b/jstests/core/regex4.js
@@ -2,19 +2,22 @@
t = db.regex4;
t.drop();
-t.save( { name : "eliot" } );
-t.save( { name : "emily" } );
-t.save( { name : "bob" } );
-t.save( { name : "aaron" } );
+t.save({name: "eliot"});
+t.save({name: "emily"});
+t.save({name: "bob"});
+t.save({name: "aaron"});
-assert.eq( 2 , t.find( { name : /^e.*/ } ).count() , "no index count" );
-assert.eq( 4 , t.find( { name : /^e.*/ } ).explain(true).executionStats.totalDocsExamined ,
- "no index explain" );
-//assert.eq( 2 , t.find( { name : { $ne : /^e.*/ } } ).count() , "no index count ne" ); // SERVER-251
+assert.eq(2, t.find({name: /^e.*/}).count(), "no index count");
+assert.eq(4,
+ t.find({name: /^e.*/}).explain(true).executionStats.totalDocsExamined,
+ "no index explain");
+// assert.eq( 2 , t.find( { name : { $ne : /^e.*/ } } ).count() , "no index count ne" ); //
+// SERVER-251
-t.ensureIndex( { name : 1 } );
+t.ensureIndex({name: 1});
-assert.eq( 2 , t.find( { name : /^e.*/ } ).count() , "index count" );
-assert.eq( 2 , t.find( { name : /^e.*/ } ).explain(true).executionStats.totalKeysExamined ,
- "index explain" ); // SERVER-239
-//assert.eq( 2 , t.find( { name : { $ne : /^e.*/ } } ).count() , "index count ne" ); // SERVER-251
+assert.eq(2, t.find({name: /^e.*/}).count(), "index count");
+assert.eq(2,
+ t.find({name: /^e.*/}).explain(true).executionStats.totalKeysExamined,
+ "index explain"); // SERVER-239
+// assert.eq( 2 , t.find( { name : { $ne : /^e.*/ } } ).count() , "index count ne" ); // SERVER-251
diff --git a/jstests/core/regex5.js b/jstests/core/regex5.js
index 5e3b7d0a6d3..36274f8b3ca 100644
--- a/jstests/core/regex5.js
+++ b/jstests/core/regex5.js
@@ -8,34 +8,34 @@ for (var i = 0; i < 10; i++) {
t.save({filler: "filler"});
}
-t.save( { x : [ "abc" , "xyz1" ] } );
-t.save( { x : [ "ac" , "xyz2" ] } );
+t.save({x: ["abc", "xyz1"]});
+t.save({x: ["ac", "xyz2"]});
a = /.*b.*c/;
x = /.*y.*/;
doit = function() {
-
- assert.eq( 1 , t.find( { x : a } ).count() , "A" );
- assert.eq( 2 , t.find( { x : x } ).count() , "B" );
- assert.eq( 2 , t.find( { x : { $in: [ x ] } } ).count() , "C" ); // SERVER-322
- assert.eq( 1 , t.find( { x : { $in: [ a, "xyz1" ] } } ).count() , "D" ); // SERVER-322
- assert.eq( 2 , t.find( { x : { $in: [ a, "xyz2" ] } } ).count() , "E" ); // SERVER-322
- assert.eq( 1 , t.find( { x : { $all : [ a , x ] } } ).count() , "F" ); // SERVER-505
- assert.eq( 1 , t.find( { x : { $all : [ a , "abc" ] } } ).count() , "G" ); // SERVER-505
- assert.eq( 0 , t.find( { x : { $all : [ a , "ac" ] } } ).count() , "H" ); // SERVER-505
- assert.eq( 10 , t.find( { x : { $nin: [ x ] } } ).count() , "I" ); // SERVER-322
- assert.eq( 11 , t.find( { x : { $nin: [ a, "xyz1" ] } } ).count() , "J" ); // SERVER-322
- assert.eq( 10 , t.find( { x : { $nin: [ a, "xyz2" ] } } ).count() , "K" ); // SERVER-322
- assert.eq( 2 , t.find( { x : { $not: { $nin: [ x ] } } } ).count() , "L" ); // SERVER-322
- assert.eq( 11 , t.find( { x : { $nin: [ /^a.c/ ] } } ).count() , "M" ); // SERVER-322
+
+ assert.eq(1, t.find({x: a}).count(), "A");
+ assert.eq(2, t.find({x: x}).count(), "B");
+ assert.eq(2, t.find({x: {$in: [x]}}).count(), "C"); // SERVER-322
+ assert.eq(1, t.find({x: {$in: [a, "xyz1"]}}).count(), "D"); // SERVER-322
+ assert.eq(2, t.find({x: {$in: [a, "xyz2"]}}).count(), "E"); // SERVER-322
+ assert.eq(1, t.find({x: {$all: [a, x]}}).count(), "F"); // SERVER-505
+ assert.eq(1, t.find({x: {$all: [a, "abc"]}}).count(), "G"); // SERVER-505
+ assert.eq(0, t.find({x: {$all: [a, "ac"]}}).count(), "H"); // SERVER-505
+ assert.eq(10, t.find({x: {$nin: [x]}}).count(), "I"); // SERVER-322
+ assert.eq(11, t.find({x: {$nin: [a, "xyz1"]}}).count(), "J"); // SERVER-322
+ assert.eq(10, t.find({x: {$nin: [a, "xyz2"]}}).count(), "K"); // SERVER-322
+ assert.eq(2, t.find({x: {$not: {$nin: [x]}}}).count(), "L"); // SERVER-322
+ assert.eq(11, t.find({x: {$nin: [/^a.c/]}}).count(), "M"); // SERVER-322
};
doit();
-t.ensureIndex( {x:1} );
-print( "now indexed" );
+t.ensureIndex({x: 1});
+print("now indexed");
doit();
// SERVER-505
-assert.eq( 0, t.find( { x : { $all: [ "a", /^a/ ] } } ).itcount());
-assert.eq( 2, t.find( { x : { $all: [ /^a/ ] } } ).itcount());
+assert.eq(0, t.find({x: {$all: ["a", /^a/]}}).itcount());
+assert.eq(2, t.find({x: {$all: [/^a/]}}).itcount());
diff --git a/jstests/core/regex6.js b/jstests/core/regex6.js
index 4380ab1ab6b..7b9ed1910ed 100644
--- a/jstests/core/regex6.js
+++ b/jstests/core/regex6.js
@@ -2,40 +2,54 @@
t = db.regex6;
t.drop();
-t.save( { name : "eliot" } );
-t.save( { name : "emily" } );
-t.save( { name : "bob" } );
-t.save( { name : "aaron" } );
-t.save( { name : "[with]some?symbols" } );
+t.save({name: "eliot"});
+t.save({name: "emily"});
+t.save({name: "bob"});
+t.save({name: "aaron"});
+t.save({name: "[with]some?symbols"});
-t.ensureIndex( { name : 1 } );
+t.ensureIndex({name: 1});
-assert.eq( 0 , t.find( { name : /^\// } ).count() , "index count" );
-assert.eq( 1 , t.find( { name : /^\// } ).explain(true).executionStats.totalKeysExamined ,
- "index explain 1" );
-assert.eq( 0 , t.find( { name : /^é/ } ).explain(true).executionStats.totalKeysExamined ,
- "index explain 2" );
-assert.eq( 0 , t.find( { name : /^\é/ } ).explain(true).executionStats.totalKeysExamined ,
- "index explain 3" );
-assert.eq( 1 , t.find( { name : /^\./ } ).explain(true).executionStats.totalKeysExamined ,
- "index explain 4" );
-assert.eq( 5 , t.find( { name : /^./ } ).explain(true).executionStats.totalKeysExamined ,
- "index explain 5" );
+assert.eq(0, t.find({name: /^\//}).count(), "index count");
+assert.eq(1,
+ t.find({name: /^\//}).explain(true).executionStats.totalKeysExamined,
+ "index explain 1");
+assert.eq(0,
+ t.find({name: /^é/}).explain(true).executionStats.totalKeysExamined,
+ "index explain 2");
+assert.eq(0,
+ t.find({name: /^\é/}).explain(true).executionStats.totalKeysExamined,
+ "index explain 3");
+assert.eq(1,
+ t.find({name: /^\./}).explain(true).executionStats.totalKeysExamined,
+ "index explain 4");
+assert.eq(5,
+ t.find({name: /^./}).explain(true).executionStats.totalKeysExamined,
+ "index explain 5");
// SERVER-2862
-assert.eq( 0 , t.find( { name : /^\Qblah\E/ } ).count() , "index explain 6" );
-assert.eq( 1 , t.find( { name : /^\Qblah\E/ } ).explain(true).executionStats.totalKeysExamined ,
- "index explain 6" );
-assert.eq( 1 , t.find( { name : /^blah/ } ).explain(true).executionStats.totalKeysExamined ,
- "index explain 6" );
-assert.eq( 1 , t.find( { name : /^\Q[\Ewi\Qth]some?s\Eym/ } ).count() , "index count 2" );
-assert.eq( 2 , t.find( { name : /^\Q[\Ewi\Qth]some?s\Eym/ } ).explain(true)
- .executionStats.totalKeysExamined ,
- "index explain 6" );
-assert.eq( 2 , t.find( { name : /^bob/ } ).explain(true).executionStats.totalKeysExamined ,
- "index explain 6" ); // proof executionStats.totalKeysExamined == count+1
+assert.eq(0, t.find({name: /^\Qblah\E/}).count(), "index explain 6");
+assert.eq(1,
+ t.find({name: /^\Qblah\E/}).explain(true).executionStats.totalKeysExamined,
+ "index explain 6");
+assert.eq(1,
+ t.find({name: /^blah/}).explain(true).executionStats.totalKeysExamined,
+ "index explain 6");
+assert.eq(1, t.find({name: /^\Q[\Ewi\Qth]some?s\Eym/}).count(), "index count 2");
+assert.eq(2,
+ t.find({name: /^\Q[\Ewi\Qth]some?s\Eym/}).explain(true).executionStats.totalKeysExamined,
+ "index explain 6");
+assert.eq(2,
+ t.find({name: /^bob/}).explain(true).executionStats.totalKeysExamined,
+ "index explain 6"); // proof executionStats.totalKeysExamined == count+1
-assert.eq( 1, t.find( { name : { $regex : "^e", $gte: "emily" } } ).explain(true)
- .executionStats.totalKeysExamined , "ie7" );
-assert.eq( 1, t.find( { name : { $gt : "a", $regex: "^emily" } } ).explain(true)
- .executionStats.totalKeysExamined , "ie7" );
+assert.eq(1,
+ t.find({name: {$regex: "^e", $gte: "emily"}})
+ .explain(true)
+ .executionStats.totalKeysExamined,
+ "ie7");
+assert.eq(1,
+ t.find({name: {$gt: "a", $regex: "^emily"}})
+ .explain(true)
+ .executionStats.totalKeysExamined,
+ "ie7");
diff --git a/jstests/core/regex7.js b/jstests/core/regex7.js
index 8f32ed603ad..90949ec3723 100644
--- a/jstests/core/regex7.js
+++ b/jstests/core/regex7.js
@@ -1,26 +1,26 @@
t = db.regex_matches_self;
t.drop();
-t.insert({r:/^a/});
-t.insert({r:/^a/i});
-t.insert({r:/^b/});
+t.insert({r: /^a/});
+t.insert({r: /^a/i});
+t.insert({r: /^b/});
// no index
-assert.eq( /^a/, t.findOne({r:/^a/}).r, '1 1 a');
-assert.eq( 1, t.count({r:/^a/}), '1 2');
-assert.eq( /^a/i, t.findOne({r:/^a/i}).r, '2 1 a');
-assert.eq( 1, t.count({r:/^a/i}), '2 2 a');
-assert.eq( /^b/, t.findOne({r:/^b/}).r, '3 1 a');
-assert.eq( 1, t.count({r:/^b/}), '3 2 a');
+assert.eq(/^a/, t.findOne({r: /^a/}).r, '1 1 a');
+assert.eq(1, t.count({r: /^a/}), '1 2');
+assert.eq(/^a/i, t.findOne({r: /^a/i}).r, '2 1 a');
+assert.eq(1, t.count({r: /^a/i}), '2 2 a');
+assert.eq(/^b/, t.findOne({r: /^b/}).r, '3 1 a');
+assert.eq(1, t.count({r: /^b/}), '3 2 a');
// with index
-t.ensureIndex({r:1});
-assert.eq( /^a/, t.findOne({r:/^a/}).r, '1 1 b');
-assert.eq( 1, t.count({r:/^a/}), '1 2 b');
-assert.eq( /^a/i, t.findOne({r:/^a/i}).r, '2 1 b');
-assert.eq( 1, t.count({r:/^a/i}), '2 2 b');
-assert.eq( /^b/, t.findOne({r:/^b/}).r, '3 1 b');
-assert.eq( 1, t.count({r:/^b/}), '3 2 b');
+t.ensureIndex({r: 1});
+assert.eq(/^a/, t.findOne({r: /^a/}).r, '1 1 b');
+assert.eq(1, t.count({r: /^a/}), '1 2 b');
+assert.eq(/^a/i, t.findOne({r: /^a/i}).r, '2 1 b');
+assert.eq(1, t.count({r: /^a/i}), '2 2 b');
+assert.eq(/^b/, t.findOne({r: /^b/}).r, '3 1 b');
+assert.eq(1, t.count({r: /^b/}), '3 2 b');
-t.insert( {r:"a"} );
-assert.eq( 2, t.count({r:/^a/}), 'c' ); \ No newline at end of file
+t.insert({r: "a"});
+assert.eq(2, t.count({r: /^a/}), 'c'); \ No newline at end of file
diff --git a/jstests/core/regex8.js b/jstests/core/regex8.js
index 3bb598831a7..3b731c11edd 100644
--- a/jstests/core/regex8.js
+++ b/jstests/core/regex8.js
@@ -2,18 +2,18 @@
t = db.regex8;
t.drop();
-t.insert( { _id : 1 , a : "abc" } );
-t.insert( { _ud : 2 , a : "abc" } );
-t.insert( { _id : 3 , a : "bdc" } );
+t.insert({_id: 1, a: "abc"});
+t.insert({_ud: 2, a: "abc"});
+t.insert({_id: 3, a: "bdc"});
-function test( msg ){
- assert.eq( 3 , t.find().itcount() , msg + "1" );
- assert.eq( 2 , t.find( { a : /a.*/ } ).itcount() , msg + "2" );
- assert.eq( 3 , t.find( { a : /[ab].*/ } ).itcount() , msg + "3" );
- assert.eq( 3 , t.find( { a : /[a|b].*/ } ).itcount() , msg + "4" );
+function test(msg) {
+ assert.eq(3, t.find().itcount(), msg + "1");
+ assert.eq(2, t.find({a: /a.*/}).itcount(), msg + "2");
+ assert.eq(3, t.find({a: /[ab].*/}).itcount(), msg + "3");
+ assert.eq(3, t.find({a: /[a|b].*/}).itcount(), msg + "4");
}
-test( "A" );
+test("A");
-t.ensureIndex( { a : 1 } );
-test( "B" );
+t.ensureIndex({a: 1});
+test("B");
diff --git a/jstests/core/regex9.js b/jstests/core/regex9.js
index e12310ee848..96188d689dc 100644
--- a/jstests/core/regex9.js
+++ b/jstests/core/regex9.js
@@ -2,10 +2,10 @@
t = db.regex9;
t.drop();
-t.insert( { _id : 1 , a : [ "a" , "b" , "c" ] } );
-t.insert( { _id : 2 , a : [ "a" , "b" , "c" , "d" ] } );
-t.insert( { _id : 3 , a : [ "b" , "c" , "d" ] } );
+t.insert({_id: 1, a: ["a", "b", "c"]});
+t.insert({_id: 2, a: ["a", "b", "c", "d"]});
+t.insert({_id: 3, a: ["b", "c", "d"]});
-assert.eq( 2 , t.find( { a : /a/ } ).itcount() , "A1" );
-assert.eq( 2 , t.find( { a : { $regex : "a" } } ).itcount() , "A2" );
-assert.eq( 2 , t.find( { a : { $regex : /a/ } } ).itcount() , "A3" );
+assert.eq(2, t.find({a: /a/}).itcount(), "A1");
+assert.eq(2, t.find({a: {$regex: "a"}}).itcount(), "A2");
+assert.eq(2, t.find({a: {$regex: /a/}}).itcount(), "A3");
diff --git a/jstests/core/regex_embed1.js b/jstests/core/regex_embed1.js
index 7e08ca66e77..6a47a7919ed 100644
--- a/jstests/core/regex_embed1.js
+++ b/jstests/core/regex_embed1.js
@@ -3,23 +3,20 @@ t = db.regex_embed1;
t.drop();
-t.insert( { _id : 1 , a : [ { x : "abc" } , { x : "def" } ] } );
-t.insert( { _id : 2 , a : [ { x : "ab" } , { x : "de" } ] } );
-t.insert( { _id : 3 , a : [ { x : "ab" } , { x : "de" } , { x : "abc" } ] } );
+t.insert({_id: 1, a: [{x: "abc"}, {x: "def"}]});
+t.insert({_id: 2, a: [{x: "ab"}, {x: "de"}]});
+t.insert({_id: 3, a: [{x: "ab"}, {x: "de"}, {x: "abc"}]});
-function test( m ){
- assert.eq( 3 , t.find().itcount() , m + "1" );
- assert.eq( 2 , t.find( { "a.x" : "abc" } ).itcount() , m + "2" );
- assert.eq( 2 , t.find( { "a.x" : /.*abc.*/ } ).itcount() , m + "3" );
-
- assert.eq( 1 , t.find( { "a.0.x" : "abc" } ).itcount() , m + "4" );
- assert.eq( 1 , t.find( { "a.0.x" : /abc/ } ).itcount() , m + "5" );
-}
-
-test( "A" );
-
-t.ensureIndex( { "a.x" : 1 } );
-test( "B" );
+function test(m) {
+ assert.eq(3, t.find().itcount(), m + "1");
+ assert.eq(2, t.find({"a.x": "abc"}).itcount(), m + "2");
+ assert.eq(2, t.find({"a.x": /.*abc.*/}).itcount(), m + "3");
+ assert.eq(1, t.find({"a.0.x": "abc"}).itcount(), m + "4");
+ assert.eq(1, t.find({"a.0.x": /abc/}).itcount(), m + "5");
+}
+test("A");
+t.ensureIndex({"a.x": 1});
+test("B");
diff --git a/jstests/core/regex_limit.js b/jstests/core/regex_limit.js
index e05dae8ab8b..0a8b3e08593 100644
--- a/jstests/core/regex_limit.js
+++ b/jstests/core/regex_limit.js
@@ -1,22 +1,21 @@
var t = db.regex_limit;
t.drop();
-var repeatStr = function(str, n){
- return new Array(n + 1).join(str);
+var repeatStr = function(str, n) {
+ return new Array(n + 1).join(str);
};
-t.insert({ z: repeatStr('c', 100000) });
+t.insert({z: repeatStr('c', 100000)});
var maxOkStrLen = repeatStr('c', 32764);
var strTooLong = maxOkStrLen + 'c';
-assert(t.findOne({ z: { $regex: maxOkStrLen }}) != null);
+assert(t.findOne({z: {$regex: maxOkStrLen}}) != null);
assert.throws(function() {
- t.findOne({ z: { $regex: strTooLong }});
+ t.findOne({z: {$regex: strTooLong}});
});
-assert(t.findOne({ z: { $in: [ new RegExp(maxOkStrLen) ]}}) != null);
+assert(t.findOne({z: {$in: [new RegExp(maxOkStrLen)]}}) != null);
assert.throws(function() {
- t.findOne({ z: { $in: [ new RegExp(strTooLong) ]}});
+ t.findOne({z: {$in: [new RegExp(strTooLong)]}});
});
-
diff --git a/jstests/core/regex_not_id.js b/jstests/core/regex_not_id.js
index b5d0f1b01a6..1f15250f240 100644
--- a/jstests/core/regex_not_id.js
+++ b/jstests/core/regex_not_id.js
@@ -3,10 +3,10 @@
var testColl = db.regex_not_id;
testColl.drop();
-assert.writeOK(testColl.insert({ _id: "ABCDEF1" }, {writeConcern:{w:1}}));
+assert.writeOK(testColl.insert({_id: "ABCDEF1"}, {writeConcern: {w: 1}}));
// Should be an error.
-assert.writeError(testColl.insert({ _id: /^A/ }, {writeConcern:{w:1}}));
+assert.writeError(testColl.insert({_id: /^A/}, {writeConcern: {w: 1}}));
// _id doesn't have to be first; still disallowed
-assert.writeError(testColl.insert({ xxx: "ABCDEF", _id: /ABCDEF/ }, {writeConcern:{w:1}})); \ No newline at end of file
+assert.writeError(testColl.insert({xxx: "ABCDEF", _id: /ABCDEF/}, {writeConcern: {w: 1}})); \ No newline at end of file
diff --git a/jstests/core/regex_options.js b/jstests/core/regex_options.js
index 3febe2575ab..f661e4812a8 100644
--- a/jstests/core/regex_options.js
+++ b/jstests/core/regex_options.js
@@ -1,7 +1,7 @@
t = db.jstests_regex_options;
t.drop();
-t.save( { a: "foo" } );
-assert.eq( 1, t.count( { a: { "$regex": /O/i } } ) );
-assert.eq( 1, t.count( { a: /O/i } ) );
-assert.eq( 1, t.count( { a: { "$regex": "O", "$options": "i" } } ) );
+t.save({a: "foo"});
+assert.eq(1, t.count({a: {"$regex": /O/i}}));
+assert.eq(1, t.count({a: /O/i}));
+assert.eq(1, t.count({a: {"$regex": "O", "$options": "i"}}));
diff --git a/jstests/core/regex_util.js b/jstests/core/regex_util.js
index 86ba8036516..b0c7791b6c1 100644
--- a/jstests/core/regex_util.js
+++ b/jstests/core/regex_util.js
@@ -1,27 +1,26 @@
// Tests for RegExp.escape
(function() {
- var TEST_STRINGS = [
- "[db]",
- "{ab}",
- "<c2>",
- "(abc)",
- "^first^",
- "&addr",
- "k@10gen.com",
- "#4",
- "!b",
- "<>3",
- "****word+",
- "\t| |\n\r",
- "Mongo-db",
- "[{(<>)}]!@#%^&*+\\"
- ];
+ var TEST_STRINGS = [
+ "[db]",
+ "{ab}",
+ "<c2>",
+ "(abc)",
+ "^first^",
+ "&addr",
+ "k@10gen.com",
+ "#4",
+ "!b",
+ "<>3",
+ "****word+",
+ "\t| |\n\r",
+ "Mongo-db",
+ "[{(<>)}]!@#%^&*+\\"
+ ];
- TEST_STRINGS.forEach(function (str) {
- var escaped = RegExp.escape(str);
- var regex = new RegExp(escaped);
- assert(regex.test(str), "Wrong escape for " + str);
- });
+ TEST_STRINGS.forEach(function(str) {
+ var escaped = RegExp.escape(str);
+ var regex = new RegExp(escaped);
+ assert(regex.test(str), "Wrong escape for " + str);
+ });
})();
-
diff --git a/jstests/core/regexa.js b/jstests/core/regexa.js
index b0d47190e77..b56e2cf405e 100644
--- a/jstests/core/regexa.js
+++ b/jstests/core/regexa.js
@@ -4,16 +4,16 @@ t = db.jstests_regexa;
t.drop();
function check() {
- assert.eq( 1, t.count( {a:/^(z|.)/} ) );
- assert.eq( 1, t.count( {a:/^z|./} ) );
- assert.eq( 0, t.count( {a:/^z(z|.)/} ) );
- assert.eq( 1, t.count( {a:/^zz|./} ) );
+ assert.eq(1, t.count({a: /^(z|.)/}));
+ assert.eq(1, t.count({a: /^z|./}));
+ assert.eq(0, t.count({a: /^z(z|.)/}));
+ assert.eq(1, t.count({a: /^zz|./}));
}
-t.save( {a:'a'} );
+t.save({a: 'a'});
check();
-t.ensureIndex( {a:1} );
-if ( 1 ) { // SERVER-3298
-check();
+t.ensureIndex({a: 1});
+if (1) { // SERVER-3298
+ check();
}
diff --git a/jstests/core/regexb.js b/jstests/core/regexb.js
index 169841239c8..09e3518728b 100644
--- a/jstests/core/regexb.js
+++ b/jstests/core/regexb.js
@@ -3,12 +3,7 @@
t = db.jstests_regexb;
t.drop();
-t.save( {a:'a',b:'b',c:'c',d:'d',e:'e'} );
-
-assert.eq( 1, t.count( {a:/a/,b:/b/,c:/c/,d:/d/,e:/e/} ) );
-assert.eq( 0, t.count( {a:/a/,b:/b/,c:/c/,d:/d/,e:/barf/} ) );
-
-
-
-
+t.save({a: 'a', b: 'b', c: 'c', d: 'd', e: 'e'});
+assert.eq(1, t.count({a: /a/, b: /b/, c: /c/, d: /d/, e: /e/}));
+assert.eq(0, t.count({a: /a/, b: /b/, c: /c/, d: /d/, e: /barf/}));
diff --git a/jstests/core/regexc.js b/jstests/core/regexc.js
index f7690c96496..43ad7fd860c 100644
--- a/jstests/core/regexc.js
+++ b/jstests/core/regexc.js
@@ -8,7 +8,7 @@ t.ensureIndex({a: 1});
t.save({a: "0"});
t.save({a: "1"});
t.save({a: "10"});
-assert.eq( 1, t.find({$and: [{a: /0/}, {a: /1/}]}).itcount() );
+assert.eq(1, t.find({$and: [{a: /0/}, {a: /1/}]}).itcount());
// implicit $and using compound index twice
t.drop();
@@ -16,7 +16,7 @@ t.ensureIndex({a: 1, b: 1});
t.save({a: "0", b: "1"});
t.save({a: "10", b: "10"});
t.save({a: "10", b: "2"});
-assert.eq( 2, t.find({a: /0/, b: /1/}).itcount() );
+assert.eq(2, t.find({a: /0/, b: /1/}).itcount());
// $or using same index twice
t.drop();
@@ -25,4 +25,4 @@ t.save({a: "0"});
t.save({a: "1"});
t.save({a: "2"});
t.save({a: "10"});
-assert.eq( 3, t.find({$or: [{a: /0/}, {a: /1/}]}).itcount() );
+assert.eq(3, t.find({$or: [{a: /0/}, {a: /1/}]}).itcount());
diff --git a/jstests/core/remove.js b/jstests/core/remove.js
index 9a71767c3c7..c4afd07c99c 100644
--- a/jstests/core/remove.js
+++ b/jstests/core/remove.js
@@ -3,17 +3,18 @@
t = db.removetest;
-function f(n,dir) {
- t.ensureIndex({x:dir||1});
- for( i = 0; i < n; i++ ) t.save( { x:3, z:"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } );
+function f(n, dir) {
+ t.ensureIndex({x: dir || 1});
+ for (i = 0; i < n; i++)
+ t.save({x: 3, z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"});
- assert.eq( n , t.find().count() );
- t.remove({x:3});
+ assert.eq(n, t.find().count());
+ t.remove({x: 3});
- assert.eq( 0 , t.find().count() );
-
- assert( t.findOne() == null , "A:" + tojson( t.findOne() ) );
- assert( t.validate().valid , "B" );
+ assert.eq(0, t.find().count());
+
+ assert(t.findOne() == null, "A:" + tojson(t.findOne()));
+ assert(t.validate().valid, "B");
}
t.drop();
@@ -21,7 +22,9 @@ f(300, 1);
f(500, -1);
-assert(t.validate().valid , "C" );
+assert(t.validate().valid, "C");
// no query for remove() throws starting in 2.6
-assert.throws(function() { db.t.remove(); });
+assert.throws(function() {
+ db.t.remove();
+});
diff --git a/jstests/core/remove2.js b/jstests/core/remove2.js
index 6605d83e269..81d377c3dfa 100644
--- a/jstests/core/remove2.js
+++ b/jstests/core/remove2.js
@@ -4,33 +4,36 @@
t = db.removetest2;
function f() {
- t.save( { x:[3,3,3,3,3,3,3,3,4,5,6], z:"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } );
- t.save( { x: 9 } );
- t.save( { x: 1 } );
+ t.save({
+ x: [3, 3, 3, 3, 3, 3, 3, 3, 4, 5, 6],
+ z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ });
+ t.save({x: 9});
+ t.save({x: 1});
- t.remove({x:3});
+ t.remove({x: 3});
- assert( t.findOne({x:3}) == null );
- assert( t.validate().valid );
+ assert(t.findOne({x: 3}) == null);
+ assert(t.validate().valid);
}
x = 0;
function g() {
- t.save( { x:[3,4,5,6], z:"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } );
- t.save( { x:[7,8,9], z:"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" } );
+ t.save({x: [3, 4, 5, 6], z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"});
+ t.save({x: [7, 8, 9], z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"});
var res;
- res = t.remove( {x : {$gte:3}, $atomic:x++ } );
+ res = t.remove({x: {$gte: 3}, $atomic: x++});
- assert.writeOK( res );
+ assert.writeOK(res);
// $atomic within $and is not allowed.
- //res = t.remove( {x : {$gte:3}, $and:[{$atomic:true}] } );
- //assert.writeError( res );
+ // res = t.remove( {x : {$gte:3}, $and:[{$atomic:true}] } );
+ // assert.writeError( res );
- assert( t.findOne({x:3}) == null );
- assert( t.findOne({x:8}) == null );
- assert( t.validate().valid );
+ assert(t.findOne({x: 3}) == null);
+ assert(t.findOne({x: 8}) == null);
+ assert(t.validate().valid);
}
t.drop();
@@ -38,10 +41,9 @@ f();
t.drop();
g();
-t.ensureIndex({x:1});
+t.ensureIndex({x: 1});
t.remove({});
f();
t.drop();
-t.ensureIndex({x:1});
+t.ensureIndex({x: 1});
g();
-
diff --git a/jstests/core/remove3.js b/jstests/core/remove3.js
index 2a51a6e0fd4..75e95860cb1 100644
--- a/jstests/core/remove3.js
+++ b/jstests/core/remove3.js
@@ -2,17 +2,17 @@
t = db.remove3;
t.drop();
-for ( i=1; i<=8; i++){
- t.save( { _id : i , x : i } );
+for (i = 1; i <= 8; i++) {
+ t.save({_id: i, x: i});
}
-assert.eq( 8 , t.count() , "A" );
+assert.eq(8, t.count(), "A");
-t.remove( { x : { $lt : 5 } } );
-assert.eq( 4 , t.count() , "B" );
+t.remove({x: {$lt: 5}});
+assert.eq(4, t.count(), "B");
-t.remove( { _id : 5 } );
-assert.eq( 3 , t.count() , "C" );
+t.remove({_id: 5});
+assert.eq(3, t.count(), "C");
-t.remove( { _id : { $lt : 8 } } );
-assert.eq( 1 , t.count() , "D" );
+t.remove({_id: {$lt: 8}});
+assert.eq(1, t.count(), "D");
diff --git a/jstests/core/remove4.js b/jstests/core/remove4.js
index bd007ed4d27..dde28517031 100644
--- a/jstests/core/remove4.js
+++ b/jstests/core/remove4.js
@@ -1,10 +1,10 @@
t = db.remove4;
t.drop();
-t.save ( { a : 1 , b : 1 } );
-t.save ( { a : 2 , b : 1 } );
-t.save ( { a : 3 , b : 1 } );
+t.save({a: 1, b: 1});
+t.save({a: 2, b: 1});
+t.save({a: 3, b: 1});
-assert.eq( 3 , t.find().length() );
-t.remove( { b : 1 } );
-assert.eq( 0 , t.find().length() );
+assert.eq(3, t.find().length());
+t.remove({b: 1});
+assert.eq(0, t.find().length());
diff --git a/jstests/core/remove6.js b/jstests/core/remove6.js
index 83f5a5e6733..61cc39b6e3e 100644
--- a/jstests/core/remove6.js
+++ b/jstests/core/remove6.js
@@ -4,37 +4,36 @@ t.drop();
N = 1000;
-function pop(){
+function pop() {
t.drop();
var arr = [];
- for ( var i=0; i<N; i++ ){
- arr.push( { x : 1 , tags : [ "a" , "b" , "c" ] } );
+ for (var i = 0; i < N; i++) {
+ arr.push({x: 1, tags: ["a", "b", "c"]});
}
- t.insert( arr );
- assert.eq( t.count(), N );
+ t.insert(arr);
+ assert.eq(t.count(), N);
}
-function del(){
- return t.remove( { tags : { $in : [ "a" , "c" ] } } );
+function del() {
+ return t.remove({tags: {$in: ["a", "c"]}});
}
-function test( n , idx ){
+function test(n, idx) {
pop();
- assert.eq( N , t.count() , n + " A " + idx );
- if ( idx )
- t.ensureIndex( idx );
+ assert.eq(N, t.count(), n + " A " + idx);
+ if (idx)
+ t.ensureIndex(idx);
var res = del();
- assert( !res.hasWriteError() , "error deleting: " + res.toString() );
- assert.eq( 0 , t.count() , n + " B " + idx );
+ assert(!res.hasWriteError(), "error deleting: " + res.toString());
+ assert.eq(0, t.count(), n + " B " + idx);
}
-test( "a" );
-test( "b" , { x : 1 } );
-test( "c" , { tags : 1 } );
+test("a");
+test("b", {x: 1});
+test("c", {tags: 1});
N = 5000;
-test( "a2" );
-test( "b2" , { x : 1 } );
-test( "c2" , { tags : 1 } );
-
+test("a2");
+test("b2", {x: 1});
+test("c2", {tags: 1});
diff --git a/jstests/core/remove7.js b/jstests/core/remove7.js
index f50419b3bd0..ef5500fa1fa 100644
--- a/jstests/core/remove7.js
+++ b/jstests/core/remove7.js
@@ -2,33 +2,32 @@
t = db.remove7;
t.drop();
-
-
-function getTags( n ){
+function getTags(n) {
n = n || 5;
var a = [];
- for ( var i=0; i<n; i++ ){
- var v = Math.ceil( 20 * Math.random() );
- a.push( v );
+ for (var i = 0; i < n; i++) {
+ var v = Math.ceil(20 * Math.random());
+ a.push(v);
}
return a;
}
-for ( i=0; i<1000; i++ ){
- t.save( { tags : getTags() } );
+for (i = 0; i < 1000; i++) {
+ t.save({tags: getTags()});
}
-t.ensureIndex( { tags : 1 } );
-
-for ( i=0; i<200; i++ ){
- for ( var j=0; j<10; j++ )
- t.save( { tags : getTags( 100 ) } );
- var q = { tags : { $in : getTags( 10 ) } };
- var before = t.find( q ).count();
- var res = t.remove( q );
- var after = t.find( q ).count();
- assert.eq( 0 , after , "not zero after!" );
- assert.writeOK( res );
+t.ensureIndex({tags: 1});
+
+for (i = 0; i < 200; i++) {
+ for (var j = 0; j < 10; j++)
+ t.save({tags: getTags(100)});
+ var q = {
+ tags: {$in: getTags(10)}
+ };
+ var before = t.find(q).count();
+ var res = t.remove(q);
+ var after = t.find(q).count();
+ assert.eq(0, after, "not zero after!");
+ assert.writeOK(res);
}
-
diff --git a/jstests/core/remove8.js b/jstests/core/remove8.js
index 32d8270e9c0..563e4708cf9 100644
--- a/jstests/core/remove8.js
+++ b/jstests/core/remove8.js
@@ -4,18 +4,20 @@ t.drop();
N = 1000;
-function fill(){
- for ( var i=0; i<N; i++ ){
- t.save( { x : i } );
+function fill() {
+ for (var i = 0; i < N; i++) {
+ t.save({x: i});
}
}
fill();
-assert.eq( N , t.count() , "A" );
-t.remove( {} );
-assert.eq( 0 , t.count() , "B" );
+assert.eq(N, t.count(), "A");
+t.remove({});
+assert.eq(0, t.count(), "B");
fill();
-assert.eq( N , t.count() , "C" );
-db.eval( function(){ db.remove8.remove( {} ); } );
-assert.eq( 0 , t.count() , "D" );
+assert.eq(N, t.count(), "C");
+db.eval(function() {
+ db.remove8.remove({});
+});
+assert.eq(0, t.count(), "D");
diff --git a/jstests/core/remove9.js b/jstests/core/remove9.js
index 85e9002d108..1c82d7b4fce 100644
--- a/jstests/core/remove9.js
+++ b/jstests/core/remove9.js
@@ -2,15 +2,16 @@
t = db.jstests_remove9;
t.drop();
-t.ensureIndex( {i:1} );
-for( i = 0; i < 1000; ++i ) {
- t.save( {i:i} );
+t.ensureIndex({i: 1});
+for (i = 0; i < 1000; ++i) {
+ t.save({i: i});
}
-s = startParallelShell( 't = db.jstests_remove9; Random.setRandomSeed(); for( j = 0; j < 5000; ++j ) { i = Random.randInt( 499 ) * 2; t.update( {i:i}, {$set:{i:2000}} ); t.remove( {i:2000} ); t.save( {i:i} ); }' );
+s = startParallelShell(
+ 't = db.jstests_remove9; Random.setRandomSeed(); for( j = 0; j < 5000; ++j ) { i = Random.randInt( 499 ) * 2; t.update( {i:i}, {$set:{i:2000}} ); t.remove( {i:2000} ); t.save( {i:i} ); }');
-for( i = 0; i < 1000; ++i ) {
- assert.eq( 500, t.find( {i:{$gte:0,$mod:[2,1]}} ).hint( {i:1} ).itcount() );
+for (i = 0; i < 1000; ++i) {
+ assert.eq(500, t.find({i: {$gte: 0, $mod: [2, 1]}}).hint({i: 1}).itcount());
}
s();
diff --git a/jstests/core/remove_justone.js b/jstests/core/remove_justone.js
index 19bfd6d1a03..04813149dfd 100644
--- a/jstests/core/remove_justone.js
+++ b/jstests/core/remove_justone.js
@@ -2,15 +2,15 @@
t = db.remove_justone;
t.drop();
-t.insert( { x : 1 } );
-t.insert( { x : 1 } );
-t.insert( { x : 1 } );
-t.insert( { x : 1 } );
+t.insert({x: 1});
+t.insert({x: 1});
+t.insert({x: 1});
+t.insert({x: 1});
-assert.eq( 4 , t.count() );
+assert.eq(4, t.count());
-t.remove( { x : 1 } , true );
-assert.eq( 3 , t.count() );
+t.remove({x: 1}, true);
+assert.eq(3, t.count());
-t.remove( { x : 1 } );
-assert.eq( 0 , t.count() );
+t.remove({x: 1});
+assert.eq(0, t.count());
diff --git a/jstests/core/remove_undefined.js b/jstests/core/remove_undefined.js
index 51df72c6bbf..0505e494f55 100644
--- a/jstests/core/remove_undefined.js
+++ b/jstests/core/remove_undefined.js
@@ -1,28 +1,35 @@
t = db.drop_undefined.js;
-t.insert( { _id : 1 } );
-t.insert( { _id : 2 } );
-t.insert( { _id : null } );
-
-z = { foo : 1 , x : null };
-
-t.remove( { x : z.bar } );
-assert.eq( 3 , t.count() , "A1" );
-
-t.remove( { x : undefined } );
-assert.eq( 3 , t.count() , "A2" );
-
-assert.throws( function(){ t.remove( { _id : z.bar } ); } , null , "B1" );
-assert.throws( function(){ t.remove( { _id : undefined } ); } , null , "B2" );
-
-
-t.remove( { _id : z.x } );
-assert.eq( 2 , t.count() , "C1" );
-
-t.insert( { _id : null } );
-assert.eq( 3 , t.count() , "C2" );
-
-assert.throws( function(){ t.remove( { _id : undefined } ); } , null, "C3" );
-assert.eq( 3 , t.count() , "C4" );
-
+t.insert({_id: 1});
+t.insert({_id: 2});
+t.insert({_id: null});
+
+z = {
+ foo: 1,
+ x: null
+};
+
+t.remove({x: z.bar});
+assert.eq(3, t.count(), "A1");
+
+t.remove({x: undefined});
+assert.eq(3, t.count(), "A2");
+
+assert.throws(function() {
+ t.remove({_id: z.bar});
+}, null, "B1");
+assert.throws(function() {
+ t.remove({_id: undefined});
+}, null, "B2");
+
+t.remove({_id: z.x});
+assert.eq(2, t.count(), "C1");
+
+t.insert({_id: null});
+assert.eq(3, t.count(), "C2");
+
+assert.throws(function() {
+ t.remove({_id: undefined});
+}, null, "C3");
+assert.eq(3, t.count(), "C4");
diff --git a/jstests/core/removea.js b/jstests/core/removea.js
index 40ee0e6a186..9b51080910b 100644
--- a/jstests/core/removea.js
+++ b/jstests/core/removea.js
@@ -5,27 +5,27 @@ t = db.jstests_removea;
Random.setRandomSeed();
-for( v = 0; v < 2; ++v ) { // Try each index version.
+for (v = 0; v < 2; ++v) { // Try each index version.
t.drop();
- t.ensureIndex( { a:1 }, { v:v } );
+ t.ensureIndex({a: 1}, {v: v});
S = 100;
B = 100;
- for ( var x = 0; x < S; x++ ) {
+ for (var x = 0; x < S; x++) {
var batch = [];
- for ( var y = 0; y < B; y++ ) {
- var i = y + ( B * x );
- batch.push( { a : i } );
+ for (var y = 0; y < B; y++) {
+ var i = y + (B * x);
+ batch.push({a: i});
}
- t.insert( batch );
+ t.insert(batch);
}
- assert.eq( t.count(), S * B );
+ assert.eq(t.count(), S * B);
toDrop = [];
- for( i = 0; i < S * B ; ++i ) {
- toDrop.push( Random.randInt( 10000 ) ); // Dups in the query will be ignored.
+ for (i = 0; i < S * B; ++i) {
+ toDrop.push(Random.randInt(10000)); // Dups in the query will be ignored.
}
// Remove many of the documents; $atomic prevents use of a ClientCursor, which would invoke a
// different bucket deallocation procedure than the one to be tested (see SERVER-4575).
- var res = t.remove( { a:{ $in:toDrop }, $atomic:true } );
- assert.writeOK( res );
+ var res = t.remove({a: {$in: toDrop}, $atomic: true});
+ assert.writeOK(res);
}
diff --git a/jstests/core/removeb.js b/jstests/core/removeb.js
index 1e6658bd7a9..2141e138254 100644
--- a/jstests/core/removeb.js
+++ b/jstests/core/removeb.js
@@ -3,34 +3,33 @@
t = db.jstests_removeb;
t.drop();
-t.ensureIndex( { a:1 } );
+t.ensureIndex({a: 1});
// Make the index multikey to trigger cursor dedup checking.
-t.insert( { a:[ -1, -2 ] } );
+t.insert({a: [-1, -2]});
t.remove({});
// Insert some data.
-for( i = 0; i < 20000; ++i ) {
- t.insert( { a:i } );
+for (i = 0; i < 20000; ++i) {
+ t.insert({a: i});
}
p = startParallelShell(
- // Wait until the remove operation (below) begins running.
- 'while( db.jstests_removeb.count() == 20000 );' +
- // Insert documents with increasing 'a' values. These inserted documents may
- // reuse Records freed by the remove operation in progress and will be
- // visited by the remove operation if it has not completed.
- 'for( i = 20000; i < 40000; ++i ) {' +
- ' db.jstests_removeb.insert( { a:i } );' +
- ' if (i % 1000 == 0) {' +
- ' print( i-20000 + \" of 20000 documents inserted\" );' +
- ' }' +
- '}'
- );
+ // Wait until the remove operation (below) begins running.
+ 'while( db.jstests_removeb.count() == 20000 );' +
+ // Insert documents with increasing 'a' values. These inserted documents may
+ // reuse Records freed by the remove operation in progress and will be
+ // visited by the remove operation if it has not completed.
+ 'for( i = 20000; i < 40000; ++i ) {' +
+ ' db.jstests_removeb.insert( { a:i } );' +
+ ' if (i % 1000 == 0) {' +
+ ' print( i-20000 + \" of 20000 documents inserted\" );' +
+ ' }' +
+ '}');
// Remove using the a:1 index in ascending direction.
-var res = t.remove( { a:{ $gte:0 } } );
-assert( !res.hasWriteError(), 'The remove operation failed.' );
+var res = t.remove({a: {$gte: 0}});
+assert(!res.hasWriteError(), 'The remove operation failed.');
p();
diff --git a/jstests/core/removec.js b/jstests/core/removec.js
index b062399bdb5..f2c4e29e4fc 100644
--- a/jstests/core/removec.js
+++ b/jstests/core/removec.js
@@ -2,37 +2,35 @@
t = db.jstests_removec;
t.drop();
-t.ensureIndex( { a:1 } );
+t.ensureIndex({a: 1});
/** @return an array containing a sequence of numbers from i to i + 10. */
-function runStartingWith( i ) {
+function runStartingWith(i) {
ret = [];
- for( j = 0; j < 11; ++j ) {
- ret.push( i + j );
+ for (j = 0; j < 11; ++j) {
+ ret.push(i + j);
}
return ret;
}
// Insert some documents with adjacent index keys.
-for( i = 0; i < 1100; i += 11 ) {
- t.save( { a:runStartingWith( i ) } );
+for (i = 0; i < 1100; i += 11) {
+ t.save({a: runStartingWith(i)});
}
// Remove and then reinsert random documents in the background.
-s = startParallelShell(
- 't = db.jstests_removec;' +
+s = startParallelShell('t = db.jstests_removec;' +
'Random.setRandomSeed();' +
'for( j = 0; j < 1000; ++j ) {' +
' o = t.findOne( { a:Random.randInt( 1100 ) } );' +
' t.remove( { _id:o._id } );' +
' t.insert( o );' +
- '}'
- );
+ '}');
// Find operations are error free. Note that the cursor throws if it detects the $err
// field in the returned document.
-for( i = 0; i < 200; ++i ) {
- t.find( { a:{ $gte:0 } } ).hint( { a:1 } ).itcount();
+for (i = 0; i < 200; ++i) {
+ t.find({a: {$gte: 0}}).hint({a: 1}).itcount();
}
s();
diff --git a/jstests/core/rename.js b/jstests/core/rename.js
index b2695d95e0f..3287159f850 100644
--- a/jstests/core/rename.js
+++ b/jstests/core/rename.js
@@ -1,4 +1,4 @@
-admin = db.getMongo().getDB( "admin" );
+admin = db.getMongo().getDB("admin");
a = db.jstests_rename_a;
b = db.jstests_rename_b;
@@ -8,22 +8,24 @@ a.drop();
b.drop();
c.drop();
-a.save( {a: 1} );
-a.save( {a: 2} );
-a.ensureIndex( {a:1} );
-a.ensureIndex( {b:1} );
+a.save({a: 1});
+a.save({a: 2});
+a.ensureIndex({a: 1});
+a.ensureIndex({b: 1});
-c.save( {a: 100} );
-assert.commandFailed( admin.runCommand( {renameCollection:"test.jstests_rename_a", to:"test.jstests_rename_c"} ) );
+c.save({a: 100});
+assert.commandFailed(
+ admin.runCommand({renameCollection: "test.jstests_rename_a", to: "test.jstests_rename_c"}));
-assert.commandWorked( admin.runCommand( {renameCollection:"test.jstests_rename_a", to:"test.jstests_rename_b"} ) );
-assert.eq( 0, a.find().count() );
+assert.commandWorked(
+ admin.runCommand({renameCollection: "test.jstests_rename_a", to: "test.jstests_rename_b"}));
+assert.eq(0, a.find().count());
-assert.eq( 2, b.find().count() );
-assert( db.getCollectionNames().indexOf( "jstests_rename_b" ) >= 0 );
-assert( db.getCollectionNames().indexOf( "jstests_rename_a" ) < 0 );
-assert.eq( 3, db.jstests_rename_b.getIndexes().length );
-assert.eq( 0, db.jstests_rename_a.getIndexes().length );
+assert.eq(2, b.find().count());
+assert(db.getCollectionNames().indexOf("jstests_rename_b") >= 0);
+assert(db.getCollectionNames().indexOf("jstests_rename_a") < 0);
+assert.eq(3, db.jstests_rename_b.getIndexes().length);
+assert.eq(0, db.jstests_rename_a.getIndexes().length);
// now try renaming a capped collection
@@ -33,25 +35,26 @@ c.drop();
// TODO: too many numbers hard coded here
// this test depends precisely on record size and hence may not be very reliable
-// note we use floats to make sure numbers are represented as doubles for SpiderMonkey, since test relies on record size
-db.createCollection( "jstests_rename_a", {capped:true,size:10000} );
-for( i = 0.1; i < 10; ++i ) {
- a.save( { i: i } );
+// note we use floats to make sure numbers are represented as doubles for SpiderMonkey, since test
+// relies on record size
+db.createCollection("jstests_rename_a", {capped: true, size: 10000});
+for (i = 0.1; i < 10; ++i) {
+ a.save({i: i});
}
-assert.commandWorked( admin.runCommand( {renameCollection:"test.jstests_rename_a", to:"test.jstests_rename_b"} ) );
-assert.eq( 1, b.count( {i:9.1} ) );
-printjson( b.stats() );
-for( i = 10.1; i < 1000; ++i ) {
- b.save( { i: i } );
+assert.commandWorked(
+ admin.runCommand({renameCollection: "test.jstests_rename_a", to: "test.jstests_rename_b"}));
+assert.eq(1, b.count({i: 9.1}));
+printjson(b.stats());
+for (i = 10.1; i < 1000; ++i) {
+ b.save({i: i});
}
-printjson( b.stats() );
-//res = b.find().sort({i:1});
-//while (res.hasNext()) printjson(res.next());
+printjson(b.stats());
+// res = b.find().sort({i:1});
+// while (res.hasNext()) printjson(res.next());
-assert.eq( 1, b.count( {i:i-1} ) ); // make sure last is there
-assert.eq( 0, b.count( {i:9.1} ) ); // make sure early one is gone
+assert.eq(1, b.count({i: i - 1})); // make sure last is there
+assert.eq(0, b.count({i: 9.1})); // make sure early one is gone
-
-assert( db.getCollectionNames().indexOf( "jstests_rename_b" ) >= 0 );
-assert( db.getCollectionNames().indexOf( "jstests_rename_a" ) < 0 );
-assert( db.jstests_rename_b.stats().capped );
+assert(db.getCollectionNames().indexOf("jstests_rename_b") >= 0);
+assert(db.getCollectionNames().indexOf("jstests_rename_a") < 0);
+assert(db.jstests_rename_b.stats().capped);
diff --git a/jstests/core/rename2.js b/jstests/core/rename2.js
index c913bcbe66e..efbc943ce4f 100644
--- a/jstests/core/rename2.js
+++ b/jstests/core/rename2.js
@@ -6,14 +6,14 @@ b = db.rename2b;
a.drop();
b.drop();
-a.save( { x : 1 } );
-a.save( { x : 2 } );
-a.save( { x : 3 } );
+a.save({x: 1});
+a.save({x: 2});
+a.save({x: 3});
-assert.eq( 3 , a.count() , "A" );
-assert.eq( 0 , b.count() , "B" );
+assert.eq(3, a.count(), "A");
+assert.eq(0, b.count(), "B");
-assert( a.renameCollection( "rename2b" ) , "the command" );
+assert(a.renameCollection("rename2b"), "the command");
-assert.eq( 0 , a.count() , "C" );
-assert.eq( 3 , b.count() , "D" );
+assert.eq(0, a.count(), "C");
+assert.eq(3, b.count(), "D");
diff --git a/jstests/core/rename3.js b/jstests/core/rename3.js
index 31a91772a75..aa1bd986b47 100644
--- a/jstests/core/rename3.js
+++ b/jstests/core/rename3.js
@@ -6,20 +6,20 @@ b = db.rename3b;
a.drop();
b.drop();
-a.save( { x : 1 } );
-b.save( { x : 2 } );
+a.save({x: 1});
+b.save({x: 2});
-assert.eq( 1 , a.findOne().x , "before 1a" );
-assert.eq( 2 , b.findOne().x , "before 2a" );
+assert.eq(1, a.findOne().x, "before 1a");
+assert.eq(2, b.findOne().x, "before 2a");
-res = b.renameCollection( a._shortName );
-assert.eq( 0 , res.ok , "should fail: " + tojson( res ) );
+res = b.renameCollection(a._shortName);
+assert.eq(0, res.ok, "should fail: " + tojson(res));
-assert.eq( 1 , a.findOne().x , "before 1b" );
-assert.eq( 2 , b.findOne().x , "before 2b" );
+assert.eq(1, a.findOne().x, "before 1b");
+assert.eq(2, b.findOne().x, "before 2b");
-res = b.renameCollection( a._shortName , true );
-assert.eq( 1 , res.ok , "should succeed:" + tojson( res ) );
+res = b.renameCollection(a._shortName, true);
+assert.eq(1, res.ok, "should succeed:" + tojson(res));
-assert.eq( 2 , a.findOne().x , "after 1" );
-assert.isnull( b.findOne() , "after 2" );
+assert.eq(2, a.findOne().x, "after 1");
+assert.isnull(b.findOne(), "after 2");
diff --git a/jstests/core/rename4.js b/jstests/core/rename4.js
index 92a26ef6ac0..904709175f9 100644
--- a/jstests/core/rename4.js
+++ b/jstests/core/rename4.js
@@ -1,113 +1,114 @@
t = db.jstests_rename4;
t.drop();
-function bad( f ) {
+function bad(f) {
var docsBeforeUpdate = t.find().toArray();
- var res = eval( f );
+ var res = eval(f);
- //Ensure error
+ // Ensure error
if (!res.hasWriteError()) {
print("Error:" + res.toString());
print("Existing docs (before)");
printjson(docsBeforeUpdate);
print("Existing docs (after)");
printjson(t.find().toArray());
- assert( false, "Expected error but didn't get one for: " + f );
+ assert(false, "Expected error but didn't get one for: " + f);
}
}
-bad( "t.update( {}, {$rename:{'a':'a'}} )" );
-bad( "t.update( {}, {$rename:{'':'a'}} )" );
-bad( "t.update( {}, {$rename:{'a':''}} )" );
-bad( "t.update( {}, {$rename:{'.a':'b'}} )" );
-bad( "t.update( {}, {$rename:{'a':'.b'}} )" );
-bad( "t.update( {}, {$rename:{'a.':'b'}} )" );
-bad( "t.update( {}, {$rename:{'a':'b.'}} )" );
-bad( "t.update( {}, {$rename:{'a.b':'a'}} )" );
-bad( "t.update( {}, {$rename:{'a.$':'b'}} )" );
-bad( "t.update( {}, {$rename:{'a':'b.$'}} )" );
+bad("t.update( {}, {$rename:{'a':'a'}} )");
+bad("t.update( {}, {$rename:{'':'a'}} )");
+bad("t.update( {}, {$rename:{'a':''}} )");
+bad("t.update( {}, {$rename:{'.a':'b'}} )");
+bad("t.update( {}, {$rename:{'a':'.b'}} )");
+bad("t.update( {}, {$rename:{'a.':'b'}} )");
+bad("t.update( {}, {$rename:{'a':'b.'}} )");
+bad("t.update( {}, {$rename:{'a.b':'a'}} )");
+bad("t.update( {}, {$rename:{'a.$':'b'}} )");
+bad("t.update( {}, {$rename:{'a':'b.$'}} )");
// Only bad if input doc has field resulting in conflict
-t.save( {_id:1, a:2} );
-bad( "t.update( {}, {$rename:{'_id':'a'}} )" );
-bad( "t.update( {}, {$set:{b:1},$rename:{'a':'b'}} )" );
-bad( "t.update( {}, {$rename:{'a':'b'},$set:{b:1}} )" );
-bad( "t.update( {}, {$rename:{'a':'b'},$set:{a:1}} )" );
-bad( "t.update( {}, {$set:{'b.c':1},$rename:{'a':'b'}} )" );
-bad( "t.update( {}, {$set:{b:1},$rename:{'a':'b.c'}} )" );
-bad( "t.update( {}, {$rename:{'a':'b'},$set:{'b.c':1}} )" );
-bad( "t.update( {}, {$rename:{'a':'b.c'},$set:{b:1}} )" );
-
+t.save({_id: 1, a: 2});
+bad("t.update( {}, {$rename:{'_id':'a'}} )");
+bad("t.update( {}, {$set:{b:1},$rename:{'a':'b'}} )");
+bad("t.update( {}, {$rename:{'a':'b'},$set:{b:1}} )");
+bad("t.update( {}, {$rename:{'a':'b'},$set:{a:1}} )");
+bad("t.update( {}, {$set:{'b.c':1},$rename:{'a':'b'}} )");
+bad("t.update( {}, {$set:{b:1},$rename:{'a':'b.c'}} )");
+bad("t.update( {}, {$rename:{'a':'b'},$set:{'b.c':1}} )");
+bad("t.update( {}, {$rename:{'a':'b.c'},$set:{b:1}} )");
t.remove({});
-t.save( {a:[1],b:{c:[2]},d:[{e:3}],f:4} );
-bad( "t.update( {}, {$rename:{'a.0':'f'}} )" );
-bad( "t.update( {}, {$rename:{'a.0':'g'}} )" );
-bad( "t.update( {}, {$rename:{'f':'a.0'}} )" );
-bad( "t.update( {}, {$rename:{'b.c.0':'f'}} )" );
-bad( "t.update( {}, {$rename:{'f':'b.c.0'}} )" );
-bad( "t.update( {}, {$rename:{'d.e':'d.f'}} )" );
-bad( "t.update( {}, {$rename:{'d.e':'f'}} )" );
-bad( "t.update( {}, {$rename:{'d.f':'d.e'}} )" );
-bad( "t.update( {}, {$rename:{'f':'d.e'}} )" );
-bad( "t.update( {}, {$rename:{'d.0.e':'d.f'}} )" );
-bad( "t.update( {}, {$rename:{'d.0.e':'f'}} )" );
-bad( "t.update( {}, {$rename:{'d.f':'d.0.e'}} )" );
-bad( "t.update( {}, {$rename:{'f':'d.0.e'}} )" );
-bad( "t.update( {}, {$rename:{'f.g':'a'}} )" );
-bad( "t.update( {}, {$rename:{'a':'f.g'}} )" );
+t.save({a: [1], b: {c: [2]}, d: [{e: 3}], f: 4});
+bad("t.update( {}, {$rename:{'a.0':'f'}} )");
+bad("t.update( {}, {$rename:{'a.0':'g'}} )");
+bad("t.update( {}, {$rename:{'f':'a.0'}} )");
+bad("t.update( {}, {$rename:{'b.c.0':'f'}} )");
+bad("t.update( {}, {$rename:{'f':'b.c.0'}} )");
+bad("t.update( {}, {$rename:{'d.e':'d.f'}} )");
+bad("t.update( {}, {$rename:{'d.e':'f'}} )");
+bad("t.update( {}, {$rename:{'d.f':'d.e'}} )");
+bad("t.update( {}, {$rename:{'f':'d.e'}} )");
+bad("t.update( {}, {$rename:{'d.0.e':'d.f'}} )");
+bad("t.update( {}, {$rename:{'d.0.e':'f'}} )");
+bad("t.update( {}, {$rename:{'d.f':'d.0.e'}} )");
+bad("t.update( {}, {$rename:{'f':'d.0.e'}} )");
+bad("t.update( {}, {$rename:{'f.g':'a'}} )");
+bad("t.update( {}, {$rename:{'a':'f.g'}} )");
-function good( start, mod, expected ) {
+function good(start, mod, expected) {
t.remove({});
- t.save( start );
- var res = t.update( {}, mod );
- assert.writeOK( res );
+ t.save(start);
+ var res = t.update({}, mod);
+ assert.writeOK(res);
var got = t.findOne();
delete got._id;
- assert.docEq( expected, got );
+ assert.docEq(expected, got);
}
-good( {a:1}, {$rename:{a:'b'}}, {b:1} );
-good( {a:1}, {$rename:{a:'bb'}}, {bb:1} );
-good( {b:1}, {$rename:{b:'a'}}, {a:1} );
-good( {bb:1}, {$rename:{bb:'a'}}, {a:1} );
-good( {a:{y:1}}, {$rename:{'a.y':'a.z'}}, {a:{z:1}} );
-good( {a:{yy:1}}, {$rename:{'a.yy':'a.z'}}, {a:{z:1}} );
-good( {a:{z:1}}, {$rename:{'a.z':'a.y'}}, {a:{y:1}} );
-good( {a:{zz:1}}, {$rename:{'a.zz':'a.y'}}, {a:{y:1}} );
-good( {a:{c:1}}, {$rename:{a:'b'}}, {b:{c:1}} );
-good( {aa:{c:1}}, {$rename:{aa:'b'}}, {b:{c:1}} );
-good( {a:1,b:2}, {$rename:{a:'b'}}, {b:1} );
-good( {aa:1,b:2}, {$rename:{aa:'b'}}, {b:1} );
-good( {a:1,bb:2}, {$rename:{a:'bb'}}, {bb:1} );
-good( {a:1}, {$rename:{a:'b.c'}}, {b:{c:1}} );
-good( {aa:1}, {$rename:{aa:'b.c'}}, {b:{c:1}} );
-good( {a:1,b:{}}, {$rename:{a:'b.c'}}, {b:{c:1}} );
-good( {aa:1,b:{}}, {$rename:{aa:'b.c'}}, {b:{c:1}} );
-good( {a:1}, {$rename:{b:'c'}}, {a:1} );
-good( {aa:1}, {$rename:{b:'c'}}, {aa:1} );
-good( {}, {$rename:{b:'c'}}, {} );
-good( {a:{b:1,c:2}}, {$rename:{'a.b':'d'}}, {a:{c:2},d:1} );
-good( {a:{bb:1,c:2}}, {$rename:{'a.bb':'d'}}, {a:{c:2},d:1} );
-good( {a:{b:1}}, {$rename:{'a.b':'d'}}, {a:{},d:1} );
-good( {a:[5]}, {$rename:{a:'b'}}, {b:[5]} );
-good( {aa:[5]}, {$rename:{aa:'b'}}, {b:[5]} );
-good( {'0':1}, {$rename:{'0':'5'}}, {'5':1} );
-good( {a:1,b:2}, {$rename:{a:'c'},$set:{b:5}}, {b:5,c:1} );
-good( {aa:1,b:2}, {$rename:{aa:'c'},$set:{b:5}}, {b:5,c:1} );
-good( {a:1,b:2}, {$rename:{z:'c'},$set:{b:5}}, {a:1,b:5} );
-good( {aa:1,b:2}, {$rename:{z:'c'},$set:{b:5}}, {aa:1,b:5} );
+good({a: 1}, {$rename: {a: 'b'}}, {b: 1});
+good({a: 1}, {$rename: {a: 'bb'}}, {bb: 1});
+good({b: 1}, {$rename: {b: 'a'}}, {a: 1});
+good({bb: 1}, {$rename: {bb: 'a'}}, {a: 1});
+good({a: {y: 1}}, {$rename: {'a.y': 'a.z'}}, {a: {z: 1}});
+good({a: {yy: 1}}, {$rename: {'a.yy': 'a.z'}}, {a: {z: 1}});
+good({a: {z: 1}}, {$rename: {'a.z': 'a.y'}}, {a: {y: 1}});
+good({a: {zz: 1}}, {$rename: {'a.zz': 'a.y'}}, {a: {y: 1}});
+good({a: {c: 1}}, {$rename: {a: 'b'}}, {b: {c: 1}});
+good({aa: {c: 1}}, {$rename: {aa: 'b'}}, {b: {c: 1}});
+good({a: 1, b: 2}, {$rename: {a: 'b'}}, {b: 1});
+good({aa: 1, b: 2}, {$rename: {aa: 'b'}}, {b: 1});
+good({a: 1, bb: 2}, {$rename: {a: 'bb'}}, {bb: 1});
+good({a: 1}, {$rename: {a: 'b.c'}}, {b: {c: 1}});
+good({aa: 1}, {$rename: {aa: 'b.c'}}, {b: {c: 1}});
+good({a: 1, b: {}}, {$rename: {a: 'b.c'}}, {b: {c: 1}});
+good({aa: 1, b: {}}, {$rename: {aa: 'b.c'}}, {b: {c: 1}});
+good({a: 1}, {$rename: {b: 'c'}}, {a: 1});
+good({aa: 1}, {$rename: {b: 'c'}}, {aa: 1});
+good({}, {$rename: {b: 'c'}}, {});
+good({a: {b: 1, c: 2}}, {$rename: {'a.b': 'd'}}, {a: {c: 2}, d: 1});
+good({a: {bb: 1, c: 2}}, {$rename: {'a.bb': 'd'}}, {a: {c: 2}, d: 1});
+good({a: {b: 1}}, {$rename: {'a.b': 'd'}}, {a: {}, d: 1});
+good({a: [5]}, {$rename: {a: 'b'}}, {b: [5]});
+good({aa: [5]}, {$rename: {aa: 'b'}}, {b: [5]});
+good({'0': 1}, {$rename: {'0': '5'}}, {'5': 1});
+good({a: 1, b: 2}, {$rename: {a: 'c'}, $set: {b: 5}}, {b: 5, c: 1});
+good({aa: 1, b: 2}, {$rename: {aa: 'c'}, $set: {b: 5}}, {b: 5, c: 1});
+good({a: 1, b: 2}, {$rename: {z: 'c'}, $set: {b: 5}}, {a: 1, b: 5});
+good({aa: 1, b: 2}, {$rename: {z: 'c'}, $set: {b: 5}}, {aa: 1, b: 5});
// (formerly) rewriting single field
-good( {a:{z:1,b:1}}, {$rename:{'a.b':'a.c'}}, {a:{c:1,z:1}} );
-good( {a:{z:1,tomato:1}}, {$rename:{'a.tomato':'a.potato'}}, {a:{potato:1,z:1}} );
-good( {a:{z:1,b:1,c:1}}, {$rename:{'a.b':'a.c'}}, {a:{c:1,z:1}} );
-good( {a:{z:1,tomato:1,potato:1}}, {$rename:{'a.tomato':'a.potato'}}, {a:{potato:1,z:1}} );
-good( {a:{z:1,b:1}}, {$rename:{'a.b':'a.cc'}}, {a:{cc:1,z:1}} );
-good( {a:{z:1,b:1,c:1}}, {$rename:{'a.b':'aa.c'}}, {a:{c:1,z:1},aa:{c:1}} );
+good({a: {z: 1, b: 1}}, {$rename: {'a.b': 'a.c'}}, {a: {c: 1, z: 1}});
+good({a: {z: 1, tomato: 1}}, {$rename: {'a.tomato': 'a.potato'}}, {a: {potato: 1, z: 1}});
+good({a: {z: 1, b: 1, c: 1}}, {$rename: {'a.b': 'a.c'}}, {a: {c: 1, z: 1}});
+good({a: {z: 1, tomato: 1, potato: 1}},
+ {$rename: {'a.tomato': 'a.potato'}},
+ {a: {potato: 1, z: 1}});
+good({a: {z: 1, b: 1}}, {$rename: {'a.b': 'a.cc'}}, {a: {cc: 1, z: 1}});
+good({a: {z: 1, b: 1, c: 1}}, {$rename: {'a.b': 'aa.c'}}, {a: {c: 1, z: 1}, aa: {c: 1}});
// invalid target, but missing source
-good( {a:1,c:4}, {$rename:{b:'c.d'}}, {a:1,c:4} );
+good({a: 1, c: 4}, {$rename: {b: 'c.d'}}, {a: 1, c: 4});
// TODO: This should be supported, and it is with the new update framework, but not with the
// old, and we currently don't have a good way to check which mode we are in. When we do have
@@ -119,19 +120,19 @@ good( {a:1,c:4}, {$rename:{b:'c.d'}}, {a:1,c:4} );
// check index
t.drop();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
-function l( start, mod, query, expected ) {
+function l(start, mod, query, expected) {
t.remove({});
- t.save( start );
- var res = t.update( {}, mod );
- assert.writeOK( res );
- var got = t.find( query ).hint( {a:1} ).next();
+ t.save(start);
+ var res = t.update({}, mod);
+ assert.writeOK(res);
+ var got = t.find(query).hint({a: 1}).next();
delete got._id;
- assert.docEq( expected, got );
+ assert.docEq(expected, got);
}
-l( {a:1}, {$rename:{a:'b'}}, {a:null}, {b:1} );
-l( {a:1}, {$rename:{a:'bb'}}, {a:null}, {bb:1} );
-l( {b:1}, {$rename:{b:'a'}}, {a:1}, {a:1} );
-l( {bb:1}, {$rename:{bb:'a'}}, {a:1}, {a:1} );
+l({a: 1}, {$rename: {a: 'b'}}, {a: null}, {b: 1});
+l({a: 1}, {$rename: {a: 'bb'}}, {a: null}, {bb: 1});
+l({b: 1}, {$rename: {b: 'a'}}, {a: 1}, {a: 1});
+l({bb: 1}, {$rename: {bb: 'a'}}, {a: 1}, {a: 1});
diff --git a/jstests/core/rename5.js b/jstests/core/rename5.js
index 927c767b981..313b520ed7d 100644
--- a/jstests/core/rename5.js
+++ b/jstests/core/rename5.js
@@ -3,44 +3,44 @@
t = db.jstests_rename5;
t.drop();
-t.ensureIndex( { a:1 } );
-t.save( { b:1 } );
+t.ensureIndex({a: 1});
+t.save({b: 1});
-t.update( {}, { $rename:{ a:'b' } } );
-assert.eq( 1, t.findOne().b );
+t.update({}, {$rename: {a: 'b'}});
+assert.eq(1, t.findOne().b);
// Test with another modifier.
-t.update( {}, { $rename:{ a:'b' }, $set:{ x:1 } } );
-assert.eq( 1, t.findOne().b );
-assert.eq( 1, t.findOne().x );
+t.update({}, {$rename: {a: 'b'}, $set: {x: 1}});
+assert.eq(1, t.findOne().b);
+assert.eq(1, t.findOne().x);
// Test with an in place modifier.
-t.update( {}, { $rename:{ a:'b' }, $inc:{ x:1 } } );
-assert.eq( 1, t.findOne().b );
-assert.eq( 2, t.findOne().x );
+t.update({}, {$rename: {a: 'b'}, $inc: {x: 1}});
+assert.eq(1, t.findOne().b);
+assert.eq(2, t.findOne().x);
// Check similar cases with upserts.
t.drop();
t.remove({});
-t.update( { b:1 }, { $rename:{ a:'b' } }, true );
-assert.eq( 1, t.findOne().b );
+t.update({b: 1}, {$rename: {a: 'b'}}, true);
+assert.eq(1, t.findOne().b);
t.remove({});
-t.update( { b:1 }, { $rename:{ a:'b' }, $set:{ c:1 } }, true );
-assert.eq( 1, t.findOne().b );
-assert.eq( 1, t.findOne().c );
+t.update({b: 1}, {$rename: {a: 'b'}, $set: {c: 1}}, true);
+assert.eq(1, t.findOne().b);
+assert.eq(1, t.findOne().c);
t.remove({});
-t.update( { b:1, c:2 }, { $rename:{ a:'b' }, $inc:{ c:1 } }, true );
-assert.eq( 1, t.findOne().b );
-assert.eq( 3, t.findOne().c );
+t.update({b: 1, c: 2}, {$rename: {a: 'b'}, $inc: {c: 1}}, true);
+assert.eq(1, t.findOne().b);
+assert.eq(3, t.findOne().c);
// Check a similar case with multiple renames of an unindexed document.
t.drop();
-t.save( { b:1, x:1 } );
-t.update( {}, { $rename: { a:'b', x:'y' } } );
-assert.eq( 1, t.findOne().b );
-assert.eq( 1, t.findOne().y );
-assert( !t.findOne().x );
+t.save({b: 1, x: 1});
+t.update({}, {$rename: {a: 'b', x: 'y'}});
+assert.eq(1, t.findOne().b);
+assert.eq(1, t.findOne().y);
+assert(!t.findOne().x);
diff --git a/jstests/core/rename6.js b/jstests/core/rename6.js
index 5e77b4c45a6..159e0e7d1b5 100644
--- a/jstests/core/rename6.js
+++ b/jstests/core/rename6.js
@@ -8,17 +8,19 @@ c = "rename2c";
dbc = testDB.getCollection(c);
d = "dest4567890123456789012345678901234567890123456789012345678901234567890";
dbd = testDB.getCollection(d);
-dbc.ensureIndex({ "name" : 1,
- "date" : 1,
- "time" : 1,
- "renameCollection" : 1,
- "mongodb" : 1,
- "testing" : 1,
- "data" : 1});
-//Checking for the newly created index and the _id index in original collection
+dbc.ensureIndex({
+ "name": 1,
+ "date": 1,
+ "time": 1,
+ "renameCollection": 1,
+ "mongodb": 1,
+ "testing": 1,
+ "data": 1
+});
+// Checking for the newly created index and the _id index in original collection
assert.eq(2, dbc.getIndexes().length, "Long Rename Init");
-//Should fail to rename collection as the index namespace is too long
-assert.commandFailed( dbc.renameCollection( dbd ) , "Long Rename Exec" );
-//Since we failed we should have the 2 indexes unmoved and no indexes under the new collection name
+// Should fail to rename collection as the index namespace is too long
+assert.commandFailed(dbc.renameCollection(dbd), "Long Rename Exec");
+// Since we failed we should have the 2 indexes unmoved and no indexes under the new collection name
assert.eq(2, dbc.getIndexes().length, "Long Rename Result 1");
assert.eq(0, dbd.getIndexes().length, "Long Rename Result 2");
diff --git a/jstests/core/rename7.js b/jstests/core/rename7.js
index 4b9258fd671..85b48c64ce7 100644
--- a/jstests/core/rename7.js
+++ b/jstests/core/rename7.js
@@ -4,9 +4,9 @@
// ***************************************************************
// Set up namespaces a and b.
-var admin = db.getMongo().getDB( "admin" );
-var db_a = db.getMongo().getDB( "db_a" );
-var db_b = db.getMongo().getDB( "db_b" );
+var admin = db.getMongo().getDB("admin");
+var db_a = db.getMongo().getDB("db_a");
+var db_b = db.getMongo().getDB("db_b");
var a = db_a.rename7;
var b = db_b.rename7;
@@ -19,54 +19,54 @@ a.drop();
b.drop();
// Put some documents and indexes in a.
-a.save( {a: 1} );
-a.save( {a: 2} );
-a.save( {a: 3} );
-a.ensureIndex( {a: 1} );
-a.ensureIndex( {b: 1} );
+a.save({a: 1});
+a.save({a: 2});
+a.save({a: 3});
+a.ensureIndex({a: 1});
+a.ensureIndex({b: 1});
-assert.commandWorked( admin.runCommand( {renameCollection: "db_a.rename7", to: "db_b.rename7"} ) );
+assert.commandWorked(admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7"}));
-assert.eq( 0, a.find().count() );
-assert( db_a.getCollectionNames().indexOf( "rename7" ) < 0 );
+assert.eq(0, a.find().count());
+assert(db_a.getCollectionNames().indexOf("rename7") < 0);
-assert.eq( 3, b.find().count() );
-assert( db_b.getCollectionNames().indexOf( "rename7" ) >= 0 );
+assert.eq(3, b.find().count());
+assert(db_b.getCollectionNames().indexOf("rename7") >= 0);
a.drop();
b.drop();
// Test that the dropTarget option works when renaming across databases.
-a.save( {} );
-b.save( {} );
-assert.commandFailed( admin.runCommand( {renameCollection: "db_a.rename7", to: "db_b.rename7"} ) );
-assert.commandWorked( admin.runCommand( {renameCollection: "db_a.rename7",
- to: "db_b.rename7", dropTarget: true} ) );
+a.save({});
+b.save({});
+assert.commandFailed(admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7"}));
+assert.commandWorked(
+ admin.runCommand({renameCollection: "db_a.rename7", to: "db_b.rename7", dropTarget: true}));
a.drop();
b.drop();
// Capped collection testing.
-db_a.createCollection( "rename7_capped", {capped:true, size:10000} );
+db_a.createCollection("rename7_capped", {capped: true, size: 10000});
a = db_a.rename7_capped;
b = db_b.rename7_capped;
-a.save( {a: 1} );
-a.save( {a: 2} );
-a.save( {a: 3} );
+a.save({a: 1});
+a.save({a: 2});
+a.save({a: 3});
previousMaxSize = a.stats().maxSize;
-assert.commandWorked( admin.runCommand( {renameCollection: "db_a.rename7_capped",
- to: "db_b.rename7_capped"} ) );
+assert.commandWorked(
+ admin.runCommand({renameCollection: "db_a.rename7_capped", to: "db_b.rename7_capped"}));
-assert.eq( 0, a.find().count() );
-assert( db_a.getCollectionNames().indexOf( "rename7_capped" ) < 0 );
+assert.eq(0, a.find().count());
+assert(db_a.getCollectionNames().indexOf("rename7_capped") < 0);
-assert.eq( 3, b.find().count() );
-assert( db_b.getCollectionNames().indexOf( "rename7_capped" ) >= 0 );
-printjson( db_b.rename7_capped.stats() );
-assert( db_b.rename7_capped.stats().capped );
-assert.eq( previousMaxSize, b.stats().maxSize );
+assert.eq(3, b.find().count());
+assert(db_b.getCollectionNames().indexOf("rename7_capped") >= 0);
+printjson(db_b.rename7_capped.stats());
+assert(db_b.rename7_capped.stats().capped);
+assert.eq(previousMaxSize, b.stats().maxSize);
a.drop();
b.drop();
diff --git a/jstests/core/rename8.js b/jstests/core/rename8.js
index 8b955824ea8..af332e30239 100644
--- a/jstests/core/rename8.js
+++ b/jstests/core/rename8.js
@@ -1,6 +1,7 @@
// SERVER-12591: prevent renaming to arbitrary system collections.
-var testdb = db.getSiblingDB("rename8"); // to avoid breaking other tests when we touch system.users
+var testdb =
+ db.getSiblingDB("rename8"); // to avoid breaking other tests when we touch system.users
var coll = testdb.rename8;
var systemNamespaces = testdb.system.namespaces;
var systemFoo = testdb.system.foo;
@@ -17,7 +18,7 @@ assert.commandFailed(systemFoo.renameCollection(coll.getName()));
// same with system.namespaces, even though it does exist
assert.commandFailed(coll.renameCollection(systemNamespaces.getName()));
-assert.commandFailed(coll.renameCollection(systemNamespaces.getName(), /*dropTarget*/true));
+assert.commandFailed(coll.renameCollection(systemNamespaces.getName(), /*dropTarget*/ true));
assert.commandFailed(systemNamespaces.renameCollection(coll.getName()));
// system.users is whitelisted so these should work
diff --git a/jstests/core/rename_stayTemp.js b/jstests/core/rename_stayTemp.js
index ccada6abf39..d8451af2d2d 100644
--- a/jstests/core/rename_stayTemp.js
+++ b/jstests/core/rename_stayTemp.js
@@ -4,17 +4,19 @@ dest = 'rename_stayTemp_dest';
db[orig].drop();
db[dest].drop();
-function ns(coll){ return db[coll].getFullName(); }
+function ns(coll) {
+ return db[coll].getFullName();
+}
-function istemp( name ) {
- var result = db.runCommand( "listCollections", { filter : { name : name } } );
- assert( result.ok );
- var collections = new DBCommandCursor( db.getMongo(), result ).toArray();
- assert.eq( 1, collections.length );
+function istemp(name) {
+ var result = db.runCommand("listCollections", {filter: {name: name}});
+ assert(result.ok);
+ var collections = new DBCommandCursor(db.getMongo(), result).toArray();
+ assert.eq(1, collections.length);
return collections[0].options.temp ? true : false;
}
-db.runCommand({create: orig, temp:1});
+db.runCommand({create: orig, temp: 1});
assert(istemp(orig));
db.adminCommand({renameCollection: ns(orig), to: ns(dest)});
@@ -22,11 +24,8 @@ assert(!istemp(dest));
db[dest].drop();
-db.runCommand({create: orig, temp:1});
-assert( istemp(orig) );
+db.runCommand({create: orig, temp: 1});
+assert(istemp(orig));
db.adminCommand({renameCollection: ns(orig), to: ns(dest), stayTemp: true});
-assert( istemp(dest) );
-
-
-
+assert(istemp(dest));
diff --git a/jstests/core/repair_database.js b/jstests/core/repair_database.js
index c7ac82320c7..45f936b022a 100644
--- a/jstests/core/repair_database.js
+++ b/jstests/core/repair_database.js
@@ -7,23 +7,26 @@
*/
// 1. Drop db
-var mydb = db.getSisterDB( "repairDB" );
+var mydb = db.getSisterDB("repairDB");
mydb.dropDatabase();
var myColl = mydb.a;
// 2
-var doc = {_id:1, a:"hello world"};
+var doc = {
+ _id: 1,
+ a: "hello world"
+};
myColl.insert(doc);
-myColl.ensureIndex({a:1});
+myColl.ensureIndex({a: 1});
mydb.repairDatabase();
var foundDoc = myColl.findOne();
assert.neq(null, foundDoc);
assert.eq(1, foundDoc._id);
-assert.docEq(doc, myColl.findOne({a:doc.a}));
-assert.docEq(doc, myColl.findOne({_id:1}));
+assert.docEq(doc, myColl.findOne({a: doc.a}));
+assert.docEq(doc, myColl.findOne({_id: 1}));
// 3
var myColl2 = mydb.b;
@@ -35,5 +38,5 @@ var myColl2 = mydb.b;
myColl.insert(doc);
myColl2.insert(doc);
mydb.repairDatabase();
-assert.docEq(doc, myColl.findOne({a:doc.a}));
-assert.docEq(doc, myColl2.findOne({a:doc.a}));
+assert.docEq(doc, myColl.findOne({a: doc.a}));
+assert.docEq(doc, myColl2.findOne({a: doc.a}));
diff --git a/jstests/core/repair_server12955.js b/jstests/core/repair_server12955.js
index 65c9b5f241a..ce0ffa9d11e 100644
--- a/jstests/core/repair_server12955.js
+++ b/jstests/core/repair_server12955.js
@@ -1,9 +1,9 @@
-mydb = db.getSisterDB( "repair_server12955" );
+mydb = db.getSisterDB("repair_server12955");
mydb.dropDatabase();
-mydb.foo.ensureIndex({a:"text"});
-mydb.foo.insert({a:"hello world"});
+mydb.foo.ensureIndex({a: "text"});
+mydb.foo.insert({a: "hello world"});
before = mydb.stats().dataFileVersion;
@@ -11,5 +11,5 @@ mydb.repairDatabase();
after = mydb.stats().dataFileVersion;
-assert.eq( before, after );
+assert.eq(before, after);
mydb.dropDatabase();
diff --git a/jstests/core/return_key.js b/jstests/core/return_key.js
index cf02357759d..b39764846d4 100644
--- a/jstests/core/return_key.js
+++ b/jstests/core/return_key.js
@@ -58,19 +58,25 @@ load("jstests/libs/analyze_plan.js");
assert(isIndexOnly(explain.queryPlanner.winningPlan));
// Unlike other projections, sortKey meta-projection can co-exist with returnKey.
- results = coll.find({}, {c: {$meta: 'sortKey'}})
- .hint({a: 1}).sort({a: -1}).returnKey().toArray();
+ results =
+ coll.find({}, {c: {$meta: 'sortKey'}}).hint({a: 1}).sort({a: -1}).returnKey().toArray();
assert.eq(results, [{a: 3, c: {'': 3}}, {a: 2, c: {'': 2}}, {a: 1, c: {'': 1}}]);
// returnKey with sortKey $meta where there is an in-memory sort.
- results = coll.find({}, {c: {$meta: 'sortKey'}})
- .hint({a: 1}).sort({b: 1}).returnKey().toArray();
+ results =
+ coll.find({}, {c: {$meta: 'sortKey'}}).hint({a: 1}).sort({b: 1}).returnKey().toArray();
assert.eq(results, [{a: 3, c: {'': 1}}, {a: 2, c: {'': 2}}, {a: 1, c: {'': 3}}]);
// returnKey with multiple sortKey $meta projections.
results = coll.find({}, {c: {$meta: 'sortKey'}, d: {$meta: 'sortKey'}})
- .hint({a: 1}).sort({b: 1}).returnKey().toArray();
- assert.eq(results, [{a: 3, c: {'': 1}, d: {'': 1}},
- {a: 2, c: {'': 2}, d: {'': 2}},
- {a: 1, c: {'': 3}, d: {'': 3}}]);
+ .hint({a: 1})
+ .sort({b: 1})
+ .returnKey()
+ .toArray();
+ assert.eq(results,
+ [
+ {a: 3, c: {'': 1}, d: {'': 1}},
+ {a: 2, c: {'': 2}, d: {'': 2}},
+ {a: 1, c: {'': 3}, d: {'': 3}}
+ ]);
})();
diff --git a/jstests/core/role_management_helpers.js b/jstests/core/role_management_helpers.js
index 9c24f552f84..fa25d8a2d57 100644
--- a/jstests/core/role_management_helpers.js
+++ b/jstests/core/role_management_helpers.js
@@ -26,112 +26,126 @@ function assertHasPrivilege(privilegeArray, privilege) {
return;
}
}
- assert(false, "Privilege " + tojson(privilege) + " not found in privilege array: " +
- tojson(privilegeArray));
+ assert(false,
+ "Privilege " + tojson(privilege) + " not found in privilege array: " +
+ tojson(privilegeArray));
}
(function(db) {
- var db = db.getSiblingDB("role_management_helpers");
- db.dropDatabase();
- db.dropAllRoles();
-
- db.createRole({role:'roleA',
- roles: [],
- privileges: [{resource: {db:db.getName(), collection: "foo"},
- actions: ['find']}]});
- db.createRole({role:'roleB', privileges: [], roles: ["roleA"]});
- db.createRole({role:'roleC', privileges: [], roles: []});
-
- // Test getRole
- var roleObj = db.getRole("roleA");
- assert.eq(0, roleObj.roles.length);
- assert.eq(null, roleObj.privileges);
- roleObj = db.getRole("roleA", {showPrivileges: true});
- assert.eq(1, roleObj.privileges.length);
- assertHasPrivilege(roleObj.privileges,
- {resource: {db:db.getName(), collection:"foo"}, actions:['find']});
- roleObj = db.getRole("roleB", {showPrivileges: true});
- assert.eq(1, roleObj.inheritedPrivileges.length); // inherited from roleA
- assertHasPrivilege(roleObj.inheritedPrivileges,
- {resource: {db:db.getName(), collection:"foo"}, actions:['find']});
- assert.eq(1, roleObj.roles.length);
- assertHasRole(roleObj.roles, "roleA", db.getName());
-
- // Test getRoles
- var roles = db.getRoles();
- assert.eq(3, roles.length);
- printjson(roles);
- assert(roles[0].role == 'roleA' || roles[1].role == 'roleA' || roles[2].role == 'roleA');
- assert(roles[0].role == 'roleB' || roles[1].role == 'roleB' || roles[2].role == 'roleB');
- assert(roles[0].role == 'roleC' || roles[1].role == 'roleC' || roles[2].role == 'roleC');
- assert.eq(null, roles[0].inheritedPrivileges);
- var roles = db.getRoles({showPrivileges: true, showBuiltinRoles: true});
- assert.eq(9, roles.length);
- assert.neq(null, roles[0].inheritedPrivileges);
-
-
- // Granting roles to nonexistent role fails
- assert.throws(function() { db.grantRolesToRole("fakeRole", ['dbAdmin']); });
- // Granting roles to built-in role fails
- assert.throws(function() { db.grantRolesToRole("readWrite", ['dbAdmin']); });
- // Granting non-existant role fails
- assert.throws(function() { db.grantRolesToRole("roleB", ['dbAdmin', 'fakeRole']); });
-
- roleObj = db.getRole("roleB", {showPrivileges: true});
- assert.eq(1, roleObj.inheritedPrivileges.length);
- assert.eq(1, roleObj.roles.length);
- assertHasRole(roleObj.roles, "roleA", db.getName());
-
- // Granting a role you already have is no problem
- db.grantRolesToRole("roleB", ['readWrite', 'roleC']);
- roleObj = db.getRole("roleB", {showPrivileges: true});
- assert.gt(roleObj.inheritedPrivileges.length, 1); // Got privileges from readWrite role
- assert.eq(3, roleObj.roles.length);
- assertHasRole(roleObj.roles, "readWrite", db.getName());
- assertHasRole(roleObj.roles, "roleA", db.getName());
- assertHasRole(roleObj.roles, "roleC", db.getName());
-
- // Revoking roles the role doesn't have is fine
- db.revokeRolesFromRole("roleB", ['roleA', 'readWrite', 'dbAdmin']);
- roleObj = db.getRole("roleB", {showPrivileges: true});
- assert.eq(0, roleObj.inheritedPrivileges.length);
- assert.eq(1, roleObj.roles.length);
- assertHasRole(roleObj.roles, "roleC", db.getName());
-
- // Privileges on the same resource get collapsed
- db.grantPrivilegesToRole("roleA",
- [{resource: {db:db.getName(), collection:""}, actions:['dropDatabase']},
- {resource: {db:db.getName(), collection:"foo"}, actions:['insert']}]);
- roleObj = db.getRole("roleA", {showPrivileges: true});
- assert.eq(0, roleObj.roles.length);
- assert.eq(2, roleObj.privileges.length);
- assertHasPrivilege(roleObj.privileges,
- {resource: {db:db.getName(), collection:"foo"}, actions:['find', 'insert']});
- assertHasPrivilege(roleObj.privileges,
- {resource: {db:db.getName(), collection:""}, actions:['dropDatabase']});
-
- // Update role
- db.updateRole("roleA", {roles:['roleB'],
- privileges:[{resource: {db: db.getName(), collection:"foo"},
- actions:['find']}]});
- roleObj = db.getRole("roleA", {showPrivileges: true});
- assert.eq(1, roleObj.roles.length);
- assertHasRole(roleObj.roles, "roleB", db.getName());
- assert.eq(1, roleObj.privileges.length);
- assertHasPrivilege(roleObj.privileges,
- {resource: {db:db.getName(), collection:"foo"}, actions:['find']});
-
- // Test dropRole
- db.dropRole('roleC');
- assert.eq(null, db.getRole('roleC'));
- roleObj = db.getRole("roleB", {showPrivileges: true});
- assert.eq(0, roleObj.privileges.length);
- assert.eq(0, roleObj.roles.length);
-
- // Test dropAllRoles
- db.dropAllRoles();
- assert.eq(null, db.getRole('roleA'));
- assert.eq(null, db.getRole('roleB'));
- assert.eq(null, db.getRole('roleC'));
+ var db = db.getSiblingDB("role_management_helpers");
+ db.dropDatabase();
+ db.dropAllRoles();
+
+ db.createRole({
+ role: 'roleA',
+ roles: [],
+ privileges: [{resource: {db: db.getName(), collection: "foo"}, actions: ['find']}]
+ });
+ db.createRole({role: 'roleB', privileges: [], roles: ["roleA"]});
+ db.createRole({role: 'roleC', privileges: [], roles: []});
+
+ // Test getRole
+ var roleObj = db.getRole("roleA");
+ assert.eq(0, roleObj.roles.length);
+ assert.eq(null, roleObj.privileges);
+ roleObj = db.getRole("roleA", {showPrivileges: true});
+ assert.eq(1, roleObj.privileges.length);
+ assertHasPrivilege(roleObj.privileges,
+ {resource: {db: db.getName(), collection: "foo"}, actions: ['find']});
+ roleObj = db.getRole("roleB", {showPrivileges: true});
+ assert.eq(1, roleObj.inheritedPrivileges.length); // inherited from roleA
+ assertHasPrivilege(roleObj.inheritedPrivileges,
+ {resource: {db: db.getName(), collection: "foo"}, actions: ['find']});
+ assert.eq(1, roleObj.roles.length);
+ assertHasRole(roleObj.roles, "roleA", db.getName());
+
+ // Test getRoles
+ var roles = db.getRoles();
+ assert.eq(3, roles.length);
+ printjson(roles);
+ assert(roles[0].role == 'roleA' || roles[1].role == 'roleA' || roles[2].role == 'roleA');
+ assert(roles[0].role == 'roleB' || roles[1].role == 'roleB' || roles[2].role == 'roleB');
+ assert(roles[0].role == 'roleC' || roles[1].role == 'roleC' || roles[2].role == 'roleC');
+ assert.eq(null, roles[0].inheritedPrivileges);
+ var roles = db.getRoles({showPrivileges: true, showBuiltinRoles: true});
+ assert.eq(9, roles.length);
+ assert.neq(null, roles[0].inheritedPrivileges);
+
+ // Granting roles to nonexistent role fails
+ assert.throws(function() {
+ db.grantRolesToRole("fakeRole", ['dbAdmin']);
+ });
+ // Granting roles to built-in role fails
+ assert.throws(function() {
+ db.grantRolesToRole("readWrite", ['dbAdmin']);
+ });
+ // Granting non-existant role fails
+ assert.throws(function() {
+ db.grantRolesToRole("roleB", ['dbAdmin', 'fakeRole']);
+ });
+
+ roleObj = db.getRole("roleB", {showPrivileges: true});
+ assert.eq(1, roleObj.inheritedPrivileges.length);
+ assert.eq(1, roleObj.roles.length);
+ assertHasRole(roleObj.roles, "roleA", db.getName());
+
+ // Granting a role you already have is no problem
+ db.grantRolesToRole("roleB", ['readWrite', 'roleC']);
+ roleObj = db.getRole("roleB", {showPrivileges: true});
+ assert.gt(roleObj.inheritedPrivileges.length, 1); // Got privileges from readWrite role
+ assert.eq(3, roleObj.roles.length);
+ assertHasRole(roleObj.roles, "readWrite", db.getName());
+ assertHasRole(roleObj.roles, "roleA", db.getName());
+ assertHasRole(roleObj.roles, "roleC", db.getName());
+
+ // Revoking roles the role doesn't have is fine
+ db.revokeRolesFromRole("roleB", ['roleA', 'readWrite', 'dbAdmin']);
+ roleObj = db.getRole("roleB", {showPrivileges: true});
+ assert.eq(0, roleObj.inheritedPrivileges.length);
+ assert.eq(1, roleObj.roles.length);
+ assertHasRole(roleObj.roles, "roleC", db.getName());
+
+ // Privileges on the same resource get collapsed
+ db.grantPrivilegesToRole(
+ "roleA",
+ [
+ {resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']},
+ {resource: {db: db.getName(), collection: "foo"}, actions: ['insert']}
+ ]);
+ roleObj = db.getRole("roleA", {showPrivileges: true});
+ assert.eq(0, roleObj.roles.length);
+ assert.eq(2, roleObj.privileges.length);
+ assertHasPrivilege(
+ roleObj.privileges,
+ {resource: {db: db.getName(), collection: "foo"}, actions: ['find', 'insert']});
+ assertHasPrivilege(roleObj.privileges,
+ {resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']});
+
+ // Update role
+ db.updateRole(
+ "roleA",
+ {
+ roles: ['roleB'],
+ privileges: [{resource: {db: db.getName(), collection: "foo"}, actions: ['find']}]
+ });
+ roleObj = db.getRole("roleA", {showPrivileges: true});
+ assert.eq(1, roleObj.roles.length);
+ assertHasRole(roleObj.roles, "roleB", db.getName());
+ assert.eq(1, roleObj.privileges.length);
+ assertHasPrivilege(roleObj.privileges,
+ {resource: {db: db.getName(), collection: "foo"}, actions: ['find']});
+
+ // Test dropRole
+ db.dropRole('roleC');
+ assert.eq(null, db.getRole('roleC'));
+ roleObj = db.getRole("roleB", {showPrivileges: true});
+ assert.eq(0, roleObj.privileges.length);
+ assert.eq(0, roleObj.roles.length);
+
+ // Test dropAllRoles
+ db.dropAllRoles();
+ assert.eq(null, db.getRole('roleA'));
+ assert.eq(null, db.getRole('roleB'));
+ assert.eq(null, db.getRole('roleC'));
}(db));
diff --git a/jstests/core/run_program1.js b/jstests/core/run_program1.js
index 871b53c8ddd..e5f320b0bf4 100644
--- a/jstests/core/run_program1.js
+++ b/jstests/core/run_program1.js
@@ -1,19 +1,19 @@
-if ( ! _isWindows() ) {
-
+if (!_isWindows()) {
// note that normal program exit returns 0
- assert.eq (0, runProgram('true'));
+ assert.eq(0, runProgram('true'));
assert.neq(0, runProgram('false'));
assert.neq(0, runProgram('this_program_doesnt_exit'));
- //verify output visually
+ // verify output visually
runProgram('echo', 'Hello', 'World.', 'How are you?');
- runProgram('bash', '-c', 'echo Hello World. "How are you?"'); // only one space is printed between Hello and World
+ runProgram('bash', '-c', 'echo Hello World. "How are you?"'); // only one space is
+ // printed between Hello
+ // and World
// numbers can be passed as numbers or strings
runProgram('sleep', 0.5);
runProgram('sleep', '0.5');
} else {
-
runProgram('cmd', '/c', 'echo hello windows');
}
diff --git a/jstests/core/server1470.js b/jstests/core/server1470.js
index 3ab39bb3c38..42587961665 100644
--- a/jstests/core/server1470.js
+++ b/jstests/core/server1470.js
@@ -2,19 +2,17 @@
t = db.server1470;
t.drop();
-q = { "name" : "first" , "pic" : { "$ref" : "foo", "$id" : ObjectId("4c48d04cd33a5a92628c9af6") } };
-t.update( q , {$set:{ x : 1 } } , true, true );
+q = {
+ "name": "first",
+ "pic": {"$ref": "foo", "$id": ObjectId("4c48d04cd33a5a92628c9af6")}
+};
+t.update(q, {$set: {x: 1}}, true, true);
ref = t.findOne().pic;
-assert.eq( "object", typeof( ref ) );
-assert.eq( q.pic["$ref"] , ref["$ref"] );
-assert.eq( q.pic["$id"] , ref["$id"] );
+assert.eq("object", typeof(ref));
+assert.eq(q.pic["$ref"], ref["$ref"]);
+assert.eq(q.pic["$id"], ref["$id"]);
// just make we haven't broken other update operators
t.drop();
-t.update( { _id : 1 , x : { $gt : 5 } } , { $set : { y : 1 } } , true );
-assert.eq( { _id : 1 , y : 1 } , t.findOne() );
-
-
-
-
-
+t.update({_id: 1, x: {$gt: 5}}, {$set: {y: 1}}, true);
+assert.eq({_id: 1, y: 1}, t.findOne());
diff --git a/jstests/core/server14753.js b/jstests/core/server14753.js
index 81d865996db..cd6ea309399 100644
--- a/jstests/core/server14753.js
+++ b/jstests/core/server14753.js
@@ -9,7 +9,11 @@
t.drop();
t.ensureIndex({a: 1});
t.ensureIndex({b: 1});
- for (var i = 0; i < 20; i++) { t.insert({b: i}); }
- for (var i = 0; i < 20; i++) { t.find({b: 1}).sort({a: 1}).next(); }
+ for (var i = 0; i < 20; i++) {
+ t.insert({b: i});
+ }
+ for (var i = 0; i < 20; i++) {
+ t.find({b: 1}).sort({a: 1}).next();
+ }
}());
diff --git a/jstests/core/server5346.js b/jstests/core/server5346.js
index f627d0e68bd..18f2f019e5e 100644
--- a/jstests/core/server5346.js
+++ b/jstests/core/server5346.js
@@ -2,14 +2,16 @@
t = db.server5346;
t.drop();
-x = { _id : 1 , versions : {} };
-t.insert( x );
+x = {
+ _id: 1,
+ versions: {}
+};
+t.insert(x);
-t.update({ _id : 1 }, { $inc : { "versions.2_01" : 1 } } );
-t.update({ _id : 1 }, { $inc : { "versions.2_1" : 2 } } );
-t.update({ _id : 1 }, { $inc : { "versions.01" : 3 } } );
-t.update({ _id : 1 }, { $inc : { "versions.1" : 4 } } );
+t.update({_id: 1}, {$inc: {"versions.2_01": 1}});
+t.update({_id: 1}, {$inc: {"versions.2_1": 2}});
+t.update({_id: 1}, {$inc: {"versions.01": 3}});
+t.update({_id: 1}, {$inc: {"versions.1": 4}});
// Make sure the correct fields are set, without duplicates.
-assert.docEq( { "_id" : 1, "versions" : { "01" : 3, "1" : 4, "2_01" : 1, "2_1" : 2 } },
- t.findOne());
+assert.docEq({"_id": 1, "versions": {"01": 3, "1": 4, "2_01": 1, "2_1": 2}}, t.findOne());
diff --git a/jstests/core/server7756.js b/jstests/core/server7756.js
index 5a7177ebcc9..844c3a40d4d 100644
--- a/jstests/core/server7756.js
+++ b/jstests/core/server7756.js
@@ -2,11 +2,10 @@
t = db.server7756;
t.drop();
-t.save( { a:[ { 1:'x' }, 'y' ] } );
+t.save({a: [{1: 'x'}, 'y']});
-assert.eq( 1, t.count( { 'a.1':'x' } ) );
-assert.eq( 1, t.count( { 'a.1':'y' } ) );
-
-assert.eq( 1, t.count( { 'a.1':/x/ } ) );
-assert.eq( 1, t.count( { 'a.1':/y/ } ) );
+assert.eq(1, t.count({'a.1': 'x'}));
+assert.eq(1, t.count({'a.1': 'y'}));
+assert.eq(1, t.count({'a.1': /x/}));
+assert.eq(1, t.count({'a.1': /y/}));
diff --git a/jstests/core/server9385.js b/jstests/core/server9385.js
index ee86891ce2a..5e2a82a7ef2 100644
--- a/jstests/core/server9385.js
+++ b/jstests/core/server9385.js
@@ -2,15 +2,15 @@
t = db.server9385;
t.drop();
-t.insert( { _id : 1, x : 1 } );
+t.insert({_id: 1, x: 1});
x = t.findOne();
x._id = 2;
-t.save( x );
+t.save(x);
-t.find().forEach( printjson );
+t.find().forEach(printjson);
-assert.eq( 2, t.find().count() );
-assert.eq( 2, t.find().itcount() );
+assert.eq(2, t.find().count());
+assert.eq(2, t.find().itcount());
-assert( t.findOne( { _id : 1 } ), "original insert missing" );
-assert( t.findOne( { _id : 2 } ), "save didn't work?" );
+assert(t.findOne({_id: 1}), "original insert missing");
+assert(t.findOne({_id: 2}), "save didn't work?");
diff --git a/jstests/core/server9547.js b/jstests/core/server9547.js
index 67cacfc22a7..9717893cbfb 100644
--- a/jstests/core/server9547.js
+++ b/jstests/core/server9547.js
@@ -4,7 +4,7 @@
var t = db.server9547;
t.drop();
-for (var i=0; i<10; i++) {
+for (var i = 0; i < 10; i++) {
t.save({a: i});
}
diff --git a/jstests/core/set1.js b/jstests/core/set1.js
index d741387af58..33840e3f431 100644
--- a/jstests/core/set1.js
+++ b/jstests/core/set1.js
@@ -2,8 +2,6 @@
t = db.set1;
t.drop();
-t.insert( { _id : 1, emb : {} });
-t.update( { _id : 1 }, { $set : { emb : { 'a.dot' : 'data'} }});
-assert.eq( { _id : 1 , emb : {} } , t.findOne() , "A" );
-
-
+t.insert({_id: 1, emb: {}});
+t.update({_id: 1}, {$set: {emb: {'a.dot': 'data'}}});
+assert.eq({_id: 1, emb: {}}, t.findOne(), "A");
diff --git a/jstests/core/set2.js b/jstests/core/set2.js
index 221ee407759..c5b6e1c9553 100644
--- a/jstests/core/set2.js
+++ b/jstests/core/set2.js
@@ -2,17 +2,16 @@
t = db.set2;
t.drop();
-t.save( { _id : 1 , x : true , y : { x : true } } );
-assert.eq( true , t.findOne().x );
+t.save({_id: 1, x: true, y: {x: true}});
+assert.eq(true, t.findOne().x);
-t.update( { _id : 1 } , { $set : { x : 17 } } );
-assert.eq( 17 , t.findOne().x );
+t.update({_id: 1}, {$set: {x: 17}});
+assert.eq(17, t.findOne().x);
-assert.eq( true , t.findOne().y.x );
-t.update( { _id : 1 } , { $set : { "y.x" : 17 } } );
-assert.eq( 17 , t.findOne().y.x );
-
-t.update( { _id : 1 } , { $set : { a : 2 , b : 3 } } );
-assert.eq( 2 , t.findOne().a );
-assert.eq( 3 , t.findOne().b );
+assert.eq(true, t.findOne().y.x);
+t.update({_id: 1}, {$set: {"y.x": 17}});
+assert.eq(17, t.findOne().y.x);
+t.update({_id: 1}, {$set: {a: 2, b: 3}});
+assert.eq(2, t.findOne().a);
+assert.eq(3, t.findOne().b);
diff --git a/jstests/core/set3.js b/jstests/core/set3.js
index f654ab64889..8f7d78d894f 100644
--- a/jstests/core/set3.js
+++ b/jstests/core/set3.js
@@ -2,10 +2,9 @@
t = db.set3;
t.drop();
-t.insert( { "test1" : { "test2" : { "abcdefghijklmnopqrstu" : {"id":1} } } } );
-t.update( {}, {"$set":{"test1.test2.abcdefghijklmnopqrstuvwxyz":{"id":2}}});
+t.insert({"test1": {"test2": {"abcdefghijklmnopqrstu": {"id": 1}}}});
+t.update({}, {"$set": {"test1.test2.abcdefghijklmnopqrstuvwxyz": {"id": 2}}});
x = t.findOne();
-assert.eq( 1 , x.test1.test2.abcdefghijklmnopqrstu.id , "A" );
-assert.eq( 2 , x.test1.test2.abcdefghijklmnopqrstuvwxyz.id , "B" );
-
+assert.eq(1, x.test1.test2.abcdefghijklmnopqrstu.id, "A");
+assert.eq(2, x.test1.test2.abcdefghijklmnopqrstuvwxyz.id, "B");
diff --git a/jstests/core/set4.js b/jstests/core/set4.js
index d26a241f322..989cf82b223 100644
--- a/jstests/core/set4.js
+++ b/jstests/core/set4.js
@@ -2,14 +2,20 @@
t = db.set4;
t.drop();
-orig = { _id:1 , a : [ { x : 1 } ]};
-t.insert( orig );
+orig = {
+ _id: 1,
+ a: [{x: 1}]
+};
+t.insert(orig);
-t.update( {}, { $set : { 'a.0.x' : 2, 'foo.bar' : 3 } } );
-orig.a[0].x = 2; orig.foo = { bar : 3 };
-assert.eq( orig , t.findOne() , "A" );
-
-t.update( {}, { $set : { 'a.0.x' : 4, 'foo.bar' : 5 } } );
-orig.a[0].x = 4; orig.foo.bar = 5;
-assert.eq( orig , t.findOne() , "B" );
+t.update({}, {$set: {'a.0.x': 2, 'foo.bar': 3}});
+orig.a[0].x = 2;
+orig.foo = {
+ bar: 3
+};
+assert.eq(orig, t.findOne(), "A");
+t.update({}, {$set: {'a.0.x': 4, 'foo.bar': 5}});
+orig.a[0].x = 4;
+orig.foo.bar = 5;
+assert.eq(orig, t.findOne(), "B");
diff --git a/jstests/core/set5.js b/jstests/core/set5.js
index e24c4fdf6a7..dfa28b1e3e8 100644
--- a/jstests/core/set5.js
+++ b/jstests/core/set5.js
@@ -2,16 +2,16 @@
t = db.set5;
t.drop();
-function check( want , err ){
+function check(want, err) {
var x = t.findOne();
delete x._id;
- assert.docEq( want , x , err );
+ assert.docEq(want, x, err);
}
-t.update( { a : 5 } , { $set : { a : 6 , b : null } } , true );
-check( { a : 6 , b : null } , "A" );
+t.update({a: 5}, {$set: {a: 6, b: null}}, true);
+check({a: 6, b: null}, "A");
t.drop();
-t.update( { z : 5 } , { $set : { z : 6 , b : null } } , true );
-check( { b : null , z : 6 } , "B" );
+t.update({z: 5}, {$set: {z: 6, b: null}}, true);
+check({b: null, z: 6}, "B");
diff --git a/jstests/core/set6.js b/jstests/core/set6.js
index 87a8100d232..bf0ece0dbf8 100644
--- a/jstests/core/set6.js
+++ b/jstests/core/set6.js
@@ -2,19 +2,21 @@
t = db.set6;
t.drop();
-x = { _id : 1 , r : new DBRef( "foo" , new ObjectId() ) };
-t.insert( x );
-assert.eq( x , t.findOne() , "A" );
+x = {
+ _id: 1,
+ r: new DBRef("foo", new ObjectId())
+};
+t.insert(x);
+assert.eq(x, t.findOne(), "A");
x.r.$id = new ObjectId();
-t.update({}, { $set : { r : x.r } } );
-assert.eq( x , t.findOne() , "B");
+t.update({}, {$set: {r: x.r}});
+assert.eq(x, t.findOne(), "B");
-x.r2 = new DBRef( "foo2" , 5 );
-t.update( {} , { $set : { "r2" : x.r2 } } );
-assert.eq( x , t.findOne() , "C" );
+x.r2 = new DBRef("foo2", 5);
+t.update({}, {$set: {"r2": x.r2}});
+assert.eq(x, t.findOne(), "C");
x.r.$id = 2;
-t.update( {} , { $set : { "r.$id" : 2 } } );
-assert.eq( x.r.$id , t.findOne().r.$id , "D");
-
+t.update({}, {$set: {"r.$id": 2}});
+assert.eq(x.r.$id, t.findOne().r.$id, "D");
diff --git a/jstests/core/set7.js b/jstests/core/set7.js
index bf9870eb015..8aba33aee94 100644
--- a/jstests/core/set7.js
+++ b/jstests/core/set7.js
@@ -5,64 +5,64 @@ var res;
t.drop();
-t.save( {a:[0,1,2,3]} );
-t.update( {}, {$set:{"a.0":2}} );
-assert.eq( [2,1,2,3], t.findOne().a );
+t.save({a: [0, 1, 2, 3]});
+t.update({}, {$set: {"a.0": 2}});
+assert.eq([2, 1, 2, 3], t.findOne().a);
-t.update( {}, {$set:{"a.4":5}} );
-assert.eq( [2,1,2,3,5], t.findOne().a );
+t.update({}, {$set: {"a.4": 5}});
+assert.eq([2, 1, 2, 3, 5], t.findOne().a);
-t.update( {}, {$set:{"a.9":9}} );
-assert.eq( [2,1,2,3,5,null,null,null,null,9], t.findOne().a );
+t.update({}, {$set: {"a.9": 9}});
+assert.eq([2, 1, 2, 3, 5, null, null, null, null, 9], t.findOne().a);
t.drop();
-t.save( {a:[0,1,2,3]} );
-t.update( {}, {$set:{"a.9":9,"a.7":7}} );
-assert.eq( [0,1,2,3,null,null,null,7,null,9], t.findOne().a );
+t.save({a: [0, 1, 2, 3]});
+t.update({}, {$set: {"a.9": 9, "a.7": 7}});
+assert.eq([0, 1, 2, 3, null, null, null, 7, null, 9], t.findOne().a);
t.drop();
-t.save( {a:[0,1,2,3,4,5,6,7,8,9,10]} );
-t.update( {}, {$set:{"a.11":11} } );
-assert.eq( [0,1,2,3,4,5,6,7,8,9,10,11], t.findOne().a );
+t.save({a: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]});
+t.update({}, {$set: {"a.11": 11}});
+assert.eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], t.findOne().a);
t.drop();
-t.save( {} );
-t.update( {}, {$set:{"a.0":4}} );
-assert.eq( {"0":4}, t.findOne().a );
+t.save({});
+t.update({}, {$set: {"a.0": 4}});
+assert.eq({"0": 4}, t.findOne().a);
t.drop();
-t.update( {"a.0":4}, {$set:{b:1}}, true );
-assert.eq( {"0":4}, t.findOne().a );
+t.update({"a.0": 4}, {$set: {b: 1}}, true);
+assert.eq({"0": 4}, t.findOne().a);
t.drop();
-t.save( {a:[]} );
-res = t.update( {}, {$set:{"a.f":1}} );
-assert.writeError( res );
-assert.eq( [], t.findOne().a );
+t.save({a: []});
+res = t.update({}, {$set: {"a.f": 1}});
+assert.writeError(res);
+assert.eq([], t.findOne().a);
// Test requiring proper ordering of multiple mods.
t.drop();
-t.save( {a:[0,1,2,3,4,5,6,7,8,9,10]} );
-t.update( {}, {$set:{"a.11":11,"a.2":-2}} );
-assert.eq( [0,1,-2,3,4,5,6,7,8,9,10,11], t.findOne().a );
+t.save({a: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]});
+t.update({}, {$set: {"a.11": 11, "a.2": -2}});
+assert.eq([0, 1, -2, 3, 4, 5, 6, 7, 8, 9, 10, 11], t.findOne().a);
// Test upsert case
t.drop();
-t.update( {a:[0,1,2,3,4,5,6,7,8,9,10]}, {$set:{"a.11":11} }, true );
-assert.eq( [0,1,2,3,4,5,6,7,8,9,10,11], t.findOne().a );
+t.update({a: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}, {$set: {"a.11": 11}}, true);
+assert.eq([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], t.findOne().a);
// SERVER-3750
t.drop();
-t.save( {a:[]} );
-res = t.update( {}, {$set:{"a.1500000":1}} ); // current limit
-assert.writeOK( res );
+t.save({a: []});
+res = t.update({}, {$set: {"a.1500000": 1}}); // current limit
+assert.writeOK(res);
t.drop();
-t.save( {a:[]} );
-res = t.update( {}, {$set:{"a.1500001":1}} ); // 1 over limit
-assert.writeError( res );
+t.save({a: []});
+res = t.update({}, {$set: {"a.1500001": 1}}); // 1 over limit
+assert.writeError(res);
t.drop();
-t.save( {a:[]} );
-res = t.update( {}, {$set:{"a.1000000000":1}} ); // way over limit
-assert.writeError( res );
+t.save({a: []});
+res = t.update({}, {$set: {"a.1000000000": 1}}); // way over limit
+assert.writeError(res);
diff --git a/jstests/core/set_param1.js b/jstests/core/set_param1.js
index 3b31d2d8caa..2df37442518 100644
--- a/jstests/core/set_param1.js
+++ b/jstests/core/set_param1.js
@@ -1,14 +1,14 @@
// Tests for accessing logLevel server parameter using getParameter/setParameter commands
// and shell helpers.
-old = db.adminCommand( { "getParameter" : "*" } );
-tmp1 = db.adminCommand( { "setParameter" : 1 , "logLevel" : 5 } );
-tmp2 = db.adminCommand( { "setParameter" : 1 , "logLevel" : old.logLevel } );
-now = db.adminCommand( { "getParameter" : "*" } );
+old = db.adminCommand({"getParameter": "*"});
+tmp1 = db.adminCommand({"setParameter": 1, "logLevel": 5});
+tmp2 = db.adminCommand({"setParameter": 1, "logLevel": old.logLevel});
+now = db.adminCommand({"getParameter": "*"});
-assert.eq( old , now , "A" );
-assert.eq( old.logLevel , tmp1.was , "B" );
-assert.eq( 5 , tmp2.was , "C" );
+assert.eq(old, now, "A");
+assert.eq(old.logLevel, tmp1.was, "B");
+assert.eq(5, tmp2.was, "C");
//
// component verbosity
@@ -16,111 +16,103 @@ assert.eq( 5 , tmp2.was , "C" );
// verbosity for log component hierarchy
printjson(old.logComponentVerbosity);
-assert.neq( undefined, old.logComponentVerbosity, "log component verbosity not available" );
-assert.eq( old.logLevel, old.logComponentVerbosity.verbosity,
- "default component verbosity should match logLevel" );
-assert.neq( undefined, old.logComponentVerbosity.storage.journal.verbosity,
- "journal verbosity not available" );
+assert.neq(undefined, old.logComponentVerbosity, "log component verbosity not available");
+assert.eq(old.logLevel,
+ old.logComponentVerbosity.verbosity,
+ "default component verbosity should match logLevel");
+assert.neq(undefined,
+ old.logComponentVerbosity.storage.journal.verbosity,
+ "journal verbosity not available");
// Non-object log component verbosity should be rejected.
-assert.commandFailed(db.adminCommand( { "setParameter" : 1 ,
- logComponentVerbosity : "not an object" } ) );
+assert.commandFailed(db.adminCommand({"setParameter": 1, logComponentVerbosity: "not an object"}));
// Non-numeric verbosity for component should be rejected.
-assert.commandFailed( db.adminCommand( { "setParameter" : 1 ,
- logComponentVerbosity :
- { storage : { journal : {
- verbosity : "not a number" } } } } ) );
+assert.commandFailed(db.adminCommand({
+ "setParameter": 1,
+ logComponentVerbosity: {storage: {journal: {verbosity: "not a number"}}}
+}));
// Invalid component shall be rejected
-assert.commandFailed( db.adminCommand( { "setParameter" : 1 ,
- logComponentVerbosity :
- { NoSuchComponent : { verbosity : 2 } } } ) );
-
+assert.commandFailed(db.adminCommand(
+ {"setParameter": 1, logComponentVerbosity: {NoSuchComponent: {verbosity: 2}}}));
// Set multiple component log levels at once.
-(function () {
- assert.commandWorked( db.adminCommand( {
- "setParameter" : 1 ,
- logComponentVerbosity : {
- verbosity : 2,
- accessControl : { verbosity : 0 },
- storage : {
- verbosity : 3,
- journal : { verbosity : 5 }
- } } } ) );
-
- var result = assert.commandWorked( db.adminCommand(
- { "getParameter": 1, logComponentVerbosity : 1} )).logComponentVerbosity;
-
- assert.eq( 2, result.verbosity );
- assert.eq( 0, result.accessControl.verbosity );
- assert.eq( 3, result.storage.verbosity );
- assert.eq( 5, result.storage.journal.verbosity );
+(function() {
+ assert.commandWorked(db.adminCommand({
+ "setParameter": 1,
+ logComponentVerbosity: {
+ verbosity: 2,
+ accessControl: {verbosity: 0},
+ storage: {verbosity: 3, journal: {verbosity: 5}}
+ }
+ }));
+
+ var result =
+ assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1}))
+ .logComponentVerbosity;
+
+ assert.eq(2, result.verbosity);
+ assert.eq(0, result.accessControl.verbosity);
+ assert.eq(3, result.storage.verbosity);
+ assert.eq(5, result.storage.journal.verbosity);
})();
-
// Set multiple component log levels at once.
// Unrecognized field names not mapping to a log component shall be rejected
// No changes shall apply.
-(function () {
- assert.commandFailed( db.adminCommand( {
- "setParameter" : 1 ,
- logComponentVerbosity : {
- verbosity : 6,
- accessControl : { verbosity : 5 },
- storage : {
- verbosity : 4,
- journal : { verbosity : 6 }
- },
- NoSuchComponent : { verbosity : 2 },
- extraField : 123 } } ) );
-
- var result = assert.commandWorked( db.adminCommand(
- { "getParameter": 1, logComponentVerbosity : 1} )).logComponentVerbosity;
-
- assert.eq( 2, result.verbosity );
- assert.eq( 0, result.accessControl.verbosity );
- assert.eq( 3, result.storage.verbosity );
- assert.eq( 5, result.storage.journal.verbosity );
+(function() {
+ assert.commandFailed(db.adminCommand({
+ "setParameter": 1,
+ logComponentVerbosity: {
+ verbosity: 6,
+ accessControl: {verbosity: 5},
+ storage: {verbosity: 4, journal: {verbosity: 6}},
+ NoSuchComponent: {verbosity: 2},
+ extraField: 123
+ }
+ }));
+
+ var result =
+ assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1}))
+ .logComponentVerbosity;
+
+ assert.eq(2, result.verbosity);
+ assert.eq(0, result.accessControl.verbosity);
+ assert.eq(3, result.storage.verbosity);
+ assert.eq(5, result.storage.journal.verbosity);
})();
-
// Clear verbosity for default and journal.
-(function () {
- assert.commandWorked( db.adminCommand( {
- "setParameter" : 1 ,
- logComponentVerbosity : {
- verbosity: -1,
- storage : {
- journal : { verbosity : -1 } } } } ) );
-
- var result = assert.commandWorked( db.adminCommand(
- { "getParameter": 1, logComponentVerbosity : 1} )).logComponentVerbosity;
-
- assert.eq( 0, result.verbosity );
- assert.eq( 0, result.accessControl.verbosity );
- assert.eq( 3, result.storage.verbosity );
- assert.eq( -1, result.storage.journal.verbosity );
+(function() {
+ assert.commandWorked(db.adminCommand({
+ "setParameter": 1,
+ logComponentVerbosity: {verbosity: -1, storage: {journal: {verbosity: -1}}}
+ }));
+
+ var result =
+ assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1}))
+ .logComponentVerbosity;
+
+ assert.eq(0, result.verbosity);
+ assert.eq(0, result.accessControl.verbosity);
+ assert.eq(3, result.storage.verbosity);
+ assert.eq(-1, result.storage.journal.verbosity);
})();
-
// Set accessControl verbosity using numerical level instead of
// subdocument with 'verbosity' field.
-(function () {
- assert.commandWorked( db.adminCommand( {
- "setParameter" : 1,
- logComponentVerbosity : {
- accessControl : 5 } } ) );
+(function() {
+ assert.commandWorked(
+ db.adminCommand({"setParameter": 1, logComponentVerbosity: {accessControl: 5}}));
- var result = assert.commandWorked( db.adminCommand(
- { "getParameter": 1, logComponentVerbosity : 1} )).logComponentVerbosity;
+ var result =
+ assert.commandWorked(db.adminCommand({"getParameter": 1, logComponentVerbosity: 1}))
+ .logComponentVerbosity;
- assert.eq( 5, result.accessControl.verbosity );
+ assert.eq(5, result.accessControl.verbosity);
})();
-
// Restore old verbosity values.
-assert.commandWorked( db.adminCommand( {
- "setParameter" : 1 ,
- logComponentVerbosity : old.logComponentVerbosity } ) );
+assert.commandWorked(
+ db.adminCommand({"setParameter": 1, logComponentVerbosity: old.logComponentVerbosity}));
diff --git a/jstests/core/shell1.js b/jstests/core/shell1.js
index 6d200e7a5b1..7ea23f8d3a5 100644
--- a/jstests/core/shell1.js
+++ b/jstests/core/shell1.js
@@ -1,8 +1,8 @@
x = 1;
-shellHelper( "show", "tables;" );
-shellHelper( "show", "tables" );
-shellHelper( "show", "tables ;" );
+shellHelper("show", "tables;");
+shellHelper("show", "tables");
+shellHelper("show", "tables ;");
// test slaveOk levels
assert(!db.getSlaveOk() && !db.test.getSlaveOk() && !db.getMongo().getSlaveOk(), "slaveOk 1");
@@ -12,4 +12,3 @@ db.setSlaveOk(false);
assert(!db.getSlaveOk() && !db.test.getSlaveOk() && db.getMongo().getSlaveOk(), "slaveOk 3");
db.test.setSlaveOk(true);
assert(!db.getSlaveOk() && db.test.getSlaveOk() && db.getMongo().getSlaveOk(), "slaveOk 4");
-
diff --git a/jstests/core/shell_writeconcern.js b/jstests/core/shell_writeconcern.js
index 272a84c0a2c..f3f190061cf 100644
--- a/jstests/core/shell_writeconcern.js
+++ b/jstests/core/shell_writeconcern.js
@@ -9,11 +9,11 @@ collA.drop();
collB.drop();
// test inheritance
-db.setWriteConcern({w:1});
+db.setWriteConcern({w: 1});
assert.eq(1, db.getWriteConcern().toJSON().w);
assert.eq(1, collB.getWriteConcern().toJSON().w);
-collA.setWriteConcern({w:2});
+collA.setWriteConcern({w: 2});
assert.eq(2, collA.getWriteConcern().toJSON().w);
collA.unsetWriteConcern();
assert.eq(1, collA.getWriteConcern().toJSON().w);
@@ -24,55 +24,57 @@ assert.eq(undefined, collB.getWriteConcern());
assert.eq(undefined, db.getWriteConcern());
// test methods, by generating an error
-var res = assert.writeOK(collA.save({_id:1}, {writeConcern:{w:1}}));
-if (!db.getMongo().useWriteCommands() ) {
+var res = assert.writeOK(collA.save({_id: 1}, {writeConcern: {w: 1}}));
+if (!db.getMongo().useWriteCommands()) {
assert.eq(1, res.n, tojson(res));
assert.eq(1, res.upserted, tojson(res));
} else {
assert.eq(1, res.nUpserted, tojson(res));
}
-var res = assert.writeOK(collA.update({_id:1}, {_id:1}, {writeConcern:{w:1}}));
-if (!db.getMongo().useWriteCommands() ) {
+var res = assert.writeOK(collA.update({_id: 1}, {_id: 1}, {writeConcern: {w: 1}}));
+if (!db.getMongo().useWriteCommands()) {
assert.eq(1, res.n, tojson(res));
} else {
assert.eq(1, res.nMatched, tojson(res));
}
-var res = assert.writeOK(collA.update({_id:1}, {_id:1}, {writeConcern:{w:1}}));
-if (!db.getMongo().useWriteCommands() ) {
+var res = assert.writeOK(collA.update({_id: 1}, {_id: 1}, {writeConcern: {w: 1}}));
+if (!db.getMongo().useWriteCommands()) {
assert.eq(1, res.n, tojson(res));
} else {
assert.eq(1, res.nMatched, tojson(res));
}
-var res = assert.writeOK(collA.insert({_id:2}, {writeConcern:{w:1}}));
-if (!db.getMongo().useWriteCommands() ) {
+var res = assert.writeOK(collA.insert({_id: 2}, {writeConcern: {w: 1}}));
+if (!db.getMongo().useWriteCommands()) {
assert.eq(0, res.n, tojson(res));
} else {
assert.eq(1, res.nInserted, tojson(res));
}
-var res = assert.writeOK(collA.remove({_id:3}, {writeConcern:{w:1}}));
-if (!db.getMongo().useWriteCommands() ) {
+var res = assert.writeOK(collA.remove({_id: 3}, {writeConcern: {w: 1}}));
+if (!db.getMongo().useWriteCommands()) {
assert.eq(0, res.n, tojson(res));
} else {
assert.eq(0, res.nRemoved, tojson(res));
}
-var res = assert.writeOK(collA.remove({}, {justOne:true, writeConcern:{w:1}}));
-if (!db.getMongo().useWriteCommands() ) {
+var res = assert.writeOK(collA.remove({}, {justOne: true, writeConcern: {w: 1}}));
+if (!db.getMongo().useWriteCommands()) {
assert.eq(1, res.n, tojson(res));
} else {
assert.eq(1, res.nRemoved, tojson(res));
}
// Test ordered write concern, and that the write concern isn't run/error.
-assert.writeOK(collA.insert({_id:1}));
+assert.writeOK(collA.insert({_id: 1}));
-var res = assert.writeError(collA.insert([{_id:1}, {_id:1}], {ordered:true, writeConcern:{w:1}}));
+var res =
+ assert.writeError(collA.insert([{_id: 1}, {_id: 1}], {ordered: true, writeConcern: {w: 1}}));
assert.eq(1, res.getWriteErrors().length, tojson(res));
assert.eq(undefined, res.writeConcernErrors, tojson(res));
-var res = assert.writeError(collA.insert([{_id:1}, {_id:1}], {ordered:false, writeConcern:{w:1}}));
+var res =
+ assert.writeError(collA.insert([{_id: 1}, {_id: 1}], {ordered: false, writeConcern: {w: 1}}));
assert.eq(2, res.getWriteErrors().length, tojson(res));
assert.eq(undefined, res.writeConcernErrors, tojson(res));
diff --git a/jstests/core/shellkillop.js b/jstests/core/shellkillop.js
index b54ff74e237..0ac2ad681ed 100644
--- a/jstests/core/shellkillop.js
+++ b/jstests/core/shellkillop.js
@@ -1,61 +1,64 @@
baseName = "jstests_shellkillop";
-// 'retry' should be set to true in contexts where an exception should cause the test to be retried rather than to fail.
+// 'retry' should be set to true in contexts where an exception should cause the test to be retried
+// rather than to fail.
retry = false;
function testShellAutokillop() {
+ if (true) { // toggle to disable test
+ db[baseName].drop();
-if (true) { // toggle to disable test
- db[baseName].drop();
+ print("shellkillop.js insert data");
+ for (i = 0; i < 100000; ++i) {
+ db[baseName].insert({i: 1});
+ }
+ assert.eq(100000, db[baseName].count());
- print("shellkillop.js insert data");
- for (i = 0; i < 100000; ++i) {
- db[baseName].insert({ i: 1 });
- }
- assert.eq(100000, db[baseName].count());
+ // mongo --autokillop suppressed the ctrl-c "do you want to kill current operation" message
+ // it's just for testing purposes and thus not in the shell help
+ var evalStr = "print('SKO subtask started'); db." + baseName +
+ ".update( {}, {$set:{i:'abcdefghijkl'}}, false, true ); db." + baseName + ".count();";
+ print("shellkillop.js evalStr:" + evalStr);
+ spawn = startMongoProgramNoConnect(
+ "mongo", "--autokillop", "--port", myPort(), "--eval", evalStr);
- // mongo --autokillop suppressed the ctrl-c "do you want to kill current operation" message
- // it's just for testing purposes and thus not in the shell help
- var evalStr = "print('SKO subtask started'); db." + baseName + ".update( {}, {$set:{i:'abcdefghijkl'}}, false, true ); db." + baseName + ".count();";
- print("shellkillop.js evalStr:" + evalStr);
- spawn = startMongoProgramNoConnect("mongo", "--autokillop", "--port", myPort(), "--eval", evalStr);
+ sleep(100);
+ retry = true;
+ assert(db[baseName].find({i: 'abcdefghijkl'}).count() < 100000,
+ "update ran too fast, test won't be valid");
+ retry = false;
- sleep(100);
- retry = true;
- assert(db[baseName].find({ i: 'abcdefghijkl' }).count() < 100000, "update ran too fast, test won't be valid");
- retry = false;
+ stopMongoProgramByPid(spawn);
- stopMongoProgramByPid(spawn);
+ sleep(100);
- sleep(100);
+ print("count abcdefghijkl:" + db[baseName].find({i: 'abcdefghijkl'}).count());
- print("count abcdefghijkl:" + db[baseName].find({ i: 'abcdefghijkl' }).count());
+ var inprog = db.currentOp().inprog;
+ for (i in inprog) {
+ if (inprog[i].ns == "test." + baseName)
+ throw Error("shellkillop.js op is still running: " + tojson(inprog[i]));
+ }
- var inprog = db.currentOp().inprog;
- for (i in inprog) {
- if (inprog[i].ns == "test." + baseName)
- throw Error( "shellkillop.js op is still running: " + tojson( inprog[i] ) );
+ retry = true;
+ assert(db[baseName].find({i: 'abcdefghijkl'}).count() < 100000,
+ "update ran too fast, test was not valid");
+ retry = false;
}
-
- retry = true;
- assert(db[baseName].find({ i: 'abcdefghijkl' }).count() < 100000, "update ran too fast, test was not valid");
- retry = false;
-}
-
}
-for( var nTries = 0; nTries < 10 && retry; ++nTries ) {
+for (var nTries = 0; nTries < 10 && retry; ++nTries) {
try {
testShellAutokillop();
- } catch (e) {
- if ( !retry ) {
+ } catch (e) {
+ if (!retry) {
throw e;
}
- printjson( e );
- print( "retrying..." );
+ printjson(e);
+ print("retrying...");
}
}
-assert( !retry, "retried too many times" );
+assert(!retry, "retried too many times");
print("shellkillop.js SUCCESS");
diff --git a/jstests/core/shelltypes.js b/jstests/core/shelltypes.js
index e39c63272aa..d04b1b026bc 100644
--- a/jstests/core/shelltypes.js
+++ b/jstests/core/shelltypes.js
@@ -26,21 +26,25 @@ b = Timestamp(a.t, a.i);
printjson(a);
assert.eq(tojson(a), tojson(b), "timestamp");
-assert.throws(function() { Timestamp(-2, 3); }, null,
- "Timestamp time must not accept negative time" );
-assert.throws(function() { Timestamp(0, -1); }, null,
- "Timestamp increment must not accept negative time" );
-assert.throws(function() { Timestamp(0x10000 * 0x10000, 0); }, null,
- "Timestamp time must not accept values larger than 2**32 - 1");
-assert.throws(function() { Timestamp(0, 0x10000 * 0x10000); }, null,
- "Timestamp increment must not accept values larger than 2**32 - 1");
+assert.throws(function() {
+ Timestamp(-2, 3);
+}, null, "Timestamp time must not accept negative time");
+assert.throws(function() {
+ Timestamp(0, -1);
+}, null, "Timestamp increment must not accept negative time");
+assert.throws(function() {
+ Timestamp(0x10000 * 0x10000, 0);
+}, null, "Timestamp time must not accept values larger than 2**32 - 1");
+assert.throws(function() {
+ Timestamp(0, 0x10000 * 0x10000);
+}, null, "Timestamp increment must not accept values larger than 2**32 - 1");
a = new Timestamp(0x80008000, 0x80008000 + 0.5);
b = Timestamp(a.t, Math.round(a.i));
printjson(a);
assert.eq(tojson(a), tojson(b), "timestamp");
-a = new BinData(3,"VQ6EAOKbQdSnFkRmVUQAAA==");
+a = new BinData(3, "VQ6EAOKbQdSnFkRmVUQAAA==");
b = BinData(a.type, a.base64());
printjson(a);
assert.eq(tojson(a), tojson(b), "bindata");
@@ -77,36 +81,38 @@ var timestampA = a.getTimestamp();
var dateA = new Date(timestampA.getTime());
// ObjectId.fromDate - invalid input types
-assert.throws(function() { ObjectId.fromDate(undefined); }, null,
- "ObjectId.fromDate should error on undefined date" );
+assert.throws(function() {
+ ObjectId.fromDate(undefined);
+}, null, "ObjectId.fromDate should error on undefined date");
-assert.throws(function() { ObjectId.fromDate(12345); }, null,
- "ObjectId.fromDate should error on numerical value" );
+assert.throws(function() {
+ ObjectId.fromDate(12345);
+}, null, "ObjectId.fromDate should error on numerical value");
-assert.throws(function() { ObjectId.fromDate(dateA.toISOString()); }, null,
- "ObjectId.fromDate should error on string value" );
+assert.throws(function() {
+ ObjectId.fromDate(dateA.toISOString());
+}, null, "ObjectId.fromDate should error on string value");
// SERVER-14623 dates less than or equal to 1978-07-04T21:24:15Z fail
var checkFromDate = function(millis, expected, comment) {
var oid = ObjectId.fromDate(new Date(millis));
assert.eq(oid.valueOf(), expected, comment);
};
-checkFromDate(Math.pow(2,28) * 1000, "100000000000000000000000", "1978-07-04T21:24:16Z");
-checkFromDate((Math.pow(2,28) * 1000) - 1 , "0fffffff0000000000000000", "1978-07-04T21:24:15Z");
+checkFromDate(Math.pow(2, 28) * 1000, "100000000000000000000000", "1978-07-04T21:24:16Z");
+checkFromDate((Math.pow(2, 28) * 1000) - 1, "0fffffff0000000000000000", "1978-07-04T21:24:15Z");
checkFromDate(0, "000000000000000000000000", "start of epoch");
// test date upper limit
-checkFromDate((Math.pow(2,32) * 1000) - 1, "ffffffff0000000000000000", "last valid date");
-assert.throws(function() { ObjectId.fromDate(new Date(Math.pow(2,32) * 1000)); }, null,
- "ObjectId limited to 4 bytes for seconds" );
+checkFromDate((Math.pow(2, 32) * 1000) - 1, "ffffffff0000000000000000", "last valid date");
+assert.throws(function() {
+ ObjectId.fromDate(new Date(Math.pow(2, 32) * 1000));
+}, null, "ObjectId limited to 4 bytes for seconds");
// ObjectId.fromDate - Date
b = ObjectId.fromDate(dateA);
printjson(a);
assert.eq(tojson(a.getTimestamp()), tojson(b.getTimestamp()), "ObjectId.fromDate - Date");
-
-
// tojsonObject
// Empty object
@@ -131,4 +137,3 @@ assert.eq('{ "a" : 1, "b" : { "bb" : 2, "cc" : 3 } }',
tojsonObject({a: 1, b: {bb: 2, cc: 3}}, '', true));
assert.eq('{\n\t\t\t"a" : 1,\n\t\t\t"b" : {\n\t\t\t\t"bb" : 2,\n\t\t\t\t"cc" : 3\n\t\t\t}\n\t\t}',
tojsonObject({a: 1, b: {bb: 2, cc: 3}}, '\t\t'));
-
diff --git a/jstests/core/show_record_id.js b/jstests/core/show_record_id.js
index 566bdfff9a9..32fab6828a9 100644
--- a/jstests/core/show_record_id.js
+++ b/jstests/core/show_record_id.js
@@ -3,28 +3,28 @@
var t = db.show_record_id;
t.drop();
-function checkResults( arr ) {
- for( i in arr ) {
- a = arr[ i ];
- assert( a['$recordId'] );
+function checkResults(arr) {
+ for (i in arr) {
+ a = arr[i];
+ assert(a['$recordId']);
}
}
// Check query.
-t.save( {} );
-checkResults( t.find().showRecordId().toArray() );
+t.save({});
+checkResults(t.find().showRecordId().toArray());
// Check query and get more.
-t.save( {} );
-t.save( {} );
-checkResults( t.find().batchSize( 2 ).showRecordId().toArray() );
+t.save({});
+t.save({});
+checkResults(t.find().batchSize(2).showRecordId().toArray());
// Check with a covered index.
-t.ensureIndex( { a:1 } );
-checkResults( t.find( {}, { _id:0, a:1 } ).hint( { a:1 } ).showRecordId().toArray() );
-checkResults( t.find( {}, { _id:0, a:1 } ).hint( { a:1 } ).showRecordId().toArray() );
+t.ensureIndex({a: 1});
+checkResults(t.find({}, {_id: 0, a: 1}).hint({a: 1}).showRecordId().toArray());
+checkResults(t.find({}, {_id: 0, a: 1}).hint({a: 1}).showRecordId().toArray());
// Check with an idhack query.
t.drop();
t.save({_id: 0, a: 1});
-checkResults( t.find( { _id: 0 } ).showRecordId().toArray() );
+checkResults(t.find({_id: 0}).showRecordId().toArray());
diff --git a/jstests/core/skip1.js b/jstests/core/skip1.js
index c856e92cf72..ae8f589d616 100644
--- a/jstests/core/skip1.js
+++ b/jstests/core/skip1.js
@@ -3,16 +3,16 @@
var t = db.jstests_skip1;
-if ( 0 ) { // SERVER-2845
-t.drop();
+if (0) { // SERVER-2845
+ t.drop();
-t.ensureIndex( {a:1} );
-t.save( {a:5} );
-t.save( {a:5} );
-t.save( {a:5} );
+ t.ensureIndex({a: 1});
+ t.save({a: 5});
+ t.save({a: 5});
+ t.save({a: 5});
-assert.eq( 3, t.find( {a:5} ).skip( 2 ).explain().nscanned );
-assert.eq( 1, t.find( {a:5} ).skip( 2 ).explain().nscannedObjects );
+ assert.eq(3, t.find({a: 5}).skip(2).explain().nscanned);
+ assert.eq(1, t.find({a: 5}).skip(2).explain().nscannedObjects);
}
// SERVER-13537: Ensure that combinations of skip and limit don't crash
@@ -21,16 +21,16 @@ t.drop();
for (var i = 0; i < 10; i++) {
t.save({a: i});
}
-assert.eq( 9, t.find().sort({a: 1}).limit(2147483647).skip(1).itcount() );
-assert.eq( 0, t.find().sort({a: 1}).skip(2147483647).limit(1).itcount() );
+assert.eq(9, t.find().sort({a: 1}).limit(2147483647).skip(1).itcount());
+assert.eq(0, t.find().sort({a: 1}).skip(2147483647).limit(1).itcount());
if (!db.getMongo().useReadCommands()) {
// If we're using OP_QUERY/OP_GET_MORE reads rather than find/getMore command, then the skip and
// limit fields must fit inside a 32-bit signed integer.
- assert.throws( function() {
- assert.eq( 0, t.find().sort({a: 1}).skip(2147483648).itcount() );
+ assert.throws(function() {
+ assert.eq(0, t.find().sort({a: 1}).skip(2147483648).itcount());
});
- assert.throws( function() {
- assert.eq( 0, t.find().sort({a: 1}).limit(2147483648).itcount() );
+ assert.throws(function() {
+ assert.eq(0, t.find().sort({a: 1}).limit(2147483648).itcount());
});
}
diff --git a/jstests/core/slice1.js b/jstests/core/slice1.js
index 6037fe0eb1a..d723219af10 100644
--- a/jstests/core/slice1.js
+++ b/jstests/core/slice1.js
@@ -1,68 +1,68 @@
t = db.slice1;
t.drop();
-t.insert({_id:1, a:[0,1,2,3,4,5,-5,-4,-3,-2,-1], b:1, c:1});
+t.insert({_id: 1, a: [0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1], b: 1, c: 1});
// first three
-out = t.findOne({}, {a:{$slice:3}});
-assert.eq(out.a , [0,1,2], '1');
+out = t.findOne({}, {a: {$slice: 3}});
+assert.eq(out.a, [0, 1, 2], '1');
// last three
-out = t.findOne({}, {a:{$slice:-3}});
-assert.eq(out.a , [-3, -2, -1], '2');
+out = t.findOne({}, {a: {$slice: -3}});
+assert.eq(out.a, [-3, -2, -1], '2');
// skip 2, limit 3
-out = t.findOne({}, {a:{$slice:[2, 3]}});
-assert.eq(out.a , [2,3,4], '3');
+out = t.findOne({}, {a: {$slice: [2, 3]}});
+assert.eq(out.a, [2, 3, 4], '3');
// skip to fifth from last, limit 4
-out = t.findOne({}, {a:{$slice:[-5, 4]}});
-assert.eq(out.a , [-5, -4, -3, -2], '4');
+out = t.findOne({}, {a: {$slice: [-5, 4]}});
+assert.eq(out.a, [-5, -4, -3, -2], '4');
// skip to fifth from last, limit 10
-out = t.findOne({}, {a:{$slice:[-5, 10]}});
-assert.eq(out.a , [-5, -4, -3, -2, -1], '5');
-
+out = t.findOne({}, {a: {$slice: [-5, 10]}});
+assert.eq(out.a, [-5, -4, -3, -2, -1], '5');
// interaction with other fields
-out = t.findOne({}, {a:{$slice:3}});
-assert.eq(out.a , [0,1,2], 'A 1');
-assert.eq(out.b , 1, 'A 2');
-assert.eq(out.c , 1, 'A 3');
+out = t.findOne({}, {a: {$slice: 3}});
+assert.eq(out.a, [0, 1, 2], 'A 1');
+assert.eq(out.b, 1, 'A 2');
+assert.eq(out.c, 1, 'A 3');
-out = t.findOne({}, {a:{$slice:3}, b:true});
-assert.eq(out.a , [0,1,2], 'B 1');
-assert.eq(out.b , 1, 'B 2');
-assert.eq(out.c , undefined);
+out = t.findOne({}, {a: {$slice: 3}, b: true});
+assert.eq(out.a, [0, 1, 2], 'B 1');
+assert.eq(out.b, 1, 'B 2');
+assert.eq(out.c, undefined);
-out = t.findOne({}, {a:{$slice:3}, b:false});
-assert.eq(out.a , [0,1,2]);
-assert.eq(out.b , undefined);
-assert.eq(out.c , 1);
+out = t.findOne({}, {a: {$slice: 3}, b: false});
+assert.eq(out.a, [0, 1, 2]);
+assert.eq(out.b, undefined);
+assert.eq(out.c, 1);
t.drop();
-t.insert({comments: [{id:0, text:'a'},{id:1, text:'b'},{id:2, text:'c'},{id:3, text:'d'}], title:'foo'});
-
+t.insert({
+ comments: [{id: 0, text: 'a'}, {id: 1, text: 'b'}, {id: 2, text: 'c'}, {id: 3, text: 'd'}],
+ title: 'foo'
+});
-out = t.findOne({}, {comments:{$slice:2}, 'comments.id':true});
-assert.eq(out.comments , [{id:0}, {id:1}]);
-assert.eq(out.title , undefined);
+out = t.findOne({}, {comments: {$slice: 2}, 'comments.id': true});
+assert.eq(out.comments, [{id: 0}, {id: 1}]);
+assert.eq(out.title, undefined);
-out = t.findOne({}, {comments:{$slice:2}, 'comments.id':false});
-assert.eq(out.comments , [{text: 'a'}, {text: 'b'}]);
-assert.eq(out.title , 'foo');
+out = t.findOne({}, {comments: {$slice: 2}, 'comments.id': false});
+assert.eq(out.comments, [{text: 'a'}, {text: 'b'}]);
+assert.eq(out.title, 'foo');
-//nested arrays
+// nested arrays
t.drop();
-t.insert({_id:1, a:[[1,1,1], [2,2,2], [3,3,3]], b:1, c:1});
-
-out = t.findOne({}, {a:{$slice:1}});
-assert.eq(out.a , [[1,1,1]], 'n 1');
+t.insert({_id: 1, a: [[1, 1, 1], [2, 2, 2], [3, 3, 3]], b: 1, c: 1});
-out = t.findOne({}, {a:{$slice:-1}});
-assert.eq(out.a , [[3,3,3]], 'n 2');
+out = t.findOne({}, {a: {$slice: 1}});
+assert.eq(out.a, [[1, 1, 1]], 'n 1');
-out = t.findOne({}, {a:{$slice:[0,2]}});
-assert.eq(out.a , [[1,1,1],[2,2,2]], 'n 2');
+out = t.findOne({}, {a: {$slice: -1}});
+assert.eq(out.a, [[3, 3, 3]], 'n 2');
+out = t.findOne({}, {a: {$slice: [0, 2]}});
+assert.eq(out.a, [[1, 1, 1], [2, 2, 2]], 'n 2');
diff --git a/jstests/core/snapshot_queries.js b/jstests/core/snapshot_queries.js
index 684a4b9459e..e4aec435b10 100644
--- a/jstests/core/snapshot_queries.js
+++ b/jstests/core/snapshot_queries.js
@@ -20,7 +20,7 @@
assert.eq(1, cursor.next()["_id"]);
// Force a document move (on MMAP) while the query is yielded for a getMore.
- var bigString = Array(1024*1024).toString();
+ var bigString = Array(1024 * 1024).toString();
assert.writeOK(coll.update({_id: 1}, {$set: {padding: bigString}}));
assert.eq(2, cursor.next()["_id"]);
diff --git a/jstests/core/sort1.js b/jstests/core/sort1.js
index ce530872e8c..9bf92601bd1 100644
--- a/jstests/core/sort1.js
+++ b/jstests/core/sort1.js
@@ -1,48 +1,47 @@
-debug = function( s ){
- //print( s );
+debug = function(s) {
+ // print( s );
};
t = db.sort1;
t.drop();
-t.save({x:3,z:33});
-t.save({x:5,z:33});
-t.save({x:2,z:33});
-t.save({x:3,z:33});
-t.save({x:1,z:33});
-
-debug( "a" );
-for( var pass = 0; pass < 2; pass++ ) {
- assert( t.find().sort({x:1})[0].x == 1 );
- assert( t.find().sort({x:1}).skip(1)[0].x == 2 );
- assert( t.find().sort({x:-1})[0].x == 5 );
- assert( t.find().sort({x:-1})[1].x == 3 );
- assert.eq( t.find().sort({x:-1}).skip(0)[0].x , 5 );
- assert.eq( t.find().sort({x:-1}).skip(1)[0].x , 3 );
- t.ensureIndex({x:1});
-
+t.save({x: 3, z: 33});
+t.save({x: 5, z: 33});
+t.save({x: 2, z: 33});
+t.save({x: 3, z: 33});
+t.save({x: 1, z: 33});
+
+debug("a");
+for (var pass = 0; pass < 2; pass++) {
+ assert(t.find().sort({x: 1})[0].x == 1);
+ assert(t.find().sort({x: 1}).skip(1)[0].x == 2);
+ assert(t.find().sort({x: -1})[0].x == 5);
+ assert(t.find().sort({x: -1})[1].x == 3);
+ assert.eq(t.find().sort({x: -1}).skip(0)[0].x, 5);
+ assert.eq(t.find().sort({x: -1}).skip(1)[0].x, 3);
+ t.ensureIndex({x: 1});
}
-debug( "b" );
+debug("b");
assert(t.validate().valid);
t.drop();
-t.save({x:'a'});
-t.save({x:'aba'});
-t.save({x:'zed'});
-t.save({x:'foo'});
-
-debug( "c" );
-
-for( var pass = 0; pass < 2; pass++ ) {
- debug( tojson( t.find().sort( { "x" : 1 } ).limit(1).next() ) );
- assert.eq( "a" , t.find().sort({'x': 1}).limit(1).next().x , "c.1" );
- assert.eq( "a" , t.find().sort({'x': 1}).next().x , "c.2" );
- assert.eq( "zed" , t.find().sort({'x': -1}).limit(1).next().x , "c.3" );
- assert.eq( "zed" , t.find().sort({'x': -1}).next().x , "c.4" );
- t.ensureIndex({x:1});
+t.save({x: 'a'});
+t.save({x: 'aba'});
+t.save({x: 'zed'});
+t.save({x: 'foo'});
+
+debug("c");
+
+for (var pass = 0; pass < 2; pass++) {
+ debug(tojson(t.find().sort({"x": 1}).limit(1).next()));
+ assert.eq("a", t.find().sort({'x': 1}).limit(1).next().x, "c.1");
+ assert.eq("a", t.find().sort({'x': 1}).next().x, "c.2");
+ assert.eq("zed", t.find().sort({'x': -1}).limit(1).next().x, "c.3");
+ assert.eq("zed", t.find().sort({'x': -1}).next().x, "c.4");
+ t.ensureIndex({x: 1});
}
-debug( "d" );
+debug("d");
assert(t.validate().valid);
diff --git a/jstests/core/sort10.js b/jstests/core/sort10.js
index 657da665499..207be0226fa 100644
--- a/jstests/core/sort10.js
+++ b/jstests/core/sort10.js
@@ -3,23 +3,21 @@ t = db.sort10;
function checkSorting1(opts) {
t.drop();
- t.insert({ x: new Date(50000) });
- t.insert({ x: new Date(-50) });
+ t.insert({x: new Date(50000)});
+ t.insert({x: new Date(-50)});
var d = new Date(-50);
for (var pass = 0; pass < 2; pass++) {
- assert(t.find().sort({x:1})[0].x.valueOf() == d.valueOf());
- t.ensureIndex({ x: 1 }, opts);
- t.insert({ x: new Date() });
+ assert(t.find().sort({x: 1})[0].x.valueOf() == d.valueOf());
+ t.ensureIndex({x: 1}, opts);
+ t.insert({x: new Date()});
}
}
checkSorting1({});
-checkSorting1({"background":true});
-
-
+checkSorting1({"background": true});
function checkSorting2(dates, sortOrder) {
- cur = t.find().sort({x:sortOrder});
+ cur = t.find().sort({x: sortOrder});
assert.eq(dates.length, cur.count(), "Incorrect number of results returned");
index = 0;
while (cur.hasNext()) {
@@ -32,17 +30,19 @@ function checkSorting2(dates, sortOrder) {
t.drop();
dates = [new Date(-5000000000000), new Date(5000000000000), new Date(0), new Date(5), new Date(-5)];
for (var i = 0; i < dates.length; i++) {
- t.insert({x:dates[i]});
+ t.insert({x: dates[i]});
}
-dates.sort(function(a,b){return a - b;});
+dates.sort(function(a, b) {
+ return a - b;
+});
reverseDates = dates.slice(0).reverse();
checkSorting2(dates, 1);
checkSorting2(reverseDates, -1);
-t.ensureIndex({x:1});
+t.ensureIndex({x: 1});
checkSorting2(dates, 1);
checkSorting2(reverseDates, -1);
t.dropIndexes();
-t.ensureIndex({x:-1});
+t.ensureIndex({x: -1});
checkSorting2(dates, 1);
checkSorting2(reverseDates, -1);
diff --git a/jstests/core/sort2.js b/jstests/core/sort2.js
index 2528751cfc1..2cfb6baafc3 100644
--- a/jstests/core/sort2.js
+++ b/jstests/core/sort2.js
@@ -3,30 +3,30 @@
t = db.sort2;
t.drop();
-t.save({x:1, y:{a:5,b:4}});
-t.save({x:1, y:{a:7,b:3}});
-t.save({x:1, y:{a:2,b:3}});
-t.save({x:1, y:{a:9,b:3}});
-for( var pass = 0; pass < 2; pass++ ) {
- var res = t.find().sort({'y.a':1}).toArray();
- assert( res[0].y.a == 2 );
- assert( res[1].y.a == 5 );
- assert( res.length == 4 );
- t.ensureIndex({"y.a":1});
+t.save({x: 1, y: {a: 5, b: 4}});
+t.save({x: 1, y: {a: 7, b: 3}});
+t.save({x: 1, y: {a: 2, b: 3}});
+t.save({x: 1, y: {a: 9, b: 3}});
+for (var pass = 0; pass < 2; pass++) {
+ var res = t.find().sort({'y.a': 1}).toArray();
+ assert(res[0].y.a == 2);
+ assert(res[1].y.a == 5);
+ assert(res.length == 4);
+ t.ensureIndex({"y.a": 1});
}
assert(t.validate().valid);
t.drop();
-t.insert({ x: 1 });
-t.insert({ x: 5000000000 });
-t.insert({ x: NaN });
-t.insert({ x: Infinity });
-t.insert({ x: -Infinity });
+t.insert({x: 1});
+t.insert({x: 5000000000});
+t.insert({x: NaN});
+t.insert({x: Infinity});
+t.insert({x: -Infinity});
var good = [NaN, -Infinity, 1, 5000000000, Infinity];
for (var pass = 0; pass < 2; pass++) {
- var res = t.find({}, { _id: 0 }).sort({ x: 1 }).toArray();
+ var res = t.find({}, {_id: 0}).sort({x: 1}).toArray();
for (var i = 0; i < good.length; i++) {
assert(good[i].toString() == res[i].x.toString());
}
- t.ensureIndex({ x : 1 });
+ t.ensureIndex({x: 1});
}
diff --git a/jstests/core/sort3.js b/jstests/core/sort3.js
index f65b7445903..bfc1ee5134c 100644
--- a/jstests/core/sort3.js
+++ b/jstests/core/sort3.js
@@ -1,11 +1,20 @@
t = db.sort3;
t.drop();
-t.save( { a : 1 } );
-t.save( { a : 5 } );
-t.save( { a : 3 } );
+t.save({a: 1});
+t.save({a: 5});
+t.save({a: 3});
-assert.eq( "1,5,3" , t.find().toArray().map( function(z){ return z.a; } ) );
+assert.eq("1,5,3",
+ t.find().toArray().map(function(z) {
+ return z.a;
+ }));
-assert.eq( "1,3,5" , t.find().sort( { a : 1 } ).toArray().map( function(z){ return z.a; } ) );
-assert.eq( "5,3,1" , t.find().sort( { a : -1 } ).toArray().map( function(z){ return z.a; } ) );
+assert.eq("1,3,5",
+ t.find().sort({a: 1}).toArray().map(function(z) {
+ return z.a;
+ }));
+assert.eq("5,3,1",
+ t.find().sort({a: -1}).toArray().map(function(z) {
+ return z.a;
+ }));
diff --git a/jstests/core/sort4.js b/jstests/core/sort4.js
index 9e4076b1c71..41b4e25fe11 100644
--- a/jstests/core/sort4.js
+++ b/jstests/core/sort4.js
@@ -1,22 +1,19 @@
t = db.sort4;
t.drop();
-
-function nice( sort , correct , extra ){
- var c = t.find().sort( sort );
+function nice(sort, correct, extra) {
+ var c = t.find().sort(sort);
var s = "";
- c.forEach(
- function(z){
- if ( s.length )
- s += ",";
- s += z.name;
- if ( z.prename )
- s += z.prename;
- }
- );
- print( tojson( sort ) + "\t" + s );
- if ( correct )
- assert.eq( correct , s , tojson( sort ) + "(" + extra + ")" );
+ c.forEach(function(z) {
+ if (s.length)
+ s += ",";
+ s += z.name;
+ if (z.prename)
+ s += z.prename;
+ });
+ print(tojson(sort) + "\t" + s);
+ if (correct)
+ assert.eq(correct, s, tojson(sort) + "(" + extra + ")");
return s;
}
@@ -25,19 +22,19 @@ t.save({name: 'A', prename: 'C'});
t.save({name: 'B', prename: 'B'});
t.save({name: 'B', prename: 'D'});
-nice( { name:1 } , "AB,AC,BB,BD" , "s1" );
-nice( { prename : 1 } , "AB,BB,AC,BD" , "s2" );
-nice( {name:1, prename:1} , "AB,AC,BB,BD" , "s3" );
+nice({name: 1}, "AB,AC,BB,BD", "s1");
+nice({prename: 1}, "AB,BB,AC,BD", "s2");
+nice({name: 1, prename: 1}, "AB,AC,BB,BD", "s3");
-t.save({name: 'A'});
-nice( {name:1, prename:1} , "A,AB,AC,BB,BD" , "e1" );
+t.save({name: 'A'});
+nice({name: 1, prename: 1}, "A,AB,AC,BB,BD", "e1");
-t.save({name: 'C'});
-nice( {name:1, prename:1} , "A,AB,AC,BB,BD,C" , "e2" ); // SERVER-282
+t.save({name: 'C'});
+nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2"); // SERVER-282
-t.ensureIndex( { name : 1 , prename : 1 } );
-nice( {name:1, prename:1} , "A,AB,AC,BB,BD,C" , "e2ia" ); // SERVER-282
+t.ensureIndex({name: 1, prename: 1});
+nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2ia"); // SERVER-282
t.dropIndexes();
-t.ensureIndex( { name : 1 } );
-nice( {name:1, prename:1} , "A,AB,AC,BB,BD,C" , "e2ib" ); // SERVER-282
+t.ensureIndex({name: 1});
+nice({name: 1, prename: 1}, "A,AB,AC,BB,BD,C", "e2ib"); // SERVER-282
diff --git a/jstests/core/sort5.js b/jstests/core/sort5.js
index b90256ef79d..399c9fb4e28 100644
--- a/jstests/core/sort5.js
+++ b/jstests/core/sort5.js
@@ -8,14 +8,38 @@ t.save({_id: 9, x: 4, y: {a: 9, b: 3}});
// test compound sorting
-assert.eq( [4,2,3,1] , t.find().sort({"y.b": 1 , "y.a" : -1 }).map( function(z){ return z.x; } ) , "A no index" );
+assert.eq([4, 2, 3, 1],
+ t.find()
+ .sort({"y.b": 1, "y.a": -1})
+ .map(function(z) {
+ return z.x;
+ }),
+ "A no index");
t.ensureIndex({"y.b": 1, "y.a": -1});
-assert.eq( [4,2,3,1] , t.find().sort({"y.b": 1 , "y.a" : -1 }).map( function(z){ return z.x; } ) , "A index" );
+assert.eq([4, 2, 3, 1],
+ t.find()
+ .sort({"y.b": 1, "y.a": -1})
+ .map(function(z) {
+ return z.x;
+ }),
+ "A index");
assert(t.validate().valid, "A valid");
// test sorting on compound key involving _id
-assert.eq( [4,2,3,1] , t.find().sort({"y.b": 1 , _id : -1 }).map( function(z){ return z.x; } ) , "B no index" );
+assert.eq([4, 2, 3, 1],
+ t.find()
+ .sort({"y.b": 1, _id: -1})
+ .map(function(z) {
+ return z.x;
+ }),
+ "B no index");
t.ensureIndex({"y.b": 1, "_id": -1});
-assert.eq( [4,2,3,1] , t.find().sort({"y.b": 1 , _id : -1 }).map( function(z){ return z.x; } ) , "B index" );
+assert.eq([4, 2, 3, 1],
+ t.find()
+ .sort({"y.b": 1, _id: -1})
+ .map(function(z) {
+ return z.x;
+ }),
+ "B index");
assert(t.validate().valid, "B valid");
diff --git a/jstests/core/sort6.js b/jstests/core/sort6.js
index 323fb92a335..03a1b559831 100644
--- a/jstests/core/sort6.js
+++ b/jstests/core/sort6.js
@@ -1,38 +1,38 @@
t = db.sort6;
-function get( x ){
- return t.find().sort( { c : x } ).map( function(z){ return z._id; } );
+function get(x) {
+ return t.find().sort({c: x}).map(function(z) {
+ return z._id;
+ });
}
// part 1
t.drop();
-t.insert({_id:1,c:null});
-t.insert({_id:2,c:1});
-t.insert({_id:3,c:2});
+t.insert({_id: 1, c: null});
+t.insert({_id: 2, c: 1});
+t.insert({_id: 3, c: 2});
+assert.eq([3, 2, 1], get(-1), "A1"); // SERVER-635
+assert.eq([1, 2, 3], get(1), "A2");
-assert.eq( [3,2,1] , get( -1 ) , "A1" ); // SERVER-635
-assert.eq( [1,2,3] , get( 1 ) , "A2" );
-
-t.ensureIndex( { c : 1 } );
-
-assert.eq( [3,2,1] , get( -1 ) , "B1" );
-assert.eq( [1,2,3] , get( 1 ) , "B2" );
+t.ensureIndex({c: 1});
+assert.eq([3, 2, 1], get(-1), "B1");
+assert.eq([1, 2, 3], get(1), "B2");
// part 2
t.drop();
-t.insert({_id:1});
-t.insert({_id:2,c:1});
-t.insert({_id:3,c:2});
+t.insert({_id: 1});
+t.insert({_id: 2, c: 1});
+t.insert({_id: 3, c: 2});
-assert.eq( [3,2,1] , get( -1 ) , "C1" ); // SERVER-635
-assert.eq( [1,2,3] , get( 1 ) , "C2" );
+assert.eq([3, 2, 1], get(-1), "C1"); // SERVER-635
+assert.eq([1, 2, 3], get(1), "C2");
-t.ensureIndex( { c : 1 } );
+t.ensureIndex({c: 1});
-assert.eq( [3,2,1] , get( -1 ) , "D1" );
-assert.eq( [1,2,3] , get( 1 ) , "X2" );
+assert.eq([3, 2, 1], get(-1), "D1");
+assert.eq([1, 2, 3], get(1), "X2");
diff --git a/jstests/core/sort7.js b/jstests/core/sort7.js
index 0b98734e5ff..4377cd5d17b 100644
--- a/jstests/core/sort7.js
+++ b/jstests/core/sort7.js
@@ -5,21 +5,21 @@ t.drop();
// Compare indexed and unindexed sort order for an array embedded field.
-t.save( { a : [ { x : 2 } ] } );
-t.save( { a : [ { x : 1 } ] } );
-t.save( { a : [ { x : 3 } ] } );
-unindexed = t.find().sort( {"a.x":1} ).toArray();
-t.ensureIndex( { "a.x" : 1 } );
-indexed = t.find().sort( {"a.x":1} ).hint( {"a.x":1} ).toArray();
-assert.eq( unindexed, indexed );
+t.save({a: [{x: 2}]});
+t.save({a: [{x: 1}]});
+t.save({a: [{x: 3}]});
+unindexed = t.find().sort({"a.x": 1}).toArray();
+t.ensureIndex({"a.x": 1});
+indexed = t.find().sort({"a.x": 1}).hint({"a.x": 1}).toArray();
+assert.eq(unindexed, indexed);
// Now check when there are two objects in the array.
t.remove({});
-t.save( { a : [ { x : 2 }, { x : 3 } ] } );
-t.save( { a : [ { x : 1 }, { x : 4 } ] } );
-t.save( { a : [ { x : 3 }, { x : 2 } ] } );
-unindexed = t.find().sort( {"a.x":1} ).toArray();
-t.ensureIndex( { "a.x" : 1 } );
-indexed = t.find().sort( {"a.x":1} ).hint( {"a.x":1} ).toArray();
-assert.eq( unindexed, indexed );
+t.save({a: [{x: 2}, {x: 3}]});
+t.save({a: [{x: 1}, {x: 4}]});
+t.save({a: [{x: 3}, {x: 2}]});
+unindexed = t.find().sort({"a.x": 1}).toArray();
+t.ensureIndex({"a.x": 1});
+indexed = t.find().sort({"a.x": 1}).hint({"a.x": 1}).toArray();
+assert.eq(unindexed, indexed);
diff --git a/jstests/core/sort8.js b/jstests/core/sort8.js
index 916075502d7..72e5ce54d23 100644
--- a/jstests/core/sort8.js
+++ b/jstests/core/sort8.js
@@ -3,28 +3,28 @@
t = db.jstests_sort8;
t.drop();
-t.save( {a:[1,10]} );
-t.save( {a:5} );
-unindexedForward = t.find().sort( {a:1} ).toArray();
-unindexedReverse = t.find().sort( {a:-1} ).toArray();
-t.ensureIndex( {a:1} );
-indexedForward = t.find().sort( {a:1} ).hint( {a:1} ).toArray();
-indexedReverse = t.find().sort( {a:-1} ).hint( {a:1} ).toArray();
+t.save({a: [1, 10]});
+t.save({a: 5});
+unindexedForward = t.find().sort({a: 1}).toArray();
+unindexedReverse = t.find().sort({a: -1}).toArray();
+t.ensureIndex({a: 1});
+indexedForward = t.find().sort({a: 1}).hint({a: 1}).toArray();
+indexedReverse = t.find().sort({a: -1}).hint({a: 1}).toArray();
-assert.eq( unindexedForward, indexedForward );
-assert.eq( unindexedReverse, indexedReverse );
+assert.eq(unindexedForward, indexedForward);
+assert.eq(unindexedReverse, indexedReverse);
// Sorting is based on array members, not the array itself.
-assert.eq( [1,10], unindexedForward[ 0 ].a );
-assert.eq( [1,10], unindexedReverse[ 0 ].a );
+assert.eq([1, 10], unindexedForward[0].a);
+assert.eq([1, 10], unindexedReverse[0].a);
// Now try with a bounds constraint.
t.dropIndexes();
-unindexedForward = t.find({a:{$gte:5}}).sort( {a:1} ).toArray();
-unindexedReverse = t.find({a:{$lte:5}}).sort( {a:-1} ).toArray();
-t.ensureIndex( {a:1} );
-indexedForward = t.find({a:{$gte:5}}).sort( {a:1} ).hint( {a:1} ).toArray();
-indexedReverse = t.find({a:{$lte:5}}).sort( {a:-1} ).hint( {a:1} ).toArray();
+unindexedForward = t.find({a: {$gte: 5}}).sort({a: 1}).toArray();
+unindexedReverse = t.find({a: {$lte: 5}}).sort({a: -1}).toArray();
+t.ensureIndex({a: 1});
+indexedForward = t.find({a: {$gte: 5}}).sort({a: 1}).hint({a: 1}).toArray();
+indexedReverse = t.find({a: {$lte: 5}}).sort({a: -1}).hint({a: 1}).toArray();
-assert.eq( unindexedForward, indexedForward );
-assert.eq( unindexedReverse, indexedReverse );
+assert.eq(unindexedForward, indexedForward);
+assert.eq(unindexedReverse, indexedReverse);
diff --git a/jstests/core/sort9.js b/jstests/core/sort9.js
index 62407d6e96d..57496b40da1 100644
--- a/jstests/core/sort9.js
+++ b/jstests/core/sort9.js
@@ -3,24 +3,24 @@
t = db.jstests_sort9;
t.drop();
-t.save( {a:[]} );
-t.save( {a:[[]]} );
-assert.eq( 2, t.find( {a:{$ne:4}} ).sort( {a:1} ).itcount() );
-assert.eq( 2, t.find( {'a.b':{$ne:4}} ).sort( {'a.b':1} ).itcount() );
-assert.eq( 2, t.find( {a:{$ne:4}} ).sort( {'a.b':1} ).itcount() );
+t.save({a: []});
+t.save({a: [[]]});
+assert.eq(2, t.find({a: {$ne: 4}}).sort({a: 1}).itcount());
+assert.eq(2, t.find({'a.b': {$ne: 4}}).sort({'a.b': 1}).itcount());
+assert.eq(2, t.find({a: {$ne: 4}}).sort({'a.b': 1}).itcount());
t.drop();
-t.save( {} );
-assert.eq( 1, t.find( {a:{$ne:4}} ).sort( {a:1} ).itcount() );
-assert.eq( 1, t.find( {'a.b':{$ne:4}} ).sort( {'a.b':1} ).itcount() );
-assert.eq( 1, t.find( {a:{$ne:4}} ).sort( {'a.b':1} ).itcount() );
-assert.eq( 1, t.find( {a:{$exists:0}} ).sort( {a:1} ).itcount() );
-assert.eq( 1, t.find( {a:{$exists:0}} ).sort( {'a.b':1} ).itcount() );
+t.save({});
+assert.eq(1, t.find({a: {$ne: 4}}).sort({a: 1}).itcount());
+assert.eq(1, t.find({'a.b': {$ne: 4}}).sort({'a.b': 1}).itcount());
+assert.eq(1, t.find({a: {$ne: 4}}).sort({'a.b': 1}).itcount());
+assert.eq(1, t.find({a: {$exists: 0}}).sort({a: 1}).itcount());
+assert.eq(1, t.find({a: {$exists: 0}}).sort({'a.b': 1}).itcount());
t.drop();
-t.save( {a:{}} );
-assert.eq( 1, t.find( {a:{$ne:4}} ).sort( {a:1} ).itcount() );
-assert.eq( 1, t.find( {'a.b':{$ne:4}} ).sort( {'a.b':1} ).itcount() );
-assert.eq( 1, t.find( {a:{$ne:4}} ).sort( {'a.b':1} ).itcount() );
-assert.eq( 1, t.find( {'a.b':{$exists:0}} ).sort( {a:1} ).itcount() );
-assert.eq( 1, t.find( {'a.b':{$exists:0}} ).sort( {'a.b':1} ).itcount() );
+t.save({a: {}});
+assert.eq(1, t.find({a: {$ne: 4}}).sort({a: 1}).itcount());
+assert.eq(1, t.find({'a.b': {$ne: 4}}).sort({'a.b': 1}).itcount());
+assert.eq(1, t.find({a: {$ne: 4}}).sort({'a.b': 1}).itcount());
+assert.eq(1, t.find({'a.b': {$exists: 0}}).sort({a: 1}).itcount());
+assert.eq(1, t.find({'a.b': {$exists: 0}}).sort({'a.b': 1}).itcount());
diff --git a/jstests/core/sort_numeric.js b/jstests/core/sort_numeric.js
index 807f23dfe8d..df4e914b87b 100644
--- a/jstests/core/sort_numeric.js
+++ b/jstests/core/sort_numeric.js
@@ -5,31 +5,29 @@ t.drop();
// there are two numeric types int he db; make sure it handles them right
// for comparisons.
-t.save( { a : 3 } );
-t.save( { a : 3.1 } );
-t.save( { a : 2.9 } );
-t.save( { a : 1 } );
-t.save( { a : 1.9 } );
-t.save( { a : 5 } );
-t.save( { a : 4.9 } );
-t.save( { a : 2.91 } );
-
-for( var pass = 0; pass < 2; pass++ ) {
-
- var c = t.find().sort({a:1});
+t.save({a: 3});
+t.save({a: 3.1});
+t.save({a: 2.9});
+t.save({a: 1});
+t.save({a: 1.9});
+t.save({a: 5});
+t.save({a: 4.9});
+t.save({a: 2.91});
+
+for (var pass = 0; pass < 2; pass++) {
+ var c = t.find().sort({a: 1});
var last = 0;
- while( c.hasNext() ) {
+ while (c.hasNext()) {
current = c.next();
- assert( current.a > last );
+ assert(current.a > last);
last = current.a;
}
- assert( t.find({a:3}).count() == 1 );
- assert( t.find({a:3.0}).count() == 1 );
- assert( t.find({a:3.0}).length() == 1 );
+ assert(t.find({a: 3}).count() == 1);
+ assert(t.find({a: 3.0}).count() == 1);
+ assert(t.find({a: 3.0}).length() == 1);
- t.ensureIndex({a:1});
+ t.ensureIndex({a: 1});
}
assert(t.validate().valid);
-
diff --git a/jstests/core/sortb.js b/jstests/core/sortb.js
index e16c7d650e6..e4feea7ecfc 100644
--- a/jstests/core/sortb.js
+++ b/jstests/core/sortb.js
@@ -4,24 +4,28 @@
t = db.jstests_sortb;
t.drop();
-t.ensureIndex({b:1});
+t.ensureIndex({b: 1});
-for( i = 0; i < 100; ++i ) {
- t.save( {a:i,b:i} );
+for (i = 0; i < 100; ++i) {
+ t.save({a: i, b: i});
}
// These large documents will not be part of the initial set of "top 100" matches, and they will
// not be part of the final set of "top 100" matches returned to the client. However, they are an
// intermediate set of "top 100" matches and should trigger an in memory sort capacity exception.
-big = new Array( 1024 * 1024 ).toString();
-for( i = 100; i < 200; ++i ) {
- t.save( {a:i,b:i,big:big} );
+big = new Array(1024 * 1024).toString();
+for (i = 100; i < 200; ++i) {
+ t.save({a: i, b: i, big: big});
}
-for( i = 200; i < 300; ++i ) {
- t.save( {a:i,b:i} );
+for (i = 200; i < 300; ++i) {
+ t.save({a: i, b: i});
}
-assert.throws( function() { t.find().sort( {a:-1} ).hint( {b:1} ).limit( 100 ).itcount(); } );
-assert.throws( function() { t.find().sort( {a:-1} ).hint( {b:1} ).showDiskLoc().limit( 100 ).itcount(); } );
+assert.throws(function() {
+ t.find().sort({a: -1}).hint({b: 1}).limit(100).itcount();
+});
+assert.throws(function() {
+ t.find().sort({a: -1}).hint({b: 1}).showDiskLoc().limit(100).itcount();
+});
t.drop(); \ No newline at end of file
diff --git a/jstests/core/sortc.js b/jstests/core/sortc.js
index f9aa202508b..e2443599955 100644
--- a/jstests/core/sortc.js
+++ b/jstests/core/sortc.js
@@ -3,35 +3,35 @@
t = db.jstests_sortc;
t.drop();
-t.save( {a:1} );
-t.save( {a:2} );
+t.save({a: 1});
+t.save({a: 2});
-function checkA( a, sort, skip, query ) {
+function checkA(a, sort, skip, query) {
query = query || {};
- assert.eq( a, t.find( query ).sort( sort ).skip( skip )[ 0 ].a );
+ assert.eq(a, t.find(query).sort(sort).skip(skip)[0].a);
}
function checkSortAndSkip() {
- checkA( 1, {a:1}, 0 );
- checkA( 2, {a:1}, 1 );
+ checkA(1, {a: 1}, 0);
+ checkA(2, {a: 1}, 1);
- checkA( 1, {a:1}, 0, {a:{$gt:0},b:null} );
- checkA( 2, {a:1}, 1, {a:{$gt:0},b:null} );
+ checkA(1, {a: 1}, 0, {a: {$gt: 0}, b: null});
+ checkA(2, {a: 1}, 1, {a: {$gt: 0}, b: null});
- checkA( 2, {a:-1}, 0 );
- checkA( 1, {a:-1}, 1 );
+ checkA(2, {a: -1}, 0);
+ checkA(1, {a: -1}, 1);
- checkA( 2, {a:-1}, 0, {a:{$gt:0},b:null} );
- checkA( 1, {a:-1}, 1, {a:{$gt:0},b:null} );
+ checkA(2, {a: -1}, 0, {a: {$gt: 0}, b: null});
+ checkA(1, {a: -1}, 1, {a: {$gt: 0}, b: null});
- checkA( 1, {$natural:1}, 0 );
- checkA( 2, {$natural:1}, 1 );
+ checkA(1, {$natural: 1}, 0);
+ checkA(2, {$natural: 1}, 1);
- checkA( 2, {$natural:-1}, 0 );
- checkA( 1, {$natural:-1}, 1 );
+ checkA(2, {$natural: -1}, 0);
+ checkA(1, {$natural: -1}, 1);
}
checkSortAndSkip();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
checkSortAndSkip();
diff --git a/jstests/core/sortd.js b/jstests/core/sortd.js
index 963d32b0ca4..7012915f3ca 100644
--- a/jstests/core/sortd.js
+++ b/jstests/core/sortd.js
@@ -2,69 +2,68 @@
t = db.jstests_sortd;
-function checkNumSorted( n, query ) {
+function checkNumSorted(n, query) {
docs = query.toArray();
- assert.eq( n, docs.length );
- for( i = 1; i < docs.length; ++i ) {
- assert.lte( docs[ i-1 ].a, docs[ i ].a );
+ assert.eq(n, docs.length);
+ for (i = 1; i < docs.length; ++i) {
+ assert.lte(docs[i - 1].a, docs[i].a);
}
}
-
// Test results added by ordered and unordered plans, unordered plan finishes.
t.drop();
-t.save( {a:[1,2,3,4,5]} );
-t.save( {a:10} );
-t.ensureIndex( {a:1} );
+t.save({a: [1, 2, 3, 4, 5]});
+t.save({a: 10});
+t.ensureIndex({a: 1});
-assert.eq( 2, t.find( {a:{$gt:0}} ).sort( {a:1} ).itcount() );
-assert.eq( 2, t.find( {a:{$gt:0},b:null} ).sort( {a:1} ).itcount() );
+assert.eq(2, t.find({a: {$gt: 0}}).sort({a: 1}).itcount());
+assert.eq(2, t.find({a: {$gt: 0}, b: null}).sort({a: 1}).itcount());
// Test results added by ordered and unordered plans, ordered plan finishes.
t.drop();
-t.save( {a:1} );
-t.save( {a:10} );
-for( i = 2; i <= 9; ++i ) {
- t.save( {a:i} );
+t.save({a: 1});
+t.save({a: 10});
+for (i = 2; i <= 9; ++i) {
+ t.save({a: i});
}
-for( i = 0; i < 30; ++i ) {
- t.save( {a:100} );
+for (i = 0; i < 30; ++i) {
+ t.save({a: 100});
}
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
-checkNumSorted( 10, t.find( {a:{$gte:0,$lte:10}} ).sort( {a:1} ) );
-checkNumSorted( 10, t.find( {a:{$gte:0,$lte:10},b:null} ).sort( {a:1} ) );
+checkNumSorted(10, t.find({a: {$gte: 0, $lte: 10}}).sort({a: 1}));
+checkNumSorted(10, t.find({a: {$gte: 0, $lte: 10}, b: null}).sort({a: 1}));
-// Test results added by ordered and unordered plans, ordered plan finishes and continues with getmore.
+// Test results added by ordered and unordered plans, ordered plan finishes and continues with
+// getmore.
t.drop();
-t.save( {a:1} );
-t.save( {a:200} );
-for( i = 2; i <= 199; ++i ) {
- t.save( {a:i} );
+t.save({a: 1});
+t.save({a: 200});
+for (i = 2; i <= 199; ++i) {
+ t.save({a: i});
}
-for( i = 0; i < 30; ++i ) {
- t.save( {a:2000} );
+for (i = 0; i < 30; ++i) {
+ t.save({a: 2000});
}
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
-checkNumSorted( 200, t.find( {a:{$gte:0,$lte:200}} ).sort( {a:1} ) );
-checkNumSorted( 200, t.find( {a:{$gte:0,$lte:200},b:null} ).sort( {a:1} ) );
+checkNumSorted(200, t.find({a: {$gte: 0, $lte: 200}}).sort({a: 1}));
+checkNumSorted(200, t.find({a: {$gte: 0, $lte: 200}, b: null}).sort({a: 1}));
// Test results added by ordered and unordered plans, with unordered results excluded during
// getmore.
t.drop();
-for( i = 399; i >= 0; --i ) {
- t.save( {a:i} );
+for (i = 399; i >= 0; --i) {
+ t.save({a: i});
}
-t.ensureIndex( {a:1} );
-
-checkNumSorted( 400, t.find( {a:{$gte:0,$lte:400},b:null} ).batchSize( 50 ).sort( {a:1} ) );
+t.ensureIndex({a: 1});
+checkNumSorted(400, t.find({a: {$gte: 0, $lte: 400}, b: null}).batchSize(50).sort({a: 1}));
diff --git a/jstests/core/sortf.js b/jstests/core/sortf.js
index 615791e25a5..1cd3449aa4f 100644
--- a/jstests/core/sortf.js
+++ b/jstests/core/sortf.js
@@ -4,17 +4,17 @@
t = db.jstests_sortf;
t.drop();
-t.ensureIndex( {a:1} );
-t.ensureIndex( {b:1} );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
-for( i = 0; i < 100; ++i ) {
- t.save( {a:0,b:0} );
+for (i = 0; i < 100; ++i) {
+ t.save({a: 0, b: 0});
}
-big = new Array( 10 * 1000 * 1000 ).toString();
-for( i = 0; i < 5; ++i ) {
- t.save( {a:1,b:1,big:big} );
+big = new Array(10 * 1000 * 1000).toString();
+for (i = 0; i < 5; ++i) {
+ t.save({a: 1, b: 1, big: big});
}
-assert.eq( 5, t.find( {a:1} ).sort( {b:1} ).itcount() );
+assert.eq(5, t.find({a: 1}).sort({b: 1}).itcount());
t.drop(); \ No newline at end of file
diff --git a/jstests/core/sortg.js b/jstests/core/sortg.js
index ec69d8fc772..726fe9184a6 100644
--- a/jstests/core/sortg.js
+++ b/jstests/core/sortg.js
@@ -3,57 +3,57 @@
t = db.jstests_sortg;
t.drop();
-big = new Array( 1000000 ).toString();
+big = new Array(1000000).toString();
-for( i = 0; i < 100; ++i ) {
- t.save( {b:0} );
+for (i = 0; i < 100; ++i) {
+ t.save({b: 0});
}
-for( i = 0; i < 40; ++i ) {
- t.save( {a:0,x:big} );
+for (i = 0; i < 40; ++i) {
+ t.save({a: 0, x: big});
}
-function memoryException( sortSpec, querySpec ) {
+function memoryException(sortSpec, querySpec) {
querySpec = querySpec || {};
- var ex = assert.throws( function() {
- t.find( querySpec ).sort( sortSpec ).batchSize( 1000 ).itcount();
- } );
- assert( ex.toString().match( /Sort/ ) );
+ var ex = assert.throws(function() {
+ t.find(querySpec).sort(sortSpec).batchSize(1000).itcount();
+ });
+ assert(ex.toString().match(/Sort/));
}
-function noMemoryException( sortSpec, querySpec ) {
+function noMemoryException(sortSpec, querySpec) {
querySpec = querySpec || {};
- t.find( querySpec ).sort( sortSpec ).batchSize( 1000 ).itcount();
+ t.find(querySpec).sort(sortSpec).batchSize(1000).itcount();
}
// Unindexed sorts.
-memoryException( {a:1} );
-memoryException( {b:1} );
+memoryException({a: 1});
+memoryException({b: 1});
// Indexed sorts.
-noMemoryException( {_id:1} );
-noMemoryException( {$natural:1} );
+noMemoryException({_id: 1});
+noMemoryException({$natural: 1});
-assert.eq( 1, t.getIndexes().length );
+assert.eq(1, t.getIndexes().length);
-t.ensureIndex( {a:1} );
-t.ensureIndex( {b:1} );
-t.ensureIndex( {c:1} );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
+t.ensureIndex({c: 1});
-assert.eq( 4, t.getIndexes().length );
+assert.eq(4, t.getIndexes().length);
// These sorts are now indexed.
-noMemoryException( {a:1} );
-noMemoryException( {b:1} );
+noMemoryException({a: 1});
+noMemoryException({b: 1});
// A memory exception is triggered for an unindexed sort involving multiple plans.
-memoryException( {d:1}, {b:null,c:null} );
+memoryException({d: 1}, {b: null, c: null});
// With an indexed plan on _id:1 and an unindexed plan on b:1, the indexed plan
// should succeed even if the unindexed one would exhaust its memory limit.
-noMemoryException( {_id:1}, {b:null} );
+noMemoryException({_id: 1}, {b: null});
// With an unindexed plan on b:1 recorded for a query, the query should be
// retried when the unindexed plan exhausts its memory limit.
-noMemoryException( {_id:1}, {b:null} );
+noMemoryException({_id: 1}, {b: null});
t.drop();
diff --git a/jstests/core/sorth.js b/jstests/core/sorth.js
index e520ee50454..afad8b0cca8 100644
--- a/jstests/core/sorth.js
+++ b/jstests/core/sorth.js
@@ -1,140 +1,136 @@
// Tests for the $in/sort/limit optimization combined with inequality bounds. SERVER-5777
-
t = db.jstests_sorth;
t.drop();
/** Assert that the 'a' and 'b' fields of the documents match. */
-function assertMatch( expectedMatch, match ) {
+function assertMatch(expectedMatch, match) {
if (undefined !== expectedMatch.a) {
- assert.eq( expectedMatch.a, match.a );
+ assert.eq(expectedMatch.a, match.a);
}
if (undefined !== expectedMatch.b) {
- assert.eq( expectedMatch.b, match.b );
+ assert.eq(expectedMatch.b, match.b);
}
}
/** Assert an expected document or array of documents matches the 'matches' array. */
-function assertMatches( expectedMatches, matches ) {
- if ( expectedMatches.length == null ) {
- assertMatch( expectedMatches, matches[ 0 ] );
+function assertMatches(expectedMatches, matches) {
+ if (expectedMatches.length == null) {
+ assertMatch(expectedMatches, matches[0]);
}
- for( i = 0; i < expectedMatches.length; ++i ) {
- assertMatch( expectedMatches[ i ], matches[ i ] );
+ for (i = 0; i < expectedMatches.length; ++i) {
+ assertMatch(expectedMatches[i], matches[i]);
}
}
/** Generate a cursor using global parameters. */
-function find( query ) {
- return t.find( query ).sort( _sort ).limit( _limit ).hint( _hint );
+function find(query) {
+ return t.find(query).sort(_sort).limit(_limit).hint(_hint);
}
/** Check the expected matches for a query. */
-function checkMatches( expectedMatch, query ) {
- result = find( query ).toArray();
- assertMatches( expectedMatch, result );
- var count = find( query ).itcount();
- assert.eq( expectedMatch.length || 1, count );
+function checkMatches(expectedMatch, query) {
+ result = find(query).toArray();
+ assertMatches(expectedMatch, result);
+ var count = find(query).itcount();
+ assert.eq(expectedMatch.length || 1, count);
}
/** Reset data, index, and _sort and _hint globals. */
-function reset( sort, index ) {
+function reset(sort, index) {
t.drop();
- t.save( { a:1, b:1 } );
- t.save( { a:1, b:2 } );
- t.save( { a:1, b:3 } );
- t.save( { a:2, b:0 } );
- t.save( { a:2, b:3 } );
- t.save( { a:2, b:5 } );
- t.ensureIndex( index );
+ t.save({a: 1, b: 1});
+ t.save({a: 1, b: 2});
+ t.save({a: 1, b: 3});
+ t.save({a: 2, b: 0});
+ t.save({a: 2, b: 3});
+ t.save({a: 2, b: 5});
+ t.ensureIndex(index);
_sort = sort;
_hint = index;
}
-function checkForwardDirection( sort, index ) {
- reset( sort, index );
+function checkForwardDirection(sort, index) {
+ reset(sort, index);
_limit = -1;
// Lower bound checks.
- checkMatches( { a:2, b:0 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:0 } } );
- checkMatches( { a:1, b:1 }, { a:{ $in:[ 1, 2 ] }, b:{ $gt:0 } } );
- checkMatches( { a:1, b:1 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:1 } } );
- checkMatches( { a:1, b:2 }, { a:{ $in:[ 1, 2 ] }, b:{ $gt:1 } } );
- checkMatches( { a:1, b:2 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:2 } } );
- checkMatches( { a:1, b:3 }, { a:{ $in:[ 1, 2 ] }, b:{ $gt:2 } } );
- checkMatches( { a:1, b:3 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:3 } } );
- checkMatches( { a:2, b:5 }, { a:{ $in:[ 1, 2 ] }, b:{ $gt:3 } } );
- checkMatches( { a:2, b:5 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:4 } } );
- checkMatches( { a:2, b:5 }, { a:{ $in:[ 1, 2 ] }, b:{ $gt:4 } } );
- checkMatches( { a:2, b:5 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:5 } } );
+ checkMatches({a: 2, b: 0}, {a: {$in: [1, 2]}, b: {$gte: 0}});
+ checkMatches({a: 1, b: 1}, {a: {$in: [1, 2]}, b: {$gt: 0}});
+ checkMatches({a: 1, b: 1}, {a: {$in: [1, 2]}, b: {$gte: 1}});
+ checkMatches({a: 1, b: 2}, {a: {$in: [1, 2]}, b: {$gt: 1}});
+ checkMatches({a: 1, b: 2}, {a: {$in: [1, 2]}, b: {$gte: 2}});
+ checkMatches({a: 1, b: 3}, {a: {$in: [1, 2]}, b: {$gt: 2}});
+ checkMatches({a: 1, b: 3}, {a: {$in: [1, 2]}, b: {$gte: 3}});
+ checkMatches({a: 2, b: 5}, {a: {$in: [1, 2]}, b: {$gt: 3}});
+ checkMatches({a: 2, b: 5}, {a: {$in: [1, 2]}, b: {$gte: 4}});
+ checkMatches({a: 2, b: 5}, {a: {$in: [1, 2]}, b: {$gt: 4}});
+ checkMatches({a: 2, b: 5}, {a: {$in: [1, 2]}, b: {$gte: 5}});
// Upper bound checks.
- checkMatches( { a:2, b:0 }, { a:{ $in:[ 1, 2 ] }, b:{ $lte:0 } } );
- checkMatches( { a:2, b:0 }, { a:{ $in:[ 1, 2 ] }, b:{ $lt:1 } } );
- checkMatches( { a:2, b:0 }, { a:{ $in:[ 1, 2 ] }, b:{ $lte:1 } } );
- checkMatches( { a:2, b:0 }, { a:{ $in:[ 1, 2 ] }, b:{ $lt:3 } } );
+ checkMatches({a: 2, b: 0}, {a: {$in: [1, 2]}, b: {$lte: 0}});
+ checkMatches({a: 2, b: 0}, {a: {$in: [1, 2]}, b: {$lt: 1}});
+ checkMatches({a: 2, b: 0}, {a: {$in: [1, 2]}, b: {$lte: 1}});
+ checkMatches({a: 2, b: 0}, {a: {$in: [1, 2]}, b: {$lt: 3}});
// Lower and upper bounds checks.
- checkMatches( { a:2, b:0 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:0, $lte:0 } } );
- checkMatches( { a:2, b:0 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:0, $lt:1 } } );
- checkMatches( { a:2, b:0 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:0, $lte:1 } } );
- checkMatches( { a:1, b:1 }, { a:{ $in:[ 1, 2 ] }, b:{ $gt:0, $lte:1 } } );
- checkMatches( { a:1, b:2 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:2, $lt:3 } } );
- checkMatches( { a:1, b:3 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:2.5, $lte:3 } } );
- checkMatches( { a:1, b:3 }, { a:{ $in:[ 1, 2 ] }, b:{ $gt:2.5, $lte:3 } } );
+ checkMatches({a: 2, b: 0}, {a: {$in: [1, 2]}, b: {$gte: 0, $lte: 0}});
+ checkMatches({a: 2, b: 0}, {a: {$in: [1, 2]}, b: {$gte: 0, $lt: 1}});
+ checkMatches({a: 2, b: 0}, {a: {$in: [1, 2]}, b: {$gte: 0, $lte: 1}});
+ checkMatches({a: 1, b: 1}, {a: {$in: [1, 2]}, b: {$gt: 0, $lte: 1}});
+ checkMatches({a: 1, b: 2}, {a: {$in: [1, 2]}, b: {$gte: 2, $lt: 3}});
+ checkMatches({a: 1, b: 3}, {a: {$in: [1, 2]}, b: {$gte: 2.5, $lte: 3}});
+ checkMatches({a: 1, b: 3}, {a: {$in: [1, 2]}, b: {$gt: 2.5, $lte: 3}});
// Limit is -2.
_limit = -2;
- checkMatches( [ { a:2, b:0 }, { a:1, b:1 } ],
- { a:{ $in:[ 1, 2 ] }, b:{ $gte:0 } } );
+ checkMatches([{a: 2, b: 0}, {a: 1, b: 1}], {a: {$in: [1, 2]}, b: {$gte: 0}});
// We omit 'a' here because it's not defined whether or not we will see
// {a:2, b:3} or {a:1, b:3} first as our sort is over 'b'.
- checkMatches( [ { a:1, b:2 }, { b:3 } ],
- { a:{ $in:[ 1, 2 ] }, b:{ $gt:1 } } );
- checkMatches( { a:2, b:5 }, { a:{ $in:[ 1, 2 ] }, b:{ $gt:4 } } );
+ checkMatches([{a: 1, b: 2}, {b: 3}], {a: {$in: [1, 2]}, b: {$gt: 1}});
+ checkMatches({a: 2, b: 5}, {a: {$in: [1, 2]}, b: {$gt: 4}});
// With an additional document between the $in values.
- t.save( { a:1.5, b:3 } );
- checkMatches( [ { a:2, b:0 }, { a:1, b:1 } ],
- { a:{ $in:[ 1, 2 ] }, b:{ $gte:0 } } );
+ t.save({a: 1.5, b: 3});
+ checkMatches([{a: 2, b: 0}, {a: 1, b: 1}], {a: {$in: [1, 2]}, b: {$gte: 0}});
}
// Basic test with an index suffix order.
-checkForwardDirection( { b:1 }, { a:1, b:1 } );
+checkForwardDirection({b: 1}, {a: 1, b: 1});
// With an additonal index field.
-checkForwardDirection( { b:1 }, { a:1, b:1, c:1 } );
+checkForwardDirection({b: 1}, {a: 1, b: 1, c: 1});
// With an additonal reverse direction index field.
-checkForwardDirection( { b:1 }, { a:1, b:1, c:-1 } );
+checkForwardDirection({b: 1}, {a: 1, b: 1, c: -1});
// With an additonal ordered index field.
-checkForwardDirection( { b:1, c:1 }, { a:1, b:1, c:1 } );
+checkForwardDirection({b: 1, c: 1}, {a: 1, b: 1, c: 1});
// With an additonal reverse direction ordered index field.
-checkForwardDirection( { b:1, c:-1 }, { a:1, b:1, c:-1 } );
+checkForwardDirection({b: 1, c: -1}, {a: 1, b: 1, c: -1});
-function checkReverseDirection( sort, index ) {
- reset( sort, index );
+function checkReverseDirection(sort, index) {
+ reset(sort, index);
_limit = -1;
- checkMatches( { a:2, b:5 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:0 } } );
- checkMatches( { a:2, b:5 }, { a:{ $in:[ 1, 2 ] }, b:{ $gte:5 } } );
-
- checkMatches( { a:2, b:5 }, { a:{ $in:[ 1, 2 ] }, b:{ $lte:5 } } );
- checkMatches( { a:1, b:3 }, { a:{ $in:[ 1, 2 ] }, b:{ $lt:5 } } );
- checkMatches( { a:1, b:2 }, { a:{ $in:[ 1, 2 ] }, b:{ $lt:3 } } );
- checkMatches( { a:1, b:3 }, { a:{ $in:[ 1, 2 ] }, b:{ $lt:3.1 } } );
- checkMatches( { a:1, b:3 }, { a:{ $in:[ 1, 2 ] }, b:{ $lt:3.5 } } );
- checkMatches( { a:1, b:3 }, { a:{ $in:[ 1, 2 ] }, b:{ $lte:3 } } );
-
- checkMatches( { a:2, b:5 }, { a:{ $in:[ 1, 2 ] }, b:{ $lte:5, $gte:5 } } );
- checkMatches( { a:1, b:1 }, { a:{ $in:[ 1, 2 ] }, b:{ $lt:2, $gte:1 } } );
- checkMatches( { a:1, b:2 }, { a:{ $in:[ 1, 2 ] }, b:{ $lt:3, $gt:1 } } );
- checkMatches( { a:1, b:3 }, { a:{ $in:[ 1, 2 ] }, b:{ $lt:3.5, $gte:3 } } );
- checkMatches( { a:1, b:3 }, { a:{ $in:[ 1, 2 ] }, b:{ $lte:3, $gt:0 } } );
+ checkMatches({a: 2, b: 5}, {a: {$in: [1, 2]}, b: {$gte: 0}});
+ checkMatches({a: 2, b: 5}, {a: {$in: [1, 2]}, b: {$gte: 5}});
+
+ checkMatches({a: 2, b: 5}, {a: {$in: [1, 2]}, b: {$lte: 5}});
+ checkMatches({a: 1, b: 3}, {a: {$in: [1, 2]}, b: {$lt: 5}});
+ checkMatches({a: 1, b: 2}, {a: {$in: [1, 2]}, b: {$lt: 3}});
+ checkMatches({a: 1, b: 3}, {a: {$in: [1, 2]}, b: {$lt: 3.1}});
+ checkMatches({a: 1, b: 3}, {a: {$in: [1, 2]}, b: {$lt: 3.5}});
+ checkMatches({a: 1, b: 3}, {a: {$in: [1, 2]}, b: {$lte: 3}});
+
+ checkMatches({a: 2, b: 5}, {a: {$in: [1, 2]}, b: {$lte: 5, $gte: 5}});
+ checkMatches({a: 1, b: 1}, {a: {$in: [1, 2]}, b: {$lt: 2, $gte: 1}});
+ checkMatches({a: 1, b: 2}, {a: {$in: [1, 2]}, b: {$lt: 3, $gt: 1}});
+ checkMatches({a: 1, b: 3}, {a: {$in: [1, 2]}, b: {$lt: 3.5, $gte: 3}});
+ checkMatches({a: 1, b: 3}, {a: {$in: [1, 2]}, b: {$lte: 3, $gt: 0}});
}
// With a descending order index.
-checkReverseDirection( { b:-1 }, { a:1, b:-1 } );
-checkReverseDirection( { b:-1 }, { a:1, b:-1, c:1 } );
-checkReverseDirection( { b:-1 }, { a:1, b:-1, c:-1 } );
-checkReverseDirection( { b:-1, c:1 }, { a:1, b:-1, c:1 } );
-checkReverseDirection( { b:-1, c:-1 }, { a:1, b:-1, c:-1 } );
+checkReverseDirection({b: -1}, {a: 1, b: -1});
+checkReverseDirection({b: -1}, {a: 1, b: -1, c: 1});
+checkReverseDirection({b: -1}, {a: 1, b: -1, c: -1});
+checkReverseDirection({b: -1, c: 1}, {a: 1, b: -1, c: 1});
+checkReverseDirection({b: -1, c: -1}, {a: 1, b: -1, c: -1});
diff --git a/jstests/core/sorti.js b/jstests/core/sorti.js
index 2e5cfe110d7..b6518818683 100644
--- a/jstests/core/sorti.js
+++ b/jstests/core/sorti.js
@@ -3,23 +3,23 @@
t = db.jstests_sorti;
t.drop();
-t.save( { a:1, b:0 } );
-t.save( { a:3, b:1 } );
-t.save( { a:2, b:2 } );
-t.save( { a:4, b:3 } );
+t.save({a: 1, b: 0});
+t.save({a: 3, b: 1});
+t.save({a: 2, b: 2});
+t.save({a: 4, b: 3});
-function checkBOrder( query ) {
+function checkBOrder(query) {
arr = query.toArray();
order = [];
- for( i in arr ) {
- a = arr[ i ];
- order.push( a.b );
+ for (i in arr) {
+ a = arr[i];
+ order.push(a.b);
}
- assert.eq( [ 0, 2, 1, 3 ], order );
+ assert.eq([0, 2, 1, 3], order);
}
-checkBOrder( t.find().sort( { a:1 } ) );
-checkBOrder( t.find( {}, { _id:0, b:1 } ).sort( { a:1 } ) );
-t.ensureIndex( { b:1 } );
-checkBOrder( t.find( {}, { _id:0, b:1 } ).sort( { a:1 } ) );
-checkBOrder( t.find( {}, { _id:0, b:1 } ).sort( { a:1 } ).hint( { b:1 } ) );
+checkBOrder(t.find().sort({a: 1}));
+checkBOrder(t.find({}, {_id: 0, b: 1}).sort({a: 1}));
+t.ensureIndex({b: 1});
+checkBOrder(t.find({}, {_id: 0, b: 1}).sort({a: 1}));
+checkBOrder(t.find({}, {_id: 0, b: 1}).sort({a: 1}).hint({b: 1}));
diff --git a/jstests/core/sortj.js b/jstests/core/sortj.js
index 7a73829b94e..4d8baa47e8f 100644
--- a/jstests/core/sortj.js
+++ b/jstests/core/sortj.js
@@ -4,14 +4,14 @@
t = db.jstests_sortj;
t.drop();
-t.ensureIndex( { a:1 } );
+t.ensureIndex({a: 1});
-big = new Array( 100000 ).toString();
-for( i = 0; i < 1000; ++i ) {
- t.save( { a:1, b:big } );
+big = new Array(100000).toString();
+for (i = 0; i < 1000; ++i) {
+ t.save({a: 1, b: big});
}
-assert.throws( function() {
- t.find( { a:{ $gte:0 }, c:null } ).sort( { d:1 } ).itcount();
- } );
+assert.throws(function() {
+ t.find({a: {$gte: 0}, c: null}).sort({d: 1}).itcount();
+});
t.drop(); \ No newline at end of file
diff --git a/jstests/core/sortk.js b/jstests/core/sortk.js
index da00fe80ba5..7ecb86fa6df 100644
--- a/jstests/core/sortk.js
+++ b/jstests/core/sortk.js
@@ -5,136 +5,146 @@ t.drop();
function resetCollection() {
t.drop();
- t.save( { a:1, b:1 } );
- t.save( { a:1, b:2 } );
- t.save( { a:1, b:3 } );
- t.save( { a:2, b:4 } );
- t.save( { a:2, b:5 } );
- t.save( { a:2, b:0 } );
+ t.save({a: 1, b: 1});
+ t.save({a: 1, b: 2});
+ t.save({a: 1, b: 3});
+ t.save({a: 2, b: 4});
+ t.save({a: 2, b: 5});
+ t.save({a: 2, b: 0});
}
resetCollection();
-t.ensureIndex( { a:1, b:1 } );
-
-function simpleQuery( extraFields, sort, hint ) {
- query = { a:{ $in:[ 1, 2 ] } };
- Object.extend( query, extraFields );
- sort = sort || { b:1 };
- hint = hint || { a:1, b:1 };
- return t.find( query ).sort( sort ).hint( hint );
+t.ensureIndex({a: 1, b: 1});
+
+function simpleQuery(extraFields, sort, hint) {
+ query = {
+ a: {$in: [1, 2]}
+ };
+ Object.extend(query, extraFields);
+ sort = sort || {
+ b: 1
+ };
+ hint = hint || {
+ a: 1,
+ b: 1
+ };
+ return t.find(query).sort(sort).hint(hint);
}
-function simpleQueryWithLimit( limit ) {
- return simpleQuery().limit( limit );
+function simpleQueryWithLimit(limit) {
+ return simpleQuery().limit(limit);
}
// The limit is -1.
-assert.eq( 0, simpleQueryWithLimit( -1 )[ 0 ].b );
+assert.eq(0, simpleQueryWithLimit(-1)[0].b);
// The limit is -2.
-assert.eq( 0, simpleQueryWithLimit( -2 )[ 0 ].b );
-assert.eq( 1, simpleQueryWithLimit( -2 )[ 1 ].b );
+assert.eq(0, simpleQueryWithLimit(-2)[0].b);
+assert.eq(1, simpleQueryWithLimit(-2)[1].b);
// A skip is applied.
-assert.eq( 1, simpleQueryWithLimit( -1 ).skip( 1 )[ 0 ].b );
+assert.eq(1, simpleQueryWithLimit(-1).skip(1)[0].b);
// No limit is applied.
-assert.eq( 6, simpleQueryWithLimit( 0 ).itcount() );
-assert.eq( 6, simpleQueryWithLimit( 0 ).explain( true ).executionStats.totalKeysExamined );
-assert.eq( 5, simpleQueryWithLimit( 0 ).skip( 1 ).itcount() );
+assert.eq(6, simpleQueryWithLimit(0).itcount());
+assert.eq(6, simpleQueryWithLimit(0).explain(true).executionStats.totalKeysExamined);
+assert.eq(5, simpleQueryWithLimit(0).skip(1).itcount());
// The query has additional constriants, preventing limit optimization.
-assert.eq( 2, simpleQuery( { $where:'this.b>=2' } ).limit( -1 )[ 0 ].b );
+assert.eq(2, simpleQuery({$where: 'this.b>=2'}).limit(-1)[0].b);
// The sort order is the reverse of the index order.
-assert.eq( 5, simpleQuery( {}, { b:-1 } ).limit( -1 )[ 0 ].b );
+assert.eq(5, simpleQuery({}, {b: -1}).limit(-1)[0].b);
// The sort order is the reverse of the index order on a constrained field.
-assert.eq( 0, simpleQuery( {}, { a:-1, b:1 } ).limit( -1 )[ 0 ].b );
+assert.eq(0, simpleQuery({}, {a: -1, b: 1}).limit(-1)[0].b);
// Without a hint, multiple cursors are attempted.
-assert.eq( 0, t.find( { a:{ $in:[ 1, 2 ] } } ).sort( { b:1 } ).limit( -1 )[ 0 ].b );
-explain = t.find( { a:{ $in:[ 1, 2 ] } } ).sort( { b:1 } ).limit( -1 ).explain( true );
-assert.eq( 1, explain.executionStats.nReturned );
+assert.eq(0, t.find({a: {$in: [1, 2]}}).sort({b: 1}).limit(-1)[0].b);
+explain = t.find({a: {$in: [1, 2]}}).sort({b: 1}).limit(-1).explain(true);
+assert.eq(1, explain.executionStats.nReturned);
// The expected first result now comes from the first interval.
-t.remove( { b:0 } );
-assert.eq( 1, simpleQueryWithLimit( -1 )[ 0 ].b );
+t.remove({b: 0});
+assert.eq(1, simpleQueryWithLimit(-1)[0].b);
// With three intervals.
-function inThreeIntervalQueryWithLimit( limit ) {
- return t.find( { a:{ $in: [ 1, 2, 3 ] } } ).sort( { b:1 } ).hint( { a:1, b:1 } ).limit( limit );
+function inThreeIntervalQueryWithLimit(limit) {
+ return t.find({a: {$in: [1, 2, 3]}}).sort({b: 1}).hint({a: 1, b: 1}).limit(limit);
}
-assert.eq( 1, inThreeIntervalQueryWithLimit( -1 )[ 0 ].b );
-assert.eq( 1, inThreeIntervalQueryWithLimit( -2 )[ 0 ].b );
-assert.eq( 2, inThreeIntervalQueryWithLimit( -2 )[ 1 ].b );
-t.save( { a:3, b:0 } );
-assert.eq( 0, inThreeIntervalQueryWithLimit( -1 )[ 0 ].b );
-assert.eq( 0, inThreeIntervalQueryWithLimit( -2 )[ 0 ].b );
-assert.eq( 1, inThreeIntervalQueryWithLimit( -2 )[ 1 ].b );
+assert.eq(1, inThreeIntervalQueryWithLimit(-1)[0].b);
+assert.eq(1, inThreeIntervalQueryWithLimit(-2)[0].b);
+assert.eq(2, inThreeIntervalQueryWithLimit(-2)[1].b);
+t.save({a: 3, b: 0});
+assert.eq(0, inThreeIntervalQueryWithLimit(-1)[0].b);
+assert.eq(0, inThreeIntervalQueryWithLimit(-2)[0].b);
+assert.eq(1, inThreeIntervalQueryWithLimit(-2)[1].b);
// The index is multikey.
t.remove({});
-t.save( { a:1, b:[ 0, 1, 2 ] } );
-t.save( { a:2, b:[ 0, 1, 2 ] } );
-t.save( { a:1, b:5 } );
-assert.eq( 3, simpleQueryWithLimit( -3 ).itcount() );
+t.save({a: 1, b: [0, 1, 2]});
+t.save({a: 2, b: [0, 1, 2]});
+t.save({a: 1, b: 5});
+assert.eq(3, simpleQueryWithLimit(-3).itcount());
// The index ordering is reversed.
resetCollection();
-t.ensureIndex( { a:1, b:-1 } );
+t.ensureIndex({a: 1, b: -1});
// The sort order is consistent with the index order.
-assert.eq( 5, simpleQuery( {}, { b:-1 }, { a:1, b:-1 } ).limit( -1 )[ 0 ].b );
+assert.eq(5, simpleQuery({}, {b: -1}, {a: 1, b: -1}).limit(-1)[0].b);
// The sort order is the reverse of the index order.
-assert.eq( 0, simpleQuery( {}, { b:1 }, { a:1, b:-1 } ).limit( -1 )[ 0 ].b );
+assert.eq(0, simpleQuery({}, {b: 1}, {a: 1, b: -1}).limit(-1)[0].b);
// An equality constraint precedes the $in constraint.
t.drop();
-t.ensureIndex( { a:1, b:1, c:1 } );
-t.save( { a:0, b:0, c:-1 } );
-t.save( { a:0, b:2, c:1 } );
-t.save( { a:1, b:1, c:1 } );
-t.save( { a:1, b:1, c:2 } );
-t.save( { a:1, b:1, c:3 } );
-t.save( { a:1, b:2, c:4 } );
-t.save( { a:1, b:2, c:5 } );
-t.save( { a:1, b:2, c:0 } );
-
-function eqInQueryWithLimit( limit ) {
- return t.find( { a:1, b:{ $in:[ 1, 2 ] } } ).sort( { c: 1 } ).hint( { a:1, b:1, c:1 } ).
- limit( limit );
+t.ensureIndex({a: 1, b: 1, c: 1});
+t.save({a: 0, b: 0, c: -1});
+t.save({a: 0, b: 2, c: 1});
+t.save({a: 1, b: 1, c: 1});
+t.save({a: 1, b: 1, c: 2});
+t.save({a: 1, b: 1, c: 3});
+t.save({a: 1, b: 2, c: 4});
+t.save({a: 1, b: 2, c: 5});
+t.save({a: 1, b: 2, c: 0});
+
+function eqInQueryWithLimit(limit) {
+ return t.find({a: 1, b: {$in: [1, 2]}}).sort({c: 1}).hint({a: 1, b: 1, c: 1}).limit(limit);
}
-function andEqInQueryWithLimit( limit ) {
- return t.find( { $and:[ { a:1 }, { b:{ $in:[ 1, 2 ] } } ] } ).sort( { c: 1 } ).
- hint( { a:1, b:1, c:1 } ).limit( limit );
+function andEqInQueryWithLimit(limit) {
+ return t.find({$and: [{a: 1}, {b: {$in: [1, 2]}}]})
+ .sort({c: 1})
+ .hint({a: 1, b: 1, c: 1})
+ .limit(limit);
}
// The limit is -1.
-assert.eq( 0, eqInQueryWithLimit( -1 )[ 0 ].c );
-assert.eq( 0, andEqInQueryWithLimit( -1 )[ 0 ].c );
+assert.eq(0, eqInQueryWithLimit(-1)[0].c);
+assert.eq(0, andEqInQueryWithLimit(-1)[0].c);
// The limit is -2.
-assert.eq( 0, eqInQueryWithLimit( -2 )[ 0 ].c );
-assert.eq( 1, eqInQueryWithLimit( -2 )[ 1 ].c );
-assert.eq( 0, andEqInQueryWithLimit( -2 )[ 0 ].c );
-assert.eq( 1, andEqInQueryWithLimit( -2 )[ 1 ].c );
-
-function inQueryWithLimit( limit, sort ) {
- sort = sort || { b:1 };
- return t.find( { a:{ $in:[ 0, 1 ] } } ).sort( sort ).hint( { a:1, b:1, c:1 } ).limit( limit );
+assert.eq(0, eqInQueryWithLimit(-2)[0].c);
+assert.eq(1, eqInQueryWithLimit(-2)[1].c);
+assert.eq(0, andEqInQueryWithLimit(-2)[0].c);
+assert.eq(1, andEqInQueryWithLimit(-2)[1].c);
+
+function inQueryWithLimit(limit, sort) {
+ sort = sort || {
+ b: 1
+ };
+ return t.find({a: {$in: [0, 1]}}).sort(sort).hint({a: 1, b: 1, c: 1}).limit(limit);
}
// The index has two suffix fields unconstrained by the query.
-assert.eq( 0, inQueryWithLimit( -1 )[ 0 ].b );
+assert.eq(0, inQueryWithLimit(-1)[0].b);
// The index has two ordered suffix fields unconstrained by the query.
-assert.eq( 0, inQueryWithLimit( -1, { b:1, c:1 } )[ 0 ].b );
+assert.eq(0, inQueryWithLimit(-1, {b: 1, c: 1})[0].b);
// The index has two ordered suffix fields unconstrained by the query and the limit is -2.
-assert.eq( 0, inQueryWithLimit( -2, { b:1, c:1 } )[ 0 ].b );
-assert.eq( 1, inQueryWithLimit( -2, { b:1, c:1 } )[ 1 ].b );
+assert.eq(0, inQueryWithLimit(-2, {b: 1, c: 1})[0].b);
+assert.eq(1, inQueryWithLimit(-2, {b: 1, c: 1})[1].b);
diff --git a/jstests/core/sortl.js b/jstests/core/sortl.js
index b7cf9b34958..247a175a6f0 100644
--- a/jstests/core/sortl.js
+++ b/jstests/core/sortl.js
@@ -14,18 +14,22 @@
assert.eq(res.next(), {_id: 1, a: 2, b: {"": 2}});
assert.eq(res.hasNext(), false);
- res = db.runCommand({findAndModify: coll.getName(),
- query: {_id: 1},
- update: {$set: {b: 1}},
- sort: {a: 1},
- fields: {c: {$meta: "sortKey"}}});
+ res = db.runCommand({
+ findAndModify: coll.getName(),
+ query: {_id: 1},
+ update: {$set: {b: 1}},
+ sort: {a: 1},
+ fields: {c: {$meta: "sortKey"}}
+ });
assert.commandFailedWithCode(res, ErrorCodes.BadValue, "$meta sortKey update");
- res = db.runCommand({findAndModify: coll.getName(),
- query: {_id: 1},
- remove: true,
- sort: {b: 1},
- fields: {c: {$meta: "sortKey"}}});
+ res = db.runCommand({
+ findAndModify: coll.getName(),
+ query: {_id: 1},
+ remove: true,
+ sort: {b: 1},
+ fields: {c: {$meta: "sortKey"}}
+ });
assert.commandFailedWithCode(res, ErrorCodes.BadValue, "$meta sortKey delete");
coll.drop();
diff --git a/jstests/core/splitvector.js b/jstests/core/splitvector.js
index 5306bd61ff4..233911d29c6 100644
--- a/jstests/core/splitvector.js
+++ b/jstests/core/splitvector.js
@@ -7,27 +7,26 @@
// collection in 'maxChunkSize' approximately-sized chunks. Its asserts fail otherwise.
// @param splitVec: an array with keys for field 'x'
// e.g. [ { x : 1927 }, { x : 3855 }, ...
-// @param numDocs: domain of 'x' field
+// @param numDocs: domain of 'x' field
// e.g. 20000
// @param maxChunkSize is in MBs.
//
-assertChunkSizes = function ( splitVec , numDocs , maxChunkSize , msg ){
- splitVec = [{ x: -1 }].concat( splitVec );
- splitVec.push( { x: numDocs+1 } );
- for ( i=0; i<splitVec.length-1; i++) {
+assertChunkSizes = function(splitVec, numDocs, maxChunkSize, msg) {
+ splitVec = [{x: -1}].concat(splitVec);
+ splitVec.push({x: numDocs + 1});
+ for (i = 0; i < splitVec.length - 1; i++) {
min = splitVec[i];
- max = splitVec[i+1];
+ max = splitVec[i + 1];
var avgObjSize = db.jstests_splitvector.stats().avgObjSize;
- size = db.runCommand( { datasize: "test.jstests_splitvector" , min: min , max: max } ).size;
-
+ size = db.runCommand({datasize: "test.jstests_splitvector", min: min, max: max}).size;
+
// It is okay for the last chunk to be smaller. A collection's size does not
// need to be exactly a multiple of maxChunkSize.
- if ( i < splitVec.length - 2 ) {
+ if (i < splitVec.length - 2) {
// We are within one object of the correct chunk size.
- assert.lt( Math.abs(maxChunkSize - size), avgObjSize , "A"+i );
- }
- else {
- assert.gt( maxChunkSize , size , "A"+i , msg + "b" );
+ assert.lt(Math.abs(maxChunkSize - size), avgObjSize, "A" + i);
+ } else {
+ assert.gt(maxChunkSize, size, "A" + i, msg + "b");
}
}
};
@@ -36,15 +35,15 @@ assertChunkSizes = function ( splitVec , numDocs , maxChunkSize , msg ){
// This is useful for checking that splitPoints have the same format as the original key pattern,
// even when sharding on a prefix key.
// Not very efficient, so only call when # of field names is small
-var assertFieldNamesMatch = function( splitPoint , keyPattern ){
- for ( var p in splitPoint ) {
- if( splitPoint.hasOwnProperty( p ) ) {
- assert( keyPattern.hasOwnProperty( p ) , "property " + p + " not in keyPattern" );
+var assertFieldNamesMatch = function(splitPoint, keyPattern) {
+ for (var p in splitPoint) {
+ if (splitPoint.hasOwnProperty(p)) {
+ assert(keyPattern.hasOwnProperty(p), "property " + p + " not in keyPattern");
}
}
- for ( var p in keyPattern ) {
- if( keyPattern.hasOwnProperty( p ) ){
- assert( splitPoint.hasOwnProperty( p ) , "property " + p + " not in splitPoint" );
+ for (var p in keyPattern) {
+ if (keyPattern.hasOwnProperty(p)) {
+ assert(splitPoint.hasOwnProperty(p), "property " + p + " not in splitPoint");
}
}
};
@@ -60,54 +59,63 @@ f = db.jstests_splitvector;
resetCollection();
// -------------------------
-// Case 1: missing parameters
-
-assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" } ).ok , "1a" );
-assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" , maxChunkSize: 1} ).ok , "1b" );
+// Case 1: missing parameters
+assert.eq(false, db.runCommand({splitVector: "test.jstests_splitvector"}).ok, "1a");
+assert.eq(false,
+ db.runCommand({splitVector: "test.jstests_splitvector", maxChunkSize: 1}).ok,
+ "1b");
// -------------------------
// Case 2: missing index
-assert.eq( false, db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } ).ok , "2");
-
+assert.eq(false,
+ db.runCommand(
+ {splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1}).ok,
+ "2");
// -------------------------
// Case 3: empty collection
-f.ensureIndex( { x: 1} );
-assert.eq( [], db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } ).splitKeys , "3");
-
+f.ensureIndex({x: 1});
+assert.eq(
+ [],
+ db.runCommand({splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1})
+ .splitKeys,
+ "3");
// -------------------------
// Case 4: uniform collection
resetCollection();
-f.ensureIndex( { x: 1 } );
+f.ensureIndex({x: 1});
var case4 = function() {
// Get baseline document size
filler = "";
- while( filler.length < 500 ) filler += "a";
- f.save( { x: 0, y: filler } );
- docSize = db.runCommand( { datasize: "test.jstests_splitvector" } ).size;
- assert.gt( docSize, 500 , "4a" );
+ while (filler.length < 500)
+ filler += "a";
+ f.save({x: 0, y: filler});
+ docSize = db.runCommand({datasize: "test.jstests_splitvector"}).size;
+ assert.gt(docSize, 500, "4a");
// Fill collection and get split vector for 1MB maxChunkSize
numDocs = 4500;
- for( i=1; i<numDocs; i++ ){
- f.save( { x: i, y: filler } );
+ for (i = 1; i < numDocs; i++) {
+ f.save({x: i, y: filler});
}
- res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } );
+ res = db.runCommand(
+ {splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1});
// splitVector aims at getting half-full chunks after split
- factor = 0.5;
-
- assert.eq( true , res.ok , "4b" );
- assert.close( numDocs*docSize / ((1<<20) * factor), res.splitKeys.length , "num split keys" , -1 );
- assertChunkSizes( res.splitKeys , numDocs, (1<<20) * factor , "4d" );
- for( i=0; i < res.splitKeys.length; i++ ){
- assertFieldNamesMatch( res.splitKeys[i] , {x : 1} );
+ factor = 0.5;
+
+ assert.eq(true, res.ok, "4b");
+ assert.close(
+ numDocs * docSize / ((1 << 20) * factor), res.splitKeys.length, "num split keys", -1);
+ assertChunkSizes(res.splitKeys, numDocs, (1 << 20) * factor, "4d");
+ for (i = 0; i < res.splitKeys.length; i++) {
+ assertFieldNamesMatch(res.splitKeys[i], {x: 1});
}
};
case4();
@@ -116,20 +124,25 @@ case4();
// Case 5: limit number of split points
resetCollection();
-f.ensureIndex( { x: 1 } );
+f.ensureIndex({x: 1});
var case5 = function() {
// Fill collection and get split vector for 1MB maxChunkSize
numDocs = 4500;
- for( i=1; i<numDocs; i++ ){
- f.save( { x: i, y: filler } );
+ for (i = 1; i < numDocs; i++) {
+ f.save({x: i, y: filler});
}
- res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 , maxSplitPoints: 1} );
-
- assert.eq( true , res.ok , "5a" );
- assert.eq( 1 , res.splitKeys.length , "5b" );
- for( i=0; i < res.splitKeys.length; i++ ){
- assertFieldNamesMatch( res.splitKeys[i] , {x : 1} );
+ res = db.runCommand({
+ splitVector: "test.jstests_splitvector",
+ keyPattern: {x: 1},
+ maxChunkSize: 1,
+ maxSplitPoints: 1
+ });
+
+ assert.eq(true, res.ok, "5a");
+ assert.eq(1, res.splitKeys.length, "5b");
+ for (i = 0; i < res.splitKeys.length; i++) {
+ assertFieldNamesMatch(res.splitKeys[i], {x: 1});
}
};
case5();
@@ -138,20 +151,25 @@ case5();
// Case 6: limit number of objects in a chunk
resetCollection();
-f.ensureIndex( { x: 1 } );
+f.ensureIndex({x: 1});
var case6 = function() {
// Fill collection and get split vector for 1MB maxChunkSize
numDocs = 2000;
- for( i=1; i<numDocs; i++ ){
- f.save( { x: i, y: filler } );
+ for (i = 1; i < numDocs; i++) {
+ f.save({x: i, y: filler});
}
- res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 , maxChunkObjects: 500} );
-
- assert.eq( true , res.ok , "6a" );
- assert.eq( 3 , res.splitKeys.length , "6b" );
- for( i=0; i < res.splitKeys.length; i++ ){
- assertFieldNamesMatch( res.splitKeys[i] , {x : 1} );
+ res = db.runCommand({
+ splitVector: "test.jstests_splitvector",
+ keyPattern: {x: 1},
+ maxChunkSize: 1,
+ maxChunkObjects: 500
+ });
+
+ assert.eq(true, res.ok, "6a");
+ assert.eq(3, res.splitKeys.length, "6b");
+ for (i = 0; i < res.splitKeys.length; i++) {
+ assertFieldNamesMatch(res.splitKeys[i], {x: 1});
}
};
case6();
@@ -161,24 +179,25 @@ case6();
// [1111111111111111,2,3)
resetCollection();
-f.ensureIndex( { x: 1 } );
+f.ensureIndex({x: 1});
var case7 = function() {
// Fill collection and get split vector for 1MB maxChunkSize
numDocs = 2100;
- for( i=1; i<numDocs; i++ ){
- f.save( { x: 1, y: filler } );
+ for (i = 1; i < numDocs; i++) {
+ f.save({x: 1, y: filler});
}
- for( i=1; i<10; i++ ){
- f.save( { x: 2, y: filler } );
+ for (i = 1; i < 10; i++) {
+ f.save({x: 2, y: filler});
}
- res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } );
+ res = db.runCommand(
+ {splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1});
- assert.eq( true , res.ok , "7a" );
- assert.eq( 2 , res.splitKeys[0].x, "7b");
- for( i=0; i < res.splitKeys.length; i++ ){
- assertFieldNamesMatch( res.splitKeys[i] , {x : 1} );
+ assert.eq(true, res.ok, "7a");
+ assert.eq(2, res.splitKeys[0].x, "7b");
+ for (i = 0; i < res.splitKeys.length; i++) {
+ assertFieldNamesMatch(res.splitKeys[i], {x: 1});
}
};
case7();
@@ -188,30 +207,31 @@ case7();
// [1, 22222222222222, 3)
resetCollection();
-f.ensureIndex( { x: 1 } );
+f.ensureIndex({x: 1});
var case8 = function() {
- for( i=1; i<10; i++ ){
- f.save( { x: 1, y: filler } );
+ for (i = 1; i < 10; i++) {
+ f.save({x: 1, y: filler});
}
numDocs = 2100;
- for( i=1; i<numDocs; i++ ){
- f.save( { x: 2, y: filler } );
+ for (i = 1; i < numDocs; i++) {
+ f.save({x: 2, y: filler});
}
- for( i=1; i<10; i++ ){
- f.save( { x: 3, y: filler } );
+ for (i = 1; i < 10; i++) {
+ f.save({x: 3, y: filler});
}
- res = db.runCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , maxChunkSize: 1 } );
+ res = db.runCommand(
+ {splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1});
- assert.eq( true , res.ok , "8a" );
- assert.eq( 2 , res.splitKeys.length , "8b" );
- assert.eq( 2 , res.splitKeys[0].x , "8c" );
- assert.eq( 3 , res.splitKeys[1].x , "8d" );
- for( i=0; i < res.splitKeys.length; i++ ){
- assertFieldNamesMatch( res.splitKeys[i] , {x : 1} );
+ assert.eq(true, res.ok, "8a");
+ assert.eq(2, res.splitKeys.length, "8b");
+ assert.eq(2, res.splitKeys[0].x, "8c");
+ assert.eq(3, res.splitKeys[1].x, "8d");
+ for (i = 0; i < res.splitKeys.length; i++) {
+ assertFieldNamesMatch(res.splitKeys[i], {x: 1});
}
};
case8();
@@ -221,30 +241,31 @@ case8();
//
resetCollection();
-f.ensureIndex( { x: 1 } );
+f.ensureIndex({x: 1});
var case9 = function() {
- f.save( { x: 1 } );
- f.save( { x: 2 } );
- f.save( { x: 3 } );
-
- assert.eq( 3 , f.count() );
- print( f.getFullName() );
-
- res = db.runCommand( { splitVector: f.getFullName() , keyPattern: {x:1} , force : true } );
-
- assert.eq( true , res.ok , "9a" );
- assert.eq( 1 , res.splitKeys.length , "9b" );
- assert.eq( 2 , res.splitKeys[0].x , "9c" );
-
- if ( db.runCommand( "isMaster" ).msg != "isdbgrid" ) {
- res = db.adminCommand( { splitVector: "test.jstests_splitvector" , keyPattern: {x:1} , force : true } );
-
- assert.eq( true , res.ok , "9a: " + tojson(res) );
- assert.eq( 1 , res.splitKeys.length , "9b: " + tojson(res) );
- assert.eq( 2 , res.splitKeys[0].x , "9c: " + tojson(res) );
- for( i=0; i < res.splitKeys.length; i++ ){
- assertFieldNamesMatch( res.splitKeys[i] , {x : 1} );
+ f.save({x: 1});
+ f.save({x: 2});
+ f.save({x: 3});
+
+ assert.eq(3, f.count());
+ print(f.getFullName());
+
+ res = db.runCommand({splitVector: f.getFullName(), keyPattern: {x: 1}, force: true});
+
+ assert.eq(true, res.ok, "9a");
+ assert.eq(1, res.splitKeys.length, "9b");
+ assert.eq(2, res.splitKeys[0].x, "9c");
+
+ if (db.runCommand("isMaster").msg != "isdbgrid") {
+ res = db.adminCommand(
+ {splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, force: true});
+
+ assert.eq(true, res.ok, "9a: " + tojson(res));
+ assert.eq(1, res.splitKeys.length, "9b: " + tojson(res));
+ assert.eq(2, res.splitKeys[0].x, "9c: " + tojson(res));
+ for (i = 0; i < res.splitKeys.length; i++) {
+ assertFieldNamesMatch(res.splitKeys[i], {x: 1});
}
}
};
@@ -255,51 +276,51 @@ case9();
//
resetCollection();
-f.ensureIndex( { x: 1, y: 1 } );
+f.ensureIndex({x: 1, y: 1});
case4();
resetCollection();
-f.ensureIndex( { x: 1, y: -1 , z : 1 } );
+f.ensureIndex({x: 1, y: -1, z: 1});
case4();
resetCollection();
-f.ensureIndex( { x: 1, y: 1 } );
+f.ensureIndex({x: 1, y: 1});
case5();
resetCollection();
-f.ensureIndex( { x: 1, y: -1 , z : 1 } );
+f.ensureIndex({x: 1, y: -1, z: 1});
case5();
resetCollection();
-f.ensureIndex( { x: 1, y: 1 } );
+f.ensureIndex({x: 1, y: 1});
case6();
resetCollection();
-f.ensureIndex( { x: 1, y: -1 , z : 1 } );
+f.ensureIndex({x: 1, y: -1, z: 1});
case6();
resetCollection();
-f.ensureIndex( { x: 1, y: 1 } );
+f.ensureIndex({x: 1, y: 1});
case7();
resetCollection();
-f.ensureIndex( { x: 1, y: -1 , z : 1 } );
+f.ensureIndex({x: 1, y: -1, z: 1});
case7();
resetCollection();
-f.ensureIndex( { x: 1, y: 1 } );
+f.ensureIndex({x: 1, y: 1});
case8();
resetCollection();
-f.ensureIndex( { x: 1, y: -1 , z : 1 } );
+f.ensureIndex({x: 1, y: -1, z: 1});
case8();
resetCollection();
-f.ensureIndex( { x: 1, y: 1 } );
+f.ensureIndex({x: 1, y: 1});
case9();
resetCollection();
-f.ensureIndex( { x: 1, y: -1 , z : 1 } );
+f.ensureIndex({x: 1, y: -1, z: 1});
case9();
print("PASSED");
diff --git a/jstests/core/stages_and_hash.js b/jstests/core/stages_and_hash.js
index 3bf1bde4951..8dcc8cf1345 100644
--- a/jstests/core/stages_and_hash.js
+++ b/jstests/core/stages_and_hash.js
@@ -13,25 +13,49 @@ t.ensureIndex({bar: 1});
t.ensureIndex({baz: 1});
// Scan foo <= 20
-ixscan1 = {ixscan: {args:{name: "stages_and_hashed", keyPattern:{foo: 1},
- startKey: {"": 20}, endKey: {},
- endKeyInclusive: true, direction: -1}}};
+ixscan1 = {
+ ixscan: {
+ args: {
+ name: "stages_and_hashed",
+ keyPattern: {foo: 1},
+ startKey: {"": 20},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: -1
+ }
+ }
+};
// Scan bar >= 40
-ixscan2 = {ixscan: {args:{name: "stages_and_hashed", keyPattern:{bar: 1},
- startKey: {"": 40}, endKey: {},
- endKeyInclusive: true, direction: 1}}};
+ixscan2 = {
+ ixscan: {
+ args: {
+ name: "stages_and_hashed",
+ keyPattern: {bar: 1},
+ startKey: {"": 40},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: 1
+ }
+ }
+};
// bar = 50 - foo
// Intersection is (foo=0 bar=50, foo=1 bar=49, ..., foo=10 bar=40)
-andix1ix2 = {andHash: {args: { nodes: [ixscan1, ixscan2]}}};
+andix1ix2 = {
+ andHash: {args: {nodes: [ixscan1, ixscan2]}}
+};
res = db.runCommand({stageDebug: {plan: andix1ix2, collection: collname}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 11);
// Filter predicates from 2 indices. Tests that we union the idx info.
-andix1ix2filter = {fetch: {filter: {bar: {$in: [45, 46, 48]}, foo: {$in: [4,5,6]}},
- args: {node: {andHash: {args: {nodes: [ixscan1, ixscan2]}}}}}};
+andix1ix2filter = {
+ fetch: {
+ filter: {bar: {$in: [45, 46, 48]}, foo: {$in: [4, 5, 6]}},
+ args: {node: {andHash: {args: {nodes: [ixscan1, ixscan2]}}}}
+ }
+};
res = db.runCommand({stageDebug: {collection: collname, plan: andix1ix2filter}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 2);
diff --git a/jstests/core/stages_and_sorted.js b/jstests/core/stages_and_sorted.js
index e6e493de5f7..c29db7ce2eb 100644
--- a/jstests/core/stages_and_sorted.js
+++ b/jstests/core/stages_and_sorted.js
@@ -10,9 +10,9 @@ for (var i = 0; i < N; ++i) {
t.insert({baz: 12});
t.insert({bar: 1});
// This is the only thing that should be outputted in the and.
- t.insert({foo: 1, bar:1, baz: 12});
+ t.insert({foo: 1, bar: 1, baz: 12});
t.insert({bar: 1});
- t.insert({bar:1, baz: 12});
+ t.insert({bar: 1, baz: 12});
t.insert({baz: 12});
t.insert({foo: 1, baz: 12});
t.insert({baz: 12});
@@ -23,29 +23,60 @@ t.ensureIndex({bar: 1});
t.ensureIndex({baz: 1});
// Scan foo == 1
-ixscan1 = {ixscan: {args:{name: "stages_and_sorted", keyPattern:{foo: 1},
- startKey: {"": 1}, endKey: {"": 1},
- endKeyInclusive: true, direction: 1}}};
+ixscan1 = {
+ ixscan: {
+ args: {
+ name: "stages_and_sorted",
+ keyPattern: {foo: 1},
+ startKey: {"": 1},
+ endKey: {"": 1},
+ endKeyInclusive: true,
+ direction: 1
+ }
+ }
+};
// Scan bar == 1
-ixscan2 = {ixscan: {args:{name: "stages_and_sorted", keyPattern:{bar: 1},
- startKey: {"": 1}, endKey: {"": 1},
- endKeyInclusive: true, direction: 1}}};
+ixscan2 = {
+ ixscan: {
+ args: {
+ name: "stages_and_sorted",
+ keyPattern: {bar: 1},
+ startKey: {"": 1},
+ endKey: {"": 1},
+ endKeyInclusive: true,
+ direction: 1
+ }
+ }
+};
// Scan baz == 12
-ixscan3 = {ixscan: {args:{name: "stages_and_sorted", keyPattern:{baz: 1},
- startKey: {"": 12}, endKey: {"": 12},
- endKeyInclusive: true, direction: 1}}};
+ixscan3 = {
+ ixscan: {
+ args: {
+ name: "stages_and_sorted",
+ keyPattern: {baz: 1},
+ startKey: {"": 12},
+ endKey: {"": 12},
+ endKeyInclusive: true,
+ direction: 1
+ }
+ }
+};
// Intersect foo==1 with bar==1 with baz==12.
-andix1ix2 = {andSorted: {args: {nodes: [ixscan1, ixscan2, ixscan3]}}};
+andix1ix2 = {
+ andSorted: {args: {nodes: [ixscan1, ixscan2, ixscan3]}}
+};
res = db.runCommand({stageDebug: {collection: collname, plan: andix1ix2}});
printjson(res);
assert.eq(res.ok, 1);
assert.eq(res.results.length, N);
// Might as well make sure that hashed does the same thing.
-andix1ix2hash = {andHash: {args: {nodes: [ixscan1, ixscan2, ixscan3]}}};
+andix1ix2hash = {
+ andHash: {args: {nodes: [ixscan1, ixscan2, ixscan3]}}
+};
res = db.runCommand({stageDebug: {collection: collname, plan: andix1ix2hash}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, N);
diff --git a/jstests/core/stages_collection_scan.js b/jstests/core/stages_collection_scan.js
index fddd22f624a..47fb6edbdde 100644
--- a/jstests/core/stages_collection_scan.js
+++ b/jstests/core/stages_collection_scan.js
@@ -8,7 +8,9 @@ for (var i = 0; i < N; ++i) {
t.insert({foo: i});
}
-forward = {cscan: {args: {direction: 1}}};
+forward = {
+ cscan: {args: {direction: 1}}
+};
res = db.runCommand({stageDebug: {collection: collname, plan: forward}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, N);
@@ -16,23 +18,27 @@ assert.eq(res.results[0].foo, 0);
assert.eq(res.results[49].foo, 49);
// And, backwards.
-backward = {cscan: {args: {direction: -1}}};
+backward = {
+ cscan: {args: {direction: -1}}
+};
res = db.runCommand({stageDebug: {collection: collname, plan: backward}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, N);
assert.eq(res.results[0].foo, 49);
assert.eq(res.results[49].foo, 0);
-forwardFiltered = {cscan: {args: {direction: 1},
- filter: {foo: {$lt: 25}}}};
+forwardFiltered = {
+ cscan: {args: {direction: 1}, filter: {foo: {$lt: 25}}}
+};
res = db.runCommand({stageDebug: {collection: collname, plan: forwardFiltered}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 25);
assert.eq(res.results[0].foo, 0);
assert.eq(res.results[24].foo, 24);
-backwardFiltered = {cscan: {args: {direction: -1},
- filter: {foo: {$lt: 25}}}};
+backwardFiltered = {
+ cscan: {args: {direction: -1}, filter: {foo: {$lt: 25}}}
+};
res = db.runCommand({stageDebug: {collection: collname, plan: backwardFiltered}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 25);
diff --git a/jstests/core/stages_delete.js b/jstests/core/stages_delete.js
index ddfff2c2ea5..1624b1fcc6a 100644
--- a/jstests/core/stages_delete.js
+++ b/jstests/core/stages_delete.js
@@ -1,6 +1,8 @@
// Test basic delete stage functionality.
var coll = db.stages_delete;
-var collScanStage = {cscan: {args: {direction: 1}, filter: {deleteMe: true}}};
+var collScanStage = {
+ cscan: {args: {direction: 1}, filter: {deleteMe: true}}
+};
var deleteStage;
// Test delete stage with isMulti: true.
@@ -8,7 +10,9 @@ coll.drop();
assert.writeOK(coll.insert({deleteMe: true}));
assert.writeOK(coll.insert({deleteMe: true}));
assert.writeOK(coll.insert({deleteMe: false}));
-deleteStage = {delete: {args: {node: collScanStage, isMulti: true}}};
+deleteStage = {
+ delete: {args: {node: collScanStage, isMulti: true}}
+};
assert.eq(coll.count(), 3);
assert.commandWorked(db.runCommand({stageDebug: {collection: coll.getName(), plan: deleteStage}}));
assert.eq(coll.count(), 1);
@@ -19,7 +23,9 @@ coll.drop();
assert.writeOK(coll.insert({deleteMe: true}));
assert.writeOK(coll.insert({deleteMe: true}));
assert.writeOK(coll.insert({deleteMe: false}));
-deleteStage = {delete: {args: {node: collScanStage, isMulti: false}}};
+deleteStage = {
+ delete: {args: {node: collScanStage, isMulti: false}}
+};
assert.eq(coll.count(), 3);
assert.commandWorked(db.runCommand({stageDebug: {collection: coll.getName(), plan: deleteStage}}));
assert.eq(coll.count(), 2);
diff --git a/jstests/core/stages_fetch.js b/jstests/core/stages_fetch.js
index 2bff065a5d7..7adc52c67c5 100644
--- a/jstests/core/stages_fetch.js
+++ b/jstests/core/stages_fetch.js
@@ -12,20 +12,36 @@ t.ensureIndex({foo: 1});
// 20 <= foo <= 30
// bar == 25 (not covered, should error.)
-ixscan1 = {ixscan: {args:{keyPattern:{foo:1},
- startKey: {"": 20},
- endKey: {"" : 30}, endKeyInclusive: true,
- direction: 1},
- filter: {bar: 25}}};
+ixscan1 = {
+ ixscan: {
+ args: {
+ keyPattern: {foo: 1},
+ startKey: {"": 20},
+ endKey: {"": 30},
+ endKeyInclusive: true,
+ direction: 1
+ },
+ filter: {bar: 25}
+ }
+};
res = db.runCommand({stageDebug: {collection: collname, plan: ixscan1}});
assert.eq(res.ok, 0);
// Now, add a fetch. We should be able to filter on the non-covered field since we fetched the obj.
-ixscan2 = {ixscan: {args:{keyPattern:{foo:1},
- startKey: {"": 20},
- endKey: {"" : 30}, endKeyInclusive: true,
- direction: 1}}};
-fetch = {fetch: {args: {node: ixscan2}, filter: {bar: 25}}};
+ixscan2 = {
+ ixscan: {
+ args: {
+ keyPattern: {foo: 1},
+ startKey: {"": 20},
+ endKey: {"": 30},
+ endKeyInclusive: true,
+ direction: 1
+ }
+ }
+};
+fetch = {
+ fetch: {args: {node: ixscan2}, filter: {bar: 25}}
+};
res = db.runCommand({stageDebug: {collection: collname, plan: fetch}});
printjson(res);
assert.eq(res.ok, 1);
diff --git a/jstests/core/stages_ixscan.js b/jstests/core/stages_ixscan.js
index 3f920f70241..d6b8a7b1aee 100644
--- a/jstests/core/stages_ixscan.js
+++ b/jstests/core/stages_ixscan.js
@@ -12,60 +12,102 @@ t.ensureIndex({foo: 1});
t.ensureIndex({foo: 1, baz: 1});
// foo <= 20
-ixscan1 = {ixscan: {args:{keyPattern:{foo: 1},
- startKey: {"": 20},
- endKey: {}, endKeyInclusive: true,
- direction: -1}}};
+ixscan1 = {
+ ixscan: {
+ args: {
+ keyPattern: {foo: 1},
+ startKey: {"": 20},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: -1
+ }
+ }
+};
res = db.runCommand({stageDebug: {collection: collname, plan: ixscan1}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 21);
// 20 <= foo < 30
-ixscan1 = {ixscan: {args:{keyPattern:{foo: 1},
- startKey: {"": 20},
- endKey: {"" : 30}, endKeyInclusive: false,
- direction: 1}}};
+ixscan1 = {
+ ixscan: {
+ args: {
+ keyPattern: {foo: 1},
+ startKey: {"": 20},
+ endKey: {"": 30},
+ endKeyInclusive: false,
+ direction: 1
+ }
+ }
+};
res = db.runCommand({stageDebug: {collection: collname, plan: ixscan1}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 10);
// 20 <= foo <= 30
-ixscan1 = {ixscan: {args:{keyPattern:{foo: 1},
- startKey: {"": 20},
- endKey: {"" : 30}, endKeyInclusive: true,
- direction: 1}}};
+ixscan1 = {
+ ixscan: {
+ args: {
+ keyPattern: {foo: 1},
+ startKey: {"": 20},
+ endKey: {"": 30},
+ endKeyInclusive: true,
+ direction: 1
+ }
+ }
+};
res = db.runCommand({stageDebug: {collection: collname, plan: ixscan1}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 11);
// 20 <= foo <= 30
// foo == 25
-ixscan1 = {ixscan: {args:{keyPattern:{foo: 1},
- startKey: {"": 20},
- endKey: {"" : 30}, endKeyInclusive: true,
- direction: 1},
- filter: {foo: 25}}};
+ixscan1 = {
+ ixscan: {
+ args: {
+ keyPattern: {foo: 1},
+ startKey: {"": 20},
+ endKey: {"": 30},
+ endKeyInclusive: true,
+ direction: 1
+ },
+ filter: {foo: 25}
+ }
+};
res = db.runCommand({stageDebug: {collection: collname, plan: ixscan1}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 1);
// 20 <= foo <= 30
// baz == 25 (in index so we can match against it.)
-ixscan1 = {ixscan: {args:{keyPattern:{foo:1, baz: 1},
- startKey: {foo: 20, baz: MinKey},
- endKey: {foo: 30, baz: MaxKey}, endKeyInclusive: true,
- direction: 1},
- filter: {baz: 25}}};
+ixscan1 = {
+ ixscan: {
+ args: {
+ keyPattern: {foo: 1, baz: 1},
+ startKey: {foo: 20, baz: MinKey},
+ endKey: {foo: 30, baz: MaxKey},
+ endKeyInclusive: true,
+ direction: 1
+ },
+ filter: {baz: 25}
+ }
+};
res = db.runCommand({stageDebug: {collection: collname, plan: ixscan1}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 1);
// 20 <= foo <= 30
// bar == 25 (not covered, should error.)
-ixscan1 = {ixscan: {args:{keyPattern:{foo:1, baz: 1},
- startKey: {foo: 20, baz: MinKey},
- endKey: {foo: 30, baz: MaxKey}, endKeyInclusive: true,
- direction: 1},
- filter: {bar: 25}}};
+ixscan1 = {
+ ixscan: {
+ args: {
+ keyPattern: {foo: 1, baz: 1},
+ startKey: {foo: 20, baz: MinKey},
+ endKey: {foo: 30, baz: MaxKey},
+ endKeyInclusive: true,
+ direction: 1
+ },
+ filter: {bar: 25}
+ }
+};
res = db.runCommand({stageDebug: {collection: collname, plan: ixscan1}});
assert.eq(res.ok, 0);
diff --git a/jstests/core/stages_limit_skip.js b/jstests/core/stages_limit_skip.js
index 7ca7b89b180..c582cb6b1e4 100644
--- a/jstests/core/stages_limit_skip.js
+++ b/jstests/core/stages_limit_skip.js
@@ -12,11 +12,20 @@ t.ensureIndex({foo: 1});
// foo <= 20, decreasing
// Limit of 5 results.
-ixscan1 = {ixscan: {args:{keyPattern:{foo: 1},
- startKey: {"": 20},
- endKey: {}, endKeyInclusive: true,
- direction: -1}}};
-limit1 = {limit: {args: {node: ixscan1, num: 5}}};
+ixscan1 = {
+ ixscan: {
+ args: {
+ keyPattern: {foo: 1},
+ startKey: {"": 20},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: -1
+ }
+ }
+};
+limit1 = {
+ limit: {args: {node: ixscan1, num: 5}}
+};
res = db.runCommand({stageDebug: {collection: collname, plan: limit1}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 5);
@@ -25,7 +34,9 @@ assert.eq(res.results[4].foo, 16);
// foo <= 20, decreasing
// Skip 5 results.
-skip1 = {skip: {args: {node: ixscan1, num: 5}}};
+skip1 = {
+ skip: {args: {node: ixscan1, num: 5}}
+};
res = db.runCommand({stageDebug: {collection: collname, plan: skip1}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 16);
diff --git a/jstests/core/stages_mergesort.js b/jstests/core/stages_mergesort.js
index 18c52bef40c..5156743078c 100644
--- a/jstests/core/stages_mergesort.js
+++ b/jstests/core/stages_mergesort.js
@@ -9,22 +9,38 @@ for (var i = 0; i < N; ++i) {
t.insert({baz: 1, bar: i});
}
-t.ensureIndex({foo: 1, bar:1});
-t.ensureIndex({baz: 1, bar:1});
+t.ensureIndex({foo: 1, bar: 1});
+t.ensureIndex({baz: 1, bar: 1});
// foo == 1
// We would (internally) use "": MinKey and "": MaxKey for the bar index bounds.
-ixscan1 = {ixscan: {args:{keyPattern:{foo: 1, bar:1},
- startKey: {foo: 1, bar: 0},
- endKey: {foo: 1, bar: 100000}, endKeyInclusive: true,
- direction: 1}}};
+ixscan1 = {
+ ixscan: {
+ args: {
+ keyPattern: {foo: 1, bar: 1},
+ startKey: {foo: 1, bar: 0},
+ endKey: {foo: 1, bar: 100000},
+ endKeyInclusive: true,
+ direction: 1
+ }
+ }
+};
// baz == 1
-ixscan2 = {ixscan: {args:{keyPattern:{baz: 1, bar:1},
- startKey: {baz: 1, bar: 0},
- endKey: {baz: 1, bar: 100000}, endKeyInclusive: true,
- direction: 1}}};
+ixscan2 = {
+ ixscan: {
+ args: {
+ keyPattern: {baz: 1, bar: 1},
+ startKey: {baz: 1, bar: 0},
+ endKey: {baz: 1, bar: 100000},
+ endKeyInclusive: true,
+ direction: 1
+ }
+ }
+};
-mergesort = {mergeSort: {args: {nodes: [ixscan1, ixscan2], pattern: {bar: 1}}}};
+mergesort = {
+ mergeSort: {args: {nodes: [ixscan1, ixscan2], pattern: {bar: 1}}}
+};
res = db.runCommand({stageDebug: {plan: mergesort, collection: collname}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 2 * N);
diff --git a/jstests/core/stages_or.js b/jstests/core/stages_or.js
index 0d4a47c5e06..6ea73efd1ed 100644
--- a/jstests/core/stages_or.js
+++ b/jstests/core/stages_or.js
@@ -13,22 +13,42 @@ t.ensureIndex({bar: 1});
t.ensureIndex({baz: 1});
// baz >= 40
-ixscan1 = {ixscan: {args:{keyPattern:{baz: 1},
- startKey: {"": 40}, endKey: {},
- endKeyInclusive: true, direction: 1}}};
+ixscan1 = {
+ ixscan: {
+ args: {
+ keyPattern: {baz: 1},
+ startKey: {"": 40},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: 1
+ }
+ }
+};
// foo >= 40
-ixscan2 = {ixscan: {args:{keyPattern:{foo: 1},
- startKey: {"": 40}, endKey: {},
- endKeyInclusive: true, direction: 1}}};
+ixscan2 = {
+ ixscan: {
+ args: {
+ keyPattern: {foo: 1},
+ startKey: {"": 40},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: 1
+ }
+ }
+};
// OR of baz and foo. Baz == foo and we dedup.
-orix1ix2 = {or: {args: {nodes: [ixscan1, ixscan2], dedup:true}}};
+orix1ix2 = {
+ or: {args: {nodes: [ixscan1, ixscan2], dedup: true}}
+};
res = db.runCommand({stageDebug: {collection: collname, plan: orix1ix2}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 10);
// No deduping, 2x the results.
-orix1ix2nodd = {or: {args: {nodes: [ixscan1, ixscan2], dedup:false}}};
+orix1ix2nodd = {
+ or: {args: {nodes: [ixscan1, ixscan2], dedup: false}}
+};
res = db.runCommand({stageDebug: {collection: collname, plan: orix1ix2nodd}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 20);
diff --git a/jstests/core/stages_sort.js b/jstests/core/stages_sort.js
index 89b71a28f8b..b6cb5a456af 100644
--- a/jstests/core/stages_sort.js
+++ b/jstests/core/stages_sort.js
@@ -11,13 +11,23 @@ if (false) {
t.ensureIndex({foo: 1});
// Foo <= 20, descending.
- ixscan1 = {ixscan: {args:{name: "stages_sort", keyPattern:{foo: 1},
- startKey: {"": 20},
- endKey: {}, endKeyInclusive: true,
- direction: -1}}};
+ ixscan1 = {
+ ixscan: {
+ args: {
+ name: "stages_sort",
+ keyPattern: {foo: 1},
+ startKey: {"": 20},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: -1
+ }
+ }
+ };
// Sort with foo ascending.
- sort1 = {sort: {args: {node: ixscan1, pattern: {foo: 1}}}};
+ sort1 = {
+ sort: {args: {node: ixscan1, pattern: {foo: 1}}}
+ };
res = db.runCommand({stageDebug: sort1});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 21);
@@ -25,10 +35,10 @@ if (false) {
assert.eq(res.results[20].foo, 20);
// Sort with a limit.
- //sort2 = {sort: {args: {node: ixscan1, pattern: {foo: 1}, limit: 2}}};
- //res = db.runCommand({stageDebug: sort2});
- //assert.eq(res.ok, 1);
- //assert.eq(res.results.length, 2);
- //assert.eq(res.results[0].foo, 0);
- //assert.eq(res.results[1].foo, 1);
+ // sort2 = {sort: {args: {node: ixscan1, pattern: {foo: 1}, limit: 2}}};
+ // res = db.runCommand({stageDebug: sort2});
+ // assert.eq(res.ok, 1);
+ // assert.eq(res.results.length, 2);
+ // assert.eq(res.results[0].foo, 0);
+ // assert.eq(res.results[1].foo, 1);
}
diff --git a/jstests/core/stages_text.js b/jstests/core/stages_text.js
index 6598d135b9f..d38ef316663 100644
--- a/jstests/core/stages_text.js
+++ b/jstests/core/stages_text.js
@@ -9,13 +9,11 @@ t.save({x: "az b x"});
t.ensureIndex({x: "text"});
// We expect to retrieve 'b'
-res = db.runCommand({stageDebug: {collection: collname,
- plan: {text: {args: {search: "b"}}}}});
+res = db.runCommand({stageDebug: {collection: collname, plan: {text: {args: {search: "b"}}}}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 1);
// I have not been indexed yet.
-res = db.runCommand({stageDebug: {collection: collname,
- plan: {text: {args: {search: "hari"}}}}});
+res = db.runCommand({stageDebug: {collection: collname, plan: {text: {args: {search: "hari"}}}}});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 0);
diff --git a/jstests/core/startup_log.js b/jstests/core/startup_log.js
index e1a62991981..3b0cbe3464d 100644
--- a/jstests/core/startup_log.js
+++ b/jstests/core/startup_log.js
@@ -1,76 +1,101 @@
load('jstests/aggregation/extras/utils.js');
(function() {
-'use strict';
+ 'use strict';
-// Check that smallArray is entirely contained by largeArray
-// returns false if a member of smallArray is not in largeArray
-function arrayIsSubset(smallArray, largeArray) {
- for(var i = 0; i < smallArray.length; i++) {
- if(!Array.contains(largeArray, smallArray[i])) {
- print("Could not find " + smallArray[i] + " in largeArray");
- return false;
+ // Check that smallArray is entirely contained by largeArray
+ // returns false if a member of smallArray is not in largeArray
+ function arrayIsSubset(smallArray, largeArray) {
+ for (var i = 0; i < smallArray.length; i++) {
+ if (!Array.contains(largeArray, smallArray[i])) {
+ print("Could not find " + smallArray[i] + " in largeArray");
+ return false;
+ }
}
- }
- return true;
-}
+ return true;
+ }
-// Test startup_log
-var stats = db.getSisterDB( "local" ).startup_log.stats();
-assert(stats.capped);
+ // Test startup_log
+ var stats = db.getSisterDB("local").startup_log.stats();
+ assert(stats.capped);
-var latestStartUpLog = db.getSisterDB( "local" ).startup_log.find().sort( { $natural: -1 } ).limit(1).next();
-var serverStatus = db._adminCommand( "serverStatus" );
-var cmdLine = db._adminCommand( "getCmdLineOpts" ).parsed;
+ var latestStartUpLog =
+ db.getSisterDB("local").startup_log.find().sort({$natural: -1}).limit(1).next();
+ var serverStatus = db._adminCommand("serverStatus");
+ var cmdLine = db._adminCommand("getCmdLineOpts").parsed;
-// Test that the startup log has the expected keys
-var verbose = false;
-var expectedKeys = ["_id", "hostname", "startTime", "startTimeLocal", "cmdLine", "pid", "buildinfo"];
-var keys = Object.keySet(latestStartUpLog);
-assert(arrayEq(expectedKeys, keys, verbose), 'startup_log keys failed');
+ // Test that the startup log has the expected keys
+ var verbose = false;
+ var expectedKeys =
+ ["_id", "hostname", "startTime", "startTimeLocal", "cmdLine", "pid", "buildinfo"];
+ var keys = Object.keySet(latestStartUpLog);
+ assert(arrayEq(expectedKeys, keys, verbose), 'startup_log keys failed');
-// Tests _id implicitly - should be comprised of host-timestamp
-// Setup expected startTime and startTimeLocal from the supplied timestamp
-var _id = latestStartUpLog._id.split('-'); // _id should consist of host-timestamp
-var _idUptime = _id.pop();
-var _idHost = _id.join('-');
-var uptimeSinceEpochRounded = Math.floor(_idUptime/1000) * 1000;
-var startTime = new Date(uptimeSinceEpochRounded); // Expected startTime
+ // Tests _id implicitly - should be comprised of host-timestamp
+ // Setup expected startTime and startTimeLocal from the supplied timestamp
+ var _id = latestStartUpLog._id.split('-'); // _id should consist of host-timestamp
+ var _idUptime = _id.pop();
+ var _idHost = _id.join('-');
+ var uptimeSinceEpochRounded = Math.floor(_idUptime / 1000) * 1000;
+ var startTime = new Date(uptimeSinceEpochRounded); // Expected startTime
-assert.eq(_idHost, latestStartUpLog.hostname, "Hostname doesn't match one from _id");
-assert.eq(serverStatus.host.split(':')[0], latestStartUpLog.hostname, "Hostname doesn't match one in server status");
-assert.closeWithinMS(startTime, latestStartUpLog.startTime,
- "StartTime doesn't match one from _id", 2000); // Expect less than 2 sec delta
-assert.eq(cmdLine, latestStartUpLog.cmdLine, "cmdLine doesn't match that from getCmdLineOpts");
-assert.eq(serverStatus.pid, latestStartUpLog.pid, "pid doesn't match that from serverStatus");
+ assert.eq(_idHost, latestStartUpLog.hostname, "Hostname doesn't match one from _id");
+ assert.eq(serverStatus.host.split(':')[0],
+ latestStartUpLog.hostname,
+ "Hostname doesn't match one in server status");
+ assert.closeWithinMS(startTime,
+ latestStartUpLog.startTime,
+ "StartTime doesn't match one from _id",
+ 2000); // Expect less than 2 sec delta
+ assert.eq(cmdLine, latestStartUpLog.cmdLine, "cmdLine doesn't match that from getCmdLineOpts");
+ assert.eq(serverStatus.pid, latestStartUpLog.pid, "pid doesn't match that from serverStatus");
-// Test buildinfo
-var buildinfo = db.runCommand( "buildinfo" );
-delete buildinfo.ok; // Delete extra meta info not in startup_log
-var isMaster = db._adminCommand( "ismaster" );
+ // Test buildinfo
+ var buildinfo = db.runCommand("buildinfo");
+ delete buildinfo.ok; // Delete extra meta info not in startup_log
+ var isMaster = db._adminCommand("ismaster");
-// Test buildinfo has the expected keys
-var expectedKeys = ["version", "gitVersion", "allocator", "versionArray", "javascriptEngine",
- "openssl", "buildEnvironment", "debug", "maxBsonObjectSize", "bits", "modules" ];
+ // Test buildinfo has the expected keys
+ var expectedKeys = [
+ "version",
+ "gitVersion",
+ "allocator",
+ "versionArray",
+ "javascriptEngine",
+ "openssl",
+ "buildEnvironment",
+ "debug",
+ "maxBsonObjectSize",
+ "bits",
+ "modules"
+ ];
-var keys = Object.keySet(latestStartUpLog.buildinfo);
-// Disabled to check
-assert(arrayIsSubset(expectedKeys, keys), "buildinfo keys failed! \n expected:\t" + expectedKeys + "\n actual:\t" + keys);
-assert.eq(buildinfo, latestStartUpLog.buildinfo, "buildinfo doesn't match that from buildinfo command");
+ var keys = Object.keySet(latestStartUpLog.buildinfo);
+ // Disabled to check
+ assert(arrayIsSubset(expectedKeys, keys),
+ "buildinfo keys failed! \n expected:\t" + expectedKeys + "\n actual:\t" + keys);
+ assert.eq(buildinfo,
+ latestStartUpLog.buildinfo,
+ "buildinfo doesn't match that from buildinfo command");
-// Test version and version Array
-var version = latestStartUpLog.buildinfo.version.split('-')[0];
-var versionArray = latestStartUpLog.buildinfo.versionArray;
-var versionArrayCleaned = versionArray.slice(0, 3);
-if (versionArray[3] == -100) {
- versionArrayCleaned[2] -= 1;
-}
+ // Test version and version Array
+ var version = latestStartUpLog.buildinfo.version.split('-')[0];
+ var versionArray = latestStartUpLog.buildinfo.versionArray;
+ var versionArrayCleaned = versionArray.slice(0, 3);
+ if (versionArray[3] == -100) {
+ versionArrayCleaned[2] -= 1;
+ }
-assert.eq(serverStatus.version, latestStartUpLog.buildinfo.version, "Mongo version doesn't match that from ServerStatus");
-assert.eq(version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray");
-var jsEngine = latestStartUpLog.buildinfo.javascriptEngine;
-assert((jsEngine == "none") || jsEngine.startsWith("mozjs"));
-assert.eq(isMaster.maxBsonObjectSize, latestStartUpLog.buildinfo.maxBsonObjectSize, "maxBsonObjectSize doesn't match one from ismaster");
+ assert.eq(serverStatus.version,
+ latestStartUpLog.buildinfo.version,
+ "Mongo version doesn't match that from ServerStatus");
+ assert.eq(
+ version, versionArrayCleaned.join('.'), "version doesn't match that from the versionArray");
+ var jsEngine = latestStartUpLog.buildinfo.javascriptEngine;
+ assert((jsEngine == "none") || jsEngine.startsWith("mozjs"));
+ assert.eq(isMaster.maxBsonObjectSize,
+ latestStartUpLog.buildinfo.maxBsonObjectSize,
+ "maxBsonObjectSize doesn't match one from ismaster");
})();
diff --git a/jstests/core/storageDetailsCommand.js b/jstests/core/storageDetailsCommand.js
index 12baf9c4b92..cfd370cadaf 100644
--- a/jstests/core/storageDetailsCommand.js
+++ b/jstests/core/storageDetailsCommand.js
@@ -12,7 +12,6 @@ for (var i = 0; i < 3000; ++i) {
function test() {
var result = t.diskStorageStats({numberOfSlices: 100});
-
if (result["code"] === COMMAND_NOT_FOUND_CODE) {
print("storageDetails command not available: skipping");
return;
diff --git a/jstests/core/storefunc.js b/jstests/core/storefunc.js
index fae9e58bfa4..8598e9cc62b 100644
--- a/jstests/core/storefunc.js
+++ b/jstests/core/storefunc.js
@@ -4,42 +4,57 @@ var res;
s = testdb.system.js;
s.remove({});
-assert.eq( 0 , s.count() , "setup - A" );
-
-res = s.save( { _id : "x" , value : "3" } );
-assert( !res.hasWriteError() , "setup - B" );
-assert.eq( 1 , s.count() , "setup - C" );
-
-s.remove( { _id : "x" } );
-assert.eq( 0 , s.count() , "setup - D" );
-s.save( { _id : "x" , value : "4" } );
-assert.eq( 1 , s.count() , "setup - E" );
-
-assert.eq( 4 , s.findOne( { _id : "x" } ).value , "E2 " );
-
-assert.eq( 4 , s.findOne().value , "setup - F" );
-s.update( { _id : "x" } , { $set : { value : 5 } } );
-assert.eq( 1 , s.count() , "setup - G" );
-assert.eq( 5 , s.findOne().value , "setup - H" );
-
-assert.eq( 5 , testdb.eval( "return x" ) , "exec - 1 " );
-
-s.update( { _id : "x" } , { $set : { value : 6 } } );
-assert.eq( 1 , s.count() , "setup2 - A" );
-assert.eq( 6 , s.findOne().value , "setup - B" );
-
-assert.eq( 6 , testdb.eval( "return x" ) , "exec - 2 " );
-
-
-
-s.insert( { _id : "bar" , value : function( z ){ return 17 + z; } } );
-assert.eq( 22 , testdb.eval( "return bar(5);" ) , "exec - 3 " );
-
-assert( s.getIndexKeys().length > 0 , "no indexes" );
-assert( s.getIndexKeys()[0]._id , "no _id index" );
-
-assert.eq( "undefined" , testdb.eval( function(){ return typeof(zzz); } ) , "C1" );
-s.save( { _id : "zzz" , value : 5 } );
-assert.eq( "number" , testdb.eval( function(){ return typeof(zzz); } ) , "C2" );
-s.remove( { _id : "zzz" } );
-assert.eq( "undefined" , testdb.eval( function(){ return typeof(zzz); } ) , "C3" );
+assert.eq(0, s.count(), "setup - A");
+
+res = s.save({_id: "x", value: "3"});
+assert(!res.hasWriteError(), "setup - B");
+assert.eq(1, s.count(), "setup - C");
+
+s.remove({_id: "x"});
+assert.eq(0, s.count(), "setup - D");
+s.save({_id: "x", value: "4"});
+assert.eq(1, s.count(), "setup - E");
+
+assert.eq(4, s.findOne({_id: "x"}).value, "E2 ");
+
+assert.eq(4, s.findOne().value, "setup - F");
+s.update({_id: "x"}, {$set: {value: 5}});
+assert.eq(1, s.count(), "setup - G");
+assert.eq(5, s.findOne().value, "setup - H");
+
+assert.eq(5, testdb.eval("return x"), "exec - 1 ");
+
+s.update({_id: "x"}, {$set: {value: 6}});
+assert.eq(1, s.count(), "setup2 - A");
+assert.eq(6, s.findOne().value, "setup - B");
+
+assert.eq(6, testdb.eval("return x"), "exec - 2 ");
+
+s.insert({
+ _id: "bar",
+ value: function(z) {
+ return 17 + z;
+ }
+});
+assert.eq(22, testdb.eval("return bar(5);"), "exec - 3 ");
+
+assert(s.getIndexKeys().length > 0, "no indexes");
+assert(s.getIndexKeys()[0]._id, "no _id index");
+
+assert.eq("undefined",
+ testdb.eval(function() {
+ return typeof(zzz);
+ }),
+ "C1");
+s.save({_id: "zzz", value: 5});
+assert.eq("number",
+ testdb.eval(function() {
+ return typeof(zzz);
+ }),
+ "C2");
+s.remove({_id: "zzz"});
+assert.eq("undefined",
+ testdb.eval(function() {
+ return typeof(zzz);
+ }),
+ "C3");
diff --git a/jstests/core/string_with_nul_bytes.js b/jstests/core/string_with_nul_bytes.js
index a1f6e395dd2..e72cc0b6dc1 100644
--- a/jstests/core/string_with_nul_bytes.js
+++ b/jstests/core/string_with_nul_bytes.js
@@ -4,6 +4,6 @@ t = db.string_with_nul_bytes.js;
t.drop();
string = "string with a NUL (\0) byte";
-t.insert({str:string});
+t.insert({str: string});
assert.eq(t.findOne().str, string);
-assert.eq(t.findOne().str.length, string.length); // just to be sure
+assert.eq(t.findOne().str.length, string.length); // just to be sure
diff --git a/jstests/core/sub1.js b/jstests/core/sub1.js
index 324b21b75e8..d42677f3266 100644
--- a/jstests/core/sub1.js
+++ b/jstests/core/sub1.js
@@ -3,12 +3,15 @@
t = db.sub1;
t.drop();
-x = { a : 1 , b : { c : { d : 2 } } };
+x = {
+ a: 1,
+ b: {c: {d: 2}}
+};
-t.save( x );
+t.save(x);
y = t.findOne();
-assert.eq( 1 , y.a );
-assert.eq( 2 , y.b.c.d );
-print( tojson( y ) );
+assert.eq(1, y.a);
+assert.eq(2, y.b.c.d);
+print(tojson(y));
diff --git a/jstests/core/system_profile.js b/jstests/core/system_profile.js
index abfa0c98832..73d303a3277 100644
--- a/jstests/core/system_profile.js
+++ b/jstests/core/system_profile.js
@@ -26,21 +26,25 @@ assert.writeError(testDB.system.profile.remove({}));
// Using findAndModify to write to "system.profile" should fail.
assert.commandWorked(testDB.dropDatabase());
assert.commandWorked(testDB.createCollection("system.profile"));
-assert.commandFailed(
- testDB.system.profile.runCommand("findAndModify", {query: {}, update: {a: 1}}));
-assert.commandFailed(
- testDB.system.profile.runCommand("findAndModify", {query: {}, update: {a: 1}, upsert: true}));
+assert.commandFailed(testDB.system.profile.runCommand("findAndModify",
+ {query: {}, update: {a: 1}}));
+assert.commandFailed(testDB.system.profile.runCommand("findAndModify",
+ {query: {}, update: {a: 1}, upsert: true}));
assert.commandFailed(testDB.system.profile.runCommand("findAndModify", {query: {}, remove: true}));
// Using mapReduce to write to "system.profile" should fail.
assert.commandWorked(testDB.dropDatabase());
assert.writeOK(testDB.foo.insert({val: 1}));
assert.commandFailed(testDB.foo.runCommand("mapReduce",
- {map: function() { emit(0, this.val); },
- reduce: function(id, values) {
+ {
+ map: function() {
+ emit(0, this.val);
+ },
+ reduce: function(id, values) {
return Array.sum(values);
- },
- out: "system.profile"}));
+ },
+ out: "system.profile"
+ }));
// Using aggregate to write to "system.profile" should fail.
assert.commandWorked(testDB.dropDatabase());
@@ -50,12 +54,12 @@ assert.commandFailed(testDB.foo.runCommand("aggregate", {pipeline: [{$out: "syst
// Renaming to/from "system.profile" should fail.
assert.commandWorked(testDB.dropDatabase());
assert.commandWorked(testDB.createCollection("system.profile"));
-assert.commandFailed(testDB.adminCommand({renameCollection: testDB.system.profile.getFullName(),
- to: testDB.foo.getFullName()}));
+assert.commandFailed(testDB.adminCommand(
+ {renameCollection: testDB.system.profile.getFullName(), to: testDB.foo.getFullName()}));
assert.commandWorked(testDB.dropDatabase());
assert.commandWorked(testDB.createCollection("foo"));
-assert.commandFailed(testDB.adminCommand({renameCollection: testDB.foo.getFullName(),
- to: testDB.system.profile.getFullName()}));
+assert.commandFailed(testDB.adminCommand(
+ {renameCollection: testDB.foo.getFullName(), to: testDB.system.profile.getFullName()}));
// Copying a database containing "system.profile" should succeed. The "system.profile" collection
// should not be copied.
@@ -63,7 +67,7 @@ assert.commandWorked(testDB.dropDatabase());
assert.commandWorked(testDB.createCollection("foo"));
assert.commandWorked(testDB.createCollection("system.profile"));
assert.commandWorked(testDBCopy.dropDatabase());
-assert.commandWorked(testDB.adminCommand({copydb: 1, fromdb: testDB.getName(),
- todb: testDBCopy.getName()}));
+assert.commandWorked(
+ testDB.adminCommand({copydb: 1, fromdb: testDB.getName(), todb: testDBCopy.getName()}));
assert.commandWorked(testDBCopy.foo.stats());
assert.commandFailed(testDBCopy.system.profile.stats());
diff --git a/jstests/core/tailable_skip_limit.js b/jstests/core/tailable_skip_limit.js
index da2f80d0311..f771621ad83 100644
--- a/jstests/core/tailable_skip_limit.js
+++ b/jstests/core/tailable_skip_limit.js
@@ -55,7 +55,9 @@
assert.eq(7, cursor.next()["_id"]);
// Tailable with negative limit is an error.
- assert.throws(function() { t.find().addOption(2).limit(-100).next(); });
+ assert.throws(function() {
+ t.find().addOption(2).limit(-100).next();
+ });
// Tests that a tailable cursor over an empty capped collection produces a dead cursor, intended
// to be run on both mongod and mongos. For SERVER-20720.
diff --git a/jstests/core/temp_cleanup.js b/jstests/core/temp_cleanup.js
index d9dc7fdcccf..895f7c5f8b9 100644
--- a/jstests/core/temp_cleanup.js
+++ b/jstests/core/temp_cleanup.js
@@ -1,16 +1,22 @@
-mydb = db.getSisterDB( "temp_cleanup_test" );
+mydb = db.getSisterDB("temp_cleanup_test");
t = mydb.tempCleanup;
t.drop();
-t.insert( { x : 1 } );
+t.insert({x: 1});
-res = t.mapReduce( function(){ emit(1,1); } , function(){ return 1; } , "xyz" );
-printjson( res );
+res = t.mapReduce(
+ function() {
+ emit(1, 1);
+ },
+ function() {
+ return 1;
+ },
+ "xyz");
+printjson(res);
-assert.eq( 1 , t.count() , "A1" );
-assert.eq( 1 , mydb[res.result].count() , "A2" );
+assert.eq(1, t.count(), "A1");
+assert.eq(1, mydb[res.result].count(), "A2");
mydb.dropDatabase();
-
diff --git a/jstests/core/test_command_line_test_helpers.js b/jstests/core/test_command_line_test_helpers.js
index a66bd713327..6e001075ee0 100644
--- a/jstests/core/test_command_line_test_helpers.js
+++ b/jstests/core/test_command_line_test_helpers.js
@@ -1,6 +1,6 @@
load('jstests/libs/command_line/test_parsed_options.js');
-assert.docEq({ x : 1, y : 1 }, mergeOptions({ x : 1 }, { y : 1 }));
-assert.docEq({ x : 1, y : 1 }, mergeOptions({ x : 1, y : 2 }, { y : 1 }));
-assert.docEq({ x : { z : 1 }, y : 1 }, mergeOptions({ x : { z : 1 } }, { y : 1 }));
-assert.docEq({ x : { z : 1 } }, mergeOptions({ x : { z : 2 } }, { x : { z : 1 } }));
+assert.docEq({x: 1, y: 1}, mergeOptions({x: 1}, {y: 1}));
+assert.docEq({x: 1, y: 1}, mergeOptions({x: 1, y: 2}, {y: 1}));
+assert.docEq({x: {z: 1}, y: 1}, mergeOptions({x: {z: 1}}, {y: 1}));
+assert.docEq({x: {z: 1}}, mergeOptions({x: {z: 2}}, {x: {z: 1}}));
diff --git a/jstests/core/testminmax.js b/jstests/core/testminmax.js
index 803f1b48a0b..5e874397a04 100644
--- a/jstests/core/testminmax.js
+++ b/jstests/core/testminmax.js
@@ -1,14 +1,31 @@
t = db.minmaxtest;
t.drop();
-t.insert({"_id" : "IBM.N|00001264779918428889", "DESCRIPTION" : { "n" : "IBMSTK2", "o" : "IBM STK", "s" : "changed" } });
-t.insert({ "_id" : "VOD.N|00001264779918433344", "COMPANYNAME" : { "n" : "Vodafone Group PLC 2", "o" : "Vodafone Group PLC", "s" : "changed" } });
-t.insert({ "_id" : "IBM.N|00001264779918437075", "DESCRIPTION" : { "n" : "IBMSTK3", "o" : "IBM STK2", "s" : "changed" } });
-t.insert({ "_id" : "VOD.N|00001264779918441426", "COMPANYNAME" : { "n" : "Vodafone Group PLC 3", "o" : "Vodafone Group PLC 2", "s" : "changed" } });
+t.insert({
+ "_id": "IBM.N|00001264779918428889",
+ "DESCRIPTION": {"n": "IBMSTK2", "o": "IBM STK", "s": "changed"}
+});
+t.insert({
+ "_id": "VOD.N|00001264779918433344",
+ "COMPANYNAME": {"n": "Vodafone Group PLC 2", "o": "Vodafone Group PLC", "s": "changed"}
+});
+t.insert({
+ "_id": "IBM.N|00001264779918437075",
+ "DESCRIPTION": {"n": "IBMSTK3", "o": "IBM STK2", "s": "changed"}
+});
+t.insert({
+ "_id": "VOD.N|00001264779918441426",
+ "COMPANYNAME": {"n": "Vodafone Group PLC 3", "o": "Vodafone Group PLC 2", "s": "changed"}
+});
// temp:
-printjson( t.find().min({"_id":"IBM.N|00000000000000000000"}).max({"_id":"IBM.N|99999999999999999999"}).toArray() );
+printjson(t.find()
+ .min({"_id": "IBM.N|00000000000000000000"})
+ .max({"_id": "IBM.N|99999999999999999999"})
+ .toArray());
// this should be 2!! add assertion when fixed
// http://jira.mongodb.org/browse/SERVER-675
-print( t.find().min({"_id":"IBM.N|00000000000000000000"}).max({"_id":"IBM.N|99999999999999999999"}).count() );
-
+print(t.find()
+ .min({"_id": "IBM.N|00000000000000000000"})
+ .max({"_id": "IBM.N|99999999999999999999"})
+ .count());
diff --git a/jstests/core/top.js b/jstests/core/top.js
index 9dc1aad684e..1aff2a4136b 100644
--- a/jstests/core/top.js
+++ b/jstests/core/top.js
@@ -9,8 +9,8 @@ var testColl = testDB[name + "coll"];
// Ensure an empty collection exists for first top command
testColl.drop();
-testColl.insert({x:0});
-testColl.remove({x:0});
+testColl.insert({x: 0});
+testColl.remove({x: 0});
// get top statistics for the test collection
function getTop() {
@@ -23,22 +23,24 @@ var lastTop = getTop();
// return the number of operations since the last call to diffTop for the specified key
function diffTop(key) {
var thisTop = getTop();
- difference = { time : thisTop[key].time - lastTop[key].time,
- count : thisTop[key].count - lastTop[key].count };
+ difference = {
+ time: thisTop[key].time - lastTop[key].time,
+ count: thisTop[key].count - lastTop[key].count
+ };
lastTop[key] = thisTop[key];
assert.gte(difference.count, 0, "non-decreasing count");
assert.gte(difference.time, 0, "non-decreasing time");
// Time should advance iff operations were performed
- assert.eq(difference.count != 0, difference.time > 0,"non-zero time iff non-zero count");
+ assert.eq(difference.count != 0, difference.time > 0, "non-zero time iff non-zero count");
return difference;
}
var numRecords = 100;
// check stats for specified key are as expected
-var checked = { };
+var checked = {};
function checkStats(key, expected) {
checked[key]++;
var actual = diffTop(key).count;
@@ -46,28 +48,28 @@ function checkStats(key, expected) {
}
// Insert
-for(i = 0; i < numRecords; i++) {
- testColl.insert({_id:i});
+for (i = 0; i < numRecords; i++) {
+ testColl.insert({_id: i});
}
checkStats("insert", numRecords);
checkStats("writeLock", numRecords);
// Update
-for(i = 0; i < numRecords; i++) {
- testColl.update({_id:i},{x:i});
+for (i = 0; i < numRecords; i++) {
+ testColl.update({_id: i}, {x: i});
}
checkStats("update", numRecords);
// Queries
-var query = { };
-for(i = 0; i < numRecords; i++) {
- query[i] = testColl.find({x : {$gte:i}}).batchSize(2);
+var query = {};
+for (i = 0; i < numRecords; i++) {
+ query[i] = testColl.find({x: {$gte: i}}).batchSize(2);
assert.eq(query[i].next()._id, i);
}
-checkStats("queries" ,numRecords);
+checkStats("queries", numRecords);
// Getmore
-for(i = 0; i < numRecords / 2; i++) {
+for (i = 0; i < numRecords / 2; i++) {
assert.eq(query[i].next()._id, i + 1);
assert.eq(query[i].next()._id, i + 2);
assert.eq(query[i].next()._id, i + 3);
@@ -76,28 +78,26 @@ for(i = 0; i < numRecords / 2; i++) {
checkStats("getmore", numRecords);
// Remove
-for(i = 0; i < numRecords; i++) {
- testColl.remove({_id : 1});
+for (i = 0; i < numRecords; i++) {
+ testColl.remove({_id: 1});
}
checkStats("remove", numRecords);
// Upsert, note that these are counted as updates, not inserts
-for(i = 0; i < numRecords; i++) {
- testColl.update({_id:i},{x:i},{upsert:1});
+for (i = 0; i < numRecords; i++) {
+ testColl.update({_id: i}, {x: i}, {upsert: 1});
}
checkStats("update", numRecords);
-
// Commands
-diffTop("commands"); // ignore any commands before this
-for(i = 0; i < numRecords; i++) {
- assert.eq(testDB.runCommand({count:"toptestcoll"}).n, numRecords);
+diffTop("commands"); // ignore any commands before this
+for (i = 0; i < numRecords; i++) {
+ assert.eq(testDB.runCommand({count: "toptestcoll"}).n, numRecords);
}
checkStats("commands", numRecords);
-for(key in lastTop) {
- if (!(key in checked)) {
- printjson({key:key, stats:diffTop(key)});
- }
+for (key in lastTop) {
+ if (!(key in checked)) {
+ printjson({key: key, stats: diffTop(key)});
+ }
}
-
diff --git a/jstests/core/ts1.js b/jstests/core/ts1.js
index 34efa15f981..342ff3215d7 100644
--- a/jstests/core/ts1.js
+++ b/jstests/core/ts1.js
@@ -3,36 +3,35 @@ t.drop();
N = 20;
-for ( i=0; i<N; i++ ){
- t.insert( { _id : i , x : new Timestamp() } );
- sleep( 100 );
+for (i = 0; i < N; i++) {
+ t.insert({_id: i, x: new Timestamp()});
+ sleep(100);
}
-function get(i){
- return t.findOne( { _id : i } ).x;
+function get(i) {
+ return t.findOne({_id: i}).x;
}
-function cmp( a , b ){
- if ( a.t < b.t )
+function cmp(a, b) {
+ if (a.t < b.t)
return -1;
- if ( a.t > b.t )
+ if (a.t > b.t)
return 1;
-
+
return a.i - b.i;
}
-for ( i=0; i<N-1; i++ ){
+for (i = 0; i < N - 1; i++) {
a = get(i);
- b = get(i+1);
- //print( tojson(a) + "\t" + tojson(b) + "\t" + cmp(a,b) );
- assert.gt( 0 , cmp( a , b ) , "cmp " + i );
+ b = get(i + 1);
+ // print( tojson(a) + "\t" + tojson(b) + "\t" + cmp(a,b) );
+ assert.gt(0, cmp(a, b), "cmp " + i);
}
-assert.eq( N , t.find( { x : { $type : 17 } } ).itcount() , "B1" );
-assert.eq( 0 , t.find( { x : { $type : 3 } } ).itcount() , "B2" );
-
-t.insert( { _id : 100 , x : new Timestamp( 123456 , 50 ) } );
-x = t.findOne( { _id : 100 } ).x;
-assert.eq( 123456 , x.t , "C1" );
-assert.eq( 50 , x.i , "C2" );
+assert.eq(N, t.find({x: {$type: 17}}).itcount(), "B1");
+assert.eq(0, t.find({x: {$type: 3}}).itcount(), "B2");
+t.insert({_id: 100, x: new Timestamp(123456, 50)});
+x = t.findOne({_id: 100}).x;
+assert.eq(123456, x.t, "C1");
+assert.eq(50, x.i, "C2");
diff --git a/jstests/core/type1.js b/jstests/core/type1.js
index 7f101a2c027..78c5f9b033c 100644
--- a/jstests/core/type1.js
+++ b/jstests/core/type1.js
@@ -2,21 +2,20 @@
t = db.type1;
t.drop();
-t.save( { x : 1.1 } );
-t.save( { x : "3" } );
-t.save( { x : "asd" } );
-t.save( { x : "foo" } );
+t.save({x: 1.1});
+t.save({x: "3"});
+t.save({x: "asd"});
+t.save({x: "foo"});
-assert.eq( 4 , t.find().count() , "A1" );
-assert.eq( 1 , t.find( { x : { $type : 1 } } ).count() , "A2" );
-assert.eq( 3 , t.find( { x : { $type : 2 } } ).count() , "A3" );
-assert.eq( 0 , t.find( { x : { $type : 3 } } ).count() , "A4" );
+assert.eq(4, t.find().count(), "A1");
+assert.eq(1, t.find({x: {$type: 1}}).count(), "A2");
+assert.eq(3, t.find({x: {$type: 2}}).count(), "A3");
+assert.eq(0, t.find({x: {$type: 3}}).count(), "A4");
+t.ensureIndex({x: 1});
-t.ensureIndex( { x : 1 } );
-
-assert.eq( 4 , t.find().count() , "B1" );
-assert.eq( 1 , t.find( { x : { $type : 1 } } ).count() , "B2" );
-assert.eq( 3 , t.find( { x : { $type : 2 } } ).count() , "B3" );
-assert.eq( 0 , t.find( { x : { $type : 3 } } ).count() , "B4" );
-assert.eq( 1 , t.find( { x : { $regex:"f", $type : 2 } } ).count() , "B3" );
+assert.eq(4, t.find().count(), "B1");
+assert.eq(1, t.find({x: {$type: 1}}).count(), "B2");
+assert.eq(3, t.find({x: {$type: 2}}).count(), "B3");
+assert.eq(0, t.find({x: {$type: 3}}).count(), "B4");
+assert.eq(1, t.find({x: {$regex: "f", $type: 2}}).count(), "B3");
diff --git a/jstests/core/type2.js b/jstests/core/type2.js
index 820607e0b30..9c6baa37b94 100644
--- a/jstests/core/type2.js
+++ b/jstests/core/type2.js
@@ -3,17 +3,17 @@
t = db.jstests_type2;
t.drop();
-t.save( {a:null} );
-t.save( {} );
-t.save( {a:'a'} );
+t.save({a: null});
+t.save({});
+t.save({a: 'a'});
function test() {
- assert.eq( 2, t.count( {a:null} ) );
- assert.eq( 1, t.count( {a:{$type:10}} ) );
- assert.eq( 2, t.count( {a:{$exists:true}} ) );
- assert.eq( 1, t.count( {a:{$exists:false}} ) );
+ assert.eq(2, t.count({a: null}));
+ assert.eq(1, t.count({a: {$type: 10}}));
+ assert.eq(2, t.count({a: {$exists: true}}));
+ assert.eq(1, t.count({a: {$exists: false}}));
}
test();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
test(); \ No newline at end of file
diff --git a/jstests/core/type3.js b/jstests/core/type3.js
index aad21ca3ecb..fce2b03f6c4 100644
--- a/jstests/core/type3.js
+++ b/jstests/core/type3.js
@@ -3,55 +3,59 @@
t = db.jstests_type3;
t.drop();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
// Type Object
-t.save( {a:{'':''}} );
-assert.eq( 1, t.find( {a:{$type:3}} ).hint( {a:1} ).itcount() );
+t.save({a: {'': ''}});
+assert.eq(1, t.find({a: {$type: 3}}).hint({a: 1}).itcount());
// Type Array
t.remove({});
-t.save( {a:[['c']]} );
-assert.eq( 1, t.find( {a:{$type:4}} ).hint( {a:1} ).itcount() );
+t.save({a: [['c']]});
+assert.eq(1, t.find({a: {$type: 4}}).hint({a: 1}).itcount());
// Type RegEx
t.remove({});
-t.save( {a:/r/} );
-assert.eq( 1, t.find( {a:{$type:11}} ).hint( {a:1} ).itcount() );
+t.save({a: /r/});
+assert.eq(1, t.find({a: {$type: 11}}).hint({a: 1}).itcount());
// Type jstNULL
t.remove({});
-t.save( {a:null} );
-assert.eq( 1, t.find( {a:{$type:10}} ).hint( {a:1} ).itcount() );
+t.save({a: null});
+assert.eq(1, t.find({a: {$type: 10}}).hint({a: 1}).itcount());
// Type Undefined
t.remove({});
-t.save( {a:undefined} );
-assert.eq( 1, t.find( {a:{$type:6}} ).hint( {a:1} ).itcount() );
+t.save({a: undefined});
+assert.eq(1, t.find({a: {$type: 6}}).hint({a: 1}).itcount());
// This one won't be returned.
-t.save( {a:null} );
-assert.eq( 1, t.find( {a:{$type:6}} ).hint( {a:1} ).itcount() );
+t.save({a: null});
+assert.eq(1, t.find({a: {$type: 6}}).hint({a: 1}).itcount());
// Type Code
t.remove({});
-t.save( {a:function(){var a = 0;}} );
-assert.eq( 1, t.find( {a:{$type:13}} ).itcount() );
+t.save({
+ a: function() {
+ var a = 0;
+ }
+});
+assert.eq(1, t.find({a: {$type: 13}}).itcount());
// Type BinData
t.remove({});
-t.save( {a:new BinData(0,'')} );
-assert.eq( 1, t.find( {a:{$type:5}} ).itcount() );
+t.save({a: new BinData(0, '')});
+assert.eq(1, t.find({a: {$type: 5}}).itcount());
// Type Timestamp
t.remove({});
-t.save( {a:new Timestamp()} );
-t.save( {a:new Timestamp(0x80008000, 0)} );
-assert.eq( 2, t.find( {a:{$type:17}} ).itcount() );
-assert.eq( 0, t.find( {a:{$type:9}} ).itcount() );
+t.save({a: new Timestamp()});
+t.save({a: new Timestamp(0x80008000, 0)});
+assert.eq(2, t.find({a: {$type: 17}}).itcount());
+assert.eq(0, t.find({a: {$type: 9}}).itcount());
// Type Date
t.remove({});
-t.save( {a:new Date()} );
-assert.eq( 0, t.find( {a:{$type:17}} ).itcount() );
-assert.eq( 1, t.find( {a:{$type:9}} ).itcount() );
+t.save({a: new Date()});
+assert.eq(0, t.find({a: {$type: 17}}).itcount());
+assert.eq(1, t.find({a: {$type: 9}}).itcount());
diff --git a/jstests/core/type4.js b/jstests/core/type4.js
index 86e2f32c5d7..82197d4f1e2 100644
--- a/jstests/core/type4.js
+++ b/jstests/core/type4.js
@@ -1,4 +1,4 @@
-(function(){
+(function() {
"use strict";
// Tests for SERVER-20080
@@ -13,16 +13,16 @@
var oldReadMode = db.getMongo().readMode();
- assert.throws(function(){
+ assert.throws(function() {
(new _rand())();
}, [], "invoke constructor on natively injected function");
- assert.throws(function(){
- var doc = db.test.findOne();
- new doc();
+ assert.throws(function() {
+ var doc = db.test.findOne();
+ new doc();
}, [], "invoke constructor on BSON");
- assert.throws(function(){
+ assert.throws(function() {
db.getMongo().forceReadMode("commands");
var cursor = t.find();
cursor.next();
@@ -30,7 +30,7 @@
new cursor._cursor._cursorHandle();
}, [], "invoke constructor on CursorHandle");
- assert.throws(function(){
+ assert.throws(function() {
db.getMongo().forceReadMode("legacy");
var cursor = t.find();
cursor.next();
diff --git a/jstests/core/type5.js b/jstests/core/type5.js
index 414af2be7eb..d4dfc42d9f6 100644
--- a/jstests/core/type5.js
+++ b/jstests/core/type5.js
@@ -1,4 +1,4 @@
-(function(){
+(function() {
"use strict";
// This checks SERVER-20375 - Constrain JS method thisv
@@ -7,14 +7,14 @@
// prototypes of objects that aren't intended to have methods invoked on
// them.
- assert.throws(function(){
+ assert.throws(function() {
HexData(0, "aaaa").hex.apply({});
}, [], "invoke method on object of incorrect type");
- assert.throws(function(){
+ assert.throws(function() {
var x = HexData(0, "aaaa");
x.hex.apply(10);
}, [], "invoke method on incorrect type");
- assert.throws(function(){
+ assert.throws(function() {
var x = HexData(0, "aaaa");
x.hex.apply(x.__proto__);
}, [], "invoke method on prototype of correct type");
diff --git a/jstests/core/type6.js b/jstests/core/type6.js
index f8b29fe217d..39c3e2567bb 100644
--- a/jstests/core/type6.js
+++ b/jstests/core/type6.js
@@ -1,15 +1,17 @@
-(function(){
+(function() {
"use strict";
// SERVER-20319 Min/MaxKey check type of singleton
//
// make sure swapping min/max key's prototype doesn't blow things up
- assert.throws(function(){
- MinKey().__proto__.singleton = 1000; MinKey();
+ assert.throws(function() {
+ MinKey().__proto__.singleton = 1000;
+ MinKey();
}, [], "make sure manipulating MinKey's proto is safe");
- assert.throws(function(){
- MaxKey().__proto__.singleton = 1000; MaxKey();
+ assert.throws(function() {
+ MaxKey().__proto__.singleton = 1000;
+ MaxKey();
}, [], "make sure manipulating MaxKey's proto is safe");
})();
diff --git a/jstests/core/type7.js b/jstests/core/type7.js
index 870e48ad164..1d67922d491 100644
--- a/jstests/core/type7.js
+++ b/jstests/core/type7.js
@@ -1,4 +1,4 @@
-(function(){
+(function() {
"use strict";
// SERVER-20332 make JS NumberLong more robust
diff --git a/jstests/core/type8.js b/jstests/core/type8.js
index 246133b33d9..ceb4993ecb1 100644
--- a/jstests/core/type8.js
+++ b/jstests/core/type8.js
@@ -1,4 +1,4 @@
-(function(){
+(function() {
"use strict";
// SERVER-8246 Min/MaxKey should be comparable
diff --git a/jstests/core/uniqueness.js b/jstests/core/uniqueness.js
index 124748a91f4..8b919b9a6a9 100644
--- a/jstests/core/uniqueness.js
+++ b/jstests/core/uniqueness.js
@@ -6,51 +6,50 @@ t.drop();
// test uniqueness of _id
-res = t.save( { _id : 3 } );
-assert.writeOK( res );
+res = t.save({_id: 3});
+assert.writeOK(res);
// this should yield an error
-res = t.insert( { _id : 3 } );
-assert.writeError( res );
-assert.eq( 1, t.count() );
+res = t.insert({_id: 3});
+assert.writeError(res);
+assert.eq(1, t.count());
-res = t.insert( { _id : 4, x : 99 } );
-assert.writeOK( res );
+res = t.insert({_id: 4, x: 99});
+assert.writeOK(res);
// this should yield an error
-res = t.update( { _id : 4 } , { _id : 3, x : 99 } );
-assert.writeError( res );
-assert( t.findOne( {_id:4} ) );
+res = t.update({_id: 4}, {_id: 3, x: 99});
+assert.writeError(res);
+assert(t.findOne({_id: 4}));
// Check for an error message when we index and there are dups
db.jstests_uniqueness2.drop();
-db.jstests_uniqueness2.insert({a:3});
-db.jstests_uniqueness2.insert({a:3});
-assert.eq( 2, db.jstests_uniqueness2.count() );
-res = db.jstests_uniqueness2.ensureIndex({a:1}, true);
-assert.commandFailed( res );
-assert( res.errmsg.match( /E11000/ ) );
+db.jstests_uniqueness2.insert({a: 3});
+db.jstests_uniqueness2.insert({a: 3});
+assert.eq(2, db.jstests_uniqueness2.count());
+res = db.jstests_uniqueness2.ensureIndex({a: 1}, true);
+assert.commandFailed(res);
+assert(res.errmsg.match(/E11000/));
// Check for an error message when we index in the background and there are dups
db.jstests_uniqueness2.drop();
-db.jstests_uniqueness2.insert({a:3});
-db.jstests_uniqueness2.insert({a:3});
-assert.eq( 2, db.jstests_uniqueness2.count() );
-res = db.jstests_uniqueness2.ensureIndex({a:1}, {unique:true,background:true});
-assert.commandFailed( res );
-assert( res.errmsg.match( /E11000/ ) );
+db.jstests_uniqueness2.insert({a: 3});
+db.jstests_uniqueness2.insert({a: 3});
+assert.eq(2, db.jstests_uniqueness2.count());
+res = db.jstests_uniqueness2.ensureIndex({a: 1}, {unique: true, background: true});
+assert.commandFailed(res);
+assert(res.errmsg.match(/E11000/));
/* Check that if we update and remove _id, it gets added back by the DB */
/* - test when object grows */
t.drop();
-t.save( { _id : 'Z' } );
-t.update( {}, { k : 2 } );
-assert.eq( 'Z', t.findOne()._id, "uniqueness.js problem with adding back _id" );
+t.save({_id: 'Z'});
+t.update({}, {k: 2});
+assert.eq('Z', t.findOne()._id, "uniqueness.js problem with adding back _id");
/* - test when doesn't grow */
t.drop();
-t.save( { _id : 'Z', k : 3 } );
-t.update( {}, { k : 2 } );
-assert.eq( 'Z', t.findOne()._id, "uniqueness.js problem with adding back _id (2)" );
-
+t.save({_id: 'Z', k: 3});
+t.update({}, {k: 2});
+assert.eq('Z', t.findOne()._id, "uniqueness.js problem with adding back _id (2)");
diff --git a/jstests/core/unset.js b/jstests/core/unset.js
index f3cdcf03deb..14e18229723 100644
--- a/jstests/core/unset.js
+++ b/jstests/core/unset.js
@@ -1,19 +1,22 @@
t = db.unset;
t.drop();
-orig = { _id : 1, emb : {} };
+orig = {
+ _id: 1,
+ emb: {}
+};
t.insert(orig);
-t.update( { _id : 1 }, { $unset : { 'emb.a' : 1 }});
-t.update( { _id : 1 }, { $unset : { 'z' : 1 }});
-assert.eq( orig , t.findOne() , "A" );
+t.update({_id: 1}, {$unset: {'emb.a': 1}});
+t.update({_id: 1}, {$unset: {'z': 1}});
+assert.eq(orig, t.findOne(), "A");
-t.update( { _id : 1 }, { $set : { 'emb.a' : 1 }});
-t.update( { _id : 1 }, { $set : { 'z' : 1 }});
+t.update({_id: 1}, {$set: {'emb.a': 1}});
+t.update({_id: 1}, {$set: {'z': 1}});
-t.update( { _id : 1 }, { $unset : { 'emb.a' : 1 }});
-t.update( { _id : 1 }, { $unset : { 'z' : 1 }});
-assert.eq( orig , t.findOne() , "B" ); // note that emb isn't removed
+t.update({_id: 1}, {$unset: {'emb.a': 1}});
+t.update({_id: 1}, {$unset: {'z': 1}});
+assert.eq(orig, t.findOne(), "B"); // note that emb isn't removed
-t.update( { _id : 1 }, { $unset : { 'emb' : 1 }});
-assert.eq( {_id :1} , t.findOne() , "C" );
+t.update({_id: 1}, {$unset: {'emb': 1}});
+assert.eq({_id: 1}, t.findOne(), "C");
diff --git a/jstests/core/unset2.js b/jstests/core/unset2.js
index 501f9f16331..ec2772af507 100644
--- a/jstests/core/unset2.js
+++ b/jstests/core/unset2.js
@@ -3,23 +3,23 @@ var res;
t = db.unset2;
t.drop();
-t.save( {a:["a","b","c","d"]} );
-t.update( {}, {$unset:{"a.3":1}} );
-assert.eq( ["a","b","c",null], t.findOne().a );
-t.update( {}, {$unset:{"a.1":1}} );
-assert.eq( ["a",null,"c",null], t.findOne().a );
-t.update( {}, {$unset:{"a.0":1}} );
-assert.eq( [null,null,"c",null], t.findOne().a );
-t.update( {}, {$unset:{"a.4":1}} );
-assert.eq( [null,null,"c",null], t.findOne().a ); // no change
+t.save({a: ["a", "b", "c", "d"]});
+t.update({}, {$unset: {"a.3": 1}});
+assert.eq(["a", "b", "c", null], t.findOne().a);
+t.update({}, {$unset: {"a.1": 1}});
+assert.eq(["a", null, "c", null], t.findOne().a);
+t.update({}, {$unset: {"a.0": 1}});
+assert.eq([null, null, "c", null], t.findOne().a);
+t.update({}, {$unset: {"a.4": 1}});
+assert.eq([null, null, "c", null], t.findOne().a); // no change
t.drop();
-t.save( {a:["a","b","c","d","e"]} );
-t.update( {}, {$unset:{"a.2":1},$set:{"a.3":3,"a.4":4,"a.5":5}} );
-assert.eq( ["a","b",null,3,4,5], t.findOne().a );
+t.save({a: ["a", "b", "c", "d", "e"]});
+t.update({}, {$unset: {"a.2": 1}, $set: {"a.3": 3, "a.4": 4, "a.5": 5}});
+assert.eq(["a", "b", null, 3, 4, 5], t.findOne().a);
t.drop();
-t.save( {a:["a","b","c","d","e"]} );
-res = t.update( {}, {$unset:{"a.2":1},$set:{"a.2":4}} );
-assert.writeError( res );
-assert.eq( ["a","b","c","d","e"], t.findOne().a );
+t.save({a: ["a", "b", "c", "d", "e"]});
+res = t.update({}, {$unset: {"a.2": 1}, $set: {"a.2": 4}});
+assert.writeError(res);
+assert.eq(["a", "b", "c", "d", "e"], t.findOne().a);
diff --git a/jstests/core/update2.js b/jstests/core/update2.js
index 654914c1f45..15d84c81b41 100644
--- a/jstests/core/update2.js
+++ b/jstests/core/update2.js
@@ -1,18 +1,18 @@
f = db.ed_db_update2;
f.drop();
-f.save( { a: 4 } );
-f.update( { a: 4 }, { $inc: { a: 2 } } );
-assert.eq( 6, f.findOne().a );
+f.save({a: 4});
+f.update({a: 4}, {$inc: {a: 2}});
+assert.eq(6, f.findOne().a);
f.drop();
-f.save( { a: 4 } );
-f.ensureIndex( { a: 1 } );
-f.update( { a: 4 }, { $inc: { a: 2 } } );
-assert.eq( 6, f.findOne().a );
+f.save({a: 4});
+f.ensureIndex({a: 1});
+f.update({a: 4}, {$inc: {a: 2}});
+assert.eq(6, f.findOne().a);
// Verify that drop clears the index
f.drop();
-f.save( { a: 4 } );
-f.update( { a: 4 }, { $inc: { a: 2 } } );
-assert.eq( 6, f.findOne().a );
+f.save({a: 4});
+f.update({a: 4}, {$inc: {a: 2}});
+assert.eq(6, f.findOne().a);
diff --git a/jstests/core/update3.js b/jstests/core/update3.js
index d29d073a40a..79562fe72d0 100644
--- a/jstests/core/update3.js
+++ b/jstests/core/update3.js
@@ -3,26 +3,26 @@
f = db.jstests_update3;
f.drop();
-f.save( { a:1 } );
-f.update( {}, {$inc:{ a:1 }} );
-assert.eq( 2, f.findOne().a , "A" );
+f.save({a: 1});
+f.update({}, {$inc: {a: 1}});
+assert.eq(2, f.findOne().a, "A");
f.drop();
-f.save( { a:{ b: 1 } } );
-f.update( {}, {$inc:{ "a.b":1 }} );
-assert.eq( 2, f.findOne().a.b , "B" );
+f.save({a: {b: 1}});
+f.update({}, {$inc: {"a.b": 1}});
+assert.eq(2, f.findOne().a.b, "B");
f.drop();
-f.save( { a:{ b: 1 } } );
-f.update( {}, {$set:{ "a.b":5 }} );
-assert.eq( 5, f.findOne().a.b , "C" );
+f.save({a: {b: 1}});
+f.update({}, {$set: {"a.b": 5}});
+assert.eq(5, f.findOne().a.b, "C");
f.drop();
-f.save( {'_id':0} );
-f.update( {}, {$set:{'_id':5}} );
-assert.eq( 0, f.findOne()._id , "D" );
+f.save({'_id': 0});
+f.update({}, {$set: {'_id': 5}});
+assert.eq(0, f.findOne()._id, "D");
f.drop();
-f.save({_id:1, a:1});
-f.update({}, {$unset:{"a":1, "b.c":1}});
-assert.docEq(f.findOne(), {_id:1}, "E"); \ No newline at end of file
+f.save({_id: 1, a: 1});
+f.update({}, {$unset: {"a": 1, "b.c": 1}});
+assert.docEq(f.findOne(), {_id: 1}, "E"); \ No newline at end of file
diff --git a/jstests/core/update5.js b/jstests/core/update5.js
index 2728000f2d4..3ee44d2fba0 100644
--- a/jstests/core/update5.js
+++ b/jstests/core/update5.js
@@ -1,41 +1,39 @@
t = db.update5;
-function go( key ){
-
+function go(key) {
t.drop();
- function check( num , name ){
- assert.eq( 1 , t.find().count() , tojson( key ) + " count " + name );
- assert.eq( num , t.findOne().n , tojson( key ) + " value " + name );
+ function check(num, name) {
+ assert.eq(1, t.find().count(), tojson(key) + " count " + name);
+ assert.eq(num, t.findOne().n, tojson(key) + " value " + name);
}
-
- t.update( key , { $inc : { n : 1 } } , true );
- check( 1 , "A" );
-
- t.update( key , { $inc : { n : 1 } } , true );
- check( 2 , "B" );
-
- t.update( key , { $inc : { n : 1 } } , true );
- check( 3 , "C" );
-
+
+ t.update(key, {$inc: {n: 1}}, true);
+ check(1, "A");
+
+ t.update(key, {$inc: {n: 1}}, true);
+ check(2, "B");
+
+ t.update(key, {$inc: {n: 1}}, true);
+ check(3, "C");
+
var ik = {};
- for ( k in key )
+ for (k in key)
ik[k] = 1;
- t.ensureIndex( ik );
-
- t.update( key , { $inc : { n : 1 } } , true );
- check( 4 , "D" );
-
+ t.ensureIndex(ik);
+
+ t.update(key, {$inc: {n: 1}}, true);
+ check(4, "D");
}
-go( { a : 5 } );
-go( { a : 5 } );
+go({a: 5});
+go({a: 5});
-go( { a : 5 , b : 7 } );
-go( { a : null , b : 7 } );
+go({a: 5, b: 7});
+go({a: null, b: 7});
-go( { referer: 'blah' } );
-go( { referer: 'blah', lame: 'bar' } );
-go( { referer: 'blah', name: 'bar' } );
-go( { date: null, referer: 'blah', name: 'bar' } );
+go({referer: 'blah'});
+go({referer: 'blah', lame: 'bar'});
+go({referer: 'blah', name: 'bar'});
+go({date: null, referer: 'blah', name: 'bar'});
diff --git a/jstests/core/update6.js b/jstests/core/update6.js
index eda470abf45..76b676260f6 100644
--- a/jstests/core/update6.js
+++ b/jstests/core/update6.js
@@ -2,45 +2,42 @@
t = db.update6;
t.drop();
-t.save( { a : 1 , b : { c : 1 , d : 1 } } );
+t.save({a: 1, b: {c: 1, d: 1}});
-t.update( { a : 1 } , { $inc : { "b.c" : 1 } } );
-assert.eq( 2 , t.findOne().b.c , "A" );
-assert.eq( "c,d" , Object.keySet( t.findOne().b ).toString() , "B" );
+t.update({a: 1}, {$inc: {"b.c": 1}});
+assert.eq(2, t.findOne().b.c, "A");
+assert.eq("c,d", Object.keySet(t.findOne().b).toString(), "B");
-t.update( { a : 1 } , { $inc : { "b.0e" : 1 } } );
-assert.eq( 1 , t.findOne().b["0e"] , "C" );
-assert.docEq( { "c" : 2, "d" : 1, "0e" : 1 }, t.findOne().b, "D" );
+t.update({a: 1}, {$inc: {"b.0e": 1}});
+assert.eq(1, t.findOne().b["0e"], "C");
+assert.docEq({"c": 2, "d": 1, "0e": 1}, t.findOne().b, "D");
// -----
t.drop();
-t.save( {"_id" : 2 ,
- "b3" : {"0720" : 5 , "0721" : 12 , "0722" : 11 , "0723" : 3} ,
- //"b323" : {"0720" : 1} ,
- }
- );
-
-
-assert.eq( 4 , Object.keySet( t.find({_id:2},{b3:1})[0].b3 ).length , "test 1 : ks before" );
-t.update({_id:2},{$inc: { 'b3.0719' : 1}},true);
-assert.eq( 5 , Object.keySet( t.find({_id:2},{b3:1})[0].b3 ).length , "test 1 : ks after" );
+t.save({
+ "_id": 2,
+ "b3": {"0720": 5, "0721": 12, "0722": 11, "0723": 3},
+ //"b323" : {"0720" : 1} ,
+});
+assert.eq(4, Object.keySet(t.find({_id: 2}, {b3: 1})[0].b3).length, "test 1 : ks before");
+t.update({_id: 2}, {$inc: {'b3.0719': 1}}, true);
+assert.eq(5, Object.keySet(t.find({_id: 2}, {b3: 1})[0].b3).length, "test 1 : ks after");
// -----
t.drop();
-t.save( {"_id" : 2 ,
- "b3" : {"0720" : 5 , "0721" : 12 , "0722" : 11 , "0723" : 3} ,
- "b324" : {"0720" : 1} ,
- }
- );
-
-
-assert.eq( 4 , Object.keySet( t.find({_id:2},{b3:1})[0].b3 ).length , "test 2 : ks before" );
-printjson( t.find({_id:2},{b3:1})[0].b3 );
-t.update({_id:2},{$inc: { 'b3.0719' : 1}} );
-printjson( t.find({_id:2},{b3:1})[0].b3 );
-assert.eq( 5 , Object.keySet( t.find({_id:2},{b3:1})[0].b3 ).length , "test 2 : ks after" );
+t.save({
+ "_id": 2,
+ "b3": {"0720": 5, "0721": 12, "0722": 11, "0723": 3},
+ "b324": {"0720": 1},
+});
+
+assert.eq(4, Object.keySet(t.find({_id: 2}, {b3: 1})[0].b3).length, "test 2 : ks before");
+printjson(t.find({_id: 2}, {b3: 1})[0].b3);
+t.update({_id: 2}, {$inc: {'b3.0719': 1}});
+printjson(t.find({_id: 2}, {b3: 1})[0].b3);
+assert.eq(5, Object.keySet(t.find({_id: 2}, {b3: 1})[0].b3).length, "test 2 : ks after");
diff --git a/jstests/core/update7.js b/jstests/core/update7.js
index 199a331f9b2..9f92c3382ef 100644
--- a/jstests/core/update7.js
+++ b/jstests/core/update7.js
@@ -2,137 +2,136 @@
t = db.update7;
t.drop();
-function s(){
- return t.find().sort( { _id : 1 } ).map( function(z){ return z.x; } );
+function s() {
+ return t.find().sort({_id: 1}).map(function(z) {
+ return z.x;
+ });
}
-t.save( { _id : 1 , x : 1 } );
-t.save( { _id : 2 , x : 5 } );
+t.save({_id: 1, x: 1});
+t.save({_id: 2, x: 5});
-assert.eq( "1,5" , s() , "A" );
+assert.eq("1,5", s(), "A");
-t.update( {} , { $inc : { x : 1 } } );
-assert.eq( "2,5" , s() , "B" );
+t.update({}, {$inc: {x: 1}});
+assert.eq("2,5", s(), "B");
-t.update( { _id : 1 } , { $inc : { x : 1 } } );
-assert.eq( "3,5" , s() , "C" );
+t.update({_id: 1}, {$inc: {x: 1}});
+assert.eq("3,5", s(), "C");
-t.update( { _id : 2 } , { $inc : { x : 1 } } );
-assert.eq( "3,6" , s() , "D" );
+t.update({_id: 2}, {$inc: {x: 1}});
+assert.eq("3,6", s(), "D");
-t.update( {} , { $inc : { x : 1 } } , false , true );
-assert.eq( "4,7" , s() , "E" );
+t.update({}, {$inc: {x: 1}}, false, true);
+assert.eq("4,7", s(), "E");
-t.update( {} , { $set : { x : 2 } } , false , true );
-assert.eq( "2,2" , s() , "F" );
+t.update({}, {$set: {x: 2}}, false, true);
+assert.eq("2,2", s(), "F");
// non-matching in cursor
t.drop();
-t.save( { _id : 1 , x : 1 , a : 1 , b : 1 } );
-t.save( { _id : 2 , x : 5 , a : 1 , b : 2 } );
-assert.eq( "1,5" , s() , "B1" );
+t.save({_id: 1, x: 1, a: 1, b: 1});
+t.save({_id: 2, x: 5, a: 1, b: 2});
+assert.eq("1,5", s(), "B1");
-t.update( { a : 1 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "2,6" , s() , "B2" );
+t.update({a: 1}, {$inc: {x: 1}}, false, true);
+assert.eq("2,6", s(), "B2");
-t.update( { b : 1 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "3,6" , s() , "B3" );
+t.update({b: 1}, {$inc: {x: 1}}, false, true);
+assert.eq("3,6", s(), "B3");
-t.update( { b : 3 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "3,6" , s() , "B4" );
+t.update({b: 3}, {$inc: {x: 1}}, false, true);
+assert.eq("3,6", s(), "B4");
-t.ensureIndex( { a : 1 } );
-t.ensureIndex( { b : 1 } );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
-t.update( { a : 1 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "4,7" , s() , "B5" );
+t.update({a: 1}, {$inc: {x: 1}}, false, true);
+assert.eq("4,7", s(), "B5");
-t.update( { b : 1 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "5,7" , s() , "B6" );
+t.update({b: 1}, {$inc: {x: 1}}, false, true);
+assert.eq("5,7", s(), "B6");
-t.update( { b : 3 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "5,7" , s() , "B7" );
-
-t.update( { b : 2 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "5,8" , s() , "B7" );
+t.update({b: 3}, {$inc: {x: 1}}, false, true);
+assert.eq("5,7", s(), "B7");
+t.update({b: 2}, {$inc: {x: 1}}, false, true);
+assert.eq("5,8", s(), "B7");
// multi-key
t.drop();
-t.save( { _id : 1 , x : 1 , a : [ 1 , 2 ] } );
-t.save( { _id : 2 , x : 5 , a : [ 2 , 3 ] } );
-assert.eq( "1,5" , s() , "C1" );
-
-t.update( { a : 1 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "2,5" , s() , "C2" );
+t.save({_id: 1, x: 1, a: [1, 2]});
+t.save({_id: 2, x: 5, a: [2, 3]});
+assert.eq("1,5", s(), "C1");
-t.update( { a : 1 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "3,5" , s() , "C3" );
+t.update({a: 1}, {$inc: {x: 1}}, false, true);
+assert.eq("2,5", s(), "C2");
-t.update( { a : 3 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "3,6" , s() , "C4" );
+t.update({a: 1}, {$inc: {x: 1}}, false, true);
+assert.eq("3,5", s(), "C3");
-t.update( { a : 2 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "4,7" , s() , "C5" );
+t.update({a: 3}, {$inc: {x: 1}}, false, true);
+assert.eq("3,6", s(), "C4");
-t.update( { a : { $gt : 0 } } , { $inc : { x : 1 } } , false , true );
-assert.eq( "5,8" , s() , "C6" );
+t.update({a: 2}, {$inc: {x: 1}}, false, true);
+assert.eq("4,7", s(), "C5");
+t.update({a: {$gt: 0}}, {$inc: {x: 1}}, false, true);
+assert.eq("5,8", s(), "C6");
t.drop();
-t.save( { _id : 1 , x : 1 , a : [ 1 , 2 ] } );
-t.save( { _id : 2 , x : 5 , a : [ 2 , 3 ] } );
-t.ensureIndex( { a : 1 } );
-assert.eq( "1,5" , s() , "D1" );
-
-t.update( { a : 1 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "2,5" , s() , "D2" );
+t.save({_id: 1, x: 1, a: [1, 2]});
+t.save({_id: 2, x: 5, a: [2, 3]});
+t.ensureIndex({a: 1});
+assert.eq("1,5", s(), "D1");
-t.update( { a : 1 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "3,5" , s() , "D3" );
+t.update({a: 1}, {$inc: {x: 1}}, false, true);
+assert.eq("2,5", s(), "D2");
-t.update( { a : 3 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "3,6" , s() , "D4" );
+t.update({a: 1}, {$inc: {x: 1}}, false, true);
+assert.eq("3,5", s(), "D3");
-t.update( { a : 2 } , { $inc : { x : 1 } } , false , true );
-assert.eq( "4,7" , s() , "D5" );
+t.update({a: 3}, {$inc: {x: 1}}, false, true);
+assert.eq("3,6", s(), "D4");
-t.update( { a : { $gt : 0 } } , { $inc : { x : 1 } } , false , true );
-assert.eq( "5,8" , s() , "D6" );
+t.update({a: 2}, {$inc: {x: 1}}, false, true);
+assert.eq("4,7", s(), "D5");
-t.update( { a : { $lt : 10 } } , { $inc : { x : -1 } } , false , true );
-assert.eq( "4,7" , s() , "D7" );
+t.update({a: {$gt: 0}}, {$inc: {x: 1}}, false, true);
+assert.eq("5,8", s(), "D6");
-// ---
+t.update({a: {$lt: 10}}, {$inc: {x: -1}}, false, true);
+assert.eq("4,7", s(), "D7");
-t.save( { _id : 3 } );
-assert.eq( "4,7," , s() , "E1" );
-t.update( {} , { $inc : { x : 1 } } , false , true );
-assert.eq( "5,8,1" , s() , "E2" );
+// ---
-for ( i = 4; i<8; i++ )
- t.save( { _id : i } );
-t.save( { _id : i , x : 1 } );
-assert.eq( "5,8,1,,,,,1" , s() , "E4" );
-t.update( {} , { $inc : { x : 1 } } , false , true );
-assert.eq( "6,9,2,1,1,1,1,2" , s() , "E5" );
+t.save({_id: 3});
+assert.eq("4,7,", s(), "E1");
+t.update({}, {$inc: {x: 1}}, false, true);
+assert.eq("5,8,1", s(), "E2");
+for (i = 4; i < 8; i++)
+ t.save({_id: i});
+t.save({_id: i, x: 1});
+assert.eq("5,8,1,,,,,1", s(), "E4");
+t.update({}, {$inc: {x: 1}}, false, true);
+assert.eq("6,9,2,1,1,1,1,2", s(), "E5");
// --- $inc indexed field
t.drop();
-t.save( { x : 1 } );
-t.save( { x : 2 } );
-t.save( { x : 3 } );
+t.save({x: 1});
+t.save({x: 2});
+t.save({x: 3});
-t.ensureIndex( { x : 1 } );
+t.ensureIndex({x: 1});
-assert.eq( "1,2,3" , s() , "F1" );
-t.update( { x : { $gt : 0 } } , { $inc : { x : 5 } } , false , true );
-assert.eq( "6,7,8" , s() , "F1" );
+assert.eq("1,2,3", s(), "F1");
+t.update({x: {$gt: 0}}, {$inc: {x: 5}}, false, true);
+assert.eq("6,7,8", s(), "F1");
diff --git a/jstests/core/update8.js b/jstests/core/update8.js
index f59bea15e15..596bc8695dd 100644
--- a/jstests/core/update8.js
+++ b/jstests/core/update8.js
@@ -2,10 +2,10 @@
t = db.update8;
t.drop();
-t.update( { _id : 1 , tags: {"$ne": "a"}}, {"$push": { tags : "a" } } , true );
-assert.eq( { _id : 1 , tags : [ "a" ] } , t.findOne() , "A" );
+t.update({_id: 1, tags: {"$ne": "a"}}, {"$push": {tags: "a"}}, true);
+assert.eq({_id: 1, tags: ["a"]}, t.findOne(), "A");
t.drop();
-//SERVER-390
-//t.update( { "x.y" : 1 } , { $inc : { i : 1 } } , true );
-//printjson( t.findOne() );
+// SERVER-390
+// t.update( { "x.y" : 1 } , { $inc : { i : 1 } } , true );
+// printjson( t.findOne() );
diff --git a/jstests/core/update9.js b/jstests/core/update9.js
index 0a51d658199..d119681a09e 100644
--- a/jstests/core/update9.js
+++ b/jstests/core/update9.js
@@ -2,18 +2,17 @@
t = db.update9;
t.drop();
-orig = { "_id" : 1 ,
- "question" : "a",
- "choices" : { "1" : { "choice" : "b" },
- "0" : { "choice" : "c" } } ,
-
- };
+orig = {
+ "_id": 1,
+ "question": "a",
+ "choices": {"1": {"choice": "b"}, "0": {"choice": "c"}},
-t.save( orig );
-assert.eq( orig , t.findOne() , "A" );
+};
-t.update({_id: 1, 'choices.0.votes': {$ne: 1}}, {$push: {'choices.0.votes': 1}});
+t.save(orig);
+assert.eq(orig, t.findOne(), "A");
-orig.choices["0"].votes = [ 1 ] ;
-assert.eq( orig.choices["0"] , t.findOne().choices["0"] , "B" );
+t.update({_id: 1, 'choices.0.votes': {$ne: 1}}, {$push: {'choices.0.votes': 1}});
+orig.choices["0"].votes = [1];
+assert.eq(orig.choices["0"], t.findOne().choices["0"], "B");
diff --git a/jstests/core/update_addToSet.js b/jstests/core/update_addToSet.js
index c12f029f6ae..05437148b2f 100644
--- a/jstests/core/update_addToSet.js
+++ b/jstests/core/update_addToSet.js
@@ -2,57 +2,66 @@
t = db.update_addToSet1;
t.drop();
-o = { _id : 1 , a : [ 2 , 1 ] };
-t.insert( o );
+o = {
+ _id: 1,
+ a: [2, 1]
+};
+t.insert(o);
-assert.eq( o , t.findOne() , "A1" );
+assert.eq(o, t.findOne(), "A1");
-t.update( {} , { $addToSet : { a : 3 } } );
-o.a.push( 3 );
-assert.eq( o , t.findOne() , "A2" );
+t.update({}, {$addToSet: {a: 3}});
+o.a.push(3);
+assert.eq(o, t.findOne(), "A2");
-t.update( {} , { $addToSet : { a : 3 } } );
-assert.eq( o , t.findOne() , "A3" );
+t.update({}, {$addToSet: {a: 3}});
+assert.eq(o, t.findOne(), "A3");
// SERVER-628
-t.update( {} , { $addToSet : { a : { $each : [ 3 , 5 , 6 ] } } } );
-o.a.push( 5 );
-o.a.push( 6 );
-assert.eq( o , t.findOne() , "B1" );
+t.update({}, {$addToSet: {a: {$each: [3, 5, 6]}}});
+o.a.push(5);
+o.a.push(6);
+assert.eq(o, t.findOne(), "B1");
t.drop();
-o = { _id : 1 , a : [ 3 , 5 , 6 ] };
-t.insert( o );
-t.update( {} , { $addToSet : { a : { $each : [ 3 , 5 , 6 ] } } } );
-assert.eq( o , t.findOne() , "B2" );
+o = {
+ _id: 1,
+ a: [3, 5, 6]
+};
+t.insert(o);
+t.update({}, {$addToSet: {a: {$each: [3, 5, 6]}}});
+assert.eq(o, t.findOne(), "B2");
t.drop();
-t.update( { _id : 1 } , { $addToSet : { a : { $each : [ 3 , 5 , 6 ] } } } , true );
-assert.eq( o , t.findOne() , "B3" );
-t.update( { _id : 1 } , { $addToSet : { a : { $each : [ 3 , 5 , 6 ] } } } , true );
-assert.eq( o , t.findOne() , "B4" );
+t.update({_id: 1}, {$addToSet: {a: {$each: [3, 5, 6]}}}, true);
+assert.eq(o, t.findOne(), "B3");
+t.update({_id: 1}, {$addToSet: {a: {$each: [3, 5, 6]}}}, true);
+assert.eq(o, t.findOne(), "B4");
// SERVER-630
t.drop();
-t.update( { _id : 2 } , { $addToSet : { a : 3 } } , true );
-assert.eq( 1 , t.count() , "C1" );
-assert.eq( { _id : 2 , a : [ 3 ] } , t.findOne() , "C2" );
+t.update({_id: 2}, {$addToSet: {a: 3}}, true);
+assert.eq(1, t.count(), "C1");
+assert.eq({_id: 2, a: [3]}, t.findOne(), "C2");
// SERVER-3245
-o = {_id: 1, a: [1,2]};
+o = {
+ _id: 1,
+ a: [1, 2]
+};
t.drop();
-t.update( {_id: 1}, {$addToSet: {a: {$each: [1,2]}}}, true );
-assert.eq( o, t.findOne(), "D1" );
+t.update({_id: 1}, {$addToSet: {a: {$each: [1, 2]}}}, true);
+assert.eq(o, t.findOne(), "D1");
t.drop();
-t.update( {_id: 1}, {$addToSet: {a: {$each: [1,2,1,2]}}}, true );
-assert.eq( o, t.findOne(), "D2" );
+t.update({_id: 1}, {$addToSet: {a: {$each: [1, 2, 1, 2]}}}, true);
+assert.eq(o, t.findOne(), "D2");
t.drop();
-t.insert( {_id: 1} );
-t.update( {_id: 1}, {$addToSet: {a: {$each: [1,2,2,1]}}} );
-assert.eq( o, t.findOne(), "D3" );
+t.insert({_id: 1});
+t.update({_id: 1}, {$addToSet: {a: {$each: [1, 2, 2, 1]}}});
+assert.eq(o, t.findOne(), "D3");
-t.update( {_id: 1}, {$addToSet: {a: {$each: [3,2,2,3,3]}}} );
-o.a.push( 3 );
-assert.eq( o, t.findOne(), "D4" );
+t.update({_id: 1}, {$addToSet: {a: {$each: [3, 2, 2, 3, 3]}}});
+o.a.push(3);
+assert.eq(o, t.findOne(), "D4");
diff --git a/jstests/core/update_addToSet2.js b/jstests/core/update_addToSet2.js
index dd73a4f3531..44ba8bce671 100644
--- a/jstests/core/update_addToSet2.js
+++ b/jstests/core/update_addToSet2.js
@@ -2,10 +2,12 @@
t = db.update_addToSet2;
t.drop();
-o = { _id : 1 };
-t.insert( { _id : 1 } );
+o = {
+ _id: 1
+};
+t.insert({_id: 1});
-t.update({},{$addToSet : {'kids' :{ 'name' : 'Bob', 'age': '4'}}});
-t.update({},{$addToSet : {'kids' :{ 'name' : 'Dan', 'age': '2'}}});
+t.update({}, {$addToSet: {'kids': {'name': 'Bob', 'age': '4'}}});
+t.update({}, {$addToSet: {'kids': {'name': 'Dan', 'age': '2'}}});
-printjson( t.findOne() );
+printjson(t.findOne());
diff --git a/jstests/core/update_addToSet3.js b/jstests/core/update_addToSet3.js
index fb6df7645f0..b37112042cc 100644
--- a/jstests/core/update_addToSet3.js
+++ b/jstests/core/update_addToSet3.js
@@ -2,17 +2,16 @@
t = db.update_addToSet3;
t.drop();
-t.insert( { _id : 1 } );
+t.insert({_id: 1});
-t.update( { _id : 1 } , { $addToSet : { a : { $each : [ 6 , 5 , 4 ] } } } );
-assert.eq( t.findOne() , { _id : 1 , a : [ 6 , 5 , 4 ] } , "A1" );
+t.update({_id: 1}, {$addToSet: {a: {$each: [6, 5, 4]}}});
+assert.eq(t.findOne(), {_id: 1, a: [6, 5, 4]}, "A1");
-t.update( { _id : 1 } , { $addToSet : { a : { $each : [ 3 , 2 , 1 ] } } } );
-assert.eq( t.findOne() , { _id : 1 , a : [ 6 , 5 , 4 , 3 , 2 , 1 ] } , "A2" );
+t.update({_id: 1}, {$addToSet: {a: {$each: [3, 2, 1]}}});
+assert.eq(t.findOne(), {_id: 1, a: [6, 5, 4, 3, 2, 1]}, "A2");
-t.update( { _id : 1 } , { $addToSet : { a : { $each : [ 4 , 7 , 9 , 2 ] } } } );
-assert.eq( t.findOne() , { _id : 1 , a : [ 6 , 5 , 4 , 3 , 2 , 1 , 7 , 9 ] } , "A3" );
-
-t.update( { _id : 1 } , { $addToSet : { a : { $each : [ 12 , 13 , 12 ] } } } );
-assert.eq( t.findOne() , { _id : 1 , a : [ 6 , 5 , 4 , 3 , 2 , 1 , 7 , 9 , 12 , 13 ] } , "A4" );
+t.update({_id: 1}, {$addToSet: {a: {$each: [4, 7, 9, 2]}}});
+assert.eq(t.findOne(), {_id: 1, a: [6, 5, 4, 3, 2, 1, 7, 9]}, "A3");
+t.update({_id: 1}, {$addToSet: {a: {$each: [12, 13, 12]}}});
+assert.eq(t.findOne(), {_id: 1, a: [6, 5, 4, 3, 2, 1, 7, 9, 12, 13]}, "A4");
diff --git a/jstests/core/update_arraymatch1.js b/jstests/core/update_arraymatch1.js
index 9c1907b63f2..b8d78c3daee 100644
--- a/jstests/core/update_arraymatch1.js
+++ b/jstests/core/update_arraymatch1.js
@@ -2,15 +2,20 @@
t = db.update_arraymatch1;
t.drop();
-o = { _id : 1 , a : [ { x : 1 , y : 1 } , { x : 2 , y : 2 } , { x : 3 , y : 3 } ] };
-t.insert( o );
-assert.eq( o , t.findOne() , "A1" );
+o = {
+ _id: 1,
+ a: [{x: 1, y: 1}, {x: 2, y: 2}, {x: 3, y: 3}]
+};
+t.insert(o);
+assert.eq(o, t.findOne(), "A1");
-q = { "a.x" : 2 };
-t.update( q , { $set : { b : 5 } } );
+q = {
+ "a.x": 2
+};
+t.update(q, {$set: {b: 5}});
o.b = 5;
-assert.eq( o , t.findOne() , "A2" );
+assert.eq(o, t.findOne(), "A2");
-t.update( { "a.x" : 2 } , { $inc : { "a.$.y" : 1 } } );
+t.update({"a.x": 2}, {$inc: {"a.$.y": 1}});
o.a[1].y++;
-assert.eq( o , t.findOne() , "A3" );
+assert.eq(o, t.findOne(), "A3");
diff --git a/jstests/core/update_arraymatch2.js b/jstests/core/update_arraymatch2.js
index fc1e2f93fc5..ede1e0ad69a 100644
--- a/jstests/core/update_arraymatch2.js
+++ b/jstests/core/update_arraymatch2.js
@@ -1,16 +1,16 @@
t = db.update_arraymatch2;
t.drop();
-t.insert( { } );
-t.insert( { x : [1,2,3] } );
-t.insert( { x : 99 } );
-t.update( {x : 2}, { $inc : { "x.$" : 1 } } , false, true );
-assert( t.findOne({x:1}).x[1] == 3, "A1" );
+t.insert({});
+t.insert({x: [1, 2, 3]});
+t.insert({x: 99});
+t.update({x: 2}, {$inc: {"x.$": 1}}, false, true);
+assert(t.findOne({x: 1}).x[1] == 3, "A1");
-t.insert( { x : { y : [8,7,6] } } );
-t.update( {'x.y' : 7}, { $inc : { "x.y.$" : 1 } } , false, true );
-assert.eq( 8 , t.findOne({"x.y" : 8}).x.y[1] , "B1" );
+t.insert({x: {y: [8, 7, 6]}});
+t.update({'x.y': 7}, {$inc: {"x.y.$": 1}}, false, true);
+assert.eq(8, t.findOne({"x.y": 8}).x.y[1], "B1");
-t.insert( { x : [90,91,92], y : ['a', 'b', 'c'] } );
-t.update( { x : 92} , { $set : { 'y.$' : 'z' } }, false, true );
-assert.eq( 'z', t.findOne({x:92}).y[2], "B2" );
+t.insert({x: [90, 91, 92], y: ['a', 'b', 'c']});
+t.update({x: 92}, {$set: {'y.$': 'z'}}, false, true);
+assert.eq('z', t.findOne({x: 92}).y[2], "B2");
diff --git a/jstests/core/update_arraymatch3.js b/jstests/core/update_arraymatch3.js
index 96fa0a5cbb5..5fe2c4a1f16 100644
--- a/jstests/core/update_arraymatch3.js
+++ b/jstests/core/update_arraymatch3.js
@@ -2,16 +2,15 @@
t = db.update_arraymatch3;
t.drop();
-o = { _id : 1 ,
- title : "ABC",
- comments : [ { "by" : "joe", "votes" : 3 },
- { "by" : "jane", "votes" : 7 }
- ]
- };
+o = {
+ _id: 1,
+ title: "ABC",
+ comments: [{"by": "joe", "votes": 3}, {"by": "jane", "votes": 7}]
+};
-t.save( o );
-assert.eq( o , t.findOne() , "A1" );
+t.save(o);
+assert.eq(o, t.findOne(), "A1");
-t.update( {'comments.by':'joe'}, {$inc:{'comments.$.votes':1}}, false, true );
+t.update({'comments.by': 'joe'}, {$inc: {'comments.$.votes': 1}}, false, true);
o.comments[0].votes++;
-assert.eq( o , t.findOne() , "A2" );
+assert.eq(o, t.findOne(), "A2");
diff --git a/jstests/core/update_arraymatch4.js b/jstests/core/update_arraymatch4.js
index d445168ca25..fabe07f7337 100644
--- a/jstests/core/update_arraymatch4.js
+++ b/jstests/core/update_arraymatch4.js
@@ -2,17 +2,18 @@
t = db.update_arraymatch4;
t.drop();
-x = { _id : 1 , arr : ["A1","B1","C1"] };
-t.insert( x );
-assert.eq( x , t.findOne() , "A1" );
+x = {
+ _id: 1,
+ arr: ["A1", "B1", "C1"]
+};
+t.insert(x);
+assert.eq(x, t.findOne(), "A1");
x.arr[0] = "A2";
-t.update( { arr : "A1" } , { $set : { "arr.$" : "A2" } } );
-assert.eq( x , t.findOne() , "A2" );
+t.update({arr: "A1"}, {$set: {"arr.$": "A2"}});
+assert.eq(x, t.findOne(), "A2");
-t.ensureIndex( { arr : 1 } );
+t.ensureIndex({arr: 1});
x.arr[0] = "A3";
-t.update( { arr : "A2" } , { $set : { "arr.$" : "A3" } } );
-assert.eq( x , t.findOne() , "A3" ); // SERVER-1055
-
-
+t.update({arr: "A2"}, {$set: {"arr.$": "A3"}});
+assert.eq(x, t.findOne(), "A3"); // SERVER-1055
diff --git a/jstests/core/update_arraymatch5.js b/jstests/core/update_arraymatch5.js
index b468d0113ea..39768c8d2c5 100644
--- a/jstests/core/update_arraymatch5.js
+++ b/jstests/core/update_arraymatch5.js
@@ -2,14 +2,17 @@
t = db.update_arraymatch5;
t.drop();
-t.insert({abc:{visible:true}, testarray:[{foobar_id:316, visible:true, xxx: 1}]});
-t.ensureIndex({'abc.visible':1, 'testarray.visible':1 , 'testarray.xxx': 1});
-assert( t.findOne({'abc.visible':true, testarray:{'$elemMatch': {visible:true, xxx:1}}}) , "A1" );
-assert( t.findOne({testarray:{'$elemMatch': {visible:true, xxx:1}}}) , "A2" );
+t.insert({abc: {visible: true}, testarray: [{foobar_id: 316, visible: true, xxx: 1}]});
+t.ensureIndex({'abc.visible': 1, 'testarray.visible': 1, 'testarray.xxx': 1});
+assert(t.findOne({'abc.visible': true, testarray: {'$elemMatch': {visible: true, xxx: 1}}}), "A1");
+assert(t.findOne({testarray: {'$elemMatch': {visible: true, xxx: 1}}}), "A2");
-t.update({'testarray.foobar_id':316}, {'$set': {'testarray.$.visible': true, 'testarray.$.xxx': 2}}, false, true);
+t.update({'testarray.foobar_id': 316},
+ {'$set': {'testarray.$.visible': true, 'testarray.$.xxx': 2}},
+ false,
+ true);
-assert( t.findOne() , "B1" );
-assert( t.findOne({testarray:{'$elemMatch': {visible:true, xxx:2}}}) , "B2" );
-assert( t.findOne({'abc.visible':true, testarray:{'$elemMatch': {visible:true, xxx:2}}}) , "B3" );
-assert.eq( 1 , t.find().count() , "B4" );
+assert(t.findOne(), "B1");
+assert(t.findOne({testarray: {'$elemMatch': {visible: true, xxx: 2}}}), "B2");
+assert(t.findOne({'abc.visible': true, testarray: {'$elemMatch': {visible: true, xxx: 2}}}), "B3");
+assert.eq(1, t.find().count(), "B4");
diff --git a/jstests/core/update_arraymatch6.js b/jstests/core/update_arraymatch6.js
index 71e443fa44f..fe4b09de8a0 100644
--- a/jstests/core/update_arraymatch6.js
+++ b/jstests/core/update_arraymatch6.js
@@ -3,13 +3,13 @@ t = db.jstests_update_arraymatch6;
t.drop();
function doTest() {
- t.save( {a: [{id: 1, x: [5,6,7]}, {id: 2, x: [8,9,10]}]} );
- res = t.update({'a.id': 1}, {$set: {'a.$.x': [1,1,1]}});
- assert.writeOK( res );
- assert.eq.automsg( "1", "t.findOne().a[ 0 ].x[ 0 ]" );
+ t.save({a: [{id: 1, x: [5, 6, 7]}, {id: 2, x: [8, 9, 10]}]});
+ res = t.update({'a.id': 1}, {$set: {'a.$.x': [1, 1, 1]}});
+ assert.writeOK(res);
+ assert.eq.automsg("1", "t.findOne().a[ 0 ].x[ 0 ]");
}
doTest();
t.drop();
-t.ensureIndex( { 'a.id':1 } );
+t.ensureIndex({'a.id': 1});
doTest(); \ No newline at end of file
diff --git a/jstests/core/update_arraymatch7.js b/jstests/core/update_arraymatch7.js
index 5621f60c39e..4c0302dbfec 100644
--- a/jstests/core/update_arraymatch7.js
+++ b/jstests/core/update_arraymatch7.js
@@ -6,14 +6,14 @@ t.drop();
function testPositionalInc() {
t.remove({});
- t.save( { a:[ { b:'match', count:0 } ] } );
- t.update( { 'a.b':'match' }, { $inc:{ 'a.$.count':1 } } );
+ t.save({a: [{b: 'match', count: 0}]});
+ t.update({'a.b': 'match'}, {$inc: {'a.$.count': 1}});
// Check that the positional $inc succeeded.
- assert( t.findOne( { 'a.count':1 } ) );
+ assert(t.findOne({'a.count': 1}));
}
testPositionalInc();
// Now check with a non multikey index.
-t.ensureIndex( { 'a.b' : 1 } );
+t.ensureIndex({'a.b': 1});
testPositionalInc();
diff --git a/jstests/core/update_arraymatch8.js b/jstests/core/update_arraymatch8.js
index 1e8ce377862..7e4eb59f37a 100644
--- a/jstests/core/update_arraymatch8.js
+++ b/jstests/core/update_arraymatch8.js
@@ -4,155 +4,155 @@
// array.$.name
t = db.jstests_update_arraymatch8;
t.drop();
-t.ensureIndex( {'array.name': 1} );
-t.insert( {'array': [{'name': 'old'}]} );
-assert( t.findOne({'array.name': 'old'}) );
-t.update( {'array.name': 'old'}, {$set: {'array.$.name': 'new'}} );
-assert( t.findOne({'array.name': 'new'}) );
-assert( !t.findOne({'array.name': 'old'}) );
+t.ensureIndex({'array.name': 1});
+t.insert({'array': [{'name': 'old'}]});
+assert(t.findOne({'array.name': 'old'}));
+t.update({'array.name': 'old'}, {$set: {'array.$.name': 'new'}});
+assert(t.findOne({'array.name': 'new'}));
+assert(!t.findOne({'array.name': 'old'}));
// array.$ (failed in 2.2.2)
t = db.jstests_update_arraymatch8;
t.drop();
-t.ensureIndex( {'array.name': 1} );
-t.insert( {'array': [{'name': 'old'}]} );
-assert( t.findOne({'array.name': 'old'}) );
-t.update( {'array.name': 'old'}, {$set: {'array.$': {'name':'new'}}} );
-assert( t.findOne({'array.name': 'new'}) );
-assert( !t.findOne({'array.name': 'old'}) );
+t.ensureIndex({'array.name': 1});
+t.insert({'array': [{'name': 'old'}]});
+assert(t.findOne({'array.name': 'old'}));
+t.update({'array.name': 'old'}, {$set: {'array.$': {'name': 'new'}}});
+assert(t.findOne({'array.name': 'new'}));
+assert(!t.findOne({'array.name': 'old'}));
// array.0.name
t = db.jstests_update_arraymatch8;
t.drop();
-t.ensureIndex( {'array.name': 1} );
-t.insert( {'array': [{'name': 'old'}]} );
-assert( t.findOne({'array.name': 'old'}) );
-t.update( {'array.name': 'old'}, {$set: {'array.0.name': 'new'}} );
-assert( t.findOne({'array.name': 'new'}) );
-assert( !t.findOne({'array.name': 'old'}) );
+t.ensureIndex({'array.name': 1});
+t.insert({'array': [{'name': 'old'}]});
+assert(t.findOne({'array.name': 'old'}));
+t.update({'array.name': 'old'}, {$set: {'array.0.name': 'new'}});
+assert(t.findOne({'array.name': 'new'}));
+assert(!t.findOne({'array.name': 'old'}));
// array.0 (failed in 2.2.2)
t = db.jstests_update_arraymatch8;
t.drop();
-t.ensureIndex( {'array.name': 1} );
-t.insert( {'array': [{'name': 'old'}]} );
-assert( t.findOne({'array.name': 'old'}) );
-t.update( {'array.name': 'old'}, {$set: {'array.0': {'name':'new'}}} );
-assert( t.findOne({'array.name': 'new'}) );
-assert( !t.findOne({'array.name': 'old'}) );
+t.ensureIndex({'array.name': 1});
+t.insert({'array': [{'name': 'old'}]});
+assert(t.findOne({'array.name': 'old'}));
+t.update({'array.name': 'old'}, {$set: {'array.0': {'name': 'new'}}});
+assert(t.findOne({'array.name': 'new'}));
+assert(!t.findOne({'array.name': 'old'}));
// // array.12.name
t = db.jstests_update_arraymatch8;
t.drop();
arr = new Array();
-for (var i=0; i<20; i++) {
+for (var i = 0; i < 20; i++) {
arr.push({'name': 'old'});
}
-t.ensureIndex( {'array.name': 1} );
-t.insert( {_id:0, 'array': arr} );
-assert( t.findOne({'array.name': 'old'}) );
-t.update( {_id:0}, {$set: {'array.12.name': 'new'}} );
+t.ensureIndex({'array.name': 1});
+t.insert({_id: 0, 'array': arr});
+assert(t.findOne({'array.name': 'old'}));
+t.update({_id: 0}, {$set: {'array.12.name': 'new'}});
// note: both documents now have to be in the array
-assert( t.findOne({'array.name': 'new'}) );
-assert( t.findOne({'array.name': 'old'}) );
+assert(t.findOne({'array.name': 'new'}));
+assert(t.findOne({'array.name': 'old'}));
// array.12 (failed in 2.2.2)
t = db.jstests_update_arraymatch8;
t.drop();
arr = new Array();
-for (var i=0; i<20; i++) {
+for (var i = 0; i < 20; i++) {
arr.push({'name': 'old'});
}
-t.ensureIndex( {'array.name': 1} );
-t.insert( {_id:0, 'array': arr} );
-assert( t.findOne({'array.name': 'old'}) );
-t.update( {_id:0}, {$set: {'array.12': {'name':'new'}}} );
+t.ensureIndex({'array.name': 1});
+t.insert({_id: 0, 'array': arr});
+assert(t.findOne({'array.name': 'old'}));
+t.update({_id: 0}, {$set: {'array.12': {'name': 'new'}}});
// note: both documents now have to be in the array
-assert( t.findOne({'array.name': 'new'}) );
-assert( t.findOne({'array.name': 'old'}) );
+assert(t.findOne({'array.name': 'new'}));
+assert(t.findOne({'array.name': 'old'}));
// array.$.123a.name
t = db.jstests_update_arraymatch8;
t.drop();
-t.ensureIndex( {'array.123a.name': 1} );
-t.insert( {'array': [{'123a':{'name': 'old'}}]} );
-assert( t.findOne({'array.123a.name': 'old'}) );
-t.update( {'array.123a.name': 'old'}, {$set: {'array.$.123a.name': 'new'}} );
-assert( t.findOne({'array.123a.name': 'new'}) );
-assert( !t.findOne({'array.123a.name': 'old'}) );
+t.ensureIndex({'array.123a.name': 1});
+t.insert({'array': [{'123a': {'name': 'old'}}]});
+assert(t.findOne({'array.123a.name': 'old'}));
+t.update({'array.123a.name': 'old'}, {$set: {'array.$.123a.name': 'new'}});
+assert(t.findOne({'array.123a.name': 'new'}));
+assert(!t.findOne({'array.123a.name': 'old'}));
// array.$.123a
t = db.jstests_update_arraymatch8;
t.drop();
-t.ensureIndex( {'array.name': 1} );
-t.insert( {'array': [{'123a':{'name': 'old'}}]} );
-assert( t.findOne({'array.123a.name': 'old'}) );
-t.update( {'array.123a.name': 'old'}, {$set: {'array.$.123a': {'name': 'new'}}} );
-assert( t.findOne({'array.123a.name': 'new'}) );
-assert( !t.findOne({'array.123a.name': 'old'}) );
+t.ensureIndex({'array.name': 1});
+t.insert({'array': [{'123a': {'name': 'old'}}]});
+assert(t.findOne({'array.123a.name': 'old'}));
+t.update({'array.123a.name': 'old'}, {$set: {'array.$.123a': {'name': 'new'}}});
+assert(t.findOne({'array.123a.name': 'new'}));
+assert(!t.findOne({'array.123a.name': 'old'}));
// array.0.123a.name
t = db.jstests_update_arraymatch8;
t.drop();
-t.ensureIndex( {'array.123a.name': 1} );
-t.insert( {'array': [{'123a':{'name': 'old'}}]} );
-assert( t.findOne({'array.123a.name': 'old'}) );
-t.update( {'array.123a.name': 'old'}, {$set: {'array.0.123a.name': 'new'}} );
-assert( t.findOne({'array.123a.name': 'new'}) );
-assert( !t.findOne({'array.123a.name': 'old'}) );
+t.ensureIndex({'array.123a.name': 1});
+t.insert({'array': [{'123a': {'name': 'old'}}]});
+assert(t.findOne({'array.123a.name': 'old'}));
+t.update({'array.123a.name': 'old'}, {$set: {'array.0.123a.name': 'new'}});
+assert(t.findOne({'array.123a.name': 'new'}));
+assert(!t.findOne({'array.123a.name': 'old'}));
// array.0.123a
t = db.jstests_update_arraymatch8;
t.drop();
-t.ensureIndex( {'array.name': 1} );
-t.insert( {'array': [{'123a':{'name': 'old'}}]} );
-assert( t.findOne({'array.123a.name': 'old'}) );
-t.update( {'array.123a.name': 'old'}, {$set: {'array.0.123a': {'name': 'new'}}} );
-assert( t.findOne({'array.123a.name': 'new'}) );
-assert( !t.findOne({'array.123a.name': 'old'}) );
+t.ensureIndex({'array.name': 1});
+t.insert({'array': [{'123a': {'name': 'old'}}]});
+assert(t.findOne({'array.123a.name': 'old'}));
+t.update({'array.123a.name': 'old'}, {$set: {'array.0.123a': {'name': 'new'}}});
+assert(t.findOne({'array.123a.name': 'new'}));
+assert(!t.findOne({'array.123a.name': 'old'}));
// a.0.b
t = db.jstests_update_arraymatch8;
t.drop();
-t.ensureIndex( {'a.0.b': 1} );
-t.insert( {'a': [ [ { b:'old' } ] ] } );
-assert( t.findOne({'a.0.0.b': 'old'}) );
-assert( t.findOne({'a.0.b': 'old'}) );
-t.update( {}, {$set: {'a.0.0.b': 'new'}} );
-assert( t.findOne({'a.0.b': 'new'}) );
-assert( !t.findOne({'a.0.b': 'old'}) );
+t.ensureIndex({'a.0.b': 1});
+t.insert({'a': [[{b: 'old'}]]});
+assert(t.findOne({'a.0.0.b': 'old'}));
+assert(t.findOne({'a.0.b': 'old'}));
+t.update({}, {$set: {'a.0.0.b': 'new'}});
+assert(t.findOne({'a.0.b': 'new'}));
+assert(!t.findOne({'a.0.b': 'old'}));
// a.0.b.c
t = db.jstests_update_arraymatch8;
t.drop();
-t.ensureIndex( {'a.0.b.c': 1} );
-t.insert( {'a': [ { b:[ { c:'old' } ] } ] } );
-assert( t.findOne({'a.0.b.0.c': 'old'}) );
-assert( t.findOne({'a.b.0.c': 'old'}) );
-assert( t.findOne({'a.0.b.c': 'old'}) );
-assert( t.findOne({'a.b.c': 'old'}) );
-t.update( {}, {$set: {'a.0.b.0.c': 'new'}} );
-assert( t.findOne({'a.0.b.c': 'new'}) );
-assert( !t.findOne({'a.0.b.c': 'old'}) );
+t.ensureIndex({'a.0.b.c': 1});
+t.insert({'a': [{b: [{c: 'old'}]}]});
+assert(t.findOne({'a.0.b.0.c': 'old'}));
+assert(t.findOne({'a.b.0.c': 'old'}));
+assert(t.findOne({'a.0.b.c': 'old'}));
+assert(t.findOne({'a.b.c': 'old'}));
+t.update({}, {$set: {'a.0.b.0.c': 'new'}});
+assert(t.findOne({'a.0.b.c': 'new'}));
+assert(!t.findOne({'a.0.b.c': 'old'}));
// a.b.$ref
t = db.jstests_update_arraymatch8;
t.drop();
-t.ensureIndex( {'a.b.$ref': 1} );
-t.insert( {'a': [ { 'b':{ '$ref':'old', '$id':0 } } ] } );
-assert( t.findOne({'a.b.$ref': 'old'}) );
-assert( t.findOne({'a.0.b.$ref': 'old'}) );
-t.update( {}, {$set: {'a.0.b.$ref': 'new'}} );
-assert( t.findOne({'a.b.$ref': 'new'}) );
-assert( !t.findOne({'a.b.$ref': 'old'}) );
+t.ensureIndex({'a.b.$ref': 1});
+t.insert({'a': [{'b': {'$ref': 'old', '$id': 0}}]});
+assert(t.findOne({'a.b.$ref': 'old'}));
+assert(t.findOne({'a.0.b.$ref': 'old'}));
+t.update({}, {$set: {'a.0.b.$ref': 'new'}});
+assert(t.findOne({'a.b.$ref': 'new'}));
+assert(!t.findOne({'a.b.$ref': 'old'}));
// a.b and a-b
t = db.jstests_update_arraymatch8;
t.drop();
-t.ensureIndex( {'a.b': 1} );
-t.ensureIndex( {'a-b': 1} );
-t.insert( {'a':{'b':'old'}} );
-assert( t.findOne({'a.b': 'old'}) );
-t.update( {}, {$set: {'a': {'b': 'new'}}} );
-assert( t.findOne({'a.b': 'new'}) );
-assert( !t.findOne({'a.b': 'old'}) );
+t.ensureIndex({'a.b': 1});
+t.ensureIndex({'a-b': 1});
+t.insert({'a': {'b': 'old'}});
+assert(t.findOne({'a.b': 'old'}));
+t.update({}, {$set: {'a': {'b': 'new'}}});
+assert(t.findOne({'a.b': 'new'}));
+assert(!t.findOne({'a.b': 'old'}));
diff --git a/jstests/core/update_bit_examples.js b/jstests/core/update_bit_examples.js
index 3374b502055..adcf6976572 100644
--- a/jstests/core/update_bit_examples.js
+++ b/jstests/core/update_bit_examples.js
@@ -5,21 +5,21 @@ coll.drop();
// $bit and
coll.remove({});
-coll.save({_id:1, a:NumberInt(2)});
+coll.save({_id: 1, a: NumberInt(2)});
res = coll.update({}, {$bit: {a: {and: NumberInt(4)}}});
assert.writeOK(res);
assert.eq(coll.findOne().a, 0);
// $bit or
coll.remove({});
-coll.save({_id:1, a:NumberInt(2)});
+coll.save({_id: 1, a: NumberInt(2)});
res = coll.update({}, {$bit: {a: {or: NumberInt(4)}}});
assert.writeOK(res);
assert.eq(coll.findOne().a, 6);
// $bit xor
coll.remove({});
-coll.save({_id:1, a:NumberInt(0)});
+coll.save({_id: 1, a: NumberInt(0)});
res = coll.update({}, {$bit: {a: {xor: NumberInt(4)}}});
assert.writeOK(res);
assert.eq(coll.findOne().a, 4);
diff --git a/jstests/core/update_blank1.js b/jstests/core/update_blank1.js
index 8fe58419ddc..0777c68c745 100644
--- a/jstests/core/update_blank1.js
+++ b/jstests/core/update_blank1.js
@@ -2,9 +2,12 @@
t = db.update_blank1;
t.drop();
-orig = { "" : 1 , _id : 2 , "a" : 3 , "b" : 4 };
-t.insert( orig );
-var res = t.update( {} , { $set : { "c" : 5 } } );
-print( res );
+orig = {
+ "": 1,
+ _id: 2, "a": 3, "b": 4
+};
+t.insert(orig);
+var res = t.update({}, {$set: {"c": 5}});
+print(res);
orig["c"] = 5;
-assert.docEq( orig , t.findOne() , "after $set" ); // SERVER-2651
+assert.docEq(orig, t.findOne(), "after $set"); // SERVER-2651
diff --git a/jstests/core/update_currentdate_examples.js b/jstests/core/update_currentdate_examples.js
index 3430c261481..466ce96e5cc 100644
--- a/jstests/core/update_currentdate_examples.js
+++ b/jstests/core/update_currentdate_examples.js
@@ -5,21 +5,21 @@ coll.drop();
// $currentDate default
coll.remove({});
-coll.save({_id:1, a:2});
+coll.save({_id: 1, a: 2});
res = coll.update({}, {$currentDate: {a: true}});
assert.writeOK(res);
assert(coll.findOne().a.constructor == Date);
// $currentDate type = date
coll.remove({});
-coll.save({_id:1, a:2});
+coll.save({_id: 1, a: 2});
res = coll.update({}, {$currentDate: {a: {$type: "date"}}});
assert.writeOK(res);
assert(coll.findOne().a.constructor == Date);
// $currentDate type = timestamp
coll.remove({});
-coll.save({_id:1, a:2});
+coll.save({_id: 1, a: 2});
res = coll.update({}, {$currentDate: {a: {$type: "timestamp"}}});
assert.writeOK(res);
assert(coll.findOne().a.constructor == Timestamp);
diff --git a/jstests/core/update_dbref.js b/jstests/core/update_dbref.js
index d4c9ed7354f..71729c203e5 100644
--- a/jstests/core/update_dbref.js
+++ b/jstests/core/update_dbref.js
@@ -4,37 +4,39 @@ var res;
t = db.jstests_update_dbref;
t.drop();
-res = t.save({_id:1, a: new DBRef("a", "b")});
+res = t.save({_id: 1, a: new DBRef("a", "b")});
assert(!res.hasWriteError(), "failed to save dbref");
-assert.docEq({_id:1, a: new DBRef("a", "b")}, t.findOne());
+assert.docEq({_id: 1, a: new DBRef("a", "b")}, t.findOne());
res = t.update({}, {$set: {"a.$id": 2}});
assert(!res.hasWriteError(), "a.$id update");
-assert.docEq({_id:1, a: new DBRef("a", 2)}, t.findOne());
+assert.docEq({_id: 1, a: new DBRef("a", 2)}, t.findOne());
res = t.update({}, {$set: {"a.$ref": "b"}});
assert(!res.hasWriteError(), "a.$ref update");
-assert.docEq({_id:1, a: new DBRef("b", 2)}, t.findOne());
+assert.docEq({_id: 1, a: new DBRef("b", 2)}, t.findOne());
// Bad updates
res = t.update({}, {$set: {"$id": 3}});
assert.writeError(res);
assert(/\$id/.test(res.getWriteError()), "expected bad update because of $id");
-assert.docEq({_id:1, a: new DBRef("b", 2)}, t.findOne());
+assert.docEq({_id: 1, a: new DBRef("b", 2)}, t.findOne());
res = t.update({}, {$set: {"$ref": "foo"}});
assert.writeError(res);
assert(/\$ref/.test(res.getWriteError()), "expected bad update because of $ref");
-assert.docEq({_id:1, a: new DBRef("b", 2)}, t.findOne());
+assert.docEq({_id: 1, a: new DBRef("b", 2)}, t.findOne());
res = t.update({}, {$set: {"$db": "aDB"}});
assert.writeError(res);
assert(/\$db/.test(res.getWriteError()), "expected bad update because of $db");
-assert.docEq({_id:1, a: new DBRef("b", 2)}, t.findOne());
+assert.docEq({_id: 1, a: new DBRef("b", 2)}, t.findOne());
res = t.update({}, {$set: {"b.$id": 2}});
-assert(res.hasWriteError(), "b.$id update should fail -- doc:" + tojson(t.findOne()) + " result:" + res.toString());
+assert(res.hasWriteError(),
+ "b.$id update should fail -- doc:" + tojson(t.findOne()) + " result:" + res.toString());
res = t.update({}, {$set: {"b.$ref": 2}});
-assert(res.hasWriteError(), "b.$ref update should fail -- doc:" + tojson(t.findOne()) + " result:" + res.toString());
+assert(res.hasWriteError(),
+ "b.$ref update should fail -- doc:" + tojson(t.findOne()) + " result:" + res.toString());
diff --git a/jstests/core/update_find_and_modify_id.js b/jstests/core/update_find_and_modify_id.js
index 12720be9d84..a75a5595451 100644
--- a/jstests/core/update_find_and_modify_id.js
+++ b/jstests/core/update_find_and_modify_id.js
@@ -2,7 +2,10 @@
// an _id in the update document, as long as the _id will not be modified
var t = db.jstests_server4516;
-var startingDoc = {_id: 1, a: 1};
+var startingDoc = {
+ _id: 1,
+ a: 1
+};
function prepare() {
t.drop();
@@ -32,7 +35,7 @@ function update_fails(updateDoc, qid) {
assert.eq(t.findOne(), startingDoc);
prepare();
- assert.throws(function () {
+ assert.throws(function() {
t.findAndModify({query: {_id: qid}, update: updateDoc, upsert: true});
});
assert.eq(t.count(), 1);
diff --git a/jstests/core/update_invalid1.js b/jstests/core/update_invalid1.js
index 46b68f7db63..3fd96d61f38 100644
--- a/jstests/core/update_invalid1.js
+++ b/jstests/core/update_invalid1.js
@@ -2,5 +2,5 @@
t = db.update_invalid1;
t.drop();
-t.update( { _id : 5 } , { $set : { $inc : { x : 5 } } } , true );
-assert.eq( 0 , t.count() , "A1" );
+t.update({_id: 5}, {$set: {$inc: {x: 5}}}, true);
+assert.eq(0, t.count(), "A1");
diff --git a/jstests/core/update_min_max_examples.js b/jstests/core/update_min_max_examples.js
index 7acfd5bb45b..a8a86f22986 100644
--- a/jstests/core/update_min_max_examples.js
+++ b/jstests/core/update_min_max_examples.js
@@ -4,59 +4,65 @@ var coll = db.update_min_max;
coll.drop();
// $min for number
-coll.insert({_id:1, a:2});
-res = coll.update({_id:1}, {$min: {a: 1}});
+coll.insert({_id: 1, a: 2});
+res = coll.update({_id: 1}, {$min: {a: 1}});
assert.writeOK(res);
-assert.eq(coll.findOne({_id:1}).a, 1);
+assert.eq(coll.findOne({_id: 1}).a, 1);
// $max for number
-coll.insert({_id:2, a:2});
-res = coll.update({_id:2}, {$max: {a: 1}});
+coll.insert({_id: 2, a: 2});
+res = coll.update({_id: 2}, {$max: {a: 1}});
assert.writeOK(res);
-assert.eq(coll.findOne({_id:2}).a, 2);
+assert.eq(coll.findOne({_id: 2}).a, 2);
// $min for Date
-coll.insert({_id:3, a: new Date()});
-var origDoc = coll.findOne({_id:3});
+coll.insert({_id: 3, a: new Date()});
+var origDoc = coll.findOne({_id: 3});
sleep(2);
-res = coll.update({_id:3}, {$min: {a: new Date()}});
+res = coll.update({_id: 3}, {$min: {a: new Date()}});
assert.writeOK(res);
-assert.eq(coll.findOne({_id:3}).a, origDoc.a);
+assert.eq(coll.findOne({_id: 3}).a, origDoc.a);
// $max for Date
-coll.insert({_id:4, a: new Date()});
+coll.insert({_id: 4, a: new Date()});
sleep(2);
var newDate = new Date();
-res = coll.update({_id:4}, {$max: {a: newDate}});
+res = coll.update({_id: 4}, {$max: {a: newDate}});
assert.writeOK(res);
-assert.eq(coll.findOne({_id:4}).a, newDate);
+assert.eq(coll.findOne({_id: 4}).a, newDate);
// $max for small number
-coll.insert({_id:5, a: 1e-15 });
+coll.insert({_id: 5, a: 1e-15});
// Slightly bigger than 1e-15.
var biggerval = 0.000000000000001000000000000001;
-res = coll.update({_id:5}, {$max: {a : biggerval}});
+res = coll.update({_id: 5}, {$max: {a: biggerval}});
assert.writeOK(res);
-assert.eq(coll.findOne({_id:5}).a, biggerval);
+assert.eq(coll.findOne({_id: 5}).a, biggerval);
// $min for a small number
-coll.insert({_id:6, a: biggerval });
-res = coll.update({_id:6}, {$min: {a : 1e-15}});
+coll.insert({_id: 6, a: biggerval});
+res = coll.update({_id: 6}, {$min: {a: 1e-15}});
assert.writeOK(res);
-assert.eq(coll.findOne({_id:6}).a, 1e-15);
+assert.eq(coll.findOne({_id: 6}).a, 1e-15);
// $max with positional operator
-var insertdoc = {_id:7, y: [{a:2}, {a:6}, {a:[9,1,1]}]};
+var insertdoc = {
+ _id: 7,
+ y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}]
+};
coll.insert(insertdoc);
-res = coll.update({_id: 7, "y.a": 6 }, {$max: {"y.$.a" : 7 }});
+res = coll.update({_id: 7, "y.a": 6}, {$max: {"y.$.a": 7}});
assert.writeOK(res);
insertdoc.y[1].a = 7;
-assert.docEq(coll.findOne({_id:7}), insertdoc);
+assert.docEq(coll.findOne({_id: 7}), insertdoc);
// $min with positional operator
-insertdoc = {_id:8, y: [{a:2}, {a:6}, {a:[9,1,1]}]};
+insertdoc = {
+ _id: 8,
+ y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}]
+};
coll.insert(insertdoc);
-res = coll.update({_id: 8, "y.a": 6 }, {$min: {"y.$.a" : 5 }});
+res = coll.update({_id: 8, "y.a": 6}, {$min: {"y.$.a": 5}});
assert.writeOK(res);
insertdoc.y[1].a = 5;
-assert.docEq(coll.findOne({_id:8}), insertdoc);
+assert.docEq(coll.findOne({_id: 8}), insertdoc);
diff --git a/jstests/core/update_mul_examples.js b/jstests/core/update_mul_examples.js
index 36738b93990..390ae57efd8 100644
--- a/jstests/core/update_mul_examples.js
+++ b/jstests/core/update_mul_examples.js
@@ -5,35 +5,35 @@ coll.drop();
// $mul positive
coll.remove({});
-coll.save({_id:1, a:2});
+coll.save({_id: 1, a: 2});
res = coll.update({}, {$mul: {a: 10}});
assert.writeOK(res);
assert.eq(coll.findOne().a, 20);
// $mul negative
coll.remove({});
-coll.save({_id:1, a:2});
+coll.save({_id: 1, a: 2});
res = coll.update({}, {$mul: {a: -10}});
assert.writeOK(res);
assert.eq(coll.findOne().a, -20);
// $mul zero
coll.remove({});
-coll.save({_id:1, a:2});
+coll.save({_id: 1, a: 2});
res = coll.update({}, {$mul: {a: 0}});
assert.writeOK(res);
assert.eq(coll.findOne().a, 0);
// $mul decimal
coll.remove({});
-coll.save({_id:1, a:2});
+coll.save({_id: 1, a: 2});
res = coll.update({}, {$mul: {a: 1.1}});
assert.writeOK(res);
assert.eq(coll.findOne().a, 2.2);
// $mul negative decimal
coll.remove({});
-coll.save({_id:1, a:2});
+coll.save({_id: 1, a: 2});
res = coll.update({}, {$mul: {a: -0.1}});
assert.writeOK(res);
assert.eq(coll.findOne().a, -0.2);
diff --git a/jstests/core/update_multi3.js b/jstests/core/update_multi3.js
index 38341dcd13f..10e639803b5 100644
--- a/jstests/core/update_multi3.js
+++ b/jstests/core/update_multi3.js
@@ -1,25 +1,22 @@
t = db.update_multi3;
-function test( useIndex ){
+function test(useIndex) {
t.drop();
-
- if ( useIndex )
- t.ensureIndex({k:1});
- for (i=0; i<10; i++) {
- t.save({ _id : i , k: 'x', a: []});
+ if (useIndex)
+ t.ensureIndex({k: 1});
+
+ for (i = 0; i < 10; i++) {
+ t.save({_id: i, k: 'x', a: []});
}
-
- t.update({k: 'x'}, {$push: {a: 'y'}}, false, true);
- t.find( { k : "x" } ).forEach(
- function(z){
- assert.eq( [ "y" ] , z.a , "useIndex: " + useIndex );
- }
- );
+ t.update({k: 'x'}, {$push: {a: 'y'}}, false, true);
+ t.find({k: "x"}).forEach(function(z) {
+ assert.eq(["y"], z.a, "useIndex: " + useIndex);
+ });
}
-test( false );
-test( true );
+test(false);
+test(true);
diff --git a/jstests/core/update_multi4.js b/jstests/core/update_multi4.js
index f290894298c..3b5c2f04b29 100644
--- a/jstests/core/update_multi4.js
+++ b/jstests/core/update_multi4.js
@@ -2,17 +2,15 @@
t = db.update_mulit4;
t.drop();
-for(i=0;i<1000;i++){
- t.insert( { _id:i ,
- k:i%12,
- v:"v"+i%12 } );
-}
+for (i = 0; i < 1000; i++) {
+ t.insert({_id: i, k: i % 12, v: "v" + i % 12});
+}
-t.ensureIndex({k:1});
+t.ensureIndex({k: 1});
-assert.eq( 84 , t.count({k:2,v:"v2"} ) , "A0" );
+assert.eq(84, t.count({k: 2, v: "v2"}), "A0");
-t.update({k:2},{$set:{v:"two v2"}},false,true);
+t.update({k: 2}, {$set: {v: "two v2"}}, false, true);
-assert.eq( 0 , t.count({k:2,v:"v2"} ) , "A1" );
-assert.eq( 84 , t.count({k:2,v:"two v2"} ) , "A2" );
+assert.eq(0, t.count({k: 2, v: "v2"}), "A1");
+assert.eq(84, t.count({k: 2, v: "two v2"}), "A2");
diff --git a/jstests/core/update_multi5.js b/jstests/core/update_multi5.js
index c6d51ef0196..e29ad562d8c 100644
--- a/jstests/core/update_multi5.js
+++ b/jstests/core/update_multi5.js
@@ -3,15 +3,11 @@ t = db.update_multi5;
t.drop();
-t.insert({path: 'r1', subscribers: [1,2]});
-t.insert({path: 'r2', subscribers: [3,4]});
+t.insert({path: 'r1', subscribers: [1, 2]});
+t.insert({path: 'r2', subscribers: [3, 4]});
t.update({}, {$addToSet: {subscribers: 5}}, false, true);
-t.find().forEach(
- function(z){
- assert.eq( 3 , z.subscribers.length , z );
- }
-);
-
-
+t.find().forEach(function(z) {
+ assert.eq(3, z.subscribers.length, z);
+});
diff --git a/jstests/core/update_multi6.js b/jstests/core/update_multi6.js
index 39434d3f512..57e8112031c 100644
--- a/jstests/core/update_multi6.js
+++ b/jstests/core/update_multi6.js
@@ -3,9 +3,8 @@ var res;
t = db.update_multi6;
t.drop();
-t.update( { _id : 1 } , { _id : 1 , x : 1 , y : 2 } , true , false );
-assert( t.findOne( { _id : 1 } ) , "A" );
-
-res = t.update( { _id : 2 } , { _id : 2 , x : 1 , y : 2 } , true , true );
-assert.writeError( res );
+t.update({_id: 1}, {_id: 1, x: 1, y: 2}, true, false);
+assert(t.findOne({_id: 1}), "A");
+res = t.update({_id: 2}, {_id: 2, x: 1, y: 2}, true, true);
+assert.writeError(res);
diff --git a/jstests/core/update_replace.js b/jstests/core/update_replace.js
index 54f13f7ded2..44099851ef4 100644
--- a/jstests/core/update_replace.js
+++ b/jstests/core/update_replace.js
@@ -14,19 +14,19 @@ var res;
conn._skipValidation = true;
// Should not allow "." in field names
-res = t.save({_id:1, "a.a":1});
+res = t.save({_id: 1, "a.a": 1});
assert(res.hasWriteError(), "a.a");
// Should not allow "." in field names, embedded
-res = t.save({_id:1, a :{"a.a":1}});
+res = t.save({_id: 1, a: {"a.a": 1}});
assert(res.hasWriteError(), "a: a.a");
// Should not allow "$"-prefixed field names, caught before "." check
-res = t.save({_id:1, $a :{"a.a":1}});
+res = t.save({_id: 1, $a: {"a.a": 1}});
assert(res.hasWriteError(), "$a: a.a");
// Should not allow "$"-prefixed field names
-res = t.save({_id:1, $a: 1});
+res = t.save({_id: 1, $a: 1});
assert(res.hasWriteError(), "$a");
// _id validation checks
@@ -36,7 +36,7 @@ res = t.save({_id: /a/});
assert(res.hasWriteError(), "_id regex");
// Should not allow regex _id, even if not first
-res = t.save({a:2, _id: /a/});
+res = t.save({a: 2, _id: /a/});
assert(res.hasWriteError(), "a _id regex");
// Should not allow array _id
@@ -44,9 +44,9 @@ res = t.save({_id: [9]});
assert(res.hasWriteError(), "_id array");
// This is fine since _id isn't a top level field
-res = t.save({a :{ _id: [9]}});
+res = t.save({a: {_id: [9]}});
assert(!res.hasWriteError(), "embedded _id array");
// This is fine since _id isn't a top level field
-res = t.save({b:1, a :{ _id: [9]}});
+res = t.save({b: 1, a: {_id: [9]}});
assert(!res.hasWriteError(), "b embedded _id array");
diff --git a/jstests/core/update_server-12848.js b/jstests/core/update_server-12848.js
index f5ee9a8f2fa..c33e8dd9f62 100644
--- a/jstests/core/update_server-12848.js
+++ b/jstests/core/update_server-12848.js
@@ -8,13 +8,19 @@ var res;
var t = db.update_server_12848;
t.drop();
-var orig = { "_id" : 1, "a" : [ 1, [ ] ] };
+var orig = {
+ "_id": 1,
+ "a": [1, []]
+};
res = t.insert(orig);
assert.writeOK(res, "insert");
assert.eq(orig, t.findOne());
-res = t.update({ "_id" : 1 }, { $addToSet : { "a.1" : 1 } });
+res = t.update({"_id": 1}, {$addToSet: {"a.1": 1}});
assert.writeOK(res, "update");
-var updated = { "_id" : 1, "a" : [ 1, [ 1 ] ] };
+var updated = {
+ "_id": 1,
+ "a": [1, [1]]
+};
assert.eq(updated, t.findOne());
diff --git a/jstests/core/update_setOnInsert.js b/jstests/core/update_setOnInsert.js
index 4f53d0311b5..430f23ce6dd 100644
--- a/jstests/core/update_setOnInsert.js
+++ b/jstests/core/update_setOnInsert.js
@@ -2,32 +2,32 @@
t = db.update_setOnInsert;
var res;
-function dotest( useIndex ) {
+function dotest(useIndex) {
t.drop();
- if ( useIndex ) {
- t.ensureIndex( { a : 1 } );
+ if (useIndex) {
+ t.ensureIndex({a: 1});
}
- t.update( { _id: 5 }, { $inc : { x: 2 }, $setOnInsert : { a : 3 } }, true );
- assert.docEq( { _id : 5, a: 3, x : 2 }, t.findOne() );
+ t.update({_id: 5}, {$inc: {x: 2}, $setOnInsert: {a: 3}}, true);
+ assert.docEq({_id: 5, a: 3, x: 2}, t.findOne());
- t.update( { _id: 5 }, { $set : { a : 4 } }, true );
+ t.update({_id: 5}, {$set: {a: 4}}, true);
- t.update( { _id: 5 }, { $inc : { x: 2 }, $setOnInsert : { a : 3 } }, true );
- assert.docEq( { _id : 5, a: 4, x : 4 }, t.findOne() );
+ t.update({_id: 5}, {$inc: {x: 2}, $setOnInsert: {a: 3}}, true);
+ assert.docEq({_id: 5, a: 4, x: 4}, t.findOne());
}
-dotest( false );
-dotest( true );
+dotest(false);
+dotest(true);
// Cases for SERVER-9958 -- Allow _id $setOnInsert during insert (if upsert:true, and not doc found)
t.drop();
-res = t.update( {_id: 1} , { $setOnInsert: { "_id.a": new Date() } } , true );
-assert.writeError(res, "$setOnInsert _id.a worked" );
+res = t.update({_id: 1}, {$setOnInsert: {"_id.a": new Date()}}, true);
+assert.writeError(res, "$setOnInsert _id.a worked");
-res = t.update( {"_id.a": 4} , { $setOnInsert: { "_id.b": 1 } } , true );
-assert.writeError(res, "$setOnInsert _id.a/b worked" );
+res = t.update({"_id.a": 4}, {$setOnInsert: {"_id.b": 1}}, true);
+assert.writeError(res, "$setOnInsert _id.a/b worked");
-res = t.update( {"_id.a": 4} , { $setOnInsert: { "_id": {a:4, b:1} } } , true );
-assert.writeError(res, "$setOnInsert _id.a/a+b worked" );
+res = t.update({"_id.a": 4}, {$setOnInsert: {"_id": {a: 4, b: 1}}}, true);
+assert.writeError(res, "$setOnInsert _id.a/a+b worked");
diff --git a/jstests/core/updatea.js b/jstests/core/updatea.js
index 13d2dd0acfc..0c7a9e1c504 100644
--- a/jstests/core/updatea.js
+++ b/jstests/core/updatea.js
@@ -3,66 +3,71 @@ var res;
t = db.updatea;
t.drop();
-orig = { _id : 1 , a : [ { x : 1 , y : 2 } , { x : 10 , y : 11 } ] };
+orig = {
+ _id: 1,
+ a: [{x: 1, y: 2}, {x: 10, y: 11}]
+};
-res = t.save( orig );
+res = t.save(orig);
assert.writeOK(res);
// SERVER-181
-res = t.update( {} , { $set : { "a.0.x" : 3 } } );
+res = t.update({}, {$set: {"a.0.x": 3}});
assert.writeOK(res);
orig.a[0].x = 3;
-assert.eq( orig , t.findOne() , "A1" );
+assert.eq(orig, t.findOne(), "A1");
-res = t.update( {} , { $set : { "a.1.z" : 17 } } );
+res = t.update({}, {$set: {"a.1.z": 17}});
assert.writeOK(res);
orig.a[1].z = 17;
-assert.eq( orig , t.findOne() , "A2" );
+assert.eq(orig, t.findOne(), "A2");
// SERVER-273
-res = t.update( {} , { $unset : { "a.1.y" : 1 } } );
+res = t.update({}, {$unset: {"a.1.y": 1}});
assert.writeOK(res);
delete orig.a[1].y;
-assert.eq( orig , t.findOne() , "A3" );
+assert.eq(orig, t.findOne(), "A3");
// SERVER-333
t.drop();
-orig = { _id : 1 , comments : [ { name : "blah" , rate_up : 0 , rate_ups : [] } ] };
-res = t.save( orig );
+orig = {
+ _id: 1,
+ comments: [{name: "blah", rate_up: 0, rate_ups: []}]
+};
+res = t.save(orig);
assert.writeOK(res);
-
-res = t.update( {} , { $inc: { "comments.0.rate_up" : 1 } , $push: { "comments.0.rate_ups" : 99 } } );
+res = t.update({}, {$inc: {"comments.0.rate_up": 1}, $push: {"comments.0.rate_ups": 99}});
assert.writeOK(res);
orig.comments[0].rate_up++;
-orig.comments[0].rate_ups.push( 99 );
-assert.eq( orig , t.findOne() , "B1" );
+orig.comments[0].rate_ups.push(99);
+assert.eq(orig, t.findOne(), "B1");
t.drop();
-orig = { _id : 1 , a : [] };
-for ( i=0; i<12; i++ )
- orig.a.push( i );
-
-
-res = t.save( orig );
+orig = {
+ _id: 1,
+ a: []
+};
+for (i = 0; i < 12; i++)
+ orig.a.push(i);
+
+res = t.save(orig);
assert.writeOK(res);
-assert.eq( orig , t.findOne() , "C1" );
+assert.eq(orig, t.findOne(), "C1");
-res = t.update( {} , { $inc: { "a.0" : 1 } } );
+res = t.update({}, {$inc: {"a.0": 1}});
assert.writeOK(res);
orig.a[0]++;
-assert.eq( orig , t.findOne() , "C2" );
+assert.eq(orig, t.findOne(), "C2");
-res = t.update( {} , { $inc: { "a.10" : 1 } } );
+res = t.update({}, {$inc: {"a.10": 1}});
assert.writeOK(res);
orig.a[10]++;
-
// SERVER-3218
t.drop();
-t.insert({"a":{"c00":1}, 'c':2});
-res = t.update({"c":2}, {'$inc':{'a.c000':1}});
+t.insert({"a": {"c00": 1}, 'c': 2});
+res = t.update({"c": 2}, {'$inc': {'a.c000': 1}});
assert.writeOK(res);
-assert.eq( { "c00" : 1 , "c000" : 1 } , t.findOne().a , "D1" );
-
+assert.eq({"c00": 1, "c000": 1}, t.findOne().a, "D1");
diff --git a/jstests/core/updateb.js b/jstests/core/updateb.js
index 59e6348a47a..f90ac62b6c3 100644
--- a/jstests/core/updateb.js
+++ b/jstests/core/updateb.js
@@ -2,10 +2,12 @@
t = db.updateb;
t.drop();
-t.update( { "x.y" : 2 } , { $inc : { a : 7 } } , true );
+t.update({"x.y": 2}, {$inc: {a: 7}}, true);
-correct = { a : 7 , x : { y : 2 } };
+correct = {
+ a: 7,
+ x: {y: 2}
+};
got = t.findOne();
delete got._id;
-assert.docEq( correct , got , "A" );
-
+assert.docEq(correct, got, "A");
diff --git a/jstests/core/updatec.js b/jstests/core/updatec.js
index 0c77b8b3cda..8ce8cf4ecdd 100644
--- a/jstests/core/updatec.js
+++ b/jstests/core/updatec.js
@@ -2,13 +2,7 @@
t = db.updatec;
t.drop();
-t.update( { "_id" : 123 }, { $set : { "v" : { "i" : 123, "a":456 } }, $push : { "f" : 234} }, 1, 0 );
-t.update( { "_id" : 123 }, { $set : { "v" : { "i" : 123, "a":456 } }, $push : { "f" : 234} }, 1, 0 );
-
-assert.docEq(
- {
- "_id" : 123,
- "f" : [ 234, 234 ] ,
- "v" : { "i" : 123, "a" : 456 }
- } , t.findOne() );
+t.update({"_id": 123}, {$set: {"v": {"i": 123, "a": 456}}, $push: {"f": 234}}, 1, 0);
+t.update({"_id": 123}, {$set: {"v": {"i": 123, "a": 456}}, $push: {"f": 234}}, 1, 0);
+assert.docEq({"_id": 123, "f": [234, 234], "v": {"i": 123, "a": 456}}, t.findOne());
diff --git a/jstests/core/updated.js b/jstests/core/updated.js
index da314268eb0..1eaaae3b051 100644
--- a/jstests/core/updated.js
+++ b/jstests/core/updated.js
@@ -2,19 +2,25 @@
t = db.updated;
t.drop();
-o = { _id : Math.random() ,
- items:[null,null,null,null]
- };
+o = {
+ _id: Math.random(),
+ items: [null, null, null, null]
+};
-t.insert( o );
-assert.docEq( o , t.findOne() , "A1" );
+t.insert(o);
+assert.docEq(o, t.findOne(), "A1");
-o.items[0] = {amount:9000,itemId:1};
-t.update({},{$set:{"items.0":o.items[0]}});
-assert.docEq( o , t.findOne() , "A2" );
+o.items[0] = {
+ amount: 9000,
+ itemId: 1
+};
+t.update({}, {$set: {"items.0": o.items[0]}});
+assert.docEq(o, t.findOne(), "A2");
o.items[0].amount += 1000;
-o.items[1] = {amount:1,itemId:2};
-t.update({},{$inc:{"items.0.amount":1000},$set:{"items.1":o.items[1]}});
-assert.docEq( o , t.findOne() , "A3" );
-
+o.items[1] = {
+ amount: 1,
+ itemId: 2
+};
+t.update({}, {$inc: {"items.0.amount": 1000}, $set: {"items.1": o.items[1]}});
+assert.docEq(o, t.findOne(), "A3");
diff --git a/jstests/core/updatee.js b/jstests/core/updatee.js
index 85ba37c5c05..e2fac8af287 100644
--- a/jstests/core/updatee.js
+++ b/jstests/core/updatee.js
@@ -3,21 +3,22 @@
t = db.updatee;
t.drop();
-var o = { "_id" : 1,
- "actual" : {
- "key1" : "val1",
- "key2" : "val2",
- "001" : "val3",
- "002" : "val4",
- "0020000000000000000000" : "val5"
- },
- "profile-id" : "test" };
-
-
-t.insert( o );
-assert.eq( o , t.findOne() , "A1" );
-
-t.update({"profile-id" : "test"}, {$set: {"actual.0030000000000000000000": "val6"}});
+var o = {
+ "_id": 1,
+ "actual": {
+ "key1": "val1",
+ "key2": "val2",
+ "001": "val3",
+ "002": "val4",
+ "0020000000000000000000": "val5"
+ },
+ "profile-id": "test"
+};
+
+t.insert(o);
+assert.eq(o, t.findOne(), "A1");
+
+t.update({"profile-id": "test"}, {$set: {"actual.0030000000000000000000": "val6"}});
var q = t.findOne();
@@ -25,47 +26,47 @@ var q = t.findOne();
assert.eq(q.actual["0020000000000000000000"], "val5", "A2");
assert.eq(q.actual["0030000000000000000000"], "val6", "A3");
-t.update({"profile-id" : "test"}, {$set: {"actual.02": "v4"}});
+t.update({"profile-id": "test"}, {$set: {"actual.02": "v4"}});
q = t.findOne();
assert.eq(q.actual["02"], "v4", "A4");
assert.eq(q.actual["002"], "val4", "A5");
-t.update({"_id" : 1}, {$set : {"actual.2139043290148390248219423941.b" : 4}});
+t.update({"_id": 1}, {$set: {"actual.2139043290148390248219423941.b": 4}});
q = t.findOne();
assert.eq(q.actual["2139043290148390248219423941"].b, 4, "A6");
// non-nested
-t.update({"_id" : 1}, {$set : {"7213647182934612837492342341" : 1}});
-t.update({"_id" : 1}, {$set : {"7213647182934612837492342342" : 2}});
+t.update({"_id": 1}, {$set: {"7213647182934612837492342341": 1}});
+t.update({"_id": 1}, {$set: {"7213647182934612837492342342": 2}});
q = t.findOne();
assert.eq(q["7213647182934612837492342341"], 1, "A7 1");
assert.eq(q["7213647182934612837492342342"], 2, "A7 2");
// 0s
-t.update({"_id" : 1}, {$set : {"actual.000" : "val000"}});
+t.update({"_id": 1}, {$set: {"actual.000": "val000"}});
q = t.findOne();
assert.eq(q.actual["000"], "val000", "A8 zeros");
-t.update({"_id" : 1}, {$set : {"actual.00" : "val00"}});
+t.update({"_id": 1}, {$set: {"actual.00": "val00"}});
q = t.findOne();
assert.eq(q.actual["00"], "val00", "A8 00");
assert.eq(q.actual["000"], "val000", "A9");
-t.update({"_id" : 1}, {$set : {"actual.000" : "val000"}});
+t.update({"_id": 1}, {$set: {"actual.000": "val000"}});
q = t.findOne();
assert.eq(q.actual["000"], "val000", "A9");
assert.eq(q.actual["00"], "val00", "A10");
-t.update({"_id" : 1}, {$set : {"actual.01" : "val01"}});
+t.update({"_id": 1}, {$set: {"actual.01": "val01"}});
q = t.findOne();
assert.eq(q.actual["000"], "val000", "A11");
assert.eq(q.actual["01"], "val01", "A12");
// shouldn't work, but shouldn't do anything too heinous, either
-t.update({"_id" : 1}, {$set : {"0.." : "val01"}});
-t.update({"_id" : 1}, {$set : {"0..0" : "val01"}});
-t.update({"_id" : 1}, {$set : {".0" : "val01"}});
-t.update({"_id" : 1}, {$set : {"..0" : "val01"}});
-t.update({"_id" : 1}, {$set : {"0.0..0" : "val01"}});
+t.update({"_id": 1}, {$set: {"0..": "val01"}});
+t.update({"_id": 1}, {$set: {"0..0": "val01"}});
+t.update({"_id": 1}, {$set: {".0": "val01"}});
+t.update({"_id": 1}, {$set: {"..0": "val01"}});
+t.update({"_id": 1}, {$set: {"0.0..0": "val01"}});
diff --git a/jstests/core/updatef.js b/jstests/core/updatef.js
index e1164f2b939..6bc8df4e0c1 100644
--- a/jstests/core/updatef.js
+++ b/jstests/core/updatef.js
@@ -1,23 +1,24 @@
// Test unsafe management of nsdt on update command yield SERVER-3208
prefixNS = db.jstests_updatef;
-prefixNS.save( {} );
+prefixNS.save({});
t = db.jstests_updatef_actual;
t.drop();
-t.save( {a:0,b:[]} );
-for( i = 0; i < 1000; ++i ) {
- t.save( {a:100} );
+t.save({a: 0, b: []});
+for (i = 0; i < 1000; ++i) {
+ t.save({a: 100});
}
-t.save( {a:0,b:[]} );
+t.save({a: 0, b: []});
// Repeatedly rename jstests_updatef to jstests_updatef_ and back. This will
// invalidate the jstests_updatef_actual NamespaceDetailsTransient object.
-s = startParallelShell( "for( i=0; i < 100; ++i ) { db.jstests_updatef.renameCollection( 'jstests_updatef_' ); db.jstests_updatef_.renameCollection( 'jstests_updatef' ); }" );
+s = startParallelShell(
+ "for( i=0; i < 100; ++i ) { db.jstests_updatef.renameCollection( 'jstests_updatef_' ); db.jstests_updatef_.renameCollection( 'jstests_updatef' ); }");
-for( i=0; i < 20; ++i ) {
- t.update( {a:0}, {$push:{b:i}}, false, true );
+for (i = 0; i < 20; ++i) {
+ t.update({a: 0}, {$push: {b: i}}, false, true);
}
s();
diff --git a/jstests/core/updateg.js b/jstests/core/updateg.js
index f8d452f71b2..908df755376 100644
--- a/jstests/core/updateg.js
+++ b/jstests/core/updateg.js
@@ -3,15 +3,15 @@
t = db.jstests_updateg;
t.drop();
-t.update({}, { '$inc' : { 'all.t' : 1, 'all-copy.t' : 1 }}, true);
-assert.eq( 1, t.count( {all:{t:1},'all-copy':{t:1}} ) );
+t.update({}, {'$inc': {'all.t': 1, 'all-copy.t': 1}}, true);
+assert.eq(1, t.count({all: {t: 1}, 'all-copy': {t: 1}}));
t.drop();
-t.save({ 'all' : {}, 'all-copy' : {}});
-t.update({}, { '$inc' : { 'all.t' : 1, 'all-copy.t' : 1 }});
-assert.eq( 1, t.count( {all:{t:1},'all-copy':{t:1}} ) );
+t.save({'all': {}, 'all-copy': {}});
+t.update({}, {'$inc': {'all.t': 1, 'all-copy.t': 1}});
+assert.eq(1, t.count({all: {t: 1}, 'all-copy': {t: 1}}));
t.drop();
-t.save({ 'all11' : {}, 'all2' : {}});
-t.update({}, { '$inc' : { 'all11.t' : 1, 'all2.t' : 1 }});
-assert.eq( 1, t.count( {all11:{t:1},'all2':{t:1}} ) );
+t.save({'all11': {}, 'all2': {}});
+t.update({}, {'$inc': {'all11.t': 1, 'all2.t': 1}});
+assert.eq(1, t.count({all11: {t: 1}, 'all2': {t: 1}}));
diff --git a/jstests/core/updateh.js b/jstests/core/updateh.js
index c409b49edaf..91c985ac014 100644
--- a/jstests/core/updateh.js
+++ b/jstests/core/updateh.js
@@ -4,82 +4,82 @@ var res;
t = db.jstest_updateh;
t.drop();
-t.insert( {x:1} );
+t.insert({x: 1});
-res = t.update( {x:1}, {$set: {y:1}} ); // ok
-assert.writeOK( res );
+res = t.update({x: 1}, {$set: {y: 1}}); // ok
+assert.writeOK(res);
-res = t.update( {x:1}, {$set: {$z:1}} ); // not ok
-assert.writeError( res );
+res = t.update({x: 1}, {$set: {$z: 1}}); // not ok
+assert.writeError(res);
-res = t.update( {x:1}, {$set: {'a.$b':1}} ); // not ok
-assert.writeError( res );
+res = t.update({x: 1}, {$set: {'a.$b': 1}}); // not ok
+assert.writeError(res);
-res = t.update( {x:1}, {$inc: {$z:1}} ); // not ok
-assert.writeError( res );
+res = t.update({x: 1}, {$inc: {$z: 1}}); // not ok
+assert.writeError(res);
-res = t.update( {x:1}, {$pushAll: {$z:[1,2,3]}} ); // not ok
-assert.writeError( res );
+res = t.update({x: 1}, {$pushAll: {$z: [1, 2, 3]}}); // not ok
+assert.writeError(res);
-//Second section
+// Second section
t.drop();
-t.save( {_id:0, n: 0} );
+t.save({_id: 0, n: 0});
// Test that '$' cannot be the first character in a field.
// SERVER-7150
-res = t.update({ n: 0 }, { $set: { $x: 1 }});
+res = t.update({n: 0}, {$set: {$x: 1}});
assert.writeError(res);
-res = t.update({ n: 0 }, { $set: { $$$: 1 }});
+res = t.update({n: 0}, {$set: {$$$: 1}});
assert.writeError(res);
-res = t.update({ n: 0 }, { $set: { "sneaky.$x": 1 }});
+res = t.update({n: 0}, {$set: {"sneaky.$x": 1}});
assert.writeError(res);
-res = t.update({ n: 0 }, { $set: { "secret.agent$.$x": 1 }});
+res = t.update({n: 0}, {$set: {"secret.agent$.$x": 1}});
assert.writeError(res);
-res = t.update({ n: 0 }, { $set: { "$secret.agent.x": 1 }});
+res = t.update({n: 0}, {$set: {"$secret.agent.x": 1}});
assert.writeError(res);
-res = t.update({ n: 0 }, { $set: { "secret.agent$": 1 }});
+res = t.update({n: 0}, {$set: {"secret.agent$": 1}});
assert.writeOK(res);
-t.save( {_id:0, n: 0} );
+t.save({_id: 0, n: 0});
// Test that you cannot update database references into top level fields
// Enable after SERVER-14252 fixed: currently validation does not catch DBRef
// fields at the top level for update and will not cause an error here
-//res = t.update({ n: 0 }, { $set: {$ref: "1", $id: 1, $db: "1"}});
-//assert.writeError(res);
+// res = t.update({ n: 0 }, { $set: {$ref: "1", $id: 1, $db: "1"}});
+// assert.writeError(res);
-//res = t.update({ n: 0 }, { $set: {$ref: "1", $id: 1}});
-//assert.writeError(res);
+// res = t.update({ n: 0 }, { $set: {$ref: "1", $id: 1}});
+// assert.writeError(res);
// SERVER-11241: Validation used to allow any DBRef field name as a prefix
// thus allowing things like $idXXX
-res = t.update({ n: 0 }, { $set: { $reffoo: 1 }});
+res = t.update({n: 0}, {$set: {$reffoo: 1}});
assert.writeError(res);
-res = t.update({ n: 0 }, { $set: { $idbar: 1 }});
+res = t.update({n: 0}, {$set: {$idbar: 1}});
assert.writeError(res);
-res = t.update({ n: 0 }, { $set: { $dbbaz: 1 }});
+res = t.update({n: 0}, {$set: {$dbbaz: 1}});
assert.writeError(res);
// Test that '$id', '$db', and '$ref' are acceptable field names in
// the correct case ( subdoc)
// SERVER-3231
-res = t.update({ n: 0 }, { $set: { 'x.$ref': '1', 'x.$id': 1, 'x.$db': '1' }});
+res = t.update({n: 0}, {$set: {'x.$ref': '1', 'x.$id': 1, 'x.$db': '1'}});
assert.writeOK(res);
-t.save( {_id:0, n: 0} );
+t.save({_id: 0, n: 0});
// Test that '$' can occur elsewhere in a field name.
// SERVER-7557
-res = t.update({n: 0 }, { $set: { ke$sha: 1 }});
+res = t.update({n: 0}, {$set: {ke$sha: 1}});
assert.writeOK(res);
-t.save( {_id:0, n: 0} );
+t.save({_id: 0, n: 0});
-res = t.update({n: 0 }, { $set: { more$$moreproblem$: 1 }});
+res = t.update({n: 0}, {$set: {more$$moreproblem$: 1}});
assert.writeOK(res);
-t.save( {_id:0, n: 0} );
+t.save({_id: 0, n: 0});
diff --git a/jstests/core/updatei.js b/jstests/core/updatei.js
index e45b3fde5bb..d5bc3500ab0 100644
--- a/jstests/core/updatei.js
+++ b/jstests/core/updatei.js
@@ -6,81 +6,81 @@ t = db.updatei;
t.drop();
-for (i=0; i<10; i++) {
- t.save({ _id : i, k: "x", a: [] });
+for (i = 0; i < 10; i++) {
+ t.save({_id: i, k: "x", a: []});
}
-t.update({ k: "x" }, { $push: { a: "y" }}, { multi: true });
-t.find({ k : "x" }).forEach(function(z) {
- assert.eq([ "y" ], z.a, "multi update using object arg");
+t.update({k: "x"}, {$push: {a: "y"}}, {multi: true});
+t.find({k: "x"}).forEach(function(z) {
+ assert.eq(["y"], z.a, "multi update using object arg");
});
t.drop();
// Using a single update
-for (i=0; i<10; i++) {
- t.save({ _id : i, k: "x", a: [] });
+for (i = 0; i < 10; i++) {
+ t.save({_id: i, k: "x", a: []});
}
-t.update({ k: "x" }, { $push: { a: "y" }}, { multi: false });
-assert.eq(1, t.find({ "a": "y" }).count(), "update using object arg");
+t.update({k: "x"}, {$push: {a: "y"}}, {multi: false});
+assert.eq(1, t.find({"a": "y"}).count(), "update using object arg");
t.drop();
// Using upsert, found
-for (i=0; i<10; i++) {
- t.save({ _id : i, k: "x", a: [] });
+for (i = 0; i < 10; i++) {
+ t.save({_id: i, k: "x", a: []});
}
-t.update({ k: "x" }, { $push: { a: "y" }}, { upsert: true });
-assert.eq(1, t.find({ "k": "x", "a": "y" }).count(), "upsert (found) using object arg");
+t.update({k: "x"}, {$push: {a: "y"}}, {upsert: true});
+assert.eq(1, t.find({"k": "x", "a": "y"}).count(), "upsert (found) using object arg");
t.drop();
// Using upsert + multi, found
-for (i=0; i<10; i++) {
- t.save({ _id : i, k: "x", a: [] });
+for (i = 0; i < 10; i++) {
+ t.save({_id: i, k: "x", a: []});
}
-t.update({ k: "x" }, { $push: { a: "y" }}, { upsert: true, multi: true });
-t.find({ k : "x" }).forEach(function(z) {
- assert.eq([ "y" ], z.a, "multi + upsert (found) using object arg");
+t.update({k: "x"}, {$push: {a: "y"}}, {upsert: true, multi: true});
+t.find({k: "x"}).forEach(function(z) {
+ assert.eq(["y"], z.a, "multi + upsert (found) using object arg");
});
t.drop();
// Using upsert, not found
-for (i=0; i<10; i++) {
- t.save({ _id : i, k: "x", a: [] });
+for (i = 0; i < 10; i++) {
+ t.save({_id: i, k: "x", a: []});
}
-t.update({ k: "y" }, { $push: { a: "y" }}, { upsert: true });
-assert.eq(1, t.find({ "k": "y", "a": "y" }).count(), "upsert (not found) using object arg");
+t.update({k: "y"}, {$push: {a: "y"}}, {upsert: true});
+assert.eq(1, t.find({"k": "y", "a": "y"}).count(), "upsert (not found) using object arg");
t.drop();
// Without upsert, found
-for (i=0; i<10; i++) {
- t.save({ _id : i, k: "x", a: [] });
+for (i = 0; i < 10; i++) {
+ t.save({_id: i, k: "x", a: []});
}
-t.update({ k: "x" }, { $push: { a: "y" }}, { upsert: false });
-assert.eq(1, t.find({ "a": "y" }).count(), "no upsert (found) using object arg");
+t.update({k: "x"}, {$push: {a: "y"}}, {upsert: false});
+assert.eq(1, t.find({"a": "y"}).count(), "no upsert (found) using object arg");
t.drop();
// Without upsert, not found
-for (i=0; i<10; i++) {
- t.save({ _id : i, k: "x", a: [] });
+for (i = 0; i < 10; i++) {
+ t.save({_id: i, k: "x", a: []});
}
-t.update({ k: "y" }, { $push: { a: "y" }}, { upsert: false });
-assert.eq(0, t.find({ "a": "y" }).count(), "no upsert (not found) using object arg");
+t.update({k: "y"}, {$push: {a: "y"}}, {upsert: false});
+assert.eq(0, t.find({"a": "y"}).count(), "no upsert (not found) using object arg");
t.drop();
diff --git a/jstests/core/updatej.js b/jstests/core/updatej.js
index 6a70a4c2d51..378a29e4573 100644
--- a/jstests/core/updatej.js
+++ b/jstests/core/updatej.js
@@ -4,9 +4,9 @@
t = db.jstests_updatej;
t.drop();
-t.save( {a:[]} );
-t.save( {a:1} );
-t.save( {a:[]} );
+t.save({a: []});
+t.save({a: 1});
+t.save({a: []});
-t.update( {}, {$push:{a:2}}, false, true );
-assert.eq( 1, t.count( {a:2} ) );
+t.update({}, {$push: {a: 2}}, false, true);
+assert.eq(1, t.count({a: 2}));
diff --git a/jstests/core/updatek.js b/jstests/core/updatek.js
index b96f3138a81..1af9e6112e7 100644
--- a/jstests/core/updatek.js
+++ b/jstests/core/updatek.js
@@ -3,12 +3,11 @@
t = db.jstests_updatek;
t.drop();
-t.save( { _id:0, '1':{}, '01':{} } );
-t.update( {}, { $set:{ '1.b':1, '1.c':2 } } );
-assert.docEq( { "01" : { }, "1" : { "b" : 1, "c" : 2 }, "_id" : 0 }, t.findOne() );
+t.save({_id: 0, '1': {}, '01': {}});
+t.update({}, {$set: {'1.b': 1, '1.c': 2}});
+assert.docEq({"01": {}, "1": {"b": 1, "c": 2}, "_id": 0}, t.findOne());
t.drop();
-t.save( { _id:0, '1':{}, '01':{} } );
-t.update( {}, { $set:{ '1.b':1, '01.c':2 } } );
-assert.docEq( { "01" : { "c" : 2 }, "1" : { "b" : 1 }, "_id" : 0 }, t.findOne() );
-
+t.save({_id: 0, '1': {}, '01': {}});
+t.update({}, {$set: {'1.b': 1, '01.c': 2}});
+assert.docEq({"01": {"c": 2}, "1": {"b": 1}, "_id": 0}, t.findOne());
diff --git a/jstests/core/updatel.js b/jstests/core/updatel.js
index 6b6c8b20613..69aa997a224 100644
--- a/jstests/core/updatel.js
+++ b/jstests/core/updatel.js
@@ -9,71 +9,62 @@ var res;
t = db.jstests_updatel;
t.drop();
-
-
// The collection is empty, forcing an upsert. In this case the query has no array position match
// to substiture for the positional operator. SERVER-4713
-assert.writeError(t.update( {}, { $set:{ 'a.$.b':1 } }, true ));
-assert.eq( 0, t.count(), "No upsert occurred." );
-
-
+assert.writeError(t.update({}, {$set: {'a.$.b': 1}}, true));
+assert.eq(0, t.count(), "No upsert occurred.");
// Save a document to the collection so it is no longer empty.
-assert.writeOK(t.save( { _id:0 } ));
+assert.writeOK(t.save({_id: 0}));
// Now, with an existing document, trigger an update rather than an upsert. The query has no array
// position match to substiture for the positional operator. SERVER-6669
-assert.writeError(t.update( {}, { $set:{ 'a.$.b':1 } } ));
-assert.eq( [ { _id:0 } ], t.find().toArray(), "No update occurred." );
-
-
+assert.writeError(t.update({}, {$set: {'a.$.b': 1}}));
+assert.eq([{_id: 0}], t.find().toArray(), "No update occurred.");
// Now, try with an update by _id (without a query array match).
-assert.writeError(t.update( { _id:0 }, { $set:{ 'a.$.b':1 } } ));
-assert.eq( [ { _id:0 } ], t.find().toArray(), "No update occurred." );
-
-
+assert.writeError(t.update({_id: 0}, {$set: {'a.$.b': 1}}));
+assert.eq([{_id: 0}], t.find().toArray(), "No update occurred.");
// Seed the collection with a document suitable for the following check.
assert.writeOK(t.remove({}));
-assert.writeOK(t.save( { _id:0, a:[ { b:{ c:1 } } ] } ));
+assert.writeOK(t.save({_id: 0, a: [{b: {c: 1}}]}));
// Now, attempt to apply an update with two nested positional operators. There is a positional
// query match for the first positional operator but not the second. Note that dollar sign
// substitution for multiple positional opertors is not implemented (SERVER-831).
-assert.writeError(t.update( { 'a.b.c':1 }, { $set:{ 'a.$.b.$.c':2 } } ));
-assert.eq( [ { _id:0, a:[ { b:{ c:1 } } ] } ], t.find().toArray(), "No update occurred." );
-
+assert.writeError(t.update({'a.b.c': 1}, {$set: {'a.$.b.$.c': 2}}));
+assert.eq([{_id: 0, a: [{b: {c: 1}}]}], t.find().toArray(), "No update occurred.");
// SERVER-1155 test an update with the positional operator
// that has a regex in the query field
t.drop();
-assert.writeOK(t.insert({_id:1, arr:[{a:"z", b:1}]}));
+assert.writeOK(t.insert({_id: 1, arr: [{a: "z", b: 1}]}));
assert.writeOK(t.update({"arr.a": /^z$/}, {$set: {"arr.$.b": 2}}, false, true));
-assert.eq(t.findOne().arr[0], {a:"z", b:2});
+assert.eq(t.findOne().arr[0], {a: "z", b: 2});
t.drop();
-assert.writeOK(t.insert({_id:1, arr:[{a:"z",b:1}, {a:"abc",b:2}, {a:"lmn",b:3}]}));
+assert.writeOK(t.insert({_id: 1, arr: [{a: "z", b: 1}, {a: "abc", b: 2}, {a: "lmn", b: 3}]}));
assert.writeOK(t.update({"arr.a": /l/}, {$inc: {"arr.$.b": 2}}, false, true));
-assert.eq(t.findOne().arr[2], {a:"lmn", b:5});
+assert.eq(t.findOne().arr[2], {a: "lmn", b: 5});
// Test updates with ambiguous positional operator.
t.drop();
assert.writeOK(t.insert({_id: 0, a: [1, 2]}));
assert.writeError(t.update({$and: [{a: 1}, {a: 2}]}, {$set: {'a.$': 5}}));
-assert.eq( [ {_id: 0, a: [1, 2]} ], t.find().toArray(), "No update occurred." );
+assert.eq([{_id: 0, a: [1, 2]}], t.find().toArray(), "No update occurred.");
t.drop();
assert.writeOK(t.insert({_id: 0, a: [1], b: [2]}));
assert.writeError(t.update({a: 1, b: 2}, {$set: {'a.$': 5}}));
-assert.eq( [ {_id: 0, a: [1], b: [2]} ], t.find().toArray(), "No update occurred." );
+assert.eq([{_id: 0, a: [1], b: [2]}], t.find().toArray(), "No update occurred.");
t.drop();
assert.writeOK(t.insert({_id: 0, a: [1], b: [2]}));
assert.writeError(t.update({a: {$elemMatch: {$lt: 2}}, b: 2}, {$set: {'a.$': 5}}));
-assert.eq( [ {_id: 0, a: [1], b: [2]} ], t.find().toArray(), "No update occurred." );
+assert.eq([{_id: 0, a: [1], b: [2]}], t.find().toArray(), "No update occurred.");
t.drop();
assert.writeOK(t.insert({_id: 0, a: [{b: 1}, {c: 2}]}));
assert.writeError(t.update({'a.b': 1, 'a.c': 2}, {$set: {'a.$': 5}}));
-assert.eq( [ {_id: 0, a: [{b: 1}, {c: 2}]} ], t.find().toArray(), "No update occurred." ); \ No newline at end of file
+assert.eq([{_id: 0, a: [{b: 1}, {c: 2}]}], t.find().toArray(), "No update occurred."); \ No newline at end of file
diff --git a/jstests/core/updatem.js b/jstests/core/updatem.js
index a636ac99a59..1346a293f66 100644
--- a/jstests/core/updatem.js
+++ b/jstests/core/updatem.js
@@ -4,17 +4,17 @@ t = db.jstests_updatem;
t.drop();
// new _id from insert (upsert:true)
-t.update({a:1}, {$inc:{b:1}}, true);
-var doc = t.findOne({a:1});
+t.update({a: 1}, {$inc: {b: 1}}, true);
+var doc = t.findOne({a: 1});
assert(doc["_id"], "missing _id");
// new _id from insert (upsert:true)
-t.update({a:1}, {$inc:{b:1}}, true);
-var doc = t.findOne({a:1});
+t.update({a: 1}, {$inc: {b: 1}}, true);
+var doc = t.findOne({a: 1});
assert(doc["_id"], "missing _id");
// no _id on existing doc
-t.getDB().runCommand({godinsert:t.getName(), obj:{a:2}});
-t.update({a:2}, {$inc:{b:1}}, true);
-var doc = t.findOne({a:2});
+t.getDB().runCommand({godinsert: t.getName(), obj: {a: 2}});
+t.update({a: 2}, {$inc: {b: 1}}, true);
+var doc = t.findOne({a: 2});
assert(doc["_id"], "missing _id after update");
diff --git a/jstests/core/upsert_and.js b/jstests/core/upsert_and.js
index 744c9a9331d..f99fd995edb 100644
--- a/jstests/core/upsert_and.js
+++ b/jstests/core/upsert_and.js
@@ -3,35 +3,37 @@ var res;
coll = db.upsert4;
coll.drop();
-res = coll.update({_id: 1, $and: [{c: 1}, {d: 1}], a: 12} , {$inc: {y: 1}} , true);
+res = coll.update({_id: 1, $and: [{c: 1}, {d: 1}], a: 12}, {$inc: {y: 1}}, true);
assert.writeOK(res);
assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1, a: 12, y: 1});
coll.remove({});
-res = coll.update({$and: [{c: 1}, {d: 1}]} , {$setOnInsert: {_id: 1}} , true);
+res = coll.update({$and: [{c: 1}, {d: 1}]}, {$setOnInsert: {_id: 1}}, true);
assert.writeOK(res);
assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1});
coll.remove({});
-res = coll.update({$and: [{c: 1}, {d: 1}, {$or: [{x:1}]}]} , {$setOnInsert: {_id: 1}} , true);
+res = coll.update({$and: [{c: 1}, {d: 1}, {$or: [{x: 1}]}]}, {$setOnInsert: {_id: 1}}, true);
assert.writeOK(res);
-assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1, x:1});
+assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1, x: 1});
coll.remove({});
-res = coll.update({$and: [{c: 1}, {d: 1}], $or: [{x: 1}, {x: 2}]} , {$setOnInsert: {_id: 1}} , true);
+res = coll.update({$and: [{c: 1}, {d: 1}], $or: [{x: 1}, {x: 2}]}, {$setOnInsert: {_id: 1}}, true);
assert.writeOK(res);
assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1});
coll.remove({});
-res = coll.update({r: {$gt: 3}, $and: [{c: 1}, {d: 1}], $or: [{x:1}, {x:2}]} , {$setOnInsert: {_id: 1}} , true);
+res = coll.update(
+ {r: {$gt: 3}, $and: [{c: 1}, {d: 1}], $or: [{x: 1}, {x: 2}]}, {$setOnInsert: {_id: 1}}, true);
assert.writeOK(res);
assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1});
coll.remove({});
-res = coll.update({r: /s/, $and: [{c: 1}, {d: 1}], $or: [{x:1}, {x:2}]} , {$setOnInsert: {_id: 1}} , true);
+res = coll.update(
+ {r: /s/, $and: [{c: 1}, {d: 1}], $or: [{x: 1}, {x: 2}]}, {$setOnInsert: {_id: 1}}, true);
assert.writeOK(res);
assert.docEq(coll.findOne(), {_id: 1, c: 1, d: 1});
coll.remove({});
-res = coll.update({c:2, $and: [{c: 1}, {d: 1}]} , {$setOnInsert: {_id: 1}} , true);
+res = coll.update({c: 2, $and: [{c: 1}, {d: 1}]}, {$setOnInsert: {_id: 1}}, true);
assert.writeError(res);
diff --git a/jstests/core/upsert_fields.js b/jstests/core/upsert_fields.js
index c4e322b6910..86f5032f6dd 100644
--- a/jstests/core/upsert_fields.js
+++ b/jstests/core/upsert_fields.js
@@ -7,7 +7,7 @@ coll.drop();
var upsertedResult = function(query, expr) {
coll.drop();
- result = coll.update(query, expr, { upsert : true });
+ result = coll.update(query, expr, {upsert: true});
return result;
};
@@ -34,93 +34,95 @@ var upsertedXVal = function(query, expr) {
assert.neq(null, upsertedId({}, {}));
// _id autogenerated with add'l fields
-assert.neq(null, upsertedId({}, { a : 1 }));
-assert.eq(1, upsertedField({}, { a : 1 }, "a"));
-assert.neq(null, upsertedId({}, { $set : { a : 1 } }, "a"));
-assert.eq(1, upsertedField({}, { $set : { a : 1 } }, "a"));
-assert.neq(null, upsertedId({}, { $setOnInsert : { a : 1 } }, "a"));
-assert.eq(1, upsertedField({}, { $setOnInsert : { a : 1 } }, "a"));
+assert.neq(null, upsertedId({}, {a: 1}));
+assert.eq(1, upsertedField({}, {a: 1}, "a"));
+assert.neq(null, upsertedId({}, {$set: {a: 1}}, "a"));
+assert.eq(1, upsertedField({}, {$set: {a: 1}}, "a"));
+assert.neq(null, upsertedId({}, {$setOnInsert: {a: 1}}, "a"));
+assert.eq(1, upsertedField({}, {$setOnInsert: {a: 1}}, "a"));
// _id not autogenerated
-assert.eq(1, upsertedId({}, { _id : 1 }));
-assert.eq(1, upsertedId({}, { $set : { _id : 1 } }));
-assert.eq(1, upsertedId({}, { $setOnInsert : { _id : 1 } }));
+assert.eq(1, upsertedId({}, {_id: 1}));
+assert.eq(1, upsertedId({}, {$set: {_id: 1}}));
+assert.eq(1, upsertedId({}, {$setOnInsert: {_id: 1}}));
// _id type error
-assert.writeError(upsertedResult({}, { _id : [1, 2] }));
-assert.writeError(upsertedResult({}, { _id : undefined }));
-assert.writeError(upsertedResult({}, { $set : { _id : [1, 2] } }));
+assert.writeError(upsertedResult({}, {_id: [1, 2]}));
+assert.writeError(upsertedResult({}, {_id: undefined}));
+assert.writeError(upsertedResult({}, {$set: {_id: [1, 2]}}));
// Fails in v2.6, no validation
-assert.writeError(upsertedResult({}, { $setOnInsert : { _id : undefined } }));
+assert.writeError(upsertedResult({}, {$setOnInsert: {_id: undefined}}));
// Check things that are pretty much the same for replacement and $op style upserts
for (var i = 0; i < 3; i++) {
-
// replacement style
var expr = {};
// $op style
if (i == 1)
- expr = { $set : { a : 1 } };
+ expr = {
+ $set: {a: 1}
+ };
if (i == 2)
- expr = { $setOnInsert : { a : 1 } };
+ expr = {
+ $setOnInsert: {a: 1}
+ };
var isReplStyle = i == 0;
// _id extracted
- assert.eq(1, upsertedId({ _id : 1 }, expr));
+ assert.eq(1, upsertedId({_id: 1}, expr));
// All below fail in v2.6, no $ops for _id and $and/$or not explored
- assert.eq(1, upsertedId({ _id : { $eq : 1 } }, expr));
- assert.eq(1, upsertedId({ _id : { $all : [1] } }, expr));
- assert.eq(1, upsertedId({ $and : [{ _id : 1 }] }, expr));
- assert.eq(1, upsertedId({ $and : [{ _id : { $eq : 1 } }] }, expr));
- assert.eq(1, upsertedId({ $or : [{ _id : 1 }] }, expr));
- assert.eq(1, upsertedId({ $or : [{ _id : { $eq : 1 } }] }, expr));
+ assert.eq(1, upsertedId({_id: {$eq: 1}}, expr));
+ assert.eq(1, upsertedId({_id: {$all: [1]}}, expr));
+ assert.eq(1, upsertedId({$and: [{_id: 1}]}, expr));
+ assert.eq(1, upsertedId({$and: [{_id: {$eq: 1}}]}, expr));
+ assert.eq(1, upsertedId({$or: [{_id: 1}]}, expr));
+ assert.eq(1, upsertedId({$or: [{_id: {$eq: 1}}]}, expr));
// _id not extracted, autogenerated
- assert.neq(1, upsertedId({ _id : { $gt : 1 } }, expr));
- assert.neq(1, upsertedId({ _id : { $ne : 1 } }, expr));
- assert.neq(1, upsertedId({ _id : { $in : [1] } }, expr));
- assert.neq(1, upsertedId({ _id : { $in : [1, 2] } }, expr));
- assert.neq(1, upsertedId({ _id : { $elemMatch : { $eq : 1 } } }, expr));
- assert.neq(1, upsertedId({ _id : { $exists : true } }, expr));
- assert.neq(1, upsertedId({ _id : { $not : { $eq : 1 } } }, expr));
- assert.neq(1, upsertedId({ $or : [{ _id : 1 }, { _id : 1 }] }, expr));
- assert.neq(1, upsertedId({ $or : [{ _id : { $eq : 1 } }, { _id : 2 }] }, expr));
- assert.neq(1, upsertedId({ $nor : [{ _id : 1 }] }, expr));
- assert.neq(1, upsertedId({ $nor : [{ _id : { $eq : 1 } }] }, expr));
- assert.neq(1, upsertedId({ $nor : [{ _id : { $eq : 1 } }, { _id : 1 }] }, expr));
+ assert.neq(1, upsertedId({_id: {$gt: 1}}, expr));
+ assert.neq(1, upsertedId({_id: {$ne: 1}}, expr));
+ assert.neq(1, upsertedId({_id: {$in: [1]}}, expr));
+ assert.neq(1, upsertedId({_id: {$in: [1, 2]}}, expr));
+ assert.neq(1, upsertedId({_id: {$elemMatch: {$eq: 1}}}, expr));
+ assert.neq(1, upsertedId({_id: {$exists: true}}, expr));
+ assert.neq(1, upsertedId({_id: {$not: {$eq: 1}}}, expr));
+ assert.neq(1, upsertedId({$or: [{_id: 1}, {_id: 1}]}, expr));
+ assert.neq(1, upsertedId({$or: [{_id: {$eq: 1}}, {_id: 2}]}, expr));
+ assert.neq(1, upsertedId({$nor: [{_id: 1}]}, expr));
+ assert.neq(1, upsertedId({$nor: [{_id: {$eq: 1}}]}, expr));
+ assert.neq(1, upsertedId({$nor: [{_id: {$eq: 1}}, {_id: 1}]}, expr));
// _id extraction errors
- assert.writeError(upsertedResult({ _id : [1, 2] }, expr));
- assert.writeError(upsertedResult({ _id : undefined }, expr));
- assert.writeError(upsertedResult({ _id : { $eq : [1, 2] } }, expr));
- assert.writeError(upsertedResult({ _id : { $eq : undefined } }, expr));
- assert.writeError(upsertedResult({ _id : { $all : [ 1, 2 ] } }, expr));
+ assert.writeError(upsertedResult({_id: [1, 2]}, expr));
+ assert.writeError(upsertedResult({_id: undefined}, expr));
+ assert.writeError(upsertedResult({_id: {$eq: [1, 2]}}, expr));
+ assert.writeError(upsertedResult({_id: {$eq: undefined}}, expr));
+ assert.writeError(upsertedResult({_id: {$all: [1, 2]}}, expr));
// All below fail in v2.6, non-_id fields completely ignored
- assert.writeError(upsertedResult({ $and : [{ _id : 1 }, { _id : 1 }] }, expr));
- assert.writeError(upsertedResult({ $and : [{ _id : { $eq : 1 } }, { _id : 2 }] }, expr));
- assert.writeError(upsertedResult({ _id : 1, "_id.x" : 1 }, expr));
- assert.writeError(upsertedResult({ _id : { x : 1 }, "_id.x" : 1 }, expr));
+ assert.writeError(upsertedResult({$and: [{_id: 1}, {_id: 1}]}, expr));
+ assert.writeError(upsertedResult({$and: [{_id: {$eq: 1}}, {_id: 2}]}, expr));
+ assert.writeError(upsertedResult({_id: 1, "_id.x": 1}, expr));
+ assert.writeError(upsertedResult({_id: {x: 1}, "_id.x": 1}, expr));
// Special case - nested _id fields only used on $op-style updates
if (isReplStyle) {
// Fails in v2.6
- assert.writeError(upsertedResult({ "_id.x" : 1, "_id.y" : 2 }, expr));
- }
- else {
+ assert.writeError(upsertedResult({"_id.x": 1, "_id.y": 2}, expr));
+ } else {
// Fails in v2.6
- assert.docEq({ x : 1, y : 2 }, upsertedId({ "_id.x" : 1, "_id.y" : 2 }, expr));
+ assert.docEq({x: 1, y: 2}, upsertedId({"_id.x": 1, "_id.y": 2}, expr));
}
}
// regex _id in expression is an error, no regex ids allowed
-assert.writeError(upsertedResult({}, { _id : /abc/ }));
+assert.writeError(upsertedResult({}, {_id: /abc/}));
// Fails in v2.6, no validation
-assert.writeError(upsertedResult({}, { $set : { _id : /abc/ } }));
+assert.writeError(upsertedResult({}, {$set: {_id: /abc/}}));
// no regex _id extraction from query
-assert.neq(/abc/, upsertedId({ _id : /abc/ }, {}));
+assert.neq(/abc/, upsertedId({_id: /abc/}, {}));
//
// Regular field extraction
@@ -128,102 +130,103 @@ assert.neq(/abc/, upsertedId({ _id : /abc/ }, {}));
// Check things that are pretty much the same for replacement and $op style upserts
for (var i = 0; i < 3; i++) {
-
// replacement style
var expr = {};
// $op style
if (i == 1)
- expr = { $set : { a : 1 } };
+ expr = {
+ $set: {a: 1}
+ };
if (i == 2)
- expr = { $setOnInsert : { a : 1 } };
+ expr = {
+ $setOnInsert: {a: 1}
+ };
var isReplStyle = i == 0;
// field extracted when replacement style
var value = isReplStyle ? undefined : 1;
- assert.eq(value, upsertedXVal({ x : 1 }, expr));
- assert.eq(value, upsertedXVal({ x : { $eq : 1 } }, expr));
- assert.eq(value, upsertedXVal({ x : { $all : [1] } }, expr));
- assert.eq(value, upsertedXVal({ $and : [{ x : 1 }] }, expr));
- assert.eq(value, upsertedXVal({ $and : [{ x : { $eq : 1 } }] }, expr));
- assert.eq(value, upsertedXVal({ $or : [{ x : 1 }] }, expr));
- assert.eq(value, upsertedXVal({ $or : [{ x : { $eq : 1 } }] }, expr));
+ assert.eq(value, upsertedXVal({x: 1}, expr));
+ assert.eq(value, upsertedXVal({x: {$eq: 1}}, expr));
+ assert.eq(value, upsertedXVal({x: {$all: [1]}}, expr));
+ assert.eq(value, upsertedXVal({$and: [{x: 1}]}, expr));
+ assert.eq(value, upsertedXVal({$and: [{x: {$eq: 1}}]}, expr));
+ assert.eq(value, upsertedXVal({$or: [{x: 1}]}, expr));
+ assert.eq(value, upsertedXVal({$or: [{x: {$eq: 1}}]}, expr));
// Special types extracted
- assert.eq(isReplStyle ? undefined : [1, 2], upsertedXVal({ x : [1, 2] }, expr));
+ assert.eq(isReplStyle ? undefined : [1, 2], upsertedXVal({x: [1, 2]}, expr));
// field not extracted
- assert.eq(undefined, upsertedXVal({ x : { $gt : 1 } }, expr));
- assert.eq(undefined, upsertedXVal({ x : { $ne : 1 } }, expr));
- assert.eq(undefined, upsertedXVal({ x : { $in : [1] } }, expr));
- assert.eq(undefined, upsertedXVal({ x : { $in : [1, 2] } }, expr));
- assert.eq(undefined, upsertedXVal({ x : { $elemMatch : { $eq : 1 } } }, expr));
- assert.eq(undefined, upsertedXVal({ x : { $exists : true } }, expr));
- assert.eq(undefined, upsertedXVal({ x : { $not : { $eq : 1 } } }, expr));
- assert.eq(undefined, upsertedXVal({ $or : [{ x : 1 }, { x : 1 }] }, expr));
- assert.eq(undefined, upsertedXVal({ $or : [{ x : { $eq : 1 } }, { x : 2 }] }, expr));
- assert.eq(undefined, upsertedXVal({ $nor : [{ x : 1 }] }, expr));
- assert.eq(undefined, upsertedXVal({ $nor : [{ x : { $eq : 1 } }] }, expr));
- assert.eq(undefined, upsertedXVal({ $nor : [{ x : { $eq : 1 } }, { x : 1 }] }, expr));
+ assert.eq(undefined, upsertedXVal({x: {$gt: 1}}, expr));
+ assert.eq(undefined, upsertedXVal({x: {$ne: 1}}, expr));
+ assert.eq(undefined, upsertedXVal({x: {$in: [1]}}, expr));
+ assert.eq(undefined, upsertedXVal({x: {$in: [1, 2]}}, expr));
+ assert.eq(undefined, upsertedXVal({x: {$elemMatch: {$eq: 1}}}, expr));
+ assert.eq(undefined, upsertedXVal({x: {$exists: true}}, expr));
+ assert.eq(undefined, upsertedXVal({x: {$not: {$eq: 1}}}, expr));
+ assert.eq(undefined, upsertedXVal({$or: [{x: 1}, {x: 1}]}, expr));
+ assert.eq(undefined, upsertedXVal({$or: [{x: {$eq: 1}}, {x: 2}]}, expr));
+ assert.eq(undefined, upsertedXVal({$nor: [{x: 1}]}, expr));
+ assert.eq(undefined, upsertedXVal({$nor: [{x: {$eq: 1}}]}, expr));
+ assert.eq(undefined, upsertedXVal({$nor: [{x: {$eq: 1}}, {x: 1}]}, expr));
// field extraction errors
- assert.writeError(upsertedResult({ x : undefined }, expr));
+ assert.writeError(upsertedResult({x: undefined}, expr));
if (!isReplStyle) {
- assert.writeError(upsertedResult({ x : { 'x.x' : 1 } }, expr));
- assert.writeError(upsertedResult({ x : { $all : [ 1, 2 ] } }, expr));
- assert.writeError(upsertedResult({ $and : [{ x : 1 }, { x : 1 }] }, expr));
- assert.writeError(upsertedResult({ $and : [{ x : { $eq : 1 } }, { x : 2 }] }, expr));
- }
- else {
- assert.eq(undefined, upsertedXVal({ x : { 'x.x' : 1 } }, expr));
- assert.eq(undefined, upsertedXVal({ x : { $all : [ 1, 2 ] } }, expr));
- assert.eq(undefined, upsertedXVal({ $and : [{ x : 1 }, { x : 1 }] }, expr));
- assert.eq(undefined, upsertedXVal({ $and : [{ x : { $eq : 1 } }, { x : 2 }] }, expr));
+ assert.writeError(upsertedResult({x: {'x.x': 1}}, expr));
+ assert.writeError(upsertedResult({x: {$all: [1, 2]}}, expr));
+ assert.writeError(upsertedResult({$and: [{x: 1}, {x: 1}]}, expr));
+ assert.writeError(upsertedResult({$and: [{x: {$eq: 1}}, {x: 2}]}, expr));
+ } else {
+ assert.eq(undefined, upsertedXVal({x: {'x.x': 1}}, expr));
+ assert.eq(undefined, upsertedXVal({x: {$all: [1, 2]}}, expr));
+ assert.eq(undefined, upsertedXVal({$and: [{x: 1}, {x: 1}]}, expr));
+ assert.eq(undefined, upsertedXVal({$and: [{x: {$eq: 1}}, {x: 2}]}, expr));
}
// nested field extraction
- var docValue = isReplStyle ? undefined : { x : 1 };
- assert.docEq(docValue, upsertedXVal({ "x.x" : 1 }, expr));
- assert.docEq(docValue, upsertedXVal({ "x.x" : { $eq : 1 } }, expr));
- assert.docEq(docValue, upsertedXVal({ "x.x" : { $all : [1] } }, expr));
- assert.docEq(docValue, upsertedXVal({ $and : [{ "x.x" : 1 }] }, expr));
- assert.docEq(docValue, upsertedXVal({ $and : [{ "x.x" : { $eq : 1 } }] }, expr));
- assert.docEq(docValue, upsertedXVal({ $or : [{ "x.x" : 1 }] }, expr));
- assert.docEq(docValue, upsertedXVal({ $or : [{ "x.x" : { $eq : 1 } }] }, expr));
+ var docValue = isReplStyle ? undefined : {
+ x: 1
+ };
+ assert.docEq(docValue, upsertedXVal({"x.x": 1}, expr));
+ assert.docEq(docValue, upsertedXVal({"x.x": {$eq: 1}}, expr));
+ assert.docEq(docValue, upsertedXVal({"x.x": {$all: [1]}}, expr));
+ assert.docEq(docValue, upsertedXVal({$and: [{"x.x": 1}]}, expr));
+ assert.docEq(docValue, upsertedXVal({$and: [{"x.x": {$eq: 1}}]}, expr));
+ assert.docEq(docValue, upsertedXVal({$or: [{"x.x": 1}]}, expr));
+ assert.docEq(docValue, upsertedXVal({$or: [{"x.x": {$eq: 1}}]}, expr));
// nested field conflicts
if (!isReplStyle) {
- assert.writeError(upsertedResult({ x : 1, "x.x" : 1 }, expr));
- assert.writeError(upsertedResult({ x : {}, "x.x" : 1 }, expr));
- assert.writeError(upsertedResult({ x : { x : 1 }, "x.x" : 1 }, expr));
- assert.writeError(upsertedResult({ x : { x : 1 }, "x.y" : 1 }, expr));
- assert.writeError(upsertedResult({ x : [1, { x : 1 }], "x.x" : 1 }, expr));
- }
- else {
- assert.eq(undefined, upsertedXVal({ x : 1, "x.x" : 1 }, expr));
- assert.eq(undefined, upsertedXVal({ x : {}, "x.x" : 1 }, expr));
- assert.eq(undefined, upsertedXVal({ x : { x : 1 }, "x.x" : 1 }, expr));
- assert.eq(undefined, upsertedXVal({ x : { x : 1 }, "x.y" : 1 }, expr));
- assert.eq(undefined, upsertedXVal({ x : [1, { x : 1 }], "x.x" : 1 }, expr));
+ assert.writeError(upsertedResult({x: 1, "x.x": 1}, expr));
+ assert.writeError(upsertedResult({x: {}, "x.x": 1}, expr));
+ assert.writeError(upsertedResult({x: {x: 1}, "x.x": 1}, expr));
+ assert.writeError(upsertedResult({x: {x: 1}, "x.y": 1}, expr));
+ assert.writeError(upsertedResult({x: [1, {x: 1}], "x.x": 1}, expr));
+ } else {
+ assert.eq(undefined, upsertedXVal({x: 1, "x.x": 1}, expr));
+ assert.eq(undefined, upsertedXVal({x: {}, "x.x": 1}, expr));
+ assert.eq(undefined, upsertedXVal({x: {x: 1}, "x.x": 1}, expr));
+ assert.eq(undefined, upsertedXVal({x: {x: 1}, "x.y": 1}, expr));
+ assert.eq(undefined, upsertedXVal({x: [1, {x: 1}], "x.x": 1}, expr));
}
-
}
// regex field in expression is a value
-assert.eq(/abc/, upsertedXVal({}, { x : /abc/ }));
-assert.eq(/abc/, upsertedXVal({}, { $set : { x : /abc/ } }));
+assert.eq(/abc/, upsertedXVal({}, {x: /abc/}));
+assert.eq(/abc/, upsertedXVal({}, {$set: {x: /abc/}}));
// no regex field extraction from query unless $eq'd
-assert.eq(/abc/, upsertedXVal({ x : { $eq : /abc/ } }, { $set : { a : 1 } }));
-assert.eq(undefined, upsertedXVal({ x : /abc/ }, { $set : { a : 1 } }));
+assert.eq(/abc/, upsertedXVal({x: {$eq: /abc/}}, {$set: {a: 1}}));
+assert.eq(undefined, upsertedXVal({x: /abc/}, {$set: {a: 1}}));
// replacement-style updates ignore conflicts *except* on _id field
-assert.eq(1, upsertedId({ _id : 1, x : [1, { x : 1 }], "x.x" : 1 }, {}));
+assert.eq(1, upsertedId({_id: 1, x: [1, {x: 1}], "x.x": 1}, {}));
// DBRef special cases
// make sure query doesn't error when creating doc for insert, since it's missing the rest of the
// dbref fields. SERVER-14024
// Fails in 2.6.1->3
-assert.docEq(tojson(DBRef("a", 1)), upsertedXVal({ "x.$id" : 1 },
- { $set : { x : DBRef("a", 1) } }));
+assert.docEq(tojson(DBRef("a", 1)), upsertedXVal({"x.$id": 1}, {$set: {x: DBRef("a", 1)}}));
diff --git a/jstests/core/upsert_shell.js b/jstests/core/upsert_shell.js
index a4cf3f4b923..5b7ac501aef 100644
--- a/jstests/core/upsert_shell.js
+++ b/jstests/core/upsert_shell.js
@@ -4,49 +4,49 @@ t = db.upsert1;
t.drop();
// make sure the new _id is returned when $mods are used
-l = t.update( { x : 1 } , { $inc : { y : 1 } } , true );
-assert( l.getUpsertedId() , "A1 - " + tojson(l) );
-assert.eq( l.getUpsertedId()._id.str , t.findOne()._id.str , "A2" );
+l = t.update({x: 1}, {$inc: {y: 1}}, true);
+assert(l.getUpsertedId(), "A1 - " + tojson(l));
+assert.eq(l.getUpsertedId()._id.str, t.findOne()._id.str, "A2");
// make sure the new _id is returned on a replacement (no $mod in update)
-l = t.update( { x : 2 } , { x : 2 , y : 3 } , true );
-assert( l.getUpsertedId() , "B1 - " + tojson(l) );
-assert.eq( l.getUpsertedId()._id.str , t.findOne( { x : 2 } )._id.str , "B2" );
-assert.eq( 2 , t.find().count() , "B3" );
+l = t.update({x: 2}, {x: 2, y: 3}, true);
+assert(l.getUpsertedId(), "B1 - " + tojson(l));
+assert.eq(l.getUpsertedId()._id.str, t.findOne({x: 2})._id.str, "B2");
+assert.eq(2, t.find().count(), "B3");
// use the _id from the query for the insert
-l = t.update({_id:3}, {$set: {a:'123'}}, true);
-assert( l.getUpsertedId() , "C1 - " + tojson(l) );
-assert.eq( l.getUpsertedId()._id , 3 , "C2 - " + tojson(l) );
+l = t.update({_id: 3}, {$set: {a: '123'}}, true);
+assert(l.getUpsertedId(), "C1 - " + tojson(l));
+assert.eq(l.getUpsertedId()._id, 3, "C2 - " + tojson(l));
// test with an embedded doc for the _id field
-l = t.update({_id:{a:1}}, {$set: {a:123}}, true);
-assert( l.getUpsertedId() , "D1 - " + tojson(l) );
-assert.eq( l.getUpsertedId()._id , {a:1} , "D2 - " + tojson(l) );
+l = t.update({_id: {a: 1}}, {$set: {a: 123}}, true);
+assert(l.getUpsertedId(), "D1 - " + tojson(l));
+assert.eq(l.getUpsertedId()._id, {a: 1}, "D2 - " + tojson(l));
// test with a range query
-l = t.update({_id: {$gt:100}}, {$set: {a:123}}, true);
-assert( l.getUpsertedId() , "E1 - " + tojson(l) );
-assert.neq( l.getUpsertedId()._id , 100 , "E2 - " + tojson(l) );
+l = t.update({_id: {$gt: 100}}, {$set: {a: 123}}, true);
+assert(l.getUpsertedId(), "E1 - " + tojson(l));
+assert.neq(l.getUpsertedId()._id, 100, "E2 - " + tojson(l));
// test with an _id query
-l = t.update({_id: 1233}, {$set: {a:123}}, true);
-assert( l.getUpsertedId() , "F1 - " + tojson(l) );
-assert.eq( l.getUpsertedId()._id , 1233 , "F2 - " + tojson(l) );
+l = t.update({_id: 1233}, {$set: {a: 123}}, true);
+assert(l.getUpsertedId(), "F1 - " + tojson(l));
+assert.eq(l.getUpsertedId()._id, 1233, "F2 - " + tojson(l));
// test with an embedded _id query
-l = t.update({_id: {a:1, b:2}}, {$set: {a:123}}, true);
-assert( l.getUpsertedId() , "G1 - " + tojson(l) );
-assert.eq( l.getUpsertedId()._id , {a:1, b:2} , "G2 - " + tojson(l) );
+l = t.update({_id: {a: 1, b: 2}}, {$set: {a: 123}}, true);
+assert(l.getUpsertedId(), "G1 - " + tojson(l));
+assert.eq(l.getUpsertedId()._id, {a: 1, b: 2}, "G2 - " + tojson(l));
// test with no _id inserted
db.no_id.drop();
-db.createCollection("no_id", {autoIndexId:false});
-l = db.no_id.update({foo:1}, {$set:{a:1}}, true);
-assert( l.getUpsertedId() , "H1 - " + tojson(l) );
-assert( !l.hasWriteError(), "H1.5 No error expected - " + tojson(l) );
-assert.eq( 0, db.no_id.getIndexes().length, "H2" );
-assert.eq( 1, db.no_id.count(), "H3" );
-var newDoc = db.no_id.findOne();
+db.createCollection("no_id", {autoIndexId: false});
+l = db.no_id.update({foo: 1}, {$set: {a: 1}}, true);
+assert(l.getUpsertedId(), "H1 - " + tojson(l));
+assert(!l.hasWriteError(), "H1.5 No error expected - " + tojson(l));
+assert.eq(0, db.no_id.getIndexes().length, "H2");
+assert.eq(1, db.no_id.count(), "H3");
+var newDoc = db.no_id.findOne();
delete newDoc["_id"];
-assert.eq( { foo : 1, a : 1 }, newDoc, "H4" );
+assert.eq({foo: 1, a: 1}, newDoc, "H4");
diff --git a/jstests/core/useindexonobjgtlt.js b/jstests/core/useindexonobjgtlt.js
index 026b1802843..c790019af9e 100755..100644
--- a/jstests/core/useindexonobjgtlt.js
+++ b/jstests/core/useindexonobjgtlt.js
@@ -1,14 +1,10 @@
t = db.factories;
t.drop();
-t.insert( { name: "xyz", metro: { city: "New York", state: "NY" } } );
-t.ensureIndex( { metro : 1 } );
+t.insert({name: "xyz", metro: {city: "New York", state: "NY"}});
+t.ensureIndex({metro: 1});
-assert( db.factories.find().count() );
+assert(db.factories.find().count());
-assert.eq( 1, db.factories.find( { metro: { city: "New York", state: "NY" } } )
- .hint({metro: 1})
- .count() );
+assert.eq(1, db.factories.find({metro: {city: "New York", state: "NY"}}).hint({metro: 1}).count());
-assert.eq( 1, db.factories.find( { metro: { $gte : { city: "New York" } } } )
- .hint({metro: 1})
- .count() );
+assert.eq(1, db.factories.find({metro: {$gte: {city: "New York"}}}).hint({metro: 1}).count());
diff --git a/jstests/core/user_management_helpers.js b/jstests/core/user_management_helpers.js
index e06302deba5..4db373a1def 100644
--- a/jstests/core/user_management_helpers.js
+++ b/jstests/core/user_management_helpers.js
@@ -10,87 +10,98 @@ function assertHasRole(rolesArray, roleName, roleDB) {
assert(false, "role " + roleName + "@" + roleDB + " not found in array: " + tojson(rolesArray));
}
-
function runTest(db) {
- var db = db.getSiblingDB("user_management_helpers");
- db.dropDatabase();
- db.dropAllUsers();
-
- db.createUser({user: "spencer", pwd: "password", roles: ['readWrite']});
- db.createUser({user: "andy", pwd: "password", roles: ['readWrite']});
-
- // Test getUser
- var userObj = db.getUser('spencer');
- assert.eq(1, userObj.roles.length);
- assertHasRole(userObj.roles, "readWrite", db.getName());
-
- // Test getUsers
- var users = db.getUsers();
- assert.eq(2, users.length);
- assert(users[0].user == 'spencer' || users[1].user == 'spencer');
- assert(users[0].user == 'andy' || users[1].user == 'andy');
- assert.eq(1, users[0].roles.length);
- assert.eq(1, users[1].roles.length);
- assertHasRole(users[0].roles, "readWrite", db.getName());
- assertHasRole(users[1].roles, "readWrite", db.getName());
-
- // Granting roles to nonexistent user fails
- assert.throws(function() { db.grantRolesToUser("fakeUser", ['dbAdmin']); });
- // Granting non-existant role fails
- assert.throws(function() { db.grantRolesToUser("spencer", ['dbAdmin', 'fakeRole']); });
-
- userObj = db.getUser('spencer');
- assert.eq(1, userObj.roles.length);
- assertHasRole(userObj.roles, "readWrite", db.getName());
-
- // Granting a role you already have is no problem
- db.grantRolesToUser("spencer", ['readWrite', 'dbAdmin']);
- userObj = db.getUser('spencer');
- assert.eq(2, userObj.roles.length);
- assertHasRole(userObj.roles, "readWrite", db.getName());
- assertHasRole(userObj.roles, "dbAdmin", db.getName());
-
- // Revoking roles the user doesn't have is fine
- db.revokeRolesFromUser("spencer", ['dbAdmin', 'read']);
- userObj = db.getUser('spencer');
- assert.eq(1, userObj.roles.length);
- assertHasRole(userObj.roles, "readWrite", db.getName());
-
- // Update user
- db.updateUser("spencer", {customData: {hello: 'world'}, roles:['read']});
- userObj = db.getUser('spencer');
- assert.eq('world', userObj.customData.hello);
- assert.eq(1, userObj.roles.length);
- assertHasRole(userObj.roles, "read", db.getName());
-
- // Test dropUser
- db.dropUser('andy');
- assert.eq(null, db.getUser('andy'));
-
- // Test dropAllUsers
- db.dropAllUsers();
- assert.eq(0, db.getUsers().length);
-
- // Test password digestion
- assert.throws(function() {
- db.createUser({user:'user1', pwd:'x', roles:[], digestPassword: true});});
- assert.throws(function() {
- db.createUser({user:'user1', pwd:'x', roles:[], digestPassword: false});});
- assert.throws(function() {
- db.createUser({user:'user1', pwd:'x', roles:[], passwordDigestor: 'foo'});});
- db.createUser({user:'user1', pwd:'x', roles:[], passwordDigestor:"server"});
- db.createUser({user:'user2', pwd:'x', roles:[], passwordDigestor:"client"});
- assert(db.auth('user1', 'x'));
- assert(db.auth('user2', 'x'));
-
- assert.throws(function() { db.updateUser('user1', {pwd:'y', digestPassword: true});});
- assert.throws(function() { db.updateUser('user1', {pwd:'y', digestPassword: false});});
- assert.throws(function() { db.updateUser('user1', {pwd:'y', passwordDigestor: 'foo'});});
- db.updateUser('user1', {pwd:'y', passwordDigestor: 'server'});
- db.updateUser('user2', {pwd:'y', passwordDigestor: 'client'});
- assert(db.auth('user1', 'y'));
- assert(db.auth('user2', 'y'));
-
+ var db = db.getSiblingDB("user_management_helpers");
+ db.dropDatabase();
+ db.dropAllUsers();
+
+ db.createUser({user: "spencer", pwd: "password", roles: ['readWrite']});
+ db.createUser({user: "andy", pwd: "password", roles: ['readWrite']});
+
+ // Test getUser
+ var userObj = db.getUser('spencer');
+ assert.eq(1, userObj.roles.length);
+ assertHasRole(userObj.roles, "readWrite", db.getName());
+
+ // Test getUsers
+ var users = db.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user == 'spencer' || users[1].user == 'spencer');
+ assert(users[0].user == 'andy' || users[1].user == 'andy');
+ assert.eq(1, users[0].roles.length);
+ assert.eq(1, users[1].roles.length);
+ assertHasRole(users[0].roles, "readWrite", db.getName());
+ assertHasRole(users[1].roles, "readWrite", db.getName());
+
+ // Granting roles to nonexistent user fails
+ assert.throws(function() {
+ db.grantRolesToUser("fakeUser", ['dbAdmin']);
+ });
+ // Granting non-existant role fails
+ assert.throws(function() {
+ db.grantRolesToUser("spencer", ['dbAdmin', 'fakeRole']);
+ });
+
+ userObj = db.getUser('spencer');
+ assert.eq(1, userObj.roles.length);
+ assertHasRole(userObj.roles, "readWrite", db.getName());
+
+ // Granting a role you already have is no problem
+ db.grantRolesToUser("spencer", ['readWrite', 'dbAdmin']);
+ userObj = db.getUser('spencer');
+ assert.eq(2, userObj.roles.length);
+ assertHasRole(userObj.roles, "readWrite", db.getName());
+ assertHasRole(userObj.roles, "dbAdmin", db.getName());
+
+ // Revoking roles the user doesn't have is fine
+ db.revokeRolesFromUser("spencer", ['dbAdmin', 'read']);
+ userObj = db.getUser('spencer');
+ assert.eq(1, userObj.roles.length);
+ assertHasRole(userObj.roles, "readWrite", db.getName());
+
+ // Update user
+ db.updateUser("spencer", {customData: {hello: 'world'}, roles: ['read']});
+ userObj = db.getUser('spencer');
+ assert.eq('world', userObj.customData.hello);
+ assert.eq(1, userObj.roles.length);
+ assertHasRole(userObj.roles, "read", db.getName());
+
+ // Test dropUser
+ db.dropUser('andy');
+ assert.eq(null, db.getUser('andy'));
+
+ // Test dropAllUsers
+ db.dropAllUsers();
+ assert.eq(0, db.getUsers().length);
+
+ // Test password digestion
+ assert.throws(function() {
+ db.createUser({user: 'user1', pwd: 'x', roles: [], digestPassword: true});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'user1', pwd: 'x', roles: [], digestPassword: false});
+ });
+ assert.throws(function() {
+ db.createUser({user: 'user1', pwd: 'x', roles: [], passwordDigestor: 'foo'});
+ });
+ db.createUser({user: 'user1', pwd: 'x', roles: [], passwordDigestor: "server"});
+ db.createUser({user: 'user2', pwd: 'x', roles: [], passwordDigestor: "client"});
+ assert(db.auth('user1', 'x'));
+ assert(db.auth('user2', 'x'));
+
+ assert.throws(function() {
+ db.updateUser('user1', {pwd: 'y', digestPassword: true});
+ });
+ assert.throws(function() {
+ db.updateUser('user1', {pwd: 'y', digestPassword: false});
+ });
+ assert.throws(function() {
+ db.updateUser('user1', {pwd: 'y', passwordDigestor: 'foo'});
+ });
+ db.updateUser('user1', {pwd: 'y', passwordDigestor: 'server'});
+ db.updateUser('user2', {pwd: 'y', passwordDigestor: 'client'});
+ assert(db.auth('user1', 'y'));
+ assert(db.auth('user2', 'y'));
}
try {
diff --git a/jstests/core/validate_cmd_ns.js b/jstests/core/validate_cmd_ns.js
index 4caf038a129..93f4a798a61 100644
--- a/jstests/core/validate_cmd_ns.js
+++ b/jstests/core/validate_cmd_ns.js
@@ -5,21 +5,20 @@
*/
// Note: _exec gives you the raw response from the server.
-var res = db.$cmd.find({ whatsmyuri: 1 })._exec().next();
+var res = db.$cmd.find({whatsmyuri: 1})._exec().next();
assert.commandFailed(res);
assert(res.errmsg.indexOf('bad numberToReturn') > -1);
-res = db.$cmd.find({ whatsmyuri: 1 }).limit(0)._exec().next();
+res = db.$cmd.find({whatsmyuri: 1}).limit(0)._exec().next();
assert.commandFailed(res);
assert(res.errmsg.indexOf('bad numberToReturn') > -1);
-res = db.$cmd.find({ whatsmyuri: 1 }).limit(-2)._exec().next();
+res = db.$cmd.find({whatsmyuri: 1}).limit(-2)._exec().next();
assert.commandFailed(res);
assert(res.errmsg.indexOf('bad numberToReturn') > -1);
-res = db.$cmd.find({ whatsmyuri: 1 }).limit(1).next();
+res = db.$cmd.find({whatsmyuri: 1}).limit(1).next();
assert.commandWorked(res);
-res = db.$cmd.find({ whatsmyuri: 1 }).limit(-1).next();
+res = db.$cmd.find({whatsmyuri: 1}).limit(-1).next();
assert.commandWorked(res);
-
diff --git a/jstests/core/validate_pseudocommand_ns.js b/jstests/core/validate_pseudocommand_ns.js
index bee314d421d..8faf4f802d8 100644
--- a/jstests/core/validate_pseudocommand_ns.js
+++ b/jstests/core/validate_pseudocommand_ns.js
@@ -4,10 +4,20 @@
if (!db.getMongo().useReadCommands()) {
var inprog = db.$cmd.sys.inprog;
// nToReturn must be 1 or -1.
- assert.doesNotThrow(function() { inprog.find().limit(-1).next(); });
- assert.doesNotThrow(function() { inprog.find().limit(1).next(); });
- assert.throws(function() { inprog.find().limit(0).next(); });
- assert.throws(function() { inprog.find().limit(-2).next(); });
- assert.throws(function() { inprog.find().limit(99).next(); });
+ assert.doesNotThrow(function() {
+ inprog.find().limit(-1).next();
+ });
+ assert.doesNotThrow(function() {
+ inprog.find().limit(1).next();
+ });
+ assert.throws(function() {
+ inprog.find().limit(0).next();
+ });
+ assert.throws(function() {
+ inprog.find().limit(-2).next();
+ });
+ assert.throws(function() {
+ inprog.find().limit(99).next();
+ });
}
})();
diff --git a/jstests/core/validate_user_documents.js b/jstests/core/validate_user_documents.js
index 825e1e7de11..9c12e6075a7 100644
--- a/jstests/core/validate_user_documents.js
+++ b/jstests/core/validate_user_documents.js
@@ -1,11 +1,10 @@
// Ensure that inserts and updates of the system.users collection validate the schema of inserted
// documents.
-mydb = db.getSisterDB( "validate_user_documents" );
+mydb = db.getSisterDB("validate_user_documents");
function assertGLEOK(status) {
- assert(status.ok && status.err === null,
- "Expected OK status object; found " + tojson(status));
+ assert(status.ok && status.err === null, "Expected OK status object; found " + tojson(status));
}
function assertGLENotOK(status) {
@@ -21,34 +20,26 @@ mydb.dropAllUsers();
//
// V0 user document document; insert should fail.
-assert.commandFailed(mydb.runCommand({ createUser:1,
- user: "spencer",
- pwd: "password",
- readOnly: true }));
+assert.commandFailed(
+ mydb.runCommand({createUser: 1, user: "spencer", pwd: "password", readOnly: true}));
// V1 user document; insert should fail.
-assert.commandFailed(mydb.runCommand({ createUser:1,
- user: "spencer",
- userSource: "test2",
- roles: ["dbAdmin"] }));
+assert.commandFailed(
+ mydb.runCommand({createUser: 1, user: "spencer", userSource: "test2", roles: ["dbAdmin"]}));
// Valid V2 user document; insert should succeed.
-assert.commandWorked(mydb.runCommand({ createUser: "spencer",
- pwd: "password",
- roles: ["dbAdmin"] }));
+assert.commandWorked(mydb.runCommand({createUser: "spencer", pwd: "password", roles: ["dbAdmin"]}));
// Valid V2 user document; insert should succeed.
-assert.commandWorked(mydb.runCommand({ createUser: "andy",
- pwd: "password",
- roles: [{role: "dbAdmin",
- db: "validate_user_documents",
- hasRole: true,
- canDelegate: false}] }));
+assert.commandWorked(mydb.runCommand({
+ createUser: "andy",
+ pwd: "password",
+ roles:
+ [{role: "dbAdmin", db: "validate_user_documents", hasRole: true, canDelegate: false}]
+}));
// Non-existent role; insert should fail
-assert.commandFailed(mydb.runCommand({ createUser: "bob",
- pwd: "password",
- roles: ["fakeRole123"] }));
+assert.commandFailed(mydb.runCommand({createUser: "bob", pwd: "password", roles: ["fakeRole123"]}));
//
// Tests of the update path
diff --git a/jstests/core/verify_update_mods.js b/jstests/core/verify_update_mods.js
index 7806b7804d3..efd0beb80cc 100644
--- a/jstests/core/verify_update_mods.js
+++ b/jstests/core/verify_update_mods.js
@@ -3,81 +3,81 @@ var res;
t = db.update_mods;
t.drop();
-t.save({_id:1});
-res = t.update({}, {$set:{a:1}});
+t.save({_id: 1});
+res = t.update({}, {$set: {a: 1}});
assert.writeOK(res);
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$unset:{a:1}});
+t.save({_id: 1});
+res = t.update({}, {$unset: {a: 1}});
assert.writeOK(res);
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$inc:{a:1}});
+t.save({_id: 1});
+res = t.update({}, {$inc: {a: 1}});
assert.writeOK(res);
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$mul:{a:1}});
+t.save({_id: 1});
+res = t.update({}, {$mul: {a: 1}});
assert.writeOK(res);
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$push:{a:1}});
+t.save({_id: 1});
+res = t.update({}, {$push: {a: 1}});
assert.writeOK(res);
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$pushAll:{a:[1]}});
+t.save({_id: 1});
+res = t.update({}, {$pushAll: {a: [1]}});
assert.writeOK(res);
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$addToSet:{a:1}});
+t.save({_id: 1});
+res = t.update({}, {$addToSet: {a: 1}});
assert.writeOK(res);
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$pull:{a:1}});
+t.save({_id: 1});
+res = t.update({}, {$pull: {a: 1}});
assert.writeOK(res);
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$pop:{a:true}});
+t.save({_id: 1});
+res = t.update({}, {$pop: {a: true}});
assert.writeOK(res);
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$rename:{a:"b"}});
+t.save({_id: 1});
+res = t.update({}, {$rename: {a: "b"}});
assert.writeOK(res);
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$bit:{a:{and:NumberLong(1)}}});
+t.save({_id: 1});
+res = t.update({}, {$bit: {a: {and: NumberLong(1)}}});
assert.writeOK(res);
t.remove({});
// SERVER-3223 test $bit can do an upsert
-t.update({_id:1}, {$bit:{a:{and:NumberLong(3)}}}, true);
-assert.eq(t.findOne({_id:1}).a, NumberLong(0), "$bit upsert with and");
-t.update({_id:2}, {$bit:{b:{or:NumberLong(3)}}}, true);
-assert.eq(t.findOne({_id:2}).b, NumberLong(3), "$bit upsert with or (long)");
-t.update({_id:3}, {$bit:{"c.d":{or:NumberInt(3)}}}, true);
-assert.eq(t.findOne({_id:3}).c.d, NumberInt(3), "$bit upsert with or (int)");
+t.update({_id: 1}, {$bit: {a: {and: NumberLong(3)}}}, true);
+assert.eq(t.findOne({_id: 1}).a, NumberLong(0), "$bit upsert with and");
+t.update({_id: 2}, {$bit: {b: {or: NumberLong(3)}}}, true);
+assert.eq(t.findOne({_id: 2}).b, NumberLong(3), "$bit upsert with or (long)");
+t.update({_id: 3}, {$bit: {"c.d": {or: NumberInt(3)}}}, true);
+assert.eq(t.findOne({_id: 3}).c.d, NumberInt(3), "$bit upsert with or (int)");
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$currentDate:{a:true}});
+t.save({_id: 1});
+res = t.update({}, {$currentDate: {a: true}});
assert.writeOK(res);
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$max:{a:1}});
+t.save({_id: 1});
+res = t.update({}, {$max: {a: 1}});
assert.writeOK(res);
t.remove({});
-t.save({_id:1});
-res = t.update({}, {$min:{a:1}});
+t.save({_id: 1});
+res = t.update({}, {$min: {a: 1}});
assert.writeOK(res);
t.remove({});
diff --git a/jstests/core/where1.js b/jstests/core/where1.js
index 6e3d693b996..85466901016 100644
--- a/jstests/core/where1.js
+++ b/jstests/core/where1.js
@@ -1,39 +1,49 @@
-t = db.getCollection( "where1" );
+t = db.getCollection("where1");
t.drop();
-t.save( { a : 1 } );
-t.save( { a : 2 } );
-t.save( { a : 3 } );
+t.save({a: 1});
+t.save({a: 2});
+t.save({a: 3});
-assert.eq( 1 , t.find( function(){ return this.a == 2; } ).length() , "A" );
+assert.eq(1,
+ t.find(function() {
+ return this.a == 2;
+ }).length(),
+ "A");
-assert.eq( 1 , t.find( { $where : "return this.a == 2" } ).toArray().length , "B" );
-assert.eq( 1 , t.find( { $where : "this.a == 2" } ).toArray().length , "C" );
+assert.eq(1, t.find({$where: "return this.a == 2"}).toArray().length, "B");
+assert.eq(1, t.find({$where: "this.a == 2"}).toArray().length, "C");
-assert.eq( 1 , t.find( "this.a == 2" ).toArray().length , "D" );
+assert.eq(1, t.find("this.a == 2").toArray().length, "D");
// SERVER-12117
// positional $ projection should fail on a $where query
-assert.throws( function() { t.find( { $where : "return this.a;" }, { 'a.$' : 1 } ).itcount(); } );
+assert.throws(function() {
+ t.find({$where: "return this.a;"}, {'a.$': 1}).itcount();
+});
// SERVER-12439: $where must be top-level
-assert.throws( function() { t.find( { a: 1, b: { $where : "this.a;" } } ).itcount(); } );
-assert.throws( function() { t.find( { a: { $where : "this.a;" } } ).itcount(); } );
-assert.throws( function() {
- t.find( { a: { $elemMatch : { $where : "this.a;" } } } ).itcount();
-} );
-assert.throws( function() {
- t.find( { a: 3, "b.c": { $where : "this.a;" } } ).itcount();
-} );
+assert.throws(function() {
+ t.find({a: 1, b: {$where: "this.a;"}}).itcount();
+});
+assert.throws(function() {
+ t.find({a: {$where: "this.a;"}}).itcount();
+});
+assert.throws(function() {
+ t.find({a: {$elemMatch: {$where: "this.a;"}}}).itcount();
+});
+assert.throws(function() {
+ t.find({a: 3, "b.c": {$where: "this.a;"}}).itcount();
+});
// SERVER-13503
-assert.throws( function() {
- t.find( { a: { $elemMatch : { $where : "this.a;", b : 1 } } } ).itcount();
-} );
-assert.throws( function() {
- t.find( { a: { $elemMatch : { b : 1, $where : "this.a;" } } } ).itcount();
-} );
-assert.throws( function() {
- t.find( { a: { $elemMatch : { $and : [ { b : 1 }, { $where : "this.a;" } ] } } } ).itcount();
-} );
+assert.throws(function() {
+ t.find({a: {$elemMatch: {$where: "this.a;", b: 1}}}).itcount();
+});
+assert.throws(function() {
+ t.find({a: {$elemMatch: {b: 1, $where: "this.a;"}}}).itcount();
+});
+assert.throws(function() {
+ t.find({a: {$elemMatch: {$and: [{b: 1}, {$where: "this.a;"}]}}}).itcount();
+});
diff --git a/jstests/core/where2.js b/jstests/core/where2.js
index 9262b3076b3..8b4314beb88 100644
--- a/jstests/core/where2.js
+++ b/jstests/core/where2.js
@@ -1,10 +1,10 @@
-t = db.getCollection( "where2" );
+t = db.getCollection("where2");
t.drop();
-t.save( { a : 1 } );
-t.save( { a : 2 } );
-t.save( { a : 3 } );
+t.save({a: 1});
+t.save({a: 2});
+t.save({a: 3});
-assert.eq( 1 , t.find( { $where : "this.a == 2" } ).toArray().length , "A" );
-assert.eq( 1 , t.find( { $where : "\nthis.a == 2" } ).toArray().length , "B" );
+assert.eq(1, t.find({$where: "this.a == 2"}).toArray().length, "A");
+assert.eq(1, t.find({$where: "\nthis.a == 2"}).toArray().length, "B");
diff --git a/jstests/core/where3.js b/jstests/core/where3.js
index 2eb53f82370..633276489a5 100644
--- a/jstests/core/where3.js
+++ b/jstests/core/where3.js
@@ -2,12 +2,16 @@
t = db.where3;
t.drop();
-t.save( { returned_date : 5 } );
-t.save( { returned_date : 6 } );
+t.save({returned_date: 5});
+t.save({returned_date: 6});
-assert.eq( 1, t.find(function(){ return this.returned_date == 5; }).count(), "A" );
-assert.eq( 1, t.find({ $where : "return this.returned_date == 5;" }).count(), "B" );
-assert.eq( 1, t.find({ $where : "this.returned_date == 5;" }).count(), "C" );
-assert.eq( 1, t.find({ $where : "(this.returned_date == 5);" }).count(), "D" );
-assert.eq( 1, t.find({ $where : "((this.returned_date == 5) && (5 == 5));"}).count(), "E" );
-assert.eq( 1, t.find({ $where : "x=this.returned_date;x == 5;" }).count(), "F" );
+assert.eq(1,
+ t.find(function() {
+ return this.returned_date == 5;
+ }).count(),
+ "A");
+assert.eq(1, t.find({$where: "return this.returned_date == 5;"}).count(), "B");
+assert.eq(1, t.find({$where: "this.returned_date == 5;"}).count(), "C");
+assert.eq(1, t.find({$where: "(this.returned_date == 5);"}).count(), "D");
+assert.eq(1, t.find({$where: "((this.returned_date == 5) && (5 == 5));"}).count(), "E");
+assert.eq(1, t.find({$where: "x=this.returned_date;x == 5;"}).count(), "F");
diff --git a/jstests/core/where4.js b/jstests/core/where4.js
index 685665c982a..28d621374c3 100644
--- a/jstests/core/where4.js
+++ b/jstests/core/where4.js
@@ -1,27 +1,37 @@
db.where4.drop();
-db.system.js.insert( { _id : "w4" , value : "5" } );
+db.system.js.insert({_id: "w4", value: "5"});
-db.where4.insert( { x : 1 , y : 1 } );
-db.where4.insert( { x : 2 , y : 1 } );
+db.where4.insert({x: 1, y: 1});
+db.where4.insert({x: 2, y: 1});
-db.where4.update( { $where : function() { return this.x == 1; } } ,
- { $inc : { y : 1 } } , false , true );
+db.where4.update(
+ {
+ $where: function() {
+ return this.x == 1;
+ }
+ },
+ {$inc: {y: 1}},
+ false,
+ true);
-
-assert.eq( 2 , db.where4.findOne( { x : 1 } ).y );
-assert.eq( 1 , db.where4.findOne( { x : 2 } ).y );
+assert.eq(2, db.where4.findOne({x: 1}).y);
+assert.eq(1, db.where4.findOne({x: 2}).y);
// Test that where queries work with stored javascript
-db.system.js.save( { _id : "where4_addOne" , value : function(x) { return x + 1; } } );
+db.system.js.save({
+ _id: "where4_addOne",
+ value: function(x) {
+ return x + 1;
+ }
+});
-db.where4.update( { $where : "where4_addOne(this.x) == 2" } ,
- { $inc : { y : 1 } } , false , true );
+db.where4.update({$where: "where4_addOne(this.x) == 2"}, {$inc: {y: 1}}, false, true);
-assert.eq( 3 , db.where4.findOne( { x : 1 } ).y );
-assert.eq( 1 , db.where4.findOne( { x : 2 } ).y );
+assert.eq(3, db.where4.findOne({x: 1}).y);
+assert.eq(1, db.where4.findOne({x: 2}).y);
-db.system.js.remove( { _id : "where4_equalsOne" } );
+db.system.js.remove({_id: "where4_equalsOne"});
-db.system.js.remove( { _id : "w4" } );
+db.system.js.remove({_id: "w4"});
diff --git a/jstests/core/write_result.js b/jstests/core/write_result.js
index 8cf505f688e..86486089c68 100644
--- a/jstests/core/write_result.js
+++ b/jstests/core/write_result.js
@@ -12,7 +12,7 @@ var result = null;
//
// Basic insert
coll.remove({});
-printjson( result = coll.insert({ foo : "bar" }) );
+printjson(result = coll.insert({foo: "bar"}));
assert.eq(result.nInserted, 1);
assert.eq(result.nUpserted, 0);
assert.eq(result.nMatched, 0);
@@ -28,7 +28,7 @@ assert.eq(coll.count(), 1);
// Basic upsert (using save)
coll.remove({});
var id = new ObjectId();
-printjson( result = coll.save({ _id : id, foo : "bar" }) );
+printjson(result = coll.save({_id: id, foo: "bar"}));
assert.eq(result.nInserted, 0);
assert.eq(result.nUpserted, 1);
assert.eq(result.nMatched, 0);
@@ -43,8 +43,8 @@ assert.eq(coll.count(), 1);
//
// Basic update
coll.remove({});
-coll.insert({ foo : "bar" });
-printjson( result = coll.update({ foo : "bar" }, { $set : { foo : "baz" } }) );
+coll.insert({foo: "bar"});
+printjson(result = coll.update({foo: "bar"}, {$set: {foo: "baz"}}));
assert.eq(result.nInserted, 0);
assert.eq(result.nUpserted, 0);
assert.eq(result.nMatched, 1);
@@ -59,11 +59,9 @@ assert.eq(coll.count(), 1);
//
// Basic multi-update
coll.remove({});
-coll.insert({ foo : "bar" });
-coll.insert({ foo : "bar", set : ['value'] });
-printjson( result = coll.update({ foo : "bar" },
- { $addToSet : { set : 'value' } },
- { multi : true }) );
+coll.insert({foo: "bar"});
+coll.insert({foo: "bar", set: ['value']});
+printjson(result = coll.update({foo: "bar"}, {$addToSet: {set: 'value'}}, {multi: true}));
assert.eq(result.nInserted, 0);
assert.eq(result.nUpserted, 0);
assert.eq(result.nMatched, 2);
@@ -78,8 +76,8 @@ assert.eq(coll.count(), 2);
//
// Basic remove
coll.remove({});
-coll.insert({ foo : "bar" });
-printjson( result = coll.remove({}) );
+coll.insert({foo: "bar"});
+printjson(result = coll.remove({}));
assert.eq(result.nInserted, 0);
assert.eq(result.nUpserted, 0);
assert.eq(result.nMatched, 0);
@@ -95,8 +93,8 @@ assert.eq(coll.count(), 0);
// Insert with error
coll.remove({});
var id = new ObjectId();
-coll.insert({ _id : id, foo : "bar" });
-printjson( result = coll.insert({ _id : id, foo : "baz" }) );
+coll.insert({_id: id, foo: "bar"});
+printjson(result = coll.insert({_id: id, foo: "baz"}));
assert.eq(result.nInserted, 0);
assert(result.getWriteError());
assert(result.getWriteError().errmsg);
@@ -106,8 +104,8 @@ assert.eq(coll.count(), 1);
//
// Update with error
coll.remove({});
-coll.insert({ foo : "bar" });
-printjson( result = coll.update({ foo : "bar" }, { $invalid : "expr" }) );
+coll.insert({foo: "bar"});
+printjson(result = coll.update({foo: "bar"}, {$invalid: "expr"}));
assert.eq(result.nUpserted, 0);
assert.eq(result.nMatched, 0);
if (coll.getMongo().writeMode() == "commands")
@@ -121,13 +119,12 @@ assert.eq(coll.count(), 1);
// Multi-update with error
coll.remove({});
var id = new ObjectId();
-for (var i = 0; i < 10; ++i) coll.insert({ value : NumberInt(i) });
-coll.insert({ value : "not a number" });
+for (var i = 0; i < 10; ++i)
+ coll.insert({value: NumberInt(i)});
+coll.insert({value: "not a number"});
// $bit operator fails when the field is not integer
// Note that multi-updates do not currently report partial stats if they fail
-printjson( result = coll.update({},
- { $bit : { value : { and : NumberInt(0) } } },
- { multi : true }) );
+printjson(result = coll.update({}, {$bit: {value: {and: NumberInt(0)}}}, {multi: true}));
assert.eq(result.nUpserted, 0);
assert.eq(result.nMatched, 0);
if (coll.getMongo().writeMode() == "commands")
@@ -140,7 +137,7 @@ assert.eq(coll.count(), 11);
//
// Bulk insert
coll.remove({});
-printjson( result = coll.insert([{ foo : "bar" }, { foo : "baz" }]) );
+printjson(result = coll.insert([{foo: "bar"}, {foo: "baz"}]));
assert.eq(result.nInserted, 2);
assert(!result.hasWriteErrors());
assert(!result.hasWriteConcernError());
@@ -151,8 +148,7 @@ assert.eq(coll.count(), 2);
coll.remove({});
var id = new ObjectId();
// Second insert fails with duplicate _id
-printjson( result = coll.insert([{ _id : id, foo : "bar" },
- { _id : id, foo : "baz" }]) );
+printjson(result = coll.insert([{_id: id, foo: "bar"}, {_id: id, foo: "baz"}]));
assert.eq(result.nInserted, 1);
assert(result.hasWriteErrors());
assert(!result.hasWriteConcernError());
@@ -162,8 +158,8 @@ assert.eq(coll.count(), 1);
// Custom write concern
// (More detailed write concern tests require custom/replicated servers)
coll.remove({});
-coll.setWriteConcern({ w : "majority" });
-printjson( result = coll.insert({ foo : "bar" }) );
+coll.setWriteConcern({w: "majority"});
+printjson(result = coll.insert({foo: "bar"}));
assert.eq(result.nInserted, 1);
assert(!result.getWriteError());
assert(!result.getWriteConcernError());
@@ -174,13 +170,10 @@ coll.unsetWriteConcern();
// Write concern error
// NOTE: In a replica set write concern is checked after write
coll.remove({});
-var wRes = assert.writeError( coll.insert({ foo : "bar" }, { writeConcern : { w : "invalid" } }) );
+var wRes = assert.writeError(coll.insert({foo: "bar"}, {writeConcern: {w: "invalid"}}));
var res = assert.commandWorked(db.isMaster());
var replSet = res.hasOwnProperty("setName");
if (!replSet && coll.getMongo().writeMode() == "commands")
assert.eq(coll.count(), 0, "not-replset || command mode");
-else // compatibility,
+else // compatibility,
assert.eq(coll.count(), 1, "replset || non-command mode");
-
-
-
diff --git a/jstests/decimal/decimal_constructors.js b/jstests/decimal/decimal_constructors.js
index b07fc38ca81..d83568faf39 100644
--- a/jstests/decimal/decimal_constructors.js
+++ b/jstests/decimal/decimal_constructors.js
@@ -8,18 +8,19 @@
// Insert some sample data.
assert.writeOK(col.insert([
- { "decimal" : NumberDecimal("1") },
- { "decimal" : NumberDecimal(1) },
- { "decimal" : NumberDecimal(NumberLong("1")) },
- { "decimal" : NumberDecimal(NumberInt("1")) },
- ]), "Initial insertion of decimals failed");
+ {"decimal": NumberDecimal("1")},
+ {"decimal": NumberDecimal(1)},
+ {"decimal": NumberDecimal(NumberLong("1"))},
+ {"decimal": NumberDecimal(NumberInt("1"))},
+ ]),
+ "Initial insertion of decimals failed");
// Find values with various types and NumberDecimal constructed types
- assert.eq(col.find({ "decimal" : NumberDecimal("1") }).count(), "4");
- assert.eq(col.find({ "decimal" : NumberDecimal(1) }).count(), "4");
- assert.eq(col.find({ "decimal" : NumberDecimal(NumberLong(1)) }).count(), "4");
- assert.eq(col.find({ "decimal" : NumberDecimal(NumberInt(1)) }).count(), "4");
- assert.eq(col.find({ "decimal" : 1 }).count(), "4");
- assert.eq(col.find({ "decimal" : NumberLong(1) }).count(), "4");
- assert.eq(col.find({ "decimal" : NumberInt(1) }).count(), "4");
+ assert.eq(col.find({"decimal": NumberDecimal("1")}).count(), "4");
+ assert.eq(col.find({"decimal": NumberDecimal(1)}).count(), "4");
+ assert.eq(col.find({"decimal": NumberDecimal(NumberLong(1))}).count(), "4");
+ assert.eq(col.find({"decimal": NumberDecimal(NumberInt(1))}).count(), "4");
+ assert.eq(col.find({"decimal": 1}).count(), "4");
+ assert.eq(col.find({"decimal": NumberLong(1)}).count(), "4");
+ assert.eq(col.find({"decimal": NumberInt(1)}).count(), "4");
}());
diff --git a/jstests/decimal/decimal_find_basic.js b/jstests/decimal/decimal_find_basic.js
index ab09ad65417..6527957a93f 100644
--- a/jstests/decimal/decimal_find_basic.js
+++ b/jstests/decimal/decimal_find_basic.js
@@ -8,50 +8,48 @@
// Insert some sample data.
assert.writeOK(col.insert([
- { "decimal" : NumberDecimal("0") },
- { "decimal" : NumberDecimal("0.00") },
- { "decimal" : NumberDecimal("-0") },
- { "decimal" : NumberDecimal("1.0") },
- { "decimal" : NumberDecimal("1.00") },
- { "decimal" : NumberDecimal("2.00") },
- { "decimal" : NumberDecimal("12345678901234567890.12345678901234") },
- { "decimal" : NumberDecimal("NaN") },
- { "decimal" : NumberDecimal("-NaN") },
- { "decimal" : NumberDecimal("-Infinity") },
- { "decimal" : NumberDecimal("Infinity") },
- ]), "Initial insertion of decimals failed");
+ {"decimal": NumberDecimal("0")},
+ {"decimal": NumberDecimal("0.00")},
+ {"decimal": NumberDecimal("-0")},
+ {"decimal": NumberDecimal("1.0")},
+ {"decimal": NumberDecimal("1.00")},
+ {"decimal": NumberDecimal("2.00")},
+ {"decimal": NumberDecimal("12345678901234567890.12345678901234")},
+ {"decimal": NumberDecimal("NaN")},
+ {"decimal": NumberDecimal("-NaN")},
+ {"decimal": NumberDecimal("-Infinity")},
+ {"decimal": NumberDecimal("Infinity")},
+ ]),
+ "Initial insertion of decimals failed");
// Zeros
- assert.eq(col.find({ "decimal" : NumberDecimal("0") }).count(), "3");
+ assert.eq(col.find({"decimal": NumberDecimal("0")}).count(), "3");
// NaNs
- assert.eq(col.find({ "decimal" : NumberDecimal("NaN") }).count(), 2, "NaN find failed");
+ assert.eq(col.find({"decimal": NumberDecimal("NaN")}).count(), 2, "NaN find failed");
- var theNaNs = [{ "decimal" : NumberDecimal("NaN") }, { "decimal" : NumberDecimal("-NaN") }];
+ var theNaNs = [{"decimal": NumberDecimal("NaN")}, {"decimal": NumberDecimal("-NaN")}];
assert(bsonWoCompare(theNaNs[0], theNaNs[1]) == 0, "NaN compares equal");
// Infinity
- assert.eq(col.find({ "decimal" : NumberDecimal("Infinity") }).count(), 1,
- "Infinity count wrong");
- assert.eq(col.find({ "decimal" : NumberDecimal("-Infinity") }).count(), 1,
- "Infinity count wrong");
+ assert.eq(col.find({"decimal": NumberDecimal("Infinity")}).count(), 1, "Infinity count wrong");
+ assert.eq(col.find({"decimal": NumberDecimal("-Infinity")}).count(), 1, "Infinity count wrong");
// Maximum Precision
- assert.eq(
- col.find({ "decimal" : NumberDecimal("12345678901234567890.12345678901234") }).count(), 1,
- "Maximum precision decimal not found.");
+ assert.eq(col.find({"decimal": NumberDecimal("12345678901234567890.12345678901234")}).count(),
+ 1,
+ "Maximum precision decimal not found.");
col.drop();
// Maximum and Minimum Values
assert.writeOK(col.insert([
- { "max" : NumberDecimal("9999999999999999999999999999999999E6144") },
- { "min" : NumberDecimal("1E-6176") }
+ {"max": NumberDecimal("9999999999999999999999999999999999E6144")},
+ {"min": NumberDecimal("1E-6176")}
]));
- assert.eq(col.find({ "max" : NumberDecimal("9999999999999999999999999999999999E6144") }).count(),
- 1);
- assert.eq(col.find({ "min" : NumberDecimal("1E-6176") }).count(),
+ assert.eq(col.find({"max": NumberDecimal("9999999999999999999999999999999999E6144")}).count(),
1);
+ assert.eq(col.find({"min": NumberDecimal("1E-6176")}).count(), 1);
}());
diff --git a/jstests/decimal/decimal_find_mixed.js b/jstests/decimal/decimal_find_mixed.js
index c93c7f5676d..02efc213656 100644
--- a/jstests/decimal/decimal_find_mixed.js
+++ b/jstests/decimal/decimal_find_mixed.js
@@ -1,6 +1,6 @@
// Tests finding NumberDecimal from the shell in mixed collections.
-(function () {
+(function() {
"use strict";
var col = db.decimal_find_mixed;
col.drop();
@@ -8,93 +8,85 @@
// Insert some sample data.
assert.writeOK(col.insert([
- { "a" : -1 },
- { "a" : NumberDecimal("-1") },
- { "a" : NumberLong("-1") },
- { "a" : NumberInt("-1") },
- { "a" : -0.3 },
- { "a" : NumberDecimal("-0.3") },
- { "a" : -0.1 },
- { "a" : NumberDecimal("-0.1") },
- { "a": NumberDecimal("0") },
- { "a" : 0 },
- { "a" : NumberDecimal("-0") },
- { "a": NumberDecimal("0.00") },
- { "a" : NumberDecimal("0.1") },
- { "a" : 0.1 },
- { "a" : NumberDecimal("0.3") },
- { "a" : 0.3 },
- { "a" : NumberDecimal("0.5") },
- { "a" : 0.5 },
- { "a" : NumberDecimal("1.0") },
- { "a" : NumberLong("1") },
- { "a" : NumberDecimal("1.00") },
- { "a" : NumberDecimal("2.00") },
- { "a" : NumberDecimal("12345678901234567890.12345678901234") },
- { "a" : NumberDecimal("NaN") },
- { "a" : NumberDecimal("-NaN") },
- { "a" : NaN },
- { "a" : NumberDecimal("Infinity") },
- { "a" : Infinity }
- ]), "Initial decimal insertion failed");
+ {"a": -1},
+ {"a": NumberDecimal("-1")},
+ {"a": NumberLong("-1")},
+ {"a": NumberInt("-1")},
+ {"a": -0.3},
+ {"a": NumberDecimal("-0.3")},
+ {"a": -0.1},
+ {"a": NumberDecimal("-0.1")},
+ {"a": NumberDecimal("0")},
+ {"a": 0},
+ {"a": NumberDecimal("-0")},
+ {"a": NumberDecimal("0.00")},
+ {"a": NumberDecimal("0.1")},
+ {"a": 0.1},
+ {"a": NumberDecimal("0.3")},
+ {"a": 0.3},
+ {"a": NumberDecimal("0.5")},
+ {"a": 0.5},
+ {"a": NumberDecimal("1.0")},
+ {"a": NumberLong("1")},
+ {"a": NumberDecimal("1.00")},
+ {"a": NumberDecimal("2.00")},
+ {"a": NumberDecimal("12345678901234567890.12345678901234")},
+ {"a": NumberDecimal("NaN")},
+ {"a": NumberDecimal("-NaN")},
+ {"a": NaN},
+ {"a": NumberDecimal("Infinity")},
+ {"a": Infinity}
+ ]),
+ "Initial decimal insertion failed");
// Simple finds
- assert.eq(col.find({ "a" : -1 }).count(), 4, "A1");
- assert.eq(col.find({ "a" : NumberLong("-1") }).count(), 4, "A2");
- assert.eq(col.find({ "a" : NumberInt("-1") }).count(), 4, "A3");
- assert.eq(col.find({ "a" : NumberDecimal("-1") }).count(), 4, "A4");
+ assert.eq(col.find({"a": -1}).count(), 4, "A1");
+ assert.eq(col.find({"a": NumberLong("-1")}).count(), 4, "A2");
+ assert.eq(col.find({"a": NumberInt("-1")}).count(), 4, "A3");
+ assert.eq(col.find({"a": NumberDecimal("-1")}).count(), 4, "A4");
- assert.eq(col.find({ "a": NaN }).count(), 3, "B1");
- assert.eq(col.find({ "a": NumberDecimal("NaN") }).count(), 3, "B2");
- assert.eq(col.find({ "a": Infinity }).count(), 2, "B3");
- assert.eq(col.find({ "a": NumberDecimal("Infinity") }).count(), 2, "B4");
+ assert.eq(col.find({"a": NaN}).count(), 3, "B1");
+ assert.eq(col.find({"a": NumberDecimal("NaN")}).count(), 3, "B2");
+ assert.eq(col.find({"a": Infinity}).count(), 2, "B3");
+ assert.eq(col.find({"a": NumberDecimal("Infinity")}).count(), 2, "B4");
- assert.eq(col.find({ $and : [ { "a": { $gte : 0 }}, { "a" : { $lte: 2 }}]}).count(),
- 14, "C1");
+ assert.eq(col.find({$and: [{"a": {$gte: 0}}, {"a": {$lte: 2}}]}).count(), 14, "C1");
// Proper mixed ordering of decimals and doubles
col.drop();
- assert.writeOK(col.insert([
- { "a" : NumberDecimal("0.3") },
- { "a" : 0.3 }
- ], "2 insertion failed"));
+ assert.writeOK(col.insert([{"a": NumberDecimal("0.3")}, {"a": 0.3}], "2 insertion failed"));
- assert.eq(col.find({ "a" : { $lt : NumberDecimal("0.3") }}).count(), 1, "D1");
- assert.eq(col.find({ "a" : { $gt : 0.3 }}).count(), 1, "D1");
+ assert.eq(col.find({"a": {$lt: NumberDecimal("0.3")}}).count(), 1, "D1");
+ assert.eq(col.find({"a": {$gt: 0.3}}).count(), 1, "D1");
// Find with NumberLong, but not Double
col.drop();
- assert.writeOK(col.insert([
- { "a" : NumberDecimal("36028797018963967") }
- ], "3 insertion failed"));
+ assert.writeOK(col.insert([{"a": NumberDecimal("36028797018963967")}], "3 insertion failed"));
- assert.eq(col.find({ "a" : NumberDecimal("36028797018963967") }).count(), 1, "E1");
+ assert.eq(col.find({"a": NumberDecimal("36028797018963967")}).count(), 1, "E1");
// Not representable as double
- assert.eq(col.find({ "a" : 36028797018963967 }).count(), 0, "E2");
- assert.eq(col.find({ "a" : NumberLong("36028797018963967") }).count(), 1, "E3");
+ assert.eq(col.find({"a": 36028797018963967}).count(), 0, "E2");
+ assert.eq(col.find({"a": NumberLong("36028797018963967")}).count(), 1, "E3");
// Doubles not representable as decimal
col.drop();
var exactDouble = 1427247692705959881058285969449495136382746624;
var exactDoubleString = "1427247692705959881058285969449495136382746624";
- assert.writeOK(col.insert([
- { "a" : exactDouble } // Exact double (46 digits)
- ], "4 insertion failed"));
+ assert.writeOK(col.insert([{"a": exactDouble} // Exact double (46 digits)
+ ],
+ "4 insertion failed"));
- assert.eq(col.find({ "a" : NumberDecimal(exactDoubleString) }).count(), 0, "F1");
- assert.eq(col.find({ "a" : { $gt : NumberDecimal(exactDoubleString) }}).count(), 1, "E2");
+ assert.eq(col.find({"a": NumberDecimal(exactDoubleString)}).count(), 0, "F1");
+ assert.eq(col.find({"a": {$gt: NumberDecimal(exactDoubleString)}}).count(), 1, "E2");
- var exactDoubleTiny = 1/1606938044258990275541962092341162602522202993782792835301376;
- var exactDoubleTinyString = "0.00000000000000000000000000000000000000000000000000000000000062230152778611417071440640537801242405902521687211671331011166147896988340353834411839448231257136169569665895551224821247160434722900390625";
+ var exactDoubleTiny = 1 / 1606938044258990275541962092341162602522202993782792835301376;
+ var exactDoubleTinyString =
+ "0.00000000000000000000000000000000000000000000000000000000000062230152778611417071440640537801242405902521687211671331011166147896988340353834411839448231257136169569665895551224821247160434722900390625";
col.drop();
- assert.writeOK(col.insert([
- { "a" : exactDoubleTiny }
- ], "5 insertion failed"));
+ assert.writeOK(col.insert([{"a": exactDoubleTiny}], "5 insertion failed"));
- assert.eq(col.find({ "a" : NumberDecimal(exactDoubleTinyString) }).count(),
- 0, "F1");
- assert.eq(col.find({ "a" : { $gt : NumberDecimal(exactDoubleTinyString) }}).count(),
- 1, "F2");
+ assert.eq(col.find({"a": NumberDecimal(exactDoubleTinyString)}).count(), 0, "F1");
+ assert.eq(col.find({"a": {$gt: NumberDecimal(exactDoubleTinyString)}}).count(), 1, "F2");
}());
diff --git a/jstests/decimal/decimal_find_query.js b/jstests/decimal/decimal_find_query.js
index 23689074ec9..dbdc3b69d0c 100644
--- a/jstests/decimal/decimal_find_query.js
+++ b/jstests/decimal/decimal_find_query.js
@@ -1,6 +1,6 @@
// Find the decimal using query operators
-(function () {
+(function() {
'use strict';
var col = db.decimal_find_query;
col.drop();
@@ -8,42 +8,43 @@
// Insert some sample data.
assert.writeOK(col.insert([
- { 'decimal': NumberDecimal('0') },
- { 'decimal': NumberDecimal('0.00') },
- { 'decimal' : NumberDecimal('-0') },
- { 'decimal' : NumberDecimal('1.0') },
- { 'decimal' : NumberDecimal('1.00') },
- { 'decimal' : NumberDecimal('2.00') },
- { 'decimal' : NumberDecimal('1234567890123456789012.12345678901234') },
- { 'decimal' : NumberDecimal('NaN') },
- { 'decimal' : NumberDecimal('-NaN') },
- { 'decimal' : NumberDecimal('Infinity') },
- { 'decimal' : NumberDecimal('-Infinity') },
- ]), 'Initial insertion failed');
+ {'decimal': NumberDecimal('0')},
+ {'decimal': NumberDecimal('0.00')},
+ {'decimal': NumberDecimal('-0')},
+ {'decimal': NumberDecimal('1.0')},
+ {'decimal': NumberDecimal('1.00')},
+ {'decimal': NumberDecimal('2.00')},
+ {'decimal': NumberDecimal('1234567890123456789012.12345678901234')},
+ {'decimal': NumberDecimal('NaN')},
+ {'decimal': NumberDecimal('-NaN')},
+ {'decimal': NumberDecimal('Infinity')},
+ {'decimal': NumberDecimal('-Infinity')},
+ ]),
+ 'Initial insertion failed');
- assert.eq(col.find({ 'decimal' : { $eq: NumberDecimal('1') }}).count(), '2');
- assert.eq(col.find({ 'decimal': { $lt: NumberDecimal('1.00000000000001') }}).count(),
- 6);
- assert.eq(col.find({ 'decimal': { $gt: NumberDecimal('1.5')}}).count(), 3);
+ assert.eq(col.find({'decimal': {$eq: NumberDecimal('1')}}).count(), '2');
+ assert.eq(col.find({'decimal': {$lt: NumberDecimal('1.00000000000001')}}).count(), 6);
+ assert.eq(col.find({'decimal': {$gt: NumberDecimal('1.5')}}).count(), 3);
- assert.eq(col.find({ 'decimal' : { $gte: NumberDecimal('2.000') }}).count(), 3);
- assert.eq(col.find({ 'decimal' : { $lte : NumberDecimal('0.9999999999999999')}}).count(),
- 4);
+ assert.eq(col.find({'decimal': {$gte: NumberDecimal('2.000')}}).count(), 3);
+ assert.eq(col.find({'decimal': {$lte: NumberDecimal('0.9999999999999999')}}).count(), 4);
assert.eq(
- col.find({ 'decimal': { $nin: [NumberDecimal('Infinity'),
- NumberDecimal('-Infinity')]}}).count(), 9,
+ col.find({'decimal': {$nin: [NumberDecimal('Infinity'), NumberDecimal('-Infinity')]}})
+ .count(),
+ 9,
'Infinity count incorrect');
// Test $mod
col.drop();
assert.writeOK(col.insert([
- { 'decimal': NumberDecimal('0') },
- { 'decimal': NumberDecimal('0.00') },
- { 'decimal' : NumberDecimal('-0') },
- { 'decimal' : NumberDecimal('1.0') },
- { 'decimal' : NumberDecimal('1.00') },
- { 'decimal' : NumberDecimal('2.00') },
- ]), '2 insertion failed');
- assert.eq(col.find({'decimal' : { $mod: [2, 0] }}).count(), 4, "$mod count incorrect");
+ {'decimal': NumberDecimal('0')},
+ {'decimal': NumberDecimal('0.00')},
+ {'decimal': NumberDecimal('-0')},
+ {'decimal': NumberDecimal('1.0')},
+ {'decimal': NumberDecimal('1.00')},
+ {'decimal': NumberDecimal('2.00')},
+ ]),
+ '2 insertion failed');
+ assert.eq(col.find({'decimal': {$mod: [2, 0]}}).count(), 4, "$mod count incorrect");
}());
diff --git a/jstests/decimal/decimal_update.js b/jstests/decimal/decimal_update.js
index 32824f8e0b9..6be2bd9e3e6 100644
--- a/jstests/decimal/decimal_update.js
+++ b/jstests/decimal/decimal_update.js
@@ -1,41 +1,40 @@
// Test decimal updates
-(function () {
+(function() {
"use strict";
var col = db.decimal_updates;
col.drop();
// Insert some sample data.
var docs = [
- { 'a' : NumberDecimal("1.0") },
- { 'a' : NumberDecimal("0.0") },
- { 'a' : NumberDecimal("1.00") },
- { 'a' : NumberLong("1") },
- { 'a' : 1 }
+ {'a': NumberDecimal("1.0")},
+ {'a': NumberDecimal("0.0")},
+ {'a': NumberDecimal("1.00")},
+ {'a': NumberLong("1")},
+ {'a': 1}
];
assert.writeOK(col.insert(docs), "Initial insertion failed");
- assert.writeOK(col.update({}, { $inc: { 'a' : NumberDecimal("10") }}, { multi : true }),
+ assert.writeOK(col.update({}, {$inc: {'a': NumberDecimal("10")}}, {multi: true}),
"update $inc failed");
- assert.eq(col.find({ a : 11 }).count(), 4, "count after $inc incorrect");
- assert.writeOK(col.update({}, { $inc: { 'a' : NumberDecimal("0") }}, { multi : true }),
+ assert.eq(col.find({a: 11}).count(), 4, "count after $inc incorrect");
+ assert.writeOK(col.update({}, {$inc: {'a': NumberDecimal("0")}}, {multi: true}),
"update $inc 0 failed");
- assert.eq(col.find({ a : 11 }).count(), 4, "count after $inc 0 incorrect");
+ assert.eq(col.find({a: 11}).count(), 4, "count after $inc 0 incorrect");
col.drop();
assert.writeOK(col.insert(docs), "Second insertion failed");
- assert.writeOK(col.update({}, { $mul: { 'a' : NumberDecimal("1") }}, { multi : true }),
+ assert.writeOK(col.update({}, {$mul: {'a': NumberDecimal("1")}}, {multi: true}),
"update $mul failed");
- assert.eq(col.find({ a : 1 }).count(), 4, "count after $mul incorrect");
- assert.writeOK(col.update({}, { $mul: { 'a' : NumberDecimal("2") }}, { multi : true }),
+ assert.eq(col.find({a: 1}).count(), 4, "count after $mul incorrect");
+ assert.writeOK(col.update({}, {$mul: {'a': NumberDecimal("2")}}, {multi: true}),
"update $mul 2 failed");
- assert.eq(col.find({ a : 2 }).count(), 4, "count after $mul incorrect");
- assert.writeOK(col.update({}, { $mul: { 'a' : NumberDecimal("0") }}, { multi : true }),
+ assert.eq(col.find({a: 2}).count(), 4, "count after $mul incorrect");
+ assert.writeOK(col.update({}, {$mul: {'a': NumberDecimal("0")}}, {multi: true}),
"update $mul 0 failed");
- assert.eq(col.find({ a : 0 }).count(), 5, "count after $mul 0 incorrect");
+ assert.eq(col.find({a: 0}).count(), 5, "count after $mul 0 incorrect");
- assert.writeError(col.update({}, { $bit: { 'a': { and : 1 }}}, { multi : true }),
- "$bit should fail");
+ assert.writeError(col.update({}, {$bit: {'a': {and: 1}}}, {multi: true}), "$bit should fail");
}());
diff --git a/jstests/disk/datafile_options.js b/jstests/disk/datafile_options.js
index 62d4b137311..a7639c43d7d 100644
--- a/jstests/disk/datafile_options.js
+++ b/jstests/disk/datafile_options.js
@@ -4,51 +4,35 @@ load('jstests/libs/command_line/test_parsed_options.js');
jsTest.log("Testing \"noprealloc\" command line option");
var expectedResult = {
- "parsed" : {
- "storage" : {
- "mmapv1" : {
- "preallocDataFiles" : false
- }
- }
- }
+ "parsed": {"storage": {"mmapv1": {"preallocDataFiles": false}}}
};
-testGetCmdLineOptsMongod({ noprealloc : "" }, expectedResult);
+testGetCmdLineOptsMongod({noprealloc: ""}, expectedResult);
jsTest.log("Testing \"storage.mmapv1.preallocDataFiles\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/enable_prealloc.json",
- "storage" : {
- "mmapv1" : {
- "preallocDataFiles" : true
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/enable_prealloc.json",
+ "storage": {"mmapv1": {"preallocDataFiles": true}}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/enable_prealloc.json" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_prealloc.json"},
expectedResult);
jsTest.log("Testing with no explicit data file option setting");
expectedResult = {
- "parsed" : {
- "storage" : { }
- }
+ "parsed": {"storage": {}}
};
testGetCmdLineOptsMongod({}, expectedResult);
// Test that we preserve switches explicitly set to false in config files. See SERVER-13439.
jsTest.log("Testing explicitly disabled \"noprealloc\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_noprealloc.ini",
- "storage" : {
- "mmapv1" : {
- "preallocDataFiles" : true
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_noprealloc.ini",
+ "storage": {"mmapv1": {"preallocDataFiles": true}}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_noprealloc.ini" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_noprealloc.ini"},
expectedResult);
print(baseName + " succeeded.");
diff --git a/jstests/disk/dbNoCreate.js b/jstests/disk/dbNoCreate.js
index f365e0730e9..f3498fcedb4 100644
--- a/jstests/disk/dbNoCreate.js
+++ b/jstests/disk/dbNoCreate.js
@@ -2,16 +2,16 @@ var baseName = "jstests_dbNoCreate";
var m = MongoRunner.runMongod({});
-var t = m.getDB( baseName ).t;
+var t = m.getDB(baseName).t;
-assert.eq( 0, t.find().toArray().length );
+assert.eq(0, t.find().toArray().length);
t.remove({});
-t.update( {}, { a:1 } );
+t.update({}, {a: 1});
t.drop();
MongoRunner.stopMongod(m);
-m = MongoRunner.runMongod({restart:true, cleanData:false, dbpath: m.dbpath});
-assert.eq( -1,
- m.getDBNames().indexOf( baseName ),
- "found " + baseName + " in " + tojson(m.getDBNames()));
+m = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: m.dbpath});
+assert.eq(-1,
+ m.getDBNames().indexOf(baseName),
+ "found " + baseName + " in " + tojson(m.getDBNames()));
diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js
index 9587908355f..3c889cb5b1f 100644
--- a/jstests/disk/directoryperdb.js
+++ b/jstests/disk/directoryperdb.js
@@ -5,86 +5,83 @@ var storageEngine = db.serverStatus().storageEngine.name;
// The pattern which matches the names of database files
var dbFileMatcher;
-if ( storageEngine == "mmapv1" ) {
+if (storageEngine == "mmapv1") {
// Matches mmapv1 *.ns and *.0, *.1, etc files.
dbFileMatcher = /\.(ns|\d+)$/;
-} else if ( storageEngine == "wiredTiger" ) {
+} else if (storageEngine == "wiredTiger") {
// Matches wiredTiger collection-*.wt and index-*.wt files
dbFileMatcher = /(collection|index)-.+\.wt$/;
} else {
- assert( false, "This test must be run against mmapv1 or wiredTiger" );
+ assert(false, "This test must be run against mmapv1 or wiredTiger");
}
// Set up helper functions.
assertDocumentCount = function(db, count) {
- assert.eq( count, db[ baseName ].count() ,
- "Expected " + count + " documents in " + db._name + "." + baseName + ". " +
- "Found: " + tojson( db[ baseName ].find().toArray() ) );
+ assert.eq(count,
+ db[baseName].count(),
+ "Expected " + count + " documents in " + db._name + "." + baseName + ". " +
+ "Found: " + tojson(db[baseName].find().toArray()));
};
-
-checkDBFilesInDBDirectory = function( db ) {
- db.adminCommand( { fsync : 1 } );
+checkDBFilesInDBDirectory = function(db) {
+ db.adminCommand({fsync: 1});
var dir = dbpath + db._name;
- files = listFiles( dir );
+ files = listFiles(dir);
var fileCount = 0;
- for( f in files ) {
- if ( files[ f ].isDirectory )
+ for (f in files) {
+ if (files[f].isDirectory)
continue;
fileCount += 1;
- assert( dbFileMatcher.test( files[ f ].name ),
- "In directory:" + dir + " found unexpected file: " + files[ f ].name );
+ assert(dbFileMatcher.test(files[f].name),
+ "In directory:" + dir + " found unexpected file: " + files[f].name);
}
- assert( fileCount > 0, "Expected more than zero nondirectory files in database directory" );
+ assert(fileCount > 0, "Expected more than zero nondirectory files in database directory");
};
-checkDBDirectoryNonexistent = function( db ) {
- db.adminCommand( { fsync : 1 } );
+checkDBDirectoryNonexistent = function(db) {
+ db.adminCommand({fsync: 1});
- var files = listFiles( dbpath );
+ var files = listFiles(dbpath);
// Check that there are no files in the toplevel dbpath.
- for ( f in files ) {
- if ( !files[ f ].isDirectory ) {
- assert( !dbFileMatcher.test( files[ f ].name ),
- "Database file" + files[ f ].name +
- " exists in dbpath after deleting all non-directoryperdb databases");
+ for (f in files) {
+ if (!files[f].isDirectory) {
+ assert(!dbFileMatcher.test(files[f].name),
+ "Database file" + files[f].name +
+ " exists in dbpath after deleting all non-directoryperdb databases");
}
}
// Check db directories to ensure db files in them have been destroyed.
// mmapv1 removes the database directory, pending SERVER-1379.
- if ( storageEngine == "mmapv1" ) {
- var files = listFiles( dbpath );
+ if (storageEngine == "mmapv1") {
+ var files = listFiles(dbpath);
var fileNotFound = true;
- for ( f in files ) {
- assert( files[ f ].name != db._name,
- "Directory " + db._name + " still exists" );
+ for (f in files) {
+ assert(files[f].name != db._name, "Directory " + db._name + " still exists");
}
- } else if ( storageEngine == "wiredTiger" ) {
- var files = listFiles( dbpath + db._name );
- assert.eq( files.length, 0,
- "Files left behind in database directory" );
+ } else if (storageEngine == "wiredTiger") {
+ var files = listFiles(dbpath + db._name);
+ assert.eq(files.length, 0, "Files left behind in database directory");
}
};
// Start the directoryperdb instance of mongod.
-var m = MongoRunner.runMongod( { storageEngine : storageEngine, dbpath : dbpath,
- directoryperdb : "" } );
+var m = MongoRunner.runMongod({storageEngine: storageEngine, dbpath: dbpath, directoryperdb: ""});
// Check that the 'local' db has allocated data.
-var localDb = m.getDB( "local" );
-checkDBFilesInDBDirectory( localDb );
+var localDb = m.getDB("local");
+checkDBFilesInDBDirectory(localDb);
// Create database with directoryperdb.
-var dbBase = m.getDB( baseName );
-dbBase[ baseName ].insert( {} );
-assertDocumentCount( dbBase, 1 );
-checkDBFilesInDBDirectory( dbBase );
+var dbBase = m.getDB(baseName);
+dbBase[baseName].insert({});
+assertDocumentCount(dbBase, 1);
+checkDBFilesInDBDirectory(dbBase);
// Drop a database created with directoryperdb.
-assert.commandWorked( dbBase.runCommand( { dropDatabase : 1 } ) );
-assertDocumentCount( dbBase, 0 );
-checkDBDirectoryNonexistent( dbBase );
+assert.commandWorked(dbBase.runCommand({dropDatabase: 1}));
+assertDocumentCount(dbBase, 0);
+checkDBDirectoryNonexistent(dbBase);
// It should be impossible to create a database named "journal" with directoryperdb, as that
// directory exists. This test has been disabled until SERVER-2460 is resolved.
@@ -95,42 +92,41 @@ assert.writeError(db[ "journal" ].insert( {} ));
// Using WiredTiger, it should be impossible to create a database named "WiredTiger" with
// directoryperdb, as that file is created by the WiredTiger storageEngine.
-if ( storageEngine == "wiredTiger" ) {
- var dbW = m.getDB( "WiredTiger" );
- assert.writeError( dbW[ baseName ].insert( {} ) );
+if (storageEngine == "wiredTiger") {
+ var dbW = m.getDB("WiredTiger");
+ assert.writeError(dbW[baseName].insert({}));
}
// Create a database named 'a' repeated 63 times.
-var dbNameAA = Array( 64 ).join( 'a' );
-var dbAA = m.getDB( dbNameAA );
-assert.writeOK( dbAA[ baseName ].insert( {} ) );
-assertDocumentCount( dbAA, 1 );
-checkDBFilesInDBDirectory( dbAA );
+var dbNameAA = Array(64).join('a');
+var dbAA = m.getDB(dbNameAA);
+assert.writeOK(dbAA[baseName].insert({}));
+assertDocumentCount(dbAA, 1);
+checkDBFilesInDBDirectory(dbAA);
// Create a database named '&'.
-var dbAnd = m.getDB( '&' );
-assert.writeOK( dbAnd[ baseName ].insert( {} ) );
-assertDocumentCount( dbAnd, 1 );
-checkDBFilesInDBDirectory( dbAnd );
-
+var dbAnd = m.getDB('&');
+assert.writeOK(dbAnd[baseName].insert({}));
+assertDocumentCount(dbAnd, 1);
+checkDBFilesInDBDirectory(dbAnd);
// Unicode directoryperdb databases do not work on Windows.
// Disabled until https://jira.mongodb.org/browse/SERVER-16725
// is resolved.
-if ( !_isWindows() ) {
+if (!_isWindows()) {
// Create a database named '処'.
var dbNameU = '処';
- var dbU = m.getDB( dbNameU );
- assert.writeOK( dbU[ baseName ].insert( {} ) );
- assertDocumentCount( dbU, 1 );
- checkDBFilesInDBDirectory( dbU );
+ var dbU = m.getDB(dbNameU);
+ assert.writeOK(dbU[baseName].insert({}));
+ assertDocumentCount(dbU, 1);
+ checkDBFilesInDBDirectory(dbU);
// Create a database named '処' repeated 21 times.
var dbNameUU = Array(22).join('処');
- var dbUU = m.getDB( dbNameUU );
- assert.writeOK( dbUU[ baseName ].insert( {} ) );
- assertDocumentCount( dbUU, 1 );
- checkDBFilesInDBDirectory( dbUU );
+ var dbUU = m.getDB(dbNameUU);
+ assert.writeOK(dbUU[baseName].insert({}));
+ assertDocumentCount(dbUU, 1);
+ checkDBFilesInDBDirectory(dbUU);
}
print("SUCCESS directoryperdb.js");
diff --git a/jstests/disk/diskfull.js b/jstests/disk/diskfull.js
index 6bd086080eb..cf24d4468d3 100644
--- a/jstests/disk/diskfull.js
+++ b/jstests/disk/diskfull.js
@@ -4,9 +4,9 @@ assert.commandWorked(db.adminCommand({configureFailPoint: "allocateDiskFull", mo
var d = db.getSisterDB("DiskFullTestDB");
var c = d.getCollection("DiskFullTestCollection");
-var writeError1 = c.insert({ a : 6 }).getWriteError();
+var writeError1 = c.insert({a: 6}).getWriteError();
assert.eq(12520, writeError1.code);
// All subsequent requests should fail
-var writeError2 = c.insert({ a : 6 }).getWriteError();
+var writeError2 = c.insert({a: 6}).getWriteError();
assert.eq(12520, writeError2.code);
diff --git a/jstests/disk/filesize.js b/jstests/disk/filesize.js
index 1f2da5db80e..954139c25fc 100644
--- a/jstests/disk/filesize.js
+++ b/jstests/disk/filesize.js
@@ -4,7 +4,7 @@ var baseName = "filesize";
// Start mongod with --smallfiles
var m = MongoRunner.runMongod({nojournal: "", smallfiles: ""});
-var db = m.getDB( baseName );
+var db = m.getDB(baseName);
// Skip on 32 bits, since 32-bit servers don't warn about small files
if (db.serverBuildInfo().bits == 32) {
@@ -20,21 +20,20 @@ if (db.serverBuildInfo().bits == 32) {
nojournal: "",
});
- db = m.getDB( baseName );
- var log = db.adminCommand( { getLog : "global" } ).log;
+ db = m.getDB(baseName);
+ var log = db.adminCommand({getLog: "global"}).log;
// Find log message like:
// "openExisting file size 16777216 but
// mmapv1GlobalOptions.smallfiles=false: /data/db/filesize/local.0"
var found = false, logline = '';
- for ( i=log.length - 1; i>= 0; i-- ) {
+ for (i = log.length - 1; i >= 0; i--) {
logline = log[i];
- if ( logline.indexOf( "openExisting file" ) >= 0
- && logline.indexOf( "local.0" ) >= 0 ) {
+ if (logline.indexOf("openExisting file") >= 0 && logline.indexOf("local.0") >= 0) {
found = true;
break;
}
}
- assert( found );
+ assert(found);
}
diff --git a/jstests/disk/index_options.js b/jstests/disk/index_options.js
index d8a3b267333..68710de75a1 100644
--- a/jstests/disk/index_options.js
+++ b/jstests/disk/index_options.js
@@ -4,44 +4,34 @@ load('jstests/libs/command_line/test_parsed_options.js');
jsTest.log("Testing \"noIndexBuildRetry\" command line option");
var expectedResult = {
- "parsed" : {
- "storage" : {
- "indexBuildRetry" : false
- }
- }
+ "parsed": {"storage": {"indexBuildRetry": false}}
};
-testGetCmdLineOptsMongod({ noIndexBuildRetry : "" }, expectedResult);
+testGetCmdLineOptsMongod({noIndexBuildRetry: ""}, expectedResult);
jsTest.log("Testing \"storage.indexBuildRetry\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/enable_indexbuildretry.json",
- "storage" : {
- "indexBuildRetry" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/enable_indexbuildretry.json",
+ "storage": {"indexBuildRetry": true}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/enable_indexbuildretry.json" },
- expectedResult);
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_indexbuildretry.json"},
+ expectedResult);
jsTest.log("Testing with no explicit index option setting");
expectedResult = {
- "parsed" : {
- "storage" : { }
- }
+ "parsed": {"storage": {}}
};
testGetCmdLineOptsMongod({}, expectedResult);
jsTest.log("Testing explicitly disabled \"noIndexBuildRetry\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_noindexbuildretry.ini",
- "storage" : {
- "indexBuildRetry" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_noindexbuildretry.ini",
+ "storage": {"indexBuildRetry": true}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_noindexbuildretry.ini" },
- expectedResult);
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_noindexbuildretry.ini"},
+ expectedResult);
print(baseName + " succeeded.");
diff --git a/jstests/disk/killall.js b/jstests/disk/killall.js
index beb3e99e778..675c2a42c5f 100644
--- a/jstests/disk/killall.js
+++ b/jstests/disk/killall.js
@@ -10,14 +10,13 @@ var baseName = "jstests_disk_killall";
var dbpath = MongoRunner.dataPath + baseName;
var mongod = MongoRunner.runMongod({dbpath: dbpath});
-var db = mongod.getDB( "test" );
-var collection = db.getCollection( baseName );
+var db = mongod.getDB("test");
+var collection = db.getCollection(baseName);
assert.writeOK(collection.insert({}));
var awaitShell = startParallelShell(
- "db." + baseName + ".count( { $where: function() { while( 1 ) { ; } } } )",
- mongod.port);
-sleep( 1000 );
+ "db." + baseName + ".count( { $where: function() { while( 1 ) { ; } } } )", mongod.port);
+sleep(1000);
/**
* 0 == mongod's exit code on Windows, or when it receives TERM, HUP or INT signals. On UNIX
@@ -33,16 +32,12 @@ assert.eq(0, exitCode, "got unexpected exitCode");
exitCode = awaitShell({checkExitSuccess: false});
assert.neq(0, exitCode, "expected shell to exit abnormally due to mongod being terminated");
-mongod = MongoRunner.runMongod({
- port: mongod.port,
- restart: true,
- cleanData: false,
- dbpath: mongod.dbpath
-});
-db = mongod.getDB( "test" );
-collection = db.getCollection( baseName );
-
-assert( collection.stats().ok );
-assert( collection.drop() );
+mongod = MongoRunner.runMongod(
+ {port: mongod.port, restart: true, cleanData: false, dbpath: mongod.dbpath});
+db = mongod.getDB("test");
+collection = db.getCollection(baseName);
+
+assert(collection.stats().ok);
+assert(collection.drop());
MongoRunner.stopMongod(mongod);
diff --git a/jstests/disk/newcollection.js b/jstests/disk/newcollection.js
index e6a79948c9a..aab959b44df 100644
--- a/jstests/disk/newcollection.js
+++ b/jstests/disk/newcollection.js
@@ -2,27 +2,28 @@
var baseName = "jstests_disk_newcollection";
var m = MongoRunner.runMongod({noprealloc: "", smallfiles: ""});
-db = m.getDB( "test" );
+db = m.getDB("test");
var t = db[baseName];
var getTotalNonLocalSize = function() {
var totalNonLocalDBSize = 0;
- m.getDBs().databases.forEach( function(dbStats) {
+ m.getDBs().databases.forEach(function(dbStats) {
// We accept the local database's space overhead.
- if (dbStats.name == "local") return;
+ if (dbStats.name == "local")
+ return;
        // Databases with "sizeOnDisk=1" and "empty=true" don't actually take up space on disk.
// See SERVER-11051.
- if (dbStats.sizeOnDisk == 1 && dbStats.empty) return;
+ if (dbStats.sizeOnDisk == 1 && dbStats.empty)
+ return;
totalNonLocalDBSize += dbStats.sizeOnDisk;
});
return totalNonLocalDBSize;
};
for (var pass = 0; pass <= 1; pass++) {
-
- db.createCollection(baseName, { size: 15.8 * 1024 * 1024 });
- if( pass == 0 )
+ db.createCollection(baseName, {size: 15.8 * 1024 * 1024});
+ if (pass == 0)
t.drop();
size = getTotalNonLocalSize();
diff --git a/jstests/disk/preallocate.js b/jstests/disk/preallocate.js
index 68010dba173..2a01dd89820 100644
--- a/jstests/disk/preallocate.js
+++ b/jstests/disk/preallocate.js
@@ -6,30 +6,32 @@ var m = MongoRunner.runMongod({});
var getTotalNonLocalSize = function() {
var totalNonLocalDBSize = 0;
- m.getDBs().databases.forEach( function(dbStats) {
+ m.getDBs().databases.forEach(function(dbStats) {
// We accept the local database's space overhead.
- if (dbStats.name == "local") return;
+ if (dbStats.name == "local")
+ return;
        // Databases with "sizeOnDisk=1" and "empty=true" don't actually take up space on disk.
// See SERVER-11051.
- if (dbStats.sizeOnDisk == 1 && dbStats.empty) return;
+ if (dbStats.sizeOnDisk == 1 && dbStats.empty)
+ return;
totalNonLocalDBSize += dbStats.sizeOnDisk;
});
return totalNonLocalDBSize;
};
-assert.eq( 0, getTotalNonLocalSize() );
+assert.eq(0, getTotalNonLocalSize());
-m.getDB( baseName ).createCollection( baseName + "1" );
+m.getDB(baseName).createCollection(baseName + "1");
// Windows does not currently use preallocation
expectedMB = 64 + 16;
-if ( m.getDB( baseName ).serverBits() < 64 )
+if (m.getDB(baseName).serverBits() < 64)
expectedMB /= 4;
-assert.soon(function() { return getTotalNonLocalSize() >= expectedMB * 1024 * 1024; },
- "\n\n\nFAIL preallocate.js expected second file to bring total size over " +
- expectedMB + "MB" );
+assert.soon(function() {
+ return getTotalNonLocalSize() >= expectedMB * 1024 * 1024;
+}, "\n\n\nFAIL preallocate.js expected second file to bring total size over " + expectedMB + "MB");
MongoRunner.stopMongod(m);
@@ -37,8 +39,8 @@ m = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: m.dbpath});
size = getTotalNonLocalSize();
-m.getDB( baseName ).createCollection( baseName + "2" );
+m.getDB(baseName).createCollection(baseName + "2");
-sleep( 2000 ); // give prealloc a chance
+sleep(2000); // give prealloc a chance
-assert.eq( size, getTotalNonLocalSize() );
+assert.eq(size, getTotalNonLocalSize());
diff --git a/jstests/disk/preallocate2.js b/jstests/disk/preallocate2.js
index cf191f4265a..f4d9eb2f253 100644
--- a/jstests/disk/preallocate2.js
+++ b/jstests/disk/preallocate2.js
@@ -4,11 +4,13 @@ var baseName = "jstests_preallocate2";
var m = MongoRunner.runMongod({});
-m.getDB( baseName )[ baseName ].save( {i:1} );
+m.getDB(baseName)[baseName].save({i: 1});
// Windows does not currently use preallocation
-expectedMB = ( _isWindows() ? 70 : 100 );
-if ( m.getDB( baseName ).serverBits() < 64 )
+expectedMB = (_isWindows() ? 70 : 100);
+if (m.getDB(baseName).serverBits() < 64)
expectedMB /= 4;
-assert.soon( function() { return m.getDBs().totalSize > expectedMB * 1000000; }, "\n\n\nFAIL preallocate.js expected second file to bring total size over " + expectedMB + "MB" );
+assert.soon(function() {
+ return m.getDBs().totalSize > expectedMB * 1000000;
+}, "\n\n\nFAIL preallocate.js expected second file to bring total size over " + expectedMB + "MB");
diff --git a/jstests/disk/preallocate_directoryperdb.js b/jstests/disk/preallocate_directoryperdb.js
index c21710e579c..cc17677c680 100644
--- a/jstests/disk/preallocate_directoryperdb.js
+++ b/jstests/disk/preallocate_directoryperdb.js
@@ -10,23 +10,24 @@ var baseName3 = "preallocate_directoryperdb3";
dbpath = MongoRunner.dataPath + baseDir + "/";
function checkDb2DirAbsent() {
- files = listFiles( dbpath );
-// printjson( files );
- for( var f in files ) {
- var name = files[ f ].name;
- assert.eq( -1, name.indexOf( dbpath + baseName2 ), "baseName2 dir still present" );
- }
+ files = listFiles(dbpath);
+ // printjson( files );
+ for (var f in files) {
+ var name = files[f].name;
+ assert.eq(-1, name.indexOf(dbpath + baseName2), "baseName2 dir still present");
+ }
}
-var m = MongoRunner.runMongod({smallfiles: "", directoryperdb: "", dbpath: dbpath, bind_ip: "127.0.0.1"});
-db = m.getDB( baseName );
-db2 = m.getDB( baseName2 );
-var bulk = db[ baseName ].initializeUnorderedBulkOp();
-var bulk2 = db2[ baseName2 ].initializeUnorderedBulkOp();
-var big = new Array( 5000 ).toString();
-for( var i = 0; i < 3000; ++i ) {
- bulk.insert({ b:big });
- bulk2.insert({ b:big });
+var m = MongoRunner.runMongod(
+ {smallfiles: "", directoryperdb: "", dbpath: dbpath, bind_ip: "127.0.0.1"});
+db = m.getDB(baseName);
+db2 = m.getDB(baseName2);
+var bulk = db[baseName].initializeUnorderedBulkOp();
+var bulk2 = db2[baseName2].initializeUnorderedBulkOp();
+var big = new Array(5000).toString();
+for (var i = 0; i < 3000; ++i) {
+ bulk.insert({b: big});
+ bulk2.insert({b: big});
}
assert.writeOK(bulk.execute());
assert.writeOK(bulk2.execute());
@@ -41,9 +42,9 @@ checkDb2DirAbsent();
db.dropDatabase();
// Try writing a new database, to ensure file allocator is still working.
-db3 = m.getDB( baseName3 );
-c3 = db[ baseName3 ];
-assert.writeOK(c3.insert( {} ));
-assert.eq( 1, c3.count() );
+db3 = m.getDB(baseName3);
+c3 = db[baseName3];
+assert.writeOK(c3.insert({}));
+assert.eq(1, c3.count());
checkDb2DirAbsent();
diff --git a/jstests/disk/quota.js b/jstests/disk/quota.js
index a959f344531..c1815f1c1f7 100644
--- a/jstests/disk/quota.js
+++ b/jstests/disk/quota.js
@@ -1,44 +1,46 @@
-// Check functioning of --quotaFiles parameter, including with respect to SERVER-3293 ('local' database).
+// Check functioning of --quotaFiles parameter, including with respect to SERVER-3293 ('local'
+// database).
baseName = "jstests_disk_quota";
var m = MongoRunner.runMongod({quotaFiles: 2, smallfiles: ""});
-db = m.getDB( baseName );
+db = m.getDB(baseName);
-big = new Array( 10000 ).toString();
+big = new Array(10000).toString();
// Insert documents until quota is exhausted.
-var coll = db[ baseName ];
-var res = coll.insert({ b: big });
-while( !res.hasWriteError() ) {
- res = coll.insert({ b: big });
+var coll = db[baseName];
+var res = coll.insert({b: big});
+while (!res.hasWriteError()) {
+ res = coll.insert({b: big});
}
dotTwoDataFile = baseName + ".2";
-files = listFiles( m.dbpath );
-for( i in files ) {
- // Since only one data file is allowed, a .0 file is expected and a .1 file may be preallocated (SERVER-3410) but no .2 file is expected.
- assert.neq( dotTwoDataFile, files[ i ].baseName );
+files = listFiles(m.dbpath);
+for (i in files) {
+ // Since only one data file is allowed, a .0 file is expected and a .1 file may be preallocated
+ // (SERVER-3410) but no .2 file is expected.
+ assert.neq(dotTwoDataFile, files[i].baseName);
}
dotTwoDataFile = "local" + ".2";
// Check that quota does not apply to local db, and a .2 file can be created.
-l = m.getDB( "local" )[ baseName ];
-for( i = 0; i < 10000; ++i ) {
- assert.writeOK(l.insert({ b: big }));
+l = m.getDB("local")[baseName];
+for (i = 0; i < 10000; ++i) {
+ assert.writeOK(l.insert({b: big}));
dotTwoFound = false;
- if ( i % 100 != 0 ) {
+ if (i % 100 != 0) {
continue;
}
- files = listFiles( m.dbpath );
- for( f in files ) {
- if ( files[ f ].baseName == dotTwoDataFile ) {
- dotTwoFound = true;
+ files = listFiles(m.dbpath);
+ for (f in files) {
+ if (files[f].baseName == dotTwoDataFile) {
+ dotTwoFound = true;
}
}
- if ( dotTwoFound ) {
- break;
+ if (dotTwoFound) {
+ break;
}
}
-assert( dotTwoFound );
+assert(dotTwoFound);
diff --git a/jstests/disk/quota2.js b/jstests/disk/quota2.js
index a53e6dec6b8..797034a65af 100644
--- a/jstests/disk/quota2.js
+++ b/jstests/disk/quota2.js
@@ -1,34 +1,33 @@
// Test for quotaFiles off by one file limit issue - SERVER-3420.
-if ( 0 ) { // SERVER-3420
+if (0) { // SERVER-3420
-baseName = "jstests_disk_quota2";
+ baseName = "jstests_disk_quota2";
-var m = MongoRunner.runMongod({quotaFiles: 2, smallfiles: ""});
-db = m.getDB( baseName );
+ var m = MongoRunner.runMongod({quotaFiles: 2, smallfiles: ""});
+ db = m.getDB(baseName);
-big = new Array( 10000 ).toString();
+ big = new Array(10000).toString();
-// Insert documents until quota is exhausted.
-var coll = db[ baseName ];
-var res = coll.insert({ b: big });
-while( !res.hasWriteError() ) {
- res = coll.insert({ b: big });
-}
-
-// Trigger allocation of an additional file for a 'special' namespace.
-for( n = 0; !db.getLastError(); ++n ) {
- db.createCollection( '' + n );
-}
+ // Insert documents until quota is exhausted.
+ var coll = db[baseName];
+ var res = coll.insert({b: big});
+ while (!res.hasWriteError()) {
+ res = coll.insert({b: big});
+ }
-// Check that new docs are saved in the .0 file.
-for( i = 0; i < n; ++i ) {
- c = db[ ''+i ];
- res = c.insert({ b: big });
- if( !res.hasWriteError() ) {
- var recordId = c.find().showRecord()[0].$recordId;
- assert.eq(0, recordId >> 32);
+ // Trigger allocation of an additional file for a 'special' namespace.
+ for (n = 0; !db.getLastError(); ++n) {
+ db.createCollection('' + n);
}
-}
+ // Check that new docs are saved in the .0 file.
+ for (i = 0; i < n; ++i) {
+ c = db['' + i];
+ res = c.insert({b: big});
+ if (!res.hasWriteError()) {
+ var recordId = c.find().showRecord()[0].$recordId;
+ assert.eq(0, recordId >> 32);
+ }
+ }
}
diff --git a/jstests/disk/quota3.js b/jstests/disk/quota3.js
index 43227d21bfc..0332667e53e 100644
--- a/jstests/disk/quota3.js
+++ b/jstests/disk/quota3.js
@@ -1,21 +1,20 @@
// Test for quotaFiles being ignored allocating a large collection - SERVER-3511.
-if ( 0 ) { // SERVER-3511
+if (0) { // SERVER-3511
-baseName = "jstests_disk_quota3";
-dbpath = MongoRunner.dataPath + baseName;
+ baseName = "jstests_disk_quota3";
+ dbpath = MongoRunner.dataPath + baseName;
-var m = MongoRunner.runMongod({dbpath: dbpath, quotaFiles: 3, smallfiles: ""});
-db = m.getDB( baseName );
+ var m = MongoRunner.runMongod({dbpath: dbpath, quotaFiles: 3, smallfiles: ""});
+ db = m.getDB(baseName);
-db.createCollection( baseName, {size:128*1024*1024} );
-assert( db.getLastError() );
-
-dotFourDataFile = dbpath + "/" + baseName + ".4";
-files = listFiles( dbpath );
-for( i in files ) {
- // .3 file may be preallocated but not .4
- assert.neq( dotFourDataFile, files[ i ].name );
-}
+ db.createCollection(baseName, {size: 128 * 1024 * 1024});
+ assert(db.getLastError());
+ dotFourDataFile = dbpath + "/" + baseName + ".4";
+ files = listFiles(dbpath);
+ for (i in files) {
+ // .3 file may be preallocated but not .4
+ assert.neq(dotFourDataFile, files[i].name);
+ }
} \ No newline at end of file
diff --git a/jstests/disk/repair.js b/jstests/disk/repair.js
index 072afa3cf08..a38870ce24e 100644
--- a/jstests/disk/repair.js
+++ b/jstests/disk/repair.js
@@ -4,59 +4,59 @@ var baseName = "jstests_disk_repair";
var dbpath = MongoRunner.dataPath + baseName + "/";
var repairpath = dbpath + "repairDir/";
-resetDbpath( dbpath );
-resetDbpath( repairpath );
+resetDbpath(dbpath);
+resetDbpath(repairpath);
var m = MongoRunner.runMongod({
dbpath: dbpath,
repairpath: repairpath,
noCleanData: true,
});
-db = m.getDB( baseName );
-db[ baseName ].save( {} );
-assert.commandWorked( db.runCommand( {repairDatabase:1, backupOriginalFiles:true} ) );
+db = m.getDB(baseName);
+db[baseName].save({});
+assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: true}));
function check() {
- files = listFiles( dbpath );
- for( f in files ) {
- assert( ! new RegExp( "^" + dbpath + "backup_" ).test( files[ f ].name ), "backup dir in dbpath" );
+ files = listFiles(dbpath);
+ for (f in files) {
+ assert(!new RegExp("^" + dbpath + "backup_").test(files[f].name), "backup dir in dbpath");
}
- assert.eq.automsg( "1", "db[ baseName ].count()" );
+ assert.eq.automsg("1", "db[ baseName ].count()");
}
check();
-MongoRunner.stopMongod( m.port );
+MongoRunner.stopMongod(m.port);
-resetDbpath( repairpath );
+resetDbpath(repairpath);
m = MongoRunner.runMongod({
port: m.port,
dbpath: dbpath,
noCleanData: true,
});
-db = m.getDB( baseName );
-assert.commandWorked( db.runCommand( {repairDatabase:1} ) );
+db = m.getDB(baseName);
+assert.commandWorked(db.runCommand({repairDatabase: 1}));
check();
-MongoRunner.stopMongod( m.port );
+MongoRunner.stopMongod(m.port);
-resetDbpath( repairpath );
-rc = runMongoProgram("mongod", "--repair", "--port", m.port, "--dbpath", dbpath,
- "--repairpath", repairpath);
-assert.eq.automsg( "0", "rc" );
+resetDbpath(repairpath);
+rc = runMongoProgram(
+ "mongod", "--repair", "--port", m.port, "--dbpath", dbpath, "--repairpath", repairpath);
+assert.eq.automsg("0", "rc");
m = MongoRunner.runMongod({
port: m.port,
dbpath: dbpath,
noCleanData: true,
});
-db = m.getDB( baseName );
+db = m.getDB(baseName);
check();
-MongoRunner.stopMongod( m.port );
+MongoRunner.stopMongod(m.port);
-resetDbpath( repairpath );
+resetDbpath(repairpath);
rc = runMongoProgram("mongod", "--repair", "--port", m.port, "--dbpath", dbpath);
-assert.eq.automsg( "0", "rc" );
+assert.eq.automsg("0", "rc");
m = MongoRunner.runMongod({
port: m.port,
dbpath: dbpath,
noCleanData: true,
});
-db = m.getDB( baseName );
+db = m.getDB(baseName);
check();
diff --git a/jstests/disk/repair2.js b/jstests/disk/repair2.js
index 1ee79dff3d0..67dd0f4cdf7 100644
--- a/jstests/disk/repair2.js
+++ b/jstests/disk/repair2.js
@@ -3,13 +3,13 @@
var baseName = "jstests_disk_repair2";
function check() {
- files = listFiles( dbpath );
- for( f in files ) {
- assert( ! new RegExp( "^" + dbpath + "backup_" ).test( files[ f ].name ),
- "backup dir " + files[ f ].name + " in dbpath" );
+ files = listFiles(dbpath);
+ for (f in files) {
+ assert(!new RegExp("^" + dbpath + "backup_").test(files[f].name),
+ "backup dir " + files[f].name + " in dbpath");
}
- assert.eq.automsg( "1", "db[ baseName ].count()" );
+ assert.eq.automsg("1", "db[ baseName ].count()");
}
var dbpath = MongoRunner.dataPath + baseName + "/";
@@ -17,8 +17,8 @@ var repairpath = dbpath + "repairDir/";
var longDBName = Array(61).join('a');
var longRepairPath = dbpath + Array(61).join('b') + '/';
-resetDbpath( dbpath );
-resetDbpath( repairpath );
+resetDbpath(dbpath);
+resetDbpath(repairpath);
var m = MongoRunner.runMongod({
directoryperdb: "",
@@ -26,54 +26,54 @@ var m = MongoRunner.runMongod({
repairpath: repairpath,
noCleanData: true,
});
-db = m.getDB( baseName );
-db[ baseName ].save( {} );
-assert.commandWorked( db.runCommand( {repairDatabase:1, backupOriginalFiles:true} ) );
+db = m.getDB(baseName);
+db[baseName].save({});
+assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: true}));
-//Check that repair files exist in the repair directory, and nothing else
-db.adminCommand( { fsync : 1 } );
-files = listFiles( repairpath + "/backup_repairDatabase_0/" + baseName );
+// Check that repair files exist in the repair directory, and nothing else
+db.adminCommand({fsync: 1});
+files = listFiles(repairpath + "/backup_repairDatabase_0/" + baseName);
var fileCount = 0;
-for( f in files ) {
- print( files[ f ].name );
- if ( files[ f ].isDirectory )
+for (f in files) {
+ print(files[f].name);
+ if (files[f].isDirectory)
continue;
fileCount += 1;
- assert( /\.bak$/.test( files[ f ].name ),
- "In database repair directory, found unexpected file: " + files[ f ].name );
+ assert(/\.bak$/.test(files[f].name),
+ "In database repair directory, found unexpected file: " + files[f].name);
}
-assert( fileCount > 0, "Expected more than zero nondirectory files in the database directory" );
+assert(fileCount > 0, "Expected more than zero nondirectory files in the database directory");
check();
-MongoRunner.stopMongod( m.port );
+MongoRunner.stopMongod(m.port);
-resetDbpath( repairpath );
+resetDbpath(repairpath);
m = MongoRunner.runMongod({
port: m.port,
directoryperdb: "",
dbpath: dbpath,
noCleanData: true,
});
-db = m.getDB( baseName );
-assert.commandWorked( db.runCommand( {repairDatabase:1} ) );
+db = m.getDB(baseName);
+assert.commandWorked(db.runCommand({repairDatabase: 1}));
check();
-MongoRunner.stopMongod( m.port );
+MongoRunner.stopMongod(m.port);
-//Test long database names
-resetDbpath( repairpath );
+// Test long database names
+resetDbpath(repairpath);
m = MongoRunner.runMongod({
port: m.port,
directoryperdb: "",
dbpath: dbpath,
noCleanData: true,
});
-db = m.getDB( longDBName );
-assert.writeOK(db[ baseName ].save( {} ));
-assert.commandWorked( db.runCommand( {repairDatabase:1} ) );
-MongoRunner.stopMongod( m.port );
+db = m.getDB(longDBName);
+assert.writeOK(db[baseName].save({}));
+assert.commandWorked(db.runCommand({repairDatabase: 1}));
+MongoRunner.stopMongod(m.port);
-//Test long repairPath
-resetDbpath( longRepairPath );
+// Test long repairPath
+resetDbpath(longRepairPath);
m = MongoRunner.runMongod({
port: m.port,
directoryperdb: "",
@@ -81,19 +81,22 @@ m = MongoRunner.runMongod({
repairpath: longRepairPath,
noCleanData: true,
});
-db = m.getDB( longDBName );
-assert.commandWorked( db.runCommand( {repairDatabase:1, backupOriginalFiles: true} ) );
+db = m.getDB(longDBName);
+assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: true}));
check();
-MongoRunner.stopMongod( m.port );
+MongoRunner.stopMongod(m.port);
-//Test database name and repairPath with --repair
-resetDbpath( longRepairPath );
+// Test database name and repairPath with --repair
+resetDbpath(longRepairPath);
var returnCode = runMongoProgram("mongod",
- "--port", m.port,
+ "--port",
+ m.port,
"--repair",
"--directoryperdb",
- "--dbpath", dbpath,
- "--repairpath", longRepairPath);
+ "--dbpath",
+ dbpath,
+ "--repairpath",
+ longRepairPath);
assert.eq(returnCode, 0);
m = MongoRunner.runMongod({
port: m.port,
@@ -101,17 +104,20 @@ m = MongoRunner.runMongod({
dbpath: dbpath,
noCleanData: true,
});
-db = m.getDB( longDBName );
+db = m.getDB(longDBName);
check();
-MongoRunner.stopMongod( m.port );
+MongoRunner.stopMongod(m.port);
-resetDbpath( repairpath );
+resetDbpath(repairpath);
returnCode = runMongoProgram("mongod",
- "--port", m.port,
+ "--port",
+ m.port,
"--repair",
"--directoryperdb",
- "--dbpath", dbpath,
- "--repairpath", repairpath);
+ "--dbpath",
+ dbpath,
+ "--repairpath",
+ repairpath);
assert.eq(returnCode, 0);
m = MongoRunner.runMongod({
port: m.port,
@@ -120,16 +126,13 @@ m = MongoRunner.runMongod({
repairpath: repairpath,
noCleanData: true,
});
-db = m.getDB( baseName );
+db = m.getDB(baseName);
check();
-MongoRunner.stopMongod( m.port );
+MongoRunner.stopMongod(m.port);
-resetDbpath( repairpath );
-returnCode = runMongoProgram("mongod",
- "--port", m.port,
- "--repair",
- "--directoryperdb",
- "--dbpath", dbpath);
+resetDbpath(repairpath);
+returnCode =
+ runMongoProgram("mongod", "--port", m.port, "--repair", "--directoryperdb", "--dbpath", dbpath);
assert.eq(returnCode, 0);
m = MongoRunner.runMongod({
port: m.port,
@@ -137,5 +140,5 @@ m = MongoRunner.runMongod({
dbpath: dbpath,
noCleanData: true,
});
-db = m.getDB( baseName );
+db = m.getDB(baseName);
check();
diff --git a/jstests/disk/repair3.js b/jstests/disk/repair3.js
index 57e73477aed..83fbbf7b9f3 100644
--- a/jstests/disk/repair3.js
+++ b/jstests/disk/repair3.js
@@ -5,23 +5,23 @@ var repairbase = MongoRunner.dataDir + "/repairpartitiontest";
var repairpath = repairbase + "/dir";
doIt = false;
-files = listFiles( MongoRunner.dataDir );
-for ( i in files ) {
- if ( files[ i ].name == repairbase ) {
+files = listFiles(MongoRunner.dataDir);
+for (i in files) {
+ if (files[i].name == repairbase) {
doIt = true;
}
}
-if ( !doIt ) {
- print( "path " + repairpath + " missing, skipping repair3 test" );
+if (!doIt) {
+ print("path " + repairpath + " missing, skipping repair3 test");
doIt = false;
}
if (doIt) {
var dbpath = MongoRunner.dataPath + baseName + "/";
- resetDbpath( dbpath );
- resetDbpath( repairpath );
+ resetDbpath(dbpath);
+ resetDbpath(repairpath);
var m = MongoRunner.runMongod({
nssize: 8,
@@ -30,25 +30,36 @@ if (doIt) {
dbpath: dbpath,
repairpath: repairpath,
});
- db = m.getDB( baseName );
- db[ baseName ].save( {} );
- assert.commandWorked( db.runCommand( {repairDatabase:1, backupOriginalFiles:false} ) );
+ db = m.getDB(baseName);
+ db[baseName].save({});
+ assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: false}));
function check() {
- files = listFiles( dbpath );
- for( f in files ) {
- assert( ! new RegExp( "^" + dbpath + "backup_" ).test( files[ f ].name ), "backup dir in dbpath" );
+ files = listFiles(dbpath);
+ for (f in files) {
+ assert(!new RegExp("^" + dbpath + "backup_").test(files[f].name),
+ "backup dir in dbpath");
}
-
- assert.eq.automsg( "1", "db[ baseName ].count()" );
+
+ assert.eq.automsg("1", "db[ baseName ].count()");
}
check();
- MongoRunner.stopMongod( m.port );
+ MongoRunner.stopMongod(m.port);
- resetDbpath( repairpath );
- var rc = runMongoProgram("mongod", "--nssize", "8", "--noprealloc", "--smallfiles", "--repair",
- "--port", m.port, "--dbpath", dbpath, "--repairpath", repairpath);
- assert.eq.automsg( "0", "rc" );
+ resetDbpath(repairpath);
+ var rc = runMongoProgram("mongod",
+ "--nssize",
+ "8",
+ "--noprealloc",
+ "--smallfiles",
+ "--repair",
+ "--port",
+ m.port,
+ "--dbpath",
+ dbpath,
+ "--repairpath",
+ repairpath);
+ assert.eq.automsg("0", "rc");
m = MongoRunner.runMongod({
nssize: 8,
noprealloc: "",
@@ -57,7 +68,7 @@ if (doIt) {
dbpath: dbpath,
repairpath: repairpath,
});
- db = m.getDB( baseName );
+ db = m.getDB(baseName);
check();
- MongoRunner.stopMongod( m.port );
+ MongoRunner.stopMongod(m.port);
}
diff --git a/jstests/disk/repair4.js b/jstests/disk/repair4.js
index ff99389a380..f38f9036e47 100644
--- a/jstests/disk/repair4.js
+++ b/jstests/disk/repair4.js
@@ -5,24 +5,24 @@ var smallbase = MongoRunner.dataDir + "/repairpartitiontest";
var smallpath = smallbase + "/dir";
doIt = false;
-files = listFiles( MongoRunner.dataDir );
-for ( i in files ) {
- if ( files[ i ].name == smallbase ) {
+files = listFiles(MongoRunner.dataDir);
+for (i in files) {
+ if (files[i].name == smallbase) {
doIt = true;
}
}
-if ( !doIt ) {
- print( "path " + smallpath + " missing, skipping repair4 test" );
+if (!doIt) {
+ print("path " + smallpath + " missing, skipping repair4 test");
doIt = false;
}
-if ( doIt ) {
+if (doIt) {
var repairpath = MongoRunner.dataPath + baseName + "/";
- resetDbpath( smallpath );
- resetDbpath( repairpath );
-
+ resetDbpath(smallpath);
+ resetDbpath(repairpath);
+
var m = MongoRunner.runMongod({
nssize: "8",
noprealloc: "",
@@ -33,19 +33,19 @@ if ( doIt ) {
bind_ip: "127.0.0.1",
});
- db = m.getDB( baseName );
- db[ baseName ].save( {} );
- assert.commandWorked( db.runCommand( {repairDatabase:1, backupOriginalFiles:true} ) );
+ db = m.getDB(baseName);
+ db[baseName].save({});
+ assert.commandWorked(db.runCommand({repairDatabase: 1, backupOriginalFiles: true}));
function check() {
- files = listFiles( smallpath );
- for( f in files ) {
- assert( ! new RegExp( "^" + smallpath + "backup_" ).test( files[ f ].name ), "backup dir in dbpath" );
+ files = listFiles(smallpath);
+ for (f in files) {
+ assert(!new RegExp("^" + smallpath + "backup_").test(files[f].name),
+ "backup dir in dbpath");
}
-
- assert.eq.automsg( "1", "db[ baseName ].count()" );
+
+ assert.eq.automsg("1", "db[ baseName ].count()");
}
-
- check();
- MongoRunner.stopMongod( port );
+ check();
+ MongoRunner.stopMongod(port);
}
diff --git a/jstests/disk/repair5.js b/jstests/disk/repair5.js
index 2dc9d7798e9..b8663c55e7c 100644
--- a/jstests/disk/repair5.js
+++ b/jstests/disk/repair5.js
@@ -6,34 +6,36 @@
var dbpath = MongoRunner.dataPath + baseName + "/";
var repairpath = dbpath + "repairDir/";
- resetDbpath( dbpath );
- resetDbpath( repairpath );
+ resetDbpath(dbpath);
+ resetDbpath(repairpath);
- var m = MongoRunner.runMongod({dbpath: dbpath,
- repairpath: repairpath,
- restart:true,
- cleanData: false}); // So that the repair dir won't get removed
+ var m = MongoRunner.runMongod({
+ dbpath: dbpath,
+ repairpath: repairpath,
+ restart: true,
+ cleanData: false
+ }); // So that the repair dir won't get removed
- var dbTest = m.getDB( baseName );
+ var dbTest = m.getDB(baseName);
// Insert a lot of data so repair runs a long time
var bulk = dbTest[baseName].initializeUnorderedBulkOp();
- var big = new Array( 5000 ).toString();
- for(var i = 0; i < 20000; ++i) {
- bulk.insert( {i:i,b:big} );
+ var big = new Array(5000).toString();
+ for (var i = 0; i < 20000; ++i) {
+ bulk.insert({i: i, b: big});
}
assert.writeOK(bulk.execute());
function killRepair() {
- while( 1 ) {
+ while (1) {
var p = db.currentOp().inprog;
- for( var i in p ) {
- var o = p[ i ];
- printjson( o );
+ for (var i in p) {
+ var o = p[i];
+ printjson(o);
// Find the active 'repairDatabase' op and kill it.
- if ( o.active && o.query && o.query.repairDatabase ) {
- db.killOp( o.opid );
+ if (o.active && o.query && o.query.repairDatabase) {
+ db.killOp(o.opid);
return;
}
}
@@ -41,15 +43,15 @@
}
var s = startParallelShell(killRepair.toString() + "; killRepair();", m.port);
- sleep(100); // make sure shell is actually running, lame
+ sleep(100); // make sure shell is actually running, lame
// Repair should fail due to killOp.
- assert.commandFailed( dbTest.runCommand( {repairDatabase:1, backupOriginalFiles:true} ) );
+ assert.commandFailed(dbTest.runCommand({repairDatabase: 1, backupOriginalFiles: true}));
s();
- assert.eq( 20000, dbTest[ baseName ].find().itcount() );
- assert( dbTest[ baseName ].validate().valid );
+ assert.eq(20000, dbTest[baseName].find().itcount());
+ assert(dbTest[baseName].validate().valid);
MongoRunner.stopMongod(m);
})();
diff --git a/jstests/disk/too_many_fds.js b/jstests/disk/too_many_fds.js
index edb232c2f70..0397a29e08b 100644
--- a/jstests/disk/too_many_fds.js
+++ b/jstests/disk/too_many_fds.js
@@ -4,26 +4,25 @@
function doTest() {
var baseName = "jstests_disk_too_many_fds";
- var m = MongoRunner.runMongod( { smallfiles: "" , nssize: 1 } );
+ var m = MongoRunner.runMongod({smallfiles: "", nssize: 1});
// Make 1026 collections, each in a separate database. On some storage engines, this may cause
// 1026 files to be created.
for (var i = 1; i < 1026; ++i) {
var db = m.getDB("db" + i);
var coll = db.getCollection("coll" + i);
- assert.writeOK(coll.insert( {} ));
+ assert.writeOK(coll.insert({}));
}
- MongoRunner.stopMongod( m );
+ MongoRunner.stopMongod(m);
// Ensure we can still start up with that many files.
- var m2 = MongoRunner.runMongod( { dbpath: m.dbpath, smallfiles: "" , nssize: 1,
- restart: true, cleanData: false } );
+ var m2 = MongoRunner.runMongod(
+ {dbpath: m.dbpath, smallfiles: "", nssize: 1, restart: true, cleanData: false});
assert.eq(1, m2.getDB("db1025").getCollection("coll1025").count());
}
if (db.serverBuildInfo().bits == 64) {
doTest();
-}
-else {
+} else {
print("Skipping. Only run this test on 64bit builds");
}
diff --git a/jstests/dur/a_quick.js b/jstests/dur/a_quick.js
index 5770b528e4d..5e1ff17a758 100755..100644
--- a/jstests/dur/a_quick.js
+++ b/jstests/dur/a_quick.js
@@ -7,11 +7,11 @@
testname = "a_quick";
tst = {};
-tst.log = function (optional_msg) {
+tst.log = function(optional_msg) {
print("\n\nstep " + ++this._step + " " + (optional_msg || ""));
};
-tst.success = function () {
+tst.success = function() {
print(testname + " SUCCESS");
};
@@ -36,20 +36,22 @@ tst.diff = function(a, b) {
};
print(testname + " BEGIN");
tst._step = 0;
-
-function checkNoJournalFiles(path, pass) {
- var files = listFiles(path);
- if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
- if (pass == null) {
- // wait a bit longer for mongod to potentially finish if it is still running.
- sleep(10000);
- return checkNoJournalFiles(path, 1);
- }
- print("\n\n\n");
- print("FAIL path:" + path);
- print("unexpected files:");
- printjson(files);
- assert(false, "FAIL a journal/lsn file is present which is unexpected");
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function(f) {
+ return f.name.indexOf("prealloc") < 0;
+ })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
}
}
@@ -62,7 +64,7 @@ tst.log("start mongod without dur");
var conn = MongoRunner.runMongod({dbpath: path1, nojournal: ""});
tst.log("without dur work");
var d = conn.getDB("test");
-assert.writeOK(d.foo.insert({ _id: 123 }));
+assert.writeOK(d.foo.insert({_id: 123}));
tst.log("stop without dur");
MongoRunner.stopMongod(conn);
@@ -71,9 +73,9 @@ tst.log("start mongod with dur");
conn = MongoRunner.runMongod({dbpath: path2, journal: "", journalOptions: 8});
tst.log("with dur work");
d = conn.getDB("test");
-assert.writeOK(d.foo.insert({ _id: 123 }));
+assert.writeOK(d.foo.insert({_id: 123}));
-// we could actually do getlasterror fsync:1 now, but maybe this is agood
+// we could actually do getlasterror fsync:1 now, but maybe this is agood
// as it will assure that commits happen on a timely basis. a bunch of the other dur/*js
// tests use fsync
tst.log("sleep a bit for a group commit");
@@ -81,21 +83,23 @@ sleep(8000);
// kill the process hard
tst.log("kill -9 mongod");
-MongoRunner.stopMongod(conn.port, /*signal*/9);
+MongoRunner.stopMongod(conn.port, /*signal*/ 9);
// journal file should be present, and non-empty as we killed hard
// we will force removal of a datafile to be sure we can recreate everything
-// without it being present.
-removeFile(path2 + "/test.0");
-
-// for that to work, we can't skip anything though:
+// without it being present.
+removeFile(path2 + "/test.0");
+
+// for that to work, we can't skip anything though:
removeFile(path2 + "/journal/lsn");
// with the file deleted, we MUST start from the beginning of the journal.
// thus this check to be careful
var files = listFiles(path2 + "/journal/");
-if (files.some(function (f) { return f.name.indexOf("lsn") >= 0; })) {
+if (files.some(function(f) {
+ return f.name.indexOf("lsn") >= 0;
+ })) {
print("\n\n\n");
print(path2);
printjson(files);
@@ -104,16 +108,13 @@ if (files.some(function (f) { return f.name.indexOf("lsn") >= 0; })) {
// restart and recover
tst.log("restart and recover");
-conn = MongoRunner.runMongod({restart: true,
- cleanData: false,
- dbpath: path2,
- journal: "",
- journalOptions: 9});
-tst.log("check data results");
-d = conn.getDB("test");
-
-var countOk = (d.foo.count() == 1);
-if (!countOk) {
+conn = MongoRunner.runMongod(
+ {restart: true, cleanData: false, dbpath: path2, journal: "", journalOptions: 9});
+tst.log("check data results");
+d = conn.getDB("test");
+
+var countOk = (d.foo.count() == 1);
+if (!countOk) {
print("\n\n\na_quick.js FAIL count " + d.foo.count() + " is wrong\n\n\n");
// keep going - want to see if the diff matches. if so the sleep() above was too short?
}
@@ -139,7 +140,7 @@ function showfiles() {
}
if (diff != "") {
- showfiles();
+ showfiles();
assert(diff == "", "error test.ns files differ");
}
@@ -148,8 +149,8 @@ print("diff of .0 files returns:" + diff);
if (diff != "") {
showfiles();
assert(diff == "", "error test.0 files differ");
-}
-
-assert(countOk, "a_quick.js document count after recovery was not the expected value");
+}
+
+assert(countOk, "a_quick.js document count after recovery was not the expected value");
-tst.success();
+tst.success();
diff --git a/jstests/dur/checksum.js b/jstests/dur/checksum.js
index 3914e3b84a0..0076024de66 100644
--- a/jstests/dur/checksum.js
+++ b/jstests/dur/checksum.js
@@ -11,12 +11,12 @@ if (0) {
// each insert is in it's own commit.
db.foo.insert({a: 1});
- db.runCommand({getlasterror:1, j:1});
+ db.runCommand({getlasterror: 1, j: 1});
db.foo.insert({a: 2});
- db.runCommand({getlasterror:1, j:1});
+ db.runCommand({getlasterror: 1, j: 1});
- MongoRunner.stopMongod(conn.port, /*signal*/9);
+ MongoRunner.stopMongod(conn.port, /*signal*/ 9);
jsTest.log("Journal file left at " + path + "/journal/j._0");
quit();
@@ -25,15 +25,16 @@ if (0) {
}
function startMongodWithJournal() {
- return MongoRunner.runMongod({restart: true,
- cleanData: false,
- dbpath: path,
- journal: "",
- smallfiles: "",
- journalOptions: 1 /*DurDumpJournal*/});
+ return MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: path,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 1 /*DurDumpJournal*/
+ });
}
-
jsTest.log("Starting with good.journal to make sure everything works");
resetDbpath(path);
mkdir(path + '/journal');
@@ -43,7 +44,6 @@ var db = conn.getDB('test');
assert.eq(db.foo.count(), 2);
MongoRunner.stopMongod(conn.port);
-
// dur_checksum_bad_last.journal is good.journal with the bad checksum on the last section.
jsTest.log("Starting with bad_last.journal");
resetDbpath(path);
@@ -51,10 +51,9 @@ mkdir(path + '/journal');
copyFile("jstests/libs/dur_checksum_bad_last.journal", path + "/journal/j._0");
conn = startMongodWithJournal();
var db = conn.getDB('test');
-assert.eq(db.foo.count(), 1); // 2nd insert "never happened"
+assert.eq(db.foo.count(), 1); // 2nd insert "never happened"
MongoRunner.stopMongod(conn.port);
-
// dur_checksum_bad_first.journal is good.journal with the bad checksum on the prior section.
// This means there is a good commit after the bad one. We currently ignore this, but a future
// version of the server may be able to detect this case.
@@ -64,7 +63,7 @@ mkdir(path + '/journal');
copyFile("jstests/libs/dur_checksum_bad_first.journal", path + "/journal/j._0");
conn = startMongodWithJournal();
var db = conn.getDB('test');
-assert.eq(db.foo.count(), 0); // Neither insert happened.
+assert.eq(db.foo.count(), 0); // Neither insert happened.
MongoRunner.stopMongod(conn.port);
// If we detect an error in a non-final journal file, that is considered an error.
@@ -75,12 +74,16 @@ copyFile("jstests/libs/dur_checksum_bad_first.journal", path + "/journal/j._0");
copyFile("jstests/libs/dur_checksum_good.journal", path + "/journal/j._1");
exitCode = runMongoProgram("mongod",
- "--port", allocatePort(),
- "--dbpath", path,
+ "--port",
+ allocatePort(),
+ "--dbpath",
+ path,
"--journal",
"--smallfiles",
- "--journalOptions", 1 /*DurDumpJournal*/
- + 2 /*DurScanOnly*/);
+ "--journalOptions",
+ 1 /*DurDumpJournal*/
+ +
+ 2 /*DurScanOnly*/);
assert.eq(exitCode, 100 /*EXIT_UNCAUGHT*/);
diff --git a/jstests/dur/closeall.js b/jstests/dur/closeall.js
index 9e442e16498..f20449a5ef1 100644
--- a/jstests/dur/closeall.js
+++ b/jstests/dur/closeall.js
@@ -5,41 +5,39 @@ function f(variant, quickCommits, paranoid) {
var ourdb = "closealltest";
print("closeall.js start mongod variant:" + variant + "." + quickCommits + "." + paranoid);
- var options = (paranoid==1 ? 8 : 0); // 8 is DurParanoid
+ var options = (paranoid == 1 ? 8 : 0); // 8 is DurParanoid
print("closeall.js --journalOptions " + options);
var N = 1000;
- if (options)
+ if (options)
N = 300;
- // use replication to exercise that code too with a close, and also to test local.sources with a close
- var conn = MongoRunner.runMongod({journal: "",
- journalOptions: options + "",
- master: "",
- oplogSize: 64});
- var connSlave = MongoRunner.runMongod({journal: "",
- journalOptions: options + "",
- slave: "",
- source: "localhost:" + conn.port});
+ // use replication to exercise that code too with a close, and also to test local.sources with a
+ // close
+ var conn = MongoRunner.runMongod(
+ {journal: "", journalOptions: options + "", master: "", oplogSize: 64});
+ var connSlave = MongoRunner.runMongod(
+ {journal: "", journalOptions: options + "", slave: "", source: "localhost:" + conn.port});
var slave = connSlave.getDB(ourdb);
// we'll use two connections to make a little parallelism
var db1 = conn.getDB(ourdb);
var db2 = new Mongo(db1.getMongo().host).getDB(ourdb);
- if( quickCommits ) {
+ if (quickCommits) {
print("closeall.js QuickCommits variant (using a small syncdelay)");
- assert( db2.adminCommand({setParameter:1, syncdelay:5}).ok );
+ assert(db2.adminCommand({setParameter: 1, syncdelay: 5}).ok);
}
print("closeall.js run test");
- print("wait for initial sync to finish"); // SERVER-4852
- assert.writeOK(db1.foo.insert({}, { writeConcern: { w: 2 }}));
- assert.writeOK(db1.foo.remove({}, { writeConcern: { w: 2 }}));
+ print("wait for initial sync to finish"); // SERVER-4852
+ assert.writeOK(db1.foo.insert({}, {writeConcern: {w: 2}}));
+ assert.writeOK(db1.foo.remove({}, {writeConcern: {w: 2}}));
print("initial sync done");
var writeOps = startParallelShell('var coll = db.getSiblingDB("' + ourdb + '").foo; \
- for( var i = 0; i < ' + N + '; i++ ) { \
+ for( var i = 0; i < ' +
+ N + '; i++ ) { \
var bulk = coll.initializeUnorderedBulkOp(); \
bulk.insert({ x: 1 }); \
if ( i % 7 == 0 ) \
@@ -50,35 +48,35 @@ function f(variant, quickCommits, paranoid) {
if( i == 800 ) \
coll.ensureIndex({ x: 1 }); \
assert.writeOK(bulk.execute()); \
- }', conn.port);
+ }',
+ conn.port);
- for( var i = 0; i < N; i++ ) {
+ for (var i = 0; i < N; i++) {
var res = null;
try {
- if( variant == 1 )
+ if (variant == 1)
sleep(0);
- else if( variant == 2 )
+ else if (variant == 2)
sleep(1);
- else if( variant == 3 && i % 10 == 0 )
+ else if (variant == 3 && i % 10 == 0)
print(i);
res = db2.dropDatabase();
- }
- catch (e) {
- print("\n\n\nFAIL closeall.js dropDatabase command invocation threw an exception. i:" + i);
+ } catch (e) {
+ print("\n\n\nFAIL closeall.js dropDatabase command invocation threw an exception. i:" +
+ i);
try {
print("getlasterror:");
printjson(db2.getLastErrorObj());
print("trying one more dropDatabase:");
res = db2.dropDatabase();
printjson(res);
- }
- catch (e) {
+ } catch (e) {
print("got another exception : " + e);
}
print("\n\n\n");
throw e;
}
- assert( res.ok, "dropDatabase res.ok=false");
+ assert(res.ok, "dropDatabase res.ok=false");
}
writeOps();
@@ -93,13 +91,12 @@ function f(variant, quickCommits, paranoid) {
// Skip this test on 32-bit Windows (unfixable failures in MapViewOfFileEx)
//
-if (_isWindows() && getBuildInfo().bits == 32 ) {
+if (_isWindows() && getBuildInfo().bits == 32) {
print("Skipping closeall.js on 32-bit Windows");
-}
-else {
- for (var variant=0; variant < 4; variant++){
- for (var quickCommits=0; quickCommits <= 1; quickCommits++){ // false then true
- for (var paranoid=0; paranoid <= 1; paranoid++){ // false then true
+} else {
+ for (var variant = 0; variant < 4; variant++) {
+ for (var quickCommits = 0; quickCommits <= 1; quickCommits++) { // false then true
+ for (var paranoid = 0; paranoid <= 1; paranoid++) { // false then true
f(variant, quickCommits, paranoid);
sleep(500);
}
diff --git a/jstests/dur/diskfull.js b/jstests/dur/diskfull.js
index f8204462a93..628db20bd92 100644
--- a/jstests/dur/diskfull.js
+++ b/jstests/dur/diskfull.js
@@ -1,4 +1,4 @@
-/** Test running out of disk space with durability enabled.
+/** Test running out of disk space with durability enabled.
To set up the test, it's required to set up a small partition something like the following:
sudo umount /data/db/diskfulltest/
rm -rf /data/db/diskfulltest
@@ -13,21 +13,23 @@ startPath = MongoRunner.dataDir + "/diskfulltest";
recoverPath = MongoRunner.dataDir + "/dur_diskfull";
doIt = false;
-files = listFiles( MongoRunner.dataDir );
-for ( i in files ) {
- if ( files[ i ].name == startPath ) {
+files = listFiles(MongoRunner.dataDir);
+for (i in files) {
+ if (files[i].name == startPath) {
doIt = true;
}
}
-if ( !doIt ) {
- print( "path " + startPath + " missing, skipping diskfull test" );
+if (!doIt) {
+ print("path " + startPath + " missing, skipping diskfull test");
doIt = false;
}
function checkNoJournalFiles(path, pass) {
var files = listFiles(path);
- if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (files.some(function(f) {
+ return f.name.indexOf("prealloc") < 0;
+ })) {
if (pass == null) {
// wait a bit longer for mongod to potentially finish if it is still running.
sleep(10000);
@@ -43,94 +45,100 @@ function checkNoJournalFiles(path, pass) {
/** Clear dbpath without removing and recreating diskfulltest directory, as resetDbpath does */
function clear() {
- files = listFiles( startPath );
- files.forEach( function( x ) { removeFile( x.name ); } );
+ files = listFiles(startPath);
+ files.forEach(function(x) {
+ removeFile(x.name);
+ });
}
function log(str) {
print();
- if(str)
- print(testname+" step " + step++ + " " + str);
+ if (str)
+ print(testname + " step " + step++ + " " + str);
else
- print(testname+" step " + step++);
+ print(testname + " step " + step++);
}
function work() {
log("work");
try {
var d = conn.getDB("test");
- var big = new Array( 5000 ).toString();
+ var big = new Array(5000).toString();
var bulk = d.foo.initializeUnorderedBulkOp();
// This part of the test depends on the partition size used in the build env
// Currently, unused, but with larger partitions insert enough documents here
// to create a second db file
- for( i = 0; i < 1; ++i ) {
- bulk.insert({ _id: i, b: big });
+ for (i = 0; i < 1; ++i) {
+ bulk.insert({_id: i, b: big});
}
assert.writeOK(bulk.execute());
- } catch ( e ) {
- print( e );
- raise( e );
+ } catch (e) {
+ print(e);
+ raise(e);
} finally {
log("endwork");
}
}
-function verify() {
+function verify() {
log("verify");
var d = conn.getDB("test");
c = d.foo.count();
v = d.foo.validate();
// not much we can guarantee about the writes, just validate when possible
- if ( c != 0 && !v.valid ) {
- printjson( v );
- print( c );
- assert( v.valid );
- assert.gt( c, 0 );
+ if (c != 0 && !v.valid) {
+ printjson(v);
+ print(c);
+ assert(v.valid);
+ assert.gt(c, 0);
}
}
function runFirstMongodAndFillDisk() {
log();
-
+
clear();
- conn = MongoRunner.runMongod({restart: true,
- cleanData: false,
- dbpath: startPath,
- journal: "",
- smallfiles: "",
- journalOptions: 8+64,
- noprealloc: ""});
-
- assert.throws( work, null, "no exception thrown when exceeding disk capacity" );
+ conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: startPath,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 8 + 64,
+ noprealloc: ""
+ });
+
+ assert.throws(work, null, "no exception thrown when exceeding disk capacity");
MongoRunner.stopMongod(conn);
- sleep(5000);
+ sleep(5000);
}
function runSecondMongdAndRecover() {
// restart and recover
log();
- conn = MongoRunner.runMongod({restart: true,
- cleanData: false,
- dbpath: startPath,
- journal: "",
- smallfiles: "",
- journalOptions: 8+64,
- noprealloc: ""});
+ conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: startPath,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 8 + 64,
+ noprealloc: ""
+ });
verify();
-
+
log("stop");
MongoRunner.stopMongod(conn);
-
+
// stopMongod seems to be asynchronous (hmmm) so we sleep here.
sleep(5000);
-
+
// at this point, after clean shutdown, there should be no journal files
log("check no journal files");
checkNoJournalFiles(startPath + "/journal/");
-
- log();
+
+ log();
}
function someWritesInJournal() {
@@ -139,20 +147,19 @@ function someWritesInJournal() {
}
function noWritesInJournal() {
- // It is too difficult to consistently trigger cases where there are no existing journal files due to lack of disk space, but
+ // It is too difficult to consistently trigger cases where there are no existing journal files
+ // due to lack of disk space, but
// if we were to test this case we would need to manualy remove the lock file.
-// removeFile( startPath + "/mongod.lock" );
+ // removeFile( startPath + "/mongod.lock" );
}
-if ( doIt ) {
-
+if (doIt) {
var testname = "dur_diskfull";
var step = 1;
var conn = null;
-
+
someWritesInJournal();
noWritesInJournal();
-
- print(testname + " SUCCESS");
+ print(testname + " SUCCESS");
}
diff --git a/jstests/dur/dropdb.js b/jstests/dur/dropdb.js
index 8c847c3a0aa..9d60f60869a 100644
--- a/jstests/dur/dropdb.js
+++ b/jstests/dur/dropdb.js
@@ -8,7 +8,9 @@ var conn = null;
function checkNoJournalFiles(path, pass) {
var files = listFiles(path);
- if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (files.some(function(f) {
+ return f.name.indexOf("prealloc") < 0;
+ })) {
if (pass == null) {
// wait a bit longer for mongod to potentially finish if it is still running.
sleep(10000);
@@ -48,22 +50,22 @@ function log(str) {
print("\n" + testname + " step " + step++);
}
-// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
// runs so we can't do a binary diff of the resulting files to check they are consistent.
function work() {
log("work (add data, drop database)");
var e = conn.getDB("teste");
- e.foo.insert({ _id: 99 });
+ e.foo.insert({_id: 99});
var d = conn.getDB("test");
- d.foo.insert({ _id: 3, x: 22 });
- d.bar.insert({ _id: 3, x: 22 });
+ d.foo.insert({_id: 3, x: 22});
+ d.bar.insert({_id: 3, x: 22});
d.dropDatabase();
// assure writes applied in case we kill -9 on return from this function
- assert.writeOK(d.foo.insert({ _id: 100 }, { writeConcern: { fsync: 1 }}));
+ assert.writeOK(d.foo.insert({_id: 100}, {writeConcern: {fsync: 1}}));
}
function verify() {
@@ -71,10 +73,10 @@ function verify() {
var d = conn.getDB("test");
var count = d.foo.count();
if (count != 1) {
- print("going to fail, test.foo.count() != 1 in verify()");
- sleep(10000); // easier to read the output this way
+ print("going to fail, test.foo.count() != 1 in verify()");
+ sleep(10000); // easier to read the output this way
print("\n\n\ndropdb.js FAIL test.foo.count() should be 1 but is : " + count);
- print(d.foo.count() + "\n\n\n");
+ print(d.foo.count() + "\n\n\n");
assert(false);
}
assert(d.foo.findOne()._id == 100, "100");
@@ -86,11 +88,11 @@ function verify() {
var testecount = teste.foo.count();
if (testecount != 1) {
print("going to fail, teste.foo.count() != 1 in verify()");
- sleep(10000); // easier to read the output this way
+ sleep(10000); // easier to read the output this way
print("\n\n\ndropdb.js FAIL teste.foo.count() should be 1 but is : " + testecount);
print("\n\n\n");
assert(false);
- }
+ }
print("teste.foo.count() = " + teste.foo.count());
assert(teste.foo.findOne()._id == 99, "teste");
}
@@ -123,7 +125,7 @@ verify();
// kill the process hard
log("kill 9");
-MongoRunner.stopMongod(conn.port, /*signal*/9);
+MongoRunner.stopMongod(conn.port, /*signal*/ 9);
// journal file should be present, and non-empty as we killed hard
@@ -133,12 +135,14 @@ removeFile(path2 + "/test.0");
removeFile(path2 + "/lsn");
log("restart and recover");
-conn = MongoRunner.runMongod({restart: true,
- cleanData: false,
- dbpath: path2,
- journal: "",
- smallfiles: "",
- journalOptions: 9});
+conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: path2,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 9
+});
log("verify after recovery");
verify();
@@ -170,4 +174,3 @@ assert(diff == "", "error test.0 files differ");
log("check data matches done");
print(testname + " SUCCESS");
-
diff --git a/jstests/dur/dur1.js b/jstests/dur/dur1.js
index a4cf88cf22e..568ba4ada55 100755..100644
--- a/jstests/dur/dur1.js
+++ b/jstests/dur/dur1.js
@@ -1,27 +1,29 @@
-/*
+/*
test durability
*/
var debugging = false;
var testname = "dur1";
var step = 1;
-var conn = null;
-
-function checkNoJournalFiles(path, pass) {
- var files = listFiles(path);
- if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
- if (pass == null) {
- // wait a bit longer for mongod to potentially finish if it is still running.
- sleep(10000);
- return checkNoJournalFiles(path, 1);
- }
- print("\n\n\n");
- print("FAIL path:" + path);
- print("unexpected files:");
- printjson(files);
- assert(false, "FAIL a journal/lsn file is present which is unexpected");
- }
-}
+var conn = null;
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function(f) {
+ return f.name.indexOf("prealloc") < 0;
+ })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
function runDiff(a, b) {
function reSlash(s) {
@@ -44,41 +46,42 @@ function runDiff(a, b) {
function log(str) {
print();
- if(str)
- print(testname+" step " + step++ + " " + str);
+ if (str)
+ print(testname + " step " + step++ + " " + str);
else
- print(testname+" step " + step++);
+ print(testname + " step " + step++);
}
-// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
// runs so we can't do a binary diff of the resulting files to check they are consistent.
function work() {
log("work");
var d = conn.getDB("test");
- d.foo.insert({ _id: 3, x: 22 });
- d.foo.insert({ _id: 4, x: 22 });
- d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
- d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] });
- d.a.update({ _id: 4 }, { $inc: { x: 1} });
+ d.foo.insert({_id: 3, x: 22});
+ d.foo.insert({_id: 4, x: 22});
+ d.a.insert({_id: 3, x: 22, y: [1, 2, 3]});
+ d.a.insert({_id: 4, x: 22, y: [1, 2, 3]});
+ d.a.update({_id: 4}, {$inc: {x: 1}});
- // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually:
- d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1" });
+ // try building an index. however, be careful as object id's in system.indexes would vary, so
+ // we do it manually:
+ d.system.indexes.insert({_id: 99, ns: "test.a", key: {x: 1}, name: "x_1"});
- log("endwork");
+ log("endwork");
return d;
}
-function verify() {
- log("verify test.foo.count == 2");
- var d = conn.getDB("test");
- var ct = d.foo.count();
- if (ct != 2) {
- print("\n\n\nFAIL dur1.js count is wrong in verify(): " + ct + "\n\n\n");
- assert(ct == 2);
+function verify() {
+ log("verify test.foo.count == 2");
+ var d = conn.getDB("test");
+ var ct = d.foo.count();
+ if (ct != 2) {
+ print("\n\n\nFAIL dur1.js count is wrong in verify(): " + ct + "\n\n\n");
+ assert(ct == 2);
}
}
-if( debugging ) {
+if (debugging) {
// mongod already running in debugger
conn = db.getMongo();
work();
@@ -89,8 +92,8 @@ if( debugging ) {
log();
// directories
-var path1 = MongoRunner.dataPath + testname+"nodur";
-var path2 = MongoRunner.dataPath + testname+"dur";
+var path1 = MongoRunner.dataPath + testname + "nodur";
+var path2 = MongoRunner.dataPath + testname + "dur";
// non-durable version
log("run mongod without journaling");
@@ -104,22 +107,24 @@ conn = MongoRunner.runMongod({dbpath: path2, journal: "", smallfiles: "", journa
work();
// wait for group commit.
-printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
-
-// kill the process hard
+printjson(conn.getDB('admin').runCommand({getlasterror: 1, fsync: 1}));
+
+// kill the process hard
log("kill 9");
-MongoRunner.stopMongod(conn.port, /*signal*/9);
+MongoRunner.stopMongod(conn.port, /*signal*/ 9);
// journal file should be present, and non-empty as we killed hard
// restart and recover
log("restart mongod --journal and recover");
-conn = MongoRunner.runMongod({restart: true,
- cleanData: false,
- dbpath: path2,
- journal: "",
- smallfiles: "",
- journalOptions: 8});
+conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: path2,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 8
+});
verify();
log("stop mongod");
@@ -151,4 +156,3 @@ assert(diff == "", "error test.0 files differ");
log("check data matches done");
print(testname + " SUCCESS");
-
diff --git a/jstests/dur/dur1_tool.js b/jstests/dur/dur1_tool.js
index f0c2916012d..e96e1f20c6d 100755..100644
--- a/jstests/dur/dur1_tool.js
+++ b/jstests/dur/dur1_tool.js
@@ -1,27 +1,29 @@
-/*
+/*
test durability option with tools (same a dur1.js but use mongorestore to do repair)
*/
var debugging = false;
var testname = "dur1_tool";
var step = 1;
-var conn = null;
-
-function checkNoJournalFiles(path, pass) {
- var files = listFiles(path);
- if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
- if (pass == null) {
- // wait a bit longer for mongod to potentially finish if it is still running.
- sleep(10000);
- return checkNoJournalFiles(path, 1);
- }
- print("\n\n\n");
- print("FAIL path:" + path);
- print("unexpected files:");
- printjson(files);
- assert(false, "FAIL a journal/lsn file is present which is unexpected");
- }
-}
+var conn = null;
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function(f) {
+ return f.name.indexOf("prealloc") < 0;
+ })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
function runDiff(a, b) {
function reSlash(s) {
@@ -44,40 +46,41 @@ function runDiff(a, b) {
function log(str) {
print();
- if(str)
- print(testname+" step " + step++ + " " + str);
+ if (str)
+ print(testname + " step " + step++ + " " + str);
else
- print(testname+" step " + step++);
+ print(testname + " step " + step++);
}
-// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
// runs so we can't do a binary diff of the resulting files to check they are consistent.
function work() {
log("work");
var d = conn.getDB("test");
- d.foo.insert({ _id: 3, x: 22 });
- d.foo.insert({ _id: 4, x: 22 });
- d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
- d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] });
- d.a.update({ _id: 4 }, { $inc: { x: 1} });
-
- // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually:
- d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1" });
- log("endwork");
+ d.foo.insert({_id: 3, x: 22});
+ d.foo.insert({_id: 4, x: 22});
+ d.a.insert({_id: 3, x: 22, y: [1, 2, 3]});
+ d.a.insert({_id: 4, x: 22, y: [1, 2, 3]});
+ d.a.update({_id: 4}, {$inc: {x: 1}});
+
+ // try building an index. however, be careful as object id's in system.indexes would vary, so
+ // we do it manually:
+ d.system.indexes.insert({_id: 99, ns: "test.a", key: {x: 1}, name: "x_1"});
+ log("endwork");
return d;
}
-function verify() {
- log("verify test.foo.count == 2");
- var d = conn.getDB("test");
- var ct = d.foo.count();
- if (ct != 2) {
- print("\n\n\nFAIL dur1.js count is wrong in verify(): " + ct + "\n\n\n");
- assert(ct == 2);
+function verify() {
+ log("verify test.foo.count == 2");
+ var d = conn.getDB("test");
+ var ct = d.foo.count();
+ if (ct != 2) {
+ print("\n\n\nFAIL dur1.js count is wrong in verify(): " + ct + "\n\n\n");
+ assert(ct == 2);
}
}
-if( debugging ) {
+if (debugging) {
// mongod already running in debugger
conn = db.getMongo();
work();
@@ -88,8 +91,8 @@ if( debugging ) {
log();
// directories
-var path1 = MongoRunner.dataPath + testname+"nodur";
-var path2 = MongoRunner.dataPath + testname+"dur";
+var path1 = MongoRunner.dataPath + testname + "nodur";
+var path2 = MongoRunner.dataPath + testname + "dur";
// non-durable version
log("run mongod without journaling");
@@ -104,24 +107,26 @@ conn = MongoRunner.runMongod({dbpath: path2, journal: "", smallfiles: "", journa
work();
// wait for group commit.
-printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
-
-// kill the process hard
+printjson(conn.getDB('admin').runCommand({getlasterror: 1, fsync: 1}));
+
+// kill the process hard
log("kill 9");
-MongoRunner.stopMongod(conn, /*signal*/9);
+MongoRunner.stopMongod(conn, /*signal*/ 9);
// journal file should be present, and non-empty as we killed hard
// mongod with --dbpath and --journal options should do a recovery pass
// empty.bson is an empty file so it won't actually insert anything
log("use mongod to recover");
-conn = MongoRunner.runMongod({restart: true,
- cleanData: false,
- dbpath: path2,
- journal: "",
- smallfiles: "",
- noprealloc: "",
- bind_ip: "127.0.0.1"});
+conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: path2,
+ journal: "",
+ smallfiles: "",
+ noprealloc: "",
+ bind_ip: "127.0.0.1"
+});
verify();
MongoRunner.stopMongod(conn);
@@ -148,4 +153,3 @@ assert(diff == "", "error test.0 files differ");
log("check data matches done");
print(testname + " SUCCESS");
-
diff --git a/jstests/dur/dur2.js b/jstests/dur/dur2.js
index 1a412c2489d..5ff6aa9840e 100644
--- a/jstests/dur/dur2.js
+++ b/jstests/dur/dur2.js
@@ -13,23 +13,25 @@ function howLongSecs() {
}
function log(str) {
- if(str)
- print("\n" + testname+" step " + step++ + " " + str);
+ if (str)
+ print("\n" + testname + " step " + step++ + " " + str);
else
- print(testname+" step " + step++);
+ print(testname + " step " + step++);
}
function verify() {
log("verify");
var d = conn.getDB("test");
var mycount = d.foo.count();
- //print("count:" + mycount);
- assert(mycount>2, "count wrong");
+ // print("count:" + mycount);
+ assert(mycount > 2, "count wrong");
}
function work() {
log("work");
- x = 'x'; while(x.length < 1024) x+=x;
+ x = 'x';
+ while (x.length < 1024)
+ x += x;
var d = conn.getDB("test");
d.foo.drop();
d.foo.insert({});
@@ -38,13 +40,13 @@ function work() {
var j = 2;
var MaxTime = 90;
while (1) {
- d.foo.insert({ _id: j, z: x });
- d.foo.update({ _id: j }, { $inc: { a: 1} });
+ d.foo.insert({_id: j, z: x});
+ d.foo.update({_id: j}, {$inc: {a: 1}});
if (j % 25 == 0)
- d.foo.remove({ _id: j });
+ d.foo.remove({_id: j});
j++;
- if( j % 3 == 0 )
- d.foo.update({ _id: j }, { $inc: { a: 1} }, true);
+ if (j % 3 == 0)
+ d.foo.update({_id: j}, {$inc: {a: 1}}, true);
if (j % 10000 == 0)
print(j);
if (howLongSecs() > MaxTime)
@@ -52,12 +54,13 @@ function work() {
}
verify();
- d.runCommand({ getLastError: 1, fsync: 1 });
+ d.runCommand({getLastError: 1, fsync: 1});
}
-if( debugging ) {
+if (debugging) {
// mongod already running in debugger
- print("DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR");
+ print(
+ "DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR");
conn = db.getMongo();
work();
sleep(30000);
@@ -65,33 +68,38 @@ if( debugging ) {
}
// directories
-var path = MongoRunner.dataPath + testname+"dur";
+var path = MongoRunner.dataPath + testname + "dur";
log("run mongod with --dur");
-conn = MongoRunner.runMongod({dbpath: path,
- journal: "",
- smallfiles: "",
- journalOptions: 8 /*DurParanoid*/,
- master: "",
- oplogSize: 64});
-work();
-
+conn = MongoRunner.runMongod({
+ dbpath: path,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 8 /*DurParanoid*/,
+ master: "",
+ oplogSize: 64
+});
+work();
+
log("kill -9");
-MongoRunner.stopMongod(conn, /*signal*/9);
-
-// journal file should be present, and non-empty as we killed hard
-assert(listFiles(path + "/journal/").length > 0, "journal directory is unexpectantly empty after kill");
+MongoRunner.stopMongod(conn, /*signal*/ 9);
+
+// journal file should be present, and non-empty as we killed hard
+assert(listFiles(path + "/journal/").length > 0,
+ "journal directory is unexpectantly empty after kill");
// restart and recover
log("restart mongod and recover");
-conn = MongoRunner.runMongod({restart: true,
- cleanData: false,
- dbpath: path,
- journal: "",
- smallfiles: "",
- journalOptions: 8,
- master: "",
- oplogSize: 64});
+conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: path,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 8,
+ master: "",
+ oplogSize: 64
+});
verify();
log("stopping mongod " + conn.port);
diff --git a/jstests/dur/indexbg.js b/jstests/dur/indexbg.js
index 60904acd917..55bd0c0098a 100644
--- a/jstests/dur/indexbg.js
+++ b/jstests/dur/indexbg.js
@@ -1,7 +1,7 @@
path = MongoRunner.dataDir + '/indexbg_dur';
var m = MongoRunner.runMongod({journal: "", smallfiles: "", journalOptions: 24});
-t = m.getDB( 'test' ).test;
-t.save( {x:1} );
-t.createIndex( {x:1}, {background:true} );
+t = m.getDB('test').test;
+t.save({x: 1});
+t.createIndex({x: 1}, {background: true});
t.count();
diff --git a/jstests/dur/indexbg2.js b/jstests/dur/indexbg2.js
index b3234af432f..5403f2d86f5 100644
--- a/jstests/dur/indexbg2.js
+++ b/jstests/dur/indexbg2.js
@@ -2,18 +2,17 @@ path = MongoRunner.dataDir + '/indexbg2_dur';
var m = MongoRunner.runMongod({journal: "", smallfiles: ""});
-t = m.getDB( 'test' ).test;
-t.createIndex( {a:1} );
-t.createIndex( {b:1} );
-t.createIndex( {x:1}, {background:true} );
-for( var i = 0; i < 1000; ++i ) {
- t.insert( {_id:i,a:'abcd',b:'bcde',x:'four score and seven years ago'} );
- t.remove( {_id:i} );
+t = m.getDB('test').test;
+t.createIndex({a: 1});
+t.createIndex({b: 1});
+t.createIndex({x: 1}, {background: true});
+for (var i = 0; i < 1000; ++i) {
+ t.insert({_id: i, a: 'abcd', b: 'bcde', x: 'four score and seven years ago'});
+ t.remove({_id: i});
}
-sleep( 1000 );
-for( var i = 1000; i < 2000; ++i ) {
- t.insert( {_id:i,a:'abcd',b:'bcde',x:'four score and seven years ago'} );
- t.remove( {_id:i} );
+sleep(1000);
+for (var i = 1000; i < 2000; ++i) {
+ t.insert({_id: i, a: 'abcd', b: 'bcde', x: 'four score and seven years ago'});
+ t.remove({_id: i});
}
-assert.writeOK(t.insert({ _id: 2000, a: 'abcd', b: 'bcde', x: 'four score and seven years ago' }));
-
+assert.writeOK(t.insert({_id: 2000, a: 'abcd', b: 'bcde', x: 'four score and seven years ago'}));
diff --git a/jstests/dur/journaling_options.js b/jstests/dur/journaling_options.js
index 820c493bce1..d0600009a70 100644
--- a/jstests/dur/journaling_options.js
+++ b/jstests/dur/journaling_options.js
@@ -6,178 +6,117 @@ function doTest() {
jsTest.log("Testing \"dur\" command line option");
var expectedResult = {
- "parsed" : {
- "storage" : {
- "journal" : {
- "enabled" : true
- }
- }
- }
+ "parsed": {"storage": {"journal": {"enabled": true}}}
};
- testGetCmdLineOptsMongod({ dur : "" }, expectedResult);
+ testGetCmdLineOptsMongod({dur: ""}, expectedResult);
jsTest.log("Testing \"nodur\" command line option");
expectedResult = {
- "parsed" : {
- "storage" : {
- "journal" : {
- "enabled" : false
- }
- }
- }
+ "parsed": {"storage": {"journal": {"enabled": false}}}
};
- testGetCmdLineOptsMongod({ nodur : "" }, expectedResult);
+ testGetCmdLineOptsMongod({nodur: ""}, expectedResult);
jsTest.log("Testing \"journal\" command line option");
expectedResult = {
- "parsed" : {
- "storage" : {
- "journal" : {
- "enabled" : true
- }
- }
- }
+ "parsed": {"storage": {"journal": {"enabled": true}}}
};
- testGetCmdLineOptsMongod({ journal : "" }, expectedResult);
+ testGetCmdLineOptsMongod({journal: ""}, expectedResult);
jsTest.log("Testing \"nojournal\" command line option");
expectedResult = {
- "parsed" : {
- "storage" : {
- "journal" : {
- "enabled" : false
- }
- }
- }
+ "parsed": {"storage": {"journal": {"enabled": false}}}
};
- testGetCmdLineOptsMongod({ nojournal : "" }, expectedResult);
+ testGetCmdLineOptsMongod({nojournal: ""}, expectedResult);
jsTest.log("Testing \"storage.journal.enabled\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/enable_journal.json",
- "storage" : {
- "journal" : {
- "enabled" : false
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/enable_journal.json",
+ "storage": {"journal": {"enabled": false}}
}
};
- testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/enable_journal.json" },
+ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_journal.json"},
expectedResult);
// Test that we preserve switches explicitly set to false in config files. See SERVER-13439.
jsTest.log("Testing explicitly disabled \"journal\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_journal.ini",
- "storage" : {
- "journal" : {
- "enabled" : false
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_journal.ini",
+ "storage": {"journal": {"enabled": false}}
}
};
- testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_journal.ini" },
+ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_journal.ini"},
expectedResult);
jsTest.log("Testing explicitly disabled \"nojournal\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_nojournal.ini",
- "storage" : {
- "journal" : {
- "enabled" : true
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_nojournal.ini",
+ "storage": {"journal": {"enabled": true}}
}
};
- testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_nojournal.ini" },
+ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_nojournal.ini"},
expectedResult);
jsTest.log("Testing explicitly disabled \"dur\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_dur.ini",
- "storage" : {
- "journal" : {
- "enabled" : false
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_dur.ini",
+ "storage": {"journal": {"enabled": false}}
}
};
- testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_dur.ini" },
- expectedResult);
+ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_dur.ini"}, expectedResult);
jsTest.log("Testing explicitly disabled \"nodur\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_nodur.ini",
- "storage" : {
- "journal" : {
- "enabled" : true
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_nodur.ini",
+ "storage": {"journal": {"enabled": true}}
}
};
- testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_nodur.ini" },
+ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_nodur.ini"},
expectedResult);
// Test that switches in old config files with no value have an implicit value of true
jsTest.log("Testing implicitly enabled \"journal\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/implicitly_enable_journal.ini",
- "storage" : {
- "journal" : {
- "enabled" : true
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/implicitly_enable_journal.ini",
+ "storage": {"journal": {"enabled": true}}
}
};
- testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/implicitly_enable_journal.ini" },
+ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/implicitly_enable_journal.ini"},
expectedResult);
jsTest.log("Testing implicitly enabled \"nojournal\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/implicitly_enable_nojournal.ini",
- "storage" : {
- "journal" : {
- "enabled" : false
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/implicitly_enable_nojournal.ini",
+ "storage": {"journal": {"enabled": false}}
}
};
- testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/implicitly_enable_nojournal.ini" },
+ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/implicitly_enable_nojournal.ini"},
expectedResult);
jsTest.log("Testing implicitly enabled \"dur\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/implicitly_enable_dur.ini",
- "storage" : {
- "journal" : {
- "enabled" : true
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/implicitly_enable_dur.ini",
+ "storage": {"journal": {"enabled": true}}
}
};
- testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/implicitly_enable_dur.ini" },
+ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/implicitly_enable_dur.ini"},
expectedResult);
jsTest.log("Testing implicitly enabled \"nodur\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/implicitly_enable_nodur.ini",
- "storage" : {
- "journal" : {
- "enabled" : false
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/implicitly_enable_nodur.ini",
+ "storage": {"journal": {"enabled": false}}
}
};
- testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/implicitly_enable_nodur.ini" },
+ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/implicitly_enable_nodur.ini"},
expectedResult);
print(baseName + " succeeded.");
diff --git a/jstests/dur/lsn.js b/jstests/dur/lsn.js
index 475263ce72a..679475ad6ea 100755..100644
--- a/jstests/dur/lsn.js
+++ b/jstests/dur/lsn.js
@@ -14,10 +14,10 @@ function howLongSecs() {
}
function log(str) {
- if(str)
- print("\n" + testname+" step " + step++ + " " + str);
+ if (str)
+ print("\n" + testname + " step " + step++ + " " + str);
else
- print(testname+" step " + step++);
+ print(testname + " step " + step++);
}
function verify() {
@@ -25,14 +25,16 @@ function verify() {
var d = conn.getDB("test");
var mycount = d.foo.count();
print("count:" + mycount);
- assert(mycount>2, "count wrong");
+ assert(mycount > 2, "count wrong");
}
-// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
// runs so we can't do a binary diff of the resulting files to check they are consistent.
function work() {
log("work");
- x = 'x'; while(x.length < 1024) x+=x;
+ x = 'x';
+ while (x.length < 1024)
+ x += x;
var d = conn.getDB("test");
d.foo.drop();
d.foo.insert({});
@@ -45,13 +47,13 @@ function work() {
MaxTime = 90;
}
while (1) {
- d.foo.insert({ _id: j, z: x });
- d.foo.update({ _id: j }, { $inc: { a: 1} });
+ d.foo.insert({_id: j, z: x});
+ d.foo.update({_id: j}, {$inc: {a: 1}});
if (j % 25 == 0)
- d.foo.remove({ _id: j });
+ d.foo.remove({_id: j});
j++;
- if( j % 3 == 0 )
- d.foo.update({ _id: j }, { $inc: { a: 1} }, true);
+ if (j % 3 == 0)
+ d.foo.update({_id: j}, {$inc: {a: 1}}, true);
if (j % 10000 == 0)
print(j);
if (howLongSecs() > MaxTime)
@@ -59,12 +61,13 @@ function work() {
}
verify();
- d.runCommand({ getLastError: 1, fsync: 1 });
+ d.runCommand({getLastError: 1, fsync: 1});
}
-if( debugging ) {
+if (debugging) {
// mongod already running in debugger
- print("DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR");
+ print(
+ "DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR");
conn = db.getMongo();
work();
sleep(30000);
@@ -72,31 +75,35 @@ if( debugging ) {
}
// directories
-var path2 = MongoRunner.dataPath + testname+"dur";
+var path2 = MongoRunner.dataPath + testname + "dur";
// run mongod with a short --syncdelay to make LSN writing sooner
log("run mongod --journal and a short --syncdelay");
-conn = MongoRunner.runMongod({dbpath: path2,
- syncdelay: 2,
- journal: "",
- smallfiles: "",
- journalOptions: 8 /*DurParanoid*/,
- master: "",
- oplogSize: 64});
+conn = MongoRunner.runMongod({
+ dbpath: path2,
+ syncdelay: 2,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 8 /*DurParanoid*/,
+ master: "",
+ oplogSize: 64
+});
work();
log("wait a while for a sync and an lsn write");
-sleep(14); // wait for lsn write
+sleep(14); // wait for lsn write
log("kill mongod -9");
-MongoRunner.stopMongod(conn, /*signal*/9);
+MongoRunner.stopMongod(conn, /*signal*/ 9);
// journal file should be present, and non-empty as we killed hard
// check that there is an lsn file
{
var files = listFiles(path2 + "/journal/");
- assert(files.some(function (f) { return f.name.indexOf("lsn") >= 0; }),
+ assert(files.some(function(f) {
+ return f.name.indexOf("lsn") >= 0;
+ }),
"lsn.js FAIL no lsn file found after kill, yet one is expected");
}
/*assert.soon(
@@ -109,28 +116,31 @@ MongoRunner.stopMongod(conn, /*signal*/9);
// restart and recover
log("restart mongod, recover, verify");
-conn = MongoRunner.runMongod({restart:true,
- cleanData: false,
- dbpath: path2,
- journal: "",
- smallfiles: "",
- journalOptions: 24,
- master: "",
- oplogSize: 64});
+conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: path2,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 24,
+ master: "",
+ oplogSize: 64
+});
verify();
-// idea here is to verify (in a simplistic way) that we are in a good state to do further ops after recovery
+// idea here is to verify (in a simplistic way) that we are in a good state to do further ops after
+// recovery
log("add data after recovery");
{
var d = conn.getDB("test");
- d.xyz.insert({ x: 1 });
- d.xyz.insert({ x: 1 });
- d.xyz.insert({ x: 1 });
- d.xyz.update({}, { $set: { x: "aaaaaaaaaaaa"} });
+ d.xyz.insert({x: 1});
+ d.xyz.insert({x: 1});
+ d.xyz.insert({x: 1});
+ d.xyz.update({}, {$set: {x: "aaaaaaaaaaaa"}});
d.xyz.reIndex();
d.xyz.drop();
sleep(1);
- d.xyz.insert({ x: 1 });
+ d.xyz.insert({x: 1});
}
log("stop mongod " + conn.port);
diff --git a/jstests/dur/manyRestart.js b/jstests/dur/manyRestart.js
index 7bcae61f082..f4d03f60edc 100755..100644
--- a/jstests/dur/manyRestart.js
+++ b/jstests/dur/manyRestart.js
@@ -1,4 +1,4 @@
-/*
+/*
test durability
*/
@@ -9,7 +9,9 @@ var conn = null;
function checkNoJournalFiles(path, pass) {
var files = listFiles(path);
- if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
+ if (files.some(function(f) {
+ return f.name.indexOf("prealloc") < 0;
+ })) {
if (pass == null) {
// wait a bit longer for mongod to potentially finish if it is still running.
sleep(10000);
@@ -44,25 +46,26 @@ function runDiff(a, b) {
function log(str) {
print();
- if(str)
- print(testname+" step " + step++ + " " + str);
+ if (str)
+ print(testname + " step " + step++ + " " + str);
else
- print(testname+" step " + step++);
+ print(testname + " step " + step++);
}
-// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
// runs so we can't do a binary diff of the resulting files to check they are consistent.
function work() {
log("work");
var d = conn.getDB("test");
- d.foo.insert({ _id: 3, x: 22 });
- d.foo.insert({ _id: 4, x: 22 });
- d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
- d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] });
- d.a.update({ _id: 4 }, { $inc: { x: 1} });
-
- // try building an index. however, be careful as object id's in system.indexes would vary, so we do it manually:
- d.system.indexes.insert({ _id: 99, ns: "test.a", key: { x: 1 }, name: "x_1" });
+ d.foo.insert({_id: 3, x: 22});
+ d.foo.insert({_id: 4, x: 22});
+ d.a.insert({_id: 3, x: 22, y: [1, 2, 3]});
+ d.a.insert({_id: 4, x: 22, y: [1, 2, 3]});
+ d.a.update({_id: 4}, {$inc: {x: 1}});
+
+ // try building an index. however, be careful as object id's in system.indexes would vary, so
+ // we do it manually:
+ d.system.indexes.insert({_id: 99, ns: "test.a", key: {x: 1}, name: "x_1"});
log("endwork");
return d;
}
@@ -72,12 +75,12 @@ function addRows() {
log("add rows " + rand);
var d = conn.getDB("test");
for (var j = 0; j < rand; ++j) {
- d.rows.insert({a:1, b: "blah"});
+ d.rows.insert({a: 1, b: "blah"});
}
return rand;
}
-function verify() {
+function verify() {
log("verify");
var d = conn.getDB("test");
assert.eq(d.foo.count(), 2, "collection count is wrong");
@@ -90,7 +93,7 @@ function verifyRows(nrows) {
assert.eq(d.rows.count(), nrows, "collection count is wrong");
}
-if( debugging ) {
+if (debugging) {
// mongod already running in debugger
conn = db.getMongo();
work();
@@ -101,8 +104,8 @@ if( debugging ) {
log();
// directories
-var path1 = MongoRunner.dataPath + testname+"nodur";
-var path2 = MongoRunner.dataPath + testname+"dur";
+var path1 = MongoRunner.dataPath + testname + "nodur";
+var path2 = MongoRunner.dataPath + testname + "dur";
// non-durable version
log("starting first mongod");
@@ -111,38 +114,40 @@ work();
MongoRunner.stopMongod(conn);
// hail mary for windows
-// Sat Jun 11 14:07:57 Error: boost::filesystem::create_directory: Access is denied: "\data\db\manyRestartsdur" (anon):1
+// Sat Jun 11 14:07:57 Error: boost::filesystem::create_directory: Access is denied:
+// "\data\db\manyRestartsdur" (anon):1
sleep(1000);
log("starting second mongod");
conn = MongoRunner.runMongod({dbpath: path2, journal: "", smallfiles: "", journalOptions: 8});
work();
// wait for group commit.
-printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+printjson(conn.getDB('admin').runCommand({getlasterror: 1, fsync: 1}));
MongoRunner.stopMongod(conn);
sleep(5000);
for (var i = 0; i < 3; ++i) {
-
// durable version
log("restarting second mongod");
- conn = MongoRunner.runMongod({restart: true,
- cleanData: false,
- dbpath: path2,
- journal: "",
- smallfiles: "",
- journalOptions: 8});
+ conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: path2,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 8
+ });
// wait for group commit.
- printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
-
+ printjson(conn.getDB('admin').runCommand({getlasterror: 1, fsync: 1}));
+
verify();
-
+
// kill the process hard
log("hard kill");
- MongoRunner.stopMongod(conn, /*signal*/9);
-
+ MongoRunner.stopMongod(conn, /*signal*/ 9);
+
sleep(5000);
}
@@ -150,12 +155,14 @@ for (var i = 0; i < 3; ++i) {
// restart and recover
log("restart");
-conn = MongoRunner.runMongod({restart: true,
- cleanData: false,
- dbpath: path2,
- journal: "",
- smallfiles: "",
- journalOptions: 8});
+conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: path2,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 8
+});
log("verify");
verify();
log("stop");
@@ -179,27 +186,27 @@ log("check data matches done");
Random.setRandomSeed();
var nrows = 0;
for (var i = 0; i < 5; ++i) {
-
// durable version
log("restarting second mongod");
- conn = MongoRunner.runMongod({restart: true,
- cleanData: false,
- dbpath: path2,
- journal: "",
- smallfiles: "",
- journalOptions: 8});
+ conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: path2,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 8
+ });
nrows += addRows();
// wait for group commit.
- printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
-
+ printjson(conn.getDB('admin').runCommand({getlasterror: 1, fsync: 1}));
+
verifyRows(nrows);
-
+
// kill the process hard
log("hard kill");
- MongoRunner.stopMongod(conn, /*signal*/9);
-
+ MongoRunner.stopMongod(conn, /*signal*/ 9);
+
sleep(5000);
}
print(testname + " SUCCESS");
-
diff --git a/jstests/dur/oplog.js b/jstests/dur/oplog.js
index 56cbac28d71..be0bdd17948 100755..100644
--- a/jstests/dur/oplog.js
+++ b/jstests/dur/oplog.js
@@ -3,23 +3,25 @@
var debugging = false;
var testname = "oplog";
var step = 1;
-var conn = null;
-
-function checkNoJournalFiles(path, pass) {
- var files = listFiles(path);
- if (files.some(function (f) { return f.name.indexOf("prealloc") < 0; })) {
- if (pass == null) {
- // wait a bit longer for mongod to potentially finish if it is still running.
- sleep(10000);
- return checkNoJournalFiles(path, 1);
- }
- print("\n\n\n");
- print("FAIL path:" + path);
- print("unexpected files:");
- printjson(files);
- assert(false, "FAIL a journal/lsn file is present which is unexpected");
- }
-}
+var conn = null;
+
+function checkNoJournalFiles(path, pass) {
+ var files = listFiles(path);
+ if (files.some(function(f) {
+ return f.name.indexOf("prealloc") < 0;
+ })) {
+ if (pass == null) {
+ // wait a bit longer for mongod to potentially finish if it is still running.
+ sleep(10000);
+ return checkNoJournalFiles(path, 1);
+ }
+ print("\n\n\n");
+ print("FAIL path:" + path);
+ print("unexpected files:");
+ printjson(files);
+ assert(false, "FAIL a journal/lsn file is present which is unexpected");
+ }
+}
function runDiff(a, b) {
function reSlash(s) {
@@ -42,33 +44,33 @@ function runDiff(a, b) {
function log(str) {
print();
- if(str)
- print(testname+" step " + step++ + " " + str);
+ if (str)
+ print(testname + " step " + step++ + " " + str);
else
- print(testname+" step " + step++);
+ print(testname + " step " + step++);
}
function verify() {
log("verify");
var d = conn.getDB("local");
- var mycount = d.oplog.$main.find({ "o.z": 3 }).count();
+ var mycount = d.oplog.$main.find({"o.z": 3}).count();
print(mycount);
assert(mycount == 3, "oplog doesnt match");
}
-// if you do inserts here, you will want to set _id. otherwise they won't match on different
+// if you do inserts here, you will want to set _id. otherwise they won't match on different
// runs so we can't do a binary diff of the resulting files to check they are consistent.
function work() {
log("work");
var d = conn.getDB("test");
- var q = conn.getDB("testq"); // use tewo db's to exercise JDbContext a bit.
- d.foo.insert({ _id: 3, x: 22 });
- d.foo.insert({ _id: 4, x: 22 });
- q.foo.insert({ _id: 4, x: 22 });
- d.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
- q.a.insert({ _id: 3, x: 22, y: [1, 2, 3] });
- d.a.insert({ _id: 4, x: 22, y: [1, 2, 3] });
- d.a.update({ _id: 4 }, { $inc: { x: 1} });
+ var q = conn.getDB("testq"); // use tewo db's to exercise JDbContext a bit.
+ d.foo.insert({_id: 3, x: 22});
+ d.foo.insert({_id: 4, x: 22});
+ q.foo.insert({_id: 4, x: 22});
+ d.a.insert({_id: 3, x: 22, y: [1, 2, 3]});
+ q.a.insert({_id: 3, x: 22, y: [1, 2, 3]});
+ d.a.insert({_id: 4, x: 22, y: [1, 2, 3]});
+ d.a.update({_id: 4}, {$inc: {x: 1}});
// OpCode_ObjCopy fires on larger operations so make one that isn't tiny
var big = "axxxxxxxxxxxxxxb";
big = big + big;
@@ -76,19 +78,20 @@ function work() {
big = big + big;
big = big + big;
big = big + big;
- d.foo.insert({ _id: 5, q: "aaaaa", b: big, z: 3 });
- q.foo.insert({ _id: 5, q: "aaaaa", b: big, z: 3 });
- d.foo.insert({ _id: 6, q: "aaaaa", b: big, z: 3 });
- d.foo.update({ _id: 5 }, { $set: { z: 99} });
+ d.foo.insert({_id: 5, q: "aaaaa", b: big, z: 3});
+ q.foo.insert({_id: 5, q: "aaaaa", b: big, z: 3});
+ d.foo.insert({_id: 6, q: "aaaaa", b: big, z: 3});
+ d.foo.update({_id: 5}, {$set: {z: 99}});
log("endwork");
verify();
}
-if( debugging ) {
+if (debugging) {
// mongod already running in debugger
- print("DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR");
+ print(
+ "DOING DEBUG MODE BEHAVIOR AS 'db' IS DEFINED -- RUN mongo --nodb FOR REGULAR TEST BEHAVIOR");
conn = db.getMongo();
work();
sleep(30000);
@@ -98,43 +101,48 @@ if( debugging ) {
log();
// directories
-var path1 = MongoRunner.dataPath + testname+"nodur";
-var path2 = MongoRunner.dataPath + testname+"dur";
+var path1 = MongoRunner.dataPath + testname + "nodur";
+var path2 = MongoRunner.dataPath + testname + "dur";
// non-durable version
log();
-conn = MongoRunner.runMongod({dbpath: path1, nojournal: "", smallfiles: "", master: "", oplogSize: 64});
+conn = MongoRunner.runMongod(
+ {dbpath: path1, nojournal: "", smallfiles: "", master: "", oplogSize: 64});
work();
MongoRunner.stopMongod(conn);
// durable version
log();
-conn = MongoRunner.runMongod({dbpath: path2,
- journal: "",
- smallfiles: "",
- journalOptions: 8 /*DurParanoid*/,
- master: "",
- oplogSize: 64});
+conn = MongoRunner.runMongod({
+ dbpath: path2,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 8 /*DurParanoid*/,
+ master: "",
+ oplogSize: 64
+});
work();
// wait for group commit.
-printjson(conn.getDB('admin').runCommand({getlasterror:1, fsync:1}));
+printjson(conn.getDB('admin').runCommand({getlasterror: 1, fsync: 1}));
// kill the process hard
-MongoRunner.stopMongod(conn, /*signal*/9);
+MongoRunner.stopMongod(conn, /*signal*/ 9);
// journal file should be present, and non-empty as we killed hard
// restart and recover
log();
-conn = MongoRunner.runMongod({restart: true,
- cleanData: false,
- dbpath: path2,
- journal: "",
- smallfiles: "",
- journalOptions: 8,
- master: "",
- oplogSize: 64});
+conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: path2,
+ journal: "",
+ smallfiles: "",
+ journalOptions: 8,
+ master: "",
+ oplogSize: 64
+});
verify();
log("stop");
@@ -144,7 +152,7 @@ MongoRunner.stopMongod(conn);
sleep(5000);
// at this point, after clean shutdown, there should be no journal files
-log("check no journal files");
+log("check no journal files");
checkNoJournalFiles(path2 + "/journal");
log("check data matches ns");
diff --git a/jstests/fail_point/fail_point.js b/jstests/fail_point/fail_point.js
index 7a3ba663c0e..ccf5ac4c16f 100644
--- a/jstests/fail_point/fail_point.js
+++ b/jstests/fail_point/fail_point.js
@@ -27,36 +27,36 @@ var runTest = function(adminDB) {
}
};
- expectedFPState(adminDB.runCommand({ configureFailPoint: 'dummy' }), 0, {});
+ expectedFPState(adminDB.runCommand({configureFailPoint: 'dummy'}), 0, {});
// Test non-existing fail point
- assert.commandFailed(adminDB.runCommand({ configureFailPoint: 'fpNotExist',
- mode: 'alwaysOn', data: { x: 1 }}));
+ assert.commandFailed(
+ adminDB.runCommand({configureFailPoint: 'fpNotExist', mode: 'alwaysOn', data: {x: 1}}));
// Test bad mode string
- assert.commandFailed(adminDB.runCommand({ configureFailPoint: 'dummy',
- mode: 'madMode', data: { x: 1 }}));
- expectedFPState(adminDB.runCommand({ configureFailPoint: 'dummy' }), 0, {});
+ assert.commandFailed(
+ adminDB.runCommand({configureFailPoint: 'dummy', mode: 'madMode', data: {x: 1}}));
+ expectedFPState(adminDB.runCommand({configureFailPoint: 'dummy'}), 0, {});
// Test bad mode obj
- assert.commandFailed(adminDB.runCommand({ configureFailPoint: 'dummy',
- mode: { foo: 3 }, data: { x: 1 }}));
- expectedFPState(adminDB.runCommand({ configureFailPoint: 'dummy' }), 0, {});
+ assert.commandFailed(
+ adminDB.runCommand({configureFailPoint: 'dummy', mode: {foo: 3}, data: {x: 1}}));
+ expectedFPState(adminDB.runCommand({configureFailPoint: 'dummy'}), 0, {});
// Test bad mode type
- assert.commandFailed(adminDB.runCommand({ configureFailPoint: 'dummy',
- mode: true, data: { x: 1 }}));
- expectedFPState(adminDB.runCommand({ configureFailPoint: 'dummy' }), 0, {});
+ assert.commandFailed(
+ adminDB.runCommand({configureFailPoint: 'dummy', mode: true, data: {x: 1}}));
+ expectedFPState(adminDB.runCommand({configureFailPoint: 'dummy'}), 0, {});
// Test bad data type
- assert.commandFailed(adminDB.runCommand({ configureFailPoint: 'dummy',
- mode: 'alwaysOn', data: 'data'}));
- expectedFPState(adminDB.runCommand({ configureFailPoint: 'dummy' }), 0, {});
+ assert.commandFailed(
+ adminDB.runCommand({configureFailPoint: 'dummy', mode: 'alwaysOn', data: 'data'}));
+ expectedFPState(adminDB.runCommand({configureFailPoint: 'dummy'}), 0, {});
// Test good command w/ data
- assert.commandWorked(adminDB.runCommand({ configureFailPoint: 'dummy',
- mode: 'alwaysOn', data: { x: 1 }}));
- expectedFPState(adminDB.runCommand({ configureFailPoint: 'dummy' }), 1, { x: 1 });
+ assert.commandWorked(
+ adminDB.runCommand({configureFailPoint: 'dummy', mode: 'alwaysOn', data: {x: 1}}));
+ expectedFPState(adminDB.runCommand({configureFailPoint: 'dummy'}), 1, {x: 1});
};
var conn = MongoRunner.runMongod();
@@ -65,7 +65,6 @@ MongoRunner.stopMongod(conn.port);
///////////////////////////////////////////////////////////
// Test mongos
-var st = new ShardingTest({ shards: 1 });
+var st = new ShardingTest({shards: 1});
runTest(st.s.getDB('admin'));
st.stop();
-
diff --git a/jstests/gle/block2.js b/jstests/gle/block2.js
index 142d51519b2..2d185ca0426 100644
--- a/jstests/gle/block2.js
+++ b/jstests/gle/block2.js
@@ -3,56 +3,51 @@
* available at jstests/repl.
*/
-var rt = new ReplTest( "block1" );
+var rt = new ReplTest("block1");
-var m = rt.start( true );
-var s = rt.start( false );
+var m = rt.start(true);
+var s = rt.start(false);
if (m.writeMode() == 'commands') {
jsTest.log('Skipping test since commands mode is already tested in repl/');
-}
-else {
-
- function setup(){
-
- dbm = m.getDB( "foo" );
- dbs = s.getDB( "foo" );
+} else {
+ function setup() {
+ dbm = m.getDB("foo");
+ dbs = s.getDB("foo");
tm = dbm.bar;
ts = dbs.bar;
}
setup();
- function check( msg ){
- assert.eq( tm.count() , ts.count() , "check: " + msg );
+ function check(msg) {
+ assert.eq(tm.count(), ts.count(), "check: " + msg);
}
- function worked( w , wtimeout ){
- var gle = dbm.getLastError( w , wtimeout );
+ function worked(w, wtimeout) {
+ var gle = dbm.getLastError(w, wtimeout);
if (gle != null) {
printjson(gle);
}
return gle == null;
}
- check( "A" );
+ check("A");
- tm.save( { x : 1 } );
- assert( worked( 2 ) , "B" );
+ tm.save({x: 1});
+ assert(worked(2), "B");
- tm.save( { x : 2 } );
- assert( worked( 2 , 3000 ) , "C" );
+ tm.save({x: 2});
+ assert(worked(2, 3000), "C");
- rt.stop( false );
- tm.save( { x : 3 } );
- assert.eq( 3 , tm.count() , "D1" );
- assert( ! worked( 2 , 3000 ) , "D2" );
+ rt.stop(false);
+ tm.save({x: 3});
+ assert.eq(3, tm.count(), "D1");
+ assert(!worked(2, 3000), "D2");
- s = rt.start( false );
+ s = rt.start(false);
setup();
- assert( worked( 2 , 30000 ) , "E" );
-
+ assert(worked(2, 30000), "E");
}
rt.stop();
-
diff --git a/jstests/gle/core/error1.js b/jstests/gle/core/error1.js
index b29986976c4..44667867d4c 100644
--- a/jstests/gle/core/error1.js
+++ b/jstests/gle/core/error1.js
@@ -1,41 +1,40 @@
db.jstests_error1.drop();
// test 1
-db.runCommand({reseterror:1});
-assert( db.runCommand({getlasterror:1}).err == null, "A" );
-assert( db.runCommand({getpreverror:1}).err == null, "B" );
+db.runCommand({reseterror: 1});
+assert(db.runCommand({getlasterror: 1}).err == null, "A");
+assert(db.runCommand({getpreverror: 1}).err == null, "B");
db.resetError();
-assert( db.getLastError() == null, "C" );
-assert( db.getPrevError().err == null , "preverror 1" );
+assert(db.getLastError() == null, "C");
+assert(db.getPrevError().err == null, "preverror 1");
// test 2
-db.runCommand({forceerror:1});
-assert( db.runCommand({getlasterror:1}).err != null, "D" );
-assert( db.runCommand({getpreverror:1}).err != null, "E" );
+db.runCommand({forceerror: 1});
+assert(db.runCommand({getlasterror: 1}).err != null, "D");
+assert(db.runCommand({getpreverror: 1}).err != null, "E");
-
-assert( db.getLastError() != null, "F" );
-assert( db.getPrevError().err != null , "preverror 2" );
-assert( db.getPrevError().nPrev == 1, "G" );
+assert(db.getLastError() != null, "F");
+assert(db.getPrevError().err != null, "preverror 2");
+assert(db.getPrevError().nPrev == 1, "G");
db.jstests_error1.findOne();
-assert( db.runCommand({getlasterror:1}).err == null, "H" );
-assert( db.runCommand({getpreverror:1}).err != null, "I" );
-assert( db.runCommand({getpreverror:1}).nPrev == 2, "J" );
+assert(db.runCommand({getlasterror: 1}).err == null, "H");
+assert(db.runCommand({getpreverror: 1}).err != null, "I");
+assert(db.runCommand({getpreverror: 1}).nPrev == 2, "J");
db.jstests_error1.findOne();
-assert( db.runCommand({getlasterror:1}).err == null, "K" );
-assert( db.runCommand({getpreverror:1}).err != null, "L" );
-assert( db.runCommand({getpreverror:1}).nPrev == 3, "M" );
+assert(db.runCommand({getlasterror: 1}).err == null, "K");
+assert(db.runCommand({getpreverror: 1}).err != null, "L");
+assert(db.runCommand({getpreverror: 1}).nPrev == 3, "M");
db.resetError();
db.forceError();
db.jstests_error1.findOne();
-assert( db.getLastError() == null , "getLastError 5" );
-assert( db.getPrevError().err != null , "preverror 3" );
+assert(db.getLastError() == null, "getLastError 5");
+assert(db.getPrevError().err != null, "preverror 3");
// test 3
-db.runCommand({reseterror:1});
-assert( db.runCommand({getpreverror:1}).err == null, "N" );
+db.runCommand({reseterror: 1});
+assert(db.runCommand({getpreverror: 1}).err == null, "N");
diff --git a/jstests/gle/core/error3.js b/jstests/gle/core/error3.js
index 9f7f298cb5e..7067f68f8a8 100644
--- a/jstests/gle/core/error3.js
+++ b/jstests/gle/core/error3.js
@@ -1,5 +1,5 @@
-db.runCommand( "forceerror" );
-assert.eq( "forced error" , db.getLastError() );
-db.runCommand( "switchtoclienterrors" );
-assert.isnull( db.getLastError() );
+db.runCommand("forceerror");
+assert.eq("forced error", db.getLastError());
+db.runCommand("switchtoclienterrors");
+assert.isnull(db.getLastError());
diff --git a/jstests/gle/core/gle_example.js b/jstests/gle/core/gle_example.js
index 8c6e481b5a7..6096f605217 100644
--- a/jstests/gle/core/gle_example.js
+++ b/jstests/gle/core/gle_example.js
@@ -5,13 +5,13 @@
var coll = db.getCollection("gle_example");
coll.drop();
-coll.insert({ hello : "world" });
-assert.eq( null, coll.getDB().getLastError() );
+coll.insert({hello: "world"});
+assert.eq(null, coll.getDB().getLastError());
// Error on insert.
coll.drop();
-coll.insert({ _id: 1 });
-coll.insert({ _id: 1 });
+coll.insert({_id: 1});
+coll.insert({_id: 1});
var gle = db.getLastErrorObj();
assert.neq(null, gle.err);
@@ -22,8 +22,7 @@ assert.eq(null, gle.err);
// Error on upsert.
coll.drop();
-coll.insert({ _id: 1 });
-coll.update({ y: 1 }, { _id: 1 }, true);
+coll.insert({_id: 1});
+coll.update({y: 1}, {_id: 1}, true);
gle = db.getLastErrorObj();
assert.neq(null, gle.err);
-
diff --git a/jstests/gle/core/gle_shell_server5441.js b/jstests/gle/core/gle_shell_server5441.js
index dedb135f20a..e488c289299 100644
--- a/jstests/gle/core/gle_shell_server5441.js
+++ b/jstests/gle/core/gle_shell_server5441.js
@@ -12,16 +12,16 @@ function checkgle(iteration) {
assert.eq(2, gle.n, "failed on iteration " + iteration + ", getLastErrorObj()=" + tojson(gle));
}
-t.insert( { x : 1 } );
-t.insert( { x : 1 } );
-updateReturn = t.update( {} , { $inc : { x : 2 } } , false , true );
+t.insert({x: 1});
+t.insert({x: 1});
+updateReturn = t.update({}, {$inc: {x: 2}}, false, true);
-for ( i=0; i<100; i++ ) {
- checkgle(""+i);
+for (i = 0; i < 100; i++) {
+ checkgle("" + i);
}
-db.adminCommand( { replSetGetStatus : 1 , forShell : 1 } );
-shellPrintHelper( updateReturn );
+db.adminCommand({replSetGetStatus: 1, forShell: 1});
+shellPrintHelper(updateReturn);
defaultPrompt();
checkgle("'final'");
diff --git a/jstests/gle/core/remove5.js b/jstests/gle/core/remove5.js
index 6558854264f..4a9393ea202 100644
--- a/jstests/gle/core/remove5.js
+++ b/jstests/gle/core/remove5.js
@@ -2,23 +2,23 @@ f = db.jstests_remove5;
f.drop();
getLastError = function() {
- return db.runCommand( { getlasterror : 1 } );
+ return db.runCommand({getlasterror: 1});
};
-f.remove( {} );
-assert.eq( 0, getLastError().n );
-f.save( {a:1} );
-f.remove( {} );
-assert.eq( 1, getLastError().n );
-for( i = 0; i < 10; ++i ) {
- f.save( {i:i} );
+f.remove({});
+assert.eq(0, getLastError().n);
+f.save({a: 1});
+f.remove({});
+assert.eq(1, getLastError().n);
+for (i = 0; i < 10; ++i) {
+ f.save({i: i});
}
-f.remove( {} );
-assert.eq( 10, getLastError().n );
-assert.eq( 10, db.getPrevError().n );
-assert.eq( 1, db.getPrevError().nPrev );
+f.remove({});
+assert.eq(10, getLastError().n);
+assert.eq(10, db.getPrevError().n);
+assert.eq(1, db.getPrevError().nPrev);
f.findOne();
-assert.eq( 0, getLastError().n );
-assert.eq( 10, db.getPrevError().n );
-assert.eq( 2, db.getPrevError().nPrev );
+assert.eq(0, getLastError().n);
+assert.eq(10, db.getPrevError().n);
+assert.eq(2, db.getPrevError().nPrev);
diff --git a/jstests/gle/core/update4.js b/jstests/gle/core/update4.js
index 3d68dc24916..83dbf717019 100644
--- a/jstests/gle/core/update4.js
+++ b/jstests/gle/core/update4.js
@@ -2,32 +2,32 @@ f = db.jstests_update4;
f.drop();
getLastError = function() {
- ret = db.runCommand( { getlasterror : 1 } );
-// printjson( ret );
+ ret = db.runCommand({getlasterror: 1});
+ // printjson( ret );
return ret;
};
-f.save( {a:1} );
-f.update( {a:1}, {a:2} );
-assert.eq( true, getLastError().updatedExisting , "A" );
-assert.eq( 1, getLastError().n , "B" );
-f.update( {a:1}, {a:2} );
-assert.eq( false, getLastError().updatedExisting , "C" );
-assert.eq( 0, getLastError().n , "D" );
+f.save({a: 1});
+f.update({a: 1}, {a: 2});
+assert.eq(true, getLastError().updatedExisting, "A");
+assert.eq(1, getLastError().n, "B");
+f.update({a: 1}, {a: 2});
+assert.eq(false, getLastError().updatedExisting, "C");
+assert.eq(0, getLastError().n, "D");
-f.update( {a:1}, {a:1}, true );
-assert.eq( false, getLastError().updatedExisting , "E" );
-assert.eq( 1, getLastError().n , "F" );
-f.update( {a:1}, {a:1}, true );
-assert.eq( true, getLastError().updatedExisting , "G" );
-assert.eq( 1, getLastError().n , "H" );
-assert.eq( true, db.getPrevError().updatedExisting , "I" );
-assert.eq( 1, db.getPrevError().nPrev , "J" );
+f.update({a: 1}, {a: 1}, true);
+assert.eq(false, getLastError().updatedExisting, "E");
+assert.eq(1, getLastError().n, "F");
+f.update({a: 1}, {a: 1}, true);
+assert.eq(true, getLastError().updatedExisting, "G");
+assert.eq(1, getLastError().n, "H");
+assert.eq(true, db.getPrevError().updatedExisting, "I");
+assert.eq(1, db.getPrevError().nPrev, "J");
f.findOne();
-assert.eq( undefined, getLastError().updatedExisting , "K" );
-assert.eq( true, db.getPrevError().updatedExisting , "L" );
-assert.eq( 2, db.getPrevError().nPrev , "M" );
+assert.eq(undefined, getLastError().updatedExisting, "K");
+assert.eq(true, db.getPrevError().updatedExisting, "L");
+assert.eq(2, db.getPrevError().nPrev, "M");
db.forceError();
-assert.eq( undefined, getLastError().updatedExisting , "N" );
+assert.eq(undefined, getLastError().updatedExisting, "N");
diff --git a/jstests/gle/create_index_gle.js b/jstests/gle/create_index_gle.js
index 18732fb50fa..ce3dcb58c03 100644
--- a/jstests/gle/create_index_gle.js
+++ b/jstests/gle/create_index_gle.js
@@ -1,48 +1,48 @@
load('jstests/replsets/rslib.js');
-(function () {
-"use strict";
-
-var st = new ShardingTest({
- shards: {
- rs0: {
- nodes: { n0: {}, n1: { rsConfig: { priority: 0 } } },
- oplogSize: 10,
- }
- },
-});
-var replTest = st.rs0;
-
-var config = replTest.getReplSetConfig();
-// Add a delay long enough so getLastError would actually 'wait' for write concern.
-config.members[1].slaveDelay = 3;
-config.version = 2;
-
-reconfig(replTest, config, true);
-
-assert.soon(function() {
- var secConn = replTest.getSecondary();
- var config = secConn.getDB('local').system.replset.findOne();
- return config.members[1].slaveDelay == 3;
-});
+(function() {
+ "use strict";
+
+ var st = new ShardingTest({
+ shards: {
+ rs0: {
+ nodes: {n0: {}, n1: {rsConfig: {priority: 0}}},
+ oplogSize: 10,
+ }
+ },
+ });
+ var replTest = st.rs0;
+
+ var config = replTest.getReplSetConfig();
+ // Add a delay long enough so getLastError would actually 'wait' for write concern.
+ config.members[1].slaveDelay = 3;
+ config.version = 2;
-replTest.awaitSecondaryNodes();
+ reconfig(replTest, config, true);
-var testDB = st.s.getDB('test');
-testDB.adminCommand({ connPoolSync: 1 });
+ assert.soon(function() {
+ var secConn = replTest.getSecondary();
+ var config = secConn.getDB('local').system.replset.findOne();
+ return config.members[1].slaveDelay == 3;
+ });
-var secConn = replTest.getSecondary();
-var testDB2 = secConn.getDB('test');
+ replTest.awaitSecondaryNodes();
+
+ var testDB = st.s.getDB('test');
+ testDB.adminCommand({connPoolSync: 1});
+
+ var secConn = replTest.getSecondary();
+ var testDB2 = secConn.getDB('test');
-testDB.user.insert({ x: 1 });
+ testDB.user.insert({x: 1});
-testDB.user.ensureIndex({ x: 1 });
-assert.gleOK(testDB.runCommand({ getLastError: 1, w: 2 }));
+ testDB.user.ensureIndex({x: 1});
+ assert.gleOK(testDB.runCommand({getLastError: 1, w: 2}));
-var priIdx = testDB.user.getIndexes();
-var secIdx = testDB2.user.getIndexes();
+ var priIdx = testDB.user.getIndexes();
+ var secIdx = testDB2.user.getIndexes();
-assert.eq(priIdx.length, secIdx.length, 'pri: ' + tojson(priIdx) + ', sec: ' + tojson(secIdx));
+ assert.eq(priIdx.length, secIdx.length, 'pri: ' + tojson(priIdx) + ', sec: ' + tojson(secIdx));
-st.stop();
+ st.stop();
}());
diff --git a/jstests/gle/get_last_error.js b/jstests/gle/get_last_error.js
index 3b5d6368c61..cc356ebf539 100644
--- a/jstests/gle/get_last_error.js
+++ b/jstests/gle/get_last_error.js
@@ -1,17 +1,17 @@
// Check that the wtime and writtenTo fields are set or unset depending on the writeConcern used.
// First check on a replica set with different combinations of writeConcern
var name = "SERVER-9005";
-var replTest = new ReplSetTest({name: name, oplogSize: 1, nodes: 3,
- settings: {chainingAllowed: false}});
+var replTest =
+ new ReplSetTest({name: name, oplogSize: 1, nodes: 3, settings: {chainingAllowed: false}});
var nodes = replTest.startSet();
replTest.initiate();
var master = replTest.getPrimary();
var mdb = master.getDB("test");
// synchronize replication
-assert.writeOK(mdb.foo.insert({ _id: "1" }, {writeConcern: {w: 3, wtimeout:30000}}));
+assert.writeOK(mdb.foo.insert({_id: "1"}, {writeConcern: {w: 3, wtimeout: 30000}}));
-var gle = master.getDB("test").runCommand({getLastError : 1, j : true});
+var gle = master.getDB("test").runCommand({getLastError: 1, j: true});
print('Trying j=true');
printjson(gle);
if (gle.err === null) {
@@ -20,8 +20,7 @@ if (gle.err === null) {
assert.eq(gle.waited, null);
assert.eq(gle.wtime, null);
assert.eq(gle.wtimeout, null);
-}
-else {
+} else {
// Bad GLE is a permissible error here, if journaling is disabled.
assert(gle.badGLE);
assert.eq(gle.code, 2);
@@ -52,7 +51,7 @@ replTest.stop(2);
master = replTest.getPrimary();
mdb = master.getDB("test");
// do w:2 write so secondary is caught up before calling {gle w:3}.
-assert.writeOK(mdb.foo.insert({_id: "3"}, {writeConcern: {w: 2, wtimeout:30000}}));
+assert.writeOK(mdb.foo.insert({_id: "3"}, {writeConcern: {w: 2, wtimeout: 30000}}));
gle = mdb.getLastErrorObj(3, 1000);
print('Trying w=3 with 2 nodes up, 1000ms timeout.');
printjson(gle);
@@ -83,7 +82,7 @@ var mongod = MongoRunner.runMongod({});
var sdb = mongod.getDB("test");
sdb.foo.drop();
-sdb.foo.insert({ _id: "1" });
+sdb.foo.insert({_id: "1"});
gle = sdb.getLastErrorObj(1);
print('Trying standalone server with w=1.');
@@ -95,7 +94,7 @@ assert.eq(gle.wtime, null);
assert.eq(gle.waited, null);
assert.eq(gle.wtimeout, null);
-gle = sdb.runCommand({getLastError : 1, w : 2, wtimeout : 10 });
+gle = sdb.runCommand({getLastError: 1, w: 2, wtimeout: 10});
print('Trying standalone server with w=2 and 10ms timeout.');
// This is an error in 2.6
printjson(gle);
diff --git a/jstests/gle/gle_explicit_optime.js b/jstests/gle/gle_explicit_optime.js
index 7bc9e35b3a9..476409c57b4 100644
--- a/jstests/gle/gle_explicit_optime.js
+++ b/jstests/gle/gle_explicit_optime.js
@@ -5,53 +5,62 @@
// support the command.
// @tags: [requires_fsync]
-var rst = new ReplSetTest({ nodes : 2 });
+var rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();
var primary = rst.getPrimary();
var secondary = rst.getSecondary();
-var coll = primary.getCollection( "foo.bar" );
+var coll = primary.getCollection("foo.bar");
// Insert a doc and replicate it to two servers
-coll.insert({ some : "doc" });
-var gleObj = coll.getDB().getLastErrorObj( 2 ); // w : 2
-assert.eq( null, gleObj.err );
+coll.insert({some: "doc"});
+var gleObj = coll.getDB().getLastErrorObj(2); // w : 2
+assert.eq(null, gleObj.err);
var opTimeBeforeFailure = gleObj.lastOp;
// Lock the secondary
assert.commandWorked(secondary.getDB("admin").fsyncLock());
// Insert a doc and replicate it to the primary only
-coll.insert({ some : "doc" });
-gleObj = coll.getDB().getLastErrorObj( 1 ); // w : 1
-assert.eq( null, gleObj.err );
+coll.insert({some: "doc"});
+gleObj = coll.getDB().getLastErrorObj(1); // w : 1
+assert.eq(null, gleObj.err);
var opTimeAfterFailure = gleObj.lastOp;
printjson(opTimeBeforeFailure);
printjson(opTimeAfterFailure);
-printjson( primary.getDB("admin").runCommand({ replSetGetStatus : true }) );
+printjson(primary.getDB("admin").runCommand({replSetGetStatus: true}));
// Create a new connection with new client and no opTime
-var newClientConn = new Mongo( primary.host );
+var newClientConn = new Mongo(primary.host);
// New client has no set opTime, so w : 2 has no impact
-gleObj = newClientConn.getCollection( coll.toString() ).getDB().getLastErrorObj( 2 ); // w : 2
-assert.eq( null, gleObj.err );
+gleObj = newClientConn.getCollection(coll.toString()).getDB().getLastErrorObj(2); // w : 2
+assert.eq(null, gleObj.err);
// Using an explicit optime on the new client should work if the optime is earlier than the
// secondary was locked
-var gleOpTimeBefore = { getLastError : true, w : 2, wOpTime : opTimeBeforeFailure };
-gleObj = newClientConn.getCollection( coll.toString() ).getDB().runCommand( gleOpTimeBefore );
-assert.eq( null, gleObj.err );
+var gleOpTimeBefore = {
+ getLastError: true,
+ w: 2,
+ wOpTime: opTimeBeforeFailure
+};
+gleObj = newClientConn.getCollection(coll.toString()).getDB().runCommand(gleOpTimeBefore);
+assert.eq(null, gleObj.err);
// Using an explicit optime on the new client should not work if the optime is later than the
// secondary was locked
-var gleOpTimeAfter = { getLastError : true, w : 2, wtimeout : 1000, wOpTime : opTimeAfterFailure };
-gleObj = newClientConn.getCollection( coll.toString() ).getDB().runCommand( gleOpTimeAfter );
-assert.neq( null, gleObj.err );
-assert( gleObj.wtimeout );
+var gleOpTimeAfter = {
+ getLastError: true,
+ w: 2,
+ wtimeout: 1000,
+ wOpTime: opTimeAfterFailure
+};
+gleObj = newClientConn.getCollection(coll.toString()).getDB().runCommand(gleOpTimeAfter);
+assert.neq(null, gleObj.err);
+assert(gleObj.wtimeout);
jsTest.log("DONE!");
diff --git a/jstests/gle/gle_sharded_wc.js b/jstests/gle/gle_sharded_wc.js
index 71883b12c0f..ba7594588e3 100644
--- a/jstests/gle/gle_sharded_wc.js
+++ b/jstests/gle/gle_sharded_wc.js
@@ -6,129 +6,131 @@
// @tags: [SERVER-21420]
(function() {
-'use strict';
-
-// Options for a cluster with two replica set shards, the first with two nodes the second with one
-// This lets us try a number of GLE scenarios
-var options = { rs : true,
- rsOptions : { nojournal : "" },
- // Options for each replica set shard
- rs0 : { nodes : 3 },
- rs1 : { nodes : 3 } };
-
-var st = new ShardingTest({ shards: 2, other : options });
-
-var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
-var coll = mongos.getCollection( jsTestName() + ".coll" );
-var shards = config.shards.find().toArray();
-
-assert.commandWorked( admin.runCommand({ enableSharding : coll.getDB().toString() }) );
-printjson( admin.runCommand({ movePrimary : coll.getDB().toString(), to : shards[0]._id }) );
-assert.commandWorked( admin.runCommand({ shardCollection : coll.toString(), key : { _id : 1 } }) );
-assert.commandWorked( admin.runCommand({ split : coll.toString(), middle : { _id : 0 } }) );
-assert.commandWorked( admin.runCommand({ moveChunk : coll.toString(),
- find : { _id : 0 },
- to : shards[1]._id }) );
-
-st.printShardingStatus();
-
-var gle = null;
-
-//
-// No journal insert, GLE fails
-coll.remove({});
-coll.insert({ _id : 1 });
-printjson(gle = coll.getDB().runCommand({ getLastError : 1, j : true }));
-assert(!gle.ok);
-assert(gle.errmsg);
-
-//
-// Successful insert, write concern mode invalid
-coll.remove({});
-coll.insert({ _id : -1 });
-printjson(gle = coll.getDB().runCommand({ getLastError : 1, w : 'invalid' }));
-assert(!gle.ok);
-assert(!gle.err);
-assert(gle.errmsg);
-assert.eq(gle.code, 79); // UnknownReplWriteConcern - needed for backwards compatibility
-assert.eq(coll.count(), 1);
-
-//
-// Error on insert (dup key), write concern error not reported
-coll.remove({});
-coll.insert({ _id : -1 });
-coll.insert({ _id : -1 });
-printjson(gle = coll.getDB().runCommand({ getLastError : 1, w : 'invalid' }));
-assert(gle.ok);
-assert(gle.err);
-assert(gle.code);
-assert(!gle.errmsg);
-assert.eq(coll.count(), 1);
-
-//
-// Successful remove on one shard, write concern timeout on the other
-var s0Id = st.rs0.getNodeId(st.rs0.liveNodes.slaves[0]);
-st.rs0.stop(s0Id);
-coll.remove({});
-st.rs1.awaitReplication(); // To ensure the first shard won't timeout
-printjson(gle = coll.getDB().runCommand({ getLastError : 1, w : 3, wtimeout : 5 * 1000 }));
-assert(gle.ok);
-assert.eq(gle.err, 'timeout');
-assert(gle.wtimeout);
-assert(gle.shards);
-assert.eq(coll.count(), 0);
-
-//
-// Successful remove on two hosts, write concern timeout on both
-// We don't aggregate two timeouts together
-var s1Id = st.rs1.getNodeId(st.rs1.liveNodes.slaves[0]);
-st.rs1.stop(s1Id);
-// new writes to both shards to ensure that remove will do something on both of them
-coll.insert({ _id : -1 });
-coll.insert({ _id : 1 });
-
-coll.remove({});
-printjson(gle = coll.getDB().runCommand({ getLastError : 1, w : 3, wtimeout : 5 * 1000 }));
-
-assert(!gle.ok);
-assert(gle.errmsg);
-assert.eq(gle.code, 64); // WriteConcernFailed - needed for backwards compatibility
-assert(!gle.wtimeout);
-assert(gle.shards);
-assert(gle.errs);
-assert.eq(coll.count(), 0);
-
-//
-// First replica set with no primary
-//
-
-//
-// Successful bulk insert on two hosts, host changes before gle (error contacting host)
-coll.remove({});
-coll.insert([{ _id : 1 }, { _id : -1 }]);
-// Wait for write to be written to shards before shutting it down.
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
-st.rs0.stop(st.rs0.getPrimary(), true); // wait for stop
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
-// Should get an error about contacting dead host.
-assert(!gle.ok);
-assert(gle.errmsg);
-assert.eq(coll.count({ _id : 1 }), 1);
-
-//
-// Failed insert on two hosts, first replica set with no primary
-// NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
-// successful writes from.
-coll.remove({ _id : 1 });
-coll.insert([{ _id : 1 }, { _id : -1 }]);
-
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
-assert(gle.ok);
-assert(gle.err);
-assert.eq(coll.count({ _id : 1 }), 1);
-
-st.stop();
+ 'use strict';
+
+ // Options for a cluster with two replica set shards, the first with two nodes the second with
+ // one
+ // This lets us try a number of GLE scenarios
+ var options = {
+ rs: true,
+ rsOptions: {nojournal: ""},
+ // Options for each replica set shard
+ rs0: {nodes: 3},
+ rs1: {nodes: 3}
+ };
+
+ var st = new ShardingTest({shards: 2, other: options});
+
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var coll = mongos.getCollection(jsTestName() + ".coll");
+ var shards = config.shards.find().toArray();
+
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+ printjson(admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: coll.toString(), middle: {_id: 0}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll.toString(), find: {_id: 0}, to: shards[1]._id}));
+
+ st.printShardingStatus();
+
+ var gle = null;
+
+ //
+ // No journal insert, GLE fails
+ coll.remove({});
+ coll.insert({_id: 1});
+ printjson(gle = coll.getDB().runCommand({getLastError: 1, j: true}));
+ assert(!gle.ok);
+ assert(gle.errmsg);
+
+ //
+ // Successful insert, write concern mode invalid
+ coll.remove({});
+ coll.insert({_id: -1});
+ printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 'invalid'}));
+ assert(!gle.ok);
+ assert(!gle.err);
+ assert(gle.errmsg);
+ assert.eq(gle.code, 79); // UnknownReplWriteConcern - needed for backwards compatibility
+ assert.eq(coll.count(), 1);
+
+ //
+ // Error on insert (dup key), write concern error not reported
+ coll.remove({});
+ coll.insert({_id: -1});
+ coll.insert({_id: -1});
+ printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 'invalid'}));
+ assert(gle.ok);
+ assert(gle.err);
+ assert(gle.code);
+ assert(!gle.errmsg);
+ assert.eq(coll.count(), 1);
+
+ //
+ // Successful remove on one shard, write concern timeout on the other
+ var s0Id = st.rs0.getNodeId(st.rs0.liveNodes.slaves[0]);
+ st.rs0.stop(s0Id);
+ coll.remove({});
+ st.rs1.awaitReplication(); // To ensure the first shard won't timeout
+ printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 3, wtimeout: 5 * 1000}));
+ assert(gle.ok);
+ assert.eq(gle.err, 'timeout');
+ assert(gle.wtimeout);
+ assert(gle.shards);
+ assert.eq(coll.count(), 0);
+
+ //
+ // Successful remove on two hosts, write concern timeout on both
+ // We don't aggregate two timeouts together
+ var s1Id = st.rs1.getNodeId(st.rs1.liveNodes.slaves[0]);
+ st.rs1.stop(s1Id);
+ // new writes to both shards to ensure that remove will do something on both of them
+ coll.insert({_id: -1});
+ coll.insert({_id: 1});
+
+ coll.remove({});
+ printjson(gle = coll.getDB().runCommand({getLastError: 1, w: 3, wtimeout: 5 * 1000}));
+
+ assert(!gle.ok);
+ assert(gle.errmsg);
+ assert.eq(gle.code, 64); // WriteConcernFailed - needed for backwards compatibility
+ assert(!gle.wtimeout);
+ assert(gle.shards);
+ assert(gle.errs);
+ assert.eq(coll.count(), 0);
+
+ //
+ // First replica set with no primary
+ //
+
+ //
+ // Successful bulk insert on two hosts, host changes before gle (error contacting host)
+ coll.remove({});
+ coll.insert([{_id: 1}, {_id: -1}]);
+ // Wait for write to be written to shards before shutting it down.
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ st.rs0.stop(st.rs0.getPrimary(), true); // wait for stop
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ // Should get an error about contacting dead host.
+ assert(!gle.ok);
+ assert(gle.errmsg);
+ assert.eq(coll.count({_id: 1}), 1);
+
+ //
+ // Failed insert on two hosts, first replica set with no primary
+ // NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
+ // successful writes from.
+ coll.remove({_id: 1});
+ coll.insert([{_id: 1}, {_id: -1}]);
+
+ printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+ assert(gle.ok);
+ assert(gle.err);
+ assert.eq(coll.count({_id: 1}), 1);
+
+ st.stop();
})();
diff --git a/jstests/gle/gle_sharded_write.js b/jstests/gle/gle_sharded_write.js
index cfe3c8f1ad2..f1feffed5b2 100644
--- a/jstests/gle/gle_sharded_write.js
+++ b/jstests/gle/gle_sharded_write.js
@@ -3,22 +3,21 @@
// Note that test should work correctly with and without write commands.
//
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+var st = new ShardingTest({shards: 2, mongos: 1});
st.stopBalancer();
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
-var coll = mongos.getCollection( jsTestName() + ".coll" );
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = mongos.getCollection(jsTestName() + ".coll");
var shards = config.shards.find().toArray();
-assert.commandWorked( admin.runCommand({ enableSharding : coll.getDB().toString() }) );
-printjson( admin.runCommand({ movePrimary : coll.getDB().toString(), to : shards[0]._id }) );
-assert.commandWorked( admin.runCommand({ shardCollection : coll.toString(), key : { _id : 1 } }) );
-assert.commandWorked( admin.runCommand({ split : coll.toString(), middle : { _id : 0 } }) );
-assert.commandWorked( admin.runCommand({ moveChunk : coll.toString(),
- find : { _id : 0 },
- to : shards[1]._id }) );
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+printjson(admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id}));
+assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: coll.toString(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.toString(), find: {_id: 0}, to: shards[1]._id}));
st.printShardingStatus();
@@ -27,8 +26,8 @@ var gle = null;
//
// Successful insert
coll.remove({});
-coll.insert({ _id : -1 });
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+coll.insert({_id: -1});
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert('err' in gle);
assert(!gle.err);
@@ -37,9 +36,9 @@ assert.eq(coll.count(), 1);
//
// Successful update
coll.remove({});
-coll.insert({ _id : 1 });
-coll.update({ _id : 1 }, { $set : { foo : "bar" } });
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+coll.insert({_id: 1});
+coll.update({_id: 1}, {$set: {foo: "bar"}});
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert('err' in gle);
assert(!gle.err);
@@ -50,9 +49,9 @@ assert.eq(coll.count(), 1);
//
// Successful multi-update
coll.remove({});
-coll.insert({ _id : 1 });
-coll.update({ }, { $set : { foo : "bar" } }, false, true);
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+coll.insert({_id: 1});
+coll.update({}, {$set: {foo: "bar"}}, false, true);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert('err' in gle);
assert(!gle.err);
@@ -63,8 +62,8 @@ assert.eq(coll.count(), 1);
//
// Successful upsert
coll.remove({});
-coll.update({ _id : 1 }, { _id : 1 }, true);
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+coll.update({_id: 1}, {_id: 1}, true);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert('err' in gle);
assert(!gle.err);
@@ -76,9 +75,9 @@ assert.eq(coll.count(), 1);
//
// Successful upserts
coll.remove({});
-coll.update({ _id : -1 }, { _id : -1 }, true);
-coll.update({ _id : 1 }, { _id : 1 }, true);
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+coll.update({_id: -1}, {_id: -1}, true);
+coll.update({_id: 1}, {_id: 1}, true);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert('err' in gle);
assert(!gle.err);
@@ -90,9 +89,9 @@ assert.eq(coll.count(), 2);
//
// Successful remove
coll.remove({});
-coll.insert({ _id : 1 });
-coll.remove({ _id : 1 });
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+coll.insert({_id: 1});
+coll.remove({_id: 1});
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert('err' in gle);
assert(!gle.err);
@@ -102,8 +101,8 @@ assert.eq(coll.count(), 0);
//
// Error on one host during update
coll.remove({});
-coll.update({ _id : 1 }, { $invalid : "xxx" }, true);
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+coll.update({_id: 1}, {$invalid: "xxx"}, true);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert(gle.err);
assert(gle.code);
@@ -114,8 +113,8 @@ assert.eq(coll.count(), 0);
//
// Error on two hosts during remove
coll.remove({});
-coll.remove({ $invalid : 'remove' });
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+coll.remove({$invalid: 'remove'});
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert(gle.err);
assert(gle.code);
@@ -126,14 +125,14 @@ assert.eq(coll.count(), 0);
//
// Repeated calls to GLE should work
coll.remove({});
-coll.update({ _id : 1 }, { $invalid : "xxx" }, true);
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+coll.update({_id: 1}, {$invalid: "xxx"}, true);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert(gle.err);
assert(gle.code);
assert(!gle.errmsg);
assert(gle.singleShard);
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert(gle.err);
assert(gle.code);
@@ -143,21 +142,18 @@ assert.eq(coll.count(), 0);
//
// Geo $near is not supported on mongos
-coll.ensureIndex( { loc: "2dsphere" } );
+coll.ensureIndex({loc: "2dsphere"});
coll.remove({});
var query = {
- loc : {
- $near : {
- $geometry : {
- type : "Point" ,
- coordinates : [ 0 , 0 ]
- },
- $maxDistance : 1000,
+ loc: {
+ $near: {
+ $geometry: {type: "Point", coordinates: [0, 0]},
+ $maxDistance: 1000,
+ }
}
- }
};
printjson(coll.remove(query));
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert(gle.err);
assert(gle.code);
@@ -172,11 +168,11 @@ assert.eq(coll.count(), 0);
//
// Successful bulk insert on two hosts, host dies before gle (error contacting host)
coll.remove({});
-coll.insert([{ _id : 1 }, { _id : -1 }]);
+coll.insert([{_id: 1}, {_id: -1}]);
// Wait for write to be written to shards before shutting it down.
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
-MongoRunner.stopMongod( st.shard0 );
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
+MongoRunner.stopMongod(st.shard0);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
// Should get an error about contacting dead host.
assert(!gle.ok);
assert(gle.errmsg);
@@ -185,14 +181,13 @@ assert(gle.errmsg);
// Failed insert on two hosts, first host dead
// NOTE: This is DIFFERENT from 2.4, since we don't need to contact a host we didn't get
// successful writes from.
-coll.remove({ _id : 1 });
-coll.insert([{ _id : 1 }, { _id : -1 }]);
-printjson(gle = coll.getDB().runCommand({ getLastError : 1 }));
+coll.remove({_id: 1});
+coll.insert([{_id: 1}, {_id: -1}]);
+printjson(gle = coll.getDB().runCommand({getLastError: 1}));
assert(gle.ok);
assert(gle.err);
-assert.eq(coll.count({ _id : 1 }), 1);
+assert.eq(coll.count({_id: 1}), 1);
jsTest.log("DONE!");
st.stop();
-
diff --git a/jstests/gle/opcounters_legacy.js b/jstests/gle/opcounters_legacy.js
index 7f267fbe155..c31494b6c01 100644
--- a/jstests/gle/opcounters_legacy.js
+++ b/jstests/gle/opcounters_legacy.js
@@ -4,7 +4,9 @@
// Remember the global 'db' var
var lastDB = db;
var mongo = new Mongo(db.getMongo().host);
-mongo.writeMode = function() { return "legacy"; };
+mongo.writeMode = function() {
+ return "legacy";
+};
db = mongo.getDB(db.toString());
var t = db.opcounters;
@@ -29,33 +31,33 @@ t.drop();
// Single insert, no error.
opCounters = db.serverStatus().opcounters;
-t.insert({_id:0});
+t.insert({_id: 0});
assert(!db.getLastError());
assert.eq(opCounters.insert + 1, db.serverStatus().opcounters.insert);
// Bulk insert, no error.
opCounters = db.serverStatus().opcounters;
-t.insert([{_id:1},{_id:2}]);
+t.insert([{_id: 1}, {_id: 2}]);
assert(!db.getLastError());
assert.eq(opCounters.insert + 2, db.serverStatus().opcounters.insert);
// Single insert, with error.
opCounters = db.serverStatus().opcounters;
-t.insert({_id:0});
-print( db.getLastError() );
+t.insert({_id: 0});
+print(db.getLastError());
assert(db.getLastError());
assert.eq(opCounters.insert + (isMongos ? 1 : 0), db.serverStatus().opcounters.insert);
// Bulk insert, with error, continueOnError=false.
opCounters = db.serverStatus().opcounters;
-t.insert([{_id:3},{_id:3},{_id:4}]);
+t.insert([{_id: 3}, {_id: 3}, {_id: 4}]);
assert(db.getLastError());
assert.eq(opCounters.insert + (isMongos ? 2 : 1), db.serverStatus().opcounters.insert);
// Bulk insert, with error, continueOnError=true.
var continueOnErrorFlag = 1;
opCounters = db.serverStatus().opcounters;
-t.insert([{_id:5},{_id:5},{_id:6}], continueOnErrorFlag);
+t.insert([{_id: 5}, {_id: 5}, {_id: 6}], continueOnErrorFlag);
assert(db.getLastError());
assert.eq(opCounters.insert + 2, db.serverStatus().opcounters.insert);
@@ -66,17 +68,17 @@ assert.eq(opCounters.insert + 2, db.serverStatus().opcounters.insert);
//
t.drop();
-t.insert({_id:0});
+t.insert({_id: 0});
// Update, no error.
opCounters = db.serverStatus().opcounters;
-t.update({_id:0}, {$set:{a:1}});
+t.update({_id: 0}, {$set: {a: 1}});
assert(!db.getLastError());
assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update);
// Update, with error.
opCounters = db.serverStatus().opcounters;
-t.update({_id:0}, {$set:{_id:1}});
+t.update({_id: 0}, {$set: {_id: 1}});
assert(db.getLastError());
assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update);
@@ -87,17 +89,17 @@ assert.eq(opCounters.update + 1, db.serverStatus().opcounters.update);
//
t.drop();
-t.insert([{_id:0},{_id:1}]);
+t.insert([{_id: 0}, {_id: 1}]);
// Delete, no error.
opCounters = db.serverStatus().opcounters;
-t.remove({_id:0});
+t.remove({_id: 0});
assert(!db.getLastError());
assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete);
// Delete, with error.
opCounters = db.serverStatus().opcounters;
-t.remove({_id:{$invalidOp:1}});
+t.remove({_id: {$invalidOp: 1}});
assert(db.getLastError());
assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete);
@@ -109,7 +111,7 @@ assert.eq(opCounters.delete + 1, db.serverStatus().opcounters.delete);
//
t.drop();
-t.insert({_id:0});
+t.insert({_id: 0});
// Query, no error.
opCounters = db.serverStatus().opcounters;
@@ -118,7 +120,9 @@ assert.eq(opCounters.query + 1, db.serverStatus().opcounters.query);
// Query, with error.
opCounters = db.serverStatus().opcounters;
-assert.throws(function() { t.findOne({_id:{$invalidOp:1}}); });
+assert.throws(function() {
+ t.findOne({_id: {$invalidOp: 1}});
+});
assert.eq(opCounters.query + (isMongos ? 0 : 1), db.serverStatus().opcounters.query);
//
@@ -128,11 +132,11 @@ assert.eq(opCounters.query + (isMongos ? 0 : 1), db.serverStatus().opcounters.qu
//
t.drop();
-t.insert([{_id:0},{_id:1},{_id:2}]);
+t.insert([{_id: 0}, {_id: 1}, {_id: 2}]);
// Getmore, no error.
opCounters = db.serverStatus().opcounters;
-t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore
+t.find().batchSize(2).toArray(); // 3 documents, batchSize=2 => 1 query + 1 getmore
assert.eq(opCounters.query + 1, db.serverStatus().opcounters.query);
assert.eq(opCounters.getmore + 1, db.serverStatus().opcounters.getmore);
@@ -147,47 +151,50 @@ assert.eq(opCounters.getmore + 1, db.serverStatus().opcounters.getmore);
//
t.drop();
-t.insert({_id:0});
+t.insert({_id: 0});
// Command, recognized, no error.
-serverStatus = db.runCommand({serverStatus:1});
+serverStatus = db.runCommand({serverStatus: 1});
opCounters = serverStatus.opcounters;
metricsObj = serverStatus.metrics.commands;
-assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted
+assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted
// Count this and the last run of "serverStatus"
-assert.eq( metricsObj.serverStatus.total + 2,
- db.serverStatus().metrics.commands.serverStatus.total,
- "total ServerStatus command counter did not increment" );
-assert.eq( metricsObj.serverStatus.failed,
- db.serverStatus().metrics.commands.serverStatus.failed,
- "failed ServerStatus command counter incremented!" );
+assert.eq(metricsObj.serverStatus.total + 2,
+ db.serverStatus().metrics.commands.serverStatus.total,
+ "total ServerStatus command counter did not increment");
+assert.eq(metricsObj.serverStatus.failed,
+ db.serverStatus().metrics.commands.serverStatus.failed,
+ "failed ServerStatus command counter incremented!");
// Command, recognized, with error.
-serverStatus = db.runCommand({serverStatus:1});
+serverStatus = db.runCommand({serverStatus: 1});
opCounters = serverStatus.opcounters;
metricsObj = serverStatus.metrics.commands;
-var countVal = { "total" : 0, "failed" : 0 };
-if (metricsObj.count != null){
+var countVal = {
+ "total": 0,
+ "failed": 0
+};
+if (metricsObj.count != null) {
countVal = metricsObj.count;
}
-res = t.runCommand("count", {query:{$invalidOp:1}});
+res = t.runCommand("count", {query: {$invalidOp: 1}});
assert.eq(0, res.ok);
assert.eq(opCounters.command + 2,
- db.serverStatus().opcounters.command); // "serverStatus", "count" counted
+ db.serverStatus().opcounters.command); // "serverStatus", "count" counted
-assert.eq( countVal.total +1,
- db.serverStatus().metrics.commands.count.total,
- "total count command counter did not incremented" );
-assert.eq( countVal.failed + 1,
- db.serverStatus().metrics.commands.count.failed,
- "failed count command counter did not increment" );
+assert.eq(countVal.total + 1,
+ db.serverStatus().metrics.commands.count.total,
+ "total count command counter did not incremented");
+assert.eq(countVal.failed + 1,
+ db.serverStatus().metrics.commands.count.failed,
+ "failed count command counter did not increment");
// Command, unrecognized.
-serverStatus = db.runCommand({serverStatus:1});
+serverStatus = db.runCommand({serverStatus: 1});
opCounters = serverStatus.opcounters;
metricsObj = serverStatus.metrics.commands;
res = t.runCommand("invalid");
assert.eq(0, res.ok);
-assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted
+assert.eq(opCounters.command + 1, db.serverStatus().opcounters.command); // "serverStatus" counted
assert.eq(null, db.serverStatus().metrics.commands.invalid);
-assert.eq(metricsObj['<UNKNOWN>'] +1, db.serverStatus().metrics.commands['<UNKNOWN>']);
+assert.eq(metricsObj['<UNKNOWN>'] + 1, db.serverStatus().metrics.commands['<UNKNOWN>']);
diff --git a/jstests/gle/sync1.js b/jstests/gle/sync1.js
index d370c53f949..1cdd27530a0 100644
--- a/jstests/gle/sync1.js
+++ b/jstests/gle/sync1.js
@@ -4,58 +4,62 @@
// A restarted standalone will lose all data when using an ephemeral storage engine.
// @tags: [requires_persistence]
-var test = new SyncCCTest( "sync1" );
+var test = new SyncCCTest("sync1");
if (test.conn.writeMode() == 'commands') {
jsTest.log('Skipping test not compatible with write commands');
-}
-else {
-
- db = test.conn.getDB( "test" );
+} else {
+ db = test.conn.getDB("test");
t = db.sync1;
- t.save( { x : 1 } );
- assert.eq( 1 , t.find().itcount() , "A1" );
- assert.eq( 1 , t.find().count() , "A2" );
- t.save( { x : 2 } );
- assert.eq( 2 , t.find().itcount() , "A3" );
- assert.eq( 2 , t.find().count() , "A4" );
+ t.save({x: 1});
+ assert.eq(1, t.find().itcount(), "A1");
+ assert.eq(1, t.find().count(), "A2");
+ t.save({x: 2});
+ assert.eq(2, t.find().itcount(), "A3");
+ assert.eq(2, t.find().count(), "A4");
- test.checkHashes( "test" , "A3" );
+ test.checkHashes("test", "A3");
test.tempKill();
- assert.throws( function(){ t.save( { x : 3 } ); } , null , "B1" );
+ assert.throws(function() {
+ t.save({x: 3});
+ }, null, "B1");
// It's ok even for some of the mongod to be unreachable for read-only cmd
- assert.eq( 2, t.find({}).count() );
+ assert.eq(2, t.find({}).count());
// It's NOT ok for some of the mongod to be unreachable for write cmd
- assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); });
- assert.eq( 2 , t.find().itcount() , "B2" );
+ assert.throws(function() {
+ t.getDB().runCommand({profile: 1});
+ });
+ assert.eq(2, t.find().itcount(), "B2");
test.tempStart();
- test.checkHashes( "test" , "B3" );
+ test.checkHashes("test", "B3");
// Trying killing the second mongod
- test.tempKill( 1 );
- assert.throws( function(){ t.save( { x : 3 } ); } );
+ test.tempKill(1);
+ assert.throws(function() {
+ t.save({x: 3});
+ });
// It's ok even for some of the mongod to be unreachable for read-only cmd
- assert.eq( 2, t.find({}).count() );
+ assert.eq(2, t.find({}).count());
// It's NOT ok for some of the mongod to be unreachable for write cmd
- assert.throws( function(){ t.getDB().runCommand({ profile: 1 }); });
- assert.eq( 2 , t.find().itcount() );
- test.tempStart( 1 );
+ assert.throws(function() {
+ t.getDB().runCommand({profile: 1});
+ });
+ assert.eq(2, t.find().itcount());
+ test.tempStart(1);
- assert.eq( 2 , t.find().itcount() , "C1" );
- assert.soon( function(){
- try {
- t.remove( { x : 1 } );
+ assert.eq(2, t.find().itcount(), "C1");
+ assert.soon(function() {
+ try {
+ t.remove({x: 1});
return true;
- }
- catch ( e ){
- print( e );
+ } catch (e) {
+ print(e);
}
return false;
- } );
- t.find().forEach( printjson );
- assert.eq( 1 , t.find().itcount() , "C2" );
+ });
+ t.find().forEach(printjson);
+ assert.eq(1, t.find().itcount(), "C2");
test.stop();
-
}
diff --git a/jstests/gle/sync4.js b/jstests/gle/sync4.js
index a33f9b8a132..01b98eb1221 100644
--- a/jstests/gle/sync4.js
+++ b/jstests/gle/sync4.js
@@ -1,26 +1,22 @@
// TODO: remove test after we deprecate SyncClusterConnection
-test = new SyncCCTest( "sync4" );
+test = new SyncCCTest("sync4");
if (test.conn.writeMode() == 'commands') {
jsTest.log('Skipping test not compatible with write commands');
-}
-else {
-
- db = test.conn.getDB( "test" );
+} else {
+ db = test.conn.getDB("test");
t = db.sync4;
- for ( i=0; i<1000; i++ ){
- t.insert( { _id : i , x : "asdasdsdasdas" } );
+ for (i = 0; i < 1000; i++) {
+ t.insert({_id: i, x: "asdasdsdasdas"});
}
db.getLastError();
- test.checkHashes( "test" , "A0" );
- assert.eq( 1000 , t.find().count() , "A1" );
- assert.eq( 1000 , t.find().itcount() , "A2" );
- assert.eq( 1000 , t.find().snapshot().batchSize(10).itcount() , "A2" );
+ test.checkHashes("test", "A0");
+ assert.eq(1000, t.find().count(), "A1");
+ assert.eq(1000, t.find().itcount(), "A2");
+ assert.eq(1000, t.find().snapshot().batchSize(10).itcount(), "A2");
test.stop();
-
}
-
diff --git a/jstests/gle/sync8.js b/jstests/gle/sync8.js
index 81404785ac3..f2a32ce418c 100644
--- a/jstests/gle/sync8.js
+++ b/jstests/gle/sync8.js
@@ -2,20 +2,18 @@
// Test for SERVER-11492 - make sure that upserting a new document reports n:1 in GLE
-var test = new SyncCCTest( "sync1" );
+var test = new SyncCCTest("sync1");
if (test.conn.writeMode() == 'commands') {
jsTest.log('Skipping test not compatible with write commands');
-}
-else {
- var db = test.conn.getDB( "test" );
+} else {
+ var db = test.conn.getDB("test");
var t = db.sync8;
t.remove({});
- t.update({_id:1}, {$set:{a:1}}, true);
+ t.update({_id: 1}, {$set: {a: 1}}, true);
var le = db.getLastErrorObj();
assert.eq(1, le.n);
test.stop();
-
}
diff --git a/jstests/gle/updated_existing.js b/jstests/gle/updated_existing.js
index 5e9891ccf85..ff485530e35 100644
--- a/jstests/gle/updated_existing.js
+++ b/jstests/gle/updated_existing.js
@@ -3,26 +3,24 @@
* an upsert is not missing when autosplit takes place.
*/
-var st = new ShardingTest({ shards : 1, mongos : 1, verbose : 1, chunkSize: 1 });
+var st = new ShardingTest({shards: 1, mongos: 1, verbose: 1, chunkSize: 1});
var testDB = st.getDB("test");
var coll = "foo";
testDB[coll].drop();
-st.adminCommand({ enablesharding : 'test' });
-st.adminCommand({ shardcollection : 'test.' + coll, key : { "shardkey2" : 1, "shardkey1" : 1 } });
+st.adminCommand({enablesharding: 'test'});
+st.adminCommand({shardcollection: 'test.' + coll, key: {"shardkey2": 1, "shardkey1": 1}});
var bigString = "";
-while ( bigString.length < 1024 * 50 )
+while (bigString.length < 1024 * 50)
bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
-
-
for (var i = 0; i < 10000; ++i) {
- testDB[coll].update({ "shardkey1" : "test" + i, "shardkey2" : "test" + i },
- { $set : { "test_upsert": bigString } },
- true, // upsert
- false); // multi
+ testDB[coll].update({"shardkey1": "test" + i, "shardkey2": "test" + i},
+ {$set: {"test_upsert": bigString}},
+ true, // upsert
+ false); // multi
assert.eq(testDB.getLastErrorObj().updatedExisting, false);
}
diff --git a/jstests/httpinterface/httpinterface.js b/jstests/httpinterface/httpinterface.js
index 38ca96b6be6..dc21422ff46 100644
--- a/jstests/httpinterface/httpinterface.js
+++ b/jstests/httpinterface/httpinterface.js
@@ -4,21 +4,21 @@ var httpPort = conn.port + 1000;
tryHttp = function() {
try {
- var mongo = new Mongo('localhost:' + httpPort) ;
- }
- catch (e) {
+ var mongo = new Mongo('localhost:' + httpPort);
+ } catch (e) {
return false;
}
// if we managed to start and connect a new mongo then the web interface is working
return true;
};
-assert.throws(function() {assert.soon(tryHttp, "tryHttp failed, like we expected it to");});
+assert.throws(function() {
+ assert.soon(tryHttp, "tryHttp failed, like we expected it to");
+});
MongoRunner.stopMongod(conn);
conn = MongoRunner.runMongod({port: conn.port, smallfiles: "", httpinterface: ""});
-assert.soon(tryHttp,
- "the web interface should be running on " + httpPort);
+assert.soon(tryHttp, "the web interface should be running on " + httpPort);
MongoRunner.stopMongod(conn);
diff --git a/jstests/httpinterface/network_options.js b/jstests/httpinterface/network_options.js
index 33f117f980b..0302c9ac15b 100644
--- a/jstests/httpinterface/network_options.js
+++ b/jstests/httpinterface/network_options.js
@@ -7,284 +7,185 @@ load('jstests/libs/command_line/test_parsed_options.js');
// Object Check
jsTest.log("Testing \"objcheck\" command line option");
var expectedResult = {
- "parsed" : {
- "net" : {
- "wireObjectCheck" : true
- }
- }
+ "parsed": {"net": {"wireObjectCheck": true}}
};
-testGetCmdLineOptsMongod({ objcheck : "" }, expectedResult);
+testGetCmdLineOptsMongod({objcheck: ""}, expectedResult);
jsTest.log("Testing \"noobjcheck\" command line option");
expectedResult = {
- "parsed" : {
- "net" : {
- "wireObjectCheck" : false
- }
- }
+ "parsed": {"net": {"wireObjectCheck": false}}
};
-testGetCmdLineOptsMongod({ noobjcheck : "" }, expectedResult);
+testGetCmdLineOptsMongod({noobjcheck: ""}, expectedResult);
jsTest.log("Testing \"net.wireObjectCheck\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/enable_objcheck.json",
- "net" : {
- "wireObjectCheck" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/enable_objcheck.json",
+ "net": {"wireObjectCheck": true}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/enable_objcheck.json" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_objcheck.json"},
expectedResult);
jsTest.log("Testing with no explicit network option setting");
expectedResult = {
- "parsed" : {
- "net" : { }
- }
+ "parsed": {"net": {}}
};
testGetCmdLineOptsMongod({}, expectedResult);
-
-
// HTTP Interface
jsTest.log("Testing \"httpinterface\" command line option");
var expectedResult = {
- "parsed" : {
- "net" : {
- "http" : {
- "enabled" : true
- }
- }
- }
+ "parsed": {"net": {"http": {"enabled": true}}}
};
-testGetCmdLineOptsMongod({ httpinterface : "" }, expectedResult);
+testGetCmdLineOptsMongod({httpinterface: ""}, expectedResult);
jsTest.log("Testing \"nohttpinterface\" command line option");
expectedResult = {
- "parsed" : {
- "net" : {
- "http" : {
- "enabled" : false
- }
- }
- }
+ "parsed": {"net": {"http": {"enabled": false}}}
};
-testGetCmdLineOptsMongod({ nohttpinterface : "" }, expectedResult);
+testGetCmdLineOptsMongod({nohttpinterface: ""}, expectedResult);
jsTest.log("Testing implicit enabling of http interface with \"jsonp\" command line option");
expectedResult = {
- "parsed" : {
- "net" : {
- "http" : {
- "JSONPEnabled" : true,
- "enabled" : true
- }
- }
- }
+ "parsed": {"net": {"http": {"JSONPEnabled": true, "enabled": true}}}
};
-testGetCmdLineOptsMongod({ jsonp : "" }, expectedResult);
+testGetCmdLineOptsMongod({jsonp: ""}, expectedResult);
jsTest.log("Testing implicit enabling of http interface with \"rest\" command line option");
expectedResult = {
- "parsed" : {
- "net" : {
- "http" : {
- "RESTInterfaceEnabled" : true,
- "enabled" : true
- }
- }
- }
+ "parsed": {"net": {"http": {"RESTInterfaceEnabled": true, "enabled": true}}}
};
-testGetCmdLineOptsMongod({ rest : "" }, expectedResult);
+testGetCmdLineOptsMongod({rest: ""}, expectedResult);
jsTest.log("Testing \"net.http.enabled\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/enable_httpinterface.json",
- "net" : {
- "http" : {
- "enabled" : true
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/enable_httpinterface.json",
+ "net": {"http": {"enabled": true}}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/enable_httpinterface.json" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_httpinterface.json"},
expectedResult);
jsTest.log("Testing with no explicit network option setting");
expectedResult = {
- "parsed" : {
- "net" : { }
- }
+ "parsed": {"net": {}}
};
testGetCmdLineOptsMongod({}, expectedResult);
-
-
// Unix Socket
if (!_isWindows()) {
jsTest.log("Testing \"nounixsocket\" command line option");
expectedResult = {
- "parsed" : {
- "net" : {
- "unixDomainSocket" : {
- "enabled" : false
- }
- }
- }
+ "parsed": {"net": {"unixDomainSocket": {"enabled": false}}}
};
- testGetCmdLineOptsMongod({ nounixsocket : "" }, expectedResult);
+ testGetCmdLineOptsMongod({nounixsocket: ""}, expectedResult);
jsTest.log("Testing \"net.wireObjectCheck\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/enable_unixsocket.json",
- "net" : {
- "unixDomainSocket" : {
- "enabled" : true
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/enable_unixsocket.json",
+ "net": {"unixDomainSocket": {"enabled": true}}
}
};
- testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/enable_unixsocket.json" },
- expectedResult);
+ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_unixsocket.json"},
+ expectedResult);
jsTest.log("Testing with no explicit network option setting");
expectedResult = {
- "parsed" : {
- "net" : { }
- }
+ "parsed": {"net": {}}
};
testGetCmdLineOptsMongod({}, expectedResult);
}
-
// Test that we preserve switches explicitly set to false in config files. See SERVER-13439.
jsTest.log("Testing explicitly disabling \"net.http.RESTInterfaceEnabled\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_rest_interface.json",
- "net" : {
- "http" : {
- "RESTInterfaceEnabled" : false
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_rest_interface.json",
+ "net": {"http": {"RESTInterfaceEnabled": false}}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_rest_interface.json" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_rest_interface.json"},
expectedResult);
jsTest.log("Testing explicitly disabling \"net.http.JSONPEnabled\" config file option on mongoD");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_jsonp.json",
- "net" : {
- "http" : {
- "JSONPEnabled" : false
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_jsonp.json",
+ "net": {"http": {"JSONPEnabled": false}}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_jsonp.json" },
- expectedResult);
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_jsonp.json"}, expectedResult);
// jsonp on mongos is legacy and not supported in json/yaml config files since this interface is not
// well defined. See SERVER-11707 for an example.
jsTest.log("Testing explicitly disabling \"jsonp\" config file option on mongoS");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_jsonp.ini",
- "net" : {
- "http" : {
- "JSONPEnabled" : false
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_jsonp.ini",
+ "net": {"http": {"JSONPEnabled": false}}
}
};
-testGetCmdLineOptsMongos({ config : "jstests/libs/config_files/disable_jsonp.ini" },
- expectedResult);
+testGetCmdLineOptsMongos({config: "jstests/libs/config_files/disable_jsonp.ini"}, expectedResult);
jsTest.log("Testing explicitly disabled \"objcheck\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_objcheck.ini",
- "net" : {
- "wireObjectCheck" : false
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_objcheck.ini",
+ "net": {"wireObjectCheck": false}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_objcheck.ini" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_objcheck.ini"},
expectedResult);
jsTest.log("Testing explicitly disabled \"noobjcheck\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_noobjcheck.ini",
- "net" : {
- "wireObjectCheck" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_noobjcheck.ini",
+ "net": {"wireObjectCheck": true}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_noobjcheck.ini" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_noobjcheck.ini"},
expectedResult);
jsTest.log("Testing explicitly disabled \"httpinterface\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_httpinterface.ini",
- "net" : {
- "http" : {
- "enabled" : false
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_httpinterface.ini",
+ "net": {"http": {"enabled": false}}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_httpinterface.ini" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_httpinterface.ini"},
expectedResult);
jsTest.log("Testing explicitly disabled \"nohttpinterface\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_nohttpinterface.ini",
- "net" : {
- "http" : {
- "enabled" : true
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_nohttpinterface.ini",
+ "net": {"http": {"enabled": true}}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_nohttpinterface.ini" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_nohttpinterface.ini"},
expectedResult);
jsTest.log("Testing explicitly disabled \"ipv6\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_ipv6.ini",
- "net" : {
- "ipv6" : false
- }
- }
+ "parsed": {"config": "jstests/libs/config_files/disable_ipv6.ini", "net": {"ipv6": false}}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_ipv6.ini" },
- expectedResult);
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_ipv6.ini"}, expectedResult);
if (!_isWindows()) {
jsTest.log("Testing explicitly disabled \"nounixsocket\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_nounixsocket.ini",
- "net" : {
- "unixDomainSocket" : {
- "enabled" : true
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_nounixsocket.ini",
+ "net": {"unixDomainSocket": {"enabled": true}}
}
};
- testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_nounixsocket.ini" },
+ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_nounixsocket.ini"},
expectedResult);
}
-
-
print(baseName + " succeeded.");
diff --git a/jstests/httpinterface/sharding_configdb_on_default_ports.js b/jstests/httpinterface/sharding_configdb_on_default_ports.js
index 4472a7a515b..4ea1c461304 100644
--- a/jstests/httpinterface/sharding_configdb_on_default_ports.js
+++ b/jstests/httpinterface/sharding_configdb_on_default_ports.js
@@ -19,15 +19,11 @@
// The config servers must support readConcern: majority to be run as a replica set, so
// explicitly set storage engine to wiredTiger.
- c1 = MongoRunner.runMongod({
- configsvr: "",
- port: 27019,
- replSet: "csrs",
- storageEngine: "wiredTiger"
- });
+ c1 = MongoRunner.runMongod(
+ {configsvr: "", port: 27019, replSet: "csrs", storageEngine: "wiredTiger"});
assert.commandWorked(c1.adminCommand("replSetInitiate"));
c2 = MongoRunner.runMongod({configsvr: "", storageEngine: "wiredTiger"});
c3 = MongoRunner.runMongod({configsvr: "", storageEngine: "wiredTiger"});
- assert(MongoRunner.runMongos({configdb: "csrs/" + getHostPart(c1.host) }));
+ assert(MongoRunner.runMongos({configdb: "csrs/" + getHostPart(c1.host)}));
}());
diff --git a/jstests/libs/analyze_plan.js b/jstests/libs/analyze_plan.js
index 12a28671866..e9c9b4c09fa 100644
--- a/jstests/libs/analyze_plan.js
+++ b/jstests/libs/analyze_plan.js
@@ -43,11 +43,10 @@ function getPlanStage(root, stage) {
if (planStageList.length === 0) {
return null;
- }
- else {
+ } else {
assert(planStageList.length === 1,
- "getPlanStage expects to find 0 or 1 matching stages. planStageList: "
- + tojson(planStageList));
+ "getPlanStage expects to find 0 or 1 matching stages. planStageList: " +
+ tojson(planStageList));
return planStageList[0];
}
}
@@ -100,11 +99,9 @@ function isCollscan(root) {
function getChunkSkips(root) {
if (root.stage === "SHARDING_FILTER") {
return root.chunkSkips;
- }
- else if ("inputStage" in root) {
+ } else if ("inputStage" in root) {
return getChunkSkips(root.inputStage);
- }
- else if ("inputStages" in root) {
+ } else if ("inputStages" in root) {
var skips = 0;
for (var i = 0; i < root.inputStages.length; i++) {
skips += getChunkSkips(root.inputStages[0]);
diff --git a/jstests/libs/chunk_manipulation_util.js b/jstests/libs/chunk_manipulation_util.js
index 9408ef7a544..f03adb1714c 100644
--- a/jstests/libs/chunk_manipulation_util.js
+++ b/jstests/libs/chunk_manipulation_util.js
@@ -2,7 +2,7 @@
// Utilities for testing chunk manipulation: moveChunk, mergeChunks, etc.
//
-load( './jstests/libs/test_background_ops.js' );
+load('./jstests/libs/test_background_ops.js');
//
// Start a background moveChunk.
@@ -19,31 +19,18 @@ load( './jstests/libs/test_background_ops.js' );
// Returns a join function; call it to wait for moveChunk to complete.
//
-function moveChunkParallel(
- staticMongod,
- mongosURL,
- findCriteria,
- bounds,
- ns,
- toShardId) {
-
+function moveChunkParallel(staticMongod, mongosURL, findCriteria, bounds, ns, toShardId) {
assert((findCriteria || bounds) && !(findCriteria && bounds),
'Specify either findCriteria or bounds, but not both.');
- function runMoveChunk(
- mongosURL,
- findCriteria,
- bounds,
- ns,
- toShardId) {
-
+ function runMoveChunk(mongosURL, findCriteria, bounds, ns, toShardId) {
assert(mongosURL && ns && toShardId, 'Missing arguments.');
assert((findCriteria || bounds) && !(findCriteria && bounds),
'Specify either findCriteria or bounds, but not both.');
- var mongos = new Mongo( mongosURL ),
- admin = mongos.getDB( 'admin' ),
- cmd = { moveChunk : ns };
+ var mongos = new Mongo(mongosURL), admin = mongos.getDB('admin'), cmd = {
+ moveChunk: ns
+ };
if (findCriteria) {
cmd.find = findCriteria;
@@ -56,14 +43,13 @@ function moveChunkParallel(
printjson(cmd);
var result = admin.runCommand(cmd);
- printjson( result );
- assert( result.ok );
+ printjson(result);
+ assert(result.ok);
}
// Return the join function.
return startParallelOps(
- staticMongod, runMoveChunk,
- [ mongosURL, findCriteria, bounds, ns, toShardId ] );
+ staticMongod, runMoveChunk, [mongosURL, findCriteria, bounds, ns, toShardId]);
}
// moveChunk starts at step 0 and proceeds to 1 (it has *finished* parsing
@@ -77,10 +63,9 @@ var moveChunkStepNames = {
done: 6
};
-function numberToName( names, stepNumber ) {
- for ( var name in names) {
- if ( names.hasOwnProperty(name)
- && names[name] == stepNumber ) {
+function numberToName(names, stepNumber) {
+ for (var name in names) {
+ if (names.hasOwnProperty(name) && names[name] == stepNumber) {
return name;
}
}
@@ -91,60 +76,59 @@ function numberToName( names, stepNumber ) {
//
// Configure a failpoint to make moveChunk hang at a step.
//
-function pauseMoveChunkAtStep( shardConnection, stepNumber ) {
- configureMoveChunkFailPoint( shardConnection, stepNumber, 'alwaysOn' );
+function pauseMoveChunkAtStep(shardConnection, stepNumber) {
+ configureMoveChunkFailPoint(shardConnection, stepNumber, 'alwaysOn');
}
//
// Allow moveChunk to proceed past a step.
//
-function unpauseMoveChunkAtStep( shardConnection, stepNumber ) {
- configureMoveChunkFailPoint( shardConnection, stepNumber, 'off' );
+function unpauseMoveChunkAtStep(shardConnection, stepNumber) {
+ configureMoveChunkFailPoint(shardConnection, stepNumber, 'off');
}
-function proceedToMoveChunkStep( shardConnection, stepNumber ) {
- jsTest.log( 'moveChunk proceeding from step "'
- + numberToName( moveChunkStepNames, stepNumber - 1 )
- + '" to "' + numberToName( moveChunkStepNames, stepNumber )
- + '".' );
+function proceedToMoveChunkStep(shardConnection, stepNumber) {
+ jsTest.log('moveChunk proceeding from step "' +
+ numberToName(moveChunkStepNames, stepNumber - 1) + '" to "' +
+ numberToName(moveChunkStepNames, stepNumber) + '".');
- pauseMoveChunkAtStep( shardConnection, stepNumber );
- unpauseMoveChunkAtStep( shardConnection, stepNumber - 1 );
- waitForMoveChunkStep( shardConnection, stepNumber );
+ pauseMoveChunkAtStep(shardConnection, stepNumber);
+ unpauseMoveChunkAtStep(shardConnection, stepNumber - 1);
+ waitForMoveChunkStep(shardConnection, stepNumber);
}
-
-function configureMoveChunkFailPoint( shardConnection, stepNumber, mode ) {
- assert.between(migrateStepNames.copiedIndexes, stepNumber,
- migrateStepNames.done, "incorrect stepNumber", true);
- var admin = shardConnection.getDB( 'admin' );
- admin.runCommand({ configureFailPoint: 'moveChunkHangAtStep' + stepNumber,
- mode: mode });
+function configureMoveChunkFailPoint(shardConnection, stepNumber, mode) {
+ assert.between(migrateStepNames.copiedIndexes,
+ stepNumber,
+ migrateStepNames.done,
+ "incorrect stepNumber",
+ true);
+ var admin = shardConnection.getDB('admin');
+ admin.runCommand({configureFailPoint: 'moveChunkHangAtStep' + stepNumber, mode: mode});
}
//
// Wait for moveChunk to reach a step (1 through 6). Assumes only one moveChunk
// is in mongos's currentOp.
//
-function waitForMoveChunkStep( shardConnection, stepNumber ) {
- var searchString = 'step ' + stepNumber,
- admin = shardConnection.getDB( 'admin' );
+function waitForMoveChunkStep(shardConnection, stepNumber) {
+ var searchString = 'step ' + stepNumber, admin = shardConnection.getDB('admin');
- assert.between(migrateStepNames.copiedIndexes, stepNumber,
- migrateStepNames.done, "incorrect stepNumber", true);
+ assert.between(migrateStepNames.copiedIndexes,
+ stepNumber,
+ migrateStepNames.done,
+ "incorrect stepNumber",
+ true);
- var msg = (
- 'moveChunk on ' + shardConnection.shardName
- + ' never reached step "'
- + numberToName( moveChunkStepNames, stepNumber )
- + '".');
+ var msg = ('moveChunk on ' + shardConnection.shardName + ' never reached step "' +
+ numberToName(moveChunkStepNames, stepNumber) + '".');
- assert.soon( function() {
+ assert.soon(function() {
var in_progress = admin.currentOp().inprog;
- for ( var i = 0; i < in_progress.length; ++i ) {
+ for (var i = 0; i < in_progress.length; ++i) {
var op = in_progress[i];
- if ( op.query && op.query.moveChunk ) {
- return op.msg && op.msg.startsWith( searchString );
+ if (op.query && op.query.moveChunk) {
+ return op.msg && op.msg.startsWith(searchString);
}
}
@@ -164,61 +148,61 @@ var migrateStepNames = {
//
// Configure a failpoint to make migration thread hang at a step (1 through 5).
//
-function pauseMigrateAtStep( shardConnection, stepNumber ) {
- configureMigrateFailPoint( shardConnection, stepNumber, 'alwaysOn' );
+function pauseMigrateAtStep(shardConnection, stepNumber) {
+ configureMigrateFailPoint(shardConnection, stepNumber, 'alwaysOn');
}
//
// Allow _recvChunkStart to proceed past a step.
//
-function unpauseMigrateAtStep( shardConnection, stepNumber ) {
- configureMigrateFailPoint( shardConnection, stepNumber, 'off' );
+function unpauseMigrateAtStep(shardConnection, stepNumber) {
+ configureMigrateFailPoint(shardConnection, stepNumber, 'off');
}
-function proceedToMigrateStep( shardConnection, stepNumber ) {
- jsTest.log( 'Migration thread proceeding from step "'
- + numberToName( migrateStepNames, stepNumber - 1 )
- + '" to "' + numberToName( migrateStepNames, stepNumber )
- + '".');
+function proceedToMigrateStep(shardConnection, stepNumber) {
+ jsTest.log('Migration thread proceeding from step "' +
+ numberToName(migrateStepNames, stepNumber - 1) + '" to "' +
+ numberToName(migrateStepNames, stepNumber) + '".');
- pauseMigrateAtStep( shardConnection, stepNumber );
- unpauseMigrateAtStep( shardConnection, stepNumber - 1 );
- waitForMigrateStep( shardConnection, stepNumber );
+ pauseMigrateAtStep(shardConnection, stepNumber);
+ unpauseMigrateAtStep(shardConnection, stepNumber - 1);
+ waitForMigrateStep(shardConnection, stepNumber);
}
-function configureMigrateFailPoint( shardConnection, stepNumber, mode ) {
- assert.between( migrateStepNames.copiedIndexes, stepNumber,
- migrateStepNames.done, "incorrect stepNumber", true);
+function configureMigrateFailPoint(shardConnection, stepNumber, mode) {
+ assert.between(migrateStepNames.copiedIndexes,
+ stepNumber,
+ migrateStepNames.done,
+ "incorrect stepNumber",
+ true);
- var admin = shardConnection.getDB( 'admin' );
- admin.runCommand({ configureFailPoint: 'migrateThreadHangAtStep' + stepNumber,
- mode: mode });
+ var admin = shardConnection.getDB('admin');
+ admin.runCommand({configureFailPoint: 'migrateThreadHangAtStep' + stepNumber, mode: mode});
}
//
// Wait for moveChunk to reach a step (1 through 6).
//
-function waitForMigrateStep( shardConnection, stepNumber ) {
- var searchString = 'step ' + stepNumber,
- admin = shardConnection.getDB( 'admin' );
+function waitForMigrateStep(shardConnection, stepNumber) {
+ var searchString = 'step ' + stepNumber, admin = shardConnection.getDB('admin');
- assert.between(migrateStepNames.copiedIndexes, stepNumber,
- migrateStepNames.done, "incorrect stepNumber", true);
+ assert.between(migrateStepNames.copiedIndexes,
+ stepNumber,
+ migrateStepNames.done,
+ "incorrect stepNumber",
+ true);
- var msg = (
- 'Migrate thread on ' + shardConnection.shardName
- + ' never reached step "'
- + numberToName( migrateStepNames, stepNumber )
- + '".');
+ var msg = ('Migrate thread on ' + shardConnection.shardName + ' never reached step "' +
+ numberToName(migrateStepNames, stepNumber) + '".');
- assert.soon( function() {
+ assert.soon(function() {
// verbose = True so we see the migration thread.
var in_progress = admin.currentOp(true).inprog;
- for ( var i = 0; i < in_progress.length; ++i ) {
+ for (var i = 0; i < in_progress.length; ++i) {
var op = in_progress[i];
- if ( op.desc && op.desc === 'migrateThread' ) {
+ if (op.desc && op.desc === 'migrateThread') {
if (op.hasOwnProperty('msg')) {
- return op.msg.startsWith( searchString );
+ return op.msg.startsWith(searchString);
} else {
return false;
}
diff --git a/jstests/libs/cleanup_orphaned_util.js b/jstests/libs/cleanup_orphaned_util.js
index 912a4506c90..cfd69ab128f 100644
--- a/jstests/libs/cleanup_orphaned_util.js
+++ b/jstests/libs/cleanup_orphaned_util.js
@@ -7,25 +7,25 @@
// expected number of times before stopping.
//
function cleanupOrphaned(shardConnection, ns, expectedIterations) {
- var admin = shardConnection.getDB('admin'),
- result = admin.runCommand({cleanupOrphaned: ns}),
+ var admin = shardConnection.getDB('admin'), result = admin.runCommand({cleanupOrphaned: ns}),
iterations = 1;
- if (!result.ok) { printjson(result); }
+ if (!result.ok) {
+ printjson(result);
+ }
assert(result.ok);
while (result.stoppedAtKey) {
- result = admin.runCommand({
- cleanupOrphaned: ns,
- startingFromKey: result.stoppedAtKey
- });
+ result = admin.runCommand({cleanupOrphaned: ns, startingFromKey: result.stoppedAtKey});
assert(result.ok);
++iterations;
}
- assert.eq(iterations, expectedIterations, 'Expected to run ' +
- 'cleanupOrphaned' + expectedIterations + ' times, but it only ran ' +
- iterations + ' times before stoppedAtKey was null.');
+ assert.eq(iterations,
+ expectedIterations,
+ 'Expected to run ' +
+ 'cleanupOrphaned' + expectedIterations + ' times, but it only ran ' + iterations +
+ ' times before stoppedAtKey was null.');
}
// Shards data from key range, then inserts orphan documents, runs cleanupOrphans
@@ -38,45 +38,32 @@ function cleanupOrphaned(shardConnection, ns, expectedIterations) {
function testCleanupOrphaned(options) {
var st = new ShardingTest({shards: 2, mongos: 2});
- var mongos = st.s0,
- admin = mongos.getDB('admin'),
+ var mongos = st.s0, admin = mongos.getDB('admin'),
shards = mongos.getCollection('config.shards').find().toArray(),
coll = mongos.getCollection('foo.bar'),
- shard0Coll = st.shard0.getCollection(coll.getFullName()),
- keys = options.keyGen(),
- beginning = keys[0],
- oneQuarter = keys[Math.round(keys.length / 4)],
+ shard0Coll = st.shard0.getCollection(coll.getFullName()), keys = options.keyGen(),
+ beginning = keys[0], oneQuarter = keys[Math.round(keys.length / 4)],
middle = keys[Math.round(keys.length / 2)],
threeQuarters = keys[Math.round(3 * keys.length / 4)];
- assert.commandWorked(admin.runCommand({
- enableSharding: coll.getDB().getName()
- }));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
- printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
+ printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
- assert.commandWorked(admin.runCommand({
- shardCollection: coll.getFullName(),
- key: options.shardKey
- }));
+ assert.commandWorked(
+ admin.runCommand({shardCollection: coll.getFullName(), key: options.shardKey}));
st.printShardingStatus();
jsTest.log('Inserting some regular docs...');
- assert.commandWorked(admin.runCommand({
- split: coll.getFullName(),
- middle: middle
- }));
+ assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: middle}));
- assert.commandWorked(admin.runCommand({
- moveChunk: coll.getFullName(),
- find: middle,
- to: shards[1]._id,
- _waitForDelete: true
- }));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: coll.getFullName(), find: middle, to: shards[1]._id, _waitForDelete: true}));
- for (var i = 0; i < keys.length; i++) coll.insert(keys[i]);
+ for (var i = 0; i < keys.length; i++)
+ coll.insert(keys[i]);
assert.eq(null, coll.getDB().getLastError());
// Half of the data is on each shard:
@@ -105,10 +92,7 @@ function testCleanupOrphaned(options) {
jsTest.log('Moving half the data out again (making a hole)...');
- assert.commandWorked(admin.runCommand({
- split: coll.getFullName(),
- middle: oneQuarter
- }));
+ assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: oneQuarter}));
assert.commandWorked(admin.runCommand({
moveChunk: coll.getFullName(),
diff --git a/jstests/libs/command_line/test_parsed_options.js b/jstests/libs/command_line/test_parsed_options.js
index 1f3a626ee02..c38d07668af 100644
--- a/jstests/libs/command_line/test_parsed_options.js
+++ b/jstests/libs/command_line/test_parsed_options.js
@@ -4,26 +4,22 @@
function mergeOptions(obj1, obj2) {
var obj3 = {};
for (var attrname in obj1) {
- if (typeof obj1[attrname] === "object" &&
- typeof obj2[attrname] !== "undefined") {
+ if (typeof obj1[attrname] === "object" && typeof obj2[attrname] !== "undefined") {
if (typeof obj2[attrname] !== "object") {
throw Error("Objects being merged must have the same structure");
}
obj3[attrname] = mergeOptions(obj1[attrname], obj2[attrname]);
- }
- else {
+ } else {
obj3[attrname] = obj1[attrname];
}
}
for (var attrname in obj2) {
- if (typeof obj2[attrname] === "object" &&
- typeof obj1[attrname] !== "undefined") {
+ if (typeof obj2[attrname] === "object" && typeof obj1[attrname] !== "undefined") {
if (typeof obj1[attrname] !== "object") {
throw Error("Objects being merged must have the same structure");
}
// Already handled above
- }
- else {
+ } else {
obj3[attrname] = obj2[attrname];
}
}
@@ -45,12 +41,10 @@ function mergeOptions(obj1, obj2) {
//
var getCmdLineOptsBaseMongod;
function testGetCmdLineOptsMongod(mongoRunnerConfig, expectedResult) {
-
// Get the options object returned by "getCmdLineOpts" when we spawn a mongod using our test
// framework without passing any additional options. We need this because the framework adds
// options of its own, and we only want to compare against the options we care about.
function getBaseOptsObject() {
-
// Start mongod with no options
var baseMongod = MongoRunner.runMongod();
@@ -96,8 +90,7 @@ function testGetCmdLineOptsMongod(mongoRunnerConfig, expectedResult) {
try {
mongod.getDB("admin").createUser({user: "root", pwd: "pass", roles: ["root"]});
mongod.getDB("admin").auth("root", "pass");
- }
- catch (ex) {
+ } catch (ex) {
}
// Get the parsed options
@@ -146,16 +139,11 @@ function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
// options of its own, and we only want to compare against the options we care about.
function getCmdLineOptsFromMongos(mongosOptions) {
// Start mongod with no options
- var baseMongod = MongoRunner.runMongod({
- configsvr: "",
- journal: "",
- replSet: "csrs",
- storageEngine: "wiredTiger"
- });
- assert.commandWorked(baseMongod.adminCommand( {
- replSetInitiate: {
- _id: "csrs", configsvr: true, members: [{_id: 0, host: baseMongod.host}]
- }
+ var baseMongod = MongoRunner.runMongod(
+ {configsvr: "", journal: "", replSet: "csrs", storageEngine: "wiredTiger"});
+ assert.commandWorked(baseMongod.adminCommand({
+ replSetInitiate:
+ {_id: "csrs", configsvr: true, members: [{_id: 0, host: baseMongod.host}]}
}));
var configdbStr = "csrs/" + baseMongod.host;
var ismasterResult;
@@ -164,7 +152,7 @@ function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
ismasterResult = baseMongod.adminCommand("ismaster");
return ismasterResult.ismaster;
},
- function () {
+ function() {
return tojson(ismasterResult);
});
diff --git a/jstests/libs/csrs_upgrade_util.js b/jstests/libs/csrs_upgrade_util.js
index 106a9847088..7dccccc3e7c 100644
--- a/jstests/libs/csrs_upgrade_util.js
+++ b/jstests/libs/csrs_upgrade_util.js
@@ -8,233 +8,245 @@
load("jstests/replsets/rslib.js");
var CSRSUpgradeCoordinator = function() {
-"use strict";
-
-var testDBName = jsTestName();
-var dataCollectionName = testDBName + ".data";
-var csrsName = jsTestName() + "-csrs";
-var numCsrsMembers;
-var st;
-var shardConfigs;
-var csrsConfig;
-var csrs;
-var csrs0Opts;
-
-this.getTestDBName = function() {
- return testDBName;
-};
-
-this.getDataCollectionName = function() {
- return dataCollectionName;
-};
+ "use strict";
+
+ var testDBName = jsTestName();
+ var dataCollectionName = testDBName + ".data";
+ var csrsName = jsTestName() + "-csrs";
+ var numCsrsMembers;
+ var st;
+ var shardConfigs;
+ var csrsConfig;
+ var csrs;
+ var csrs0Opts;
+
+ this.getTestDBName = function() {
+ return testDBName;
+ };
-/**
- * Returns an array of connections to the CSRS nodes.
- */
-this.getCSRSNodes = function() {
- return csrs;
-};
+ this.getDataCollectionName = function() {
+ return dataCollectionName;
+ };
-/**
- * Returns the replica set name of the config server replica set.
- */
-this.getCSRSName = function() {
- return csrsName;
-};
+ /**
+ * Returns an array of connections to the CSRS nodes.
+ */
+ this.getCSRSNodes = function() {
+ return csrs;
+ };
-/**
- * Returns a copy of the options used for starting a mongos in the coordinator's cluster.
- */
-this.getMongosConfig = function() {
- var sconfig = Object.extend({}, st.s0.fullOptions, /* deep */ true);
- delete sconfig.port;
- return sconfig;
-};
-
-this.getMongos = function(n) {
- return st._mongos[n];
-};
-
-this.getShardName = function(n) {
- return shardConfigs[n]._id;
-};
+ /**
+ * Returns the replica set name of the config server replica set.
+ */
+ this.getCSRSName = function() {
+ return csrsName;
+ };
-/**
- * Returns the ShardingTest fixture backing this CSRSUpgradeCoordinator.
- */
-this.getShardingTestFixture = function() {
- return st;
-};
+ /**
+ * Returns a copy of the options used for starting a mongos in the coordinator's cluster.
+ */
+ this.getMongosConfig = function() {
+ var sconfig = Object.extend({}, st.s0.fullOptions, /* deep */ true);
+ delete sconfig.port;
+ return sconfig;
+ };
-/**
- * Private helper method for waiting for a given node to return ismaster:true in its ismaster
- * command response.
- */
-var _waitUntilMaster = function (dnode) {
- var isMasterReply;
- assert.soon(function () {
- isMasterReply = dnode.adminCommand({ismaster: 1});
- return isMasterReply.ismaster;
- }, function () {
- return "Expected " + dnode.name + " to respond ismaster:true, but got " +
- tojson(isMasterReply);
- });
-};
+ this.getMongos = function(n) {
+ return st._mongos[n];
+ };
-/**
-* Sets up the underlying sharded cluster in SCCC mode, and shards the test collection on _id.
-*/
-this.setupSCCCCluster = function() {
- if (TestData.storageEngine == "wiredTiger" || TestData.storageEngine == "") {
- // TODO(schwerin): SERVER-19739 Support testing CSRS with storage engines other than wired
- // tiger, when such other storage engines support majority read concern.
- numCsrsMembers = 3;
- } else {
- numCsrsMembers = 4;
- }
-
- jsTest.log("Setting up SCCC sharded cluster");
-
- st = new ShardingTest({name: "csrsUpgrade",
- mongos: 2,
- rs: { nodes: 3 },
- shards: 2,
- nopreallocj: true,
- other: {
- sync: true,
- enableBalancer: false,
- useHostname: true
- }});
-
- shardConfigs = st.s0.getCollection("config.shards").find().toArray();
- assert.eq(2, shardConfigs.length);
-
- jsTest.log("Enabling sharding on " + testDBName + " and making " + this.getShardName(0) +
- " the primary shard");
- assert.commandWorked(st.s0.adminCommand({enablesharding: testDBName}));
- st.ensurePrimaryShard(testDBName, this.getShardName(0));
-
- jsTest.log("Creating a sharded collection " + dataCollectionName);
- assert.commandWorked(st.s0.adminCommand({shardcollection: dataCollectionName,
- key: { _id: 1 }
- }));
-};
+ this.getShardName = function(n) {
+ return shardConfigs[n]._id;
+ };
-/**
- * Restarts the first config server as a single node replica set, while still leaving the cluster
- * operating in SCCC mode.
- */
-this.restartFirstConfigAsReplSet = function() {
- jsTest.log("Restarting " + st.c0.name + " as a standalone replica set");
- csrsConfig = {
- _id: csrsName,
- version: 1,
- configsvr: true,
- members: [ { _id: 0, host: st.c0.name }]
- };
- assert.commandWorked(st.c0.adminCommand({replSetInitiate: csrsConfig}));
- csrs = [];
- csrs0Opts = Object.extend({}, st.c0.fullOptions, /* deep */ true);
- csrs0Opts.restart = true; // Don't clean the data files from the old c0.
- csrs0Opts.replSet = csrsName;
- csrs0Opts.configsvrMode = "sccc";
- MongoRunner.stopMongod(st.c0);
- csrs.push(MongoRunner.runMongod(csrs0Opts));
- _waitUntilMaster(csrs[0]);
-};
+ /**
+ * Returns the ShardingTest fixture backing this CSRSUpgradeCoordinator.
+ */
+ this.getShardingTestFixture = function() {
+ return st;
+ };
-/**
- * Starts up the new members of the config server replica set as non-voting, priority zero nodes.
- */
-this.startNewCSRSNodes = function() {
- jsTest.log("Starting new CSRS nodes");
- for (var i = 1; i < numCsrsMembers; ++i) {
- csrs.push(MongoRunner.runMongod({replSet: csrsName,
- configsvr: "",
- storageEngine: "wiredTiger"
- }));
- csrsConfig.members.push({ _id: i, host: csrs[i].name, votes: 0, priority: 0 });
- }
- csrsConfig.version = 2;
- jsTest.log("Adding non-voting members to csrs set: " + tojson(csrsConfig));
- assert.commandWorked(csrs[0].adminCommand({replSetReconfig: csrsConfig}));
-};
-
-this.waitUntilConfigsCaughtUp = function() {
- waitUntilAllNodesCaughtUp(csrs, 60000);
-};
+ /**
+ * Private helper method for waiting for a given node to return ismaster:true in its ismaster
+ * command response.
+ */
+ var _waitUntilMaster = function(dnode) {
+ var isMasterReply;
+ assert.soon(
+ function() {
+ isMasterReply = dnode.adminCommand({ismaster: 1});
+ return isMasterReply.ismaster;
+ },
+ function() {
+ return "Expected " + dnode.name + " to respond ismaster:true, but got " +
+ tojson(isMasterReply);
+ });
+ };
-/**
- * Stops one of the SCCC config servers, thus disabling changes to cluster metadata and preventing
- * any further writes to the config servers until the upgrade to CSRS is completed.
- */
-this.shutdownOneSCCCNode = function() {
- // Only shut down one of the SCCC config servers to avoid any period without any config servers
- // online.
- jsTest.log("Shutting down third SCCC config server node");
- MongoRunner.stopMongod(st.c2);
-};
+ /**
+ * Sets up the underlying sharded cluster in SCCC mode, and shards the test collection on _id.
+ */
+ this.setupSCCCCluster = function() {
+ if (TestData.storageEngine == "wiredTiger" || TestData.storageEngine == "") {
+ // TODO(schwerin): SERVER-19739 Support testing CSRS with storage engines other than
+ // wired
+ // tiger, when such other storage engines support majority read concern.
+ numCsrsMembers = 3;
+ } else {
+ numCsrsMembers = 4;
+ }
-/**
- * Allows all CSRS members to vote, in preparation for switching fully to CSRS mode.
- */
-this.allowAllCSRSNodesToVote = function() {
- csrsConfig.members.forEach(function (member) { member.votes = 1; member.priority = 1;});
- csrsConfig.version = 3;
- jsTest.log("Allowing all csrs members to vote: " + tojson(csrsConfig));
- assert.commandWorked(csrs[0].adminCommand({replSetReconfig: csrsConfig}));
-};
+ jsTest.log("Setting up SCCC sharded cluster");
-/**
- * Restarts the first member of the config server replica set without the --configsvrMode flag,
- * marking the official switchover from SCCC to CSRS mode. If the first config server doesn't
- * support readCommitted, waits for it to automatically go into the REMOVED state. Finally,
- * it shuts down the one remaining SCCC config server node now that it is no longer needed.
- */
-this.switchToCSRSMode = function() {
- jsTest.log("Restarting " + csrs[0].name + " in csrs mode");
- delete csrs0Opts.configsvrMode;
- try {
- csrs[0].adminCommand({replSetStepDown: 60});
- } catch (e) {} // Expected
- MongoRunner.stopMongod(csrs[0]);
- csrs[0] = MongoRunner.runMongod(csrs0Opts);
- var csrsStatus;
- assert.soon(function () {
- csrsStatus = csrs[0].adminCommand({replSetGetStatus: 1});
- if (csrsStatus.members[0].stateStr == "STARTUP" ||
- csrsStatus.members[0].stateStr == "STARTUP2" ||
- csrsStatus.members[0].stateStr == "RECOVERING") {
- // Make sure first node is fully online or else mongoses still in SCCC mode might not
- // find any node online to talk to.
- return false;
+ st = new ShardingTest({
+ name: "csrsUpgrade",
+ mongos: 2,
+ rs: {nodes: 3},
+ shards: 2,
+ nopreallocj: true,
+ other: {sync: true, enableBalancer: false, useHostname: true}
+ });
+
+ shardConfigs = st.s0.getCollection("config.shards").find().toArray();
+ assert.eq(2, shardConfigs.length);
+
+ jsTest.log("Enabling sharding on " + testDBName + " and making " + this.getShardName(0) +
+ " the primary shard");
+ assert.commandWorked(st.s0.adminCommand({enablesharding: testDBName}));
+ st.ensurePrimaryShard(testDBName, this.getShardName(0));
+
+ jsTest.log("Creating a sharded collection " + dataCollectionName);
+ assert.commandWorked(
+ st.s0.adminCommand({shardcollection: dataCollectionName, key: {_id: 1}}));
+ };
+
+ /**
+ * Restarts the first config server as a single node replica set, while still leaving the
+ * cluster
+ * operating in SCCC mode.
+ */
+ this.restartFirstConfigAsReplSet = function() {
+ jsTest.log("Restarting " + st.c0.name + " as a standalone replica set");
+ csrsConfig = {
+ _id: csrsName,
+ version: 1,
+ configsvr: true,
+ members: [{_id: 0, host: st.c0.name}]
+ };
+ assert.commandWorked(st.c0.adminCommand({replSetInitiate: csrsConfig}));
+ csrs = [];
+ csrs0Opts = Object.extend({}, st.c0.fullOptions, /* deep */ true);
+ csrs0Opts.restart = true; // Don't clean the data files from the old c0.
+ csrs0Opts.replSet = csrsName;
+ csrs0Opts.configsvrMode = "sccc";
+ MongoRunner.stopMongod(st.c0);
+ csrs.push(MongoRunner.runMongod(csrs0Opts));
+ _waitUntilMaster(csrs[0]);
+ };
+
+ /**
+ * Starts up the new members of the config server replica set as non-voting, priority zero
+ * nodes.
+ */
+ this.startNewCSRSNodes = function() {
+ jsTest.log("Starting new CSRS nodes");
+ for (var i = 1; i < numCsrsMembers; ++i) {
+ csrs.push(MongoRunner.runMongod(
+ {replSet: csrsName, configsvr: "", storageEngine: "wiredTiger"}));
+ csrsConfig.members.push({_id: i, host: csrs[i].name, votes: 0, priority: 0});
}
+ csrsConfig.version = 2;
+ jsTest.log("Adding non-voting members to csrs set: " + tojson(csrsConfig));
+ assert.commandWorked(csrs[0].adminCommand({replSetReconfig: csrsConfig}));
+ };
- var i;
- for (i = 0; i < csrsStatus.members.length; ++i) {
- if (csrsStatus.members[i].name == csrs[0].name) {
- var supportsCommitted =
- csrs[0].getDB("admin").serverStatus().storageEngine.supportsCommittedReads;
- var stateIsRemoved = csrsStatus.members[i].stateStr == "REMOVED";
- // If the storage engine supports committed reads, it shouldn't go into REMOVED
- // state, but if it does not then it should.
- if (supportsCommitted) {
- assert(!stateIsRemoved);
- } else if (!stateIsRemoved) {
+ this.waitUntilConfigsCaughtUp = function() {
+ waitUntilAllNodesCaughtUp(csrs, 60000);
+ };
+
+ /**
+ * Stops one of the SCCC config servers, thus disabling changes to cluster metadata and
+ * preventing
+ * any further writes to the config servers until the upgrade to CSRS is completed.
+ */
+ this.shutdownOneSCCCNode = function() {
+ // Only shut down one of the SCCC config servers to avoid any period without any config
+ // servers
+ // online.
+ jsTest.log("Shutting down third SCCC config server node");
+ MongoRunner.stopMongod(st.c2);
+ };
+
+ /**
+ * Allows all CSRS members to vote, in preparation for switching fully to CSRS mode.
+ */
+ this.allowAllCSRSNodesToVote = function() {
+ csrsConfig.members.forEach(function(member) {
+ member.votes = 1;
+ member.priority = 1;
+ });
+ csrsConfig.version = 3;
+ jsTest.log("Allowing all csrs members to vote: " + tojson(csrsConfig));
+ assert.commandWorked(csrs[0].adminCommand({replSetReconfig: csrsConfig}));
+ };
+
+ /**
+ * Restarts the first member of the config server replica set without the --configsvrMode flag,
+ * marking the official switchover from SCCC to CSRS mode. If the first config server doesn't
+ * support readCommitted, waits for it to automatically go into the REMOVED state. Finally,
+ * it shuts down the one remaining SCCC config server node now that it is no longer needed.
+ */
+ this.switchToCSRSMode = function() {
+ jsTest.log("Restarting " + csrs[0].name + " in csrs mode");
+ delete csrs0Opts.configsvrMode;
+ try {
+ csrs[0].adminCommand({replSetStepDown: 60});
+ } catch (e) {
+ } // Expected
+ MongoRunner.stopMongod(csrs[0]);
+ csrs[0] = MongoRunner.runMongod(csrs0Opts);
+ var csrsStatus;
+ assert.soon(
+ function() {
+ csrsStatus = csrs[0].adminCommand({replSetGetStatus: 1});
+ if (csrsStatus.members[0].stateStr == "STARTUP" ||
+ csrsStatus.members[0].stateStr == "STARTUP2" ||
+ csrsStatus.members[0].stateStr == "RECOVERING") {
+ // Make sure first node is fully online or else mongoses still in SCCC mode
+ // might not
+ // find any node online to talk to.
return false;
}
- }
- if (csrsStatus.members[i].stateStr == "PRIMARY") {
- return csrs[i].adminCommand({ismaster: 1}).ismaster;
- }
- }
- return false;
- }, function() {
- return "No primary or non-WT engine not removed in " + tojson(csrsStatus);
- });
-
- jsTest.log("Shutting down final SCCC config server now that upgrade is complete");
- MongoRunner.stopMongod(st.c1);
-};
+
+ var i;
+ for (i = 0; i < csrsStatus.members.length; ++i) {
+ if (csrsStatus.members[i].name == csrs[0].name) {
+ var supportsCommitted = csrs[0]
+ .getDB("admin")
+ .serverStatus()
+ .storageEngine.supportsCommittedReads;
+ var stateIsRemoved = csrsStatus.members[i].stateStr == "REMOVED";
+ // If the storage engine supports committed reads, it shouldn't go into
+ // REMOVED
+ // state, but if it does not then it should.
+ if (supportsCommitted) {
+ assert(!stateIsRemoved);
+ } else if (!stateIsRemoved) {
+ return false;
+ }
+ }
+ if (csrsStatus.members[i].stateStr == "PRIMARY") {
+ return csrs[i].adminCommand({ismaster: 1}).ismaster;
+ }
+ }
+ return false;
+ },
+ function() {
+ return "No primary or non-WT engine not removed in " + tojson(csrsStatus);
+ });
+
+ jsTest.log("Shutting down final SCCC config server now that upgrade is complete");
+ MongoRunner.stopMongod(st.c1);
+ };
}; \ No newline at end of file
diff --git a/jstests/libs/election_timing_test.js b/jstests/libs/election_timing_test.js
index 518ff2881f1..f462d7f2dc5 100644
--- a/jstests/libs/election_timing_test.js
+++ b/jstests/libs/election_timing_test.js
@@ -11,11 +11,7 @@ var ElectionTimingTest = function(opts) {
// The config is set to two electable nodes since we use waitForMemberState
// to wait for the electable secondary to become primary.
- this.nodes = opts.nodes || [
- {},
- {},
- {rsConfig: {arbiterOnly: true}}
- ];
+ this.nodes = opts.nodes || [{}, {}, {rsConfig: {arbiterOnly: true}}];
// The name of the replica set and of the collection.
this.name = opts.name || "election_timing";
@@ -52,10 +48,14 @@ var ElectionTimingTest = function(opts) {
ElectionTimingTest.prototype._runTimingTest = function() {
for (var run = 0; run < this.testRuns; run++) {
var collectionName = "test." + this.name;
- var cycleData = {testRun: run, results: []};
+ var cycleData = {
+ testRun: run,
+ results: []
+ };
jsTestLog("Starting ReplSetTest for test " + this.name + " run: " + run);
- this.rst = new ReplSetTest({name: this.name, nodes: this.nodes, nodeOptions: {verbose:""}});
+ this.rst =
+ new ReplSetTest({name: this.name, nodes: this.nodes, nodeOptions: {verbose: ""}});
this.rst.startSet();
// Get the replset config and apply the settings object.
@@ -87,9 +87,7 @@ ElectionTimingTest.prototype._runTimingTest = function() {
var coll = primary.getCollection(collectionName);
for (var i = 0; i < 100; i++) {
- assert.writeOK(coll.insert({_id: i,
- x: i * 3,
- arbitraryStr: "this is a string"}));
+ assert.writeOK(coll.insert({_id: i, x: i * 3, arbitraryStr: "this is a string"}));
}
// Run the election tests on this ReplSetTest instance.
@@ -123,10 +121,8 @@ ElectionTimingTest.prototype._runTimingTest = function() {
} catch (e) {
// If we didn"t find a primary, save the error, break so this
// ReplSetTest is stopped. We can"t continue from a flaky state.
- this.testErrors.push({testRun: run,
- cycle: cycle,
- status: "new primary not elected",
- error: e});
+ this.testErrors.push(
+ {testRun: run, cycle: cycle, status: "new primary not elected", error: e});
break;
}
@@ -140,16 +136,17 @@ ElectionTimingTest.prototype._runTimingTest = function() {
assert.neq(undefined, newElectionId, "isMaster() failed to return a valid electionId");
if (bsonWoCompare(oldElectionId, newElectionId) !== 0) {
- this.testErrors.push({testRun: run,
- cycle: cycle,
- status: "electionId not changed, no election was triggered"});
+ this.testErrors.push({
+ testRun: run,
+ cycle: cycle,
+ status: "electionId not changed, no election was triggered"
+ });
break;
}
if (primary.host === newPrimary.host) {
- this.testErrors.push({testRun: run,
- cycle: cycle,
- status: "Previous primary was re-elected"});
+ this.testErrors.push(
+ {testRun: run, cycle: cycle, status: "Previous primary was re-elected"});
break;
}
@@ -160,10 +157,8 @@ ElectionTimingTest.prototype._runTimingTest = function() {
try {
this.testReset();
} catch (e) {
- this.testErrors.push({testRun: run,
- cycle: cycle,
- status: "testReset() failed",
- error: e});
+ this.testErrors.push(
+ {testRun: run, cycle: cycle, status: "testReset() failed", error: e});
break;
}
}
@@ -192,14 +187,12 @@ ElectionTimingTest.prototype.stepDownPrimaryReset = function() {
};
ElectionTimingTest.prototype.waitForNewPrimary = function(rst, secondary) {
- assert.commandWorked(
- secondary.adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.PRIMARY,
- timeoutMillis: 60 * 1000
- }),
- "node " + secondary.host + " failed to become primary"
- );
+ assert.commandWorked(secondary.adminCommand({
+ replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.PRIMARY,
+ timeoutMillis: 60 * 1000
+ }),
+ "node " + secondary.host + " failed to become primary");
};
/**
@@ -227,8 +220,7 @@ ElectionTimingTest.calculateElectionTimeoutLimitMillis = function(primary) {
var assertSoonIntervalMillis = 200; // from assert.js
var applierDrainWaitMillis = 1000; // from SyncTail::tryPopAndWaitForMore()
var electionTimeoutLimitMillis =
- (1 + electionTimeoutOffsetLimitFraction) * electionTimeoutMillis +
- applierDrainWaitMillis +
+ (1 + electionTimeoutOffsetLimitFraction) * electionTimeoutMillis + applierDrainWaitMillis +
assertSoonIntervalMillis;
return electionTimeoutLimitMillis;
};
diff --git a/jstests/libs/fts.js b/jstests/libs/fts.js
index ff802bdbfea..eb5baec8a5a 100644
--- a/jstests/libs/fts.js
+++ b/jstests/libs/fts.js
@@ -1,25 +1,34 @@
// Utility functions for FTS tests
//
-function queryIDS( coll, search, filter, extra, limit ){
- var query = { "$text" : { "$search" : search }};
- if ( extra )
- query = { "$text" : Object.extend( { "$search" : search }, extra ) };
- if ( filter )
- Object.extend( query, filter );
+function queryIDS(coll, search, filter, extra, limit) {
+ var query = {
+ "$text": {"$search": search}
+ };
+ if (extra)
+ query = {
+ "$text": Object.extend({"$search": search}, extra)
+ };
+ if (filter)
+ Object.extend(query, filter);
var result;
if (limit)
- result = coll.find( query, { score: { "$meta" : "textScore" } } ).sort( { score: { "$meta" : "textScore" } }).limit(limit);
+ result = coll.find(query, {score: {"$meta": "textScore"}})
+ .sort({score: {"$meta": "textScore"}})
+ .limit(limit);
else
- result = coll.find( query, { score: { "$meta" : "textScore" } } ).sort( { score: { "$meta" : "textScore" } });
+ result = coll.find(query, {score: {"$meta": "textScore"}})
+ .sort({score: {"$meta": "textScore"}});
- return getIDS( result );
+ return getIDS(result);
}
// Return an array of _ids from a cursor
-function getIDS( cursor ){
- if ( ! cursor)
+function getIDS(cursor) {
+ if (!cursor)
return [];
- return cursor.map( function(z){ return z._id; } );
+ return cursor.map(function(z) {
+ return z._id;
+ });
}
diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js
index fa2811e506d..2af13814173 100644
--- a/jstests/libs/geo_near_random.js
+++ b/jstests/libs/geo_near_random.js
@@ -10,19 +10,20 @@ GeoNearRandomTest = function(name) {
print("starting test: " + name);
};
-
-GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){
- if(!indexBounds){
- scale = scale || 1; // scale is good for staying away from edges
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds) {
+ if (!indexBounds) {
+ scale = scale || 1; // scale is good for staying away from edges
return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
- }
- else{
+ } else {
var range = indexBounds.max - indexBounds.min;
var eps = Math.pow(2, -40);
// Go very close to the borders but not quite there.
- return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min];
+ return [
+ (Random.rand() * (range - eps) + eps) + indexBounds.min,
+ (Random.rand() * (range - eps) + eps) + indexBounds.min
+ ];
}
-
+
};
GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
@@ -30,75 +31,79 @@ GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
this.nPts = nPts;
var bulk = this.t.initializeUnorderedBulkOp();
- for (var i=0; i<nPts; i++){
- bulk.insert({ _id: i, loc: this.mkPt(scale, indexBounds) });
+ for (var i = 0; i < nPts; i++) {
+ bulk.insert({_id: i, loc: this.mkPt(scale, indexBounds)});
}
assert.writeOK(bulk.execute());
-
- if(!indexBounds)
+
+ if (!indexBounds)
this.t.ensureIndex({loc: '2d'});
else
this.t.ensureIndex({loc: '2d'}, indexBounds);
};
GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
- for (var i=0; i < short.length; i++){
-
+ for (var i = 0; i < short.length; i++) {
var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0];
var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1];
var dS = short[i].obj ? short[i].dis : 1;
-
+
var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0];
var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1];
var dL = long[i].obj ? long[i].dis : 1;
-
+
assert.eq([xS, yS, dS], [xL, yL, dL]);
}
-};
+};
GeoNearRandomTest.prototype.testPt = function(pt, opts) {
assert.neq(this.nPts, 0, "insertPoints not yet called");
opts = opts || {};
opts['sphere'] = opts['sphere'] || 0;
- opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O( N^2 )
+ opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O( N^2 )
print("testing point: " + tojson(pt) + " opts: " + tojson(opts));
-
- var cmd = {geoNear:this.t.getName(), near: pt, num: 1, spherical:opts.sphere};
+ var cmd = {
+ geoNear: this.t.getName(),
+ near: pt,
+ num: 1,
+ spherical: opts.sphere
+ };
var last = db.runCommand(cmd).results;
- for (var i=2; i <= opts.nToTest; i++){
- //print(i); // uncomment to watch status
+ for (var i = 2; i <= opts.nToTest; i++) {
+ // print(i); // uncomment to watch status
cmd.num = i;
var ret = db.runCommand(cmd).results;
try {
this.assertIsPrefix(last, ret);
} catch (e) {
- print("*** failed while compairing " + (i-1) + " and " + i);
+ print("*** failed while compairing " + (i - 1) + " and " + i);
printjson(cmd);
- throw e; // rethrow
+ throw e; // rethrow
}
-
+
// Make sure distances are in increasing order
assert.gte(ret[ret.length - 1].dis, last[last.length - 1].dis);
last = ret;
}
-
- if (!opts.sharded){
- last = last.map(function(x){return x.obj;});
+ if (!opts.sharded) {
+ last = last.map(function(x) {
+ return x.obj;
+ });
- var query = {loc:{}};
- query.loc[ opts.sphere ? '$nearSphere' : '$near' ] = pt;
+ var query = {
+ loc: {}
+ };
+ query.loc[opts.sphere ? '$nearSphere' : '$near'] = pt;
var near = this.t.find(query).limit(opts.nToTest).toArray();
this.assertIsPrefix(last, near);
assert.eq(last, near);
}
};
-
-
diff --git a/jstests/libs/host_ipaddr.js b/jstests/libs/host_ipaddr.js
index d6d5059aa19..d0c191f8ef2 100644
--- a/jstests/libs/host_ipaddr.js
+++ b/jstests/libs/host_ipaddr.js
@@ -10,12 +10,12 @@ function get_ipaddr() {
// Terminate path with / if defined
path += "/";
}
+ } catch (err) {
}
- catch (err) {}
- var ipFile = path+"ipaddr.log";
- var windowsCmd = "ipconfig > "+ipFile;
- var unixCmd = "/sbin/ifconfig | grep inet | grep -v '127.0.0.1' > "+ipFile;
+ var ipFile = path + "ipaddr.log";
+ var windowsCmd = "ipconfig > " + ipFile;
+ var unixCmd = "/sbin/ifconfig | grep inet | grep -v '127.0.0.1' > " + ipFile;
var ipAddr = null;
var hostType = null;
@@ -27,11 +27,10 @@ function get_ipaddr() {
runProgram('cmd.exe', '/c', windowsCmd);
ipAddr = cat(ipFile).match(/IPv4.*: (.*)/)[1];
} else {
- runProgram('bash', '-c', unixCmd);
+ runProgram('bash', '-c', unixCmd);
ipAddr = cat(ipFile).replace(/addr:/g, "").match(/inet (.[^ ]*) /)[1];
}
- }
- finally {
+ } finally {
removeFile(ipFile);
}
return ipAddr;
diff --git a/jstests/libs/override_methods/find_batch_size.js b/jstests/libs/override_methods/find_batch_size.js
index b7507c38ee7..ab773ded7ed 100644
--- a/jstests/libs/override_methods/find_batch_size.js
+++ b/jstests/libs/override_methods/find_batch_size.js
@@ -9,7 +9,7 @@
// TODO: Add support for overriding batch sizes in DBCommandCursor.prototype._runGetMoreCommand.
// TODO: Add support for overriding batch sizes in the bulk API.
-(function () {
+(function() {
'use strict';
// Save a reference to the original find method in the IIFE's scope.
diff --git a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
index ab69f1cb817..313bd7faf7c 100644
--- a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
+++ b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
@@ -6,7 +6,7 @@
* on the db object.
*/
-(function () {
+(function() {
'use strict';
// Save a reference to the original getCollection method in the IIFE's scope.
@@ -14,11 +14,7 @@
var originalGetCollection = DB.prototype.getCollection;
// Blacklisted namespaces that should not be sharded.
- var blacklistedNamespaces = [
- /\$cmd/,
- /^admin\./,
- /\.system\./,
- ];
+ var blacklistedNamespaces = [/\$cmd/, /^admin\./, /\.system\./, ];
DB.prototype.getCollection = function() {
var dbName = this.getName();
diff --git a/jstests/libs/override_methods/set_majority_read_and_write_concerns.js b/jstests/libs/override_methods/set_majority_read_and_write_concerns.js
index 7485be823a6..4e01fd9753a 100644
--- a/jstests/libs/override_methods/set_majority_read_and_write_concerns.js
+++ b/jstests/libs/override_methods/set_majority_read_and_write_concerns.js
@@ -4,7 +4,10 @@
*/
(function() {
"use strict";
- var defaultWriteConcern = {w: "majority", wtimeout: 60000};
+ var defaultWriteConcern = {
+ w: "majority",
+ wtimeout: 60000
+ };
var originalStartParallelShell = startParallelShell;
startParallelShell = function(jsCode, port, noConnect) {
@@ -58,19 +61,11 @@
// These commands do writes but do not support a writeConcern argument. Emulate it with a
// getLastError command.
- var commandsToEmulateWriteConcern = [
- "createIndexes",
- ];
+ var commandsToEmulateWriteConcern = ["createIndexes", ];
// These are reading commands that support majority readConcern.
- var commandsToForceReadConcern = [
- "count",
- "distinct",
- "find",
- "geoNear",
- "geoSearch",
- "group",
- ];
+ var commandsToForceReadConcern =
+ ["count", "distinct", "find", "geoNear", "geoSearch", "group", ];
var forceWriteConcern = Array.contains(commandsToForceWriteConcern, cmdName);
var emulateWriteConcern = Array.contains(commandsToEmulateWriteConcern, cmdName);
@@ -80,9 +75,7 @@
// Aggregate can be either a read or a write depending on whether it has a $out stage.
// $out is required to be the last stage of the pipeline.
var stages = obj.pipeline;
- var hasOut = stages &&
- (stages.length !== 0) &&
- ('$out' in stages[stages.length - 1]);
+ var hasOut = stages && (stages.length !== 0) && ('$out' in stages[stages.length - 1]);
if (hasOut) {
emulateWriteConcern = true;
} else {
@@ -93,16 +86,18 @@
if (forceWriteConcern) {
if (obj.hasOwnProperty("writeConcern")) {
jsTestLog("Warning: overriding existing writeConcern of: " +
- tojson(obj.writeConcern));
+ tojson(obj.writeConcern));
}
obj.writeConcern = defaultWriteConcern;
} else if (forceReadConcern) {
if (obj.hasOwnProperty("readConcern")) {
jsTestLog("Warning: overriding existing readConcern of: " +
- tojson(obj.readConcern));
+ tojson(obj.readConcern));
}
- obj.readConcern = {level: "majority"};
+ obj.readConcern = {
+ level: "majority"
+ };
}
var res = this.getMongo().runCommand(dbName, obj, options);
@@ -123,4 +118,3 @@
};
})();
-
diff --git a/jstests/libs/override_methods/sharding_continuous_config_stepdown.js b/jstests/libs/override_methods/sharding_continuous_config_stepdown.js
index b2f9b6d1c79..e111886f1b9 100644
--- a/jstests/libs/override_methods/sharding_continuous_config_stepdown.js
+++ b/jstests/libs/override_methods/sharding_continuous_config_stepdown.js
@@ -8,202 +8,223 @@ load('jstests/libs/parallelTester.js');
load("jstests/replsets/rslib.js");
(function() {
-'use strict';
+ 'use strict';
-// Preserve the original ReplSetTest and ShardingTest constructors, because we are overriding them
-var originalReplSetTest = ReplSetTest;
-var originalShardingTest = ShardingTest;
-
-/**
- * Overrides the ReplSetTest constructor to start the continuous config server stepdown thread.
- */
-ReplSetTest = function ReplSetTestWithContinuousPrimaryStepdown() {
- // Construct the original object
- originalReplSetTest.apply(this, arguments);
+ // Preserve the original ReplSetTest and ShardingTest constructors, because we are overriding
+ // them
+ var originalReplSetTest = ReplSetTest;
+ var originalShardingTest = ShardingTest;
/**
- * This function is intended to be called in a separate thread and it continuously steps down
- * the current primary for a number of attempts.
- *
- * @param {string} seedNode The connection string of a node from which to discover the primary
- * of the replica set.
- * @param {CountDownLatch} stopCounter Object, which can be used to stop the thread.
- *
- * @return Object with the following fields:
- * ok {integer}: 0 if it failed, 1 if it succeeded.
- * error {string}: Only present if ok == 0. Contains the cause for the error.
- * stack {string}: Only present if ok == 0. Contains the stack at the time of the error.
+ * Overrides the ReplSetTest constructor to start the continuous config server stepdown thread.
*/
- function _continuousPrimaryStepdownFn(seedNode, stopCounter) {
- 'use strict';
-
- var stepdownDelaySeconds = 10;
-
- print('*** Continuous stepdown thread running with seed node ' + seedNode);
-
- // The config primary may unexpectedly step down during startup if under heavy load and
- // too slowly processing heartbeats. When it steps down, it closes all of its connections.
- // This can happen during the call to new ReplSetTest, so in order to account for this and
- // make the tests stable, retry discovery of the replica set's configuration once
- // (SERVER-22794).
- var replSet;
- var networkErrorRetries = 1;
- while (networkErrorRetries >= 0) {
- try {
- replSet = new ReplSetTest(seedNode);
- break;
- } catch (e) {
- if ( ((networkErrorRetries--) > 0) &&
- (e.toString().indexOf("network error") > -1) ) {
- print("Error: " + e.toString() + "\nStacktrace: " + e.stack);
- print("Stepdown thread's config server connection was closed, retrying.");
- } else {
- print('*** Continuous stepdown thread failed to connect to the ' +
- 'config server: ' + tojson(e));
- return { ok: 0, error: e.toString(), stack: e.stack };
+ ReplSetTest = function ReplSetTestWithContinuousPrimaryStepdown() {
+ // Construct the original object
+ originalReplSetTest.apply(this, arguments);
+
+ /**
+ * This function is intended to be called in a separate thread and it continuously steps
+ *down
+ * the current primary for a number of attempts.
+ *
+ * @param {string} seedNode The connection string of a node from which to discover the
+ *primary
+ * of the replica set.
+ * @param {CountDownLatch} stopCounter Object, which can be used to stop the thread.
+ *
+ * @return Object with the following fields:
+ * ok {integer}: 0 if it failed, 1 if it succeeded.
+ * error {string}: Only present if ok == 0. Contains the cause for the error.
+ * stack {string}: Only present if ok == 0. Contains the stack at the time of the
+ *error.
+ */
+ function _continuousPrimaryStepdownFn(seedNode, stopCounter) {
+ 'use strict';
+
+ var stepdownDelaySeconds = 10;
+
+ print('*** Continuous stepdown thread running with seed node ' + seedNode);
+
+ // The config primary may unexpectedly step down during startup if under heavy load and
+ // too slowly processing heartbeats. When it steps down, it closes all of its
+ // connections.
+ // This can happen during the call to new ReplSetTest, so in order to account for this
+ // and
+ // make the tests stable, retry discovery of the replica set's configuration once
+ // (SERVER-22794).
+ var replSet;
+ var networkErrorRetries = 1;
+ while (networkErrorRetries >= 0) {
+ try {
+ replSet = new ReplSetTest(seedNode);
+ break;
+ } catch (e) {
+ if (((networkErrorRetries--) > 0) &&
+ (e.toString().indexOf("network error") > -1)) {
+ print("Error: " + e.toString() + "\nStacktrace: " + e.stack);
+ print("Stepdown thread's config server connection was closed, retrying.");
+ } else {
+ print('*** Continuous stepdown thread failed to connect to the ' +
+ 'config server: ' + tojson(e));
+ return {
+ ok: 0,
+ error: e.toString(),
+ stack: e.stack
+ };
+ }
}
}
- }
- try {
- var primary = replSet.getPrimary();
+ try {
+ var primary = replSet.getPrimary();
- while (stopCounter.getCount() > 0) {
- print('*** Stepping down ' + primary);
+ while (stopCounter.getCount() > 0) {
+ print('*** Stepping down ' + primary);
- assert.throws(function() {
- var result = primary.adminCommand({
- replSetStepDown: stepdownDelaySeconds,
- force: true });
- print('replSetStepDown command did not throw and returned: ' + tojson(result));
+ assert.throws(function() {
+ var result = primary.adminCommand(
+ {replSetStepDown: stepdownDelaySeconds, force: true});
+ print('replSetStepDown command did not throw and returned: ' +
+ tojson(result));
- // The call to replSetStepDown should never succeed
- assert.commandWorked(result);
- });
+ // The call to replSetStepDown should never succeed
+ assert.commandWorked(result);
+ });
- // Wait for primary to get elected and allow the test to make some progress before
- // attempting another stepdown.
- if (stopCounter.getCount() > 0)
- primary = replSet.getPrimary();
+ // Wait for primary to get elected and allow the test to make some progress
+ // before
+ // attempting another stepdown.
+ if (stopCounter.getCount() > 0)
+ primary = replSet.getPrimary();
- if (stopCounter.getCount() > 0)
- sleep(8000);
- }
+ if (stopCounter.getCount() > 0)
+ sleep(8000);
+ }
- print('*** Continuous stepdown thread completed successfully');
- return { ok: 1 };
- }
- catch (e) {
- print('*** Continuous stepdown thread caught exception: ' + tojson(e));
- return { ok: 0, error: e.toString(), stack: e.stack };
+ print('*** Continuous stepdown thread completed successfully');
+ return {
+ ok: 1
+ };
+ } catch (e) {
+ print('*** Continuous stepdown thread caught exception: ' + tojson(e));
+ return {
+ ok: 0,
+ error: e.toString(),
+ stack: e.stack
+ };
+ }
}
- }
- // Preserve the original stopSet method, because we are overriding it to stop the continuous
- // stepdown thread.
- var _originalStartSetFn = this.startSet;
- var _originalStopSetFn = this.stopSet;
+ // Preserve the original stopSet method, because we are overriding it to stop the continuous
+ // stepdown thread.
+ var _originalStartSetFn = this.startSet;
+ var _originalStopSetFn = this.stopSet;
+
+ // These two manage the scoped failover thread
+ var _scopedPrimaryStepdownThread;
+ var _scopedPrimaryStepdownThreadStopCounter;
+
+ /**
+ * Overrides the startSet call so we can increase the logging verbosity
+ */
+ this.startSet = function(options) {
+ if (!options) {
+ options = {};
+ }
+ options.verbose = 2;
+ return _originalStartSetFn.call(this, options);
+ };
+
+ /**
+ * Overrides the stopSet call so it terminates the failover thread.
+ */
+ this.stopSet = function() {
+ this.stopContinuousFailover();
+ _originalStopSetFn.apply(this, arguments);
+ };
+
+ /**
+ * Spawns a thread to invoke continuousPrimaryStepdownFn. See its comments for more
+ * information.
+ */
+ this.startContinuousFailover = function() {
+ if (_scopedPrimaryStepdownThread) {
+ throw new Error('Continuous failover thread is already active');
+ }
- // These two manage the scoped failover thread
- var _scopedPrimaryStepdownThread;
- var _scopedPrimaryStepdownThreadStopCounter;
+ _scopedPrimaryStepdownThreadStopCounter = new CountDownLatch(1);
+ _scopedPrimaryStepdownThread =
+ new ScopedThread(_continuousPrimaryStepdownFn,
+ this.nodes[0].host,
+ _scopedPrimaryStepdownThreadStopCounter);
+ _scopedPrimaryStepdownThread.start();
+ };
+
+ /**
+ * Blocking method, which tells the thread running continuousPrimaryStepdownFn to stop and
+ * waits
+ * for it to terminate.
+ */
+ this.stopContinuousFailover = function() {
+ if (!_scopedPrimaryStepdownThread) {
+ return;
+ }
- /**
- * Overrides the startSet call so we can increase the logging verbosity
- */
- this.startSet = function(options) {
- if (!options) {
- options = {};
- }
- options.verbose = 2;
- return _originalStartSetFn.call(this, options);
- };
+ _scopedPrimaryStepdownThreadStopCounter.countDown();
+ _scopedPrimaryStepdownThreadStopCounter = null;
- /**
- * Overrides the stopSet call so it terminates the failover thread.
- */
- this.stopSet = function() {
- this.stopContinuousFailover();
- _originalStopSetFn.apply(this, arguments);
- };
+ _scopedPrimaryStepdownThread.join();
- /**
- * Spawns a thread to invoke continuousPrimaryStepdownFn. See its comments for more information.
- */
- this.startContinuousFailover = function() {
- if (_scopedPrimaryStepdownThread) {
- throw new Error('Continuous failover thread is already active');
- }
+ var retVal = _scopedPrimaryStepdownThread.returnData();
+ _scopedPrimaryStepdownThread = null;
- _scopedPrimaryStepdownThreadStopCounter = new CountDownLatch(1);
- _scopedPrimaryStepdownThread = new ScopedThread(_continuousPrimaryStepdownFn,
- this.nodes[0].host,
- _scopedPrimaryStepdownThreadStopCounter);
- _scopedPrimaryStepdownThread.start();
+ return assert.commandWorked(retVal);
+ };
};
+ Object.extend(ReplSetTest, originalReplSetTest);
+
/**
- * Blocking method, which tells the thread running continuousPrimaryStepdownFn to stop and waits
- * for it to terminate.
+ * Overrides the ShardingTest constructor to start the continuous config server stepdown thread.
*/
- this.stopContinuousFailover = function() {
- if (!_scopedPrimaryStepdownThread) {
- return;
+ ShardingTest = function ShardingTestWithContinuousConfigPrimaryStepdown() {
+ if (!arguments[0].other) {
+ arguments[0].other = {};
+ }
+ arguments[0].verbose = 2;
+
+ // Set electionTimeoutMillis to 5 seconds, from 10, so that chunk migrations don't
+ // time out because of the CSRS primary being down so often for so long.
+ arguments[0].configReplSetTestOptions = Object.merge(arguments[0].configReplSetTestOptions,
+ {
+ settings: {
+ electionTimeoutMillis: 5000,
+ },
+ });
+
+ // Construct the original object
+ originalShardingTest.apply(this, arguments);
+
+ if (!this.configRS) {
+ throw new Error('Continuous config server step down only available with CSRS');
}
- _scopedPrimaryStepdownThreadStopCounter.countDown();
- _scopedPrimaryStepdownThreadStopCounter = null;
-
- _scopedPrimaryStepdownThread.join();
-
- var retVal = _scopedPrimaryStepdownThread.returnData();
- _scopedPrimaryStepdownThread = null;
-
- return assert.commandWorked(retVal);
- };
-};
-
-Object.extend(ReplSetTest, originalReplSetTest);
+ /**
+ * This method is disabled because it runs aggregation, which doesn't handle config server
+ * stepdown correctly.
+ */
+ this.printShardingStatus = function() {
-/**
- * Overrides the ShardingTest constructor to start the continuous config server stepdown thread.
- */
-ShardingTest = function ShardingTestWithContinuousConfigPrimaryStepdown() {
- if (!arguments[0].other) {
- arguments[0].other = {};
- }
- arguments[0].verbose = 2;
-
- // Set electionTimeoutMillis to 5 seconds, from 10, so that chunk migrations don't
- // time out because of the CSRS primary being down so often for so long.
- arguments[0].configReplSetTestOptions = Object.merge(arguments[0].configReplSetTestOptions, {
- settings: {
- electionTimeoutMillis: 5000,
- },
- });
-
- // Construct the original object
- originalShardingTest.apply(this, arguments);
-
- if (!this.configRS) {
- throw new Error('Continuous config server step down only available with CSRS');
- }
+ };
- /**
- * This method is disabled because it runs aggregation, which doesn't handle config server
- * stepdown correctly.
- */
- this.printShardingStatus = function() {
+ assert.eq(this.configRS.getReplSetConfigFromNode().settings.electionTimeoutMillis,
+ 5000,
+ "Failed to set the electionTimeoutMillis to 5000 milliseconds");
+ // Start the continuous config server stepdown thread
+ this.configRS.startContinuousFailover();
};
- assert.eq(this.configRS.getReplSetConfigFromNode().settings.electionTimeoutMillis, 5000,
- "Failed to set the electionTimeoutMillis to 5000 milliseconds");
-
- // Start the continuous config server stepdown thread
- this.configRS.startContinuousFailover();
-};
-
-Object.extend(ShardingTest, originalShardingTest);
+ Object.extend(ShardingTest, originalShardingTest);
})();
diff --git a/jstests/libs/parallelTester.js b/jstests/libs/parallelTester.js
index 0b230727cef..3639ab84bc1 100644
--- a/jstests/libs/parallelTester.js
+++ b/jstests/libs/parallelTester.js
@@ -1,225 +1,226 @@
/**
- * The ParallelTester class is used to test more than one test concurrently
+ * The ParallelTester class is used to test more than one test concurrently
*/
if (typeof _threadInject != "undefined") {
- Thread = function(){
- this.init.apply( this, arguments );
+ Thread = function() {
+ this.init.apply(this, arguments);
};
- _threadInject( Thread.prototype );
+ _threadInject(Thread.prototype);
ScopedThread = function() {
- this.init.apply( this, arguments );
+ this.init.apply(this, arguments);
};
- ScopedThread.prototype = new Thread( function() {} );
- _scopedThreadInject( ScopedThread.prototype );
+ ScopedThread.prototype = new Thread(function() {});
+ _scopedThreadInject(ScopedThread.prototype);
fork = function() {
- var t = new Thread( function() {} );
- Thread.apply( t, arguments );
+ var t = new Thread(function() {});
+ Thread.apply(t, arguments);
return t;
- };
+ };
// Helper class to generate a list of events which may be executed by a ParallelTester
- EventGenerator = function( me, collectionName, mean, host ) {
+ EventGenerator = function(me, collectionName, mean, host) {
this.mean = mean;
- if (host == undefined) host = db.getMongo().host;
- this.events = new Array( me, collectionName, host );
+ if (host == undefined)
+ host = db.getMongo().host;
+ this.events = new Array(me, collectionName, host);
};
- EventGenerator.prototype._add = function( action ) {
- this.events.push( [ Random.genExp( this.mean ), action ] );
+ EventGenerator.prototype._add = function(action) {
+ this.events.push([Random.genExp(this.mean), action]);
};
-
- EventGenerator.prototype.addInsert = function( obj ) {
- this._add( "t.insert( " + tojson( obj ) + " )" );
+
+ EventGenerator.prototype.addInsert = function(obj) {
+ this._add("t.insert( " + tojson(obj) + " )");
};
- EventGenerator.prototype.addRemove = function( obj ) {
- this._add( "t.remove( " + tojson( obj ) + " )" );
+ EventGenerator.prototype.addRemove = function(obj) {
+ this._add("t.remove( " + tojson(obj) + " )");
};
- EventGenerator.prototype.addUpdate = function( objOld, objNew ) {
- this._add( "t.update( " + tojson( objOld ) + ", " + tojson( objNew ) + " )" );
+ EventGenerator.prototype.addUpdate = function(objOld, objNew) {
+ this._add("t.update( " + tojson(objOld) + ", " + tojson(objNew) + " )");
};
-
- EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {
+
+ EventGenerator.prototype.addCheckCount = function(count, query, shouldPrint, checkQuery) {
query = query || {};
shouldPrint = shouldPrint || false;
checkQuery = checkQuery || false;
- var action = "assert.eq( " + count + ", t.count( " + tojson( query ) + " ) );";
- if ( checkQuery ) {
- action += " assert.eq( " + count + ", t.find( " + tojson( query ) + " ).toArray().length );";
+ var action = "assert.eq( " + count + ", t.count( " + tojson(query) + " ) );";
+ if (checkQuery) {
+ action +=
+ " assert.eq( " + count + ", t.find( " + tojson(query) + " ).toArray().length );";
}
- if ( shouldPrint ) {
+ if (shouldPrint) {
action += " print( me + ' ' + " + count + " );";
}
- this._add( action );
+ this._add(action);
};
-
+
EventGenerator.prototype.getEvents = function() {
return this.events;
};
-
+
EventGenerator.dispatch = function() {
var args = Array.from(arguments);
var me = args.shift();
var collectionName = args.shift();
var host = args.shift();
- var m = new Mongo( host );
- var t = m.getDB( "test" )[ collectionName ];
- for( var i in args ) {
- sleep( args[ i ][ 0 ] );
- eval( args[ i ][ 1 ] );
+ var m = new Mongo(host);
+ var t = m.getDB("test")[collectionName];
+ for (var i in args) {
+ sleep(args[i][0]);
+ eval(args[i][1]);
}
};
-
+
// Helper class for running tests in parallel. It assembles a set of tests
// and then calls assert.parallelests to run them.
ParallelTester = function() {
assert.neq(db.getMongo().writeMode(), "legacy", "wrong shell write mode");
this.params = new Array();
};
-
- ParallelTester.prototype.add = function( fun, args ) {
+
+ ParallelTester.prototype.add = function(fun, args) {
args = args || [];
- args.unshift( fun );
- this.params.push( args );
+ args.unshift(fun);
+ this.params.push(args);
};
-
- ParallelTester.prototype.run = function( msg, newScopes ) {
+
+ ParallelTester.prototype.run = function(msg, newScopes) {
newScopes = newScopes || false;
- assert.parallelTests( this.params, msg, newScopes );
+ assert.parallelTests(this.params, msg, newScopes);
};
-
+
// creates lists of tests from jstests dir in a format suitable for use by
// ParallelTester.fileTester. The lists will be in random order.
// n: number of lists to split these tests into
- ParallelTester.createJstestsLists = function( n ) {
+ ParallelTester.createJstestsLists = function(n) {
var params = new Array();
- for( var i = 0; i < n; ++i ) {
- params.push( [] );
+ for (var i = 0; i < n; ++i) {
+ params.push([]);
}
- var makeKeys = function( a ) {
+ var makeKeys = function(a) {
var ret = {};
- for( var i in a ) {
- ret[ a[ i ] ] = 1;
+ for (var i in a) {
+ ret[a[i]] = 1;
}
return ret;
};
-
+
// some tests can't run in parallel with most others
- var skipTests = makeKeys([ "repair.js",
- "cursor8.js",
- "recstore.js",
- "extent.js",
- "indexb.js",
-
- // Tests that set a parameter that causes the server to ignore
- // long index keys.
- "index_bigkeys_nofail.js",
- "index_bigkeys_validation.js",
-
- // tests turn on profiling
- "profile1.js",
- "profile3.js",
- "profile4.js",
- "profile5.js",
- "geo_s2cursorlimitskip.js",
-
- "mr_drop.js",
- "mr3.js",
- "indexh.js",
- "apitest_db.js",
- "evalb.js",
- "evald.js",
- "evalf.js",
- "killop.js",
- "run_program1.js",
- "notablescan.js",
- "drop2.js",
- "dropdb_race.js",
- "fsync2.js", // May be placed in serialTestsArr once SERVER-4243 is fixed.
- "bench_test1.js",
- "padding.js",
- "queryoptimizera.js",
- "loglong.js",// log might overflow before
- // this has a chance to see the message
- "connections_opened.js", // counts connections, globally
- "opcounters_write_cmd.js",
- "currentop.js", // SERVER-8673, plus rwlock yielding issues
- "set_param1.js", // changes global state
- "geo_update_btree2.js", // SERVER-11132 test disables table scans
- "update_setOnInsert.js", // SERVER-9982
- "max_time_ms.js", // Sensitive to query execution time, by design
- "collection_info_cache_race.js", // Requires collection exists
-
- // This overwrites MinKey/MaxKey's singleton which breaks
- // any other test that uses MinKey/MaxKey
- "type6.js",
-
- // Assumes that other tests are not creating cursors.
- "kill_cursors.js",
- ] );
-
+ var skipTests = makeKeys([
+ "repair.js",
+ "cursor8.js",
+ "recstore.js",
+ "extent.js",
+ "indexb.js",
+
+ // Tests that set a parameter that causes the server to ignore
+ // long index keys.
+ "index_bigkeys_nofail.js",
+ "index_bigkeys_validation.js",
+
+ // tests turn on profiling
+ "profile1.js",
+ "profile3.js",
+ "profile4.js",
+ "profile5.js",
+ "geo_s2cursorlimitskip.js",
+
+ "mr_drop.js",
+ "mr3.js",
+ "indexh.js",
+ "apitest_db.js",
+ "evalb.js",
+ "evald.js",
+ "evalf.js",
+ "killop.js",
+ "run_program1.js",
+ "notablescan.js",
+ "drop2.js",
+ "dropdb_race.js",
+ "fsync2.js", // May be placed in serialTestsArr once SERVER-4243 is fixed.
+ "bench_test1.js",
+ "padding.js",
+ "queryoptimizera.js",
+ "loglong.js", // log might overflow before
+ // this has a chance to see the message
+ "connections_opened.js", // counts connections, globally
+ "opcounters_write_cmd.js",
+ "currentop.js", // SERVER-8673, plus rwlock yielding issues
+ "set_param1.js", // changes global state
+ "geo_update_btree2.js", // SERVER-11132 test disables table scans
+ "update_setOnInsert.js", // SERVER-9982
+ "max_time_ms.js", // Sensitive to query execution time, by design
+ "collection_info_cache_race.js", // Requires collection exists
+
+ // This overwrites MinKey/MaxKey's singleton which breaks
+ // any other test that uses MinKey/MaxKey
+ "type6.js",
+
+ // Assumes that other tests are not creating cursors.
+ "kill_cursors.js",
+ ]);
+
var parallelFilesDir = "jstests/core";
-
+
// some tests can't be run in parallel with each other
- var serialTestsArr = [ parallelFilesDir + "/fsync.js",
- parallelFilesDir + "/auth1.js",
-
- // These tests expect the profiler to be on or off at specific points
- // during the test run.
- parallelFilesDir + "/cursor6.js",
- parallelFilesDir + "/profile2.js",
- parallelFilesDir + "/updatee.js"
- ];
- var serialTests = makeKeys( serialTestsArr );
-
- // prefix the first thread with the serialTests
+ var serialTestsArr = [
+ parallelFilesDir + "/fsync.js",
+ parallelFilesDir + "/auth1.js",
+
+ // These tests expect the profiler to be on or off at specific points
+ // during the test run.
+ parallelFilesDir + "/cursor6.js",
+ parallelFilesDir + "/profile2.js",
+ parallelFilesDir + "/updatee.js"
+ ];
+ var serialTests = makeKeys(serialTestsArr);
+
+ // prefix the first thread with the serialTests
// (which we will exclude from the rest of the threads below)
- params[ 0 ] = serialTestsArr;
- var files = listFiles( parallelFilesDir );
- files = Array.shuffle( files );
-
+ params[0] = serialTestsArr;
+ var files = listFiles(parallelFilesDir);
+ files = Array.shuffle(files);
+
var i = 0;
- files.forEach(
- function(x) {
- if ( ( /[\/\\]_/.test(x.name) ) ||
- ( ! /\.js$/.test(x.name) ) ||
- ( x.name.match(parallelFilesDir + "/(.*\.js)")[1] in skipTests ) || //
- ( x.name in serialTests )) {
- print(" >>>>>>>>>>>>>>> skipping " + x.name);
- return;
- }
- // add the test to run in one of the threads.
- params[ i % n ].push( x.name );
- ++i;
- }
- );
+ files.forEach(function(x) {
+ if ((/[\/\\]_/.test(x.name)) || (!/\.js$/.test(x.name)) ||
+ (x.name.match(parallelFilesDir + "/(.*\.js)")[1] in skipTests) || //
+ (x.name in serialTests)) {
+ print(" >>>>>>>>>>>>>>> skipping " + x.name);
+ return;
+ }
+ // add the test to run in one of the threads.
+ params[i % n].push(x.name);
+ ++i;
+ });
// randomize ordering of the serialTests
- params[ 0 ] = Array.shuffle( params[ 0 ] );
+ params[0] = Array.shuffle(params[0]);
- for( var i in params ) {
- params[ i ].unshift( i );
+ for (var i in params) {
+ params[i].unshift(i);
}
return params;
};
-
+
// runs a set of test files
// first argument is an identifier for this tester, remaining arguments are file names
ParallelTester.fileTester = function() {
var args = Array.from(arguments);
var suite = args.shift();
- args.forEach(
- function( x ) {
- print(" S" + suite + " Test : " + x + " ...");
- var time = Date.timeFunc( function() { load(x); }, 1);
- print(" S" + suite + " Test : " + x + " " + time + "ms" );
- }
- );
+ args.forEach(function(x) {
+ print(" S" + suite + " Test : " + x + " ...");
+ var time = Date.timeFunc(function() {
+ load(x);
+ }, 1);
+ print(" S" + suite + " Test : " + x + " " + time + "ms");
+ });
};
// params: array of arrays, each element of which consists of a function followed
@@ -227,52 +228,49 @@ if (typeof _threadInject != "undefined") {
// be called in a separate thread.
// msg: failure message
// newScopes: if true, each thread starts in a fresh scope
- assert.parallelTests = function( params, msg, newScopes ) {
+ assert.parallelTests = function(params, msg, newScopes) {
newScopes = newScopes || false;
- var wrapper = function( fun, argv ) {
- eval (
- "var z = function() {" +
- "TestData = " + tojson(TestData) + ";" +
- "var __parallelTests__fun = " + fun.toString() + ";" +
- "var __parallelTests__argv = " + tojson( argv ) + ";" +
- "var __parallelTests__passed = false;" +
- "try {" +
- "__parallelTests__fun.apply( 0, __parallelTests__argv );" +
- "__parallelTests__passed = true;" +
- "} catch ( e ) {" +
- "print('');" +
- "print( '********** Parallel Test FAILED: ' + tojson(e) );" +
- "print('');" +
- "}" +
- "return __parallelTests__passed;" +
- "}"
- );
+ var wrapper = function(fun, argv) {
+ eval("var z = function() {" + "TestData = " + tojson(TestData) + ";" +
+ "var __parallelTests__fun = " + fun.toString() + ";" +
+ "var __parallelTests__argv = " + tojson(argv) + ";" +
+ "var __parallelTests__passed = false;" + "try {" +
+ "__parallelTests__fun.apply( 0, __parallelTests__argv );" +
+ "__parallelTests__passed = true;" + "} catch ( e ) {" + "print('');" +
+ "print( '********** Parallel Test FAILED: ' + tojson(e) );" + "print('');" + "}" +
+ "return __parallelTests__passed;" + "}");
return z;
};
var runners = new Array();
- for( var i in params ) {
- var param = params[ i ];
+ for (var i in params) {
+ var param = params[i];
var test = param.shift();
var t;
- if ( newScopes )
- t = new ScopedThread( wrapper( test, param ) );
+ if (newScopes)
+ t = new ScopedThread(wrapper(test, param));
else
- t = new Thread( wrapper( test, param ) );
- runners.push( t );
+ t = new Thread(wrapper(test, param));
+ runners.push(t);
}
-
- runners.forEach( function( x ) { x.start(); } );
+
+ runners.forEach(function(x) {
+ x.start();
+ });
var nFailed = 0;
// SpiderMonkey doesn't like it if we exit before all threads are joined
// (see SERVER-19615 for a similar issue).
- runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );
- assert.eq( 0, nFailed, msg );
+ runners.forEach(function(x) {
+ if (!x.returnData()) {
+ ++nFailed;
+ }
+ });
+ assert.eq(0, nFailed, msg);
};
}
-if ( typeof CountDownLatch !== 'undefined' ) {
+if (typeof CountDownLatch !== 'undefined') {
CountDownLatch = Object.extend(function(count) {
- if (! (this instanceof CountDownLatch)) {
+ if (!(this instanceof CountDownLatch)) {
return new CountDownLatch(count);
}
this._descriptor = CountDownLatch._new.apply(null, arguments);
diff --git a/jstests/libs/slow_weekly_util.js b/jstests/libs/slow_weekly_util.js
index ba0c6a78229..7bc60bfcd61 100644
--- a/jstests/libs/slow_weekly_util.js
+++ b/jstests/libs/slow_weekly_util.js
@@ -1,5 +1,5 @@
-SlowWeeklyMongod = function( name ) {
+SlowWeeklyMongod = function(name) {
this.name = name;
this.start = new Date();
@@ -7,13 +7,13 @@ SlowWeeklyMongod = function( name ) {
this.port = this.conn.port;
};
-SlowWeeklyMongod.prototype.getDB = function( name ) {
- return this.conn.getDB( name );
+SlowWeeklyMongod.prototype.getDB = function(name) {
+ return this.conn.getDB(name);
};
-SlowWeeklyMongod.prototype.stop = function(){
- MongoRunner.stopMongod( this.conn );
+SlowWeeklyMongod.prototype.stop = function() {
+ MongoRunner.stopMongod(this.conn);
var end = new Date();
- print( "slowWeekly test: " + this.name + " completed successfully in " + ( ( end.getTime() - this.start.getTime() ) / 1000 ) + " seconds" );
+ print("slowWeekly test: " + this.name + " completed successfully in " +
+ ((end.getTime() - this.start.getTime()) / 1000) + " seconds");
};
-
diff --git a/jstests/libs/ssl_test.js b/jstests/libs/ssl_test.js
index 59380776bf8..11874ebc01e 100644
--- a/jstests/libs/ssl_test.js
+++ b/jstests/libs/ssl_test.js
@@ -16,7 +16,6 @@
* "jstests/libs/ca.pem").
*/
function SSLTest(serverOpts, clientOpts) {
-
var canonicalServerOpts = function(userProvidedOpts) {
var canonical = Object.extend({}, userProvidedOpts || {});
@@ -51,14 +50,14 @@ SSLTest.prototype.defaultSSLClientOptions = {
"ssl": "",
"sslPEMKeyFile": "jstests/libs/client.pem",
"sslAllowInvalidCertificates": "",
- "eval": ";" // prevent the shell from entering interactive mode
+ "eval": ";" // prevent the shell from entering interactive mode
};
/**
* The default shell arguments for a shell without SSL enabled.
*/
SSLTest.prototype.noSSLClientOptions = {
- eval: ";" // prevent the shell from entering interactive mode
+ eval: ";" // prevent the shell from entering interactive mode
};
/**
@@ -75,8 +74,7 @@ SSLTest.prototype.connectWorked = function() {
var serverPID = _startMongoProgram.apply(null, serverArgv);
try {
assert.soon(function() {
- return checkProgram(serverPID) &&
- (0 === _runMongoProgram.apply(null, clientArgv));
+ return checkProgram(serverPID) && (0 === _runMongoProgram.apply(null, clientArgv));
}, "connect failed", connectTimeoutMillis);
} catch (ex) {
return false;
diff --git a/jstests/libs/test_background_ops.js b/jstests/libs/test_background_ops.js
index 7adf41e9780..7c8ebc16883 100644
--- a/jstests/libs/test_background_ops.js
+++ b/jstests/libs/test_background_ops.js
@@ -5,340 +5,331 @@
/**
* Allows synchronization between background ops and the test operations
*/
-var waitForLock = function( mongo, name ){
-
+var waitForLock = function(mongo, name) {
+
var ts = new ObjectId();
- var lockColl = mongo.getCollection( "config.testLocks" );
-
- lockColl.update({ _id : name, state : 0 }, { $set : { state : 0 } }, true);
-
+ var lockColl = mongo.getCollection("config.testLocks");
+
+ lockColl.update({_id: name, state: 0}, {$set: {state: 0}}, true);
+
//
// Wait until we can set the state to 1 with our id
//
-
+
var startTime = new Date().getTime();
-
- assert.soon( function() {
- lockColl.update({ _id : name, state : 0 }, { $set : { ts : ts, state : 1 } });
+
+ assert.soon(function() {
+ lockColl.update({_id: name, state: 0}, {$set: {ts: ts, state: 1}});
var gleObj = lockColl.getDB().getLastErrorObj();
-
- if( new Date().getTime() - startTime > 20 * 1000 ){
- print( "Waiting for..." );
- printjson( gleObj );
- printjson( lockColl.findOne() );
- printjson( ts );
+
+ if (new Date().getTime() - startTime > 20 * 1000) {
+ print("Waiting for...");
+ printjson(gleObj);
+ printjson(lockColl.findOne());
+ printjson(ts);
}
-
+
return gleObj.n == 1 || gleObj.updatedExisting;
- }, "could not acquire lock", 30 * 1000, 100 );
-
- print( "Acquired lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
- tojson( lockColl.findOne({ _id : name }) ) );
-
+ }, "could not acquire lock", 30 * 1000, 100);
+
+ print("Acquired lock " + tojson({_id: name, ts: ts}) + " curr : " +
+ tojson(lockColl.findOne({_id: name})));
+
// Set the state back to 0
- var unlock = function(){
- print( "Releasing lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
- tojson( lockColl.findOne({ _id : name }) ) );
- lockColl.update({ _id : name, ts : ts }, { $set : { state : 0 } });
+ var unlock = function() {
+ print("Releasing lock " + tojson({_id: name, ts: ts}) + " curr : " +
+ tojson(lockColl.findOne({_id: name})));
+ lockColl.update({_id: name, ts: ts}, {$set: {state: 0}});
};
-
+
// Return an object we can invoke unlock on
- return { unlock : unlock };
+ return {
+ unlock: unlock
+ };
};
/**
* Allows a test or background op to say it's finished
*/
-var setFinished = function( mongo, name, finished ){
- if( finished || finished == undefined )
- mongo.getCollection( "config.testFinished" ).update({ _id : name }, { _id : name }, true );
+var setFinished = function(mongo, name, finished) {
+ if (finished || finished == undefined)
+ mongo.getCollection("config.testFinished").update({_id: name}, {_id: name}, true);
else
- mongo.getCollection( "config.testFinished" ).remove({ _id : name });
+ mongo.getCollection("config.testFinished").remove({_id: name});
};
/**
* Checks whether a test or background op is finished
*/
-var isFinished = function( mongo, name ){
- return mongo.getCollection( "config.testFinished" ).findOne({ _id : name }) != null;
+var isFinished = function(mongo, name) {
+ return mongo.getCollection("config.testFinished").findOne({_id: name}) != null;
};
/**
* Sets the result of a background op
*/
-var setResult = function( mongo, name, result, err ){
- mongo.getCollection( "config.testResult" ).update({ _id : name }, { _id : name, result : result, err : err }, true );
+var setResult = function(mongo, name, result, err) {
+ mongo.getCollection("config.testResult")
+ .update({_id: name}, {_id: name, result: result, err: err}, true);
};
/**
* Gets the result for a background op
*/
-var getResult = function( mongo, name ){
- return mongo.getCollection( "config.testResult" ).findOne({ _id : name });
+var getResult = function(mongo, name) {
+ return mongo.getCollection("config.testResult").findOne({_id: name});
};
/**
* Overrides the parallel shell code in mongo
*/
-function startParallelShell( jsCode, port ){
-
+function startParallelShell(jsCode, port) {
var x;
- if ( port ) {
- x = startMongoProgramNoConnect( "mongo" , "--port" , port , "--eval" , jsCode );
+ if (port) {
+ x = startMongoProgramNoConnect("mongo", "--port", port, "--eval", jsCode);
} else {
- x = startMongoProgramNoConnect( "mongo" , "--eval" , jsCode , db ? db.getMongo().host : null );
+ x = startMongoProgramNoConnect("mongo", "--eval", jsCode, db ? db.getMongo().host : null);
}
-
- return function(){
- jsTestLog( "Waiting for shell " + x + "..." );
- waitProgram( x );
- jsTestLog( "Shell " + x + " finished." );
+
+ return function() {
+ jsTestLog("Waiting for shell " + x + "...");
+ waitProgram(x);
+ jsTestLog("Shell " + x + " finished.");
};
}
-startParallelOps = function( mongo, proc, args, context ){
-
+startParallelOps = function(mongo, proc, args, context) {
+
var procName = proc.name + "-" + new ObjectId();
- var seed = new ObjectId( new ObjectId().valueOf().split("").reverse().join("") )
- .getTimestamp().getTime();
-
+ var seed = new ObjectId(new ObjectId().valueOf().split("").reverse().join(""))
+ .getTimestamp()
+ .getTime();
+
// Make sure we aren't finished before we start
- setFinished( mongo, procName, false );
- setResult( mongo, procName, undefined, undefined );
-
+ setFinished(mongo, procName, false);
+ setResult(mongo, procName, undefined, undefined);
+
// TODO: Make this a context of its own
- var procContext = { procName : procName,
- seed : seed,
- waitForLock : waitForLock,
- setFinished : setFinished,
- isFinished : isFinished,
- setResult : setResult,
-
- setup : function( context, stored ){
-
- waitForLock = function(){
- return context.waitForLock( db.getMongo(), context.procName );
- };
- setFinished = function( finished ){
- return context.setFinished( db.getMongo(), context.procName, finished );
- };
- isFinished = function(){
- return context.isFinished( db.getMongo(), context.procName );
- };
- setResult = function( result, err ){
- return context.setResult( db.getMongo(), context.procName, result, err );
- };
- }};
-
- var bootstrapper = function( stored ){
-
- var procContext = stored.procContext;
- procContext.setup( procContext, stored );
-
- var contexts = stored.contexts;
- eval( "contexts = " + contexts );
-
- for( var i = 0; i < contexts.length; i++ ){
- if( typeof( contexts[i] ) != "undefined" ){
+ var procContext = {
+ procName: procName,
+ seed: seed,
+ waitForLock: waitForLock,
+ setFinished: setFinished,
+ isFinished: isFinished,
+ setResult: setResult,
+
+ setup: function(context, stored) {
+
+ waitForLock = function() {
+ return context.waitForLock(db.getMongo(), context.procName);
+ };
+ setFinished = function(finished) {
+ return context.setFinished(db.getMongo(), context.procName, finished);
+ };
+ isFinished = function() {
+ return context.isFinished(db.getMongo(), context.procName);
+ };
+ setResult = function(result, err) {
+ return context.setResult(db.getMongo(), context.procName, result, err);
+ };
+ }
+ };
+
+ var bootstrapper = function(stored) {
+
+ var procContext = stored.procContext;
+ procContext.setup(procContext, stored);
+
+ var contexts = stored.contexts;
+ eval("contexts = " + contexts);
+
+ for (var i = 0; i < contexts.length; i++) {
+ if (typeof(contexts[i]) != "undefined") {
// Evaluate all contexts
- contexts[i]( procContext );
+ contexts[i](procContext);
}
}
-
+
var operation = stored.operation;
- eval( "operation = " + operation );
-
+ eval("operation = " + operation);
+
var args = stored.args;
- eval( "args = " + args );
-
+ eval("args = " + args);
+
result = undefined;
err = undefined;
-
- try{
- result = operation.apply( null, args );
- }
- catch( e ){
- err = e;
+
+ try {
+ result = operation.apply(null, args);
+ } catch (e) {
+ err = e;
}
-
- setResult( result, err );
+
+ setResult(result, err);
};
-
- var contexts = [ RandomFunctionContext, context ];
-
- var testDataColl = mongo.getCollection( "config.parallelTest" );
-
- testDataColl.insert({ _id : procName,
- bootstrapper : tojson( bootstrapper ),
- operation : tojson( proc ),
- args : tojson( args ),
- procContext : procContext,
- contexts : tojson( contexts ) });
-
- assert.eq( null, testDataColl.getDB().getLastError() );
-
- var bootstrapStartup =
- "{ var procName = '" + procName + "'; " +
- "var stored = db.getMongo().getCollection( '" + testDataColl + "' )" +
- ".findOne({ _id : procName }); " +
- "var bootstrapper = stored.bootstrapper; " +
- "eval( 'bootstrapper = ' + bootstrapper ); " +
- "bootstrapper( stored ); " +
- "}";
-
+
+ var contexts = [RandomFunctionContext, context];
+
+ var testDataColl = mongo.getCollection("config.parallelTest");
+
+ testDataColl.insert({
+ _id: procName,
+ bootstrapper: tojson(bootstrapper),
+ operation: tojson(proc),
+ args: tojson(args),
+ procContext: procContext,
+ contexts: tojson(contexts)
+ });
+
+ assert.eq(null, testDataColl.getDB().getLastError());
+
+ var bootstrapStartup = "{ var procName = '" + procName + "'; " +
+ "var stored = db.getMongo().getCollection( '" + testDataColl + "' )" +
+ ".findOne({ _id : procName }); " + "var bootstrapper = stored.bootstrapper; " +
+ "eval( 'bootstrapper = ' + bootstrapper ); " + "bootstrapper( stored ); " + "}";
+
// Save the global db object if it exists, so that we can restore it after starting the parallel
// shell.
var oldDB = undefined;
if (typeof db !== 'undefined') {
oldDB = db;
}
- db = mongo.getDB( "test" );
-
- jsTest.log( "Starting " + proc.name + " operations..." );
-
- var rawJoin = startParallelShell( bootstrapStartup );
-
+ db = mongo.getDB("test");
+
+ jsTest.log("Starting " + proc.name + " operations...");
+
+ var rawJoin = startParallelShell(bootstrapStartup);
+
db = oldDB;
-
-
- var join = function(){
- setFinished( mongo, procName, true );
-
+
+ var join = function() {
+ setFinished(mongo, procName, true);
+
rawJoin();
- result = getResult( mongo, procName );
-
- assert.neq( result, null );
-
- if( result.err ) throw Error("Error in parallel ops " + procName + " : "
- + tojson( result.err ) );
-
- else return result.result;
+ result = getResult(mongo, procName);
+
+ assert.neq(result, null);
+
+ if (result.err)
+ throw Error("Error in parallel ops " + procName + " : " + tojson(result.err));
+
+ else
+ return result.result;
};
-
- join.isFinished = function(){
- return isFinished( mongo, procName );
+
+ join.isFinished = function() {
+ return isFinished(mongo, procName);
};
-
- join.setFinished = function( finished ){
- return setFinished( mongo, procName, finished );
+
+ join.setFinished = function(finished) {
+ return setFinished(mongo, procName, finished);
};
-
- join.waitForLock = function( name ){
- return waitForLock( mongo, name );
+
+ join.waitForLock = function(name) {
+ return waitForLock(mongo, name);
};
-
+
return join;
};
-var RandomFunctionContext = function( context ){
-
- Random.srand( context.seed );
-
- Random.randBool = function(){ return Random.rand() > 0.5; };
-
- Random.randInt = function( min, max ){
-
- if( max == undefined ){
+var RandomFunctionContext = function(context) {
+
+ Random.srand(context.seed);
+
+ Random.randBool = function() {
+ return Random.rand() > 0.5;
+ };
+
+ Random.randInt = function(min, max) {
+
+ if (max == undefined) {
max = min;
min = 0;
}
-
- return min + Math.floor( Random.rand() * max );
+
+ return min + Math.floor(Random.rand() * max);
};
-
- Random.randShardKey = function(){
-
- var numFields = 2; //Random.randInt(1, 3)
-
+
+ Random.randShardKey = function() {
+
+ var numFields = 2; // Random.randInt(1, 3)
+
var key = {};
- for( var i = 0; i < numFields; i++ ){
- var field = String.fromCharCode( "a".charCodeAt() + i );
- key[ field ] = 1;
+ for (var i = 0; i < numFields; i++) {
+ var field = String.fromCharCode("a".charCodeAt() + i);
+ key[field] = 1;
}
-
+
return key;
};
-
- Random.randShardKeyValue = function( shardKey ){
-
+
+ Random.randShardKeyValue = function(shardKey) {
+
var keyValue = {};
- for( field in shardKey ){
- keyValue[ field ] = Random.randInt(1, 100);
+ for (field in shardKey) {
+ keyValue[field] = Random.randInt(1, 100);
}
-
+
return keyValue;
};
-
- Random.randCluster = function(){
-
- var numShards = 2; //Random.randInt( 1, 10 )
- var rs = false; //Random.randBool()
- var st = new ShardingTest({ shards : numShards,
- mongos : 4,
- other : { rs : rs } });
-
+
+ Random.randCluster = function() {
+
+ var numShards = 2; // Random.randInt( 1, 10 )
+ var rs = false; // Random.randBool()
+ var st = new ShardingTest({shards: numShards, mongos: 4, other: {rs: rs}});
+
return st;
};
};
-
//
// Some utility operations
//
-function moveOps( collName, options ){
-
+function moveOps(collName, options) {
options = options || {};
-
- var admin = db.getMongo().getDB( "admin" );
- var config = db.getMongo().getDB( "config" );
+
+ var admin = db.getMongo().getDB("admin");
+ var config = db.getMongo().getDB("config");
var shards = config.shards.find().toArray();
- var shardKey = config.collections.findOne({ _id : collName }).key;
-
- while( ! isFinished() ){
-
- var findKey = Random.randShardKeyValue( shardKey );
- var toShard = shards[ Random.randInt( shards.length ) ]._id;
-
+ var shardKey = config.collections.findOne({_id: collName}).key;
+
+ while (!isFinished()) {
+ var findKey = Random.randShardKeyValue(shardKey);
+ var toShard = shards[Random.randInt(shards.length)]._id;
+
try {
- printjson( admin.runCommand({ moveChunk : collName,
- find : findKey,
- to : toShard }) );
- }
- catch( e ){
- printjson( e );
+ printjson(admin.runCommand({moveChunk: collName, find: findKey, to: toShard}));
+ } catch (e) {
+ printjson(e);
}
-
- sleep( 1000 );
+
+ sleep(1000);
}
-
- jsTest.log( "Stopping moveOps..." );
+
+ jsTest.log("Stopping moveOps...");
}
-function splitOps( collName, options ){
-
+function splitOps(collName, options) {
options = options || {};
-
- var admin = db.getMongo().getDB( "admin" );
- var config = db.getMongo().getDB( "config" );
+
+ var admin = db.getMongo().getDB("admin");
+ var config = db.getMongo().getDB("config");
var shards = config.shards.find().toArray();
- var shardKey = config.collections.findOne({ _id : collName }).key;
-
- while( ! isFinished() ){
-
- var middleKey = Random.randShardKeyValue( shardKey );
-
+ var shardKey = config.collections.findOne({_id: collName}).key;
+
+ while (!isFinished()) {
+ var middleKey = Random.randShardKeyValue(shardKey);
+
try {
- printjson( admin.runCommand({ split : collName,
- middle : middleKey }) );
+ printjson(admin.runCommand({split: collName, middle: middleKey}));
+ } catch (e) {
+ printjson(e);
}
- catch( e ){
- printjson( e );
- }
-
- sleep( 1000 );
+
+ sleep(1000);
}
-
- jsTest.log( "Stopping splitOps..." );
-}
+ jsTest.log("Stopping splitOps...");
+}
diff --git a/jstests/libs/trace_missing_docs.js b/jstests/libs/trace_missing_docs.js
index c70798fe99b..3bc9ef75333 100644
--- a/jstests/libs/trace_missing_docs.js
+++ b/jstests/libs/trace_missing_docs.js
@@ -3,86 +3,90 @@
// On error inserting documents, traces back and shows where the document was dropped
//
-function traceMissingDoc( coll, doc, mongos ) {
+function traceMissingDoc(coll, doc, mongos) {
+ if (mongos)
+ coll = mongos.getCollection(coll + "");
+ else
+ mongos = coll.getMongo();
- if (mongos) coll = mongos.getCollection(coll + "");
- else mongos = coll.getMongo();
-
- var config = mongos.getDB( "config" );
+ var config = mongos.getDB("config");
var shards = config.shards.find().toArray();
- for ( var i = 0; i < shards.length; i++ ) {
- shards[i].conn = new Mongo( shards[i].host );
+ for (var i = 0; i < shards.length; i++) {
+ shards[i].conn = new Mongo(shards[i].host);
}
-
- var shardKeyPatt = config.collections.findOne({ _id : coll + "" }).key;
-
+
+ var shardKeyPatt = config.collections.findOne({_id: coll + ""}).key;
+
// Project out the shard key
var shardKey = {};
- for ( var k in shardKeyPatt ) {
- if ( doc[k] == undefined ) {
- jsTest.log( "Shard key " + tojson( shardKey ) +
- " not found in doc " + tojson( doc ) +
- ", falling back to _id search..." );
- shardKeyPatt = { _id : 1 };
- shardKey = { _id : doc['_id'] };
+ for (var k in shardKeyPatt) {
+ if (doc[k] == undefined) {
+ jsTest.log("Shard key " + tojson(shardKey) + " not found in doc " + tojson(doc) +
+ ", falling back to _id search...");
+ shardKeyPatt = {
+ _id: 1
+ };
+ shardKey = {
+ _id: doc['_id']
+ };
break;
}
shardKey[k] = doc[k];
}
-
- if ( doc['_id'] == undefined ) {
- jsTest.log( "Id not found in doc " + tojson( doc ) + " cannot trace oplog entries." );
+
+ if (doc['_id'] == undefined) {
+ jsTest.log("Id not found in doc " + tojson(doc) + " cannot trace oplog entries.");
return;
}
-
- jsTest.log( "Using shard key : " + tojson( shardKey ) );
-
+
+ jsTest.log("Using shard key : " + tojson(shardKey));
+
var allOps = [];
- for ( var i = 0; i < shards.length; i++ ) {
-
- var oplog = shards[i].conn.getCollection( "local.oplog.rs" );
- if ( !oplog.findOne() ) {
- oplog = shards[i].conn.getCollection( "local.oplog.$main" );
+ for (var i = 0; i < shards.length; i++) {
+ var oplog = shards[i].conn.getCollection("local.oplog.rs");
+ if (!oplog.findOne()) {
+ oplog = shards[i].conn.getCollection("local.oplog.$main");
}
-
- if ( !oplog.findOne() ) {
- jsTest.log( "No oplog was found on shard " + shards[i]._id );
+
+ if (!oplog.findOne()) {
+ jsTest.log("No oplog was found on shard " + shards[i]._id);
continue;
}
-
- var addKeyQuery = function( query, prefix ) {
- for ( var k in shardKey ) {
+
+ var addKeyQuery = function(query, prefix) {
+ for (var k in shardKey) {
query[prefix + '.' + k] = shardKey[k];
}
return query;
};
-
- var addToOps = function( cursor ) {
- cursor.forEach( function( doc ) {
+
+ var addToOps = function(cursor) {
+ cursor.forEach(function(doc) {
doc.shard = shards[i]._id;
- doc.realTime = new Date( doc.ts.getTime() * 1000 );
- allOps.push( doc );
+ doc.realTime = new Date(doc.ts.getTime() * 1000);
+ allOps.push(doc);
});
};
// Find ops
- addToOps( oplog.find( addKeyQuery( { op : 'i' }, 'o' ) ) );
- var updateQuery = { $or : [ addKeyQuery( { op : 'u' }, 'o2' ),
- { op : 'u', 'o2._id' : doc['_id'] } ] };
- addToOps( oplog.find( updateQuery ) );
- addToOps( oplog.find({ op : 'd', 'o._id' : doc['_id'] }) );
+ addToOps(oplog.find(addKeyQuery({op: 'i'}, 'o')));
+ var updateQuery = {
+ $or: [addKeyQuery({op: 'u'}, 'o2'), {op: 'u', 'o2._id': doc['_id']}]
+ };
+ addToOps(oplog.find(updateQuery));
+ addToOps(oplog.find({op: 'd', 'o._id': doc['_id']}));
}
-
- var compareOps = function( opA, opB ) {
- return bsonWoCompare( opA.ts, opB.ts );
+
+ var compareOps = function(opA, opB) {
+ return bsonWoCompare(opA.ts, opB.ts);
};
-
- allOps.sort( compareOps );
-
- print( "Ops found for doc " + tojson( doc ) + " on each shard:\n" );
- for ( var i = 0; i < allOps.length; i++ ) {
- printjson( allOps[i] );
+
+ allOps.sort(compareOps);
+
+ print("Ops found for doc " + tojson(doc) + " on each shard:\n");
+ for (var i = 0; i < allOps.length; i++) {
+ printjson(allOps[i]);
}
-
+
return allOps;
} \ No newline at end of file
diff --git a/jstests/mmap_v1/capped2.js b/jstests/mmap_v1/capped2.js
index 3ecb6c219b0..ae74a396f98 100644
--- a/jstests/mmap_v1/capped2.js
+++ b/jstests/mmap_v1/capped2.js
@@ -1,70 +1,72 @@
db.capped2.drop();
-db._dbCommand( { create: "capped2", capped: true, size: 1000, $nExtents: 11, autoIndexId: false } );
+db._dbCommand({create: "capped2", capped: true, size: 1000, $nExtents: 11, autoIndexId: false});
tzz = db.capped2;
-function debug( x ) {
-// print( x );
+function debug(x) {
+ // print( x );
}
-var val = new Array( 2000 );
+var val = new Array(2000);
var c = "";
-for( i = 0; i < 2000; ++i, c += "---" ) { // bigger and bigger objects through the array...
- val[ i ] = { a: c };
+for (i = 0; i < 2000; ++i, c += "---") { // bigger and bigger objects through the array...
+ val[i] = {
+ a: c
+ };
}
-function checkIncreasing( i ) {
- res = tzz.find().sort( { $natural: -1 } );
- assert( res.hasNext(), "A" );
+function checkIncreasing(i) {
+ res = tzz.find().sort({$natural: -1});
+ assert(res.hasNext(), "A");
var j = i;
- while( res.hasNext() ) {
+ while (res.hasNext()) {
try {
- assert.eq( val[ j-- ].a, res.next().a, "B" );
- } catch( e ) {
- debug( "capped2 err " + j );
+ assert.eq(val[j--].a, res.next().a, "B");
+ } catch (e) {
+ debug("capped2 err " + j);
throw e;
}
}
- res = tzz.find().sort( { $natural: 1 } );
- assert( res.hasNext(), "C" );
- while( res.hasNext() )
- assert.eq( val[ ++j ].a, res.next().a, "D" );
- assert.eq( j, i, "E" );
+ res = tzz.find().sort({$natural: 1});
+ assert(res.hasNext(), "C");
+ while (res.hasNext())
+ assert.eq(val[++j].a, res.next().a, "D");
+ assert.eq(j, i, "E");
}
-function checkDecreasing( i ) {
- res = tzz.find().sort( { $natural: -1 } );
- assert( res.hasNext(), "F" );
+function checkDecreasing(i) {
+ res = tzz.find().sort({$natural: -1});
+ assert(res.hasNext(), "F");
var j = i;
- while( res.hasNext() ) {
- assert.eq( val[ j++ ].a, res.next().a, "G" );
+ while (res.hasNext()) {
+ assert.eq(val[j++].a, res.next().a, "G");
}
- res = tzz.find().sort( { $natural: 1 } );
- assert( res.hasNext(), "H" );
- while( res.hasNext() )
- assert.eq( val[ --j ].a, res.next().a, "I" );
- assert.eq( j, i, "J" );
+ res = tzz.find().sort({$natural: 1});
+ assert(res.hasNext(), "H");
+ while (res.hasNext())
+ assert.eq(val[--j].a, res.next().a, "I");
+ assert.eq(j, i, "J");
}
-for( i = 0 ;; ++i ) {
- debug( "capped 2: " + i );
- tzz.insert( val[ i ] );
+for (i = 0;; ++i) {
+ debug("capped 2: " + i);
+ tzz.insert(val[i]);
var err = db.getLastError();
- if ( err ) {
+ if (err) {
debug(err);
debug(tzz.count());
- assert( i > 100, "K" );
+ assert(i > 100, "K");
break;
}
- checkIncreasing( i );
+ checkIncreasing(i);
}
// drop and recreate. Test used to rely on the last insert emptying the collection, which it no
// longer does now that we rollback on failed inserts.
tzz.drop();
-db._dbCommand( { create: "capped2", capped: true, size: 1000, $nExtents: 11, autoIndexId: false } );
+db._dbCommand({create: "capped2", capped: true, size: 1000, $nExtents: 11, autoIndexId: false});
-for( i = 600 ; i >= 0 ; --i ) {
- debug( "capped 2: " + i );
- tzz.insert( val[ i ] );
- checkDecreasing( i );
+for (i = 600; i >= 0; --i) {
+ debug("capped 2: " + i);
+ tzz.insert(val[i]);
+ checkDecreasing(i);
}
diff --git a/jstests/mmap_v1/capped3.js b/jstests/mmap_v1/capped3.js
index 2e5e6790cb7..b01bc843c2c 100644
--- a/jstests/mmap_v1/capped3.js
+++ b/jstests/mmap_v1/capped3.js
@@ -2,44 +2,54 @@ t = db.jstests_capped3;
t2 = db.jstests_capped3_clone;
t.drop();
t2.drop();
-for( i = 0; i < 1000; ++i ) {
- t.save( {i:i} );
+for (i = 0; i < 1000; ++i) {
+ t.save({i: i});
}
-assert.commandWorked( db.runCommand( { cloneCollectionAsCapped:"jstests_capped3", toCollection:"jstests_capped3_clone", size:100000 } ), "A" );
+assert.commandWorked(db.runCommand({
+ cloneCollectionAsCapped: "jstests_capped3",
+ toCollection: "jstests_capped3_clone",
+ size: 100000
+}),
+ "A");
c = t2.find();
-for( i = 0; i < 1000; ++i ) {
- assert.eq( i, c.next().i, "B" );
+for (i = 0; i < 1000; ++i) {
+ assert.eq(i, c.next().i, "B");
}
-assert( !c.hasNext(), "C" );
+assert(!c.hasNext(), "C");
t.drop();
t2.drop();
-for( i = 0; i < 1000; ++i ) {
- t.save( {i:i} );
+for (i = 0; i < 1000; ++i) {
+ t.save({i: i});
}
-assert.commandWorked( db.runCommand( { cloneCollectionAsCapped:"jstests_capped3", toCollection:"jstests_capped3_clone", size:1000 } ), "D" );
-c = t2.find().sort( {$natural:-1} );
+assert.commandWorked(db.runCommand({
+ cloneCollectionAsCapped: "jstests_capped3",
+ toCollection: "jstests_capped3_clone",
+ size: 1000
+}),
+ "D");
+c = t2.find().sort({$natural: -1});
i = 999;
-while( c.hasNext() ) {
- assert.eq( i--, c.next().i, "E" );
+while (c.hasNext()) {
+ assert.eq(i--, c.next().i, "E");
}
-//print( "i: " + i );
-var str = tojson( t2.stats() );
-//print( "stats: " + tojson( t2.stats() ) );
-assert( i < 990, "F" );
+// print( "i: " + i );
+var str = tojson(t2.stats());
+// print( "stats: " + tojson( t2.stats() ) );
+assert(i < 990, "F");
t.drop();
t2.drop();
-for( i = 0; i < 1000; ++i ) {
- t.save( {i:i} );
+for (i = 0; i < 1000; ++i) {
+ t.save({i: i});
}
-assert.commandWorked( t.convertToCapped( 1000 ), "G" );
-c = t.find().sort( {$natural:-1} );
+assert.commandWorked(t.convertToCapped(1000), "G");
+c = t.find().sort({$natural: -1});
i = 999;
-while( c.hasNext() ) {
- assert.eq( i--, c.next().i, "H" );
+while (c.hasNext()) {
+ assert.eq(i--, c.next().i, "H");
}
-assert( i < 990, "I" );
-assert( i > 900, "J" );
+assert(i < 990, "I");
+assert(i > 900, "J");
diff --git a/jstests/mmap_v1/capped7.js b/jstests/mmap_v1/capped7.js
index 0405cefa5a0..fa19c17af12 100644
--- a/jstests/mmap_v1/capped7.js
+++ b/jstests/mmap_v1/capped7.js
@@ -3,10 +3,10 @@
Random.setRandomSeed();
db.capped7.drop();
-db._dbCommand( { create: "capped7", capped: true, size: 1000, $nExtents: 11, autoIndexId: false } );
+db._dbCommand({create: "capped7", capped: true, size: 1000, $nExtents: 11, autoIndexId: false});
tzz = db.capped7;
-var ten = new Array( 11 ).toString().replace( /,/g, "-" );
+var ten = new Array(11).toString().replace(/,/g, "-");
count = 0;
@@ -15,16 +15,16 @@ count = 0;
* count doesn't increase on insert.
*/
function insertUntilFull() {
-count = tzz.count();
+ count = tzz.count();
var j = 0;
-while( 1 ) {
- tzz.save( {i:ten,j:j++} );
- var newCount = tzz.count();
- if ( count == newCount ) {
- break;
+ while (1) {
+ tzz.save({i: ten, j: j++});
+ var newCount = tzz.count();
+ if (count == newCount) {
+ break;
+ }
+ count = newCount;
}
- count = newCount;
-}
}
insertUntilFull();
@@ -32,58 +32,58 @@ insertUntilFull();
// oldCount == count before empty
oldCount = count;
-assert.eq.automsg( "11", "tzz.stats().numExtents" );
+assert.eq.automsg("11", "tzz.stats().numExtents");
// oldSize == size before empty
var oldSize = tzz.stats().storageSize;
-assert.commandWorked( db._dbCommand( { emptycapped: "capped7" } ) );
+assert.commandWorked(db._dbCommand({emptycapped: "capped7"}));
// check that collection storage parameters are the same after empty
-assert.eq.automsg( "11", "tzz.stats().numExtents" );
-assert.eq.automsg( "oldSize", "tzz.stats().storageSize" );
+assert.eq.automsg("11", "tzz.stats().numExtents");
+assert.eq.automsg("oldSize", "tzz.stats().storageSize");
// check that the collection is empty after empty
-assert.eq.automsg( "0", "tzz.find().itcount()" );
-assert.eq.automsg( "0", "tzz.count()" );
+assert.eq.automsg("0", "tzz.find().itcount()");
+assert.eq.automsg("0", "tzz.count()");
// check that we can reuse the empty collection, inserting as many documents
// as we were able to the first time through.
insertUntilFull();
-assert.eq.automsg( "oldCount", "count" );
-assert.eq.automsg( "oldCount", "tzz.find().itcount()" );
-assert.eq.automsg( "oldCount", "tzz.count()" );
+assert.eq.automsg("oldCount", "count");
+assert.eq.automsg("oldCount", "tzz.find().itcount()");
+assert.eq.automsg("oldCount", "tzz.count()");
-assert.eq.automsg( "11", "tzz.stats().numExtents" );
+assert.eq.automsg("11", "tzz.stats().numExtents");
var oldSize = tzz.stats().storageSize;
-assert.commandWorked( db._dbCommand( { emptycapped: "capped7" } ) );
+assert.commandWorked(db._dbCommand({emptycapped: "capped7"}));
// check that the collection storage parameters are unchanged after another empty
-assert.eq.automsg( "11", "tzz.stats().numExtents" );
-assert.eq.automsg( "oldSize", "tzz.stats().storageSize" );
+assert.eq.automsg("11", "tzz.stats().numExtents");
+assert.eq.automsg("oldSize", "tzz.stats().storageSize");
// insert an arbitrary number of documents
-var total = Random.randInt( 2000 );
-for( var j = 1; j <= total; ++j ) {
- tzz.save( {i:ten,j:j} );
+var total = Random.randInt(2000);
+for (var j = 1; j <= total; ++j) {
+ tzz.save({i: ten, j: j});
// occasionally check that only the oldest documents are removed to make room
// for the newest documents
- if ( Random.rand() > 0.95 ) {
- assert.automsg( "j >= tzz.count()" );
- assert.eq.automsg( "tzz.count()", "tzz.find().itcount()" );
- var c = tzz.find().sort( {$natural:-1} );
+ if (Random.rand() > 0.95) {
+ assert.automsg("j >= tzz.count()");
+ assert.eq.automsg("tzz.count()", "tzz.find().itcount()");
+ var c = tzz.find().sort({$natural: -1});
var k = j;
- assert.automsg( "c.hasNext()" );
- while( c.hasNext() ) {
- assert.eq.automsg( "c.next().j", "k--" );
+ assert.automsg("c.hasNext()");
+ while (c.hasNext()) {
+ assert.eq.automsg("c.next().j", "k--");
}
// check the same thing with a reverse iterator as well
- var c = tzz.find().sort( {$natural:1} );
- assert.automsg( "c.hasNext()" );
- while( c.hasNext() ) {
- assert.eq.automsg( "c.next().j", "++k" );
- }
- assert.eq.automsg( "j", "k" );
+ var c = tzz.find().sort({$natural: 1});
+ assert.automsg("c.hasNext()");
+ while (c.hasNext()) {
+ assert.eq.automsg("c.next().j", "++k");
+ }
+ assert.eq.automsg("j", "k");
}
}
diff --git a/jstests/mmap_v1/capped8.js b/jstests/mmap_v1/capped8.js
index 81c261ef985..68b3deb0b2a 100644
--- a/jstests/mmap_v1/capped8.js
+++ b/jstests/mmap_v1/capped8.js
@@ -4,17 +4,20 @@ Random.setRandomSeed();
t = db.jstests_capped8;
-function debug( x ) {
-// printjson( x );
-}
+function debug(x) {
+ // printjson( x );
+}
/** Generate an object with a string field of specified length */
-function obj( size, x ) {
- return {X:x, a:new Array( size + 1 ).toString()};
+function obj(size, x) {
+ return {
+ X: x,
+ a: new Array(size + 1).toString()
+ };
}
-function withinTwo( a, b ) {
- assert( Math.abs( a - b ) <= 2, "not within one: " + a + ", " + b );
+function withinTwo(a, b) {
+ assert(Math.abs(a - b) <= 2, "not within one: " + a + ", " + b);
}
var X = 0;
@@ -23,16 +26,16 @@ var X = 0;
* Insert enough documents of the given size spec that the collection will
* contain only documents having this size spec.
*/
-function insertManyRollingOver( objsize ) {
+function insertManyRollingOver(objsize) {
// Add some variability, as the precise number can trigger different cases.
X++;
n = 250 + Random.randInt(10);
assert(t.count() == 0 || t.findOne().X != X);
- for( i = 0; i < n; ++i ) {
- t.save( obj( objsize, X ) );
- debug( t.count() );
+ for (i = 0; i < n; ++i) {
+ t.save(obj(objsize, X));
+ debug(t.count());
}
if (t.findOne().X != X) {
@@ -48,62 +51,62 @@ function insertManyRollingOver( objsize ) {
* Insert some documents in such a way that there may be an empty extent, then
* truncate the capped collection.
*/
-function insertAndTruncate( first ) {
+function insertAndTruncate(first) {
myInitialCount = t.count();
// Insert enough documents to make the capped allocation loop over.
- insertManyRollingOver( 150 );
+ insertManyRollingOver(150);
myFiftyCount = t.count();
// Insert documents that are too big to fit in the smaller extents.
- insertManyRollingOver( 3000 );
+ insertManyRollingOver(3000);
myTwokCount = t.count();
- if ( first ) {
+ if (first) {
initialCount = myInitialCount;
fiftyCount = myFiftyCount;
twokCount = myTwokCount;
// Sanity checks for collection count
- assert( fiftyCount > initialCount );
- assert( fiftyCount > twokCount );
+ assert(fiftyCount > initialCount);
+ assert(fiftyCount > twokCount);
} else {
// Check that we are able to insert roughly the same number of documents
// after truncating. The exact values are slightly variable as a result
// of the capped allocation algorithm and where the remaining entry is.
- withinTwo( initialCount, myInitialCount );
- withinTwo( fiftyCount, myFiftyCount );
- withinTwo( twokCount, myTwokCount );
+ withinTwo(initialCount, myInitialCount);
+ withinTwo(fiftyCount, myFiftyCount);
+ withinTwo(twokCount, myTwokCount);
}
count = t.count();
// Check that we can truncate the collection successfully.
- assert.commandWorked( db.runCommand( { captrunc:"jstests_capped8", n:count - 1, inc:false } ) );
- assert.eq( 1, t.count() );
+ assert.commandWorked(db.runCommand({captrunc: "jstests_capped8", n: count - 1, inc: false}));
+ assert.eq(1, t.count());
}
/** Test truncating and subsequent inserts */
function testTruncate() {
- insertAndTruncate( true );
- insertAndTruncate( false );
- insertAndTruncate( false );
+ insertAndTruncate(true);
+ insertAndTruncate(false);
+ insertAndTruncate(false);
}
var pass = 1;
print("pass " + pass++);
t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 10000, 4000 ] } );
+db._dbCommand({create: "jstests_capped8", capped: true, $nExtents: [10000, 10000, 4000]});
testTruncate();
print("pass " + pass++);
t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 1000, 4000 ] } );
+db._dbCommand({create: "jstests_capped8", capped: true, $nExtents: [10000, 1000, 4000]});
testTruncate();
print("pass " + pass++);
t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000, 4000 ] } );
+db._dbCommand({create: "jstests_capped8", capped: true, $nExtents: [10000, 4000]});
testTruncate();
print("pass " + pass++);
t.drop();
-db._dbCommand( { create:"jstests_capped8", capped: true, $nExtents: [ 10000 ] } );
+db._dbCommand({create: "jstests_capped8", capped: true, $nExtents: [10000]});
testTruncate();
t.drop();
diff --git a/jstests/mmap_v1/capped_max.js b/jstests/mmap_v1/capped_max.js
index 44e5da1d4ae..a30e8c2a1d3 100644
--- a/jstests/mmap_v1/capped_max.js
+++ b/jstests/mmap_v1/capped_max.js
@@ -3,27 +3,26 @@ t = db.capped_max;
sz = 1024 * 16;
t.drop();
-db.createCollection( t.getName() , {capped: true, size: sz } );
-assert.lt( Math.pow( 2, 62 ), t.stats().max.floatApprox );
+db.createCollection(t.getName(), {capped: true, size: sz});
+assert.lt(Math.pow(2, 62), t.stats().max.floatApprox);
t.drop();
-db.createCollection( t.getName() , {capped: true, size: sz, max: 123456 } );
-assert.eq( 123456, t.stats().max );
+db.createCollection(t.getName(), {capped: true, size: sz, max: 123456});
+assert.eq(123456, t.stats().max);
// create a collection with the max possible doc cap (2^31-2 docs)
t.drop();
mm = Math.pow(2, 31) - 2;
-db.createCollection( t.getName() , {capped: true, size: sz, max: mm } );
-assert.eq( mm, t.stats().max );
+db.createCollection(t.getName(), {capped: true, size: sz, max: mm});
+assert.eq(mm, t.stats().max);
// create a collection with the 'no max' value (2^31-1 docs)
t.drop();
mm = Math.pow(2, 31) - 1;
-db.createCollection( t.getName() , {capped: true, size: sz, max: mm } );
-assert.eq(NumberLong("9223372036854775807"), t.stats().max );
+db.createCollection(t.getName(), {capped: true, size: sz, max: mm});
+assert.eq(NumberLong("9223372036854775807"), t.stats().max);
t.drop();
-res = db.createCollection( t.getName() , {capped: true, size: sz, max: Math.pow(2, 31) } );
-assert.eq( 0, res.ok, tojson(res) );
-assert.eq( 0, t.stats().ok );
-
+res = db.createCollection(t.getName(), {capped: true, size: sz, max: Math.pow(2, 31)});
+assert.eq(0, res.ok, tojson(res));
+assert.eq(0, t.stats().ok);
diff --git a/jstests/mmap_v1/capped_server13912.js b/jstests/mmap_v1/capped_server13912.js
index 36ff437afcd..438c2b17b34 100644
--- a/jstests/mmap_v1/capped_server13912.js
+++ b/jstests/mmap_v1/capped_server13912.js
@@ -1,10 +1,10 @@
// SERVER-13912 Capped collections with size=0 are promoted to the minimum Extent size
var name = "capped_server13912";
-var minExtentSize = 0x1000; // from ExtentManager::minSize()
+var minExtentSize = 0x1000; // from ExtentManager::minSize()
var t = db.getCollection(name);
t.drop();
-db.createCollection(name, {capped: true , size: 0});
+db.createCollection(name, {capped: true, size: 0});
assert.eq(t.stats().storageSize, minExtentSize);
diff --git a/jstests/mmap_v1/capped_server2639.js b/jstests/mmap_v1/capped_server2639.js
index 574c1755789..751699b58b7 100644
--- a/jstests/mmap_v1/capped_server2639.js
+++ b/jstests/mmap_v1/capped_server2639.js
@@ -1,26 +1,25 @@
name = "server2639";
-t = db.getCollection( name );
+t = db.getCollection(name);
t.drop();
-
-db.createCollection( name , { capped : true , size : 1 } );
+db.createCollection(name, {capped: true, size: 1});
size = t.stats().storageSize;
bigString = "";
-while ( bigString.length < size )
+while (bigString.length < size)
bigString += ".";
-t.insert( { x : 1 } );
+t.insert({x: 1});
-var res = t.insert( { x : 2 , bigString : bigString } );
-assert.writeError( res );
+var res = t.insert({x: 2, bigString: bigString});
+assert.writeError(res);
-assert.eq( 1 , t.count() ); // make sure small doc didn't get deleted
-assert.eq( 1 , t.findOne().x );
+assert.eq(1, t.count()); // make sure small doc didn't get deleted
+assert.eq(1, t.findOne().x);
// make sure can still insert
-t.insert( { x : 2 } );
-assert.eq( 2 , t.count() );
+t.insert({x: 2});
+assert.eq(2, t.count());
diff --git a/jstests/mmap_v1/capped_server7543.js b/jstests/mmap_v1/capped_server7543.js
index 514cd7964b2..625f62cd4a7 100644
--- a/jstests/mmap_v1/capped_server7543.js
+++ b/jstests/mmap_v1/capped_server7543.js
@@ -1,11 +1,10 @@
-mydb = db.getSisterDB( "capped_server7543" );
+mydb = db.getSisterDB("capped_server7543");
mydb.dropDatabase();
-mydb.createCollection( "foo" , { capped : true , size : 12288 } );
+mydb.createCollection("foo", {capped: true, size: 12288});
-assert.eq( 12288, mydb.foo.stats().storageSize );
-assert.eq( 1, mydb.foo.validate(true).extentCount );
+assert.eq(12288, mydb.foo.stats().storageSize);
+assert.eq(1, mydb.foo.validate(true).extentCount);
mydb.dropDatabase();
-
diff --git a/jstests/mmap_v1/collmod.js b/jstests/mmap_v1/collmod.js
index d2056a530ab..53c83f7d927 100644
--- a/jstests/mmap_v1/collmod.js
+++ b/jstests/mmap_v1/collmod.js
@@ -1,82 +1,82 @@
// Basic js tests for the collMod command.
// Test setting the usePowerOf2Sizes flag, and modifying TTL indexes.
-function debug( x ) {
- //printjson( x );
+function debug(x) {
+ // printjson( x );
}
var coll = "collModTest";
-var t = db.getCollection( coll );
+var t = db.getCollection(coll);
t.drop();
-db.createCollection( coll );
-
+db.createCollection(coll);
// Verify the new collection has userFlags set to 1
printjson(t.stats());
-assert.eq( t.stats().userFlags , 1 , "fresh collection doesn't have userFlags = 1 ");
+assert.eq(t.stats().userFlags, 1, "fresh collection doesn't have userFlags = 1 ");
// Modify the collection with the usePowerOf2Sizes flag. Verify userFlags now = 0.
-var res = db.runCommand( { "collMod" : coll, "usePowerOf2Sizes" : false } );
-debug( res );
-assert.eq( res.ok , 1 , "collMod failed" );
-assert.eq( t.stats().userFlags , 0 , "modified collection should have userFlags = 0 ");
-var nso = db.system.namespaces.findOne( { name : t.getFullName() } );
-debug( nso );
-assert.eq( 0, nso.options.flags, "options didn't sync to system.namespaces: " + tojson( nso ) );
+var res = db.runCommand({"collMod": coll, "usePowerOf2Sizes": false});
+debug(res);
+assert.eq(res.ok, 1, "collMod failed");
+assert.eq(t.stats().userFlags, 0, "modified collection should have userFlags = 0 ");
+var nso = db.system.namespaces.findOne({name: t.getFullName()});
+debug(nso);
+assert.eq(0, nso.options.flags, "options didn't sync to system.namespaces: " + tojson(nso));
// Try to modify it with some unrecognized value
-var res = db.runCommand( { "collMod" : coll, "unrecognized" : true } );
-debug( res );
-assert.eq( res.ok , 0 , "collMod shouldn't return ok with unrecognized value" );
+var res = db.runCommand({"collMod": coll, "unrecognized": true});
+debug(res);
+assert.eq(res.ok, 0, "collMod shouldn't return ok with unrecognized value");
// add a TTL index
-t.ensureIndex( {a : 1}, { "expireAfterSeconds": 50 } );
-assert.eq( 1, db.system.indexes.count( { key : {a:1}, expireAfterSeconds : 50 } ),
- "TTL index not added" );
+t.ensureIndex({a: 1}, {"expireAfterSeconds": 50});
+assert.eq(1, db.system.indexes.count({key: {a: 1}, expireAfterSeconds: 50}), "TTL index not added");
// try to modify it with a bad key pattern
-var res = db.runCommand( { "collMod" : coll,
- "index" : { "keyPattern" : "bad" , "expireAfterSeconds" : 100 } } );
-debug( res );
-assert.eq( 0 , res.ok , "mod shouldn't work with bad keypattern");
+var res =
+ db.runCommand({"collMod": coll, "index": {"keyPattern": "bad", "expireAfterSeconds": 100}});
+debug(res);
+assert.eq(0, res.ok, "mod shouldn't work with bad keypattern");
// try to modify it without expireAfterSeconds field
-var res = db.runCommand( { "collMod" : coll,
- "index" : { "keyPattern" : {a : 1} } } );
-debug( res );
-assert.eq( 0 , res.ok , "TTL mod shouldn't work without expireAfterSeconds");
+var res = db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}}});
+debug(res);
+assert.eq(0, res.ok, "TTL mod shouldn't work without expireAfterSeconds");
// try to modify it with a non-numeric expireAfterSeconds field
-var res = db.runCommand( { "collMod" : coll,
- "index" : { "keyPattern" : {a : 1}, "expireAfterSeconds" : "100" } } );
-debug( res );
-assert.eq( 0 , res.ok , "TTL mod shouldn't work with non-numeric expireAfterSeconds");
+var res =
+ db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}, "expireAfterSeconds": "100"}});
+debug(res);
+assert.eq(0, res.ok, "TTL mod shouldn't work with non-numeric expireAfterSeconds");
// this time modifying should finally work
-var res = db.runCommand( { "collMod" : coll,
- "index" : { "keyPattern" : {a : 1}, "expireAfterSeconds" : 100 } } );
-debug( res );
-assert.eq( 1, db.system.indexes.count( { key : {a:1}, expireAfterSeconds : 100 } ),
- "TTL index not modified" );
+var res =
+ db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}, "expireAfterSeconds": 100}});
+debug(res);
+assert.eq(1,
+ db.system.indexes.count({key: {a: 1}, expireAfterSeconds: 100}),
+ "TTL index not modified");
// try to modify a faulty TTL index with a non-numeric expireAfterSeconds field
-t.dropIndex( {a : 1 } );
-t.ensureIndex( {a : 1} , { "expireAfterSeconds": "50" } );
-var res = db.runCommand( { "collMod" : coll,
- "index" : { "keyPattern" : {a : 1} , "expireAfterSeconds" : 100 } } );
-debug( res );
-assert.eq( 0, res.ok, "shouldn't be able to modify faulty index spec" );
+t.dropIndex({a: 1});
+t.ensureIndex({a: 1}, {"expireAfterSeconds": "50"});
+var res =
+ db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}, "expireAfterSeconds": 100}});
+debug(res);
+assert.eq(0, res.ok, "shouldn't be able to modify faulty index spec");
// try with new index, this time set both expireAfterSeconds and the usePowerOf2Sizes flag
-t.dropIndex( {a : 1 } );
-t.ensureIndex( {a : 1} , { "expireAfterSeconds": 50 } );
-var res = db.runCommand( { "collMod" : coll ,
- "usePowerOf2Sizes" : true,
- "index" : { "keyPattern" : {a : 1} , "expireAfterSeconds" : 100 } } );
-debug( res );
-assert.eq( 1, res.ok, "should be able to modify both userFlags and expireAfterSeconds" );
-assert.eq( t.stats().userFlags , 1 , "userflags should be 1 now");
-assert.eq( 1, db.system.indexes.count( { key : {a:1}, expireAfterSeconds : 100 } ),
- "TTL index should be 100 now" );
-
+t.dropIndex({a: 1});
+t.ensureIndex({a: 1}, {"expireAfterSeconds": 50});
+var res = db.runCommand({
+ "collMod": coll,
+ "usePowerOf2Sizes": true,
+ "index": {"keyPattern": {a: 1}, "expireAfterSeconds": 100}
+});
+debug(res);
+assert.eq(1, res.ok, "should be able to modify both userFlags and expireAfterSeconds");
+assert.eq(t.stats().userFlags, 1, "userflags should be 1 now");
+assert.eq(1,
+ db.system.indexes.count({key: {a: 1}, expireAfterSeconds: 100}),
+ "TTL index should be 100 now");
diff --git a/jstests/mmap_v1/compact.js b/jstests/mmap_v1/compact.js
index bab91f542a7..91ce1ba5dbc 100644
--- a/jstests/mmap_v1/compact.js
+++ b/jstests/mmap_v1/compact.js
@@ -7,22 +7,22 @@ t.drop();
// Assert that you can't compact a capped collection in MMAP.
assert.commandWorked(mydb.createCollection(t.getName(), {size: 4096, capped: true}));
assert.commandFailedWithCode(t.runCommand('compact'), ErrorCodes.CommandNotSupported);
-t.drop(); // uncap the collection.
+t.drop(); // uncap the collection.
-t.insert({ x: 3 });
-t.insert({ x: 3 });
-t.insert({ x: 5 });
-t.insert({ x: 4, z: 2, k: 'aaa' });
-t.insert({ x: 4, z: 2, k: 'aaa' });
-t.insert({ x: 4, z: 2, k: 'aaa' });
-t.insert({ x: 4, z: 2, k: 'aaa' });
-t.insert({ x: 4, z: 2, k: 'aaa' });
-t.insert({ x: 4, z: 2, k: 'aaa' });
-t.ensureIndex({ x: 1 });
+t.insert({x: 3});
+t.insert({x: 3});
+t.insert({x: 5});
+t.insert({x: 4, z: 2, k: 'aaa'});
+t.insert({x: 4, z: 2, k: 'aaa'});
+t.insert({x: 4, z: 2, k: 'aaa'});
+t.insert({x: 4, z: 2, k: 'aaa'});
+t.insert({x: 4, z: 2, k: 'aaa'});
+t.insert({x: 4, z: 2, k: 'aaa'});
+t.ensureIndex({x: 1});
print("1");
-var res = mydb.runCommand({ compact: 'compacttest', dev: true, force: true });
+var res = mydb.runCommand({compact: 'compacttest', dev: true, force: true});
printjson(res);
assert(res.ok);
assert(t.count() == 9);
@@ -34,35 +34,37 @@ assert(t.getIndexes().length == 2);
var ssize = t.stats().storageSize;
print("2");
-res = mydb.runCommand({ compact: 'compacttest', dev: true,paddingBytes:1000, force:true });
+res = mydb.runCommand({compact: 'compacttest', dev: true, paddingBytes: 1000, force: true});
assert(res.ok);
assert(t.count() == 9);
var v = t.validate(true);
assert(v.ok);
-assert(t.stats().storageSize > ssize, "expected more storage given padding is higher. however it rounds off so if something changed this could be");
-//printjson(t.stats());
+assert(
+ t.stats().storageSize > ssize,
+ "expected more storage given padding is higher. however it rounds off so if something changed this could be");
+// printjson(t.stats());
print("z");
-t.insert({ x: 4, z: 2, k: { a: "", b: ""} });
-t.insert({ x: 4, z: 2, k: { a: "", b: ""} });
-t.insert({ x: 4, z: 2, k: { a: "", b: ""} });
-t.insert({ x: 4, z: 2, k: { a: "", b: ""} });
-t.insert({ x: 4, z: null, k: { f: "", b: ""} });
-t.insert({ x: 4, z: null, k: { c: ""} });
-t.insert({ x: 4, z: null, k: { h: ""} });
-t.insert({ x: 4, z: null });
-t.insert({ x: 4, z: 3});
-t.insert({ x: 4, z: 2, k: { a: "", b: ""} });
-t.insert({ x: 4, z: null, k: { c: ""} });
-t.insert({ x: 4, z: null, k: { c: ""} });
-t.insert({ x: 4, z: 3, k: { c: ""} });
+t.insert({x: 4, z: 2, k: {a: "", b: ""}});
+t.insert({x: 4, z: 2, k: {a: "", b: ""}});
+t.insert({x: 4, z: 2, k: {a: "", b: ""}});
+t.insert({x: 4, z: 2, k: {a: "", b: ""}});
+t.insert({x: 4, z: null, k: {f: "", b: ""}});
+t.insert({x: 4, z: null, k: {c: ""}});
+t.insert({x: 4, z: null, k: {h: ""}});
+t.insert({x: 4, z: null});
+t.insert({x: 4, z: 3});
+t.insert({x: 4, z: 2, k: {a: "", b: ""}});
+t.insert({x: 4, z: null, k: {c: ""}});
+t.insert({x: 4, z: null, k: {c: ""}});
+t.insert({x: 4, z: 3, k: {c: ""}});
-t.ensureIndex({ z: 1, k: 1 });
-//t.ensureIndex({ z: 1, k: 1 }, { unique: true });
-//t.ensureIndex({ z: 1, k: 1 }, { dropDups: true, unique:true });
+t.ensureIndex({z: 1, k: 1});
+// t.ensureIndex({ z: 1, k: 1 }, { unique: true });
+// t.ensureIndex({ z: 1, k: 1 }, { dropDups: true, unique:true });
-res = mydb.runCommand({ compact: 'compacttest', dev: true, paddingFactor: 1.2, force:true });
+res = mydb.runCommand({compact: 'compacttest', dev: true, paddingFactor: 1.2, force: true});
printjson(res);
assert(res.ok);
assert(t.count() > 13);
@@ -73,10 +75,9 @@ print("3");
// works on an empty collection?
t.remove({});
-assert(mydb.runCommand({ compact: 'compacttest', dev: true, force:true }).ok);
+assert(mydb.runCommand({compact: 'compacttest', dev: true, force: true}).ok);
assert(t.count() == 0);
v = t.validate(true);
assert(v.ok);
assert(v.extentCount == 1);
assert(t.getIndexes().length == 3);
-
diff --git a/jstests/mmap_v1/compactPreservePadding.js b/jstests/mmap_v1/compactPreservePadding.js
index 52a3436bf12..211ecd5a087 100644
--- a/jstests/mmap_v1/compactPreservePadding.js
+++ b/jstests/mmap_v1/compactPreservePadding.js
@@ -5,13 +5,13 @@ var collName = "compactPreservePadding";
var t = mydb.getCollection(collName);
t.drop();
-// use larger keyname to avoid hitting an edge case with extents
+// use larger keyname to avoid hitting an edge case with extents
for (i = 0; i < 10000; i++) {
- t.insert({useLargerKeyName:i});
+ t.insert({useLargerKeyName: i});
}
// remove half the entries
-t.remove({useLargerKeyName:{$mod:[2,0]}});
+t.remove({useLargerKeyName: {$mod: [2, 0]}});
printjson(t.stats());
originalSize = t.stats().size;
originalStorage = t.stats().storageSize;
diff --git a/jstests/mmap_v1/datasize.js b/jstests/mmap_v1/datasize.js
index 85c32413b61..8c61b927748 100644
--- a/jstests/mmap_v1/datasize.js
+++ b/jstests/mmap_v1/datasize.js
@@ -2,33 +2,53 @@
f = db.jstests_datasize;
f.drop();
-assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
-f.save( {qq:'c'} );
+assert.eq(0, db.runCommand({datasize: "test.jstests_datasize"}).size);
+f.save({qq: 'c'});
printjson(f.stats());
-assert.eq( 48, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
-f.save( {qq:'fg'} );
+assert.eq(48, db.runCommand({datasize: "test.jstests_datasize"}).size);
+f.save({qq: 'fg'});
printjson(f.stats());
-assert.eq( 96, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
+assert.eq(96, db.runCommand({datasize: "test.jstests_datasize"}).size);
f.drop();
-f.ensureIndex( {qq:1} );
-assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
-f.save( {qq:'c'} );
+f.ensureIndex({qq: 1});
+assert.eq(0, db.runCommand({datasize: "test.jstests_datasize"}).size);
+f.save({qq: 'c'});
printjson(f.stats());
-assert.eq( 48, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
-f.save( {qq:'fg'} );
+assert.eq(48, db.runCommand({datasize: "test.jstests_datasize"}).size);
+f.save({qq: 'fg'});
printjson(f.stats());
-assert.eq( 96, db.runCommand( {datasize:"test.jstests_datasize"} ).size );
+assert.eq(96, db.runCommand({datasize: "test.jstests_datasize"}).size);
-assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}} ).ok );
+assert.eq(0, db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'a'}}).ok);
-assert.eq( 96, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'z' }} ).size );
-assert.eq( 48, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'d' }} ).size );
-assert.eq( 48, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'d' }, keyPattern:{qq:1}} ).size );
-assert.eq( 48, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'d'}, max:{qq:'z' }, keyPattern:{qq:1}} ).size );
+assert.eq(96,
+ db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'a'}, max: {qq: 'z'}}).size);
+assert.eq(48,
+ db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'a'}, max: {qq: 'd'}}).size);
+assert.eq(48,
+ db.runCommand({
+ datasize: "test.jstests_datasize",
+ min: {qq: 'a'},
+ max: {qq: 'd'},
+ keyPattern: {qq: 1}
+ }).size);
+assert.eq(48,
+ db.runCommand({
+ datasize: "test.jstests_datasize",
+ min: {qq: 'd'},
+ max: {qq: 'z'},
+ keyPattern: {qq: 1}
+ }).size);
-assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'c'}, max:{qq:'c' }} ).size );
-assert.eq( 48, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'c'}, max:{qq:'d' }} ).size );
+assert.eq(0,
+ db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'c'}, max: {qq: 'c'}}).size);
+assert.eq(48,
+ db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'c'}, max: {qq: 'd'}}).size);
-assert.eq( 0, db.runCommand( {datasize:"test.jstests_datasize", min:{qq:'a'}, max:{qq:'d' }, keyPattern:{a:1}} ).ok );
+assert.eq(
+ 0,
+ db.runCommand(
+ {datasize: "test.jstests_datasize", min: {qq: 'a'}, max: {qq: 'd'}, keyPattern: {a: 1}})
+ .ok);
diff --git a/jstests/mmap_v1/datasize3.js b/jstests/mmap_v1/datasize3.js
index b706437369a..cefcdcf9949 100644
--- a/jstests/mmap_v1/datasize3.js
+++ b/jstests/mmap_v1/datasize3.js
@@ -2,33 +2,30 @@
t = db.datasize3;
t.drop();
-function run( options ){
- var c = { dataSize : "test.datasize3" };
- if ( options )
- Object.extend( c , options );
- return db.runCommand( c );
+function run(options) {
+ var c = {
+ dataSize: "test.datasize3"
+ };
+ if (options)
+ Object.extend(c, options);
+ return db.runCommand(c);
}
-t.insert( { x : 1 } );
+t.insert({x: 1});
a = run();
-b = run( { estimate : true } );
-printjson( t.stats() );
-assert.eq( a.size , b.size );
+b = run({estimate: true});
+printjson(t.stats());
+assert.eq(a.size, b.size);
+t.ensureIndex({x: 1});
-t.ensureIndex( { x : 1 } );
-
-for ( i=2; i<100; i++ )
- t.insert( { x : i } );
-
-a = run( { min : { x : 20 } , max : { x : 50 } } ).size;
-b = run( { min : { x : 20 } , max : { x : 50 } , estimate : true } ).size;
-
-ratio = Math.min( a , b ) / Math.max( a , b );
-
-assert.lt( 0.97 , ratio , "sizes not equal a: " + a + " b: " + b );
-
+for (i = 2; i < 100; i++)
+ t.insert({x: i});
+a = run({min: {x: 20}, max: {x: 50}}).size;
+b = run({min: {x: 20}, max: {x: 50}, estimate: true}).size;
+ratio = Math.min(a, b) / Math.max(a, b);
+assert.lt(0.97, ratio, "sizes not equal a: " + a + " b: " + b);
diff --git a/jstests/mmap_v1/disk_reuse1.js b/jstests/mmap_v1/disk_reuse1.js
index c419792b5d9..01090667820 100644
--- a/jstests/mmap_v1/disk_reuse1.js
+++ b/jstests/mmap_v1/disk_reuse1.js
@@ -1,24 +1,24 @@
-load( "jstests/libs/slow_weekly_util.js" );
-test = new SlowWeeklyMongod( "conc_update" );
-db = test.getDB( "test" );
+load("jstests/libs/slow_weekly_util.js");
+test = new SlowWeeklyMongod("conc_update");
+db = test.getDB("test");
t = db.disk_reuse1;
t.drop();
N = 10000;
-function k(){
- return Math.floor( Math.random() * N );
+function k() {
+ return Math.floor(Math.random() * N);
}
s = "";
-while ( s.length < 1024 )
+while (s.length < 1024)
s += "abc";
state = {};
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < N; i++) {
- bulk.insert({ _id: i, s: s });
+ bulk.insert({_id: i, s: s});
}
assert.writeOK(bulk.execute());
@@ -28,24 +28,24 @@ t.remove({});
bulk = t.initializeUnorderedBulkOp();
for (i = 0; i < N; i++) {
- bulk.insert({ _id: i, s: s });
+ bulk.insert({_id: i, s: s});
}
assert.writeOK(bulk.execute());
-assert.eq( orig.storageSize , t.stats().storageSize , "A" );
+assert.eq(orig.storageSize, t.stats().storageSize, "A");
-for (j = 0; j < 100; j++){
- for (i = 0; i < N; i++){
+for (j = 0; j < 100; j++) {
+ for (i = 0; i < N; i++) {
bulk = t.initializeUnorderedBulkOp();
var r = Math.random();
- if ( r > .5 )
- bulk.find({ _id: i }).remove();
+ if (r > .5)
+ bulk.find({_id: i}).remove();
else
- bulk.find({ _id: i }).upsert().updateOne({ _id: i, s: s });
+ bulk.find({_id: i}).upsert().updateOne({_id: i, s: s});
}
assert.writeOK(bulk.execute());
- assert.eq( orig.storageSize , t.stats().storageSize , "B" + j );
+ assert.eq(orig.storageSize, t.stats().storageSize, "B" + j);
}
test.stop();
diff --git a/jstests/mmap_v1/drop.js b/jstests/mmap_v1/drop.js
index 154c35d1db3..efb50a3bd08 100644
--- a/jstests/mmap_v1/drop.js
+++ b/jstests/mmap_v1/drop.js
@@ -3,22 +3,21 @@ var coll = db.jstests_drop;
coll.drop();
res = coll.runCommand("drop");
-assert( !res.ok, tojson( res ) );
+assert(!res.ok, tojson(res));
-
-assert.eq(0, db.system.indexes.find({ns : coll + ""}).count(), "A");
+assert.eq(0, db.system.indexes.find({ns: coll + ""}).count(), "A");
coll.save({});
-assert.eq(1, db.system.indexes.find({ns : coll + ""}).count(), "B");
-coll.ensureIndex({a : 1});
-assert.eq(2, db.system.indexes.find({ns : coll + ""}).count(), "C");
-assert.commandWorked(db.runCommand({drop : coll.getName()}));
-assert.eq(0, db.system.indexes.find({ns : coll + ""}).count(), "D");
+assert.eq(1, db.system.indexes.find({ns: coll + ""}).count(), "B");
+coll.ensureIndex({a: 1});
+assert.eq(2, db.system.indexes.find({ns: coll + ""}).count(), "C");
+assert.commandWorked(db.runCommand({drop: coll.getName()}));
+assert.eq(0, db.system.indexes.find({ns: coll + ""}).count(), "D");
-coll.ensureIndex({a : 1});
-assert.eq(2, db.system.indexes.find({ns : coll + ""}).count(), "E");
-assert.commandWorked(db.runCommand({deleteIndexes : coll.getName(), index : "*"}),
+coll.ensureIndex({a: 1});
+assert.eq(2, db.system.indexes.find({ns: coll + ""}).count(), "E");
+assert.commandWorked(db.runCommand({deleteIndexes: coll.getName(), index: "*"}),
"delete indexes A");
-assert.eq(1, db.system.indexes.find({ns : coll + ""}).count(), "G");
+assert.eq(1, db.system.indexes.find({ns: coll + ""}).count(), "G");
// make sure we can still use it
coll.save({});
diff --git a/jstests/mmap_v1/dur_big_atomic_update.js b/jstests/mmap_v1/dur_big_atomic_update.js
index 56ea6f7ec33..41c85adf80a 100644
--- a/jstests/mmap_v1/dur_big_atomic_update.js
+++ b/jstests/mmap_v1/dur_big_atomic_update.js
@@ -7,37 +7,36 @@ d = conn.getDB("test");
d.foo.drop();
var bulk = d.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < 1024; i++){
- bulk.insert({ _id: i });
+for (var i = 0; i < 1024; i++) {
+ bulk.insert({_id: i});
}
assert.writeOK(bulk.execute());
var server_bits = db.serverStatus().mem.bits;
-var big_string_size = (server_bits == 32 ? 64 * 1024 : 1024*1024);
+var big_string_size = (server_bits == 32 ? 64 * 1024 : 1024 * 1024);
var big_string = 'xxxxxxxxxxxxxxxx';
while (big_string.length < big_string_size) {
big_string += big_string;
}
-var res = assert.writeOK(d.foo.update({ $atomic: 1 },
- { $set: { big_string: big_string }},
- false, true /* multi */ ));
+var res = assert.writeOK(
+ d.foo.update({$atomic: 1}, {$set: {big_string: big_string}}, false, true /* multi */));
assert.eq(1024, res.nModified);
d.dropDatabase();
bulk = d.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < 1024; i++){
- bulk.insert({ _id: i });
+for (var i = 0; i < 1024; i++) {
+ bulk.insert({_id: i});
}
assert.writeOK(bulk.execute());
// Do it again but in a db.eval
-d.eval(
- function(big_string) {
- new Mongo().getDB("test").foo.update({}, {$set: {big_string: big_string}}, false, /*multi*/true);
- }, big_string); // Can't pass in connection or DB objects
+d.eval(function(big_string) {
+ new Mongo().getDB("test").foo.update(
+ {}, {$set: {big_string: big_string}}, false, /*multi*/ true);
+}, big_string); // Can't pass in connection or DB objects
err = d.getLastErrorObj();
diff --git a/jstests/mmap_v1/dur_remove_old_journals.js b/jstests/mmap_v1/dur_remove_old_journals.js
index de6f38dfaaa..6da9dfbd6ee 100644
--- a/jstests/mmap_v1/dur_remove_old_journals.js
+++ b/jstests/mmap_v1/dur_remove_old_journals.js
@@ -36,7 +36,7 @@ if (db.serverBuildInfo().bits == 32) {
return latest;
}
- var stringSize = 1024*1024;
+ var stringSize = 1024 * 1024;
var longString = new Array(stringSize).join("x");
// Insert some data to create the first journal file.
@@ -51,11 +51,11 @@ if (db.serverBuildInfo().bits == 32) {
while (firstJournalFileExists() && getLatestJournalFileNum() < maxJournalFiles) {
db.foo.insert({_id: numInserted++, s: longString});
- if (numInserted % 100 == 0){
+ if (numInserted % 100 == 0) {
jsTestLog("numInserted: " + numInserted);
- db.adminCommand({fsync:1});
+ db.adminCommand({fsync: 1});
db.foo.remove({});
- db.adminCommand({fsync:1});
+ db.adminCommand({fsync: 1});
}
}
diff --git a/jstests/mmap_v1/extent.js b/jstests/mmap_v1/extent.js
index 47ae868606a..35b9213dca4 100644
--- a/jstests/mmap_v1/extent.js
+++ b/jstests/mmap_v1/extent.js
@@ -1,11 +1,10 @@
t = db.reclaimExtentsTest;
t.drop();
-for ( var i=0; i<50; i++ ) { // enough iterations to break 32 bit.
- db.createCollection('reclaimExtentsTest', { size : 100000000 });
- t.insert({x:1});
- assert( t.count() == 1 );
+for (var i = 0; i < 50; i++) { // enough iterations to break 32 bit.
+ db.createCollection('reclaimExtentsTest', {size: 100000000});
+ t.insert({x: 1});
+ assert(t.count() == 1);
t.drop();
}
t.drop();
-
diff --git a/jstests/mmap_v1/extent2.js b/jstests/mmap_v1/extent2.js
index 3b4dc54ca74..269ac645986 100644
--- a/jstests/mmap_v1/extent2.js
+++ b/jstests/mmap_v1/extent2.js
@@ -1,15 +1,15 @@
-mydb = db.getSisterDB( "test_extent2" );
+mydb = db.getSisterDB("test_extent2");
mydb.dropDatabase();
t = mydb.foo;
-function insert(){
- t.insert( { _id : 1 , x : 1 } );
- t.insert( { _id : 2 , x : 1 } );
- t.insert( { _id : 3 , x : 1 } );
- t.ensureIndex( { x : 1 } );
+function insert() {
+ t.insert({_id: 1, x: 1});
+ t.insert({_id: 2, x: 1});
+ t.insert({_id: 3, x: 1});
+ t.ensureIndex({x: 1});
}
insert();
@@ -17,18 +17,18 @@ t.drop();
start = mydb.stats();
-for ( i=0; i<100; i++ ) {
+for (i = 0; i < 100; i++) {
insert();
t.drop();
}
end = mydb.stats();
-printjson( start );
-printjson( end );
-assert.eq( start.extentFreeList.num, end.extentFreeList.num );
+printjson(start);
+printjson(end);
+assert.eq(start.extentFreeList.num, end.extentFreeList.num);
// 3: 1 data, 1 _id idx, 1 x idx
// used to be 4, but we no longer waste an extent for the freelist
-assert.eq( 3, start.extentFreeList.num );
-assert.eq( 3, end.extentFreeList.num );
+assert.eq(3, start.extentFreeList.num);
+assert.eq(3, end.extentFreeList.num);
diff --git a/jstests/mmap_v1/index_check1.js b/jstests/mmap_v1/index_check1.js
index 7f46e477d98..609eb2e8fab 100644
--- a/jstests/mmap_v1/index_check1.js
+++ b/jstests/mmap_v1/index_check1.js
@@ -1,31 +1,31 @@
db.somecollection.drop();
-assert.eq(0, db.system.namespaces.find({name:/somecollection/}).length(), "1");
+assert.eq(0, db.system.namespaces.find({name: /somecollection/}).length(), "1");
-db.somecollection.save({a:1});
+db.somecollection.save({a: 1});
-assert.eq(2, db.system.namespaces.find({name:/somecollection/}).length(), "2");
+assert.eq(2, db.system.namespaces.find({name: /somecollection/}).length(), "2");
-db.somecollection.ensureIndex({a:1});
+db.somecollection.ensureIndex({a: 1});
-var z = db.system.namespaces.find({name:/somecollection/}).length();
-assert.gte(z, 1 , "3");
+var z = db.system.namespaces.find({name: /somecollection/}).length();
+assert.gte(z, 1, "3");
-if( z == 1 )
+if (z == 1)
print("warning: z==1, should only happen with alternate storage engines");
db.somecollection.drop();
-assert.eq(0, db.system.namespaces.find({name:/somecollection/}).length(), "4");
+assert.eq(0, db.system.namespaces.find({name: /somecollection/}).length(), "4");
-db.somecollection.save({a:1});
+db.somecollection.save({a: 1});
-assert.eq(2, db.system.namespaces.find({name:/somecollection/}).length(), "5");
+assert.eq(2, db.system.namespaces.find({name: /somecollection/}).length(), "5");
-db.somecollection.ensureIndex({a:1});
+db.somecollection.ensureIndex({a: 1});
-var x = db.system.namespaces.find({name:/somecollection/}).length();
-assert( x == 2 || x == z, "6");
+var x = db.system.namespaces.find({name: /somecollection/}).length();
+assert(x == 2 || x == z, "6");
assert(db.somecollection.validate().valid, "7");
diff --git a/jstests/mmap_v1/indexh.js b/jstests/mmap_v1/indexh.js
index ac2a93ec62b..50d3e40e11e 100644
--- a/jstests/mmap_v1/indexh.js
+++ b/jstests/mmap_v1/indexh.js
@@ -2,40 +2,40 @@
t = db.jstests_indexh;
-function debug( t ) {
- print( t );
+function debug(t) {
+ print(t);
}
function extraDebug() {
-// printjson( db.stats() );
-// db.printCollectionStats();
+ // printjson( db.stats() );
+ // db.printCollectionStats();
}
// index extent freeing
t.drop();
-t.save( {} );
+t.save({});
var s1 = db.stats().dataSize;
-debug( "s1: " + s1 );
+debug("s1: " + s1);
extraDebug();
-t.ensureIndex( {a:1} );
+t.ensureIndex({a: 1});
var s2 = db.stats().dataSize;
-debug( "s2: " + s2 );
-assert.automsg( "s1 < s2" );
-t.dropIndex( {a:1} );
+debug("s2: " + s2);
+assert.automsg("s1 < s2");
+t.dropIndex({a: 1});
var s3 = db.stats().dataSize;
-debug( "s3: " + s3 );
+debug("s3: " + s3);
extraDebug();
-assert.eq.automsg( "s1", "s3" );
+assert.eq.automsg("s1", "s3");
// index node freeing
t.drop();
-t.ensureIndex( {a:1} );
-for( i = 'a'; i.length < 500; i += 'a' ) {
- t.save( {a:i} );
+t.ensureIndex({a: 1});
+for (i = 'a'; i.length < 500; i += 'a') {
+ t.save({a: i});
}
var s4 = db.stats().indexSize;
-debug( "s4: " + s4 );
-t.remove( {} );
+debug("s4: " + s4);
+t.remove({});
var s5 = db.stats().indexSize;
-debug( "s5: " + s5 );
-assert.automsg( "s5 < s4" ); \ No newline at end of file
+debug("s5: " + s5);
+assert.automsg("s5 < s4"); \ No newline at end of file
diff --git a/jstests/mmap_v1/indexi.js b/jstests/mmap_v1/indexi.js
index e23d70e7b6d..2d6c501a31e 100644
--- a/jstests/mmap_v1/indexi.js
+++ b/jstests/mmap_v1/indexi.js
@@ -7,11 +7,11 @@ idx = db.jstests_indexi.$_id_;
// Test that accessing the index namespace fails.
function checkFailingOperations() {
- assert.writeError( idx.insert({ x: 1 }) );
- assert.writeError( idx.update({ x: 1 }, { x: 2 }) );
- assert.writeError( idx.remove({ x: 1 }) );
- assert.commandFailed( idx.runCommand( 'compact' ) );
- assert.commandFailed( idx.ensureIndex({ x: 1 }));
+ assert.writeError(idx.insert({x: 1}));
+ assert.writeError(idx.update({x: 1}, {x: 2}));
+ assert.writeError(idx.remove({x: 1}));
+ assert.commandFailed(idx.runCommand('compact'));
+ assert.commandFailed(idx.ensureIndex({x: 1}));
}
// Check with base collection not present.
@@ -20,4 +20,3 @@ t.save({});
// Check with base collection present.
checkFailingOperations();
-
diff --git a/jstests/mmap_v1/list_collections2.js b/jstests/mmap_v1/list_collections2.js
index ff5415a5d08..fdd77767be7 100644
--- a/jstests/mmap_v1/list_collections2.js
+++ b/jstests/mmap_v1/list_collections2.js
@@ -1,33 +1,39 @@
// Test the listCollections command and system.namespaces
-mydb = db.getSisterDB( "list_collections1" );
+mydb = db.getSisterDB("list_collections1");
mydb.dropDatabase();
-mydb.foo.insert( { x : 5 } );
+mydb.foo.insert({x: 5});
-mydb.runCommand( { create : "bar", temp : true } );
+mydb.runCommand({create: "bar", temp: true});
-res = mydb.runCommand( "listCollections" );
-collections = new DBCommandCursor( db.getMongo(), res ).toArray();
+res = mydb.runCommand("listCollections");
+collections = new DBCommandCursor(db.getMongo(), res).toArray();
-bar = collections.filter( function(x){ return x.name == "bar"; } )[0];
-foo = collections.filter( function(x){ return x.name == "foo" ; } )[0];
+bar = collections.filter(function(x) {
+ return x.name == "bar";
+})[0];
+foo = collections.filter(function(x) {
+ return x.name == "foo";
+})[0];
-assert( bar );
-assert( foo );
+assert(bar);
+assert(foo);
-assert.eq( bar.name, mydb.bar.getName() );
-assert.eq( foo.name, mydb.foo.getName() );
+assert.eq(bar.name, mydb.bar.getName());
+assert.eq(foo.name, mydb.foo.getName());
-assert( mydb.bar.temp, tojson( bar ) );
+assert(mydb.bar.temp, tojson(bar));
-getCollectionName = function(infoObj) { return infoObj.name; };
+getCollectionName = function(infoObj) {
+ return infoObj.name;
+};
-assert.eq( mydb._getCollectionInfosSystemNamespaces().map(getCollectionName),
- mydb._getCollectionInfosCommand().map(getCollectionName) );
+assert.eq(mydb._getCollectionInfosSystemNamespaces().map(getCollectionName),
+ mydb._getCollectionInfosCommand().map(getCollectionName));
-assert.eq( mydb.getCollectionInfos().map(getCollectionName),
- mydb._getCollectionInfosCommand().map(getCollectionName) );
+assert.eq(mydb.getCollectionInfos().map(getCollectionName),
+ mydb._getCollectionInfosCommand().map(getCollectionName));
// Test the listCollections command and querying system.namespaces when a filter is specified.
assert.eq(mydb._getCollectionInfosSystemNamespaces({name: "foo"}).map(getCollectionName),
diff --git a/jstests/mmap_v1/list_indexes2.js b/jstests/mmap_v1/list_indexes2.js
index b235ddf3b0b..71d15b3e900 100644
--- a/jstests/mmap_v1/list_indexes2.js
+++ b/jstests/mmap_v1/list_indexes2.js
@@ -3,15 +3,12 @@
t = db.list_indexes2;
t.drop();
-t.insert( { x : 1 } );
+t.insert({x: 1});
-assert.eq( t._getIndexesSystemIndexes(),
- t._getIndexesCommand() );
+assert.eq(t._getIndexesSystemIndexes(), t._getIndexesCommand());
-t.ensureIndex( { x : 1 } );
+t.ensureIndex({x: 1});
-assert.eq( t._getIndexesSystemIndexes(),
- t._getIndexesCommand() );
+assert.eq(t._getIndexesSystemIndexes(), t._getIndexesCommand());
-assert.eq( t.getIndexes(),
- t._getIndexesCommand() );
+assert.eq(t.getIndexes(), t._getIndexesCommand());
diff --git a/jstests/mmap_v1/repair_cursor1.js b/jstests/mmap_v1/repair_cursor1.js
index 93362ff8c24..fefc9bbdecb 100644
--- a/jstests/mmap_v1/repair_cursor1.js
+++ b/jstests/mmap_v1/repair_cursor1.js
@@ -2,20 +2,18 @@
t = db.repair_cursor1;
t.drop();
-t.insert( { x : 1 } );
-t.insert( { x : 2 } );
+t.insert({x: 1});
+t.insert({x: 2});
-res = t.runCommand( "repairCursor" );
-assert( res.ok, tojson( res ) );
+res = t.runCommand("repairCursor");
+assert(res.ok, tojson(res));
t2 = db.repair_cursor1a;
t2.drop();
-cursor = new DBCommandCursor( db._mongo, res );
-cursor.forEach( function(z){ t2.insert(z); } );
-assert.eq( t.find().itcount(), t2.find().itcount() );
-assert.eq( t.hashAllDocs(), t2.hashAllDocs() );
-
-
-
-
+cursor = new DBCommandCursor(db._mongo, res);
+cursor.forEach(function(z) {
+ t2.insert(z);
+});
+assert.eq(t.find().itcount(), t2.find().itcount());
+assert.eq(t.hashAllDocs(), t2.hashAllDocs());
diff --git a/jstests/mmap_v1/reverse_empty_extent.js b/jstests/mmap_v1/reverse_empty_extent.js
index bb661952fc9..9b2cb4a1002 100644
--- a/jstests/mmap_v1/reverse_empty_extent.js
+++ b/jstests/mmap_v1/reverse_empty_extent.js
@@ -2,24 +2,23 @@
// Create a collection with three small extents
db.jstests_reversecursor.drop();
-db.runCommand({"create":"jstests_reversecursor", $nExtents: [4096,4096,4096]});
+db.runCommand({"create": "jstests_reversecursor", $nExtents: [4096, 4096, 4096]});
// Function to check whether all three extents are non empty
function extentsSpanned() {
var extents = db.jstests_reversecursor.validate(true).extents;
- return (extents[0].firstRecord != "null" &&
- extents[1].firstRecord != "null" &&
+ return (extents[0].firstRecord != "null" && extents[1].firstRecord != "null" &&
extents[2].firstRecord != "null");
}
// Insert enough documents to span all three extents
a = 0;
while (!extentsSpanned()) {
- db.jstests_reversecursor.insert({a:a++});
+ db.jstests_reversecursor.insert({a: a++});
}
// Delete all the elements in the middle
-db.jstests_reversecursor.remove({a:{$gt:0,$lt:a-1}});
+db.jstests_reversecursor.remove({a: {$gt: 0, $lt: a - 1}});
// Make sure the middle extent is empty and that both end extents are not empty
assert.eq(db.jstests_reversecursor.validate(true).extents[1].firstRecord, "null");
@@ -30,5 +29,5 @@ assert.neq(db.jstests_reversecursor.validate(true).extents[2].firstRecord, "null
assert.neq(db.jstests_reversecursor.validate(true).extents[2].lastRecord, "null");
// Make sure that we get the same number of elements for both the forward and reverse cursors
-assert.eq(db.jstests_reversecursor.find().sort({$natural:1}).toArray().length, 2);
-assert.eq(db.jstests_reversecursor.find().sort({$natural:-1}).toArray().length, 2);
+assert.eq(db.jstests_reversecursor.find().sort({$natural: 1}).toArray().length, 2);
+assert.eq(db.jstests_reversecursor.find().sort({$natural: -1}).toArray().length, 2);
diff --git a/jstests/mmap_v1/stats.js b/jstests/mmap_v1/stats.js
index ae9affc9ec8..9d1e95773b7 100644
--- a/jstests/mmap_v1/stats.js
+++ b/jstests/mmap_v1/stats.js
@@ -1,23 +1,23 @@
-var statsDB = db.getSiblingDB( "stats" );
+var statsDB = db.getSiblingDB("stats");
statsDB.dropDatabase();
var t = statsDB.stats1;
-t.save( { a : 1 } );
+t.save({a: 1});
-assert.lt( 0 , t.dataSize() , "A" );
-assert.lt( t.dataSize() , t.storageSize() , "B" );
-assert.lt( 0 , t.totalIndexSize() , "C" );
+assert.lt(0, t.dataSize(), "A");
+assert.lt(t.dataSize(), t.storageSize(), "B");
+assert.lt(0, t.totalIndexSize(), "C");
var stats = statsDB.stats();
-assert.gt( stats.fileSize, 0 );
-assert.eq( stats.dataFileVersion.major, 4 );
-assert.eq( stats.dataFileVersion.minor, 22 );
+assert.gt(stats.fileSize, 0);
+assert.eq(stats.dataFileVersion.major, 4);
+assert.eq(stats.dataFileVersion.minor, 22);
// test empty database; should be no dataFileVersion
statsDB.dropDatabase();
var statsEmptyDB = statsDB.stats();
-assert.eq( statsEmptyDB.fileSize, 0 );
-assert.isnull( statsEmptyDB.dataFileVersion );
+assert.eq(statsEmptyDB.fileSize, 0);
+assert.isnull(statsEmptyDB.dataFileVersion);
statsDB.dropDatabase();
diff --git a/jstests/mmap_v1/touch1.js b/jstests/mmap_v1/touch1.js
index 8de16c7131d..91f7d1378a5 100644
--- a/jstests/mmap_v1/touch1.js
+++ b/jstests/mmap_v1/touch1.js
@@ -2,12 +2,12 @@
t = db.touch1;
t.drop();
-t.insert( { x : 1 } );
-t.ensureIndex( { x : 1 } );
+t.insert({x: 1});
+t.ensureIndex({x: 1});
-res = t.runCommand( "touch" );
-assert( !res.ok, tojson( res ) );
+res = t.runCommand("touch");
+assert(!res.ok, tojson(res));
-res = t.runCommand( "touch", { data : true, index : true } );
-assert.eq( 1, res.data.numRanges, tojson( res ) );
-assert.eq( 1, res.ok, tojson( res ) );
+res = t.runCommand("touch", {data: true, index: true});
+assert.eq(1, res.data.numRanges, tojson(res));
+assert.eq(1, res.ok, tojson(res));
diff --git a/jstests/mmap_v1/update.js b/jstests/mmap_v1/update.js
index 818c6fd6574..fd96337aacf 100644
--- a/jstests/mmap_v1/update.js
+++ b/jstests/mmap_v1/update.js
@@ -1,36 +1,42 @@
-asdf = db.getCollection( "asdf" );
+asdf = db.getCollection("asdf");
asdf.drop();
var txt = "asdf";
-for(var i=0; i<10; i++) {
+for (var i = 0; i < 10; i++) {
txt = txt + txt;
}
var iterations = _isWindows() ? 2500 : 5000;
// fill db
-for(var i=1; i<=iterations; i++) {
- var obj = {txt : txt};
+for (var i = 1; i <= iterations; i++) {
+ var obj = {
+ txt: txt
+ };
asdf.save(obj);
- var obj2 = {txt: txt, comments: [{num: i, txt: txt}, {num: [], txt: txt}, {num: true, txt: txt}]};
+ var obj2 = {
+ txt: txt,
+ comments: [{num: i, txt: txt}, {num: [], txt: txt}, {num: true, txt: txt}]
+ };
asdf.update(obj, obj2);
- if(i%100 == 0) {
+ if (i % 100 == 0) {
var c = asdf.count();
- assert.eq(c , i);
+ assert.eq(c, i);
}
}
assert(asdf.validate().valid);
-var stats = db.runCommand({ collstats: "asdf" });
+var stats = db.runCommand({collstats: "asdf"});
-// some checks. want to check that padding factor is working; in addition this lets us do a little basic
+// some checks. want to check that padding factor is working; in addition this lets us do a little
+// basic
// testing of the collstats command at the same time
assert(stats.count == iterations);
-assert(stats.size < 140433012 * 5 && stats.size > 1000000);
+assert(stats.size<140433012 * 5 && stats.size> 1000000);
assert(stats.numExtents < 20);
assert(stats.nindexes == 1);
diff --git a/jstests/mmap_v1/use_power_of_2.js b/jstests/mmap_v1/use_power_of_2.js
index 7e7d8a466b2..a192a79653d 100644
--- a/jstests/mmap_v1/use_power_of_2.js
+++ b/jstests/mmap_v1/use_power_of_2.js
@@ -1,15 +1,23 @@
/*
* This test ensures that the usePowerOf2 user flag effectively reuses space.
- *
+ *
* As of SERVER-15273 usePowerOf2 is silently ignored so the behavior is the same regardless.
*/
// prepare a doc of 14K
-var doc = { _id: new Object(), data: "a" };
-var bigDoc = { _id: new Object(), data: "a" };
-
-while (doc.data.length < 14 * 1024) doc.data += "a";
-while (bigDoc.data.length < 15 * 1024) bigDoc.data += "a";
+var doc = {
+ _id: new Object(),
+ data: "a"
+};
+var bigDoc = {
+ _id: new Object(),
+ data: "a"
+};
+
+while (doc.data.length < 14 * 1024)
+ doc.data += "a";
+while (bigDoc.data.length < 15 * 1024)
+ bigDoc.data += "a";
var collName = "usepower1";
var t = db.getCollection(collName);
@@ -32,23 +40,21 @@ function checkStorageSize(expectedSize, sameLoc) {
t.drop();
db.createCollection(collName);
-var res = db.runCommand( { "collMod" : collName , "usePowerOf2Sizes" : false } );
-assert( res.ok, "collMod failed" );
-checkStorageSize(16*1023, true); // 15344 = 14369 (bsonsize) + overhead
+var res = db.runCommand({"collMod": collName, "usePowerOf2Sizes": false});
+assert(res.ok, "collMod failed");
+checkStorageSize(16 * 1023, true); // 15344 = 14369 (bsonsize) + overhead
t.drop();
db.createCollection(collName);
-var res = db.runCommand( { "collMod" : collName , "usePowerOf2Sizes" : true } );
-assert( res.ok, "collMod failed" );
-checkStorageSize(16 * 1023, true); // power of 2
-
+var res = db.runCommand({"collMod": collName, "usePowerOf2Sizes": true});
+assert(res.ok, "collMod failed");
+checkStorageSize(16 * 1023, true); // power of 2
// Create collection with flag
t.drop();
-db.runCommand({"create" : collName, "flags" : 0 });
-checkStorageSize(16*1023, true);
+db.runCommand({"create": collName, "flags": 0});
+checkStorageSize(16 * 1023, true);
t.drop();
-db.runCommand({"create" : collName, "flags" : 1 });
-checkStorageSize(16 * 1023, true); // power of 2
-
+db.runCommand({"create": collName, "flags": 1});
+checkStorageSize(16 * 1023, true); // power of 2
diff --git a/jstests/mmap_v1/use_power_of_2_a.js b/jstests/mmap_v1/use_power_of_2_a.js
index 598655429d0..3b8642f09f9 100644
--- a/jstests/mmap_v1/use_power_of_2_a.js
+++ b/jstests/mmap_v1/use_power_of_2_a.js
@@ -15,22 +15,22 @@ function test(defaultMode) {
// capped should obey default (even though it is ignored)
db.c.drop();
- db.createCollection('c', {capped:true, size: 10});
+ db.createCollection('c', {capped: true, size: 10});
assert.eq(db.c.stats().userFlags & 1, defaultMode);
// capped explicitly off should be 0
db.d.drop();
- db.createCollection('d', {capped:true, size: 10, usePowerOf2Sizes: false});
+ db.createCollection('d', {capped: true, size: 10, usePowerOf2Sizes: false});
assert.eq(db.d.stats().userFlags & 1, 0);
// capped and ask explicitly for powerOf2 should be 1
db.e.drop();
- db.createCollection('e', {capped:true, size: 10, usePowerOf2Sizes: true});
+ db.createCollection('e', {capped: true, size: 10, usePowerOf2Sizes: true});
assert.eq(db.e.stats().userFlags & 1, 1);
}
-assert.eq(db.adminCommand({getParameter:1,
- newCollectionsUsePowerOf2Sizes: true}).newCollectionsUsePowerOf2Sizes, true);
+assert.eq(db.adminCommand({getParameter: 1, newCollectionsUsePowerOf2Sizes: true})
+ .newCollectionsUsePowerOf2Sizes,
+ true);
test(1);
-
diff --git a/jstests/multiVersion/1_test_launching_replset.js b/jstests/multiVersion/1_test_launching_replset.js
index af1bdf6b330..e9709b6dd2b 100644
--- a/jstests/multiVersion/1_test_launching_replset.js
+++ b/jstests/multiVersion/1_test_launching_replset.js
@@ -3,51 +3,50 @@
//
// Check our latest versions
-var versionsToCheck = [ "last-stable",
- "latest" ];
+var versionsToCheck = ["last-stable", "latest"];
load('./jstests/multiVersion/libs/verify_versions.js');
-jsTest.log( "Testing legacy versions..." );
+jsTest.log("Testing legacy versions...");
-for( var i = 0; i < versionsToCheck.length; i++ ){
+for (var i = 0; i < versionsToCheck.length; i++) {
+ var version = versionsToCheck[i];
- var version = versionsToCheck[ i ];
-
// Set up a replica set
-
- var rst = new ReplSetTest({ nodes : 2 });
-
- rst.startSet({ binVersion : version });
-
+
+ var rst = new ReplSetTest({nodes: 2});
+
+ rst.startSet({binVersion: version});
+
var nodes = rst.nodes;
-
+
// Make sure the started versions are actually the correct versions
- for( var j = 0; j < nodes.length; j++ ) assert.binVersion(nodes[j], version);
-
+ for (var j = 0; j < nodes.length; j++)
+ assert.binVersion(nodes[j], version);
+
rst.stopSet();
}
-jsTest.log( "Testing mixed versions..." );
+jsTest.log("Testing mixed versions...");
// Set up a multi-version replica set
-var rst = new ReplSetTest({ nodes : 2 });
+var rst = new ReplSetTest({nodes: 2});
-rst.startSet({ binVersion : versionsToCheck });
+rst.startSet({binVersion: versionsToCheck});
var nodes = rst.nodes;
-//Make sure we have hosts of all the different versions
+// Make sure we have hosts of all the different versions
var versionsFound = [];
-for( var j = 0; j < nodes.length; j++ )
+for (var j = 0; j < nodes.length; j++)
versionsFound.push(nodes[j].getBinVersion());
assert.allBinVersions(versionsToCheck, versionsFound);
rst.stopSet();
-jsTest.log( "Done!" );
+jsTest.log("Done!");
//
// End
diff --git a/jstests/multiVersion/2_test_launching_cluster.js b/jstests/multiVersion/2_test_launching_cluster.js
index b8588ce530a..4ecc4431e8b 100644
--- a/jstests/multiVersion/2_test_launching_cluster.js
+++ b/jstests/multiVersion/2_test_launching_cluster.js
@@ -5,134 +5,140 @@
load('./jstests/multiVersion/libs/verify_versions.js');
(function() {
-"use strict";
-// Check our latest versions
-//var versionsToCheck = [ "last-stable", "latest" ];
-//var versionsToCheckMongos = [ "last-stable" ];
-// TODO put this back when SERVER-22761 is resolved
-
-var versionsToCheck = [ "latest" ];
-var versionsToCheckMongos = [ "latest" ];
-
-jsTest.log( "Testing legacy versions..." );
-
-for( var i = 0; i < versionsToCheck.length; i++ ){
-
- var version = versionsToCheck[ i ];
-
- // Set up a cluster
-
- var st = new ShardingTest({ shards : 2,
- mongos : 2,
- other : {
- mongosOptions : { binVersion : version },
- configOptions : { binVersion : version },
- shardOptions : { binVersion : version }
- } });
-
- var shards = [ st.shard0, st.shard1 ];
- var mongoses = [ st.s0, st.s1 ];
- var configs = [ st.config0 ];
-
- // Make sure the started versions are actually the correct versions
- for( var j = 0; j < shards.length; j++ ) assert.binVersion( shards[j], version );
- for( j = 0; j < mongoses.length; j++ ) assert.binVersion( mongoses[j], version );
- for( j = 0; j < configs.length; j++ ) assert.binVersion( configs[j], version );
-
+ "use strict";
+ // Check our latest versions
+ // var versionsToCheck = [ "last-stable", "latest" ];
+ // var versionsToCheckMongos = [ "last-stable" ];
+ // TODO put this back when SERVER-22761 is resolved
+
+ var versionsToCheck = ["latest"];
+ var versionsToCheckMongos = ["latest"];
+
+ jsTest.log("Testing legacy versions...");
+
+ for (var i = 0; i < versionsToCheck.length; i++) {
+ var version = versionsToCheck[i];
+
+ // Set up a cluster
+
+ var st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ other: {
+ mongosOptions: {binVersion: version},
+ configOptions: {binVersion: version},
+ shardOptions: {binVersion: version}
+ }
+ });
+
+ var shards = [st.shard0, st.shard1];
+ var mongoses = [st.s0, st.s1];
+ var configs = [st.config0];
+
+ // Make sure the started versions are actually the correct versions
+ for (var j = 0; j < shards.length; j++)
+ assert.binVersion(shards[j], version);
+ for (j = 0; j < mongoses.length; j++)
+ assert.binVersion(mongoses[j], version);
+ for (j = 0; j < configs.length; j++)
+ assert.binVersion(configs[j], version);
+
+ st.stop();
+ }
+
+ jsTest.log("Testing mixed versions...");
+
+ // Set up a multi-version cluster
+
+ st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ other: {
+ mongosOptions: {binVersion: versionsToCheckMongos},
+ configOptions: {binVersion: versionsToCheck},
+ shardOptions: {binVersion: versionsToCheck}
+
+ }
+ });
+
+ shards = [st.shard0, st.shard1];
+ mongoses = [st.s0, st.s1];
+ configs = [st.config0, st.config1, st.config2];
+
+ // Make sure we have hosts of all the different versions
+ var versionsFound = [];
+ for (j = 0; j < shards.length; j++)
+ versionsFound.push(shards[j].getBinVersion());
+
+ assert.allBinVersions(versionsToCheck, versionsFound);
+
+ versionsFound = [];
+ for (j = 0; j < mongoses.length; j++)
+ versionsFound.push(mongoses[j].getBinVersion());
+
+ assert.allBinVersions(versionsToCheckMongos, versionsFound);
+
+ versionsFound = [];
+ for (j = 0; j < configs.length; j++)
+ versionsFound.push(configs[j].getBinVersion());
+
+ assert.allBinVersions(versionsToCheck, versionsFound);
+
st.stop();
-}
-
-jsTest.log( "Testing mixed versions..." );
-
-// Set up a multi-version cluster
-
-st = new ShardingTest({ shards : 2,
- mongos : 2,
- other : {
- mongosOptions : { binVersion : versionsToCheckMongos },
- configOptions : { binVersion : versionsToCheck },
- shardOptions : { binVersion : versionsToCheck }
-
- } });
-
-shards = [ st.shard0, st.shard1 ];
-mongoses = [ st.s0, st.s1 ];
-configs = [ st.config0, st.config1, st.config2 ];
-
-// Make sure we have hosts of all the different versions
-var versionsFound = [];
-for ( j = 0; j < shards.length; j++ )
- versionsFound.push( shards[j].getBinVersion() );
-
-assert.allBinVersions( versionsToCheck, versionsFound );
-
-versionsFound = [];
-for ( j = 0; j < mongoses.length; j++ )
- versionsFound.push( mongoses[j].getBinVersion() );
-
-assert.allBinVersions( versionsToCheckMongos, versionsFound );
-
-versionsFound = [];
-for ( j = 0; j < configs.length; j++ )
- versionsFound.push( configs[j].getBinVersion() );
-
-assert.allBinVersions( versionsToCheck, versionsFound );
-
-st.stop();
-
-
-jsTest.log( "Testing mixed versions with replica sets..." );
-
-// Set up a multi-version cluster w/ replica sets
-
-st = new ShardingTest({ shards : 2,
- mongos : 2,
- other : {
- // Replica set shards
- rs : true,
-
- mongosOptions : { binVersion : versionsToCheckMongos },
- configOptions : { binVersion : versionsToCheck },
- rsOptions : { binVersion : versionsToCheck, protocolVersion: 0 }
- } });
-
-var nodesA = st.rs0.nodes;
-var nodesB = st.rs1.nodes;
-mongoses = [ st.s0, st.s1 ];
-configs = [ st.config0, st.config1, st.config2 ];
-
-var getVersion = function( mongo ){
- var result = mongo.getDB( "admin" ).runCommand({ serverStatus : 1 });
- return result.version;
-};
-
-// Make sure we have hosts of all the different versions
-versionsFound = [];
-for ( j = 0; j < nodesA.length; j++ )
- versionsFound.push( nodesA[j].getBinVersion() );
-
-assert.allBinVersions( versionsToCheck, versionsFound );
-
-versionsFound = [];
-for ( j = 0; j < nodesB.length; j++ )
- versionsFound.push( nodesB[j].getBinVersion() );
-
-assert.allBinVersions( versionsToCheck, versionsFound );
-
-versionsFound = [];
-for ( j = 0; j < mongoses.length; j++ )
- versionsFound.push( mongoses[j].getBinVersion() );
-
-assert.allBinVersions( versionsToCheckMongos, versionsFound );
-
-versionsFound = [];
-for ( j = 0; j < configs.length; j++ )
- versionsFound.push( configs[j].getBinVersion() );
-
-assert.allBinVersions( versionsToCheck, versionsFound );
-
-jsTest.log("DONE!");
-
-st.stop();
-})();
+ jsTest.log("Testing mixed versions with replica sets...");
+
+ // Set up a multi-version cluster w/ replica sets
+
+ st = new ShardingTest({
+ shards: 2,
+ mongos: 2,
+ other: {
+ // Replica set shards
+ rs: true,
+
+ mongosOptions: {binVersion: versionsToCheckMongos},
+ configOptions: {binVersion: versionsToCheck},
+ rsOptions: {binVersion: versionsToCheck, protocolVersion: 0}
+ }
+ });
+
+ var nodesA = st.rs0.nodes;
+ var nodesB = st.rs1.nodes;
+ mongoses = [st.s0, st.s1];
+ configs = [st.config0, st.config1, st.config2];
+
+ var getVersion = function(mongo) {
+ var result = mongo.getDB("admin").runCommand({serverStatus: 1});
+ return result.version;
+ };
+
+ // Make sure we have hosts of all the different versions
+ versionsFound = [];
+ for (j = 0; j < nodesA.length; j++)
+ versionsFound.push(nodesA[j].getBinVersion());
+
+ assert.allBinVersions(versionsToCheck, versionsFound);
+
+ versionsFound = [];
+ for (j = 0; j < nodesB.length; j++)
+ versionsFound.push(nodesB[j].getBinVersion());
+
+ assert.allBinVersions(versionsToCheck, versionsFound);
+
+ versionsFound = [];
+ for (j = 0; j < mongoses.length; j++)
+ versionsFound.push(mongoses[j].getBinVersion());
+
+ assert.allBinVersions(versionsToCheckMongos, versionsFound);
+
+ versionsFound = [];
+ for (j = 0; j < configs.length; j++)
+ versionsFound.push(configs[j].getBinVersion());
+
+ assert.allBinVersions(versionsToCheck, versionsFound);
+
+ jsTest.log("DONE!");
+
+ st.stop();
+})();
diff --git a/jstests/multiVersion/3_upgrade_replset.js b/jstests/multiVersion/3_upgrade_replset.js
index d713a59bb43..45d3a7f4844 100644
--- a/jstests/multiVersion/3_upgrade_replset.js
+++ b/jstests/multiVersion/3_upgrade_replset.js
@@ -2,84 +2,77 @@
// Tests upgrading a replica set
//
-load( './jstests/multiVersion/libs/multi_rs.js' );
-load( './jstests/libs/test_background_ops.js' );
+load('./jstests/multiVersion/libs/multi_rs.js');
+load('./jstests/libs/test_background_ops.js');
var oldVersion = "last-stable";
-var nodes = { n1 : { binVersion : oldVersion },
- n2 : { binVersion : oldVersion },
- a3 : { binVersion : oldVersion } };
+var nodes = {
+ n1: {binVersion: oldVersion},
+ n2: {binVersion: oldVersion},
+ a3: {binVersion: oldVersion}
+};
-var rst = new ReplSetTest({ nodes : nodes });
+var rst = new ReplSetTest({nodes: nodes});
rst.startSet();
rst.initiate();
// Wait for a primary node...
var primary = rst.getPrimary();
-var otherOpConn = new Mongo( rst.getURL() );
+var otherOpConn = new Mongo(rst.getURL());
var insertNS = "test.foo";
+jsTest.log("Starting parallel operations during upgrade...");
-jsTest.log( "Starting parallel operations during upgrade..." );
-
-function findAndInsert( rsURL, coll ){
-
- var coll = new Mongo( rsURL ).getCollection( coll + "" );
+function findAndInsert(rsURL, coll) {
+ var coll = new Mongo(rsURL).getCollection(coll + "");
var count = 0;
-
- jsTest.log( "Starting finds and inserts..." );
-
- while( ! isFinished() ){
-
- try{
-
- coll.insert({ _id : count, hello : "world" });
- assert.eq( null, coll.getDB().getLastError() );
- assert.neq( null, coll.findOne({ _id : count }) );
- }
- catch( e ){
- printjson( e );
+
+ jsTest.log("Starting finds and inserts...");
+
+ while (!isFinished()) {
+ try {
+ coll.insert({_id: count, hello: "world"});
+ assert.eq(null, coll.getDB().getLastError());
+ assert.neq(null, coll.findOne({_id: count}));
+ } catch (e) {
+ printjson(e);
}
-
+
count++;
}
-
- jsTest.log( "Finished finds and inserts..." );
+
+ jsTest.log("Finished finds and inserts...");
return count;
}
-var joinFindInsert =
- startParallelOps( primary, // The connection where the test info is passed and stored
- findAndInsert,
- [ rst.getURL(), insertNS ] );
-
+var joinFindInsert =
+ startParallelOps(primary, // The connection where the test info is passed and stored
+ findAndInsert,
+ [rst.getURL(), insertNS]);
-jsTest.log( "Upgrading replica set..." );
+jsTest.log("Upgrading replica set...");
-rst.upgradeSet({ binVersion: "latest" });
+rst.upgradeSet({binVersion: "latest"});
-jsTest.log( "Replica set upgraded." );
+jsTest.log("Replica set upgraded.");
// Wait for primary
var primary = rst.getPrimary();
-printjson( rst.status() );
-
+printjson(rst.status());
// Allow more valid writes to go through
-sleep( 10 * 1000 );
-
+sleep(10 * 1000);
joinFindInsert();
-var totalInserts = primary.getCollection( insertNS ).find().sort({ _id : -1 }).next()._id + 1;
-var dataFound = primary.getCollection( insertNS ).count();
+var totalInserts = primary.getCollection(insertNS).find().sort({_id: -1}).next()._id + 1;
+var dataFound = primary.getCollection(insertNS).count();
-jsTest.log( "Found " + dataFound + " docs out of " + tojson( totalInserts ) + " inserted." );
+jsTest.log("Found " + dataFound + " docs out of " + tojson(totalInserts) + " inserted.");
-assert.gt( dataFound / totalInserts, 0.5 );
+assert.gt(dataFound / totalInserts, 0.5);
rst.stopSet();
-
diff --git a/jstests/multiVersion/balancer_multiVersion_detect.js b/jstests/multiVersion/balancer_multiVersion_detect.js
index c1db3036cfb..3ebf847829a 100644
--- a/jstests/multiVersion/balancer_multiVersion_detect.js
+++ b/jstests/multiVersion/balancer_multiVersion_detect.js
@@ -2,28 +2,28 @@
// Test checks whether the balancer correctly detects a mixed set of shards
//
-jsTest.log( "Starting cluster..." );
+jsTest.log("Starting cluster...");
var options = {
-
- mongosOptions : { verbose : 1, useLogFiles : true },
- configOptions : { },
- shardOptions : { binVersion : [ "latest", "last-stable" ] },
- sync : false,
- enableBalancer : true
+
+ mongosOptions: {verbose: 1, useLogFiles: true},
+ configOptions: {},
+ shardOptions: {binVersion: ["latest", "last-stable"]},
+ sync: false,
+ enableBalancer: true
};
-var st = new ShardingTest({ shards : 3, mongos : 1, other : options });
+var st = new ShardingTest({shards: 3, mongos: 1, other: options});
var mongos = st.s0;
var admin = mongos.getDB("admin");
var coll = mongos.getCollection("foo.bar");
-printjson(admin.runCommand({ enableSharding : coll.getDB() + "" }));
+printjson(admin.runCommand({enableSharding: coll.getDB() + ""}));
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-printjson(admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }));
+printjson(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-assert.soon( function() {
+assert.soon(function() {
var log = cat(mongos.fullOptions.logFile);
return /multiVersion cluster detected/.test(log);
}, "multiVersion warning not printed!", 30 * 16 * 60 * 1000, 5 * 1000);
diff --git a/jstests/multiVersion/downgrade_replset.js b/jstests/multiVersion/downgrade_replset.js
index 03c40114b0e..abffcf3875e 100644
--- a/jstests/multiVersion/downgrade_replset.js
+++ b/jstests/multiVersion/downgrade_replset.js
@@ -8,9 +8,11 @@ var newVersion = "latest";
var oldVersion = "last-stable";
var name = "replsetdowngrade";
-var nodes = {n1: {binVersion: newVersion},
- n2: {binVersion: newVersion},
- n3: {binVersion: newVersion}};
+var nodes = {
+ n1: {binVersion: newVersion},
+ n2: {binVersion: newVersion},
+ n3: {binVersion: newVersion}
+};
var rst = new ReplSetTest({name: name, nodes: nodes, nodeOptions: {storageEngine: 'mmapv1'}});
rst.startSet();
@@ -22,7 +24,7 @@ var primary = rst.getPrimary();
var coll = "test.foo";
jsTest.log("Inserting documents into collection.");
-for (var i=0; i<10; i++) {
+for (var i = 0; i < 10; i++) {
primary.getCollection(coll).insert({_id: i, str: "hello world"});
}
@@ -39,7 +41,7 @@ jsTest.log("Starting parallel operations during downgrade..");
var joinFindInsert = startParallelOps(primary, insertDocuments, [rst.getURL(), coll]);
jsTest.log("Downgrading replica set..");
-rst.upgradeSet({ binVersion: oldVersion });
+rst.upgradeSet({binVersion: oldVersion});
jsTest.log("Downgrade complete.");
primary = rst.getPrimary();
diff --git a/jstests/multiVersion/dumprestore.js b/jstests/multiVersion/dumprestore.js
index 07b7895c07f..8d692e4abb6 100644
--- a/jstests/multiVersion/dumprestore.js
+++ b/jstests/multiVersion/dumprestore.js
@@ -1,7 +1,6 @@
// dumprestore.js
-load( './jstests/multiVersion/libs/dumprestore_helpers.js' );
-
+load('./jstests/multiVersion/libs/dumprestore_helpers.js');
// The base name to use for various things in the test, including the dbpath and the database name
var testBaseName = "jstests_tool_dumprestore";
@@ -12,14 +11,13 @@ var testDbpath = MongoRunner.dataPath + testBaseName + "_dbpath_external/";
// Start with basic multiversion tests just running against a single mongod
var singleNodeTests = {
- 'serverSourceVersion' : [ "latest", "last-stable" ],
- 'serverDestVersion' :[ "latest", "last-stable" ],
- 'mongoDumpVersion' :[ "latest", "last-stable" ],
- 'mongoRestoreVersion' :[ "latest", "last-stable" ],
- 'dumpDir' : [ dumpDir ],
- 'testDbpath' : [ testDbpath ],
- 'dumpType' : [ "mongod" ],
- 'restoreType' : [ "mongod" ]
+ 'serverSourceVersion': ["latest", "last-stable"],
+ 'serverDestVersion': ["latest", "last-stable"],
+ 'mongoDumpVersion': ["latest", "last-stable"],
+ 'mongoRestoreVersion': ["latest", "last-stable"],
+ 'dumpDir': [dumpDir],
+ 'testDbpath': [testDbpath],
+ 'dumpType': ["mongod"],
+ 'restoreType': ["mongod"]
};
runAllDumpRestoreTests(singleNodeTests);
-
diff --git a/jstests/multiVersion/dumprestore_sharded.js b/jstests/multiVersion/dumprestore_sharded.js
index 5d4a603dd3f..dd916164b0c 100644
--- a/jstests/multiVersion/dumprestore_sharded.js
+++ b/jstests/multiVersion/dumprestore_sharded.js
@@ -1,7 +1,6 @@
// dumprestore_sharded.js
-load( './jstests/multiVersion/libs/dumprestore_helpers.js' );
-
+load('./jstests/multiVersion/libs/dumprestore_helpers.js');
// The base name to use for various things in the test, including the dbpath and the database name
var testBaseName = "jstests_tool_dumprestore_sharded";
@@ -10,33 +9,28 @@ var testBaseName = "jstests_tool_dumprestore_sharded";
var dumpDir = MongoRunner.dataPath + testBaseName + "_dump_external/";
var testDbpath = MongoRunner.dataPath + testBaseName + "_dbpath_external/";
-
-
// Test dumping from a sharded cluster across versions
var shardedDumpTests = {
- 'serverSourceVersion' : [ "latest", "last-stable" ],
- 'serverDestVersion' :[ "latest", "last-stable" ],
- 'mongoDumpVersion' :[ "latest", "last-stable" ],
- 'mongoRestoreVersion' :[ "latest", "last-stable" ],
- 'dumpDir' : [ dumpDir ],
- 'testDbpath' : [ testDbpath ],
- 'dumpType' : [ "mongos" ],
- 'restoreType' : [ "mongod" ]
+ 'serverSourceVersion': ["latest", "last-stable"],
+ 'serverDestVersion': ["latest", "last-stable"],
+ 'mongoDumpVersion': ["latest", "last-stable"],
+ 'mongoRestoreVersion': ["latest", "last-stable"],
+ 'dumpDir': [dumpDir],
+ 'testDbpath': [testDbpath],
+ 'dumpType': ["mongos"],
+ 'restoreType': ["mongod"]
};
runAllDumpRestoreTests(shardedDumpTests);
-
-
// Test restoring to a sharded cluster across versions
var shardedRestoreTests = {
- 'serverSourceVersion' : [ "latest", "last-stable" ],
- 'serverDestVersion' :[ "latest", "last-stable" ],
- 'mongoDumpVersion' :[ "latest", "last-stable" ],
- 'mongoRestoreVersion' :[ "latest", "last-stable" ],
- 'dumpDir' : [ dumpDir ],
- 'testDbpath' : [ testDbpath ],
- 'dumpType' : [ "mongod" ],
- 'restoreType' : [ "mongos" ]
+ 'serverSourceVersion': ["latest", "last-stable"],
+ 'serverDestVersion': ["latest", "last-stable"],
+ 'mongoDumpVersion': ["latest", "last-stable"],
+ 'mongoRestoreVersion': ["latest", "last-stable"],
+ 'dumpDir': [dumpDir],
+ 'testDbpath': [testDbpath],
+ 'dumpType': ["mongod"],
+ 'restoreType': ["mongos"]
};
runAllDumpRestoreTests(shardedRestoreTests);
-
diff --git a/jstests/multiVersion/geo_2dsphere_v2_to_v3.js b/jstests/multiVersion/geo_2dsphere_v2_to_v3.js
index 3cd0e616c7b..9fa3773e2a8 100644
--- a/jstests/multiVersion/geo_2dsphere_v2_to_v3.js
+++ b/jstests/multiVersion/geo_2dsphere_v2_to_v3.js
@@ -4,10 +4,7 @@ function generatePoint() {
var longitude = Math.random() * 10 - 5;
var latitude = Math.random() * 10 - 5;
var pt = {
- geometry : {
- type : "Point",
- coordinates : [longitude, latitude]
- }
+ geometry: {type: "Point", coordinates: [longitude, latitude]}
};
return pt;
}
@@ -28,17 +25,12 @@ function generatePolygons(amount) {
var numpoints = 4 + Math.floor(Math.random() * 10);
var dist = Math.random() * 5 + .01;
var coordinates = [];
- for (var j = 0; j < numpoints-1; j++) {
- var angle = (j/numpoints) * 2 * Math.PI;
+ for (var j = 0; j < numpoints - 1; j++) {
+ var angle = (j / numpoints) * 2 * Math.PI;
coordinates.push([dist * Math.cos(angle), dist * Math.sin(angle)]);
}
coordinates.push(coordinates[0]);
- polygons.push({
- geometry: {
- type: "Polygon",
- coordinates: [coordinates]
- }
- });
+ polygons.push({geometry: {type: "Polygon", coordinates: [coordinates]}});
}
return polygons;
}
@@ -58,14 +50,7 @@ function get2dsphereIndexVersion(coll) {
}
var nearQuery = {
- geometry: {
- $near : {
- $geometry : {
- type: "Point",
- coordinates: [0,0]
- }
- }
- }
+ geometry: {$near: {$geometry: {type: "Point", coordinates: [0, 0]}}}
};
var mongod = MongoRunner.runMongod({binVersion: "3.0"});
diff --git a/jstests/multiVersion/initialize_from_old_node.js b/jstests/multiVersion/initialize_from_old_node.js
index ad37397d50c..79aaafb6a4e 100644
--- a/jstests/multiVersion/initialize_from_old_node.js
+++ b/jstests/multiVersion/initialize_from_old_node.js
@@ -1,5 +1,5 @@
/*
- * This is a regression test for SERVER-16189, to make sure a replica set with both current and
+ * This is a regression test for SERVER-16189, to make sure a replica set with both current and
* prior version nodes can be initialized from the prior version node.
*/
diff --git a/jstests/multiVersion/initialsync.js b/jstests/multiVersion/initialsync.js
index dac71ff24f4..bbc06c11490 100644
--- a/jstests/multiVersion/initialsync.js
+++ b/jstests/multiVersion/initialsync.js
@@ -8,8 +8,10 @@ var newVersion = "latest";
var name = "multiversioninitsync";
var multitest = function(replSetVersion, newNodeVersion) {
- var nodes = {n1: {binVersion: replSetVersion},
- n2: {binVersion: replSetVersion}};
+ var nodes = {
+ n1: {binVersion: replSetVersion},
+ n2: {binVersion: replSetVersion}
+ };
print("Start up a two-node " + replSetVersion + " replica set.");
var rst = new ReplSetTest({name: name, nodes: nodes});
@@ -25,13 +27,12 @@ var multitest = function(replSetVersion, newNodeVersion) {
var primary = rst.getPrimary();
// Insert some data and wait for replication.
- for (var i=0; i<25; i++) {
+ for (var i = 0; i < 25; i++) {
primary.getDB("foo").foo.insert({_id: i});
}
rst.awaitReplication();
- print("Bring up a new node with version " + newNodeVersion +
- " and add to set.");
+ print("Bring up a new node with version " + newNodeVersion + " and add to set.");
rst.add({binVersion: newNodeVersion});
rst.reInitiate();
diff --git a/jstests/multiVersion/invalid_key_pattern_upgrade.js b/jstests/multiVersion/invalid_key_pattern_upgrade.js
index 27aa8590989..ce71333ef40 100644
--- a/jstests/multiVersion/invalid_key_pattern_upgrade.js
+++ b/jstests/multiVersion/invalid_key_pattern_upgrade.js
@@ -8,11 +8,7 @@
(function() {
'use strict';
- var testCases = [
- {a: 0},
- {a: NaN},
- {a: true},
- ];
+ var testCases = [{a: 0}, {a: NaN}, {a: true}, ];
// The mongod should not start up when an index with an invalid key pattern exists.
testCases.forEach(function(indexKeyPattern) {
@@ -30,8 +26,8 @@
// Start the old version.
var oldVersionOptions = Object.extend({binVersion: '3.2'}, defaultOptions);
var conn = MongoRunner.runMongod(oldVersionOptions);
- assert.neq(null, conn, 'mongod was unable to start up with options ' +
- tojson(oldVersionOptions));
+ assert.neq(
+ null, conn, 'mongod was unable to start up with options ' + tojson(oldVersionOptions));
// Use write commands in order to make assertions about the success of operations based on
// the response from the server.
@@ -42,8 +38,10 @@
// Start the newest version.
conn = MongoRunner.runMongod(defaultOptions);
- assert.eq(null, conn, 'mongod should not have been able to start up when an index with' +
- ' an invalid key pattern' + tojson(indexKeyPattern) + ' exists');
+ assert.eq(null,
+ conn,
+ 'mongod should not have been able to start up when an index with' +
+ ' an invalid key pattern' + tojson(indexKeyPattern) + ' exists');
});
// Create a replica set with a primary running 3.2 and a secondary running the latest version.
@@ -51,10 +49,7 @@
// replicates.
testCases.forEach(function(indexKeyPattern) {
var replSetName = 'invalid_key_pattern_replset';
- var nodes = [
- {binVersion: '3.2'},
- {binVersion: 'latest'},
- ];
+ var nodes = [{binVersion: '3.2'}, {binVersion: 'latest'}, ];
var rst = new ReplSetTest({name: replSetName, nodes: nodes});
@@ -80,15 +75,17 @@
// Verify that the secondary running the latest version terminates when the command to build
// an index with an invalid key pattern replicates.
- assert.soon(function() {
- try {
- secondaryLatest.getDB('test').runCommand({ping: 1});
- } catch (e) {
- return true;
- }
- return false;
- }, 'secondary should have terminated due to request to build an index with an invalid key' +
- ' pattern ' + tojson(indexKeyPattern));
+ assert.soon(
+ function() {
+ try {
+ secondaryLatest.getDB('test').runCommand({ping: 1});
+ } catch (e) {
+ return true;
+ }
+ return false;
+ },
+ 'secondary should have terminated due to request to build an index with an invalid key' +
+ ' pattern ' + tojson(indexKeyPattern));
rst.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
});
diff --git a/jstests/multiVersion/libs/auth_helpers.js b/jstests/multiVersion/libs/auth_helpers.js
index ecbf2047212..99c0a64b05b 100644
--- a/jstests/multiVersion/libs/auth_helpers.js
+++ b/jstests/multiVersion/libs/auth_helpers.js
@@ -1,15 +1,15 @@
// Helpers for auth upgrade tests.
// Get a user document for username in db.
-var getUserDoc = function(db, username){
- return db.runCommand({'usersInfo': {user: username, db: db._name},
- showCredentials: true}).users[0];
+var getUserDoc = function(db, username) {
+ return db.runCommand({'usersInfo': {user: username, db: db._name}, showCredentials: true})
+ .users[0];
};
// Verify that the user document for username in db
// has MONGODB-CR credentials (or not) and SCRAM-SHA-1
// credentials (or not).
-var verifyUserDoc = function(db, username, hasCR, hasSCRAM, hasExternal = false){
+var verifyUserDoc = function(db, username, hasCR, hasSCRAM, hasExternal = false) {
var userDoc = getUserDoc(db, username);
assert.eq(hasCR, 'MONGODB-CR' in userDoc.credentials);
assert.eq(hasSCRAM, 'SCRAM-SHA-1' in userDoc.credentials);
@@ -18,10 +18,8 @@ var verifyUserDoc = function(db, username, hasCR, hasSCRAM, hasExternal = false)
// Verify that that we can authenticate (or not) using MONGODB-CR
// and SCRAM-SHA-1 to db using username and password.
-var verifyAuth = function(db, username, password, passCR, passSCRAM){
- assert.eq(passCR, db.auth({mechanism: 'MONGODB-CR',
- user: username, pwd: password}));
- assert.eq(passSCRAM, db.auth({mechanism: 'SCRAM-SHA-1',
- user: username, pwd: password}));
+var verifyAuth = function(db, username, password, passCR, passSCRAM) {
+ assert.eq(passCR, db.auth({mechanism: 'MONGODB-CR', user: username, pwd: password}));
+ assert.eq(passSCRAM, db.auth({mechanism: 'SCRAM-SHA-1', user: username, pwd: password}));
db.logout();
};
diff --git a/jstests/multiVersion/libs/data_generators.js b/jstests/multiVersion/libs/data_generators.js
index 6f57e3e0060..c2af0638a5f 100644
--- a/jstests/multiVersion/libs/data_generators.js
+++ b/jstests/multiVersion/libs/data_generators.js
@@ -18,7 +18,6 @@
// }
//
function DataGenerator() {
-
var hexChars = "0123456789abcdefABCDEF";
var regexOptions = "igm";
var stringChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
@@ -26,52 +25,54 @@ function DataGenerator() {
// Generator functions
// BSON Type: -1
- function GenMinKey (seed) {
+ function GenMinKey(seed) {
return MinKey();
}
// BSON Type: 0 (EOO)
// No Shell Equivalent
// BSON Type: 1
- function GenNumberDouble (seed) {
+ function GenNumberDouble(seed) {
var seed = seed || 0;
return Number(seed);
}
// BSON Type: 2
- function GenString (seed) {
+ function GenString(seed) {
var seed = seed || 0;
var text = "";
- for (var i=0; i < (seed % 1000) + 1; i++) {
+ for (var i = 0; i < (seed % 1000) + 1; i++) {
text += stringChars.charAt((seed + (i % 10)) % stringChars.length);
}
return text;
}
// Javascript Dates get stored as strings
- function GenDate (seed) {
+ function GenDate(seed) {
// The "Date" constructor without "new" ignores its arguments anyway, so don't bother
// using the seed.
return Date();
}
// BSON Type: 3
- function GenObject (seed) {
+ function GenObject(seed) {
var seed = seed || 0;
- return { "object" : true };
+ return {
+ "object": true
+ };
}
// BSON Type: 4
- function GenArray (seed) {
+ function GenArray(seed) {
var seed = seed || 0;
- return [ "array", true ];
+ return ["array", true];
}
// BSON Type: 5
- function GenBinData (seed) {
+ function GenBinData(seed) {
var seed = seed || 0;
var text = "";
- for (var i=0; i < (seed % 1000) + 1; i++) {
+ for (var i = 0; i < (seed % 1000) + 1; i++) {
text += base64Chars.charAt((seed + (i % 10)) % base64Chars.length);
}
@@ -82,29 +83,29 @@ function DataGenerator() {
return BinData(seed % 6, text);
}
// BSON Type: 6
- function GenUndefined (seed) {
+ function GenUndefined(seed) {
return undefined;
}
// BSON Type: 7
- function GenObjectId (seed) {
+ function GenObjectId(seed) {
var seed = seed || 0;
var hexString = "";
- for (var i=0; i < 24; i++) {
+ for (var i = 0; i < 24; i++) {
hexString += hexChars.charAt((seed + (i % 10)) % hexChars.length);
}
return ObjectId(hexString);
}
// BSON Type: 8
- function GenBool (seed) {
+ function GenBool(seed) {
var seed = seed || 0;
return (seed % 2) === 0;
}
// BSON Type: 9
// Our ISODate constructor equals the Date BSON type
- function GenISODate (seed) {
+ function GenISODate(seed) {
var seed = seed || 0;
var year = (seed % (2037 - 1970)) + 1970;
@@ -116,7 +117,6 @@ function DataGenerator() {
var millis = seed % 1000;
function pad(number, length) {
-
var str = '' + number;
while (str.length < length) {
@@ -126,32 +126,32 @@ function DataGenerator() {
return str;
}
- return ISODate(pad(year, 4) + "-" + pad(month, 2) + "-" + pad(day, 2) + "T" +
- pad(hour, 2) + ":" + pad(minute, 2) + ":" + pad(second, 2) + "." + pad(millis, 3));
+ return ISODate(pad(year, 4) + "-" + pad(month, 2) + "-" + pad(day, 2) + "T" + pad(hour, 2) +
+ ":" + pad(minute, 2) + ":" + pad(second, 2) + "." + pad(millis, 3));
}
// BSON Type: 10
- function GenNull (seed) {
+ function GenNull(seed) {
return null;
}
// BSON Type: 11
- function GenRegExp (seed) {
+ function GenRegExp(seed) {
var seed = seed || 0;
var options = "";
- for (var i=0; i < (seed % 3) + 1; i++) {
+ for (var i = 0; i < (seed % 3) + 1; i++) {
options += regexOptions.charAt((seed + (i % 10)) % regexOptions.length);
}
return RegExp(GenString(seed), options);
}
- function GenRegExpLiteral (seed) {
+ function GenRegExpLiteral(seed) {
// We can't pass variables to a regex literal, so we can't programmatically generate the
// data. Instead we rely on the "RegExp" constructor.
return /regexliteral/;
}
// BSON Type: 12
// The DBPointer type in the shell equals the DBRef BSON type
- function GenDBPointer (seed) {
+ function GenDBPointer(seed) {
var seed = seed || 0;
return DBPointer(GenString(seed), GenObjectId(seed));
@@ -163,13 +163,13 @@ function DataGenerator() {
// BSON Type: 15 (CodeWScope)
// No Shell Equivalent
// BSON Type: 16
- function GenNumberInt (seed) {
+ function GenNumberInt(seed) {
var seed = seed || 0;
return NumberInt(seed);
}
// BSON Type: 17
- function GenTimestamp (seed) {
+ function GenTimestamp(seed) {
var seed = seed || 0;
// Make sure our timestamp is not zero, because that doesn't round trip from 2.4 to latest.
@@ -181,17 +181,17 @@ function DataGenerator() {
return Timestamp(seed, (seed * 100000) / 99999);
}
// BSON Type: 18
- function GenNumberLong (seed) {
+ function GenNumberLong(seed) {
var seed = seed || 0;
return NumberLong(seed);
}
// BSON Type: 127
- function GenMaxKey (seed) {
+ function GenMaxKey(seed) {
return MaxKey();
}
// The DBRef type is not a BSON type but is treated specially in the shell:
- function GenDBRef (seed) {
+ function GenDBRef(seed) {
var seed = seed || 0;
return DBRef(GenString(seed), GenObjectId(seed));
@@ -199,34 +199,34 @@ function DataGenerator() {
function GenFlatObjectAllTypes(seed) {
return {
- "MinKey" : GenMinKey(seed),
- "NumberDouble" : GenNumberDouble(seed),
- "String" : GenString(seed),
+ "MinKey": GenMinKey(seed),
+ "NumberDouble": GenNumberDouble(seed),
+ "String": GenString(seed),
// Javascript Dates get stored as strings
- "Date" : GenDate(seed),
+ "Date": GenDate(seed),
// BSON Type: 3
- "Object" : GenObject(seed),
+ "Object": GenObject(seed),
// BSON Type: 4
- "Array" : GenArray(seed),
+ "Array": GenArray(seed),
// BSON Type: 5
- "BinData" : GenBinData(seed),
+ "BinData": GenBinData(seed),
// BSON Type: 6
- "Undefined" : undefined,
+ "Undefined": undefined,
// BSON Type: 7
- "jstOID" : GenObjectId(seed),
+ "jstOID": GenObjectId(seed),
// BSON Type: 8
- "Bool" : GenBool(seed),
+ "Bool": GenBool(seed),
// BSON Type: 9
// Our ISODate constructor equals the Date BSON type
- "ISODate" : GenISODate(seed),
+ "ISODate": GenISODate(seed),
// BSON Type: 10
- "jstNULL" : GenNull(seed),
+ "jstNULL": GenNull(seed),
// BSON Type: 11
- "RegExp" : GenRegExp(seed),
- "RegExpLiteral" : GenRegExpLiteral(seed),
+ "RegExp": GenRegExp(seed),
+ "RegExpLiteral": GenRegExpLiteral(seed),
// BSON Type: 12
// The DBPointer type in the shell equals the DBRef BSON type
- "DBPointer" : GenDBPointer(seed),
+ "DBPointer": GenDBPointer(seed),
// BSON Type: 13 (Code)
// No Shell Equivalent
// BSON Type: 14 (Symbol)
@@ -234,54 +234,54 @@ function DataGenerator() {
// BSON Type: 15 (CodeWScope)
// No Shell Equivalent
// BSON Type: 16
- "NumberInt" : GenNumberInt(seed),
+ "NumberInt": GenNumberInt(seed),
// BSON Type: 17
- "Timestamp" : GenTimestamp(seed),
+ "Timestamp": GenTimestamp(seed),
// BSON Type: 18
- "NumberLong" : GenNumberLong(seed),
+ "NumberLong": GenNumberLong(seed),
// BSON Type: 127
- "MaxKey" : GenMaxKey(seed),
+ "MaxKey": GenMaxKey(seed),
// The DBRef type is not a BSON type but is treated specially in the shell:
- "DBRef" : GenDBRef(seed),
+ "DBRef": GenDBRef(seed),
};
}
function GenFlatObjectAllTypesHardCoded() {
return {
// BSON Type: -1
- "MinKey" : MinKey(),
+ "MinKey": MinKey(),
// BSON Type: 0 (EOO)
// No Shell Equivalent
// BSON Type: 1
- "NumberDouble" : Number(4.0),
+ "NumberDouble": Number(4.0),
// BSON Type: 2
- "String" : "string",
+ "String": "string",
// Javascript Dates get stored as strings
- "Date" : Date("2013-12-11T19:38:24.055Z"),
- "Date2" : GenDate(10000),
+ "Date": Date("2013-12-11T19:38:24.055Z"),
+ "Date2": GenDate(10000),
// BSON Type: 3
- "Object" : { "object" : true },
+ "Object": {"object": true},
// BSON Type: 4
- "Array" : [ "array", true ],
+ "Array": ["array", true],
// BSON Type: 5
- "BinData" : BinData(0, "aaaa"),
+ "BinData": BinData(0, "aaaa"),
// BSON Type: 6
- "Undefined" : undefined,
+ "Undefined": undefined,
// BSON Type: 7
- "jstOID" : ObjectId("aaaaaaaaaaaaaaaaaaaaaaaa"),
+ "jstOID": ObjectId("aaaaaaaaaaaaaaaaaaaaaaaa"),
// BSON Type: 8
- "Bool" : true,
+ "Bool": true,
// BSON Type: 9
// Our ISODate constructor equals the Date BSON type
- "ISODate" : ISODate("2013-12-11T19:38:24.055Z"),
+ "ISODate": ISODate("2013-12-11T19:38:24.055Z"),
// BSON Type: 10
- "jstNULL" : null,
+ "jstNULL": null,
// BSON Type: 11
- "RegExp" : RegExp("a"),
- "RegExpLiteral" : /a/,
+ "RegExp": RegExp("a"),
+ "RegExpLiteral": /a/,
// BSON Type: 12
// The DBPointer type in the shell equals the DBRef BSON type
- "DBPointer" : DBPointer("foo", ObjectId("bbbbbbbbbbbbbbbbbbbbbbbb")),
+ "DBPointer": DBPointer("foo", ObjectId("bbbbbbbbbbbbbbbbbbbbbbbb")),
// BSON Type: 13 (Code)
// No Shell Equivalent
// BSON Type: 14 (Symbol)
@@ -289,15 +289,15 @@ function DataGenerator() {
// BSON Type: 15 (CodeWScope)
// No Shell Equivalent
// BSON Type: 16
- "NumberInt" : NumberInt(5),
+ "NumberInt": NumberInt(5),
// BSON Type: 17
- "Timestamp" : Timestamp(1,2),
+ "Timestamp": Timestamp(1, 2),
// BSON Type: 18
- "NumberLong" : NumberLong(6),
+ "NumberLong": NumberLong(6),
// BSON Type: 127
- "MaxKey" : MaxKey(),
+ "MaxKey": MaxKey(),
// The DBRef type is not a BSON type but is treated specially in the shell:
- "DBRef" : DBRef("bar", 2)
+ "DBRef": DBRef("bar", 2)
};
}
@@ -317,10 +317,10 @@ function DataGenerator() {
// Cursor interface
var i = 0;
return {
- "hasNext" : function () {
+ "hasNext": function() {
return i < testData.length;
},
- "next" : function () {
+ "next": function() {
if (i >= testData.length) {
return undefined;
}
@@ -356,14 +356,15 @@ function DataGenerator() {
// }
//
function IndexDataGenerator(options) {
-
// getNextUniqueKey()
//
// This function returns a new key each time it is called and is guaranteed to not return
// duplicates.
//
- // The sequence of values returned is a-z then A-Z. When "Z" is reached, a new character is added
- // and the first one wraps around, resulting in "aa". The process is repeated, so we get a sequence
+ // The sequence of values returned is a-z then A-Z. When "Z" is reached, a new character is
+ // added
+ // and the first one wraps around, resulting in "aa". The process is repeated, so we get a
+ // sequence
// like this:
//
// "a"
@@ -378,9 +379,8 @@ function IndexDataGenerator(options) {
// ...
var currentKey = "";
function getNextUniqueKey() {
-
function setCharAt(str, index, chr) {
- if (index > str.length-1) {
+ if (index > str.length - 1) {
return str;
}
return str.substr(0, index) + chr + str.substr(index + 1);
@@ -401,8 +401,8 @@ function IndexDataGenerator(options) {
// Find the character (index into keyChars) that we currently have at this position, set
// this position to the next character in the keyChars sequence
keyCharsIndex = keyChars.search(currentKey[currentKeyIndex]);
- currentKey = setCharAt(currentKey, currentKeyIndex,
- keyChars[(keyCharsIndex + 1) % keyChars.length]);
+ currentKey = setCharAt(
+ currentKey, currentKeyIndex, keyChars[(keyCharsIndex + 1) % keyChars.length]);
currentKeyIndex = currentKeyIndex + 1;
// Loop again if we advanced the character past the end of keyChars and wrapped around,
@@ -486,11 +486,10 @@ function IndexDataGenerator(options) {
}
if (propertyType == 1) {
attributes["sparse"] = true;
- }
- else {
+ } else {
// TODO: We have to test this as a separate stage because we want to round trip
// multiple documents
- //attributes["unique"] = true;
+ // attributes["unique"] = true;
}
}
return attributes;
@@ -512,8 +511,7 @@ function IndexDataGenerator(options) {
}
if (propertyType == 2) {
attributes["max"] = ((seed + i) * 10000) % 100 + 10;
- }
- else {
+ } else {
}
}
// The region specified in a 2d index must be positive
@@ -551,48 +549,48 @@ function IndexDataGenerator(options) {
testIndexes = [
// Single Field Indexes
- { "spec" : GenSingleFieldIndex(1), "options" : GenIndexOptions(0) },
- { "spec" : GenSingleFieldIndex(0), "options" : GenIndexOptions(1) },
+ {"spec": GenSingleFieldIndex(1), "options": GenIndexOptions(0)},
+ {"spec": GenSingleFieldIndex(0), "options": GenIndexOptions(1)},
// Compound Indexes
- { "spec" : GenCompoundIndex(0), "options" : GenIndexOptions(2) },
- { "spec" : GenCompoundIndex(1), "options" : GenIndexOptions(3) },
- { "spec" : GenCompoundIndex(2), "options" : GenIndexOptions(4) },
- { "spec" : GenCompoundIndex(3), "options" : GenIndexOptions(5) },
- { "spec" : GenCompoundIndex(4), "options" : GenIndexOptions(6) },
- { "spec" : GenCompoundIndex(5), "options" : GenIndexOptions(7) },
- { "spec" : GenCompoundIndex(6), "options" : GenIndexOptions(8) },
+ {"spec": GenCompoundIndex(0), "options": GenIndexOptions(2)},
+ {"spec": GenCompoundIndex(1), "options": GenIndexOptions(3)},
+ {"spec": GenCompoundIndex(2), "options": GenIndexOptions(4)},
+ {"spec": GenCompoundIndex(3), "options": GenIndexOptions(5)},
+ {"spec": GenCompoundIndex(4), "options": GenIndexOptions(6)},
+ {"spec": GenCompoundIndex(5), "options": GenIndexOptions(7)},
+ {"spec": GenCompoundIndex(6), "options": GenIndexOptions(8)},
// Multikey Indexes
// (Same index spec as single field)
// Nested Indexes
- { "spec" : GenNestedIndex(0), "options" : GenIndexOptions(9) },
- { "spec" : GenNestedIndex(1), "options" : GenIndexOptions(10) },
- { "spec" : GenNestedIndex(2), "options" : GenIndexOptions(11) },
+ {"spec": GenNestedIndex(0), "options": GenIndexOptions(9)},
+ {"spec": GenNestedIndex(1), "options": GenIndexOptions(10)},
+ {"spec": GenNestedIndex(2), "options": GenIndexOptions(11)},
// Geospatial Indexes
// 2dsphere
- { "spec" : Gen2dsphereIndex(7), "options" : Gen2dSphereIndexOptions(12) },
+ {"spec": Gen2dsphereIndex(7), "options": Gen2dSphereIndexOptions(12)},
// 2d
- { "spec" : Gen2dIndex(8), "options" : Gen2dIndexOptions(13) },
+ {"spec": Gen2dIndex(8), "options": Gen2dIndexOptions(13)},
// Haystack
- { "spec" : GenHaystackIndex(9), "options" : GenHaystackIndexOptions(13) },
+ {"spec": GenHaystackIndex(9), "options": GenHaystackIndexOptions(13)},
// Text Indexes
- { "spec" : GenTextIndex(10), "options" : GenTextIndexOptions(14) },
+ {"spec": GenTextIndex(10), "options": GenTextIndexOptions(14)},
// Hashed Index
- { "spec" : GenHashedIndex(10), "options" : GenIndexOptions(14) },
+ {"spec": GenHashedIndex(10), "options": GenIndexOptions(14)},
];
// Cursor interface
var i = 0;
return {
- "hasNext" : function () {
+ "hasNext": function() {
return i < testIndexes.length;
},
- "next" : function () {
+ "next": function() {
if (i >= testIndexes.length) {
return undefined;
}
@@ -620,7 +618,6 @@ function IndexDataGenerator(options) {
// var metadata = generator.get();
//
function CollectionMetadataGenerator(options) {
-
var capped = true;
var options = options || {};
@@ -628,32 +625,34 @@ function CollectionMetadataGenerator(options) {
if (options.hasOwnProperty(option)) {
if (option === 'capped') {
if (typeof(options['capped']) !== 'boolean') {
- throw Error("\"capped\" options must be boolean in CollectionMetadataGenerator");
+ throw Error(
+ "\"capped\" options must be boolean in CollectionMetadataGenerator");
}
capped = options['capped'];
- }
- else {
- throw Error("Unsupported key in options passed to CollectionMetadataGenerator: " + option);
+ } else {
+ throw Error("Unsupported key in options passed to CollectionMetadataGenerator: " +
+ option);
}
}
}
// Collection metadata we are using as a source for testing
- //db.createCollection(name, {capped: <Boolean>, autoIndexId: <Boolean>, size: <number>, max <number>} )
+ // db.createCollection(name, {capped: <Boolean>, autoIndexId: <Boolean>, size: <number>, max
+ // <number>} )
var cappedCollectionMetadata = {
- "capped" : true,
- "size" : 100000,
- "max" : 2000,
- "usePowerOf2Sizes" : true,
+ "capped": true,
+ "size": 100000,
+ "max": 2000,
+ "usePowerOf2Sizes": true,
//"autoIndexId" : false // XXX: this doesn't exist in 2.4
};
// We need to explicitly enable usePowerOf2Sizes, since it's the default in 2.6 but not in 2.4
var normalCollectionMetadata = {
- "usePowerOf2Sizes" : true
+ "usePowerOf2Sizes": true
};
return {
- "get" : function () {
+ "get": function() {
return capped ? cappedCollectionMetadata : normalCollectionMetadata;
}
};
@@ -664,8 +663,8 @@ function CollectionMetadataGenerator(options) {
//
function CollectionDataGenerator(options) {
return {
- "data" : new DataGenerator(),
- "indexes" : new IndexDataGenerator(),
- "collectionMetadata" : new CollectionMetadataGenerator(options)
+ "data": new DataGenerator(),
+ "indexes": new IndexDataGenerator(),
+ "collectionMetadata": new CollectionMetadataGenerator(options)
};
}
diff --git a/jstests/multiVersion/libs/dumprestore_helpers.js b/jstests/multiVersion/libs/dumprestore_helpers.js
index 2f65a28190e..a2b7d22d20c 100644
--- a/jstests/multiVersion/libs/dumprestore_helpers.js
+++ b/jstests/multiVersion/libs/dumprestore_helpers.js
@@ -1,6 +1,6 @@
// dumprestore_helpers.js
-load( './jstests/multiVersion/libs/verify_collection_data.js' );
+load('./jstests/multiVersion/libs/verify_collection_data.js');
// Given a "test spec" object, runs the specified test.
//
@@ -28,7 +28,6 @@ load( './jstests/multiVersion/libs/verify_collection_data.js' );
// - "mongos" - Do the dump or restore by connecting to a sharded cluster
//
function multiVersionDumpRestoreTest(configObj) {
-
// First sanity check the arguments in our configObj
var requiredKeys = [
'serverSourceVersion',
@@ -52,25 +51,24 @@ function multiVersionDumpRestoreTest(configObj) {
resetDbpath(configObj.testDbpath);
if (configObj.dumpType === "mongos") {
var shardingTestConfig = {
- sync: true, // Mixed version clusters can't use replsets for config servers
- name : testBaseName + "_sharded_source",
- mongos : [{ binVersion : configObj.serverSourceVersion }],
- shards : [{ binVersion : configObj.serverSourceVersion }],
- config : [{ binVersion : configObj.serverSourceVersion }]
+ sync: true, // Mixed version clusters can't use replsets for config servers
+ name: testBaseName + "_sharded_source",
+ mongos: [{binVersion: configObj.serverSourceVersion}],
+ shards: [{binVersion: configObj.serverSourceVersion}],
+ config: [{binVersion: configObj.serverSourceVersion}]
};
var shardingTest = new ShardingTest(shardingTestConfig);
var serverSource = shardingTest.s;
- }
- else {
- var serverSource = MongoRunner.runMongod({ binVersion : configObj.serverSourceVersion,
- dbpath : configObj.testDbpath });
+ } else {
+ var serverSource = MongoRunner.runMongod(
+ {binVersion: configObj.serverSourceVersion, dbpath: configObj.testDbpath});
}
var sourceDB = serverSource.getDB(testBaseName);
// Create generators to create collections with our seed data
// Testing with both a capped collection and a normal collection
- var cappedCollGen = new CollectionDataGenerator({ "capped" : true });
- var collGen = new CollectionDataGenerator({ "capped" : false });
+ var cappedCollGen = new CollectionDataGenerator({"capped": true});
+ var collGen = new CollectionDataGenerator({"capped": false});
// Create collections using the different generators
var sourceCollCapped = createCollectionWithData(sourceDB, "cappedColl", cappedCollGen);
@@ -84,43 +82,53 @@ function multiVersionDumpRestoreTest(configObj) {
// Dump using the specified version of mongodump from the running mongod or mongos instance.
if (configObj.dumpType === "mongod") {
- MongoRunner.runMongoTool("mongodump", { out : configObj.dumpDir,
- binVersion : configObj.mongoDumpVersion,
- host : serverSource.host,
- db : testBaseName });
+ MongoRunner.runMongoTool("mongodump",
+ {
+ out: configObj.dumpDir,
+ binVersion: configObj.mongoDumpVersion,
+ host: serverSource.host,
+ db: testBaseName
+ });
MongoRunner.stopMongod(serverSource.port);
- }
- else { /* "mongos" */
- MongoRunner.runMongoTool("mongodump", { out : configObj.dumpDir,
- binVersion : configObj.mongoDumpVersion,
- host : serverSource.host,
- db : testBaseName });
+ } else { /* "mongos" */
+ MongoRunner.runMongoTool("mongodump",
+ {
+ out: configObj.dumpDir,
+ binVersion: configObj.mongoDumpVersion,
+ host: serverSource.host,
+ db: testBaseName
+ });
shardingTest.stop();
}
// Restore using the specified version of mongorestore
if (configObj.restoreType === "mongod") {
- var serverDest = MongoRunner.runMongod({ binVersion : configObj.serverDestVersion });
-
- MongoRunner.runMongoTool("mongorestore", { dir : configObj.dumpDir + "/" + testBaseName,
- binVersion : configObj.mongoRestoreVersion,
- host : serverDest.host,
- db : testBaseName });
- }
- else { /* "mongos" */
+ var serverDest = MongoRunner.runMongod({binVersion: configObj.serverDestVersion});
+
+ MongoRunner.runMongoTool("mongorestore",
+ {
+ dir: configObj.dumpDir + "/" + testBaseName,
+ binVersion: configObj.mongoRestoreVersion,
+ host: serverDest.host,
+ db: testBaseName
+ });
+ } else { /* "mongos" */
var shardingTestConfig = {
- sync: true, // Mixed version clusters can't use replsets for config servers
- name : testBaseName + "_sharded_dest",
- mongos : [{ binVersion : configObj.serverDestVersion }],
- shards : [{ binVersion : configObj.serverDestVersion }],
- config : [{ binVersion : configObj.serverDestVersion }]
+ sync: true, // Mixed version clusters can't use replsets for config servers
+ name: testBaseName + "_sharded_dest",
+ mongos: [{binVersion: configObj.serverDestVersion}],
+ shards: [{binVersion: configObj.serverDestVersion}],
+ config: [{binVersion: configObj.serverDestVersion}]
};
var shardingTest = new ShardingTest(shardingTestConfig);
serverDest = shardingTest.s;
- MongoRunner.runMongoTool("mongorestore", { dir : configObj.dumpDir + "/" + testBaseName,
- binVersion : configObj.mongoRestoreVersion,
- host : serverDest.host,
- db : testBaseName });
+ MongoRunner.runMongoTool("mongorestore",
+ {
+ dir: configObj.dumpDir + "/" + testBaseName,
+ binVersion: configObj.mongoRestoreVersion,
+ host: serverDest.host,
+ db: testBaseName
+ });
}
var destDB = serverDest.getDB(testBaseName);
@@ -141,8 +149,7 @@ function multiVersionDumpRestoreTest(configObj) {
if (configObj.restoreType === "mongos") {
shardingTest.stop();
- }
- else {
+ } else {
MongoRunner.stopMongod(serverDest.port);
}
}
@@ -164,9 +171,7 @@ function multiVersionDumpRestoreTest(configObj) {
// { "a" : 0, "b" : 3 }
// { "a" : 1, "b" : 3 }
function getPermutationIterator(permsObj) {
-
function getAllPermutations(permsObj) {
-
// Split our permutations object into "first" and "rest"
var gotFirst = false;
var firstKey;
@@ -176,8 +181,7 @@ function getPermutationIterator(permsObj) {
if (permsObj.hasOwnProperty(key)) {
if (gotFirst) {
restObj[key] = permsObj[key];
- }
- else {
+ } else {
firstKey = key;
firstValues = permsObj[key];
gotFirst = true;
@@ -209,10 +213,10 @@ function getPermutationIterator(permsObj) {
var currentPermutation = 0;
return {
- "next" : function () {
+ "next": function() {
return allPermutations[currentPermutation++];
},
- "hasNext" : function () {
+ "hasNext": function() {
return currentPermutation < allPermutations.length;
}
};
diff --git a/jstests/multiVersion/libs/multi_cluster.js b/jstests/multiVersion/libs/multi_cluster.js
index cf60f531307..5d98f942546 100644
--- a/jstests/multiVersion/libs/multi_cluster.js
+++ b/jstests/multiVersion/libs/multi_cluster.js
@@ -15,11 +15,14 @@
* upgradeMongos: <bool>, // defaults to true
* }
*/
-ShardingTest.prototype.upgradeCluster = function( binVersion, options ){
+ShardingTest.prototype.upgradeCluster = function(binVersion, options) {
options = options || {};
- if (options.upgradeShards == undefined) options.upgradeShards = true;
- if (options.upgradeConfigs == undefined) options.upgradeConfigs = true;
- if (options.upgradeMongos == undefined) options.upgradeMongos = true;
+ if (options.upgradeShards == undefined)
+ options.upgradeShards = true;
+ if (options.upgradeConfigs == undefined)
+ options.upgradeConfigs = true;
+ if (options.upgradeMongos == undefined)
+ options.upgradeMongos = true;
var upgradedSingleShards = [];
@@ -32,12 +35,10 @@ ShardingTest.prototype.upgradeCluster = function( binVersion, options ){
if (configSvr.host in upgradedSingleShards) {
configSvr = upgradedSingleShards[configSvr.host];
- }
- else {
+ } else {
MongoRunner.stopMongod(configSvr);
- configSvr = MongoRunner.runMongod({ restart: configSvr,
- binVersion: binVersion,
- appendOptions: true });
+ configSvr = MongoRunner.runMongod(
+ {restart: configSvr, binVersion: binVersion, appendOptions: true});
}
this["config" + i] = this["c" + i] = this._configServers[i] = configSvr;
@@ -49,18 +50,16 @@ ShardingTest.prototype.upgradeCluster = function( binVersion, options ){
// Upgrade shards
for (var i = 0; i < numShards; i++) {
- if( this._rs && this._rs[i] ){
+ if (this._rs && this._rs[i]) {
// Upgrade replica set
var rst = this._rs[i].test;
- rst.upgradeSet({ binVersion: binVersion });
- }
- else {
+ rst.upgradeSet({binVersion: binVersion});
+ } else {
// Upgrade shard
var shard = this._connections[i];
MongoRunner.stopMongod(shard);
- shard = MongoRunner.runMongod({ restart: shard,
- binVersion: binVersion,
- appendOptions: true });
+ shard = MongoRunner.runMongod(
+ {restart: shard, binVersion: binVersion, appendOptions: true});
upgradedSingleShards[shard.host] = shard;
this["shard" + i] = this["d" + i] = this._connections[i] = shard;
@@ -76,12 +75,12 @@ ShardingTest.prototype.upgradeCluster = function( binVersion, options ){
var mongos = this._mongos[i];
MongoRunner.stopMongos(mongos);
- mongos = MongoRunner.runMongos({ restart : mongos,
- binVersion : binVersion,
- appendOptions : true });
+ mongos = MongoRunner.runMongos(
+ {restart: mongos, binVersion: binVersion, appendOptions: true});
this["s" + i] = this._mongos[i] = mongos;
- if (i == 0) this.s = mongos;
+ if (i == 0)
+ this.s = mongos;
}
this.config = this.s.getDB("config");
@@ -90,22 +89,22 @@ ShardingTest.prototype.upgradeCluster = function( binVersion, options ){
};
ShardingTest.prototype.restartMongoses = function() {
-
+
var numMongoses = this._mongos.length;
-
+
for (var i = 0; i < numMongoses; i++) {
-
var mongos = this._mongos[i];
-
+
MongoRunner.stopMongos(mongos);
- mongos = MongoRunner.runMongos({ restart : mongos });
-
- this[ "s" + i ] = this._mongos[i] = mongos;
- if( i == 0 ) this.s = mongos;
+ mongos = MongoRunner.runMongos({restart: mongos});
+
+ this["s" + i] = this._mongos[i] = mongos;
+ if (i == 0)
+ this.s = mongos;
}
-
- this.config = this.s.getDB( "config" );
- this.admin = this.s.getDB( "admin" );
+
+ this.config = this.s.getDB("config");
+ this.admin = this.s.getDB("admin");
};
ShardingTest.prototype.getMongosAtVersion = function(binVersion) {
@@ -116,8 +115,7 @@ ShardingTest.prototype.getMongosAtVersion = function(binVersion) {
if (version.indexOf(binVersion) == 0) {
return mongoses[i];
}
- }
- catch (e) {
+ } catch (e) {
printjson(e);
print(mongoses[i]);
}
diff --git a/jstests/multiVersion/libs/multi_rs.js b/jstests/multiVersion/libs/multi_rs.js
index 4e943f39531..109db580453 100644
--- a/jstests/multiVersion/libs/multi_rs.js
+++ b/jstests/multiVersion/libs/multi_rs.js
@@ -22,14 +22,14 @@ ReplSetTest.prototype.upgradeSet = function(options, user, pwd) {
var noDowntimePossible = this.nodes.length > 2;
for (var i = 0; i < nodesToUpgrade.length; i++) {
- var node = nodesToUpgrade[ i ];
+ var node = nodesToUpgrade[i];
if (node == primary) {
node = this.stepdown(node);
primary = this.getPrimary();
}
var prevPrimaryId = this.getNodeId(primary);
- //merge new options into node settings...
+ // merge new options into node settings...
for (var nodeName in this.nodeOptions) {
this.nodeOptions[nodeName] = Object.merge(this.nodeOptions[nodeName], options);
}
@@ -46,7 +46,7 @@ ReplSetTest.prototype.upgradeNode = function(node, opts, user, pwd) {
assert.eq(1, node.getDB("admin").auth(user, pwd));
}
- var isMaster = node.getDB('admin').runCommand({ isMaster: 1 });
+ var isMaster = node.getDB('admin').runCommand({isMaster: 1});
if (!isMaster.arbiterOnly) {
assert.commandWorked(node.adminCommand("replSetMaintenance"));
@@ -58,9 +58,8 @@ ReplSetTest.prototype.upgradeNode = function(node, opts, user, pwd) {
newNode.getDB("admin").auth(user, pwd);
}
- var waitForStates = [ ReplSetTest.State.PRIMARY,
- ReplSetTest.State.SECONDARY,
- ReplSetTest.State.ARBITER ];
+ var waitForStates =
+ [ReplSetTest.State.PRIMARY, ReplSetTest.State.SECONDARY, ReplSetTest.State.ARBITER];
this.waitForState(newNode, waitForStates);
return newNode;
@@ -72,10 +71,9 @@ ReplSetTest.prototype.stepdown = function(nodeId) {
var node = this.nodes[nodeId];
try {
- node.getDB("admin").runCommand({ replSetStepDown: 50, force: true });
+ node.getDB("admin").runCommand({replSetStepDown: 50, force: true});
assert(false);
- }
- catch (ex) {
+ } catch (ex) {
print('Caught exception after stepDown cmd: ' + tojson(ex));
}
@@ -87,17 +85,18 @@ ReplSetTest.prototype.reconnect = function(node) {
this.nodes[nodeId] = new Mongo(node.host);
var except = {};
for (var i in node) {
- if (typeof(node[i]) == "function") continue;
+ if (typeof(node[i]) == "function")
+ continue;
this.nodes[nodeId][i] = node[i];
}
return this.nodes[nodeId];
};
-ReplSetTest.prototype.conf = function () {
+ReplSetTest.prototype.conf = function() {
var admin = this.getPrimary().getDB('admin');
- var resp = admin.runCommand({replSetGetConfig:1});
+ var resp = admin.runCommand({replSetGetConfig: 1});
if (resp.ok && !(resp.errmsg) && resp.config)
return resp.config;
@@ -107,4 +106,3 @@ ReplSetTest.prototype.conf = function () {
throw new Error("Could not retrieve replica set config: " + tojson(resp));
};
-
diff --git a/jstests/multiVersion/libs/verify_collection_data.js b/jstests/multiVersion/libs/verify_collection_data.js
index 73e54f32f48..1b0437917ee 100644
--- a/jstests/multiVersion/libs/verify_collection_data.js
+++ b/jstests/multiVersion/libs/verify_collection_data.js
@@ -18,10 +18,10 @@
// 4. Do round trip or other testing
// 5. Validate that collection has not changed using the CollectionDataValidator class
-load( './jstests/multiVersion/libs/data_generators.js' );
+load('./jstests/multiVersion/libs/data_generators.js');
// Function to actually add the data generated by the given dataGenerator to a collection
-createCollectionWithData = function (db, collectionName, dataGenerator) {
+createCollectionWithData = function(db, collectionName, dataGenerator) {
// Drop collection if exists
// TODO: add ability to control this
@@ -67,7 +67,6 @@ createCollectionWithData = function (db, collectionName, dataGenerator) {
// Class to save the state of a collection and later compare the current state of a collection to
// the saved state
function CollectionDataValidator() {
-
var _initialized = false;
var _collectionInfo = {};
var _indexData = [];
@@ -81,25 +80,27 @@ function CollectionDataValidator() {
};
// Saves the current state of the collection passed in
- this.recordCollectionData = function (collection) {
+ this.recordCollectionData = function(collection) {
// Save the metadata for this collection for later comparison.
_collectionInfo = this.getCollectionInfo(collection);
// Save the indexes for this collection for later comparison
- _indexData = collection.getIndexes().sort(function(a,b) {
- if (a.name > b.name) return 1;
- else return -1;
+ _indexData = collection.getIndexes().sort(function(a, b) {
+ if (a.name > b.name)
+ return 1;
+ else
+ return -1;
});
// Save the data for this collection for later comparison
- _collectionData = collection.find().sort({"_id":1}).toArray();
+ _collectionData = collection.find().sort({"_id": 1}).toArray();
_initialized = true;
return collection;
};
- this.validateCollectionData = function (collection) {
+ this.validateCollectionData = function(collection) {
if (!_initialized) {
throw Error("validateCollectionWithAllData called, but data is not initialized");
@@ -111,16 +112,18 @@ function CollectionDataValidator() {
assert.docEq(_collectionInfo, newCollectionInfo, "collection metadata not equal");
// Get the indexes for this collection
- var newIndexData = collection.getIndexes().sort(function(a,b) {
- if (a.name > b.name) return 1;
- else return -1;
+ var newIndexData = collection.getIndexes().sort(function(a, b) {
+ if (a.name > b.name)
+ return 1;
+ else
+ return -1;
});
for (var i = 0; i < newIndexData.length; i++) {
assert.docEq(_indexData[i], newIndexData[i], "indexes not equal");
}
// Save the data for this collection for later comparison
- var newCollectionData = collection.find().sort({"_id":1}).toArray();
+ var newCollectionData = collection.find().sort({"_id": 1}).toArray();
for (var i = 0; i < newCollectionData.length; i++) {
assert.docEq(_collectionData[i], newCollectionData[i], "data not equal");
}
@@ -130,52 +133,59 @@ function CollectionDataValidator() {
// Tests of the functions and classes in this file
function collectionDataValidatorTests() {
-
// TODO: These tests are hackish and depend on implementation details, but they are good enough
// for now to convince us that the CollectionDataValidator is actually checking something
var myValidator;
var myGenerator;
var collection;
- myGenerator = new CollectionDataGenerator({ "capped" : true });
+ myGenerator = new CollectionDataGenerator({"capped": true});
collection = createCollectionWithData(db, "test", myGenerator);
myValidator = new CollectionDataValidator();
myValidator.recordCollectionData(collection);
- db.test.dropIndex(db.test.getIndexKeys().filter(function(key) { return key.a != null; })[0]);
- assert.throws(myValidator.validateCollectionData, [collection], "Validation function should have thrown since we modified the collection");
-
-
- myGenerator = new CollectionDataGenerator({ "capped" : true });
+ db.test.dropIndex(db.test.getIndexKeys().filter(function(key) {
+ return key.a != null;
+ })[0]);
+ assert.throws(myValidator.validateCollectionData,
+ [collection],
+ "Validation function should have thrown since we modified the collection");
+
+ myGenerator = new CollectionDataGenerator({"capped": true});
collection = createCollectionWithData(db, "test", myGenerator);
myValidator = new CollectionDataValidator();
myValidator.recordCollectionData(collection);
- db.test.update({_id:0}, {dummy:1});
- assert.throws(myValidator.validateCollectionData, [collection], "Validation function should have thrown since we modified the collection");
+ db.test.update({_id: 0}, {dummy: 1});
+ assert.throws(myValidator.validateCollectionData,
+ [collection],
+ "Validation function should have thrown since we modified the collection");
-
- myGenerator = new CollectionDataGenerator({ "capped" : true });
+ myGenerator = new CollectionDataGenerator({"capped": true});
collection = createCollectionWithData(db, "test", myGenerator);
myValidator = new CollectionDataValidator();
myValidator.recordCollectionData(collection);
assert(myValidator.validateCollectionData(collection), "Validation function failed");
- myGenerator = new CollectionDataGenerator({ "capped" : false });
+ myGenerator = new CollectionDataGenerator({"capped": false});
collection = createCollectionWithData(db, "test", myGenerator);
myValidator = new CollectionDataValidator();
myValidator.recordCollectionData(collection);
- db.test.dropIndex(db.test.getIndexKeys().filter(function(key) { return key.a != null; })[0]);
- assert.throws(myValidator.validateCollectionData, [collection], "Validation function should have thrown since we modified the collection");
-
-
- myGenerator = new CollectionDataGenerator({ "capped" : false });
+ db.test.dropIndex(db.test.getIndexKeys().filter(function(key) {
+ return key.a != null;
+ })[0]);
+ assert.throws(myValidator.validateCollectionData,
+ [collection],
+ "Validation function should have thrown since we modified the collection");
+
+ myGenerator = new CollectionDataGenerator({"capped": false});
collection = createCollectionWithData(db, "test", myGenerator);
myValidator = new CollectionDataValidator();
myValidator.recordCollectionData(collection);
- db.test.update({_id:0}, {dummy:1});
- assert.throws(myValidator.validateCollectionData, [collection], "Validation function should have thrown since we modified the collection");
-
+ db.test.update({_id: 0}, {dummy: 1});
+ assert.throws(myValidator.validateCollectionData,
+ [collection],
+ "Validation function should have thrown since we modified the collection");
- myGenerator = new CollectionDataGenerator({ "capped" : false });
+ myGenerator = new CollectionDataGenerator({"capped": false});
collection = createCollectionWithData(db, "test", myGenerator);
myValidator = new CollectionDataValidator();
myValidator.recordCollectionData(collection);
diff --git a/jstests/multiVersion/libs/verify_versions.js b/jstests/multiVersion/libs/verify_versions.js
index 89641b5c493..f34b28b9ee5 100644
--- a/jstests/multiVersion/libs/verify_versions.js
+++ b/jstests/multiVersion/libs/verify_versions.js
@@ -3,7 +3,7 @@
*/
Mongo.prototype.getBinVersion = function() {
- var result = this.getDB( "admin" ).runCommand({ serverStatus : 1 });
+ var result = this.getDB("admin").runCommand({serverStatus: 1});
return result.version;
};
@@ -12,28 +12,26 @@ assert.binVersion = function(mongo, version) {
var currVersion = mongo.getBinVersion();
assert(MongoRunner.areBinVersionsTheSame(MongoRunner.getBinVersionFor(currVersion),
MongoRunner.getBinVersionFor(version)),
- "version " + version + " (" + MongoRunner.getBinVersionFor(version) + ")" +
- " is not the same as " + currVersion);
+ "version " + version + " (" + MongoRunner.getBinVersionFor(version) + ")" +
+ " is not the same as " + currVersion);
};
-
// Compares an array of desired versions and an array of found versions,
// looking for versions not found
assert.allBinVersions = function(versionsWanted, versionsFound) {
-
+
for (var i = 0; i < versionsWanted.length; i++) {
var version = versionsWanted[i];
var found = false;
for (var j = 0; j < versionsFound.length; j++) {
- if (MongoRunner.areBinVersionsTheSame(version,
- versionsFound[j])) {
+ if (MongoRunner.areBinVersionsTheSame(version, versionsFound[j])) {
found = true;
break;
}
}
- assert(found, "could not find version " +
- version + " (" + MongoRunner.getBinVersionFor(version) + ")" +
- " in " + versionsFound);
+ assert(found,
+ "could not find version " + version + " (" + MongoRunner.getBinVersionFor(version) +
+ ")" + " in " + versionsFound);
}
};
diff --git a/jstests/multiVersion/migration_between_mixed_version_mongods.js b/jstests/multiVersion/migration_between_mixed_version_mongods.js
index 9ffd9f65d38..12e9c6d2628 100644
--- a/jstests/multiVersion/migration_between_mixed_version_mongods.js
+++ b/jstests/multiVersion/migration_between_mixed_version_mongods.js
@@ -7,91 +7,99 @@
load("./jstests/multiVersion/libs/verify_versions.js");
(function() {
-"use strict";
-
-var options = {shards: [{binVersion : "last-stable"},
- {binVersion : "last-stable"},
- {binVersion : "latest"},
- {binVersion : "latest"}],
- mongos: 1,
- other: {mongosOptions: {binVersion : "last-stable"}}
-};
-
-var st = new ShardingTest(options);
-
-assert.binVersion(st.shard0, "last-stable");
-assert.binVersion(st.shard1, "last-stable");
-assert.binVersion(st.shard2, "latest");
-assert.binVersion(st.shard3, "latest");
-assert.binVersion(st.s0, "last-stable");
-
-var mongos = st.s0,
- admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
-
- fooDB = "fooTest",
- fooNS = fooDB + ".foo",
- fooColl = mongos.getCollection(fooNS),
- fooDonor = st.shard0,
- fooRecipient = st.shard2,
- fooDonorColl = fooDonor.getCollection(fooNS),
- fooRecipientColl = fooRecipient.getCollection(fooNS),
-
- barDB = "barTest",
- barNS = barDB + ".foo",
- barColl = mongos.getCollection(barNS),
- barDonor = st.shard3,
- barRecipient = st.shard1,
- barDonorColl = barDonor.getCollection(barNS),
- barRecipientColl = barRecipient.getCollection(barNS);
-
-assert.commandWorked(admin.runCommand({enableSharding: fooDB}));
-assert.commandWorked(admin.runCommand({enableSharding: barDB}));
-st.ensurePrimaryShard(fooDB, shards[0]._id);
-st.ensurePrimaryShard(barDB, shards[3]._id);
-
-assert.commandWorked(admin.runCommand({shardCollection: fooNS, key: {a: 1}}));
-assert.commandWorked(admin.runCommand({split: fooNS, middle: {a: 10}}));
-assert.commandWorked(admin.runCommand({shardCollection: barNS, key: {a: 1}}));
-assert.commandWorked(admin.runCommand({split: barNS, middle: {a: 10}}));
-
-fooColl.insert({a: 0});
-assert.eq(null, fooColl.getDB().getLastError());
-fooColl.insert({a: 10});
-assert.eq(null, fooColl.getDB().getLastError());
-assert.eq(0, fooRecipientColl.count());
-assert.eq(2, fooDonorColl.count());
-assert.eq(2, fooColl.count());
-
-barColl.insert({a: 0});
-assert.eq(null, barColl.getDB().getLastError());
-barColl.insert({a: 10});
-assert.eq(null, barColl.getDB().getLastError());
-assert.eq(0, barRecipientColl.count());
-assert.eq(2, barDonorColl.count());
-assert.eq(2, barColl.count());
-
-/**
- * Perform two migrations:
- * shard0 (last-stable) -> foo chunk -> shard2 (latest)
- * shard3 (latest) -> bar chunk -> shard1 (last-stable)
- */
-
-assert.commandWorked(admin.runCommand({moveChunk: fooNS, find: {a: 10}, to: shards[2]._id}));
-assert.commandWorked(admin.runCommand({moveChunk: barNS, find: {a: 10}, to: shards[1]._id}));
-assert.eq(1, fooRecipientColl.count(), "Foo collection migration failed. " +
- "Last-stable -> latest mongod version migration failure.");
-assert.eq(1, fooDonorColl.count(), "Foo donor lost its document. " +
- "Last-stable -> latest mongod version migration failure.");
-assert.eq(2, fooColl.count(), "Incorrect number of documents in foo collection. " +
- "Last-stable -> latest mongod version migration failure.");
-assert.eq(1, barRecipientColl.count(), "Bar collection migration failed. " +
- "Latest -> last-stable mongod version migration failure.");
-assert.eq(1, barDonorColl.count(), "Bar donor lost its document. " +
- "Latest -> last-stable mongod version migration failure.");
-assert.eq(2, barColl.count(), "Incorrect number of documents in bar collection. " +
- "Latest -> last-stable mongod version migration failure.");
-
-st.stop();
+ "use strict";
+
+ var options = {
+ shards: [
+ {binVersion: "last-stable"},
+ {binVersion: "last-stable"},
+ {binVersion: "latest"},
+ {binVersion: "latest"}
+ ],
+ mongos: 1,
+ other: {mongosOptions: {binVersion: "last-stable"}}
+ };
+
+ var st = new ShardingTest(options);
+
+ assert.binVersion(st.shard0, "last-stable");
+ assert.binVersion(st.shard1, "last-stable");
+ assert.binVersion(st.shard2, "latest");
+ assert.binVersion(st.shard3, "latest");
+ assert.binVersion(st.s0, "last-stable");
+
+ var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(),
+
+ fooDB = "fooTest", fooNS = fooDB + ".foo", fooColl = mongos.getCollection(fooNS),
+ fooDonor = st.shard0, fooRecipient = st.shard2,
+ fooDonorColl = fooDonor.getCollection(fooNS),
+ fooRecipientColl = fooRecipient.getCollection(fooNS),
+
+ barDB = "barTest", barNS = barDB + ".foo", barColl = mongos.getCollection(barNS),
+ barDonor = st.shard3, barRecipient = st.shard1,
+ barDonorColl = barDonor.getCollection(barNS),
+ barRecipientColl = barRecipient.getCollection(barNS);
+
+ assert.commandWorked(admin.runCommand({enableSharding: fooDB}));
+ assert.commandWorked(admin.runCommand({enableSharding: barDB}));
+ st.ensurePrimaryShard(fooDB, shards[0]._id);
+ st.ensurePrimaryShard(barDB, shards[3]._id);
+
+ assert.commandWorked(admin.runCommand({shardCollection: fooNS, key: {a: 1}}));
+ assert.commandWorked(admin.runCommand({split: fooNS, middle: {a: 10}}));
+ assert.commandWorked(admin.runCommand({shardCollection: barNS, key: {a: 1}}));
+ assert.commandWorked(admin.runCommand({split: barNS, middle: {a: 10}}));
+
+ fooColl.insert({a: 0});
+ assert.eq(null, fooColl.getDB().getLastError());
+ fooColl.insert({a: 10});
+ assert.eq(null, fooColl.getDB().getLastError());
+ assert.eq(0, fooRecipientColl.count());
+ assert.eq(2, fooDonorColl.count());
+ assert.eq(2, fooColl.count());
+
+ barColl.insert({a: 0});
+ assert.eq(null, barColl.getDB().getLastError());
+ barColl.insert({a: 10});
+ assert.eq(null, barColl.getDB().getLastError());
+ assert.eq(0, barRecipientColl.count());
+ assert.eq(2, barDonorColl.count());
+ assert.eq(2, barColl.count());
+
+ /**
+ * Perform two migrations:
+ * shard0 (last-stable) -> foo chunk -> shard2 (latest)
+ * shard3 (latest) -> bar chunk -> shard1 (last-stable)
+ */
+
+ assert.commandWorked(admin.runCommand({moveChunk: fooNS, find: {a: 10}, to: shards[2]._id}));
+ assert.commandWorked(admin.runCommand({moveChunk: barNS, find: {a: 10}, to: shards[1]._id}));
+ assert.eq(1,
+ fooRecipientColl.count(),
+ "Foo collection migration failed. " +
+ "Last-stable -> latest mongod version migration failure.");
+ assert.eq(1,
+ fooDonorColl.count(),
+ "Foo donor lost its document. " +
+ "Last-stable -> latest mongod version migration failure.");
+ assert.eq(2,
+ fooColl.count(),
+ "Incorrect number of documents in foo collection. " +
+ "Last-stable -> latest mongod version migration failure.");
+ assert.eq(1,
+ barRecipientColl.count(),
+ "Bar collection migration failed. " +
+ "Latest -> last-stable mongod version migration failure.");
+ assert.eq(1,
+ barDonorColl.count(),
+ "Bar donor lost its document. " +
+ "Latest -> last-stable mongod version migration failure.");
+ assert.eq(2,
+ barColl.count(),
+ "Incorrect number of documents in bar collection. " +
+ "Latest -> last-stable mongod version migration failure.");
+
+ st.stop();
})();
diff --git a/jstests/multiVersion/minor_version_downgrade_replset.js b/jstests/multiVersion/minor_version_downgrade_replset.js
index e91c22250ac..e0cc1fe5812 100644
--- a/jstests/multiVersion/minor_version_downgrade_replset.js
+++ b/jstests/multiVersion/minor_version_downgrade_replset.js
@@ -9,9 +9,11 @@ var oldVersion = "3.2.1";
var newVersion = "latest";
var name = "replsetdowngrade";
-var nodes = {n1: {binVersion: newVersion},
- n2: {binVersion: newVersion},
- n3: {binVersion: newVersion}};
+var nodes = {
+ n1: {binVersion: newVersion},
+ n2: {binVersion: newVersion},
+ n3: {binVersion: newVersion}
+};
var rst = new ReplSetTest({name: name, nodes: nodes, nodeOptions: {storageEngine: 'mmapv1'}});
rst.startSet();
@@ -23,7 +25,7 @@ var primary = rst.getPrimary();
var coll = "test.foo";
jsTest.log("Inserting documents into collection.");
-for (var i=0; i<10; i++) {
+for (var i = 0; i < 10; i++) {
primary.getCollection(coll).insert({_id: i, str: "hello world"});
}
@@ -40,7 +42,7 @@ jsTest.log("Starting parallel operations during downgrade..");
var joinFindInsert = startParallelOps(primary, insertDocuments, [rst.getURL(), coll]);
jsTest.log("Downgrading replica set..");
-rst.upgradeSet({ binVersion: oldVersion });
+rst.upgradeSet({binVersion: oldVersion});
jsTest.log("Downgrade complete.");
primary = rst.getPrimary();
diff --git a/jstests/multiVersion/minor_version_tags_new_old_new.js b/jstests/multiVersion/minor_version_tags_new_old_new.js
index bddc283558a..f39b3da4c68 100644
--- a/jstests/multiVersion/minor_version_tags_new_old_new.js
+++ b/jstests/multiVersion/minor_version_tags_new_old_new.js
@@ -5,11 +5,13 @@
// 3.2.1 is the final version to use the old style replSetUpdatePosition command.
var oldVersion = "3.2.1";
var newVersion = "latest";
- var nodes = { n1 : { binVersion : newVersion },
- n2 : { binVersion : oldVersion },
- n3 : { binVersion : newVersion },
- n4 : { binVersion : oldVersion },
- n5 : { binVersion : newVersion } };
+ var nodes = {
+ n1: {binVersion: newVersion},
+ n2: {binVersion: oldVersion},
+ n3: {binVersion: newVersion},
+ n4: {binVersion: oldVersion},
+ n5: {binVersion: newVersion}
+ };
var host = getHostName();
var name = 'tags';
@@ -19,63 +21,62 @@
var port = replTest.ports;
replTest.initiate({
_id: name,
- members : [
+ members: [
{
- _id: 0,
- host: nodes[0],
- tags: {
- server: '0',
- dc: 'ny',
- ny: '1',
- rack: 'ny.rk1',
- },
+ _id: 0,
+ host: nodes[0],
+ tags: {
+ server: '0',
+ dc: 'ny',
+ ny: '1',
+ rack: 'ny.rk1',
+ },
},
{
- _id: 1,
- host: nodes[1],
- priority: 2,
- tags: {
- server: '1',
- dc: 'ny',
- ny: '2',
- rack: 'ny.rk1',
- },
+ _id: 1,
+ host: nodes[1],
+ priority: 2,
+ tags: {
+ server: '1',
+ dc: 'ny',
+ ny: '2',
+ rack: 'ny.rk1',
+ },
},
{
- _id: 2,
- host: nodes[2],
- priority: 3,
- tags: {
- server: '2',
- dc: 'ny',
- ny: '3',
- rack: 'ny.rk2',
- 2: 'this',
- },
+ _id: 2,
+ host: nodes[2],
+ priority: 3,
+ tags: {
+ server: '2',
+ dc: 'ny',
+ ny: '3',
+ rack: 'ny.rk2', 2: 'this',
+ },
},
{
- _id: 3,
- host: nodes[3],
- tags: {
- server: '3',
- dc: 'sf',
- sf: '1',
- rack: 'sf.rk1',
- },
+ _id: 3,
+ host: nodes[3],
+ tags: {
+ server: '3',
+ dc: 'sf',
+ sf: '1',
+ rack: 'sf.rk1',
+ },
},
{
- _id: 4,
- host: nodes[4],
- tags: {
- server: '4',
- dc: 'sf',
- sf: '2',
- rack: 'sf.rk2',
- },
+ _id: 4,
+ host: nodes[4],
+ tags: {
+ server: '4',
+ dc: 'sf',
+ sf: '2',
+ rack: 'sf.rk2',
+ },
},
],
- settings : {
- getLastErrorModes : {
+ settings: {
+ getLastErrorModes: {
'2 dc and 3 server': {
dc: 2,
server: 3,
@@ -108,7 +109,9 @@
replTest.waitForState(replTest.nodes[nodeId], ReplSetTest.State.PRIMARY, 60 * 1000);
primary = replTest.getPrimary();
primary.forceWriteMode('commands');
- var writeConcern = {writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}};
+ var writeConcern = {
+ writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}
+ };
assert.writeOK(primary.getDB('foo').bar.insert({x: 100}, writeConcern));
return primary;
};
@@ -133,7 +136,9 @@
jsTestLog('partitions: nodes with each set of brackets [N1, N2, N3] form a complete network.');
jsTestLog('partitions: [0-1-2] [3] [4] (only nodes 0 and 1 can replicate from primary node 2');
- var doc = {x: 1};
+ var doc = {
+ x: 1
+ };
// This timeout should be shorter in duration than the server parameter maxSyncSourceLagSecs.
// Some writes are expected to block for this 'timeout' duration before failing.
@@ -146,15 +151,20 @@
primary = ensurePrimary(2, 3);
jsTestLog('Non-existent write concern should be rejected.');
- options = {writeConcern: {w: 'blahblah', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: 'blahblah', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
var result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
- assert.eq(ErrorCodes.UnknownReplWriteConcern, result.getWriteConcernError().code,
+ assert.eq(ErrorCodes.UnknownReplWriteConcern,
+ result.getWriteConcernError().code,
tojson(result.getWriteConcernError()));
jsTestLog('Write concern "3 or 4" should fail - 3 and 4 are not connected to the primary.');
- var options = {writeConcern: {w: '3 or 4', wtimeout: timeout}};
+ var options = {
+ writeConcern: {w: '3 or 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = primary.getDB('foo').bar.insert(doc, options);
assert.neq(null, result.getWriteConcernError());
@@ -167,12 +177,16 @@
jsTestLog('Write concern "3 or 4" should work - 4 is now connected to the primary ' +
primary.host + ' via node 1 ' + replTest.nodes[1].host);
- options = {writeConcern: {w: '3 or 4', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '3 or 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "3 and 4" should fail - 3 is not connected to the primary.');
- options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '3 and 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
@@ -187,23 +201,31 @@
jsTestLog('31003 should sync from 31004 (31024)');
jsTestLog('Write concern "3 and 4" should work - ' +
'nodes 3 and 4 are connected to primary via node 1.');
- options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '3 and 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" - writes to primary only.');
- options = {writeConcern: {w: '2', wtimeout: 0}};
+ options = {
+ writeConcern: {w: '2', wtimeout: 0}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "1 and 2"');
- options = {writeConcern: {w: '1 and 2', wtimeout: 0}};
+ options = {
+ writeConcern: {w: '1 and 2', wtimeout: 0}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2 dc and 3 server"');
primary = ensurePrimary(2, 5);
- options = {writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
@@ -220,7 +242,7 @@
// Is this necessary when we partition node 2 off from the rest of the nodes?
replTest.stop(2);
jsTestLog('partitions: [0-1] [2] [1-3-4] ' +
- '(all secondaries except down node 2 can replicate from new primary node 1)');
+ '(all secondaries except down node 2 can replicate from new primary node 1)');
// Node 1 with slightly higher priority will take over.
jsTestLog('1 must become primary here because otherwise the other members will take too ' +
@@ -228,13 +250,17 @@
primary = ensurePrimary(1, 4);
jsTestLog('Write concern "3 and 4" should still work with new primary node 1 ' + primary.host);
- options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '3 and 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" should fail because node 2 ' + replTest.nodes[2].host +
' is down.');
- options = {writeConcern: {w: '2', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '2', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
diff --git a/jstests/multiVersion/minor_version_tags_old_new_old.js b/jstests/multiVersion/minor_version_tags_old_new_old.js
index d1de541acf2..056aab2972a 100644
--- a/jstests/multiVersion/minor_version_tags_old_new_old.js
+++ b/jstests/multiVersion/minor_version_tags_old_new_old.js
@@ -5,11 +5,13 @@
// 3.2.1 is the final version to use the old style replSetUpdatePosition command.
var oldVersion = "3.2.1";
var newVersion = "latest";
- var nodes = { n1 : { binVersion : oldVersion },
- n2 : { binVersion : newVersion },
- n3 : { binVersion : oldVersion },
- n4 : { binVersion : newVersion },
- n5 : { binVersion : oldVersion } };
+ var nodes = {
+ n1: {binVersion: oldVersion},
+ n2: {binVersion: newVersion},
+ n3: {binVersion: oldVersion},
+ n4: {binVersion: newVersion},
+ n5: {binVersion: oldVersion}
+ };
var host = getHostName();
var name = 'tags';
@@ -19,63 +21,62 @@
var port = replTest.ports;
replTest.initiate({
_id: name,
- members : [
+ members: [
{
- _id: 0,
- host: nodes[0],
- tags: {
- server: '0',
- dc: 'ny',
- ny: '1',
- rack: 'ny.rk1',
- },
+ _id: 0,
+ host: nodes[0],
+ tags: {
+ server: '0',
+ dc: 'ny',
+ ny: '1',
+ rack: 'ny.rk1',
+ },
},
{
- _id: 1,
- host: nodes[1],
- priority: 2,
- tags: {
- server: '1',
- dc: 'ny',
- ny: '2',
- rack: 'ny.rk1',
- },
+ _id: 1,
+ host: nodes[1],
+ priority: 2,
+ tags: {
+ server: '1',
+ dc: 'ny',
+ ny: '2',
+ rack: 'ny.rk1',
+ },
},
{
- _id: 2,
- host: nodes[2],
- priority: 3,
- tags: {
- server: '2',
- dc: 'ny',
- ny: '3',
- rack: 'ny.rk2',
- 2: 'this',
- },
+ _id: 2,
+ host: nodes[2],
+ priority: 3,
+ tags: {
+ server: '2',
+ dc: 'ny',
+ ny: '3',
+ rack: 'ny.rk2', 2: 'this',
+ },
},
{
- _id: 3,
- host: nodes[3],
- tags: {
- server: '3',
- dc: 'sf',
- sf: '1',
- rack: 'sf.rk1',
- },
+ _id: 3,
+ host: nodes[3],
+ tags: {
+ server: '3',
+ dc: 'sf',
+ sf: '1',
+ rack: 'sf.rk1',
+ },
},
{
- _id: 4,
- host: nodes[4],
- tags: {
- server: '4',
- dc: 'sf',
- sf: '2',
- rack: 'sf.rk2',
- },
+ _id: 4,
+ host: nodes[4],
+ tags: {
+ server: '4',
+ dc: 'sf',
+ sf: '2',
+ rack: 'sf.rk2',
+ },
},
],
- settings : {
- getLastErrorModes : {
+ settings: {
+ getLastErrorModes: {
'2 dc and 3 server': {
dc: 2,
server: 3,
@@ -108,7 +109,9 @@
replTest.waitForState(replTest.nodes[nodeId], ReplSetTest.State.PRIMARY, 60 * 1000);
primary = replTest.getPrimary();
primary.forceWriteMode('commands');
- var writeConcern = {writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}};
+ var writeConcern = {
+ writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}
+ };
assert.writeOK(primary.getDB('foo').bar.insert({x: 100}, writeConcern));
return primary;
};
@@ -133,7 +136,9 @@
jsTestLog('partitions: nodes with each set of brackets [N1, N2, N3] form a complete network.');
jsTestLog('partitions: [0-1-2] [3] [4] (only nodes 0 and 1 can replicate from primary node 2');
- var doc = {x: 1};
+ var doc = {
+ x: 1
+ };
// This timeout should be shorter in duration than the server parameter maxSyncSourceLagSecs.
// Some writes are expected to block for this 'timeout' duration before failing.
@@ -146,15 +151,20 @@
primary = ensurePrimary(2, 3);
jsTestLog('Non-existent write concern should be rejected.');
- options = {writeConcern: {w: 'blahblah', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: 'blahblah', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
var result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
- assert.eq(ErrorCodes.UnknownReplWriteConcern, result.getWriteConcernError().code,
+ assert.eq(ErrorCodes.UnknownReplWriteConcern,
+ result.getWriteConcernError().code,
tojson(result.getWriteConcernError()));
jsTestLog('Write concern "3 or 4" should fail - 3 and 4 are not connected to the primary.');
- var options = {writeConcern: {w: '3 or 4', wtimeout: timeout}};
+ var options = {
+ writeConcern: {w: '3 or 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = primary.getDB('foo').bar.insert(doc, options);
assert.neq(null, result.getWriteConcernError());
@@ -167,12 +177,16 @@
jsTestLog('Write concern "3 or 4" should work - 4 is now connected to the primary ' +
primary.host + ' via node 1 ' + replTest.nodes[1].host);
- options = {writeConcern: {w: '3 or 4', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '3 or 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "3 and 4" should fail - 3 is not connected to the primary.');
- options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '3 and 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
@@ -187,23 +201,31 @@
jsTestLog('31003 should sync from 31004 (31024)');
jsTestLog('Write concern "3 and 4" should work - ' +
'nodes 3 and 4 are connected to primary via node 1.');
- options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '3 and 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" - writes to primary only.');
- options = {writeConcern: {w: '2', wtimeout: 0}};
+ options = {
+ writeConcern: {w: '2', wtimeout: 0}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "1 and 2"');
- options = {writeConcern: {w: '1 and 2', wtimeout: 0}};
+ options = {
+ writeConcern: {w: '1 and 2', wtimeout: 0}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2 dc and 3 server"');
primary = ensurePrimary(2, 5);
- options = {writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
@@ -220,7 +242,7 @@
// Is this necessary when we partition node 2 off from the rest of the nodes?
replTest.stop(2);
jsTestLog('partitions: [0-1] [2] [1-3-4] ' +
- '(all secondaries except down node 2 can replicate from new primary node 1)');
+ '(all secondaries except down node 2 can replicate from new primary node 1)');
// Node 1 with slightly higher priority will take over.
jsTestLog('1 must become primary here because otherwise the other members will take too ' +
@@ -228,13 +250,17 @@
primary = ensurePrimary(1, 4);
jsTestLog('Write concern "3 and 4" should still work with new primary node 1 ' + primary.host);
- options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '3 and 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" should fail because node 2 ' + replTest.nodes[2].host +
' is down.');
- options = {writeConcern: {w: '2', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '2', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
diff --git a/jstests/multiVersion/minor_version_upgrade_replset.js b/jstests/multiVersion/minor_version_upgrade_replset.js
index 1c153a1e675..7f784f5c100 100644
--- a/jstests/multiVersion/minor_version_upgrade_replset.js
+++ b/jstests/multiVersion/minor_version_upgrade_replset.js
@@ -2,85 +2,78 @@
// Tests upgrading a replica set
//
-load( './jstests/multiVersion/libs/multi_rs.js' );
-load( './jstests/libs/test_background_ops.js' );
+load('./jstests/multiVersion/libs/multi_rs.js');
+load('./jstests/libs/test_background_ops.js');
// 3.2.1 is the final version to use the old style replSetUpdatePosition command.
var oldVersion = "3.2.1";
-var nodes = { n1 : { binVersion : oldVersion },
- n2 : { binVersion : oldVersion },
- a3 : { binVersion : oldVersion } };
+var nodes = {
+ n1: {binVersion: oldVersion},
+ n2: {binVersion: oldVersion},
+ a3: {binVersion: oldVersion}
+};
-var rst = new ReplSetTest({ nodes : nodes });
+var rst = new ReplSetTest({nodes: nodes});
rst.startSet();
rst.initiate();
// Wait for a primary node...
var primary = rst.getPrimary();
-var otherOpConn = new Mongo( rst.getURL() );
+var otherOpConn = new Mongo(rst.getURL());
var insertNS = "test.foo";
+jsTest.log("Starting parallel operations during upgrade...");
-jsTest.log( "Starting parallel operations during upgrade..." );
-
-function findAndInsert( rsURL, coll ){
-
- var coll = new Mongo( rsURL ).getCollection( coll + "" );
+function findAndInsert(rsURL, coll) {
+ var coll = new Mongo(rsURL).getCollection(coll + "");
var count = 0;
-
- jsTest.log( "Starting finds and inserts..." );
-
- while( ! isFinished() ){
-
- try{
-
- coll.insert({ _id : count, hello : "world" });
- assert.eq( null, coll.getDB().getLastError() );
- assert.neq( null, coll.findOne({ _id : count }) );
- }
- catch( e ){
- printjson( e );
+
+ jsTest.log("Starting finds and inserts...");
+
+ while (!isFinished()) {
+ try {
+ coll.insert({_id: count, hello: "world"});
+ assert.eq(null, coll.getDB().getLastError());
+ assert.neq(null, coll.findOne({_id: count}));
+ } catch (e) {
+ printjson(e);
}
-
+
count++;
}
-
- jsTest.log( "Finished finds and inserts..." );
+
+ jsTest.log("Finished finds and inserts...");
return count;
}
-var joinFindInsert =
- startParallelOps( primary, // The connection where the test info is passed and stored
- findAndInsert,
- [ rst.getURL(), insertNS ] );
-
+var joinFindInsert =
+ startParallelOps(primary, // The connection where the test info is passed and stored
+ findAndInsert,
+ [rst.getURL(), insertNS]);
-jsTest.log( "Upgrading replica set..." );
+jsTest.log("Upgrading replica set...");
-rst.upgradeSet({ binVersion: "latest" });
+rst.upgradeSet({binVersion: "latest"});
-jsTest.log( "Replica set upgraded." );
+jsTest.log("Replica set upgraded.");
// Wait for primary
var primary = rst.getPrimary();
-printjson( rst.status() );
-
+printjson(rst.status());
// Allow more valid writes to go through
-sleep( 10 * 1000 );
-
+sleep(10 * 1000);
joinFindInsert();
-var totalInserts = primary.getCollection( insertNS ).find().sort({ _id : -1 }).next()._id + 1;
-var dataFound = primary.getCollection( insertNS ).count();
+var totalInserts = primary.getCollection(insertNS).find().sort({_id: -1}).next()._id + 1;
+var dataFound = primary.getCollection(insertNS).count();
-jsTest.log( "Found " + dataFound + " docs out of " + tojson( totalInserts ) + " inserted." );
+jsTest.log("Found " + dataFound + " docs out of " + tojson(totalInserts) + " inserted.");
-assert.gt( dataFound / totalInserts, 0.5 );
+assert.gt(dataFound / totalInserts, 0.5);
rst.stopSet();
-
diff --git a/jstests/multiVersion/mixed_storage_version_replication.js b/jstests/multiVersion/mixed_storage_version_replication.js
index 89da0c5bcd6..34338765ed4 100644
--- a/jstests/multiVersion/mixed_storage_version_replication.js
+++ b/jstests/multiVersion/mixed_storage_version_replication.js
@@ -18,9 +18,20 @@ var RandomOps = {
verbose: false,
// 'Random' documents will have various combinations of these names mapping to these values
fieldNames: ["a", "b", "c", "longerName", "numbered10", "dashed-name"],
- fieldValues: [ true, false, 0, 44, -123, "", "String", [], [false, "x"],
- ["array", 1, {doc: true}, new Date().getTime()], {},
- {embedded: "document", weird: ["values", 0, false]}, new Date().getTime()
+ fieldValues: [
+ true,
+ false,
+ 0,
+ 44,
+ -123,
+ "",
+ "String",
+ [],
+ [false, "x"],
+ ["array", 1, {doc: true}, new Date().getTime()],
+ {},
+ {embedded: "document", weird: ["values", 0, false]},
+ new Date().getTime()
],
/*
@@ -35,7 +46,7 @@ var RandomOps = {
while (x === 1.0) { // Would be out of bounds
x = Random.rand();
}
- var i = Math.floor(x*a.length);
+ var i = Math.floor(x * a.length);
return a[i];
},
@@ -45,7 +56,7 @@ var RandomOps = {
*/
randomNewDoc: function() {
var doc = {};
- for (var i = 0; i < Random.randInt(0,this.fieldNames.length); i++) {
+ for (var i = 0; i < Random.randInt(0, this.fieldNames.length); i++) {
doc[this.randomChoice(this.fieldNames)] = this.randomChoice(this.fieldValues);
}
return doc;
@@ -72,7 +83,9 @@ var RandomOps = {
*/
getRandomExistingCollection: function(conn) {
var dbs = this.getCreatedDatabases(conn);
- if (dbs.length === 0) { return null; }
+ if (dbs.length === 0) {
+ return null;
+ }
var dbName = this.randomChoice(dbs);
var db = conn.getDB(dbName);
if (db.getCollectionNames().length <= 1) {
@@ -89,7 +102,7 @@ var RandomOps = {
try {
var randIndex = Random.randInt(0, collection.find().count());
return collection.find().sort({$natural: 1}).skip(randIndex).limit(1)[0];
- } catch(e) {
+ } catch (e) {
return undefined;
}
},
@@ -111,7 +124,9 @@ var RandomOps = {
}
}
}
- if (matched.length === 0) { return null; }
+ if (matched.length === 0) {
+ return null;
+ }
return this.randomChoice(matched);
},
@@ -140,11 +155,12 @@ var RandomOps = {
printjson(doc);
print("With write concern: " + writeConcern + " and journal: " + journal);
}
- var result = conn.getDB(db)[coll].insert(doc,
- {writeConcern: {w: writeConcern},
- journal: journal});
+ var result =
+ conn.getDB(db)[coll].insert(doc, {writeConcern: {w: writeConcern}, journal: journal});
assert.eq(result.ok, 1);
- if (this.verbose) { print("done."); }
+ if (this.verbose) {
+ print("done.");
+ }
},
/*
@@ -168,10 +184,14 @@ var RandomOps = {
}
try {
coll.remove(doc);
- } catch(e) {
- if (this.verbose) { print("Caught exception in remove: " + e); }
+ } catch (e) {
+ if (this.verbose) {
+ print("Caught exception in remove: " + e);
+ }
+ }
+ if (this.verbose) {
+ print("done.");
}
- if (this.verbose) { print("done."); }
},
/*
@@ -191,7 +211,9 @@ var RandomOps = {
}
var field = this.randomChoice(this.fieldNames);
- var updateDoc = {$set: {}};
+ var updateDoc = {
+ $set: {}
+ };
updateDoc.$set[field] = this.randomChoice(this.fieldValues);
if (this.verbose) {
print("Updating:");
@@ -202,10 +224,14 @@ var RandomOps = {
// If multithreaded, doc might not exist anymore.
try {
coll.update(doc, updateDoc);
- } catch(e) {
- if (this.verbose) { print("Caught exception in update: " + e); }
+ } catch (e) {
+ if (this.verbose) {
+ print("Caught exception in update: " + e);
+ }
+ }
+ if (this.verbose) {
+ print("done.");
}
- if (this.verbose) { print("done."); }
},
//////////////////////////////////////////////////////////////////////////////////
@@ -217,15 +243,18 @@ var RandomOps = {
*/
renameCollection: function(conn) {
var coll = this.getRandomExistingCollection(conn);
- if (coll === null) { return null; }
+ if (coll === null) {
+ return null;
+ }
var newName = coll.getDB() + "." + new ObjectId().str;
if (this.verbose) {
print("renaming collection " + coll.getFullName() + " to " + newName);
}
assert.commandWorked(
- conn.getDB("admin").runCommand({renameCollection: coll.getFullName(), to: newName})
- );
- if (this.verbose) { print("done."); }
+ conn.getDB("admin").runCommand({renameCollection: coll.getFullName(), to: newName}));
+ if (this.verbose) {
+ print("done.");
+ }
},
/*
@@ -233,15 +262,17 @@ var RandomOps = {
*/
dropDatabase: function(conn) {
var dbs = this.getCreatedDatabases(conn);
- if (dbs.length === 0) { return null; }
+ if (dbs.length === 0) {
+ return null;
+ }
var dbName = this.randomChoice(dbs);
if (this.verbose) {
print("Dropping database " + dbName);
}
- assert.commandWorked(
- conn.getDB(dbName).runCommand({dropDatabase: 1})
- );
- if (this.verbose) { print("done."); }
+ assert.commandWorked(conn.getDB(dbName).runCommand({dropDatabase: 1}));
+ if (this.verbose) {
+ print("done.");
+ }
},
/*
@@ -249,14 +280,16 @@ var RandomOps = {
*/
dropCollection: function(conn) {
var coll = this.getRandomExistingCollection(conn);
- if (coll === null) { return null; }
+ if (coll === null) {
+ return null;
+ }
if (this.verbose) {
print("Dropping collection " + coll.getFullName());
}
- assert.commandWorked(
- conn.getDB(coll.getDB()).runCommand({drop: coll.getName()})
- );
- if (this.verbose) { print("done."); }
+ assert.commandWorked(conn.getDB(coll.getDB()).runCommand({drop: coll.getName()}));
+ if (this.verbose) {
+ print("done.");
+ }
},
/*
@@ -264,14 +297,18 @@ var RandomOps = {
*/
createIndex: function(conn) {
var coll = this.getRandomExistingCollection(conn);
- if (coll === null) { return null; }
+ if (coll === null) {
+ return null;
+ }
var index = {};
index[this.randomChoice(this.fieldNames)] = this.randomChoice([-1, 1]);
if (this.verbose) {
print("Adding index " + tojsononeline(index) + " to " + coll.getFullName());
}
coll.ensureIndex(index);
- if (this.verbose) { print("done."); }
+ if (this.verbose) {
+ print("done.");
+ }
},
/*
@@ -279,7 +316,9 @@ var RandomOps = {
*/
dropIndex: function(conn) {
var coll = this.getRandomExistingCollection(conn);
- if (coll === null) { return null; }
+ if (coll === null) {
+ return null;
+ }
var index = this.randomChoice(coll.getIndices());
if (index.name === "_id_") {
return null; // Don't drop that one.
@@ -287,10 +326,10 @@ var RandomOps = {
if (this.verbose) {
print("Dropping index " + tojsononeline(index.key) + " from " + coll.getFullName());
}
- assert.commandWorked(
- coll.dropIndex(index.name)
- );
- if (this.verbose) { print("done."); }
+ assert.commandWorked(coll.dropIndex(index.name));
+ if (this.verbose) {
+ print("done.");
+ }
},
/*
@@ -298,14 +337,18 @@ var RandomOps = {
*/
collMod: function(conn) {
var coll = this.getRandomExistingCollection(conn);
- if (coll === null) { return null; }
+ if (coll === null) {
+ return null;
+ }
var toggle = !coll.stats().userFlags;
if (this.verbose) {
print("Modifying usePowerOf2Sizes to " + toggle + " on collection " +
coll.getFullName());
}
conn.getDB(coll.getDB()).runCommand({collMod: coll.getName(), usePowerOf2Sizes: toggle});
- if (this.verbose) { print("done."); }
+ if (this.verbose) {
+ print("done.");
+ }
},
/*
@@ -316,14 +359,16 @@ var RandomOps = {
return conn.getDB(dbName)[coll].isCapped();
};
var coll = this.getRandomCollectionWFilter(conn, isCapped);
- if (coll === null) { return null; }
+ if (coll === null) {
+ return null;
+ }
if (this.verbose) {
print("Emptying capped collection: " + coll.getFullName());
}
- assert.commandWorked(
- conn.getDB(coll.getDB()).runCommand({emptycapped: coll.getName()})
- );
- if (this.verbose) { print("done."); }
+ assert.commandWorked(conn.getDB(coll.getDB()).runCommand({emptycapped: coll.getName()}));
+ if (this.verbose) {
+ print("done.");
+ }
},
/*
@@ -331,7 +376,9 @@ var RandomOps = {
*/
applyOps: function(conn) {
// Check if there are any valid collections to choose from.
- if (this.getRandomExistingCollection(conn) === null) { return null; }
+ if (this.getRandomExistingCollection(conn) === null) {
+ return null;
+ }
var ops = [];
// Insert between 1 and 10 things.
for (var i = 0; i < Random.randInt(1, 10); i++) {
@@ -346,10 +393,10 @@ var RandomOps = {
print("Applying the following ops: ");
printjson(ops);
}
- assert.commandWorked(
- conn.getDB("admin").runCommand({applyOps: ops})
- );
- if (this.verbose) { print("done."); }
+ assert.commandWorked(conn.getDB("admin").runCommand({applyOps: ops}));
+ if (this.verbose) {
+ print("done.");
+ }
},
/*
@@ -357,16 +404,18 @@ var RandomOps = {
*/
createCollection: function(conn) {
var dbs = this.getCreatedDatabases(conn);
- if (dbs.length === 0) { return null; }
+ if (dbs.length === 0) {
+ return null;
+ }
var dbName = this.randomChoice(dbs);
var newName = new ObjectId().str;
if (this.verbose) {
print("Creating new collection: " + "dbName" + "." + newName);
}
- assert.commandWorked(
- conn.getDB(dbName).runCommand({create: newName})
- );
- if (this.verbose) { print("done."); }
+ assert.commandWorked(conn.getDB(dbName).runCommand({create: newName}));
+ if (this.verbose) {
+ print("done.");
+ }
},
/*
@@ -377,14 +426,18 @@ var RandomOps = {
return conn.getDB(dbName)[coll].isCapped();
};
var coll = this.getRandomCollectionWFilter(conn, isNotCapped);
- if (coll === null) { return null; }
+ if (coll === null) {
+ return null;
+ }
if (this.verbose) {
print("Converting " + coll.getFullName() + " to a capped collection.");
}
assert.commandWorked(
- conn.getDB(coll.getDB()).runCommand({convertToCapped: coll.getName(), size: 1024*1024})
- );
- if (this.verbose) { print("done."); }
+ conn.getDB(coll.getDB())
+ .runCommand({convertToCapped: coll.getName(), size: 1024 * 1024}));
+ if (this.verbose) {
+ print("done.");
+ }
},
appendOplogNote: function(conn) {
@@ -393,9 +446,10 @@ var RandomOps = {
print("Appending oplog note: " + note);
}
assert.commandWorked(
- conn.getDB("admin").runCommand({appendOplogNote: note, data: {some: 'doc'}})
- );
- if (this.verbose) { print("done."); }
+ conn.getDB("admin").runCommand({appendOplogNote: note, data: {some: 'doc'}}));
+ if (this.verbose) {
+ print("done.");
+ }
},
/*
@@ -483,33 +537,32 @@ function assertDBsEq(db1, db2) {
// We don't expect the entire local collection to be the same, not even the oplog, since
// it's a capped collection.
return;
- }
- else if (hash1.md5 != hash2.md5) {
+ } else if (hash1.md5 != hash2.md5) {
for (var i = 0; i < Math.min(collNames1.length, collNames2.length); i++) {
var collName = collNames1[i];
if (hash1.collections[collName] !== hash2.collections[collName]) {
if (db1[collName].stats().capped) {
if (!db2[collName].stats().capped) {
success = false;
- diffText += "\n" + collName + " is capped on " + host1 + " but not on " +
- host2;
- }
- else {
+ diffText +=
+ "\n" + collName + " is capped on " + host1 + " but not on " + host2;
+ } else {
// Skip capped collections. They are not expected to be the same from host
// to host.
continue;
}
- }
- else {
+ } else {
success = false;
- diffText += "\n" + collName + " differs: " +
- getCollectionDiff(db1, db2, collName);
+ diffText +=
+ "\n" + collName + " differs: " + getCollectionDiff(db1, db2, collName);
}
}
}
}
- assert.eq(success, true, "Database " + db1.getName() + " differs on " + host1 + " and " +
- host2 + "\nCollections: " + collNames1 + " vs. " + collNames2 + "\n" + diffText);
+ assert.eq(success,
+ true,
+ "Database " + db1.getName() + " differs on " + host1 + " and " + host2 +
+ "\nCollections: " + collNames1 + " vs. " + collNames2 + "\n" + diffText);
}
/*
@@ -535,9 +588,20 @@ function assertSameData(primary, conns) {
*/
function startCmds(randomOps, host) {
var ops = [
- "insert", "remove", "update", "renameCollection", "dropDatabase",
- "dropCollection", "createIndex", "dropIndex", "collMod", "emptyCapped", "applyOps",
- "createCollection", "convertToCapped", "appendOplogNote"
+ "insert",
+ "remove",
+ "update",
+ "renameCollection",
+ "dropDatabase",
+ "dropCollection",
+ "createIndex",
+ "dropIndex",
+ "collMod",
+ "emptyCapped",
+ "applyOps",
+ "createCollection",
+ "convertToCapped",
+ "appendOplogNote"
];
var m = new Mongo(host);
var numOps = 200;
@@ -593,9 +657,10 @@ function doMultiThreadedWork(primary, numThreads) {
// Create a replica set with 2 nodes of each of the types below, plus one arbiter.
var oldVersion = "last-stable";
var newVersion = "latest";
- var setups = [{binVersion: newVersion, storageEngine: 'mmapv1'},
- {binVersion: newVersion, storageEngine: 'wiredTiger'},
- {binVersion: oldVersion}
+ var setups = [
+ {binVersion: newVersion, storageEngine: 'mmapv1'},
+ {binVersion: newVersion, storageEngine: 'wiredTiger'},
+ {binVersion: oldVersion}
];
var nodes = {};
var node = 0;
@@ -606,14 +671,18 @@ function doMultiThreadedWork(primary, numThreads) {
nodes["n" + node] = setups[i];
node++;
}
- nodes["n" + 2 * setups.length] = {arbiter: true};
+ nodes["n" + 2 * setups.length] = {
+ arbiter: true
+ };
var replTest = new ReplSetTest({nodes: nodes, name: name});
var conns = replTest.startSet();
var config = replTest.getReplSetConfig();
// Make sure everyone is syncing from the primary, to ensure we have all combinations of
// primary/secondary syncing.
- config.settings = {chainingAllowed: false};
+ config.settings = {
+ chainingAllowed: false
+ };
config.protocolVersion = 0;
replTest.initiate(config);
// Ensure all are synced.
@@ -624,32 +693,31 @@ function doMultiThreadedWork(primary, numThreads) {
// Keep track of the indices of different types of primaries.
// We'll rotate to get a primary of each type.
- var possiblePrimaries = [0,2,4];
+ var possiblePrimaries = [0, 2, 4];
var highestPriority = 2;
while (possiblePrimaries.length > 0) {
config = primary.getDB("local").system.replset.findOne();
var primaryIndex = RandomOps.randomChoice(possiblePrimaries);
- print("TRANSITIONING to " + tojsononeline(setups[primaryIndex/2]) + " as primary");
+ print("TRANSITIONING to " + tojsononeline(setups[primaryIndex / 2]) + " as primary");
// Remove chosen type from future choices.
removeFromArray(primaryIndex, possiblePrimaries);
config.members[primaryIndex].priority = highestPriority;
if (config.version === undefined) {
config.version = 2;
- }
- else {
+ } else {
config.version++;
}
highestPriority++;
printjson(config);
try {
primary.getDB("admin").runCommand({replSetReconfig: config});
- }
- catch(e) {
+ } catch (e) {
// Expected to fail, as we'll have to reconnect.
}
- replTest.awaitReplication(60000); // 2 times the election period.
+ replTest.awaitReplication(60000); // 2 times the election period.
assert.soon(primaryChanged(conns, replTest, primaryIndex),
- "waiting for higher priority primary to be elected", 100000);
+ "waiting for higher priority primary to be elected",
+ 100000);
print("New primary elected, doing a bunch of work");
primary = replTest.getPrimary();
doMultiThreadedWork(primary, 10);
diff --git a/jstests/multiVersion/mmapv1_overrides_default_storage_engine.js b/jstests/multiVersion/mmapv1_overrides_default_storage_engine.js
index 9cad40c23bd..7e6d61e9f1d 100644
--- a/jstests/multiVersion/mmapv1_overrides_default_storage_engine.js
+++ b/jstests/multiVersion/mmapv1_overrides_default_storage_engine.js
@@ -7,18 +7,18 @@
var testCases = [
{
- binVersion: '2.6',
+ binVersion: '2.6',
},
{
- binVersion: '2.6',
- directoryperdb: '',
+ binVersion: '2.6',
+ directoryperdb: '',
},
{
- binVersion: '3.0',
+ binVersion: '3.0',
},
{
- binVersion: '3.0',
- directoryperdb: '',
+ binVersion: '3.0',
+ directoryperdb: '',
},
];
@@ -26,11 +26,11 @@
// --storageEngine=mmapv1 is explicitly specified.
testCases.forEach(function(testCase) {
[null, 'mmapv1'].forEach(function(storageEngine) {
- jsTest.log('Upgrading from a ' + testCase.binVersion + ' instance with options='
- + tojson(testCase) + ' to the latest version. This should succeed when the'
- + ' latest version '
- + (storageEngine ? ('explicitly specifies --storageEngine=' + storageEngine)
- : 'omits the --storageEngine flag'));
+ jsTest.log('Upgrading from a ' + testCase.binVersion + ' instance with options=' +
+ tojson(testCase) + ' to the latest version. This should succeed when the' +
+ ' latest version ' +
+ (storageEngine ? ('explicitly specifies --storageEngine=' + storageEngine)
+ : 'omits the --storageEngine flag'));
var dbpath = MongoRunner.dataPath + 'mmapv1_overrides_default_storage_engine';
resetDbpath(dbpath);
@@ -43,8 +43,8 @@
// Start the old version.
var mongodOptions = Object.merge(defaultOptions, testCase);
var conn = MongoRunner.runMongod(mongodOptions);
- assert.neq(null, conn,
- 'mongod was unable to start up with options ' + tojson(mongodOptions));
+ assert.neq(
+ null, conn, 'mongod was unable to start up with options ' + tojson(mongodOptions));
assert.commandWorked(conn.getDB('test').runCommand({ping: 1}));
MongoRunner.stopMongod(conn);
@@ -57,8 +57,8 @@
mongodOptions.directoryperdb = testCase.directoryperdb;
}
conn = MongoRunner.runMongod(mongodOptions);
- assert.neq(null, conn,
- 'mongod was unable to start up with options ' + tojson(mongodOptions));
+ assert.neq(
+ null, conn, 'mongod was unable to start up with options ' + tojson(mongodOptions));
assert.commandWorked(conn.getDB('test').runCommand({ping: 1}));
MongoRunner.stopMongod(conn);
});
@@ -66,9 +66,9 @@
// The mongod should not start up when --storageEngine=wiredTiger is specified.
testCases.forEach(function(testCase) {
- jsTest.log('Upgrading from a ' + testCase.binVersion + ' instance with options='
- + tojson(testCase) + ' to the latest version. This should fail when the latest'
- + ' version specifies --storageEngine=wiredTiger');
+ jsTest.log('Upgrading from a ' + testCase.binVersion + ' instance with options=' +
+ tojson(testCase) + ' to the latest version. This should fail when the latest' +
+ ' version specifies --storageEngine=wiredTiger');
var dbpath = MongoRunner.dataPath + 'mmapv1_overrides_default_storage_engine';
resetDbpath(dbpath);
@@ -81,16 +81,17 @@
// Start the old version.
var mongodOptions = Object.merge(defaultOptions, testCase);
var conn = MongoRunner.runMongod(mongodOptions);
- assert.neq(null, conn,
- 'mongod was unable to start up with options ' + tojson(mongodOptions));
+ assert.neq(
+ null, conn, 'mongod was unable to start up with options ' + tojson(mongodOptions));
assert.commandWorked(conn.getDB('test').runCommand({ping: 1}));
MongoRunner.stopMongod(conn);
// Start the newest version.
mongodOptions = Object.extend({storageEngine: 'wiredTiger'}, defaultOptions);
conn = MongoRunner.runMongod(mongodOptions);
- assert.eq(null, conn,
- 'mongod should not have been able to start up with options '
- + tojson(mongodOptions));
+ assert.eq(
+ null,
+ conn,
+ 'mongod should not have been able to start up with options ' + tojson(mongodOptions));
});
}());
diff --git a/jstests/multiVersion/partial_index_upgrade.js b/jstests/multiVersion/partial_index_upgrade.js
index 938db4759c5..474252b4dc8 100644
--- a/jstests/multiVersion/partial_index_upgrade.js
+++ b/jstests/multiVersion/partial_index_upgrade.js
@@ -9,14 +9,14 @@
var testCases = [
{
- partialFilterExpression: 'not an object',
+ partialFilterExpression: 'not an object',
},
{
- partialFilterExpression: {field: {$regex: 'not a supported operator'}},
+ partialFilterExpression: {field: {$regex: 'not a supported operator'}},
},
{
- partialFilterExpression: {field: 'cannot be combined with sparse=true'},
- sparse: true,
+ partialFilterExpression: {field: 'cannot be combined with sparse=true'},
+ sparse: true,
},
];
@@ -36,8 +36,8 @@
// Start the old version.
var oldVersionOptions = Object.extend({binVersion: '3.0'}, defaultOptions);
var conn = MongoRunner.runMongod(oldVersionOptions);
- assert.neq(null, conn, 'mongod was unable to start up with options ' +
- tojson(oldVersionOptions));
+ assert.neq(
+ null, conn, 'mongod was unable to start up with options ' + tojson(oldVersionOptions));
// Use write commands in order to make assertions about the success of operations based on
// the response from the server.
@@ -48,18 +48,17 @@
// Start the newest version.
conn = MongoRunner.runMongod(defaultOptions);
- assert.eq(null, conn, 'mongod should not have been able to start up when an index with' +
- ' options ' + tojson(indexOptions) + ' exists');
+ assert.eq(null,
+ conn,
+ 'mongod should not have been able to start up when an index with' +
+ ' options ' + tojson(indexOptions) + ' exists');
});
// Create a replica set with a primary running 3.0 and a secondary running the latest version.
// The secondary should terminate when the command to build an invalid partial index replicates.
testCases.forEach(function(indexOptions) {
var replSetName = 'partial_index_replset';
- var nodes = [
- {binVersion: '3.0'},
- {binVersion: 'latest'},
- ];
+ var nodes = [{binVersion: '3.0'}, {binVersion: 'latest'}, ];
var rst = new ReplSetTest({name: replSetName, nodes: nodes});
@@ -85,16 +84,18 @@
// Verify that the secondary running the latest version terminates when the command to build
// an invalid partial index replicates.
- assert.soon(function() {
- try {
- secondaryLatest.getDB('test').runCommand({ping: 1});
- } catch (e) {
- return true;
- }
- return false;
- }, 'secondary should have terminated due to request to build an invalid partial index' +
- ' with options ' + tojson(indexOptions));
-
- rst.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+ assert.soon(
+ function() {
+ try {
+ secondaryLatest.getDB('test').runCommand({ping: 1});
+ } catch (e) {
+ return true;
+ }
+ return false;
+ },
+ 'secondary should have terminated due to request to build an invalid partial index' +
+ ' with options ' + tojson(indexOptions));
+
+ rst.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
});
})();
diff --git a/jstests/multiVersion/transitioning_to_and_from_WT.js b/jstests/multiVersion/transitioning_to_and_from_WT.js
index 5c56e96919d..97ac9b7af74 100644
--- a/jstests/multiVersion/transitioning_to_and_from_WT.js
+++ b/jstests/multiVersion/transitioning_to_and_from_WT.js
@@ -7,10 +7,11 @@
jsTestLog("Setting up initial data set with the last stable version of mongod");
- var toolTest = new ToolTest('transitioning_to_and_from_WT', {
- binVersion: MongoRunner.getBinVersionFor("last-stable"),
- storageEngine: "mmapv1",
- });
+ var toolTest = new ToolTest('transitioning_to_and_from_WT',
+ {
+ binVersion: MongoRunner.getBinVersionFor("last-stable"),
+ storageEngine: "mmapv1",
+ });
toolTest.dbpath = toolTest.root + "/original/";
resetDbpath(toolTest.dbpath);
@@ -54,62 +55,62 @@
var modes = [
// to the latest version with wiredTiger
{
- binVersion: "latest",
- storageEngine: "wiredTiger",
+ binVersion: "latest",
+ storageEngine: "wiredTiger",
},
// back to the last stable version with mmapv1
{
- binVersion: "last-stable",
- storageEngine: "mmapv1",
+ binVersion: "last-stable",
+ storageEngine: "mmapv1",
},
// to the latest version with mmapv1
{
- binVersion: "latest",
- storageEngine: "mmapv1",
+ binVersion: "latest",
+ storageEngine: "mmapv1",
},
// to latest version with wiredTiger
{
- binVersion: "latest",
- storageEngine: "wiredTiger",
+ binVersion: "latest",
+ storageEngine: "wiredTiger",
},
// back to the latest version with mmapv1
{
- binVersion: "latest",
- storageEngine: "mmapv1",
+ binVersion: "latest",
+ storageEngine: "mmapv1",
},
// to the last stable version with mmapv1 and directory per db
{
- binVersion: "last-stable",
- storageEngine: "mmapv1",
- directoryperdb: "",
+ binVersion: "last-stable",
+ storageEngine: "mmapv1",
+ directoryperdb: "",
},
// to the latest version with wiredTiger
{
- binVersion: "latest",
- storageEngine: "wiredTiger",
+ binVersion: "latest",
+ storageEngine: "wiredTiger",
},
// back to the last stable version with mmapv1 and directory per db
{
- binVersion: "last-stable",
- storageEngine: "mmapv1",
- directoryperdb: "",
+ binVersion: "last-stable",
+ storageEngine: "mmapv1",
+ directoryperdb: "",
},
// to latest version with mmapv1 and directory per db
{
- binVersion: "latest",
- storageEngine: "mmapv1",
- directoryperdb: "",
+ binVersion: "latest",
+ storageEngine: "mmapv1",
+ directoryperdb: "",
},
// to the latest with wiredTiger
{
- binVersion: "latest",
- storageEngine: "wiredTiger",
+ binVersion: "latest",
+ storageEngine: "wiredTiger",
},
// back to latest version with mmapv1 and directory per db
{
- binVersion: "latest",
- storageEngine: "mmapv1",
- directoryperdb: "",
+ binVersion: "latest",
+ storageEngine: "mmapv1",
+ directoryperdb: "",
},
];
@@ -129,8 +130,8 @@
// set up new node configuration info
toolTest.options.binVersion = MongoRunner.getBinVersionFor(entry.binVersion);
- toolTest.dbpath = toolTest.root + "/" + idx + "-" + entry.binVersion + "-"
- + entry.storageEngine + "/";
+ toolTest.dbpath =
+ toolTest.root + "/" + idx + "-" + entry.binVersion + "-" + entry.storageEngine + "/";
if (entry.hasOwnProperty("storageEngine")) {
toolTest.options.storageEngine = entry.storageEngine;
diff --git a/jstests/multiVersion/upgrade_cluster.js b/jstests/multiVersion/upgrade_cluster.js
index 8049e716217..09a8c86d3d8 100644
--- a/jstests/multiVersion/upgrade_cluster.js
+++ b/jstests/multiVersion/upgrade_cluster.js
@@ -7,88 +7,86 @@ load('./jstests/multiVersion/libs/multi_cluster.js');
(function() {
-/**
- * @param isRSCluster {bool} use replica set shards.
- */
-var runTest = function(isRSCluster) {
-"use strict";
-
-jsTest.log( "Starting" + ( isRSCluster ? " (replica set)" : "" ) + " cluster" + "..." );
-
-var options = {
- mongosOptions : { binVersion : "last-stable" },
- configOptions : { binVersion : "last-stable" },
- shardOptions : { binVersion : "last-stable" },
-
- rsOptions : { binVersion : "last-stable" },
- rs : isRSCluster
-};
-
-var testCRUD = function(mongos) {
- assert.commandWorked(mongos.getDB('test').runCommand({ dropDatabase: 1 }));
- assert.commandWorked(mongos.getDB('unsharded').runCommand({ dropDatabase: 1 }));
-
- var unshardedDB = mongos.getDB('unshareded');
- assert.commandWorked(unshardedDB.runCommand({ insert: 'foo', documents: [{ x: 1 }]}));
- assert.commandWorked(unshardedDB.runCommand({ update: 'foo',
- updates: [{ q: { x: 1 },
- u: { $set: { y: 1 }}}]}));
- var doc = unshardedDB.foo.findOne({ x: 1 });
- assert.eq(1, doc.y);
- assert.commandWorked(unshardedDB.runCommand({ delete: 'foo', deletes: [{ q: { x: 1 },
- limit: 1 }]}));
- doc = unshardedDB.foo.findOne();
- assert.eq(null, doc);
-
- assert.commandWorked(mongos.adminCommand({ enableSharding: 'test' }));
- assert.commandWorked(mongos.adminCommand({ shardCollection: 'test.user', key: { x: 1 }}));
-
- var shardedDB = mongos.getDB('shareded');
- assert.commandWorked(shardedDB.runCommand({ insert: 'foo', documents: [{ x: 1 }]}));
- assert.commandWorked(shardedDB.runCommand({ update: 'foo',
- updates: [{ q: { x: 1 },
- u: { $set: { y: 1 }}}]}));
- doc = shardedDB.foo.findOne({ x: 1 });
- assert.eq(1, doc.y);
- assert.commandWorked(shardedDB.runCommand({ delete: 'foo', deletes: [{ q: { x: 1 },
- limit: 1 }]}));
- doc = shardedDB.foo.findOne();
- assert.eq(null, doc);
-};
-
-var st = new ShardingTest({ shards: 2, mongos: 1, other: options });
-
-var version = st.s.getCollection('config.version').findOne();
-
-assert.eq(version.minCompatibleVersion, 5);
-assert.eq(version.currentVersion, 6);
-var clusterID = version.clusterId;
-assert.neq(null, clusterID);
-assert.eq(version.excluding, undefined);
-
-// upgrade everything except for mongos
-st.upgradeCluster("latest", { upgradeMongos: false });
-st.restartMongoses();
-
-testCRUD(st.s);
-
-// upgrade mongos
-st.upgradeCluster("latest", { upgradeConfigs: false, upgradeShards: false });
-st.restartMongoses();
-
-// Check that version document is unmodified.
-version = st.s.getCollection('config.version').findOne();
-assert.eq(version.minCompatibleVersion, 5);
-assert.eq(version.currentVersion, 6);
-assert.neq(clusterID, version.clusterId);
-assert.eq(version.excluding, undefined);
-
-testCRUD(st.s);
-
-st.stop();
-};
-
-runTest(false);
-runTest(true);
+ /**
+ * @param isRSCluster {bool} use replica set shards.
+ */
+ var runTest = function(isRSCluster) {
+ "use strict";
+
+ jsTest.log("Starting" + (isRSCluster ? " (replica set)" : "") + " cluster" + "...");
+
+ var options = {
+ mongosOptions: {binVersion: "last-stable"},
+ configOptions: {binVersion: "last-stable"},
+ shardOptions: {binVersion: "last-stable"},
+
+ rsOptions: {binVersion: "last-stable"},
+ rs: isRSCluster
+ };
+
+ var testCRUD = function(mongos) {
+ assert.commandWorked(mongos.getDB('test').runCommand({dropDatabase: 1}));
+ assert.commandWorked(mongos.getDB('unsharded').runCommand({dropDatabase: 1}));
+
+ var unshardedDB = mongos.getDB('unshareded');
+ assert.commandWorked(unshardedDB.runCommand({insert: 'foo', documents: [{x: 1}]}));
+ assert.commandWorked(unshardedDB.runCommand(
+ {update: 'foo', updates: [{q: {x: 1}, u: {$set: {y: 1}}}]}));
+ var doc = unshardedDB.foo.findOne({x: 1});
+ assert.eq(1, doc.y);
+ assert.commandWorked(
+ unshardedDB.runCommand({delete: 'foo', deletes: [{q: {x: 1}, limit: 1}]}));
+ doc = unshardedDB.foo.findOne();
+ assert.eq(null, doc);
+
+ assert.commandWorked(mongos.adminCommand({enableSharding: 'test'}));
+ assert.commandWorked(mongos.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+
+ var shardedDB = mongos.getDB('shareded');
+ assert.commandWorked(shardedDB.runCommand({insert: 'foo', documents: [{x: 1}]}));
+ assert.commandWorked(
+ shardedDB.runCommand({update: 'foo', updates: [{q: {x: 1}, u: {$set: {y: 1}}}]}));
+ doc = shardedDB.foo.findOne({x: 1});
+ assert.eq(1, doc.y);
+ assert.commandWorked(
+ shardedDB.runCommand({delete: 'foo', deletes: [{q: {x: 1}, limit: 1}]}));
+ doc = shardedDB.foo.findOne();
+ assert.eq(null, doc);
+ };
+
+ var st = new ShardingTest({shards: 2, mongos: 1, other: options});
+
+ var version = st.s.getCollection('config.version').findOne();
+
+ assert.eq(version.minCompatibleVersion, 5);
+ assert.eq(version.currentVersion, 6);
+ var clusterID = version.clusterId;
+ assert.neq(null, clusterID);
+ assert.eq(version.excluding, undefined);
+
+ // upgrade everything except for mongos
+ st.upgradeCluster("latest", {upgradeMongos: false});
+ st.restartMongoses();
+
+ testCRUD(st.s);
+
+ // upgrade mongos
+ st.upgradeCluster("latest", {upgradeConfigs: false, upgradeShards: false});
+ st.restartMongoses();
+
+ // Check that version document is unmodified.
+ version = st.s.getCollection('config.version').findOne();
+ assert.eq(version.minCompatibleVersion, 5);
+ assert.eq(version.currentVersion, 6);
+ assert.neq(clusterID, version.clusterId);
+ assert.eq(version.excluding, undefined);
+
+ testCRUD(st.s);
+
+ st.stop();
+ };
+
+ runTest(false);
+ runTest(true);
})();
diff --git a/jstests/multiVersion/wt_index_option_defaults_replset.js b/jstests/multiVersion/wt_index_option_defaults_replset.js
index 451c56f807c..9156d0b06ae 100644
--- a/jstests/multiVersion/wt_index_option_defaults_replset.js
+++ b/jstests/multiVersion/wt_index_option_defaults_replset.js
@@ -32,21 +32,23 @@
var secondary30 = conns[1].getDB('test');
// Create a collection with "indexOptionDefaults" specified.
- var indexOptions = {storageEngine: {wiredTiger: {configString: 'prefix_compression=false'}}};
+ var indexOptions = {
+ storageEngine: {wiredTiger: {configString: 'prefix_compression=false'}}
+ };
assert.commandWorked(primary32.runCommand({create: 'coll', indexOptionDefaults: indexOptions}));
// Verify that the "indexOptionDefaults" field is present in the corresponding oplog entry.
- var entry = primary32.getSiblingDB('local').oplog.rs.find()
- .sort({$natural: -1})
- .limit(1)
- .next();
- assert.docEq(indexOptions, entry.o.indexOptionDefaults,
+ var entry =
+ primary32.getSiblingDB('local').oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ assert.docEq(indexOptions,
+ entry.o.indexOptionDefaults,
'indexOptionDefaults were not replicated: ' + tojson(entry));
rst.awaitReplication();
var collectionInfos = secondary30.getCollectionInfos({name: 'coll'});
- assert.eq(1, collectionInfos.length,
+ assert.eq(1,
+ collectionInfos.length,
'collection "coll" was not created on the secondary: ' + tojson(collectionInfos));
assert(!collectionInfos[0].options.hasOwnProperty('indexOptionDefaults'),
@@ -88,24 +90,27 @@
var secondary32 = conns[1].getDB('test');
// Create a collection with "indexOptionDefaults" specified.
- var indexOptions = {storageEngine: {wiredTiger: {configString: 'prefix_compression=false'}}};
+ var indexOptions = {
+ storageEngine: {wiredTiger: {configString: 'prefix_compression=false'}}
+ };
assert.commandWorked(primary30.runCommand({create: 'coll', indexOptionDefaults: indexOptions}));
// Verify that the "indexOptionDefaults" field is present in the corresponding oplog entry.
- var entry = primary30.getSiblingDB('local').oplog.rs.find()
- .sort({$natural: -1})
- .limit(1)
- .next();
- assert.docEq(indexOptions, entry.o.indexOptionDefaults,
+ var entry =
+ primary30.getSiblingDB('local').oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ assert.docEq(indexOptions,
+ entry.o.indexOptionDefaults,
'indexOptionDefaults were not replicated: ' + tojson(entry));
rst.awaitReplication();
var collectionInfos = secondary32.getCollectionInfos({name: 'coll'});
- assert.eq(1, collectionInfos.length,
+ assert.eq(1,
+ collectionInfos.length,
'collection "coll" was not created on the secondary: ' + tojson(collectionInfos));
- assert.docEq(indexOptions, collectionInfos[0].options.indexOptionDefaults,
+ assert.docEq(indexOptions,
+ collectionInfos[0].options.indexOptionDefaults,
'indexOptionDefaults were not applied: ' + tojson(collectionInfos));
rst.stopSet();
diff --git a/jstests/noPassthrough/awaitdata_getmore_cmd.js b/jstests/noPassthrough/awaitdata_getmore_cmd.js
index 276e464145b..ef47efb0e67 100644
--- a/jstests/noPassthrough/awaitdata_getmore_cmd.js
+++ b/jstests/noPassthrough/awaitdata_getmore_cmd.js
@@ -47,13 +47,8 @@
assert.eq(cmdRes.cursor.ns, coll.getFullName());
// Should also succeed if maxTimeMS is supplied on the original find.
- cmdRes = db.runCommand({
- find: collName,
- batchSize: 2,
- awaitData: true,
- tailable: true,
- maxTimeMS: 2000
- });
+ cmdRes = db.runCommand(
+ {find: collName, batchSize: 2, awaitData: true, tailable: true, maxTimeMS: 2000});
assert.commandWorked(cmdRes);
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, coll.getFullName());
@@ -105,33 +100,23 @@
assert.gte((new Date()) - now, 2000);
// Repeat the test, this time tailing the oplog rather than a user-created capped collection.
- cmdRes = localDB.runCommand({
- find: oplogColl.getName(),
- batchSize: 2,
- awaitData: true,
- tailable: true
- });
+ cmdRes = localDB.runCommand(
+ {find: oplogColl.getName(), batchSize: 2, awaitData: true, tailable: true});
assert.commandWorked(cmdRes);
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, oplogColl.getFullName());
assert.eq(cmdRes.cursor.firstBatch.length, 2);
- cmdRes = localDB.runCommand({
- getMore: cmdRes.cursor.id,
- collection: oplogColl.getName(),
- maxTimeMS: 1000
- });
+ cmdRes = localDB.runCommand(
+ {getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 1000});
assert.commandWorked(cmdRes);
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, oplogColl.getFullName());
while (cmdRes.cursor.nextBatch.length > 0) {
now = new Date();
- cmdRes = localDB.runCommand({
- getMore: cmdRes.cursor.id,
- collection: oplogColl.getName(),
- maxTimeMS: 4000
- });
+ cmdRes = localDB.runCommand(
+ {getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 4000});
assert.commandWorked(cmdRes);
assert.gt(cmdRes.cursor.id, NumberLong(0));
assert.eq(cmdRes.cursor.ns, oplogColl.getFullName());
diff --git a/jstests/noPassthrough/backup_restore.js b/jstests/noPassthrough/backup_restore.js
index 40d283873e7..e994aeda66c 100644
--- a/jstests/noPassthrough/backup_restore.js
+++ b/jstests/noPassthrough/backup_restore.js
@@ -17,52 +17,39 @@
(function() {
"use strict";
- function runCmd (cmd) {
- runProgram('bash', '-c', cmd);
+ function runCmd(cmd) {
+ runProgram('bash', '-c', cmd);
}
function crudClient(host, dbName, coll) {
// Launch CRUD client
- var crudClientCmds = "var bulkNum = 1000;" +
- "var baseNum = 100000;" +
+ var crudClientCmds = "var bulkNum = 1000;" + "var baseNum = 100000;" +
"var coll = db.getSiblingDB('" + dbName + "')." + coll + ";" +
- "coll.ensureIndex({x: 1});" +
- "var largeValue = new Array(1024).join('L');" +
+ "coll.ensureIndex({x: 1});" + "var largeValue = new Array(1024).join('L');" +
"Random.setRandomSeed();" +
// run indefinitely
- "while (true) {" +
- " try {" +
- " var op = Random.rand();" +
- " var match = Math.floor(Random.rand() * baseNum);" +
- " if (op < 0.2) {" +
+ "while (true) {" + " try {" + " var op = Random.rand();" +
+ " var match = Math.floor(Random.rand() * baseNum);" + " if (op < 0.2) {" +
// 20% of the operations: bulk insert bulkNum docs
" var bulk = coll.initializeUnorderedBulkOp();" +
" for (var i = 0; i < bulkNum; i++) {" +
" bulk.insert({x: (match * i) % baseNum," +
" doc: largeValue.substring(0, match % largeValue.length)});" +
- " }" +
- " assert.writeOK(bulk.execute());" +
+ " }" + " assert.writeOK(bulk.execute());" +
" } else if (op < 0.4) {" +
// 20% of the operations: update docs;
" var updateOpts = {upsert: true, multi: true};" +
- " assert.writeOK(coll.update(" +
- " {x: {$gte: match}}," +
+ " assert.writeOK(coll.update(" + " {x: {$gte: match}}," +
" {$inc: {x: baseNum}, $set: {n: 'hello'}}," +
- " updateOpts));" +
- " } else if (op < 0.9) {" +
+ " updateOpts));" + " } else if (op < 0.9) {" +
// 50% of the operations: find matchings docs
// itcount() consumes the cursor
- " coll.find({x: {$gte: match}}).itcount();" +
- " } else {" +
+ " coll.find({x: {$gte: match}}).itcount();" + " } else {" +
// 10% of the operations: remove matching docs
- " assert.writeOK(coll.remove({x: {$gte: match}}));" +
- " }" +
+ " assert.writeOK(coll.remove({x: {$gte: match}}));" + " }" +
" } catch(e) {" +
" if (e instanceof ReferenceError || e instanceof TypeError) {" +
- " throw e;" +
- " }" +
- " }" +
- "}";
+ " throw e;" + " }" + " }" + "}";
// Returns the pid of the started mongo shell so the CRUD test client can be terminated
// without waiting for its execution to finish.
@@ -75,26 +62,17 @@
// started without any cluster options. Since the shell running this test was started with
// --nodb, another mongo shell is used to allow implicit connections to be made to the
// primary of the replica set.
- var fsmClientCmds = "'use strict';" +
- "load('jstests/concurrency/fsm_libs/runner.js');" +
- "var dir = 'jstests/concurrency/fsm_workloads';" +
- "var blacklist = [" +
- " 'agg_group_external.js'," +
- " 'agg_sort_external.js'," +
- " 'auth_create_role.js'," +
- " 'auth_create_user.js'," +
- " 'auth_drop_role.js'," +
- " 'auth_drop_user.js'," +
- " 'reindex_background.js'," +
- " 'yield_sort.js'," +
- "].map(function(file) { return dir + '/' + file; });" +
- "Random.setRandomSeed();" +
+ var fsmClientCmds = "'use strict';" + "load('jstests/concurrency/fsm_libs/runner.js');" +
+ "var dir = 'jstests/concurrency/fsm_workloads';" + "var blacklist = [" +
+ " 'agg_group_external.js'," + " 'agg_sort_external.js'," +
+ " 'auth_create_role.js'," + " 'auth_create_user.js'," +
+ " 'auth_drop_role.js'," + " 'auth_drop_user.js'," +
+ " 'reindex_background.js'," + " 'yield_sort.js'," +
+ "].map(function(file) { return dir + '/' + file; });" + "Random.setRandomSeed();" +
// run indefinitely
- "while (true) {" +
- " try {" +
+ "while (true) {" + " try {" +
" var workloads = Array.shuffle(ls(dir).filter(function(file) {" +
- " return !Array.contains(blacklist, file);" +
- " }));" +
+ " return !Array.contains(blacklist, file);" + " }));" +
// Run workloads one at a time, so we ensure replication completes
" workloads.forEach(function(workload) {" +
" runWorkloadsSerially([workload]," +
@@ -104,14 +82,10 @@
" var result = db.getSiblingDB('test').fsm_teardown.insert({ a: 1 }, wc);" +
" assert.writeOK(result, 'teardown insert failed: ' + tojson(result));" +
" result = db.getSiblingDB('test').fsm_teardown.drop();" +
- " assert(result, 'teardown drop failed');" +
- " });" +
+ " assert(result, 'teardown drop failed');" + " });" +
" } catch(e) {" +
" if (e instanceof ReferenceError || e instanceof TypeError) {" +
- " throw e;" +
- " }" +
- " }" +
- "}";
+ " throw e;" + " }" + " }" + "}";
// Returns the pid of the started mongo shell so the FSM test client can be terminated
// without waiting for its execution to finish.
@@ -132,9 +106,10 @@
// Backup type (must be specified)
var allowedBackupKeys = ['fsyncLock', 'stopStart', 'rolling'];
assert(options.backup, "Backup option not supplied");
- assert.contains(options.backup, allowedBackupKeys,
- 'invalid option: ' + tojson(options.backup) +
- '; valid options are: ' + tojson(allowedBackupKeys));
+ assert.contains(options.backup,
+ allowedBackupKeys,
+ 'invalid option: ' + tojson(options.backup) + '; valid options are: ' +
+ tojson(allowedBackupKeys));
// Number of nodes in initial replica set (default 3)
var numNodes = options.nodes || 3;
@@ -152,11 +127,7 @@
var rst = new ReplSetTest({
name: replSetName,
nodes: numNodes,
- nodeOptions: {
- oplogSize: 1024,
- storageEngine: storageEngine,
- dbpath: dbpathFormat
- }
+ nodeOptions: {oplogSize: 1024, storageEngine: storageEngine, dbpath: dbpathFormat}
});
var nodes = rst.startSet();
@@ -179,12 +150,12 @@
// Perform fsync to create checkpoint. We doublecheck if the storage engine
// supports fsync here.
- var ret = primary.adminCommand({fsync : 1});
+ var ret = primary.adminCommand({fsync: 1});
if (!ret.ok) {
assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
- jsTestLog("Skipping test of " + options.backup
- + " for " + storageEngine + ' as it does not support fsync');
+ jsTestLog("Skipping test of " + options.backup + " for " + storageEngine +
+ ' as it does not support fsync');
return;
}
@@ -211,8 +182,8 @@
var ret = secondary.getDB("admin").fsyncLock();
if (!ret.ok) {
assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
- jsTestLog("Skipping test of " + options.backup
- + " for " + storageEngine + ' as it does not support fsync');
+ jsTestLog("Skipping test of " + options.backup + " for " + storageEngine +
+ ' as it does not support fsync');
return;
}
@@ -223,8 +194,8 @@
copiedFiles = ls(hiddenDbpath);
print("Copied files:", tojson(copiedFiles));
assert.gt(copiedFiles.length, 0, testName + ' no files copied');
- assert.commandWorked(secondary.getDB("admin").fsyncUnlock(), testName +
- ' failed to fsyncUnlock');
+ assert.commandWorked(secondary.getDB("admin").fsyncUnlock(),
+ testName + ' failed to fsyncUnlock');
} else if (options.backup == 'rolling') {
var rsyncCmd = "rsync -aKkz --del " + sourcePath + " " + destPath;
// Simulate a rolling rsync, do it 3 times before stopping process
@@ -271,7 +242,8 @@
// Note the dbhash can only run when the DB is inactive to get a result
// that can be compared, which is only in the fsyncLock/fsynUnlock case
if (dbHash !== undefined) {
- assert.eq(dbHash, rst.nodes[numNodes].getDB(crudDb).runCommand({dbhash: 1}).md5,
+ assert.eq(dbHash,
+ rst.nodes[numNodes].getDB(crudDb).runCommand({dbhash: 1}).md5,
testName + ' dbHash');
}
@@ -285,8 +257,8 @@
hidden: true
};
rsConfig.members.push(hiddenMember);
- assert.commandWorked(primary.adminCommand({replSetReconfig : rsConfig}), testName +
- ' failed to reconfigure replSet ' + tojson(rsConfig));
+ assert.commandWorked(primary.adminCommand({replSetReconfig: rsConfig}),
+ testName + ' failed to reconfigure replSet ' + tojson(rsConfig));
// Wait up to 60 seconds until the new hidden node is in state RECOVERING.
rst.waitForState(rst.nodes[numNodes],
@@ -314,7 +286,7 @@
// Main
// Add storage engines which are to be skipped entirely to this array
- var noBackupTests = [ 'inMemoryExperiment' ];
+ var noBackupTests = ['inMemoryExperiment'];
// Grab the storage engine, default is wiredTiger
var storageEngine = jsTest.options().storageEngine || "wiredTiger";
@@ -338,7 +310,7 @@
}
}
- // Run the fsyncLock test. Will return before testing for any engine that doesn't
+ // Run the fsyncLock test. Will return before testing for any engine that doesn't
// support fsyncLock
runTest({
name: storageEngine + ' fsyncLock/fsyncUnlock',
diff --git a/jstests/noPassthrough/balancer_window.js b/jstests/noPassthrough/balancer_window.js
index 15df48c634f..9f100846f1e 100644
--- a/jstests/noPassthrough/balancer_window.js
+++ b/jstests/noPassthrough/balancer_window.js
@@ -11,124 +11,128 @@
* sure that some chunks are moved.
*/
(function() {
-/**
- * Simple representation for wall clock time. Hour and minutes should be integers.
- */
-var HourAndMinute = function(hour, minutes) {
- return {
- /**
- * Returns a new HourAndMinute object with the amount of hours added.
- * Amount can be negative.
- */
- addHour: function(amount) {
- var newHour = (hour + amount) % 24;
- if (newHour < 0) {
- newHour += 24;
+ /**
+ * Simple representation for wall clock time. Hour and minutes should be integers.
+ */
+ var HourAndMinute = function(hour, minutes) {
+ return {
+ /**
+ * Returns a new HourAndMinute object with the amount of hours added.
+ * Amount can be negative.
+ */
+ addHour: function(amount) {
+ var newHour = (hour + amount) % 24;
+ if (newHour < 0) {
+ newHour += 24;
+ }
+
+ return new HourAndMinute(newHour, minutes);
+ },
+
+ /**
+ * Returns a string representation that is compatible with the format for the balancer
+ * window settings.
+ */
+ toString: function() {
+ var minStr = (minutes < 10) ? ('0' + minutes) : ('' + minutes);
+ var hourStr = (hour < 10) ? ('0' + hour) : ('' + hour);
+ return hourStr + ':' + minStr;
}
+ };
+ };
+
+ /**
+ * Waits until at least one balancing round has passed.
+ *
+ * Note: This relies on the fact that the balancer pings the config.mongos document every round.
+ */
+ var waitForAtLeastOneBalanceRound = function(mongosHost, timeoutMS) {
+ var mongos = new Mongo(mongosHost);
+ var configDB = mongos.getDB('config');
+
+ // Wait for ts to change twice because:
+ // 1st: for start of the balancing round.
+ // 2nd: for the start of the next round, which implies that the previous one has ended.
+ var waitForTSChangeNTimes = 2;
+ var lastPing = new Date(0);
+
+ assert.soon(
+ function() {
+ // Note: The balancer pings twice, once with { waiting: false } at the beginning
+ // and another { waiting: true } at the end. Poll for the negative edge since
+ // the smallest granularity should be a second, if for some reason the interval
+ // became less than a second, it can cause this to miss the negative edge and
+ // make it wait longer than it should.
+ var currentPing = configDB.mongos.findOne({_id: mongosHost, waiting: true});
+ if (currentPing == null) {
+ return false;
+ }
+
+ if (currentPing.ping.valueOf() != lastPing.valueOf()) {
+ waitForTSChangeNTimes--;
+ lastPing = currentPing.ping;
+ }
+
+ return waitForTSChangeNTimes <= 0;
+ },
+ 'Timed out waiting for mongos ping to change ' + waitForTSChangeNTimes + ' more times',
+ timeoutMS,
+ 500);
+ };
- return new HourAndMinute(newHour, minutes);
+ var st = new ShardingTest({shards: 2});
+ var configDB = st.s.getDB('config');
+ assert.commandWorked(configDB.adminCommand({
+ configureFailPoint: 'balancerRoundIntervalSetting',
+ mode: 'alwaysOn',
+ data: {sleepSecs: 1}
+ }));
+
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
+
+ // Disable balancer so it will not interfere with the chunk distribution setup.
+ st.stopBalancer();
+
+ for (var x = 0; x < 150; x += 10) {
+ configDB.adminCommand({split: 'test.user', middle: {_id: x}});
+ }
+
+ var shard0Chunks = configDB.chunks.find({ns: 'test.user', shard: 'shard0000'}).count();
+
+ var startDate = new Date();
+ var hourMinStart = new HourAndMinute(startDate.getHours(), startDate.getMinutes());
+ configDB.settings.update({_id: 'balancer'},
+ {
+ $set: {
+ activeWindow: {
+ start: hourMinStart.addHour(-2).toString(),
+ stop: hourMinStart.addHour(-1).toString()
+ },
+ stopped: false
+ }
+ },
+ true);
+
+ waitForAtLeastOneBalanceRound(st.s.host, 60 * 1000);
+
+ var shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: 'shard0000'}).count();
+ assert.eq(shard0Chunks, shard0ChunksAfter);
+
+ configDB.settings.update(
+ {_id: 'balancer'},
+ {
+ $set: {
+ activeWindow:
+ {start: hourMinStart.toString(), stop: hourMinStart.addHour(2).toString()}
+ }
},
+ true);
- /**
- * Returns a string representation that is compatible with the format for the balancer
- * window settings.
- */
- toString: function() {
- var minStr = (minutes < 10) ? ('0' + minutes) : ('' + minutes);
- var hourStr = (hour < 10) ? ('0' + hour) : ('' + hour);
- return hourStr + ':' + minStr;
- }
- };
-};
+ waitForAtLeastOneBalanceRound(st.s.host, 60 * 1000);
-/**
- * Waits until at least one balancing round has passed.
- *
- * Note: This relies on the fact that the balancer pings the config.mongos document every round.
- */
-var waitForAtLeastOneBalanceRound = function(mongosHost, timeoutMS) {
- var mongos = new Mongo(mongosHost);
- var configDB = mongos.getDB('config');
-
- // Wait for ts to change twice because:
- // 1st: for start of the balancing round.
- // 2nd: for the start of the next round, which implies that the previous one has ended.
- var waitForTSChangeNTimes = 2;
- var lastPing = new Date(0);
-
- assert.soon(function() {
- // Note: The balancer pings twice, once with { waiting: false } at the beginning
- // and another { waiting: true } at the end. Poll for the negative edge since
- // the smallest granurality should be a second, if for some reason the interval
- // became less than a second, it can cause this to miss the negative edge and
- // wake it wait longer than it should.
- var currentPing = configDB.mongos.findOne({ _id: mongosHost, waiting: true });
- if (currentPing == null) {
- return false;
- }
-
- if (currentPing.ping.valueOf() != lastPing.valueOf()) {
- waitForTSChangeNTimes--;
- lastPing = currentPing.ping;
- }
-
- return waitForTSChangeNTimes <= 0;
- }, 'Timed out waiting for mongos ping to change ' + waitForTSChangeNTimes + ' more times',
- timeoutMS, 500);
-};
-
-var st = new ShardingTest({ shards: 2 });
-var configDB = st.s.getDB('config');
-assert.commandWorked(configDB.adminCommand({ configureFailPoint: 'balancerRoundIntervalSetting',
- mode: 'alwaysOn',
- data: { sleepSecs: 1 }}));
-
-configDB.adminCommand({ enableSharding: 'test' });
-configDB.adminCommand({ shardCollection: 'test.user', key: { _id: 1 }});
-
-// Disable balancer so it will not interfere with the chunk distribution setup.
-st.stopBalancer();
-
-for (var x = 0; x < 150; x += 10) {
- configDB.adminCommand({ split: 'test.user', middle: { _id: x }});
-}
-
-var shard0Chunks = configDB.chunks.find({ ns: 'test.user', shard: 'shard0000' }).count();
-
-var startDate = new Date();
-var hourMinStart = new HourAndMinute(startDate.getHours(), startDate.getMinutes());
-configDB.settings.update({ _id: 'balancer' },
- {
- $set: {
- activeWindow: {
- start: hourMinStart.addHour(-2).toString(),
- stop: hourMinStart.addHour(-1).toString()
- },
- stopped: false
- }
- },
- true);
-
-waitForAtLeastOneBalanceRound(st.s.host, 60 * 1000);
-
-var shard0ChunksAfter = configDB.chunks.find({ ns: 'test.user', shard: 'shard0000' }).count();
-assert.eq(shard0Chunks, shard0ChunksAfter);
-
-configDB.settings.update({ _id: 'balancer' },
- {
- $set: {
- activeWindow: {
- start: hourMinStart.toString(),
- stop: hourMinStart.addHour(2).toString()
- }
- }
- },
- true);
-
-waitForAtLeastOneBalanceRound(st.s.host, 60 * 1000);
-
-shard0ChunksAfter = configDB.chunks.find({ ns: 'test.user', shard: 'shard0000' }).count();
-assert.neq(shard0Chunks, shard0ChunksAfter);
-
-st.stop();
+ shard0ChunksAfter = configDB.chunks.find({ns: 'test.user', shard: 'shard0000'}).count();
+ assert.neq(shard0Chunks, shard0ChunksAfter);
+
+ st.stop();
})();
diff --git a/jstests/noPassthrough/command_line_parsing.js b/jstests/noPassthrough/command_line_parsing.js
index 91aa2e5c077..7e6fcd8fcf6 100644
--- a/jstests/noPassthrough/command_line_parsing.js
+++ b/jstests/noPassthrough/command_line_parsing.js
@@ -4,27 +4,25 @@ var baseName = "jstests_slowNightly_command_line_parsing";
// test notablescan
var m = MongoRunner.runMongod({notablescan: ""});
-m.getDB( baseName ).getCollection( baseName ).save( {a:1} );
-assert.throws( function() { m.getDB( baseName ).getCollection( baseName ).find( {a:1} ).toArray(); } );
+m.getDB(baseName).getCollection(baseName).save({a: 1});
+assert.throws(function() {
+ m.getDB(baseName).getCollection(baseName).find({a: 1}).toArray();
+});
-// test config file
+// test config file
var m2 = MongoRunner.runMongod({config: "jstests/libs/testconfig"});
var m2expected = {
- "parsed" : {
- "config" : "jstests/libs/testconfig",
- "storage" : {
- "dbPath" : m2.dbpath
- },
- "net" : {
- "port" : m2.port
- },
- "help" : false,
- "version" : false,
- "sysinfo" : false
+ "parsed": {
+ "config": "jstests/libs/testconfig",
+ "storage": {"dbPath": m2.dbpath},
+ "net": {"port": m2.port},
+ "help": false,
+ "version": false,
+ "sysinfo": false
}
};
-var m2result = m2.getDB("admin").runCommand( "getCmdLineOpts" );
+var m2result = m2.getDB("admin").runCommand("getCmdLineOpts");
// remove variables that depend on the way the test is started.
delete m2result.parsed.nopreallocj;
@@ -32,26 +30,22 @@ delete m2result.parsed.setParameter;
delete m2result.parsed.storage.engine;
delete m2result.parsed.storage.wiredTiger;
delete m2result.parsed.storage.journal;
-assert.docEq( m2expected.parsed, m2result.parsed );
+assert.docEq(m2expected.parsed, m2result.parsed);
// test JSON config file
var m3 = MongoRunner.runMongod({config: "jstests/libs/testconfig"});
var m3expected = {
- "parsed" : {
- "config" : "jstests/libs/testconfig",
- "storage" : {
- "dbPath" : m3.dbpath
- },
- "net" : {
- "port" : m3.port
- },
- "help" : false,
- "version" : false,
- "sysinfo" : false
+ "parsed": {
+ "config": "jstests/libs/testconfig",
+ "storage": {"dbPath": m3.dbpath},
+ "net": {"port": m3.port},
+ "help": false,
+ "version": false,
+ "sysinfo": false
}
};
-var m3result = m3.getDB("admin").runCommand( "getCmdLineOpts" );
+var m3result = m3.getDB("admin").runCommand("getCmdLineOpts");
// remove variables that depend on the way the test is started.
delete m3result.parsed.nopreallocj;
@@ -59,4 +53,4 @@ delete m3result.parsed.setParameter;
delete m3result.parsed.storage.engine;
delete m3result.parsed.storage.wiredTiger;
delete m3result.parsed.storage.journal;
-assert.docEq( m3expected.parsed, m3result.parsed );
+assert.docEq(m3expected.parsed, m3result.parsed);
diff --git a/jstests/noPassthrough/commands_handle_kill.js b/jstests/noPassthrough/commands_handle_kill.js
index ba2de60de6d..b7ad6ea1bea 100644
--- a/jstests/noPassthrough/commands_handle_kill.js
+++ b/jstests/noPassthrough/commands_handle_kill.js
@@ -11,9 +11,11 @@
assert.writeOK(coll.insert({}));
// Enable a failpoint that causes plan executors to be killed immediately.
- assert.commandWorked(coll.getDB().adminCommand({configureFailPoint: "planExecutorAlwaysDead",
- namespace: coll.getFullName(),
- mode: "alwaysOn"}));
+ assert.commandWorked(coll.getDB().adminCommand({
+ configureFailPoint: "planExecutorAlwaysDead",
+ namespace: coll.getFullName(),
+ mode: "alwaysOn"
+ }));
var res;
@@ -48,30 +50,40 @@
assert(res.errmsg.indexOf("hit planExecutorAlwaysDead fail point") > -1);
// Build geo index.
- assert.commandWorked(coll.getDB().adminCommand({configureFailPoint: "planExecutorAlwaysDead",
- namespace: coll.getFullName(),
- mode: "off"}));
+ assert.commandWorked(coll.getDB().adminCommand({
+ configureFailPoint: "planExecutorAlwaysDead",
+ namespace: coll.getFullName(),
+ mode: "off"
+ }));
assert.commandWorked(coll.createIndex({geoField: "2dsphere"}));
- assert.commandWorked(coll.getDB().adminCommand({configureFailPoint: "planExecutorAlwaysDead",
- namespace: coll.getFullName(),
- mode: "alwaysOn"}));
+ assert.commandWorked(coll.getDB().adminCommand({
+ configureFailPoint: "planExecutorAlwaysDead",
+ namespace: coll.getFullName(),
+ mode: "alwaysOn"
+ }));
// geoNear command errors if plan executor is killed.
- res = db.runCommand({geoNear: collName,
- near: {type: "Point", coordinates: [0,0]}, spherical: true});
+ res = db.runCommand(
+ {geoNear: collName, near: {type: "Point", coordinates: [0, 0]}, spherical: true});
assert.commandFailed(res);
assert(res.errmsg.indexOf("hit planExecutorAlwaysDead fail point") > -1);
// group command errors if plan executor is killed.
- res = db.runCommand({group: {ns: coll.getFullName(),
- key: "_id",
- $reduce: function (curr, result) {},
- initial: {}}});
+ res = db.runCommand({
+ group: {
+ ns: coll.getFullName(),
+ key: "_id",
+ $reduce: function(curr, result) {},
+ initial: {}
+ }
+ });
assert.commandFailed(res);
assert(res.errmsg.indexOf("hit planExecutorAlwaysDead fail point") > -1);
// find throws if plan executor is killed.
- res = assert.throws(function() { coll.find().itcount(); });
+ res = assert.throws(function() {
+ coll.find().itcount();
+ });
assert(res.message.indexOf("hit planExecutorAlwaysDead fail point") > -1);
// update errors if plan executor is killed.
diff --git a/jstests/noPassthrough/count_helper_read_preference.js b/jstests/noPassthrough/count_helper_read_preference.js
index e2701139126..a049e586598 100644
--- a/jstests/noPassthrough/count_helper_read_preference.js
+++ b/jstests/noPassthrough/count_helper_read_preference.js
@@ -5,11 +5,15 @@
var commandsRan = [];
// Create a new DB object backed by a mock connection.
- function MockMongo() {}
+ function MockMongo() {
+ }
MockMongo.prototype = Mongo.prototype;
MockMongo.prototype.runCommand = function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts:opts});
- return {ok: 1, n: 100};
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
+ return {
+ ok: 1,
+ n: 100
+ };
};
var db = new DB(new MockMongo(), "test");
@@ -31,10 +35,8 @@
// Check that we have wrapped the command and attached the read preference.
assert.eq(commandsRan.length, 1);
- assert.docEq(commandsRan[0].cmd,
- {query: {count: "foo",
- fields: {},
- query: {}},
- $readPreference: {mode: "secondary"}});
+ assert.docEq(
+ commandsRan[0].cmd,
+ {query: {count: "foo", fields: {}, query: {}}, $readPreference: {mode: "secondary"}});
})();
diff --git a/jstests/noPassthrough/devnull.js b/jstests/noPassthrough/devnull.js
index 19c353cfba9..7a492a3225d 100644
--- a/jstests/noPassthrough/devnull.js
+++ b/jstests/noPassthrough/devnull.js
@@ -1,8 +1,8 @@
var mongo = MongoRunner.runMongod({smallfiles: "", storageEngine: "devnull"});
-db = mongo.getDB( "test" );
+db = mongo.getDB("test");
-res = db.foo.insert( { x : 1 } );
-assert.eq( 1, res.nInserted, tojson( res ) );
+res = db.foo.insert({x: 1});
+assert.eq(1, res.nInserted, tojson(res));
MongoRunner.stopMongod(mongo);
diff --git a/jstests/noPassthrough/dir_per_db_and_split.js b/jstests/noPassthrough/dir_per_db_and_split.js
index b402e0375f1..a9ddb0eb6d0 100644
--- a/jstests/noPassthrough/dir_per_db_and_split.js
+++ b/jstests/noPassthrough/dir_per_db_and_split.js
@@ -1,37 +1,25 @@
if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
-
var baseDir = "jstests_per_db_and_split_c_and_i";
var dbpath = MongoRunner.dataPath + baseDir + "/";
- var m = MongoRunner.runMongod({
- dbpath: dbpath,
- wiredTigerDirectoryForIndexes: '',
- directoryperdb: ''});
- db = m.getDB( "foo" );
- db.bar.insert( { x : 1 } );
- assert.eq( 1, db.bar.count() );
+ var m = MongoRunner.runMongod(
+ {dbpath: dbpath, wiredTigerDirectoryForIndexes: '', directoryperdb: ''});
+ db = m.getDB("foo");
+ db.bar.insert({x: 1});
+ assert.eq(1, db.bar.count());
- db.adminCommand( {fsync:1} );
+ db.adminCommand({fsync: 1});
- assert( listFiles( dbpath + "/foo/index" ).length > 0 );
- assert( listFiles( dbpath + "/foo/collection" ).length > 0 );
+ assert(listFiles(dbpath + "/foo/index").length > 0);
+ assert(listFiles(dbpath + "/foo/collection").length > 0);
MongoRunner.stopMongod(m.port);
// Subsequent attempts to start server using same dbpath but different
// wiredTigerDirectoryForIndexes and directoryperdb options should fail.
- assert.isnull(MongoRunner.runMongod({
- dbpath: dbpath,
- port: m.port,
- restart: true}));
- assert.isnull(MongoRunner.runMongod({
- dbpath: dbpath,
- port: m.port,
- restart: true,
- directoryperdb: ''}));
- assert.isnull(MongoRunner.runMongod({
- dbpath: dbpath,
- port: m.port,
- restart: true,
- wiredTigerDirectoryForIndexes: ''}));
+ assert.isnull(MongoRunner.runMongod({dbpath: dbpath, port: m.port, restart: true}));
+ assert.isnull(
+ MongoRunner.runMongod({dbpath: dbpath, port: m.port, restart: true, directoryperdb: ''}));
+ assert.isnull(MongoRunner.runMongod(
+ {dbpath: dbpath, port: m.port, restart: true, wiredTigerDirectoryForIndexes: ''}));
}
diff --git a/jstests/noPassthrough/directoryperdb.js b/jstests/noPassthrough/directoryperdb.js
index feabcb47a07..52d3d700006 100644
--- a/jstests/noPassthrough/directoryperdb.js
+++ b/jstests/noPassthrough/directoryperdb.js
@@ -13,42 +13,34 @@
var baseDir = "jstests_directoryperdb";
var dbpath = MongoRunner.dataPath + baseDir + "/";
- var isDirectoryPerDBSupported =
- jsTest.options().storageEngine == "mmapv1" ||
- jsTest.options().storageEngine == "wiredTiger" ||
- !jsTest.options().storageEngine;
+ var isDirectoryPerDBSupported = jsTest.options().storageEngine == "mmapv1" ||
+ jsTest.options().storageEngine == "wiredTiger" || !jsTest.options().storageEngine;
- var m = MongoRunner.runMongod({
- dbpath: dbpath,
- directoryperdb: ''});
+ var m = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: ''});
if (!isDirectoryPerDBSupported) {
assert.isnull(m, 'storage engine without directoryperdb support should fail to start up');
return;
- }
- else {
+ } else {
assert(m, 'storage engine with directoryperdb support failed to start up');
}
- var db = m.getDB( "foo" );
- db.bar.insert( { x : 1 } );
- assert.eq( 1, db.bar.count() );
+ var db = m.getDB("foo");
+ db.bar.insert({x: 1});
+ assert.eq(1, db.bar.count());
- db.adminCommand( {fsync:1} );
+ db.adminCommand({fsync: 1});
var dbpathFiles = listFiles(dbpath);
- var files = dbpathFiles.filter( function(z) {
- return z.name.endsWith( "/foo" );
- } );
- assert.eq(1, files.length,
- 'dbpath does not contain "foo" directory: ' + tojson(dbpathFiles));
+ var files = dbpathFiles.filter(function(z) {
+ return z.name.endsWith("/foo");
+ });
+ assert.eq(1, files.length, 'dbpath does not contain "foo" directory: ' + tojson(dbpathFiles));
- files = listFiles( files[0].name );
- assert( files.length > 0 );
+ files = listFiles(files[0].name);
+ assert(files.length > 0);
MongoRunner.stopMongod(m.port);
// Subsequent attempt to start server using same dbpath without directoryperdb should fail.
- assert.isnull(MongoRunner.runMongod({
- dbpath: dbpath,
- restart: true}));
+ assert.isnull(MongoRunner.runMongod({dbpath: dbpath, restart: true}));
}());
diff --git a/jstests/noPassthrough/exit_logging.js b/jstests/noPassthrough/exit_logging.js
index c40df9152e2..3fe7d923d0e 100644
--- a/jstests/noPassthrough/exit_logging.js
+++ b/jstests/noPassthrough/exit_logging.js
@@ -2,22 +2,19 @@
* Tests that various forms of normal and abnormal shutdown write to the log files as expected.
*/
-(function () {
+(function() {
function makeShutdownByCrashFn(crashHow) {
- return function (conn) {
+ return function(conn) {
var admin = conn.getDB("admin");
- assert.commandWorked(admin.runCommand({
- configureFailPoint: "crashOnShutdown",
- mode: "alwaysOn",
- data: { how: crashHow }
- }));
+ assert.commandWorked(admin.runCommand(
+ {configureFailPoint: "crashOnShutdown", mode: "alwaysOn", data: {how: crashHow}}));
admin.shutdownServer();
};
}
function makeRegExMatchFn(pattern) {
- return function (text) {
+ return function(text) {
return pattern.test(text);
};
}
@@ -28,48 +25,52 @@
function checkOutput() {
var logContents = "";
- assert.soon(() => {
- logContents = rawMongoProgramOutput();
- return matchFn(logContents);
- }, function() {
- // We can't just return a string because it will be well over the max line length.
- // So we just print manually.
- print("================ BEGIN LOG CONTENTS ==================");
- logContents.split(/\n/).forEach((line) => { print(line); });
- print("================ END LOG CONTENTS =====================");
- return "";
- }, 30000);
+ assert.soon(() =>
+ {
+ logContents = rawMongoProgramOutput();
+ return matchFn(logContents);
+ },
+ function() {
+ // We can't just return a string because it will be well over the max
+ // line length.
+ // So we just print manually.
+ print("================ BEGIN LOG CONTENTS ==================");
+ logContents.split(/\n/).forEach((line) => {
+ print(line);
+ });
+ print("================ END LOG CONTENTS =====================");
+ return "";
+ },
+ 30000);
}
try {
crashFn(conn);
checkOutput();
- }
- finally {
- launcher.stop(conn, undefined, { allowedExitCodes: [ expectedExitCode ] });
+ } finally {
+ launcher.stop(conn, undefined, {allowedExitCodes: [expectedExitCode]});
}
}
function runAllTests(launcher) {
const SIGSEGV = 11;
const SIGABRT = 6;
- testShutdownLogging(
- launcher,
- function (conn) { conn.getDB('admin').shutdownServer(); },
- makeRegExMatchFn(/shutdown command received[\s\S]*dbexit:/),
- MongoRunner.EXIT_CLEAN);
-
- testShutdownLogging(
- launcher,
- makeShutdownByCrashFn('fault'),
- makeRegExMatchFn(/Invalid access at address[\s\S]*printStackTrace/),
- -SIGSEGV);
-
- testShutdownLogging(
- launcher,
- makeShutdownByCrashFn('abort'),
- makeRegExMatchFn(/Got signal[\s\S]*printStackTrace/),
- -SIGABRT);
+ testShutdownLogging(launcher,
+ function(conn) {
+ conn.getDB('admin').shutdownServer();
+ },
+ makeRegExMatchFn(/shutdown command received[\s\S]*dbexit:/),
+ MongoRunner.EXIT_CLEAN);
+
+ testShutdownLogging(launcher,
+ makeShutdownByCrashFn('fault'),
+ makeRegExMatchFn(/Invalid access at address[\s\S]*printStackTrace/),
+ -SIGSEGV);
+
+ testShutdownLogging(launcher,
+ makeShutdownByCrashFn('abort'),
+ makeRegExMatchFn(/Got signal[\s\S]*printStackTrace/),
+ -SIGABRT);
}
if (_isWindows()) {
@@ -86,8 +87,10 @@
print("********************\nTesting exit logging in mongod\n********************");
runAllTests({
- start: function (opts) {
- var actualOpts = { nojournal: "" };
+ start: function(opts) {
+ var actualOpts = {
+ nojournal: ""
+ };
Object.extend(actualOpts, opts);
return MongoRunner.runMongod(actualOpts);
},
@@ -99,13 +102,12 @@
(function testMongos() {
print("********************\nTesting exit logging in mongos\n********************");
- var st = new ShardingTest({
- shards: 1,
- other: { shardOptions: { nojournal: "" } }
- });
+ var st = new ShardingTest({shards: 1, other: {shardOptions: {nojournal: ""}}});
var mongosLauncher = {
- start: function (opts) {
- var actualOpts = { configdb: st._configDB };
+ start: function(opts) {
+ var actualOpts = {
+ configdb: st._configDB
+ };
Object.extend(actualOpts, opts);
return MongoRunner.runMongos(actualOpts);
},
diff --git a/jstests/noPassthrough/ftdc_setparam.js b/jstests/noPassthrough/ftdc_setparam.js
index cc9973d7565..73e3f8720a6 100644
--- a/jstests/noPassthrough/ftdc_setparam.js
+++ b/jstests/noPassthrough/ftdc_setparam.js
@@ -1,18 +1,20 @@
// validate command line ftdc parameter parsing
-(function () {
+(function() {
'use strict';
var m = MongoRunner.runMongod({setParameter: "diagnosticDataCollectionPeriodMillis=101"});
// Check the defaults are correct
//
function getparam(field) {
- var q = { getParameter : 1 };
+ var q = {
+ getParameter: 1
+ };
q[field] = 1;
- var ret = m.getDB("admin").runCommand( q );
+ var ret = m.getDB("admin").runCommand(q);
return ret[field];
}
assert.eq(getparam("diagnosticDataCollectionPeriodMillis"), 101);
-}) ();
+})();
diff --git a/jstests/noPassthrough/geo_full.js b/jstests/noPassthrough/geo_full.js
index 2fe9d8d7438..505a0efab33 100644
--- a/jstests/noPassthrough/geo_full.js
+++ b/jstests/noPassthrough/geo_full.js
@@ -2,84 +2,89 @@
// Integration test of the geo code
//
// Basically, this tests adds a random number of docs with a random number of points,
-// given a 2d environment of random precision which is either randomly earth-like or of
+// given a 2d environment of random precision which is either randomly earth-like or of
// random bounds, and indexes these points after a random amount of points have been added
// with a random number of additional fields which correspond to whether the documents are
-// in randomly generated circular, spherical, box, and box-polygon shapes (and exact),
-// queried randomly from a set of query types. Each point is randomly either and object
+// in randomly generated circular, spherical, box, and box-polygon shapes (and exact),
+// queried randomly from a set of query types. Each point is randomly either an object
// or array, and all points and document data fields are nested randomly in arrays (or not).
//
// We approximate the user here as a random function :-)
//
-// These random point fields can then be tested against all types of geo queries using these random shapes.
-//
+// These random point fields can then be tested against all types of geo queries using these random
+// shapes.
+//
// Tests can be easily reproduced by getting the test number from the output directly before a
// test fails, and hard-wiring that as the test number.
//
-load( "jstests/libs/slow_weekly_util.js" );
-testServer = new SlowWeeklyMongod( "geo_full" );
-db = testServer.getDB( "test" );
+load("jstests/libs/slow_weekly_util.js");
+testServer = new SlowWeeklyMongod("geo_full");
+db = testServer.getDB("test");
-var randEnvironment = function(){
+var randEnvironment = function() {
// Normal earth environment
- if( Random.rand() < 0.5 ){
- return { max : 180,
- min : -180,
- bits : Math.floor( Random.rand() * 32 ) + 1,
- earth : true,
- bucketSize : 360 / ( 4 * 1024 * 1024 * 1024 ) };
+ if (Random.rand() < 0.5) {
+ return {
+ max: 180,
+ min: -180,
+ bits: Math.floor(Random.rand() * 32) + 1,
+ earth: true,
+ bucketSize: 360 / (4 * 1024 * 1024 * 1024)
+ };
}
- var scales = [ 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000 ];
- var scale = scales[ Math.floor( Random.rand() * scales.length ) ];
+ var scales = [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000];
+ var scale = scales[Math.floor(Random.rand() * scales.length)];
var offset = Random.rand() * scale;
var max = Random.rand() * scale + offset;
- var min = - Random.rand() * scale + offset;
- var bits = Math.floor( Random.rand() * 32 ) + 1;
- var bits = Math.floor( Random.rand() * 32 ) + 1;
+ var min = -Random.rand() * scale + offset;
+ var bits = Math.floor(Random.rand() * 32) + 1;
+ var bits = Math.floor(Random.rand() * 32) + 1;
var range = max - min;
- var bucketSize = range / ( 4 * 1024 * 1024 * 1024 );
-
- return { max : max,
- min : min,
- bits : bits,
- earth : false,
- bucketSize : bucketSize };
+ var bucketSize = range / (4 * 1024 * 1024 * 1024);
+
+ return {
+ max: max,
+ min: min,
+ bits: bits,
+ earth: false,
+ bucketSize: bucketSize
+ };
};
-var randPoint = function( env, query ) {
+var randPoint = function(env, query) {
- if( query && Random.rand() > 0.5 )
+ if (query && Random.rand() > 0.5)
return query.exact;
- if( env.earth )
- return [ Random.rand() * 360 - 180, Random.rand() * 180 - 90 ];
+ if (env.earth)
+ return [Random.rand() * 360 - 180, Random.rand() * 180 - 90];
var range = env.max - env.min;
- return [ Random.rand() * range + env.min, Random.rand() * range + env.min ];
+ return [Random.rand() * range + env.min, Random.rand() * range + env.min];
};
-var randLocType = function( loc, wrapIn ){
- return randLocTypes( [ loc ], wrapIn )[0];
+var randLocType = function(loc, wrapIn) {
+ return randLocTypes([loc], wrapIn)[0];
};
-var randLocTypes = function( locs, wrapIn ) {
+var randLocTypes = function(locs, wrapIn) {
var rLocs = [];
- for( var i = 0; i < locs.length; i++ ){
- rLocs.push( locs[i] );
+ for (var i = 0; i < locs.length; i++) {
+ rLocs.push(locs[i]);
}
- if( wrapIn ){
+ if (wrapIn) {
var wrappedLocs = [];
- for( var i = 0; i < rLocs.length; i++ ){
+ for (var i = 0; i < rLocs.length; i++) {
var wrapper = {};
wrapper[wrapIn] = rLocs[i];
- wrappedLocs.push( wrapper );
+ wrappedLocs.push(wrapper);
}
return wrappedLocs;
@@ -90,24 +95,30 @@ var randLocTypes = function( locs, wrapIn ) {
var randDataType = function() {
- var scales = [ 1, 10, 100, 1000, 10000 ];
- var docScale = scales[ Math.floor( Random.rand() * scales.length ) ];
- var locScale = scales[ Math.floor( Random.rand() * scales.length ) ];
+ var scales = [1, 10, 100, 1000, 10000];
+ var docScale = scales[Math.floor(Random.rand() * scales.length)];
+ var locScale = scales[Math.floor(Random.rand() * scales.length)];
var numDocs = 40000;
var maxLocs = 40000;
// Make sure we don't blow past our test resources
- while( numDocs * maxLocs > 40000 ){
- numDocs = Math.floor( Random.rand() * docScale ) + 1;
- maxLocs = Math.floor( Random.rand() * locScale ) + 1;
+ while (numDocs * maxLocs > 40000) {
+ numDocs = Math.floor(Random.rand() * docScale) + 1;
+ maxLocs = Math.floor(Random.rand() * locScale) + 1;
}
- return { numDocs : numDocs,
- maxLocs : maxLocs };
+ return {
+ numDocs: numDocs,
+ maxLocs: maxLocs
+ };
};
-function deg2rad(arg) { return arg * Math.PI / 180.0; }
-function rad2deg(arg) { return arg * 180.0 / Math.PI; }
+function deg2rad(arg) {
+ return arg * Math.PI / 180.0;
+}
+function rad2deg(arg) {
+ return arg * 180.0 / Math.PI;
+}
function computexscandist(latDegrees, maxDistDegrees) {
// See s2cap.cc
@@ -151,200 +162,220 @@ function pointIsOK(startPoint, radius, env) {
return (startPoint[0] + xscandist < 180) && (startPoint[0] - xscandist > -180);
}
-var randQuery = function( env ) {
- var center = randPoint( env );
+var randQuery = function(env) {
+ var center = randPoint(env);
var sphereRadius = -1;
var sphereCenter = null;
- if( env.earth ){
+ if (env.earth) {
// Get a start point that doesn't require wrapping
// TODO: Are we a bit too aggressive with wrapping issues?
var i;
- for( i = 0; i < 5; i++ ){
+ for (i = 0; i < 5; i++) {
sphereRadius = Random.rand() * 45 * Math.PI / 180;
- sphereCenter = randPoint( env );
- if (pointIsOK(sphereCenter, sphereRadius, env)) { break; }
+ sphereCenter = randPoint(env);
+ if (pointIsOK(sphereCenter, sphereRadius, env)) {
+ break;
+ }
}
- if( i == 5 ) sphereRadius = -1;
-
+ if (i == 5)
+ sphereRadius = -1;
}
- var box = [ randPoint( env ), randPoint( env ) ];
+ var box = [randPoint(env), randPoint(env)];
- var boxPoly = [[ box[0][0], box[0][1] ],
- [ box[0][0], box[1][1] ],
- [ box[1][0], box[1][1] ],
- [ box[1][0], box[0][1] ] ];
+ var boxPoly = [
+ [box[0][0], box[0][1]],
+ [box[0][0], box[1][1]],
+ [box[1][0], box[1][1]],
+ [box[1][0], box[0][1]]
+ ];
- if( box[0][0] > box[1][0] ){
+ if (box[0][0] > box[1][0]) {
var swap = box[0][0];
box[0][0] = box[1][0];
box[1][0] = swap;
}
- if( box[0][1] > box[1][1] ){
+ if (box[0][1] > box[1][1]) {
var swap = box[0][1];
box[0][1] = box[1][1];
box[1][1] = swap;
}
- return { center : center,
- radius : box[1][0] - box[0][0],
- exact : randPoint( env ),
- sphereCenter : sphereCenter,
- sphereRadius : sphereRadius,
- box : box,
- boxPoly : boxPoly };
+ return {
+ center: center,
+ radius: box[1][0] - box[0][0],
+ exact: randPoint(env),
+ sphereCenter: sphereCenter,
+ sphereRadius: sphereRadius,
+ box: box,
+ boxPoly: boxPoly
+ };
};
var resultTypes = {
-"exact" : function( loc ){
- return query.exact[0] == loc[0] && query.exact[1] == loc[1];
-},
-"center" : function( loc ){
- return Geo.distance( query.center, loc ) <= query.radius;
-},
-"box" : function( loc ){
- return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
- loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
-
-},
-"sphere" : function( loc ){
- return ( query.sphereRadius >= 0 ?
- ( Geo.sphereDistance( query.sphereCenter, loc ) <= query.sphereRadius ) : false );
-},
-"poly" : function( loc ){
- return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
- loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
-}};
-
-var queryResults = function( locs, query, results ){
-
- if( ! results["center"] ){
- for( var type in resultTypes ){
+ "exact": function(loc) {
+ return query.exact[0] == loc[0] && query.exact[1] == loc[1];
+ },
+ "center": function(loc) {
+ return Geo.distance(query.center, loc) <= query.radius;
+ },
+ "box": function(loc) {
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
+
+ },
+ "sphere": function(loc) {
+ return (query.sphereRadius >= 0
+ ? (Geo.sphereDistance(query.sphereCenter, loc) <= query.sphereRadius)
+ : false);
+ },
+ "poly": function(loc) {
+ return loc[0] >= query.box[0][0] && loc[0] <= query.box[1][0] &&
+ loc[1] >= query.box[0][1] && loc[1] <= query.box[1][1];
+ }
+};
+
+var queryResults = function(locs, query, results) {
+
+ if (!results["center"]) {
+ for (var type in resultTypes) {
results[type] = {
- docsIn : 0,
- docsOut : 0,
- locsIn : 0,
- locsOut : 0
+ docsIn: 0,
+ docsOut: 0,
+ locsIn: 0,
+ locsOut: 0
};
}
}
var indResults = {};
- for( var type in resultTypes ){
+ for (var type in resultTypes) {
indResults[type] = {
- docIn : false,
- locsIn : 0,
- locsOut : 0
+ docIn: false,
+ locsIn: 0,
+ locsOut: 0
};
}
- for( var type in resultTypes ){
-
+ for (var type in resultTypes) {
var docIn = false;
- for( var i = 0; i < locs.length; i++ ){
- if( resultTypes[type]( locs[i] ) ){
+ for (var i = 0; i < locs.length; i++) {
+ if (resultTypes[type](locs[i])) {
results[type].locsIn++;
indResults[type].locsIn++;
indResults[type].docIn = true;
- }
- else{
+ } else {
results[type].locsOut++;
indResults[type].locsOut++;
}
}
- if( indResults[type].docIn ) results[type].docsIn++;
- else results[type].docsOut++;
-
+ if (indResults[type].docIn)
+ results[type].docsIn++;
+ else
+ results[type].docsOut++;
}
return indResults;
};
-var randQueryAdditions = function( doc, indResults ){
+var randQueryAdditions = function(doc, indResults) {
- for( var type in resultTypes ){
+ for (var type in resultTypes) {
var choice = Random.rand();
- if( Random.rand() < 0.25 )
- doc[type] = ( indResults[type].docIn ? { docIn : "yes" } : { docIn : "no" } );
- else if( Random.rand() < 0.5 )
- doc[type] = ( indResults[type].docIn ? { docIn : [ "yes" ] } : { docIn : [ "no" ] } );
- else if( Random.rand() < 0.75 )
- doc[type] = ( indResults[type].docIn ? [ { docIn : "yes" } ] : [ { docIn : "no" } ] );
+ if (Random.rand() < 0.25)
+ doc[type] = (indResults[type].docIn ? {docIn: "yes"} : {docIn: "no"});
+ else if (Random.rand() < 0.5)
+ doc[type] = (indResults[type].docIn ? {docIn: ["yes"]} : {docIn: ["no"]});
+ else if (Random.rand() < 0.75)
+ doc[type] = (indResults[type].docIn ? [{docIn: "yes"}] : [{docIn: "no"}]);
else
- doc[type] = ( indResults[type].docIn ? [{ docIn: [ "yes" ] }] : [{ docIn: [ "no" ] }]);
+ doc[type] = (indResults[type].docIn ? [{docIn: ["yes"]}] : [{docIn: ["no"]}]);
}
};
-var randIndexAdditions = function( indexDoc ){
-
- for( var type in resultTypes ){
+var randIndexAdditions = function(indexDoc) {
- if( Random.rand() < 0.5 ) continue;
+ for (var type in resultTypes) {
+ if (Random.rand() < 0.5)
+ continue;
var choice = Random.rand();
- if( Random.rand() < 0.5 )
+ if (Random.rand() < 0.5)
indexDoc[type] = 1;
else
indexDoc[type + ".docIn"] = 1;
}
};
-var randYesQuery = function(){
+var randYesQuery = function() {
- var choice = Math.floor( Random.rand() * 7 );
- if( choice == 0 )
- return { $ne : "no" };
- else if( choice == 1 )
+ var choice = Math.floor(Random.rand() * 7);
+ if (choice == 0)
+ return {
+ $ne: "no"
+ };
+ else if (choice == 1)
return "yes";
- else if( choice == 2 )
+ else if (choice == 2)
return /^yes/;
- else if( choice == 3 )
- return { $in : [ "good", "yes", "ok" ] };
- else if( choice == 4 )
- return { $exists : true };
- else if( choice == 5 )
- return { $nin : [ "bad", "no", "not ok" ] };
- else if( choice == 6 )
- return { $not : /^no/ };
+ else if (choice == 3)
+ return {
+ $in: ["good", "yes", "ok"]
+ };
+ else if (choice == 4)
+ return {
+ $exists: true
+ };
+ else if (choice == 5)
+ return {
+ $nin: ["bad", "no", "not ok"]
+ };
+ else if (choice == 6)
+ return {
+ $not: /^no/
+ };
};
-var locArray = function( loc ){
- if( loc.x ) return [ loc.x, loc.y ];
- if( ! loc.length ) return [ loc[0], loc[1] ];
+var locArray = function(loc) {
+ if (loc.x)
+ return [loc.x, loc.y];
+ if (!loc.length)
+ return [loc[0], loc[1]];
return loc;
};
-var locsArray = function( locs ){
- if( locs.loc ){
+var locsArray = function(locs) {
+ if (locs.loc) {
arr = [];
- for( var i = 0; i < locs.loc.length; i++ ) arr.push( locArray( locs.loc[i] ) );
+ for (var i = 0; i < locs.loc.length; i++)
+ arr.push(locArray(locs.loc[i]));
return arr;
- }
- else{
+ } else {
arr = [];
- for( var i = 0; i < locs.length; i++ ) arr.push( locArray( locs[i].loc ) );
+ for (var i = 0; i < locs.length; i++)
+ arr.push(locArray(locs[i].loc));
return arr;
}
};
-var minBoxSize = function( env, box ){
- return env.bucketSize * Math.pow( 2, minBucketScale( env, box ) );
+var minBoxSize = function(env, box) {
+ return env.bucketSize * Math.pow(2, minBucketScale(env, box));
};
-var minBucketScale = function( env, box ){
+var minBucketScale = function(env, box) {
- if( box.length && box[0].length )
- box = [ box[0][0] - box[1][0], box[0][1] - box[1][1] ];
+ if (box.length && box[0].length)
+ box = [box[0][0] - box[1][0], box[0][1] - box[1][1]];
- if( box.length )
- box = Math.max( box[0], box[1] );
+ if (box.length)
+ box = Math.max(box[0], box[1]);
- print( box );
- print( env.bucketSize );
+ print(box);
+ print(env.bucketSize);
- return Math.ceil( Math.log( box / env.bucketSize ) / Math.log( 2 ) );
+ return Math.ceil(Math.log(box / env.bucketSize) / Math.log(2));
};
@@ -355,196 +386,232 @@ var numTests = 100;
// each individual test will be reproducible given
// that seed and test number
var seed = new Date().getTime();
-//seed = 175 + 288 + 12
+// seed = 175 + 288 + 12
-for ( var test = 0; test < numTests; test++ ) {
-
- Random.srand( seed + test );
- //Random.srand( 42240 )
- //Random.srand( 7344 )
+for (var test = 0; test < numTests; test++) {
+ Random.srand(seed + test);
+ // Random.srand( 42240 )
+ // Random.srand( 7344 )
var t = db.testAllGeo;
t.drop();
- print( "Generating test environment #" + test );
+ print("Generating test environment #" + test);
var env = randEnvironment();
- //env.bits = 11
- var query = randQuery( env );
+ // env.bits = 11
+ var query = randQuery(env);
var data = randDataType();
- //data.numDocs = 5; data.maxLocs = 1;
- var paddingSize = Math.floor( Random.rand() * 10 + 1 );
+ // data.numDocs = 5; data.maxLocs = 1;
+ var paddingSize = Math.floor(Random.rand() * 10 + 1);
var results = {};
var totalPoints = 0;
- print( "Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs + " locs " );
+ print("Calculating target results for " + data.numDocs + " docs with max " + data.maxLocs +
+ " locs ");
var bulk = t.initializeUnorderedBulkOp();
- for ( var i = 0; i < data.numDocs; i++ ) {
- var numLocs = Math.floor( Random.rand() * data.maxLocs + 1 );
+ for (var i = 0; i < data.numDocs; i++) {
+ var numLocs = Math.floor(Random.rand() * data.maxLocs + 1);
totalPoints += numLocs;
var multiPoint = [];
- for ( var p = 0; p < numLocs; p++ ) {
- var point = randPoint( env, query );
- multiPoint.push( point );
+ for (var p = 0; p < numLocs; p++) {
+ var point = randPoint(env, query);
+ multiPoint.push(point);
}
- var indResults = queryResults( multiPoint, query, results );
+ var indResults = queryResults(multiPoint, query, results);
var doc;
// Nest the keys differently
- if( Random.rand() < 0.5 )
- doc = { locs : { loc : randLocTypes( multiPoint ) } };
+ if (Random.rand() < 0.5)
+ doc = {
+ locs: {loc: randLocTypes(multiPoint)}
+ };
else
- doc = { locs : randLocTypes( multiPoint, "loc" ) };
+ doc = {
+ locs: randLocTypes(multiPoint, "loc")
+ };
- randQueryAdditions( doc, indResults );
+ randQueryAdditions(doc, indResults);
doc._id = i;
- bulk.insert( doc );
+ bulk.insert(doc);
}
assert.writeOK(bulk.execute());
- var indexDoc = { "locs.loc" : "2d" };
- randIndexAdditions( indexDoc );
- t.ensureIndex( indexDoc, env );
- assert.isnull( db.getLastError() );
+ var indexDoc = {
+ "locs.loc": "2d"
+ };
+ randIndexAdditions(indexDoc);
+ t.ensureIndex(indexDoc, env);
+ assert.isnull(db.getLastError());
var padding = "x";
- for( var i = 0; i < paddingSize; i++ ) padding = padding + padding;
+ for (var i = 0; i < paddingSize; i++)
+ padding = padding + padding;
- print( padding );
+ print(padding);
- printjson( { seed : seed,
- test: test,
- env : env,
- query : query,
- data : data,
- results : results,
- paddingSize : paddingSize } );
+ printjson({
+ seed: seed,
+ test: test,
+ env: env,
+ query: query,
+ data: data,
+ results: results,
+ paddingSize: paddingSize
+ });
// exact
- print( "Exact query..." );
- assert.eq( results.exact.docsIn, t.find( { "locs.loc" : randLocType( query.exact ), "exact.docIn" : randYesQuery() } ).count() );
+ print("Exact query...");
+ assert.eq(
+ results.exact.docsIn,
+ t.find({"locs.loc": randLocType(query.exact), "exact.docIn": randYesQuery()}).count());
// $center
- print( "Center query..." );
- print( "Min box : " + minBoxSize( env, query.radius ) );
- assert.eq( results.center.docsIn, t.find( { "locs.loc" : { $within : { $center : [ query.center, query.radius ], $uniqueDocs : 1 } }, "center.docIn" : randYesQuery() } ).count() );
-
- print( "Center query update..." );
- var res = t.update({ "locs.loc": { $within: { $center: [ query.center, query.radius ],
- $uniqueDocs: true }},
- "center.docIn": randYesQuery() },
- { $set: { centerPaddingA: padding }}, false, true);
- assert.eq( results.center.docsIn, res.nModified );
-
- if( query.sphereRadius >= 0 ){
-
- print( "Center sphere query...");
+ print("Center query...");
+ print("Min box : " + minBoxSize(env, query.radius));
+ assert.eq(results.center.docsIn,
+ t.find({
+ "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: 1}},
+ "center.docIn": randYesQuery()
+ }).count());
+
+ print("Center query update...");
+ var res = t.update(
+ {
+ "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: true}},
+ "center.docIn": randYesQuery()
+ },
+ {$set: {centerPaddingA: padding}},
+ false,
+ true);
+ assert.eq(results.center.docsIn, res.nModified);
+
+ if (query.sphereRadius >= 0) {
+ print("Center sphere query...");
// $centerSphere
- assert.eq( results.sphere.docsIn, t.find( { "locs.loc" : { $within : { $centerSphere : [ query.sphereCenter, query.sphereRadius ] } }, "sphere.docIn" : randYesQuery() } ).count() );
-
- print( "Center sphere query update..." );
- res = t.update({ "locs.loc": { $within: {
- $centerSphere: [ query.sphereCenter, query.sphereRadius ],
- $uniqueDocs: true } },
- "sphere.docIn" : randYesQuery() },
- { $set: { spherePaddingA: padding } }, false, true);
- assert.eq( results.sphere.docsIn, res.nModified );
+ assert.eq(
+ results.sphere.docsIn,
+ t.find({
+ "locs.loc": {$within: {$centerSphere: [query.sphereCenter, query.sphereRadius]}},
+ "sphere.docIn": randYesQuery()
+ }).count());
+
+ print("Center sphere query update...");
+ res = t.update(
+ {
+ "locs.loc": {
+ $within: {
+ $centerSphere: [query.sphereCenter, query.sphereRadius],
+ $uniqueDocs: true
+ }
+ },
+ "sphere.docIn": randYesQuery()
+ },
+ {$set: {spherePaddingA: padding}},
+ false,
+ true);
+ assert.eq(results.sphere.docsIn, res.nModified);
}
// $box
- print( "Box query..." );
- assert.eq( results.box.docsIn, t.find( { "locs.loc" : { $within : { $box : query.box, $uniqueDocs : true } }, "box.docIn" : randYesQuery() } ).count() );
+ print("Box query...");
+ assert.eq(results.box.docsIn,
+ t.find({
+ "locs.loc": {$within: {$box: query.box, $uniqueDocs: true}},
+ "box.docIn": randYesQuery()
+ }).count());
// $polygon
- print( "Polygon query..." );
- assert.eq( results.poly.docsIn, t.find( { "locs.loc" : { $within : { $polygon : query.boxPoly } }, "poly.docIn" : randYesQuery() } ).count() );
+ print("Polygon query...");
+ assert.eq(
+ results.poly.docsIn,
+ t.find({"locs.loc": {$within: {$polygon: query.boxPoly}}, "poly.docIn": randYesQuery()})
+ .count());
var defaultDocLimit = 100;
// $near
- print( "Near query..." );
- assert.eq( results.center.docsIn,
- t.find( { "locs.loc" : { $near : query.center,
- $maxDistance : query.radius } } ).count( true ),
- "Near query: center: " + query.center +
- "; radius: " + query.radius +
- "; docs: " + results.center.docsIn +
- "; locs: " + results.center.locsIn );
-
- if( query.sphereRadius >= 0 ){
- print( "Near sphere query...");
+ print("Near query...");
+ assert.eq(results.center.docsIn,
+ t.find({"locs.loc": {$near: query.center, $maxDistance: query.radius}}).count(true),
+ "Near query: center: " + query.center + "; radius: " + query.radius + "; docs: " +
+ results.center.docsIn + "; locs: " + results.center.locsIn);
+
+ if (query.sphereRadius >= 0) {
+ print("Near sphere query...");
// $centerSphere
- assert.eq( results.sphere.docsIn,
- t.find( { "locs.loc" : { $nearSphere : query.sphereCenter,
- $maxDistance : query.sphereRadius } } ).count( true ),
- "Near sphere query: sphere center: " + query.sphereCenter +
- "; radius: " + query.sphereRadius +
- "; docs: " + results.sphere.docsIn + "; locs: " + results.sphere.locsIn );
+ assert.eq(
+ results.sphere.docsIn,
+ t.find({
+ "locs.loc": {$nearSphere: query.sphereCenter, $maxDistance: query.sphereRadius}
+ }).count(true),
+ "Near sphere query: sphere center: " + query.sphereCenter + "; radius: " +
+ query.sphereRadius + "; docs: " + results.sphere.docsIn + "; locs: " +
+ results.sphere.locsIn);
}
// geoNear
// results limited by size of objects
- if( data.maxLocs < defaultDocLimit ){
-
+ if (data.maxLocs < defaultDocLimit) {
// GeoNear query
- print( "GeoNear query..." );
+ print("GeoNear query...");
// GeoNear command has a default doc limit 100.
- assert.eq( Math.min( defaultDocLimit, results.center.docsIn ),
- t.getDB().runCommand( { geoNear : "testAllGeo", near : query.center,
- maxDistance : query.radius } ).results.length,
- "GeoNear query: center: " + query.center +
- "; radius: " + query.radius +
- "; docs: " + results.center.docsIn + "; locs: " + results.center.locsIn );
-
-
- var num = Math.min( 2* defaultDocLimit, 2 * results.center.docsIn);
-
- var output = db.runCommand( {
- geoNear : "testAllGeo",
- near : query.center,
- maxDistance : query.radius ,
- includeLocs : true,
- num : num } ).results;
-
- assert.eq( Math.min( num, results.center.docsIn ),
- output.length,
- "GeoNear query with limit of " + num +
- ": center: " + query.center +
- "; radius: " + query.radius +
- "; docs: " + results.center.docsIn + "; locs: " + results.center.locsIn );
+ assert.eq(
+ Math.min(defaultDocLimit, results.center.docsIn),
+ t.getDB()
+ .runCommand({geoNear: "testAllGeo", near: query.center, maxDistance: query.radius})
+ .results.length,
+ "GeoNear query: center: " + query.center + "; radius: " + query.radius + "; docs: " +
+ results.center.docsIn + "; locs: " + results.center.locsIn);
+
+ var num = Math.min(2 * defaultDocLimit, 2 * results.center.docsIn);
+
+ var output = db.runCommand({
+ geoNear: "testAllGeo",
+ near: query.center,
+ maxDistance: query.radius,
+ includeLocs: true,
+ num: num
+ }).results;
+
+ assert.eq(Math.min(num, results.center.docsIn),
+ output.length,
+ "GeoNear query with limit of " + num + ": center: " + query.center +
+ "; radius: " + query.radius + "; docs: " + results.center.docsIn +
+ "; locs: " + results.center.locsIn);
var distance = 0;
- for ( var i = 0; i < output.length; i++ ) {
+ for (var i = 0; i < output.length; i++) {
var retDistance = output[i].dis;
- var retLoc = locArray( output[i].loc );
+ var retLoc = locArray(output[i].loc);
- var arrLocs = locsArray( output[i].obj.locs );
+ var arrLocs = locsArray(output[i].obj.locs);
- assert.contains( retLoc, arrLocs );
+ assert.contains(retLoc, arrLocs);
var distInObj = false;
- for ( var j = 0; j < arrLocs.length && distInObj == false; j++ ) {
- var newDistance = Geo.distance( locArray( query.center ) , arrLocs[j] );
- distInObj = ( newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001 );
+ for (var j = 0; j < arrLocs.length && distInObj == false; j++) {
+ var newDistance = Geo.distance(locArray(query.center), arrLocs[j]);
+ distInObj =
+ (newDistance >= retDistance - 0.0001 && newDistance <= retDistance + 0.0001);
}
- assert( distInObj );
- assert.between( retDistance - 0.0001 , Geo.distance( locArray( query.center ), retLoc ), retDistance + 0.0001 );
- assert.lte( retDistance, query.radius );
- assert.gte( retDistance, distance );
+ assert(distInObj);
+ assert.between(retDistance - 0.0001,
+ Geo.distance(locArray(query.center), retLoc),
+ retDistance + 0.0001);
+ assert.lte(retDistance, query.radius);
+ assert.gte(retDistance, distance);
distance = retDistance;
}
-
}
// $polygon
- print( "Polygon remove..." );
- res = t.remove({ "locs.loc": { $within: { $polygon: query.boxPoly }},
- "poly.docIn": randYesQuery() });
- assert.eq( results.poly.docsIn, res.nRemoved );
-
+ print("Polygon remove...");
+ res =
+ t.remove({"locs.loc": {$within: {$polygon: query.boxPoly}}, "poly.docIn": randYesQuery()});
+ assert.eq(results.poly.docsIn, res.nRemoved);
}
-
testServer.stop();
diff --git a/jstests/noPassthrough/geo_mnypts_plus_fields.js b/jstests/noPassthrough/geo_mnypts_plus_fields.js
index 7058d2795c1..801c7dcfc8b 100644
--- a/jstests/noPassthrough/geo_mnypts_plus_fields.js
+++ b/jstests/noPassthrough/geo_mnypts_plus_fields.js
@@ -1,105 +1,104 @@
// Test sanity of geo queries with a lot of points
-load( "jstests/libs/slow_weekly_util.js" );
-testServer = new SlowWeeklyMongod( "geo_mnypts_plus_fields" );
-db = testServer.getDB( "test" );
+load("jstests/libs/slow_weekly_util.js");
+testServer = new SlowWeeklyMongod("geo_mnypts_plus_fields");
+db = testServer.getDB("test");
var maxFields = 3;
-for( var fields = 1; fields < maxFields; fields++ ){
-
+for (var fields = 1; fields < maxFields; fields++) {
var coll = db.testMnyPts;
coll.drop();
-
+
var totalPts = 500 * 1000;
var bulk = coll.initializeUnorderedBulkOp();
// Add points in a 100x100 grid
- for( var i = 0; i < totalPts; i++ ){
+ for (var i = 0; i < totalPts; i++) {
var ii = i % 10000;
-
- var doc = { loc : [ ii % 100, Math.floor( ii / 100 ) ] };
-
+
+ var doc = {
+ loc: [ii % 100, Math.floor(ii / 100)]
+ };
+
// Add fields with different kinds of data
- for( var j = 0; j < fields; j++ ){
-
+ for (var j = 0; j < fields; j++) {
var field = null;
-
- if( j % 3 == 0 ){
+
+ if (j % 3 == 0) {
// Make half the points not searchable
- field = "abcdefg" + ( i % 2 == 0 ? "h" : "" );
- }
- else if( j % 3 == 1 ){
+ field = "abcdefg" + (i % 2 == 0 ? "h" : "");
+ } else if (j % 3 == 1) {
field = new Date();
- }
- else{
+ } else {
field = true;
}
-
- doc[ "field" + j ] = field;
+
+ doc["field" + j] = field;
}
- bulk.insert( doc );
+ bulk.insert(doc);
}
assert.writeOK(bulk.execute());
// Create the query for the additional fields
queryFields = {};
- for( var j = 0; j < fields; j++ ){
-
+ for (var j = 0; j < fields; j++) {
var field = null;
-
- if( j % 3 == 0 ){
+
+ if (j % 3 == 0) {
field = "abcdefg";
- }
- else if( j % 3 == 1 ){
- field = { $lte : new Date() };
- }
- else{
+ } else if (j % 3 == 1) {
+ field = {
+ $lte: new Date()
+ };
+ } else {
field = true;
}
-
- queryFields[ "field" + j ] = field;
+
+ queryFields["field" + j] = field;
}
-
- coll.ensureIndex({ loc : "2d" });
-
+
+ coll.ensureIndex({loc: "2d"});
+
// Check that quarter of points in each quadrant
- for( var i = 0; i < 4; i++ ){
+ for (var i = 0; i < 4; i++) {
var x = i % 2;
- var y = Math.floor( i / 2 );
-
+ var y = Math.floor(i / 2);
+
var box = [[0, 0], [49, 49]];
- box[0][0] += ( x == 1 ? 50 : 0 );
- box[1][0] += ( x == 1 ? 50 : 0 );
- box[0][1] += ( y == 1 ? 50 : 0 );
- box[1][1] += ( y == 1 ? 50 : 0 );
-
+ box[0][0] += (x == 1 ? 50 : 0);
+ box[1][0] += (x == 1 ? 50 : 0);
+ box[0][1] += (y == 1 ? 50 : 0);
+ box[1][1] += (y == 1 ? 50 : 0);
+
// Now only half of each result comes back
- assert.eq( totalPts / ( 4 * 2 ), coll.find(Object.extend( { loc : { $within : { $box : box } } }, queryFields ) ).count() );
- assert.eq( totalPts / ( 4 * 2 ), coll.find(Object.extend( { loc : { $within : { $box : box } } }, queryFields ) ).itcount() );
-
+ assert.eq(totalPts / (4 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
+ assert.eq(totalPts / (4 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
}
-
+
// Check that half of points in each half
- for( var i = 0; i < 2; i++ ){
-
+ for (var i = 0; i < 2; i++) {
var box = [[0, 0], [49, 99]];
- box[0][0] += ( i == 1 ? 50 : 0 );
- box[1][0] += ( i == 1 ? 50 : 0 );
-
- assert.eq( totalPts / ( 2 * 2 ), coll.find(Object.extend( { loc : { $within : { $box : box } } }, queryFields ) ).count() );
- assert.eq( totalPts / ( 2 * 2 ), coll.find(Object.extend( { loc : { $within : { $box : box } } }, queryFields ) ).itcount() );
-
+ box[0][0] += (i == 1 ? 50 : 0);
+ box[1][0] += (i == 1 ? 50 : 0);
+
+ assert.eq(totalPts / (2 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).count());
+ assert.eq(totalPts / (2 * 2),
+ coll.find(Object.extend({loc: {$within: {$box: box}}}, queryFields)).itcount());
}
-
+
// Check that all but corner set of points in radius
- var circle = [[0, 0], (100 - 1) * Math.sqrt( 2 ) - 0.25 ];
-
+ var circle = [[0, 0], (100 - 1) * Math.sqrt(2) - 0.25];
+
// All [99,x] pts are field0 : "abcdefg"
- assert.eq( totalPts / 2 - totalPts / ( 100 * 100 ), coll.find(Object.extend( { loc : { $within : { $center : circle } } }, queryFields ) ).count() );
- assert.eq( totalPts / 2 - totalPts / ( 100 * 100 ), coll.find(Object.extend( { loc : { $within : { $center : circle } } }, queryFields ) ).itcount() );
-
+ assert.eq(totalPts / 2 - totalPts / (100 * 100),
+ coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).count());
+ assert.eq(totalPts / 2 - totalPts / (100 * 100),
+ coll.find(Object.extend({loc: {$within: {$center: circle}}}, queryFields)).itcount());
}
testServer.stop();
diff --git a/jstests/noPassthrough/geo_near_random1.js b/jstests/noPassthrough/geo_near_random1.js
index 77f7fdb28bf..1e70ae39c8a 100644
--- a/jstests/noPassthrough/geo_near_random1.js
+++ b/jstests/noPassthrough/geo_near_random1.js
@@ -1,21 +1,18 @@
// this tests all points using $near
load("jstests/libs/geo_near_random.js");
-load( "jstests/libs/slow_weekly_util.js" );
-
-testServer = new SlowWeeklyMongod( "geo_near_random1" );
-db = testServer.getDB( "test" );
-
+load("jstests/libs/slow_weekly_util.js");
+testServer = new SlowWeeklyMongod("geo_near_random1");
+db = testServer.getDB("test");
var test = new GeoNearRandomTest("weekly.geo_near_random1");
test.insertPts(1000);
-test.testPt([0,0]);
+test.testPt([0, 0]);
test.testPt(test.mkPt());
test.testPt(test.mkPt());
test.testPt(test.mkPt());
test.testPt(test.mkPt());
-
testServer.stop();
diff --git a/jstests/noPassthrough/geo_near_random2.js b/jstests/noPassthrough/geo_near_random2.js
index 030a6b8c4ac..1c04cf2d223 100644
--- a/jstests/noPassthrough/geo_near_random2.js
+++ b/jstests/noPassthrough/geo_near_random2.js
@@ -1,23 +1,26 @@
// this tests 1% of all points using $near and $nearSphere
load("jstests/libs/geo_near_random.js");
-load( "jstests/libs/slow_weekly_util.js" );
+load("jstests/libs/slow_weekly_util.js");
-testServer = new SlowWeeklyMongod( "geo_near_random2" );
-db = testServer.getDB( "test" );
+testServer = new SlowWeeklyMongod("geo_near_random2");
+db = testServer.getDB("test");
var test = new GeoNearRandomTest("weekly.geo_near_random2");
test.insertPts(50000);
-opts = {sphere:0, nToTest:test.nPts*0.01};
-test.testPt([0,0], opts);
+opts = {
+ sphere: 0,
+ nToTest: test.nPts * 0.01
+};
+test.testPt([0, 0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
opts.sphere = 1;
-test.testPt([0,0], opts);
+test.testPt([0, 0], opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
diff --git a/jstests/noPassthrough/index_partial_no_explain_cmds.js b/jstests/noPassthrough/index_partial_no_explain_cmds.js
index 10cdaf932d1..69af783c1a9 100644
--- a/jstests/noPassthrough/index_partial_no_explain_cmds.js
+++ b/jstests/noPassthrough/index_partial_no_explain_cmds.js
@@ -10,18 +10,24 @@
assert.commandWorked(coll.ensureIndex({x: 1}, {partialFilterExpression: {a: 1}}));
- assert.writeOK(coll.insert({_id: 1, x: 5, a: 2})); // Not in index.
- assert.writeOK(coll.insert({_id: 2, x: 6, a: 1})); // In index.
+ assert.writeOK(coll.insert({_id: 1, x: 5, a: 2})); // Not in index.
+ assert.writeOK(coll.insert({_id: 2, x: 6, a: 1})); // In index.
// Verify we will throw if the partial index can't be used.
- assert.throws(function() { coll.find({x: {$gt: 1}, a: 2}).itcount(); });
+ assert.throws(function() {
+ coll.find({x: {$gt: 1}, a: 2}).itcount();
+ });
//
// Test mapReduce.
//
- var mapFunc = function() { emit(this._id, 1); };
- var reduceFunc = function (keyId, countArray) { return Array.sum(countArray); };
+ var mapFunc = function() {
+ emit(this._id, 1);
+ };
+ var reduceFunc = function(keyId, countArray) {
+ return Array.sum(countArray);
+ };
ret = coll.mapReduce(mapFunc, reduceFunc, {out: "inline", query: {x: {$gt: 1}, a: 1}});
assert.eq(1, ret.counts.input);
@@ -34,8 +40,12 @@
assert.eq(1, ret.length);
ret = coll.distinct("x", {x: {$gt: 1}, a: 1});
assert.eq(1, ret.length);
- assert.throws(function() { printjson(coll.distinct("a", {a: 0})); });
- assert.throws(function() { printjson(coll.distinct("x", {a: 0})); });
+ assert.throws(function() {
+ printjson(coll.distinct("a", {a: 0}));
+ });
+ assert.throws(function() {
+ printjson(coll.distinct("x", {a: 0}));
+ });
// SERVER-19511 regression test: distinct with no query predicate should return the correct
// number of results. This query should not be allowed to use the partial index, so it should
diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js
index db38d59504e..c876851bdc1 100644
--- a/jstests/noPassthrough/indexbg1.js
+++ b/jstests/noPassthrough/indexbg1.js
@@ -1,13 +1,13 @@
// Test background index creation
-load( "jstests/libs/slow_weekly_util.js" );
+load("jstests/libs/slow_weekly_util.js");
-var testServer = new SlowWeeklyMongod( "indexbg1" );
-var db = testServer.getDB( "test" );
+var testServer = new SlowWeeklyMongod("indexbg1");
+var db = testServer.getDB("test");
var baseName = "jstests_indexbg1";
var parallel = function() {
- return db[ baseName + "_parallelStatus" ];
+ return db[baseName + "_parallelStatus"];
};
var resetParallel = function() {
@@ -17,7 +17,10 @@ var resetParallel = function() {
var doParallel = function(work) {
resetParallel();
print("doParallel: " + work);
- startMongoProgramNoConnect("mongo", "--eval", work + "; db." + baseName + "_parallelStatus.save( {done:1} );", db.getMongo().host);
+ startMongoProgramNoConnect("mongo",
+ "--eval",
+ work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
+ db.getMongo().host);
};
var doneParallel = function() {
@@ -25,91 +28,93 @@ var doneParallel = function() {
};
var waitParallel = function() {
- assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
+ assert.soon(function() {
+ return doneParallel();
+ }, "parallel did not finish in time", 300000, 1000);
};
var size = 400 * 1000;
-while( 1 ) { // if indexing finishes before we can run checks, try indexing w/ more data
- print( "size: " + size );
+while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
+ print("size: " + size);
var fullName = "db." + baseName;
- var t = db[ baseName ];
+ var t = db[baseName];
t.drop();
var bulk = db.jstests_indexbg1.initializeUnorderedBulkOp();
- for( var i = 0; i < size; ++i ) {
- bulk.insert({ i: i });
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
}
assert.writeOK(bulk.execute());
- assert.eq( size, t.count() );
-
- doParallel( fullName + ".ensureIndex( {i:1}, {background:true} )" );
+ assert.eq(size, t.count());
+
+ doParallel(fullName + ".ensureIndex( {i:1}, {background:true} )");
try {
// wait for indexing to start
print("wait for indexing to start");
- assert.soon(
- function() { return 2 === t.getIndexes().length; }, "no index created", 30000, 50);
+ assert.soon(function() {
+ return 2 === t.getIndexes().length;
+ }, "no index created", 30000, 50);
print("started.");
- sleep(1000); // there is a race between when the index build shows up in curop and
- // when it first attempts to grab a write lock.
- assert.eq( size, t.count() );
- assert.eq( 100, t.findOne( {i:100} ).i );
+ sleep(1000); // there is a race between when the index build shows up in curop and
+ // when it first attempts to grab a write lock.
+ assert.eq(size, t.count());
+ assert.eq(100, t.findOne({i: 100}).i);
var q = t.find();
- for( i = 0; i < 120; ++i ) { // getmore
+ for (i = 0; i < 120; ++i) { // getmore
q.next();
- assert( q.hasNext(), "no next" );
+ assert(q.hasNext(), "no next");
}
- var ex = t.find( {i:100} ).limit(-1).explain("executionStats");
+ var ex = t.find({i: 100}).limit(-1).explain("executionStats");
printjson(ex);
- assert( ex.executionStats.totalKeysExamined < 1000 ,
- "took too long to find 100: " + tojson( ex ) );
-
+ assert(ex.executionStats.totalKeysExamined < 1000,
+ "took too long to find 100: " + tojson(ex));
- assert.writeOK(t.remove({ i: 40 }, true )); // table scan
- assert.writeOK(t.update({ i: 10 }, { i :-10 })); // should scan 10
+ assert.writeOK(t.remove({i: 40}, true)); // table scan
+ assert.writeOK(t.update({i: 10}, {i: -10})); // should scan 10
- var id = t.find().hint( {$natural:-1} ).next()._id;
+ var id = t.find().hint({$natural: -1}).next()._id;
- assert.writeOK(t.update({ _id: id }, { i: -2 } ));
- assert.writeOK(t.save({ i: -50 }));
- assert.writeOK(t.save({ i: size + 2 }));
+ assert.writeOK(t.update({_id: id}, {i: -2}));
+ assert.writeOK(t.save({i: -50}));
+ assert.writeOK(t.save({i: size + 2}));
- assert.eq( size + 1, t.count() );
+ assert.eq(size + 1, t.count());
- print( "finished with checks" );
- } catch( e ) {
+ print("finished with checks");
+ } catch (e) {
// only a failure if we're still indexing
// wait for parallel status to update to reflect indexing status
- print("caught exception: " + e );
- sleep( 1000 );
- if ( !doneParallel() ) {
+ print("caught exception: " + e);
+ sleep(1000);
+ if (!doneParallel()) {
throw e;
}
print("but that's OK");
}
- print( "going to check if index is done" );
- if ( !doneParallel() ) {
+ print("going to check if index is done");
+ if (!doneParallel()) {
break;
}
- print( "indexing finished too soon, retrying..." );
+ print("indexing finished too soon, retrying...");
size *= 2;
- assert( size < 200000000, "unable to run checks in parallel with index creation" );
+ assert(size < 200000000, "unable to run checks in parallel with index creation");
}
print("our tests done, waiting for parallel to finish");
waitParallel();
print("finished");
-assert.eq( 1, t.count( {i:-10} ) );
-assert.eq( 1, t.count( {i:-2} ) );
-assert.eq( 1, t.count( {i:-50} ) );
-assert.eq( 1, t.count( {i:size+2} ) );
-assert.eq( 0, t.count( {i:40} ) );
+assert.eq(1, t.count({i: -10}));
+assert.eq(1, t.count({i: -2}));
+assert.eq(1, t.count({i: -50}));
+assert.eq(1, t.count({i: size + 2}));
+assert.eq(0, t.count({i: 40}));
print("about to drop index");
-t.dropIndex( {i:1} );
+t.dropIndex({i: 1});
var gle = db.getLastError();
-printjson( gle );
-assert( !gle );
+printjson(gle);
+assert(!gle);
testServer.stop();
diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js
index dc4959afd4a..5fcd975a98f 100644
--- a/jstests/noPassthrough/indexbg2.js
+++ b/jstests/noPassthrough/indexbg2.js
@@ -1,22 +1,25 @@
// Test background index creation w/ constraints
-load( "jstests/libs/slow_weekly_util.js" );
+load("jstests/libs/slow_weekly_util.js");
-var testServer = new SlowWeeklyMongod( "indexbg2" );
-var db = testServer.getDB( "test" );
+var testServer = new SlowWeeklyMongod("indexbg2");
+var db = testServer.getDB("test");
var baseName = "jstests_index12";
var parallel = function() {
- return db[ baseName + "_parallelStatus" ];
+ return db[baseName + "_parallelStatus"];
};
var resetParallel = function() {
parallel().drop();
};
-var doParallel = function( work ) {
+var doParallel = function(work) {
resetParallel();
- startMongoProgramNoConnect( "mongo", "--eval", work + "; db." + baseName + "_parallelStatus.save( {done:1} );", db.getMongo().host );
+ startMongoProgramNoConnect("mongo",
+ "--eval",
+ work + "; db." + baseName + "_parallelStatus.save( {done:1} );",
+ db.getMongo().host);
};
var doneParallel = function() {
@@ -24,30 +27,33 @@ var doneParallel = function() {
};
var waitParallel = function() {
- assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
+ assert.soon(function() {
+ return doneParallel();
+ }, "parallel did not finish in time", 300000, 1000);
};
var doTest = function() {
"use strict";
var size = 10000;
- while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
+ while (1) { // if indexing finishes before we can run checks, try indexing w/ more data
print("size: " + size);
var fullName = "db." + baseName;
var t = db[baseName];
t.drop();
for (var i = 0; i < size; ++i) {
- db.jstests_index12.save({ i: i });
+ db.jstests_index12.save({i: i});
}
assert.eq(size, t.count());
doParallel(fullName + ".ensureIndex( {i:1}, {background:true, unique:true} )");
try {
// wait for indexing to start
- assert.soon(
- function() { return 2 === t.getIndexes().length; }, "no index created", 30000, 50);
- assert.writeError(t.save({ i: 0, n: true })); // duplicate key violation
- assert.writeOK(t.save({ i: size - 1, n: true }));
+ assert.soon(function() {
+ return 2 === t.getIndexes().length;
+ }, "no index created", 30000, 50);
+ assert.writeError(t.save({i: 0, n: true})); // duplicate key violation
+ assert.writeOK(t.save({i: size - 1, n: true}));
} catch (e) {
// only a failure if we're still indexing
// wait for parallel status to update to reflect indexing status
@@ -67,7 +73,7 @@ var doTest = function() {
waitParallel();
/* it could be that there is more than size now but the index failed
- to build - which is valid. we check index isn't there.
+ to build - which is valid. we check index isn't there.
*/
if (t.count() != size) {
assert.eq(1, t.getIndexes().length, "change in # of elems yet index is there");
diff --git a/jstests/noPassthrough/initial_sync_cloner_dups.js b/jstests/noPassthrough/initial_sync_cloner_dups.js
index f43c2cf43c4..162e5292de1 100644
--- a/jstests/noPassthrough/initial_sync_cloner_dups.js
+++ b/jstests/noPassthrough/initial_sync_cloner_dups.js
@@ -1,128 +1,130 @@
/**
* Test for SERVER-17487
- * 3 node replset
+ * 3 node replset
* insert docs with numeric _ids
* start deleting/re-inserting docs from collection in a loop
* add new secondary to force initialSync
* verify collection and both indexes on the secondary have the right number of docs
*/
(function() {
-'use strict';
-load('jstests/libs/parallelTester.js');
+ 'use strict';
+ load('jstests/libs/parallelTester.js');
-Random.setRandomSeed();
+ Random.setRandomSeed();
-var awaitTimeout = 2*60*1000;
-// used to parse RAM log file
-var contains = function(logLines, func) {
- var i = logLines.length;
- while (i--) {
- printjson(logLines[i]);
- if (func(logLines[i])) {
- return true;
+ var awaitTimeout = 2 * 60 * 1000;
+ // used to parse RAM log file
+ var contains = function(logLines, func) {
+ var i = logLines.length;
+ while (i--) {
+ printjson(logLines[i]);
+ if (func(logLines[i])) {
+ return true;
+ }
}
- }
- return false;
-};
+ return false;
+ };
-var replTest = new ReplSetTest({name: 'cloner', nodes: 3, oplogSize: 150 /*~1.5x data size*/});
-replTest.startSet();
-var conf = replTest.getReplSetConfig();
-conf.settings = {};
-conf.settings.chainingAllowed = false;
-replTest.initiate(conf);
-replTest.awaitSecondaryNodes(awaitTimeout);
-var primary = replTest.getPrimary();
-var coll = primary.getDB('test').cloner;
-coll.drop();
-coll.createIndex({k: 1});
+ var replTest = new ReplSetTest({name: 'cloner', nodes: 3, oplogSize: 150 /*~1.5x data size*/});
+ replTest.startSet();
+ var conf = replTest.getReplSetConfig();
+ conf.settings = {};
+ conf.settings.chainingAllowed = false;
+ replTest.initiate(conf);
+ replTest.awaitSecondaryNodes(awaitTimeout);
+ var primary = replTest.getPrimary();
+ var coll = primary.getDB('test').cloner;
+ coll.drop();
+ coll.createIndex({k: 1});
-// These need to be big enough to force initial-sync to use many batches
-var numDocs = 100*1000;
-var bigStr = Array(1001).toString();
-var batch = coll.initializeUnorderedBulkOp();
-for (var i=0; i < numDocs; i++) {
- batch.insert({_id: i, bigStr: bigStr});
-}
-batch.execute();
+ // These need to be big enough to force initial-sync to use many batches
+ var numDocs = 100 * 1000;
+ var bigStr = Array(1001).toString();
+ var batch = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; i++) {
+ batch.insert({_id: i, bigStr: bigStr});
+ }
+ batch.execute();
-replTest.awaitReplication(awaitTimeout);
+ replTest.awaitReplication(awaitTimeout);
-jsTestLog("Start remove/insert on primary");
-var insertAndRemove = function(host) {
- jsTestLog("starting bg writes on " + host);
- var m = new Mongo(host);
- var db = m.getDB('test');
- var coll = db.cloner;
- var numDocs = coll.count();
- for (var i=0; !db.stop.findOne(); i++) {
+ jsTestLog("Start remove/insert on primary");
+ var insertAndRemove = function(host) {
+ jsTestLog("starting bg writes on " + host);
+ var m = new Mongo(host);
+ var db = m.getDB('test');
+ var coll = db.cloner;
+ var numDocs = coll.count();
+ for (var i = 0; !db.stop.findOne(); i++) {
var id = Random.randInt(numDocs);
coll.remove({_id: id});
coll.insert({_id: id});
var id = i % numDocs;
- //print(id);
+ // print(id);
coll.remove({_id: id});
coll.insert({_id: id});
// Try to throttle this thread to prevent overloading slow machines.
sleep(1);
- }
+ }
- jsTestLog("finished bg writes on " + host);
-};
-var worker = new ScopedThread(insertAndRemove, primary.host);
-worker.start();
+ jsTestLog("finished bg writes on " + host);
+ };
+ var worker = new ScopedThread(insertAndRemove, primary.host);
+ worker.start();
-jsTestLog("add a new secondary");
-var secondary = replTest.add({});
-replTest.reInitiate(awaitTimeout * 2);
-secondary.setSlaveOk();
-// Wait for the secondary to get ReplSetInitiate command.
-replTest.waitForState(secondary,
- [ReplSetTest.State.STARTUP_2,
- ReplSetTest.State.RECOVERING,
- ReplSetTest.State.SECONDARY],
- 60 * 1000);
+ jsTestLog("add a new secondary");
+ var secondary = replTest.add({});
+ replTest.reInitiate(awaitTimeout * 2);
+ secondary.setSlaveOk();
+ // Wait for the secondary to get ReplSetInitiate command.
+ replTest.waitForState(
+ secondary,
+ [ReplSetTest.State.STARTUP_2, ReplSetTest.State.RECOVERING, ReplSetTest.State.SECONDARY],
+ 60 * 1000);
-// This fail point will cause the first intial sync to fail, and leave an op in the buffer to
-// verify the fix from SERVER-17807
-print("=================== failpoint enabled ==============");
-printjson(assert.commandWorked(secondary.getDB("admin").adminCommand(
- { configureFailPoint: 'failInitSyncWithBufferedEntriesLeft',
- mode: {times: 1}} )));
-printjson(assert.commandWorked(secondary.getDB("admin").adminCommand( { resync:true } )));
+ // This fail point will cause the first intial sync to fail, and leave an op in the buffer to
+ // verify the fix from SERVER-17807
+ print("=================== failpoint enabled ==============");
+ printjson(assert.commandWorked(secondary.getDB("admin").adminCommand(
+ {configureFailPoint: 'failInitSyncWithBufferedEntriesLeft', mode: {times: 1}})));
+ printjson(assert.commandWorked(secondary.getDB("admin").adminCommand({resync: true})));
-// NOTE: This is here to prevent false negatives, but it is racy and dependent on magic numbers.
-// Removed the assertion because it was too flaky. Printing a warning instead (dan)
-jsTestLog("making sure we dropped some dups");
-var res = secondary.adminCommand({getLog:"global"});
-var droppedDups = (contains(res.log, function(v) {
- return v.indexOf("index build dropped"/* NNN dups*/) != -1;
-}));
-if (!droppedDups) {
- jsTestLog("Warning: Test did not trigger duplicate documents, this run will be a false negative");
-}
+ // NOTE: This is here to prevent false negatives, but it is racy and dependent on magic numbers.
+ // Removed the assertion because it was too flaky. Printing a warning instead (dan)
+ jsTestLog("making sure we dropped some dups");
+ var res = secondary.adminCommand({getLog: "global"});
+ var droppedDups = (contains(res.log,
+ function(v) {
+ return v.indexOf("index build dropped" /* NNN dups*/) != -1;
+ }));
+ if (!droppedDups) {
+ jsTestLog(
+ "Warning: Test did not trigger duplicate documents, this run will be a false negative");
+ }
-jsTestLog("stopping writes and waiting for replica set to coalesce");
-primary.getDB('test').stop.insert({});
-worker.join();
-//make sure all secondaries are caught up, after init sync
-reconnect(secondary.getDB("test"));
-replTest.awaitSecondaryNodes(awaitTimeout);
-replTest.awaitReplication(awaitTimeout);
+ jsTestLog("stopping writes and waiting for replica set to coalesce");
+ primary.getDB('test').stop.insert({});
+ worker.join();
+ // make sure all secondaries are caught up, after init sync
+ reconnect(secondary.getDB("test"));
+ replTest.awaitSecondaryNodes(awaitTimeout);
+ replTest.awaitReplication(awaitTimeout);
-jsTestLog("check that secondary has correct counts");
-var secondaryColl = secondary.getDB('test').getCollection('cloner');
-var index = secondaryColl.find({},{_id:1}).hint({_id:1}).itcount();
-var secondary_index = secondaryColl.find({},{_id:1}).hint({k:1}).itcount();
-var table = secondaryColl.find({},{_id:1}).hint({$natural:1}).itcount();
-if (index != table || index != secondary_index) {
- printjson({name: coll,
- _id_index_count:index,
- secondary_index_count: secondary_index,
- table_count: table});
-}
-assert.eq(index, table) ;
-assert.eq(table, secondary_index);
+ jsTestLog("check that secondary has correct counts");
+ var secondaryColl = secondary.getDB('test').getCollection('cloner');
+ var index = secondaryColl.find({}, {_id: 1}).hint({_id: 1}).itcount();
+ var secondary_index = secondaryColl.find({}, {_id: 1}).hint({k: 1}).itcount();
+ var table = secondaryColl.find({}, {_id: 1}).hint({$natural: 1}).itcount();
+ if (index != table || index != secondary_index) {
+ printjson({
+ name: coll,
+ _id_index_count: index,
+ secondary_index_count: secondary_index,
+ table_count: table
+ });
+ }
+ assert.eq(index, table);
+ assert.eq(table, secondary_index);
})();
diff --git a/jstests/noPassthrough/javascript_options.js b/jstests/noPassthrough/javascript_options.js
index 850e505195e..e0f1690bd5d 100644
--- a/jstests/noPassthrough/javascript_options.js
+++ b/jstests/noPassthrough/javascript_options.js
@@ -2,46 +2,30 @@ var baseName = "jstests_nopassthrough_javascript_options";
load('jstests/libs/command_line/test_parsed_options.js');
-
-
jsTest.log("Testing \"noscripting\" command line option");
var expectedResult = {
- "parsed" : {
- "security" : {
- "javascriptEnabled" : false
- }
- }
+ "parsed": {"security": {"javascriptEnabled": false}}
};
-testGetCmdLineOptsMongod({ noscripting : "" }, expectedResult);
-
-
+testGetCmdLineOptsMongod({noscripting: ""}, expectedResult);
jsTest.log("Testing explicitly disabled \"noscripting\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_noscripting.ini",
- "security" : {
- "javascriptEnabled" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_noscripting.ini",
+ "security": {"javascriptEnabled": true}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_noscripting.ini" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_noscripting.ini"},
expectedResult);
-
-
jsTest.log("Testing \"scriptingEnabled\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/enable_scripting.json",
- "security" : {
- "javascriptEnabled" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/enable_scripting.json",
+ "security": {"javascriptEnabled": true}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/enable_scripting.json" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_scripting.json"},
expectedResult);
-
-
print(baseName + " succeeded.");
diff --git a/jstests/noPassthrough/js_protection.js b/jstests/noPassthrough/js_protection.js
index dde571499a8..243bd67342e 100644
--- a/jstests/noPassthrough/js_protection.js
+++ b/jstests/noPassthrough/js_protection.js
@@ -11,57 +11,70 @@
*/
(function() {
-"use strict";
+ "use strict";
-var testServer = MongoRunner.runMongod({setParameter: 'javascriptProtection=true'});
-var db = testServer.getDB("test");
-var t = db.foo;
-var funcToStore = function(x) { return x + 1; };
+ var testServer = MongoRunner.runMongod({setParameter: 'javascriptProtection=true'});
+ var db = testServer.getDB("test");
+ var t = db.foo;
+ var funcToStore = function(x) {
+ return x + 1;
+ };
-function assertMongoClientCorrect() {
- var mongo = runMongoProgram("mongo",
- "--port", testServer.port,
- "--enableJavaScriptProtection",
- "--eval",
- // stored functions in objects
- "var x = db.foo.findOne({'_id' : 0});" +
- "assert.neq(typeof x.foo, 'function');" +
- // retain db.loadServerScripts() functionality
- "db.loadServerScripts();" +
- "assert.eq(stored_func(4), 5);" +
+ function assertMongoClientCorrect() {
+ var mongo = runMongoProgram("mongo",
+ "--port",
+ testServer.port,
+ "--enableJavaScriptProtection",
+ "--eval",
+ // stored functions in objects
+ "var x = db.foo.findOne({'_id' : 0});" +
+ "assert.neq(typeof x.foo, 'function');" +
+ // retain db.loadServerScripts() functionality
+ "db.loadServerScripts();" +
+ "assert.eq(stored_func(4), 5);" +
- "print(\"completed gracefully\");"
- );
+ "print(\"completed gracefully\");");
- var mongoOutput = rawMongoProgramOutput();
- assert(!mongoOutput.match(/assert failed/));
- assert(mongoOutput.match(/completed gracefully/));
-}
+ var mongoOutput = rawMongoProgramOutput();
+ assert(!mongoOutput.match(/assert failed/));
+ assert(mongoOutput.match(/completed gracefully/));
+ }
-function assertNoStoredWhere() {
- t.insertOne({name: 'testdoc', val : 0, y : 0});
- t.update( { $where : "stored_func(this.val) == 1" },
- { $set : { y : 100 } } , false , true );
+ function assertNoStoredWhere() {
+ t.insertOne({name: 'testdoc', val: 0, y: 0});
+ t.update({$where: "stored_func(this.val) == 1"}, {$set: {y: 100}}, false, true);
- var x = t.findOne({name: 'testdoc'});
- assert.eq(x.y, 0);
+ var x = t.findOne({name: 'testdoc'});
+ assert.eq(x.y, 0);
- t.update( { $where : function() { return this.val == 0;} } ,
- { $set : { y : 100 } } , false , true );
+ t.update(
+ {
+ $where: function() {
+ return this.val == 0;
+ }
+ },
+ {$set: {y: 100}},
+ false,
+ true);
- x = t.findOne({name: 'testdoc'});
- assert.eq(x.y, 100);
-}
+ x = t.findOne({name: 'testdoc'});
+ assert.eq(x.y, 100);
+ }
-/**
- * ACTUAL TEST
- */
+ /**
+ * ACTUAL TEST
+ */
-db.system.js.save( { _id : "stored_func" , value : funcToStore } );
-t.insertOne({'_id': 0, 'myFunc': function() { return 'tesval'; } });
+ db.system.js.save({_id: "stored_func", value: funcToStore});
+ t.insertOne({
+ '_id': 0,
+ 'myFunc': function() {
+ return 'tesval';
+ }
+ });
-assertMongoClientCorrect();
-assertNoStoredWhere();
+ assertMongoClientCorrect();
+ assertNoStoredWhere();
-MongoRunner.stopMongod(testServer);
+ MongoRunner.stopMongod(testServer);
})();
diff --git a/jstests/noPassthrough/js_protection_roundtrip.js b/jstests/noPassthrough/js_protection_roundtrip.js
index 6cd82f81e94..b85e8cfac43 100644
--- a/jstests/noPassthrough/js_protection_roundtrip.js
+++ b/jstests/noPassthrough/js_protection_roundtrip.js
@@ -8,41 +8,44 @@
* 'CodeWScope'.
*/
(function() {
-"use strict";
-
-var testServer = MongoRunner.runMongod({setParameter: 'javascriptProtection=true'}),
- db = testServer.getDB("test"),
- t = db.foo,
- x;
-
-function makeRoundTrip() {
- var mongo = runMongoProgram("mongo",
- "--port", testServer.port,
- "--enableJavaScriptProtection",
- "--eval",
- "var x = db.foo.findOne({'_id' : 0});" +
- "db.foo.insertOne({'_id': 1, myFunc: x.myFunc});" +
- "print(\"completed gracefully\");"
- );
-
- var mongoOutput = rawMongoProgramOutput();
- assert(!mongoOutput.match(/assert failed/));
- assert(mongoOutput.match(/completed gracefully/));
-}
-
-/**
- * ACTUAL TEST
- */
-
-t.insertOne({'_id': 0, 'myFunc': function() { return 'yes'; } });
-
-makeRoundTrip();
-
-x = t.findOne({'_id': 1});
-
-if (!x.myFunc() == 'yes') {
- assert(0);
-}
-
-MongoRunner.stopMongod(testServer);
+ "use strict";
+
+ var testServer = MongoRunner.runMongod({setParameter: 'javascriptProtection=true'}),
+ db = testServer.getDB("test"), t = db.foo, x;
+
+ function makeRoundTrip() {
+ var mongo = runMongoProgram("mongo",
+ "--port",
+ testServer.port,
+ "--enableJavaScriptProtection",
+ "--eval",
+ "var x = db.foo.findOne({'_id' : 0});" +
+ "db.foo.insertOne({'_id': 1, myFunc: x.myFunc});" +
+ "print(\"completed gracefully\");");
+
+ var mongoOutput = rawMongoProgramOutput();
+ assert(!mongoOutput.match(/assert failed/));
+ assert(mongoOutput.match(/completed gracefully/));
+ }
+
+ /**
+ * ACTUAL TEST
+ */
+
+ t.insertOne({
+ '_id': 0,
+ 'myFunc': function() {
+ return 'yes';
+ }
+ });
+
+ makeRoundTrip();
+
+ x = t.findOne({'_id': 1});
+
+ if (!x.myFunc() == 'yes') {
+ assert(0);
+ }
+
+ MongoRunner.stopMongod(testServer);
})();
diff --git a/jstests/noPassthrough/lock_file.js b/jstests/noPassthrough/lock_file.js
index cb7e96c2563..152a9a35f32 100644
--- a/jstests/noPassthrough/lock_file.js
+++ b/jstests/noPassthrough/lock_file.js
@@ -19,12 +19,13 @@
// Test framework will append --storageEngine command line option if provided to smoke.py.
var mongod = MongoRunner.runMongod({dbpath: dbpath, smallfiles: ""});
- assert.neq(0, getMongodLockFileSize(dbpath),
+ assert.neq(0,
+ getMongodLockFileSize(dbpath),
'mongod.lock should not be empty while server is running');
MongoRunner.stopMongod(mongod);
// mongod.lock must be empty after shutting server down.
- assert.eq(0, getMongodLockFileSize(dbpath),
- 'mongod.lock not truncated after shutting server down');
+ assert.eq(
+ 0, getMongodLockFileSize(dbpath), 'mongod.lock not truncated after shutting server down');
}());
diff --git a/jstests/noPassthrough/lock_file_fail_to_open.js b/jstests/noPassthrough/lock_file_fail_to_open.js
index 4fc0c6fd2c2..517fd5ec774 100644
--- a/jstests/noPassthrough/lock_file_fail_to_open.js
+++ b/jstests/noPassthrough/lock_file_fail_to_open.js
@@ -8,17 +8,18 @@
var dbPath = MongoRunner.dataPath + baseName + "/";
// Start a MongoD just to get a lockfile in place.
- var mongo1 = MongoRunner.runMongod({dbpath: dbPath, waitForConnect: true});
+ var mongo1 = MongoRunner.runMongod({dbpath: dbPath, waitForConnect: true});
try {
clearRawMongoProgramOutput();
- // Start another one which should fail to start as there is already a lockfile in its dbpath.
+ // Start another one which should fail to start as there is already a lockfile in its
+ // dbpath.
var mongo2 = null;
try {
// Can't use assert.throws as behavior is different on Windows/Linux.
- mongo2 = MongoRunner.runMongod({dbpath: dbPath,
- noCleanData: true});
- } catch (ex) {}
+ mongo2 = MongoRunner.runMongod({dbpath: dbPath, noCleanData: true});
+ } catch (ex) {
+ }
// We should have failed to start.
assert(mongo2 === null);
assert.soon(() => {
diff --git a/jstests/noPassthrough/lock_stats.js b/jstests/noPassthrough/lock_stats.js
index d32cd9f3af1..078a22ead2d 100644
--- a/jstests/noPassthrough/lock_stats.js
+++ b/jstests/noPassthrough/lock_stats.js
@@ -16,8 +16,10 @@
// Wait until we see somebody waiting to acquire the lock, defend against unset stats.
assert.soon((function() {
var stats = db.serverStatus().locks.Global;
- if (!stats.acquireWaitCount || !stats.acquireWaitCount.W) return false;
- if (!startStats.acquireWaitCount || !startStats.acquireWaitCount.W) return true;
+ if (!stats.acquireWaitCount || !stats.acquireWaitCount.W)
+ return false;
+ if (!startStats.acquireWaitCount || !startStats.acquireWaitCount.W)
+ return true;
return stats.acquireWaitCount.W > startStats.acquireWaitCount.W;
}));
@@ -34,23 +36,29 @@
// The server was just started, so initial stats may be missing.
if (!startStats.acquireWaitCount || !startStats.acquireWaitCount.W) {
- startStats.acquireWaitCount = { W : 0 };
+ startStats.acquireWaitCount = {
+ W: 0
+ };
}
if (!startStats.timeAcquiringMicros || !startStats.timeAcquiringMicros.W) {
- startStats.timeAcquiringMicros = { W : 0 };
+ startStats.timeAcquiringMicros = {
+ W: 0
+ };
}
var acquireWaitCount = endStats.acquireWaitCount.W - startStats.acquireWaitCount.W;
- var blockedMillis = Math.floor((endStats.timeAcquiringMicros.W -
- startStats.timeAcquiringMicros.W) / 1000);
+ var blockedMillis =
+ Math.floor((endStats.timeAcquiringMicros.W - startStats.timeAcquiringMicros.W) / 1000);
// Require that no other commands run (and maybe acquire locks) in parallel.
assert.eq(acquireWaitCount, 1, "other commands ran in parallel, can't check timing");
assert.gte(blockedMillis, minBlockedMillis, "reported time acquiring lock is too low");
assert.lte(blockedMillis, maxBlockedMillis, "reported time acquiring lock is too high");
- return({ blockedMillis: blockedMillis,
- minBlockedMillis: minBlockedMillis,
- maxBlockedMillis: maxBlockedMillis});
+ return ({
+ blockedMillis: blockedMillis,
+ minBlockedMillis: minBlockedMillis,
+ maxBlockedMillis: maxBlockedMillis
+ });
}
var conn = MongoRunner.runMongod();
diff --git a/jstests/noPassthrough/logging_options.js b/jstests/noPassthrough/logging_options.js
index 75777e93990..794680fa937 100644
--- a/jstests/noPassthrough/logging_options.js
+++ b/jstests/noPassthrough/logging_options.js
@@ -5,94 +5,60 @@ load('jstests/libs/command_line/test_parsed_options.js');
// Verbosity testing
jsTest.log("Testing \"verbose\" command line option with no args");
var expectedResult = {
- "parsed" : {
- "systemLog" : {
- "verbosity" : 1
- }
- }
+ "parsed": {"systemLog": {"verbosity": 1}}
};
-testGetCmdLineOptsMongod({ verbose : "" }, expectedResult);
+testGetCmdLineOptsMongod({verbose: ""}, expectedResult);
jsTest.log("Testing \"verbose\" command line option with one \"v\"");
var expectedResult = {
- "parsed" : {
- "systemLog" : {
- "verbosity" : 1
- }
- }
+ "parsed": {"systemLog": {"verbosity": 1}}
};
-testGetCmdLineOptsMongod({ verbose : "v" }, expectedResult);
+testGetCmdLineOptsMongod({verbose: "v"}, expectedResult);
jsTest.log("Testing \"verbose\" command line option with two \"v\"s");
var expectedResult = {
- "parsed" : {
- "systemLog" : {
- "verbosity" : 2
- }
- }
+ "parsed": {"systemLog": {"verbosity": 2}}
};
-testGetCmdLineOptsMongod({ verbose : "vv" }, expectedResult);
+testGetCmdLineOptsMongod({verbose: "vv"}, expectedResult);
jsTest.log("Testing \"v\" command line option");
var expectedResult = {
- "parsed" : {
- "systemLog" : {
- "verbosity" : 1
- }
- }
+ "parsed": {"systemLog": {"verbosity": 1}}
};
// Currently the test converts "{ v : 1 }" to "-v" when it spawns the binary.
-testGetCmdLineOptsMongod({ v : 1 }, expectedResult);
+testGetCmdLineOptsMongod({v: 1}, expectedResult);
jsTest.log("Testing \"vv\" command line option");
var expectedResult = {
- "parsed" : {
- "systemLog" : {
- "verbosity" : 2
- }
- }
+ "parsed": {"systemLog": {"verbosity": 2}}
};
// Currently the test converts "{ v : 2 }" to "-vv" when it spawns the binary.
-testGetCmdLineOptsMongod({ v : 2 }, expectedResult);
+testGetCmdLineOptsMongod({v: 2}, expectedResult);
jsTest.log("Testing \"systemLog.verbosity\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/set_verbosity.json",
- "systemLog" : {
- "verbosity" : 5
- }
- }
+ "parsed":
+ {"config": "jstests/libs/config_files/set_verbosity.json", "systemLog": {"verbosity": 5}}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/set_verbosity.json" },
- expectedResult);
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/set_verbosity.json"}, expectedResult);
// log component verbosity
jsTest.log("Testing \"systemLog.component.verbosity\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/set_component_verbosity.json",
- "systemLog" : {
- "verbosity" : 2,
- "component" : {
- "accessControl" : {
- "verbosity" : 0
- },
- "storage" : {
- "verbosity" : 3,
- "journal" : {
- "verbosity" : 5
- }
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/set_component_verbosity.json",
+ "systemLog": {
+ "verbosity": 2,
+ "component": {
+ "accessControl": {"verbosity": 0},
+ "storage": {"verbosity": 3, "journal": {"verbosity": 5}}
}
}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/set_component_verbosity.json" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/set_component_verbosity.json"},
expectedResult);
-
-
// Log output testing
var baseDir = MongoRunner.dataPath + baseName;
var logDir = MongoRunner.dataPath + baseName + "/logs/";
@@ -103,20 +69,13 @@ assert(mkdir(logDir));
jsTest.log("Testing \"logpath\" command line option");
var expectedResult = {
- "parsed" : {
- "systemLog" : {
- "destination" : "file",
- "path" : logDir + "/mylog.log"
- }
- }
+ "parsed": {"systemLog": {"destination": "file", "path": logDir + "/mylog.log"}}
};
-testGetCmdLineOptsMongod({ logpath : logDir + "/mylog.log" }, expectedResult);
-
-
+testGetCmdLineOptsMongod({logpath: logDir + "/mylog.log"}, expectedResult);
jsTest.log("Testing with no explicit logging setting");
expectedResult = {
- "parsed" : { }
+ "parsed": {}
};
testGetCmdLineOptsMongod({}, expectedResult);
diff --git a/jstests/noPassthrough/minvalid.js b/jstests/noPassthrough/minvalid.js
index 8686152ec56..6f22e65e2ca 100644
--- a/jstests/noPassthrough/minvalid.js
+++ b/jstests/noPassthrough/minvalid.js
@@ -3,7 +3,7 @@
// their stored minvalid
var name = "minvalid";
-var replTest = new ReplSetTest({name: name, nodes: 1, oplogSize:1});
+var replTest = new ReplSetTest({name: name, nodes: 1, oplogSize: 1});
var host = getHostName();
var nodes = replTest.startSet();
@@ -17,21 +17,22 @@ mdb.foo.save({a: 1000});
print("2. get last op");
var local = master.getDB("local");
-var lastOp = local.oplog.rs.find().sort({$natural:-1}).limit(1).next();
+var lastOp = local.oplog.rs.find().sort({$natural: -1}).limit(1).next();
printjson(lastOp);
print("3: change minvalid");
// primaries don't populate minvalid by default
-local.replset.minvalid.insert({ts:new Timestamp(lastOp.ts.t, lastOp.ts.i+1), h:new NumberLong("1234567890")});
+local.replset.minvalid.insert(
+ {ts: new Timestamp(lastOp.ts.t, lastOp.ts.i + 1), h: new NumberLong("1234567890")});
printjson(local.replset.minvalid.findOne());
print("4: restart");
replTest.restart(0);
print("5: make sure it stays in recovering");
-var timeout = (new Date()).getTime()+30000;
+var timeout = (new Date()).getTime() + 30000;
while ((new Date().getTime()) < timeout) {
- var status = replTest.nodes[0].getDB("admin").runCommand({isMaster:1});
+ var status = replTest.nodes[0].getDB("admin").runCommand({isMaster: 1});
assert(!status.secondary && !status.primary, tojson(status));
sleep(2000);
}
diff --git a/jstests/noPassthrough/minvalid2.js b/jstests/noPassthrough/minvalid2.js
index 148e6f4fc4d..a9096805b66 100644
--- a/jstests/noPassthrough/minvalid2.js
+++ b/jstests/noPassthrough/minvalid2.js
@@ -20,15 +20,18 @@
print("1. make 3-member set w/arb (2)");
var name = "minvalid";
-var replTest = new ReplSetTest({name: name, nodes: 3, oplogSize:1});
+var replTest = new ReplSetTest({name: name, nodes: 3, oplogSize: 1});
var host = getHostName();
var nodes = replTest.startSet();
-replTest.initiate({_id : name, members : [
- {_id : 0, host : host+":"+replTest.ports[0]},
- {_id : 1, host : host+":"+replTest.ports[1]},
- {_id : 2, host : host+":"+replTest.ports[2], arbiterOnly : true}
-]});
+replTest.initiate({
+ _id: name,
+ members: [
+ {_id: 0, host: host + ":" + replTest.ports[0]},
+ {_id: 1, host: host + ":" + replTest.ports[1]},
+ {_id: 2, host: host + ":" + replTest.ports[2], arbiterOnly: true}
+ ]
+});
var slaves = replTest.liveNodes.slaves;
var master = replTest.getPrimary();
var masterId = replTest.getNodeId(master);
@@ -43,16 +46,18 @@ print("2: shut down slave");
replTest.stop(slaveId);
print("3: write to master");
-assert.writeOK(mdb.foo.insert({a: 1001}, { writeConcern: { w: 1 } }));
+assert.writeOK(mdb.foo.insert({a: 1001}, {writeConcern: {w: 1}}));
print("4: modify master's minvalid");
var local = master.getDB("local");
-var lastOp = local.oplog.rs.find().sort({$natural:-1}).limit(1).next();
+var lastOp = local.oplog.rs.find().sort({$natural: -1}).limit(1).next();
printjson(lastOp);
// Overwrite minvalid document to simulate an inconsistent state (as might result from a server
// crash.
-local.replset.minvalid.update({},{ ts:new Timestamp(lastOp.ts.t, lastOp.ts.i+1) }, {upsert: true});
+local.replset.minvalid.update({},
+ {ts: new Timestamp(lastOp.ts.t, lastOp.ts.i + 1)},
+ {upsert: true});
printjson(local.replset.minvalid.findOne());
print("5: shut down master");
@@ -64,15 +69,15 @@ replTest.restart(slaveId);
print("7: writes on former slave");
master = replTest.getPrimary();
mdb1 = master.getDB("foo");
-mdb1.foo.save({a:1002});
+mdb1.foo.save({a: 1002});
print("8: start up former master");
clearRawMongoProgramOutput();
replTest.restart(masterId);
print("9: check former master does not roll back");
-assert.soon(function(){
+assert.soon(function() {
return rawMongoProgramOutput().match("need to rollback, but in inconsistent state");
});
-replTest.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+replTest.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
diff --git a/jstests/noPassthrough/ns1.js b/jstests/noPassthrough/ns1.js
index ae5c6efd2f5..adc5f22a82d 100644
--- a/jstests/noPassthrough/ns1.js
+++ b/jstests/noPassthrough/ns1.js
@@ -1,51 +1,50 @@
-load( "jstests/libs/slow_weekly_util.js" );
+load("jstests/libs/slow_weekly_util.js");
-testServer = new SlowWeeklyMongod( "ns1" );
-mydb = testServer.getDB( "test_ns1" );
+testServer = new SlowWeeklyMongod("ns1");
+mydb = testServer.getDB("test_ns1");
-check = function( n , isNew ){
+check = function(n, isNew) {
var coll = mydb["x" + n];
- if ( isNew ){
- assert.eq( 0 , coll.count() , "pop a: " + n );
- coll.insert( { _id : n } );
+ if (isNew) {
+ assert.eq(0, coll.count(), "pop a: " + n);
+ coll.insert({_id: n});
}
- assert.eq( 1 , coll.count() , "pop b: " + n );
- assert.eq( n , coll.findOne()._id , "pop c: " + n );
+ assert.eq(1, coll.count(), "pop b: " + n);
+ assert.eq(n, coll.findOne()._id, "pop c: " + n);
return coll;
};
max = 0;
-for ( ; max<1000; max++ ){
- check(max,true);
+for (; max < 1000; max++) {
+ check(max, true);
}
-function checkall( removed ){
- for ( var i=0; i<max; i++ ){
- if ( removed == i ){
- assert.eq( 0 , mydb["x"+i].count() , "should be 0 : " + removed );
- }
- else {
- check( i , false );
+function checkall(removed) {
+ for (var i = 0; i < max; i++) {
+ if (removed == i) {
+ assert.eq(0, mydb["x" + i].count(), "should be 0 : " + removed);
+ } else {
+ check(i, false);
}
}
}
checkall();
-Random.srand( 123124 );
+Random.srand(123124);
its = max / 2;
-print( "its: " + its );
-for ( i=0; i<its; i++ ){
- x = Random.randInt( max );
- check( x , false ).drop();
- checkall( x );
- check( x , true );
- if ( ( i + 1 ) % 20 == 0 ){
- print( i + "/" + its );
+print("its: " + its);
+for (i = 0; i < its; i++) {
+ x = Random.randInt(max);
+ check(x, false).drop();
+ checkall(x);
+ check(x, true);
+ if ((i + 1) % 20 == 0) {
+ print(i + "/" + its);
}
-}
-print( "yay" );
+}
+print("yay");
mydb.dropDatabase();
diff --git a/jstests/noPassthrough/parameters.js b/jstests/noPassthrough/parameters.js
index 139e6cb5e29..a4fe35446b4 100644
--- a/jstests/noPassthrough/parameters.js
+++ b/jstests/noPassthrough/parameters.js
@@ -2,13 +2,17 @@ var dbConn = MongoRunner.runMongod();
function setAndCheckParameter(dbConn, parameterName, newValue, expectedResult) {
jsTest.log("Test setting parameter: " + parameterName + " to value: " + newValue);
- var getParameterCommand = { getParameter : 1 };
+ var getParameterCommand = {
+ getParameter: 1
+ };
getParameterCommand[parameterName] = 1;
var ret = dbConn.adminCommand(getParameterCommand);
assert.eq(ret.ok, 1, tojson(ret));
oldValue = ret[parameterName];
- var setParameterCommand = { setParameter : 1 };
+ var setParameterCommand = {
+ setParameter: 1
+ };
setParameterCommand[parameterName] = newValue;
var ret = dbConn.adminCommand(setParameterCommand);
assert.eq(ret.ok, 1, tojson(ret));
@@ -20,8 +24,7 @@ function setAndCheckParameter(dbConn, parameterName, newValue, expectedResult) {
// cases where the server does some type coersion that changes the value.
if (typeof expectedResult === "undefined") {
assert.eq(ret[parameterName], newValue, tojson(ret));
- }
- else {
+ } else {
assert.eq(ret[parameterName], expectedResult, tojson(ret));
}
return newValue;
@@ -42,7 +45,9 @@ setAndCheckParameter(dbConn, "replMonitorMaxFailedChecks", -30);
function ensureSetParameterFailure(dbConn, parameterName, newValue) {
jsTest.log("Test setting parameter: " + parameterName + " to invalid value: " + newValue);
- var setParameterCommand = { setParameter : 1 };
+ var setParameterCommand = {
+ setParameter: 1
+ };
setParameterCommand[parameterName] = newValue;
var ret = dbConn.adminCommand(setParameterCommand);
assert.eq(ret.ok, 0, tojson(ret));
diff --git a/jstests/noPassthrough/profile_options.js b/jstests/noPassthrough/profile_options.js
index b0101d47283..0e45391a7ef 100644
--- a/jstests/noPassthrough/profile_options.js
+++ b/jstests/noPassthrough/profile_options.js
@@ -4,46 +4,29 @@ load('jstests/libs/command_line/test_parsed_options.js');
jsTest.log("Testing \"profile\" command line option with profiling off");
var expectedResult = {
- "parsed" : {
- "operationProfiling" : {
- "mode" : "off"
- }
- }
+ "parsed": {"operationProfiling": {"mode": "off"}}
};
-testGetCmdLineOptsMongod({ profile : "0" }, expectedResult);
+testGetCmdLineOptsMongod({profile: "0"}, expectedResult);
jsTest.log("Testing \"profile\" command line option with profiling slow operations on");
var expectedResult = {
- "parsed" : {
- "operationProfiling" : {
- "mode" : "slowOp"
- }
- }
+ "parsed": {"operationProfiling": {"mode": "slowOp"}}
};
-testGetCmdLineOptsMongod({ profile : "1" }, expectedResult);
+testGetCmdLineOptsMongod({profile: "1"}, expectedResult);
jsTest.log("Testing \"profile\" command line option with profiling all on");
var expectedResult = {
- "parsed" : {
- "operationProfiling" : {
- "mode" : "all"
- }
- }
+ "parsed": {"operationProfiling": {"mode": "all"}}
};
-testGetCmdLineOptsMongod({ profile : "2" }, expectedResult);
+testGetCmdLineOptsMongod({profile: "2"}, expectedResult);
jsTest.log("Testing \"operationProfiling.mode\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/set_profiling.json",
- "operationProfiling" : {
- "mode" : "all"
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/set_profiling.json",
+ "operationProfiling": {"mode": "all"}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/set_profiling.json" },
- expectedResult);
-
-
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/set_profiling.json"}, expectedResult);
print(baseName + " succeeded.");
diff --git a/jstests/noPassthrough/query_yield1.js b/jstests/noPassthrough/query_yield1.js
index 50b7d4bf60d..22e95a1fb32 100644
--- a/jstests/noPassthrough/query_yield1.js
+++ b/jstests/noPassthrough/query_yield1.js
@@ -1,89 +1,91 @@
-if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
+if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
-load( "jstests/libs/slow_weekly_util.js" );
-testServer = new SlowWeeklyMongod( "query_yield1" );
-db = testServer.getDB( "test" );
+ load("jstests/libs/slow_weekly_util.js");
+ testServer = new SlowWeeklyMongod("query_yield1");
+ db = testServer.getDB("test");
-t = db.query_yield1;
-t.drop();
+ t = db.query_yield1;
+ t.drop();
-N = 20000;
-i = 0;
+ N = 20000;
+ i = 0;
-q = function(){ var x=this.n; for ( var i=0; i<250; i++ ){ x = x * 2; } return false; };
-
-while ( true ){
- function fill(){
- var bulk = t.initializeUnorderedBulkOp();
- for ( ; i<N; i++ ){
- bulk.insert({ _id: i, n: 1 });
+ q = function() {
+ var x = this.n;
+ for (var i = 0; i < 250; i++) {
+ x = x * 2;
}
- assert.writeOK(bulk.execute());
- }
-
- function timeQuery(){
- return Date.timeFunc(
- function(){
- assert.eq( 0 , t.find( q ).itcount() );
+ return false;
+ };
+
+ while (true) {
+ function fill() {
+ var bulk = t.initializeUnorderedBulkOp();
+ for (; i < N; i++) {
+ bulk.insert({_id: i, n: 1});
}
- );
-
+ assert.writeOK(bulk.execute());
+ }
+
+ function timeQuery() {
+ return Date.timeFunc(function() {
+ assert.eq(0, t.find(q).itcount());
+ });
+ }
+
+ fill();
+ timeQuery();
+ timeQuery();
+ time = timeQuery();
+ print(N + "\t" + time);
+ if (time > 2000)
+ break;
+
+ N *= 2;
}
-
- fill();
- timeQuery();
- timeQuery();
- time = timeQuery();
- print( N + "\t" + time );
- if ( time > 2000 )
- break;
-
- N *= 2;
-}
-// --- test 1
+ // --- test 1
-assert.eq( 0, db.currentOp().inprog.length , "setup broken" );
+ assert.eq(0, db.currentOp().inprog.length, "setup broken");
-join = startParallelShell( "print( 0 == db.query_yield1.find( function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; } ).itcount() ); " );
+ join = startParallelShell(
+ "print( 0 == db.query_yield1.find( function(){ var x=this.n; for ( var i=0; i<500; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
-assert.soon(
- function(){
+ assert.soon(function() {
var x = db.currentOp().inprog;
return x.length > 0;
- } , "never doing query" , 2000 , 1
-);
+ }, "never doing query", 2000, 1);
-print( "start query" );
+ print("start query");
-num = 0;
-start = new Date();
-biggestMe = 0;
-while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ){
- var me = Date.timeFunc( function(){ t.insert( { x : 1 } ); });
- var x = db.currentOp();
+ num = 0;
+ start = new Date();
+ biggestMe = 0;
+ while (((new Date()).getTime() - start) < (time * 2)) {
+ var me = Date.timeFunc(function() {
+ t.insert({x: 1});
+ });
+ var x = db.currentOp();
- if ( num++ == 0 ){
- assert.eq( 1 , x.inprog.length , "nothing in prog" );
- }
-
- if ( me > biggestMe ) {
- biggestMe = me;
- print( "biggestMe: " + biggestMe );
- }
-
- assert.gt( 200 , me , "took too long for me to run" );
+ if (num++ == 0) {
+ assert.eq(1, x.inprog.length, "nothing in prog");
+ }
- if ( x.inprog.length == 0 )
- break;
+ if (me > biggestMe) {
+ biggestMe = me;
+ print("biggestMe: " + biggestMe);
+ }
-}
+ assert.gt(200, me, "took too long for me to run");
-join();
+ if (x.inprog.length == 0)
+ break;
+ }
-var x = db.currentOp();
-assert.eq( 0 , x.inprog.length , "weird 2" );
+ join();
-testServer.stop();
+ var x = db.currentOp();
+ assert.eq(0, x.inprog.length, "weird 2");
+ testServer.stop();
}
diff --git a/jstests/noPassthrough/query_yield2.js b/jstests/noPassthrough/query_yield2.js
index 5997fa15ec8..8e5dc8dc4ec 100644
--- a/jstests/noPassthrough/query_yield2.js
+++ b/jstests/noPassthrough/query_yield2.js
@@ -1,138 +1,150 @@
-if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
+if (0) { // Test disabled until SERVER-8579 is finished. Reminder ticket: SERVER-8342
-var currentOp;
-var N;
-var i;
-var t;
-var q;
-var len;
-var num;
-var start;
-var insertTime;
+ var currentOp;
+ var N;
+ var i;
+ var t;
+ var q;
+ var len;
+ var num;
+ var start;
+ var insertTime;
-load( "jstests/libs/slow_weekly_util.js" );
-testServer = new SlowWeeklyMongod( "query_yield2" );
-db = testServer.getDB( "test" );
+ load("jstests/libs/slow_weekly_util.js");
+ testServer = new SlowWeeklyMongod("query_yield2");
+ db = testServer.getDB("test");
-t = db.query_yield2;
-t.drop();
+ t = db.query_yield2;
+ t.drop();
-N = 200;
-i = 0;
+ N = 200;
+ i = 0;
-q = function() { var x=this.n; for ( var i=0; i<25000; i++ ) { x = x * 2; } return false; };
-
-print( "Shell ==== Creating test.query_yield2 collection ..." );
-print( "Shell ==== Adding documents until a time-wasting query takes over 2 seconds to complete" );
-while ( true ){
- function fill() {
- var bulk = t.initializeUnorderedBulkOp();
- for ( ; i < N; ++i ) {
- bulk.insert({ _id: i , n: 1 });
+ q = function() {
+ var x = this.n;
+ for (var i = 0; i < 25000; i++) {
+ x = x * 2;
}
- assert.writeOK(bulk.execute());
- }
- function timeQuery() {
- return Date.timeFunc(
- function() {
- assert.eq( 0 , t.find( q ).itcount() );
+ return false;
+ };
+
+ print("Shell ==== Creating test.query_yield2 collection ...");
+ print(
+ "Shell ==== Adding documents until a time-wasting query takes over 2 seconds to complete");
+ while (true) {
+ function fill() {
+ var bulk = t.initializeUnorderedBulkOp();
+ for (; i < N; ++i) {
+ bulk.insert({_id: i, n: 1});
}
- );
- }
- print( "Shell ==== Adding document IDs from " + i + " to " + (N - 1) );
- fill();
- print( "Shell ==== Running warm-up query 1" );
- timeQuery();
- print( "Shell ==== Running warm-up query 2" );
- timeQuery();
- print( "Shell ==== Running timed query ..." );
- time = timeQuery();
- print( "Shell ==== Query across " + N + " documents took " + time + " ms" );
- if ( time > 2000 ) {
- print( "Shell ==== Reached desired 2000 ms mark (at " + time + " ms), proceding to next step" );
- break;
+ assert.writeOK(bulk.execute());
+ }
+ function timeQuery() {
+ return Date.timeFunc(function() {
+ assert.eq(0, t.find(q).itcount());
+ });
+ }
+ print("Shell ==== Adding document IDs from " + i + " to " + (N - 1));
+ fill();
+ print("Shell ==== Running warm-up query 1");
+ timeQuery();
+ print("Shell ==== Running warm-up query 2");
+ timeQuery();
+ print("Shell ==== Running timed query ...");
+ time = timeQuery();
+ print("Shell ==== Query across " + N + " documents took " + time + " ms");
+ if (time > 2000) {
+ print("Shell ==== Reached desired 2000 ms mark (at " + time +
+ " ms), proceding to next step");
+ break;
+ }
+ N *= 2;
+ print("Shell ==== Did not reach 2000 ms, increasing fill point to " + N + " documents");
}
- N *= 2;
- print( "Shell ==== Did not reach 2000 ms, increasing fill point to " + N + " documents" );
-}
-print( "Shell ==== Testing db.currentOp to make sure nothing is in progress" );
-print( "Shell ==== Dump of db.currentOp:" );
-currentOp = db.currentOp();
-print( tojson( currentOp ) );
-len = currentOp.inprog.length;
-if ( len ) {
- print( "Shell ==== This test is broken: db.currentOp().inprog.length is " + len );
- throw Error("query_yield2.js test is broken");
-}
-print( "Shell ==== The test is working so far: db.currentOp().inprog.length is " + len );
+ print("Shell ==== Testing db.currentOp to make sure nothing is in progress");
+ print("Shell ==== Dump of db.currentOp:");
+ currentOp = db.currentOp();
+ print(tojson(currentOp));
+ len = currentOp.inprog.length;
+ if (len) {
+ print("Shell ==== This test is broken: db.currentOp().inprog.length is " + len);
+ throw Error("query_yield2.js test is broken");
+ }
+ print("Shell ==== The test is working so far: db.currentOp().inprog.length is " + len);
-print( "Shell ==== Starting parallel shell to test if slow query will yield to write" );
-join = startParallelShell( "print( 0 == db.query_yield2.find( function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; } ).itcount() ); " );
+ print("Shell ==== Starting parallel shell to test if slow query will yield to write");
+ join = startParallelShell(
+ "print( 0 == db.query_yield2.find( function(){ var x=this.n; for ( var i=0; i<50000; i++ ){ x = x * 2; } return false; } ).itcount() ); ");
-print( "Shell ==== Waiting until db.currentOp().inprog becomes non-empty" );
-assert.soon(
- function(){
+ print("Shell ==== Waiting until db.currentOp().inprog becomes non-empty");
+ assert.soon(function() {
currentOp = db.currentOp();
len = currentOp.inprog.length;
- if ( len ) {
- print( "Shell ==== Wait satisfied: db.currentOp().inprog.length is " + len );
- print( "Shell ==== Dump of db.currentOp:" );
- print( tojson( currentOp ) );
- print( "Shell ==== Checking if this currentOp is the query we are waiting for" );
- if ( currentOp.inprog[0].ns == "test.query_yield2" && currentOp.inprog[0].query["$where"] ) {
- print( "Shell ==== Yes, we found the query we are waiting for" );
+ if (len) {
+ print("Shell ==== Wait satisfied: db.currentOp().inprog.length is " + len);
+ print("Shell ==== Dump of db.currentOp:");
+ print(tojson(currentOp));
+ print("Shell ==== Checking if this currentOp is the query we are waiting for");
+ if (currentOp.inprog[0].ns == "test.query_yield2" &&
+ currentOp.inprog[0].query["$where"]) {
+ print("Shell ==== Yes, we found the query we are waiting for");
return true;
}
- if ( currentOp.inprog[0].ns == "" && currentOp.inprog[0].query["whatsmyuri"] ) {
- print( "Shell ==== No, we found a \"whatsmyuri\" query, waiting some more" );
+ if (currentOp.inprog[0].ns == "" && currentOp.inprog[0].query["whatsmyuri"]) {
+ print("Shell ==== No, we found a \"whatsmyuri\" query, waiting some more");
return false;
}
- print( "Shell ==== No, we found something other than our query or a \"whatsmyuri\", waiting some more" );
+ print(
+ "Shell ==== No, we found something other than our query or a \"whatsmyuri\", waiting some more");
return false;
}
return len > 0;
- } , "Wait failed, db.currentOp().inprog never became non-empty" , 2000 , 1
-);
+ }, "Wait failed, db.currentOp().inprog never became non-empty", 2000, 1);
-print( "Shell ==== Now that we have seen db.currentOp().inprog show that our query is running, we start the real test" );
-num = 0;
-start = new Date();
-while ( ( (new Date()).getTime() - start ) < ( time * 2 ) ) {
- if ( num == 0 ) {
- print( "Shell ==== Starting loop " + num + ", inserting 1 document" );
- }
- insertTime = Date.timeFunc( function() { t.insert({ x: 1 } ); });
- currentOp = db.currentOp();
- len = currentOp.inprog.length;
- print( "Shell ==== Time to insert document " + num + " was " + insertTime + " ms, db.currentOp().inprog.length is " + len );
- if ( num++ == 0 ) {
- if ( len != 1 ) {
- print( "Shell ==== TEST FAILED! db.currentOp().inprog.length is " + len );
- print( "Shell ==== Dump of db.currentOp:" );
- print( tojson( currentOp ) );
- throw Error("TEST FAILED!");
+ print(
+ "Shell ==== Now that we have seen db.currentOp().inprog show that our query is running, we start the real test");
+ num = 0;
+ start = new Date();
+ while (((new Date()).getTime() - start) < (time * 2)) {
+ if (num == 0) {
+ print("Shell ==== Starting loop " + num + ", inserting 1 document");
+ }
+ insertTime = Date.timeFunc(function() {
+ t.insert({x: 1});
+ });
+ currentOp = db.currentOp();
+ len = currentOp.inprog.length;
+ print("Shell ==== Time to insert document " + num + " was " + insertTime +
+ " ms, db.currentOp().inprog.length is " + len);
+ if (num++ == 0) {
+ if (len != 1) {
+ print("Shell ==== TEST FAILED! db.currentOp().inprog.length is " + len);
+ print("Shell ==== Dump of db.currentOp:");
+ print(tojson(currentOp));
+ throw Error("TEST FAILED!");
+ }
+ }
+ assert.gt(200,
+ insertTime,
+ "Insert took too long (" + insertTime + " ms), should be less than 200 ms");
+ if (currentOp.inprog.length == 0) {
+ break;
}
}
- assert.gt( 200, insertTime, "Insert took too long (" + insertTime + " ms), should be less than 200 ms" );
- if ( currentOp.inprog.length == 0 ) {
- break;
- }
-}
-
-print( "Shell ==== Finished inserting documents, reader also finished" );
-print( "Shell ==== Waiting for parallel shell to exit" );
-join();
-currentOp = db.currentOp();
-len = currentOp.inprog.length;
-if ( len != 0 ) {
- print( "Shell ==== Final sanity check FAILED! db.currentOp().inprog.length is " + len );
- print( "Shell ==== Dump of db.currentOp:" );
- print( tojson( currentOp ) );
- throw Error("TEST FAILED!");
-}
-print( "Shell ==== Test completed successfully, shutting down server" );
-testServer.stop();
+ print("Shell ==== Finished inserting documents, reader also finished");
+ print("Shell ==== Waiting for parallel shell to exit");
+ join();
+ currentOp = db.currentOp();
+ len = currentOp.inprog.length;
+ if (len != 0) {
+ print("Shell ==== Final sanity check FAILED! db.currentOp().inprog.length is " + len);
+ print("Shell ==== Dump of db.currentOp:");
+ print(tojson(currentOp));
+ throw Error("TEST FAILED!");
+ }
+ print("Shell ==== Test completed successfully, shutting down server");
+ testServer.stop();
}
diff --git a/jstests/noPassthrough/query_yield_reset_timer.js b/jstests/noPassthrough/query_yield_reset_timer.js
index 8d2e26cc9e5..1ecb4ec7b60 100644
--- a/jstests/noPassthrough/query_yield_reset_timer.js
+++ b/jstests/noPassthrough/query_yield_reset_timer.js
@@ -9,14 +9,16 @@
// Configure the server so that queries are expected to yield after every 10 work cycles, or
// after every 500 milliseconds (whichever comes first). In addition, enable a failpoint that
// introduces a sleep delay of 1 second during each yield.
- assert.commandWorked(coll.getDB().adminCommand({setParameter: 1,
- internalQueryExecYieldIterations: 10}));
- assert.commandWorked(coll.getDB().adminCommand({setParameter: 1,
- internalQueryExecYieldPeriodMS: 500}));
- assert.commandWorked(coll.getDB().adminCommand({configureFailPoint: "setYieldAllLocksWait",
- namespace: coll.getFullName(),
- mode: "alwaysOn",
- data: {waitForMillis: 1000}}));
+ assert.commandWorked(
+ coll.getDB().adminCommand({setParameter: 1, internalQueryExecYieldIterations: 10}));
+ assert.commandWorked(
+ coll.getDB().adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 500}));
+ assert.commandWorked(coll.getDB().adminCommand({
+ configureFailPoint: "setYieldAllLocksWait",
+ namespace: coll.getFullName(),
+ mode: "alwaysOn",
+ data: {waitForMillis: 1000}
+ }));
// Insert 40 documents in the collection, perform a collection scan, and verify that it yields
// about 4 times. Since each group of 10 documents should always be processed in less than 500
@@ -30,17 +32,13 @@
// not during query execution, it should never count towards our 500 millisecond threshold for a
// timing-based yield (incorrect accounting for timing-based yields was the cause for
// SERVER-21341).
- for (var i=0; i<40; ++i) {
+ for (var i = 0; i < 40; ++i) {
assert.writeOK(coll.insert({}));
}
var explainRes = coll.find().explain("executionStats");
// We expect 4 yields, but we throw in a fudge factor of 2 for test reliability. We also can
// use "saveState" calls as a proxy for "number of yields" here, because we expect our entire
// result set to be returned in a single batch.
- assert.gt(explainRes.executionStats.executionStages.saveState,
- 4 / 2,
- tojson(explainRes));
- assert.lt(explainRes.executionStats.executionStages.saveState,
- 4 * 2,
- tojson(explainRes));
+ assert.gt(explainRes.executionStats.executionStages.saveState, 4 / 2, tojson(explainRes));
+ assert.lt(explainRes.executionStats.executionStages.saveState, 4 * 2, tojson(explainRes));
})();
diff --git a/jstests/noPassthrough/read_committed_lookup.js b/jstests/noPassthrough/read_committed_lookup.js
index 1569f28fe25..368dfa311a1 100644
--- a/jstests/noPassthrough/read_committed_lookup.js
+++ b/jstests/noPassthrough/read_committed_lookup.js
@@ -61,7 +61,9 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
}
// Seed matching data.
- var majorityWriteConcernObj = {writeConcern: {w: "majority", wtimeout: 60*1000}};
+ var majorityWriteConcernObj = {
+ writeConcern: {w: "majority", wtimeout: 60 * 1000}
+ };
var localId = db.local.insertOne({foreignKey: "x"}, majorityWriteConcernObj).insertedId;
var foreignId = db.foreign.insertOne({matchedField: "x"}, majorityWriteConcernObj).insertedId;
@@ -71,34 +73,28 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
aggregate: "local",
pipeline: [
{
- $lookup: {
- from: "foreign",
- localField: "foreignKey",
- foreignField: "matchedField",
- as: "match",
- }
+ $lookup: {
+ from: "foreign",
+ localField: "foreignKey",
+ foreignField: "matchedField",
+ as: "match",
+ }
},
],
readConcern: {
level: "majority",
}
};
- var expectedMatchedResult = [
- {
- _id: localId,
- foreignKey: "x",
- match: [
- {_id: foreignId, matchedField: "x"},
- ],
- }
- ];
- var expectedUnmatchedResult = [
- {
- _id: localId,
- foreignKey: "x",
- match: [],
- }
- ];
+ var expectedMatchedResult = [{
+ _id: localId,
+ foreignKey: "x",
+ match: [{_id: foreignId, matchedField: "x"}, ],
+ }];
+ var expectedUnmatchedResult = [{
+ _id: localId,
+ foreignKey: "x",
+ match: [],
+ }];
var result = db.runCommand(aggCmdObj).result;
assert.eq(result, expectedMatchedResult);
diff --git a/jstests/noPassthrough/read_concern_helper.js b/jstests/noPassthrough/read_concern_helper.js
index 09146092629..dfc4909d6f6 100644
--- a/jstests/noPassthrough/read_concern_helper.js
+++ b/jstests/noPassthrough/read_concern_helper.js
@@ -2,33 +2,49 @@
// should fail without --enableMajorityReadConcern enabled and then reruns the find commands with
// that option enabled.
(function() {
-"use strict";
-var testServer = MongoRunner.runMongod();
-if (!testServer.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- MongoRunner.stopMongod(testServer);
- return;
-}
-var coll = testServer.getDB("test").readMajority;
+ "use strict";
+ var testServer = MongoRunner.runMongod();
+ if (!testServer.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ MongoRunner.stopMongod(testServer);
+ return;
+ }
+ var coll = testServer.getDB("test").readMajority;
-assert.writeOK(coll.insert({_id: "foo"}));
-assert.throws(function() { coll.find({_id: "foo"}).readConcern("majority").itcount(); });
-assert.throws(function() { coll.findOne({_id: "foo"}, {}, {}, "majority"); });
-assert.throws(function() { coll.count({_id: "foo"}, {readConcern: "majority"}); });
-assert.throws(function() { coll.find({_id: "foo"}).readConcern("majority").count(); });
+ assert.writeOK(coll.insert({_id: "foo"}));
+ assert.throws(function() {
+ coll.find({_id: "foo"}).readConcern("majority").itcount();
+ });
+ assert.throws(function() {
+ coll.findOne({_id: "foo"}, {}, {}, "majority");
+ });
+ assert.throws(function() {
+ coll.count({_id: "foo"}, {readConcern: "majority"});
+ });
+ assert.throws(function() {
+ coll.find({_id: "foo"}).readConcern("majority").count();
+ });
-MongoRunner.stopMongod(testServer);
-testServer = MongoRunner.runMongod({
- restart: true,
- port: testServer.port,
- enableMajorityReadConcern: "",
-});
-coll = testServer.getDB("test").readMajority;
+ MongoRunner.stopMongod(testServer);
+ testServer = MongoRunner.runMongod({
+ restart: true,
+ port: testServer.port,
+ enableMajorityReadConcern: "",
+ });
+ coll = testServer.getDB("test").readMajority;
-assert.doesNotThrow(function() { coll.find({_id: "foo"}).readConcern("majority").itcount(); });
-assert.doesNotThrow(function() { coll.findOne({_id: "foo"}, {}, {}, "majority"); });
-assert.doesNotThrow(function() { coll.count({_id: "foo"}, {readConcern: "majority"}); });
-assert.doesNotThrow(function() { coll.find({_id: "foo"}).readConcern("majority").count(); });
+ assert.doesNotThrow(function() {
+ coll.find({_id: "foo"}).readConcern("majority").itcount();
+ });
+ assert.doesNotThrow(function() {
+ coll.findOne({_id: "foo"}, {}, {}, "majority");
+ });
+ assert.doesNotThrow(function() {
+ coll.count({_id: "foo"}, {readConcern: "majority"});
+ });
+ assert.doesNotThrow(function() {
+ coll.find({_id: "foo"}).readConcern("majority").count();
+ });
-MongoRunner.stopMongod(testServer);
+ MongoRunner.stopMongod(testServer);
}());
diff --git a/jstests/noPassthrough/read_majority.js b/jstests/noPassthrough/read_majority.js
index b1f1a5801f8..3d84fce0911 100644
--- a/jstests/noPassthrough/read_majority.js
+++ b/jstests/noPassthrough/read_majority.js
@@ -17,181 +17,191 @@
load("jstests/libs/analyze_plan.js");
(function() {
-"use strict";
-
-// This test needs its own mongod since the snapshot names must be in increasing order and once you
-// have a majority commit point it is impossible to go back to not having one.
-var testServer = MongoRunner.runMongod({setParameter: 'testingSnapshotBehaviorInIsolation=true'});
-var db = testServer.getDB("test");
-var t = db.readMajority;
-
-function assertNoReadMajoritySnapshotAvailable() {
- var res = t.runCommand('find',
- {batchSize: 2, readConcern: {level: "majority"}, maxTimeMS: 1000});
- assert.commandFailed(res);
- assert.eq(res.code, ErrorCodes.ExceededTimeLimit);
-}
-
-function getReadMajorityCursor() {
- var res = t.runCommand('find', {batchSize: 2, readConcern: {level: "majority"}});
- assert.commandWorked(res);
- return new DBCommandCursor(db.getMongo(), res, 2);
-}
-
-function getReadMajorityAggCursor() {
- var res = t.runCommand('aggregate', {cursor:{batchSize: 2}, readConcern: {level: "majority"}});
- assert.commandWorked(res);
- return new DBCommandCursor(db.getMongo(), res, 2);
-}
-
-function getExplainPlan(query) {
- var res = db.runCommand({explain: {find: t.getName(), filter: query}});
- return assert.commandWorked(res).queryPlanner.winningPlan;
-}
-
-//
-// Actual Test
-//
-
-if (!db.serverStatus().storageEngine.supportsCommittedReads) {
- print("Skipping read_majority.js since storageEngine doesn't support it.");
- return;
-}
-
-// Ensure killOp will work on an op that is waiting for snapshots to be created
-var blockedReader = startParallelShell(
+ "use strict";
+
+ // This test needs its own mongod since the snapshot names must be in increasing order and once
+ // you
+ // have a majority commit point it is impossible to go back to not having one.
+ var testServer =
+ MongoRunner.runMongod({setParameter: 'testingSnapshotBehaviorInIsolation=true'});
+ var db = testServer.getDB("test");
+ var t = db.readMajority;
+
+ function assertNoReadMajoritySnapshotAvailable() {
+ var res = t.runCommand('find',
+ {batchSize: 2, readConcern: {level: "majority"}, maxTimeMS: 1000});
+ assert.commandFailed(res);
+ assert.eq(res.code, ErrorCodes.ExceededTimeLimit);
+ }
+
+ function getReadMajorityCursor() {
+ var res = t.runCommand('find', {batchSize: 2, readConcern: {level: "majority"}});
+ assert.commandWorked(res);
+ return new DBCommandCursor(db.getMongo(), res, 2);
+ }
+
+ function getReadMajorityAggCursor() {
+ var res =
+ t.runCommand('aggregate', {cursor: {batchSize: 2}, readConcern: {level: "majority"}});
+ assert.commandWorked(res);
+ return new DBCommandCursor(db.getMongo(), res, 2);
+ }
+
+ function getExplainPlan(query) {
+ var res = db.runCommand({explain: {find: t.getName(), filter: query}});
+ return assert.commandWorked(res).queryPlanner.winningPlan;
+ }
+
+ //
+ // Actual Test
+ //
+
+ if (!db.serverStatus().storageEngine.supportsCommittedReads) {
+ print("Skipping read_majority.js since storageEngine doesn't support it.");
+ return;
+ }
+
+ // Ensure killOp will work on an op that is waiting for snapshots to be created
+ var blockedReader = startParallelShell(
"db.readMajority.runCommand('find', {batchSize: 2, readConcern: {level: 'majority'}});",
testServer.port);
-assert.soon(function() {
- var curOps = db.currentOp(true);
- jsTestLog("curOp output: " + tojson(curOps));
- for (var i in curOps.inprog) {
- var op = curOps.inprog[i];
- if (op.op === 'query' && op.ns === "test.$cmd" && op.query.find === 'readMajority') {
- db.killOp(op.opid);
- return true;
+ assert.soon(function() {
+ var curOps = db.currentOp(true);
+ jsTestLog("curOp output: " + tojson(curOps));
+ for (var i in curOps.inprog) {
+ var op = curOps.inprog[i];
+ if (op.op === 'query' && op.ns === "test.$cmd" && op.query.find === 'readMajority') {
+ db.killOp(op.opid);
+ return true;
+ }
}
+ return false;
+ }, "could not kill an op that was waiting for a snapshot", 60 * 1000);
+ blockedReader();
+
+ var snapshot1 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assert.commandWorked(db.runCommand({create: "readMajority"}));
+ var snapshot2 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+
+ for (var i = 0; i < 10; i++) {
+ assert.writeOK(t.insert({_id: i, version: 3}));
}
- return false;
-}, "could not kill an op that was waiting for a snapshot", 60 * 1000);
-blockedReader();
-
-var snapshot1 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-assert.commandWorked(db.runCommand({create: "readMajority"}));
-var snapshot2 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-
-for (var i = 0; i < 10; i++) { assert.writeOK(t.insert({_id: i, version: 3})); }
-
-assertNoReadMajoritySnapshotAvailable();
-
-var snapshot3 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-
-assertNoReadMajoritySnapshotAvailable();
-
-assert.writeOK(t.update({}, {$set: {version: 4}}, false, true));
-var snapshot4 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-
-// Collection didn't exist in snapshot 1.
-assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot1}));
-assertNoReadMajoritySnapshotAvailable();
-
-// Collection existed but was empty in snapshot 2.
-assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot2}));
-assert.eq(getReadMajorityCursor().itcount(), 0);
-assert.eq(getReadMajorityAggCursor().itcount(), 0);
-
-// In snapshot 3 the collection was filled with {version: 3} documents.
-assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot3}));
-assert.eq(getReadMajorityAggCursor().itcount(), 10);
-getReadMajorityAggCursor().forEach(function(doc) {
- // Note: agg uses internal batching so can't reliably test flipping snapshot. However, it uses
- // the same mechanism as find, so if one works, both should.
- assert.eq(doc.version, 3);
-});
-
-assert.eq(getReadMajorityCursor().itcount(), 10);
-var cursor = getReadMajorityCursor(); // Note: uses batchsize=2.
-assert.eq(cursor.next().version, 3);
-assert.eq(cursor.next().version, 3);
-assert(!cursor.objsLeftInBatch());
-
-// In snapshot 4 the collection was filled with {version: 3} documents.
-assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot4}));
-
-// This triggers a getMore which sees the new version.
-assert.eq(cursor.next().version, 4);
-assert.eq(cursor.next().version, 4);
-
-// Adding an index bumps the min snapshot for a collection as of SERVER-20260. This may change to
-// just filter that index out from query planning as part of SERVER-20439.
-t.ensureIndex({version: 1});
-assertNoReadMajoritySnapshotAvailable();
-
-// To use the index, a snapshot created after the index was completed must be marked committed.
-var newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-assertNoReadMajoritySnapshotAvailable();
-assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
-assert.eq(getReadMajorityCursor().itcount(), 10);
-assert.eq(getReadMajorityAggCursor().itcount(), 10);
-assert(isIxscan(getExplainPlan({version: 1})));
-
-// Dropping an index does bump the min snapshot.
-t.dropIndex({version: 1});
-assertNoReadMajoritySnapshotAvailable();
-
-// To use the collection again, a snapshot created after the dropIndex must be marked committed.
-newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-assertNoReadMajoritySnapshotAvailable();
-assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
-assert.eq(getReadMajorityCursor().itcount(), 10);
-
-// Reindex bumps the min snapshot.
-t.reIndex();
-assertNoReadMajoritySnapshotAvailable();
-newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-assertNoReadMajoritySnapshotAvailable();
-assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
-assert.eq(getReadMajorityCursor().itcount(), 10);
-
-// Repair bumps the min snapshot.
-db.repairDatabase();
-assertNoReadMajoritySnapshotAvailable();
-newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-assertNoReadMajoritySnapshotAvailable();
-assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
-assert.eq(getReadMajorityCursor().itcount(), 10);
-assert.eq(getReadMajorityAggCursor().itcount(), 10);
-
-// Dropping the collection is visible in the committed snapshot, even though it hasn't been marked
-// committed yet. This is allowed by the current specification even though it violates strict
-// read-committed semantics since we don't guarantee them on metadata operations.
-t.drop();
-assert.eq(getReadMajorityCursor().itcount(), 0);
-assert.eq(getReadMajorityAggCursor().itcount(), 0);
-
-// Creating a new collection with the same name hides the collection until that operation is in the
-// committed view.
-t.insert({_id:0, version: 8});
-assertNoReadMajoritySnapshotAvailable();
-newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
-assertNoReadMajoritySnapshotAvailable();
-assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
-assert.eq(getReadMajorityCursor().itcount(), 1);
-assert.eq(getReadMajorityAggCursor().itcount(), 1);
-
-// Commands that only support read concern 'local', (such as ping) must work when it is explicitly
-// specified and fail when 'majority' is specified.
-assert.commandWorked(db.adminCommand({ping: 1, readConcern: {level: 'local'}}));
-var res = assert.commandFailed(db.adminCommand({ping: 1, readConcern: {level: 'majority'}}));
-assert.eq(res.code, ErrorCodes.InvalidOptions);
-
-// Agg $out also doesn't support read concern majority.
-assert.commandWorked(t.runCommand('aggregate', {pipeline: [{$out: 'out'}],
- readConcern: {level: 'local'}}));
-var res = assert.commandFailed(t.runCommand('aggregate', {pipeline: [{$out: 'out'}],
- readConcern: {level: 'majority'}}));
-assert.eq(res.code, ErrorCodes.InvalidOptions);
-
-MongoRunner.stopMongod(testServer);
+
+ assertNoReadMajoritySnapshotAvailable();
+
+ var snapshot3 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+
+ assertNoReadMajoritySnapshotAvailable();
+
+ assert.writeOK(t.update({}, {$set: {version: 4}}, false, true));
+ var snapshot4 = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+
+ // Collection didn't exist in snapshot 1.
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot1}));
+ assertNoReadMajoritySnapshotAvailable();
+
+ // Collection existed but was empty in snapshot 2.
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot2}));
+ assert.eq(getReadMajorityCursor().itcount(), 0);
+ assert.eq(getReadMajorityAggCursor().itcount(), 0);
+
+ // In snapshot 3 the collection was filled with {version: 3} documents.
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot3}));
+ assert.eq(getReadMajorityAggCursor().itcount(), 10);
+ getReadMajorityAggCursor().forEach(function(doc) {
+ // Note: agg uses internal batching so can't reliably test flipping snapshot. However, it
+ // uses
+ // the same mechanism as find, so if one works, both should.
+ assert.eq(doc.version, 3);
+ });
+
+ assert.eq(getReadMajorityCursor().itcount(), 10);
+ var cursor = getReadMajorityCursor(); // Note: uses batchsize=2.
+ assert.eq(cursor.next().version, 3);
+ assert.eq(cursor.next().version, 3);
+ assert(!cursor.objsLeftInBatch());
+
+ // In snapshot 4 the collection was filled with {version: 3} documents.
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": snapshot4}));
+
+ // This triggers a getMore which sees the new version.
+ assert.eq(cursor.next().version, 4);
+ assert.eq(cursor.next().version, 4);
+
+ // Adding an index bumps the min snapshot for a collection as of SERVER-20260. This may change
+ // to
+ // just filter that index out from query planning as part of SERVER-20439.
+ t.ensureIndex({version: 1});
+ assertNoReadMajoritySnapshotAvailable();
+
+ // To use the index, a snapshot created after the index was completed must be marked committed.
+ var newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoReadMajoritySnapshotAvailable();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getReadMajorityCursor().itcount(), 10);
+ assert.eq(getReadMajorityAggCursor().itcount(), 10);
+ assert(isIxscan(getExplainPlan({version: 1})));
+
+ // Dropping an index does bump the min snapshot.
+ t.dropIndex({version: 1});
+ assertNoReadMajoritySnapshotAvailable();
+
+ // To use the collection again, a snapshot created after the dropIndex must be marked committed.
+ newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoReadMajoritySnapshotAvailable();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getReadMajorityCursor().itcount(), 10);
+
+ // Reindex bumps the min snapshot.
+ t.reIndex();
+ assertNoReadMajoritySnapshotAvailable();
+ newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoReadMajoritySnapshotAvailable();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getReadMajorityCursor().itcount(), 10);
+
+ // Repair bumps the min snapshot.
+ db.repairDatabase();
+ assertNoReadMajoritySnapshotAvailable();
+ newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoReadMajoritySnapshotAvailable();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getReadMajorityCursor().itcount(), 10);
+ assert.eq(getReadMajorityAggCursor().itcount(), 10);
+
+ // Dropping the collection is visible in the committed snapshot, even though it hasn't been
+ // marked
+ // committed yet. This is allowed by the current specification even though it violates strict
+ // read-committed semantics since we don't guarantee them on metadata operations.
+ t.drop();
+ assert.eq(getReadMajorityCursor().itcount(), 0);
+ assert.eq(getReadMajorityAggCursor().itcount(), 0);
+
+ // Creating a new collection with the same name hides the collection until that operation is in
+ // the
+ // committed view.
+ t.insert({_id: 0, version: 8});
+ assertNoReadMajoritySnapshotAvailable();
+ newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name;
+ assertNoReadMajoritySnapshotAvailable();
+ assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot}));
+ assert.eq(getReadMajorityCursor().itcount(), 1);
+ assert.eq(getReadMajorityAggCursor().itcount(), 1);
+
+ // Commands that only support read concern 'local', (such as ping) must work when it is
+ // explicitly
+ // specified and fail when 'majority' is specified.
+ assert.commandWorked(db.adminCommand({ping: 1, readConcern: {level: 'local'}}));
+ var res = assert.commandFailed(db.adminCommand({ping: 1, readConcern: {level: 'majority'}}));
+ assert.eq(res.code, ErrorCodes.InvalidOptions);
+
+ // Agg $out also doesn't support read concern majority.
+ assert.commandWorked(
+ t.runCommand('aggregate', {pipeline: [{$out: 'out'}], readConcern: {level: 'local'}}));
+ var res = assert.commandFailed(
+ t.runCommand('aggregate', {pipeline: [{$out: 'out'}], readConcern: {level: 'majority'}}));
+ assert.eq(res.code, ErrorCodes.InvalidOptions);
+
+ MongoRunner.stopMongod(testServer);
}());
diff --git a/jstests/noPassthrough/read_only_command_line.js b/jstests/noPassthrough/read_only_command_line.js
index cbbe2f56a6a..f3cbed2429f 100644
--- a/jstests/noPassthrough/read_only_command_line.js
+++ b/jstests/noPassthrough/read_only_command_line.js
@@ -8,13 +8,14 @@
var dbpath = mongod.dbpath;
// ensure dbpath gets set up.
- assert.writeOK(mongod.getDB("foo").x.insert({x:1}));
+ assert.writeOK(mongod.getDB("foo").x.insert({x: 1}));
assert(!mongod.getDB("admin").isMaster().readOnly);
assert(!mongod.getDB("admin").serverStatus().storageEngine.readOnly);
MongoRunner.stopMongod(mongod);
- mongod = MongoRunner.runMongod({storageEngine: "mmapv1", readOnly: "", dbpath: dbpath, noCleanData: true});
+ mongod = MongoRunner.runMongod(
+ {storageEngine: "mmapv1", readOnly: "", dbpath: dbpath, noCleanData: true});
assert(mongod.getDB("admin").isMaster().readOnly);
assert(mongod.getDB("admin").serverStatus().storageEngine.readOnly);
MongoRunner.stopMongod(mongod);
diff --git a/jstests/noPassthrough/refresh_syncclusterconn.js b/jstests/noPassthrough/refresh_syncclusterconn.js
index b12cf504d75..90135fb9d51 100644
--- a/jstests/noPassthrough/refresh_syncclusterconn.js
+++ b/jstests/noPassthrough/refresh_syncclusterconn.js
@@ -14,14 +14,14 @@ var mongoC = MongoRunner.runMongod({});
var mongoSCC = new Mongo(mongoA.host + "," + mongoB.host + "," + mongoC.host);
MongoRunner.stopMongod(mongoA);
-MongoRunner.runMongod({ restart: mongoA.runId });
+MongoRunner.runMongod({restart: mongoA.runId});
try {
- mongoSCC.getCollection("foo.bar").insert({ x : 1});
- assert(false , "must throw an insert exception");
+ mongoSCC.getCollection("foo.bar").insert({x: 1});
+ assert(false, "must throw an insert exception");
} catch (e) {
printjson(e);
}
-mongoSCC.getCollection("foo.bar").insert({ blah : "blah" });
+mongoSCC.getCollection("foo.bar").insert({blah: "blah"});
assert.eq(null, mongoSCC.getDB("foo").getLastError());
diff --git a/jstests/noPassthrough/repair2.js b/jstests/noPassthrough/repair2.js
index ec3e1f8a299..58032bd17d4 100644
--- a/jstests/noPassthrough/repair2.js
+++ b/jstests/noPassthrough/repair2.js
@@ -2,30 +2,29 @@
baseName = "jstests_repair2";
-load( "jstests/libs/slow_weekly_util.js" );
-testServer = new SlowWeeklyMongod( baseName );
+load("jstests/libs/slow_weekly_util.js");
+testServer = new SlowWeeklyMongod(baseName);
-t = testServer.getDB( baseName )[ baseName ];
+t = testServer.getDB(baseName)[baseName];
t.drop();
-var awaitShell = startParallelShell( "db = db.getSiblingDB( '" + baseName + "');" +
- "for( i = 0; i < 10; ++i ) { " +
- "db.repairDatabase();" +
- "sleep( 5000 );" +
- " }", testServer.port );
+var awaitShell = startParallelShell("db = db.getSiblingDB( '" + baseName + "');" +
+ "for( i = 0; i < 10; ++i ) { " + "db.repairDatabase();" +
+ "sleep( 5000 );" + " }",
+ testServer.port);
-for( i = 0; i < 30; ++i ) {
+for (i = 0; i < 30; ++i) {
var bulk = t.initializeOrderedBulkOp();
- for( j = 0; j < 5000; ++j ) {
- bulk.insert({ _id: j } );
+ for (j = 0; j < 5000; ++j) {
+ bulk.insert({_id: j});
}
- for( j = 0; j < 5000; ++j ) {
- bulk.find({ _id: j, $isolated: 1 }).remove();
+ for (j = 0; j < 5000; ++j) {
+ bulk.find({_id: j, $isolated: 1}).remove();
}
assert.writeOK(bulk.execute());
- assert.eq( 0, t.count() );
+ assert.eq(0, t.count());
}
awaitShell();
diff --git a/jstests/noPassthrough/repl_write_threads_start_param.js b/jstests/noPassthrough/repl_write_threads_start_param.js
index 1a69da206dc..f3a0556a43b 100644
--- a/jstests/noPassthrough/repl_write_threads_start_param.js
+++ b/jstests/noPassthrough/repl_write_threads_start_param.js
@@ -26,13 +26,13 @@
mongo = MongoRunner.runMongod({setParameter: 'replWriterThreadCount=24'});
assert.neq(null, mongo, "mongod failed to start with a suitable replWriterThreadCount value");
assert(!rawMongoProgramOutput().match("replWriterThreadCount must be between 1 and 256"),
- "despite accepting the replWriterThreadCount value, mongod logged an error");
+ "despite accepting the replWriterThreadCount value, mongod logged an error");
// getParameter to confirm the value was set
var result = mongo.getDB("admin").runCommand({getParameter: 1, replWriterThreadCount: 1});
assert.eq(24, result.replWriterThreadCount, "replWriterThreadCount was not set internally");
// setParameter to ensure it is not possible
- assert.commandFailed(mongo.getDB("admin").runCommand({setParameter: 1,
- replWriterThreadCount: 1}));
+ assert.commandFailed(
+ mongo.getDB("admin").runCommand({setParameter: 1, replWriterThreadCount: 1}));
}());
diff --git a/jstests/noPassthrough/server22767.js b/jstests/noPassthrough/server22767.js
index 9b915ce0940..74efd3eade4 100644
--- a/jstests/noPassthrough/server22767.js
+++ b/jstests/noPassthrough/server22767.js
@@ -1,14 +1,15 @@
// test that the mongos doesn't segfault when it receives malformed BSON
-var st = new ShardingTest({shards:1});
+var st = new ShardingTest({shards: 1});
var testDB = st.getDB('test');
-testDB.test.insert({a:1});
+testDB.test.insert({a: 1});
try {
testDB.test.find({key: {$regex: 'abcd\0xyz'}}).explain();
} catch (e) {
/*
* if the mongos segfaults, the error is the msg:
- * "Error: error doing query: failed: network error while attempting to run command 'explain' on host '127.0.0.1:20014'"
+ * "Error: error doing query: failed: network error while attempting to run command 'explain' on
+ *host '127.0.0.1:20014'"
*
* if the mongos doesn't segfault, the error is the object:
* "Error: explain failed: {
diff --git a/jstests/noPassthrough/server_status.js b/jstests/noPassthrough/server_status.js
index 7c265a4740e..d19621c07de 100644
--- a/jstests/noPassthrough/server_status.js
+++ b/jstests/noPassthrough/server_status.js
@@ -10,12 +10,11 @@
if (serverStatus.storageEngine.name == 'mmapv1') {
assert(serverStatus.backgroundFlushing,
'mmapv1 db.serverStatus() result must contain backgroundFlushing document: ' +
- tojson(serverStatus));
- }
- else {
+ tojson(serverStatus));
+ } else {
assert(!serverStatus.backgroundFlushing,
'Unexpected backgroundFlushing document in non-mmapv1 db.serverStatus() result: ' +
- tojson(serverStatus));
+ tojson(serverStatus));
}
MongoRunner.stopMongod(mongo);
@@ -24,14 +23,13 @@
testDB = mongo.getDB('test');
serverStatus = assert.commandWorked(testDB.serverStatus());
if (serverStatus.storageEngine.name == 'mmapv1') {
- assert(serverStatus.dur,
- 'mmapv1 db.serverStatus() result must contain "dur" document: ' +
- tojson(serverStatus));
- }
- else {
+ assert(
+ serverStatus.dur,
+ 'mmapv1 db.serverStatus() result must contain "dur" document: ' + tojson(serverStatus));
+ } else {
assert(!serverStatus.dur,
'Unexpected "dur" document in non-mmapv1 db.serverStatus() result: ' +
- tojson(serverStatus));
+ tojson(serverStatus));
}
MongoRunner.stopMongod(mongo);
mongo = MongoRunner.runMongod({smallfiles: "", nojournal: ""});
@@ -39,6 +37,6 @@
serverStatus = assert.commandWorked(testDB.serverStatus());
assert(!serverStatus.dur,
'Unexpected "dur" document in db.serverStatus() result when journaling is disabled: ' +
- tojson(serverStatus));
+ tojson(serverStatus));
MongoRunner.stopMongod(mongo);
}());
diff --git a/jstests/noPassthrough/shard_does_not_hang_on_bad_config_server.js b/jstests/noPassthrough/shard_does_not_hang_on_bad_config_server.js
index 7588c50d0ee..31a0f7a85a5 100644
--- a/jstests/noPassthrough/shard_does_not_hang_on_bad_config_server.js
+++ b/jstests/noPassthrough/shard_does_not_hang_on_bad_config_server.js
@@ -1,28 +1,30 @@
(function() {
-'use strict';
+ 'use strict';
-var conn = MongoRunner.runMongod();
-var connA = new Mongo(conn.host);
-var connB = new Mongo(conn.host);
+ var conn = MongoRunner.runMongod();
+ var connA = new Mongo(conn.host);
+ var connB = new Mongo(conn.host);
-var res;
+ var res;
-res = assert.commandFailed(
- connA.adminCommand({ moveChunk: 'DummyDB.DummyColl',
- find: { e: 0 },
- to: 'DummyShard',
- configdb: 'localhost:1',
- maxTimeMS: 10000}));
-assert.eq(ErrorCodes.ExceededTimeLimit, res.code);
+ res = assert.commandFailed(connA.adminCommand({
+ moveChunk: 'DummyDB.DummyColl',
+ find: {e: 0},
+ to: 'DummyShard',
+ configdb: 'localhost:1',
+ maxTimeMS: 10000
+ }));
+ assert.eq(ErrorCodes.ExceededTimeLimit, res.code);
-res = assert.commandFailed(
- connB.adminCommand({ moveChunk: 'DummyDB.DummyColl',
- find: { e: 0 },
- to: 'DummyShard',
- configdb: 'localhost:1',
- maxTimeMS: 10000}));
-assert.eq(ErrorCodes.ExceededTimeLimit, res.code);
+ res = assert.commandFailed(connB.adminCommand({
+ moveChunk: 'DummyDB.DummyColl',
+ find: {e: 0},
+ to: 'DummyShard',
+ configdb: 'localhost:1',
+ maxTimeMS: 10000
+ }));
+ assert.eq(ErrorCodes.ExceededTimeLimit, res.code);
-MongoRunner.stopMongod(conn);
+ MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/split_collections_and_indexes.js b/jstests/noPassthrough/split_collections_and_indexes.js
index 991a5fe2937..3ad5f95d999 100644
--- a/jstests/noPassthrough/split_collections_and_indexes.js
+++ b/jstests/noPassthrough/split_collections_and_indexes.js
@@ -3,32 +3,22 @@ if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wired
var dbpath = MongoRunner.dataPath + baseDir + "/";
- var m = MongoRunner.runMongod({
- dbpath: dbpath,
- wiredTigerDirectoryForIndexes: ''});
- db = m.getDB( "foo" );
- db.bar.insert( { x : 1 } );
- assert.eq( 1, db.bar.count() );
+ var m = MongoRunner.runMongod({dbpath: dbpath, wiredTigerDirectoryForIndexes: ''});
+ db = m.getDB("foo");
+ db.bar.insert({x: 1});
+ assert.eq(1, db.bar.count());
- db.adminCommand( {fsync:1} );
+ db.adminCommand({fsync: 1});
- assert( listFiles( dbpath + "/index" ).length > 0 );
- assert( listFiles( dbpath + "/collection" ).length > 0 );
+ assert(listFiles(dbpath + "/index").length > 0);
+ assert(listFiles(dbpath + "/collection").length > 0);
MongoRunner.stopMongod(m.port);
// Subsequent attempts to start server using same dbpath but different
// wiredTigerDirectoryForIndexes and directoryperdb options should fail.
- assert.isnull(MongoRunner.runMongod({
- dbpath: dbpath,
- restart: true}));
- assert.isnull(MongoRunner.runMongod({
- dbpath: dbpath,
- restart: true,
- directoryperdb: ''}));
- assert.isnull(MongoRunner.runMongod({
- dbpath: dbpath,
- restart: true,
- wiredTigerDirectoryForIndexes: '',
- directoryperdb: ''}));
+ assert.isnull(MongoRunner.runMongod({dbpath: dbpath, restart: true}));
+ assert.isnull(MongoRunner.runMongod({dbpath: dbpath, restart: true, directoryperdb: ''}));
+ assert.isnull(MongoRunner.runMongod(
+ {dbpath: dbpath, restart: true, wiredTigerDirectoryForIndexes: '', directoryperdb: ''}));
}
diff --git a/jstests/noPassthrough/stepdown_query.js b/jstests/noPassthrough/stepdown_query.js
index ff6b39e5500..05d22f34a63 100644
--- a/jstests/noPassthrough/stepdown_query.js
+++ b/jstests/noPassthrough/stepdown_query.js
@@ -28,13 +28,15 @@
// network errors, we run a dummy operation here to force the shell to reconnect.
try {
conn.getDB("admin").runCommand("ping");
+ } catch (e) {
}
- catch (e) {}
// Even though our connection doesn't have slaveOk set, we should still be able to iterate
// our cursor and kill our cursor.
assert(cursor.hasNext());
- assert.doesNotThrow(function() { cursor.close(); });
+ assert.doesNotThrow(function() {
+ cursor.close();
+ });
}
// Test querying a replica set primary directly.
diff --git a/jstests/noPassthrough/sync_write.js b/jstests/noPassthrough/sync_write.js
index b2f2fa24a45..04c654cff72 100644
--- a/jstests/noPassthrough/sync_write.js
+++ b/jstests/noPassthrough/sync_write.js
@@ -11,7 +11,11 @@
var dbpath = MongoRunner.dataPath + 'sync_write';
resetDbpath(dbpath);
- var mongodArgs = {dbpath: dbpath, noCleanData: true, journal: ''};
+ var mongodArgs = {
+ dbpath: dbpath,
+ noCleanData: true,
+ journal: ''
+ };
// Start a mongod.
var conn = MongoRunner.runMongod(mongodArgs);
diff --git a/jstests/noPassthrough/ttl_capped.js b/jstests/noPassthrough/ttl_capped.js
index a087af2e7f7..c5b26c048d4 100644
--- a/jstests/noPassthrough/ttl_capped.js
+++ b/jstests/noPassthrough/ttl_capped.js
@@ -63,18 +63,21 @@
function msg() {
return "TTL monitor didn't run within " + timeoutSeconds + " seconds";
},
- timeoutSeconds * 1000
- );
+ timeoutSeconds * 1000);
for (var i = 0; i < numCollectionsToCreate; i++) {
var coll = testDB["ttl" + i.zeroPad(width)];
var count = coll.count();
if (i % 3 === 1) {
- assert.eq(1, count, "the TTL monitor shouldn't have removed expired documents from" +
- " the capped collection '" + coll.getFullName() + "'");
+ assert.eq(1,
+ count,
+ "the TTL monitor shouldn't have removed expired documents from" +
+ " the capped collection '" + coll.getFullName() + "'");
} else {
- assert.eq(0, count, "the TTL monitor didn't removed expired documents from the" +
- " collection '" + coll.getFullName() + "'");
+ assert.eq(0,
+ count,
+ "the TTL monitor didn't removed expired documents from the" +
+ " collection '" + coll.getFullName() + "'");
}
}
diff --git a/jstests/noPassthrough/ttl_partial_index.js b/jstests/noPassthrough/ttl_partial_index.js
index 65e5fadc7d4..3da3a8a1513 100644
--- a/jstests/noPassthrough/ttl_partial_index.js
+++ b/jstests/noPassthrough/ttl_partial_index.js
@@ -8,8 +8,8 @@
coll.drop();
// Create TTL partial index.
- assert.commandWorked(coll.ensureIndex({x: 1}, {expireAfterSeconds: 0,
- partialFilterExpression: {z: {$exists: true}}}));
+ assert.commandWorked(coll.ensureIndex(
+ {x: 1}, {expireAfterSeconds: 0, partialFilterExpression: {z: {$exists: true}}}));
var now = new Date();
assert.writeOK(coll.insert({x: now, z: 2}));
@@ -19,12 +19,12 @@
// collection when it ran the first time).
var ttlPass = coll.getDB().serverStatus().metrics.ttl.passes;
assert.soon(function() {
- return coll.getDB().serverStatus().metrics.ttl.passes >= ttlPass + 2;
- },
- "TTL monitor didn't run before timing out.");
+ return coll.getDB().serverStatus().metrics.ttl.passes >= ttlPass + 2;
+ }, "TTL monitor didn't run before timing out.");
- assert.eq(0, coll.find({z: {$exists: true}}).hint({x: 1}).itcount(),
+ assert.eq(0,
+ coll.find({z: {$exists: true}}).hint({x: 1}).itcount(),
"Wrong number of documents in partial index, after TTL monitor run");
- assert.eq(1, coll.find().itcount(),
- "Wrong number of documents in collection, after TTL monitor run");
+ assert.eq(
+ 1, coll.find().itcount(), "Wrong number of documents in collection, after TTL monitor run");
})();
diff --git a/jstests/noPassthrough/update_server-5552.js b/jstests/noPassthrough/update_server-5552.js
index 9459cfcc95e..146dd0dab31 100644
--- a/jstests/noPassthrough/update_server-5552.js
+++ b/jstests/noPassthrough/update_server-5552.js
@@ -1,6 +1,6 @@
-load( "jstests/libs/slow_weekly_util.js" );
-testServer = new SlowWeeklyMongod( "update_server-5552" );
-db = testServer.getDB( "test" );
+load("jstests/libs/slow_weekly_util.js");
+testServer = new SlowWeeklyMongod("update_server-5552");
+db = testServer.getDB("test");
t = db.foo;
t.drop();
@@ -8,18 +8,28 @@ t.drop();
N = 10000;
var bulk = t.initializeUnorderedBulkOp();
-for ( i=0; i<N; i++ ) {
- bulk.insert({ _id: i, x: 1 });
+for (i = 0; i < N; i++) {
+ bulk.insert({_id: i, x: 1});
}
assert.writeOK(bulk.execute());
-join = startParallelShell( "while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );" );
-
-t.update( { $where : function(){ sleep(1); return true; } } , { $set : { x : 5 } } , false , true );
+join = startParallelShell(
+ "while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );");
+
+t.update(
+ {
+ $where: function() {
+ sleep(1);
+ return true;
+ }
+ },
+ {$set: {x: 5}},
+ false,
+ true);
db.getLastError();
join();
-assert.eq( N , t.find( { x : 5 } ).count() );
+assert.eq(N, t.find({x: 5}).count());
testServer.stop();
diff --git a/jstests/noPassthrough/update_yield1.js b/jstests/noPassthrough/update_yield1.js
index 6f8004149fb..0135be037ce 100644
--- a/jstests/noPassthrough/update_yield1.js
+++ b/jstests/noPassthrough/update_yield1.js
@@ -31,11 +31,8 @@
assert.gt(yieldCount, (nDocsToInsert / worksPerYield) - 2);
// A multi-update shouldn't yield if it has $isolated.
- explain = coll.explain('executionStats').update(
- {$isolated: true},
- {$inc: {counter: 1}},
- {multi: true}
- );
+ explain = coll.explain('executionStats')
+ .update({$isolated: true}, {$inc: {counter: 1}}, {multi: true});
assert.commandWorked(explain);
yieldCount = explain.executionStats.executionStages.saveState;
assert.eq(yieldCount, 0, 'yielded during $isolated multi-update');
diff --git a/jstests/noPassthrough/write_local.js b/jstests/noPassthrough/write_local.js
index 2f9cedba080..019b8c437f7 100644
--- a/jstests/noPassthrough/write_local.js
+++ b/jstests/noPassthrough/write_local.js
@@ -3,32 +3,32 @@
'use strict';
// Limit concurrent WiredTiger transactions to maximize locking issues, harmless for other SEs.
- var options = { verbose: 1 };
+ var options = {
+ verbose: 1
+ };
// Create a new single node replicaSet
- var replTest = new ReplSetTest({ name: "write_local",
- nodes: 1,
- oplogSize: 1,
- nodeOptions: options });
+ var replTest =
+ new ReplSetTest({name: "write_local", nodes: 1, oplogSize: 1, nodeOptions: options});
replTest.startSet();
replTest.initiate();
var mongod = replTest.getPrimary();
- mongod.adminCommand({ setParameter: 1, wiredTigerConcurrentWriteTransactions: 1 });
+ mongod.adminCommand({setParameter: 1, wiredTigerConcurrentWriteTransactions: 1});
var local = mongod.getDB('local');
// Start inserting documents in test.capped and local.capped capped collections.
- var shells = ['test', 'local'].map(function(dbname){
+ var shells = ['test', 'local'].map(function(dbname) {
var mydb = local.getSiblingDB(dbname);
mydb.capped.drop();
- mydb.createCollection('capped', { capped: true, size: 20*1000 });
- return startParallelShell(
- 'var mydb=db.getSiblingDB("' + dbname + '"); ' +
- '(function() { ' +
- ' for(var i=0; i < 10*1000; i++) { ' +
- ' mydb.capped.insert({ x: i }); ' +
- ' } ' +
- '})();', mongod.port);
+ mydb.createCollection('capped', {capped: true, size: 20 * 1000});
+ return startParallelShell('var mydb=db.getSiblingDB("' + dbname + '"); ' +
+ '(function() { ' +
+ ' for(var i=0; i < 10*1000; i++) { ' +
+ ' mydb.capped.insert({ x: i }); ' +
+ ' } ' +
+ '})();',
+ mongod.port);
});
// The following causes inconsistent locking order in the ticket system, depending on
@@ -42,6 +42,6 @@
// Wait for parallel shells to terminate and stop our replset.
shells.forEach((function(f) {
f();
- }));
+ }));
replTest.stopSet();
}());
diff --git a/jstests/noPassthrough/wt_index_option_defaults.js b/jstests/noPassthrough/wt_index_option_defaults.js
index 231162f207a..2516cc28d24 100644
--- a/jstests/noPassthrough/wt_index_option_defaults.js
+++ b/jstests/noPassthrough/wt_index_option_defaults.js
@@ -56,23 +56,20 @@
// Start a mongod with system-wide defaults for engine-specific index options.
var conn = MongoRunner.runMongod({
dbpath: dbpath,
- noCleanData: true,
- [engine + 'IndexConfigString']: systemWideConfigString,
+ noCleanData: true, [engine + 'IndexConfigString']: systemWideConfigString,
});
assert.neq(null, conn, 'mongod was unable to start up');
var testDB = conn.getDB('test');
- var cmdObj = {create: 'coll'};
+ var cmdObj = {
+ create: 'coll'
+ };
// Apply collection-wide defaults for engine-specific index options if any were
// specified.
if (hasIndexOptionDefaults) {
cmdObj.indexOptionDefaults = {
- storageEngine: {
- [engine]: {
- configString: collOptions.indexOptionDefaults
- }
- }
+ storageEngine: {[engine]: {configString: collOptions.indexOptionDefaults}}
};
}
assert.commandWorked(testDB.runCommand(cmdObj));
@@ -81,10 +78,12 @@
assert.commandWorked(testDB.coll.createIndex({a: 1}, {name: 'without_options'}));
// Create an index that specifies engine-specific index options.
- assert.commandWorked(testDB.coll.createIndex({b: 1}, {
- name: 'with_options',
- storageEngine: {[engine]: {configString: indexSpecificConfigString}}
- }));
+ assert.commandWorked(testDB.coll.createIndex(
+ {b: 1},
+ {
+ name: 'with_options',
+ storageEngine: {[engine]: {configString: indexSpecificConfigString}}
+ }));
var collStats = testDB.runCommand({collStats: 'coll'});
assert.commandWorked(collStats);
@@ -98,49 +97,57 @@
var indexSpec = getIndexSpecByName(testDB.coll, 'without_options');
assert(!indexSpec.hasOwnProperty('storageEngine'),
'no storage engine options should have been set in the index spec: ' +
- tojson(indexSpec));
+ tojson(indexSpec));
var creationString = indexDetails.without_options.creationString;
if (hasIndexOptionDefaults) {
- assert.eq(-1, creationString.indexOf(systemWideConfigString),
+ assert.eq(-1,
+ creationString.indexOf(systemWideConfigString),
'system-wide index option present in the creation string even though a ' +
- 'collection-wide option was specified: ' + creationString);
- assert.lte(0, creationString.indexOf(collectionWideConfigString),
+ 'collection-wide option was specified: ' + creationString);
+ assert.lte(0,
+ creationString.indexOf(collectionWideConfigString),
'collection-wide index option not present in the creation string: ' +
- creationString);
+ creationString);
} else {
- assert.lte(0, creationString.indexOf(systemWideConfigString),
+ assert.lte(0,
+ creationString.indexOf(systemWideConfigString),
'system-wide index option not present in the creation string: ' +
- creationString);
- assert.eq(-1, creationString.indexOf(collectionWideConfigString),
+ creationString);
+ assert.eq(-1,
+ creationString.indexOf(collectionWideConfigString),
'collection-wide index option present in creation string even though ' +
- 'it was not specified: ' + creationString);
+ 'it was not specified: ' + creationString);
}
- assert.eq(-1, creationString.indexOf(indexSpecificConfigString),
+ assert.eq(-1,
+ creationString.indexOf(indexSpecificConfigString),
'index-specific option present in creation string even though it was not' +
- ' specified: ' + creationString);
+ ' specified: ' + creationString);
}
function checkIndexWithOptions(indexDetails) {
var indexSpec = getIndexSpecByName(testDB.coll, 'with_options');
assert(indexSpec.hasOwnProperty('storageEngine'),
'storage engine options should have been set in the index spec: ' +
- tojson(indexSpec));
+ tojson(indexSpec));
assert.docEq({[engine]: {configString: indexSpecificConfigString}},
indexSpec.storageEngine,
engine + ' index options not present in the index spec');
var creationString = indexDetails.with_options.creationString;
- assert.eq(-1, creationString.indexOf(systemWideConfigString),
+ assert.eq(-1,
+ creationString.indexOf(systemWideConfigString),
'system-wide index option present in the creation string even though an ' +
- 'index-specific option was specified: ' + creationString);
- assert.eq(-1, creationString.indexOf(collectionWideConfigString),
+ 'index-specific option was specified: ' + creationString);
+ assert.eq(-1,
+ creationString.indexOf(collectionWideConfigString),
'system-wide index option present in the creation string even though an ' +
- 'index-specific option was specified: ' + creationString);
- assert.lte(0, creationString.indexOf(indexSpecificConfigString),
- 'index-specific option not present in the creation string: ' +
- creationString);
+ 'index-specific option was specified: ' + creationString);
+ assert.lte(
+ 0,
+ creationString.indexOf(indexSpecificConfigString),
+ 'index-specific option not present in the creation string: ' + creationString);
}
}
diff --git a/jstests/noPassthrough/wt_nojournal_fsync.js b/jstests/noPassthrough/wt_nojournal_fsync.js
index a6e4f5b07ff..e5be8b9659e 100644
--- a/jstests/noPassthrough/wt_nojournal_fsync.js
+++ b/jstests/noPassthrough/wt_nojournal_fsync.js
@@ -7,34 +7,34 @@
*/
function writeDataAndRestart(doFsync) {
-
jsTestLog("add some data");
- for (var i=0; i<100; i++) {
- conn.getDB(name).foo.insert({x:i});
+ for (var i = 0; i < 100; i++) {
+ conn.getDB(name).foo.insert({x: i});
}
if (doFsync) {
jsTestLog("run fsync on the node");
- assert.commandWorked(conn.getDB("admin").runCommand({fsync : 1}));
+ assert.commandWorked(conn.getDB("admin").runCommand({fsync: 1}));
}
jsTestLog("kill -9");
MongoRunner.stopMongod(conn, /*signal*/ 9);
jsTestLog("restart node");
- conn = MongoRunner.runMongod({restart: true,
- port: conn.port,
- cleanData: false,
- storageEngine: "wiredTiger",
- nojournal: ""});
+ conn = MongoRunner.runMongod({
+ restart: true,
+ port: conn.port,
+ cleanData: false,
+ storageEngine: "wiredTiger",
+ nojournal: ""
+ });
return conn;
}
// This test can only be run if the storageEngine is wiredTiger
if (jsTest.options().storageEngine && jsTest.options().storageEngine !== "wiredTiger") {
jsTestLog("Skipping test because storageEngine is not wiredTiger");
-}
-else {
+} else {
var name = "wt_nojournal_fsync";
jsTestLog("run mongod without journaling");
diff --git a/jstests/noPassthrough/wt_nojournal_repl.js b/jstests/noPassthrough/wt_nojournal_repl.js
index 5525db9526b..b9c58a516db 100644
--- a/jstests/noPassthrough/wt_nojournal_repl.js
+++ b/jstests/noPassthrough/wt_nojournal_repl.js
@@ -28,11 +28,17 @@ var contains = function(logLines, func) {
// This test can only be run if the storageEngine is wiredTiger
if (jsTest.options().storageEngine && jsTest.options().storageEngine !== "wiredTiger") {
jsTestLog("Skipping test because storageEngine is not wiredTiger");
-}
-else {
+} else {
var name = "wt_nojournal_repl";
- var replTest = new ReplSetTest( {name: name, nodes: 3, oplogSize: 2,
- nodeOptions: {nojournal: "", storageEngine: "wiredTiger",} } );
+ var replTest = new ReplSetTest({
+ name: name,
+ nodes: 3,
+ oplogSize: 2,
+ nodeOptions: {
+ nojournal: "",
+ storageEngine: "wiredTiger",
+ }
+ });
var nodes = replTest.startSet();
// make sure node 0 becomes primary initially
@@ -44,21 +50,21 @@ else {
var secondary1 = replTest.liveNodes.slaves[0];
jsTestLog("add some data to collection foo");
- for (var i=0; i<100; i++) {
- masterDB.foo.insert({x:i});
+ for (var i = 0; i < 100; i++) {
+ masterDB.foo.insert({x: i});
}
replTest.awaitReplication();
assert.eq(secondary1.getDB("test").foo.count(), 100);
jsTestLog("run fsync on the secondary to ensure it remains after restart");
- assert.commandWorked(secondary1.getDB("admin").runCommand({fsync : 1}));
+ assert.commandWorked(secondary1.getDB("admin").runCommand({fsync: 1}));
jsTestLog("kill -9 secondary 1");
MongoRunner.stopMongod(secondary1.port, /*signal*/ 9);
jsTestLog("add some data to a new collection bar");
- for (var i=0; i<100; i++) {
- masterDB.bar.insert({x:i});
+ for (var i = 0; i < 100; i++) {
+ masterDB.bar.insert({x: i});
}
jsTestLog("restart secondary 1 and let it catch up");
@@ -66,10 +72,11 @@ else {
replTest.awaitReplication();
// Test that the restarted secondary did NOT do an initial sync by checking the log
- var res = secondary1.adminCommand({getLog:"global"});
- assert(!contains(res.log, function(v) {
- return v.indexOf("initial sync") != -1;
- }));
+ var res = secondary1.adminCommand({getLog: "global"});
+ assert(!contains(res.log,
+ function(v) {
+ return v.indexOf("initial sync") != -1;
+ }));
jsTestLog("check data is in both collections");
assert.eq(secondary1.getDB("test").foo.count(), 100);
diff --git a/jstests/noPassthrough/wt_nojournal_skip_recovery.js b/jstests/noPassthrough/wt_nojournal_skip_recovery.js
index e742c2eabf9..fa0d32b93c4 100644
--- a/jstests/noPassthrough/wt_nojournal_skip_recovery.js
+++ b/jstests/noPassthrough/wt_nojournal_skip_recovery.js
@@ -83,9 +83,10 @@
var count = conn.getDB('test').nojournal.count({journaled: {$exists: true}});
assert.lte(90, count, 'missing documents that were present in the last checkpoint');
- assert.gte(90, count,
+ assert.gte(90,
+ count,
'journaled write operations since the last checkpoint should not have been' +
- ' replayed');
+ ' replayed');
MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/wt_nojournal_toggle.js b/jstests/noPassthrough/wt_nojournal_toggle.js
index 20236e47459..8973c37b2f5 100644
--- a/jstests/noPassthrough/wt_nojournal_toggle.js
+++ b/jstests/noPassthrough/wt_nojournal_toggle.js
@@ -29,9 +29,10 @@
}
};
- return '(' + insertFunction.toString().replace('__checkpoint_template_placeholder__',
- checkpoint.toString()) +
- ')();';
+ return '(' +
+ insertFunction.toString().replace('__checkpoint_template_placeholder__',
+ checkpoint.toString()) +
+ ')();';
}
function runTest(options) {
@@ -77,14 +78,15 @@
assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
var testDB = conn.getDB('test');
- assert.eq(1, testDB.nojournal.count({final: true}),
- 'final journaled write was not found');
- assert.lte(100, testDB.nojournal.count({journaled: {$exists: true}}),
+ assert.eq(1, testDB.nojournal.count({final: true}), 'final journaled write was not found');
+ assert.lte(100,
+ testDB.nojournal.count({journaled: {$exists: true}}),
'journaled write operations since the last checkpoint were not replayed');
var initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
- assert.eq(initialNumLogWrites, testDB.serverStatus().wiredTiger.log['log write operations'],
+ assert.eq(initialNumLogWrites,
+ testDB.serverStatus().wiredTiger.log['log write operations'],
'journaling is still enabled even though --nojournal was specified');
MongoRunner.stopMongod(conn);
@@ -102,7 +104,8 @@
initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
assert.writeOK(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
- assert.lt(initialNumLogWrites, testDB.serverStatus().wiredTiger.log['log write operations'],
+ assert.lt(initialNumLogWrites,
+ testDB.serverStatus().wiredTiger.log['log write operations'],
'journaling is still disabled even though --journal was specified');
MongoRunner.stopMongod(conn);
diff --git a/jstests/noPassthroughWithMongod/apply_ops_errors.js b/jstests/noPassthroughWithMongod/apply_ops_errors.js
index 8cc5a8ad4cb..31353523810 100644
--- a/jstests/noPassthroughWithMongod/apply_ops_errors.js
+++ b/jstests/noPassthroughWithMongod/apply_ops_errors.js
@@ -19,19 +19,11 @@
// Scenario 1: only one operation
assert.eq(0, coll.find().count(), "test collection not empty");
- coll.ensureIndex({x:1},{unique:true});
- coll.insert({ _id: 1, x: "init" });
+ coll.ensureIndex({x: 1}, {unique: true});
+ coll.insert({_id: 1, x: "init"});
- var res = db.runCommand({ applyOps: [
- {
- op: "i",
- ns: coll.getFullName(),
- o: {
- _id: 2,
- x: "init"
- }
- },
- ]});
+ var res =
+ db.runCommand({applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 2, x: "init"}}, ]});
assert.eq(1, res.applied);
assert(res.code);
@@ -43,35 +35,16 @@
// Scenario 2: Three operations, first two should run, second should fail.
assert.eq(0, coll.find().count(), "test collection not empty");
- coll.ensureIndex({x:1},{unique:true});
- coll.insert({ _id: 1, x: "init" });
-
- var res = db.runCommand({ applyOps: [
- {
- op: "i",
- ns: coll.getFullName(),
- o: {
- _id: 3,
- x: "not init"
- }
- },
- {
- op: "i",
- ns: coll.getFullName(),
- o: {
- _id: 4,
- x: "init"
- }
- },
- {
- op: "i",
- ns: coll.getFullName(),
- o: {
- _id: 5,
- x: "not init again"
- }
- },
- ]});
+ coll.ensureIndex({x: 1}, {unique: true});
+ coll.insert({_id: 1, x: "init"});
+
+ var res = db.runCommand({
+ applyOps: [
+ {op: "i", ns: coll.getFullName(), o: {_id: 3, x: "not init"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 4, x: "init"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 5, x: "not init again"}},
+ ]
+ });
assert.eq(2, res.applied);
assert(res.code);
diff --git a/jstests/noPassthroughWithMongod/background.js b/jstests/noPassthroughWithMongod/background.js
index 742390b54c7..7624d24471c 100644
--- a/jstests/noPassthroughWithMongod/background.js
+++ b/jstests/noPassthroughWithMongod/background.js
@@ -1,16 +1,16 @@
// background indexing test during inserts.
-assert( db.getName() == "test" );
+assert(db.getName() == "test");
t = db.bg1;
t.drop();
-var a = new Mongo( db.getMongo().host ).getDB( db.getName() );
+var a = new Mongo(db.getMongo().host).getDB(db.getName());
var bulk = t.initializeUnorderedBulkOp();
-for( var i = 0; i < 100000; i++ ) {
- bulk.insert({ y: 'aaaaaaaaaaaa', i: i });
- if( i % 10000 == 0 ) {
+for (var i = 0; i < 100000; i++) {
+ bulk.insert({y: 'aaaaaaaaaaaa', i: i});
+ if (i % 10000 == 0) {
assert.writeOK(bulk.execute());
bulk = t.initializeUnorderedBulkOp();
print(i);
@@ -18,14 +18,14 @@ for( var i = 0; i < 100000; i++ ) {
}
// start bg indexing
-a.bg1.ensureIndex({i:1}, {name:"i_1", background:true});
+a.bg1.ensureIndex({i: 1}, {name: "i_1", background: true});
// add more data
bulk = t.initializeUnorderedBulkOp();
-for( var i = 0; i < 100000; i++ ) {
- bulk.insert({ i: i });
- if( i % 10000 == 0 ) {
- printjson( db.currentOp() );
+for (var i = 0; i < 100000; i++) {
+ bulk.insert({i: i});
+ if (i % 10000 == 0) {
+ printjson(db.currentOp());
assert.writeOK(bulk.execute());
bulk = t.initializeUnorderedBulkOp();
print(i);
@@ -34,14 +34,14 @@ for( var i = 0; i < 100000; i++ ) {
assert.writeOK(bulk.execute());
-printjson( db.currentOp() );
+printjson(db.currentOp());
-for( var i = 0; i < 40; i++ ) {
- if( db.currentOp().inprog.length == 0 )
+for (var i = 0; i < 40; i++) {
+ if (db.currentOp().inprog.length == 0)
break;
print("waiting");
sleep(1000);
}
var idx = t.getIndexes();
-assert( idx[1].key.i == 1 );
+assert(idx[1].key.i == 1);
diff --git a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
index 25060999d59..14ee7d0fdb7 100644
--- a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
+++ b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
@@ -7,9 +7,11 @@
assert.commandWorked(coll.getDB().createCollection(coll.getName()));
function makeDocument(docSize) {
- var doc = { "fieldName":"" };
+ var doc = {
+ "fieldName": ""
+ };
var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
- while(Object.bsonsize(doc) < docSize) {
+ while (Object.bsonsize(doc) < docSize) {
if (Object.bsonsize(doc) < docSize - longString.length) {
doc.fieldName += longString;
} else {
@@ -20,7 +22,12 @@
}
function executeBenchRun(benchOps) {
- var benchArgs = {ops: benchOps, parallel: 2, seconds: 1, host: db.getMongo().host};
+ var benchArgs = {
+ ops: benchOps,
+ parallel: 2,
+ seconds: 1,
+ host: db.getMongo().host
+ };
if (jsTest.options().auth) {
benchArgs['db'] = 'admin';
benchArgs['username'] = jsTest.options().adminUser;
@@ -32,14 +39,16 @@
function testInsert(docs, writeCmd, wc) {
coll.drop();
- var res = executeBenchRun([{ns: coll.getFullName(),
- op: "insert",
- doc: docs,
- writeCmd: writeCmd,
- writeConcern : wc}]);
+ var res = executeBenchRun([{
+ ns: coll.getFullName(),
+ op: "insert",
+ doc: docs,
+ writeCmd: writeCmd,
+ writeConcern: wc
+ }]);
assert.gt(coll.count(), 0);
- assert.eq(coll.findOne({}, {_id:0}), docs[0]);
+ assert.eq(coll.findOne({}, {_id: 0}), docs[0]);
}
function testFind(readCmd) {
@@ -48,11 +57,13 @@
assert.writeOK(coll.insert({}));
}
- var res = executeBenchRun([{ns: coll.getFullName(),
- op: "find",
- query: {},
- batchSize: NumberInt(10),
- readCmd: readCmd}]);
+ var res = executeBenchRun([{
+ ns: coll.getFullName(),
+ op: "find",
+ query: {},
+ batchSize: NumberInt(10),
+ readCmd: readCmd
+ }]);
assert.gt(res.query, 0, tojson(res));
}
@@ -62,10 +73,8 @@
assert.writeOK(coll.insert({}));
}
- var res = executeBenchRun([{ns: coll.getFullName(),
- op: "findOne",
- query: {},
- readCmd: readCmd}]);
+ var res = executeBenchRun(
+ [{ns: coll.getFullName(), op: "findOne", query: {}, readCmd: readCmd}]);
assert.gt(res.findOne, 0, tojson(res));
}
@@ -78,9 +87,9 @@
testInsert([bigDoc], writeCmd, {});
testInsert(docs, writeCmd, {});
- testInsert(docs, writeCmd, {"writeConcern" : {"w" : "majority"}});
- testInsert(docs, writeCmd, {"writeConcern" : {"w" : 1, "j": false}});
- testInsert(docs, writeCmd, {"writeConcern" : {"j" : true}});
+ testInsert(docs, writeCmd, {"writeConcern": {"w": "majority"}});
+ testInsert(docs, writeCmd, {"writeConcern": {"w": 1, "j": false}});
+ testInsert(docs, writeCmd, {"writeConcern": {"j": true}});
}
testWriteConcern(false);
diff --git a/jstests/noPassthroughWithMongod/benchrun_substitution.js b/jstests/noPassthroughWithMongod/benchrun_substitution.js
index afc79b4cc49..ddcef69d73d 100644
--- a/jstests/noPassthroughWithMongod/benchrun_substitution.js
+++ b/jstests/noPassthroughWithMongod/benchrun_substitution.js
@@ -2,75 +2,73 @@ function benchrun_sub_insert(use_write_command) {
t = db.benchrun_sub;
t.drop();
var offset = 10000;
- ops = [{op: "insert", ns: "test.benchrun_sub",
- doc: {x: { "#RAND_INT" : [ 0, 100 ] },
- curDate: { "#CUR_DATE" : 0 } ,
- futureDate: { "#CUR_DATE" : offset} ,
- pastDate: { "#CUR_DATE" : (0 - offset) } },
- writeCmd: use_write_command,
- }];
+ ops = [{
+ op: "insert",
+ ns: "test.benchrun_sub",
+ doc: {
+ x: {"#RAND_INT": [0, 100]},
+ curDate: {"#CUR_DATE": 0},
+ futureDate: {"#CUR_DATE": offset},
+ pastDate: {"#CUR_DATE": (0 - offset)}
+ },
+ writeCmd: use_write_command,
+ }];
- res = benchRun({parallel: 1,
- seconds: 10,
- ops : ops,
- host: db.getMongo().host});
+ res = benchRun({parallel: 1, seconds: 10, ops: ops, host: db.getMongo().host});
assert.gt(res.insert, 0);
t.find().forEach(function(doc) {
- var field = doc.x;
- assert.gte(field, 0);
- assert.lt(field, 100);
- assert.lt(doc.pastDate, doc.curDate);
- assert.lt(doc.curDate, doc.futureDate);
- }
- );
+ var field = doc.x;
+ assert.gte(field, 0);
+ assert.lt(field, 100);
+ assert.lt(doc.pastDate, doc.curDate);
+ assert.lt(doc.curDate, doc.futureDate);
+ });
}
function benchrun_sub_update(use_write_command) {
t = db.benchrun_sub;
t.drop();
- ops = [{op: "update", ns: "test.benchrun_sub",
- query: {x: {"#RAND_INT": [0, 100]}},
- update: {$inc : {x : 1}},
- writeCmd: use_write_command}];
+ ops = [{
+ op: "update",
+ ns: "test.benchrun_sub",
+ query: {x: {"#RAND_INT": [0, 100]}},
+ update: {$inc: {x: 1}},
+ writeCmd: use_write_command
+ }];
for (var i = 0; i < 100; ++i) {
t.insert({x: i});
}
- res = benchRun({parallel: 1,
- seconds: 10,
- ops: ops,
- host: db.getMongo().host});
+ res = benchRun({parallel: 1, seconds: 10, ops: ops, host: db.getMongo().host});
var field_sum = 0;
t.find().forEach(function(doc) {
- field_sum += doc.x;
- }
- );
+ field_sum += doc.x;
+ });
- assert.gt(field_sum, 4950); // 1 + 2 + .. 99 = 4950
+ assert.gt(field_sum, 4950); // 1 + 2 + .. 99 = 4950
}
function benchrun_sub_remove(use_write_command) {
t = db.benchrun_sub;
t.drop();
- ops = [{op: "remove", ns: "test.benchrun_sub",
- query: {x: {"#RAND_INT": [0, 100]}},
- writeCmd: use_write_command,
- }];
+ ops = [{
+ op: "remove",
+ ns: "test.benchrun_sub",
+ query: {x: {"#RAND_INT": [0, 100]}},
+ writeCmd: use_write_command,
+ }];
for (var i = 0; i < 100; ++i) {
t.insert({x: i});
}
- res = benchRun({parallel: 1,
- seconds: 10,
- ops: ops,
- host: db.getMongo().host});
+ res = benchRun({parallel: 1, seconds: 10, ops: ops, host: db.getMongo().host});
assert.eq(t.count(), 0);
}
diff --git a/jstests/noPassthroughWithMongod/btreedel.js b/jstests/noPassthroughWithMongod/btreedel.js
index 89af6aa7d5d..13c3bb3b685 100644
--- a/jstests/noPassthroughWithMongod/btreedel.js
+++ b/jstests/noPassthroughWithMongod/btreedel.js
@@ -5,13 +5,13 @@ t.remove({});
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 1000000; i++) {
- bulk.insert({ _id: i, x: 'a b' });
+ bulk.insert({_id: i, x: 'a b'});
}
assert.writeOK(bulk.execute());
print("1 insert done count: " + t.count());
-var c = t.find({y:null}).sort({ _id: 1 });
+var c = t.find({y: null}).sort({_id: 1});
for (var j = 0; j < 400000; j++) {
c.next();
if (j % 200000 == 0)
@@ -19,12 +19,12 @@ for (var j = 0; j < 400000; j++) {
}
printjson(c.next());
-var d = t.find({ _id: { $gt: 300000} }).sort({ _id: -1 });
+var d = t.find({_id: {$gt: 300000}}).sort({_id: -1});
d.next();
print("2");
-t.remove({ _id: { $gt: 200000, $lt: 600000} });
+t.remove({_id: {$gt: 200000, $lt: 600000}});
print("3");
print(d.hasNext());
diff --git a/jstests/noPassthroughWithMongod/bulk_api_limits.js b/jstests/noPassthroughWithMongod/bulk_api_limits.js
index b7bab04f6d1..3dada22c519 100644
--- a/jstests/noPassthroughWithMongod/bulk_api_limits.js
+++ b/jstests/noPassthroughWithMongod/bulk_api_limits.js
@@ -17,7 +17,7 @@ var executeTestsUnordered = function() {
// Create unique index
coll.dropIndexes();
coll.remove({});
- coll.ensureIndex({a : 1}, {unique : true});
+ coll.ensureIndex({a: 1}, {unique: true});
/**
* Fail during batch construction due to single document > maxBSONSize
@@ -25,18 +25,19 @@ var executeTestsUnordered = function() {
// Set up a giant string to blow through the max message size
var hugeString = "";
// Create it bigger than 16MB
- for(var i = 0; i < (1024 * 1100); i++) {
+ for (var i = 0; i < (1024 * 1100); i++) {
hugeString = hugeString + "1234567890123456";
}
// Set up the batch
var batch = coll.initializeUnorderedBulkOp();
- batch.insert({b:1, a:1});
+ batch.insert({b: 1, a: 1});
// Should fail on insert due to string being to big
try {
batch.insert({string: hugeString});
assert(false);
- } catch(err) {}
+ } catch (err) {
+ }
// Create unique index
coll.dropIndexes();
@@ -48,18 +49,18 @@ var executeTestsUnordered = function() {
// Set up a giant string to blow through the max message size
var hugeString = "";
// Create 4 MB strings to test splitting
- for(var i = 0; i < (1024 * 256); i++) {
+ for (var i = 0; i < (1024 * 256); i++) {
hugeString = hugeString + "1234567890123456";
}
// Insert the string a couple of times, should force split into multiple batches
var batch = coll.initializeUnorderedBulkOp();
- batch.insert({a:1, b: hugeString});
- batch.insert({a:2, b: hugeString});
- batch.insert({a:3, b: hugeString});
- batch.insert({a:4, b: hugeString});
- batch.insert({a:5, b: hugeString});
- batch.insert({a:6, b: hugeString});
+ batch.insert({a: 1, b: hugeString});
+ batch.insert({a: 2, b: hugeString});
+ batch.insert({a: 3, b: hugeString});
+ batch.insert({a: 4, b: hugeString});
+ batch.insert({a: 5, b: hugeString});
+ batch.insert({a: 6, b: hugeString});
var result = batch.execute();
printjson(JSON.stringify(result));
@@ -81,18 +82,19 @@ var executeTestsOrdered = function() {
// Set up a giant string to blow through the max message size
var hugeString = "";
// Create it bigger than 16MB
- for(var i = 0; i < (1024 * 1100); i++) {
+ for (var i = 0; i < (1024 * 1100); i++) {
hugeString = hugeString + "1234567890123456";
}
// Set up the batch
var batch = coll.initializeOrderedBulkOp();
- batch.insert({b:1, a:1});
+ batch.insert({b: 1, a: 1});
// Should fail on insert due to string being to big
try {
batch.insert({string: hugeString});
assert(false);
- } catch(err) {}
+ } catch (err) {
+ }
// Create unique index
coll.dropIndexes();
@@ -104,18 +106,18 @@ var executeTestsOrdered = function() {
// Set up a giant string to blow through the max message size
var hugeString = "";
// Create 4 MB strings to test splitting
- for(var i = 0; i < (1024 * 256); i++) {
+ for (var i = 0; i < (1024 * 256); i++) {
hugeString = hugeString + "1234567890123456";
}
// Insert the string a couple of times, should force split into multiple batches
var batch = coll.initializeOrderedBulkOp();
- batch.insert({a:1, b: hugeString});
- batch.insert({a:2, b: hugeString});
- batch.insert({a:3, b: hugeString});
- batch.insert({a:4, b: hugeString});
- batch.insert({a:5, b: hugeString});
- batch.insert({a:6, b: hugeString});
+ batch.insert({a: 1, b: hugeString});
+ batch.insert({a: 2, b: hugeString});
+ batch.insert({a: 3, b: hugeString});
+ batch.insert({a: 4, b: hugeString});
+ batch.insert({a: 5, b: hugeString});
+ batch.insert({a: 6, b: hugeString});
var result = batch.execute();
// Basic properties check
@@ -127,14 +129,14 @@ var executeTestsOrdered = function() {
coll.remove({});
};
-var buildVersion = parseInt(db.runCommand({buildInfo:1}).versionArray.slice(0, 3).join(""), 10);
+var buildVersion = parseInt(db.runCommand({buildInfo: 1}).versionArray.slice(0, 3).join(""), 10);
// Save the existing useWriteCommands function
var _useWriteCommands = coll.getMongo().useWriteCommands;
//
// Only execute write command tests if we have > 2.5.5 otherwise
// execute the down converted version
-if(buildVersion >= 255) {
+if (buildVersion >= 255) {
// Force the use of useWriteCommands
coll._mongo.useWriteCommands = function() {
return true;
diff --git a/jstests/noPassthroughWithMongod/capped4.js b/jstests/noPassthroughWithMongod/capped4.js
index f1371e8fa00..039f2557866 100644
--- a/jstests/noPassthroughWithMongod/capped4.js
+++ b/jstests/noPassthroughWithMongod/capped4.js
@@ -1,28 +1,28 @@
t = db.jstests_capped4;
t.drop();
-db.createCollection( "jstests_capped4", {size:1000,capped:true} );
-t.ensureIndex( { i: 1 } );
-for( i = 0; i < 20; ++i ) {
- t.save( { i : i } );
+db.createCollection("jstests_capped4", {size: 1000, capped: true});
+t.ensureIndex({i: 1});
+for (i = 0; i < 20; ++i) {
+ t.save({i: i});
}
-c = t.find().sort( { $natural: -1 } ).limit( 2 );
+c = t.find().sort({$natural: -1}).limit(2);
c.next();
c.next();
-d = t.find().sort( { i: -1 } ).limit( 2 );
+d = t.find().sort({i: -1}).limit(2);
d.next();
d.next();
-for( i = 20; t.findOne( { i:19 } ); ++i ) {
- t.save( { i : i } );
+for (i = 20; t.findOne({i: 19}); ++i) {
+ t.save({i: i});
}
-//assert( !t.findOne( { i : 19 } ), "A" );
-assert( !c.hasNext(), "B" );
-assert( !d.hasNext(), "C" );
-assert( t.find().sort( { i : 1 } ).hint( { i : 1 } ).toArray().length > 10, "D" );
+// assert( !t.findOne( { i : 19 } ), "A" );
+assert(!c.hasNext(), "B");
+assert(!d.hasNext(), "C");
+assert(t.find().sort({i: 1}).hint({i: 1}).toArray().length > 10, "D");
-assert( t.findOne( { i : i - 1 } ), "E" );
-var res = assert.writeError(t.remove( { i : i - 1 } ));
-assert( res.getWriteError().errmsg.indexOf( "capped" ) >= 0, "F" );
+assert(t.findOne({i: i - 1}), "E");
+var res = assert.writeError(t.remove({i: i - 1}));
+assert(res.getWriteError().errmsg.indexOf("capped") >= 0, "F");
-assert( t.validate().valid, "G" );
+assert(t.validate().valid, "G");
diff --git a/jstests/noPassthroughWithMongod/capped_truncate.js b/jstests/noPassthroughWithMongod/capped_truncate.js
index 8408ea7294b..c35318a6649 100644
--- a/jstests/noPassthroughWithMongod/capped_truncate.js
+++ b/jstests/noPassthroughWithMongod/capped_truncate.js
@@ -11,48 +11,47 @@
'use strict';
db.capped_truncate.drop();
- assert.commandWorked(db.runCommand({ create: "capped_truncate",
- capped: true,
- size: 1000,
- autoIndexId: true }));
+ assert.commandWorked(
+ db.runCommand({create: "capped_truncate", capped: true, size: 1000, autoIndexId: true}));
var t = db.capped_truncate;
// It is an error to remove a non-positive number of documents.
- assert.commandFailed(db.runCommand({ captrunc: "capped_truncate", n: -1 }),
+ assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: -1}),
"captrunc didn't return an error when attempting to remove a negative " +
- "number of documents");
- assert.commandFailed(db.runCommand({ captrunc: "capped_truncate", n: 0 }),
+ "number of documents");
+ assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 0}),
"captrunc didn't return an error when attempting to remove 0 documents");
for (var j = 1; j <= 10; j++) {
- assert.writeOK(t.insert({x:j}));
+ assert.writeOK(t.insert({x: j}));
}
// It is an error to try and remove more documents than what exist in the capped collection.
- assert.commandFailed(db.runCommand({ captrunc: "capped_truncate", n: 20 }),
+ assert.commandFailed(db.runCommand({captrunc: "capped_truncate", n: 20}),
"captrunc didn't return an error when attempting to remove more" +
- " documents than what the collection contains");
+ " documents than what the collection contains");
- assert.commandWorked(db.runCommand({ captrunc: "capped_truncate", n: 5, inc: false }));
+ assert.commandWorked(db.runCommand({captrunc: "capped_truncate", n: 5, inc: false}));
assert.eq(5, t.count(), "wrong number of documents in capped collection after truncate");
assert.eq(5, t.distinct("_id").length, "wrong number of entries in _id index after truncate");
- var last = t.find({},{_id:1}).sort({_id:-1}).next();
- assert.neq(null, t.findOne({_id: last._id}),
+ var last = t.find({}, {_id: 1}).sort({_id: -1}).next();
+ assert.neq(null,
+ t.findOne({_id: last._id}),
tojson(last) + " is in _id index, but not in capped collection after truncate");
// It is an error to run the captrunc command on a nonexistent collection.
- assert.commandFailed(db.runCommand({ captrunc: "nonexistent", n: 1 }),
+ assert.commandFailed(db.runCommand({captrunc: "nonexistent", n: 1}),
"captrunc didn't return an error for a nonexistent collection");
// It is an error to run the captrunc command on a non-capped collection.
var collName = "noncapped";
db[collName].drop();
- assert.commandWorked(db.runCommand({ create: collName, capped: false }));
+ assert.commandWorked(db.runCommand({create: collName, capped: false}));
for (var j = 1; j <= 10; j++) {
- assert.writeOK(db[collName].insert({x:j}));
+ assert.writeOK(db[collName].insert({x: j}));
}
- assert.commandFailed(db.runCommand({ captrunc: collName, n: 5 }),
+ assert.commandFailed(db.runCommand({captrunc: collName, n: 5}),
"captrunc didn't return an error for a non-capped collection");
})();
diff --git a/jstests/noPassthroughWithMongod/clonecollection.js b/jstests/noPassthroughWithMongod/clonecollection.js
index 022ef58f595..a3633a12e58 100644
--- a/jstests/noPassthroughWithMongod/clonecollection.js
+++ b/jstests/noPassthroughWithMongod/clonecollection.js
@@ -6,49 +6,53 @@ var toMongod = MongoRunner.runMongod({bind_ip: "127.0.0.1"});
var f = fromMongod.getDB(baseName);
var t = toMongod.getDB(baseName);
-for( i = 0; i < 1000; ++i ) {
- f.a.save( { i: i } );
+for (i = 0; i < 1000; ++i) {
+ f.a.save({i: i});
}
-assert.eq( 1000, f.a.find().count() , "A1" );
+assert.eq(1000, f.a.find().count(), "A1");
-assert.commandWorked( t.cloneCollection( "localhost:" + fromMongod.port, "a" ) );
-assert.eq( 1000, t.a.find().count() , "A2" );
+assert.commandWorked(t.cloneCollection("localhost:" + fromMongod.port, "a"));
+assert.eq(1000, t.a.find().count(), "A2");
t.a.drop();
-assert.commandWorked( t.cloneCollection( "localhost:" + fromMongod.port, "a", { i: { $gte: 10, $lt: 20 } } ) );
-assert.eq( 10, t.a.find().count() , "A3" );
+assert.commandWorked(t.cloneCollection("localhost:" + fromMongod.port,
+ "a",
+ {i: {$gte: 10, $lt: 20}}));
+assert.eq(10, t.a.find().count(), "A3");
t.a.drop();
-assert.eq( 0, t.a.getIndexes().length, "prep 2");
+assert.eq(0, t.a.getIndexes().length, "prep 2");
-f.a.ensureIndex( { i: 1 } );
-assert.eq( 2, f.a.getIndexes().length, "expected index missing" );
-assert.commandWorked( t.cloneCollection( "localhost:" + fromMongod.port, "a" ) );
-if ( t.a.getIndexes().length != 2 ) {
- printjson( t.a.getIndexes());
+f.a.ensureIndex({i: 1});
+assert.eq(2, f.a.getIndexes().length, "expected index missing");
+assert.commandWorked(t.cloneCollection("localhost:" + fromMongod.port, "a"));
+if (t.a.getIndexes().length != 2) {
+ printjson(t.a.getIndexes());
}
-assert.eq( 2, t.a.getIndexes().length, "expected index missing" );
+assert.eq(2, t.a.getIndexes().length, "expected index missing");
// Verify index works
-x = t.a.find( { i: 50 } ).hint( { i: 1 } ).explain("executionStats");
-printjson( x );
-assert.eq( 1, x.executionStats.nReturned , "verify 1" );
-assert.eq( 1, t.a.find( { i: 50 } ).hint( { i: 1 } ).toArray().length, "match length did not match expected" );
+x = t.a.find({i: 50}).hint({i: 1}).explain("executionStats");
+printjson(x);
+assert.eq(1, x.executionStats.nReturned, "verify 1");
+assert.eq(1,
+ t.a.find({i: 50}).hint({i: 1}).toArray().length,
+ "match length did not match expected");
// Check that capped-ness is preserved on clone
f.a.drop();
t.a.drop();
-f.createCollection( "a", {capped:true,size:1000} );
-assert( f.a.isCapped() );
-assert.commandWorked( t.cloneCollection( "localhost:" + fromMongod.port, "a" ) );
-assert( t.a.isCapped(), "cloned collection not capped" );
+f.createCollection("a", {capped: true, size: 1000});
+assert(f.a.isCapped());
+assert.commandWorked(t.cloneCollection("localhost:" + fromMongod.port, "a"));
+assert(t.a.isCapped(), "cloned collection not capped");
// Check that cloning to "system.profile" is disallowed.
f.a.drop();
f.system.profile.drop();
-assert.commandWorked( f.setProfilingLevel( 2 ) );
-assert.writeOK( f.a.insert( {} ) );
-assert.gt( f.system.profile.count(), 0 );
+assert.commandWorked(f.setProfilingLevel(2));
+assert.writeOK(f.a.insert({}));
+assert.gt(f.system.profile.count(), 0);
t.system.profile.drop();
-assert.commandFailed( t.cloneCollection( "localhost:" + fromMongod.port, "system.profile" ) );
+assert.commandFailed(t.cloneCollection("localhost:" + fromMongod.port, "system.profile"));
diff --git a/jstests/noPassthroughWithMongod/connections_opened.js b/jstests/noPassthroughWithMongod/connections_opened.js
index e3f25b11fce..2ec192ed1e2 100644
--- a/jstests/noPassthroughWithMongod/connections_opened.js
+++ b/jstests/noPassthroughWithMongod/connections_opened.js
@@ -8,54 +8,55 @@ var mongo = MongoRunner.runMongod({});
var db = mongo.getDB("test");
var availableConnections = db.serverStatus().connections.available;
-if ( availableConnections < ( numPerTypeToCreate * 10 ) ) {
- numPerTypeToCreate = Math.floor( availableConnections / 10 );
+if (availableConnections < (numPerTypeToCreate * 10)) {
+ numPerTypeToCreate = Math.floor(availableConnections / 10);
}
-print( "numPerTypeToCreate: " + numPerTypeToCreate );
+print("numPerTypeToCreate: " + numPerTypeToCreate);
var testDB = 'connectionsOpenedTest';
var signalCollection = 'keepRunning';
function createPersistentConnection() {
assert.soon(function() {
- try {
- return new Mongo(db.getMongo().host);
- } catch (x) {
- return false;
- }}, "Timed out waiting for persistent connection to connect", 30000, 5000);
+ try {
+ return new Mongo(db.getMongo().host);
+ } catch (x) {
+ return false;
+ }
+ }, "Timed out waiting for persistent connection to connect", 30000, 5000);
}
function createTemporaryConnection() {
// Retry connecting until you are successful
- var pollString = "var conn = null;" +
- "assert.soon(function() {" +
- "try { conn = new Mongo(\"" + db.getMongo().host + "\"); return conn" +
- "} catch (x) {return false;}}, " +
+ var pollString = "var conn = null;" + "assert.soon(function() {" + "try { conn = new Mongo(\"" +
+ db.getMongo().host + "\"); return conn" + "} catch (x) {return false;}}, " +
"\"Timed out waiting for temporary connection to connect\", 30000, 5000);";
// Poll the signal collection until it is told to terminate.
- pollString += "assert.soon(function() {"
- + "return conn.getDB('" + testDB + "').getCollection('" + signalCollection + "')"
- + ".findOne().stop;}, \"Parallel shell never told to terminate\", 10 * 60000);";
+ pollString += "assert.soon(function() {" + "return conn.getDB('" + testDB +
+ "').getCollection('" + signalCollection + "')" +
+ ".findOne().stop;}, \"Parallel shell never told to terminate\", 10 * 60000);";
return startParallelShell(pollString, null, true);
}
function waitForConnections(expectedCurrentConnections, expectedTotalConnections) {
- assert.soon(function() {
- var currentConnInfo = db.serverStatus().connections;
- return (expectedCurrentConnections == currentConnInfo.current) &&
- (expectedTotalConnections, currentConnInfo.totalCreated);
- },
- {toString: function() {
- return "Incorrect connection numbers. Expected " + expectedCurrentConnections +
- " current connections and " + expectedTotalConnections + " total" +
- " connections. Connection info from serverStatus: " +
- tojson(db.serverStatus().connections); } },
- 5 * 60000);
-
+ assert.soon(
+ function() {
+ var currentConnInfo = db.serverStatus().connections;
+ return (expectedCurrentConnections == currentConnInfo.current) &&
+ (expectedTotalConnections, currentConnInfo.totalCreated);
+ },
+ {
+ toString: function() {
+ return "Incorrect connection numbers. Expected " + expectedCurrentConnections +
+ " current connections and " + expectedTotalConnections + " total" +
+ " connections. Connection info from serverStatus: " +
+ tojson(db.serverStatus().connections);
+ }
+ },
+ 5 * 60000);
}
-
var originalConnInfo = db.serverStatus().connections;
assert.gt(originalConnInfo.current, 0);
assert.gt(originalConnInfo.totalCreated, 0);
@@ -72,7 +73,7 @@ waitForConnections(originalConnInfo.current + numPerTypeToCreate,
jsTestLog("Creating temporary connections");
db.getSiblingDB(testDB).dropDatabase();
-db.getSiblingDB(testDB).getCollection(signalCollection).insert({stop:false});
+db.getSiblingDB(testDB).getCollection(signalCollection).insert({stop: false});
var tempConns = [];
for (var i = 0; i < numPerTypeToCreate; i++) {
@@ -80,21 +81,21 @@ for (var i = 0; i < numPerTypeToCreate; i++) {
}
jsTestLog("Testing that temporary connections increased the current and totalCreated counters");
-waitForConnections(originalConnInfo.current + numPerTypeToCreate*2,
- originalConnInfo.totalCreated + numPerTypeToCreate*2);
+waitForConnections(originalConnInfo.current + numPerTypeToCreate * 2,
+ originalConnInfo.totalCreated + numPerTypeToCreate * 2);
jsTestLog("Waiting for all temporary connections to be closed");
// Notify waiting parallel shells to terminate, causing the connection count to go back down.
-db.getSiblingDB(testDB).getCollection(signalCollection).update({}, {$set : {stop:true}});
+db.getSiblingDB(testDB).getCollection(signalCollection).update({}, {$set: {stop: true}});
for (var i = 0; i < tempConns.length; i++) {
- tempConns[i](); // wait on parallel shell to terminate
+ tempConns[i](); // wait on parallel shell to terminate
}
jsTestLog("Testing that current connections counter went down after temporary connections closed");
waitForConnections(originalConnInfo.current + numPerTypeToCreate,
- originalConnInfo.totalCreated + numPerTypeToCreate*2);
+ originalConnInfo.totalCreated + numPerTypeToCreate * 2);
persistent = null;
gc();
-MongoRunner.stopMongod( mongo );
+MongoRunner.stopMongod(mongo);
diff --git a/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js b/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
index 8aa0c79b5a6..f9f9f7b9f06 100644
--- a/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
+++ b/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
@@ -13,16 +13,22 @@
_writeMode = mode;
},
writeMode: function() {
- return _writeMode;
+ return _writeMode;
+ },
+ getSlaveOk: function() {
+ return true;
},
- getSlaveOk: function() { return true; },
runCommand: function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {ok: 1.0};
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
+ return {
+ ok: 1.0
+ };
},
insert: function(db, indexSpecs, opts) {
- insertsRan.push({db: db, indexSpecs: indexSpecs, opts: opts});
- return {ok: 1.0};
+ insertsRan.push({db: db, indexSpecs: indexSpecs, opts: opts});
+ return {
+ ok: 1.0
+ };
},
getWriteConcern: function() {
return null;
@@ -43,25 +49,25 @@
assert.eq(commandsRan.length, 1);
assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
assert.eq(commandsRan[0].cmd["indexes"][0],
- {ns: "test.create_indexes_shell_helper", key: {x:1}, name: "x_1"});
+ {ns: "test.create_indexes_shell_helper", key: {x: 1}, name: "x_1"});
commandsRan = [];
t.createIndexes([{y: 1}, {z: -1}]);
assert.eq(commandsRan.length, 1);
- assert( commandsRan[0].cmd.hasOwnProperty("createIndexes"));
+ assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
assert.eq(commandsRan[0].cmd["indexes"][0],
- {ns: "test.create_indexes_shell_helper", key: {y:1}, name: "y_1"});
+ {ns: "test.create_indexes_shell_helper", key: {y: 1}, name: "y_1"});
assert.eq(commandsRan[0].cmd["indexes"][1],
- {ns: "test.create_indexes_shell_helper", key: {z:-1}, name: "z_-1"});
+ {ns: "test.create_indexes_shell_helper", key: {z: -1}, name: "z_-1"});
commandsRan = [];
t.createIndex({a: 1});
assert.eq(commandsRan.length, 1);
- assert( commandsRan[0].cmd.hasOwnProperty("createIndexes"));
+ assert(commandsRan[0].cmd.hasOwnProperty("createIndexes"));
assert.eq(commandsRan[0].cmd["indexes"][0],
- {ns: "test.create_indexes_shell_helper", key: {a:1}, name: "a_1"});
+ {ns: "test.create_indexes_shell_helper", key: {a: 1}, name: "a_1"});
db.getMongo().forceWriteMode("compatibility");
@@ -70,13 +76,12 @@
t.createIndex({b: 1});
assert.eq(insertsRan.length, 1);
assert.eq(insertsRan[0]["indexSpecs"]["ns"], "test.create_indexes_shell_helper");
- assert.eq(insertsRan[0]["indexSpecs"]["key"], {b:1});
+ assert.eq(insertsRan[0]["indexSpecs"]["key"], {b: 1});
assert.eq(insertsRan[0]["indexSpecs"]["name"], "b_1");
- //getLastError is called in the course of the bulk insert
+ // getLastError is called in the course of the bulk insert
assert.eq(commandsRan.length, 1);
assert(commandsRan[0].cmd.hasOwnProperty("getlasterror"));
- }
- finally {
+ } finally {
db._mongo = mongo;
}
}());
diff --git a/jstests/noPassthroughWithMongod/cursor8.js b/jstests/noPassthroughWithMongod/cursor8.js
index 34058b391e3..bcaf2387ae4 100644
--- a/jstests/noPassthroughWithMongod/cursor8.js
+++ b/jstests/noPassthroughWithMongod/cursor8.js
@@ -2,24 +2,24 @@
var t = db.cursor8;
t.drop();
-t.save( {} );
-t.save( {} );
-t.save( {} );
+t.save({});
+t.save({});
+t.save({});
-assert.eq( 3 , t.find().count() , "A0" );
+assert.eq(3, t.find().count(), "A0");
var initialTotalOpen = db.serverStatus().metrics.cursor.open.total;
-function test( want , msg ){
+function test(want, msg) {
var res = db.serverStatus().metrics.cursor;
assert.eq(want + initialTotalOpen, res.open.total, msg + " " + tojson(res));
}
-test( 0 , "A1" );
-assert.eq( 3 , t.find().count() , "A2" );
-assert.eq( 3 , t.find( {} ).count() , "A3" );
+test(0, "A1");
+assert.eq(3, t.find().count(), "A2");
+assert.eq(3, t.find({}).count(), "A3");
// This cursor should remain open on the server.
-var cursor = t.find( {} ).batchSize( 2 );
+var cursor = t.find({}).batchSize(2);
cursor.next();
-test( 1 , "B1" );
+test(1, "B1");
diff --git a/jstests/noPassthroughWithMongod/default_read_pref.js b/jstests/noPassthroughWithMongod/default_read_pref.js
index 05be7915626..46967012a31 100644
--- a/jstests/noPassthroughWithMongod/default_read_pref.js
+++ b/jstests/noPassthroughWithMongod/default_read_pref.js
@@ -8,18 +8,25 @@
try {
var commandsRan = [];
db._mongo = {
- getSlaveOk: function() { return false; },
- getReadPrefMode: function() { return mongo.getReadPrefMode(); },
- getReadPref: function() { return mongo.getReadPref(); },
+ getSlaveOk: function() {
+ return false;
+ },
+ getReadPrefMode: function() {
+ return mongo.getReadPrefMode();
+ },
+ getReadPref: function() {
+ return mongo.getReadPref();
+ },
runCommand: function(db, cmd, opts) {
- commandsRan.push({db: db, cmd: cmd, opts:opts});
+ commandsRan.push({db: db, cmd: cmd, opts: opts});
}
};
db.runReadCommand({ping: 1});
assert.eq(commandsRan.length, 1);
assert.docEq(commandsRan[0].cmd, {ping: 1}, "The command should not have been wrapped.");
- assert.eq(commandsRan[0].opts & DBQuery.Option.slaveOk, 0, "The slaveOk bit should not be set.");
+ assert.eq(
+ commandsRan[0].opts & DBQuery.Option.slaveOk, 0, "The slaveOk bit should not be set.");
} finally {
db._mongo = mongo;
diff --git a/jstests/noPassthroughWithMongod/dup_bgindex.js b/jstests/noPassthroughWithMongod/dup_bgindex.js
index 3d36e90a4ae..cd0078248bb 100644
--- a/jstests/noPassthroughWithMongod/dup_bgindex.js
+++ b/jstests/noPassthroughWithMongod/dup_bgindex.js
@@ -2,15 +2,15 @@
// This test runs fairly quickly but cannot be in /jstests/. So it lives in slowNightly for now.
var t = db.duplIndexTest;
t.drop();
-for (var i=0; i<10000; i++) {
- t.insert( { name : "foo" , z : { a : 17 , b : 4}, i: i } );
+for (var i = 0; i < 10000; i++) {
+ t.insert({name: "foo", z: {a: 17, b: 4}, i: i});
}
var cmd = "db.duplIndexTest.ensureIndex( { i : 1 }, {background:true} );";
var join1 = startParallelShell(cmd);
var join2 = startParallelShell(cmd);
-t.ensureIndex( { i : 1 }, {background:true} );
-assert.eq(1, t.find({i:1}).count(), "Should find only one doc");
-t.dropIndex({ i : 1 });
-assert.eq(1, t.find({i:1}).count(), "Should find only one doc");
+t.ensureIndex({i: 1}, {background: true});
+assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
+t.dropIndex({i: 1});
+assert.eq(1, t.find({i: 1}).count(), "Should find only one doc");
join1();
join2();
diff --git a/jstests/noPassthroughWithMongod/explain1.js b/jstests/noPassthroughWithMongod/explain1.js
index 81baeb6e918..021108028d4 100644
--- a/jstests/noPassthroughWithMongod/explain1.js
+++ b/jstests/noPassthroughWithMongod/explain1.js
@@ -4,13 +4,16 @@ t = db.jstests_slowNightly_explain1;
t.drop();
// Periodically drops the collection, invalidating client cursors for s2's operations.
-s1 = startParallelShell( "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 80; ++i ) { t.drop(); t.ensureIndex({x:1}); for( var j = 0; j < 1000; ++j ) { t.save( {x:j,y:1} ) }; sleep( 100 ); }" );
+s1 = startParallelShell(
+ "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 80; ++i ) { t.drop(); t.ensureIndex({x:1}); for( var j = 0; j < 1000; ++j ) { t.save( {x:j,y:1} ) }; sleep( 100 ); }");
// Query repeatedly.
-s2 = startParallelShell( "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 500; ++i ) { try { z = t.find( {x:{$gt:0},y:1} ).explain(); t.count( {x:{$gt:0},y:1} ); } catch( e ) {} }" );
+s2 = startParallelShell(
+ "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 500; ++i ) { try { z = t.find( {x:{$gt:0},y:1} ).explain(); t.count( {x:{$gt:0},y:1} ); } catch( e ) {} }");
// Put pressure on s2 to yield more often.
-s3 = startParallelShell( "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 200; ++i ) { t.validate({scandata:true}); }" );
+s3 = startParallelShell(
+ "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 200; ++i ) { t.validate({scandata:true}); }");
s1();
s2();
diff --git a/jstests/noPassthroughWithMongod/explain2.js b/jstests/noPassthroughWithMongod/explain2.js
index 032f0fa8de8..81b8951488f 100644
--- a/jstests/noPassthroughWithMongod/explain2.js
+++ b/jstests/noPassthroughWithMongod/explain2.js
@@ -2,17 +2,17 @@
collName = 'jstests_slowNightly_explain2';
-t = db[ collName ];
+t = db[collName];
t.drop();
-db.createCollection( collName, {capped:true,size:100000} );
-t = db[ collName ];
-t.ensureIndex( {x:1} );
+db.createCollection(collName, {capped: true, size: 100000});
+t = db[collName];
+t.ensureIndex({x: 1});
-a = startParallelShell( 'for( i = 0; i < 50000; ++i ) { db.' + collName + '.insert( {x:i,y:1} ); }' );
+a = startParallelShell('for( i = 0; i < 50000; ++i ) { db.' + collName + '.insert( {x:i,y:1} ); }');
-for( i = 0; i < 800; ++i ) {
- t.find( {x:{$gt:-1},y:1} ).sort({x:-1}).explain();
+for (i = 0; i < 800; ++i) {
+ t.find({x: {$gt: -1}, y: 1}).sort({x: -1}).explain();
}
a(); \ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/explain3.js b/jstests/noPassthroughWithMongod/explain3.js
index af6fde7b81b..ed22604f02c 100644
--- a/jstests/noPassthroughWithMongod/explain3.js
+++ b/jstests/noPassthroughWithMongod/explain3.js
@@ -4,13 +4,16 @@ t = db.jstests_slowNightly_explain3;
t.drop();
// Periodically drops the collection, invalidating client cursors for s2's operations.
-s1 = startParallelShell( "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 80; ++i ) { t.drop(); t.ensureIndex({x:1}); for( var j = 0; j < 1000; ++j ) { t.save( {x:j,y:1} ) }; sleep( 100 ); }" );
+s1 = startParallelShell(
+ "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 80; ++i ) { t.drop(); t.ensureIndex({x:1}); for( var j = 0; j < 1000; ++j ) { t.save( {x:j,y:1} ) }; sleep( 100 ); }");
// Query repeatedly.
-s2 = startParallelShell( "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 500; ++i ) { try { z = t.find( {x:{$gt:0},y:1} ).sort({x:1}).explain(); } catch( e ) {} }" );
+s2 = startParallelShell(
+ "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 500; ++i ) { try { z = t.find( {x:{$gt:0},y:1} ).sort({x:1}).explain(); } catch( e ) {} }");
// Put pressure on s2 to yield more often.
-s3 = startParallelShell( "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 200; ++i ) { t.validate({scandata:true}); }" );
+s3 = startParallelShell(
+ "t = db.jstests_slowNightly_explain1; for( var i = 0; i < 200; ++i ) { t.validate({scandata:true}); }");
s1();
s2();
diff --git a/jstests/noPassthroughWithMongod/external_sort_text_agg.js b/jstests/noPassthroughWithMongod/external_sort_text_agg.js
index 5479d1b30b7..ecb843ae9e5 100644
--- a/jstests/noPassthroughWithMongod/external_sort_text_agg.js
+++ b/jstests/noPassthroughWithMongod/external_sort_text_agg.js
@@ -3,15 +3,16 @@ var t = db.external_sort_text_agg;
t.drop();
t.ensureIndex({text: "text"});
for (i = 0; i < 100; i++) {
- t.insert({_id:i, text: Array(210000).join("asdf ")});
+ t.insert({_id: i, text: Array(210000).join("asdf ")});
// string over 1MB to hit the 100MB threshold for external sort
}
var score = t.find({$text: {$search: "asdf"}}, {score: {$meta: 'textScore'}}).next().score;
-var res = t.aggregate([{$match: {$text: {$search: "asdf"}}},
- {$sort: {"_id": 1}},
- {$project: {string: "$text", score: {$meta: "textScore"}}}
- ],
+var res = t.aggregate([
+ {$match: {$text: {$search: "asdf"}}},
+ {$sort: {"_id": 1}},
+ {$project: {string: "$text", score: {$meta: "textScore"}}}
+],
{allowDiskUse: true});
// we must use .next() rather than a $limit because a $limit will optimize away the external sort
printjson(res.next());
diff --git a/jstests/noPassthroughWithMongod/find_and_modify_server16469.js b/jstests/noPassthroughWithMongod/find_and_modify_server16469.js
index 5c54c05f6e2..43f6ed3910c 100644
--- a/jstests/noPassthroughWithMongod/find_and_modify_server16469.js
+++ b/jstests/noPassthroughWithMongod/find_and_modify_server16469.js
@@ -14,8 +14,8 @@ var result = db.adminCommand({getParameter: 1, internalQueryExecMaxBlockingSortB
assert.commandWorked(result);
var oldSortLimit = result.internalQueryExecMaxBlockingSortBytes;
var newSortLimit = 1024 * 1024;
-assert.commandWorked(db.adminCommand({setParameter: 1,
- internalQueryExecMaxBlockingSortBytes: newSortLimit}));
+assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryExecMaxBlockingSortBytes: newSortLimit}));
try {
// Insert ~3MB of data.
@@ -28,18 +28,19 @@ try {
}
// Verify that an unindexed sort of this data fails with a find() if no limit is specified.
- assert.throws(function() { coll.find({}).sort({b: 1}).itcount(); });
+ assert.throws(function() {
+ coll.find({}).sort({b: 1}).itcount();
+ });
// Verify that an unindexed sort of this data succeeds with findAndModify (which should be
// requesting a top-K sort).
- result = coll.runCommand({findAndModify: coll.getName(), query: {}, update: {$set: {c: 1}},
- sort: {b: 1}});
+ result = coll.runCommand(
+ {findAndModify: coll.getName(), query: {}, update: {$set: {c: 1}}, sort: {b: 1}});
assert.commandWorked(result);
assert.neq(result.value, null);
assert.eq(result.value.b, 0);
-}
-finally {
+} finally {
// Restore the orginal sort memory limit.
- assert.commandWorked(db.adminCommand({setParameter: 1,
- internalQueryExecMaxBlockingSortBytes: oldSortLimit}));
+ assert.commandWorked(
+ db.adminCommand({setParameter: 1, internalQueryExecMaxBlockingSortBytes: oldSortLimit}));
}
diff --git a/jstests/noPassthroughWithMongod/fsync2.js b/jstests/noPassthroughWithMongod/fsync2.js
index dad1c6088bf..5710f57e33c 100644
--- a/jstests/noPassthroughWithMongod/fsync2.js
+++ b/jstests/noPassthroughWithMongod/fsync2.js
@@ -1,59 +1,56 @@
-function debug( msg ) {
- print( "fsync2: " + msg );
+function debug(msg) {
+ print("fsync2: " + msg);
}
var loops = 200;
-if ( db.getSisterDB("local").slaves.count() > 0 ) {
+if (db.getSisterDB("local").slaves.count() > 0) {
// replication can cause some write locks on local
// therefore this test is flaky with replication on
loops = 1;
}
-
function doTest() {
db.fsync2.drop();
// Make write ops asynchronous so the test won't hang when in fsync lock mode.
db.getMongo().forceWriteMode('legacy');
- db.fsync2.save( {x:1} );
-
- d = db.getSisterDB( "admin" );
+ db.fsync2.save({x: 1});
+
+ d = db.getSisterDB("admin");
// Don't test if the engine doesn't support fsyncLock
- var ret = d.runCommand( {fsync:1, lock: 1 } );
+ var ret = d.runCommand({fsync: 1, lock: 1});
if (!ret.ok) {
assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
jsTestLog("Skipping test as engine does not support fsyncLock");
return;
}
-
- assert.commandWorked( ret );
- debug( "after lock" );
-
+ assert.commandWorked(ret);
- for ( var i=0; i<loops; i++) {
- debug( "loop: " + i );
+ debug("after lock");
+
+ for (var i = 0; i < loops; i++) {
+ debug("loop: " + i);
assert.eq(1, db.fsync2.count());
sleep(100);
}
-
- debug( "about to save" );
- db.fsync2.save( {x:1} );
- debug( "save done" );
-
- m = new Mongo( db.getMongo().host );
-
+
+ debug("about to save");
+ db.fsync2.save({x: 1});
+ debug("save done");
+
+ m = new Mongo(db.getMongo().host);
+
// Uncomment once SERVER-4243 is fixed
- //assert.eq(1, m.getDB(db.getName()).fsync2.count());
-
- assert( m.getDB("admin").fsyncUnlock().ok );
+ // assert.eq(1, m.getDB(db.getName()).fsync2.count());
+
+ assert(m.getDB("admin").fsyncUnlock().ok);
- assert.eq( 2, db.fsync2.count() );
-
+ assert.eq(2, db.fsync2.count());
}
-if (!jsTest.options().auth) { // SERVER-4243
+if (!jsTest.options().auth) { // SERVER-4243
doTest();
}
diff --git a/jstests/noPassthroughWithMongod/ftdc_params.js b/jstests/noPassthroughWithMongod/ftdc_params.js
index 9a0fe20d965..5fae9e77c49 100644
--- a/jstests/noPassthroughWithMongod/ftdc_params.js
+++ b/jstests/noPassthroughWithMongod/ftdc_params.js
@@ -1,16 +1,18 @@
// FTDC test cases
//
-(function () {
+(function() {
'use strict';
- var admin = db.getSiblingDB( "admin" );
+ var admin = db.getSiblingDB("admin");
// Check the defaults are correct
//
function getparam(field) {
- var q = { getParameter : 1 };
+ var q = {
+ getParameter: 1
+ };
q[field] = 1;
- var ret = admin.runCommand( q );
+ var ret = admin.runCommand(q);
return ret[field];
}
@@ -23,7 +25,7 @@
assert.eq(getparam("diagnosticDataCollectionSamplesPerInterimUpdate"), 10);
function setparam(obj) {
- var ret = admin.runCommand( Object.extend({ setParameter : 1 }, obj));
+ var ret = admin.runCommand(Object.extend({setParameter: 1}, obj));
return ret;
}
@@ -55,4 +57,4 @@
assert.commandWorked(setparam({"diagnosticDataCollectionPeriodMillis": 1000}));
assert.commandWorked(setparam({"diagnosticDataCollectionSamplesPerChunk": 300}));
assert.commandWorked(setparam({"diagnosticDataCollectionSamplesPerInterimUpdate": 10}));
-}) ();
+})();
diff --git a/jstests/noPassthroughWithMongod/geo_axis_aligned.js b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
index 7cd33b2d638..47c0369e5e0 100644
--- a/jstests/noPassthroughWithMongod/geo_axis_aligned.js
+++ b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
@@ -3,27 +3,28 @@
t = db.axisaligned;
t.drop();
-scale = [ 1, 10, 1000, 10000 ];
-bits = [ 2, 3, 4, 5, 6, 7, 8, 9 ];
-radius = [ 0.0001, 0.001, 0.01, 0.1 ];
-center = [ [ 5, 52 ], [ 6, 53 ], [ 7, 54 ], [ 8, 55 ], [ 9, 56 ] ];
+scale = [1, 10, 1000, 10000];
+bits = [2, 3, 4, 5, 6, 7, 8, 9];
+radius = [0.0001, 0.001, 0.01, 0.1];
+center = [[5, 52], [6, 53], [7, 54], [8, 55], [9, 56]];
bound = [];
-for( var j = 0; j < center.length; j++ ) bound.push( [-180, 180] );
+for (var j = 0; j < center.length; j++)
+ bound.push([-180, 180]);
// Scale all our values to test different sizes
radii = [];
centers = [];
bounds = [];
-for( var s = 0; s < scale.length; s++ ){
- for ( var i = 0; i < radius.length; i++ ) {
- radii.push( radius[i] * scale[s] );
+for (var s = 0; s < scale.length; s++) {
+ for (var i = 0; i < radius.length; i++) {
+ radii.push(radius[i] * scale[s]);
}
- for ( var j = 0; j < center.length; j++ ) {
- centers.push( [ center[j][0] * scale[s], center[j][1] * scale[s] ] );
- bounds.push( [ bound[j][0] * scale[s], bound[j][1] * scale[s] ] );
+ for (var j = 0; j < center.length; j++) {
+ centers.push([center[j][0] * scale[s], center[j][1] * scale[s]]);
+ bounds.push([bound[j][0] * scale[s], bound[j][1] * scale[s]]);
}
}
@@ -31,76 +32,85 @@ radius = radii;
center = centers;
bound = bounds;
+for (var b = 0; b < bits.length; b++) {
+ printjson(radius);
+ printjson(centers);
-for ( var b = 0; b < bits.length; b++ ) {
- printjson( radius );
- printjson( centers );
-
- for ( var i = 0; i < radius.length; i++ ) {
- for ( var j = 0; j < center.length; j++ ) {
- printjson( { center : center[j], radius : radius[i], bits : bits[b] } );
+ for (var i = 0; i < radius.length; i++) {
+ for (var j = 0; j < center.length; j++) {
+ printjson({center: center[j], radius: radius[i], bits: bits[b]});
t.drop();
// Make sure our numbers are precise enough for this test
- if( (center[j][0] - radius[i] == center[j][0]) || (center[j][1] - radius[i] == center[j][1]) )
+ if ((center[j][0] - radius[i] == center[j][0]) ||
+ (center[j][1] - radius[i] == center[j][1]))
continue;
- t.save( { "_id" : 1, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] } } );
- t.save( { "_id" : 2, "loc" : { "x" : center[j][0], "y" : center[j][1] } } );
- t.save( { "_id" : 3, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] } } );
- t.save( { "_id" : 4, "loc" : { "x" : center[j][0], "y" : center[j][1] + radius[i] } } );
- t.save( { "_id" : 5, "loc" : { "x" : center[j][0], "y" : center[j][1] - radius[i] } } );
- t.save( { "_id" : 6, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] + radius[i] } } );
- t.save( { "_id" : 7, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] + radius[i] } } );
- t.save( { "_id" : 8, "loc" : { "x" : center[j][0] - radius[i], "y" : center[j][1] - radius[i] } } );
- t.save( { "_id" : 9, "loc" : { "x" : center[j][0] + radius[i], "y" : center[j][1] - radius[i] } } );
-
- var res = t.ensureIndex({ loc: "2d" },
- { max: bound[j][1],
- min : bound[j][0],
- bits : bits[b] });
+ t.save({"_id": 1, "loc": {"x": center[j][0] - radius[i], "y": center[j][1]}});
+ t.save({"_id": 2, "loc": {"x": center[j][0], "y": center[j][1]}});
+ t.save({"_id": 3, "loc": {"x": center[j][0] + radius[i], "y": center[j][1]}});
+ t.save({"_id": 4, "loc": {"x": center[j][0], "y": center[j][1] + radius[i]}});
+ t.save({"_id": 5, "loc": {"x": center[j][0], "y": center[j][1] - radius[i]}});
+ t.save(
+ {"_id": 6, "loc": {"x": center[j][0] - radius[i], "y": center[j][1] + radius[i]}});
+ t.save(
+ {"_id": 7, "loc": {"x": center[j][0] + radius[i], "y": center[j][1] + radius[i]}});
+ t.save(
+ {"_id": 8, "loc": {"x": center[j][0] - radius[i], "y": center[j][1] - radius[i]}});
+ t.save(
+ {"_id": 9, "loc": {"x": center[j][0] + radius[i], "y": center[j][1] - radius[i]}});
+
+ var res =
+ t.ensureIndex({loc: "2d"}, {max: bound[j][1], min: bound[j][0], bits: bits[b]});
// ensureIndex fails when this iteration inserted coordinates that are out of bounds.
// These are invalid cases, so we skip them.
- if (!res.ok) continue;
+ if (!res.ok)
+ continue;
- print( "DOING WITHIN QUERY ");
- r = t.find( { "loc" : { "$within" : { "$center" : [ center[j], radius[i] ] } } } );
+ print("DOING WITHIN QUERY ");
+ r = t.find({"loc": {"$within": {"$center": [center[j], radius[i]]}}});
- assert.eq( 5, r.count() );
+ assert.eq(5, r.count());
// FIXME: surely code like this belongs in utils.js.
a = r.toArray();
x = [];
- for ( k in a )
- x.push( a[k]["_id"] );
+ for (k in a)
+ x.push(a[k]["_id"]);
x.sort();
- assert.eq( [ 1, 2, 3, 4, 5 ], x );
+ assert.eq([1, 2, 3, 4, 5], x);
- print( " DOING NEAR QUERY ");
- //printjson( center[j] )
- r = t.find( { loc : { $near : center[j], $maxDistance : radius[i] } }, { _id : 1 } );
- assert.eq( 5, r.count() );
+ print(" DOING NEAR QUERY ");
+ // printjson( center[j] )
+ r = t.find({loc: {$near: center[j], $maxDistance: radius[i]}}, {_id: 1});
+ assert.eq(5, r.count());
- print( " DOING DIST QUERY ");
+ print(" DOING DIST QUERY ");
- a = db.runCommand({ geoNear : "axisaligned", near : center[j], maxDistance : radius[i] }).results;
- assert.eq( 5, a.length );
+ a = db.runCommand({geoNear: "axisaligned", near: center[j], maxDistance: radius[i]})
+ .results;
+ assert.eq(5, a.length);
var distance = 0;
- for( var k = 0; k < a.length; k++ ){
- assert.gte( a[k].dis, distance );
-
+ for (var k = 0; k < a.length; k++) {
+ assert.gte(a[k].dis, distance);
}
- r = t.find({ loc: { $within: { $box: [ [ center[j][0] - radius[i],
- center[j][1] - radius[i] ],
- [ center[j][0] + radius[i],
- center[j][1] + radius[i] ]]}}},
- { _id: 1 } );
- assert.eq( 9, r.count() );
-
+ r = t.find(
+ {
+ loc: {
+ $within: {
+ $box: [
+ [center[j][0] - radius[i], center[j][1] - radius[i]],
+ [center[j][0] + radius[i], center[j][1] + radius[i]]
+ ]
+ }
+ }
+ },
+ {_id: 1});
+ assert.eq(9, r.count());
}
}
}
diff --git a/jstests/noPassthroughWithMongod/geo_mnypts.js b/jstests/noPassthroughWithMongod/geo_mnypts.js
index d70a2bdb60a..5a936a3490d 100644
--- a/jstests/noPassthroughWithMongod/geo_mnypts.js
+++ b/jstests/noPassthroughWithMongod/geo_mnypts.js
@@ -7,47 +7,43 @@ var totalPts = 500 * 1000;
// Add points in a 100x100 grid
var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < totalPts; i++ ){
+for (var i = 0; i < totalPts; i++) {
var ii = i % 10000;
- bulk.insert({ loc : [ ii % 100, Math.floor( ii / 100 ) ] });
+ bulk.insert({loc: [ii % 100, Math.floor(ii / 100)]});
}
assert.writeOK(bulk.execute());
-coll.ensureIndex({ loc : "2d" });
+coll.ensureIndex({loc: "2d"});
// Check that quarter of points in each quadrant
-for( var i = 0; i < 4; i++ ){
+for (var i = 0; i < 4; i++) {
var x = i % 2;
- var y = Math.floor( i / 2 );
-
- var box = [[0, 0], [49, 49]];
- box[0][0] += ( x == 1 ? 50 : 0 );
- box[1][0] += ( x == 1 ? 50 : 0 );
- box[0][1] += ( y == 1 ? 50 : 0 );
- box[1][1] += ( y == 1 ? 50 : 0 );
+ var y = Math.floor(i / 2);
- assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).count() );
- assert.eq( totalPts / 4, coll.find({ loc : { $within : { $box : box } } }).itcount() );
+ var box = [[0, 0], [49, 49]];
+ box[0][0] += (x == 1 ? 50 : 0);
+ box[1][0] += (x == 1 ? 50 : 0);
+ box[0][1] += (y == 1 ? 50 : 0);
+ box[1][1] += (y == 1 ? 50 : 0);
+ assert.eq(totalPts / 4, coll.find({loc: {$within: {$box: box}}}).count());
+ assert.eq(totalPts / 4, coll.find({loc: {$within: {$box: box}}}).itcount());
}
// Check that half of points in each half
-for( var i = 0; i < 2; i++ ){
-
+for (var i = 0; i < 2; i++) {
var box = [[0, 0], [49, 99]];
- box[0][0] += ( i == 1 ? 50 : 0 );
- box[1][0] += ( i == 1 ? 50 : 0 );
-
- assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).count() );
- assert.eq( totalPts / 2, coll.find({ loc : { $within : { $box : box } } }).itcount() );
+ box[0][0] += (i == 1 ? 50 : 0);
+ box[1][0] += (i == 1 ? 50 : 0);
+ assert.eq(totalPts / 2, coll.find({loc: {$within: {$box: box}}}).count());
+ assert.eq(totalPts / 2, coll.find({loc: {$within: {$box: box}}}).itcount());
}
// Check that all but corner set of points in radius
-var circle = [[0, 0], (100 - 1) * Math.sqrt( 2 ) - 0.25 ];
-
-assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).count() );
-assert.eq( totalPts - totalPts / ( 100 * 100 ), coll.find({ loc : { $within : { $center : circle } } }).itcount() );
-
-
+var circle = [[0, 0], (100 - 1) * Math.sqrt(2) - 0.25];
+assert.eq(totalPts - totalPts / (100 * 100),
+ coll.find({loc: {$within: {$center: circle}}}).count());
+assert.eq(totalPts - totalPts / (100 * 100),
+ coll.find({loc: {$within: {$center: circle}}}).itcount());
diff --git a/jstests/noPassthroughWithMongod/geo_near_random1.js b/jstests/noPassthroughWithMongod/geo_near_random1.js
index ad67bdc2734..c9ea5f1da7c 100644
--- a/jstests/noPassthroughWithMongod/geo_near_random1.js
+++ b/jstests/noPassthroughWithMongod/geo_near_random1.js
@@ -5,9 +5,8 @@ var test = new GeoNearRandomTest("nightly.geo_near_random1");
test.insertPts(200);
-test.testPt([0,0]);
+test.testPt([0, 0]);
test.testPt(test.mkPt());
test.testPt(test.mkPt());
test.testPt(test.mkPt());
test.testPt(test.mkPt());
-
diff --git a/jstests/noPassthroughWithMongod/geo_near_random2.js b/jstests/noPassthroughWithMongod/geo_near_random2.js
index ac729b140e6..2fafb7d4c80 100644
--- a/jstests/noPassthroughWithMongod/geo_near_random2.js
+++ b/jstests/noPassthroughWithMongod/geo_near_random2.js
@@ -5,17 +5,19 @@ var test = new GeoNearRandomTest("nightly.geo_near_random2");
test.insertPts(10000);
-opts = {sphere:0, nToTest:test.nPts*0.01};
-test.testPt([0,0], opts);
+opts = {
+ sphere: 0,
+ nToTest: test.nPts * 0.01
+};
+test.testPt([0, 0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
opts.sphere = 1;
-test.testPt([0,0], opts);
+test.testPt([0, 0], opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
-
diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js
index 50bc5c29d68..073ffdeb72d 100644
--- a/jstests/noPassthroughWithMongod/geo_polygon.js
+++ b/jstests/noPassthroughWithMongod/geo_polygon.js
@@ -3,39 +3,79 @@ t.drop();
num = 0;
var bulk = t.initializeUnorderedBulkOp();
-for ( x = -180; x < 180; x += .5 ){
- for ( y = -180; y < 180; y += .5 ){
- o = { _id : num++ , loc : [ x , y ] };
- bulk.insert( o );
+for (x = -180; x < 180; x += .5) {
+ for (y = -180; y < 180; y += .5) {
+ o = {
+ _id: num++,
+ loc: [x, y]
+ };
+ bulk.insert(o);
}
}
assert.writeOK(bulk.execute());
var numTests = 31;
-for( var n = 0; n < numTests; n++ ){
+for (var n = 0; n < numTests; n++) {
t.dropIndexes();
- t.ensureIndex( { loc : "2d" }, { bits : 2 + n } );
+ t.ensureIndex({loc: "2d"}, {bits: 2 + n});
- assert.between( 9 - 2 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [1,1], [0,2]] }}} ).count() , 9, "Triangle Test", true);
- assert.eq( num , t.find( { loc : { "$within" : { "$polygon" : [ [-180,-180], [-180,180], [180,180], [180,-180] ] } } } ).count() , "Bounding Box Test" );
+ assert.between(9 - 2,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [1, 1], [0, 2]]}}}).count(),
+ 9,
+ "Triangle Test",
+ true);
+ assert.eq(
+ num,
+ t.find({
+ loc: {"$within": {"$polygon": [[-180, -180], [-180, 180], [180, 180], [180, -180]]}}
+ }).count(),
+ "Bounding Box Test");
- assert.eq( 441 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,10], [10,10], [10,0] ] } } } ).count() , "Square Test" );
- assert.eq( 25 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,2], [2,2], [2,0] ] } } } ).count() , "Square Test 2" );
+ assert.eq(
+ 441,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 10], [10, 10], [10, 0]]}}}).count(),
+ "Square Test");
+ assert.eq(25,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 2], [2, 2], [2, 0]]}}}).count(),
+ "Square Test 2");
- if(1){ // SERVER-3726
- // Points exactly on diagonals may be in or out, depending on how the error calculating the slope falls.
- assert.between( 341 - 18 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,10], [10,10], [10,0], [5,5] ] } } } ).count(), 341, "Square Missing Chunk Test", true );
- assert.between( 21 - 2 , t.find( { loc : { "$within" : { "$polygon" : [ [0,0], [0,2], [2,2], [2,0], [1,1] ] } } } ).count(), 21 , "Square Missing Chunk Test 2", true );
+ if (1) { // SERVER-3726
+ // Points exactly on diagonals may be in or out, depending on how the error calculating the
+ // slope falls.
+ assert.between(
+ 341 - 18,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 10], [10, 10], [10, 0], [5, 5]]}}})
+ .count(),
+ 341,
+ "Square Missing Chunk Test",
+ true);
+ assert.between(
+ 21 - 2,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 2], [2, 2], [2, 0], [1, 1]]}}})
+ .count(),
+ 21,
+ "Square Missing Chunk Test 2",
+ true);
}
- assert.eq( 1 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [0,0], [0,0]] }}} ).count() , "Point Test" );
+ assert.eq(1,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 0], [0, 0]]}}}).count(),
+ "Point Test");
// SERVER-3725
{
- assert.eq( 5 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [1,0], [2,0]] }}} ).count() , "Line Test 1" );
- assert.eq( 3 , t.find( { loc: { "$within": { "$polygon" : [[0,0], [0,0], [1,0]] }}} ).count() , "Line Test 2" );
- assert.eq( 5 , t.find( { loc: { "$within": { "$polygon" : [[0,2], [0,1], [0,0]] }}} ).count() , "Line Test 3" );
+ assert.eq(5,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [1, 0], [2, 0]]}}}).count(),
+ "Line Test 1");
+ assert.eq(3,
+ t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 0], [1, 0]]}}}).count(),
+ "Line Test 2");
+ assert.eq(5,
+ t.find({loc: {"$within": {"$polygon": [[0, 2], [0, 1], [0, 0]]}}}).count(),
+ "Line Test 3");
}
- assert.eq( 3 , t.find( { loc: { "$within": { "$polygon" : [[0,1], [0,0], [0,0]] }}} ).count() , "Line Test 4" );
+ assert.eq(3,
+ t.find({loc: {"$within": {"$polygon": [[0, 1], [0, 0], [0, 0]]}}}).count(),
+ "Line Test 4");
}
diff --git a/jstests/noPassthroughWithMongod/getmore_error.js b/jstests/noPassthroughWithMongod/getmore_error.js
index 8eebd7205ae..4fed6c38d3d 100644
--- a/jstests/noPassthroughWithMongod/getmore_error.js
+++ b/jstests/noPassthroughWithMongod/getmore_error.js
@@ -2,20 +2,20 @@
var t = db.getmore_error;
-for (var i=0; i < 10; i++) {
+for (var i = 0; i < 10; i++) {
t.insert({_id: i});
}
-var cursor = t.find().batchSize(2); // 1 is a special case
+var cursor = t.find().batchSize(2); // 1 is a special case
// first batch (only one from OP_QUERY)
-assert.eq(cursor.next(), {_id:0});
-assert.eq(cursor.next(), {_id:1});
+assert.eq(cursor.next(), {_id: 0});
+assert.eq(cursor.next(), {_id: 1});
assert.eq(cursor.objsLeftInBatch(), 0);
// second batch (first from OP_GETMORE)
-assert.eq(cursor.next(), {_id:2});
-assert.eq(cursor.next(), {_id:3});
+assert.eq(cursor.next(), {_id: 2});
+assert.eq(cursor.next(), {_id: 3});
assert.eq(cursor.objsLeftInBatch(), 0);
/*
diff --git a/jstests/noPassthroughWithMongod/huge_multikey_index.js b/jstests/noPassthroughWithMongod/huge_multikey_index.js
index 14f110ff3bb..fce643eab8a 100644
--- a/jstests/noPassthroughWithMongod/huge_multikey_index.js
+++ b/jstests/noPassthroughWithMongod/huge_multikey_index.js
@@ -6,14 +6,14 @@ t.drop();
function doit() {
arr = [];
- for (var i=0; i< 1000*1000;i++)
+ for (var i = 0; i < 1000 * 1000; i++)
arr.push(i);
- t.insert({a:arr});
+ t.insert({a: arr});
- //t.ensureIndex({a:1}, {background:true}) // always worked
+ // t.ensureIndex({a:1}, {background:true}) // always worked
- t.ensureIndex({a:1}); // used to fail server with out of fds error
+ t.ensureIndex({a: 1}); // used to fail server with out of fds error
}
doit();
diff --git a/jstests/noPassthroughWithMongod/index_check10.js b/jstests/noPassthroughWithMongod/index_check10.js
index 25d9eed5ca8..30ed9c17eac 100644
--- a/jstests/noPassthroughWithMongod/index_check10.js
+++ b/jstests/noPassthroughWithMongod/index_check10.js
@@ -6,121 +6,122 @@ Random.setRandomSeed();
t = db.test_index_check10;
function doIt() {
-
t.drop();
function sort() {
var sort = {};
- for( var i = 0; i < n; ++i ) {
- sort[ fields[ i ] ] = Random.rand() > 0.5 ? 1 : -1;
+ for (var i = 0; i < n; ++i) {
+ sort[fields[i]] = Random.rand() > 0.5 ? 1 : -1;
}
return sort;
}
- var fields = [ 'a', 'b', 'c', 'd', 'e' ];
- n = Random.randInt( 5 ) + 1;
+ var fields = ['a', 'b', 'c', 'd', 'e'];
+ n = Random.randInt(5) + 1;
var idx = sort();
var chars = "abcdefghijklmnopqrstuvwxyz";
function obj() {
var ret = {};
- for( var i = 0; i < n; ++i ) {
- ret[ fields[ i ] ] = r();
+ for (var i = 0; i < n; ++i) {
+ ret[fields[i]] = r();
}
return ret;
}
function r() {
- var len = Random.randInt( 700 / n );
+ var len = Random.randInt(700 / n);
buf = "";
- for( var i = 0; i < len; ++i ) {
- buf += chars.charAt( Random.randInt( chars.length ) );
+ for (var i = 0; i < len; ++i) {
+ buf += chars.charAt(Random.randInt(chars.length));
}
return buf;
}
function check() {
var v = t.validate();
- if ( !v.valid ) {
- printjson( v );
- assert( v.valid );
+ if (!v.valid) {
+ printjson(v);
+ assert(v.valid);
}
var spec = {};
- for( var i = 0; i < n; ++i ) {
- if ( Random.rand() > 0.5 ) {
- var bounds = [ r(), r() ];
- if ( bounds[ 0 ] > bounds[ 1 ] ) {
+ for (var i = 0; i < n; ++i) {
+ if (Random.rand() > 0.5) {
+ var bounds = [r(), r()];
+ if (bounds[0] > bounds[1]) {
bounds.reverse();
}
var s = {};
- if ( Random.rand() > 0.5 ) {
- s[ "$gte" ] = bounds[ 0 ];
+ if (Random.rand() > 0.5) {
+ s["$gte"] = bounds[0];
} else {
- s[ "$gt" ] = bounds[ 0 ];
+ s["$gt"] = bounds[0];
}
- if ( Random.rand() > 0.5 ) {
- s[ "$lte" ] = bounds[ 1 ];
+ if (Random.rand() > 0.5) {
+ s["$lte"] = bounds[1];
} else {
- s[ "$lt" ] = bounds[ 1 ];
+ s["$lt"] = bounds[1];
}
- spec[ fields[ i ] ] = s;
+ spec[fields[i]] = s;
} else {
var vals = [];
- for( var j = 0; j < Random.randInt( 15 ); ++j ) {
- vals.push( r() );
+ for (var j = 0; j < Random.randInt(15); ++j) {
+ vals.push(r());
}
- spec[ fields[ i ] ] = { $in: vals };
+ spec[fields[i]] = {
+ $in: vals
+ };
}
}
s = sort();
- c1 = t.find( spec, { _id:null } ).sort( s ).hint( idx ).toArray();
+ c1 = t.find(spec, {_id: null}).sort(s).hint(idx).toArray();
try {
- c3 = t.find( spec, { _id:null } ).sort( s ).hint( {$natural:1} ).toArray();
- } catch( e ) {
+ c3 = t.find(spec, {_id: null}).sort(s).hint({$natural: 1}).toArray();
+ } catch (e) {
// may assert if too much data for in memory sort
- print( "retrying check..." );
- check(); // retry with different bounds
+ print("retrying check...");
+ check(); // retry with different bounds
return;
}
var j = 0;
- for( var i = 0; i < c3.length; ++i ) {
- if( friendlyEqual( c1[ j ], c3[ i ] ) ) {
+ for (var i = 0; i < c3.length; ++i) {
+ if (friendlyEqual(c1[j], c3[i])) {
++j;
} else {
- var o = c3[ i ];
- var size = Object.bsonsize( o );
- for( var f in o ) {
- size -= f.length;
+ var o = c3[i];
+ var size = Object.bsonsize(o);
+ for (var f in o) {
+ size -= f.length;
}
- var max = 818; // KeyMax
- if ( size <= max ) {
- assert.eq( c1, c3 , "size: " + size );
+ var max = 818; // KeyMax
+ if (size <= max) {
+ assert.eq(c1, c3, "size: " + size);
}
}
}
}
var bulk = t.initializeUnorderedBulkOp();
- for( var i = 0; i < 10000; ++i ) {
- bulk.insert( obj() );
+ for (var i = 0; i < 10000; ++i) {
+ bulk.insert(obj());
}
assert.writeOK(bulk.execute());
- t.ensureIndex( idx );
+ t.ensureIndex(idx);
check();
bulk = t.initializeUnorderedBulkOp();
- for( var i = 0; i < 10000; ++i ) {
- if ( Random.rand() > 0.9 ) {
- bulk.insert( obj() );
+ for (var i = 0; i < 10000; ++i) {
+ if (Random.rand() > 0.9) {
+ bulk.insert(obj());
} else {
- bulk.find( obj() ).remove(); // improve
+ bulk.find(obj()).remove(); // improve
}
- if( Random.rand() > 0.999 ) {
- print( i );
+ if (Random.rand() > 0.999) {
+ print(i);
assert.writeOK(bulk.execute());
check();
bulk = t.initializeUnorderedBulkOp();
@@ -128,9 +129,8 @@ function doIt() {
}
assert.writeOK(bulk.execute());
check();
-
}
-for( var z = 0; z < 5; ++z ) {
+for (var z = 0; z < 5; ++z) {
doIt();
}
diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js
index 3271d7245f5..a801b473a44 100644
--- a/jstests/noPassthroughWithMongod/index_check9.js
+++ b/jstests/noPassthroughWithMongod/index_check9.js
@@ -5,135 +5,135 @@ Random.setRandomSeed();
t = db.test_index_check9;
function doIt() {
+ t.drop();
-t.drop();
+ function sort() {
+ var sort = {};
+ for (var i = 0; i < n; ++i) {
+ sort[fields[i]] = Random.rand() > 0.5 ? 1 : -1;
+ }
+ return sort;
+ }
-function sort() {
- var sort = {};
- for( var i = 0; i < n; ++i ) {
- sort[ fields[ i ] ] = Random.rand() > 0.5 ? 1 : -1;
- }
- return sort;
-}
+ var fields = ['a', 'b', 'c', 'd', 'e'];
+ n = Random.randInt(5) + 1;
+ var idx = sort();
-var fields = [ 'a', 'b', 'c', 'd', 'e' ];
-n = Random.randInt( 5 ) + 1;
-var idx = sort();
+ var chars = "abcdefghijklmnopqrstuvwxyz";
+ var alphas = [];
+ for (var i = 0; i < n; ++i) {
+ alphas.push(Random.rand() > 0.5);
+ }
-var chars = "abcdefghijklmnopqrstuvwxyz";
-var alphas = [];
-for( var i = 0; i < n; ++i ) {
- alphas.push( Random.rand() > 0.5 );
-}
-
-t.ensureIndex( idx );
+ t.ensureIndex(idx);
-function obj() {
- var ret = {};
- for( var i = 0; i < n; ++i ) {
- ret[ fields[ i ] ] = r( alphas[ i ] );
+ function obj() {
+ var ret = {};
+ for (var i = 0; i < n; ++i) {
+ ret[fields[i]] = r(alphas[i]);
+ }
+ return ret;
}
- return ret;
-}
-function r( alpha ) {
- if ( !alpha ) {
- return Random.randInt( 10 );
- } else {
- var len = Random.randInt( 10 );
- buf = "";
- for( var i = 0; i < len; ++i ) {
- buf += chars.charAt( Random.randInt( chars.length ) );
+ function r(alpha) {
+ if (!alpha) {
+ return Random.randInt(10);
+ } else {
+ var len = Random.randInt(10);
+ buf = "";
+ for (var i = 0; i < len; ++i) {
+ buf += chars.charAt(Random.randInt(chars.length));
+ }
+ return buf;
}
- return buf;
}
-}
-function check() {
- var v = t.validate();
- if ( !t.valid ) {
- printjson( t );
- assert( t.valid );
- }
- var spec = {};
- for( var i = 0; i < n; ++i ) {
- var predicateType = Random.randInt( 4 );
- switch( predicateType ) {
- case 0 /* range */ : {
- var bounds = [ r( alphas[ i ] ), r( alphas[ i ] ) ];
- if ( bounds[ 0 ] > bounds[ 1 ] ) {
- bounds.reverse();
- }
- var s = {};
- if ( Random.rand() > 0.5 ) {
- s[ "$gte" ] = bounds[ 0 ];
- } else {
- s[ "$gt" ] = bounds[ 0 ];
- }
- if ( Random.rand() > 0.5 ) {
- s[ "$lte" ] = bounds[ 1 ];
- } else {
- s[ "$lt" ] = bounds[ 1 ];
- }
- spec[ fields[ i ] ] = s;
- break;
+ function check() {
+ var v = t.validate();
+ if (!t.valid) {
+ printjson(t);
+ assert(t.valid);
}
- case 1 /* $in */ : {
- var vals = [];
- var inLength = Random.randInt( 15 );
- for( var j = 0; j < inLength; ++j ) {
- vals.push( r( alphas[ i ] ) );
+ var spec = {};
+ for (var i = 0; i < n; ++i) {
+ var predicateType = Random.randInt(4);
+ switch (predicateType) {
+ case 0 /* range */: {
+ var bounds = [r(alphas[i]), r(alphas[i])];
+ if (bounds[0] > bounds[1]) {
+ bounds.reverse();
+ }
+ var s = {};
+ if (Random.rand() > 0.5) {
+ s["$gte"] = bounds[0];
+ } else {
+ s["$gt"] = bounds[0];
+ }
+ if (Random.rand() > 0.5) {
+ s["$lte"] = bounds[1];
+ } else {
+ s["$lt"] = bounds[1];
+ }
+ spec[fields[i]] = s;
+ break;
+ }
+ case 1 /* $in */: {
+ var vals = [];
+ var inLength = Random.randInt(15);
+ for (var j = 0; j < inLength; ++j) {
+ vals.push(r(alphas[i]));
+ }
+ spec[fields[i]] = {
+ $in: vals
+ };
+ break;
+ }
+ case 2 /* equality */: {
+ spec[fields[i]] = r(alphas[i]);
+ break;
+ }
+ default /* no predicate */:
+ break;
}
- spec[ fields[ i ] ] = { $in: vals };
- break;
- }
- case 2 /* equality */ : {
- spec[ fields[ i ] ] = r( alphas[ i ] );
- break;
- }
- default /* no predicate */ :
- break;
}
+ s = sort();
+ c1 = t.find(spec, {_id: null}).sort(s).hint(idx).toArray();
+ c2 = t.find(spec, {_id: null}).sort(s).hint({$natural: 1}).toArray();
+ count = t.count(spec);
+ assert.eq(c1, c2);
+ assert.eq(c2.length, count);
}
- s = sort();
- c1 = t.find( spec, { _id:null } ).sort( s ).hint( idx ).toArray();
- c2 = t.find( spec, { _id:null } ).sort( s ).hint( {$natural:1} ).toArray();
- count = t.count( spec );
- assert.eq( c1, c2 );
- assert.eq( c2.length, count );
-}
-var bulk = t.initializeUnorderedBulkOp();
-for( var i = 0; i < 10000; ++i ) {
- bulk.insert( obj() );
- if( Random.rand() > 0.999 ) {
- print( i );
- assert.writeOK(bulk.execute());
- check();
- bulk = t.initializeUnorderedBulkOp();
+ var bulk = t.initializeUnorderedBulkOp();
+ for (var i = 0; i < 10000; ++i) {
+ bulk.insert(obj());
+ if (Random.rand() > 0.999) {
+ print(i);
+ assert.writeOK(bulk.execute());
+ check();
+ bulk = t.initializeUnorderedBulkOp();
+ }
}
-}
-bulk = t.initializeUnorderedBulkOp();
-for( var i = 0; i < 100000; ++i ) {
- if ( Random.rand() > 0.9 ) {
- bulk.insert( obj() );
- } else {
- bulk.find( obj() ).remove(); // improve
- }
- if( Random.rand() > 0.999 ) {
- print( i );
- assert.writeOK(bulk.execute());
- check();
- bulk = t.initializeUnorderedBulkOp();
+ bulk = t.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100000; ++i) {
+ if (Random.rand() > 0.9) {
+ bulk.insert(obj());
+ } else {
+ bulk.find(obj()).remove(); // improve
+ }
+ if (Random.rand() > 0.999) {
+ print(i);
+ assert.writeOK(bulk.execute());
+ check();
+ bulk = t.initializeUnorderedBulkOp();
+ }
}
-}
-assert.writeOK(bulk.execute());
-
-check();
+ assert.writeOK(bulk.execute());
+ check();
}
-for( var z = 0; z < 5; ++z ) {
+for (var z = 0; z < 5; ++z) {
doIt();
}
diff --git a/jstests/noPassthroughWithMongod/index_hammer1.js b/jstests/noPassthroughWithMongod/index_hammer1.js
index 4617eb3fd98..5d89223093e 100644
--- a/jstests/noPassthroughWithMongod/index_hammer1.js
+++ b/jstests/noPassthroughWithMongod/index_hammer1.js
@@ -3,22 +3,37 @@ t = db.index_hammer1;
t.drop();
var bulk = t.initializeUnorderedBulkOp();
-for ( i=0; i<10000; i++ )
- bulk.insert({ x: i, y: i });
+for (i = 0; i < 10000; i++)
+ bulk.insert({x: i, y: i});
assert.writeOK(bulk.execute());
ops = [];
-for ( i=0; i<50; i++ )
- ops.push( { op : "find" , ns : t.getFullName() , query : { x : { $gt : 5000 } , y : { $gt : 5000 } } } );
-
-ops[10] = { op : "createIndex" , ns : t.getFullName() , key : { x : 1 } };
-ops[20] = { op : "createIndex" , ns : t.getFullName() , key : { y : 1 } };
-ops[30] = { op : "dropIndex" , ns : t.getFullName() , key : { x : 1 } };
-ops[40] = { op : "dropIndex" , ns : t.getFullName() , key : { y : 1 } };
-
-res = benchRun( { ops : ops , parallel : 5 , seconds : 20 , host : db.getMongo().host } );
-printjson( res );
-
-assert.eq( 10000 , t.count() );
-
+for (i = 0; i < 50; i++)
+ ops.push({op: "find", ns: t.getFullName(), query: {x: {$gt: 5000}, y: {$gt: 5000}}});
+
+ops[10] = {
+ op: "createIndex",
+ ns: t.getFullName(),
+ key: {x: 1}
+};
+ops[20] = {
+ op: "createIndex",
+ ns: t.getFullName(),
+ key: {y: 1}
+};
+ops[30] = {
+ op: "dropIndex",
+ ns: t.getFullName(),
+ key: {x: 1}
+};
+ops[40] = {
+ op: "dropIndex",
+ ns: t.getFullName(),
+ key: {y: 1}
+};
+
+res = benchRun({ops: ops, parallel: 5, seconds: 20, host: db.getMongo().host});
+printjson(res);
+
+assert.eq(10000, t.count());
diff --git a/jstests/noPassthroughWithMongod/index_killop.js b/jstests/noPassthroughWithMongod/index_killop.js
index 71e1cfc650b..95b0f995848 100644
--- a/jstests/noPassthroughWithMongod/index_killop.js
+++ b/jstests/noPassthroughWithMongod/index_killop.js
@@ -6,56 +6,58 @@ t.drop();
// Insert a large number of documents, enough to ensure that an index build on these documents will
// be interrupted before complete.
var bulk = t.initializeUnorderedBulkOp();
-for( i = 0; i < 1e6; ++i ) {
- bulk.insert({ a: i });
+for (i = 0; i < 1e6; ++i) {
+ bulk.insert({a: i});
}
assert.writeOK(bulk.execute());
-function debug( x ) {
-// printjson( x );
+function debug(x) {
+ // printjson( x );
}
/** @return the op id for the running index build, or -1 if there is no current index build. */
function getIndexBuildOpId() {
inprog = db.currentOp().inprog;
- debug( inprog );
+ debug(inprog);
indexBuildOpId = -1;
- inprog.forEach( function( op ) {
- // Identify the index build as the createIndex command
- // It is assumed that no other clients are concurrently
- // accessing the 'test' database.
- if ( (op.op == 'query' ||
- op.op == 'command') && 'createIndexes' in op.query ) {
- debug( op.opid );
- indexBuildOpId = op.opid;
- }
- } );
+ inprog.forEach(function(op) {
+ // Identify the index build as the createIndex command
+ // It is assumed that no other clients are concurrently
+ // accessing the 'test' database.
+ if ((op.op == 'query' || op.op == 'command') && 'createIndexes' in op.query) {
+ debug(op.opid);
+ indexBuildOpId = op.opid;
+ }
+ });
return indexBuildOpId;
}
/** Test that building an index with @param 'options' can be aborted using killop. */
-function testAbortIndexBuild( options ) {
- var createIdx = startParallelShell(
- 'var coll = db.jstests_slownightly_index_killop;' +
- 'assert.commandWorked(coll.createIndex({ a: 1 }, ' + tojson(options) + '));'
- );
+function testAbortIndexBuild(options) {
+ var createIdx = startParallelShell('var coll = db.jstests_slownightly_index_killop;' +
+ 'assert.commandWorked(coll.createIndex({ a: 1 }, ' +
+ tojson(options) + '));');
// When the index build starts, find its op id.
- assert.soon( function() { return ( opId = getIndexBuildOpId() ) != -1; } );
+ assert.soon(function() {
+ return (opId = getIndexBuildOpId()) != -1;
+ });
// Kill the index build.
- db.killOp( opId );
+ db.killOp(opId);
// Wait for the index build to stop.
- assert.soon( function() { return getIndexBuildOpId() == -1; } );
+ assert.soon(function() {
+ return getIndexBuildOpId() == -1;
+ });
var exitCode = createIdx({checkExitSuccess: false});
- assert.neq(0, exitCode,
- 'expected shell to exit abnormally due to index build being terminated');
+ assert.neq(
+ 0, exitCode, 'expected shell to exit abnormally due to index build being terminated');
// Check that no new index has been created. This verifies that the index build was aborted
// rather than successfully completed.
- assert.eq( [ { _id:1 } ], t.getIndexKeys() );
+ assert.eq([{_id: 1}], t.getIndexKeys());
}
-testAbortIndexBuild( { background:false } );
-testAbortIndexBuild( { background:true } );
+testAbortIndexBuild({background: false});
+testAbortIndexBuild({background: true});
diff --git a/jstests/noPassthroughWithMongod/index_multi.js b/jstests/noPassthroughWithMongod/index_multi.js
index ec162821d4b..8d728fac8d9 100644
--- a/jstests/noPassthroughWithMongod/index_multi.js
+++ b/jstests/noPassthroughWithMongod/index_multi.js
@@ -9,19 +9,20 @@ db.results.drop();
var bulk = coll.initializeUnorderedBulkOp();
print("Populate the collection with random data");
for (var i = 0; i < 1e4; i++) {
- var doc = {"_id" : i};
+ var doc = {
+ "_id": i
+ };
- for (var j=0; j<100; j++) {
+ for (var j = 0; j < 100; j++) {
// Skip some of the fields
if (Random.rand() < .1) {
continue;
}
// Make 0, 10, etc. multikey indexes
else if (j % 10 == 0) {
- doc["field"+j] = [Random.rand(), Random.rand(), Random.rand()];
- }
- else {
- doc["field"+j] = Random.rand();
+ doc["field" + j] = [Random.rand(), Random.rand(), Random.rand()];
+ } else {
+ doc["field" + j] = Random.rand();
}
}
@@ -33,66 +34,54 @@ assert.writeOK(bulk.execute());
var specs = [];
var multikey = [];
-var setupDBStr =
- "var conn = null;" +
- "assert.soon(function() {" +
- " try {" +
- " conn = new Mongo(\"" + db.getMongo().host + "\");" +
- " return conn;" +
- " } catch (x) {" +
- " return false;" +
- " }" +
- "}, 'Timed out waiting for temporary connection to connect', 30000, 5000);" +
- "var db = conn.getDB('" + db.getName() + "');";
+var setupDBStr = "var conn = null;" + "assert.soon(function() {" + " try {" +
+ " conn = new Mongo(\"" + db.getMongo().host + "\");" + " return conn;" +
+ " } catch (x) {" + " return false;" + " }" +
+ "}, 'Timed out waiting for temporary connection to connect', 30000, 5000);" +
+ "var db = conn.getDB('" + db.getName() + "');";
var indexJobs = [];
print("Create 3 triple indexes");
for (var i = 90; i < 93; i++) {
var spec = {};
- spec["field"+i] = 1;
- spec["field"+(i+1)] = 1;
- spec["field"+(i+2)] = 1;
- indexJobs.push(startParallelShell(setupDBStr +
- "printjson(db.index_multi.createIndex(" + tojson(spec) + "," +
- "{ background: true }));" +
- "db.results.insert(Object.extend(" +
- "db.runCommand({ getlasterror: 1 }), " + tojson(spec) +
- ") );",
- null, // port
- true)); // noconnect
+ spec["field" + i] = 1;
+ spec["field" + (i + 1)] = 1;
+ spec["field" + (i + 2)] = 1;
+ indexJobs.push(startParallelShell(
+ setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + "," +
+ "{ background: true }));" + "db.results.insert(Object.extend(" +
+ "db.runCommand({ getlasterror: 1 }), " + tojson(spec) + ") );",
+ null, // port
+ true)); // noconnect
specs.push(spec);
- multikey.push(i % 10 == 0 || (i+1) % 10 == 0 || (i+2) % 10 == 0);
+ multikey.push(i % 10 == 0 || (i + 1) % 10 == 0 || (i + 2) % 10 == 0);
}
print("Create 30 compound indexes");
for (var i = 30; i < 90; i += 2) {
var spec = {};
- spec["field"+i] = 1;
- spec["field"+(i+1)] = 1;
- indexJobs.push(startParallelShell(setupDBStr +
- "printjson(db.index_multi.createIndex(" + tojson(spec) + ", " +
- "{ background: true }));" +
- "db.results.insert(Object.extend(" +
- "db.runCommand({ getlasterror: 1 }), " + tojson(spec) +
- ") );",
- null, // port
- true)); // noconnect
+ spec["field" + i] = 1;
+ spec["field" + (i + 1)] = 1;
+ indexJobs.push(startParallelShell(
+ setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + ", " +
+ "{ background: true }));" + "db.results.insert(Object.extend(" +
+ "db.runCommand({ getlasterror: 1 }), " + tojson(spec) + ") );",
+ null, // port
+ true)); // noconnect
specs.push(spec);
- multikey.push(i % 10 == 0 || (i+1) % 10 == 0);
+ multikey.push(i % 10 == 0 || (i + 1) % 10 == 0);
}
print("Create 30 indexes");
for (var i = 0; i < 30; i++) {
var spec = {};
- spec["field"+i] = 1;
- indexJobs.push(startParallelShell(setupDBStr +
- "printjson(db.index_multi.createIndex(" + tojson(spec) + ", " +
- "{ background: true }));" +
- "db.results.insert(Object.extend(" +
- "db.runCommand({ getlasterror: 1 }), " + tojson(spec) +
- ") );",
- null, // port
- true)); // noconnect
+ spec["field" + i] = 1;
+ indexJobs.push(startParallelShell(
+ setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + ", " +
+ "{ background: true }));" + "db.results.insert(Object.extend(" +
+ "db.runCommand({ getlasterror: 1 }), " + tojson(spec) + ") );",
+ null, // port
+ true)); // noconnect
specs.push(spec);
multikey.push(i % 10 == 0);
}
@@ -100,15 +89,16 @@ for (var i = 0; i < 30; i++) {
print("Do some sets and unsets");
bulk = coll.initializeUnorderedBulkOp();
for (i = 0; i < 1e4; i++) {
- var criteria = {_id: Random.randInt(1e5)};
+ var criteria = {
+ _id: Random.randInt(1e5)
+ };
var mod = {};
if (Random.rand() < .5) {
mod['$set'] = {};
- mod['$set']['field'+Random.randInt(100)] = Random.rand();
- }
- else {
+ mod['$set']['field' + Random.randInt(100)] = Random.rand();
+ } else {
mod['$unset'] = {};
- mod['$unset']['field'+Random.randInt(100)] = true;
+ mod['$unset']['field' + Random.randInt(100)] = true;
}
bulk.find(criteria).update(mod);
@@ -120,7 +110,7 @@ indexJobs.forEach(function(join) {
});
printjson(db.results.find().toArray());
-//assert.eq(coll.getIndexes().length, 64, "didn't see 64 indexes");
+// assert.eq(coll.getIndexes().length, 64, "didn't see 64 indexes");
print("Make sure we end up with 64 indexes");
for (var i in specs) {
diff --git a/jstests/noPassthroughWithMongod/index_no_retry.js b/jstests/noPassthroughWithMongod/index_no_retry.js
index fd7f070736d..ff09b70d039 100644
--- a/jstests/noPassthroughWithMongod/index_no_retry.js
+++ b/jstests/noPassthroughWithMongod/index_no_retry.js
@@ -7,9 +7,7 @@
var baseName = 'index_retry';
var dbpath = MongoRunner.dataPath + baseName;
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- journal: ''});
+ var conn = MongoRunner.runMongod({dbpath: dbpath, journal: ''});
var test = conn.getDB("test");
@@ -21,7 +19,7 @@
// can be interrupted before complete.
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 5e5; ++i) {
- bulk.insert({ a: i });
+ bulk.insert({a: i});
if (i % 10000 == 0) {
print("i: " + i);
}
@@ -39,42 +37,34 @@
var inprog = test.currentOp().inprog;
debug(inprog);
var indexBuildOpId = -1;
- inprog.forEach(
- function( op ) {
- // Identify the index build as a createIndexes command.
- // It is assumed that no other clients are concurrently
- // accessing the 'test' database.
- if ( (op.op == 'query' || op.op == 'command') && 'createIndexes' in op.query ) {
- debug(op.opid);
- var idxSpec = op.query.indexes[0];
- // SERVER-4295 Make sure the index details are there
- // we can't assert these things, since there is a race in reporting
- // but we won't count if they aren't
- if ( "a_1" == idxSpec.name &&
- 1 == idxSpec.key.a &&
- idxSpec.background &&
- op.progress &&
- (op.progress.done / op.progress.total) > 0.20) {
- indexBuildOpId = op.opid;
- }
+ inprog.forEach(function(op) {
+ // Identify the index build as a createIndexes command.
+ // It is assumed that no other clients are concurrently
+ // accessing the 'test' database.
+ if ((op.op == 'query' || op.op == 'command') && 'createIndexes' in op.query) {
+ debug(op.opid);
+ var idxSpec = op.query.indexes[0];
+ // SERVER-4295 Make sure the index details are there
+ // we can't assert these things, since there is a race in reporting
+ // but we won't count if they aren't
+ if ("a_1" == idxSpec.name && 1 == idxSpec.key.a && idxSpec.background &&
+ op.progress && (op.progress.done / op.progress.total) > 0.20) {
+ indexBuildOpId = op.opid;
}
}
- );
+ });
return indexBuildOpId != -1;
}
function abortDuringIndexBuild(options) {
var createIdx = startParallelShell(
- 'db.' + name + '.createIndex({ a: 1 }, { background: true });',
- conn.port);
+ 'db.' + name + '.createIndex({ a: 1 }, { background: true });', conn.port);
// Wait for the index build to start.
var times = 0;
- assert.soon(
- function() {
- return indexBuildInProgress() && times++ >= 2;
- }
- );
+ assert.soon(function() {
+ return indexBuildInProgress() && times++ >= 2;
+ });
print("killing the mongod");
MongoRunner.stopMongod(conn.port, /* signal */ 9);
@@ -85,17 +75,14 @@
abortDuringIndexBuild();
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- journal: '',
- noIndexBuildRetry: '',
- restart: true});
+ conn =
+ MongoRunner.runMongod({dbpath: dbpath, journal: '', noIndexBuildRetry: '', restart: true});
test = conn.getDB("test");
t = test.getCollection(name);
- assert.throws(function() { t.find({a: 42}).hint({a: 1}).next(); },
- null,
- 'index {a: 1} was rebuilt in spite of --noIndexBuildRetry');
+ assert.throws(function() {
+ t.find({a: 42}).hint({a: 1}).next();
+ }, null, 'index {a: 1} was rebuilt in spite of --noIndexBuildRetry');
var indexes = t.getIndexes();
assert.eq(1, indexes.length, 'unfinished indexes in listIndexes result: ' + tojson(indexes));
diff --git a/jstests/noPassthroughWithMongod/index_retry.js b/jstests/noPassthroughWithMongod/index_retry.js
index e420edb1914..cb33de4b95b 100644
--- a/jstests/noPassthroughWithMongod/index_retry.js
+++ b/jstests/noPassthroughWithMongod/index_retry.js
@@ -7,9 +7,7 @@
var baseName = 'index_retry';
var dbpath = MongoRunner.dataPath + baseName;
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- journal: ''});
+ var conn = MongoRunner.runMongod({dbpath: dbpath, journal: ''});
var test = conn.getDB("test");
@@ -21,7 +19,7 @@
// can be interrupted before complete.
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 5e5; ++i) {
- bulk.insert({ a: i });
+ bulk.insert({a: i});
if (i % 10000 == 0) {
print("i: " + i);
}
@@ -39,42 +37,34 @@
var inprog = test.currentOp().inprog;
debug(inprog);
var indexBuildOpId = -1;
- inprog.forEach(
- function( op ) {
- // Identify the index build as a createIndexes command.
- // It is assumed that no other clients are concurrently
- // accessing the 'test' database.
- if ( (op.op == 'query' || op.op == 'command') && 'createIndexes' in op.query ) {
- debug(op.opid);
- var idxSpec = op.query.indexes[0];
- // SERVER-4295 Make sure the index details are there
- // we can't assert these things, since there is a race in reporting
- // but we won't count if they aren't
- if ( "a_1" == idxSpec.name &&
- 1 == idxSpec.key.a &&
- idxSpec.background &&
- op.progress &&
- (op.progress.done / op.progress.total) > 0.20) {
- indexBuildOpId = op.opid;
- }
+ inprog.forEach(function(op) {
+ // Identify the index build as a createIndexes command.
+ // It is assumed that no other clients are concurrently
+ // accessing the 'test' database.
+ if ((op.op == 'query' || op.op == 'command') && 'createIndexes' in op.query) {
+ debug(op.opid);
+ var idxSpec = op.query.indexes[0];
+ // SERVER-4295 Make sure the index details are there
+ // we can't assert these things, since there is a race in reporting
+ // but we won't count if they aren't
+ if ("a_1" == idxSpec.name && 1 == idxSpec.key.a && idxSpec.background &&
+ op.progress && (op.progress.done / op.progress.total) > 0.20) {
+ indexBuildOpId = op.opid;
}
}
- );
+ });
return indexBuildOpId != -1;
}
function abortDuringIndexBuild(options) {
var createIdx = startParallelShell(
- 'db.' + name + '.createIndex({ a: 1 }, { background: true });',
- conn.port);
+ 'db.' + name + '.createIndex({ a: 1 }, { background: true });', conn.port);
// Wait for the index build to start.
var times = 0;
- assert.soon(
- function() {
- return indexBuildInProgress() && times++ >= 2;
- }
- );
+ assert.soon(function() {
+ return indexBuildInProgress() && times++ >= 2;
+ });
print("killing the mongod");
MongoRunner.stopMongod(conn.port, /* signal */ 9);
@@ -85,18 +75,17 @@
abortDuringIndexBuild();
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- journal: '',
- restart: true});
+ conn = MongoRunner.runMongod({dbpath: dbpath, journal: '', restart: true});
test = conn.getDB("test");
t = test.getCollection(name);
- assert.eq({a: 42}, t.find({a: 42}, {_id: 0}).hint({a: 1}).next(),
+ assert.eq({a: 42},
+ t.find({a: 42}, {_id: 0}).hint({a: 1}).next(),
'index {a: 1} was rebuilt on startup');
var indexes = t.getIndexes();
- assert.eq(2, indexes.length,
+ assert.eq(2,
+ indexes.length,
'unexpected number of indexes in listIndexes result: ' + tojson(indexes));
print("Index built");
diff --git a/jstests/noPassthroughWithMongod/indexbg_drop.js b/jstests/noPassthroughWithMongod/indexbg_drop.js
index 261ab342e48..863730ae35b 100644
--- a/jstests/noPassthroughWithMongod/indexbg_drop.js
+++ b/jstests/noPassthroughWithMongod/indexbg_drop.js
@@ -15,17 +15,20 @@ var collection = 'jstests_feh';
var size = 500000;
// Set up replica set
-var replTest = new ReplSetTest({ name: 'bgIndex', nodes: 3 });
+var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
var nodes = replTest.nodeList();
printjson(nodes);
// We need an arbiter to ensure that the primary doesn't step down when we restart the secondary
replTest.startSet();
-replTest.initiate({"_id" : "bgIndex",
- "members" : [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
+replTest.initiate({
+ "_id": "bgIndex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
var master = replTest.getPrimary();
var second = replTest.getSecondary();
@@ -36,43 +39,43 @@ var secondId = replTest.getNodeId(second);
var masterDB = master.getDB(dbname);
var secondDB = second.getDB(dbname);
-
-var dc = {dropIndexes: collection, index: "i_1"};
+var dc = {
+ dropIndexes: collection,
+ index: "i_1"
+};
// set up collections
masterDB.dropDatabase();
jsTest.log("creating test data " + size + " documents");
Random.setRandomSeed();
var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
-for( i = 0; i < size; ++i ) {
- bulk.insert({ i: Random.rand() });
+for (i = 0; i < size; ++i) {
+ bulk.insert({i: Random.rand()});
}
assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing for test of: " + tojson(dc));
// Add another index to be sure the drop command works.
-masterDB.getCollection(collection).ensureIndex({b:1});
+masterDB.getCollection(collection).ensureIndex({b: 1});
-masterDB.getCollection(collection).ensureIndex( {i:1}, {background:true} );
-assert.eq(3, masterDB.getCollection(collection).getIndexes().length );
+masterDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
+assert.eq(3, masterDB.getCollection(collection).getIndexes().length);
// Wait for the secondary to get the index entry
-assert.soon(
- function() { return 3 == secondDB.getCollection(collection).getIndexes().length; },
- "index not created on secondary (prior to drop)", 240000 );
+assert.soon(function() {
+ return 3 == secondDB.getCollection(collection).getIndexes().length;
+}, "index not created on secondary (prior to drop)", 240000);
jsTest.log("Index created and index entry exists on secondary");
-
// make sure the index build has started on secondary
assert.soon(function() {
var curOp = secondDB.currentOp();
printjson(curOp);
- for (var i=0; i < curOp.inprog.length; i++) {
+ for (var i = 0; i < curOp.inprog.length; i++) {
try {
- if (curOp.inprog[i].insert.background){
-
- return true;
+ if (curOp.inprog[i].insert.background) {
+ return true;
}
} catch (e) {
// catchem if you can
@@ -81,9 +84,8 @@ assert.soon(function() {
return false;
}, "waiting for secondary bg index build", 20000, 10);
-
jsTest.log("dropping index");
-masterDB.runCommand( {dropIndexes: collection, index: "*"});
+masterDB.runCommand({dropIndexes: collection, index: "*"});
jsTest.log("Waiting on replication");
replTest.awaitReplication();
@@ -93,13 +95,12 @@ masterDB.getCollection(collection).getIndexes().forEach(printjson);
// we need to assert.soon because the drop only marks the index for removal
// the removal itself is asynchronous and may take another moment before it happens
var i = 0;
-assert.soon( function() {
+assert.soon(function() {
print("index list on secondary (run " + i + "):");
secondDB.getCollection(collection).getIndexes().forEach(printjson);
i++;
return 1 === secondDB.getCollection(collection).getIndexes().length;
- }, "secondary did not drop index"
-);
+}, "secondary did not drop index");
replTest.stopSet();
diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
index 2d3ddd9099e..0a50951c75d 100644
--- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js
+++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js
@@ -10,16 +10,16 @@
/**
* Starts a replica set with arbiter, builds an index in background,
- * run through drop indexes, drop collection, drop database.
+ * run through drop indexes, drop collection, drop database.
*/
var checkOp = function(checkDB) {
var curOp = checkDB.currentOp(true);
- for (var i=0; i < curOp.inprog.length; i++) {
+ for (var i = 0; i < curOp.inprog.length; i++) {
try {
- if (curOp.inprog[i].query.background){
+ if (curOp.inprog[i].query.background) {
printjson(curOp.inprog[i].msg);
- return true;
+ return true;
}
} catch (e) {
// catchem if you can
@@ -33,16 +33,19 @@ var collection = 'jstests_feh';
var size = 100000;
// Set up replica set
-var replTest = new ReplSetTest({ name: 'bgIndex', nodes: 3 });
+var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
var nodes = replTest.nodeList();
// We need an arbiter to ensure that the primary doesn't step down when we restart the secondary
replTest.startSet();
-replTest.initiate({"_id" : "bgIndex",
- "members" : [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
+replTest.initiate({
+ "_id": "bgIndex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
var master = replTest.getPrimary();
var second = replTest.getSecondary();
@@ -50,15 +53,14 @@ var second = replTest.getSecondary();
var masterDB = master.getDB(dbname);
var secondDB = second.getDB(dbname);
-var dropAction = [
+var dropAction = [
{dropIndexes: collection, index: "*"},
{dropIndexes: collection, index: "i_1"},
{drop: collection},
- {dropDatabase: 1 },
+ {dropDatabase: 1},
{convertToCapped: collection, size: 20000}
];
-
for (var idx = 0; idx < dropAction.length; idx++) {
var dc = dropAction[idx];
jsTest.log("Setting up collection " + collection + " for test of: " + JSON.stringify(dc));
@@ -67,31 +69,31 @@ for (var idx = 0; idx < dropAction.length; idx++) {
masterDB.dropDatabase();
jsTest.log("creating test data " + size + " documents");
var bulk = masterDB.getCollection(collection).initializeUnorderedBulkOp();
- for(var i = 0; i < size; ++i ) {
- bulk.insert({ i: i });
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
}
assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing for test of: " + JSON.stringify(dc));
- masterDB.getCollection(collection).ensureIndex( {i:1}, {background:true} );
- assert.eq(2, masterDB.getCollection(collection).getIndexes().length );
+ masterDB.getCollection(collection).ensureIndex({i: 1}, {background: true});
+ assert.eq(2, masterDB.getCollection(collection).getIndexes().length);
// Wait for the secondary to get the index entry
- assert.soon( function() {
+ assert.soon(function() {
return 2 == secondDB.getCollection(collection).getIndexes().length;
- }, "index not created on secondary", 240000 );
+ }, "index not created on secondary", 240000);
jsTest.log("Index created and index info exists on secondary");
jsTest.log("running command " + JSON.stringify(dc));
- assert.commandWorked(masterDB.runCommand( dc ));
-
+ assert.commandWorked(masterDB.runCommand(dc));
+
jsTest.log("Waiting on replication");
- replTest.awaitReplication(60*1000);
+ replTest.awaitReplication(60 * 1000);
// we need to assert.soon because the drop only marks the index for removal
// the removal itself is asynchronous and may take another moment before it happens
- assert.soon( function() {
+ assert.soon(function() {
var idx_count = secondDB.getCollection(collection).getIndexes().length;
return idx_count == 1 || idx_count == 0;
}, "secondary did not drop index for " + dc.toString());
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
index 80379b64844..e6b1a14f5f8 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_secondary.js
@@ -10,22 +10,24 @@
/**
* Starts a replica set with arbiter, builds an index in background
- * restart secondary once it starts building index, secondary should
+ * restart secondary once it starts building index, secondary should
* restart when index build after it restarts
*/
-
// Set up replica set
-var replTest = new ReplSetTest({ name: 'bgIndex', nodes: 3 });
+var replTest = new ReplSetTest({name: 'bgIndex', nodes: 3});
var nodes = replTest.nodeList();
// We need an arbiter to ensure that the primary doesn't step down when we restart the secondary
replTest.startSet();
-replTest.initiate({"_id" : "bgIndex",
- "members" : [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
+replTest.initiate({
+ "_id": "bgIndex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
var master = replTest.getPrimary();
var second = replTest.getSecondary();
@@ -39,37 +41,36 @@ var size = 500000;
jsTest.log("creating test data " + size + " documents");
var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
-for(var i = 0; i < size; ++i) {
- bulk.insert({ i: i });
+for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
}
assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing");
-masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} );
+masterDB.jstests_bgsec.ensureIndex({i: 1}, {background: true});
assert.eq(2, masterDB.jstests_bgsec.getIndexes().length);
// Wait for the secondary to get the index entry
-assert.soon( function() {
- return 2 == secondDB.jstests_bgsec.getIndexes().length; },
- "index not created on secondary (prior to restart)", 5 * 60 * 1000 );
+assert.soon(function() {
+ return 2 == secondDB.jstests_bgsec.getIndexes().length;
+}, "index not created on secondary (prior to restart)", 5 * 60 * 1000);
// restart secondary and reconnect
jsTest.log("Restarting secondary");
-replTest.restart(secondId, {}, /*wait=*/true);
+replTest.restart(secondId, {}, /*wait=*/true);
// Make sure secondary comes back
-assert.soon( function() {
+assert.soon(function() {
try {
- secondDB.jstests_bgsec.getIndexes().length; // trigger a reconnect if needed
- return true;
+ secondDB.jstests_bgsec.getIndexes().length; // trigger a reconnect if needed
+ return true;
} catch (e) {
- return false;
+ return false;
}
-} , "secondary didn't restart", 30000, 1000);
+}, "secondary didn't restart", 30000, 1000);
-assert.soon( function() {
- return 2 == secondDB.jstests_bgsec.getIndexes().length; },
- "Index build not resumed after restart", 30000, 50 );
+assert.soon(function() {
+ return 2 == secondDB.jstests_bgsec.getIndexes().length;
+}, "Index build not resumed after restart", 30000, 50);
jsTest.log("indexbg-restart-secondary.js complete");
-
diff --git a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
index c97d8320422..c7a793e2a51 100644
--- a/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
+++ b/jstests/noPassthroughWithMongod/indexbg_restart_sigkill_secondary_noretry.js
@@ -14,7 +14,7 @@
* Start with noIndexBuildRetry option, should *not* build index on secondary
*/
-(function () {
+(function() {
var assert_trueTimeout = function(f, msg, timeout /*ms*/, interval) {
var start = new Date();
timeout = timeout || 30000;
@@ -28,12 +28,12 @@
if (diff > timeout)
return;
sleep(interval);
- }
+ }
};
// Set up replica set
- var replTest = new ReplSetTest({ name: 'bgIndexNoRetry', nodes: 3,
- nodeOptions : {noIndexBuildRetry:"", syncdelay:1} });
+ var replTest = new ReplSetTest(
+ {name: 'bgIndexNoRetry', nodes: 3, nodeOptions: {noIndexBuildRetry: "", syncdelay: 1}});
var nodenames = replTest.nodeList();
// We can't use an arbiter as the third node because the -auth test tries to log on there
@@ -47,11 +47,14 @@
return;
}
- replTest.initiate({"_id" : "bgIndexNoRetry",
- "members" : [
- {"_id" : 0, "host" : nodenames[0]},
- {"_id" : 1, "host" : nodenames[1]},
- {"_id" : 2, "host" : nodenames[2], arbiterOnly: true}]});
+ replTest.initiate({
+ "_id": "bgIndexNoRetry",
+ "members": [
+ {"_id": 0, "host": nodenames[0]},
+ {"_id": 1, "host": nodenames[1]},
+ {"_id": 2, "host": nodenames[2], arbiterOnly: true}
+ ]
+ });
var master = replTest.getPrimary();
var second = replTest.getSecondary();
@@ -65,18 +68,18 @@
jsTest.log("creating test data " + size + " documents");
var bulk = masterDB.jstests_bgsec.initializeUnorderedBulkOp();
- for( i = 0; i < size; ++i ) {
- bulk.insert({ i : i });
+ for (i = 0; i < size; ++i) {
+ bulk.insert({i: i});
}
assert.writeOK(bulk.execute());
jsTest.log("Starting background indexing");
- masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} );
+ masterDB.jstests_bgsec.ensureIndex({i: 1}, {background: true});
assert.eq(2, masterDB.jstests_bgsec.getIndexes().length);
// Do one more write, so that later on, the secondary doesn't restart with the index build
// as the last op in the oplog -- it will redo this op otherwise.
- masterDB.jstests_bgsec.insert( { i : -1 } );
+ masterDB.jstests_bgsec.insert({i: -1});
// Wait for the secondary to get caught up
jsTest.log("Waiting for replication");
@@ -84,29 +87,26 @@
// Make sure a journal flush for the oplog occurs, by doing a local journaled write to the
// secondary
- assert.writeOK(second.getDB('local').foo.insert({ a: 1 }, { writeConcern: { j: true }}));
+ assert.writeOK(second.getDB('local').foo.insert({a: 1}, {writeConcern: {j: true}}));
// restart secondary and reconnect
jsTest.log("Restarting secondary");
- replTest.restart(secondId, {}, /*signal=*/ 9, /*wait=*/true);
+ replTest.restart(secondId, {}, /*signal=*/9, /*wait=*/true);
// Make sure secondary comes back
- assert.soon( function() {
+ assert.soon(function() {
try {
- secondDB.isMaster(); // trigger a reconnect if needed
+ secondDB.isMaster(); // trigger a reconnect if needed
return true;
} catch (e) {
- return false;
+ return false;
}
- } , "secondary didn't restart", 60000, 1000);
+ }, "secondary didn't restart", 60000, 1000);
- assert_trueTimeout(
- function() {
- return 2 == secondDB.jstests_bgsec.getIndexes().length;
- },
- "index created on secondary after restart with --noIndexBuildRetry",
- 30000, 200);
+ assert_trueTimeout(function() {
+ return 2 == secondDB.jstests_bgsec.getIndexes().length;
+ }, "index created on secondary after restart with --noIndexBuildRetry", 30000, 200);
- assert.neq(2, secondDB.jstests_bgsec.getIndexes().length );
+ assert.neq(2, secondDB.jstests_bgsec.getIndexes().length);
replTest.stopSet();
}());
diff --git a/jstests/noPassthroughWithMongod/insertMulti.js b/jstests/noPassthroughWithMongod/insertMulti.js
index b857e8c159a..e2a70307550 100644
--- a/jstests/noPassthroughWithMongod/insertMulti.js
+++ b/jstests/noPassthroughWithMongod/insertMulti.js
@@ -4,9 +4,11 @@
"use strict";
function makeDocument(docSize) {
- var doc = { "fieldName":"" };
+ var doc = {
+ "fieldName": ""
+ };
var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
- while(Object.bsonsize(doc) < docSize) {
+ while (Object.bsonsize(doc) < docSize) {
if (Object.bsonsize(doc) < docSize - longString.length) {
doc.fieldName += longString;
} else {
@@ -20,24 +22,24 @@
var t = db.foo;
t.drop();
- t.insert([{_id:1},{_id:2}]);
+ t.insert([{_id: 1}, {_id: 2}]);
assert.eq(t.count(), 2);
- t.insert([{_id:3},{_id:2},{_id:4}], 0); // no ContinueOnError
+ t.insert([{_id: 3}, {_id: 2}, {_id: 4}], 0); // no ContinueOnError
assert.eq(t.count(), 3);
- assert.eq(t.count({ "_id" : 1 }), 1);
- assert.eq(t.count({ "_id" : 2 }), 1);
- assert.eq(t.count({ "_id" : 3 }), 1);
- assert.eq(t.count({ "_id" : 4 }), 0);
+ assert.eq(t.count({"_id": 1}), 1);
+ assert.eq(t.count({"_id": 2}), 1);
+ assert.eq(t.count({"_id": 3}), 1);
+ assert.eq(t.count({"_id": 4}), 0);
t.drop();
- t.insert([{_id:1},{_id:2}]);
+ t.insert([{_id: 1}, {_id: 2}]);
assert.eq(t.count(), 2);
- t.insert([{_id:3},{_id:2},{_id:4}], 1); // ContinueOnError
+ t.insert([{_id: 3}, {_id: 2}, {_id: 4}], 1); // ContinueOnError
assert.eq(t.count(), 4);
- assert.eq(t.count({ "_id" : 1 }), 1);
- assert.eq(t.count({ "_id" : 2 }), 1);
- assert.eq(t.count({ "_id" : 3 }), 1);
- assert.eq(t.count({ "_id" : 4 }), 1);
+ assert.eq(t.count({"_id": 1}), 1);
+ assert.eq(t.count({"_id": 2}), 1);
+ assert.eq(t.count({"_id": 3}), 1);
+ assert.eq(t.count({"_id": 4}), 1);
// Push a large vector in bigger than the subset size we'll break it up into
t.drop();
diff --git a/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js b/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js
index 9f544248ef1..e0a83397ee9 100644
--- a/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js
+++ b/jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js
@@ -13,13 +13,18 @@ if ("undefined" == typeof inner_mode) {
// tested manually), so 127.0.0.1 is also present so the test mongo shell can connect
// with that address.
var mongod = MongoRunner.runMongod({ipv6: "", bind_ip: "::1,127.0.0.1"});
- var args = ["mongo",
- "--nodb",
- "--ipv6",
- "--host", "::1",
- "--port", mongod.port,
- "--eval", "inner_mode=true;port=" + mongod.port + ";",
- "jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js" ];
+ var args = [
+ "mongo",
+ "--nodb",
+ "--ipv6",
+ "--host",
+ "::1",
+ "--port",
+ mongod.port,
+ "--eval",
+ "inner_mode=true;port=" + mongod.port + ";",
+ "jstests/noPassthroughWithMongod/ipv6_connection_string_validation.js"
+ ];
var exitCode = _runMongoProgram.apply(null, args);
jsTest.log("Inner mode test finished, exit code was " + exitCode);
@@ -32,50 +37,49 @@ if ("undefined" == typeof inner_mode) {
}
var goodStrings = [
- "localhost:27999/test",
- "[::1]:27999/test",
- "[0:0:0:0:0:0:0:1]:27999/test",
- "[0000:0000:0000:0000:0000:0000:0000:0001]:27999/test"
+ "localhost:27999/test",
+ "[::1]:27999/test",
+ "[0:0:0:0:0:0:0:1]:27999/test",
+ "[0000:0000:0000:0000:0000:0000:0000:0001]:27999/test"
];
var badStrings = [
- { s: undefined, r: /^Missing connection string$/ },
- { s: 7, r: /^Incorrect type/ },
- { s: null, r: /^Incorrect type/ },
- { s: "", r: /^Empty connection string$/ },
- { s: " ", r: /^Empty connection string$/ },
- { s: ":", r: /^Missing host name/ },
- { s: "/", r: /^Missing host name/ },
- { s: ":/", r: /^Missing host name/ },
- { s: ":/test", r: /^Missing host name/ },
- { s: ":27999/", r: /^Missing host name/ },
- { s: ":27999/test", r: /^Missing host name/ },
- { s: "/test", r: /^Missing host name/ },
- { s: "localhost:/test", r: /^Missing port number/ },
- { s: "::1:/test", r: /^Missing port number/ },
- { s: "::1:cat/test", r: /^Invalid port number/ },
- { s: "::1:1cat/test", r: /^Invalid port number/ },
- { s: "::1:123456/test", r: /^Invalid port number/ },
- { s: "::1:65536/test", r: /^Invalid port number/ },
- { s: "127.0.0.1:65536/test", r: /^Invalid port number/ },
- { s: "::1:27999/", r: /^Missing database name/ },
- { s: "127.0.0.1:27999/", r: /^Missing database name/ },
- { s: "::1:27999/test", r: /^More than one ':'/ },
- { s: "0:0::0:0:1:27999/test", r: /^More than one ':'/ },
- { s: "0000:0000:0000:0000:0000:0000:0000:0001:27999/test", r: /^More than one ':'/ },
- { s: "a[127.0.0.1]:27999/", r: /^Missing database name/ },
- { s: "a[::1:]27999/", r: /^Invalid port number/ },
- { s: "[::1:27999/", r: /^Missing database name/ },
- { s: "[::1:]27999/", r: /^Invalid port number/ },
- { s: "::1]:27999/", r: /^Missing database name/ }
+ {s: undefined, r: /^Missing connection string$/},
+ {s: 7, r: /^Incorrect type/},
+ {s: null, r: /^Incorrect type/},
+ {s: "", r: /^Empty connection string$/},
+ {s: " ", r: /^Empty connection string$/},
+ {s: ":", r: /^Missing host name/},
+ {s: "/", r: /^Missing host name/},
+ {s: ":/", r: /^Missing host name/},
+ {s: ":/test", r: /^Missing host name/},
+ {s: ":27999/", r: /^Missing host name/},
+ {s: ":27999/test", r: /^Missing host name/},
+ {s: "/test", r: /^Missing host name/},
+ {s: "localhost:/test", r: /^Missing port number/},
+ {s: "::1:/test", r: /^Missing port number/},
+ {s: "::1:cat/test", r: /^Invalid port number/},
+ {s: "::1:1cat/test", r: /^Invalid port number/},
+ {s: "::1:123456/test", r: /^Invalid port number/},
+ {s: "::1:65536/test", r: /^Invalid port number/},
+ {s: "127.0.0.1:65536/test", r: /^Invalid port number/},
+ {s: "::1:27999/", r: /^Missing database name/},
+ {s: "127.0.0.1:27999/", r: /^Missing database name/},
+ {s: "::1:27999/test", r: /^More than one ':'/},
+ {s: "0:0::0:0:1:27999/test", r: /^More than one ':'/},
+ {s: "0000:0000:0000:0000:0000:0000:0000:0001:27999/test", r: /^More than one ':'/},
+ {s: "a[127.0.0.1]:27999/", r: /^Missing database name/},
+ {s: "a[::1:]27999/", r: /^Invalid port number/},
+ {s: "[::1:27999/", r: /^Missing database name/},
+ {s: "[::1:]27999/", r: /^Invalid port number/},
+ {s: "::1]:27999/", r: /^Missing database name/}
];
var substitutePort = function(connectionString) {
// This will be called with non-strings as well as strings, so we need to catch exceptions
try {
return connectionString.replace("27999", "" + port);
- }
- catch (e) {
+ } catch (e) {
return connectionString;
}
};
@@ -87,18 +91,17 @@ var testGood = function(i, connectionString) {
try {
var connectDB = connect(connectionString);
connectDB = null;
- }
- catch (e) {
+ } catch (e) {
gotException = true;
exception = e;
}
if (!gotException) {
- print("Good connection string " + i +
- " (\"" + connectionString + "\") correctly validated");
+ print("Good connection string " + i + " (\"" + connectionString +
+ "\") correctly validated");
return;
}
- var message = "FAILED to correctly validate goodString " + i +
- " (\"" + connectionString + "\"): exception was \"" + tojson(exception) + "\"";
+ var message = "FAILED to correctly validate goodString " + i + " (\"" + connectionString +
+ "\"): exception was \"" + tojson(exception) + "\"";
doassert(message);
};
@@ -110,8 +113,7 @@ var testBad = function(i, connectionString, errorRegex) {
try {
var connectDB = connect(connectionString);
connectDB = null;
- }
- catch (e) {
+ } catch (e) {
gotException = true;
exception = e;
if (errorRegex.test(e.message)) {
@@ -123,13 +125,12 @@ var testBad = function(i, connectionString, errorRegex) {
"\") correctly rejected:\n" + tojson(exception));
return;
}
- var message = "FAILED to generate correct exception for badString " + i +
- " (\"" + connectionString + "\"): ";
+ var message = "FAILED to generate correct exception for badString " + i + " (\"" +
+ connectionString + "\"): ";
if (gotException) {
- message += "exception was \"" + tojson(exception) +
- "\", it should have matched \"" + errorRegex.toString() + "\"";
- }
- else {
+ message += "exception was \"" + tojson(exception) + "\", it should have matched \"" +
+ errorRegex.toString() + "\"";
+ } else {
message += "no exception was thrown";
}
doassert(message);
diff --git a/jstests/noPassthroughWithMongod/log_component_helpers.js b/jstests/noPassthroughWithMongod/log_component_helpers.js
index 044c1ba5b4a..dfe6523ac35 100644
--- a/jstests/noPassthroughWithMongod/log_component_helpers.js
+++ b/jstests/noPassthroughWithMongod/log_component_helpers.js
@@ -5,9 +5,9 @@
var mongo = db.getMongo();
// Get current log component setttings. We will reset to these later.
- var originalSettings = assert.commandWorked(
- db.adminCommand({ getParameter:1, logComponentVerbosity:1 })
- ).logComponentVerbosity;
+ var originalSettings =
+ assert.commandWorked(db.adminCommand({getParameter: 1, logComponentVerbosity: 1}))
+ .logComponentVerbosity;
// getLogComponents
var components1 = mongo.getLogComponents();
@@ -37,6 +37,5 @@
// Restore originalSettings
assert.commandWorked(
- db.adminCommand({setParameter:1, logComponentVerbosity:originalSettings })
- );
- }(db));
+ db.adminCommand({setParameter: 1, logComponentVerbosity: originalSettings}));
+}(db));
diff --git a/jstests/noPassthroughWithMongod/logop_rollback.js b/jstests/noPassthroughWithMongod/logop_rollback.js
index cc585c59a8e..f6ff75e3ce6 100644
--- a/jstests/noPassthroughWithMongod/logop_rollback.js
+++ b/jstests/noPassthroughWithMongod/logop_rollback.js
@@ -5,7 +5,7 @@
'use strict';
function checkForLogOpRollback(coll) {
- var res = coll.runCommand({ getLog: 'global' });
+ var res = coll.runCommand({getLog: 'global'});
assert.commandWorked(res);
for (var i = res.log.length - 1; i >= 0; i--) {
@@ -30,15 +30,14 @@
// must be in 'legacy' or 'compatibility' mode
db.getMongo().forceWriteMode('compatibility');
- var res = coll.insert({ _id: new Array(1025).join('x') });
+ var res = coll.insert({_id: new Array(1025).join('x')});
assert(res.hasWriteError());
// ErrorCodes::KeyTooLong == 17280
assert.eq(17280, res.getWriteError().code);
assert(checkForLogOpRollback(coll));
- }
- finally {
+ } finally {
db.getMongo().forceWriteMode(prevWriteMode);
db.setLogLevel(prevVerbosityLevel);
}
diff --git a/jstests/noPassthroughWithMongod/logpath.js b/jstests/noPassthroughWithMongod/logpath.js
index 1634495cd17..3b6b2069937 100644
--- a/jstests/noPassthroughWithMongod/logpath.js
+++ b/jstests/noPassthroughWithMongod/logpath.js
@@ -3,7 +3,7 @@
var name = "logpath";
var token = "logpath_token";
-var dbdir = MongoRunner.dataPath + name + "/"; // this will work under windows as well as linux
+var dbdir = MongoRunner.dataPath + name + "/"; // this will work under windows as well as linux
var basedir = MongoRunner.dataPath + name + "files" + "/";
var logdir = basedir + "logdir/";
var testdir = basedir + "testdir/";
@@ -19,15 +19,15 @@ assert(mkdir(basedir));
assert(mkdir(logdir));
assert(mkdir(testdir));
-var cleanupFiles = function() {
+var cleanupFiles = function() {
var files = listFiles(logdir);
- for(f in files) {
+ for (f in files) {
var name = files[f].name;
// mostly here for safety
- if(name.indexOf(token) != -1) {
- removeFile(name);
+ if (name.indexOf(token) != -1) {
+ removeFile(name);
}
}
};
@@ -37,8 +37,8 @@ var logCount = function(fpattern, prefix) {
var pat = RegExp(fpattern + (prefix ? "" : "$"));
var cnt = 0;
- for(f in files) {
- if(pat.test(files[f].name)) {
+ for (f in files) {
+ if (pat.test(files[f].name)) {
cnt++;
}
}
@@ -53,14 +53,14 @@ cleanupFiles();
assert.eq(logCount(logs[0]), 0);
print("------ Start mongod with logpath set to new file");
-var m = MongoRunner.runMongod({ port: port[0], dbpath: dbdir, logpath: logdir + logs[0]});
+var m = MongoRunner.runMongod({port: port[0], dbpath: dbdir, logpath: logdir + logs[0]});
// log should now exist (and no rotations should exist)
assert.eq(logCount(logs[0], true), 1);
MongoRunner.stopMongod(port[0]);
print("------ Start mongod with logpath set to existing file");
-m = MongoRunner.runMongod({ port: port[1], dbpath: dbdir, logpath: logdir + logs[0]});
+m = MongoRunner.runMongod({port: port[1], dbpath: dbdir, logpath: logdir + logs[0]});
// log should continue to exist
assert.eq(logCount(logs[0]), 1);
@@ -73,36 +73,43 @@ MongoRunner.stopMongod(port[1]);
// Blocking on SERVER-5117:
// MongoRunner currently hangs if mongod fails to start so these tests don't work
-if ( false ) {
+if (false) {
// only run forking test on *nix (not supported on Windows)
- if ( _isWindows() ) {
+ if (_isWindows()) {
print("------ Skipping fork tests... (Windows)");
} else {
print("------ Start mongod with logpath set to new file, fork");
- var m = MongoRunner.runMongod({ port: port[2], dbpath: dbdir, logpath: logdir + logs[1], fork: true});
-
+ var m = MongoRunner.runMongod(
+ {port: port[2], dbpath: dbdir, logpath: logdir + logs[1], fork: true});
+
// log should now exist (and no rotations should exist)
assert.eq(logCount(logs[1], true), 1);
MongoRunner.stopMongod(port[2]);
-
+
print("------ Start mongod with logpath set to existing file, fork");
- m = MongoRunner.runMongod({ port: port[3], dbpath: dbdir, logpath: logdir + logs[1], fork: true});
-
+ m = MongoRunner.runMongod(
+ {port: port[3], dbpath: dbdir, logpath: logdir + logs[1], fork: true});
+
// log should continue to exist
assert.eq(logCount(logs[1]), 1);
-
+
// but now there should be a rotation file
assert.eq(logCount(logs[1], true), 2);
cleanupFiles();
-
+
MongoRunner.stopMongod(port[3]);
}
-
- // the following tests depend on undefined behavior; assume that MongoRunner raises exception on error
+
+ // the following tests depend on undefined behavior; assume that MongoRunner raises exception on
+ // error
print("------ Confirm that launch fails with directory");
- assert.throws(function() { MongoRunner.runMongod({ port: port[4], dbpath: dbdir, logpath: testdir }); });
+ assert.throws(function() {
+ MongoRunner.runMongod({port: port[4], dbpath: dbdir, logpath: testdir});
+ });
print("------ Confirm that launch fails with special file");
- assert.throws(function() { MongoRunner.runMongod({ port: port[5], dbpath: dbdir, logpath: sfile }); });
+ assert.throws(function() {
+ MongoRunner.runMongod({port: port[5], dbpath: dbdir, logpath: sfile});
+ });
}
diff --git a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
index 857c18c297c..589e072b631 100644
--- a/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
+++ b/jstests/noPassthroughWithMongod/mapreduce_intermediate_reduce.js
@@ -2,8 +2,7 @@
// in-memory state small. See SERVER-12949 for more details.
//
function assertGLEOK(status) {
- assert(status.ok && status.err === null,
- "Expected OK status object; found " + tojson(status));
+ assert(status.ok && status.err === null, "Expected OK status object; found " + tojson(status));
}
var db = db.getSisterDB("MapReduceTestDB");
@@ -18,16 +17,20 @@ var expectedOutColl = [];
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 10; i++) {
for (var j = 1; j < 50; j++) {
- bulk.insert({ idx: i, j: j });
+ bulk.insert({idx: i, j: j});
}
- expectedOutColl.push ({ _id: i, value: j - 1 });
+ expectedOutColl.push({_id: i, value: j - 1});
}
assert.writeOK(bulk.execute());
-function mapFn() { emit(this.idx, 1); }
-function reduceFn(key, values) { return Array.sum(values); }
+function mapFn() {
+ emit(this.idx, 1);
+}
+function reduceFn(key, values) {
+ return Array.sum(values);
+}
-var out = coll.mapReduce(mapFn, reduceFn, { out: { replace: "mrOutput" } });
+var out = coll.mapReduce(mapFn, reduceFn, {out: {replace: "mrOutput"}});
// Check the output is as expected
//
@@ -38,7 +41,7 @@ assert.eq(out.counts.input, 490, "input count is wrong");
assert.eq(out.counts.emit, 490, "emit count is wrong");
// If this fails, most probably some of the configuration settings under mongo::mr::Config have
-// changed, such as reduceTriggerRatio or maxInMemSize. If not the case, then something else
+// changed, such as reduceTriggerRatio or maxInMemSize. If not the case, then something else
// must have changed with when intermediate reduces occur (see mongo::mr::State::checkSize).
//
assert.eq(out.counts.reduce, 14, "reduce count is wrong");
diff --git a/jstests/noPassthroughWithMongod/moveprimary-replset.js b/jstests/noPassthroughWithMongod/moveprimary-replset.js
index 2725585a08e..0f956f638ea 100755..100644
--- a/jstests/noPassthroughWithMongod/moveprimary-replset.js
+++ b/jstests/noPassthroughWithMongod/moveprimary-replset.js
@@ -5,67 +5,72 @@
(function() {
"use strict";
-var numDocs = 10000;
-var baseName = "moveprimary-replset";
-var testDBName = baseName;
-var testCollName = 'coll';
+ var numDocs = 10000;
+ var baseName = "moveprimary-replset";
+ var testDBName = baseName;
+ var testCollName = 'coll';
-jsTest.log("Spinning up a sharded cluster, but not adding the shards");
-var shardingTestConfig = {
- name : baseName,
- mongos : 1,
- shards : 2,
- config : 3,
- rs : { nodes : 3 },
- other : { manualAddShard : true }
-};
-var shardingTest = new ShardingTest(shardingTestConfig);
+ jsTest.log("Spinning up a sharded cluster, but not adding the shards");
+ var shardingTestConfig = {
+ name: baseName,
+ mongos: 1,
+ shards: 2,
+ config: 3,
+ rs: {nodes: 3},
+ other: {manualAddShard: true}
+ };
+ var shardingTest = new ShardingTest(shardingTestConfig);
-jsTest.log("Geting connections to the individual shards");
-var replSet1 = shardingTest.rs0;
-var replSet2 = shardingTest.rs1;
+ jsTest.log("Geting connections to the individual shards");
+ var replSet1 = shardingTest.rs0;
+ var replSet2 = shardingTest.rs1;
-jsTest.log("Adding data to our first replica set");
-var repset1DB = replSet1.getPrimary().getDB(testDBName);
-for (var i = 1; i <= numDocs; i++) {
- repset1DB[testCollName].insert({ x : i });
-}
-replSet1.awaitReplication();
+ jsTest.log("Adding data to our first replica set");
+ var repset1DB = replSet1.getPrimary().getDB(testDBName);
+ for (var i = 1; i <= numDocs; i++) {
+ repset1DB[testCollName].insert({x: i});
+ }
+ replSet1.awaitReplication();
-jsTest.log("Geting connection to mongos for the cluster");
-var mongosConn = shardingTest.s;
-var testDB = mongosConn.getDB(testDBName);
+ jsTest.log("Geting connection to mongos for the cluster");
+ var mongosConn = shardingTest.s;
+ var testDB = mongosConn.getDB(testDBName);
-jsTest.log("Adding replSet1 as only shard");
-mongosConn.adminCommand({ addshard : replSet1.getURL() });
+ jsTest.log("Adding replSet1 as only shard");
+ mongosConn.adminCommand({addshard: replSet1.getURL()});
-jsTest.log("Updating the data via mongos and making sure all documents are updated and present");
-testDB[testCollName].update({}, { $set : { y : 'hello' } }, false/*upsert*/, true/*multi*/);
-assert.eq(testDB[testCollName].count({ y : 'hello' }), numDocs,
- 'updating and counting docs via mongos failed');
+ jsTest.log(
+ "Updating the data via mongos and making sure all documents are updated and present");
+ testDB[testCollName].update({}, {$set: {y: 'hello'}}, false /*upsert*/, true /*multi*/);
+ assert.eq(testDB[testCollName].count({y: 'hello'}),
+ numDocs,
+ 'updating and counting docs via mongos failed');
-jsTest.log("Adding replSet2 as second shard");
-mongosConn.adminCommand({ addshard : replSet2.getURL() });
+ jsTest.log("Adding replSet2 as second shard");
+ mongosConn.adminCommand({addshard: replSet2.getURL()});
-mongosConn.getDB('admin').printShardingStatus();
-printjson(replSet2.getPrimary().getDBs());
+ mongosConn.getDB('admin').printShardingStatus();
+ printjson(replSet2.getPrimary().getDBs());
-jsTest.log("Moving test db from replSet1 to replSet2");
-assert.commandWorked(mongosConn.getDB('admin').runCommand({ moveprimary: testDBName,
- to: replSet2.getURL() }));
-mongosConn.getDB('admin').printShardingStatus();
-printjson(replSet2.getPrimary().getDBs());
-assert.eq(testDB.getSiblingDB("config").databases.findOne({ "_id" : testDBName }).primary,
- replSet2.name, "Failed to change primary shard for unsharded database.");
+ jsTest.log("Moving test db from replSet1 to replSet2");
+ assert.commandWorked(
+ mongosConn.getDB('admin').runCommand({moveprimary: testDBName, to: replSet2.getURL()}));
+ mongosConn.getDB('admin').printShardingStatus();
+ printjson(replSet2.getPrimary().getDBs());
+ assert.eq(testDB.getSiblingDB("config").databases.findOne({"_id": testDBName}).primary,
+ replSet2.name,
+ "Failed to change primary shard for unsharded database.");
-jsTest.log("Updating the data via mongos and making sure all documents are updated and present");
-testDB[testCollName].update({}, { $set : { z : 'world' } }, false/*upsert*/, true/*multi*/);
-assert.eq(testDB[testCollName].count({ z : 'world' }), numDocs,
- 'updating and counting docs via mongos failed');
+ jsTest.log(
+ "Updating the data via mongos and making sure all documents are updated and present");
+ testDB[testCollName].update({}, {$set: {z: 'world'}}, false /*upsert*/, true /*multi*/);
+ assert.eq(testDB[testCollName].count({z: 'world'}),
+ numDocs,
+ 'updating and counting docs via mongos failed');
-jsTest.log("Shutting down cluster");
-shardingTest.stop();
+ jsTest.log("Shutting down cluster");
+ shardingTest.stop();
-print('moveprimary-replset.js SUCCESS');
+ print('moveprimary-replset.js SUCCESS');
})();
diff --git a/jstests/noPassthroughWithMongod/mr_noscripting.js b/jstests/noPassthroughWithMongod/mr_noscripting.js
index cd7f53ae28d..7a6ca1555a8 100644
--- a/jstests/noPassthroughWithMongod/mr_noscripting.js
+++ b/jstests/noPassthroughWithMongod/mr_noscripting.js
@@ -1,24 +1,21 @@
-var conn = MongoRunner.runMongod({ noscripting: '' });
-var testDB = conn.getDB( 'foo' );
+var conn = MongoRunner.runMongod({noscripting: ''});
+var testDB = conn.getDB('foo');
var coll = testDB.bar;
-coll.insert({ x: 1 });
+coll.insert({x: 1});
var map = function() {
- emit( this.x, 1 );
+ emit(this.x, 1);
};
-var reduce = function( key, values ) {
+var reduce = function(key, values) {
return 1;
};
-var mrResult = testDB.runCommand({ mapReduce: 'bar', map: map, reduce: reduce,
- out: { inline: 1 }});
+var mrResult = testDB.runCommand({mapReduce: 'bar', map: map, reduce: reduce, out: {inline: 1}});
-assert.eq( 0, mrResult.ok, 'mr result: ' + tojson( mrResult ));
+assert.eq(0, mrResult.ok, 'mr result: ' + tojson(mrResult));
// Confirm that mongod did not crash
-var cmdResult = testDB.adminCommand({ serverStatus: 1 });
-assert( cmdResult.ok, 'serverStatus failed, result: ' +
- tojson( cmdResult ));
-
+var cmdResult = testDB.adminCommand({serverStatus: 1});
+assert(cmdResult.ok, 'serverStatus failed, result: ' + tojson(cmdResult));
diff --git a/jstests/noPassthroughWithMongod/mr_writeconflict.js b/jstests/noPassthroughWithMongod/mr_writeconflict.js
index 60adb0ac0ce..baae608b59e 100644
--- a/jstests/noPassthroughWithMongod/mr_writeconflict.js
+++ b/jstests/noPassthroughWithMongod/mr_writeconflict.js
@@ -5,41 +5,41 @@
load('jstests/libs/parallelTester.js');
var makeDoc = function(keyLimit, valueLimit) {
- return {
- _id: ObjectId(),
- key: Random.randInt(keyLimit),
- value: Random.randInt(valueLimit)
- };
+ return {
+ _id: ObjectId(),
+ key: Random.randInt(keyLimit),
+ value: Random.randInt(valueLimit)
+ };
};
var main = function() {
- function mapper() {
- var obj = {};
- obj[this.value] = 1;
- emit(this.key, obj);
- }
+ function mapper() {
+ var obj = {};
+ obj[this.value] = 1;
+ emit(this.key, obj);
+ }
- function reducer(key, values) {
- var res = {};
+ function reducer(key, values) {
+ var res = {};
- values.forEach(function(obj) {
- Object.keys(obj).forEach(function(value) {
- if (!res.hasOwnProperty(value)) {
- res[value] = 0;
- }
- res[value] += obj[value];
- });
- });
+ values.forEach(function(obj) {
+ Object.keys(obj).forEach(function(value) {
+ if (!res.hasOwnProperty(value)) {
+ res[value] = 0;
+ }
+ res[value] += obj[value];
+ });
+ });
- return res;
- }
+ return res;
+ }
- for (var i = 0; i < 10; i++) {
- // Have all threads combine their results into the same collection
- var res = db.source.mapReduce(mapper, reducer, { out: { reduce: 'dest' } });
- assert.commandWorked(res);
- }
+ for (var i = 0; i < 10; i++) {
+ // Have all threads combine their results into the same collection
+ var res = db.source.mapReduce(mapper, reducer, {out: {reduce: 'dest'}});
+ assert.commandWorked(res);
+ }
};
Random.setRandomSeed();
@@ -48,8 +48,8 @@
var bulk = db.source.initializeUnorderedBulkOp();
var i;
for (i = 0; i < numDocs; ++i) {
- var doc = makeDoc(numDocs / 100, numDocs / 10);
- bulk.insert(doc);
+ var doc = makeDoc(numDocs / 100, numDocs / 10);
+ bulk.insert(doc);
}
var res = bulk.execute();
@@ -62,12 +62,12 @@
var numThreads = 6;
var t = [];
for (i = 0; i < numThreads - 1; ++i) {
- t[i] = new ScopedThread(main);
- t[i].start();
+ t[i] = new ScopedThread(main);
+ t[i].start();
}
main();
for (i = 0; i < numThreads - 1; ++i) {
- t[i].join();
+ t[i].join();
}
}());
diff --git a/jstests/noPassthroughWithMongod/newcollection2.js b/jstests/noPassthroughWithMongod/newcollection2.js
index 104eec7e897..46cd1316c90 100644
--- a/jstests/noPassthroughWithMongod/newcollection2.js
+++ b/jstests/noPassthroughWithMongod/newcollection2.js
@@ -2,16 +2,16 @@
var baseName = "jstests_disk_newcollection2";
var m = MongoRunner.runMongod({noprealloc: "", smallfiles: ""});
-db = m.getDB( "test" );
+db = m.getDB("test");
-db.createCollection( baseName, {size:0x1FFC0000-0x10-8192} );
-var v = db[ baseName ].validate();
-printjson( v );
-assert( v.valid );
+db.createCollection(baseName, {size: 0x1FFC0000 - 0x10 - 8192});
+var v = db[baseName].validate();
+printjson(v);
+assert(v.valid);
// Try creating collections with some invalid names and confirm that they
// don't crash MongoD.
-db.runCommand({ applyOps: [ { op: 'u', ns: 'a\0b' } ] });
+db.runCommand({applyOps: [{op: 'u', ns: 'a\0b'}]});
var res = db["a\0a"].insert({});
assert(res.hasWriteError(), "A write to collection a\0a succceeded");
diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js
index 45736c26a4e..cfec6199ca2 100644
--- a/jstests/noPassthroughWithMongod/no_balance_collection.js
+++ b/jstests/noPassthroughWithMongod/no_balance_collection.js
@@ -1,91 +1,95 @@
// Tests whether the noBalance flag disables balancing for collections
-var st = new ShardingTest({ shards : 2, mongos : 1, verbose : 1 });
+var st = new ShardingTest({shards: 2, mongos: 1, verbose: 1});
// First, test that shell helpers require an argument
assert.throws(sh.disableBalancing, [], "sh.disableBalancing requires a collection");
assert.throws(sh.enableBalancing, [], "sh.enableBalancing requires a collection");
-
// Initially stop balancing
st.stopBalancer();
var shardAName = st._shardNames[0];
var shardBName = st._shardNames[1];
-var collA = st.s.getCollection( jsTest.name() + ".collA" );
-var collB = st.s.getCollection( jsTest.name() + ".collB" );
+var collA = st.s.getCollection(jsTest.name() + ".collA");
+var collB = st.s.getCollection(jsTest.name() + ".collB");
// Shard two collections
-st.shardColl( collA, { _id : 1 }, false );
-st.shardColl( collB, { _id : 1 }, false );
+st.shardColl(collA, {_id: 1}, false);
+st.shardColl(collB, {_id: 1}, false);
// Split into a lot of chunks so balancing can occur
-for( var i = 0; i < 10 - 1; i++ ){ // 10 chunks total
- collA.getMongo().getDB("admin").runCommand({ split : collA + "", middle : { _id : i } });
- collA.getMongo().getDB("admin").runCommand({ split : collB + "", middle : { _id : i } });
+for (var i = 0; i < 10 - 1; i++) { // 10 chunks total
+ collA.getMongo().getDB("admin").runCommand({split: collA + "", middle: {_id: i}});
+ collA.getMongo().getDB("admin").runCommand({split: collB + "", middle: {_id: i}});
}
// Disable balancing on one collection
-sh.disableBalancing( collB );
+sh.disableBalancing(collB);
-jsTest.log( "Balancing disabled on " + collB );
-printjson( collA.getDB().getSisterDB( "config" ).collections.find().toArray() );
+jsTest.log("Balancing disabled on " + collB);
+printjson(collA.getDB().getSisterDB("config").collections.find().toArray());
st.startBalancer();
// Make sure collA gets balanced
-assert.soon( function(){
- var shardAChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collA ), shard : shardAName }).itcount();
- var shardBChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collA ), shard : shardBName }).itcount();
- printjson({ shardA : shardAChunks, shardB : shardBChunks });
+assert.soon(function() {
+ var shardAChunks =
+ st.s.getDB("config").chunks.find({_id: sh._collRE(collA), shard: shardAName}).itcount();
+ var shardBChunks =
+ st.s.getDB("config").chunks.find({_id: sh._collRE(collA), shard: shardBName}).itcount();
+ printjson({shardA: shardAChunks, shardB: shardBChunks});
return shardAChunks == shardBChunks;
-}, "" + collA + " chunks not balanced!", 5 * 60 * 1000 );
+}, "" + collA + " chunks not balanced!", 5 * 60 * 1000);
-jsTest.log( "Chunks for " + collA + " are balanced." );
+jsTest.log("Chunks for " + collA + " are balanced.");
// Check that the collB chunks were not moved
-var shardAChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardAName }).itcount();
-var shardBChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardBName }).itcount();
-printjson({ shardA : shardAChunks, shardB : shardBChunks });
-assert( shardAChunks == 0 || shardBChunks == 0 );
+var shardAChunks =
+ st.s.getDB("config").chunks.find({_id: sh._collRE(collB), shard: shardAName}).itcount();
+var shardBChunks =
+ st.s.getDB("config").chunks.find({_id: sh._collRE(collB), shard: shardBName}).itcount();
+printjson({shardA: shardAChunks, shardB: shardBChunks});
+assert(shardAChunks == 0 || shardBChunks == 0);
// Re-enable balancing for collB
-sh.enableBalancing( collB );
+sh.enableBalancing(collB);
// Make sure that collB is now balanced
-assert.soon( function(){
- var shardAChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardAName }).itcount();
- var shardBChunks = st.s.getDB( "config" ).chunks.find({ _id : sh._collRE( collB ), shard : shardBName }).itcount();
- printjson({ shardA : shardAChunks, shardB : shardBChunks });
+assert.soon(function() {
+ var shardAChunks =
+ st.s.getDB("config").chunks.find({_id: sh._collRE(collB), shard: shardAName}).itcount();
+ var shardBChunks =
+ st.s.getDB("config").chunks.find({_id: sh._collRE(collB), shard: shardBName}).itcount();
+ printjson({shardA: shardAChunks, shardB: shardBChunks});
return shardAChunks == shardBChunks;
-}, "" + collB + " chunks not balanced!", 5 * 60 * 1000 );
+}, "" + collB + " chunks not balanced!", 5 * 60 * 1000);
-jsTest.log( "Chunks for " + collB + " are balanced." );
+jsTest.log("Chunks for " + collB + " are balanced.");
// Re-disable balancing for collB
-sh.disableBalancing( collB );
+sh.disableBalancing(collB);
// Wait for the balancer to fully finish the last migration and write the changelog
// MUST set db var here, ugly but necessary
db = st.s0.getDB("config");
sh.waitForBalancer(true);
// Make sure auto-migrates on insert don't move chunks
-var lastMigration = sh._lastMigration( collB );
+var lastMigration = sh._lastMigration(collB);
var bulk = collB.initializeUnorderedBulkOp();
-for( var i = 0; i < 1000000; i++ ){
- bulk.insert({ _id: i, hello: "world" });
+for (var i = 0; i < 1000000; i++) {
+ bulk.insert({_id: i, hello: "world"});
}
assert.writeOK(bulk.execute());
-printjson( lastMigration );
-printjson( sh._lastMigration( collB ) );
+printjson(lastMigration);
+printjson(sh._lastMigration(collB));
-if(lastMigration == null) {
+if (lastMigration == null) {
assert.eq(null, sh._lastMigration(collB));
-}
-else {
+} else {
assert.eq(lastMigration.time, sh._lastMigration(collB).time);
}
diff --git a/jstests/noPassthroughWithMongod/parallel_collection_scan.js b/jstests/noPassthroughWithMongod/parallel_collection_scan.js
index 44e5d361e45..11fa5d0bd75 100644
--- a/jstests/noPassthroughWithMongod/parallel_collection_scan.js
+++ b/jstests/noPassthroughWithMongod/parallel_collection_scan.js
@@ -3,27 +3,27 @@ t = db.parallel_collection_scan;
t.drop();
s = "";
-while ( s.length < 10000 )
+while (s.length < 10000)
s += ".";
var bulk = t.initializeUnorderedBulkOp();
-for ( i = 0; i < 8000; i++ ) {
- bulk.insert({ x: i, s: s });
- }
+for (i = 0; i < 8000; i++) {
+ bulk.insert({x: i, s: s});
+}
assert.writeOK(bulk.execute());
function iterateSliced() {
- var res = t.runCommand( "parallelCollectionScan", { numCursors : 3 } );
- assert( res.ok, tojson( res ) );
+ var res = t.runCommand("parallelCollectionScan", {numCursors: 3});
+ assert(res.ok, tojson(res));
var count = 0;
- for ( var i = 0; i < res.cursors.length; i++ ) {
+ for (var i = 0; i < res.cursors.length; i++) {
var x = res.cursors[i];
- var cursor = new DBCommandCursor( db.getMongo(), x, 5 );
+ var cursor = new DBCommandCursor(db.getMongo(), x, 5);
count += cursor.itcount();
}
return count;
}
-assert.eq( iterateSliced(), t.count() );
-assert.eq( iterateSliced(), i );
+assert.eq(iterateSliced(), t.count());
+assert.eq(iterateSliced(), i);
diff --git a/jstests/noPassthroughWithMongod/query_oplogreplay.js b/jstests/noPassthroughWithMongod/query_oplogreplay.js
index fc2760577bc..7d4e632f431 100644
--- a/jstests/noPassthroughWithMongod/query_oplogreplay.js
+++ b/jstests/noPassthroughWithMongod/query_oplogreplay.js
@@ -2,7 +2,7 @@
function test(t) {
t.drop();
- assert.commandWorked(t.getDB().createCollection(t.getName(), {capped: true, size: 16*1024}));
+ assert.commandWorked(t.getDB().createCollection(t.getName(), {capped: true, size: 16 * 1024}));
function makeTS(i) {
return Timestamp(1000, i);
@@ -23,11 +23,13 @@ function test(t) {
// 'ts' field is not top-level.
assert.throws(function() {
t.find({$or: [{ts: {$gt: makeTS(3)}}, {foo: 3}]})
- .addOption(DBQuery.Option.oplogReplay).next();
+ .addOption(DBQuery.Option.oplogReplay)
+ .next();
});
assert.throws(function() {
t.find({$nor: [{ts: {$gt: makeTS(4)}}, {foo: 4}]})
- .addOption(DBQuery.Option.oplogReplay).next();
+ .addOption(DBQuery.Option.oplogReplay)
+ .next();
});
// Predicate over 'ts' is not $gt or $gte.
@@ -61,5 +63,5 @@ var coll = db.jstests_query_oplogreplay;
coll.drop();
assert.commandWorked(coll.getDB().createCollection(coll.getName()));
var res = assert.throws(function() {
- coll.find({ts: {$gt: "abcd"}}).addOption(DBQuery.Option.oplogReplay).next();
- });
+ coll.find({ts: {$gt: "abcd"}}).addOption(DBQuery.Option.oplogReplay).next();
+});
diff --git a/jstests/noPassthroughWithMongod/reconfigwt.js b/jstests/noPassthroughWithMongod/reconfigwt.js
index e75495608be..4176022e971 100644
--- a/jstests/noPassthroughWithMongod/reconfigwt.js
+++ b/jstests/noPassthroughWithMongod/reconfigwt.js
@@ -8,14 +8,13 @@ var ss = db.serverStatus();
// Test is only valid in the WT suites which run against a mongod with WiredTiger enabled
if (ss.storageEngine.name !== "wiredTiger") {
print("Skipping reconfigwt.js since this server does not have WiredTiger enabled");
-}
-else {
+} else {
var conn = MongoRunner.runMongod();
- var admin = conn.getDB( "admin" );
+ var admin = conn.getDB("admin");
function reconfigure(str) {
- ret = admin.runCommand( { setParameter : 1, "wiredTigerEngineRuntimeConfig" : str });
+ ret = admin.runCommand({setParameter: 1, "wiredTigerEngineRuntimeConfig": str});
print("ret: " + tojson(ret));
return ret;
}
diff --git a/jstests/noPassthroughWithMongod/recstore.js b/jstests/noPassthroughWithMongod/recstore.js
index 339fdf2992c..fdb41af568b 100644
--- a/jstests/noPassthroughWithMongod/recstore.js
+++ b/jstests/noPassthroughWithMongod/recstore.js
@@ -7,16 +7,16 @@ t = db.storetest;
t.drop();
-t.save({z:3});
-t.save({z:2});
+t.save({z: 3});
+t.save({z: 2});
-t.ensureIndex({z:1});
-t.ensureIndex({q:1});
-assert( t.find().sort({z:1})[0].z == 2 );
+t.ensureIndex({z: 1});
+t.ensureIndex({q: 1});
+assert(t.find().sort({z: 1})[0].z == 2);
t.dropIndexes();
-assert( t.find().sort({z:1})[0].z == 2 );
+assert(t.find().sort({z: 1})[0].z == 2);
-t.ensureIndex({z:1});
-t.ensureIndex({q:1});
+t.ensureIndex({z: 1});
+t.ensureIndex({q: 1});
diff --git a/jstests/noPassthroughWithMongod/remove9.js b/jstests/noPassthroughWithMongod/remove9.js
index 3135514e4dc..b7da7b58f95 100644
--- a/jstests/noPassthroughWithMongod/remove9.js
+++ b/jstests/noPassthroughWithMongod/remove9.js
@@ -1,12 +1,13 @@
t = db.jstests_remove9;
t.drop();
-js = "while( 1 ) { for( i = 0; i < 10000; ++i ) { db.jstests_remove9.save( {i:i} ); } db.jstests_remove9.remove( {i: {$gte:0} } ); }";
-pid = startMongoProgramNoConnect( "mongo" , "--eval" , js , db ? db.getMongo().host : null );
+js =
+ "while( 1 ) { for( i = 0; i < 10000; ++i ) { db.jstests_remove9.save( {i:i} ); } db.jstests_remove9.remove( {i: {$gte:0} } ); }";
+pid = startMongoProgramNoConnect("mongo", "--eval", js, db ? db.getMongo().host : null);
Random.setRandomSeed();
-for( var i = 0; i < 10000; ++i ) {
- assert.writeOK(t.remove( { i: Random.randInt( 10000 )} ));
+for (var i = 0; i < 10000; ++i) {
+ assert.writeOK(t.remove({i: Random.randInt(10000)}));
}
-stopMongoProgramByPid( pid );
+stopMongoProgramByPid(pid);
diff --git a/jstests/noPassthroughWithMongod/replReads.js b/jstests/noPassthroughWithMongod/replReads.js
index a5b60ffea9c..45e0a4d49a6 100644
--- a/jstests/noPassthroughWithMongod/replReads.js
+++ b/jstests/noPassthroughWithMongod/replReads.js
@@ -1,16 +1,15 @@
// Test that doing slaveOk reads from secondaries hits all the secondaries evenly
function testReadLoadBalancing(numReplicas) {
+ var s =
+ new ShardingTest({shards: {rs0: {nodes: numReplicas}}, verbose: 2, other: {chunkSize: 1}});
- var s = new ShardingTest({ shards: { rs0: { nodes: numReplicas }},
- verbose: 2, other: { chunkSize: 1 }});
-
- s.adminCommand({enablesharding : "test"});
+ s.adminCommand({enablesharding: "test"});
s.config.settings.find().forEach(printjson);
- s.adminCommand({shardcollection : "test.foo", key : {_id : 1}});
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
- s.getDB("test").foo.insert({a : 123});
+ s.getDB("test").foo.insert({a: 123});
primary = s._rs[0].test.liveNodes.master;
secondaries = s._rs[0].test.liveNodes.slaves;
@@ -18,30 +17,30 @@ function testReadLoadBalancing(numReplicas) {
function rsStats() {
return s.getDB("admin").runCommand("connPoolStats")["replicaSets"][s.rs0.name];
}
-
- assert.eq( numReplicas , rsStats().hosts.length );
-
- function isMasterOrSecondary( info ){
- if ( ! info.ok )
+
+ assert.eq(numReplicas, rsStats().hosts.length);
+
+ function isMasterOrSecondary(info) {
+ if (!info.ok)
return false;
- if ( info.ismaster )
+ if (info.ismaster)
return true;
- return info.secondary && ! info.hidden;
+ return info.secondary && !info.hidden;
}
- assert.soon(
- function() {
- var x = rsStats().hosts;
- printjson(x);
- for ( var i=0; i<x.length; i++ )
- if ( ! isMasterOrSecondary( x[i] ) )
- return false;
- return true;
- }
- );
-
+ assert.soon(function() {
+ var x = rsStats().hosts;
+ printjson(x);
+ for (var i = 0; i < x.length; i++)
+ if (!isMasterOrSecondary(x[i]))
+ return false;
+ return true;
+ });
+
for (var i = 0; i < secondaries.length; i++) {
- assert.soon( function(){ return secondaries[i].getDB("test").foo.count() > 0; } );
+ assert.soon(function() {
+ return secondaries[i].getDB("test").foo.count() > 0;
+ });
secondaries[i].getDB('test').setProfilingLevel(2);
}
// Primary may change with reconfig
@@ -57,50 +56,53 @@ function testReadLoadBalancing(numReplicas) {
connections.push(conn);
}
- var profileCriteria = { op: 'query', ns: 'test.foo' };
+ var profileCriteria = {
+ op: 'query',
+ ns: 'test.foo'
+ };
for (var i = 0; i < secondaries.length; i++) {
var profileCollection = secondaries[i].getDB('test').system.profile;
- assert.eq(10, profileCollection.find(profileCriteria).count(),
- "Wrong number of read queries sent to secondary " + i +
- " " + tojson( profileCollection.find().toArray() ));
+ assert.eq(10,
+ profileCollection.find(profileCriteria).count(),
+ "Wrong number of read queries sent to secondary " + i + " " +
+ tojson(profileCollection.find().toArray()));
}
-
- db = primary.getDB( "test" );
-
+
+ db = primary.getDB("test");
+
printjson(rs.status());
c = rs.conf();
- print( "config before: " + tojson(c) );
- for ( i=0; i<c.members.length; i++ ) {
- if ( c.members[i].host == db.runCommand( "ismaster" ).primary )
+ print("config before: " + tojson(c));
+ for (i = 0; i < c.members.length; i++) {
+ if (c.members[i].host == db.runCommand("ismaster").primary)
continue;
c.members[i].hidden = true;
c.members[i].priority = 0;
break;
}
- rs.reconfig( c );
- print( "config after: " + tojson( rs.conf() ) );
-
- assert.soon(
- function() {
- var x = rsStats();
- printjson(x);
- var numOk = 0;
- // Now wait until the host disappears, since now we actually update our
- // replica sets via isMaster in mongos
- if( x.hosts.length == c["members"].length - 1 ) return true;
- /*
- for ( var i=0; i<x.hosts.length; i++ )
- if ( x.hosts[i].hidden )
- return true;
- */
- return false;
- } , "one slave not ok" , 180000 , 5000
- );
-
+ rs.reconfig(c);
+ print("config after: " + tojson(rs.conf()));
+
+ assert.soon(function() {
+ var x = rsStats();
+ printjson(x);
+ var numOk = 0;
+ // Now wait until the host disappears, since now we actually update our
+ // replica sets via isMaster in mongos
+ if (x.hosts.length == c["members"].length - 1)
+ return true;
+ /*
+ for ( var i=0; i<x.hosts.length; i++ )
+ if ( x.hosts[i].hidden )
+ return true;
+ */
+ return false;
+ }, "one slave not ok", 180000, 5000);
+
// Secondaries may change here
secondaries = s._rs[0].test.liveNodes.slaves;
-
+
for (var i = 0; i < secondaries.length * 10; i++) {
conn = new Mongo(s._mongos[0].host);
conn.setSlaveOk();
@@ -111,16 +113,16 @@ function testReadLoadBalancing(numReplicas) {
var counts = [];
for (var i = 0; i < secondaries.length; i++) {
var profileCollection = secondaries[i].getDB('test').system.profile;
- counts.push( profileCollection.find(profileCriteria).count() );
+ counts.push(profileCollection.find(profileCriteria).count());
}
counts = counts.sort();
- assert.eq( 20 , Math.abs( counts[1] - counts[0] ), "counts wrong: " + tojson( counts ) );
+ assert.eq(20, Math.abs(counts[1] - counts[0]), "counts wrong: " + tojson(counts));
s.stop();
}
-//for (var i = 1; i < 10; i++) {
+// for (var i = 1; i < 10; i++) {
// testReadLoadBalancing(i)
//}
diff --git a/jstests/noPassthroughWithMongod/replica_set_shard_version.js b/jstests/noPassthroughWithMongod/replica_set_shard_version.js
index f853c74603e..b8fe681cc06 100644
--- a/jstests/noPassthroughWithMongod/replica_set_shard_version.js
+++ b/jstests/noPassthroughWithMongod/replica_set_shard_version.js
@@ -1,8 +1,8 @@
// Tests whether a Replica Set in a mongos cluster can cause versioning problems
-jsTestLog( "Starting sharded cluster..." );
+jsTestLog("Starting sharded cluster...");
-var st = new ShardingTest( { shards : 1, mongos : 2, other : { rs : true } } );
+var st = new ShardingTest({shards: 1, mongos: 2, other: {rs: true}});
// Uncomment to stop the balancer, since the balancer usually initializes the shard automatically
// SERVER-4921 is otherwise hard to manifest
@@ -12,48 +12,50 @@ var mongosA = st.s0;
var mongosB = st.s1;
var shard = st.shard0;
-coll = mongosA.getCollection( jsTestName() + ".coll" );
+coll = mongosA.getCollection(jsTestName() + ".coll");
// Wait for primary and then initialize shard SERVER-5130
st.rs0.getPrimary();
coll.findOne();
-var sadmin = shard.getDB( "admin" );
-assert.throws(function() { sadmin.runCommand({ replSetStepDown : 3000, force : true }); });
+var sadmin = shard.getDB("admin");
+assert.throws(function() {
+ sadmin.runCommand({replSetStepDown: 3000, force: true});
+});
st.rs0.getPrimary();
-mongosA.getDB("admin").runCommand({ setParameter : 1, traceExceptions : true });
+mongosA.getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
-try{
- // This _almost_ always fails, unless the new primary is already detected. If if fails, it should
+try {
+    // This _almost_ always fails, unless the new primary is already detected. If it fails, it
+    // should
// mark the master as bad, so mongos will reload the replica set master next request
// TODO: Can we just retry and succeed here?
coll.findOne();
-}
-catch( e ){
- print( "This error is expected : " );
- printjson( e );
+} catch (e) {
+ print("This error is expected : ");
+ printjson(e);
}
-jsTest.log( "Running query which should succeed..." );
+jsTest.log("Running query which should succeed...");
// This should always succeed without throwing an error
coll.findOne();
-mongosA.getDB("admin").runCommand({ setParameter : 1, traceExceptions : false });
+mongosA.getDB("admin").runCommand({setParameter: 1, traceExceptions: false});
// now check secondary
-assert.throws(function() { sadmin.runCommand({ replSetStepDown : 3000, force : true }); });
+assert.throws(function() {
+ sadmin.runCommand({replSetStepDown: 3000, force: true});
+});
// Can't use the mongosB - SERVER-5128
-other = new Mongo( mongosA.host );
-other.setSlaveOk( true );
-other = other.getCollection( jsTestName() + ".coll" );
-
-print( "eliot: " + tojson( other.findOne() ) );
-
+other = new Mongo(mongosA.host);
+other.setSlaveOk(true);
+other = other.getCollection(jsTestName() + ".coll");
+print("eliot: " + tojson(other.findOne()));
st.stop();
diff --git a/jstests/noPassthroughWithMongod/rpc_protocols.js b/jstests/noPassthroughWithMongod/rpc_protocols.js
index 2720d30b88d..7e33c3986d3 100644
--- a/jstests/noPassthroughWithMongod/rpc_protocols.js
+++ b/jstests/noPassthroughWithMongod/rpc_protocols.js
@@ -4,7 +4,7 @@
// startup using the "--rpcProtocols" command line option, or at runtime using the
// "setClientRPCProtocols" method on the Mongo object.
-var RPC_PROTOCOLS = {
+var RPC_PROTOCOLS = {
OP_QUERY: "opQueryOnly",
OP_COMMAND: "opCommandOnly"
};
@@ -19,53 +19,59 @@ var RPC_PROTOCOLS = {
assert.commandWorked(db.setProfilingLevel(2));
function runInShell(rpcProtocol, func) {
- assert (0 == _runMongoProgram("mongo",
- "--rpcProtocols="+rpcProtocol,
- "--readMode=commands", // ensure we use the find command.
- "--eval",
- "(" + func.toString() + ")();",
- db.getMongo().host));
-
+ assert(0 == _runMongoProgram("mongo",
+ "--rpcProtocols=" + rpcProtocol,
+ "--readMode=commands", // ensure we use the find command.
+ "--eval",
+ "(" + func.toString() + ")();",
+ db.getMongo().host));
}
// Test that --rpcProtocols=opQueryOnly forces OP_QUERY commands.
- runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opQueryCommandLine").itcount();
- });
+ runInShell(
+ RPC_PROTOCOLS.OP_QUERY,
+ function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opQueryCommandLine").itcount();
+ });
var profileDoc = db.system.profile.findOne({"query.comment": "opQueryCommandLine"});
assert(profileDoc !== null);
assert.eq(profileDoc.protocol, "op_query");
// Test that --rpcProtocols=opCommandOnly forces OP_COMMAND commands.
- runInShell(RPC_PROTOCOLS.OP_COMMAND, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opCommandCommandLine").itcount();
- });
+ runInShell(
+ RPC_PROTOCOLS.OP_COMMAND,
+ function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opCommandCommandLine").itcount();
+ });
profileDoc = db.system.profile.findOne({"query.comment": "opCommandCommandLine"});
assert(profileDoc !== null);
assert.eq(profileDoc.protocol, "op_command");
// Test that .setClientRPCProtocols("opQueryOnly") forces OP_QUERY commands. We start the shell
// in OP_COMMAND only mode, then switch it to OP_QUERY mode at runtime.
- runInShell(RPC_PROTOCOLS.OP_COMMAND, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
- db.getMongo().setClientRPCProtocols("opQueryOnly");
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opQueryRuntime").itcount();
- });
+ runInShell(RPC_PROTOCOLS.OP_COMMAND,
+ function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
+ db.getMongo().setClientRPCProtocols("opQueryOnly");
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opQueryRuntime").itcount();
+ });
profileDoc = db.system.profile.findOne({"query.comment": "opQueryRuntime"});
assert(profileDoc !== null);
assert.eq(profileDoc.protocol, "op_query");
// Test that .setClientRPCProtocols("opCommandOnly") forces OP_COMMAND commands. We start the
// shell in OP_QUERY only mode, then switch it to OP_COMMAND mode at runtime.
- runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getMongo().setClientRPCProtocols("opCommandOnly");
- assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opCommandRuntime").itcount();
- });
+ runInShell(
+ RPC_PROTOCOLS.OP_QUERY,
+ function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getMongo().setClientRPCProtocols("opCommandOnly");
+ assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opCommandRuntime").itcount();
+ });
profileDoc = db.system.profile.findOne({"query.comment": "opCommandRuntime"});
assert(profileDoc !== null);
assert.eq(profileDoc.protocol, "op_command");
diff --git a/jstests/noPassthroughWithMongod/server7428.js b/jstests/noPassthroughWithMongod/server7428.js
index d077e126d8a..745f11021f4 100644
--- a/jstests/noPassthroughWithMongod/server7428.js
+++ b/jstests/noPassthroughWithMongod/server7428.js
@@ -8,15 +8,15 @@
(function() {
-// Setup fromDb with no auth
-var fromDb = MongoRunner.runMongod();
+ // Setup fromDb with no auth
+ var fromDb = MongoRunner.runMongod();
-// Setup toDb with auth
-var toDb = MongoRunner.runMongod({auth: ""});
-var admin = toDb.getDB("admin");
-admin.createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
-admin.auth("foo","bar");
+ // Setup toDb with auth
+ var toDb = MongoRunner.runMongod({auth: ""});
+ var admin = toDb.getDB("admin");
+ admin.createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
+ admin.auth("foo", "bar");
-admin.copyDatabase('test', 'test', fromDb.host);
+ admin.copyDatabase('test', 'test', fromDb.host);
})();
diff --git a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
index b112590d0a0..0f4b19e22e3 100644
--- a/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
+++ b/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
@@ -2,62 +2,66 @@
// Tests migration behavior of large documents
//
-var st = new ShardingTest({ shards : 2, mongos : 1,
- other : { mongosOptions : { noAutoSplit : "" },
- shardOptions : { /* binVersion : "latest" */ } } });
+var st = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ other: {mongosOptions: {noAutoSplit: ""}, shardOptions: {/* binVersion : "latest" */}}
+});
st.stopBalancer();
var mongos = st.s0;
-var coll = mongos.getCollection( "foo.bar" );
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var shardAdmin = st.shard0.getDB( "admin" );
+var coll = mongos.getCollection("foo.bar");
+var admin = mongos.getDB("admin");
+var shards = mongos.getCollection("config.shards").find().toArray();
+var shardAdmin = st.shard0.getDB("admin");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
-jsTest.log( "Preparing large insert..." );
+jsTest.log("Preparing large insert...");
var data1MB = "x";
-while ( data1MB.length < 1024 * 1024 )
+while (data1MB.length < 1024 * 1024)
data1MB += data1MB;
var data15MB = "";
-for ( var i = 0; i < 15; i++ ) data15MB += data1MB;
+for (var i = 0; i < 15; i++)
+ data15MB += data1MB;
var data15PlusMB = data15MB;
-for ( var i = 0; i < 1023 * 1024; i++ ) data15PlusMB += "x";
+for (var i = 0; i < 1023 * 1024; i++)
+ data15PlusMB += "x";
-print("~15MB object size is : " + Object.bsonsize({ _id : 0, d : data15PlusMB }));
+print("~15MB object size is : " + Object.bsonsize({_id: 0, d: data15PlusMB}));
-jsTest.log( "Inserting docs of large and small sizes..." );
+jsTest.log("Inserting docs of large and small sizes...");
// Two large docs next to each other
-coll.insert({ _id : -2, d : data15PlusMB });
-coll.insert({ _id : -1, d : data15PlusMB });
+coll.insert({_id: -2, d: data15PlusMB});
+coll.insert({_id: -1, d: data15PlusMB});
// Docs of assorted sizes
-assert.writeOK(coll.insert({ _id : 0, d : "x" }));
-assert.writeOK(coll.insert({ _id : 1, d : data15PlusMB }));
-assert.writeOK(coll.insert({ _id : 2, d : "x" }));
-assert.writeOK(coll.insert({ _id : 3, d : data15MB }));
-assert.writeOK(coll.insert({ _id : 4, d : "x" }));
-assert.writeOK(coll.insert({ _id : 5, d : data1MB }));
-assert.writeOK(coll.insert({ _id : 6, d : "x" }));
+assert.writeOK(coll.insert({_id: 0, d: "x"}));
+assert.writeOK(coll.insert({_id: 1, d: data15PlusMB}));
+assert.writeOK(coll.insert({_id: 2, d: "x"}));
+assert.writeOK(coll.insert({_id: 3, d: data15MB}));
+assert.writeOK(coll.insert({_id: 4, d: "x"}));
+assert.writeOK(coll.insert({_id: 5, d: data1MB}));
+assert.writeOK(coll.insert({_id: 6, d: "x"}));
-assert.eq( 9, coll.find().itcount() );
+assert.eq(9, coll.find().itcount());
-jsTest.log( "Starting migration..." );
+jsTest.log("Starting migration...");
-assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id }).ok );
-assert( admin.runCommand({ moveChunk : coll + "", find : { _id : -1 }, to : shards[1]._id }).ok );
+assert(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}).ok);
+assert(admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: shards[1]._id}).ok);
// Ensure that the doc count is correct and that the mongos query path can handle docs near the 16MB
// user BSON size limit.
-assert.eq( 9, coll.find().itcount() );
+assert.eq(9, coll.find().itcount());
-jsTest.log( "DONE!" );
+jsTest.log("DONE!");
st.stop();
diff --git a/jstests/noPassthroughWithMongod/sharding_rs_arb1.js b/jstests/noPassthroughWithMongod/sharding_rs_arb1.js
index ba3142bbe5c..8b53f07d3a3 100644
--- a/jstests/noPassthroughWithMongod/sharding_rs_arb1.js
+++ b/jstests/noPassthroughWithMongod/sharding_rs_arb1.js
@@ -1,27 +1,28 @@
var name = "sharding_rs_arb1";
-var replTest = new ReplSetTest( { name : name , nodes : 3 } );
+var replTest = new ReplSetTest({name: name, nodes: 3});
replTest.startSet();
var port = replTest.ports;
-replTest.initiate({_id : name, members :
- [
- {_id:0, host : getHostName()+":"+port[0]},
- {_id:1, host : getHostName()+":"+port[1]},
- {_id:2, host : getHostName()+":"+port[2], arbiterOnly : true},
- ],
- });
+replTest.initiate({
+ _id: name,
+ members: [
+ {_id: 0, host: getHostName() + ":" + port[0]},
+ {_id: 1, host: getHostName() + ":" + port[1]},
+ {_id: 2, host: getHostName() + ":" + port[2], arbiterOnly: true},
+ ],
+});
replTest.awaitReplication();
var master = replTest.getPrimary();
-var db = master.getDB( "test" );
-printjson( rs.status() );
+var db = master.getDB("test");
+printjson(rs.status());
var st = new ShardingTest({numShards: 0});
var admin = st.getDB('admin');
-var res = admin.runCommand( { addshard : replTest.getURL() } );
-printjson( res );
-assert( res.ok , tojson(res) );
+var res = admin.runCommand({addshard: replTest.getURL()});
+printjson(res);
+assert(res.ok, tojson(res));
st.stop();
replTest.stopSet();
diff --git a/jstests/noPassthroughWithMongod/shelllimit.js b/jstests/noPassthroughWithMongod/shelllimit.js
index cc7e7359ef6..3b270bddc12 100644
--- a/jstests/noPassthroughWithMongod/shelllimit.js
+++ b/jstests/noPassthroughWithMongod/shelllimit.js
@@ -7,15 +7,15 @@
t.drop();
var pre = db.serverStatus().metrics.cursor.open.total;
- for (var i=1; i<=5; i++) {
- t.save( { a : i } );
+ for (var i = 1; i <= 5; i++) {
+ t.save({a: i});
}
var c = t.find().limit(3);
- while(c.hasNext()) {
+ while (c.hasNext()) {
var v = c.next();
}
- assert.eq(pre,db.serverStatus().metrics.cursor.open.total);
+ assert.eq(pre, db.serverStatus().metrics.cursor.open.total);
t.drop();
}());
diff --git a/jstests/noPassthroughWithMongod/temp_namespace.js b/jstests/noPassthroughWithMongod/temp_namespace.js
index f74ac73bcea..a2f1aa21a80 100644
--- a/jstests/noPassthroughWithMongod/temp_namespace.js
+++ b/jstests/noPassthroughWithMongod/temp_namespace.js
@@ -7,31 +7,34 @@ testname = 'temp_namespace_sw';
var conn = MongoRunner.runMongod({smallfiles: "", noprealloc: "", nopreallocj: ""});
d = conn.getDB('test');
-d.runCommand({create: testname+'temp1', temp: true});
-d[testname+'temp1'].ensureIndex({x:1});
-d.runCommand({create: testname+'temp2', temp: 1});
-d[testname+'temp2'].ensureIndex({x:1});
-d.runCommand({create: testname+'keep1', temp: false});
-d.runCommand({create: testname+'keep2', temp: 0});
-d.runCommand({create: testname+'keep3'});
-d[testname+'keep4'].insert({});
+d.runCommand({create: testname + 'temp1', temp: true});
+d[testname + 'temp1'].ensureIndex({x: 1});
+d.runCommand({create: testname + 'temp2', temp: 1});
+d[testname + 'temp2'].ensureIndex({x: 1});
+d.runCommand({create: testname + 'keep1', temp: false});
+d.runCommand({create: testname + 'keep2', temp: 0});
+d.runCommand({create: testname + 'keep3'});
+d[testname + 'keep4'].insert({});
-function countCollectionNames( theDB, regex ) {
- return theDB.getCollectionNames().filter( function(z) {
- return z.match( regex ); } ).length;
+function countCollectionNames(theDB, regex) {
+ return theDB.getCollectionNames().filter(function(z) {
+ return z.match(regex);
+ }).length;
}
-assert.eq(countCollectionNames( d, /temp\d$/) , 2);
-assert.eq(countCollectionNames( d, /keep\d$/) , 4);
+assert.eq(countCollectionNames(d, /temp\d$/), 2);
+assert.eq(countCollectionNames(d, /keep\d$/), 4);
MongoRunner.stopMongod(conn);
-conn = MongoRunner.runMongod({restart:true,
- cleanData: false,
- dbpath: conn.dbpath,
- smallfiles: "",
- noprealloc: "",
- nopreallocj: ""});
+conn = MongoRunner.runMongod({
+ restart: true,
+ cleanData: false,
+ dbpath: conn.dbpath,
+ smallfiles: "",
+ noprealloc: "",
+ nopreallocj: ""
+});
d = conn.getDB('test');
-assert.eq(countCollectionNames( d, /temp\d$/) , 0);
-assert.eq(countCollectionNames( d, /keep\d$/) , 4);
+assert.eq(countCollectionNames(d, /temp\d$/), 0);
+assert.eq(countCollectionNames(d, /keep\d$/), 4);
MongoRunner.stopMongod(conn);
diff --git a/jstests/noPassthroughWithMongod/testing_only_commands.js b/jstests/noPassthroughWithMongod/testing_only_commands.js
index 51d104bee48..3ac3db8ed67 100644
--- a/jstests/noPassthroughWithMongod/testing_only_commands.js
+++ b/jstests/noPassthroughWithMongod/testing_only_commands.js
@@ -3,14 +3,16 @@
* via the --enableTestCommands flag fail when that flag isn't provided.
*/
-var testOnlyCommands = ['configureFailPoint',
- '_hashBSONElement',
- 'replSetTest',
- 'journalLatencyTest',
- 'godinsert',
- 'sleep',
- 'captrunc',
- 'emptycapped'];
+var testOnlyCommands = [
+ 'configureFailPoint',
+ '_hashBSONElement',
+ 'replSetTest',
+ 'journalLatencyTest',
+ 'godinsert',
+ 'sleep',
+ 'captrunc',
+ 'emptycapped'
+];
var assertCmdNotFound = function(db, cmdName) {
var res = db.runCommand(cmdName);
@@ -21,9 +23,10 @@ var assertCmdNotFound = function(db, cmdName) {
var assertCmdFound = function(db, cmdName) {
var res = db.runCommand(cmdName);
if (!res.ok) {
- assert.neq(59, res.code,
+ assert.neq(59,
+ res.code,
'test command ' + cmdName + ' should either have succeeded or ' +
- 'failed with an error code other than CommandNotFound(59)');
+ 'failed with an error code other than CommandNotFound(59)');
}
};
diff --git a/jstests/noPassthroughWithMongod/ttl1.js b/jstests/noPassthroughWithMongod/ttl1.js
index 906f2be75cf..60df6537023 100644
--- a/jstests/noPassthroughWithMongod/ttl1.js
+++ b/jstests/noPassthroughWithMongod/ttl1.js
@@ -11,9 +11,9 @@
assertEntryMatches = function(array, regex) {
var found = false;
- for (i=0; i<array.length; i++) {
+ for (i = 0; i < array.length; i++) {
if (regex.test(array[i])) {
- found = true;
+ found = true;
}
}
assert(found,
@@ -22,23 +22,23 @@ assertEntryMatches = function(array, regex) {
// Part 1
var t = db.ttl1;
t.drop();
-t.runCommand( "create", { flags : 0 } );
+t.runCommand("create", {flags: 0});
var now = (new Date()).getTime();
-for (i=0; i<24; i++) {
+for (i = 0; i < 24; i++) {
var past = new Date(now - (3600 * 1000 * i));
t.insert({x: past, y: past, z: past});
}
-t.insert( { a : 1 } ); //no x value
-t.insert( { x: null } ); //non-date value
-t.insert( { x : true } ); //non-date value
-t.insert( { x : "yo" } ); //non-date value
-t.insert( { x : 3 } ); //non-date value
-t.insert( { x : /foo/ } ); //non-date value
+t.insert({a: 1}); // no x value
+t.insert({x: null}); // non-date value
+t.insert({x: true}); // non-date value
+t.insert({x: "yo"}); // non-date value
+t.insert({x: 3}); // non-date value
+t.insert({x: /foo/}); // non-date value
-assert.eq( 30 , t.count() );
+assert.eq(30, t.count());
-t.ensureIndex( { z : 1 } , { expireAfterSeconds : "20000" } );
+t.ensureIndex({z: 1}, {expireAfterSeconds: "20000"});
sleep(70 * 1000);
@@ -51,33 +51,29 @@ var msg = RegExp("ttl indexes require the expireAfterSeconds" +
assertEntryMatches(log, msg);
// Part 2
-t.ensureIndex( { x : 1 } , { expireAfterSeconds : 20000 } );
+t.ensureIndex({x: 1}, {expireAfterSeconds: 20000});
-assert.soon(
- function() {
- return t.count() < 30;
- }, "TTL index on x didn't delete" , 70 * 1000);
+assert.soon(function() {
+ return t.count() < 30;
+}, "TTL index on x didn't delete", 70 * 1000);
// We know the TTL thread has started deleting. Wait a few seconds to give it a chance to finish.
-assert.soon(
- function() {
- return t.find( { x : { $lt : new Date( now - ( 20000 * 1000 ) ) } } ).count() === 0;
- }, "TTL index on x didn't finish deleting", 5 * 1000);
-assert.eq( 12 , t.count() );
+assert.soon(function() {
+ return t.find({x: {$lt: new Date(now - (20000 * 1000))}}).count() === 0;
+}, "TTL index on x didn't finish deleting", 5 * 1000);
+assert.eq(12, t.count());
-assert.lte( 18, db.serverStatus().metrics.ttl.deletedDocuments );
-assert.lte( 1, db.serverStatus().metrics.ttl.passes );
+assert.lte(18, db.serverStatus().metrics.ttl.deletedDocuments);
+assert.lte(1, db.serverStatus().metrics.ttl.passes);
// Part 3
-t.ensureIndex( { y : 1 } , { expireAfterSeconds : 10000 } );
+t.ensureIndex({y: 1}, {expireAfterSeconds: 10000});
-assert.soon(
- function() {
- return t.count() < 12;
- }, "TTL index on y didn't delete" , 70 * 1000);
+assert.soon(function() {
+ return t.count() < 12;
+}, "TTL index on y didn't delete", 70 * 1000);
-assert.soon(
- function() {
- return t.find( { y : { $lt : new Date( now - ( 10000 * 1000 ) ) } } ).count() === 0;
- }, "TTL index on y didn't finish deleting", 5 * 1000);
-assert.eq( 9 , t.count() );
+assert.soon(function() {
+ return t.find({y: {$lt: new Date(now - (10000 * 1000))}}).count() === 0;
+}, "TTL index on y didn't finish deleting", 5 * 1000);
+assert.eq(9, t.count());
diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js
index 4c16c7f6306..794f0c3ad90 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl.js
@@ -7,7 +7,7 @@
load("jstests/replsets/rslib.js");
-var rt = new ReplSetTest( { name : "ttl_repl" , nodes: 2 } );
+var rt = new ReplSetTest({name: "ttl_repl", nodes: 2});
/******** Part 1 ***************/
@@ -19,10 +19,10 @@ rt.awaitSecondaryNodes();
var slave1 = rt.liveNodes.slaves[0];
// shortcuts
-var masterdb = master.getDB( 'd' );
-var slave1db = slave1.getDB( 'd' );
-var mastercol = masterdb[ 'c' ];
-var slave1col = slave1db[ 'c' ];
+var masterdb = master.getDB('d');
+var slave1db = slave1.getDB('d');
+var mastercol = masterdb['c'];
+var slave1col = slave1db['c'];
// turn off usePowerOf2Sizes as this tests the flag is set automatically
mastercol.drop();
@@ -31,36 +31,35 @@ masterdb.createCollection(mastercol.getName(), {usePowerOf2Sizes: false});
// create new collection. insert 24 docs, aged at one-hour intervalss
now = (new Date()).getTime();
var bulk = mastercol.initializeUnorderedBulkOp();
-for ( i=0; i<24; i++ ) {
- bulk.insert({ x: new Date( now - ( 3600 * 1000 * i )) });
+for (i = 0; i < 24; i++) {
+ bulk.insert({x: new Date(now - (3600 * 1000 * i))});
}
assert.writeOK(bulk.execute());
rt.awaitReplication();
-assert.eq( 24 , mastercol.count() , "docs not inserted on primary" );
-assert.eq( 24 , slave1col.count() , "docs not inserted on secondary" );
+assert.eq(24, mastercol.count(), "docs not inserted on primary");
+assert.eq(24, slave1col.count(), "docs not inserted on secondary");
print("Initial Stats:");
print("Master:");
-printjson( mastercol.stats() );
+printjson(mastercol.stats());
print("Slave1:");
-printjson( slave1col.stats() );
+printjson(slave1col.stats());
// create TTL index, wait for TTL monitor to kick in, then check that
// the correct number of docs age out
-assert.commandWorked(mastercol.ensureIndex({ x: 1 }, { expireAfterSeconds: 20000 }));
+assert.commandWorked(mastercol.ensureIndex({x: 1}, {expireAfterSeconds: 20000}));
rt.awaitReplication();
-sleep(70*1000); // TTL monitor runs every 60 seconds, so wait 70
+sleep(70 * 1000); // TTL monitor runs every 60 seconds, so wait 70
print("Stats after waiting for TTL Monitor:");
print("Master:");
-printjson( mastercol.stats() );
+printjson(mastercol.stats());
print("Slave1:");
-printjson( slave1col.stats() );
-
-assert.eq( 6 , mastercol.count() , "docs not deleted on primary" );
-assert.eq( 6 , slave1col.count() , "docs not deleted on secondary" );
+printjson(slave1col.stats());
+assert.eq(6, mastercol.count(), "docs not deleted on primary");
+assert.eq(6, slave1col.count(), "docs not deleted on secondary");
/******** Part 2 ***************/
@@ -70,33 +69,31 @@ var config = rt.getReplSetConfig();
config.version = 2;
reconfig(rt, config);
-var slave2col = slave.getDB( 'd' )[ 'c' ];
+var slave2col = slave.getDB('d')['c'];
// check that the new secondary has the correct number of docs
print("New Slave stats:");
-printjson( slave2col.stats() );
-
-assert.eq( 6 , slave2col.count() , "wrong number of docs on new secondary");
+printjson(slave2col.stats());
+assert.eq(6, slave2col.count(), "wrong number of docs on new secondary");
/******* Part 3 *****************/
-//Check that the collMod command successfully updates the expireAfterSeconds field
-masterdb.runCommand( { collMod : "c",
- index : { keyPattern : {x : 1}, expireAfterSeconds : 10000} } );
+// Check that the collMod command successfully updates the expireAfterSeconds field
+masterdb.runCommand({collMod: "c", index: {keyPattern: {x: 1}, expireAfterSeconds: 10000}});
rt.awaitReplication();
-function getTTLTime( theCollection, theKey ) {
+function getTTLTime(theCollection, theKey) {
var indexes = theCollection.getIndexes();
- for ( var i = 0; i < indexes.length; i++ ) {
- if ( friendlyEqual( theKey, indexes[i].key ) )
+ for (var i = 0; i < indexes.length; i++) {
+ if (friendlyEqual(theKey, indexes[i].key))
return indexes[i].expireAfterSeconds;
}
throw "not found";
}
-printjson( masterdb.c.getIndexes() );
-assert.eq( 10000, getTTLTime( masterdb.c, { x : 1 } ) );
-assert.eq( 10000, getTTLTime( slave1db.c, { x : 1 } ) );
+printjson(masterdb.c.getIndexes());
+assert.eq(10000, getTTLTime(masterdb.c, {x: 1}));
+assert.eq(10000, getTTLTime(slave1db.c, {x: 1}));
// finish up
rt.stopSet();
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
index 740f49b665d..45a5d752106 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js
@@ -11,20 +11,20 @@ var conn;
var primeSystemReplset = function() {
conn = MongoRunner.runMongod();
var localDB = conn.getDB("local");
- localDB.system.replset.insert({x:1});
+ localDB.system.replset.insert({x: 1});
print("create a TTL collection");
var testDB = conn.getDB("test");
- assert.commandWorked(testDB.foo.ensureIndex({ x: 1 }, { expireAfterSeconds: 2 }));
+ assert.commandWorked(testDB.foo.ensureIndex({x: 1}, {expireAfterSeconds: 2}));
};
var restartWithConfig = function() {
MongoRunner.stopMongod(conn.port, 15);
- conn = MongoRunner.runMongod({restart:true, cleanData: false, dbpath: conn.dbpath});
+ conn = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: conn.dbpath});
testDB = conn.getDB("test");
var n = 100;
- for (var i=0; i<n; i++) {
- testDB.foo.insert({x : new Date()});
+ for (var i = 0; i < n; i++) {
+ testDB.foo.insert({x: new Date()});
}
print("sleeping 65 seconds");
@@ -39,7 +39,7 @@ var restartWithoutConfig = function() {
MongoRunner.stopMongod(conn.port, 15);
- conn = MongoRunner.runMongod({restart:true, cleanData: false, dbpath: conn.dbpath});
+ conn = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: conn.dbpath});
assert.soon(function() {
return conn.getDB("test").foo.count() < 100;
diff --git a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
index a4319a15c13..1ec78f6ee65 100644
--- a/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
+++ b/jstests/noPassthroughWithMongod/ttl_repl_secondary_disabled.js
@@ -1,7 +1,7 @@
/** Test TTL docs are not deleted from secondaries directly
*/
-var rt = new ReplSetTest( { name : "ttl_repl" , nodes: 2 } );
+var rt = new ReplSetTest({name: "ttl_repl", nodes: 2});
// setup set
var nodes = rt.startSet();
@@ -11,26 +11,26 @@ rt.awaitSecondaryNodes();
var slave1 = rt.getSecondary();
// shortcuts
-var masterdb = master.getDB( 'd' );
-var slave1db = slave1.getDB( 'd' );
-var mastercol = masterdb[ 'c' ];
-var slave1col = slave1db[ 'c' ];
+var masterdb = master.getDB('d');
+var slave1db = slave1.getDB('d');
+var mastercol = masterdb['c'];
+var slave1col = slave1db['c'];
// create TTL index, wait for TTL monitor to kick in, then check things
-mastercol.ensureIndex( { x : 1 } , { expireAfterSeconds : 10 } );
+mastercol.ensureIndex({x: 1}, {expireAfterSeconds: 10});
rt.awaitReplication();
-//increase logging
-assert.commandWorked(slave1col.getDB().adminCommand({setParameter:1, logLevel:1}));
+// increase logging
+assert.commandWorked(slave1col.getDB().adminCommand({setParameter: 1, logLevel: 1}));
-//insert old doc (10 minutes old) directly on secondary using godinsert
-assert.commandWorked(slave1col.runCommand("godinsert",
- {obj: {_id: new Date(), x: new Date( (new Date()).getTime() - 600000 ) } }));
-assert.eq(1, slave1col.count(), "missing inserted doc" );
+// insert old doc (10 minutes old) directly on secondary using godinsert
+assert.commandWorked(slave1col.runCommand(
+ "godinsert", {obj: {_id: new Date(), x: new Date((new Date()).getTime() - 600000)}}));
+assert.eq(1, slave1col.count(), "missing inserted doc");
-sleep(70*1000); //wait for 70seconds
-assert.eq(1, slave1col.count(), "ttl deleted my doc!" );
+sleep(70 * 1000); // wait for 70seconds
+assert.eq(1, slave1col.count(), "ttl deleted my doc!");
// looking for these errors : "Assertion: 13312:replSet error : logOp() but not primary",
// "replSet error : logOp() but can't accept write to collection <ns>/n" + "Fatal Assertion 17405"
@@ -38,13 +38,13 @@ assert.eq(1, slave1col.count(), "ttl deleted my doc!" );
var errorStrings = ["Assertion: 13312", "Assertion 17405"];
var foundError = false;
var foundLine = "";
-var globalLogLines = assert.commandWorked(slave1col.getDB().adminCommand({getLog:"global"})).log;
+var globalLogLines = assert.commandWorked(slave1col.getDB().adminCommand({getLog: "global"})).log;
for (i in globalLogLines) {
var line = globalLogLines[i];
errorStrings.forEach(function(errorString) {
- if (line.match( errorString )) {
+ if (line.match(errorString)) {
foundError = true;
- foundLine = line; // replace error string with what we found.
+ foundLine = line; // replace error string with what we found.
}
});
}
diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js
index e1c550d74c6..d6896665b65 100644
--- a/jstests/noPassthroughWithMongod/ttl_sharded.js
+++ b/jstests/noPassthroughWithMongod/ttl_sharded.js
@@ -7,85 +7,76 @@
*/
// start up a new sharded cluster
-var s = new ShardingTest({ shards : 2, mongos : 1});
+var s = new ShardingTest({shards: 2, mongos: 1});
var dbname = "testDB";
var coll = "ttl_sharded";
var ns = dbname + "." + coll;
-t = s.getDB( dbname ).getCollection( coll );
+t = s.getDB(dbname).getCollection(coll);
// enable sharding of the collection. Only 1 chunk initially
-s.adminCommand( { enablesharding : dbname } );
+s.adminCommand({enablesharding: dbname});
s.ensurePrimaryShard(dbname, 'shard0001');
-s.adminCommand( { shardcollection : ns , key: { _id : 1 } } );
+s.adminCommand({shardcollection: ns, key: {_id: 1}});
// insert 24 docs, with timestamps at one hour intervals
var now = (new Date()).getTime();
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 24; i++) {
- var past = new Date( now - ( 3600 * 1000 * i ) );
- bulk.insert({ _id: i, x: past });
+ var past = new Date(now - (3600 * 1000 * i));
+ bulk.insert({_id: i, x: past});
}
assert.writeOK(bulk.execute());
-assert.eq( t.count() , 24 , "initial docs not inserted");
+assert.eq(t.count(), 24, "initial docs not inserted");
// create the TTL index which delete anything older than ~5.5 hours
-t.ensureIndex( { x : 1 } , { expireAfterSeconds : 20000 } );
+t.ensureIndex({x: 1}, {expireAfterSeconds: 20000});
// split chunk in half by _id, and move one chunk to the other shard
-s.adminCommand( {split : ns , middle : {_id : 12 } } );
-s.adminCommand( {moveChunk : ns ,
- find : {_id : 0} ,
- to : s.getOther(s.getPrimaryShard(dbname)).name } );
+s.adminCommand({split: ns, middle: {_id: 12}});
+s.adminCommand({moveChunk: ns, find: {_id: 0}, to: s.getOther(s.getPrimaryShard(dbname)).name});
// one shard will lose 12/12 docs, the other 6/12, so count will go
// from 24 -> 18 or 12 -> 6
-assert.soon(
- function() {
- return t.count() < 7;
- }, "TTL index on x didn't delete enough" , 70 * 1000
-);
+assert.soon(function() {
+ return t.count() < 7;
+}, "TTL index on x didn't delete enough", 70 * 1000);
// ensure that count ultimately ends up at 6
-assert.eq( 0 , t.find( { x : { $lt : new Date( now - 20000000 ) } } ).count() );
-assert.eq( 6 , t.count() );
+assert.eq(0, t.find({x: {$lt: new Date(now - 20000000)}}).count());
+assert.eq(6, t.count());
// now lets check things explicily on each shard
-var shard0 = s._connections[0].getDB( dbname );
-var shard1 = s._connections[1].getDB( dbname );
+var shard0 = s._connections[0].getDB(dbname);
+var shard1 = s._connections[1].getDB(dbname);
print("Shard 0 coll stats:");
-printjson( shard0.getCollection( coll ).stats() );
+printjson(shard0.getCollection(coll).stats());
print("Shard 1 coll stats:");
-printjson( shard1.getCollection( coll ).stats() );
+printjson(shard1.getCollection(coll).stats());
-
-function getTTLTime( theCollection, theKey ) {
+function getTTLTime(theCollection, theKey) {
var indexes = theCollection.getIndexes();
- for ( var i = 0; i < indexes.length; i++ ) {
- if ( friendlyEqual( theKey, indexes[i].key ) )
+ for (var i = 0; i < indexes.length; i++) {
+ if (friendlyEqual(theKey, indexes[i].key))
return indexes[i].expireAfterSeconds;
}
throw "not found";
}
// Check that TTL index (with expireAfterSeconds field) appears on both shards
-assert.eq( 20000, getTTLTime( shard0.getCollection(coll), { x : 1 } ) );
-assert.eq( 20000, getTTLTime( shard1.getCollection(coll), { x : 1 } ) );
+assert.eq(20000, getTTLTime(shard0.getCollection(coll), {x: 1}));
+assert.eq(20000, getTTLTime(shard1.getCollection(coll), {x: 1}));
// Check that the collMod command successfully updates the expireAfterSeconds field
-s.getDB( dbname ).runCommand( { collMod : coll,
- index : { keyPattern : {x : 1}, expireAfterSeconds : 10000} } );
-assert.eq( 10000, getTTLTime( shard0.getCollection(coll), { x : 1 } ) );
-assert.eq( 10000, getTTLTime( shard1.getCollection(coll), { x : 1 } ) );
+s.getDB(dbname).runCommand({collMod: coll, index: {keyPattern: {x: 1}, expireAfterSeconds: 10000}});
+assert.eq(10000, getTTLTime(shard0.getCollection(coll), {x: 1}));
+assert.eq(10000, getTTLTime(shard1.getCollection(coll), {x: 1}));
-assert.soon(
- function() {
- return t.count() < 6;
- }, "new expireAfterSeconds value not taking effect" , 70 * 1000
-);
-assert.eq( 0 , t.find( { x : { $lt : new Date( now - 10000000 ) } } ).count() );
-assert.eq( 3 , t.count() );
+assert.soon(function() {
+ return t.count() < 6;
+}, "new expireAfterSeconds value not taking effect", 70 * 1000);
+assert.eq(0, t.find({x: {$lt: new Date(now - 10000000)}}).count());
+assert.eq(3, t.count());
s.stop();
-
diff --git a/jstests/noPassthroughWithMongod/unix_socket1.js b/jstests/noPassthroughWithMongod/unix_socket1.js
index 3cd64c3370e..d6f10062565 100644
--- a/jstests/noPassthroughWithMongod/unix_socket1.js
+++ b/jstests/noPassthroughWithMongod/unix_socket1.js
@@ -1,5 +1,5 @@
doesLogMatchRegex = function(logArray, regex) {
- for (var i = (logArray.length - 1); i >= 0; i--) {
+ for (var i = (logArray.length - 1); i >= 0; i--) {
var regexInLine = regex.exec(logArray[i]);
if (regexInLine != null) {
return true;
@@ -8,11 +8,10 @@ doesLogMatchRegex = function(logArray, regex) {
return false;
};
-
-if ( ! _isWindows() ) {
+if (!_isWindows()) {
hoststring = db.getMongo().host;
index = hoststring.lastIndexOf(':');
- if (index == -1){
+ if (index == -1) {
port = '27017';
} else {
port = hoststring.substr(index + 1);
@@ -20,24 +19,24 @@ if ( ! _isWindows() ) {
sock = new Mongo('/tmp/mongodb-' + port + '.sock');
sockdb = sock.getDB(db.getName());
- assert( sockdb.runCommand('ping').ok );
+ assert(sockdb.runCommand('ping').ok);
// Test unix socket path
var path = MongoRunner.dataDir + "/sockpath";
mkdir(path);
var dataPath = MongoRunner.dataDir + "/sockpath_data";
-
+
var conn = MongoRunner.runMongod({dbpath: dataPath, unixSocketPrefix: path});
-
+
var sock2 = new Mongo(path + "/mongodb-" + conn.port + ".sock");
sockdb2 = sock2.getDB(db.getName());
- assert( sockdb2.runCommand('ping').ok );
+ assert(sockdb2.runCommand('ping').ok);
// Test the naming of the unix socket
- var log = db.adminCommand({ getLog: 'global' });
+ var log = db.adminCommand({getLog: 'global'});
var ll = log.log;
var re = new RegExp("anonymous unix socket");
- assert( doesLogMatchRegex( ll, re ), "Log message did not contain 'anonymous unix socket'");
+ assert(doesLogMatchRegex(ll, re), "Log message did not contain 'anonymous unix socket'");
} else {
print("Not testing unix sockets on Windows");
}
diff --git a/jstests/noPassthroughWithMongod/validate_command.js b/jstests/noPassthroughWithMongod/validate_command.js
index 6e243070142..9c52c9acad7 100644
--- a/jstests/noPassthroughWithMongod/validate_command.js
+++ b/jstests/noPassthroughWithMongod/validate_command.js
@@ -23,19 +23,18 @@
t = db.jstests_validate;
t.drop();
- for(var i = 0; i < count; i++){
- t.insert({x:i});
+ for (var i = 0; i < count; i++) {
+ t.insert({x: i});
}
- t.ensureIndex({x:1}, {name: "forward"});
- t.ensureIndex({x:-1}, {name: "reverse"});
-
+ t.ensureIndex({x: 1}, {name: "forward"});
+ t.ensureIndex({x: -1}, {name: "reverse"});
// TEST NORMAL VALIDATE
var output = t.validate();
testValidate(output);
// TEST FULL
- var output = t.validate({full:true});
+ var output = t.validate({full: true});
testValidate(output);
}()); \ No newline at end of file
diff --git a/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js b/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js
index d927cbb541a..65ebef5ccf3 100644
--- a/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js
+++ b/jstests/noPassthroughWithMongod/wt_roundtrip_creation_string.js
@@ -24,25 +24,21 @@
var collStats = db.runCommand({collStats: collNamePrefix + '.source'});
assert.commandWorked(collStats);
- assert.commandWorked(db.runCommand({
- create: collNamePrefix + '.dest',
- storageEngine: {
- wiredTiger: {
- configString: collStats.wiredTiger.creationString
- }
- }
- }), 'unable to create collection using the creation string of another collection');
+ assert.commandWorked(
+ db.runCommand({
+ create: collNamePrefix + '.dest',
+ storageEngine: {wiredTiger: {configString: collStats.wiredTiger.creationString}}
+ }),
+ 'unable to create collection using the creation string of another collection');
assert.commandWorked(db.runCommand({
createIndexes: collNamePrefix + '.dest',
indexes: [{
key: {b: 1},
name: 'b_1',
- storageEngine: {
- wiredTiger: {
- configString: collStats.indexDetails.a_1.creationString
- }
- }
+ storageEngine:
+ {wiredTiger: {configString: collStats.indexDetails.a_1.creationString}}
}]
- }), 'unable to create index using the creation string of another index');
+ }),
+ 'unable to create index using the creation string of another index');
})();
diff --git a/jstests/parallel/allops.js b/jstests/parallel/allops.js
index aff77ced7fe..b0d6e7188a3 100644
--- a/jstests/parallel/allops.js
+++ b/jstests/parallel/allops.js
@@ -8,34 +8,34 @@ Random.setRandomSeed();
t = new ParallelTester();
-for( id = 0; id < 10; ++id ) {
- var g = new EventGenerator( id, "jstests_parallel_allops", Random.randInt( 20 ) );
- for( var j = 0; j < 1000; ++j ) {
- var op = Random.randInt( 3 );
- switch( op ) {
- case 0: // insert
- g.addInsert( { _id:Random.randInt( 1000 ) } );
+for (id = 0; id < 10; ++id) {
+ var g = new EventGenerator(id, "jstests_parallel_allops", Random.randInt(20));
+ for (var j = 0; j < 1000; ++j) {
+ var op = Random.randInt(3);
+ switch (op) {
+ case 0: // insert
+ g.addInsert({_id: Random.randInt(1000)});
break;
- case 1: // remove
- g.addRemove( { _id:Random.randInt( 1000 ) } );
+ case 1: // remove
+ g.addRemove({_id: Random.randInt(1000)});
break;
- case 2: // update
- g.addUpdate( {_id:{$lt:1000}}, { _id:Random.randInt( 1000 ) } );
+ case 2: // update
+ g.addUpdate({_id: {$lt: 1000}}, {_id: Random.randInt(1000)});
break;
default:
- assert( false, "Invalid op code" );
+ assert(false, "Invalid op code");
}
}
- t.add( EventGenerator.dispatch, g.getEvents() );
+ t.add(EventGenerator.dispatch, g.getEvents());
}
-var g = new EventGenerator( id, "jstests_parallel_allops", Random.randInt( 5 ) );
-for( var j = 1000; j < 3000; ++j ) {
- g.addCheckCount( j - 1000, { _id: {$gte:1000} }, j % 100 == 0, j % 500 == 0 );
- g.addInsert( {_id:j} );
+var g = new EventGenerator(id, "jstests_parallel_allops", Random.randInt(5));
+for (var j = 1000; j < 3000; ++j) {
+ g.addCheckCount(j - 1000, {_id: {$gte: 1000}}, j % 100 == 0, j % 500 == 0);
+ g.addInsert({_id: j});
}
-t.add( EventGenerator.dispatch, g.getEvents() );
+t.add(EventGenerator.dispatch, g.getEvents());
-t.run( "one or more tests failed" );
+t.run("one or more tests failed");
-assert( f.validate().valid );
+assert(f.validate().valid);
diff --git a/jstests/parallel/basic.js b/jstests/parallel/basic.js
index 5d9183556a1..e863d0d5f45 100644
--- a/jstests/parallel/basic.js
+++ b/jstests/parallel/basic.js
@@ -3,15 +3,15 @@ load('jstests/libs/parallelTester.js');
Random.setRandomSeed();
-var params = ParallelTester.createJstestsLists( 4 );
+var params = ParallelTester.createJstestsLists(4);
var t = new ParallelTester();
-for( i in params ) {
- t.add( ParallelTester.fileTester, params[ i ] );
+for (i in params) {
+ t.add(ParallelTester.fileTester, params[i]);
}
-t.run( "one or more tests failed", true );
+t.run("one or more tests failed", true);
-db.getCollectionNames().forEach( function( x ) {
- v = db[ x ].validate();
- assert( v.valid, "validate failed for " + x + " with " + tojson( v ) );
- } ); \ No newline at end of file
+db.getCollectionNames().forEach(function(x) {
+ v = db[x].validate();
+ assert(v.valid, "validate failed for " + x + " with " + tojson(v));
+}); \ No newline at end of file
diff --git a/jstests/parallel/basicPlus.js b/jstests/parallel/basicPlus.js
index 2153923c67a..2ecb20d6c21 100644
--- a/jstests/parallel/basicPlus.js
+++ b/jstests/parallel/basicPlus.js
@@ -6,26 +6,29 @@ c.drop();
Random.setRandomSeed();
-var params = ParallelTester.createJstestsLists( 4 );
+var params = ParallelTester.createJstestsLists(4);
var t = new ParallelTester();
-for( i in params ) {
- t.add( ParallelTester.fileTester, params[ i ] );
+for (i in params) {
+ t.add(ParallelTester.fileTester, params[i]);
}
-for( var i = 4; i < 8; ++i ) {
- var g = new EventGenerator( i, "jstests_parallel_basicPlus", Random.randInt( 20 ) );
- for( var j = ( i - 4 ) * 3000; j < ( i - 3 ) * 3000; ++j ) {
- var expected = j - ( ( i - 4 ) * 3000 );
- g.addCheckCount( expected, {_id:{$gte:((i-4)*3000),$lt:((i-3)*3000)}}, expected % 1000 == 0, expected % 500 == 0 );
- g.addInsert( {_id:j} );
+for (var i = 4; i < 8; ++i) {
+ var g = new EventGenerator(i, "jstests_parallel_basicPlus", Random.randInt(20));
+ for (var j = (i - 4) * 3000; j < (i - 3) * 3000; ++j) {
+ var expected = j - ((i - 4) * 3000);
+ g.addCheckCount(expected,
+ {_id: {$gte: ((i - 4) * 3000), $lt: ((i - 3) * 3000)}},
+ expected % 1000 == 0,
+ expected % 500 == 0);
+ g.addInsert({_id: j});
}
- t.add( EventGenerator.dispatch, g.getEvents() );
+ t.add(EventGenerator.dispatch, g.getEvents());
}
-t.run( "one or more tests failed", true );
+t.run("one or more tests failed", true);
-assert( c.validate().valid, "validate failed" );
-db.getCollectionNames().forEach( function( x ) {
- v = db[ x ].validate();
- assert( v.valid, "validate failed for " + x + " with " + tojson( v ) );
- } ); \ No newline at end of file
+assert(c.validate().valid, "validate failed");
+db.getCollectionNames().forEach(function(x) {
+ v = db[x].validate();
+ assert(v.valid, "validate failed for " + x + " with " + tojson(v));
+}); \ No newline at end of file
diff --git a/jstests/parallel/checkMultiThread.js b/jstests/parallel/checkMultiThread.js
index 0332fe32e84..a6b92689bec 100644
--- a/jstests/parallel/checkMultiThread.js
+++ b/jstests/parallel/checkMultiThread.js
@@ -2,13 +2,19 @@ load('jstests/libs/parallelTester.js');
var start = new Date();
print("start: " + start);
-var func = function() { db.runCommand({$eval: "sleep(10000);", nolock: true}); return new Date();};
-a = new ScopedThread( func );
-b = new ScopedThread( func );
+var func = function() {
+ db.runCommand({$eval: "sleep(10000);", nolock: true});
+ return new Date();
+};
+a = new ScopedThread(func);
+b = new ScopedThread(func);
a.start();
b.start();
a.join();
b.join();
-assert.lt( a.returnData().getMilliseconds(), start.getMilliseconds() + 15000, "A took more than 15s" );
-assert.lt( b.returnData().getMilliseconds(), start.getMilliseconds() + 15000, "B took more than 15s" );
-
+assert.lt(a.returnData().getMilliseconds(),
+ start.getMilliseconds() + 15000,
+ "A took more than 15s");
+assert.lt(b.returnData().getMilliseconds(),
+ start.getMilliseconds() + 15000,
+ "B took more than 15s");
diff --git a/jstests/parallel/del.js b/jstests/parallel/del.js
index 17c97bc499b..3128f89d05e 100644
--- a/jstests/parallel/del.js
+++ b/jstests/parallel/del.js
@@ -3,71 +3,66 @@ load('jstests/libs/parallelTester.js');
N = 1000;
HOST = db.getMongo().host;
-a = db.getSisterDB( "fooa" );
-b = db.getSisterDB( "foob" );
+a = db.getSisterDB("fooa");
+b = db.getSisterDB("foob");
a.dropDatabase();
b.dropDatabase();
-function del1( dbname, host, max ){
- var m = new Mongo( host );
- var db = m.getDB( "foo" + dbname );
+function del1(dbname, host, max) {
+ var m = new Mongo(host);
+ var db = m.getDB("foo" + dbname);
var t = db.del;
- while ( !db.del_parallel.count() ){
+ while (!db.del_parallel.count()) {
var r = Math.random();
- var n = Math.floor( Math.random() * max );
- if ( r < .9 ){
- t.insert( { x : n } );
+ var n = Math.floor(Math.random() * max);
+ if (r < .9) {
+ t.insert({x: n});
+ } else if (r < .98) {
+ t.remove({x: n});
+ } else if (r < .99) {
+ t.remove({x: {$lt: n}});
+ } else {
+ t.remove({x: {$gt: n}});
}
- else if ( r < .98 ){
- t.remove( { x : n } );
- }
- else if ( r < .99 ){
- t.remove( { x : { $lt : n } } );
- }
- else {
- t.remove( { x : { $gt : n } } );
- }
- if ( r > .9999 )
- print( t.count() );
+ if (r > .9999)
+ print(t.count());
}
}
-function del2( dbname, host, max ){
- var m = new Mongo( host );
- var db = m.getDB( "foo" + dbname );
+function del2(dbname, host, max) {
+ var m = new Mongo(host);
+ var db = m.getDB("foo" + dbname);
var t = db.del;
- while ( !db.del_parallel.count() ){
+ while (!db.del_parallel.count()) {
var r = Math.random();
- var n = Math.floor( Math.random() * max );
+ var n = Math.floor(Math.random() * max);
var s = Math.random() > .5 ? 1 : -1;
-
- if ( r < .5 ){
- t.findOne( { x : n } );
- }
- else if ( r < .75 ){
- t.find( { x : { $lt : n } } ).sort( { x : s } ).itcount();
- }
- else {
- t.find( { x : { $gt : n } } ).sort( { x : s } ).itcount();
+
+ if (r < .5) {
+ t.findOne({x: n});
+ } else if (r < .75) {
+ t.find({x: {$lt: n}}).sort({x: s}).itcount();
+ } else {
+ t.find({x: {$gt: n}}).sort({x: s}).itcount();
}
}
}
all = [];
-all.push( fork( del1 , "a", HOST, N ) );
-all.push( fork( del2 , "a", HOST, N ) );
-all.push( fork( del1 , "b", HOST, N ) );
-all.push( fork( del2 , "b", HOST, N ) );
+all.push(fork(del1, "a", HOST, N));
+all.push(fork(del2, "a", HOST, N));
+all.push(fork(del1, "b", HOST, N));
+all.push(fork(del2, "b", HOST, N));
-for ( i=0; i<all.length; i++ )
+for (i = 0; i < all.length; i++)
all[i].start();
-for ( i=0; i<10; i++ ){
- sleep( 2000 );
- print( "dropping" );
+for (i = 0; i < 10; i++) {
+ sleep(2000);
+ print("dropping");
a.dropDatabase();
b.dropDatabase();
}
@@ -75,5 +70,5 @@ for ( i=0; i<10; i++ ){
a.del_parallel.save({done: 1});
b.del_parallel.save({done: 1});
-for ( i=0; i<all.length; i++ )
+for (i = 0; i < all.length; i++)
all[i].join();
diff --git a/jstests/parallel/insert.js b/jstests/parallel/insert.js
index 64a357905c5..e2dee8053d9 100644
--- a/jstests/parallel/insert.js
+++ b/jstests/parallel/insert.js
@@ -3,23 +3,23 @@ load('jstests/libs/parallelTester.js');
f = db.jstests_parallel_insert;
f.drop();
-f.ensureIndex( {who:1} );
+f.ensureIndex({who: 1});
Random.setRandomSeed();
t = new ParallelTester();
-for( id = 0; id < 10; ++id ) {
- var g = new EventGenerator( id, "jstests_parallel_insert", Random.randInt( 20 ) );
- for( j = 0; j < 1000; ++j ) {
- if ( j % 50 == 0 ) {
- g.addCheckCount( j, {who:id} );
+for (id = 0; id < 10; ++id) {
+ var g = new EventGenerator(id, "jstests_parallel_insert", Random.randInt(20));
+ for (j = 0; j < 1000; ++j) {
+ if (j % 50 == 0) {
+ g.addCheckCount(j, {who: id});
}
- g.addInsert( { i:j, who:id } );
+ g.addInsert({i: j, who: id});
}
- t.add( EventGenerator.dispatch, g.getEvents() );
+ t.add(EventGenerator.dispatch, g.getEvents());
}
-t.run( "one or more tests failed" );
+t.run("one or more tests failed");
-assert( f.validate().valid );
+assert(f.validate().valid);
diff --git a/jstests/parallel/manyclients.js b/jstests/parallel/manyclients.js
index 1e845601aef..ca31f333635 100644
--- a/jstests/parallel/manyclients.js
+++ b/jstests/parallel/manyclients.js
@@ -3,7 +3,7 @@ load('jstests/libs/parallelTester.js');
f = db.jstests_parallel_manyclients;
f.drop();
-f.ensureIndex( {who:1} );
+f.ensureIndex({who: 1});
Random.setRandomSeed();
@@ -11,31 +11,29 @@ t = new ParallelTester();
// Reducing the number of threads to 100 because of WT-1989
numThreads = 100;
-buildInfo = db.adminCommand( "buildInfo" );
+buildInfo = db.adminCommand("buildInfo");
-if ( buildInfo.bits < 64 ||
- buildInfo.buildEnvironment.target_os != "linux" ||
- buildInfo.debug ) {
+if (buildInfo.bits < 64 || buildInfo.buildEnvironment.target_os != "linux" || buildInfo.debug) {
numThreads = 50;
}
-numThreads = Math.min( numThreads, db.serverStatus().connections.available / 3 );
+numThreads = Math.min(numThreads, db.serverStatus().connections.available / 3);
-print( "numThreads: " + numThreads );
+print("numThreads: " + numThreads);
-for( id = 0; id < numThreads; ++id ) {
- var g = new EventGenerator( id, "jstests_parallel_manyclients", Random.randInt( 20 ) );
- for( j = 0; j < 1000; ++j ) {
- if ( j % 50 == 0 ) {
- g.addCheckCount( j, {who:id}, false );
+for (id = 0; id < numThreads; ++id) {
+ var g = new EventGenerator(id, "jstests_parallel_manyclients", Random.randInt(20));
+ for (j = 0; j < 1000; ++j) {
+ if (j % 50 == 0) {
+ g.addCheckCount(j, {who: id}, false);
}
- g.addInsert( { i:j, who:id } );
+ g.addInsert({i: j, who: id});
}
- t.add( EventGenerator.dispatch, g.getEvents() );
+ t.add(EventGenerator.dispatch, g.getEvents());
}
-print( "done preparing test" );
+print("done preparing test");
-t.run( "one or more tests failed" );
+t.run("one or more tests failed");
-assert( f.validate().valid );
+assert(f.validate().valid);
diff --git a/jstests/parallel/repl.js b/jstests/parallel/repl.js
index 714d0f7742e..314cd4df335 100644
--- a/jstests/parallel/repl.js
+++ b/jstests/parallel/repl.js
@@ -3,59 +3,59 @@ load('jstests/libs/parallelTester.js');
baseName = "parallel_repl";
-rt = new ReplTest( baseName );
+rt = new ReplTest(baseName);
-m = rt.start( true );
-s = rt.start( false );
+m = rt.start(true);
+s = rt.start(false);
// tests need to run against master server
-db = m.getDB( "test" );
+db = m.getDB("test");
host = db.getMongo().host;
Random.setRandomSeed();
t = new ParallelTester();
-for( id = 0; id < 10; ++id ) {
- var g = new EventGenerator( id, baseName, Random.randInt( 20 ), host );
- for( var j = 0; j < 1000; ++j ) {
- var op = Random.randInt( 3 );
- switch( op ) {
- case 0: // insert
- g.addInsert( { _id:Random.randInt( 1000 ) } );
+for (id = 0; id < 10; ++id) {
+ var g = new EventGenerator(id, baseName, Random.randInt(20), host);
+ for (var j = 0; j < 1000; ++j) {
+ var op = Random.randInt(3);
+ switch (op) {
+ case 0: // insert
+ g.addInsert({_id: Random.randInt(1000)});
break;
- case 1: // remove
- g.addRemove( { _id:Random.randInt( 1000 ) } );
+ case 1: // remove
+ g.addRemove({_id: Random.randInt(1000)});
break;
- case 2: // update
- g.addUpdate( {_id:{$lt:1000}}, {$inc:{a:5}} );
+ case 2: // update
+ g.addUpdate({_id: {$lt: 1000}}, {$inc: {a: 5}});
break;
default:
- assert( false, "Invalid op code" );
+ assert(false, "Invalid op code");
}
}
- t.add( EventGenerator.dispatch, g.getEvents() );
+ t.add(EventGenerator.dispatch, g.getEvents());
}
-var g = new EventGenerator( id, baseName, Random.randInt( 5 ), host );
-for( var j = 1000; j < 3000; ++j ) {
- g.addCheckCount( j - 1000, { _id: {$gte:1000} }, j % 100 == 0, j % 500 == 0 );
- g.addInsert( {_id:j} );
+var g = new EventGenerator(id, baseName, Random.randInt(5), host);
+for (var j = 1000; j < 3000; ++j) {
+ g.addCheckCount(j - 1000, {_id: {$gte: 1000}}, j % 100 == 0, j % 500 == 0);
+ g.addInsert({_id: j});
}
-t.add( EventGenerator.dispatch, g.getEvents() );
+t.add(EventGenerator.dispatch, g.getEvents());
-t.run( "one or more tests failed" );
+t.run("one or more tests failed");
-masterValidation = m.getDB( "test" )[ baseName ].validate();
-assert( masterValidation.valid, tojson( masterValidation ) );
+masterValidation = m.getDB("test")[baseName].validate();
+assert(masterValidation.valid, tojson(masterValidation));
-slaveValidation = s.getDB( "test" )[ baseName ].validate();
-assert( slaveValidation.valid, tojson( slaveValidation ) );
+slaveValidation = s.getDB("test")[baseName].validate();
+assert(slaveValidation.valid, tojson(slaveValidation));
-assert.soon( function() {
- mh = m.getDB( "test" ).runCommand( "dbhash" );
-// printjson( mh );
- sh = s.getDB( "test" ).runCommand( "dbhash" );
-// printjson( sh );
- return mh.md5 == sh.md5;
- } );
+assert.soon(function() {
+ mh = m.getDB("test").runCommand("dbhash");
+ // printjson( mh );
+ sh = s.getDB("test").runCommand("dbhash");
+ // printjson( sh );
+ return mh.md5 == sh.md5;
+});
diff --git a/jstests/parallel/shellfork.js b/jstests/parallel/shellfork.js
index ad05c0d4fbe..571f917fc4a 100644
--- a/jstests/parallel/shellfork.js
+++ b/jstests/parallel/shellfork.js
@@ -1,36 +1,40 @@
load('jstests/libs/parallelTester.js');
-a = fork( function( a, b ) { return a / b; }, 10, 2 );
+a = fork(function(a, b) {
+ return a / b;
+}, 10, 2);
a.start();
-b = fork( function( a, b, c ) { return a + b + c; }, 18, " is a ", "multiple of 3" );
-makeFunny = function( text ) {
+b = fork(function(a, b, c) {
+ return a + b + c;
+}, 18, " is a ", "multiple of 3");
+makeFunny = function(text) {
return text + " ha ha!";
};
-c = fork( makeFunny, "paisley" );
+c = fork(makeFunny, "paisley");
c.start();
b.start();
b.join();
-assert.eq( 5, a.returnData() );
-assert.eq( "18 is a multiple of 3", b.returnData() );
-assert.eq( "paisley ha ha!", c.returnData() );
+assert.eq(5, a.returnData());
+assert.eq("18 is a multiple of 3", b.returnData());
+assert.eq("paisley ha ha!", c.returnData());
-z = fork( function( a ) {
- load('jstests/libs/parallelTester.js');
- var y = fork( function( a ) {
- return a + 1; }, 5 );
- y.start();
- return y.returnData() + a;
- }, 1 );
+z = fork(function(a) {
+ load('jstests/libs/parallelTester.js');
+ var y = fork(function(a) {
+ return a + 1;
+ }, 5);
+ y.start();
+ return y.returnData() + a;
+}, 1);
z.start();
-assert.eq( 7, z.returnData() );
-
+assert.eq(7, z.returnData());
t = 1;
-z = new ScopedThread( function() {
- assert( typeof( t ) == "undefined", "t not undefined" );
- t = 5;
- return t;
- } );
+z = new ScopedThread(function() {
+ assert(typeof(t) == "undefined", "t not undefined");
+ t = 5;
+ return t;
+});
z.start();
-assert.eq( 5, z.returnData() );
-assert.eq( 1, t ); \ No newline at end of file
+assert.eq(5, z.returnData());
+assert.eq(1, t); \ No newline at end of file
diff --git a/jstests/parallel/update_serializability1.js b/jstests/parallel/update_serializability1.js
index 55fc119101d..6fc41204c3c 100644
--- a/jstests/parallel/update_serializability1.js
+++ b/jstests/parallel/update_serializability1.js
@@ -5,23 +5,25 @@ t.drop();
N = 100000;
bulk = t.initializeUnorderedBulkOp();
-for ( var i = 0; i < N; i++ ) {
- bulk.insert( { _id : i, a : i, b: N-i, x : 1, y : 1 } );
+for (var i = 0; i < N; i++) {
+ bulk.insert({_id: i, a: i, b: N - i, x: 1, y: 1});
}
bulk.execute();
-t.ensureIndex( { a : 1 } );
-t.ensureIndex( { b : 1 } );
+t.ensureIndex({a: 1});
+t.ensureIndex({b: 1});
-s1 = startParallelShell( "db.update_serializability1.update( { a : { $gte : 0 } }, { $set : { b : " + (N+1) + ", x : 2 } }, false, true );" );
-s2 = startParallelShell( "db.update_serializability1.update( { b : { $lte : " + N + " } }, { $set : { a : -1, y : 2 } }, false, true );" );
+s1 = startParallelShell("db.update_serializability1.update( { a : { $gte : 0 } }, { $set : { b : " +
+ (N + 1) + ", x : 2 } }, false, true );");
+s2 = startParallelShell("db.update_serializability1.update( { b : { $lte : " + N +
+ " } }, { $set : { a : -1, y : 2 } }, false, true );");
s1();
s2();
// some of each type should have gotten done
-assert( t.find( { x : 2 } ).count() > 0 );
-assert( t.find( { y : 2 } ).count() > 0 );
+assert(t.find({x: 2}).count() > 0);
+assert(t.find({y: 2}).count() > 0);
// both operations should never happen on a document
-assert.eq( 0, t.find( { x : 2, y : 2 } ).count() );
+assert.eq(0, t.find({x: 2, y: 2}).count());
diff --git a/jstests/parallel/update_serializability2.js b/jstests/parallel/update_serializability2.js
index c99eafea9ef..c2672491177 100644
--- a/jstests/parallel/update_serializability2.js
+++ b/jstests/parallel/update_serializability2.js
@@ -6,27 +6,28 @@ function test() {
var N = 100000;
var bulk = t.initializeUnorderedBulkOp();
- for ( var i = 0; i < N; i++ ) {
- bulk.insert( { _id : i, a : i, b: N-i, x : 1, y : 1 } );
+ for (var i = 0; i < N; i++) {
+ bulk.insert({_id: i, a: i, b: N - i, x: 1, y: 1});
}
bulk.execute();
- t.ensureIndex( { a : 1 } );
- t.ensureIndex( { b : 1 } );
+ t.ensureIndex({a: 1});
+ t.ensureIndex({b: 1});
- var s1 = startParallelShell( "db.update_serializability1.update( { a : { $gte : 0 } }, { $set : { x : 2 } }, false, true );" );
- var s2 = startParallelShell( "db.update_serializability1.update( { b : { $lte : " + N + " } }, { $set : { y : 2 } }, false, true );" );
+ var s1 = startParallelShell(
+ "db.update_serializability1.update( { a : { $gte : 0 } }, { $set : { x : 2 } }, false, true );");
+ var s2 = startParallelShell("db.update_serializability1.update( { b : { $lte : " + N +
+ " } }, { $set : { y : 2 } }, false, true );");
s1();
s2();
// both operations should happen on every document
- assert.eq( N, t.find( { x : 2, y : 2 } ).count() );
+ assert.eq(N, t.find({x: 2, y: 2}).count());
}
if (db.serverStatus().storageEngine.name == 'mmapv1') {
- jsTest.log('skipping test on mmapv1'); // This is only guaranteed on other engines.
-}
-else {
+ jsTest.log('skipping test on mmapv1'); // This is only guaranteed on other engines.
+} else {
test();
}
diff --git a/jstests/perf/compact_speed_test.js b/jstests/perf/compact_speed_test.js
index 61733f104c2..440affd390b 100755..100644
--- a/jstests/perf/compact_speed_test.js
+++ b/jstests/perf/compact_speed_test.js
@@ -1,20 +1,23 @@
if (1) {
-
t = db.compactspeedtest;
t.drop();
- var obj = { x: 1, y: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", z: [1, 2] };
+ var obj = {
+ x: 1,
+ y: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ z: [1, 2]
+ };
var start = new Date();
function timed() {
db.getLastError();
var dt = (new Date()) - start;
- //print("time: " + dt);
+ // print("time: " + dt);
start = new Date();
return dt;
}
- //print("adding data");
+ // print("adding data");
var N = 100000;
if (db.adminCommand("buildInfo").debug)
N = 10000;
@@ -25,27 +28,27 @@ if (1) {
}
var a = timed();
- //print("index");
- t.ensureIndex({ x: 1 });
- //print("index");
- t.ensureIndex({ y: 1 });
- //print("index");
- t.ensureIndex({ z: 1 });
+ // print("index");
+ t.ensureIndex({x: 1});
+ // print("index");
+ t.ensureIndex({y: 1});
+ // print("index");
+ t.ensureIndex({z: 1});
a += timed();
- //print("count:" + t.count());
+ // print("count:" + t.count());
timed();
{
- //print("compact");
- var res = db.runCommand({ compact: 'compactspeedtest', dev: true });
+ // print("compact");
+ var res = db.runCommand({compact: 'compactspeedtest', dev: true});
b = timed();
- //printjson(res);
+ // printjson(res);
assert(res.ok);
- //print("validate");
+ // print("validate");
var v = t.validate(true);
assert(v.ok);
diff --git a/jstests/perf/find1.js b/jstests/perf/find1.js
index ecd94e52579..6a9d3b75355 100644
--- a/jstests/perf/find1.js
+++ b/jstests/perf/find1.js
@@ -10,21 +10,21 @@ function testSetup(dbConn) {
var t = dbConn[collection_name];
t.drop();
- for (var i=0; i<size; i++){
- t.save({ num : i });
- if (i == 0 )
- t.ensureIndex( { num : 1 } );
+ for (var i = 0; i < size; i++) {
+ t.save({num: i});
+ if (i == 0)
+ t.ensureIndex({num: 1});
}
}
-function resetQueryCache( db ) {
- db[ collection_name ].createIndex( { a: 1 }, "dumbIndex" );
- db[ collection_name ].dropIndex( "dumbIndex" );
+function resetQueryCache(db) {
+ db[collection_name].createIndex({a: 1}, "dumbIndex");
+ db[collection_name].dropIndex("dumbIndex");
}
-function between( low, high, val, msg ) {
- assert( low < val, msg );
- assert( val < high, msg );
+function between(low, high, val, msg) {
+ assert(low < val, msg);
+ assert(val < high, msg);
}
/**
@@ -32,56 +32,51 @@ function between( low, high, val, msg ) {
* from front of collection vs end, using $lt
*/
function testFindLTFrontBack(dbConn) {
-
var results = {};
var t = dbConn[collection_name];
- resetQueryCache( dbConn );
- results.oneInOrderLTFirst = Date.timeFunc(
- function(){
- assert( t.find( { num : {$lt : 20} } ).sort( { num : 1 } ).limit(10).toArray().length == 10);
- } , calls );
-
- resetQueryCache( dbConn );
- results.oneInOrderLTLast = Date.timeFunc(
- function(){
- assert( t.find( { num : {$lt : size-20 }} ).sort( { num : 1 } ).limit(10).toArray().length == 10);
- } , calls );
-
-
- between( 0.9, 1.1, results.oneInOrderLTFirst / results.oneInOrderLTLast,
- "first / last (" + results.oneInOrderLTFirst + " / " + results.oneInOrderLTLast + " ) = " +
- results.oneInOrderLTFirst / results.oneInOrderLTLast + " not in [0.9, 1.1]" );
+ resetQueryCache(dbConn);
+ results.oneInOrderLTFirst = Date.timeFunc(function() {
+ assert(t.find({num: {$lt: 20}}).sort({num: 1}).limit(10).toArray().length == 10);
+ }, calls);
+
+ resetQueryCache(dbConn);
+ results.oneInOrderLTLast = Date.timeFunc(function() {
+ assert(t.find({num: {$lt: size - 20}}).sort({num: 1}).limit(10).toArray().length == 10);
+ }, calls);
+
+ between(0.9,
+ 1.1,
+ results.oneInOrderLTFirst / results.oneInOrderLTLast,
+ "first / last (" + results.oneInOrderLTFirst + " / " + results.oneInOrderLTLast +
+ " ) = " + results.oneInOrderLTFirst / results.oneInOrderLTLast +
+ " not in [0.9, 1.1]");
}
-
-
/**
* Tests fetching a set of 10 objects in sorted order, comparing getting
* from front of collection vs end
*/
function testFindGTFrontBack(dbConn) {
-
var results = {};
var t = dbConn[collection_name];
-
- resetQueryCache( dbConn );
- results.oneInOrderGTFirst = Date.timeFunc(
- function(){
- assert( t.find( { num : {$gt : 5} } ).sort( { num : 1 } ).limit(10).toArray().length == 10);
- } , calls );
-
- resetQueryCache( dbConn );
- results.oneInOrderGTLast = Date.timeFunc(
- function(){
- assert( t.find( { num : {$gt : size-20 }} ).sort( { num : 1 } ).limit(10).toArray().length == 10);
- } , calls );
-
-
- between( 0.25, 4.0, results.oneInOrderGTFirst / results.oneInOrderGTLast,
- "first / last (" + results.oneInOrderGTFirst + " / " + results.oneInOrderGTLast + " ) = " +
- results.oneInOrderGTFirst / results.oneInOrderGTLast + " not in [0.25, 4.0]" );
+ resetQueryCache(dbConn);
+ results.oneInOrderGTFirst = Date.timeFunc(function() {
+ assert(t.find({num: {$gt: 5}}).sort({num: 1}).limit(10).toArray().length == 10);
+ }, calls);
+
+ resetQueryCache(dbConn);
+ results.oneInOrderGTLast = Date.timeFunc(function() {
+ assert(t.find({num: {$gt: size - 20}}).sort({num: 1}).limit(10).toArray().length == 10);
+ }, calls);
+
+ between(0.25,
+ 4.0,
+ results.oneInOrderGTFirst / results.oneInOrderGTLast,
+ "first / last (" + results.oneInOrderGTFirst + " / " + results.oneInOrderGTLast +
+ " ) = " + results.oneInOrderGTFirst / results.oneInOrderGTLast +
+ " not in [0.25, 4.0]");
}
testSetup(db);
diff --git a/jstests/perf/geo_near1.js b/jstests/perf/geo_near1.js
index 36cb9da2cfb..c16c41bd080 100644
--- a/jstests/perf/geo_near1.js
+++ b/jstests/perf/geo_near1.js
@@ -1,11 +1,10 @@
var t = db.bench.geo_near1;
t.drop();
-var numPts = 1000*1000;
+var numPts = 1000 * 1000;
-
-for (var i=0; i < numPts; i++){
+for (var i = 0; i < numPts; i++) {
x = (Math.random() * 100) - 50;
y = (Math.random() * 100) - 50;
- t.insert({loc: [x,y], i: i});
+ t.insert({loc: [x, y], i: i});
}
diff --git a/jstests/perf/index1.js b/jstests/perf/index1.js
index 7bcf4b71ab1..ecb749069b9 100644
--- a/jstests/perf/index1.js
+++ b/jstests/perf/index1.js
@@ -2,19 +2,23 @@
t = db.perf.index1;
t.drop();
-for ( var i=0; i<100000; i++ ){
- t.save( { x : i } );
+for (var i = 0; i < 100000; i++) {
+ t.save({x: i});
}
t.findOne();
-printjson( db.serverStatus().mem );
+printjson(db.serverStatus().mem);
-for ( var i=0; i<5; i++ ){
- nonu = Date.timeFunc( function(){ t.ensureIndex( { x : 1 } ); } );
- t.dropIndex( { x : 1 } );
- u = Date.timeFunc( function(){ t.ensureIndex( { x : 1 }, { unique : 1 } ); } );
- t.dropIndex( { x : 1 } );
- print( "non unique: " + nonu + " unique: " + u );
- printjson( db.serverStatus().mem );
+for (var i = 0; i < 5; i++) {
+ nonu = Date.timeFunc(function() {
+ t.ensureIndex({x: 1});
+ });
+ t.dropIndex({x: 1});
+ u = Date.timeFunc(function() {
+ t.ensureIndex({x: 1}, {unique: 1});
+ });
+ t.dropIndex({x: 1});
+ print("non unique: " + nonu + " unique: " + u);
+ printjson(db.serverStatus().mem);
}
diff --git a/jstests/perf/mr_bench.js b/jstests/perf/mr_bench.js
index 38ef57f0835..c48a9ddbe36 100644
--- a/jstests/perf/mr_bench.js
+++ b/jstests/perf/mr_bench.js
@@ -2,15 +2,18 @@
t = db.mr_bench;
t.drop();
-function getRandomStr(L){
- var s= '';
- var randomchar=function(){
- var n= Math.floor(Math.random()*62);
- if(n<10) return n; //1-10
- if(n<36) return String.fromCharCode(n+55); //A-Z
- return String.fromCharCode(n+61); //a-z
+function getRandomStr(L) {
+ var s = '';
+ var randomchar = function() {
+ var n = Math.floor(Math.random() * 62);
+ if (n < 10)
+ return n; // 1-10
+ if (n < 36)
+ return String.fromCharCode(n + 55); // A-Z
+ return String.fromCharCode(n + 61); // a-z
};
- while(s.length< L) s+= randomchar();
+ while (s.length < L)
+ s += randomchar();
return s;
}
@@ -22,36 +25,37 @@ while (largeStr.length < 512) {
}
largeStr = largeStr.substr(512);
-for ( i = 0; i < 100000; ++i ) {
+for (i = 0; i < 100000; ++i) {
t.save({rand: getRandomStr(20), same: "the same string", str: largeStr});
}
emit = printjson;
count = t.count();
-function d( x ){
- printjson( x );
+function d(x) {
+ printjson(x);
}
-m = function(){
+m = function() {
emit(this.rand, {id: this._id, str: this.str});
};
-m2 = function(){
+m2 = function() {
emit(this.same, this.rand);
};
-r = function(k,vals) {
- var tmp = {};
- vals.forEach(function(i) {
- if(typeof(i) == 'string') {
- tmp[i] = true;
+r = function(k, vals) {
+ var tmp = {};
+ vals.forEach(function(i) {
+ if (typeof(i) == 'string') {
+ tmp[i] = true;
} else {
- for(var z in i) tmp[z] = true;
+ for (var z in i)
+ tmp[z] = true;
}
- });
+ });
- return tmp;
+ return tmp;
};
// following time limits are passing fine on a laptop with a debug build
@@ -59,25 +63,22 @@ r = function(k,vals) {
// 1st MR just uses random unique keys, with no reduce involved
// this should be straightforward for perf, but could lead to OOM if settings are bad
-assert.time(
-function() {
-res = db.runCommand( { mapreduce : "mr_bench" , map : m , reduce : r , out : "mr_bench_out" } );
-d( res );
-assert.eq( count , res.counts.input , "A" );
-x = db[res.result];
-assert.eq( count , x.find().count() , "B" );
-return 1;
+assert.time(function() {
+ res = db.runCommand({mapreduce: "mr_bench", map: m, reduce: r, out: "mr_bench_out"});
+ d(res);
+ assert.eq(count, res.counts.input, "A");
+ x = db[res.result];
+ assert.eq(count, x.find().count(), "B");
+ return 1;
}, "unique key mr", 15000);
// 2nd MR emits the same key, and a unique value is added as key to same object
// if object is kept in ram and being reduced, this can be really slow
-assert.time(
-function() {
-res = db.runCommand( { mapreduce : "mr_bench" , map : m2 , reduce : r , out : "mr_bench_out" } );
-d( res );
-assert.eq( count , res.counts.input , "A" );
-x = db[res.result];
-assert.eq( 1 , x.find().count() , "B" );
-return 1;
+assert.time(function() {
+ res = db.runCommand({mapreduce: "mr_bench", map: m2, reduce: r, out: "mr_bench_out"});
+ d(res);
+ assert.eq(count, res.counts.input, "A");
+ x = db[res.result];
+ assert.eq(1, x.find().count(), "B");
+ return 1;
}, "single key mr", 20000);
-
diff --git a/jstests/perf/remove1.js b/jstests/perf/remove1.js
index 3e1a1a6b71c..09f1948572b 100644
--- a/jstests/perf/remove1.js
+++ b/jstests/perf/remove1.js
@@ -10,16 +10,16 @@ var msg = "Hello from remove test";
function testSetup(dbConn) {
var t = dbConn[collection_name];
t.drop();
- t.ensureIndex( { num : 1 } );
+ t.ensureIndex({num: 1});
- for (var i=0; i<size; i++){
- t.save({ num : i, msg : msg });
+ for (var i = 0; i < size; i++) {
+ t.save({num: i, msg: msg});
}
}
-function between( low, high, val, msg ) {
- assert( low < val, msg );
- assert( val < high, msg );
+function between(low, high, val, msg) {
+ assert(low < val, msg);
+ assert(val < high, msg);
}
/**
@@ -29,40 +29,36 @@ function between( low, high, val, msg ) {
* @param dbConn
*/
function testRemoveWithMultiField(dbConn) {
-
var results = {};
var t = dbConn[collection_name];
testSetup(dbConn);
- t.remove( {num:0 } );
- results.indexOnly = Date.timeFunc(
- function(){
- for (var i = 1; i < removals; i++) {
- t.remove({num : i});
- }
-
- t.findOne();
+ t.remove({num: 0});
+ results.indexOnly = Date.timeFunc(function() {
+ for (var i = 1; i < removals; i++) {
+ t.remove({num: i});
}
- );
+
+ t.findOne();
+ });
testSetup(dbConn);
-
- t.remove( {num: 0, msg: msg } );
- results.withAnother = Date.timeFunc(
- function(){
- for (var i = 1; i < removals; i++) {
- t.remove({num : i, msg : msg});
- }
- t.findOne();
+ t.remove({num: 0, msg: msg});
+ results.withAnother = Date.timeFunc(function() {
+ for (var i = 1; i < removals; i++) {
+ t.remove({num: i, msg: msg});
}
- );
+ t.findOne();
+ });
- between( 0.65, 1.35, (results.indexOnly / results.withAnother),
- "indexOnly / withAnother (" + results.indexOnly + " / " + results.withAnother + " ) = " +
- results.indexOnly / results.withAnother + " not in [0.65, 1.35]" );
+ between(0.65,
+ 1.35,
+ (results.indexOnly / results.withAnother),
+ "indexOnly / withAnother (" + results.indexOnly + " / " + results.withAnother +
+ " ) = " + results.indexOnly / results.withAnother + " not in [0.65, 1.35]");
}
testRemoveWithMultiField(db);
diff --git a/jstests/perf/v8_mapreduce.js b/jstests/perf/v8_mapreduce.js
index b98cdc5fc0d..7ff329c5284 100644
--- a/jstests/perf/v8_mapreduce.js
+++ b/jstests/perf/v8_mapreduce.js
@@ -2,20 +2,21 @@
// Our server and client need to be running V8 and the host we are running on needs at least two
// cores. Update this if you are testing more than three threads in parallel.
-if (/V8/.test(interpreterVersion()) &&
- db.runCommand({buildinfo:1}).javascriptEngine == "V8" &&
+if (/V8/.test(interpreterVersion()) && db.runCommand({buildinfo: 1}).javascriptEngine == "V8" &&
db.hostInfo().system.numCores >= 2) {
-
// function timeSingleThread
// Description: Gathers data about how long it takes to run a given job
// Args: job - job to run
// tid - thread id passed as an argument to the job, default 0
// Returns: { threadStart : <time job started> , threadEnd : <time job completed> }
- var timeSingleThread = function (job, tid) {
+ var timeSingleThread = function(job, tid) {
var tid = tid || 0;
var threadStart = new Date();
job(tid);
- return { "threadStart" : threadStart , "threadEnd" : new Date() };
+ return {
+ "threadStart": threadStart,
+ "threadEnd": new Date()
+ };
};
// function timeMultipleThreads
@@ -28,7 +29,7 @@ if (/V8/.test(interpreterVersion()) &&
// threadEnd : <time elapsed before thread completed work> } ,
// ...
// ]
- var timeMultipleThreads = function (job, nthreads, stagger) {
+ var timeMultipleThreads = function(job, nthreads, stagger) {
var i = 0;
var threads = [];
@@ -59,7 +60,7 @@ if (/V8/.test(interpreterVersion()) &&
// Display and analysis helper functions
- var getLastCompletion = function (threadTimes) {
+ var getLastCompletion = function(threadTimes) {
var lastCompletion = 0;
for (var i = 0; i < threadTimes.length; i++) {
lastCompletion = Math.max(lastCompletion, threadTimes[i].threadEnd);
@@ -71,14 +72,17 @@ if (/V8/.test(interpreterVersion()) &&
db.v8_parallel_mr_src.drop();
- for (j=0; j<100; j++) for (i=0; i<512; i++){ db.v8_parallel_mr_src.save({j:j, i:i});}
+ for (j = 0; j < 100; j++)
+ for (i = 0; i < 512; i++) {
+ db.v8_parallel_mr_src.save({j: j, i: i});
+ }
db.getLastError();
- var mrWorkFunction = function () {
+ var mrWorkFunction = function() {
function verifyOutput(out) {
- //printjson(out);
+ // printjson(out);
assert.eq(out.counts.input, 51200, "input count is wrong");
assert.eq(out.counts.emit, 51200, "emit count is wrong");
assert.gt(out.counts.reduce, 99, "reduce count is wrong");
@@ -87,22 +91,21 @@ if (/V8/.test(interpreterVersion()) &&
function map() {
if (this.j % 2 == 0) {
- emit(this.i, this.j*this.j);
- }
- else {
- emit(this.i, this.j+this.j);
+ emit(this.i, this.j * this.j);
+ } else {
+ emit(this.i, this.j + this.j);
}
}
function reduce(key, values) {
- values_halved = values.map(function (value) {
+ values_halved = values.map(function(value) {
return value / 2;
});
values_halved_sum = Array.sum(values_halved);
return values_halved_sum;
}
- var out = db.v8_parallel_mr_src.mapReduce(map, reduce, { out : "v8_parallel_mr_out" });
+ var out = db.v8_parallel_mr_src.mapReduce(map, reduce, {out: "v8_parallel_mr_out"});
verifyOutput(out);
};
diff --git a/jstests/readonly/aggregate.js b/jstests/readonly/aggregate.js
index 5710dec9139..9f74f739c4d 100644
--- a/jstests/readonly/aggregate.js
+++ b/jstests/readonly/aggregate.js
@@ -6,99 +6,84 @@ runReadOnlyTest(function() {
name: 'aggregate',
load: function(writableCollection) {
- assert.doesNotThrow(() => { writableCollection.insertMany([
- {award: "Best Picture",
- nominations: [
- {title: "The Big Short"},
- {title: "Bridge of Spies"},
- {title: "Brooklyn"},
- {title: "Max Max: Fury Road"},
- {title: "The Martian"},
- {title: "The Revenant"},
- {title: "Room"},
- {title: "Spotlight"}
- ]
- },
- {award: "Best Actor",
- nominations: [
- {title: "Trumbo",
- person: "Bryan Cranston"},
- {title: "The Martian",
- person: "Matt Damon"},
- {title: "The Revenant",
- person: "Leonardo DiCaprio"},
- {title: "Steve Jobs",
- person: "Michael Fassbender"},
- {title: "The Danish Girl",
- person: "Eddie Redmayne"}
- ]
- },
- {award: "Best Actress",
- nominations: [
- {title: "Carol",
- person: "Cate Blanchett"},
- {title: "Room",
- person: "Brie Larson"},
- {title: "Joy",
- person: "Jennifer Lawrence"},
- {title: "45 Years",
- person: "Charlotte Rampling"},
- {title: "Brooklyn",
- person: "Saoirse Ronan"}
- ]
- },
- {award: "Best Supporting Actor",
- nominations: [
- {title: "The Big Short",
- person: "Christian Bale"},
- {title: "The Revenant",
- person: "Tom Hardy"},
- {title: "Spotlight",
- person: "Mark Ruffalo"},
- {title: "Bridge Of Spies",
- person: "Mark Rylance"},
- {title: "Creed",
- person: "Sylvester Stallone"}
- ]
- },
- {award: "Best Supporting Actress",
- nominations: [
- {title: "The Hateful Eight",
- person: "Jennifer Jason Leigh"},
- {title: "Carol",
- person: "Rooney Mara"},
- {title: "Spotlight",
- person: "Rachel McAdams"},
- {title: "The Danish Girl",
- person: "Alicia Vikander"},
- {title: "Steve Jobs",
- person: "Kate Winslet"}
- ]
- }
- ]); });
+ assert.doesNotThrow(() => {
+ writableCollection.insertMany([
+ {
+ award: "Best Picture",
+ nominations: [
+ {title: "The Big Short"},
+ {title: "Bridge of Spies"},
+ {title: "Brooklyn"},
+ {title: "Max Max: Fury Road"},
+ {title: "The Martian"},
+ {title: "The Revenant"},
+ {title: "Room"},
+ {title: "Spotlight"}
+ ]
+ },
+ {
+ award: "Best Actor",
+ nominations: [
+ {title: "Trumbo", person: "Bryan Cranston"},
+ {title: "The Martian", person: "Matt Damon"},
+ {title: "The Revenant", person: "Leonardo DiCaprio"},
+ {title: "Steve Jobs", person: "Michael Fassbender"},
+ {title: "The Danish Girl", person: "Eddie Redmayne"}
+ ]
+ },
+ {
+ award: "Best Actress",
+ nominations: [
+ {title: "Carol", person: "Cate Blanchett"},
+ {title: "Room", person: "Brie Larson"},
+ {title: "Joy", person: "Jennifer Lawrence"},
+ {title: "45 Years", person: "Charlotte Rampling"},
+ {title: "Brooklyn", person: "Saoirse Ronan"}
+ ]
+ },
+ {
+ award: "Best Supporting Actor",
+ nominations: [
+ {title: "The Big Short", person: "Christian Bale"},
+ {title: "The Revenant", person: "Tom Hardy"},
+ {title: "Spotlight", person: "Mark Ruffalo"},
+ {title: "Bridge Of Spies", person: "Mark Rylance"},
+ {title: "Creed", person: "Sylvester Stallone"}
+ ]
+ },
+ {
+ award: "Best Supporting Actress",
+ nominations: [
+ {title: "The Hateful Eight", person: "Jennifer Jason Leigh"},
+ {title: "Carol", person: "Rooney Mara"},
+ {title: "Spotlight", person: "Rachel McAdams"},
+ {title: "The Danish Girl", person: "Alicia Vikander"},
+ {title: "Steve Jobs", person: "Kate Winslet"}
+ ]
+ }
+ ]);
+ });
},
exec: function(readableCollection) {
// Find titles nominated for the most awards.
var mostAwardsPipeline = [
{$unwind: "$nominations"},
- {$group: {
- _id: "$nominations.title",
- count: {$sum: 1}}},
+ {$group: {_id: "$nominations.title", count: {$sum: 1}}},
{$sort: {count: -1}},
{$limit: 2}
];
- assert.docEq(readableCollection.aggregate(mostAwardsPipeline).toArray(), [
- {_id: "The Revenant", count: 3},
- {_id: "Spotlight", count: 3}
- ]);
+ assert.docEq(readableCollection.aggregate(mostAwardsPipeline).toArray(),
+ [{_id: "The Revenant", count: 3}, {_id: "Spotlight", count: 3}]);
// Check that pipelines fail with allowDiskUse true. We use runCommand manually because
// the helper has conflicting error handling logic.
- var allowDiskUseCmd = {aggregate: readableCollection.getName(),
- pipeline: [],
- allowDiskUse: true};
+ var allowDiskUseCmd = {
+ aggregate: readableCollection.getName(),
+ pipeline: [],
+ allowDiskUse: true
+ };
assert.commandFailedWithCode(readableCollection.runCommand(allowDiskUseCmd),
ErrorCodes.IllegalOperation,
diff --git a/jstests/readonly/count.js b/jstests/readonly/count.js
index 62571f9d7a0..cc13f5c3337 100644
--- a/jstests/readonly/count.js
+++ b/jstests/readonly/count.js
@@ -1,7 +1,7 @@
load('jstests/readonly/lib/read_only_test.js');
runReadOnlyTest(function() {
- 'use strict' ;
+ 'use strict';
return {
name: 'count',
@@ -28,9 +28,9 @@ runReadOnlyTest(function() {
assert.writeOK(bulk.execute());
},
exec: function(readableCollection) {
- assert.eq(readableCollection.find({x: {$lt : 10}}).count(), this.countLt10);
- assert.eq(readableCollection.find({x: {$eq : 35}}).count(), this.countEq35);
- assert.eq(readableCollection.find({x: {$gte : 10}}).count(), this.countGte10);
+ assert.eq(readableCollection.find({x: {$lt: 10}}).count(), this.countLt10);
+ assert.eq(readableCollection.find({x: {$eq: 35}}).count(), this.countEq35);
+ assert.eq(readableCollection.find({x: {$gte: 10}}).count(), this.countGte10);
assert.eq(readableCollection.count(), this.count);
}
};
diff --git a/jstests/readonly/find.js b/jstests/readonly/find.js
index a2cede9da7d..6dd458c5374 100644
--- a/jstests/readonly/find.js
+++ b/jstests/readonly/find.js
@@ -6,7 +6,7 @@ runReadOnlyTest(function() {
name: 'find',
load: function(writableCollection) {
for (var i = 0; i < 10; ++i) {
- assert.writeOK(writableCollection.insert({x: i, y : 2 * i}));
+ assert.writeOK(writableCollection.insert({x: i, y: 2 * i}));
}
},
exec: function(readableCollection) {
@@ -16,10 +16,7 @@ runReadOnlyTest(function() {
assert.eq(readableCollection.find({x: {$gt: 3, $lte: 6}}).count(), 3);
assert.eq(readableCollection.find({y: {$lte: -1}}).count(), 0);
- assert.eq(readableCollection.find({$or: [
- {x: {$lte: 2}},
- {y: {$gte: 16}}
- ]}).count(), 5);
+ assert.eq(readableCollection.find({$or: [{x: {$lte: 2}}, {y: {$gte: 16}}]}).count(), 5);
}
};
}());
diff --git a/jstests/readonly/geo.js b/jstests/readonly/geo.js
index 1127a70a18f..367cb89be96 100644
--- a/jstests/readonly/geo.js
+++ b/jstests/readonly/geo.js
@@ -10,28 +10,33 @@ runReadOnlyTest(function() {
assert.commandWorked(writableCollection.createIndex({loc: "2dsphere"}));
var locDocs = [
- {name: "Berry Park",
- loc: {type: "Point", coordinates: [40.722396, -73.9573645]}},
- {name: "Northern Territory",
- loc: {type: "Point", coordinates: [40.7252334, -73.9595218]}},
- {name: "Kent Ale House",
- loc: {type: "Point", coordinates: [40.7223364, -73.9614495]}},
- {name: "The Shanty",
- loc: {type: "Point", coordinates: [40.7185752, -73.9510538]}},
- {name: "The Counting Room",
- loc: {type: "Point", coordinates: [40.7209601, -73.9588041]}},
- {name: "Kinfolk 94",
- loc: {type: "Point", coordinates: [40.7217058, -73.9605489]}}
+ {name: "Berry Park", loc: {type: "Point", coordinates: [40.722396, -73.9573645]}},
+ {
+ name: "Northern Territory",
+ loc: {type: "Point", coordinates: [40.7252334, -73.9595218]}
+ },
+ {
+ name: "Kent Ale House",
+ loc: {type: "Point", coordinates: [40.7223364, -73.9614495]}
+ },
+ {name: "The Shanty", loc: {type: "Point", coordinates: [40.7185752, -73.9510538]}},
+ {
+ name: "The Counting Room",
+ loc: {type: "Point", coordinates: [40.7209601, -73.9588041]}
+ },
+ {
+ name: "Kinfolk 94",
+ loc: {type: "Point", coordinates: [40.7217058, -73.9605489]}
+ }
];
writableCollection.insertMany(locDocs);
},
exec: function(readableCollection) {
- var res = readableCollection.find({loc: {$near: {
- $geometry: {
- type: "Point",
- coordinates: [40.7211404, -73.9591494]
- }}}}).limit(1).toArray();
+ var res = readableCollection.find({
+ loc:
+ {$near: {$geometry: {type: "Point", coordinates: [40.7211404, -73.9591494]}}}
+ }).limit(1).toArray();
assert.eq(res[0].name, "The Counting Room");
}
};
diff --git a/jstests/readonly/lib/read_only_test.js b/jstests/readonly/lib/read_only_test.js
index d544cf5bd08..55af207a399 100644
--- a/jstests/readonly/lib/read_only_test.js
+++ b/jstests/readonly/lib/read_only_test.js
@@ -17,7 +17,6 @@ function makeDirectoryWritable(dir) {
}
function runReadOnlyTest(test) {
-
printjson(test);
assert.eq(typeof(test.exec), 'function');
@@ -40,10 +39,8 @@ function runReadOnlyTest(test) {
makeDirectoryReadOnly(dbpath);
try {
- var readOnlyOptions = Object.extend(options,
- {readOnly: '',
- dbpath: dbpath,
- noCleanData: true});
+ var readOnlyOptions =
+ Object.extend(options, {readOnly: '', dbpath: dbpath, noCleanData: true});
var readOnlyMongod = MongoRunner.runMongod(readOnlyOptions);
@@ -59,13 +56,13 @@ function runReadOnlyTest(test) {
}
}
-function* cycleN(arr, N) {
+function * cycleN(arr, N) {
for (var i = 0; i < N; ++i) {
yield arr[i % arr.length];
}
}
-function* zip2(iter1, iter2) {
+function * zip2(iter1, iter2) {
var n1 = iter1.next();
var n2 = iter2.next();
while (!n1.done || !n2.done) {
diff --git a/jstests/readonly/server_status.js b/jstests/readonly/server_status.js
index cbb70f52560..207b5b48dfd 100644
--- a/jstests/readonly/server_status.js
+++ b/jstests/readonly/server_status.js
@@ -6,8 +6,7 @@ runReadOnlyTest(function() {
return {
name: 'server_status',
- load: function(writableCollection) {
- },
+ load: function(writableCollection) {},
exec: function(readableCollection) {
assert.commandWorked(readableCollection.getDB().serverStatus());
}
diff --git a/jstests/repl/basic1.js b/jstests/repl/basic1.js
index 7069b898a77..515667c48e4 100644
--- a/jstests/repl/basic1.js
+++ b/jstests/repl/basic1.js
@@ -2,163 +2,179 @@
// test repl basics
// data on master/slave is the same
-var rt = new ReplTest( "basic1" );
+var rt = new ReplTest("basic1");
-m = rt.start( true );
-s = rt.start( false );
+m = rt.start(true);
+s = rt.start(false);
-function block(){
- am.runCommand( { getlasterror : 1 , w : 2 , wtimeout : 3000 } );
+function block() {
+ am.runCommand({getlasterror: 1, w: 2, wtimeout: 3000});
}
-am = m.getDB( "foo" );
-as = s.getDB( "foo" );
+am = m.getDB("foo");
+as = s.getDB("foo");
-function check( note ){
+function check(note) {
var start = new Date();
- var x,y;
- while ( (new Date()).getTime() - start.getTime() < 30000 ){
- x = am.runCommand( "dbhash" );
- y = as.runCommand( "dbhash" );
- if ( x.md5 == y.md5 )
+ var x, y;
+ while ((new Date()).getTime() - start.getTime() < 30000) {
+ x = am.runCommand("dbhash");
+ y = as.runCommand("dbhash");
+ if (x.md5 == y.md5)
return;
- sleep( 200 );
+ sleep(200);
}
- lastOpLogEntry = m.getDB("local").oplog.$main.find({op:{$ne:"n"}}).sort({$natural:-1}).limit(-1).next();
- note = note + tojson(am.a.find().toArray()) + " != " + tojson(as.a.find().toArray())
- + "last oplog:" + tojson(lastOpLogEntry);
- assert.eq( x.md5 , y.md5 , note );
+ lastOpLogEntry =
+ m.getDB("local").oplog.$main.find({op: {$ne: "n"}}).sort({$natural: -1}).limit(-1).next();
+ note = note + tojson(am.a.find().toArray()) + " != " + tojson(as.a.find().toArray()) +
+ "last oplog:" + tojson(lastOpLogEntry);
+ assert.eq(x.md5, y.md5, note);
}
-am.a.save( { x : 1 } );
-check( "A" );
+am.a.save({x: 1});
+check("A");
-am.a.save( { x : 5 } );
+am.a.save({x: 5});
-am.a.update( {} , { $inc : { x : 1 } } );
-check( "B" );
+am.a.update({}, {$inc: {x: 1}});
+check("B");
-am.a.update( {} , { $inc : { x : 1 } } , false , true );
-check( "C" );
+am.a.update({}, {$inc: {x: 1}}, false, true);
+check("C");
// ----- check features -------
// map/reduce
-assert.writeOK(am.mr.insert({ tags: [ "a" ]}));
-assert.writeOK(am.mr.insert({ tags: [ "a", "b" ]}));
-check( "mr setup" );
-
-m = function(){
- for ( var i=0; i<this.tags.length; i++ ){
- print( "\t " + i );
- emit( this.tags[i] , 1 );
+assert.writeOK(am.mr.insert({tags: ["a"]}));
+assert.writeOK(am.mr.insert({tags: ["a", "b"]}));
+check("mr setup");
+
+m = function() {
+ for (var i = 0; i < this.tags.length; i++) {
+ print("\t " + i);
+ emit(this.tags[i], 1);
}
};
-r = function( key , v ){
- return Array.sum( v );
+r = function(key, v) {
+ return Array.sum(v);
};
-correct = { a : 2 , b : 1 };
+correct = {
+ a: 2,
+ b: 1
+};
-function checkMR( t ){
- var res = t.mapReduce( m , r , { out : { inline : 1 } } );
- assert.eq( correct , res.convertToSingleObject() , "checkMR: " + tojson( t ) );
+function checkMR(t) {
+ var res = t.mapReduce(m, r, {out: {inline: 1}});
+ assert.eq(correct, res.convertToSingleObject(), "checkMR: " + tojson(t));
}
-function checkNumCollections( msg , diff ){
- if ( ! diff ) diff = 0;
+function checkNumCollections(msg, diff) {
+ if (!diff)
+ diff = 0;
var m = am.getCollectionNames();
var s = as.getCollectionNames();
- assert.eq( m.length + diff , s.length , msg + " lengths bad \n" + tojson( m ) + "\n" + tojson( s ) );
+ assert.eq(m.length + diff, s.length, msg + " lengths bad \n" + tojson(m) + "\n" + tojson(s));
}
-checkNumCollections( "MR1" );
-checkMR( am.mr );
-checkMR( as.mr );
-checkNumCollections( "MR2" );
+checkNumCollections("MR1");
+checkMR(am.mr);
+checkMR(as.mr);
+checkNumCollections("MR2");
block();
-checkNumCollections( "MR3" );
+checkNumCollections("MR3");
-var res = am.mr.mapReduce( m , r , { out : "xyz" } );
+var res = am.mr.mapReduce(m, r, {out: "xyz"});
block();
-checkNumCollections( "MR4" );
+checkNumCollections("MR4");
var t = am.rpos;
-var writeOption = { writeConcern: { w: 2, wtimeout: 3000 }};
-t.insert({ _id: 1, a: [{ n: "a", c: 1 }, { n: "b", c: 1 }, { n: "c", c: 1 }], b: [ 1, 2, 3 ]},
- writeOption);
-check( "after pos 1 " );
-
-t.update({ "a.n": "b" }, { $inc: { "a.$.c": 1 }}, writeOption);
-check( "after pos 2 " );
+var writeOption = {
+ writeConcern: {w: 2, wtimeout: 3000}
+};
+t.insert({_id: 1, a: [{n: "a", c: 1}, {n: "b", c: 1}, {n: "c", c: 1}], b: [1, 2, 3]}, writeOption);
+check("after pos 1 ");
-t.update({ b: 2 }, { $inc: { "b.$": 1 }}, writeOption);
-check( "after pos 3 " );
+t.update({"a.n": "b"}, {$inc: {"a.$.c": 1}}, writeOption);
+check("after pos 2 ");
-t.update({ b: 3 }, { $set: { "b.$": 17 }}, writeOption);
-check( "after pos 4 " );
+t.update({b: 2}, {$inc: {"b.$": 1}}, writeOption);
+check("after pos 3 ");
+t.update({b: 3}, {$set: {"b.$": 17}}, writeOption);
+check("after pos 4 ");
-printjson( am.rpos.findOne() );
-printjson( as.rpos.findOne() );
+printjson(am.rpos.findOne());
+printjson(as.rpos.findOne());
-//am.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().limit(10).sort( { $natural : -1 } ).forEach( printjson )
+// am.getSisterDB( "local" ).getCollection( "oplog.$main" ).find().limit(10).sort( { $natural : -1 }
+// ).forEach( printjson )
t = am.b;
-var updateOption = { upsert: true, multi: false, writeConcern: { w: 2, wtimeout: 3000 }};
-t.update({ _id: "fun" }, { $inc: { "a.b.c.x": 6743 }}, updateOption);
-check( "b 1" );
+var updateOption = {
+ upsert: true,
+ multi: false,
+ writeConcern: {w: 2, wtimeout: 3000}
+};
+t.update({_id: "fun"}, {$inc: {"a.b.c.x": 6743}}, updateOption);
+check("b 1");
-t.update({ _id: "fun" }, { $inc: { "a.b.c.x": 5 }}, updateOption);
-check( "b 2" );
+t.update({_id: "fun"}, {$inc: {"a.b.c.x": 5}}, updateOption);
+check("b 2");
-t.update({ _id: "fun" }, { $inc: { "a.b.c.x": 100, "a.b.c.y": 911 }}, updateOption);
-assert.eq( { _id : "fun" , a : { b : { c : { x : 6848 , y : 911 } } } } , as.b.findOne() , "b 3" );
-check( "b 4" );
+t.update({_id: "fun"}, {$inc: {"a.b.c.x": 100, "a.b.c.y": 911}}, updateOption);
+assert.eq({_id: "fun", a: {b: {c: {x: 6848, y: 911}}}}, as.b.findOne(), "b 3");
+check("b 4");
// lots of indexes
-am.lotOfIndexes.insert( { x : 1 } );
-for ( i=0; i<200; i++ ){
+am.lotOfIndexes.insert({x: 1});
+for (i = 0; i < 200; i++) {
var idx = {};
- idx["x"+i] = 1;
- am.lotOfIndexes.ensureIndex( idx );
+ idx["x" + i] = 1;
+ am.lotOfIndexes.ensureIndex(idx);
}
-assert.soon( function(){ return am.lotOfIndexes.getIndexes().length == as.lotOfIndexes.getIndexes().length; } , "lots of indexes a" );
+assert.soon(function() {
+ return am.lotOfIndexes.getIndexes().length == as.lotOfIndexes.getIndexes().length;
+}, "lots of indexes a");
-assert.eq( am.lotOfIndexes.getIndexes().length , as.lotOfIndexes.getIndexes().length , "lots of indexes b" );
+assert.eq(am.lotOfIndexes.getIndexes().length,
+ as.lotOfIndexes.getIndexes().length,
+ "lots of indexes b");
// multi-update with $inc
-am.mu1.update( { _id : 1 , $atomic : 1 } , { $inc : { x : 1 } } , true , true );
-x = { _id : 1 , x : 1 };
-assert.eq( x , am.mu1.findOne() , "mu1" );
-assert.soon( function(){ z = as.mu1.findOne(); printjson( z ); return friendlyEqual( x , z ); } , "mu2" );
+am.mu1.update({_id: 1, $atomic: 1}, {$inc: {x: 1}}, true, true);
+x = {
+ _id: 1,
+ x: 1
+};
+assert.eq(x, am.mu1.findOne(), "mu1");
+assert.soon(function() {
+ z = as.mu1.findOne();
+ printjson(z);
+ return friendlyEqual(x, z);
+}, "mu2");
// profiling - this should be last
-am.setProfilingLevel( 2 );
-am.foo.insert({ x: 1 }, writeOption);
+am.setProfilingLevel(2);
+am.foo.insert({x: 1}, writeOption);
am.foo.findOne();
-assert.eq( 2 , am.system.profile.count() , "P1" );
-assert.eq( 0 , as.system.profile.count() , "P2" );
+assert.eq(2, am.system.profile.count(), "P1");
+assert.eq(0, as.system.profile.count(), "P2");
-assert.eq( 1 , as.foo.findOne().x , "P3" );
-assert.eq( 0 , as.system.profile.count() , "P4" );
+assert.eq(1, as.foo.findOne().x, "P3");
+assert.eq(0, as.system.profile.count(), "P4");
-assert( as.getCollectionNames().indexOf( "system.profile" ) < 0 , "P4.5" );
+assert(as.getCollectionNames().indexOf("system.profile") < 0, "P4.5");
as.setProfilingLevel(2);
as.foo.findOne();
-assert.eq( 1 , as.system.profile.count() , "P5" );
-
+assert.eq(1, as.system.profile.count(), "P5");
rt.stop();
-
-
-
-
diff --git a/jstests/repl/batch_write_command_wc_repl.js b/jstests/repl/batch_write_command_wc_repl.js
index 41de494ab0c..3e28b330bf1 100644
--- a/jstests/repl/batch_write_command_wc_repl.js
+++ b/jstests/repl/batch_write_command_wc_repl.js
@@ -12,15 +12,14 @@ jsTest.log("Starting legacy repl tests...");
// Start a master node
// Allows testing legacy repl failures
-var mongod = MongoRunner.runMongod({ master : "", oplogSize : 40, smallfiles : "" });
+var mongod = MongoRunner.runMongod({master: "", oplogSize: 40, smallfiles: ""});
var coll = mongod.getCollection("test.batch_write_command_wc_repl");
//
// Basic insert, default WC
coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{a:1}]});
-printjson( result = coll.runCommand(request) );
+printjson(request = {insert: coll.getName(), documents: [{a: 1}]});
+printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(1, coll.count());
@@ -28,10 +27,8 @@ assert.eq(1, coll.count());
//
// Basic insert, majority WC
coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{a:1}],
- writeConcern: {w: 'majority'}});
-printjson( result = coll.runCommand(request) );
+printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'majority'}});
+printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(1, coll.count());
@@ -39,20 +36,17 @@ assert.eq(1, coll.count());
//
// Basic insert, immediate bad wMode error
coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{a:1}],
- writeConcern: {w: 'invalid'}});
-printjson( result = coll.runCommand(request) );
+printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'invalid'}});
+printjson(result = coll.runCommand(request));
assert(!result.ok);
assert.eq(0, coll.count());
//
// Basic insert, error on WC with wtimeout
coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{a:1}],
- writeConcern: {w:2, wtimeout: 1}});
-printjson( result = coll.runCommand(request) );
+printjson(
+ request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 2, wtimeout: 1}});
+printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert(result.writeConcernError);
@@ -61,4 +55,3 @@ assert.eq(1, coll.count());
jsTest.log("DONE legacy repl tests");
MongoRunner.stopMongod(mongod);
-
diff --git a/jstests/repl/block1.js b/jstests/repl/block1.js
index ef36c3efb34..ab427270340 100644
--- a/jstests/repl/block1.js
+++ b/jstests/repl/block1.js
@@ -1,23 +1,19 @@
-var rt = new ReplTest( "block1" );
+var rt = new ReplTest("block1");
-m = rt.start( true );
-s = rt.start( false );
+m = rt.start(true);
+s = rt.start(false);
-dbm = m.getDB( "foo" );
-dbs = s.getDB( "foo" );
+dbm = m.getDB("foo");
+dbs = s.getDB("foo");
tm = dbm.bar;
ts = dbs.bar;
-for ( var i=0; i<1000; i++ ){
- tm.insert({ _id: i }, { writeConcern: { w: 2 }});
- assert.eq( i + 1 , ts.count() , "A" + i );
- assert.eq( i + 1 , tm.count() , "B" + i );
+for (var i = 0; i < 1000; i++) {
+ tm.insert({_id: i}, {writeConcern: {w: 2}});
+ assert.eq(i + 1, ts.count(), "A" + i);
+ assert.eq(i + 1, tm.count(), "B" + i);
}
rt.stop();
-
-
-
-
diff --git a/jstests/repl/block2.js b/jstests/repl/block2.js
index fc35b2774c4..0eb19771228 100644
--- a/jstests/repl/block2.js
+++ b/jstests/repl/block2.js
@@ -1,31 +1,29 @@
-var rt = new ReplTest( "block1" );
+var rt = new ReplTest("block1");
-m = rt.start( true );
-s = rt.start( false );
+m = rt.start(true);
+s = rt.start(false);
-function setup(){
-
- dbm = m.getDB( "foo" );
- dbs = s.getDB( "foo" );
+function setup() {
+ dbm = m.getDB("foo");
+ dbs = s.getDB("foo");
tm = dbm.bar;
ts = dbs.bar;
}
setup();
-function check( msg ){
- assert.eq( tm.count() , ts.count() , "check: " + msg );
+function check(msg) {
+ assert.eq(tm.count(), ts.count(), "check: " + msg);
}
-check( "A" );
+check("A");
-assert.writeOK(tm.insert({ x: 1 }, { writeConcern: { w: 2 }}));
-assert.writeOK(tm.insert({ x: 2 }, { writeConcern: { w: 2, wtimeout: 3000 }}));
+assert.writeOK(tm.insert({x: 1}, {writeConcern: {w: 2}}));
+assert.writeOK(tm.insert({x: 2}, {writeConcern: {w: 2, wtimeout: 3000}}));
-rt.stop( false );
-assert.writeError(tm.insert({ x: 3 }, { writeConcern: { w: 2, wtimeout: 3000 }}));
-assert.eq( 3 , tm.count() , "D1" );
+rt.stop(false);
+assert.writeError(tm.insert({x: 3}, {writeConcern: {w: 2, wtimeout: 3000}}));
+assert.eq(3, tm.count(), "D1");
rt.stop();
-
diff --git a/jstests/repl/master1.js b/jstests/repl/master1.js
index 1da0f29b9d4..85f116fb242 100644
--- a/jstests/repl/master1.js
+++ b/jstests/repl/master1.js
@@ -9,49 +9,49 @@
var baseName = "jstests_repl_master1test";
oplog = function() {
- return m.getDB( "local" ).oplog.$main;
+ return m.getDB("local").oplog.$main;
};
lastop = function() {
- return oplog().find().sort( {$natural:-1} ).next();
+ return oplog().find().sort({$natural: -1}).next();
};
am = function() {
- return m.getDB( baseName ).a;
+ return m.getDB(baseName).a;
};
-rt = new ReplTest( baseName );
+rt = new ReplTest(baseName);
-m = rt.start( true );
+m = rt.start(true);
-am().save( {} );
-assert.eq( "i", lastop().op );
+am().save({});
+assert.eq("i", lastop().op);
op = lastop();
-printjson( op );
-op.ts.t = op.ts.t + 600000; // 10 minutes
-assert.commandWorked(m.getDB( "local" ).runCommand( {godinsert:"oplog.$main", obj:op} ));
+printjson(op);
+op.ts.t = op.ts.t + 600000; // 10 minutes
+assert.commandWorked(m.getDB("local").runCommand({godinsert: "oplog.$main", obj: op}));
-rt.stop( true );
-m = rt.start( true, null, true );
+rt.stop(true);
+m = rt.start(true, null, true);
-assert.eq( op.ts.t, lastop().ts.t );
-am().save( {} );
-assert.eq( op.ts.t, lastop().ts.t );
-assert.eq( op.ts.i + 1, lastop().ts.i );
+assert.eq(op.ts.t, lastop().ts.t);
+am().save({});
+assert.eq(op.ts.t, lastop().ts.t);
+assert.eq(op.ts.i + 1, lastop().ts.i);
op = lastop();
-printjson( op );
-op.ts.i = Math.pow(2,31)-1;
-printjson( op );
-assert.commandWorked(m.getDB( "local" ).runCommand( {godinsert:"oplog.$main", obj:op} ));
+printjson(op);
+op.ts.i = Math.pow(2, 31) - 1;
+printjson(op);
+assert.commandWorked(m.getDB("local").runCommand({godinsert: "oplog.$main", obj: op}));
-rt.stop( true );
-m = rt.start( true, null, true );
-assert.eq( op.ts.i, lastop().ts.i );
+rt.stop(true);
+m = rt.start(true, null, true);
+assert.eq(op.ts.i, lastop().ts.i);
assert.throws(function() {
- am().save( {} ); // triggers fassert because ofclock skew
+ am().save({}); // triggers fassert because ofclock skew
});
-assert.neq(0, rt.stop( true )); // fasserted
+assert.neq(0, rt.stop(true)); // fasserted
diff --git a/jstests/repl/mod_move.js b/jstests/repl/mod_move.js
index 6b5d7d8a5aa..2ec8996dce3 100644
--- a/jstests/repl/mod_move.js
+++ b/jstests/repl/mod_move.js
@@ -2,23 +2,23 @@
// test repl basics
// data on master/slave is the same
-var rt = new ReplTest( "mod_move" );
+var rt = new ReplTest("mod_move");
-m = rt.start( true , { oplogSize : 50 } );
+m = rt.start(true, {oplogSize: 50});
-am = m.getDB( "foo" );
+am = m.getDB("foo");
-function check( note ){
+function check(note) {
var start = new Date();
- var x,y;
- while ( (new Date()).getTime() - start.getTime() < 5 * 60 * 1000 ){
- x = am.runCommand( "dbhash" );
- y = as.runCommand( "dbhash" );
- if ( x.md5 == y.md5 )
+ var x, y;
+ while ((new Date()).getTime() - start.getTime() < 5 * 60 * 1000) {
+ x = am.runCommand("dbhash");
+ y = as.runCommand("dbhash");
+ if (x.md5 == y.md5)
return;
- sleep( 200 );
+ sleep(200);
}
- assert.eq( x.md5 , y.md5 , note );
+ assert.eq(x.md5, y.md5, note);
}
// insert a lot of 'big' docs
@@ -29,30 +29,30 @@ N = BIG * 2;
var bulk = am.a.initializeUnorderedBulkOp();
for (var i = 0; i < BIG; i++) {
- bulk.insert({ _id: i, s: 1, x: 1 });
+ bulk.insert({_id: i, s: 1, x: 1});
}
for (; i < N; i++) {
- bulk.insert({ _id: i, s: 1 });
+ bulk.insert({_id: i, s: 1});
}
for (i = 0; i < BIG; i++) {
- bulk.find({ _id: i }).remove();
+ bulk.find({_id: i}).remove();
}
assert.writeOK(bulk.execute());
-assert.eq( BIG , am.a.count() );
+assert.eq(BIG, am.a.count());
-if ( am.serverStatus().storageEngine.name == "mmapv1" ) {
- assert.eq( 1 , am.a.stats().paddingFactor , "A2" );
+if (am.serverStatus().storageEngine.name == "mmapv1") {
+ assert.eq(1, am.a.stats().paddingFactor, "A2");
}
// start slave
-s = rt.start( false );
-as = s.getDB( "foo" );
+s = rt.start(false);
+as = s.getDB("foo");
bulk = am.a.initializeUnorderedBulkOp();
for (i = N - 1; i >= BIG; i--) {
- bulk.find({ _id: i }).update({ $set: { x: 1 }});
+ bulk.find({_id: i}).update({$set: {x: 1}});
}
assert.writeOK(bulk.execute());
-check( "B" );
+check("B");
rt.stop();
diff --git a/jstests/repl/repair.js b/jstests/repl/repair.js
index 9bdaef35962..b654244eb2b 100644
--- a/jstests/repl/repair.js
+++ b/jstests/repl/repair.js
@@ -2,13 +2,13 @@
var baseName = "jstests_repl_repair";
-rt = new ReplTest( baseName );
+rt = new ReplTest(baseName);
-m = rt.start( true );
+m = rt.start(true);
-m.getDB( baseName )[ baseName ].save( {} );
-var c = m.getDB( 'local' ).oplog.$main.count();
-assert.automsg( "c > 0" );
+m.getDB(baseName)[baseName].save({});
+var c = m.getDB('local').oplog.$main.count();
+assert.automsg("c > 0");
-assert.commandWorked( m.getDB( "local" ).repairDatabase() );
-assert.automsg( "c <= m.getDB( 'local' ).oplog.$main.count()" );
+assert.commandWorked(m.getDB("local").repairDatabase());
+assert.automsg("c <= m.getDB( 'local' ).oplog.$main.count()");
diff --git a/jstests/repl/repl1.js b/jstests/repl/repl1.js
index 1bdfaa00bdd..ea8f85a6260 100644
--- a/jstests/repl/repl1.js
+++ b/jstests/repl/repl1.js
@@ -8,55 +8,57 @@
var baseName = "jstests_repl1test";
-soonCount = function( count ) {
- assert.soon( function() {
-// print( "check count" );
-// print( "count: " + s.getDB( baseName ).z.find().count() );
- return s.getDB( baseName ).a.find().count() == count;
- } );
+soonCount = function(count) {
+ assert.soon(function() {
+ // print( "check count" );
+ // print( "count: " + s.getDB( baseName ).z.find().count() );
+ return s.getDB(baseName).a.find().count() == count;
+ });
};
-doTest = function( signal ) {
-
- rt = new ReplTest( "repl1tests" );
-
- m = rt.start( true );
- s = rt.start( false );
-
- am = m.getDB( baseName ).a;
-
- for( i = 0; i < 1000; ++i )
- am.save( { _id: new ObjectId(), i: i } );
-
- soonCount( 1000 );
- as = s.getDB( baseName ).a;
- assert.eq( 1, as.find( { i: 0 } ).count() );
- assert.eq( 1, as.find( { i: 999 } ).count() );
-
- rt.stop( false, signal );
-
- for( i = 1000; i < 1010; ++i )
- am.save( { _id: new ObjectId(), i: i } );
-
- s = rt.start( false, null, true );
- soonCount( 1010 );
- as = s.getDB( baseName ).a;
- assert.eq( 1, as.find( { i: 1009 } ).count() );
-
- rt.stop( true, signal );
-
- m = rt.start( true, null, true );
- am = m.getDB( baseName ).a;
-
- for( i = 1010; i < 1020; ++i )
- am.save( { _id: new ObjectId(), i: i } );
-
- assert.soon( function() { return as.find().count() == 1020; } );
- assert.eq( 1, as.find( { i: 1019 } ).count() );
-
- assert.automsg( "m.getDB( 'local' ).getCollection( 'oplog.$main' ).stats().size > 0" );
-
+doTest = function(signal) {
+
+ rt = new ReplTest("repl1tests");
+
+ m = rt.start(true);
+ s = rt.start(false);
+
+ am = m.getDB(baseName).a;
+
+ for (i = 0; i < 1000; ++i)
+ am.save({_id: new ObjectId(), i: i});
+
+ soonCount(1000);
+ as = s.getDB(baseName).a;
+ assert.eq(1, as.find({i: 0}).count());
+ assert.eq(1, as.find({i: 999}).count());
+
+ rt.stop(false, signal);
+
+ for (i = 1000; i < 1010; ++i)
+ am.save({_id: new ObjectId(), i: i});
+
+ s = rt.start(false, null, true);
+ soonCount(1010);
+ as = s.getDB(baseName).a;
+ assert.eq(1, as.find({i: 1009}).count());
+
+ rt.stop(true, signal);
+
+ m = rt.start(true, null, true);
+ am = m.getDB(baseName).a;
+
+ for (i = 1010; i < 1020; ++i)
+ am.save({_id: new ObjectId(), i: i});
+
+ assert.soon(function() {
+ return as.find().count() == 1020;
+ });
+ assert.eq(1, as.find({i: 1019}).count());
+
+ assert.automsg("m.getDB( 'local' ).getCollection( 'oplog.$main' ).stats().size > 0");
+
rt.stop();
};
-doTest( 15 ); // SIGTERM
+doTest(15); // SIGTERM
diff --git a/jstests/repl/repl10.js b/jstests/repl/repl10.js
index 65deb3a2744..dfe3aab9fbf 100644
--- a/jstests/repl/repl10.js
+++ b/jstests/repl/repl10.js
@@ -2,39 +2,39 @@
var baseName = "jstests_repl10test";
-soonCount = function( count ) {
- assert.soon( function() {
- // print( "check count" );
- // print( "count: " + s.getDB( baseName ).z.find().count() );
- return s.getDB( baseName ).a.find().count() == count;
- } );
+soonCount = function(count) {
+ assert.soon(function() {
+ // print( "check count" );
+ // print( "count: " + s.getDB( baseName ).z.find().count() );
+ return s.getDB(baseName).a.find().count() == count;
+ });
};
-doTest = function( signal ) {
-
- rt = new ReplTest( "repl10tests" );
-
- m = rt.start( true );
- s = rt.start( false, { "slavedelay": "10" } );
-
- am = m.getDB( baseName ).a;
-
- am.save( {i:1} );
-
- soonCount( 1 );
-
- am.save( {i:2} );
- assert.eq( 2, am.count() );
- sleep( 3000 );
- assert.eq( 1, s.getDB( baseName ).a.count() );
-
- soonCount( 2 );
+doTest = function(signal) {
+
+ rt = new ReplTest("repl10tests");
+
+ m = rt.start(true);
+ s = rt.start(false, {"slavedelay": "10"});
+
+ am = m.getDB(baseName).a;
+
+ am.save({i: 1});
+
+ soonCount(1);
+
+ am.save({i: 2});
+ assert.eq(2, am.count());
+ sleep(3000);
+ assert.eq(1, s.getDB(baseName).a.count());
+
+ soonCount(2);
rt.stop();
};
print("repl10.js dotest(15)");
-doTest(15); // SIGTERM
+doTest(15); // SIGTERM
print("repl10.js dotest(15)");
doTest(9); // SIGKILL
print("repl10.js SUCCESS");
diff --git a/jstests/repl/repl12.js b/jstests/repl/repl12.js
index 4d41462afde..9771a48178e 100644
--- a/jstests/repl/repl12.js
+++ b/jstests/repl/repl12.js
@@ -1,25 +1,25 @@
// SERVER-1626
// check for initial sync of multiple db's
-function debug( x ) {
- print( "DEBUG:" + tojson( x ) );
+function debug(x) {
+ print("DEBUG:" + tojson(x));
}
-rt = new ReplTest( "repl12tests" );
+rt = new ReplTest("repl12tests");
-m = rt.start( true );
+m = rt.start(true);
usedDBs = [];
a = "a";
-for( i = 0; i < 3; ++i ) {
- usedDBs.push( a );
- m.getDB( a ).c.save( {} );
+for (i = 0; i < 3; ++i) {
+ usedDBs.push(a);
+ m.getDB(a).c.save({});
a += "a";
}
-//print("\n\n\n DB NAMES MASTER:");
-//printjson(m.getDBNames());
+// print("\n\n\n DB NAMES MASTER:");
+// printjson(m.getDBNames());
var z = 10500;
print("sleeping " + z + "ms");
@@ -27,26 +27,24 @@ sleep(z);
s = rt.start(false);
-function countHave(){
+function countHave() {
var have = 0;
- for ( var i=0; i<usedDBs.length; i++ ){
- if ( s.getDB( usedDBs[i] ).c.findOne() )
+ for (var i = 0; i < usedDBs.length; i++) {
+ if (s.getDB(usedDBs[i]).c.findOne())
have++;
}
return have;
}
-assert.soon(
- function() {
- try {
- var c = countHave();
- debug( "count: " + c );
- return c == 3;
- } catch (e) {
- printjson(e);
- return false;
- }
+assert.soon(function() {
+ try {
+ var c = countHave();
+ debug("count: " + c);
+ return c == 3;
+ } catch (e) {
+ printjson(e);
+ return false;
}
-);
+});
-//printjson(s.getDBNames());
+// printjson(s.getDBNames());
diff --git a/jstests/repl/repl13.js b/jstests/repl/repl13.js
index 80d86ffd92d..78fe9adfc81 100644
--- a/jstests/repl/repl13.js
+++ b/jstests/repl/repl13.js
@@ -1,58 +1,66 @@
// Test update modifier uassert during initial sync. SERVER-4781
var debuggingEnabled = false;
-function debug( x ) {
- if ( debuggingEnabled ) {
- printjson( x );
+function debug(x) {
+ if (debuggingEnabled) {
+ printjson(x);
}
}
-rt = new ReplTest( "repl13tests" );
+rt = new ReplTest("repl13tests");
-m = rt.start( true );
-mc = m.getDB( 'd' )[ 'c' ];
+m = rt.start(true);
+mc = m.getDB('d')['c'];
// Insert some documents with a:{} fields.
var bulk = mc.initializeUnorderedBulkOp();
-for(var i = 0; i < 100000; ++i) {
- bulk.insert({ _id: i, a: {}});
+for (var i = 0; i < 100000; ++i) {
+ bulk.insert({_id: i, a: {}});
}
assert.writeOK(bulk.execute());
-s = rt.start( false );
-sc = s.getDB( 'd' )[ 'c' ];
+s = rt.start(false);
+sc = s.getDB('d')['c'];
// Wait for the initial clone to begin.
-assert.soon( function() { debug( sc.count() ); return sc.count() > 0; } );
+assert.soon(function() {
+ debug(sc.count());
+ return sc.count() > 0;
+});
// Update documents that will be cloned last with the intent that an updated version will be cloned.
// This may cause an assertion when an update that was successfully applied to the original version
// of a document is replayed against an updated version of the same document.
bulk = mc.initializeUnorderedBulkOp();
-for( i = 99999; i >= 90000; --i ) {
+for (i = 99999; i >= 90000; --i) {
// If the document is cloned as {a:1}, the {$set:{'a.b':1}} modifier will uassert.
- bulk.find({ _id: i }).update({ $set: { 'a.b': 1 }});
- bulk.find({ _id: i }).update({ $set: { a: 1 }});
+ bulk.find({_id: i}).update({$set: {'a.b': 1}});
+ bulk.find({_id: i}).update({$set: {a: 1}});
}
assert.writeOK(bulk.execute());
// The initial sync completes and subsequent writes succeed, in spite of any assertions that occur
// when the update operations above are replicated.
-mc.save( {} );
-assert.eq( 100001 , mc.count() );
-assert.soon( function() { return sc.count() == 100001; } );
-mc.save( {} );
-assert.eq( 100002 , mc.count() );
-assert.soon( function() { return sc.count() == 100002; } );
+mc.save({});
+assert.eq(100001, mc.count());
+assert.soon(function() {
+ return sc.count() == 100001;
+});
+mc.save({});
+assert.eq(100002, mc.count());
+assert.soon(function() {
+ return sc.count() == 100002;
+});
-debug( sc.findOne( {_id:99999} ) );
-debug( sc.findOne( {_id:90000} ) );
+debug(sc.findOne({_id: 99999}));
+debug(sc.findOne({_id: 90000}));
-assert.eq( 1, sc.findOne( {_id:99999} ).a );
-assert.eq( 1, sc.findOne( {_id:90000} ).a );
+assert.eq(1, sc.findOne({_id: 99999}).a);
+assert.eq(1, sc.findOne({_id: 90000}).a);
-m_hash = m.getDB( "d" ).runCommand( "dbhash" );
-s_hash = s.getDB( "d" ).runCommand( "dbhash" );
-
-assert.eq( m_hash.collections.c , s_hash.collections.c , "sad " + tojson( m_hash ) + " " + tojson( s_hash ) );
+m_hash = m.getDB("d").runCommand("dbhash");
+s_hash = s.getDB("d").runCommand("dbhash");
+assert.eq(m_hash.collections.c,
+ s_hash.collections.c,
+ "sad " + tojson(m_hash) + " " + tojson(s_hash));
diff --git a/jstests/repl/repl14.js b/jstests/repl/repl14.js
index 5359221e607..c9d39686034 100644
--- a/jstests/repl/repl14.js
+++ b/jstests/repl/repl14.js
@@ -1,65 +1,74 @@
// Test replication of an array by $push-ing to a missing field in the presence of a sparse index on
// the field. SERVER-4907
-function testWithCollectionIndexIds( capped, sparse, useIds ) {
- printjson( { capped:capped, sparse:sparse, useIds:useIds } );
-
- rt = new ReplTest( "repl14tests" );
-
- m = rt.start( true ); // master
- if ( capped ) {
- m.getDB( 'd' ).createCollection( 'c', { capped:true, size:5*1024 } );
+function testWithCollectionIndexIds(capped, sparse, useIds) {
+ printjson({capped: capped, sparse: sparse, useIds: useIds});
+
+ rt = new ReplTest("repl14tests");
+
+ m = rt.start(true); // master
+ if (capped) {
+ m.getDB('d').createCollection('c', {capped: true, size: 5 * 1024});
}
- mc = m.getDB( 'd' )[ 'c' ]; // master collection
-
- mc.ensureIndex( {a:1}, {sparse:sparse} );
+ mc = m.getDB('d')['c']; // master collection
+
+ mc.ensureIndex({a: 1}, {sparse: sparse});
toInsert = {};
- if ( capped ) {
+ if (capped) {
// Add a singleton array as padding, so the push later on will not change document size.
- toInsert = {p:[1]};
- }
- if ( useIds ) { // Insert wiith an auto generated _id.
- mc.insert( toInsert );
+ toInsert = {
+ p: [1]
+ };
}
- else { // Otherwise avoid the auto generated _id.
- mc._mongo.insert( mc._fullName, toInsert, 0 );
+ if (useIds) { // Insert wiith an auto generated _id.
+ mc.insert(toInsert);
+ } else { // Otherwise avoid the auto generated _id.
+ mc._mongo.insert(mc._fullName, toInsert, 0);
}
assert.eq(mc.count(), 1);
-
- s = rt.start( false ); // slave
- sc = s.getDB( 'd' )[ 'c' ]; // slave collection
+
+ s = rt.start(false); // slave
+ sc = s.getDB('d')['c']; // slave collection
// Wait for the document to be cloned.
- assert.soon( function() { return sc.count() > 0; },"doc not replicated soon enough", 60*1000 );
-
- modifiers = {$push:{a:1}};
- if ( capped ) {
+ assert.soon(function() {
+ return sc.count() > 0;
+ }, "doc not replicated soon enough", 60 * 1000);
+
+ modifiers = {
+ $push: {a: 1}
+ };
+ if (capped) {
// Delete our singleton array to balance the new singleton array we're going to create.
- modifiers['$unset'] = {p:1};
+ modifiers['$unset'] = {
+ p: 1
+ };
}
- assert.writeOK(mc.update( {}, modifiers ));
-
+ assert.writeOK(mc.update({}, modifiers));
+
// Wait for the update to be replicated.
- assert.soon( function() { return sc.count( {a:1} ) > 0; } );
-
+ assert.soon(function() {
+ return sc.count({a: 1}) > 0;
+ });
+
rt.stop();
}
-function testWithCollectionIndex( capped, sparse ) {
- testWithCollectionIndexIds( capped, sparse, true );
- if ( capped ) {
- testWithCollectionIndexIds( capped, sparse, false );
+function testWithCollectionIndex(capped, sparse) {
+ testWithCollectionIndexIds(capped, sparse, true);
+ if (capped) {
+ testWithCollectionIndexIds(capped, sparse, false);
}
}
-function testWithCollection( capped ) {
- testWithCollectionIndex( capped, true );
- testWithCollectionIndex( capped, false );
+function testWithCollection(capped) {
+ testWithCollectionIndex(capped, true);
+ testWithCollectionIndex(capped, false);
}
function test() {
- testWithCollection( true );
- testWithCollection( false );
+ testWithCollection(true);
+ testWithCollection(false);
}
test();
diff --git a/jstests/repl/repl15.js b/jstests/repl/repl15.js
index 2545ec0b9d5..b5ec0a0ade3 100644
--- a/jstests/repl/repl15.js
+++ b/jstests/repl/repl15.js
@@ -1,39 +1,40 @@
// Test a case were an update can grow a document on master but growth is prevented on slave.
// SERVER-4939
-if ( 0 ) { // SERVER-4939
-
-function doTest( capped ) {
-
- rt = new ReplTest( "repl15tests" );
- master = rt.start( true );
- if ( capped ) {
- master.getDB( 'd' ).createCollection( 'c', { capped:true, size:5*1024 } );
+if (0) { // SERVER-4939
+
+ function doTest(capped) {
+ rt = new ReplTest("repl15tests");
+ master = rt.start(true);
+ if (capped) {
+ master.getDB('d').createCollection('c', {capped: true, size: 5 * 1024});
+ }
+ mc = master.getDB('d')['c'];
+
+ big = new Array(1000).toString();
+ // Insert a document, then make it slightly smaller.
+ mc.insert({a: big});
+ mc.update({}, {$set: {a: 'b'}});
+
+ slave = rt.start(false);
+ sc = slave.getDB('d')['c'];
+
+ // Slave will copy the smaller doc.
+ assert.soon(function() {
+ return sc.count({a: 'b'}) > 0;
+ });
+
+ // Update the primary doc to its original size.
+ mc.update({}, {$set: {a: big}});
+
+ // Wait for secondary to clone the update.
+ assert.soon(function() {
+ return sc.count({a: big}) > 0;
+ });
+
+ rt.stop();
}
- mc = master.getDB( 'd' )[ 'c' ];
-
- big = new Array( 1000 ).toString();
- // Insert a document, then make it slightly smaller.
- mc.insert( {a:big} );
- mc.update( {}, {$set:{a:'b'}} );
-
- slave = rt.start( false );
- sc = slave.getDB( 'd' )[ 'c' ];
-
- // Slave will copy the smaller doc.
- assert.soon( function() { return sc.count( {a:'b'} ) > 0; } );
-
- // Update the primary doc to its original size.
- mc.update( {}, {$set:{a:big}} );
-
- // Wait for secondary to clone the update.
- assert.soon( function() { return sc.count( {a:big} ) > 0; } );
-
- rt.stop();
-
-}
-
-doTest( false );
-doTest( true );
+ doTest(false);
+ doTest(true);
}
diff --git a/jstests/repl/repl16.js b/jstests/repl/repl16.js
index e0b5540ee95..84d0073eff8 100644
--- a/jstests/repl/repl16.js
+++ b/jstests/repl/repl16.js
@@ -1,40 +1,40 @@
// Test deduping of new documents without an _id index
// SERVER-14132
-if ( 0 ) {
-
-function doTest( insert ) {
-
- rt = new ReplTest( "repl16tests" );
- master = rt.start( true );
- master.getDB( 'd' ).createCollection( 'c', { capped:true, size:5*1024, autoIndexId:false } );
- mc = master.getDB( 'd' )[ 'c' ];
-
- insert( {_id:1} );
- insert( {_id:2} );
-
- slave = rt.start( false );
- sc = slave.getDB( 'd' )[ 'c' ];
-
- // Wait for the slave to copy the documents.
- assert.soon( function() { return sc.count() == 2; } );
-
- insert( {_id:1} );
- insert( {_id:2} );
- insert( {_id:3} );
- assert.eq( 5, mc.count() );
-
- // Wait for the slave to apply the operations.
- assert.soon( function() { return sc.count() == 5; } );
-
- rt.stop();
-
-}
-
-function insertWithIds( obj ) {
- mc.insert( obj );
-}
-
-doTest( insertWithIds );
-
+if (0) {
+ function doTest(insert) {
+ rt = new ReplTest("repl16tests");
+ master = rt.start(true);
+ master.getDB('d').createCollection('c', {capped: true, size: 5 * 1024, autoIndexId: false});
+ mc = master.getDB('d')['c'];
+
+ insert({_id: 1});
+ insert({_id: 2});
+
+ slave = rt.start(false);
+ sc = slave.getDB('d')['c'];
+
+ // Wait for the slave to copy the documents.
+ assert.soon(function() {
+ return sc.count() == 2;
+ });
+
+ insert({_id: 1});
+ insert({_id: 2});
+ insert({_id: 3});
+ assert.eq(5, mc.count());
+
+ // Wait for the slave to apply the operations.
+ assert.soon(function() {
+ return sc.count() == 5;
+ });
+
+ rt.stop();
+ }
+
+ function insertWithIds(obj) {
+ mc.insert(obj);
+ }
+
+ doTest(insertWithIds);
}
diff --git a/jstests/repl/repl17.js b/jstests/repl/repl17.js
index 651bebdaa09..cd62e6c7ac6 100644
--- a/jstests/repl/repl17.js
+++ b/jstests/repl/repl17.js
@@ -1,37 +1,40 @@
// Test collection rename during initial sync.
// SERVER-4941
-if ( 0 ) { // SERVER-4941
+if (0) { // SERVER-4941
-rt = new ReplTest( "repl17tests" );
+ rt = new ReplTest("repl17tests");
-master = rt.start( true );
-md = master.getDB( 'd' );
+ master = rt.start(true);
+ md = master.getDB('d');
-for( i = 0; i < 1000; ++i ) {
- md[ ''+i ].save( {} );
-}
+ for (i = 0; i < 1000; ++i) {
+ md['' + i].save({});
+ }
-slave = rt.start( false );
-sd = slave.getDB( 'd' );
+ slave = rt.start(false);
+ sd = slave.getDB('d');
-function checkSlaveCount( collection, expectedCount ) {
- var count = sd[ collection ].count();
- var debug = false;
- if ( debug ) {
- print( collection + ': ' + count );
+ function checkSlaveCount(collection, expectedCount) {
+ var count = sd[collection].count();
+ var debug = false;
+ if (debug) {
+ print(collection + ': ' + count);
+ }
+ return count == expectedCount;
}
- return count == expectedCount;
-}
-
-// Wait for the slave to start cloning
-assert.soon( function() { return checkSlaveCount( '0', 1 ); } );
-assert.commandWorked( md[ '999' ].renameCollection( 'renamed' ) );
+ // Wait for the slave to start cloning
+ assert.soon(function() {
+ return checkSlaveCount('0', 1);
+ });
-// Check for renamed collection on slave.
-assert.soon( function() { return checkSlaveCount( '999', 0 ) && checkSlaveCount( 'renamed', 1 ); } );
+ assert.commandWorked(md['999'].renameCollection('renamed'));
-rt.stop();
+ // Check for renamed collection on slave.
+ assert.soon(function() {
+ return checkSlaveCount('999', 0) && checkSlaveCount('renamed', 1);
+ });
+ rt.stop();
}
diff --git a/jstests/repl/repl2.js b/jstests/repl/repl2.js
index 020caa8bc71..c23c2a994c4 100644
--- a/jstests/repl/repl2.js
+++ b/jstests/repl/repl2.js
@@ -13,57 +13,58 @@
// slave will not have any data and will start an initial sync, rejecting the resync command.
// @tags: [requires_persistence]
-soonCount = function( count ) {
- assert.soon( function() {
-// print( "check count" );
-// print( "count: " + s.getDB( baseName ).z.find().count() );
- return s.getDB("foo").a.find().count() == count;
- } );
+soonCount = function(count) {
+ assert.soon(function() {
+ // print( "check count" );
+ // print( "count: " + s.getDB( baseName ).z.find().count() );
+ return s.getDB("foo").a.find().count() == count;
+ });
};
doTest = function(signal, extraOpts) {
- print("signal: "+signal);
+ print("signal: " + signal);
- var rt = new ReplTest( "repl2tests" );
+ var rt = new ReplTest("repl2tests");
// implicit small oplog makes slave get out of sync
- m = rt.start( true, { oplogSize : "1" } );
+ m = rt.start(true, {oplogSize: "1"});
s = rt.start(false, extraOpts);
am = m.getDB("foo").a;
- am.save( { _id: new ObjectId() } );
- soonCount( 1 );
- assert.eq( 0, s.getDB( "admin" ).runCommand( { "resync" : 1 } ).ok );
- rt.stop( false , signal );
+ am.save({_id: new ObjectId()});
+ soonCount(1);
+ assert.eq(0, s.getDB("admin").runCommand({"resync": 1}).ok);
+ rt.stop(false, signal);
- big = new Array( 2000 ).toString();
- for( i = 0; i < 1000; ++i )
- am.save( { _id: new ObjectId(), i: i, b: big } );
+ big = new Array(2000).toString();
+ for (i = 0; i < 1000; ++i)
+ am.save({_id: new ObjectId(), i: i, b: big});
s = rt.start(false, extraOpts, true);
- print("earliest op in master: "+tojson(m.getDB("local").oplog.$main.find().sort({$natural:1}).limit(1).next()));
- print("latest op on slave: "+tojson(s.getDB("local").sources.findOne()));
+ print("earliest op in master: " +
+ tojson(m.getDB("local").oplog.$main.find().sort({$natural: 1}).limit(1).next()));
+ print("latest op on slave: " + tojson(s.getDB("local").sources.findOne()));
- assert.soon( function() {
- var result = s.getDB( "admin" ).runCommand( { "resync" : 1 } );
- print("resync says: "+tojson(result));
+ assert.soon(function() {
+ var result = s.getDB("admin").runCommand({"resync": 1});
+ print("resync says: " + tojson(result));
return result.ok == 1;
- } );
+ });
- soonCount( 1001 );
- assert.automsg( "m.getDB( 'local' ).getCollection( 'oplog.$main' ).stats().size > 0" );
+ soonCount(1001);
+ assert.automsg("m.getDB( 'local' ).getCollection( 'oplog.$main' ).stats().size > 0");
as = s.getDB("foo").a;
- assert.eq( 1, as.find( { i: 0 } ).count() );
- assert.eq( 1, as.find( { i: 999 } ).count() );
+ assert.eq(1, as.find({i: 0}).count());
+ assert.eq(1, as.find({i: 999}).count());
- assert.eq( 0, s.getDB( "admin" ).runCommand( { "resync" : 1 } ).ok );
+ assert.eq(0, s.getDB("admin").runCommand({"resync": 1}).ok);
rt.stop();
};
-doTest(15, {"vv": null}); // SIGTERM
-doTest(9, {"vv": null, journal: null }); // SIGKILL
+doTest(15, {"vv": null}); // SIGTERM
+doTest(9, {"vv": null, journal: null}); // SIGKILL
diff --git a/jstests/repl/repl3.js b/jstests/repl/repl3.js
index 78e76347115..fb31e9208f0 100644
--- a/jstests/repl/repl3.js
+++ b/jstests/repl/repl3.js
@@ -2,15 +2,16 @@
var baseName = "jstests_repl3test";
-soonCount = function( count ) {
- assert.soon( function() {
-// print( "check count" );
-// print( "count: " + s.getDB( baseName ).z.find().count() + ", expected: " + count );
- return s.getDB( baseName ).a.find().itcount() == count;
- } );
+soonCount = function(count) {
+ assert.soon(function() {
+ // print( "check count" );
+ // print( "count: " + s.getDB( baseName ).z.find().count() + ", expected: " +
+ // count );
+ return s.getDB(baseName).a.find().itcount() == count;
+ });
};
-doTest = function (signal) {
+doTest = function(signal) {
print("repl3.js doTest(" + signal + ")");
@@ -21,15 +22,15 @@ doTest = function (signal) {
am = m.getDB(baseName).a;
- am.save({ _id: new ObjectId() });
+ am.save({_id: new ObjectId()});
soonCount(1);
rt.stop(false, signal);
big = new Array(2000).toString();
for (i = 0; i < 1000; ++i)
- am.save({ _id: new ObjectId(), i: i, b: big });
+ am.save({_id: new ObjectId(), i: i, b: big});
- s = rt.start(false, { autoresync: null }, true);
+ s = rt.start(false, {autoresync: null}, true);
// after SyncException, mongod waits 10 secs.
sleep(15000);
@@ -37,15 +38,15 @@ doTest = function (signal) {
// Need the 2 additional seconds timeout, since commands don't work on an 'allDead' node.
soonCount(1001);
as = s.getDB(baseName).a;
- assert.eq(1, as.find({ i: 0 }).count());
- assert.eq(1, as.find({ i: 999 }).count());
+ assert.eq(1, as.find({i: 0}).count());
+ assert.eq(1, as.find({i: 999}).count());
- assert.commandFailed(s.getDB("admin").runCommand({ "resync": 1 }));
+ assert.commandFailed(s.getDB("admin").runCommand({"resync": 1}));
rt.stop();
};
-doTest( 15 ); // SIGTERM
-doTest( 9 ); // SIGKILL
+doTest(15); // SIGTERM
+doTest(9); // SIGKILL
print("repl3.js OK");
diff --git a/jstests/repl/repl4.js b/jstests/repl/repl4.js
index b540e8545fe..e28986e1573 100644
--- a/jstests/repl/repl4.js
+++ b/jstests/repl/repl4.js
@@ -1,38 +1,38 @@
// Test replication 'only' mode
-soonCount = function( db, coll, count ) {
- assert.soon( function() {
- return s.getDB( db )[ coll ].find().count() == count;
- } );
+soonCount = function(db, coll, count) {
+ assert.soon(function() {
+ return s.getDB(db)[coll].find().count() == count;
+ });
};
doTest = function() {
- rt = new ReplTest( "repl4tests" );
-
- m = rt.start( true );
- s = rt.start( false, { only: "c" } );
-
- cm = m.getDB( "c" ).c;
- bm = m.getDB( "b" ).b;
-
- cm.save( { x:1 } );
- bm.save( { x:2 } );
-
- soonCount( "c", "c", 1 );
- assert.eq( 1, s.getDB( "c" ).c.findOne().x );
- sleep( 10000 );
- printjson( s.getDBNames() );
- assert.eq( -1, s.getDBNames().indexOf( "b" ) );
- assert.eq( 0, s.getDB( "b" ).b.find().count() );
-
- rt.stop( false );
-
- cm.save( { x:3 } );
- bm.save( { x:4 } );
-
- s = rt.start( false, { only: "c" }, true );
- soonCount( "c", "c", 2 );
+ rt = new ReplTest("repl4tests");
+
+ m = rt.start(true);
+ s = rt.start(false, {only: "c"});
+
+ cm = m.getDB("c").c;
+ bm = m.getDB("b").b;
+
+ cm.save({x: 1});
+ bm.save({x: 2});
+
+ soonCount("c", "c", 1);
+ assert.eq(1, s.getDB("c").c.findOne().x);
+ sleep(10000);
+ printjson(s.getDBNames());
+ assert.eq(-1, s.getDBNames().indexOf("b"));
+ assert.eq(0, s.getDB("b").b.find().count());
+
+ rt.stop(false);
+
+ cm.save({x: 3});
+ bm.save({x: 4});
+
+ s = rt.start(false, {only: "c"}, true);
+ soonCount("c", "c", 2);
};
// Disabled because of SERVER-10344
diff --git a/jstests/repl/repl5.js b/jstests/repl/repl5.js
index 91834637947..514d1560376 100644
--- a/jstests/repl/repl5.js
+++ b/jstests/repl/repl5.js
@@ -1,34 +1,34 @@
// Test auto reclone after failed initial clone
-soonCountAtLeast = function( db, coll, count ) {
- assert.soon( function() {
-// print( "count: " + s.getDB( db )[ coll ].find().count() );
- return s.getDB( db )[ coll ].find().itcount() >= count;
- } );
+soonCountAtLeast = function(db, coll, count) {
+ assert.soon(function() {
+ // print( "count: " + s.getDB( db )[ coll ].find().count() );
+ return s.getDB(db)[coll].find().itcount() >= count;
+ });
};
doTest = function(signal, extraOpts) {
- rt = new ReplTest( "repl5tests" );
-
- m = rt.start( true );
-
- ma = m.getDB( "a" ).a;
+ rt = new ReplTest("repl5tests");
+
+ m = rt.start(true);
+
+ ma = m.getDB("a").a;
var bulk = ma.initializeUnorderedBulkOp();
- for( i = 0; i < 10000; ++i )
- bulk.insert({ i: i });
+ for (i = 0; i < 10000; ++i)
+ bulk.insert({i: i});
assert.writeOK(bulk.execute());
-
+
s = rt.start(false, extraOpts);
- soonCountAtLeast( "a", "a", 1 );
- rt.stop( false, signal );
+ soonCountAtLeast("a", "a", 1);
+ rt.stop(false, signal);
s = rt.start(false, extraOpts, true);
- sleep( 1000 );
- soonCountAtLeast( "a", "a", 10000 );
+ sleep(1000);
+ soonCountAtLeast("a", "a", 10000);
rt.stop();
};
-doTest( 15 ); // SIGTERM
-doTest(9, { journal: null }); // SIGKILL
+doTest(15); // SIGTERM
+doTest(9, {journal: null}); // SIGKILL
diff --git a/jstests/repl/repl6.js b/jstests/repl/repl6.js
index a26a7be94e5..c9ccdbdd559 100644
--- a/jstests/repl/repl6.js
+++ b/jstests/repl/repl6.js
@@ -8,75 +8,75 @@
var baseName = "jstests_repl6test";
-soonCount = function( m, count ) {
- assert.soon( function() {
- return m.getDB( baseName ).a.find().count() == count;
- }, "expected count: " + count + " from : " + m );
+soonCount = function(m, count) {
+ assert.soon(function() {
+ return m.getDB(baseName).a.find().count() == count;
+ }, "expected count: " + count + " from : " + m);
};
-doTest = function( signal ) {
-
- ports = allocatePorts( 3 );
-
- ms1 = new ReplTest( "repl6tests-1", [ ports[ 0 ], ports[ 1 ] ] );
- ms2 = new ReplTest( "repl6tests-2", [ ports[ 0 ], ports[ 2 ] ] );
-
- m = ms1.start( true );
- s1 = ms1.start( false );
- s2 = ms2.start( false );
-
- am = m.getDB( baseName ).a;
-
- for( i = 0; i < 1000; ++i )
- am.save( { _id: new ObjectId(), i: i } );
-
- soonCount( s1, 1000 );
- soonCount( s2, 1000 );
-
- check = function( as ) {
- assert.eq( 1, as.find( { i: 0 } ).count() );
- assert.eq( 1, as.find( { i: 999 } ).count() );
+doTest = function(signal) {
+
+ ports = allocatePorts(3);
+
+ ms1 = new ReplTest("repl6tests-1", [ports[0], ports[1]]);
+ ms2 = new ReplTest("repl6tests-2", [ports[0], ports[2]]);
+
+ m = ms1.start(true);
+ s1 = ms1.start(false);
+ s2 = ms2.start(false);
+
+ am = m.getDB(baseName).a;
+
+ for (i = 0; i < 1000; ++i)
+ am.save({_id: new ObjectId(), i: i});
+
+ soonCount(s1, 1000);
+ soonCount(s2, 1000);
+
+ check = function(as) {
+ assert.eq(1, as.find({i: 0}).count());
+ assert.eq(1, as.find({i: 999}).count());
};
-
- as = s1.getDB( baseName ).a;
- check( as );
- as = s2.getDB( baseName ).a;
- check( as );
- ms1.stop( false, signal );
- ms2.stop( false, signal );
-
- for( i = 1000; i < 1010; ++i )
- am.save( { _id: new ObjectId(), i: i } );
+ as = s1.getDB(baseName).a;
+ check(as);
+ as = s2.getDB(baseName).a;
+ check(as);
+
+ ms1.stop(false, signal);
+ ms2.stop(false, signal);
+
+ for (i = 1000; i < 1010; ++i)
+ am.save({_id: new ObjectId(), i: i});
- s1 = ms1.start( false, null, true );
- soonCount( s1, 1010 );
- as = s1.getDB( baseName ).a;
- assert.eq( 1, as.find( { i: 1009 } ).count() );
+ s1 = ms1.start(false, null, true);
+ soonCount(s1, 1010);
+ as = s1.getDB(baseName).a;
+ assert.eq(1, as.find({i: 1009}).count());
- ms1.stop( true, signal );
+ ms1.stop(true, signal);
// Need to pause here on Windows, since killing processes does not synchronously close their
// open file handles.
sleep(5000);
- m = ms1.start( true, null, true );
- am = m.getDB( baseName ).a;
-
- for( i = 1010; i < 1020; ++i )
- am.save( { _id: new ObjectId(), i: i } );
-
- soonCount( s1, 1020 );
- assert.eq( 1, as.find( { i: 1019 } ).count() );
-
- s2 = ms2.start( false, null, true );
- soonCount( s2, 1020 );
- as = s2.getDB( baseName ).a;
- assert.eq( 1, as.find( { i: 1009 } ).count() );
- assert.eq( 1, as.find( { i: 1019 } ).count() );
+ m = ms1.start(true, null, true);
+ am = m.getDB(baseName).a;
+
+ for (i = 1010; i < 1020; ++i)
+ am.save({_id: new ObjectId(), i: i});
+
+ soonCount(s1, 1020);
+ assert.eq(1, as.find({i: 1019}).count());
+
+ s2 = ms2.start(false, null, true);
+ soonCount(s2, 1020);
+ as = s2.getDB(baseName).a;
+ assert.eq(1, as.find({i: 1009}).count());
+ assert.eq(1, as.find({i: 1019}).count());
ms1.stop();
- ms2.stop( false );
+ ms2.stop(false);
};
-doTest( 15 ); // SIGTERM
+doTest(15); // SIGTERM
diff --git a/jstests/repl/repl7.js b/jstests/repl/repl7.js
index 2907acc087c..790aef03420 100644
--- a/jstests/repl/repl7.js
+++ b/jstests/repl/repl7.js
@@ -11,44 +11,45 @@ var getDBNamesNoThrow = function(conn) {
doTest = function(signal, extraOpts) {
- rt = new ReplTest( "repl7tests" );
-
- m = rt.start( true );
+ rt = new ReplTest("repl7tests");
- for( n = "a"; n != "aaaaa"; n += "a" ) {
- m.getDB( n ).a.save( {x:1} );
+ m = rt.start(true);
+
+ for (n = "a"; n != "aaaaa"; n += "a") {
+ m.getDB(n).a.save({x: 1});
}
s = rt.start(false, extraOpts);
-
- assert.soon( function() {
- return -1 != getDBNamesNoThrow(s).indexOf( "aa" );
- }, "aa timeout", 60000, 1000 );
-
- rt.stop( false, signal );
+
+ assert.soon(function() {
+ return -1 != getDBNamesNoThrow(s).indexOf("aa");
+ }, "aa timeout", 60000, 1000);
+
+ rt.stop(false, signal);
s = rt.start(false, extraOpts, signal);
-
- assert.soon( function() {
- for( n = "a"; n != "aaaaa"; n += "a" ) {
- if ( -1 == getDBNamesNoThrow(s).indexOf( n ) )
- return false;
- }
- return true;
- }, "a-aaaa timeout", 60000, 1000 );
-
- assert.soon( function() {
- for( n = "a"; n != "aaaaa"; n += "a" ) {
- if ( 1 != m.getDB( n ).a.find().count() ) {
- return false;
- }
- }
- return true; }, "a-aaaa count timeout" );
-
- sleep( 300 );
-
+
+ assert.soon(function() {
+ for (n = "a"; n != "aaaaa"; n += "a") {
+ if (-1 == getDBNamesNoThrow(s).indexOf(n))
+ return false;
+ }
+ return true;
+ }, "a-aaaa timeout", 60000, 1000);
+
+ assert.soon(function() {
+ for (n = "a"; n != "aaaaa"; n += "a") {
+ if (1 != m.getDB(n).a.find().count()) {
+ return false;
+ }
+ }
+ return true;
+ }, "a-aaaa count timeout");
+
+ sleep(300);
+
rt.stop();
};
-doTest( 15 ); // SIGTERM
-doTest(9, { journal: null }); // SIGKILL
+doTest(15); // SIGTERM
+doTest(9, {journal: null}); // SIGKILL
diff --git a/jstests/repl/repl8.js b/jstests/repl/repl8.js
index 64e65cc0e16..e9ddba94729 100644
--- a/jstests/repl/repl8.js
+++ b/jstests/repl/repl8.js
@@ -2,29 +2,42 @@
baseName = "jstests_repl_repl8";
-rt = new ReplTest( "repl8tests" );
-
-m = rt.start( true );
-
-m.getDB( baseName ).createCollection( "first", {capped:true,size:1000} );
-assert( m.getDB( baseName ).getCollection( "first" ).isCapped() );
-
-s = rt.start( false );
-
-assert.soon( function() { return s.getDB( baseName ).getCollection( "first" ).isCapped(); } );
-
-m.getDB( baseName ).createCollection( "second", {capped:true,size:1000} );
-assert.soon( function() { return s.getDB( baseName ).getCollection( "second" ).isCapped(); } );
-
-m.getDB( baseName ).getCollection( "third" ).save( { a: 1 } );
-assert.soon( function() { return s.getDB( baseName ).getCollection( "third" ).exists(); } );
-assert.commandWorked( m.getDB( "admin" ).runCommand( {renameCollection:"jstests_repl_repl8.third", to:"jstests_repl_repl8.third_rename"} ) );
-assert( m.getDB( baseName ).getCollection( "third_rename" ).exists() );
-assert( !m.getDB( baseName ).getCollection( "third" ).exists() );
-assert.soon( function() { return s.getDB( baseName ).getCollection( "third_rename" ).exists(); } );
-assert.soon( function() { return !s.getDB( baseName ).getCollection( "third" ).exists(); } );
-
-m.getDB( baseName ).getCollection( "fourth" ).save( {a:1} );
-assert.commandWorked( m.getDB( baseName ).getCollection( "fourth" ).convertToCapped( 1000 ) );
-assert( m.getDB( baseName ).getCollection( "fourth" ).isCapped() );
-assert.soon( function() { return s.getDB( baseName ).getCollection( "fourth" ).isCapped(); } );
+rt = new ReplTest("repl8tests");
+
+m = rt.start(true);
+
+m.getDB(baseName).createCollection("first", {capped: true, size: 1000});
+assert(m.getDB(baseName).getCollection("first").isCapped());
+
+s = rt.start(false);
+
+assert.soon(function() {
+ return s.getDB(baseName).getCollection("first").isCapped();
+});
+
+m.getDB(baseName).createCollection("second", {capped: true, size: 1000});
+assert.soon(function() {
+ return s.getDB(baseName).getCollection("second").isCapped();
+});
+
+m.getDB(baseName).getCollection("third").save({a: 1});
+assert.soon(function() {
+ return s.getDB(baseName).getCollection("third").exists();
+});
+assert.commandWorked(m.getDB("admin").runCommand(
+ {renameCollection: "jstests_repl_repl8.third", to: "jstests_repl_repl8.third_rename"}));
+assert(m.getDB(baseName).getCollection("third_rename").exists());
+assert(!m.getDB(baseName).getCollection("third").exists());
+assert.soon(function() {
+ return s.getDB(baseName).getCollection("third_rename").exists();
+});
+assert.soon(function() {
+ return !s.getDB(baseName).getCollection("third").exists();
+});
+
+m.getDB(baseName).getCollection("fourth").save({a: 1});
+assert.commandWorked(m.getDB(baseName).getCollection("fourth").convertToCapped(1000));
+assert(m.getDB(baseName).getCollection("fourth").isCapped());
+assert.soon(function() {
+ return s.getDB(baseName).getCollection("fourth").isCapped();
+});
diff --git a/jstests/repl/repl9.js b/jstests/repl/repl9.js
index 61f52d377e9..4e50a657809 100644
--- a/jstests/repl/repl9.js
+++ b/jstests/repl/repl9.js
@@ -2,47 +2,54 @@
baseName = "jstests_repl_repl9";
-rt = new ReplTest( "repl9tests" );
+rt = new ReplTest("repl9tests");
-m = rt.start( true );
-s = rt.start( false );
+m = rt.start(true);
+s = rt.start(false);
-admin = m.getDB( "admin" );
+admin = m.getDB("admin");
-debug = function( foo ) {}; // print( foo ); }
+debug = function(foo) {}; // print( foo ); }
// rename within db
-m.getDB( baseName ).one.save( { a: 1 } );
-assert.soon( function() { v = s.getDB( baseName ).one.findOne(); return v && 1 == v.a; } );
-
-assert.commandWorked( admin.runCommand( {renameCollection:"jstests_repl_repl9.one", to:"jstests_repl_repl9.two"} ) );
-assert.soon( function() {
- if ( -1 == s.getDB( baseName ).getCollectionNames().indexOf( "two" ) ) {
- debug( "no two coll" );
- debug( tojson( s.getDB( baseName ).getCollectionNames() ) );
- return false;
- }
- if ( !s.getDB( baseName ).two.findOne() ) {
- debug( "no two object" );
- return false;
- }
- return 1 == s.getDB( baseName ).two.findOne().a; });
-assert.eq( -1, s.getDB( baseName ).getCollectionNames().indexOf( "one" ) );
+m.getDB(baseName).one.save({a: 1});
+assert.soon(function() {
+ v = s.getDB(baseName).one.findOne();
+ return v && 1 == v.a;
+});
+
+assert.commandWorked(
+ admin.runCommand({renameCollection: "jstests_repl_repl9.one", to: "jstests_repl_repl9.two"}));
+assert.soon(function() {
+ if (-1 == s.getDB(baseName).getCollectionNames().indexOf("two")) {
+ debug("no two coll");
+ debug(tojson(s.getDB(baseName).getCollectionNames()));
+ return false;
+ }
+ if (!s.getDB(baseName).two.findOne()) {
+ debug("no two object");
+ return false;
+ }
+ return 1 == s.getDB(baseName).two.findOne().a;
+});
+assert.eq(-1, s.getDB(baseName).getCollectionNames().indexOf("one"));
// rename to new db
first = baseName + "_first";
second = baseName + "_second";
-m.getDB( first ).one.save( { a: 1 } );
-assert.soon( function() { return s.getDB( first ).one.findOne() && 1 == s.getDB( first ).one.findOne().a; } );
-
-assert.commandWorked( admin.runCommand( {renameCollection:"jstests_repl_repl9_first.one", to:"jstests_repl_repl9_second.two"} ) );
-assert.soon( function() {
- return -1 != s.getDBNames().indexOf( second ) &&
- -1 != s.getDB( second ).getCollectionNames().indexOf( "two" ) &&
- s.getDB( second ).two.findOne() &&
- 1 == s.getDB( second ).two.findOne().a; } );
-assert.eq( -1, s.getDB( first ).getCollectionNames().indexOf( "one" ) );
-
+m.getDB(first).one.save({a: 1});
+assert.soon(function() {
+ return s.getDB(first).one.findOne() && 1 == s.getDB(first).one.findOne().a;
+});
+
+assert.commandWorked(admin.runCommand(
+ {renameCollection: "jstests_repl_repl9_first.one", to: "jstests_repl_repl9_second.two"}));
+assert.soon(function() {
+ return -1 != s.getDBNames().indexOf(second) &&
+ -1 != s.getDB(second).getCollectionNames().indexOf("two") &&
+ s.getDB(second).two.findOne() && 1 == s.getDB(second).two.findOne().a;
+});
+assert.eq(-1, s.getDB(first).getCollectionNames().indexOf("one"));
diff --git a/jstests/repl/repl_sync_only_db_with_special_chars.js b/jstests/repl/repl_sync_only_db_with_special_chars.js
index 68089e9db9a..1daaf1abe85 100644
--- a/jstests/repl/repl_sync_only_db_with_special_chars.js
+++ b/jstests/repl/repl_sync_only_db_with_special_chars.js
@@ -1,21 +1,21 @@
doTest = function() {
-var rt = new ReplTest( "repl_sync_only_db_with_special_chars" );
-var normalDB = "abc";
-var specialDB = "[a-z]+";
-var master = rt.start( true );
-var slave = rt.start( false, { only: specialDB } );
+ var rt = new ReplTest("repl_sync_only_db_with_special_chars");
+ var normalDB = "abc";
+ var specialDB = "[a-z]+";
+ var master = rt.start(true);
+ var slave = rt.start(false, {only: specialDB});
-master.getDB( normalDB ).data.save( { a: 1 } );
-master.getDB( specialDB ).data.save( { z: 1 } );
+ master.getDB(normalDB).data.save({a: 1});
+ master.getDB(specialDB).data.save({z: 1});
-assert.soon( function() {
- var normalDocs = slave.getDB( normalDB ).data.find().count();
- var specialDocs = slave.getDB( specialDB ).data.find().count();
+ assert.soon(function() {
+ var normalDocs = slave.getDB(normalDB).data.find().count();
+ var specialDocs = slave.getDB(specialDB).data.find().count();
- return normalDocs == 0 && specialDocs == 1;
-}, "Failed to only sync to " + specialDB );
+ return normalDocs == 0 && specialDocs == 1;
+ }, "Failed to only sync to " + specialDB);
};
diff --git a/jstests/repl/snapshot1.js b/jstests/repl/snapshot1.js
index 0d3313dad97..8db8e232e3b 100644
--- a/jstests/repl/snapshot1.js
+++ b/jstests/repl/snapshot1.js
@@ -5,35 +5,41 @@
// work. It also requires the fsync command to enduce replication lag.
// @tags: [requires_persistence, requires_fsync]
-ports = allocatePorts( 3 );
+ports = allocatePorts(3);
var baseName = "repl_snapshot1";
-rt1 = new ReplTest( "repl_snapshot1-1", [ ports[ 0 ], ports[ 1 ] ] );
-rt2 = new ReplTest( "repl_snapshot1-2", [ ports[ 0 ], ports[ 2 ] ] );
-m = rt1.start( true );
-
-big = new Array( 2000 ).toString();
-for( i = 0; i < 1000; ++i )
- m.getDB( baseName )[ baseName ].save( { _id: new ObjectId(), i: i, b: big } );
-
-m.getDB( "admin" ).runCommand( {fsync:1,lock:1} );
-copyDbpath( rt1.getPath( true ), rt1.getPath( false ) );
-m.getDB( "admin" ).fsyncUnlock();
-
-s1 = rt1.start( false, null, true );
-assert.eq( 1000, s1.getDB( baseName )[ baseName ].count() );
-m.getDB( baseName )[ baseName ].save( {i:1000} );
-assert.soon( function() { return 1001 == s1.getDB( baseName )[ baseName ].count(); } );
-
-s1.getDB( "admin" ).runCommand( {fsync:1,lock:1} );
-copyDbpath( rt1.getPath( false ), rt2.getPath( false ) );
-s1.getDB( "admin" ).fsyncUnlock();
-
-s2 = rt2.start( false, null, true );
-assert.eq( 1001, s2.getDB( baseName )[ baseName ].count() );
-m.getDB( baseName )[ baseName ].save( {i:1001} );
-assert.soon( function() { return 1002 == s2.getDB( baseName )[ baseName ].count(); } );
-assert.soon( function() { return 1002 == s1.getDB( baseName )[ baseName ].count(); } );
-
-assert( !rawMongoProgramOutput().match( /resync/ ) );
+rt1 = new ReplTest("repl_snapshot1-1", [ports[0], ports[1]]);
+rt2 = new ReplTest("repl_snapshot1-2", [ports[0], ports[2]]);
+m = rt1.start(true);
+
+big = new Array(2000).toString();
+for (i = 0; i < 1000; ++i)
+ m.getDB(baseName)[baseName].save({_id: new ObjectId(), i: i, b: big});
+
+m.getDB("admin").runCommand({fsync: 1, lock: 1});
+copyDbpath(rt1.getPath(true), rt1.getPath(false));
+m.getDB("admin").fsyncUnlock();
+
+s1 = rt1.start(false, null, true);
+assert.eq(1000, s1.getDB(baseName)[baseName].count());
+m.getDB(baseName)[baseName].save({i: 1000});
+assert.soon(function() {
+ return 1001 == s1.getDB(baseName)[baseName].count();
+});
+
+s1.getDB("admin").runCommand({fsync: 1, lock: 1});
+copyDbpath(rt1.getPath(false), rt2.getPath(false));
+s1.getDB("admin").fsyncUnlock();
+
+s2 = rt2.start(false, null, true);
+assert.eq(1001, s2.getDB(baseName)[baseName].count());
+m.getDB(baseName)[baseName].save({i: 1001});
+assert.soon(function() {
+ return 1002 == s2.getDB(baseName)[baseName].count();
+});
+assert.soon(function() {
+ return 1002 == s1.getDB(baseName)[baseName].count();
+});
+
+assert(!rawMongoProgramOutput().match(/resync/));
diff --git a/jstests/replsets/apply_batch_only_goes_forward.js b/jstests/replsets/apply_batch_only_goes_forward.js
index e380c981566..4744fcf4342 100644
--- a/jstests/replsets/apply_batch_only_goes_forward.js
+++ b/jstests/replsets/apply_batch_only_goes_forward.js
@@ -35,7 +35,10 @@
var sLocal = slave.getDB("local");
var sMinvalid = sLocal["replset.minvalid"];
var stepDownSecs = 30;
- var stepDownCmd = {replSetStepDown: stepDownSecs, force: true};
+ var stepDownCmd = {
+ replSetStepDown: stepDownSecs,
+ force: true
+ };
// Write op
assert.writeOK(mTest.foo.save({}, {writeConcern: {w: 3}}));
@@ -44,21 +47,20 @@
// Set minvalid to something far in the future for the current primary, to simulate recovery.
// Note: This is so far in the future (5 days) that it will never become secondary.
- var farFutureTS = new Timestamp(Math.floor(new Date().getTime()/1000) +
- (60 * 60 * 24 * 5 /* in five days*/), 0);
+ var farFutureTS = new Timestamp(
+ Math.floor(new Date().getTime() / 1000) + (60 * 60 * 24 * 5 /* in five days*/), 0);
var rsgs = assert.commandWorked(mLocal.adminCommand("replSetGetStatus"));
- var primaryOpTime = rsgs.members.filter( function (member) {
- return member.self;}
- )[0].optime;
+ var primaryOpTime = rsgs.members.filter(function(member) {
+ return member.self;
+ })[0].optime;
jsTest.log("future TS: " + tojson(farFutureTS) + ", date:" + tsToDate(farFutureTS));
// We do an update in case there is a minvalid document on the primary already.
// If the doc doesn't exist then upsert:true will create it, and the writeConcern ensures
// that update returns details of the write, like whether an update or insert was performed.
- printjson(assert.writeOK(mMinvalid.update({},
- { ts: farFutureTS,
- t: NumberLong(-1),
- begin: primaryOpTime},
- { upsert: true, writeConcern: {w: 1}})));
+ printjson(
+ assert.writeOK(mMinvalid.update({},
+ {ts: farFutureTS, t: NumberLong(-1), begin: primaryOpTime},
+ {upsert: true, writeConcern: {w: 1}})));
jsTest.log("restart primary");
replTest.restart(master);
@@ -70,9 +72,13 @@
assert.soon(function() {
var mv;
- try {mv = mMinvalid.findOne();} catch (e) { return false; }
- var msg = "ts !=, " + farFutureTS +
- "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv) + " - " + tsToDate(mv.ts);
+ try {
+ mv = mMinvalid.findOne();
+ } catch (e) {
+ return false;
+ }
+ var msg = "ts !=, " + farFutureTS + "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv) +
+ " - " + tsToDate(mv.ts);
assert.eq(farFutureTS, mv.ts, msg);
return true;
});
diff --git a/jstests/replsets/apply_ops_lastop.js b/jstests/replsets/apply_ops_lastop.js
index 7a0faadd72b..1e7df9a9035 100644
--- a/jstests/replsets/apply_ops_lastop.js
+++ b/jstests/replsets/apply_ops_lastop.js
@@ -3,66 +3,58 @@
// lastOp is used as the optime to wait for when write concern waits for replication.
//
-(function () {
-"use strict";
-
-var rs = new ReplSetTest({name: "applyOpsOptimeTest", nodes: 3});
-rs.startSet();
-var nodes = rs.nodeList();
-rs.initiate({"_id": "applyOpsOptimeTest",
- "members": [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true} ]});
-var primary = rs.getPrimary();
-var db = primary.getDB('foo');
-var coll = primary.getCollection('foo.bar');
-// Two connections
-var m1 = new Mongo(primary.host);
-var m2 = new Mongo(primary.host);
-
-var insertApplyOps = [
- {
- op: "i",
- ns: 'foo.bar',
- o: { _id: 1, a: "b" }
- }
- ];
-var deleteApplyOps = [
- {
- op: "d",
- ns: 'foo.bar',
- o: { _id: 1, a: "b" }
- }
- ];
-var badPreCondition = [
- {
- ns: 'foo.bar',
- q: { _id: 10, a: "aaa" },
- res: { a: "aaa" }
- }
- ];
-var majorityWriteConcern = { w: 'majority', wtimeout: 30000 };
-
-// Set up some data
-assert.writeOK(coll.insert({x: 1})); // creating the collection so applyOps works
-assert.commandWorked(m1.getDB('foo').runCommand({ applyOps: insertApplyOps,
- writeConcern: majorityWriteConcern }));
-var insertOp = m1.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
-
-// No-op applyOps
-var res = m2.getDB('foo').runCommand({ applyOps: deleteApplyOps,
- preCondition: badPreCondition,
- writeConcern: majorityWriteConcern });
-assert.commandFailed(res, "The applyOps command was expected to fail, but instead succeeded.");
-assert.eq(res.errmsg, "pre-condition failed", "The applyOps command failed for the wrong reason.");
-var noOp = m2.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
-
-// Check that each connection has the same last optime
-assert.eq(noOp, insertOp, "The connections' last optimes do " +
- "not match: applyOps failed to update lastop on no-op");
-
-rs.stopSet();
+(function() {
+ "use strict";
+
+ var rs = new ReplSetTest({name: "applyOpsOptimeTest", nodes: 3});
+ rs.startSet();
+ var nodes = rs.nodeList();
+ rs.initiate({
+ "_id": "applyOpsOptimeTest",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+ });
+ var primary = rs.getPrimary();
+ var db = primary.getDB('foo');
+ var coll = primary.getCollection('foo.bar');
+ // Two connections
+ var m1 = new Mongo(primary.host);
+ var m2 = new Mongo(primary.host);
+
+ var insertApplyOps = [{op: "i", ns: 'foo.bar', o: {_id: 1, a: "b"}}];
+ var deleteApplyOps = [{op: "d", ns: 'foo.bar', o: {_id: 1, a: "b"}}];
+ var badPreCondition = [{ns: 'foo.bar', q: {_id: 10, a: "aaa"}, res: {a: "aaa"}}];
+ var majorityWriteConcern = {
+ w: 'majority',
+ wtimeout: 30000
+ };
+
+ // Set up some data
+ assert.writeOK(coll.insert({x: 1})); // creating the collection so applyOps works
+ assert.commandWorked(m1.getDB('foo').runCommand(
+ {applyOps: insertApplyOps, writeConcern: majorityWriteConcern}));
+ var insertOp = m1.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
+
+ // No-op applyOps
+ var res = m2.getDB('foo').runCommand({
+ applyOps: deleteApplyOps,
+ preCondition: badPreCondition,
+ writeConcern: majorityWriteConcern
+ });
+ assert.commandFailed(res, "The applyOps command was expected to fail, but instead succeeded.");
+ assert.eq(
+ res.errmsg, "pre-condition failed", "The applyOps command failed for the wrong reason.");
+ var noOp = m2.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
+
+ // Check that each connection has the same last optime
+ assert.eq(noOp,
+ insertOp,
+ "The connections' last optimes do " +
+ "not match: applyOps failed to update lastop on no-op");
+
+ rs.stopSet();
})();
-
diff --git a/jstests/replsets/apply_ops_wc.js b/jstests/replsets/apply_ops_wc.js
index cb50b9b9070..0b8a49e19bd 100644
--- a/jstests/replsets/apply_ops_wc.js
+++ b/jstests/replsets/apply_ops_wc.js
@@ -12,7 +12,7 @@
(function() {
"use strict";
var nodeCount = 3;
- var replTest = new ReplSetTest({ name: 'applyOpsWCSet', nodes: nodeCount});
+ var replTest = new ReplSetTest({name: 'applyOpsWCSet', nodes: nodeCount});
replTest.startSet();
var cfg = replTest.getReplSetConfig();
cfg.settings = {};
@@ -34,32 +34,13 @@
dropTestCollection();
// Set up the applyOps command.
- var applyOpsReq = { applyOps: [
- {
- op: "i",
- ns: coll.getFullName(),
- o: {
- _id: 2,
- x: "b"
- }
- },
- {
- op: "i",
- ns: coll.getFullName(),
- o: {
- _id: 3,
- x: "c"
- }
- },
- {
- op: "i",
- ns: coll.getFullName(),
- o: {
- _id: 4,
- x: "d"
- }
- },
- ]};
+ var applyOpsReq = {
+ applyOps: [
+ {op: "i", ns: coll.getFullName(), o: {_id: 2, x: "b"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 3, x: "c"}},
+ {op: "i", ns: coll.getFullName(), o: {_id: 4, x: "d"}},
+ ]
+ };
function assertApplyOpsCommandWorked(res) {
assert.eq(3, res.applied);
@@ -73,10 +54,7 @@
assert(res.writeConcernError.errmsg);
}
- var invalidWriteConcerns = [
- { w: 'invalid' },
- { w: nodeCount + 1 }
- ];
+ var invalidWriteConcerns = [{w: 'invalid'}, {w: nodeCount + 1}];
function testInvalidWriteConcern(wc) {
jsTest.log("Testing invalid write concern " + tojson(wc));
@@ -85,28 +63,24 @@
var res = coll.runCommand(applyOpsReq);
assertApplyOpsCommandWorked(res);
assertWriteConcernError(res);
-
}
// Verify that invalid write concerns yield an error.
- coll.insert({ _id: 1, x: "a" });
+ coll.insert({_id: 1, x: "a"});
invalidWriteConcerns.forEach(testInvalidWriteConcern);
var secondaries = replTest.getSecondaries();
- var majorityWriteConcerns = [
- { w: 2, wtimeout: 30000 },
- { w: 'majority', wtimeout: 30000 },
- ];
+ var majorityWriteConcerns = [{w: 2, wtimeout: 30000}, {w: 'majority', wtimeout: 30000}, ];
function testMajorityWriteConcerns(wc) {
jsTest.log("Testing " + tojson(wc));
// Reset secondaries to ensure they can replicate.
- secondaries[0].getDB('admin').runCommand({ configureFailPoint: 'rsSyncApplyStop',
- mode: 'off' });
- secondaries[1].getDB('admin').runCommand({ configureFailPoint: 'rsSyncApplyStop',
- mode: 'off' });
+ secondaries[0].getDB('admin').runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+ secondaries[1].getDB('admin').runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
// Set the writeConcern of the applyOps command.
applyOpsReq.writeConcern = wc;
@@ -114,36 +88,37 @@
dropTestCollection();
// applyOps with a full replica set should succeed.
- coll.insert({ _id: 1, x: "a" });
+ coll.insert({_id: 1, x: "a"});
var res = db.runCommand(applyOpsReq);
assertApplyOpsCommandWorked(res);
- assert(!res.writeConcernError, 'applyOps on a full replicaset had writeConcern error ' +
- tojson(res.writeConcernError));
+ assert(!res.writeConcernError,
+ 'applyOps on a full replicaset had writeConcern error ' +
+ tojson(res.writeConcernError));
dropTestCollection();
// Stop replication at one secondary.
- secondaries[0].getDB('admin').runCommand({ configureFailPoint: 'rsSyncApplyStop',
- mode: 'alwaysOn' });
+ secondaries[0].getDB('admin').runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
// applyOps should succeed with only 1 node not replicating.
- coll.insert({ _id: 1, x: "a" });
+ coll.insert({_id: 1, x: "a"});
res = db.runCommand(applyOpsReq);
assertApplyOpsCommandWorked(res);
assert(!res.writeConcernError,
- 'applyOps on a replicaset with 2 working nodes had writeConcern error ' +
- tojson(res.writeConcernError));
+ 'applyOps on a replicaset with 2 working nodes had writeConcern error ' +
+ tojson(res.writeConcernError));
dropTestCollection();
// Stop replication at a second secondary.
- secondaries[1].getDB('admin').runCommand({ configureFailPoint: 'rsSyncApplyStop',
- mode: 'alwaysOn' });
+ secondaries[1].getDB('admin').runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
// applyOps should fail after two nodes have stopped replicating.
- coll.insert({ _id: 1, x: "a" });
+ coll.insert({_id: 1, x: "a"});
applyOpsReq.writeConcern.wtimeout = 5000;
res = db.runCommand(applyOpsReq);
diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js
index b665eec2d90..d41ef9ba5ef 100644
--- a/jstests/replsets/auth1.js
+++ b/jstests/replsets/auth1.js
@@ -10,30 +10,31 @@ var port = allocatePorts(5);
var path = "jstests/libs/";
// These keyFiles have their permissions set to 600 later in the test.
-var key1_600 = path+"key1";
-var key2_600 = path+"key2";
+var key1_600 = path + "key1";
+var key2_600 = path + "key2";
// This keyFile has its permissions set to 644 later in the test.
-var key1_644 = path+"key1_644";
+var key1_644 = path + "key1_644";
print("try starting mongod with auth");
-var m = MongoRunner.runMongod({auth : "", port : port[4], dbpath : MongoRunner.dataDir + "/wrong-auth"});
+var m =
+ MongoRunner.runMongod({auth: "", port: port[4], dbpath: MongoRunner.dataDir + "/wrong-auth"});
assert.eq(m.getDB("local").auth("__system", ""), 0);
MongoRunner.stopMongod(m);
-
print("reset permissions");
run("chmod", "644", key1_644);
-
print("try starting mongod");
-m = runMongoProgram( "mongod", "--keyFile", key1_644, "--port", port[0], "--dbpath", MongoRunner.dataPath + name);
-
+m = runMongoProgram(
+ "mongod", "--keyFile", key1_644, "--port", port[0], "--dbpath", MongoRunner.dataPath + name);
print("should fail with wrong permissions");
-assert.eq(m, _isWindows()? 100 : 1, "mongod should exit w/ 1 (EXIT_FAILURE): permissions too open");
+assert.eq(m,
+ _isWindows() ? 100 : 1,
+ "mongod should exit w/ 1 (EXIT_FAILURE): permissions too open");
MongoRunner.stopMongod(port[0]);
print("add a user to server0: foo");
@@ -44,27 +45,27 @@ print("make sure user is written before shutting down");
MongoRunner.stopMongod(m);
print("start up rs");
-var rs = new ReplSetTest({"name" : name, "nodes" : 3});
+var rs = new ReplSetTest({"name": name, "nodes": 3});
print("restart 0 with keyFile");
-m = rs.restart(0, {"keyFile" : key1_600});
+m = rs.restart(0, {"keyFile": key1_600});
print("restart 1 with keyFile");
-rs.start(1, {"keyFile" : key1_600});
+rs.start(1, {"keyFile": key1_600});
print("restart 2 with keyFile");
-rs.start(2, {"keyFile" : key1_600});
+rs.start(2, {"keyFile": key1_600});
var result = m.getDB("admin").auth("foo", "bar");
assert.eq(result, 1, "login failed");
print("Initializing replSet with config: " + tojson(rs.getReplSetConfig()));
-result = m.getDB("admin").runCommand({replSetInitiate : rs.getReplSetConfig()});
-assert.eq(result.ok, 1, "couldn't initiate: "+tojson(result));
-m.getDB('admin').logout(); // In case this node doesn't become primary, make sure its not auth'd
+result = m.getDB("admin").runCommand({replSetInitiate: rs.getReplSetConfig()});
+assert.eq(result.ok, 1, "couldn't initiate: " + tojson(result));
+m.getDB('admin').logout(); // In case this node doesn't become primary, make sure its not auth'd
var master = rs.getPrimary();
rs.awaitSecondaryNodes();
var mId = rs.getNodeId(master);
var slave = rs.liveNodes.slaves[0];
assert.eq(1, master.getDB("admin").auth("foo", "bar"));
-assert.writeOK(master.getDB("test").foo.insert({ x: 1 }, { writeConcern: { w:3, wtimeout:60000 }}));
+assert.writeOK(master.getDB("test").foo.insert({x: 1}, {writeConcern: {w: 3, wtimeout: 60000}}));
print("try some legal and illegal reads");
var r = master.getDB("test").foo.findOne();
@@ -73,7 +74,7 @@ assert.eq(r.x, 1);
slave.setSlaveOk();
function doQueryOn(p) {
- var error = assert.throws( function() {
+ var error = assert.throws(function() {
r = p.getDB("test").foo.findOne();
}, [], "find did not throw, returned: " + tojson(r)).toString();
printjson(error);
@@ -81,28 +82,26 @@ function doQueryOn(p) {
}
doQueryOn(slave);
-master.adminCommand({logout:1});
+master.adminCommand({logout: 1});
print("unauthorized:");
-printjson(master.adminCommand({replSetGetStatus : 1}));
+printjson(master.adminCommand({replSetGetStatus: 1}));
doQueryOn(master);
-
result = slave.getDB("test").auth("bar", "baz");
assert.eq(result, 1);
r = slave.getDB("test").foo.findOne();
assert.eq(r.x, 1);
-
print("add some data");
master.getDB("test").auth("bar", "baz");
var bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
-for (var i=0; i<1000; i++) {
- bulk.insert({ x: i, foo: "bar" });
+for (var i = 0; i < 1000; i++) {
+ bulk.insert({x: i, foo: "bar"});
}
-assert.writeOK(bulk.execute({ w: 3, wtimeout: 60000 }));
+assert.writeOK(bulk.execute({w: 3, wtimeout: 60000}));
print("fail over");
rs.stop(mId);
@@ -112,86 +111,84 @@ master = rs.getPrimary();
print("add some more data 1");
master.getDB("test").auth("bar", "baz");
bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
-for (var i=0; i<1000; i++) {
- bulk.insert({ x: i, foo: "bar" });
+for (var i = 0; i < 1000; i++) {
+ bulk.insert({x: i, foo: "bar"});
}
-assert.writeOK(bulk.execute({ w: 2 }));
+assert.writeOK(bulk.execute({w: 2}));
print("resync");
-rs.restart(mId, {"keyFile" : key1_600});
+rs.restart(mId, {"keyFile": key1_600});
master = rs.getPrimary();
print("add some more data 2");
bulk = master.getDB("test").foo.initializeUnorderedBulkOp();
-for (var i=0; i<1000; i++) {
- bulk.insert({ x: i, foo: "bar" });
+for (var i = 0; i < 1000; i++) {
+ bulk.insert({x: i, foo: "bar"});
}
-bulk.execute({ w:3, wtimeout:60000 });
+bulk.execute({w: 3, wtimeout: 60000});
print("add member with wrong key");
-var conn = MongoRunner.runMongod({dbpath: MongoRunner.dataPath + name + "-3",
- port: port[3],
- replSet: "rs_auth1",
- oplogSize: 2,
- keyFile: key2_600});
-
+var conn = MongoRunner.runMongod({
+ dbpath: MongoRunner.dataPath + name + "-3",
+ port: port[3],
+ replSet: "rs_auth1",
+ oplogSize: 2,
+ keyFile: key2_600
+});
master.getDB("admin").auth("foo", "bar");
var config = master.getDB("local").system.replset.findOne();
-config.members.push({_id : 3, host : rs.host+":"+port[3]});
+config.members.push({_id: 3, host: rs.host + ":" + port[3]});
config.version++;
try {
- master.adminCommand({replSetReconfig:config});
-}
-catch (e) {
- print("error: "+e);
+ master.adminCommand({replSetReconfig: config});
+} catch (e) {
+ print("error: " + e);
}
master = rs.getPrimary();
master.getDB("admin").auth("foo", "bar");
-
print("shouldn't ever sync");
-for (var i = 0; i<10; i++) {
- print("iteration: " +i);
- var results = master.adminCommand({replSetGetStatus:1});
+for (var i = 0; i < 10; i++) {
+ print("iteration: " + i);
+ var results = master.adminCommand({replSetGetStatus: 1});
printjson(results);
assert(results.members[3].state != 2);
sleep(1000);
}
-
print("stop member");
MongoRunner.stopMongod(conn);
-
print("start back up with correct key");
-var conn = MongoRunner.runMongod({dbpath: MongoRunner.dataPath + name + "-3",
- port: port[3],
- replSet: "rs_auth1",
- oplogSize: 2,
- keyFile: key1_600});
+var conn = MongoRunner.runMongod({
+ dbpath: MongoRunner.dataPath + name + "-3",
+ port: port[3],
+ replSet: "rs_auth1",
+ oplogSize: 2,
+ keyFile: key1_600
+});
wait(function() {
try {
- var results = master.adminCommand({replSetGetStatus:1});
+ var results = master.adminCommand({replSetGetStatus: 1});
printjson(results);
return results.members[3].state == 2;
- }
- catch (e) {
+ } catch (e) {
print(e);
}
return false;
- });
+});
print("make sure it has the config, too");
assert.soon(function() {
- for (var i in rs.nodes) {
- rs.nodes[i].setSlaveOk();
- rs.nodes[i].getDB("admin").auth("foo","bar");
- config = rs.nodes[i].getDB("local").system.replset.findOne();
- if (config.version != 2) {
- return false;
- }
+ for (var i in rs.nodes) {
+ rs.nodes[i].setSlaveOk();
+ rs.nodes[i].getDB("admin").auth("foo", "bar");
+ config = rs.nodes[i].getDB("local").system.replset.findOne();
+ if (config.version != 2) {
+ return false;
}
- return true;
- });
+ }
+ return true;
+});
diff --git a/jstests/replsets/auth2.js b/jstests/replsets/auth2.js
index b7776d94572..f7b8d8ab468 100644
--- a/jstests/replsets/auth2.js
+++ b/jstests/replsets/auth2.js
@@ -18,39 +18,41 @@ var testInvalidAuthStates = function() {
rs.waitForState(rs.nodes[0], ReplSetTest.State.SECONDARY);
- rs.restart(1, {"keyFile" : key1});
- rs.restart(2, {"keyFile" : key1});
+ rs.restart(1, {"keyFile": key1});
+ rs.restart(2, {"keyFile": key1});
};
var name = "rs_auth2";
var path = "jstests/libs/";
// These keyFiles have their permissions set to 600 later in the test.
-var key1 = path+"key1";
-var key2 = path+"key2";
+var key1 = path + "key1";
+var key2 = path + "key2";
var rs = new ReplSetTest({name: name, nodes: 3});
var nodes = rs.startSet();
var hostnames = rs.nodeList();
-rs.initiate({ "_id" : name,
- "members" : [
- {"_id" : 0, "host" : hostnames[0], "priority" : 2},
- {"_id" : 1, "host" : hostnames[1], priority: 0},
- {"_id" : 2, "host" : hostnames[2], priority: 0}
- ]});
+rs.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": hostnames[0], "priority": 2},
+ {"_id": 1, "host": hostnames[1], priority: 0},
+ {"_id": 2, "host": hostnames[2], priority: 0}
+ ]
+});
var master = rs.getPrimary();
print("add an admin user");
-master.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 30000});
+master.getDB("admin")
+ .createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles}, {w: 3, wtimeout: 30000});
var m = rs.nodes[0];
print("starting 1 and 2 with key file");
rs.stop(1);
-rs.restart(1, {"keyFile" : key1});
+rs.restart(1, {"keyFile": key1});
rs.stop(2);
-rs.restart(2, {"keyFile" : key1});
+rs.restart(2, {"keyFile": key1});
// auth to all nodes with auth
rs.nodes[1].getDB("admin").auth("foo", "bar");
@@ -60,15 +62,15 @@ testInvalidAuthStates();
print("restart mongod with bad keyFile");
rs.stop(0);
-m = rs.restart(0, {"keyFile" : key2});
+m = rs.restart(0, {"keyFile": key2});
-//auth to all nodes
+// auth to all nodes
rs.nodes[0].getDB("admin").auth("foo", "bar");
rs.nodes[1].getDB("admin").auth("foo", "bar");
rs.nodes[2].getDB("admin").auth("foo", "bar");
testInvalidAuthStates();
rs.stop(0);
-m = rs.restart(0, {"keyFile" : key1});
+m = rs.restart(0, {"keyFile": key1});
print("0 becomes a secondary");
diff --git a/jstests/replsets/auth3.js b/jstests/replsets/auth3.js
index 504bfeffe9c..3ac812bcfa1 100644
--- a/jstests/replsets/auth3.js
+++ b/jstests/replsets/auth3.js
@@ -8,14 +8,11 @@
// run on ephemeral storage engines.
// @tags: [requires_persistence]
-(function () {
+(function() {
"use strict";
var keyfile = "jstests/libs/key1";
var master;
- var rs = new ReplSetTest({
- nodes : { node0 : {}, node1 : {}, arbiter : {}},
- keyFile : keyfile
- });
+ var rs = new ReplSetTest({nodes: {node0: {}, node1: {}, arbiter: {}}, keyFile: keyfile});
rs.startSet();
rs.initiate();
@@ -27,11 +24,11 @@
var safeInsert = function() {
master = rs.getPrimary();
master.getDB("admin").auth("foo", "bar");
- assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }));
+ assert.writeOK(master.getDB("foo").bar.insert({x: 1}));
};
jsTest.log("authing");
- for (var i=0; i<2; i++) {
+ for (var i = 0; i < 2; i++) {
assert(rs.nodes[i].getDB("admin").auth("foo", "bar"),
"could not log into " + rs.nodes[i].host);
}
@@ -39,7 +36,11 @@
jsTest.log("make common point");
safeInsert();
- authutil.asCluster(rs.nodes, keyfile, function() { rs.awaitReplication(); });
+ authutil.asCluster(rs.nodes,
+ keyfile,
+ function() {
+ rs.awaitReplication();
+ });
jsTest.log("write stuff to 0&2");
rs.stop(1);
@@ -48,7 +49,7 @@
master.getDB("admin").auth("foo", "bar");
master.getDB("foo").bar.drop();
jsTest.log("last op: " +
- tojson(master.getDB("local").oplog.rs.find().sort({$natural:-1}).limit(1).next()));
+ tojson(master.getDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next()));
jsTest.log("write stuff to 1&2");
rs.stop(0);
@@ -56,12 +57,16 @@
safeInsert();
jsTest.log("last op: " +
- tojson(master.getDB("local").oplog.rs.find().sort({$natural:-1}).limit(1).next()));
+ tojson(master.getDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next()));
rs.restart(0);
jsTest.log("doing rollback!");
- authutil.asCluster(rs.nodes, keyfile, function () { rs.awaitSecondaryNodes(); });
+ authutil.asCluster(rs.nodes,
+ keyfile,
+ function() {
+ rs.awaitSecondaryNodes();
+ });
}());
diff --git a/jstests/replsets/auth_no_pri.js b/jstests/replsets/auth_no_pri.js
index cce4e8020d6..d35d0ec2919 100644
--- a/jstests/replsets/auth_no_pri.js
+++ b/jstests/replsets/auth_no_pri.js
@@ -1,32 +1,32 @@
// Test that you can still authenticate a replset connection to a RS with no primary (SERVER-6665).
-(function () {
-'use strict';
+(function() {
+ 'use strict';
-var NODE_COUNT = 3;
-var rs = new ReplSetTest({"nodes" : NODE_COUNT, keyFile : "jstests/libs/key1"});
-var nodes = rs.startSet();
-rs.initiate();
+ var NODE_COUNT = 3;
+ var rs = new ReplSetTest({"nodes": NODE_COUNT, keyFile: "jstests/libs/key1"});
+ var nodes = rs.startSet();
+ rs.initiate();
-// Add user
-var master = rs.getPrimary();
-master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {w: NODE_COUNT});
+ // Add user
+ var master = rs.getPrimary();
+ master.getDB("admin").createUser({user: "admin", pwd: "pwd", roles: ["root"]}, {w: NODE_COUNT});
-// Can authenticate replset connection when whole set is up.
-var conn = new Mongo(rs.getURL());
-assert(conn.getDB('admin').auth('admin', 'pwd'));
-assert.writeOK(conn.getDB('admin').foo.insert({a:1}, { writeConcern: { w: NODE_COUNT } }));
+ // Can authenticate replset connection when whole set is up.
+ var conn = new Mongo(rs.getURL());
+ assert(conn.getDB('admin').auth('admin', 'pwd'));
+ assert.writeOK(conn.getDB('admin').foo.insert({a: 1}, {writeConcern: {w: NODE_COUNT}}));
-// Make sure there is no primary
-rs.stop(0);
-rs.stop(1);
-rs.waitForState(nodes[2], ReplSetTest.State.SECONDARY);
+ // Make sure there is no primary
+ rs.stop(0);
+ rs.stop(1);
+ rs.waitForState(nodes[2], ReplSetTest.State.SECONDARY);
-// Make sure you can still authenticate a replset connection with no primary
-var conn2 = new Mongo(rs.getURL());
-conn2.setSlaveOk(true);
-assert(conn2.getDB('admin').auth({user:'admin', pwd:'pwd', mechanism:"SCRAM-SHA-1"}));
-assert.eq(1, conn2.getDB('admin').foo.findOne().a);
+ // Make sure you can still authenticate a replset connection with no primary
+ var conn2 = new Mongo(rs.getURL());
+ conn2.setSlaveOk(true);
+ assert(conn2.getDB('admin').auth({user: 'admin', pwd: 'pwd', mechanism: "SCRAM-SHA-1"}));
+ assert.eq(1, conn2.getDB('admin').foo.findOne().a);
-rs.stopSet();
+ rs.stopSet();
}());
diff --git a/jstests/replsets/await_replication_timeout.js b/jstests/replsets/await_replication_timeout.js
index 03ebfa5f8a9..b0fb605567d 100644
--- a/jstests/replsets/await_replication_timeout.js
+++ b/jstests/replsets/await_replication_timeout.js
@@ -1,47 +1,49 @@
// Tests timeout behavior of waiting for write concern as well as its interaction with maxTimeMs
(function() {
-"use strict";
+ "use strict";
-var exceededTimeLimitCode = 50;
-var writeConcernFailedCode = 64;
-var replTest = new ReplSetTest({ nodes: 3 });
-replTest.startSet();
-replTest.initiate();
-replTest.stop(0); // Make sure that there are only 2 nodes up so w:3 writes will always time out
-var primary = replTest.getPrimary();
-var testDB = primary.getDB('test');
+ var exceededTimeLimitCode = 50;
+ var writeConcernFailedCode = 64;
+ var replTest = new ReplSetTest({nodes: 3});
+ replTest.startSet();
+ replTest.initiate();
+ replTest.stop(
+ 0); // Make sure that there are only 2 nodes up so w:3 writes will always time out
+ var primary = replTest.getPrimary();
+ var testDB = primary.getDB('test');
-// Test wtimeout
-var res = testDB.runCommand({insert: 'foo',
- documents: [{a:1}],
- writeConcern: {w: 3, wtimeout: 1000}});
-assert.commandWorked(res); // Commands with write concern errors still report success.
-assert.eq(writeConcernFailedCode, res.writeConcernError.code);
+ // Test wtimeout
+ var res = testDB.runCommand(
+ {insert: 'foo', documents: [{a: 1}], writeConcern: {w: 3, wtimeout: 1000}});
+ assert.commandWorked(res); // Commands with write concern errors still report success.
+ assert.eq(writeConcernFailedCode, res.writeConcernError.code);
-// Test maxTimeMS timeout
-res = testDB.runCommand({insert: 'foo',
- documents: [{a:1}],
- writeConcern: {w: 3},
- maxTimeMS: 1000});
-assert.commandWorked(res); // Commands with write concern errors still report success.
-assert.eq(exceededTimeLimitCode, res.writeConcernError.code);
+ // Test maxTimeMS timeout
+ res = testDB.runCommand(
+ {insert: 'foo', documents: [{a: 1}], writeConcern: {w: 3}, maxTimeMS: 1000});
+ assert.commandWorked(res); // Commands with write concern errors still report success.
+ assert.eq(exceededTimeLimitCode, res.writeConcernError.code);
-// Test with wtimeout < maxTimeMS
-res = testDB.runCommand({insert: 'foo',
- documents: [{a:1}],
- writeConcern: {w: 3, wtimeout: 1000},
- maxTimeMS: 10 * 1000});
-assert.commandWorked(res); // Commands with write concern errors still report success.
-assert.eq(writeConcernFailedCode, res.writeConcernError.code);
+ // Test with wtimeout < maxTimeMS
+ res = testDB.runCommand({
+ insert: 'foo',
+ documents: [{a: 1}],
+ writeConcern: {w: 3, wtimeout: 1000},
+ maxTimeMS: 10 * 1000
+ });
+ assert.commandWorked(res); // Commands with write concern errors still report success.
+ assert.eq(writeConcernFailedCode, res.writeConcernError.code);
-// Test with wtimeout > maxTimeMS
-res = testDB.runCommand({insert: 'foo',
- documents: [{a:1}],
- writeConcern: {w: 3, wtimeout: 10* 1000},
- maxTimeMS: 1000});
-assert.commandWorked(res); // Commands with write concern errors still report success.
-assert.eq(exceededTimeLimitCode, res.writeConcernError.code);
-replTest.stopSet();
+ // Test with wtimeout > maxTimeMS
+ res = testDB.runCommand({
+ insert: 'foo',
+ documents: [{a: 1}],
+ writeConcern: {w: 3, wtimeout: 10 * 1000},
+ maxTimeMS: 1000
+ });
+ assert.commandWorked(res); // Commands with write concern errors still report success.
+ assert.eq(exceededTimeLimitCode, res.writeConcernError.code);
+ replTest.stopSet();
})();
diff --git a/jstests/replsets/background_index.js b/jstests/replsets/background_index.js
index 9c92f8ca8f4..6d891a66a85 100644
--- a/jstests/replsets/background_index.js
+++ b/jstests/replsets/background_index.js
@@ -17,8 +17,8 @@
var coll = primary.getCollection("test.foo");
var adminDB = primary.getDB("admin");
- for (var i=0; i<100; i++) {
- assert.writeOK(coll.insert({_id: i, x: i*3, str: "hello world"}));
+ for (var i = 0; i < 100; i++) {
+ assert.writeOK(coll.insert({_id: i, x: i * 3, str: "hello world"}));
}
// Add a background index.
@@ -26,9 +26,8 @@
// Rename the collection.
assert.commandWorked(
- adminDB.runCommand({renameCollection: "test.foo", to: "bar.test", dropTarget: true}),
- "Call to renameCollection failed."
- );
+ adminDB.runCommand({renameCollection: "test.foo", to: "bar.test", dropTarget: true}),
+ "Call to renameCollection failed.");
// Await replication.
rst.awaitReplication();
diff --git a/jstests/replsets/batch_write_command_wc.js b/jstests/replsets/batch_write_command_wc.js
index c71fa18bb45..d6f83c08e3a 100644
--- a/jstests/replsets/batch_write_command_wc.js
+++ b/jstests/replsets/batch_write_command_wc.js
@@ -13,8 +13,8 @@ jsTest.log("Starting no journal/repl set tests...");
// Start a single-node replica set with no journal
// Allows testing immediate write concern failures and wc application failures
-var rst = new ReplSetTest({ nodes : 2 });
-rst.startSet({ nojournal : "" });
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet({nojournal: ""});
rst.initiate();
var mongod = rst.getPrimary();
var coll = mongod.getCollection("test.batch_write_command_wc");
@@ -22,9 +22,8 @@ var coll = mongod.getCollection("test.batch_write_command_wc");
//
// Basic insert, default WC
coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{a:1}]});
-printjson( result = coll.runCommand(request) );
+printjson(request = {insert: coll.getName(), documents: [{a: 1}]});
+printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(1, coll.count());
@@ -32,10 +31,8 @@ assert.eq(1, coll.count());
//
// Basic insert, majority WC
coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{a:1}],
- writeConcern: {w: 'majority'}});
-printjson( result = coll.runCommand(request) );
+printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'majority'}});
+printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(1, coll.count());
@@ -43,10 +40,8 @@ assert.eq(1, coll.count());
//
// Basic insert, w:2 WC
coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{a:1}],
- writeConcern: {w:2}});
-printjson( result = coll.runCommand(request) );
+printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 2}});
+printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(1, coll.count());
@@ -54,20 +49,17 @@ assert.eq(1, coll.count());
//
// Basic insert, immediate nojournal error
coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{a:1}],
- writeConcern: {j:true}});
-printjson( result = coll.runCommand(request) );
+printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {j: true}});
+printjson(result = coll.runCommand(request));
assert(!result.ok);
assert.eq(0, coll.count());
//
// Basic insert, timeout wc error
coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{a:1}],
- writeConcern: {w:3, wtimeout: 1}});
-printjson( result = coll.runCommand(request) );
+printjson(
+ request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 3, wtimeout: 1}});
+printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert(result.writeConcernError);
@@ -77,10 +69,8 @@ assert.eq(1, coll.count());
//
// Basic insert, wmode wc error
coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{a:1}],
- writeConcern: {w: 'invalid'}});
-printjson( result = coll.runCommand(request) );
+printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'invalid'}});
+printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert(result.writeConcernError);
@@ -89,10 +79,12 @@ assert.eq(1, coll.count());
//
// Two ordered inserts, write error and wc error both reported
coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{a:1},{$invalid:'doc'}],
- writeConcern: {w: 'invalid'}});
-printjson( result = coll.runCommand(request) );
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}, {$invalid: 'doc'}],
+ writeConcern: {w: 'invalid'}
+});
+printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(result.writeErrors.length, 1);
@@ -103,11 +95,13 @@ assert.eq(1, coll.count());
//
// Two unordered inserts, write error and wc error reported
coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{a:1},{$invalid:'doc'}],
- writeConcern: {w: 'invalid'},
- ordered: false});
-printjson( result = coll.runCommand(request) );
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}, {$invalid: 'doc'}],
+ writeConcern: {w: 'invalid'},
+ ordered: false
+});
+printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
assert.eq(result.writeErrors.length, 1);
@@ -118,10 +112,12 @@ assert.eq(1, coll.count());
//
// Write error with empty writeConcern object.
coll.remove({});
-request = { insert: coll.getName(),
- documents: [{ _id: 1 }, { _id: 1 }],
- writeConcern: {},
- ordered: false };
+request = {
+ insert: coll.getName(),
+ documents: [{_id: 1}, {_id: 1}],
+ writeConcern: {},
+ ordered: false
+};
result = coll.runCommand(request);
assert(result.ok);
assert.eq(1, result.n);
@@ -133,10 +129,12 @@ assert.eq(1, coll.count());
//
// Write error with unspecified w.
coll.remove({});
-request = { insert: coll.getName(),
- documents: [{ _id: 1 }, { _id: 1 }],
- writeConcern: { wTimeout: 1 },
- ordered: false };
+request = {
+ insert: coll.getName(),
+ documents: [{_id: 1}, {_id: 1}],
+ writeConcern: {wTimeout: 1},
+ ordered: false
+};
result = coll.runCommand(request);
assert(result.ok);
assert.eq(1, result.n);
@@ -147,4 +145,3 @@ assert.eq(1, coll.count());
jsTest.log("DONE no journal/repl tests");
rst.stopSet();
-
diff --git a/jstests/replsets/buildindexes.js b/jstests/replsets/buildindexes.js
index a114011c3a0..f6a8a781014 100644
--- a/jstests/replsets/buildindexes.js
+++ b/jstests/replsets/buildindexes.js
@@ -2,65 +2,65 @@
(function() {
- var name = "buildIndexes";
- var host = getHostName();
-
- var replTest = new ReplSetTest( {name: name, nodes: 3} );
-
- var nodes = replTest.startSet();
-
- var config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- config.members[2].buildIndexes = false;
-
- replTest.initiate(config);
-
- var master = replTest.getPrimary().getDB(name);
- var slaveConns = replTest.liveNodes.slaves;
- var slave = [];
- for (var i in slaveConns) {
- slaveConns[i].setSlaveOk();
- slave.push(slaveConns[i].getDB(name));
- }
- replTest.awaitReplication();
-
- master.x.ensureIndex({y : 1});
-
- for (i = 0; i < 100; i++) {
- master.x.insert({x:1,y:"abc",c:1});
- }
-
- replTest.awaitReplication();
-
- assert.commandWorked(slave[0].runCommand({count: "x"}));
-
- var indexes = slave[0].stats().indexes;
- assert.eq(indexes, 2, 'number of indexes');
-
- indexes = slave[1].stats().indexes;
- assert.eq(indexes, 1);
-
- indexes = slave[0].x.stats().indexSizes;
-
- var count = 0;
- for (i in indexes) {
- count++;
- if (i == "_id_") {
- continue;
+ var name = "buildIndexes";
+ var host = getHostName();
+
+ var replTest = new ReplSetTest({name: name, nodes: 3});
+
+ var nodes = replTest.startSet();
+
+ var config = replTest.getReplSetConfig();
+ config.members[2].priority = 0;
+ config.members[2].buildIndexes = false;
+
+ replTest.initiate(config);
+
+ var master = replTest.getPrimary().getDB(name);
+ var slaveConns = replTest.liveNodes.slaves;
+ var slave = [];
+ for (var i in slaveConns) {
+ slaveConns[i].setSlaveOk();
+ slave.push(slaveConns[i].getDB(name));
}
- assert(i.match(/y_/));
- }
+ replTest.awaitReplication();
+
+ master.x.ensureIndex({y: 1});
+
+ for (i = 0; i < 100; i++) {
+ master.x.insert({x: 1, y: "abc", c: 1});
+ }
+
+ replTest.awaitReplication();
- assert.eq(count, 2);
-
- indexes = slave[1].x.stats().indexSizes;
+ assert.commandWorked(slave[0].runCommand({count: "x"}));
- count = 0;
- for (i in indexes) {
- count++;
- }
+ var indexes = slave[0].stats().indexes;
+ assert.eq(indexes, 2, 'number of indexes');
+
+ indexes = slave[1].stats().indexes;
+ assert.eq(indexes, 1);
+
+ indexes = slave[0].x.stats().indexSizes;
+
+ var count = 0;
+ for (i in indexes) {
+ count++;
+ if (i == "_id_") {
+ continue;
+ }
+ assert(i.match(/y_/));
+ }
+
+ assert.eq(count, 2);
+
+ indexes = slave[1].x.stats().indexSizes;
+
+ count = 0;
+ for (i in indexes) {
+ count++;
+ }
- assert.eq(count, 1);
+ assert.eq(count, 1);
- replTest.stopSet();
+ replTest.stopSet();
}());
diff --git a/jstests/replsets/bulk_api_wc.js b/jstests/replsets/bulk_api_wc.js
index a92b536dda1..f08e4df64b9 100644
--- a/jstests/replsets/bulk_api_wc.js
+++ b/jstests/replsets/bulk_api_wc.js
@@ -7,9 +7,9 @@
jsTest.log("Starting bulk api write concern tests...");
// Start a 2-node replica set with no journal
-//Allows testing immediate write concern failures and wc application failures
-var rst = new ReplSetTest({ nodes : 2 });
-rst.startSet({ nojournal : "" });
+// Allows testing immediate write concern failures and wc application failures
+var rst = new ReplSetTest({nodes: 2});
+rst.startSet({nojournal: ""});
rst.initiate();
var mongod = rst.getPrimary();
var coll = mongod.getCollection("test.bulk_api_wc");
@@ -18,7 +18,7 @@ var executeTests = function() {
// Create a unique index, legacy writes validate too early to use invalid documents for write
// error testing
- coll.ensureIndex({ a : 1 }, { unique : true });
+ coll.ensureIndex({a: 1}, {unique: true});
//
// Ordered
@@ -28,18 +28,22 @@ var executeTests = function() {
// Fail due to nojournal
coll.remove({});
var bulk = coll.initializeOrderedBulkOp();
- bulk.insert({a:1});
- bulk.insert({a:2});
- assert.throws( function(){ bulk.execute({ j : true }); } );
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ assert.throws(function() {
+ bulk.execute({j: true});
+ });
//
// Fail with write error, no write concern error even though it would fail on apply for ordered
coll.remove({});
var bulk = coll.initializeOrderedBulkOp();
- bulk.insert({a:1});
- bulk.insert({a:2});
- bulk.insert({a:2});
- var result = assert.throws( function() { bulk.execute({ w : 'invalid' }); } );
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ bulk.insert({a: 2});
+ var result = assert.throws(function() {
+ bulk.execute({w: 'invalid'});
+ });
assert.eq(result.nInserted, 2);
assert.eq(result.getWriteErrors()[0].index, 2);
assert(!result.getWriteConcernError());
@@ -53,10 +57,12 @@ var executeTests = function() {
// Fail with write error, write concern error reported when unordered
coll.remove({});
var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({a:1});
- bulk.insert({a:2});
- bulk.insert({a:2});
- var result = assert.throws( function(){ bulk.execute({ w : 'invalid' }); } );
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ bulk.insert({a: 2});
+ var result = assert.throws(function() {
+ bulk.execute({w: 'invalid'});
+ });
assert.eq(result.nInserted, 2);
assert.eq(result.getWriteErrors()[0].index, 2);
assert(result.getWriteConcernError());
@@ -68,10 +74,12 @@ var executeTests = function() {
// multiple wc errors
coll.remove({});
var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({a:1});
- bulk.insert({a:2});
- bulk.insert({a:2});
- var result = assert.throws( function() { bulk.execute({ w : 3, wtimeout : 1 }); } );
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ bulk.insert({a: 2});
+ var result = assert.throws(function() {
+ bulk.execute({w: 3, wtimeout: 1});
+ });
assert.eq(result.nInserted, 2);
assert.eq(result.getWriteErrors()[0].index, 2);
assert.eq(100, result.getWriteConcernError().code);
@@ -81,11 +89,13 @@ var executeTests = function() {
// Fail with write error and upserted, write concern error reported when unordered
coll.remove({});
var bulk = coll.initializeUnorderedBulkOp();
- bulk.insert({a:1});
- bulk.insert({a:2});
- bulk.find({a:3}).upsert().updateOne({a:3});
- bulk.insert({a:3});
- var result = assert.throws( function(){ bulk.execute({ w : 'invalid' }); } );
+ bulk.insert({a: 1});
+ bulk.insert({a: 2});
+ bulk.find({a: 3}).upsert().updateOne({a: 3});
+ bulk.insert({a: 3});
+ var result = assert.throws(function() {
+ bulk.execute({w: 'invalid'});
+ });
assert.eq(result.nInserted, 2);
assert.eq(result.nUpserted, 1);
assert.eq(result.getUpsertedIds()[0].index, 2);
@@ -95,12 +105,16 @@ var executeTests = function() {
};
// Use write commands
-coll.getMongo().useWriteCommands = function() { return true; };
+coll.getMongo().useWriteCommands = function() {
+ return true;
+};
executeTests();
// FAILING currently due to incorrect batch api reading of GLE
// Use legacy opcodes
-coll.getMongo().useWriteCommands = function() { return false; };
+coll.getMongo().useWriteCommands = function() {
+ return false;
+};
executeTests();
jsTest.log("DONE bulk api wc tests");
diff --git a/jstests/replsets/capped_id.js b/jstests/replsets/capped_id.js
index 8ba37ea7c14..8708f5752f8 100644
--- a/jstests/replsets/capped_id.js
+++ b/jstests/replsets/capped_id.js
@@ -8,7 +8,7 @@
// and check it got created on secondaries.
// Create a new replica set test with name 'testSet' and 3 members
-var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
// call startSet() to start each mongod in the replica set
// this returns a list of nodes
@@ -31,90 +31,86 @@ var slave2 = replTest.liveNodes.slaves[1];
// Calling getPrimary made available the liveNodes structure,
// which looks like this:
// liveNodes = {master: masterNode, slaves: [slave1, slave2] }
-printjson( replTest.liveNodes );
+printjson(replTest.liveNodes);
// define db names to use for this test
var dbname = "dbname";
-var masterdb = master.getDB( dbname );
-var slave1db = slave1.getDB( dbname );
-var slave2db = slave2.getDB( dbname );
+var masterdb = master.getDB(dbname);
+var slave1db = slave1.getDB(dbname);
+var slave2db = slave2.getDB(dbname);
function countIdIndexes(theDB, coll) {
- return theDB[coll].getIndexes().filter(function(idx) {
- return friendlyEqual(idx.key, {_id: 1});
- }).length;
+ return theDB[coll].getIndexes().filter(function(idx) {
+ return friendlyEqual(idx.key, {_id: 1});
+ }).length;
}
var numtests = 4;
-for( testnum=0; testnum < numtests; testnum++ ){
-
- //define collection name
+for (testnum = 0; testnum < numtests; testnum++) {
+ // define collection name
coll = "coll" + testnum;
// drop the coll on the master (just in case it already existed)
// and wait for the drop to replicate
- masterdb.getCollection( coll ).drop();
+ masterdb.getCollection(coll).drop();
replTest.awaitReplication();
- if ( testnum == 0 ){
+ if (testnum == 0) {
// create a capped collection on the master
// insert a bunch of things in it
// wait for it to replicate
- masterdb.runCommand( {create : coll , capped : true , size : 1024} );
- for(i=0; i < 500 ; i++){
- masterdb.getCollection( coll ).insert( {a: 1000} );
+ masterdb.runCommand({create: coll, capped: true, size: 1024});
+ for (i = 0; i < 500; i++) {
+ masterdb.getCollection(coll).insert({a: 1000});
}
replTest.awaitReplication();
- }
- else if ( testnum == 1 ){
+ } else if (testnum == 1) {
// create a non-capped collection on the master
// insert a bunch of things in it
// wait for it to replicate
- masterdb.runCommand( {create : coll } );
- for(i=0; i < 500 ; i++){
- masterdb.getCollection( coll ).insert( {a: 1000} );
+ masterdb.runCommand({create: coll});
+ for (i = 0; i < 500; i++) {
+ masterdb.getCollection(coll).insert({a: 1000});
}
replTest.awaitReplication();
// make sure _id index exists on primary
- assert.eq( 1 ,
- countIdIndexes(masterdb, coll),
- "master does not have _id index on normal collection");
+ assert.eq(1,
+ countIdIndexes(masterdb, coll),
+ "master does not have _id index on normal collection");
// then convert it to capped
- masterdb.runCommand({convertToCapped: coll , size: 1024 } );
+ masterdb.runCommand({convertToCapped: coll, size: 1024});
replTest.awaitReplication();
- }
- else if ( testnum == 2 ){
+ } else if (testnum == 2) {
// similar to first test, but check that a bunch of updates instead
// of inserts triggers the _id index creation on secondaries.
- masterdb.runCommand( {create : coll , capped : true , size : 1024} );
- masterdb.getCollection( coll ).insert( {a : 0} );
- for(i=0; i < 500 ; i++){
- masterdb.getCollection( coll ).update( {} , {$inc : {a : 1} } );
+ masterdb.runCommand({create: coll, capped: true, size: 1024});
+ masterdb.getCollection(coll).insert({a: 0});
+ for (i = 0; i < 500; i++) {
+ masterdb.getCollection(coll).update({}, {$inc: {a: 1}});
}
replTest.awaitReplication();
- }
- else if ( testnum == 3 ){
+ } else if (testnum == 3) {
// explicitly set autoIndexId : false
- masterdb.runCommand( {create : coll , capped : true , size : 1024 , autoIndexId : false } );
- for(i=0; i < 500 ; i++){
- masterdb.getCollection( coll ).insert( {a: 1000} );
+ masterdb.runCommand({create: coll, capped: true, size: 1024, autoIndexId: false});
+ for (i = 0; i < 500; i++) {
+ masterdb.getCollection(coll).insert({a: 1000});
}
replTest.awaitReplication();
- assert.eq( 0 ,
- countIdIndexes(masterdb, coll),
- "master has an _id index on capped collection when autoIndexId is false");
- assert.eq( 0 ,
- countIdIndexes(slave1db, coll),
- "slave1 has an _id index on capped collection when autoIndexId is false");
- assert.eq( 0 ,
- countIdIndexes(slave2db, coll),
- "slave2 has an _id index on capped collection when autoIndexId is false");
+ assert.eq(0,
+ countIdIndexes(masterdb, coll),
+ "master has an _id index on capped collection when autoIndexId is false");
+ assert.eq(0,
+ countIdIndexes(slave1db, coll),
+ "slave1 has an _id index on capped collection when autoIndexId is false");
+ assert.eq(0,
+ countIdIndexes(slave2db, coll),
+ "slave2 has an _id index on capped collection when autoIndexId is false");
// now create the index and make sure it works
- masterdb.getCollection( coll ).ensureIndex( { "_id" : 1 } );
+ masterdb.getCollection(coll).ensureIndex({"_id": 1});
replTest.awaitReplication();
}
@@ -132,20 +128,14 @@ for( testnum=0; testnum < numtests; testnum++ ){
print("");
// ensure all nodes have _id index
- assert.eq( 1 ,
- countIdIndexes(masterdb, coll),
- "master has an _id index on capped collection");
- assert.eq( 1 ,
- countIdIndexes(slave1db, coll),
- "slave1 does not have _id index on capped collection");
- assert.eq( 1 ,
- countIdIndexes(slave2db, coll),
- "slave2 does not have _id index on capped collection");
+ assert.eq(1, countIdIndexes(masterdb, coll), "master has an _id index on capped collection");
+ assert.eq(
+ 1, countIdIndexes(slave1db, coll), "slave1 does not have _id index on capped collection");
+ assert.eq(
+ 1, countIdIndexes(slave2db, coll), "slave2 does not have _id index on capped collection");
print("capped_id.js Test # " + testnum + " SUCCESS");
}
-//Finally, stop set
+// Finally, stop set
replTest.stopSet();
-
-
diff --git a/jstests/replsets/capped_insert_order.js b/jstests/replsets/capped_insert_order.js
index 9b39021732c..0b17f9ff144 100644
--- a/jstests/replsets/capped_insert_order.js
+++ b/jstests/replsets/capped_insert_order.js
@@ -20,7 +20,7 @@
var slaveColl = slaveDb[collectionName];
// Making a large capped collection to ensure that every document fits.
- masterDb.createCollection(collectionName, {capped: true, size: 1024*1024});
+ masterDb.createCollection(collectionName, {capped: true, size: 1024 * 1024});
// Insert 1000 docs with _id from 0 to 999 inclusive.
const nDocuments = 1000;
diff --git a/jstests/replsets/chaining_removal.js b/jstests/replsets/chaining_removal.js
index 79fc89c8ecd..29b50609754 100644
--- a/jstests/replsets/chaining_removal.js
+++ b/jstests/replsets/chaining_removal.js
@@ -9,15 +9,16 @@
var replTest = new ReplSetTest({name: name, nodes: numNodes});
var nodes = replTest.startSet();
var port = replTest.ports;
- replTest.initiate({_id: name, members:
- [
- {_id: 0, host: nodes[0].host, priority: 3},
- {_id: 1, host: nodes[1].host, priority: 0},
- {_id: 2, host: nodes[2].host, priority: 0},
- {_id: 3, host: nodes[3].host, priority: 0},
- {_id: 4, host: nodes[4].host, priority: 0},
- ],
- });
+ replTest.initiate({
+ _id: name,
+ members: [
+ {_id: 0, host: nodes[0].host, priority: 3},
+ {_id: 1, host: nodes[1].host, priority: 0},
+ {_id: 2, host: nodes[2].host, priority: 0},
+ {_id: 3, host: nodes[3].host, priority: 0},
+ {_id: 4, host: nodes[4].host, priority: 0},
+ ],
+ });
replTest.waitForState(nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var primary = replTest.getPrimary();
replTest.awaitReplication();
@@ -25,21 +26,31 @@
// Force node 1 to sync directly from node 0.
assert.commandWorked(nodes[1].getDB("admin").runCommand({"replSetSyncFrom": nodes[0].host}));
var res;
- assert.soon(function() {
- res = nodes[1].getDB("admin").runCommand({"replSetGetStatus": 1});
- return res.syncingTo === nodes[0].host;
- }, function() { return "node 1 failed to start syncing from node 0: " + tojson(res); } );
+ assert.soon(
+ function() {
+ res = nodes[1].getDB("admin").runCommand({"replSetGetStatus": 1});
+ return res.syncingTo === nodes[0].host;
+ },
+ function() {
+ return "node 1 failed to start syncing from node 0: " + tojson(res);
+ });
// Force node 4 to sync through node 1.
assert.commandWorked(nodes[4].getDB("admin").runCommand({"replSetSyncFrom": nodes[1].host}));
- assert.soon(function() {
- res = nodes[4].getDB("admin").runCommand({"replSetGetStatus": 1});
- return res.syncingTo === nodes[1].host;
- }, function() { return "node 4 failed to start chaining through node 1: " + tojson(res); } );
+ assert.soon(
+ function() {
+ res = nodes[4].getDB("admin").runCommand({"replSetGetStatus": 1});
+ return res.syncingTo === nodes[1].host;
+ },
+ function() {
+ return "node 4 failed to start chaining through node 1: " + tojson(res);
+ });
// write that should reach all nodes
var timeout = 15 * 1000;
- var options = {writeConcern: {w: numNodes, wtimeout: timeout}};
+ var options = {
+ writeConcern: {w: numNodes, wtimeout: timeout}
+ };
assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
var config = primary.getDB("local").system.replset.findOne();
@@ -48,10 +59,9 @@
// remove node 4
replTest.stop(4);
try {
- primary.adminCommand({replSetReconfig:config});
- }
- catch (e) {
- print("error: "+e);
+ primary.adminCommand({replSetReconfig: config});
+ } catch (e) {
+ print("error: " + e);
}
// ensure writing to all four nodes still works
@@ -59,6 +69,6 @@
replTest.awaitReplication();
options.writeConcern.w = 4;
assert.writeOK(primary.getDB(name).foo.insert({x: 2}, options));
-
+
replTest.stopSet();
}());
diff --git a/jstests/replsets/cloneDb.js b/jstests/replsets/cloneDb.js
index ca53d370f4a..f0eb6a2171e 100644
--- a/jstests/replsets/cloneDb.js
+++ b/jstests/replsets/cloneDb.js
@@ -4,95 +4,104 @@
(function() {
"use strict";
-if (jsTest.options().keyFile) {
- jsTest.log("Skipping test because clone command doesn't work with authentication enabled:" +
- " SERVER-4245");
-} else {
- var numDocs = 2000;
-
- // 1kb string
- var str = new Array(1000).toString();
-
- var replsetDBName = 'cloneDBreplset';
- var standaloneDBName = 'cloneDBstandalone';
- var testColName = 'foo';
-
- jsTest.log("Create replica set");
- var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
- replTest.startSet();
- replTest.initiate();
- var master = replTest.getPrimary();
- var secondary = replTest.liveNodes.slaves[0];
- var masterDB = master.getDB(replsetDBName);
- masterDB.dropDatabase();
-
- jsTest.log("Create standalone server");
- var standalone = MongoRunner.runMongod();
- standalone.getDB("admin").runCommand({setParameter:1,logLevel:5});
- var standaloneDB = standalone.getDB(replsetDBName);
- standaloneDB.dropDatabase();
-
- jsTest.log("Insert data into replica set");
- var bulk = masterDB[testColName].initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({x: i, text: str});
- }
- assert.writeOK(bulk.execute({w: 3}));
-
- jsTest.log("Clone db from replica set to standalone server");
- standaloneDB.cloneDatabase(replTest.getURL());
- assert.eq(numDocs, standaloneDB[testColName].count(),
- 'cloneDatabase from replset to standalone failed (document counts do not match)');
-
- jsTest.log("Clone db from replica set PRIMARY to standalone server");
- standaloneDB.dropDatabase();
- standaloneDB.cloneDatabase(master.host);
- assert.eq(numDocs, standaloneDB[testColName].count(),
- 'cloneDatabase from PRIMARY to standalone failed (document counts do not match)');
-
- jsTest.log("Clone db from replica set SECONDARY to standalone server (should not copy)");
- standaloneDB.dropDatabase();
- standaloneDB.cloneDatabase(secondary.host);
- assert.eq(0, standaloneDB[testColName].count(),
- 'cloneDatabase from SECONDARY to standalone copied documents without slaveOk: true');
-
- jsTest.log("Clone db from replica set SECONDARY to standalone server using slaveOk");
- standaloneDB.dropDatabase();
- standaloneDB.runCommand({clone: secondary.host, slaveOk: true});
- assert.eq(numDocs, standaloneDB[testColName].count(),
- 'cloneDatabase from SECONDARY to standalone failed (document counts do not match)');
-
- jsTest.log("Switch db and insert data into standalone server");
- masterDB = master.getDB(standaloneDBName);
- var secondaryDB = secondary.getDB(standaloneDBName);
- standaloneDB = standalone.getDB(standaloneDBName);
- masterDB.dropDatabase();
- secondaryDB.dropDatabase();
- standaloneDB.dropDatabase();
-
- bulk = standaloneDB[testColName].initializeUnorderedBulkOp();
- for (var i = 0; i < numDocs; i++) {
- bulk.insert({x: i, text: str});
+ if (jsTest.options().keyFile) {
+ jsTest.log("Skipping test because clone command doesn't work with authentication enabled:" +
+ " SERVER-4245");
+ } else {
+ var numDocs = 2000;
+
+ // 1kb string
+ var str = new Array(1000).toString();
+
+ var replsetDBName = 'cloneDBreplset';
+ var standaloneDBName = 'cloneDBstandalone';
+ var testColName = 'foo';
+
+ jsTest.log("Create replica set");
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
+ replTest.startSet();
+ replTest.initiate();
+ var master = replTest.getPrimary();
+ var secondary = replTest.liveNodes.slaves[0];
+ var masterDB = master.getDB(replsetDBName);
+ masterDB.dropDatabase();
+
+ jsTest.log("Create standalone server");
+ var standalone = MongoRunner.runMongod();
+ standalone.getDB("admin").runCommand({setParameter: 1, logLevel: 5});
+ var standaloneDB = standalone.getDB(replsetDBName);
+ standaloneDB.dropDatabase();
+
+ jsTest.log("Insert data into replica set");
+ var bulk = masterDB[testColName].initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; i++) {
+ bulk.insert({x: i, text: str});
+ }
+ assert.writeOK(bulk.execute({w: 3}));
+
+ jsTest.log("Clone db from replica set to standalone server");
+ standaloneDB.cloneDatabase(replTest.getURL());
+ assert.eq(numDocs,
+ standaloneDB[testColName].count(),
+ 'cloneDatabase from replset to standalone failed (document counts do not match)');
+
+ jsTest.log("Clone db from replica set PRIMARY to standalone server");
+ standaloneDB.dropDatabase();
+ standaloneDB.cloneDatabase(master.host);
+ assert.eq(numDocs,
+ standaloneDB[testColName].count(),
+ 'cloneDatabase from PRIMARY to standalone failed (document counts do not match)');
+
+ jsTest.log("Clone db from replica set SECONDARY to standalone server (should not copy)");
+ standaloneDB.dropDatabase();
+ standaloneDB.cloneDatabase(secondary.host);
+ assert.eq(
+ 0,
+ standaloneDB[testColName].count(),
+ 'cloneDatabase from SECONDARY to standalone copied documents without slaveOk: true');
+
+ jsTest.log("Clone db from replica set SECONDARY to standalone server using slaveOk");
+ standaloneDB.dropDatabase();
+ standaloneDB.runCommand({clone: secondary.host, slaveOk: true});
+ assert.eq(
+ numDocs,
+ standaloneDB[testColName].count(),
+ 'cloneDatabase from SECONDARY to standalone failed (document counts do not match)');
+
+ jsTest.log("Switch db and insert data into standalone server");
+ masterDB = master.getDB(standaloneDBName);
+ var secondaryDB = secondary.getDB(standaloneDBName);
+ standaloneDB = standalone.getDB(standaloneDBName);
+ masterDB.dropDatabase();
+ secondaryDB.dropDatabase();
+ standaloneDB.dropDatabase();
+
+ bulk = standaloneDB[testColName].initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; i++) {
+ bulk.insert({x: i, text: str});
+ }
+ assert.writeOK(bulk.execute());
+
+ jsTest.log("Clone db from standalone server to replica set PRIMARY");
+ masterDB.cloneDatabase(standalone.host);
+ replTest.awaitReplication();
+ assert.eq(numDocs,
+ masterDB[testColName].count(),
+ 'cloneDatabase from standalone to PRIMARY failed (document counts do not match)');
+
+ jsTest.log("Clone db from standalone server to replica set SECONDARY");
+ masterDB.dropDatabase();
+ replTest.awaitReplication();
+ secondaryDB.cloneDatabase(standalone.host);
+ assert.eq(
+ 0,
+ secondaryDB[testColName].count(),
+ 'cloneDatabase from standalone to SECONDARY succeeded and should not accept writes');
+
+ jsTest.log("Shut down replica set and standalone server");
+ MongoRunner.stopMongod(standalone.port);
+
+ replTest.stopSet();
}
- assert.writeOK(bulk.execute());
-
- jsTest.log("Clone db from standalone server to replica set PRIMARY");
- masterDB.cloneDatabase(standalone.host);
- replTest.awaitReplication();
- assert.eq(numDocs, masterDB[testColName].count(),
- 'cloneDatabase from standalone to PRIMARY failed (document counts do not match)');
-
- jsTest.log("Clone db from standalone server to replica set SECONDARY");
- masterDB.dropDatabase();
- replTest.awaitReplication();
- secondaryDB.cloneDatabase(standalone.host);
- assert.eq(0, secondaryDB[testColName].count(),
- 'cloneDatabase from standalone to SECONDARY succeeded and should not accept writes');
-
- jsTest.log("Shut down replica set and standalone server");
- MongoRunner.stopMongod(standalone.port);
-
- replTest.stopSet();
-}
})();
diff --git a/jstests/replsets/config_server_checks.js b/jstests/replsets/config_server_checks.js
index 2c6128d75e8..66d30535677 100644
--- a/jstests/replsets/config_server_checks.js
+++ b/jstests/replsets/config_server_checks.js
@@ -5,149 +5,157 @@
function expectState(rst, state) {
assert.soon(function() {
- var status = rst.status();
- if (status.myState != state) {
- print("Waiting for state " + state +
- " in replSetGetStatus output: " + tojson(status));
- }
- return status.myState == state;
- });
+ var status = rst.status();
+ if (status.myState != state) {
+ print("Waiting for state " + state + " in replSetGetStatus output: " + tojson(status));
+ }
+ return status.myState == state;
+ });
}
(function() {
-"use strict";
-
-(function() {
-// Test that node with --configsvr cmd line and configsvr in replset config goes
-// into REMOVED state if storage engine is not WiredTiger
-jsTestLog("configsvr in rs config and --configsvr cmd line, but mmapv1");
-var rst = new ReplSetTest({name: "configrs3", nodes: 1, nodeOptions: {configsvr: "",
- journal: "",
- storageEngine: "mmapv1"}});
-
-rst.startSet();
-var conf = rst.getReplSetConfig();
-conf.configsvr = true;
-try {
- rst.nodes[0].adminCommand({replSetInitiate: conf});
-} catch (e) {
- // expected since we close all connections after going into REMOVED
-}
-expectState(rst, ReplSetTest.State.REMOVED);
-rst.stopSet();
-})();
-
-(function() {
-// Test that node with --configsvr cmd line and configsvr in replset config does NOT go
-// into REMOVED state if storage engine is not WiredTiger but we're running in SCCC mode
-jsTestLog("configsvr in rs config and --configsvr cmd line, but mmapv1 with configSvrMode=sccc");
-var rst = new ReplSetTest({name: "configrs4", nodes: 1, nodeOptions: {configsvr: "",
- journal: "",
- storageEngine: "mmapv1",
- configsvrMode: "sccc"}});
-
-rst.startSet();
-var conf = rst.getReplSetConfig();
-conf.configsvr = true;
-assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf}));
-
-rst.getPrimary();
-expectState(rst, ReplSetTest.State.PRIMARY);
-rst.stopSet();
-})();
-
-(function() {
-// Test that node with --configsvr cmd line and configsvr in replset config and using wiredTiger
-// does NOT go into REMOVED state.
-jsTestLog("configsvr in rs config and --configsvr cmd line, normal case");
-var rst = new ReplSetTest({name: "configrs5",
- nodes: 1,
- nodeOptions: {configsvr: "",
- journal: "",
- storageEngine: "wiredTiger"}});
-
-rst.startSet();
-var conf = rst.getReplSetConfig();
-conf.configsvr = true;
-assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf}));
-
-rst.getPrimary();
-expectState(rst, ReplSetTest.State.PRIMARY);
-
-var conf = rst.getPrimary().getDB('local').system.replset.findOne();
-assert(conf.configsvr, tojson(conf));
-
-rst.stopSet();
-})();
-
-(function() {
-// Test that node with --configsvr cmd line and initiated with an empty replset config
-// will result in configsvr:true getting automatically added to the config (SERVER-20247).
-jsTestLog("--configsvr cmd line, empty config to replSetInitiate");
-var rst = new ReplSetTest({name: "configrs6",
- nodes: 1,
- nodeOptions: {configsvr: "",
- journal: "",
- storageEngine: "wiredTiger"}});
-
-rst.startSet();
-assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: 1}));
-
-rst.getPrimary();
-expectState(rst, ReplSetTest.State.PRIMARY);
-rst.stopSet();
-})();
-
-(function() {
-// Test that a set initialized without --configsvr but then restarted with --configsvr will fail to
-// start up and won't automatically add "configsvr" to the replset config (SERVER-21236).
-jsTestLog("set initiated without configsvr, restarted adding --configsvr cmd line");
-var rst = new ReplSetTest({name: "configrs7",
- nodes: 1,
- nodeOptions: {journal: "",
- storageEngine: "wiredTiger"}});
-
-rst.startSet();
-var conf = rst.getReplSetConfig();
-assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf}));
-
-rst.getPrimary();
-expectState(rst, ReplSetTest.State.PRIMARY);
-assert.throws(function() {
- rst.restart(0, {configsvr: ""});
- });
-
-rst.stopSet();
-})();
-
-(function() {
-// Test that a set initialized with --configsvr but then restarted without --configsvr will fail to
-// start up.
-jsTestLog("set initiated with configsvr, restarted without --configsvr cmd line");
-var rst = new ReplSetTest({name: "configrs8",
- nodes: 1,
- nodeOptions: {configsvr: "",
- journal: "",
- storageEngine: "wiredTiger"}});
-
-rst.startSet();
-var conf = rst.getReplSetConfig();
-conf.configsvr = true;
-assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf}));
-
-rst.getPrimary();
-expectState(rst, ReplSetTest.State.PRIMARY);
-
-var node = rst.nodes[0];
-var options = node.savedOptions;
-delete options.configsvr;
-options.noCleanData = true;
-
-MongoRunner.stopMongod(node);
-var conn = MongoRunner.runMongod(options);
-assert.eq(null, conn, "Mongod should have failed to start, but didn't");
-
-rst.stopSet();
-})();
+ "use strict";
+
+ (function() {
+ // Test that node with --configsvr cmd line and configsvr in replset config goes
+ // into REMOVED state if storage engine is not WiredTiger
+ jsTestLog("configsvr in rs config and --configsvr cmd line, but mmapv1");
+ var rst = new ReplSetTest({
+ name: "configrs3",
+ nodes: 1,
+ nodeOptions: {configsvr: "", journal: "", storageEngine: "mmapv1"}
+ });
+
+ rst.startSet();
+ var conf = rst.getReplSetConfig();
+ conf.configsvr = true;
+ try {
+ rst.nodes[0].adminCommand({replSetInitiate: conf});
+ } catch (e) {
+ // expected since we close all connections after going into REMOVED
+ }
+ expectState(rst, ReplSetTest.State.REMOVED);
+ rst.stopSet();
+ })();
+
+ (function() {
+ // Test that node with --configsvr cmd line and configsvr in replset config does NOT go
+ // into REMOVED state if storage engine is not WiredTiger but we're running in SCCC mode
+ jsTestLog(
+ "configsvr in rs config and --configsvr cmd line, but mmapv1 with configSvrMode=sccc");
+ var rst = new ReplSetTest({
+ name: "configrs4",
+ nodes: 1,
+ nodeOptions:
+ {configsvr: "", journal: "", storageEngine: "mmapv1", configsvrMode: "sccc"}
+ });
+
+ rst.startSet();
+ var conf = rst.getReplSetConfig();
+ conf.configsvr = true;
+ assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf}));
+
+ rst.getPrimary();
+ expectState(rst, ReplSetTest.State.PRIMARY);
+ rst.stopSet();
+ })();
+
+ (function() {
+ // Test that a node started with the --configsvr cmd line option and with
+ // configsvr in the replset config, using the wiredTiger storage engine,
+ // does NOT go into REMOVED state.
+ jsTestLog("configsvr in rs config and --configsvr cmd line, normal case");
+ var rst = new ReplSetTest({
+ name: "configrs5",
+ nodes: 1,
+ nodeOptions: {configsvr: "", journal: "", storageEngine: "wiredTiger"}
+ });
+
+ rst.startSet();
+ var conf = rst.getReplSetConfig();
+ conf.configsvr = true;
+ assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf}));
+
+ rst.getPrimary();
+ expectState(rst, ReplSetTest.State.PRIMARY);
+
+ var conf = rst.getPrimary().getDB('local').system.replset.findOne();
+ assert(conf.configsvr, tojson(conf));
+
+ rst.stopSet();
+ })();
+
+ (function() {
+ // Test that node with --configsvr cmd line and initiated with an empty replset config
+ // will result in configsvr:true getting automatically added to the config (SERVER-20247).
+ jsTestLog("--configsvr cmd line, empty config to replSetInitiate");
+ var rst = new ReplSetTest({
+ name: "configrs6",
+ nodes: 1,
+ nodeOptions: {configsvr: "", journal: "", storageEngine: "wiredTiger"}
+ });
+
+ rst.startSet();
+ assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: 1}));
+
+ rst.getPrimary();
+ expectState(rst, ReplSetTest.State.PRIMARY);
+ rst.stopSet();
+ })();
+
+ (function() {
+ // Test that a set initialized without --configsvr but then restarted with
+ // --configsvr will fail to start up and won't automatically add "configsvr"
+ // to the replset config (SERVER-21236).
+ jsTestLog("set initiated without configsvr, restarted adding --configsvr cmd line");
+ var rst = new ReplSetTest({
+ name: "configrs7",
+ nodes: 1,
+ nodeOptions: {journal: "", storageEngine: "wiredTiger"}
+ });
+
+ rst.startSet();
+ var conf = rst.getReplSetConfig();
+ assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf}));
+
+ rst.getPrimary();
+ expectState(rst, ReplSetTest.State.PRIMARY);
+ assert.throws(function() {
+ rst.restart(0, {configsvr: ""});
+ });
+
+ rst.stopSet();
+ })();
+
+ (function() {
+ // Test that a set initialized with --configsvr but then restarted without
+ // the --configsvr cmd line option will fail to start up (the node restarts
+ // on its existing data files via noCleanData).
+ jsTestLog("set initiated with configsvr, restarted without --configsvr cmd line");
+ var rst = new ReplSetTest({
+ name: "configrs8",
+ nodes: 1,
+ nodeOptions: {configsvr: "", journal: "", storageEngine: "wiredTiger"}
+ });
+
+ rst.startSet();
+ var conf = rst.getReplSetConfig();
+ conf.configsvr = true;
+ assert.commandWorked(rst.nodes[0].adminCommand({replSetInitiate: conf}));
+
+ rst.getPrimary();
+ expectState(rst, ReplSetTest.State.PRIMARY);
+
+ var node = rst.nodes[0];
+ var options = node.savedOptions;
+ delete options.configsvr;
+ options.noCleanData = true;
+
+ MongoRunner.stopMongod(node);
+ var conn = MongoRunner.runMongod(options);
+ assert.eq(null, conn, "Mongod should have failed to start, but didn't");
+
+ rst.stopSet();
+ })();
})();
diff --git a/jstests/replsets/copydb.js b/jstests/replsets/copydb.js
index 59730f70084..dcbe1deefc2 100644
--- a/jstests/replsets/copydb.js
+++ b/jstests/replsets/copydb.js
@@ -27,14 +27,16 @@
assert.commandWorked(primarySourceDB.foo.ensureIndex({a: 1}),
'failed to create index in source collection on primary');
- assert.eq(1, primarySourceDB.foo.find().itcount(),
+ assert.eq(1,
+ primarySourceDB.foo.find().itcount(),
'incorrect number of documents in source collection on primary before copy');
- assert.eq(0, primaryTargetDB.foo.find().itcount(),
+ assert.eq(0,
+ primaryTargetDB.foo.find().itcount(),
'target collection on primary should be empty before copy');
- assert.commandWorked(primarySourceDB.copyDatabase(primarySourceDB.getName(),
- primaryTargetDB.getName()),
- 'failed to copy database');
+ assert.commandWorked(
+ primarySourceDB.copyDatabase(primarySourceDB.getName(), primaryTargetDB.getName()),
+ 'failed to copy database');
assert.eq(primarySourceDB.foo.find().itcount(),
primaryTargetDB.foo.find().itcount(),
diff --git a/jstests/replsets/disallow_adding_initialized_node1.js b/jstests/replsets/disallow_adding_initialized_node1.js
index fe348a81e54..8d4491975b6 100644
--- a/jstests/replsets/disallow_adding_initialized_node1.js
+++ b/jstests/replsets/disallow_adding_initialized_node1.js
@@ -3,20 +3,16 @@
// Initialize two replica sets A and B with the same name: A_0; B_0
// Add B_0 to the replica set A. This operation should fail on replica set A should fail on
// detecting an inconsistent replica set ID in the heartbeat response metadata from B_0.
-(function () {
+(function() {
'use strict';
var name = 'disallow_adding_initialized_node1';
- var replSetA = new ReplSetTest({name: name, nodes: [
- {rsConfig: {_id: 10}},
- ]});
- replSetA.startSet({dbpath : "$set-A-$node"});
+ var replSetA = new ReplSetTest({name: name, nodes: [{rsConfig: {_id: 10}}, ]});
+ replSetA.startSet({dbpath: "$set-A-$node"});
replSetA.initiate();
- var replSetB = new ReplSetTest({name: name, nodes: [
- {rsConfig: {_id: 20}},
- ]});
- replSetB.startSet({dbpath : "$set-B-$node"});
+ var replSetB = new ReplSetTest({name: name, nodes: [{rsConfig: {_id: 20}}, ]});
+ replSetB.startSet({dbpath: "$set-B-$node"});
replSetB.initiate();
var primaryA = replSetA.getPrimary();
@@ -34,12 +30,11 @@
jsTestLog("Adding replica set B's primary " + primaryB.host + " to replica set A's config");
configA.version++;
configA.members.push({_id: 11, host: primaryB.host});
- var reconfigResult = assert.commandFailedWithCode(
- primaryA.adminCommand({replSetReconfig: configA}),
- ErrorCodes.NewReplicaSetConfigurationIncompatible);
- var msgA =
- 'Our replica set ID of ' + configA.settings.replicaSetId + ' did not match that of ' +
- primaryB.host + ', which is ' + configB.settings.replicaSetId;
+ var reconfigResult =
+ assert.commandFailedWithCode(primaryA.adminCommand({replSetReconfig: configA}),
+ ErrorCodes.NewReplicaSetConfigurationIncompatible);
+ var msgA = 'Our replica set ID of ' + configA.settings.replicaSetId +
+ ' did not match that of ' + primaryB.host + ', which is ' + configB.settings.replicaSetId;
assert.neq(-1, reconfigResult.errmsg.indexOf(msgA));
var newPrimaryA = replSetA.getPrimary();
@@ -61,8 +56,7 @@
return false;
}, 'Did not see a log entry containing the following message: ' + msg, 10000, 1000);
};
- var msgB =
- "replica set IDs do not match, ours: " + configB.settings.replicaSetId +
+ var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetId +
"; remote node's: " + configA.settings.replicaSetId;
checkLog(primaryB, msgB);
diff --git a/jstests/replsets/disallow_adding_initialized_node2.js b/jstests/replsets/disallow_adding_initialized_node2.js
index cc1cd09bf1f..c4125f7c069 100644
--- a/jstests/replsets/disallow_adding_initialized_node2.js
+++ b/jstests/replsets/disallow_adding_initialized_node2.js
@@ -8,21 +8,17 @@
// This test requires users to persist across a restart.
// @tags: [requires_persistence]
-(function () {
+(function() {
'use strict';
var name = 'disallow_adding_initialized_node2';
- var replSetA = new ReplSetTest({name: name, nodes: [
- {rsConfig: {_id: 10}},
- {rsConfig: {_id: 11, arbiterOnly: true}},
- ]});
- replSetA.startSet({dbpath : "$set-A-$node"});
+ var replSetA = new ReplSetTest(
+ {name: name, nodes: [{rsConfig: {_id: 10}}, {rsConfig: {_id: 11, arbiterOnly: true}}, ]});
+ replSetA.startSet({dbpath: "$set-A-$node"});
replSetA.initiate();
- var replSetB = new ReplSetTest({name: name, nodes: [
- {rsConfig: {_id: 20}},
- ]});
- replSetB.startSet({dbpath : "$set-B-$node"});
+ var replSetB = new ReplSetTest({name: name, nodes: [{rsConfig: {_id: 20}}, ]});
+ replSetB.startSet({dbpath: "$set-B-$node"});
replSetB.initiate();
var primaryA = replSetA.getPrimary();
@@ -46,7 +42,7 @@
assert.commandWorked(primaryA.adminCommand({replSetReconfig: configA}));
jsTestLog("Restarting B's primary " + primaryB.host);
- primaryB = replSetB.start(0, {dbpath : "$set-B-$node", restart: true});
+ primaryB = replSetB.start(0, {dbpath: "$set-B-$node", restart: true});
var newPrimaryA = replSetA.getPrimary();
var newPrimaryB = replSetB.getPrimary();
@@ -67,11 +63,9 @@
return false;
}, 'Did not see a log entry containing the following message: ' + msg, 10000, 1000);
};
- var msgA =
- "replica set IDs do not match, ours: " + configA.settings.replicaSetId +
+ var msgA = "replica set IDs do not match, ours: " + configA.settings.replicaSetId +
"; remote node's: " + configB.settings.replicaSetId;
- var msgB =
- "replica set IDs do not match, ours: " + configB.settings.replicaSetId +
+ var msgB = "replica set IDs do not match, ours: " + configB.settings.replicaSetId +
"; remote node's: " + configA.settings.replicaSetId;
checkLog(primaryA, msgA);
checkLog(primaryB, msgB);
diff --git a/jstests/replsets/drain.js b/jstests/replsets/drain.js
index 95472471f48..5d20ff6a9d6 100644
--- a/jstests/replsets/drain.js
+++ b/jstests/replsets/drain.js
@@ -9,17 +9,19 @@
// 7. Enable applying ops.
// 8. Ensure the ops in queue are applied and that the PRIMARY begins to accept writes as usual.
-(function () {
+(function() {
"use strict";
var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
var nodes = replSet.nodeList();
replSet.startSet();
- replSet.initiate({"_id" : "testSet",
- "members" : [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
-
+ replSet.initiate({
+ "_id": "testSet",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+ });
var primary = replSet.getPrimary();
var secondary = replSet.getSecondary();
@@ -28,18 +30,16 @@
// Do an initial insert to prevent the secondary from going into recovery
var numDocuments = 20;
var bulk = primary.getDB("foo").foo.initializeUnorderedBulkOp();
- var bigString = Array(1024*1024).toString();
- assert.writeOK(primary.getDB("foo").foo.insert({ big: bigString}));
+ var bigString = Array(1024 * 1024).toString();
+ assert.writeOK(primary.getDB("foo").foo.insert({big: bigString}));
replSet.awaitReplication();
- assert.commandWorked(
- secondary.getDB("admin").runCommand({
- configureFailPoint: 'rsSyncApplyStop',
- mode: 'alwaysOn'}),
- 'failed to enable fail point on secondary');
+ assert.commandWorked(secondary.getDB("admin").runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
+ 'failed to enable fail point on secondary');
var bufferCountBefore = secondary.getDB('foo').serverStatus().metrics.repl.buffer.count;
for (var i = 1; i < numDocuments; ++i) {
- bulk.insert({ big: bigString});
+ bulk.insert({big: bigString});
}
assert.writeOK(bulk.execute());
jsTestLog('Number of documents inserted into collection on primary: ' + numDocuments);
@@ -50,19 +50,19 @@
var bufferCount = serverStatus.metrics.repl.buffer.count;
var bufferCountChange = bufferCount - bufferCountBefore;
jsTestLog('Number of operations buffered on secondary since stopping applier: ' +
- bufferCountChange);
+ bufferCountChange);
return bufferCountChange >= numDocuments - 1;
}, 'secondary did not buffer operations for new inserts on primary', 30000, 1000);
// Kill primary; secondary will enter drain mode to catch up
- primary.getDB("admin").shutdownServer({force:true});
+ primary.getDB("admin").shutdownServer({force: true});
- var electionTimeout = (isPV0 ? 60 : 20 ) * 1000; // Timeout in milliseconds
+ var electionTimeout = (isPV0 ? 60 : 20) * 1000; // Timeout in milliseconds
replSet.waitForState(secondary, ReplSetTest.State.PRIMARY, electionTimeout);
// Ensure new primary is not yet writable
jsTestLog('New primary should not be writable yet');
- assert.writeError(secondary.getDB("foo").flag.insert({sentinel:2}));
+ assert.writeError(secondary.getDB("foo").flag.insert({sentinel: 2}));
assert(!secondary.getDB("admin").runCommand({"isMaster": 1}).ismaster);
// Ensure new primary is not yet readable without slaveOk bit.
@@ -70,14 +70,16 @@
jsTestLog('New primary should not be readable yet, without slaveOk bit');
var res = secondary.getDB("foo").runCommand({find: "foo"});
assert.commandFailed(res);
- assert.eq(ErrorCodes.NotMasterNoSlaveOk, res.code,
- "find failed with unexpected error code: " + tojson(res));
+ assert.eq(ErrorCodes.NotMasterNoSlaveOk,
+ res.code,
+ "find failed with unexpected error code: " + tojson(res));
// Nor should it be readable with the slaveOk bit.
secondary.slaveOk = true;
res = secondary.getDB("foo").runCommand({find: "foo"});
assert.commandFailed(res);
- assert.eq(ErrorCodes.NotMasterOrSecondary, res.code,
- "find failed with unexpected error code: " + tojson(res));
+ assert.eq(ErrorCodes.NotMasterOrSecondary,
+ res.code,
+ "find failed with unexpected error code: " + tojson(res));
secondary.slaveOk = false;
assert.commandFailedWithCode(
@@ -86,8 +88,7 @@
waitForDrainFinish: 5000,
}),
ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete'
- );
+ 'replSetTest waitForDrainFinish should time out when draining is not allowed to complete');
// Allow draining to complete
jsTestLog('Disabling fail point on new primary to allow draining to complete');
@@ -95,18 +96,17 @@
secondary.getDB("admin").runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
'failed to disable fail point on new primary');
primary = replSet.getPrimary();
-
+
assert.commandWorked(
secondary.adminCommand({
replSetTest: 1,
waitForDrainFinish: 5000,
}),
- 'replSetTest waitForDrainFinish should work when draining is allowed to complete'
- );
+ 'replSetTest waitForDrainFinish should work when draining is allowed to complete');
// Ensure new primary is writable
jsTestLog('New primary should be writable after draining is complete');
- assert.writeOK(primary.getDB("foo").flag.insert({sentinel:1}));
+ assert.writeOK(primary.getDB("foo").flag.insert({sentinel: 1}));
// Check for at least two entries. There was one prior to freezing op application on the
// secondary and we cannot guarantee all writes reached the secondary's op queue prior to
// shutting down the original primary.
diff --git a/jstests/replsets/drop_oplog.js b/jstests/replsets/drop_oplog.js
index 8a84bb2050e..ddac3904457 100644
--- a/jstests/replsets/drop_oplog.js
+++ b/jstests/replsets/drop_oplog.js
@@ -1,17 +1,17 @@
// Test that dropping either the replset oplog or the local database is prohibited in a replset.
-(function () {
+(function() {
"use strict";
- var rt = new ReplSetTest( { name : "drop_oplog" , nodes: 1, oplogSize: 30 } );
+ var rt = new ReplSetTest({name: "drop_oplog", nodes: 1, oplogSize: 30});
var nodes = rt.startSet();
rt.initiate();
var master = rt.getPrimary();
- var ml = master.getDB( 'local' );
+ var ml = master.getDB('local');
var threw = false;
- var ret = assert.commandFailed(ml.runCommand({ drop: 'oplog.rs' }));
+ var ret = assert.commandFailed(ml.runCommand({drop: 'oplog.rs'}));
assert.eq('can\'t drop live oplog while replicating', ret.errmsg);
var dropOutput = ml.dropDatabase();
@@ -20,13 +20,11 @@
var renameOutput = ml.oplog.rs.renameCollection("poison");
assert.eq(renameOutput.ok, 0);
- assert.eq(renameOutput.errmsg,
- "can't rename live oplog while replicating");
+ assert.eq(renameOutput.errmsg, "can't rename live oplog while replicating");
- assert.writeOK(ml.foo.insert( {a:1} ));
+ assert.writeOK(ml.foo.insert({a: 1}));
renameOutput = ml.foo.renameCollection("oplog.rs");
assert.eq(renameOutput.ok, 0);
- assert.eq(renameOutput.errmsg,
- "can't rename to live oplog while replicating");
+ assert.eq(renameOutput.errmsg, "can't rename to live oplog while replicating");
}());
diff --git a/jstests/replsets/election_id.js b/jstests/replsets/election_id.js
index 917be72d561..ff079ca1172 100644
--- a/jstests/replsets/election_id.js
+++ b/jstests/replsets/election_id.js
@@ -8,94 +8,93 @@ load("jstests/replsets/rslib.js");
// any PV0 election id. On downgrade, the election id will be updated to old PV0 format.
(function() {
-"use strict";
-
-function checkPV1ElectionId(electionId) {
- var electionIdStr = electionId.valueOf();
- assert.eq(electionIdStr.slice(0, 8), "7fffffff");
- var res = assert.commandWorked(rst.getPrimary().adminCommand({replSetGetStatus: 1}));
- var termStr = "" + res.term;
- assert.eq(electionIdStr.slice(-termStr.length), termStr);
-}
-
-var name = "election_id";
-var rst = new ReplSetTest({name: name, nodes: 3});
-
-rst.startSet();
-// Initiate the replset in protocol version 0.
-var conf = rst.getReplSetConfig();
-conf.protocolVersion = 0;
-rst.initiate(conf);
-rst.awaitSecondaryNodes();
-
-var primary = rst.getPrimary();
-var primaryColl = primary.getDB("test").coll;
-
-// Do a write, this will set up sync sources on secondaries.
-assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: 3}}));
-
-var res = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
-var oldElectionId = res.repl.electionId;
-
-// Upgrade protocol version
-//
-conf = rst.getReplSetConfigFromNode();
-conf.protocolVersion = 1;
-conf.version++;
-reconfig(rst, conf);
-// This write will block until all nodes finish upgrade.
-assert.writeOK(primaryColl.insert({x: 2}, {writeConcern: {w: 3}}));
-
-// Check election id after upgrade
-res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1}));
-var newElectionId = res.repl.electionId;
-assert.lt(oldElectionId.valueOf(), newElectionId.valueOf());
-checkPV1ElectionId(newElectionId);
-oldElectionId = newElectionId;
-
-// Step down
-assert.throws(function() {
- var res = primary.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 30});
- // Error out if stepdown command failed to run and throw.
- printjson(res);
-});
-rst.awaitSecondaryNodes();
-res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1}));
-var newElectionId = res.repl.electionId;
-
-// Compare the string of ObjectId
-assert.lt(oldElectionId.valueOf(), newElectionId.valueOf());
-checkPV1ElectionId(newElectionId);
-oldElectionId = newElectionId;
-
-
-// Downgrade protocol version
-//
-conf = rst.getReplSetConfigFromNode();
-conf.protocolVersion = 0;
-conf.version++;
-reconfig(rst, conf);
-// This write will block until all nodes finish upgrade.
-assert.writeOK(rst.getPrimary().getDB("test").coll.insert({x: 2}, {writeConcern: {w: 3}}));
-
-// Check election id after downgrade
-res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1}));
-var newElectionId = res.repl.electionId;
-// new election id in PV0 is less than the old one in PV1.
-assert.gt(oldElectionId.valueOf(), newElectionId.valueOf());
-oldElectionId = newElectionId;
-
-
-// Step down
-assert.throws(function() {
- var res = rst.getPrimary().adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 30});
- // Error out if stepdown command failed to run and throw.
- printjson(res);
-});
-rst.awaitSecondaryNodes();
-res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1}));
-var newElectionId = res.repl.electionId;
-assert.lt(oldElectionId.valueOf(), newElectionId.valueOf());
-oldElectionId = newElectionId;
+ "use strict";
+
+ function checkPV1ElectionId(electionId) {
+ var electionIdStr = electionId.valueOf();
+ assert.eq(electionIdStr.slice(0, 8), "7fffffff");
+ var res = assert.commandWorked(rst.getPrimary().adminCommand({replSetGetStatus: 1}));
+ var termStr = "" + res.term;
+ assert.eq(electionIdStr.slice(-termStr.length), termStr);
+ }
+
+ var name = "election_id";
+ var rst = new ReplSetTest({name: name, nodes: 3});
+
+ rst.startSet();
+ // Initiate the replset in protocol version 0.
+ var conf = rst.getReplSetConfig();
+ conf.protocolVersion = 0;
+ rst.initiate(conf);
+ rst.awaitSecondaryNodes();
+
+ var primary = rst.getPrimary();
+ var primaryColl = primary.getDB("test").coll;
+
+ // Do a write, this will set up sync sources on secondaries.
+ assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: 3}}));
+
+ var res = assert.commandWorked(primary.adminCommand({serverStatus: 1}));
+ var oldElectionId = res.repl.electionId;
+
+ // Upgrade protocol version
+ //
+ conf = rst.getReplSetConfigFromNode();
+ conf.protocolVersion = 1;
+ conf.version++;
+ reconfig(rst, conf);
+ // This write will block until all nodes finish upgrade.
+ assert.writeOK(primaryColl.insert({x: 2}, {writeConcern: {w: 3}}));
+
+ // Check election id after upgrade
+ res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1}));
+ var newElectionId = res.repl.electionId;
+ assert.lt(oldElectionId.valueOf(), newElectionId.valueOf());
+ checkPV1ElectionId(newElectionId);
+ oldElectionId = newElectionId;
+
+ // Step down
+ assert.throws(function() {
+ var res = primary.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 30});
+ // Error out if stepdown command failed to run and throw.
+ printjson(res);
+ });
+ rst.awaitSecondaryNodes();
+ res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1}));
+ var newElectionId = res.repl.electionId;
+
+ // Compare the string of ObjectId
+ assert.lt(oldElectionId.valueOf(), newElectionId.valueOf());
+ checkPV1ElectionId(newElectionId);
+ oldElectionId = newElectionId;
+
+ // Downgrade protocol version
+ //
+ conf = rst.getReplSetConfigFromNode();
+ conf.protocolVersion = 0;
+ conf.version++;
+ reconfig(rst, conf);
+ // This write will block until all nodes finish upgrade.
+ assert.writeOK(rst.getPrimary().getDB("test").coll.insert({x: 2}, {writeConcern: {w: 3}}));
+
+ // Check election id after downgrade
+ res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1}));
+ var newElectionId = res.repl.electionId;
+ // new election id in PV0 is less than the old one in PV1.
+ assert.gt(oldElectionId.valueOf(), newElectionId.valueOf());
+ oldElectionId = newElectionId;
+
+ // Step down
+ assert.throws(function() {
+ var res =
+ rst.getPrimary().adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 30});
+ // Error out if stepdown command failed to run and throw.
+ printjson(res);
+ });
+ rst.awaitSecondaryNodes();
+ res = assert.commandWorked(rst.getPrimary().adminCommand({serverStatus: 1}));
+ var newElectionId = res.repl.electionId;
+ assert.lt(oldElectionId.valueOf(), newElectionId.valueOf());
+ oldElectionId = newElectionId;
})();
diff --git a/jstests/replsets/election_not_blocked.js b/jstests/replsets/election_not_blocked.js
index ec916f72cf7..95b53be1ebc 100644
--- a/jstests/replsets/election_not_blocked.js
+++ b/jstests/replsets/election_not_blocked.js
@@ -1,5 +1,5 @@
/* Check that the fsyncLock'ed secondary will not veto an election
- *
+ *
* 1. start a three node set with a hidden, priority:0 node which we will fsyncLock
* 2. do a write to master
* 3. fsyncLock the hidden, priority:0 node
@@ -10,24 +10,26 @@
(function() {
"use strict";
var name = "electionNotBlocked";
- var replTest = new ReplSetTest({ name: name, nodes: 3 });
+ var replTest = new ReplSetTest({name: name, nodes: 3});
var host = replTest.host;
var nodes = replTest.startSet();
var port = replTest.ports;
- replTest.initiate({_id: name, members:
- [
- {_id: 0, host: host+":"+port[0], priority: 3},
- {_id: 1, host: host+":"+port[1]},
- {_id: 2, host: host+":"+port[2], hidden: true, priority: 0},
- ],
- // In PV1, a voter writes the last vote to disk before granting the vote,
- // so it cannot vote while fsync locked in PV1. Use PV0 explicitly here.
- protocolVersion: 0});
+ replTest.initiate({
+ _id: name,
+ members: [
+ {_id: 0, host: host + ":" + port[0], priority: 3},
+ {_id: 1, host: host + ":" + port[1]},
+ {_id: 2, host: host + ":" + port[2], hidden: true, priority: 0},
+ ],
+ // In PV1, a voter writes the last vote to disk before granting the vote,
+ // so it cannot vote while fsync locked in PV1. Use PV0 explicitly here.
+ protocolVersion: 0
+ });
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
// do a write
- assert.writeOK(master.getDB("foo").bar.insert({x:1}, {writeConcern: {w: 3}}));
+ assert.writeOK(master.getDB("foo").bar.insert({x: 1}, {writeConcern: {w: 3}}));
var slave = replTest.liveNodes.slaves[0];
// lock secondary
@@ -37,7 +39,7 @@
// take down master
replTest.stop(0);
- replTest.waitForState(slave, ReplSetTest.State.PRIMARY, 90*1000);
+ replTest.waitForState(slave, ReplSetTest.State.PRIMARY, 90 * 1000);
locked.getDB("admin").fsyncUnlock();
replTest.stopSet();
diff --git a/jstests/replsets/explain_slaveok.js b/jstests/replsets/explain_slaveok.js
index 93069e6ac01..8cd715af648 100644
--- a/jstests/replsets/explain_slaveok.js
+++ b/jstests/replsets/explain_slaveok.js
@@ -38,23 +38,13 @@ assert.eq(1, secondary.getDB("test").explain_slaveok.findOne({a: 1})["a"]);
//
// Explain a count on the primary.
-var explainOut = primary.getDB("test").runCommand({
- explain: {
- count: "explain_slaveok",
- query: {a: 1}
- },
- verbosity: "executionStats"
-});
+var explainOut = primary.getDB("test").runCommand(
+ {explain: {count: "explain_slaveok", query: {a: 1}}, verbosity: "executionStats"});
assert.commandWorked(explainOut, "explain read op on primary");
// Explain an update on the primary.
explainOut = primary.getDB("test").runCommand({
- explain: {
- update: "explain_slaveok",
- updates: [
- {q: {a: 1}, u: {$set: {a: 5}}}
- ]
- },
+ explain: {update: "explain_slaveok", updates: [{q: {a: 1}, u: {$set: {a: 5}}}]},
verbosity: "executionStats"
});
assert.commandWorked(explainOut, "explain write op on primary");
@@ -78,67 +68,54 @@ assert.eq(1, secondary.getDB("test").explain_slaveok.findOne({a: 1})["a"]);
// Explain a count on the secondary with slaveOk off. Should fail because
// slaveOk is required for explains on a secondary.
secondary.getDB("test").getMongo().setSlaveOk(false);
-explainOut = secondary.getDB("test").runCommand({
- explain: {
- count: "explain_slaveok",
- query: {a: 1}
- },
- verbosity: "executionStats"
-});
+explainOut = secondary.getDB("test").runCommand(
+ {explain: {count: "explain_slaveok", query: {a: 1}}, verbosity: "executionStats"});
assert.commandFailed(explainOut, "explain read op on secondary, slaveOk false");
// Explain of count should succeed once slaveOk is true.
secondary.getDB("test").getMongo().setSlaveOk(true);
-explainOut = secondary.getDB("test").runCommand({
- explain: {
- count: "explain_slaveok",
- query: {a: 1}
- },
- verbosity: "executionStats"
-});
+explainOut = secondary.getDB("test").runCommand(
+ {explain: {count: "explain_slaveok", query: {a: 1}}, verbosity: "executionStats"});
assert.commandWorked(explainOut, "explain read op on secondary, slaveOk true");
// Explain .find() on a secondary, setting slaveOk directly on the query.
secondary.getDB("test").getMongo().setSlaveOk(false);
assert.throws(function() {
- secondary.getDB("test").explain_slaveok.explain("executionStats")
- .find({a: 1})
- .finish();
+ secondary.getDB("test").explain_slaveok.explain("executionStats").find({a: 1}).finish();
});
secondary.getDB("test").getMongo().setSlaveOk(false);
-explainOut = secondary.getDB("test").explain_slaveok.explain("executionStats")
- .find({a: 1})
- .addOption(DBQuery.Option.slaveOk)
- .finish();
+explainOut = secondary.getDB("test")
+ .explain_slaveok.explain("executionStats")
+ .find({a: 1})
+ .addOption(DBQuery.Option.slaveOk)
+ .finish();
assert.commandWorked(explainOut, "explain read op on secondary, slaveOk set to true on query");
secondary.getDB("test").getMongo().setSlaveOk(true);
-explainOut = secondary.getDB("test").explain_slaveok.explain("executionStats")
- .find({a: 1})
- .finish();
+explainOut =
+ secondary.getDB("test").explain_slaveok.explain("executionStats").find({a: 1}).finish();
assert.commandWorked(explainOut, "explain .find() on secondary, slaveOk set to true");
// Explain .find() on a secondary, setting slaveOk to false with various read preferences.
var readPrefModes = ["secondary", "secondaryPreferred", "primaryPreferred", "nearest"];
readPrefModes.forEach(function(prefString) {
secondary.getDB("test").getMongo().setSlaveOk(false);
- explainOut = secondary.getDB("test").explain_slaveok.explain("executionStats")
- .find({a: 1})
- .readPref(prefString)
- .finish();
- assert.commandWorked(explainOut, "explain .find() on secondary, '"
- + prefString
- + "' read preference on query");
+ explainOut = secondary.getDB("test")
+ .explain_slaveok.explain("executionStats")
+ .find({a: 1})
+ .readPref(prefString)
+ .finish();
+ assert.commandWorked(
+ explainOut, "explain .find() on secondary, '" + prefString + "' read preference on query");
// Similarly should succeed if a read preference is set on the connection.
secondary.setReadPref(prefString);
- explainOut = secondary.getDB("test").explain_slaveok.explain("executionStats")
- .find({a: 1})
- .finish();
- assert.commandWorked(explainOut, "explain .find() on secondary, '"
- + prefString
- + "' read preference on connection");
+ explainOut =
+ secondary.getDB("test").explain_slaveok.explain("executionStats").find({a: 1}).finish();
+ assert.commandWorked(
+ explainOut,
+ "explain .find() on secondary, '" + prefString + "' read preference on connection");
// Unset read pref on the connection.
secondary.setReadPref();
});
@@ -146,24 +123,14 @@ readPrefModes.forEach(function(prefString) {
// Fail explain find() on a secondary, setting slaveOk to false with read preference set to primary.
var prefStringPrimary = "primary";
secondary.getDB("test").getMongo().setSlaveOk(false);
-explainOut = secondary.getDB("test").runCommand({
- explain: {
- find: "explain_slaveok",
- query: {a: 1}
- },
- verbosity: "executionStats"
-});
+explainOut = secondary.getDB("test").runCommand(
+ {explain: {find: "explain_slaveok", query: {a: 1}}, verbosity: "executionStats"});
assert.commandFailed(explainOut, "not master and slaveOk=false");
// Similarly should fail if a read preference is set on the connection.
secondary.setReadPref(prefStringPrimary);
-explainOut = secondary.getDB("test").runCommand({
- explain: {
- find: "explain_slaveok",
- query: {a: 1}
- },
- verbosity: "executionStats"
-});
+explainOut = secondary.getDB("test").runCommand(
+ {explain: {find: "explain_slaveok", query: {a: 1}}, verbosity: "executionStats"});
assert.commandFailed(explainOut, "not master and slaveOk=false");
// Unset read pref on the connection.
secondary.setReadPref();
@@ -172,12 +139,7 @@ secondary.setReadPref();
// slaveOk is required for explains on a secondary.
secondary.getDB("test").getMongo().setSlaveOk(false);
explainOut = secondary.getDB("test").runCommand({
- explain: {
- update: "explain_slaveok",
- updates: [
- {q: {a: 1}, u: {$set: {a: 5}}}
- ]
- },
+ explain: {update: "explain_slaveok", updates: [{q: {a: 1}, u: {$set: {a: 5}}}]},
verbosity: "executionStats"
});
assert.commandFailed(explainOut, "explain write op on secondary, slaveOk false");
@@ -185,12 +147,7 @@ assert.commandFailed(explainOut, "explain write op on secondary, slaveOk false")
// Explain of the update should also fail with slaveOk on.
secondary.getDB("test").getMongo().setSlaveOk(true);
explainOut = secondary.getDB("test").runCommand({
- explain: {
- update: "explain_slaveok",
- updates: [
- {q: {a: 1}, u: {$set: {a: 5}}}
- ]
- },
+ explain: {update: "explain_slaveok", updates: [{q: {a: 1}, u: {$set: {a: 5}}}]},
verbosity: "executionStats"
});
assert.commandFailed(explainOut, "explain write op on secondary, slaveOk true");
diff --git a/jstests/replsets/find_and_modify_wc.js b/jstests/replsets/find_and_modify_wc.js
index 21725c0e6d8..f6cdb092697 100644
--- a/jstests/replsets/find_and_modify_wc.js
+++ b/jstests/replsets/find_and_modify_wc.js
@@ -5,8 +5,8 @@
'use strict';
var nodeCount = 3;
- var rst = new ReplSetTest({ nodes: nodeCount });
- rst.startSet({ nojournal: "" });
+ var rst = new ReplSetTest({nodes: nodeCount});
+ rst.startSet({nojournal: ""});
rst.initiate();
var primary = rst.getPrimary();
@@ -16,20 +16,19 @@
// insert some documents
var docs = [];
for (var i = 1; i <= 5; ++i) {
- docs.push({ i: i, j: 2*i });
+ docs.push({i: i, j: 2 * i});
}
- var res = coll.runCommand({ insert: coll.getName(),
- documents: docs,
- writeConcern: { w: nodeCount } });
+ var res =
+ coll.runCommand({insert: coll.getName(), documents: docs, writeConcern: {w: nodeCount}});
assert(res.ok);
assert.eq(5, coll.count());
// use for updates in subsequent runCommand calls
var reqUpdate = {
findAndModify: coll.getName(),
- query: { i: 3 },
- update: { $inc: { j: 1 } },
- writeConcern: { w: 'majority' }
+ query: {i: 3},
+ update: {$inc: {j: 1}},
+ writeConcern: {w: 'majority'}
};
// Verify findAndModify returns old document new: false
@@ -50,22 +49,15 @@
assert(!res.writeConcernError);
// Verify findAndModify remove works
- res = coll.runCommand({
- findAndModify: coll.getName(),
- sort: { i: 1 },
- remove: true,
- writeConcern: { w: nodeCount }
- });
+ res = coll.runCommand(
+ {findAndModify: coll.getName(), sort: {i: 1}, remove: true, writeConcern: {w: nodeCount}});
assert.eq(res.value.i, 1);
assert.eq(coll.count(), 4);
assert(!res.writeConcernError);
// Verify findAndModify returns writeConcernError
// when given invalid writeConcerns
- [
- { w: 'invalid' },
- { w: nodeCount + 1 }
- ].forEach(function(wc) {
+ [{w: 'invalid'}, {w: nodeCount + 1}].forEach(function(wc) {
reqUpdate.writeConcern = wc;
res = coll.runCommand(reqUpdate);
diff --git a/jstests/replsets/fsync_lock_read_secondaries.js b/jstests/replsets/fsync_lock_read_secondaries.js
index b8f8c0aa149..8b9127c8c43 100644
--- a/jstests/replsets/fsync_lock_read_secondaries.js
+++ b/jstests/replsets/fsync_lock_read_secondaries.js
@@ -1,5 +1,5 @@
/* @file : jstests/fsync_lock_read_secondaries.js
- *
+ *
* SERVER 4243 : If there is a pending write due to an fsync lock, all reads are blocked
*
* This test validates part of SERVER-4243 ticket. Allow reading on secondaries with fsyncLock
@@ -22,52 +22,53 @@
* witness as an increase in the count of documents stored on the secondary.
*/
(function() {
-"use strict";
-// Load utility methods for replica set tests
-load("jstests/replsets/rslib.js");
+ "use strict";
+ // Load utility methods for replica set tests
+ load("jstests/replsets/rslib.js");
-var replTest = new ReplSetTest({name: 'testSet', nodes: 2, oplogSize: 5});
-// Start each mongod in the replica set. Returns a list of nodes
-var nodes = replTest.startSet();
-// This will wait for initiation
-replTest.initiate();
-var master = replTest.getPrimary();
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 2, oplogSize: 5});
+ // Start each mongod in the replica set. Returns a list of nodes
+ var nodes = replTest.startSet();
+ // This will wait for initiation
+ replTest.initiate();
+ var master = replTest.getPrimary();
-var ret = master.getDB("admin").fsyncLock();
-if (!ret.ok) {
- assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
- jsTestLog("Storage Engine does not support fsyncLock, so bailing");
- return;
-}
-master.getDB("admin").fsyncUnlock();
+ var ret = master.getDB("admin").fsyncLock();
+ if (!ret.ok) {
+ assert.commandFailedWithCode(ret, ErrorCodes.CommandNotSupported);
+ jsTestLog("Storage Engine does not support fsyncLock, so bailing");
+ return;
+ }
+ master.getDB("admin").fsyncUnlock();
-var docNum = 100;
-for(var i=0; i<docNum; i++) {
- master.getDB("foo").bar.save({a: i});
-}
-waitForAllMembers(master.getDB("foo"));
-replTest.awaitReplication();
+ var docNum = 100;
+ for (var i = 0; i < docNum; i++) {
+ master.getDB("foo").bar.save({a: i});
+ }
+ waitForAllMembers(master.getDB("foo"));
+ replTest.awaitReplication();
-// Calling getPrimary also makes available the liveNodes structure, which looks like this:
-// liveNodes = {master: masterNode, slaves: [slave1, slave2] }
-var slaves = replTest.liveNodes.slaves;
-slaves[0].setSlaveOk();
+ // Calling getPrimary also makes available the liveNodes structure, which looks like this:
+ // liveNodes = {master: masterNode, slaves: [slave1, slave2] }
+ var slaves = replTest.liveNodes.slaves;
+ slaves[0].setSlaveOk();
-assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync:1, lock: 1}));
-var docNum = 1000;
-for (var i=0; i<docNum; i++) {
- master.getDB("foo").bar.save({a: i});
-}
-// Issue a read query on the secondary while holding the fsync lock.
-// This is what we are testing. Previously this would block. After the fix
-// this should work just fine.
-var slave0count = slaves[0].getDB("foo").bar.count();
-assert.eq(slave0count, 100, "Doc count in fsync lock wrong. Expected (=100), found " + slave0count);
-assert(slaves[0].getDB("admin").fsyncUnlock().ok);
+ assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync: 1, lock: 1}));
+ var docNum = 1000;
+ for (var i = 0; i < docNum; i++) {
+ master.getDB("foo").bar.save({a: i});
+ }
+ // Issue a read query on the secondary while holding the fsync lock.
+ // This is what we are testing. Previously this would block. After the fix
+ // this should work just fine.
+ var slave0count = slaves[0].getDB("foo").bar.count();
+ assert.eq(
+ slave0count, 100, "Doc count in fsync lock wrong. Expected (=100), found " + slave0count);
+ assert(slaves[0].getDB("admin").fsyncUnlock().ok);
-// The secondary should have equal or more documents than what it had before.
-assert.soon(function() {
+ // The secondary should have equal or more documents than what it had before.
+ assert.soon(function() {
return slaves[0].getDB("foo").bar.count() > 100;
}, "count of documents stored on the secondary did not increase");
-replTest.stopSet();
+ replTest.stopSet();
}());
diff --git a/jstests/replsets/get_replication_info_helper.js b/jstests/replsets/get_replication_info_helper.js
index c031fb58779..cd6ef7d8a10 100644
--- a/jstests/replsets/get_replication_info_helper.js
+++ b/jstests/replsets/get_replication_info_helper.js
@@ -1,6 +1,6 @@
// Tests the output of db.getReplicationInfo() and tests db.printSlaveReplicationInfo().
-(function () {
+(function() {
"use strict";
var name = "getReplicationInfo";
var replSet = new ReplSetTest({name: name, nodes: 3, oplogSize: 50});
@@ -10,7 +10,7 @@
var primary = replSet.getPrimary();
for (var i = 0; i < 100; i++) {
- primary.getDB('test').foo.insert({a:i});
+ primary.getDB('test').foo.insert({a: i});
}
replSet.awaitReplication();
@@ -28,8 +28,8 @@
// calling this function with and without a primary, should provide sufficient code coverage
// to catch any JS errors
- var mongo = startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();",
- primary.port);
+ var mongo =
+ startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", primary.port);
mongo();
assert.soon(function() {
return rawMongoProgramOutput().match("behind the primary");
@@ -42,11 +42,11 @@
}
try {
primary.getDB('admin').runCommand({replSetStepDown: 120, force: true});
+ } catch (e) {
}
- catch (e) {}
- mongo = startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();",
- primary.port);
+ mongo =
+ startParallelShell("db.getSiblingDB('admin').printSlaveReplicationInfo();", primary.port);
mongo();
assert.soon(function() {
return rawMongoProgramOutput().match("behind the freshest");
diff --git a/jstests/replsets/get_status.js b/jstests/replsets/get_status.js
index c69764c3d9a..31a49dc1300 100644
--- a/jstests/replsets/get_status.js
+++ b/jstests/replsets/get_status.js
@@ -3,7 +3,7 @@
* functionality, so we'll just check that it succeeds and fails when it's supposed to.
*/
-(function () {
+(function() {
"use strict";
var name = "getstatus";
var numNodes = 4;
@@ -12,15 +12,15 @@
var config = replTest.getReplSetConfig();
config.members[numNodes - 1].arbiterOnly = true;
- //An invalid time to get status
+ // An invalid time to get status
var statusBeforeInitCode = 94;
assert.commandFailedWithCode(nodes[0].getDB("admin").runCommand({replSetGetStatus: 1}),
statusBeforeInitCode,
- "replSetGetStatus should fail before initializing." );
+ "replSetGetStatus should fail before initializing.");
replTest.initiate(config);
replTest.awaitSecondaryNodes();
- //A valid status
+ // A valid status
var primary = replTest.getPrimary();
assert.commandWorked(primary.getDB("admin").runCommand({replSetGetStatus: 1}));
diff --git a/jstests/replsets/groupAndMapReduce.js b/jstests/replsets/groupAndMapReduce.js
index 9fcdcbeee0e..15dea43c231 100644
--- a/jstests/replsets/groupAndMapReduce.js
+++ b/jstests/replsets/groupAndMapReduce.js
@@ -1,6 +1,6 @@
load("jstests/replsets/rslib.js");
-doTest = function( signal ) {
+doTest = function(signal) {
// Test basic replica set functionality.
// -- Replication
@@ -8,7 +8,7 @@ doTest = function( signal ) {
// Replica set testing API
// Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
// call startSet() to start each mongod in the replica set
// this returns a list of nodes
@@ -34,41 +34,59 @@ doTest = function( signal ) {
replTest.awaitReplication();
slaves = replTest.liveNodes.slaves;
- assert( slaves.length == 2, "Expected 2 slaves but length was " + slaves.length );
+ assert(slaves.length == 2, "Expected 2 slaves but length was " + slaves.length);
slaves.forEach(function(slave) {
// try to read from slave
slave.slaveOk = true;
var count = slave.getDB("foo").foo.count();
- printjson( count );
- assert.eq( len , count , "slave count wrong: " + slave );
-
- print("Doing a findOne to verify we can get a row");
+ printjson(count);
+ assert.eq(len, count, "slave count wrong: " + slave);
+
+ print("Doing a findOne to verify we can get a row");
var one = slave.getDB("foo").foo.findOne();
printjson(one);
-// stats = slave.getDB("foo").adminCommand({replSetGetStatus:1});
-// printjson(stats);
-
+ // stats = slave.getDB("foo").adminCommand({replSetGetStatus:1});
+ // printjson(stats);
+
print("Calling group() with slaveOk=true, must succeed");
slave.slaveOk = true;
- count = slave.getDB("foo").foo.group({initial: {n:0}, reduce: function(obj,out){out.n++;}});
- printjson( count );
- assert.eq( len , count[0].n , "slave group count wrong: " + slave );
-
- print("Calling group() with slaveOk=false, must fail");
+ count = slave.getDB("foo").foo.group({
+ initial: {n: 0},
+ reduce: function(obj, out) {
+ out.n++;
+ }
+ });
+ printjson(count);
+ assert.eq(len, count[0].n, "slave group count wrong: " + slave);
+
+ print("Calling group() with slaveOk=false, must fail");
slave.slaveOk = false;
try {
- count = slave.getDB("foo").foo.group({initial: {n:0}, reduce: function(obj,out){out.n++;}});
+ count = slave.getDB("foo").foo.group({
+ initial: {n: 0},
+ reduce: function(obj, out) {
+ out.n++;
+ }
+ });
assert(false, "group() succeeded with slaveOk=false");
} catch (e) {
print("Received exception: " + e);
}
-
- print("Calling inline mr() with slaveOk=true, must succeed");
+
+ print("Calling inline mr() with slaveOk=true, must succeed");
slave.slaveOk = true;
- map = function() { emit(this.a, 1); };
- reduce = function(key, vals) { var sum = 0; for (var i = 0; i < vals.length; ++i) { sum += vals[i]; } return sum; };
- slave.getDB("foo").foo.mapReduce(map, reduce, {out: { "inline" : 1}});
+ map = function() {
+ emit(this.a, 1);
+ };
+ reduce = function(key, vals) {
+ var sum = 0;
+ for (var i = 0; i < vals.length; ++i) {
+ sum += vals[i];
+ }
+ return sum;
+ };
+ slave.getDB("foo").foo.mapReduce(map, reduce, {out: {"inline": 1}});
print("Calling mr() to collection with slaveOk=true, must fail");
try {
@@ -78,10 +96,10 @@ doTest = function( signal ) {
print("Received exception: " + e);
}
- print("Calling inline mr() with slaveOk=false, must fail");
+ print("Calling inline mr() with slaveOk=false, must fail");
slave.slaveOk = false;
try {
- slave.getDB("foo").foo.mapReduce(map, reduce, {out: { "inline" : 1}});
+ slave.getDB("foo").foo.mapReduce(map, reduce, {out: {"inline": 1}});
assert(false, "mapReduce() succeeded on slave with slaveOk=false");
} catch (e) {
print("Received exception: " + e);
@@ -96,11 +114,9 @@ doTest = function( signal ) {
});
-
-
// Shut down the set and finish the test.
- replTest.stopSet( signal );
+ replTest.stopSet(signal);
};
-doTest( 15 );
+doTest(15);
print("SUCCESS");
diff --git a/jstests/replsets/index_delete.js b/jstests/replsets/index_delete.js
index e43e1e9a55d..9013f8d4ab0 100644
--- a/jstests/replsets/index_delete.js
+++ b/jstests/replsets/index_delete.js
@@ -7,19 +7,19 @@
*/
/**
- * Starts a replica set with arbiter, build an index
- * drop index once secondary starts building index,
+ * Starts a replica set with arbiter, build an index
+ * drop index once secondary starts building index,
* index should not exist on secondary afterwards
*/
var checkOp = function(checkDB) {
var curOp = checkDB.currentOp(true);
- for (var i=0; i < curOp.inprog.length; i++) {
+ for (var i = 0; i < curOp.inprog.length; i++) {
try {
- if (curOp.inprog[i].query.background){
- // should throw something when string contains > 90%
+ if (curOp.inprog[i].query.background) {
+ // should throw something when string contains > 90%
printjson(curOp.inprog[i].msg);
- return true;
+ return true;
}
} catch (e) {
// catchem if you can
@@ -28,16 +28,19 @@ var checkOp = function(checkDB) {
return false;
};
// Set up replica set
-var replTest = new ReplSetTest({ name: 'fgIndex', nodes: 3 });
+var replTest = new ReplSetTest({name: 'fgIndex', nodes: 3});
var nodes = replTest.nodeList();
// We need an arbiter to ensure that the primary doesn't step down when we restart the secondary
replTest.startSet();
-replTest.initiate({"_id" : "fgIndex",
- "members" : [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
+replTest.initiate({
+ "_id": "fgIndex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
var master = replTest.getPrimary();
var second = replTest.getSecondary();
@@ -48,30 +51,31 @@ var size = 50000;
jsTest.log("creating test data " + size + " documents");
var bulk = masterDB.jstests_fgsec.initializeUnorderedBulkOp();
-for(var i = 0; i < size; ++i) {
- bulk.insert({ i: i });
+for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
}
assert.writeOK(bulk.execute());
jsTest.log("Creating index");
-masterDB.jstests_fgsec.ensureIndex( {i:1} );
-assert.eq(2, masterDB.jstests_fgsec.getIndexes().length );
+masterDB.jstests_fgsec.ensureIndex({i: 1});
+assert.eq(2, masterDB.jstests_fgsec.getIndexes().length);
// Wait for the secondary to get the index entry
-assert.soon( function() {
- return 2 == secondDB.jstests_fgsec.getIndexes().length; },
- "index not created on secondary", 1000*60*10, 50 );
+assert.soon(function() {
+ return 2 == secondDB.jstests_fgsec.getIndexes().length;
+}, "index not created on secondary", 1000 * 60 * 10, 50);
jsTest.log("Index created on secondary");
-masterDB.runCommand( {dropIndexes: "jstests_fgsec", index: "i_1"} );
+masterDB.runCommand({dropIndexes: "jstests_fgsec", index: "i_1"});
jsTest.log("Waiting on replication");
replTest.awaitReplication();
-assert.soon( function() {return !checkOp(secondDB);}, "index not cancelled on secondary", 30000, 50);
+assert.soon(function() {
+ return !checkOp(secondDB);
+}, "index not cancelled on secondary", 30000, 50);
masterDB.jstests_fgsec.getIndexes().forEach(printjson);
secondDB.jstests_fgsec.getIndexes().forEach(printjson);
-assert.soon( function() {
- return 1 == secondDB.jstests_fgsec.getIndexes().length; },
- "Index not dropped on secondary", 30000, 50 );
+assert.soon(function() {
+ return 1 == secondDB.jstests_fgsec.getIndexes().length;
+}, "Index not dropped on secondary", 30000, 50);
jsTest.log("index-restart-secondary.js complete");
-
diff --git a/jstests/replsets/index_restart_secondary.js b/jstests/replsets/index_restart_secondary.js
index 7308de83271..be328ede8bc 100644
--- a/jstests/replsets/index_restart_secondary.js
+++ b/jstests/replsets/index_restart_secondary.js
@@ -1,13 +1,13 @@
/**
- * Starts a replica set with arbiter, build an index
- * restart secondary once it starts building index,
+ * Starts a replica set with arbiter, build an index
+ * restart secondary once it starts building index,
* index build restarts after secondary restarts
*/
var replTest = new ReplSetTest({
name: 'fgIndex',
nodes: 3,
- oplogSize: 100, // This test inserts enough data to wrap the default 40MB oplog.
+ oplogSize: 100, // This test inserts enough data to wrap the default 40MB oplog.
});
var nodes = replTest.nodeList();
@@ -18,11 +18,14 @@ var conns = replTest.startSet();
// don't run on 32-bit builders since they are slow and single core, which leads to heartbeats
// failing and loss of primary during the bulk write
if (conns[0].getDB('test').serverBuildInfo().bits !== 32) {
- replTest.initiate({"_id" : "fgIndex",
- "members" : [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
+ replTest.initiate({
+ "_id": "fgIndex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+ });
var master = replTest.getPrimary();
var second = replTest.getSecondary();
@@ -36,39 +39,39 @@ if (conns[0].getDB('test').serverBuildInfo().bits !== 32) {
jsTest.log("creating test data " + size + " documents");
var bulk = masterDB.jstests_fgsec.initializeUnorderedBulkOp();
- for(var i = 0; i < size; ++i) {
- bulk.insert({ i: i });
+ for (var i = 0; i < size; ++i) {
+ bulk.insert({i: i});
}
- assert.writeOK(bulk.execute( { w: "majority" } ));
+ assert.writeOK(bulk.execute({w: "majority"}));
jsTest.log("Creating index");
- masterDB.jstests_fgsec.ensureIndex( {i:1} );
+ masterDB.jstests_fgsec.ensureIndex({i: 1});
assert.eq(2, masterDB.jstests_fgsec.getIndexes().length);
// Wait for the secondary to get the index entry
- assert.soon( function() {
- return 2 == secondDB.jstests_fgsec.getIndexes().length; },
- "index not created on secondary (prior to restart)", 800000, 50 );
+ assert.soon(function() {
+ return 2 == secondDB.jstests_fgsec.getIndexes().length;
+ }, "index not created on secondary (prior to restart)", 800000, 50);
jsTest.log("Index created on secondary");
// restart secondary and reconnect
jsTest.log("Restarting secondary");
- replTest.restart(secondId, {}, /*wait=*/true);
+ replTest.restart(secondId, {}, /*wait=*/true);
// Make sure secondary comes back
- assert.soon( function() {
+ assert.soon(function() {
try {
- secondDB.isMaster(); // trigger a reconnect if needed
+ secondDB.isMaster(); // trigger a reconnect if needed
return true;
} catch (e) {
- return false;
+ return false;
}
- } , "secondary didn't restart", 30000, 1000);
+ }, "secondary didn't restart", 30000, 1000);
- assert.soon( function() {
- return 2 == secondDB.jstests_fgsec.getIndexes().length; },
- "Index build not resumed after restart", 30000, 50 );
+ assert.soon(function() {
+ return 2 == secondDB.jstests_fgsec.getIndexes().length;
+ }, "Index build not resumed after restart", 30000, 50);
jsTest.log("index-restart-secondary.js complete");
}
diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js
index 4047180d783..3977445743e 100644
--- a/jstests/replsets/initial_sync1.js
+++ b/jstests/replsets/initial_sync1.js
@@ -20,8 +20,8 @@ print("1. Bring up set");
// SERVER-7455, this test is called from ssl/auth_x509.js
var x509_options1;
var x509_options2;
-var replTest = new ReplSetTest({name: basename,
- nodes : {node0 : x509_options1, node1 : x509_options2}});
+var replTest =
+ new ReplSetTest({name: basename, nodes: {node0: x509_options1, node1: x509_options2}});
var conns = replTest.startSet();
replTest.initiate();
@@ -37,19 +37,16 @@ var local_s1 = slave1.getDB("local");
print("2. Insert some data");
var bulk = foo.bar.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
- bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" });
+ bulk.insert({date: new Date(), x: i, str: "all the talk on the market"});
}
assert.writeOK(bulk.execute());
-print("total in foo: "+foo.bar.count());
-
+print("total in foo: " + foo.bar.count());
print("4. Make sure synced");
replTest.awaitReplication();
-
print("5. Freeze #2");
-admin_s1.runCommand({replSetFreeze:999999});
-
+admin_s1.runCommand({replSetFreeze: 999999});
print("6. Bring up #3");
var hostname = getHostName();
@@ -61,12 +58,11 @@ var admin_s2 = slave2.getDB("admin");
var config = replTest.getReplSetConfig();
config.version = 2;
-config.members.push({_id:2, host: slave2.host});
+config.members.push({_id: 2, host: slave2.host});
try {
- admin.runCommand({replSetReconfig:config});
-}
-catch(e) {
- print(e);
+ admin.runCommand({replSetReconfig: config});
+} catch (e) {
+ print(e);
}
reconnect(slave1);
reconnect(slave2);
@@ -78,17 +74,16 @@ wait(function() {
printjson(config2);
printjson(config3);
- return config2.version == config.version &&
- (config3 && config3.version == config.version);
- });
+ return config2.version == config.version && (config3 && config3.version == config.version);
+});
-replTest.waitForState(
- slave2, [ReplSetTest.State.SECONDARY, ReplSetTest.State.RECOVERING], 60 * 1000);
+replTest.waitForState(slave2,
+ [ReplSetTest.State.SECONDARY, ReplSetTest.State.RECOVERING],
+ 60 * 1000);
print("7. Kill the secondary in the middle of syncing");
replTest.stop(slave1);
-
print("8. Eventually the new node should become a secondary");
print("if initial sync has started, this will cause it to fail and sleep for 5 minutes");
replTest.waitForState(slave2, ReplSetTest.State.SECONDARY, 60 * 1000);
@@ -102,7 +97,7 @@ print("10. Insert some stuff");
master = replTest.getPrimary();
bulk = foo.bar.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
- bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" });
+ bulk.insert({date: new Date(), x: i, str: "all the talk on the market"});
}
assert.writeOK(bulk.execute());
diff --git a/jstests/replsets/initial_sync2.js b/jstests/replsets/initial_sync2.js
index 9a913aeafc5..afff58d6336 100644
--- a/jstests/replsets/initial_sync2.js
+++ b/jstests/replsets/initial_sync2.js
@@ -20,136 +20,125 @@ var basename = "jstests_initsync2";
var doTest = function() {
-print("1. Bring up set");
-var replTest = new ReplSetTest( {name: basename, nodes: 2} );
-var conns = replTest.startSet();
-replTest.initiate();
+ print("1. Bring up set");
+ var replTest = new ReplSetTest({name: basename, nodes: 2});
+ var conns = replTest.startSet();
+ replTest.initiate();
-var master = replTest.getPrimary();
-var origMaster = master;
-var foo = master.getDB("foo");
-var admin = master.getDB("admin");
+ var master = replTest.getPrimary();
+ var origMaster = master;
+ var foo = master.getDB("foo");
+ var admin = master.getDB("admin");
-var slave1 = replTest.liveNodes.slaves[0];
-var admin_s1 = slave1.getDB("admin");
-var local_s1 = slave1.getDB("local");
+ var slave1 = replTest.liveNodes.slaves[0];
+ var admin_s1 = slave1.getDB("admin");
+ var local_s1 = slave1.getDB("local");
-print("2. Insert some data");
-for (var i=0; i<10000; i++) {
- foo.bar.insert({date : new Date(), x : i, str : "all the talk on the market"});
-}
-print("total in foo: "+foo.bar.count());
+ print("2. Insert some data");
+ for (var i = 0; i < 10000; i++) {
+ foo.bar.insert({date: new Date(), x: i, str: "all the talk on the market"});
+ }
+ print("total in foo: " + foo.bar.count());
+ print("4. Make sure synced");
+ replTest.awaitReplication();
-print("4. Make sure synced");
-replTest.awaitReplication();
+ print("5. Freeze #2");
+ admin_s1.runCommand({replSetFreeze: 999999});
+ print("6. Bring up #3");
+ var hostname = getHostName();
-print("5. Freeze #2");
-admin_s1.runCommand({replSetFreeze:999999});
+ var slave2 = MongoRunner.runMongod({replSet: basename, oplogSize: 2});
+ var local_s2 = slave2.getDB("local");
+ var admin_s2 = slave2.getDB("admin");
-print("6. Bring up #3");
-var hostname = getHostName();
+ var config = replTest.getReplSetConfig();
+ config.version = 2;
-var slave2 = MongoRunner.runMongod({replSet: basename, oplogSize: 2});
+ // Add #3 using rs.add() configuration document.
+ // Since 'db' currently points to slave2, reset 'db' to admin db on master before running
+ // rs.add().
+ db = admin;
-var local_s2 = slave2.getDB("local");
-var admin_s2 = slave2.getDB("admin");
+ // If _id is not provided, rs.add() will generate _id for #3 based on existing members' _ids.
+ assert.commandWorked(rs.add({host: hostname + ":" + slave2.port}),
+ "failed to add #3 to replica set");
-var config = replTest.getReplSetConfig();
-config.version = 2;
+ reconnect(slave1);
+ reconnect(slave2);
-// Add #3 using rs.add() configuration document.
-// Since 'db' currently points to slave2, reset 'db' to admin db on master before running rs.add().
-db = admin;
+ wait(function() {
+ var config2 = local_s1.system.replset.findOne();
+ var config3 = local_s2.system.replset.findOne();
-// If _id is not provided, rs.add() will generate _id for #3 based on existing members' _ids.
-assert.commandWorked(rs.add({host:hostname+":"+slave2.port}), "failed to add #3 to replica set");
-
-reconnect(slave1);
-reconnect(slave2);
-
-wait(function() {
- var config2 = local_s1.system.replset.findOne();
- var config3 = local_s2.system.replset.findOne();
-
- printjson(config2);
- printjson(config3);
-
- return config2.version == config.version &&
- (config3 && config3.version == config.version);
- });
-admin_s2.runCommand({replSetFreeze:999999});
-
-
-wait(function() {
- var status = admin_s2.runCommand({replSetGetStatus:1});
- printjson(status);
- return status.members &&
- (status.members[2].state == 3 || status.members[2].state == 2);
- });
+ printjson(config2);
+ printjson(config3);
+ return config2.version == config.version && (config3 && config3.version == config.version);
+ });
+ admin_s2.runCommand({replSetFreeze: 999999});
-print("7. Kill #1 in the middle of syncing");
-replTest.stop(0);
+ wait(function() {
+ var status = admin_s2.runCommand({replSetGetStatus: 1});
+ printjson(status);
+ return status.members && (status.members[2].state == 3 || status.members[2].state == 2);
+ });
+ print("7. Kill #1 in the middle of syncing");
+ replTest.stop(0);
-print("8. Check that #3 makes it into secondary state");
-wait(function() {
- var status = admin_s2.runCommand({replSetGetStatus:1});
- occasionally(function() { printjson(status);}, 10);
+ print("8. Check that #3 makes it into secondary state");
+ wait(function() {
+ var status = admin_s2.runCommand({replSetGetStatus: 1});
+ occasionally(function() {
+ printjson(status);
+ }, 10);
if (status.members[2].state == 2 || status.members[2].state == 1) {
return true;
}
return false;
});
+ print("9. Bring #1 back up");
+ replTest.start(0, {}, true);
+ reconnect(master);
+ wait(function() {
+ var status = admin.runCommand({replSetGetStatus: 1});
+ printjson(status);
+ return status.members && (status.members[0].state == 1 || status.members[0].state == 2);
+ });
+
+ print("10. Initial sync should succeed");
+ wait(function() {
+ var status = admin_s2.runCommand({replSetGetStatus: 1});
+ printjson(status);
+ return status.members && status.members[2].state == 2 || status.members[2].state == 1;
+ });
+
+ print("11. Insert some stuff");
+ // ReplSetTest doesn't find master correctly unless all nodes are defined by
+ // ReplSetTest
+ for (var i = 0; i < 30; i++) {
+ var result = admin.runCommand({isMaster: 1});
+ if (result.ismaster) {
+ break;
+ } else if (result.primary) {
+ master = connect(result.primary + "/admin").getMongo();
+ break;
+ }
+ sleep(1000);
+ }
+
+ for (var i = 0; i < 10000; i++) {
+ foo.bar.insert({date: new Date(), x: i, str: "all the talk on the market"});
+ }
+
+ print("12. Everyone happy eventually");
+ replTest.awaitReplication(2 * 60 * 1000);
-print("9. Bring #1 back up");
-replTest.start(0, {}, true);
-reconnect(master);
-wait(function() {
- var status = admin.runCommand({replSetGetStatus:1});
- printjson(status);
- return status.members &&
- (status.members[0].state == 1 || status.members[0].state == 2);
- });
-
-
-print("10. Initial sync should succeed");
-wait(function() {
- var status = admin_s2.runCommand({replSetGetStatus:1});
- printjson(status);
- return status.members &&
- status.members[2].state == 2 || status.members[2].state == 1;
- });
-
-
-print("11. Insert some stuff");
-// ReplSetTest doesn't find master correctly unless all nodes are defined by
-// ReplSetTest
-for (var i = 0; i<30; i++) {
- var result = admin.runCommand({isMaster : 1});
- if (result.ismaster) {
- break;
- }
- else if (result.primary) {
- master = connect(result.primary+"/admin").getMongo();
- break;
- }
- sleep(1000);
-}
-
-for (var i=0; i<10000; i++) {
- foo.bar.insert({date : new Date(), x : i, str : "all the talk on the market"});
-}
-
-
-print("12. Everyone happy eventually");
-replTest.awaitReplication(2 * 60 * 1000);
-
-replTest.stopSet();
+ replTest.stopSet();
};
doTest();
diff --git a/jstests/replsets/initial_sync3.js b/jstests/replsets/initial_sync3.js
index 4456cfbd498..5dfbf60c455 100644
--- a/jstests/replsets/initial_sync3.js
+++ b/jstests/replsets/initial_sync3.js
@@ -10,13 +10,12 @@
* @tags: [requires_persistence]
*/
-
load("jstests/replsets/rslib.js");
var name = "initialsync3";
var host = getHostName();
print("Start set with three nodes");
-var replTest = new ReplSetTest( {name: name, nodes: 3} );
+var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.startSet();
replTest.initiate({
_id: name,
@@ -30,7 +29,7 @@ replTest.initiate({
var master = replTest.getPrimary();
print("Initial sync");
-master.getDB("foo").bar.baz.insert({x:1});
+master.getDB("foo").bar.baz.insert({x: 1});
replTest.awaitReplication();
replTest.stop(0);
@@ -41,7 +40,7 @@ replTest.start(1);
print("make sure 1 does not become a secondary (because it cannot clone from 2)");
sleep(10000);
-var result = nodes[1].getDB("admin").runCommand({isMaster : 1});
+var result = nodes[1].getDB("admin").runCommand({isMaster: 1});
assert(!result.ismaster, tojson(result));
assert(!result.secondary, tojson(result));
@@ -52,7 +51,7 @@ master = replTest.getPrimary();
print("now 1 should be able to initial sync");
assert.soon(function() {
- var result = nodes[1].getDB("admin").runCommand({isMaster : 1});
+ var result = nodes[1].getDB("admin").runCommand({isMaster: 1});
printjson(result);
return result.secondary;
});
diff --git a/jstests/replsets/initial_sync4.js b/jstests/replsets/initial_sync4.js
index c25bc94c4ed..c2fcede9969 100644
--- a/jstests/replsets/initial_sync4.js
+++ b/jstests/replsets/initial_sync4.js
@@ -4,7 +4,7 @@ load("jstests/replsets/rslib.js");
basename = "jstests_initsync4";
print("1. Bring up set");
-replTest = new ReplSetTest( {name: basename, nodes: 1} );
+replTest = new ReplSetTest({name: basename, nodes: 1});
replTest.startSet();
replTest.initiate();
@@ -14,10 +14,10 @@ mc = m.getDB("d")["c"];
print("2. Insert some data");
N = 5000;
-mc.ensureIndex({x:1});
+mc.ensureIndex({x: 1});
var bulk = mc.initializeUnorderedBulkOp();
-for( i = 0; i < N; ++i ) {
- bulk.insert({ _id: i, x: i, a: {} });
+for (i = 0; i < N; ++i) {
+ bulk.insert({_id: i, x: i, a: {}});
}
assert.writeOK(bulk.execute());
@@ -31,11 +31,10 @@ s = MongoRunner.runMongod({replSet: basename, oplogSize: 2});
var config = replTest.getReplSetConfig();
config.version = 2;
-config.members.push({_id:2, host:hostname+":"+s.port});
+config.members.push({_id: 2, host: hostname + ":" + s.port});
try {
- m.getDB("admin").runCommand({replSetReconfig:config});
-}
-catch(e) {
+ m.getDB("admin").runCommand({replSetReconfig: config});
+} catch (e) {
print(e);
}
reconnect(s);
@@ -45,39 +44,38 @@ print("5. Wait for new node to start cloning");
s.setSlaveOk();
sc = s.getDB("d")["c"];
-wait( function() { printjson( sc.stats() ); return sc.stats().count > 0; } );
+wait(function() {
+ printjson(sc.stats());
+ return sc.stats().count > 0;
+});
print("6. Start updating documents on primary");
-for( i = N-1; i >= N-10000; --i ) {
+for (i = N - 1; i >= N - 10000; --i) {
// If the document is cloned as {a:1}, the {$set:{'a.b':1}} modifier will uassert.
- mc.update( {_id:i}, {$set:{'a.b':1}} );
- mc.update( {_id:i}, {$set:{a:1}} );
+ mc.update({_id: i}, {$set: {'a.b': 1}});
+ mc.update({_id: i}, {$set: {a: 1}});
}
-for ( i = N; i < N*2; i++ ) {
- mc.insert( { _id : i, x : i } );
+for (i = N; i < N * 2; i++) {
+ mc.insert({_id: i, x: i});
}
-assert.eq( N*2, mc.count() );
+assert.eq(N * 2, mc.count());
print("7. Wait for new node to become SECONDARY");
wait(function() {
- var status = s.getDB("admin").runCommand({replSetGetStatus:1});
- printjson(status);
- return status.members &&
- (status.members[1].state == 2);
- });
+ var status = s.getDB("admin").runCommand({replSetGetStatus: 1});
+ printjson(status);
+ return status.members && (status.members[1].state == 2);
+});
print("8. Wait for new node to have all the data");
wait(function() {
return sc.count() == mc.count();
-} );
-
+});
-assert.eq( mc.getIndexKeys().length,
- sc.getIndexKeys().length );
+assert.eq(mc.getIndexKeys().length, sc.getIndexKeys().length);
-assert.eq( mc.find().sort( { x : 1 } ).itcount(),
- sc.find().sort( { x : 1 } ).itcount() );
+assert.eq(mc.find().sort({x: 1}).itcount(), sc.find().sort({x: 1}).itcount());
-replTest.stopSet( 15 );
+replTest.stopSet(15);
diff --git a/jstests/replsets/initial_sync_unsupported_auth_schema.js b/jstests/replsets/initial_sync_unsupported_auth_schema.js
index 28c4d1e8826..e27d25aaac0 100644
--- a/jstests/replsets/initial_sync_unsupported_auth_schema.js
+++ b/jstests/replsets/initial_sync_unsupported_auth_schema.js
@@ -4,8 +4,7 @@
function checkedReInitiate(rst) {
try {
rst.reInitiate();
- }
- catch (e) {
+ } catch (e) {
// reInitiate can throw because it tries to run an ismaster command on
// all secondaries, including the new one that may have already aborted
var errMsg = tojson(e);
@@ -49,10 +48,12 @@ function testInitialSyncAbortsWithUnsupportedAuthSchema(schema) {
var assertFn = function() {
return rawMongoProgramOutput().match(msg);
};
- assert.soon(assertFn, 'Initial sync should have aborted due to an invalid or unsupported' +
- ' authSchema version: ' + tojson(schema), 60000);
+ assert.soon(assertFn,
+ 'Initial sync should have aborted due to an invalid or unsupported' +
+ ' authSchema version: ' + tojson(schema),
+ 60000);
- rst.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+ rst.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
}
function testInitialSyncAbortsWithExistingUserAndNoAuthSchema() {
@@ -81,10 +82,12 @@ function testInitialSyncAbortsWithExistingUserAndNoAuthSchema() {
return rawMongoProgramOutput().match(msg);
};
- assert.soon(assertFn, 'Initial sync should have aborted due to an existing user document and' +
- ' a missing auth schema', 60000);
+ assert.soon(assertFn,
+ 'Initial sync should have aborted due to an existing user document and' +
+ ' a missing auth schema',
+ 60000);
- rst.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+ rst.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
}
testInitialSyncAbortsWithUnsupportedAuthSchema({_id: 'authSchema'});
diff --git a/jstests/replsets/initiate.js b/jstests/replsets/initiate.js
index 41d53202f80..0afa0c85bcd 100644
--- a/jstests/replsets/initiate.js
+++ b/jstests/replsets/initiate.js
@@ -2,20 +2,19 @@
* Sanity check that initializing will fail with bad input. There are C++ unit tests for most bad
* configs, so this is just seeing if it fails when it's supposed to.
*/
-(function () {
+(function() {
"use strict";
- var replTest = new ReplSetTest({name : 'testSet2', nodes : 1});
+ var replTest = new ReplSetTest({name: 'testSet2', nodes: 1});
var nodes = replTest.startSet();
assert.soon(function() {
try {
var result = nodes[0].getDB("admin").runCommand(
- {replSetInitiate: {_id: "testSet2", members: [{_id : 0, tags : ["member0"]}]}});
+ {replSetInitiate: {_id: "testSet2", members: [{_id: 0, tags: ["member0"]}]}});
printjson(result);
return (result.errmsg.match(/bad or missing host field/) ||
result.errmsg.match(/Missing expected field \"host\"/));
- }
- catch (e) {
+ } catch (e) {
print(e);
}
return false;
diff --git a/jstests/replsets/initiate_prohibits_w0.js b/jstests/replsets/initiate_prohibits_w0.js
index 9bd5d4a599d..e0d100e7251 100644
--- a/jstests/replsets/initiate_prohibits_w0.js
+++ b/jstests/replsets/initiate_prohibits_w0.js
@@ -23,13 +23,11 @@ function testInitiate(gleDefaults) {
/*
* Try to initiate with w: 0 in getLastErrorDefaults.
*/
-testInitiate({
- getLastErrorDefaults: {w: 0}});
+testInitiate({getLastErrorDefaults: {w: 0}});
/*
* Try to initiate with w: 0 and other options in getLastErrorDefaults.
*/
-testInitiate({
- getLastErrorDefaults: {w: 0, j: false, wtimeout: 100, fsync: true}});
+testInitiate({getLastErrorDefaults: {w: 0, j: false, wtimeout: 100, fsync: true}});
replTest.stopSet();
diff --git a/jstests/replsets/initiate_without_replset_name_at_startup.js b/jstests/replsets/initiate_without_replset_name_at_startup.js
index 4deac03ca0b..cccb7137ba6 100644
--- a/jstests/replsets/initiate_without_replset_name_at_startup.js
+++ b/jstests/replsets/initiate_without_replset_name_at_startup.js
@@ -10,33 +10,28 @@
* will not persist across a restart and they will not transition to PRIMARY as described above.
* @tags: [requires_persistence]
*/
-(function () {
+(function() {
"use strict";
var baseName = 'testInitiateWithoutReplSetNameAtStartup';
var port = allocatePorts(1)[0];
var dbpath = MongoRunner.dataPath + baseName + '/';
- var mongod = MongoRunner.runMongod({
- dbpath: dbpath,
- port: port});
+ var mongod = MongoRunner.runMongod({dbpath: dbpath, port: port});
var config = {
_id: baseName,
version: 1,
- members: [
- {_id: 0, host: mongod.name},
- ],
+ members: [{_id: 0, host: mongod.name}, ],
};
var result = assert.commandFailedWithCode(
mongod.getDB('admin').runCommand({replSetInitiate: config}),
ErrorCodes.NoReplicationEnabled,
'replSetInitiate should fail when both --configsvr and --replSet are missing.');
- assert(
- result.errmsg.match(/This node was not started with the replSet option/),
- 'unexpected error message when both --configsvr and --replSet are missing. ' +
- 'configuration: ' + tojson(result));
+ assert(result.errmsg.match(/This node was not started with the replSet option/),
+ 'unexpected error message when both --configsvr and --replSet are missing. ' +
+ 'configuration: ' + tojson(result));
// The rest of this test can only be run if the storageEngine supports committed reads.
var supportsCommittedReads =
@@ -49,96 +44,88 @@
return;
}
- mongod = MongoRunner.runMongod({
- configsvr: '',
- dbpath: dbpath,
- port: port,
- restart: true});
+ mongod = MongoRunner.runMongod({configsvr: '', dbpath: dbpath, port: port, restart: true});
- assert.commandWorked(
- mongod.getDB('admin').runCommand({replSetInitiate: config}),
- 'replSetInitiate should not fail when given a valid configuration');
+ assert.commandWorked(mongod.getDB('admin').runCommand({replSetInitiate: config}),
+ 'replSetInitiate should not fail when given a valid configuration');
// Check saved config
var systemReplsetCollection = mongod.getDB('local').system.replset;
- assert.eq(1, systemReplsetCollection.count(),
- 'replSetInitiate did not save configuration in ' +
- systemReplsetCollection.getFullName());
+ assert.eq(
+ 1,
+ systemReplsetCollection.count(),
+ 'replSetInitiate did not save configuration in ' + systemReplsetCollection.getFullName());
var savedConfig = systemReplsetCollection.findOne();
- assert.eq(config._id, savedConfig._id,
+ assert.eq(config._id,
+ savedConfig._id,
'config passed to replSetInitiate (left side) does not match config saved in ' +
- systemReplsetCollection.getFullName() + ' (right side)');
+ systemReplsetCollection.getFullName() + ' (right side)');
result = assert.commandFailedWithCode(
- mongod.getDB('admin').runCommand({replSetInitiate: {
- _id: baseName + '-2',
- version: 1,
- members: [
- {_id: 0, host: mongod.name},
- ],
- }}),
+ mongod.getDB('admin').runCommand({
+ replSetInitiate: {
+ _id: baseName + '-2',
+ version: 1,
+ members: [{_id: 0, host: mongod.name}, ],
+ }
+ }),
ErrorCodes.AlreadyInitialized,
'expected AlreadyInitialized error code when configuration already exists in ' +
- systemReplsetCollection.getFullName());
- assert(result.errmsg.match(/already initialized/),
- 'unexpected error message when replica set configuration already exists ' +
- tojson(result));
+ systemReplsetCollection.getFullName());
+ assert(
+ result.errmsg.match(/already initialized/),
+ 'unexpected error message when replica set configuration already exists ' + tojson(result));
systemReplsetCollection = mongod.getDB('local').system.replset;
savedConfig = systemReplsetCollection.findOne();
- assert.eq(config._id, savedConfig._id,
+ assert.eq(config._id,
+ savedConfig._id,
'config passed to replSetInitiate (left side) does not match config saved in ' +
- systemReplsetCollection.getFullName() + ' (right side)');
+ systemReplsetCollection.getFullName() + ' (right side)');
var oplogCollection = mongod.getDB('local').oplog.rs;
assert(oplogCollection.exists(),
'oplog collection ' + oplogCollection.getFullName() +
- ' not created after successful replSetInitiate. Collections in local database: ' +
- mongod.getDB('local').getCollectionNames().join(', '));
+ ' not created after successful replSetInitiate. Collections in local database: ' +
+ mongod.getDB('local').getCollectionNames().join(', '));
assert(oplogCollection.isCapped(),
'oplog collection ' + oplogCollection.getFullName() + ' must be capped');
- assert.eq(1, oplogCollection.count(),
+ assert.eq(1,
+ oplogCollection.count(),
'oplog collection ' + oplogCollection.getFullName() +
- ' is not initialized with first entry.');
+ ' is not initialized with first entry.');
var oplogEntry = oplogCollection.findOne();
assert.eq('n', oplogEntry.op, 'unexpected first oplog entry type: ' + tojson(oplogEntry));
MongoRunner.stopMongod(port);
// Restart server and attempt to save a different config.
- mongod = MongoRunner.runMongod({
- configsvr: '',
- dbpath: dbpath,
- port: port,
- restart: true});
+ mongod = MongoRunner.runMongod({configsvr: '', dbpath: dbpath, port: port, restart: true});
result = assert.commandFailedWithCode(
- mongod.getDB('admin').runCommand({replSetInitiate: {
- _id: baseName + '-2',
- version: 1,
- members: [
- {_id: 0, host: mongod.name},
- ],
- }}),
+ mongod.getDB('admin').runCommand({
+ replSetInitiate: {
+ _id: baseName + '-2',
+ version: 1,
+ members: [{_id: 0, host: mongod.name}, ],
+ }
+ }),
ErrorCodes.AlreadyInitialized,
'expected AlreadyInitialized error code when configuration already exists in ' +
- systemReplsetCollection.getFullName() + ' after restarting');
+ systemReplsetCollection.getFullName() + ' after restarting');
assert(result.errmsg.match(/already initialized/),
'unexpected error message when replica set configuration already exists ' +
- '(after restarting without --replSet): ' + tojson(result));
+ '(after restarting without --replSet): ' + tojson(result));
systemReplsetCollection = mongod.getDB('local').system.replset;
savedConfig = systemReplsetCollection.findOne();
- assert.eq(config._id, savedConfig._id,
+ assert.eq(config._id,
+ savedConfig._id,
'config passed to replSetInitiate (left side) does not match config saved in ' +
- systemReplsetCollection.getFullName() + ' (right side)');
+ systemReplsetCollection.getFullName() + ' (right side)');
MongoRunner.stopMongod(port);
// Restart server with --replSet and check own replica member state.
- mongod = MongoRunner.runMongod({
- configsvr: '',
- dbpath: dbpath,
- port: port,
- replSet: config._id,
- restart: true});
+ mongod = MongoRunner.runMongod(
+ {configsvr: '', dbpath: dbpath, port: port, replSet: config._id, restart: true});
// Wait for member state to become PRIMARY.
assert.soon(
@@ -146,8 +133,8 @@
result = assert.commandWorked(
mongod.getDB('admin').runCommand({replSetGetStatus: 1}),
'failed to get replica set status after restarting server with --replSet option');
- assert.eq(1, result.members.length,
- 'replica set status should contain exactly 1 member');
+ assert.eq(
+ 1, result.members.length, 'replica set status should contain exactly 1 member');
var member = result.members[0];
print('Current replica member state = ' + member.state + ' (' + member.stateStr + ')');
return member.state == ReplSetTest.State.PRIMARY;
@@ -158,27 +145,25 @@
// Write/read a single document to ensure basic functionality.
var t = mongod.getDB('config').getCollection(baseName);
- var doc = {_id: 0};
- assert.soon(
- function() {
- result = t.save(doc);
- assert(result instanceof WriteResult);
- if (result.hasWriteError()) {
- print('Failed with write error saving document after transitioning to primary: ' +
- tojson(result) + '. Retrying...');
- return false;
- }
- if (result.hasWriteConcernError()) {
- print('Failed with write concern error saving document after transitioning to ' +
- 'primary: ' + tojson(result) + '. Retrying...');
- return false;
- }
- print('Successfully saved document after transitioning to primary: ' + tojson(result));
- return true;
- },
- 'failed to save document after transitioning to primary',
- 5000,
- 1000);
+ var doc = {
+ _id: 0
+ };
+ assert.soon(function() {
+ result = t.save(doc);
+ assert(result instanceof WriteResult);
+ if (result.hasWriteError()) {
+ print('Failed with write error saving document after transitioning to primary: ' +
+ tojson(result) + '. Retrying...');
+ return false;
+ }
+ if (result.hasWriteConcernError()) {
+ print('Failed with write concern error saving document after transitioning to ' +
+ 'primary: ' + tojson(result) + '. Retrying...');
+ return false;
+ }
+ print('Successfully saved document after transitioning to primary: ' + tojson(result));
+ return true;
+ }, 'failed to save document after transitioning to primary', 5000, 1000);
assert.eq(1, t.count(), 'incorrect collection size after successful write');
assert.eq(doc, t.findOne());
diff --git a/jstests/replsets/ismaster1.js b/jstests/replsets/ismaster1.js
index 8acefc875d5..2d469f385a2 100644
--- a/jstests/replsets/ismaster1.js
+++ b/jstests/replsets/ismaster1.js
@@ -6,7 +6,7 @@
load("jstests/replsets/rslib.js");
// function create the error message if an assert fails
-var generateErrorString = function (badFields, missingFields, badValues, result) {
+var generateErrorString = function(badFields, missingFields, badValues, result) {
var str = "\nThe result was:\n" + tojson(result);
if (badFields.length !== 0) {
str += "\nIt had the following fields which it shouldn't have: ";
@@ -17,28 +17,28 @@ var generateErrorString = function (badFields, missingFields, badValues, result)
str += missingFields;
}
if (badValues.length !== 0) {
- for (i = 0; i < badValues.length; i+=3) {
- str += "\nIts value for " + badValues[i] + " is " + badValues[i+1];
- str += " but should be " + badValues[i+2];
+ for (i = 0; i < badValues.length; i += 3) {
+ str += "\nIts value for " + badValues[i] + " is " + badValues[i + 1];
+ str += " but should be " + badValues[i + 2];
}
}
return str;
};
// function to check a single result
-var checkMember = function (memberInfo) {
+var checkMember = function(memberInfo) {
// run isMaster on the connection
- result = memberInfo.conn.getDB("admin").runCommand({isMaster:1});
+ result = memberInfo.conn.getDB("admin").runCommand({isMaster: 1});
// make sure result doesn't contain anything it shouldn't
var badFields = [];
for (field in result) {
- if (!result.hasOwnProperty(field)){
- continue;
- }
- if (Array.contains(memberInfo.unwantedFields, field)) {
- badFields.push(field);
- }
+ if (!result.hasOwnProperty(field)) {
+ continue;
+ }
+ if (Array.contains(memberInfo.unwantedFields, field)) {
+ badFields.push(field);
+ }
}
// make sure result contains the fields we want
@@ -52,7 +52,7 @@ var checkMember = function (memberInfo) {
}
// make sure the result has proper values for fields with known values
- var badValues = []; // each mistake will be saved as three entries (key, badvalue, goodvalue)
+ var badValues = []; // each mistake will be saved as three entries (key, badvalue, goodvalue)
for (field in memberInfo.goodValues) {
if (typeof(memberInfo.goodValues[field]) === "object") {
// assumes nested obj is disk in tags this is currently true, but may change
@@ -61,8 +61,7 @@ var checkMember = function (memberInfo) {
badValues.push(result[field].disk);
badValues.push(memberInfo.goodValues[field].disk);
}
- }
- else {
+ } else {
if (result[field] !== memberInfo.goodValues[field]) {
badValues.push(field);
badValues.push(result[field]);
@@ -71,8 +70,8 @@ var checkMember = function (memberInfo) {
}
}
assert(badFields.length === 0 && missingFields.length === 0 && badValues.length === 0,
- memberInfo.name + " had the following problems."
- + generateErrorString(badFields, missingFields, badValues, result));
+ memberInfo.name + " had the following problems." +
+ generateErrorString(badFields, missingFields, badValues, result));
};
// start of test code
@@ -89,192 +88,201 @@ config.members[2].buildIndexes = false;
config.members[3].arbiterOnly = true;
replTest.initiate(config);
-var agreeOnPrimaryAndSetVersion = function( setVersion ) {
-
- print( "Waiting for primary and replica set version " + setVersion );
-
+var agreeOnPrimaryAndSetVersion = function(setVersion) {
+
+ print("Waiting for primary and replica set version " + setVersion);
+
var nodes = replTest.nodes;
var primary = undefined;
- var lastSetVersion = setVersion;
- for ( var i = 0; i < nodes.length; i++ ) {
+ var lastSetVersion = setVersion;
+ for (var i = 0; i < nodes.length; i++) {
try {
- var isMasterResult = nodes[i].getDB( "admin" ).runCommand({ isMaster : 1 });
- }
- catch (e) {
+ var isMasterResult = nodes[i].getDB("admin").runCommand({isMaster: 1});
+ } catch (e) {
// handle reconnect errors due to step downs
print("Error while calling isMaster on " + nodes[i] + ": " + e);
return false;
}
- printjson( isMasterResult );
- if ( !primary ) primary = isMasterResult.primary;
- if ( !lastSetVersion ) lastSetVersion = isMasterResult.setVersion;
- if ( isMasterResult.primary != primary || !primary ) return false;
- if ( isMasterResult.setVersion != lastSetVersion ) return false;
+ printjson(isMasterResult);
+ if (!primary)
+ primary = isMasterResult.primary;
+ if (!lastSetVersion)
+ lastSetVersion = isMasterResult.setVersion;
+ if (isMasterResult.primary != primary || !primary)
+ return false;
+ if (isMasterResult.setVersion != lastSetVersion)
+ return false;
}
-
+
return true;
};
var master = replTest.getPrimary();
-assert.soon( function() { return agreeOnPrimaryAndSetVersion( 1 ); },
- "Nodes did not initiate in less than a minute", 60000 );
+assert.soon(function() {
+ return agreeOnPrimaryAndSetVersion(1);
+}, "Nodes did not initiate in less than a minute", 60000);
// check to see if the information from isMaster() is correct at each node
// the checker only checks that the field exists when its value is "has"
-checkMember({ conn: master,
- name: "master",
- goodValues: {
- setName: "ismaster",
- setVersion: 1,
- ismaster: true,
- secondary: false,
- ok: 1
- },
- wantedFields: ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize",
- "localTime"],
- unwantedFields: ["arbiterOnly", "passive", "slaveDelay", "hidden", "tags",
- "buildIndexes"]
- });
+checkMember({
+ conn: master,
+ name: "master",
+ goodValues: {setName: "ismaster", setVersion: 1, ismaster: true, secondary: false, ok: 1},
+ wantedFields:
+ ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"],
+ unwantedFields: ["arbiterOnly", "passive", "slaveDelay", "hidden", "tags", "buildIndexes"]
+});
-checkMember({ conn: replTest.liveNodes.slaves[0],
- name: "slave",
- goodValues: {
- setName: "ismaster",
- setVersion: 1,
- ismaster: false,
- secondary: true,
- passive: true,
- ok: 1
- },
- wantedFields: ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize",
- "localTime"],
- unwantedFields: ["arbiterOnly", "slaveDelay", "hidden", "tags", "buildIndexes"]
- });
+checkMember({
+ conn: replTest.liveNodes.slaves[0],
+ name: "slave",
+ goodValues: {
+ setName: "ismaster",
+ setVersion: 1,
+ ismaster: false,
+ secondary: true,
+ passive: true,
+ ok: 1
+ },
+ wantedFields:
+ ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"],
+ unwantedFields: ["arbiterOnly", "slaveDelay", "hidden", "tags", "buildIndexes"]
+});
-checkMember({ conn: replTest.liveNodes.slaves[1],
- name: "delayed_slave",
- goodValues: {
- setName: "ismaster",
- setVersion: 1,
- ismaster: false,
- secondary: true,
- passive: true,
- slaveDelay: 3,
- buildIndexes: false,
- ok: 1
- },
- wantedFields: ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize",
- "localTime"],
- unwantedFields: ["arbiterOnly", "tags"]
- });
+checkMember({
+ conn: replTest.liveNodes.slaves[1],
+ name: "delayed_slave",
+ goodValues: {
+ setName: "ismaster",
+ setVersion: 1,
+ ismaster: false,
+ secondary: true,
+ passive: true,
+ slaveDelay: 3,
+ buildIndexes: false,
+ ok: 1
+ },
+ wantedFields:
+ ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"],
+ unwantedFields: ["arbiterOnly", "tags"]
+});
-checkMember({ conn: replTest.liveNodes.slaves[2],
- name: "arbiter",
- goodValues: {
- setName: "ismaster",
- setVersion: 1,
- ismaster: false,
- secondary: false,
- arbiterOnly: true,
- ok: 1
- },
- wantedFields: ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize",
- "localTime"],
- unwantedFields: ["slaveDelay", "hidden", "tags", "buildIndexes", "passive"]
- });
+checkMember({
+ conn: replTest.liveNodes.slaves[2],
+ name: "arbiter",
+ goodValues: {
+ setName: "ismaster",
+ setVersion: 1,
+ ismaster: false,
+ secondary: false,
+ arbiterOnly: true,
+ ok: 1
+ },
+ wantedFields:
+ ["hosts", "passives", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"],
+ unwantedFields: ["slaveDelay", "hidden", "tags", "buildIndexes", "passive"]
+});
// reconfigure and make sure the changes show up in ismaster on all members
config = master.getDB("local").system.replset.findOne();
-config.version = config.version+1;
-config.members[0].tags = {disk: "ssd"};
-config.members[1].tags = {disk: "ssd"};
+config.version = config.version + 1;
+config.members[0].tags = {
+ disk: "ssd"
+};
+config.members[1].tags = {
+ disk: "ssd"
+};
config.members[1].hidden = true;
config.members[2].slaveDelay = 300000;
-config.members[2].tags = {disk: "hdd"};
+config.members[2].tags = {
+ disk: "hdd"
+};
try {
- result = master.getDB("admin").runCommand({replSetReconfig : config});
-}
-catch(e) {
+ result = master.getDB("admin").runCommand({replSetReconfig: config});
+} catch (e) {
print(e);
}
master = replTest.getPrimary();
-assert.soon( function() { return agreeOnPrimaryAndSetVersion( 2 ); },
- "Nodes did not sync in less than a minute", 60000 );
+assert.soon(function() {
+ return agreeOnPrimaryAndSetVersion(2);
+}, "Nodes did not sync in less than a minute", 60000);
// check nodes for their new settings
-checkMember({ conn: master,
- name: "master2",
- goodValues: {
- setName: "ismaster",
- setVersion: 2,
- ismaster: true,
- secondary: false,
- tags: {"disk": "ssd"},
- ok: 1
- },
- wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize",
- "localTime"],
- unwantedFields: ["arbiterOnly", "passives", "passive", "slaveDelay", "hidden", "buildIndexes"]
- });
+checkMember({
+ conn: master,
+ name: "master2",
+ goodValues: {
+ setName: "ismaster",
+ setVersion: 2,
+ ismaster: true,
+ secondary: false,
+ tags: {"disk": "ssd"},
+ ok: 1
+ },
+ wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"],
+ unwantedFields:
+ ["arbiterOnly", "passives", "passive", "slaveDelay", "hidden", "buildIndexes"]
+});
-checkMember({ conn: replTest.liveNodes.slaves[0],
- name: "first_slave",
- goodValues: {
- setName: "ismaster",
- setVersion: 2,
- ismaster: false,
- secondary: true,
- tags: {"disk": "ssd"},
- passive: true,
- hidden: true,
- ok: 1
- },
- wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize",
- "localTime"],
- unwantedFields: ["arbiterOnly", "passives", "slaveDelayed", "buildIndexes"]
- });
+checkMember({
+ conn: replTest.liveNodes.slaves[0],
+ name: "first_slave",
+ goodValues: {
+ setName: "ismaster",
+ setVersion: 2,
+ ismaster: false,
+ secondary: true,
+ tags: {"disk": "ssd"},
+ passive: true,
+ hidden: true,
+ ok: 1
+ },
+ wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"],
+ unwantedFields: ["arbiterOnly", "passives", "slaveDelayed", "buildIndexes"]
+});
-checkMember({ conn: replTest.liveNodes.slaves[1],
- name: "very_delayed_slave",
- goodValues: {
- setName: "ismaster",
- setVersion: 2,
- ismaster: false,
- secondary: true,
- tags: {"disk": "hdd"},
- passive: true,
- slaveDelay: 300000,
- buildIndexes: false,
- hidden: true,
- ok: 1
- },
- wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize",
- "localTime"],
- unwantedFields: ["arbiterOnly", "passives"]
- });
+checkMember({
+ conn: replTest.liveNodes.slaves[1],
+ name: "very_delayed_slave",
+ goodValues: {
+ setName: "ismaster",
+ setVersion: 2,
+ ismaster: false,
+ secondary: true,
+ tags: {"disk": "hdd"},
+ passive: true,
+ slaveDelay: 300000,
+ buildIndexes: false,
+ hidden: true,
+ ok: 1
+ },
+ wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"],
+ unwantedFields: ["arbiterOnly", "passives"]
+});
-checkMember({ conn: replTest.liveNodes.slaves[2],
- name: "arbiter",
- goodValues: {
- setName: "ismaster",
- setVersion: 2,
- ismaster: false,
- secondary: false,
- arbiterOnly: true,
- ok: 1
- },
- wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize",
- "localTime"],
- unwantedFields: ["slaveDelay", "hidden", "tags", "buildIndexes", "passive"]
- });
+checkMember({
+ conn: replTest.liveNodes.slaves[2],
+ name: "arbiter",
+ goodValues: {
+ setName: "ismaster",
+ setVersion: 2,
+ ismaster: false,
+ secondary: false,
+ arbiterOnly: true,
+ ok: 1
+ },
+ wantedFields: ["hosts", "arbiters", "primary", "me", "maxBsonObjectSize", "localTime"],
+ unwantedFields: ["slaveDelay", "hidden", "tags", "buildIndexes", "passive"]
+});
// force reconfig and ensure all have the same setVersion afterwards
config = master.getDB("local").system.replset.findOne();
-master.getDB("admin").runCommand({replSetReconfig : config, force: true});
+master.getDB("admin").runCommand({replSetReconfig: config, force: true});
-assert.soon( function() { return agreeOnPrimaryAndSetVersion(); },
- "Nodes did not sync in less than a minute after forced reconfig", 60000 );
+assert.soon(function() {
+ return agreeOnPrimaryAndSetVersion();
+}, "Nodes did not sync in less than a minute after forced reconfig", 60000);
replTest.stopSet();
diff --git a/jstests/replsets/last_op_visible.js b/jstests/replsets/last_op_visible.js
index 486230edf73..32df53c93d5 100644
--- a/jstests/replsets/last_op_visible.js
+++ b/jstests/replsets/last_op_visible.js
@@ -5,64 +5,61 @@
// majority read.
(function() {
-"use strict";
+ "use strict";
-var name = 'lastOpVisible';
-var replTest = new ReplSetTest({name: name,
- nodes: 3,
- nodeOptions: {enableMajorityReadConcern: ''}});
+ var name = 'lastOpVisible';
+ var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
-try {
- replTest.startSet();
-} catch (e) {
- var conn = MongoRunner.runMongod();
- if (!conn.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) {
- print("Skipping read_majority.js since storageEngine doesn't support it.");
- MongoRunner.stopMongod(conn);
- return;
+ try {
+ replTest.startSet();
+ } catch (e) {
+ var conn = MongoRunner.runMongod();
+ if (!conn.getDB('admin').serverStatus().storageEngine.supportsCommittedReads) {
+ print("Skipping read_majority.js since storageEngine doesn't support it.");
+ MongoRunner.stopMongod(conn);
+ return;
+ }
+ throw e;
}
- throw e;
-}
-replTest.initiate();
+ replTest.initiate();
-var primary = replTest.getPrimary();
+ var primary = replTest.getPrimary();
-// Do an insert without writeConcern.
-var res = primary.getDB(name).runCommandWithMetadata("insert",
- {insert: name, documents: [{x:1}]},
- {"$replData": 1});
-assert.commandWorked(res.commandReply);
-var last_op_visible = res.metadata["$replData"].lastOpVisible;
+ // Do an insert without writeConcern.
+ var res = primary.getDB(name).runCommandWithMetadata(
+ "insert", {insert: name, documents: [{x: 1}]}, {"$replData": 1});
+ assert.commandWorked(res.commandReply);
+ var last_op_visible = res.metadata["$replData"].lastOpVisible;
-// A find should return the same lastVisibleOp.
-res = primary.getDB(name).runCommandWithMetadata("find",
- {find: name, readConcern: {level: "local"}},
- {"$replData": 1});
-assert.commandWorked(res.commandReply);
-assert.eq(last_op_visible, res.metadata["$replData"].lastOpVisible);
+ // A find should return the same lastVisibleOp.
+ res = primary.getDB(name).runCommandWithMetadata(
+ "find", {find: name, readConcern: {level: "local"}}, {"$replData": 1});
+ assert.commandWorked(res.commandReply);
+ assert.eq(last_op_visible, res.metadata["$replData"].lastOpVisible);
-// A majority readConcern with afterOpTime: lastOpVisible should also return the same lastVisibleOp.
-res = primary.getDB(name).runCommandWithMetadata(
+ // A majority readConcern with afterOpTime: lastOpVisible should also return the same
+ // lastVisibleOp.
+ res = primary.getDB(name).runCommandWithMetadata(
"find",
{find: name, readConcern: {level: "majority", afterOpTime: last_op_visible}},
{"$replData": 1});
-assert.commandWorked(res.commandReply);
-assert.eq(last_op_visible, res.metadata["$replData"].lastOpVisible);
+ assert.commandWorked(res.commandReply);
+ assert.eq(last_op_visible, res.metadata["$replData"].lastOpVisible);
-// Do an insert without writeConcern.
-res = primary.getDB(name).runCommandWithMetadata(
+ // Do an insert without writeConcern.
+ res = primary.getDB(name).runCommandWithMetadata(
"insert",
- {insert: name, documents: [{x:1}], writeConcern: {w: "majority"}},
+ {insert: name, documents: [{x: 1}], writeConcern: {w: "majority"}},
{"$replData": 1});
-assert.commandWorked(res.commandReply);
-last_op_visible = res.metadata["$replData"].lastOpVisible;
+ assert.commandWorked(res.commandReply);
+ last_op_visible = res.metadata["$replData"].lastOpVisible;
-// A majority readConcern should return the same lastVisibleOp.
-res = primary.getDB(name).runCommandWithMetadata("find",
- {find: name, readConcern: {level: "majority"}},
- {"$replData": 1});
-assert.commandWorked(res.commandReply);
-assert.eq(last_op_visible, res.metadata["$replData"].lastOpVisible);
+ // A majority readConcern should return the same lastVisibleOp.
+ res = primary.getDB(name).runCommandWithMetadata(
+ "find", {find: name, readConcern: {level: "majority"}}, {"$replData": 1});
+ assert.commandWorked(res.commandReply);
+ assert.eq(last_op_visible, res.metadata["$replData"].lastOpVisible);
}());
diff --git a/jstests/replsets/lastop.js b/jstests/replsets/lastop.js
index f3eca2ccb3d..e1bf6c6fbd4 100644
--- a/jstests/replsets/lastop.js
+++ b/jstests/replsets/lastop.js
@@ -1,8 +1,8 @@
// Test that lastOp is updated properly in the face of no-op writes and for writes that generate
// errors based on the preexisting data (e.g. duplicate key errors, but not parse errors).
// lastOp is used as the optime to wait for when write concern waits for replication.
-(function () {
- var replTest = new ReplSetTest({ name: 'testSet', nodes: 1 });
+(function() {
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 1});
replTest.startSet();
replTest.initiate();
@@ -14,79 +14,77 @@
// Do a write with m1, then a write with m2, then a no-op write with m1. m1 should have a lastOp
// of m2's write.
-
- assert.writeOK(m1.getCollection("test.foo").insert({ m1 : 1 }));
+
+ assert.writeOK(m1.getCollection("test.foo").insert({m1: 1}));
var firstOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
-
- assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 99 }));
+
+ assert.writeOK(m2.getCollection("test.foo").insert({m2: 99}));
var secondOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// No-op update
- assert.writeOK(m1.getCollection("test.foo").update({ m1 : 1 }, { $set: { m1 : 1 }}));
+ assert.writeOK(m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 1}}));
var noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, secondOp);
- assert.writeOK(m1.getCollection("test.foo").remove({ m1 : 1 }));
+ assert.writeOK(m1.getCollection("test.foo").remove({m1: 1}));
var thirdOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 98 }));
+ assert.writeOK(m2.getCollection("test.foo").insert({m2: 98}));
var fourthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// No-op delete
- assert.writeOK(m1.getCollection("test.foo").remove({ m1 : 1 }));
+ assert.writeOK(m1.getCollection("test.foo").remove({m1: 1}));
noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, fourthOp);
-
// Dummy write, for a new lastOp.
- assert.writeOK(m1.getCollection("test.foo").insert({ m1 : 99 }));
+ assert.writeOK(m1.getCollection("test.foo").insert({m1: 99}));
var fifthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 97 }));
+ assert.writeOK(m2.getCollection("test.foo").insert({m2: 97}));
var sixthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// No-op find-and-modify delete
- m1.getCollection("test.foo").findAndModify( { query: { m1 : 1 } , remove: 'true'} );
+ m1.getCollection("test.foo").findAndModify({query: {m1: 1}, remove: 'true'});
noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, sixthOp);
- assert.commandWorked(m1.getCollection("test.foo").createIndex({x:1}));
+ assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1}));
var seventhOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 96 }));
+ assert.writeOK(m2.getCollection("test.foo").insert({m2: 96}));
var eighthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// No-op create index.
- assert.commandWorked(m1.getCollection("test.foo").createIndex({x:1}));
+ assert.commandWorked(m1.getCollection("test.foo").createIndex({x: 1}));
noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, eighthOp);
- assert.writeOK(m1.getCollection("test.foo").insert({ _id : 1, x : 1 }));
+ assert.writeOK(m1.getCollection("test.foo").insert({_id: 1, x: 1}));
var ninthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 991 }));
+ assert.writeOK(m2.getCollection("test.foo").insert({m2: 991}));
var tenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// update with immutable field error
- assert.writeError(m1.getCollection("test.foo").update({ _id : 1, x : 1 },
- { $set: { _id : 2 }}));
+ assert.writeError(m1.getCollection("test.foo").update({_id: 1, x: 1}, {$set: {_id: 2}}));
// "After applying the update to the document {_id: 1.0 , ...}, the (immutable) field '_id'
// was found to have been altered to _id: 2.0"
noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, tenthOp);
- assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 992 }));
+ assert.writeOK(m2.getCollection("test.foo").insert({m2: 992}));
var eleventhOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// find-and-modify immutable field error
try {
- m1.getCollection("test.foo").findAndModify( { query: { _id : 1, x : 1 },
- update: { $set: { _id : 2 } } } );
+ m1.getCollection("test.foo")
+ .findAndModify({query: {_id: 1, x: 1}, update: {$set: {_id: 2}}});
// The findAndModify shell helper should throw.
assert(false);
} catch (e) {
@@ -97,24 +95,24 @@
assert.eq(noOp, eleventhOp);
var bigString = new Array(3000).toString();
- assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 994, m3: bigString}));
+ assert.writeOK(m2.getCollection("test.foo").insert({m2: 994, m3: bigString}));
// createIndex with a >1024 byte field fails.
var twelfthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.commandFailed(m1.getCollection("test.foo").createIndex({m3:1}));
+ assert.commandFailed(m1.getCollection("test.foo").createIndex({m3: 1}));
noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, twelfthOp);
// No-op insert
- assert.writeOK(m1.getCollection("test.foo").insert({ _id : 5, x : 5 }));
+ assert.writeOK(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
var thirteenthOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- assert.writeOK(m2.getCollection("test.foo").insert({ m2 : 991 }));
+ assert.writeOK(m2.getCollection("test.foo").insert({m2: 991}));
var fourteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
// Hits DuplicateKey error and fails insert -- no-op
- assert.writeError(m1.getCollection("test.foo").insert({ _id : 5, x : 5 }));
+ assert.writeError(m1.getCollection("test.foo").insert({_id: 5, x: 5}));
noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, fourteenthOp);
@@ -122,14 +120,14 @@
// Test update and delete failures in legacy write mode.
m2.forceWriteMode('legacy');
m1.forceWriteMode('legacy');
- m2.getCollection("test.foo").insert({ m2 : 995 });
+ m2.getCollection("test.foo").insert({m2: 995});
var fifthteenthOp = m2.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
- m1.getCollection("test.foo").remove({ m1 : 1 });
+ m1.getCollection("test.foo").remove({m1: 1});
noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, fifthteenthOp);
- m1.getCollection("test.foo").update({ m1 : 1 }, {$set: {m1: 4}});
+ m1.getCollection("test.foo").update({m1: 1}, {$set: {m1: 4}});
noOp = m1.getCollection("test.foo").getDB().getLastErrorObj().lastOp;
assert.eq(noOp, fifthteenthOp);
diff --git a/jstests/replsets/localhostAuthBypass.js b/jstests/replsets/localhostAuthBypass.js
index 68300c7aeb2..2b2332d258a 100644
--- a/jstests/replsets/localhostAuthBypass.js
+++ b/jstests/replsets/localhostAuthBypass.js
@@ -1,7 +1,7 @@
-//SERVER-6591: Localhost authentication exception doesn't work right on sharded cluster
+// SERVER-6591: Localhost authentication exception doesn't work right on sharded cluster
//
-//This test is to ensure that localhost authentication works correctly against a replica set
-//whether they are hosted with "localhost" or a hostname.
+// This test is to ensure that localhost authentication works correctly against a replica set
+// whether they are hosted with "localhost" or a hostname.
var replSetName = "replsets_server-6591";
var keyfile = "jstests/libs/key1";
@@ -19,55 +19,69 @@ var assertCannotRunCommands = function(mongo, isPrimary) {
print("============ ensuring that commands cannot be run.");
var test = mongo.getDB("test");
- assert.throws( function() { test.system.users.findOne(); });
- assert.throws( function() { test.foo.findOne({ _id: 0 }); });
+ assert.throws(function() {
+ test.system.users.findOne();
+ });
+ assert.throws(function() {
+ test.foo.findOne({_id: 0});
+ });
if (isPrimary) {
- assert.writeError(test.foo.save({ _id: 0 }));
- assert.writeError(test.foo.update({ _id: 0 }, { $set: { x: 20 }}));
- assert.writeError(test.foo.remove({ _id: 0 }));
+ assert.writeError(test.foo.save({_id: 0}));
+ assert.writeError(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeError(test.foo.remove({_id: 0}));
}
- assert.throws(function() {
+ assert.throws(function() {
test.foo.mapReduce(
- function() { emit(1, 1); },
- function(id, count) { return Array.sum(count); },
- { out: "other" });
+ function() {
+ emit(1, 1);
+ },
+ function(id, count) {
+ return Array.sum(count);
+ },
+ {out: "other"});
});
// DB operations
var authorizeErrorCode = 13;
- assert.commandFailedWithCode(mongo.getDB("test").copyDatabase("admin", "admin2"),
- authorizeErrorCode, "copyDatabase");
+ assert.commandFailedWithCode(
+ mongo.getDB("test").copyDatabase("admin", "admin2"), authorizeErrorCode, "copyDatabase");
// Create collection
- assert.commandFailedWithCode(mongo.getDB("test").createCollection(
- "log", { capped: true, size: 5242880, max: 5000 } ),
- authorizeErrorCode, "createCollection");
+ assert.commandFailedWithCode(
+ mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
+ authorizeErrorCode,
+ "createCollection");
// Set/Get system parameters
- var params = [{ param: "journalCommitInterval", val: 200 },
- { param: "logLevel", val: 2 },
- { param: "logUserIds", val: 1 },
- { param: "notablescan", val: 1 },
- { param: "quiet", val: 1 },
- { param: "replApplyBatchSize", val: 10 },
- { param: "replIndexPrefetch", val: "none" },
- { param: "syncdelay", val: 30 },
- { param: "traceExceptions", val: true },
- { param: "sslMode", val: "preferSSL" },
- { param: "clusterAuthMode", val: "sendX509" },
- { param: "userCacheInvalidationIntervalSecs", val: 300 }
- ];
+ var params = [
+ {param: "journalCommitInterval", val: 200},
+ {param: "logLevel", val: 2},
+ {param: "logUserIds", val: 1},
+ {param: "notablescan", val: 1},
+ {param: "quiet", val: 1},
+ {param: "replApplyBatchSize", val: 10},
+ {param: "replIndexPrefetch", val: "none"},
+ {param: "syncdelay", val: 30},
+ {param: "traceExceptions", val: true},
+ {param: "sslMode", val: "preferSSL"},
+ {param: "clusterAuthMode", val: "sendX509"},
+ {param: "userCacheInvalidationIntervalSecs", val: 300}
+ ];
params.forEach(function(p) {
- var cmd = { setParameter: 1 };
+ var cmd = {
+ setParameter: 1
+ };
cmd[p.param] = p.val;
- assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
- authorizeErrorCode, "setParameter: "+p.param);
+ assert.commandFailedWithCode(
+ mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "setParameter: " + p.param);
});
params.forEach(function(p) {
- var cmd = { getParameter: 1 };
+ var cmd = {
+ getParameter: 1
+ };
cmd[p.param] = 1;
- assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
- authorizeErrorCode, "getParameter: "+p.param);
+ assert.commandFailedWithCode(
+ mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "getParameter: " + p.param);
});
};
@@ -78,16 +92,18 @@ var assertCanRunCommands = function(mongo) {
// will throw on failure
test.system.users.findOne();
- assert.writeOK(test.foo.save({_id: 0 }));
- assert.writeOK(test.foo.update({ _id: 0 }, { $set: { x: 20 }}));
- assert.writeOK(test.foo.remove({ _id: 0 }));
-
+ assert.writeOK(test.foo.save({_id: 0}));
+ assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeOK(test.foo.remove({_id: 0}));
test.foo.mapReduce(
- function() { emit(1, 1); },
- function(id, count) { return Array.sum(count); },
- { out: "other" }
- );
+ function() {
+ emit(1, 1);
+ },
+ function(id, count) {
+ return Array.sum(count);
+ },
+ {out: "other"});
assert.commandWorked(mongo.getDB("admin").runCommand({replSetGetStatus: 1}));
};
@@ -98,11 +114,8 @@ var authenticate = function(mongo) {
};
var start = function(useHostName) {
- var rs = new ReplSetTest({name: replSetName,
- nodes : 3,
- keyFile : keyfile,
- auth: "",
- useHostName: useHostName});
+ var rs = new ReplSetTest(
+ {name: replSetName, nodes: 3, keyFile: keyfile, auth: "", useHostName: useHostName});
rs.startSet();
rs.initiate();
@@ -111,9 +124,9 @@ var start = function(useHostName) {
var shutdown = function(rs) {
print("============ shutting down.");
- rs.stopSet(/*signal*/false,
- /*forRestart*/false,
- { auth: { user: username, pwd: password}});
+ rs.stopSet(/*signal*/ false,
+ /*forRestart*/ false,
+ {auth: {user: username, pwd: password}});
};
var runTest = function(useHostName) {
@@ -181,7 +194,7 @@ var runTest = function(useHostName) {
var runNonlocalTest = function(ipAddr) {
print("==========================");
- print("starting mongod: non-local host access "+ipAddr);
+ print("starting mongod: non-local host access " + ipAddr);
print("==========================");
var rs = start(false);
@@ -190,7 +203,7 @@ var runNonlocalTest = function(ipAddr) {
var secHosts = [];
rs.getSecondaries().forEach(function(sec) {
- secHosts.push(ipAddr + ":" + rs.getPort(sec));
+ secHosts.push(ipAddr + ":" + rs.getPort(sec));
});
var mongo = new Mongo(host);
@@ -207,8 +220,10 @@ var runNonlocalTest = function(ipAddr) {
assertCannotRunCommands(m, false);
});
- assert.throws(function() { mongo.getDB("admin").createUser
- ({ user:username, pwd: password, roles: jsTest.adminUserRoles }); });
+ assert.throws(function() {
+ mongo.getDB("admin")
+ .createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
+ });
shutdown(rs);
};
diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js
index 28032383e76..7e49e07e396 100644
--- a/jstests/replsets/maintenance.js
+++ b/jstests/replsets/maintenance.js
@@ -1,7 +1,7 @@
-var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 2} );
-var conns = replTest.startSet({ verbose: 1 });
+var replTest = new ReplSetTest({name: 'unicomplex', nodes: 2});
+var conns = replTest.startSet({verbose: 1});
var config = replTest.getReplSetConfig();
config.members[0].priority = 2;
replTest.initiate(config);
@@ -11,17 +11,20 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60000);
var master = replTest.getPrimary();
for (i = 0; i < 20; i++) {
- master.getDB("bar").foo.insert({x:1,y:i,abc:123,str:"foo bar baz"});
+ master.getDB("bar").foo.insert({x: 1, y: i, abc: 123, str: "foo bar baz"});
}
for (i = 0; i < 20; i++) {
- master.getDB("bar").foo.update({ y: i }, { $push: { foo: "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"}});
+ master.getDB("bar").foo.update({y: i}, {$push: {foo: "barrrrrrrrrrrrrrrrrrrrrrrrrrrrrrr"}});
}
replTest.awaitReplication();
-assert.soon(function() { return conns[1].getDB("admin").isMaster().secondary; });
+assert.soon(function() {
+ return conns[1].getDB("admin").isMaster().secondary;
+});
-join = startParallelShell( "db.getSisterDB('bar').runCommand({compact : 'foo'});", replTest.ports[1] );
+join =
+ startParallelShell("db.getSisterDB('bar').runCommand({compact : 'foo'});", replTest.ports[1]);
print("joining");
join();
@@ -31,7 +34,8 @@ var secondarySoon = function() {
var x = 0;
assert.soon(function() {
var im = conns[1].getDB("admin").isMaster();
- if (x++ % 5 == 0) printjson(im);
+ if (x++ % 5 == 0)
+ printjson(im);
return im.secondary;
});
};
@@ -41,7 +45,7 @@ secondarySoon();
print("make sure compact works on a secondary (SERVER-3923)");
master.getDB("foo").bar.drop();
replTest.awaitReplication();
-var result = conns[1].getDB("foo").runCommand({compact : "bar"});
+var result = conns[1].getDB("foo").runCommand({compact: "bar"});
assert.eq(result.ok, 0, tojson(result));
secondarySoon();
@@ -49,7 +53,7 @@ secondarySoon();
print("use replSetMaintenance command to go in/out of maintence mode");
print("primary cannot go into maintence mode");
-result = master.getDB("admin").runCommand({replSetMaintenance : 1});
+result = master.getDB("admin").runCommand({replSetMaintenance: 1});
assert.eq(result.ok, 0, tojson(result));
print("check getMore works on a secondary, not on a recovering node");
@@ -59,32 +63,29 @@ for (var i = 0; i < 5; i++) {
}
print("secondary can");
-result = conns[1].getDB("admin").runCommand({replSetMaintenance : 1});
+result = conns[1].getDB("admin").runCommand({replSetMaintenance: 1});
assert.eq(result.ok, 1, tojson(result));
print("make sure secondary goes into recovering");
var x = 0;
assert.soon(function() {
var im = conns[1].getDB("admin").isMaster();
- if (x++ % 5 == 0) printjson(im);
+ if (x++ % 5 == 0)
+ printjson(im);
return !im.secondary && !im.ismaster;
});
print("now getmore shouldn't work");
-var ex = assert.throws(
- function(){
- lastDoc = null;
- while (cursor.hasNext()) {
- lastDoc = cursor.next();
- }
- },
- [] /*no params*/,
- "getmore didn't fail");
-
-assert(ex.message.match("13436"), "wrong error code -- " + ex );
-
-result = conns[1].getDB("admin").runCommand({replSetMaintenance : 0});
+var ex = assert.throws(function() {
+ lastDoc = null;
+ while (cursor.hasNext()) {
+ lastDoc = cursor.next();
+ }
+}, [] /*no params*/, "getmore didn't fail");
+
+assert(ex.message.match("13436"), "wrong error code -- " + ex);
+
+result = conns[1].getDB("admin").runCommand({replSetMaintenance: 0});
assert.eq(result.ok, 1, tojson(result));
secondarySoon();
-
diff --git a/jstests/replsets/maintenance2.js b/jstests/replsets/maintenance2.js
index f1bce2159d5..c5e6d9c07e6 100644
--- a/jstests/replsets/maintenance2.js
+++ b/jstests/replsets/maintenance2.js
@@ -5,7 +5,7 @@
// Replica set testing API
// Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
// call startSet() to start each mongod in the replica set
// this returns a list of nodes
@@ -34,15 +34,22 @@
slaves.forEach(function(slave) {
// put slave into maintenance (recovery) mode
- slave.getDB("foo").adminCommand({replSetMaintenance:1});
+ slave.getDB("foo").adminCommand({replSetMaintenance: 1});
- var stats = slave.getDB("foo").adminCommand({replSetGetStatus:1});
+ var stats = slave.getDB("foo").adminCommand({replSetGetStatus: 1});
assert.eq(stats.myState, 3, "Slave should be in recovering state.");
print("group should fail in recovering state...");
slave.slaveOk = true;
- assert.commandFailed(slave.getDB("foo").foo.runCommand(
- {group: {ns: "foo", initial: {n:0}, $reduce: function(obj,out){out.n++;}}}));
+ assert.commandFailed(slave.getDB("foo").foo.runCommand({
+ group: {
+ ns: "foo",
+ initial: {n: 0},
+ $reduce: function(obj, out) {
+ out.n++;
+ }
+ }
+ }));
print("count should fail in recovering state...");
slave.slaveOk = true;
diff --git a/jstests/replsets/maintenance_non-blocking.js b/jstests/replsets/maintenance_non-blocking.js
index 5815893e5df..4606bcc1985 100644
--- a/jstests/replsets/maintenance_non-blocking.js
+++ b/jstests/replsets/maintenance_non-blocking.js
@@ -1,7 +1,7 @@
// This test ensures that the replSetMaintenance command will not block, nor block-on, a db write
doTest = function() {
"use strict";
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} );
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
var nodes = replTest.startSet();
replTest.initiate();
@@ -29,7 +29,7 @@ doTest = function() {
var ismaster = assert.commandWorked(sColl.runCommand("ismaster"));
assert.eq(false, ismaster.ismaster);
assert.eq(false, ismaster.secondary);
-
+
print("******* writing to primary ************* ");
assert.writeOK(mColl.save({_id: -1}));
printjson(sDB.currentOp());
diff --git a/jstests/replsets/maxSyncSourceLagSecs.js b/jstests/replsets/maxSyncSourceLagSecs.js
index 8d44dd5ddb2..0e7fe04355b 100644
--- a/jstests/replsets/maxSyncSourceLagSecs.js
+++ b/jstests/replsets/maxSyncSourceLagSecs.js
@@ -5,18 +5,22 @@
(function() {
"use strict";
var name = "maxSyncSourceLagSecs";
- var replTest = new ReplSetTest({name: name,
- nodes: 3,
- oplogSize: 5,
- nodeOptions: {setParameter: "maxSyncSourceLagSecs=3"}});
+ var replTest = new ReplSetTest({
+ name: name,
+ nodes: 3,
+ oplogSize: 5,
+ nodeOptions: {setParameter: "maxSyncSourceLagSecs=3"}
+ });
var nodes = replTest.nodeList();
replTest.startSet();
- replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1], priority: 0 },
- { "_id": 2, "host": nodes[2], priority: 0 }],
- });
+ replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], priority: 0}
+ ],
+ });
var master = replTest.getPrimary();
master.getDB("foo").bar.save({a: 1});
@@ -30,24 +34,24 @@
jsTestLog("Setting sync target of slave 2 to slave 1");
assert.commandWorked(slaves[1].getDB("admin").runCommand({replSetSyncFrom: slaves[0].name}));
assert.soon(function() {
- var res = slaves[1].getDB("admin").runCommand({"replSetGetStatus": 1});
- return res.syncingTo === slaves[0].name;
- }, "sync target not changed to other slave");
+ var res = slaves[1].getDB("admin").runCommand({"replSetGetStatus": 1});
+ return res.syncingTo === slaves[0].name;
+ }, "sync target not changed to other slave");
printjson(replTest.status());
jsTestLog("Lock slave 1 and add some docs. Force sync target for slave 2 to change to primary");
- assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync:1, lock: 1}));
+ assert.commandWorked(slaves[0].getDB("admin").runCommand({fsync: 1, lock: 1}));
master.getDB("foo").bar.save({a: 2});
assert.soon(function() {
- var res = slaves[1].getDB("admin").runCommand({"replSetGetStatus": 1});
- return res.syncingTo === master.name;
- }, "sync target not changed back to primary");
+ var res = slaves[1].getDB("admin").runCommand({"replSetGetStatus": 1});
+ return res.syncingTo === master.name;
+ }, "sync target not changed back to primary");
printjson(replTest.status());
assert.soon(function() {
- return (slaves[1].getDB("foo").bar.count() === 2);
- }, "slave should have caught up after syncing to primary.");
+ return (slaves[1].getDB("foo").bar.count() === 2);
+ }, "slave should have caught up after syncing to primary.");
assert.commandWorked(slaves[0].getDB("admin").fsyncUnlock());
replTest.stopSet();
diff --git a/jstests/replsets/no_chaining.js b/jstests/replsets/no_chaining.js
index 88bbe7a78d0..ad086c72f9a 100644
--- a/jstests/replsets/no_chaining.js
+++ b/jstests/replsets/no_chaining.js
@@ -1,47 +1,39 @@
-function myprint( x ) {
- print( "chaining output: " + x );
+function myprint(x) {
+ print("chaining output: " + x);
}
var replTest = new ReplSetTest({name: 'testSet', nodes: 3, useBridge: true});
var nodes = replTest.startSet();
var hostnames = replTest.nodeList();
-replTest.initiate(
- {
- "_id" : "testSet",
- "members" : [
- {"_id" : 0, "host" : hostnames[0], priority: 2},
- {"_id" : 1, "host" : hostnames[1], priority: 0},
- {"_id" : 2, "host" : hostnames[2], priority: 0}
- ],
- "settings" : {
- "chainingAllowed" : false
- }
- }
-);
+replTest.initiate({
+ "_id": "testSet",
+ "members": [
+ {"_id": 0, "host": hostnames[0], priority: 2},
+ {"_id": 1, "host": hostnames[1], priority: 0},
+ {"_id": 2, "host": hostnames[2], priority: 0}
+ ],
+ "settings": {"chainingAllowed": false}
+});
var master = replTest.getPrimary();
replTest.awaitReplication();
-
var breakNetwork = function() {
nodes[0].disconnect(nodes[2]);
master = replTest.getPrimary();
};
var checkNoChaining = function() {
- master.getDB("test").foo.insert({x:1});
+ master.getDB("test").foo.insert({x: 1});
- assert.soon(
- function() {
- return nodes[1].getDB("test").foo.findOne() != null;
- }
- );
+ assert.soon(function() {
+ return nodes[1].getDB("test").foo.findOne() != null;
+ });
- var endTime = (new Date()).getTime()+10000;
+ var endTime = (new Date()).getTime() + 10000;
while ((new Date()).getTime() < endTime) {
- assert(nodes[2].getDB("test").foo.findOne() == null,
- 'Check that 2 does not catch up');
+ assert(nodes[2].getDB("test").foo.findOne() == null, 'Check that 2 does not catch up');
}
};
@@ -53,13 +45,10 @@ var forceSync = function() {
config = nodes[2].getDB("local").system.replset.findOne();
}
var targetHost = config.members[1].host;
- printjson(nodes[2].getDB("admin").runCommand({replSetSyncFrom : targetHost}));
- assert.soon(
- function() {
- return nodes[2].getDB("test").foo.findOne() != null;
- },
- 'Check for data after force sync'
- );
+ printjson(nodes[2].getDB("admin").runCommand({replSetSyncFrom: targetHost}));
+ assert.soon(function() {
+ return nodes[2].getDB("test").foo.findOne() != null;
+ }, 'Check for data after force sync');
};
// SERVER-12922
diff --git a/jstests/replsets/oplog_format.js b/jstests/replsets/oplog_format.js
index e27374eaf6e..5dc60e33434 100644
--- a/jstests/replsets/oplog_format.js
+++ b/jstests/replsets/oplog_format.js
@@ -1,117 +1,117 @@
/**
* These tests verify that the oplog entries are created correctly for updates
- *
+ *
* Do not add more tests here but instead add C++ unit tests in db/ops/modifier*_test files
*
- */
+ */
"use strict";
-var replTest = new ReplSetTest( { nodes: 1, oplogSize:2, nodeOptions: {smallfiles:""}} );
+var replTest = new ReplSetTest({nodes: 1, oplogSize: 2, nodeOptions: {smallfiles: ""}});
var nodes = replTest.startSet();
replTest.initiate();
var master = replTest.getPrimary();
var coll = master.getDB("o").fake;
var cdb = coll.getDB();
-var assertLastOplog = function( o, o2 , msg) {
- var last = master.getDB("local").oplog.rs.find().limit(1).sort({$natural:-1}).next();
+var assertLastOplog = function(o, o2, msg) {
+ var last = master.getDB("local").oplog.rs.find().limit(1).sort({$natural: -1}).next();
assert.eq(last.ns, coll.getFullName(), "ns bad : " + msg);
assert.docEq(last.o, o, "o bad : " + msg);
- if(o2)
+ if (o2)
assert.docEq(last.o2, o2, "o2 bad : " + msg);
return last.ts;
};
// set things up.
-coll.save({_id:1});
-assertLastOplog({_id:1}, null, "save -- setup ");
+coll.save({_id: 1});
+assertLastOplog({_id: 1}, null, "save -- setup ");
/**
* The first ones are from the old updatetests which tested the internal impl using a modSet
*/
var msg = "IncRewriteExistingField: $inc $set";
-coll.save({_id:1, a:2});
-assertLastOplog({_id:1, a:2}, {_id:1}, "save " + msg);
-var res = assert.writeOK(coll.update({}, { $inc: { a: 1 }, $set: { b: 2 }}));
+coll.save({_id: 1, a: 2});
+assertLastOplog({_id: 1, a: 2}, {_id: 1}, "save " + msg);
+var res = assert.writeOK(coll.update({}, {$inc: {a: 1}, $set: {b: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a:3, b:2}, coll.findOne({}), msg);
-assertLastOplog({$set:{a:3, b:2}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: 3, b: 2}, coll.findOne({}), msg);
+assertLastOplog({$set: {a: 3, b: 2}}, {_id: 1}, msg);
var msg = "IncRewriteNonExistingField: $inc $set";
-coll.save({_id:1, c:0});
-assertLastOplog({_id:1, c:0}, {_id:1}, "save " + msg);
-res = assert.writeOK(coll.update({}, { $inc: { a: 1 }, $set: { b: 2 }}));
+coll.save({_id: 1, c: 0});
+assertLastOplog({_id: 1, c: 0}, {_id: 1}, "save " + msg);
+res = assert.writeOK(coll.update({}, {$inc: {a: 1}, $set: {b: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, c:0, a:1, b:2}, coll.findOne({}), msg);
-assertLastOplog({$set:{a:1, b:2}}, {_id:1}, msg);
+assert.docEq({_id: 1, c: 0, a: 1, b: 2}, coll.findOne({}), msg);
+assertLastOplog({$set: {a: 1, b: 2}}, {_id: 1}, msg);
var msg = "TwoNestedPulls: two $pull";
-coll.save({_id:1, a:{ b:[ 1, 2 ], c:[ 1, 2 ] }});
-assertLastOplog({_id:1, a:{ b:[ 1, 2 ], c:[ 1, 2 ] }}, {_id:1}, "save " + msg);
-res = assert.writeOK(coll.update({}, { $pull: { 'a.b': 2, 'a.c': 2 }}));
+coll.save({_id: 1, a: {b: [1, 2], c: [1, 2]}});
+assertLastOplog({_id: 1, a: {b: [1, 2], c: [1, 2]}}, {_id: 1}, "save " + msg);
+res = assert.writeOK(coll.update({}, {$pull: {'a.b': 2, 'a.c': 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a:{ b:[ 1 ], c:[ 1 ] }}, coll.findOne({}), msg);
-assertLastOplog({$set:{'a.b':[1], 'a.c':[1]}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: {b: [1], c: [1]}}, coll.findOne({}), msg);
+assertLastOplog({$set: {'a.b': [1], 'a.c': [1]}}, {_id: 1}, msg);
var msg = "MultiSets: two $set";
-coll.save({_id:1, a:1, b:1});
-assertLastOplog({_id:1, a:1, b:1}, {_id:1}, "save " + msg);
-res = assert.writeOK(coll.update({}, { $set: { a: 2, b: 2 }}));
+coll.save({_id: 1, a: 1, b: 1});
+assertLastOplog({_id: 1, a: 1, b: 1}, {_id: 1}, "save " + msg);
+res = assert.writeOK(coll.update({}, {$set: {a: 2, b: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a:2, b:2}, coll.findOne({}), msg);
-assertLastOplog({$set:{a:2, b:2}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: 2, b: 2}, coll.findOne({}), msg);
+assertLastOplog({$set: {a: 2, b: 2}}, {_id: 1}, msg);
// More tests to validate the oplog format and correct excution
var msg = "bad single $set";
-coll.save({_id:1, a:1});
-assertLastOplog({_id:1, a:1}, {_id:1}, "save " + msg);
-res = assert.writeOK(coll.update({}, { $set: { a: 2 }}));
+coll.save({_id: 1, a: 1});
+assertLastOplog({_id: 1, a: 1}, {_id: 1}, "save " + msg);
+res = assert.writeOK(coll.update({}, {$set: {a: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a:2}, coll.findOne({}), msg);
-assertLastOplog({$set:{a:2}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: 2}, coll.findOne({}), msg);
+assertLastOplog({$set: {a: 2}}, {_id: 1}, msg);
var msg = "bad single $inc";
-res = assert.writeOK(coll.update({}, { $inc: { a: 1 }}));
+res = assert.writeOK(coll.update({}, {$inc: {a: 1}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a:3}, coll.findOne({}), msg);
-assertLastOplog({$set:{a:3}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: 3}, coll.findOne({}), msg);
+assertLastOplog({$set: {a: 3}}, {_id: 1}, msg);
var msg = "bad double $set";
-res = assert.writeOK(coll.update({}, { $set: { a: 2, b: 2 }}));
+res = assert.writeOK(coll.update({}, {$set: {a: 2, b: 2}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a:2, b:2}, coll.findOne({}), msg);
-assertLastOplog({$set:{a:2, b:2}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: 2, b: 2}, coll.findOne({}), msg);
+assertLastOplog({$set: {a: 2, b: 2}}, {_id: 1}, msg);
var msg = "bad save";
-assert.writeOK(coll.save({ _id: 1, a: [2] }));
-assert.docEq({_id:1, a:[2]}, coll.findOne({}), msg);
-assertLastOplog({_id:1, a:[2]}, {_id:1}, msg);
+assert.writeOK(coll.save({_id: 1, a: [2]}));
+assert.docEq({_id: 1, a: [2]}, coll.findOne({}), msg);
+assertLastOplog({_id: 1, a: [2]}, {_id: 1}, msg);
var msg = "bad array $inc";
-res = assert.writeOK(coll.update({}, { $inc: { "a.0": 1 }}));
+res = assert.writeOK(coll.update({}, {$inc: {"a.0": 1}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a:[3]}, coll.findOne({}), msg);
-var lastTS = assertLastOplog({$set:{"a.0": 3}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: [3]}, coll.findOne({}), msg);
+var lastTS = assertLastOplog({$set: {"a.0": 3}}, {_id: 1}, msg);
var msg = "bad $setOnInsert";
-res = assert.writeOK(coll.update({}, { $setOnInsert: { a: -1 }}));
+res = assert.writeOK(coll.update({}, {$setOnInsert: {a: -1}}));
assert.eq(res.nMatched, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a:[3]}, coll.findOne({}), msg); // No-op
-var otherTS = assertLastOplog({$set:{"a.0": 3}}, {_id:1}, msg); // Nothing new
-assert.eq(lastTS, otherTS, "new oplog was not expected -- " + msg); // No new oplog entry
+assert.docEq({_id: 1, a: [3]}, coll.findOne({}), msg); // No-op
+var otherTS = assertLastOplog({$set: {"a.0": 3}}, {_id: 1}, msg); // Nothing new
+assert.eq(lastTS, otherTS, "new oplog was not expected -- " + msg); // No new oplog entry
coll.remove({});
assert.eq(coll.count(), 0, "collection not empty");
var msg = "bad $setOnInsert w/upsert";
-res = assert.writeOK(coll.update({}, { $setOnInsert: { a: 200 }}, { upsert: true })); // upsert
+res = assert.writeOK(coll.update({}, {$setOnInsert: {a: 200}}, {upsert: true})); // upsert
assert.eq(res.nUpserted, 1, "update failed for '" + msg + "': " + res.toString());
var id = res.getUpsertedId()._id;
-assert.docEq({_id: id, a: 200 }, coll.findOne({}), msg); // No-op
-assertLastOplog({ _id: id, a: 200 }, null, msg); // No new oplog entry
+assert.docEq({_id: id, a: 200}, coll.findOne({}), msg); // No-op
+assertLastOplog({_id: id, a: 200}, null, msg); // No new oplog entry
coll.remove({});
assert.eq(coll.count(), 0, "collection not empty-2");
@@ -130,54 +130,49 @@ assertLastOplog({$set:{"a": [1,2,3]}}, {_id:1}, msg); // new format
*/
var msg = "bad array $push 2";
-coll.save({_id:1, a:"foo"});
-res = assert.writeOK(coll.update({}, { $push: { c: 18 }}));
+coll.save({_id: 1, a: "foo"});
+res = assert.writeOK(coll.update({}, {$push: {c: 18}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a:"foo", c:[18]}, coll.findOne({}), msg);
-assertLastOplog({$set:{"c": [18]}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: "foo", c: [18]}, coll.findOne({}), msg);
+assertLastOplog({$set: {"c": [18]}}, {_id: 1}, msg);
var msg = "bad array $push $slice";
-coll.save({_id:1, a:{b:[18]}});
-res = assert.writeOK(coll.update({ _id: { $gt: 0 }},
- { $push: { "a.b": { $each: [1, 2], $slice: -2 }}}));
+coll.save({_id: 1, a: {b: [18]}});
+res = assert.writeOK(coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [1, 2], $slice: -2}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a: {b:[1,2]}}, coll.findOne({}), msg);
-assertLastOplog({$set:{"a.b": [1,2]}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: {b: [1, 2]}}, coll.findOne({}), msg);
+assertLastOplog({$set: {"a.b": [1, 2]}}, {_id: 1}, msg);
var msg = "bad array $push $sort ($slice -100)";
-coll.save({_id:1, a:{b:[{c:2}, {c:1}]}});
-res = assert.writeOK(coll.update({}, { $push: { "a.b": { $each: [{ c: -1 }],
- $sort: { c: 1 },
- $slice: -100 }}}));
+coll.save({_id: 1, a: {b: [{c: 2}, {c: 1}]}});
+res = assert.writeOK(
+ coll.update({}, {$push: {"a.b": {$each: [{c: -1}], $sort: {c: 1}, $slice: -100}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a: {b:[{c:-1}, {c:1}, {c:2}]}}, coll.findOne({}), msg);
-assertLastOplog({$set:{"a.b": [{c:-1},{c:1}, {c:2}]}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: {b: [{c: -1}, {c: 1}, {c: 2}]}}, coll.findOne({}), msg);
+assertLastOplog({$set: {"a.b": [{c: -1}, {c: 1}, {c: 2}]}}, {_id: 1}, msg);
var msg = "bad array $push $slice $sort";
-coll.save({_id:1, a:[{b:2}, {b:1}]});
-res = assert.writeOK(coll.update({ _id: { $gt: 0 }}, { $push: { a: { $each: [{ b: -1 }],
- $slice:-2,
- $sort: { b: 1 }}}}));
+coll.save({_id: 1, a: [{b: 2}, {b: 1}]});
+res = assert.writeOK(
+ coll.update({_id: {$gt: 0}}, {$push: {a: {$each: [{b: -1}], $slice: -2, $sort: {b: 1}}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a: [{b:1}, {b:2}]}, coll.findOne({}), msg);
-assertLastOplog({$set:{a: [{b:1},{b:2}]}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: [{b: 1}, {b: 2}]}, coll.findOne({}), msg);
+assertLastOplog({$set: {a: [{b: 1}, {b: 2}]}}, {_id: 1}, msg);
var msg = "bad array $push $slice $sort first two";
-coll.save({_id:1, a:{b:[{c:2}, {c:1}]}});
-res = assert.writeOK(coll.update({ _id: { $gt: 0 }}, { $push: { "a.b": { $each: [{ c: -1 }],
- $slice: -2,
- $sort: { c: 1 }}}}));
+coll.save({_id: 1, a: {b: [{c: 2}, {c: 1}]}});
+res = assert.writeOK(
+ coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [{c: -1}], $slice: -2, $sort: {c: 1}}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a: {b:[{c:1}, {c:2}]}}, coll.findOne({}), msg);
-assertLastOplog({$set:{"a.b": [{c:1},{c:2}]}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: {b: [{c: 1}, {c: 2}]}}, coll.findOne({}), msg);
+assertLastOplog({$set: {"a.b": [{c: 1}, {c: 2}]}}, {_id: 1}, msg);
var msg = "bad array $push $slice $sort reversed first two";
-coll.save({_id:1, a:{b:[{c:1}, {c:2}]}});
-res = assert.writeOK(coll.update({ _id: { $gt: 0 }}, { $push: { "a.b": { $each: [{ c: -1 }],
- $slice: -2,
- $sort: { c: -1 }}}}));
+coll.save({_id: 1, a: {b: [{c: 1}, {c: 2}]}});
+res = assert.writeOK(coll.update(
+ {_id: {$gt: 0}}, {$push: {"a.b": {$each: [{c: -1}], $slice: -2, $sort: {c: -1}}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
-assert.docEq({_id:1, a: {b:[{c:1}, {c:-1}]}}, coll.findOne({}), msg);
-assertLastOplog({$set:{"a.b": [{c:1},{c:-1}]}}, {_id:1}, msg);
+assert.docEq({_id: 1, a: {b: [{c: 1}, {c: -1}]}}, coll.findOne({}), msg);
+assertLastOplog({$set: {"a.b": [{c: 1}, {c: -1}]}}, {_id: 1}, msg);
replTest.stopSet();
diff --git a/jstests/replsets/oplog_note_cmd.js b/jstests/replsets/oplog_note_cmd.js
index ecd7e47ca38..0c92609535a 100644
--- a/jstests/replsets/oplog_note_cmd.js
+++ b/jstests/replsets/oplog_note_cmd.js
@@ -6,7 +6,7 @@ rs.initiate();
var primary = rs.getPrimary();
var db = primary.getDB('admin');
-db.foo.insert({a:1});
+db.foo.insert({a: 1});
// Make sure "optime" field gets updated
var statusBefore = db.runCommand({replSetGetStatus: 1});
diff --git a/jstests/replsets/oplog_term.js b/jstests/replsets/oplog_term.js
index 6aa6ca612ae..76ba6babfa7 100644
--- a/jstests/replsets/oplog_term.js
+++ b/jstests/replsets/oplog_term.js
@@ -1,52 +1,56 @@
// Term counter should be present in oplog entries under protocol version 1 but should be absent
// protocol version 0.
-(function () {
- 'use strict';
- load('jstests/replsets/rslib.js');
+(function() {
+ 'use strict';
+ load('jstests/replsets/rslib.js');
- var name = 'oplog_term';
- var replSet = new ReplSetTest({name: name, nodes: 1, protocolVersion: 0});
- replSet.startSet();
- replSet.initiate();
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000);
+ var name = 'oplog_term';
+ var replSet = new ReplSetTest({name: name, nodes: 1, protocolVersion: 0});
+ replSet.startSet();
+ replSet.initiate();
+ replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000);
- // Protocol version 0 - 'term' field should be absent from oplog entry.
- var primary = replSet.getPrimary();
- var collection = primary.getDB('test').getCollection(name);
- assert.writeOK(collection.save({_id: 1}));
+ // Protocol version 0 - 'term' field should be absent from oplog entry.
+ var primary = replSet.getPrimary();
+ var collection = primary.getDB('test').getCollection(name);
+ assert.writeOK(collection.save({_id: 1}));
- var oplogEntry = getLatestOp(primary);
- assert(oplogEntry, 'unexpected empty oplog');
- assert.eq(collection.getFullName(), oplogEntry.ns,
- 'unexpected namespace in oplog entry: ' + tojson(oplogEntry));
- assert.eq(1, oplogEntry.o._id,
- 'oplog entry does not refer to most recently inserted document: ' +
- tojson(oplogEntry));
- assert(!oplogEntry.hasOwnProperty('t'),
- 'oplog entry must not contain term: ' + tojson(oplogEntry));
+ var oplogEntry = getLatestOp(primary);
+ assert(oplogEntry, 'unexpected empty oplog');
+ assert.eq(collection.getFullName(),
+ oplogEntry.ns,
+ 'unexpected namespace in oplog entry: ' + tojson(oplogEntry));
+ assert.eq(
+ 1,
+ oplogEntry.o._id,
+ 'oplog entry does not refer to most recently inserted document: ' + tojson(oplogEntry));
+ assert(!oplogEntry.hasOwnProperty('t'),
+ 'oplog entry must not contain term: ' + tojson(oplogEntry));
- // Protocol version 1 - 'term' field should present in oplog entry.
- var config = assert.commandWorked(primary.adminCommand({replSetGetConfig: 1})).config;
- config.protocolVersion = 1;
- config.version++;
- assert.commandWorked(primary.adminCommand({replSetReconfig: config}));
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000);
+ // Protocol version 1 - 'term' field should present in oplog entry.
+ var config = assert.commandWorked(primary.adminCommand({replSetGetConfig: 1})).config;
+ config.protocolVersion = 1;
+ config.version++;
+ assert.commandWorked(primary.adminCommand({replSetReconfig: config}));
+ replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 5 * 1000);
- primary = replSet.getPrimary();
- collection = primary.getDB('test').getCollection(name);
- assert.writeOK(collection.save({_id: 2}));
+ primary = replSet.getPrimary();
+ collection = primary.getDB('test').getCollection(name);
+ assert.writeOK(collection.save({_id: 2}));
- oplogEntry = getLatestOp(primary);
- assert(oplogEntry, 'unexpected empty oplog');
- assert.eq(collection.getFullName(), oplogEntry.ns,
- 'unexpected namespace in oplog entry: ' + tojson(oplogEntry));
- assert.eq(2, oplogEntry.o._id,
- 'oplog entry does not refer to most recently inserted document: ' +
- tojson(oplogEntry));
- assert(oplogEntry.hasOwnProperty('t'),
- 'oplog entry must contain term: ' + tojson(oplogEntry));
+ oplogEntry = getLatestOp(primary);
+ assert(oplogEntry, 'unexpected empty oplog');
+ assert.eq(collection.getFullName(),
+ oplogEntry.ns,
+ 'unexpected namespace in oplog entry: ' + tojson(oplogEntry));
+ assert.eq(
+ 2,
+ oplogEntry.o._id,
+ 'oplog entry does not refer to most recently inserted document: ' + tojson(oplogEntry));
+ assert(oplogEntry.hasOwnProperty('t'), 'oplog entry must contain term: ' + tojson(oplogEntry));
- var status = assert.commandWorked(primary.adminCommand({replSetGetStatus:1}));
- assert.eq(status.term, oplogEntry.t,
- 'term in oplog entry does not match term in status: ' + tojson(oplogEntry));
+ var status = assert.commandWorked(primary.adminCommand({replSetGetStatus: 1}));
+ assert.eq(status.term,
+ oplogEntry.t,
+ 'term in oplog entry does not match term in status: ' + tojson(oplogEntry));
})();
diff --git a/jstests/replsets/oplog_truncated_on_recovery.js b/jstests/replsets/oplog_truncated_on_recovery.js
index 4fd4690f0c6..f4e1bf9b1ec 100644
--- a/jstests/replsets/oplog_truncated_on_recovery.js
+++ b/jstests/replsets/oplog_truncated_on_recovery.js
@@ -28,11 +28,7 @@
jsTest.log(tojson(arg));
}
- var replTest = new ReplSetTest(
- {
- name : "oplog_truncated_on_recovery",
- nodes : 1
- });
+ var replTest = new ReplSetTest({name: "oplog_truncated_on_recovery", nodes: 1});
var nodes = replTest.startSet();
replTest.initiate();
@@ -42,22 +38,12 @@
var minvalidColl = localDB["replset.minvalid"];
// Write op
- log(assert.writeOK(testDB.foo.save(
- {
- _id : 1,
- a : 1
- },
- {
- writeConcern :
- {
- w : 1
- }
- })));
+ log(assert.writeOK(testDB.foo.save({_id: 1, a: 1}, {writeConcern: {w: 1}})));
// Set minvalid to something far in the future for the current primary, to simulate recovery.
// Note: This is so far in the future (5 days) that it will never become secondary.
- var farFutureTS = new Timestamp(Math.floor(new Date().getTime() / 1000)
- + (60 * 60 * 24 * 5 /* in five days */), 0);
+ var farFutureTS = new Timestamp(
+ Math.floor(new Date().getTime() / 1000) + (60 * 60 * 24 * 5 /* in five days */), 0);
var rsgs = assert.commandWorked(localDB.adminCommand("replSetGetStatus"));
log(rsgs);
var primaryOpTime = rsgs.members[0].optime;
@@ -69,31 +55,15 @@
// We do an update in case there is a minvalid document on the primary already.
// If the doc doesn't exist then upsert:true will create it, and the writeConcern ensures
// that update returns details of the write, like whether an update or insert was performed.
- log(assert.writeOK(minvalidColl.update(
- {},
- {
- ts : farFutureTS,
- t : NumberLong(-1),
- begin : primaryOpTime
- },
- {
- upsert : true,
- writeConcern :
- {
- w : 1
- }
- })));
+ log(assert.writeOK(
+ minvalidColl.update({},
+ {ts: farFutureTS, t: NumberLong(-1), begin: primaryOpTime},
+ {upsert: true, writeConcern: {w: 1}})));
// Insert a diverged oplog entry that will be truncated after restart.
var divergedTS = new Timestamp(primaryOpTime.ts.t, primaryOpTime.ts.i + 1);
log(assert.writeOK(localDB.oplog.rs.insert(
- {
- _id : 0,
- ts : divergedTS,
- op : "n",
- h: NumberLong(0),
- t : NumberLong(-1)
- })));
+ {_id: 0, ts: divergedTS, op: "n", h: NumberLong(0), t: NumberLong(-1)})));
log(localDB.oplog.rs.find().toArray());
log(assert.commandWorked(localDB.adminCommand("replSetGetStatus")));
log("restart primary");
@@ -104,18 +74,14 @@
var mv;
try {
mv = minvalidColl.findOne();
- }
- catch (e) {
+ } catch (e) {
return false;
}
- var msg = "ts !=, " + farFutureTS + "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv)
- + " - " + tsToDate(mv.ts);
+ var msg = "ts !=, " + farFutureTS + "(" + tsToDate(farFutureTS) + "), mv:" + tojson(mv) +
+ " - " + tsToDate(mv.ts);
assert.eq(farFutureTS, mv.ts, msg);
- var lastTS = localDB.oplog.rs.find().sort(
- {
- $natural : -1
- }).limit(-1).next().ts;
+ var lastTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(-1).next().ts;
log(localDB.oplog.rs.find().toArray());
assert.eq(primaryOpTime.ts, lastTS);
return true;
diff --git a/jstests/replsets/optime.js b/jstests/replsets/optime.js
index 5d64719fe8c..a716ca3dbca 100644
--- a/jstests/replsets/optime.js
+++ b/jstests/replsets/optime.js
@@ -17,9 +17,9 @@ function timestampCompare(o1, o2) {
}
function optimesAreEqual(replTest) {
- var prevStatus = replTest.nodes[0].getDB('admin').serverStatus({oplog:true}).oplog;
+ var prevStatus = replTest.nodes[0].getDB('admin').serverStatus({oplog: true}).oplog;
for (var i = 1; i < replTest.nodes.length; i++) {
- var status = replTest.nodes[i].getDB('admin').serverStatus({oplog:true}).oplog;
+ var status = replTest.nodes[i].getDB('admin').serverStatus({oplog: true}).oplog;
if (timestampCompare(prevStatus.latestOptime, status.latestOptime) != 0) {
return false;
}
@@ -28,7 +28,7 @@ function optimesAreEqual(replTest) {
return true;
}
-var replTest = new ReplSetTest( { name : "replStatus" , nodes: 3, oplogSize: 1 } );
+var replTest = new ReplSetTest({name: "replStatus", nodes: 3, oplogSize: 1});
replTest.startSet();
replTest.initiate();
@@ -38,27 +38,31 @@ replTest.awaitSecondaryNodes();
// Check initial optimes
assert(optimesAreEqual(replTest));
-var initialInfo = master.getDB('admin').serverStatus({oplog:true}).oplog;
+var initialInfo = master.getDB('admin').serverStatus({oplog: true}).oplog;
// Do an insert to increment optime, but without rolling the oplog
// latestOptime should be updated, but earliestOptime should be unchanged
-var options = { writeConcern: { w: replTest.nodes.length }};
-assert.writeOK(master.getDB('test').foo.insert({ a: 1 }, options));
+var options = {
+ writeConcern: {w: replTest.nodes.length}
+};
+assert.writeOK(master.getDB('test').foo.insert({a: 1}, options));
assert(optimesAreEqual(replTest));
-var info = master.getDB('admin').serverStatus({oplog:true}).oplog;
+var info = master.getDB('admin').serverStatus({oplog: true}).oplog;
assert.gt(timestampCompare(info.latestOptime, initialInfo.latestOptime), 0);
assert.eq(timestampCompare(info.earliestOptime, initialInfo.earliestOptime), 0);
// Insert some large documents to force the oplog to roll over
-var largeString = new Array(1024*10).toString();
+var largeString = new Array(1024 * 10).toString();
for (var i = 0; i < 2000; i++) {
- master.getDB('test').foo.insert({ largeString: largeString }, options);
+ master.getDB('test').foo.insert({largeString: largeString}, options);
}
-assert.soon(function() { return optimesAreEqual(replTest); } );
+assert.soon(function() {
+ return optimesAreEqual(replTest);
+});
// Test that earliestOptime was updated
-info = master.getDB('admin').serverStatus({oplog:true}).oplog;
+info = master.getDB('admin').serverStatus({oplog: true}).oplog;
assert.gt(timestampCompare(info.latestOptime, initialInfo.latestOptime), 0);
assert.gt(timestampCompare(info.earliestOptime, initialInfo.earliestOptime), 0);
diff --git a/jstests/replsets/pipelineout.js b/jstests/replsets/pipelineout.js
index 97accba2eec..bb86f98c4e9 100644
--- a/jstests/replsets/pipelineout.js
+++ b/jstests/replsets/pipelineout.js
@@ -1,36 +1,32 @@
// test $out in a replicated environment
var name = "pipelineout";
-var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var replTest = new ReplSetTest({name: name, nodes: 2});
var nodes = replTest.nodeList();
replTest.startSet();
-replTest.initiate({"_id" : name,
- "members" : [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1]}
- ]});
+replTest.initiate(
+ {"_id": name, "members": [{"_id": 0, "host": nodes[0]}, {"_id": 1, "host": nodes[1]}]});
var primary = replTest.getPrimary().getDB(name);
var secondary = replTest.liveNodes.slaves[0].getDB(name);
// populate the collection
-for (i=0; i<5; i++) {
- primary.in.insert({x:i});
+for (i = 0; i < 5; i++) {
+ primary.in.insert({x: i});
}
replTest.awaitReplication();
// make sure $out cannot be run on a secondary
assert.throws(function() {
- secondary.in.aggregate({$out: "out"}).itcount;
- });
+ secondary.in.aggregate({$out: "out"}).itcount;
+});
// even if slaveOk
secondary.setSlaveOk();
assert.throws(function() {
- secondary.in.aggregate({$out: "out"}).itcount;
- });
+ secondary.in.aggregate({$out: "out"}).itcount;
+});
// run one and check for proper replication
primary.in.aggregate({$out: "out"}).itcount;
replTest.awaitReplication();
-assert.eq(primary.out.find().sort( { x : 1 } ).toArray(),
- secondary.out.find().sort( { x : 1 } ).toArray());
+assert.eq(primary.out.find().sort({x: 1}).toArray(), secondary.out.find().sort({x: 1}).toArray());
diff --git a/jstests/replsets/plan_cache_slaveok.js b/jstests/replsets/plan_cache_slaveok.js
index a63be51fae1..2de5749f086 100644
--- a/jstests/replsets/plan_cache_slaveok.js
+++ b/jstests/replsets/plan_cache_slaveok.js
@@ -5,76 +5,44 @@ var name = "plan_cache_slaveok";
function assertPlanCacheCommandsSucceed(db) {
// .listQueryShapes()
- assert.commandWorked(db.runCommand({
- planCacheListQueryShapes: name
- }));
+ assert.commandWorked(db.runCommand({planCacheListQueryShapes: name}));
// .getPlansByQuery()
- assert.commandWorked(db.runCommand({
- planCacheListPlans: name,
- query: {a: 1}
- }));
+ assert.commandWorked(db.runCommand({planCacheListPlans: name, query: {a: 1}}));
// .clear()
- assert.commandWorked(db.runCommand({
- planCacheClear: name,
- query: {a: 1}
- }));
+ assert.commandWorked(db.runCommand({planCacheClear: name, query: {a: 1}}));
// setFilter
- assert.commandWorked(db.runCommand({
- planCacheSetFilter: name,
- query: {a: 1},
- indexes: [{a: 1}]
- }));
+ assert.commandWorked(
+ db.runCommand({planCacheSetFilter: name, query: {a: 1}, indexes: [{a: 1}]}));
// listFilters
- assert.commandWorked(db.runCommand({
- planCacheListFilters: name
- }));
+ assert.commandWorked(db.runCommand({planCacheListFilters: name}));
// clearFilters
- assert.commandWorked(db.runCommand({
- planCacheClearFilters: name,
- query: {a: 1}
- }));
+ assert.commandWorked(db.runCommand({planCacheClearFilters: name, query: {a: 1}}));
}
function assertPlanCacheCommandsFail(db) {
// .listQueryShapes()
- assert.commandFailed(db.runCommand({
- planCacheListQueryShapes: name
- }));
+ assert.commandFailed(db.runCommand({planCacheListQueryShapes: name}));
// .getPlansByQuery()
- assert.commandFailed(db.runCommand({
- planCacheListPlans: name,
- query: {a: 1}
- }));
+ assert.commandFailed(db.runCommand({planCacheListPlans: name, query: {a: 1}}));
// .clear()
- assert.commandFailed(db.runCommand({
- planCacheClear: name,
- query: {a: 1}
- }));
+ assert.commandFailed(db.runCommand({planCacheClear: name, query: {a: 1}}));
// setFilter
- assert.commandFailed(db.runCommand({
- planCacheSetFilter: name,
- query: {a: 1},
- indexes: [{a: 1}]
- }));
+ assert.commandFailed(
+ db.runCommand({planCacheSetFilter: name, query: {a: 1}, indexes: [{a: 1}]}));
// listFilters
- assert.commandFailed(db.runCommand({
- planCacheListFilters: name
- }));
+ assert.commandFailed(db.runCommand({planCacheListFilters: name}));
// clearFilters
- assert.commandFailed(db.runCommand({
- planCacheClearFilters: name,
- query: {a: 1}
- }));
+ assert.commandFailed(db.runCommand({planCacheClearFilters: name, query: {a: 1}}));
}
print("Start replica set with two nodes");
diff --git a/jstests/replsets/priority_takeover_cascading_priorities.js b/jstests/replsets/priority_takeover_cascading_priorities.js
index f2dc98490f4..b4559fda16f 100644
--- a/jstests/replsets/priority_takeover_cascading_priorities.js
+++ b/jstests/replsets/priority_takeover_cascading_priorities.js
@@ -3,18 +3,21 @@
// Start replica set. Ensure that highest priority node becomes primary eventually.
// Shut down the primary and confirm that the next highest priority node becomes primary.
// Repeat until 3 nodes are left standing.
-(function () {
+(function() {
'use strict';
load('jstests/replsets/rslib.js');
var name = 'priority_takeover_cascading_priorities';
- var replSet = new ReplSetTest({name: name, nodes: [
- {rsConfig: {priority: 5}},
- {rsConfig: {priority: 4}},
- {rsConfig: {priority: 3}},
- {rsConfig: {priority: 2}},
- {rsConfig: {priority: 1}},
- ]});
+ var replSet = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {priority: 5}},
+ {rsConfig: {priority: 4}},
+ {rsConfig: {priority: 3}},
+ {rsConfig: {priority: 2}},
+ {rsConfig: {priority: 1}},
+ ]
+ });
replSet.startSet();
replSet.initiate();
@@ -27,8 +30,7 @@
waitForMemberState: ReplSetTest.State.PRIMARY,
timeoutMillis: 60 * 1000,
}),
- 'node ' + i + ' ' + replSet.nodes[i].host + ' failed to become primary'
- );
+ 'node ' + i + ' ' + replSet.nodes[i].host + ' failed to become primary');
};
waitForPrimary(0);
diff --git a/jstests/replsets/priority_takeover_one_node_higher_priority.js b/jstests/replsets/priority_takeover_one_node_higher_priority.js
index aeb550966c0..e718ef131f9 100644
--- a/jstests/replsets/priority_takeover_one_node_higher_priority.js
+++ b/jstests/replsets/priority_takeover_one_node_higher_priority.js
@@ -3,16 +3,13 @@
// Wait for replica set to stabilize with higher priority node as primary.
// Step down high priority node. Wait for the lower priority electable node to become primary.
// Eventually high priority node will run a priority takeover election to become primary.
-(function () {
+(function() {
'use strict';
load('jstests/replsets/rslib.js');
var name = 'priority_takeover_one_node_higher_priority';
- var replSet = new ReplSetTest({name: name, nodes: [
- {rsConfig: {priority: 3}},
- {},
- {rsConfig: {arbiterOnly: true}},
- ]});
+ var replSet = new ReplSetTest(
+ {name: name, nodes: [{rsConfig: {priority: 3}}, {}, {rsConfig: {arbiterOnly: true}}, ]});
replSet.startSet();
replSet.initiate();
@@ -30,12 +27,14 @@
var result = primary.adminCommand({replSetStepDown: stepDownGuardMillis / 1000});
print('replSetStepDown did not throw exception but returned: ' + tojson(result));
});
- assert.neq(-1, tojson(stepDownException).indexOf('error doing query'),
+ assert.neq(-1,
+ tojson(stepDownException).indexOf('error doing query'),
'replSetStepDown did not disconnect client');
// Step down primary and wait for node 1 to be promoted to primary.
replSet.waitForState(replSet.nodes[1], ReplSetTest.State.PRIMARY, 60 * 1000);
// Eventually node 0 will stand for election again because it has a higher priorty.
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, stepDownGuardMillis + 60 * 1000);
+ replSet.waitForState(
+ replSet.nodes[0], ReplSetTest.State.PRIMARY, stepDownGuardMillis + 60 * 1000);
})();
diff --git a/jstests/replsets/priority_takeover_two_nodes_equal_priority.js b/jstests/replsets/priority_takeover_two_nodes_equal_priority.js
index 1cffd1ecef5..717e9b945e7 100644
--- a/jstests/replsets/priority_takeover_two_nodes_equal_priority.js
+++ b/jstests/replsets/priority_takeover_two_nodes_equal_priority.js
@@ -4,56 +4,51 @@
// Step down the primary and confirm that the next highest priority node becomes primary.
load('jstests/replsets/rslib.js');
-(function () {
-'use strict';
+(function() {
+ 'use strict';
-var name = 'priority_takeover_two_nodes_equal_priority';
-var replSet = new ReplSetTest({name: name, nodes: [
- {rsConfig: {priority: 3}},
- {rsConfig: {priority: 3}},
- {},
-]});
-replSet.startSet();
-replSet.initiate();
+ var name = 'priority_takeover_two_nodes_equal_priority';
+ var replSet = new ReplSetTest(
+ {name: name, nodes: [{rsConfig: {priority: 3}}, {rsConfig: {priority: 3}}, {}, ]});
+ replSet.startSet();
+ replSet.initiate();
-var primary;
-var primaryIndex = -1;
-var defaultPriorityNodeIndex = 2;
-assert.soon(
- function() {
- primary = replSet.getPrimary();
- replSet.nodes.find(function(node, index, array) {
- if (primary.host == node.host) {
- primaryIndex = index;
- return true;
- }
- return false;
- });
- return primaryIndex !== defaultPriorityNodeIndex;
- },
- 'neither of the priority 3 nodes was elected primary',
- 60000, // timeout
- 1000 // interval
-);
+ var primary;
+ var primaryIndex = -1;
+ var defaultPriorityNodeIndex = 2;
+ assert.soon(
+ function() {
+ primary = replSet.getPrimary();
+ replSet.nodes.find(function(node, index, array) {
+ if (primary.host == node.host) {
+ primaryIndex = index;
+ return true;
+ }
+ return false;
+ });
+ return primaryIndex !== defaultPriorityNodeIndex;
+ },
+ 'neither of the priority 3 nodes was elected primary',
+ 60000, // timeout
+ 1000 // interval
+ );
-try {
- assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 90}));
-} catch (x) {
- // expected
-}
-var newPrimaryIndex = primaryIndex === 0 ? 1 : 0;
+ try {
+ assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 90}));
+ } catch (x) {
+ // expected
+ }
+ var newPrimaryIndex = primaryIndex === 0 ? 1 : 0;
-// Refresh connections to nodes.
-replSet.status();
+ // Refresh connections to nodes.
+ replSet.status();
-assert.commandWorked(
- replSet.nodes[newPrimaryIndex].adminCommand({
+ assert.commandWorked(replSet.nodes[newPrimaryIndex].adminCommand({
replSetTest: 1,
waitForMemberState: ReplSetTest.State.PRIMARY,
timeoutMillis: 60 * 1000,
}),
- 'node ' + newPrimaryIndex + ' ' + replSet.nodes[newPrimaryIndex].host +
- ' failed to become primary'
-);
+ 'node ' + newPrimaryIndex + ' ' + replSet.nodes[newPrimaryIndex].host +
+ ' failed to become primary');
})();
diff --git a/jstests/replsets/protocol_version_upgrade_downgrade.js b/jstests/replsets/protocol_version_upgrade_downgrade.js
index 3d406baf717..58210853c91 100644
--- a/jstests/replsets/protocol_version_upgrade_downgrade.js
+++ b/jstests/replsets/protocol_version_upgrade_downgrade.js
@@ -2,86 +2,88 @@ load("jstests/replsets/rslib.js");
(function() {
-"use strict";
-var name = "protocol_version_upgrade_downgrade";
-var rst = new ReplSetTest({name: name, nodes: 3});
+ "use strict";
+ var name = "protocol_version_upgrade_downgrade";
+ var rst = new ReplSetTest({name: name, nodes: 3});
-rst.startSet();
-// Initiate the replset in protocol version 0.
-var conf = rst.getReplSetConfig();
-conf.settings = conf.settings || { };
-conf.protocolVersion = 0;
-// The first node will always be the primary.
-conf.members[0].priority = 1;
-conf.members[1].priority = 0;
-conf.members[2].priority = 0;
-rst.initiate(conf);
-rst.awaitSecondaryNodes();
+ rst.startSet();
+ // Initiate the replset in protocol version 0.
+ var conf = rst.getReplSetConfig();
+ conf.settings = conf.settings || {};
+ conf.protocolVersion = 0;
+ // The first node will always be the primary.
+ conf.members[0].priority = 1;
+ conf.members[1].priority = 0;
+ conf.members[2].priority = 0;
+ rst.initiate(conf);
+ rst.awaitSecondaryNodes();
-var primary = rst.getPrimary();
-var primaryColl = primary.getDB("test").coll;
+ var primary = rst.getPrimary();
+ var primaryColl = primary.getDB("test").coll;
-// Set verbosity for replication on all nodes.
-var verbosity = {
- "setParameter" : 1,
- "logComponentVerbosity" : {
- "replication" : { "verbosity" : 3 },
- }
-};
-primary.adminCommand(verbosity);
-rst.getSecondaries().forEach(function (node) {node.adminCommand(verbosity);});
+ // Set verbosity for replication on all nodes.
+ var verbosity = {
+ "setParameter": 1,
+ "logComponentVerbosity": {
+ "replication": {"verbosity": 3},
+ }
+ };
+ primary.adminCommand(verbosity);
+ rst.getSecondaries().forEach(function(node) {
+ node.adminCommand(verbosity);
+ });
-// Do a write, this will set up sync sources on secondaries.
-print("do a write");
-assert.writeOK(primaryColl.bar.insert({x: 1}, {writeConcern: {w: 3}}));
-// Check optime format in protocol version 0, which is a Timestamp.
-var res = primary.adminCommand({replSetGetStatus: 1});
-assert.commandWorked(res);
-// Check the optime is a Timestamp, not an OpTime { ts: Timestamp, t: int }
-assert.eq(res.members[0].optime.ts, null);
+ // Do a write, this will set up sync sources on secondaries.
+ print("do a write");
+ assert.writeOK(primaryColl.bar.insert({x: 1}, {writeConcern: {w: 3}}));
+ // Check optime format in protocol version 0, which is a Timestamp.
+ var res = primary.adminCommand({replSetGetStatus: 1});
+ assert.commandWorked(res);
+ // Check the optime is a Timestamp, not an OpTime { ts: Timestamp, t: int }
+ assert.eq(res.members[0].optime.ts, null);
-//
-// Upgrade protocol version
-//
-res = primary.adminCommand({replSetGetConfig: 1});
-assert.commandWorked(res);
-conf = res.config;
-assert.eq(conf.protocolVersion, undefined);
-// Change protocol version
-conf.protocolVersion = 1;
-conf.version++;
-reconfig(rst, conf);
-// This write may block until all nodes finish upgrade, because replSetUpdatePosition may be
-// rejected by the primary for mismatched config version before secondaries get reconfig.
-// This will make secondaries wait for 0.5 seconds and retry.
-assert.writeOK(primaryColl.bar.insert({x: 2}, {writeConcern: {w: 3}}));
+ //
+ // Upgrade protocol version
+ //
+ res = primary.adminCommand({replSetGetConfig: 1});
+ assert.commandWorked(res);
+ conf = res.config;
+ assert.eq(conf.protocolVersion, undefined);
+ // Change protocol version
+ conf.protocolVersion = 1;
+ conf.version++;
+ reconfig(rst, conf);
+ // This write may block until all nodes finish upgrade, because replSetUpdatePosition may be
+ // rejected by the primary for mismatched config version before secondaries get reconfig.
+ // This will make secondaries wait for 0.5 seconds and retry.
+ assert.writeOK(primaryColl.bar.insert({x: 2}, {writeConcern: {w: 3}}));
-// Check optime format in protocol version 1, which is an object including the term.
-res = primary.adminCommand({replSetGetStatus: 1});
-assert.commandWorked(res);
-assert.eq(res.members[0].optime.t, NumberLong(0));
+ // Check optime format in protocol version 1, which is an object including the term.
+ res = primary.adminCommand({replSetGetStatus: 1});
+ assert.commandWorked(res);
+ assert.eq(res.members[0].optime.t, NumberLong(0));
-// Check last vote.
-var lastVote = primary.getDB("local")['replset.election'].findOne();
-assert.eq(lastVote.term, NumberLong(0));
-assert.eq(lastVote.candidateIndex, NumberLong(-1));
+ // Check last vote.
+ var lastVote = primary.getDB("local")['replset.election'].findOne();
+ assert.eq(lastVote.term, NumberLong(0));
+ assert.eq(lastVote.candidateIndex, NumberLong(-1));
-//
-// Downgrade protocol version
-//
-res = primary.adminCommand({replSetGetConfig: 1});
-assert.commandWorked(res);
-conf = res.config;
-assert.eq(conf.protocolVersion, 1);
-// Change protocol version
-conf.protocolVersion = 0;
-conf.version++;
-reconfig(rst, conf);
-assert.writeOK(primaryColl.bar.insert({x: 3}, {writeConcern: {w: 3}}));
+ //
+ // Downgrade protocol version
+ //
+ res = primary.adminCommand({replSetGetConfig: 1});
+ assert.commandWorked(res);
+ conf = res.config;
+ assert.eq(conf.protocolVersion, 1);
+ // Change protocol version
+ conf.protocolVersion = 0;
+ conf.version++;
+ reconfig(rst, conf);
+ assert.writeOK(primaryColl.bar.insert({x: 3}, {writeConcern: {w: 3}}));
-// Check optime format in protocol version 0, which is a Timestamp.
-res = primary.adminCommand({replSetGetStatus: 1});
-assert.commandWorked(res);
-assert.eq(res.members[0].optime.ts, null);
+ // Check optime format in protocol version 0, which is a Timestamp.
+ res = primary.adminCommand({replSetGetStatus: 1});
+ assert.commandWorked(res);
+ assert.eq(res.members[0].optime.ts, null);
})();
diff --git a/jstests/replsets/read_after_optime.js b/jstests/replsets/read_after_optime.js
index cff0896344e..30cf7782679 100644
--- a/jstests/replsets/read_after_optime.js
+++ b/jstests/replsets/read_after_optime.js
@@ -1,108 +1,104 @@
// Test read after opTime functionality with maxTimeMS.
(function() {
-"use strict";
+ "use strict";
-var replTest = new ReplSetTest({ nodes: 2 });
-replTest.startSet();
-replTest.initiate();
-var config = replTest.getReplSetConfigFromNode();
+ var replTest = new ReplSetTest({nodes: 2});
+ replTest.startSet();
+ replTest.initiate();
+ var config = replTest.getReplSetConfigFromNode();
-var runTest = function(testDB, primaryConn) {
- primaryConn.getDB('test').user.insert({ x: 1 }, { writeConcern: { w: 2 }});
+ var runTest = function(testDB, primaryConn) {
+ primaryConn.getDB('test').user.insert({x: 1}, {writeConcern: {w: 2}});
- var localDB = primaryConn.getDB('local');
+ var localDB = primaryConn.getDB('local');
- var oplogTS = localDB.oplog.rs.find().sort({ $natural: -1 }).limit(1).next();
- var twoSecTS = new Timestamp(oplogTS.ts.getTime() + 2, 0);
+ var oplogTS = localDB.oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ var twoSecTS = new Timestamp(oplogTS.ts.getTime() + 2, 0);
- var term = -1;
- if (config.protocolVersion === 1) {
- term = oplogTS.t;
- }
+ var term = -1;
+ if (config.protocolVersion === 1) {
+ term = oplogTS.t;
+ }
- // Test timeout with maxTimeMS
- var runTimeoutTest = function() {
- var timeoutResult = assert.commandFailedWithCode(
- testDB.runCommand({
+ // Test timeout with maxTimeMS
+ var runTimeoutTest = function() {
+ var timeoutResult = assert.commandFailedWithCode(testDB.runCommand({
find: 'user',
- filter: { x: 1 },
- readConcern: {
- afterOpTime: { ts: twoSecTS, t: term }
- },
+ filter: {x: 1},
+ readConcern: {afterOpTime: {ts: twoSecTS, t: term}},
maxTimeMS: 5000,
}),
- ErrorCodes.ExceededTimeLimit
- );
- assert.gt(timeoutResult.waitedMS, 500);
- };
-
- var countLogMessages = function(msg) {
- var total = 0;
- var logMessages = assert.commandWorked(testDB.adminCommand({getLog: 'global'})).log;
- for (var i = 0; i < logMessages.length; i++) {
- if (logMessages[i].indexOf(msg) != -1) {
- total++;
+ ErrorCodes.ExceededTimeLimit);
+ assert.gt(timeoutResult.waitedMS, 500);
+ };
+
+ var countLogMessages = function(msg) {
+ var total = 0;
+ var logMessages = assert.commandWorked(testDB.adminCommand({getLog: 'global'})).log;
+ for (var i = 0; i < logMessages.length; i++) {
+ if (logMessages[i].indexOf(msg) != -1) {
+ total++;
+ }
}
- }
- return total;
- };
-
- var checkLog = function(msg, expectedCount) {
- var count;
- assert.soon(function() {
- count = countLogMessages(msg);
- return expectedCount == count;
+ return total;
+ };
+
+ var checkLog = function(msg, expectedCount) {
+ var count;
+ assert.soon(
+ function() {
+ count = countLogMessages(msg);
+ return expectedCount == count;
+ },
+ 'Expected ' + expectedCount + ', but instead saw ' + count +
+ ' log entries containing the following message: ' + msg,
+ 60000,
+ 300);
+ };
+
+ // Run the time out test 3 times with replication debug log level increased to 2
+ // for first and last run. The time out message should be logged twice.
+ testDB.setLogLevel(2, 'command');
+ runTimeoutTest();
+ testDB.setLogLevel(0, 'command');
+
+ var msg = 'Command on database ' + testDB.getName() +
+ ' timed out waiting for read concern to be satisfied. Command:';
+ checkLog(msg, 1);
+
+ // Read concern timed out message should not be logged.
+ runTimeoutTest();
+
+ testDB.setLogLevel(2, 'command');
+ runTimeoutTest();
+ testDB.setLogLevel(0, 'command');
+
+ checkLog(msg, 2);
+
+ // Test read on future afterOpTime that will eventually occur.
+ var insertFunc = startParallelShell(
+ "sleep(2100); db.user.insert({ y: 1 }, { writeConcern: { w: 2 }});", primaryConn.port);
+
+ var res = assert.commandWorked(testDB.runCommand({
+ find: 'user',
+ filter: {x: 1},
+ readConcern: {
+ afterOpTime: {ts: twoSecTS, t: term},
},
- 'Expected ' + expectedCount + ', but instead saw ' + count +
- ' log entries containing the following message: ' + msg,
- 60000,
- 300);
- };
+ maxTimeMS: 10 * 1000,
+ }));
- // Run the time out test 3 times with replication debug log level increased to 2
- // for first and last run. The time out message should be logged twice.
- testDB.setLogLevel(2, 'command');
- runTimeoutTest();
- testDB.setLogLevel(0, 'command');
+ assert.eq(null, res.code);
+ assert.gt(res.waitedMS, 0);
- var msg = 'Command on database ' + testDB.getName() +
- ' timed out waiting for read concern to be satisfied. Command:';
- checkLog(msg, 1);
-
- // Read concern timed out message should not be logged.
- runTimeoutTest();
-
- testDB.setLogLevel(2, 'command');
- runTimeoutTest();
- testDB.setLogLevel(0, 'command');
-
- checkLog(msg, 2);
-
- // Test read on future afterOpTime that will eventually occur.
- var insertFunc = startParallelShell(
- "sleep(2100); db.user.insert({ y: 1 }, { writeConcern: { w: 2 }});",
- primaryConn.port);
-
- var res = assert.commandWorked(testDB.runCommand({
- find: 'user',
- filter: { x: 1 },
- readConcern: {
- afterOpTime: { ts: twoSecTS, t: term },
- },
- maxTimeMS: 10 * 1000,
- }));
-
- assert.eq(null, res.code);
- assert.gt(res.waitedMS, 0);
-
- insertFunc();
-};
+ insertFunc();
+ };
-var primary = replTest.getPrimary();
-runTest(primary.getDB('test'), primary);
-runTest(replTest.getSecondary().getDB('test'), primary);
+ var primary = replTest.getPrimary();
+ runTest(primary.getDB('test'), primary);
+ runTest(replTest.getSecondary().getDB('test'), primary);
-replTest.stopSet();
+ replTest.stopSet();
})();
diff --git a/jstests/replsets/read_committed.js b/jstests/replsets/read_committed.js
index 2ed51300534..02b220c33e4 100644
--- a/jstests/replsets/read_committed.js
+++ b/jstests/replsets/read_committed.js
@@ -11,68 +11,69 @@
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
-"use strict";
+ "use strict";
-// Set up a set and grab things for later.
-var name = "read_committed";
-var replTest = new ReplSetTest({name: name,
- nodes: 3,
- nodeOptions: {enableMajorityReadConcern: ''}});
+ // Set up a set and grab things for later.
+ var name = "read_committed";
+ var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
-if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- return;
-}
+ if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ return;
+ }
-var nodes = replTest.nodeList();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0] },
- { "_id": 1, "host": nodes[1], priority: 0 },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
+ var nodes = replTest.nodeList();
+ replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+ });
-// Get connections and collection.
-var primary = replTest.getPrimary();
-var secondary = replTest.liveNodes.slaves[0];
-var secondaryId = replTest.getNodeId(secondary);
-var db = primary.getDB(name);
-var t = db[name];
+ // Get connections and collection.
+ var primary = replTest.getPrimary();
+ var secondary = replTest.liveNodes.slaves[0];
+ var secondaryId = replTest.getNodeId(secondary);
+ var db = primary.getDB(name);
+ var t = db[name];
-function doDirtyRead() {
- var res = t.runCommand('find', {"readConcern": {"level": "local"}});
- assert.commandWorked(res);
- return new DBCommandCursor(db.getMongo(), res).toArray()[0].state;
-}
+ function doDirtyRead() {
+ var res = t.runCommand('find', {"readConcern": {"level": "local"}});
+ assert.commandWorked(res);
+ return new DBCommandCursor(db.getMongo(), res).toArray()[0].state;
+ }
-function doCommittedRead() {
- var res = t.runCommand('find', {"readConcern": {"level": "majority"}});
- assert.commandWorked(res);
- return new DBCommandCursor(db.getMongo(), res).toArray()[0].state;
-}
+ function doCommittedRead() {
+ var res = t.runCommand('find', {"readConcern": {"level": "majority"}});
+ assert.commandWorked(res);
+ return new DBCommandCursor(db.getMongo(), res).toArray()[0].state;
+ }
-// Do a write, wait for it to replicate, and ensure it is visible.
-assert.writeOK(t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 60*1000}}));
-assert.eq(doDirtyRead(), 0);
-assert.eq(doCommittedRead(), 0);
+ // Do a write, wait for it to replicate, and ensure it is visible.
+ assert.writeOK(
+ t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 60 * 1000}}));
+ assert.eq(doDirtyRead(), 0);
+ assert.eq(doCommittedRead(), 0);
-replTest.stop(secondaryId);
+ replTest.stop(secondaryId);
-// Do a write and ensure it is only visible to dirty reads
-assert.writeOK(t.save({_id: 1, state: 1}));
-assert.eq(doDirtyRead(), 1);
-assert.eq(doCommittedRead(), 0);
+ // Do a write and ensure it is only visible to dirty reads
+ assert.writeOK(t.save({_id: 1, state: 1}));
+ assert.eq(doDirtyRead(), 1);
+ assert.eq(doCommittedRead(), 0);
-// Try the committed read again after sleeping to ensure it doesn't only work for queries
-// immediately after the write.
-sleep(1000);
-assert.eq(doCommittedRead(), 0);
-
-// Restart the node and ensure the committed view is updated.
-replTest.restart(secondaryId);
-db.getLastError("majority", 60 * 1000);
-assert.eq(doDirtyRead(), 1);
-assert.eq(doCommittedRead(), 1);
+ // Try the committed read again after sleeping to ensure it doesn't only work for queries
+ // immediately after the write.
+ sleep(1000);
+ assert.eq(doCommittedRead(), 0);
+ // Restart the node and ensure the committed view is updated.
+ replTest.restart(secondaryId);
+ db.getLastError("majority", 60 * 1000);
+ assert.eq(doDirtyRead(), 1);
+ assert.eq(doCommittedRead(), 1);
}());
diff --git a/jstests/replsets/read_committed_no_snapshots.js b/jstests/replsets/read_committed_no_snapshots.js
index 2abf15beb2d..25eb18a9cae 100644
--- a/jstests/replsets/read_committed_no_snapshots.js
+++ b/jstests/replsets/read_committed_no_snapshots.js
@@ -10,73 +10,74 @@
load("jstests/replsets/rslib.js"); // For reconfig and startSetIfSupportsReadMajority.
(function() {
-"use strict";
+ "use strict";
-// Set up a set and grab things for later.
-var name = "read_committed_no_snapshots";
-var replTest = new ReplSetTest({name: name,
- nodes: 3,
- nodeOptions: {enableMajorityReadConcern: ''}});
+ // Set up a set and grab things for later.
+ var name = "read_committed_no_snapshots";
+ var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
-if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- return;
-}
+ if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ return;
+ }
-var nodes = replTest.nodeList();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0] },
- { "_id": 1, "host": nodes[1], priority: 0 },
- { "_id": 2, "host": nodes[2], arbiterOnly: true }],
- "protocolVersion": 1
- });
+ var nodes = replTest.nodeList();
+ replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ],
+ "protocolVersion": 1
+ });
-// Get connections and collection.
-var primary = replTest.getPrimary();
-var secondary = replTest.liveNodes.slaves[0];
-var secondaryId = replTest.getNodeId(secondary);
-var db = primary.getDB(name);
+ // Get connections and collection.
+ var primary = replTest.getPrimary();
+ var secondary = replTest.liveNodes.slaves[0];
+ var secondaryId = replTest.getNodeId(secondary);
+ var db = primary.getDB(name);
-// Do a write, wait for it to replicate, and ensure it is visible.
-var res = db.runCommandWithMetadata(
- "insert",
- {
- insert: "foo",
- documents: [{_id: 1, state: 0}],
- writeConcern: {w: "majority", wtimeout: 60*1000}
- },
- {"$replData": 1});
-assert.commandWorked(res.commandReply);
+ // Do a write, wait for it to replicate, and ensure it is visible.
+ var res = db.runCommandWithMetadata("insert",
+ {
+ insert: "foo",
+ documents: [{_id: 1, state: 0}],
+ writeConcern: {w: "majority", wtimeout: 60 * 1000}
+ },
+ {"$replData": 1});
+ assert.commandWorked(res.commandReply);
-// We need to propagate the lastOpVisible from the primary as afterOpTime in the secondary to ensure
-// we wait for the write to be in the majority committed view.
-var lastOp = res.metadata["$replData"].lastOpVisible;
+    // We need to propagate the lastOpVisible from the primary as afterOpTime
+    // in the secondary to ensure we wait for the write to be in the majority
+    // committed view.
+ var lastOp = res.metadata["$replData"].lastOpVisible;
-secondary.setSlaveOk();
-// Timeout is based on heartbeat timeout.
-assert.commandWorked(secondary.getDB(name).foo.runCommand(
- 'find', {"readConcern": {"level": "majority", "afterOpTime": lastOp},
- "maxTimeMS": 10 * 1000}));
+ secondary.setSlaveOk();
+ // Timeout is based on heartbeat timeout.
+ assert.commandWorked(secondary.getDB(name).foo.runCommand(
+ 'find',
+ {"readConcern": {"level": "majority", "afterOpTime": lastOp}, "maxTimeMS": 10 * 1000}));
-// Disable snapshotting via failpoint
-secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'});
+ // Disable snapshotting via failpoint
+ secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'});
-// Resync to drop any existing snapshots
-secondary.adminCommand({resync: 1});
-
-// Ensure maxTimeMS times out while waiting for this snapshot
-assert.commandFailed(secondary.getDB(name).foo.runCommand(
- 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000}));
+ // Resync to drop any existing snapshots
+ secondary.adminCommand({resync: 1});
-// Reconfig to make the secondary the primary
-var config = primary.getDB("local").system.replset.findOne();
-config.members[0].priority = 0;
-config.members[1].priority = 3;
-config.version++;
-primary = reconfig(replTest, config, true);
+ // Ensure maxTimeMS times out while waiting for this snapshot
+ assert.commandFailed(secondary.getDB(name).foo.runCommand(
+ 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000}));
-// Ensure maxTimeMS times out while waiting for this snapshot
-assert.commandFailed(primary.getSiblingDB(name).foo.runCommand(
- 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000}));
+ // Reconfig to make the secondary the primary
+ var config = primary.getDB("local").system.replset.findOne();
+ config.members[0].priority = 0;
+ config.members[1].priority = 3;
+ config.version++;
+ primary = reconfig(replTest, config, true);
+
+ // Ensure maxTimeMS times out while waiting for this snapshot
+ assert.commandFailed(primary.getSiblingDB(name).foo.runCommand(
+ 'find', {"readConcern": {"level": "majority"}, "maxTimeMS": 1000}));
}());
diff --git a/jstests/replsets/read_committed_on_secondary.js b/jstests/replsets/read_committed_on_secondary.js
index 207b1e0373c..468007b7ee8 100644
--- a/jstests/replsets/read_committed_on_secondary.js
+++ b/jstests/replsets/read_committed_on_secondary.js
@@ -9,87 +9,89 @@
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
-"use strict";
-
-// Set up a set and grab things for later.
-var name = "read_committed_on_secondary";
-var replTest = new ReplSetTest({name: name,
- nodes: 3,
- nodeOptions: {enableMajorityReadConcern: ''}});
-
-if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- return;
-}
-
-var nodes = replTest.nodeList();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0] },
- { "_id": 1, "host": nodes[1], priority: 0 },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
-
-// Get connections and collection.
-var primary = replTest.getPrimary();
-var secondary = replTest.liveNodes.slaves[0];
-var secondaryId = replTest.getNodeId(secondary);
-
-var dbPrimary = primary.getDB(name);
-var collPrimary = dbPrimary[name];
-
-var dbSecondary = secondary.getDB(name);
-var collSecondary = dbSecondary[name];
-
-function saveDoc(state) {
- var res = dbPrimary.runCommandWithMetadata(
- 'update',
- {
- update: name,
- writeConcern: {w: 2, wtimeout: 60*1000},
- updates: [{q: {_id: 1}, u: {_id: 1, state: state}, upsert: true}],
- },
- {"$replData": 1});
- assert.commandWorked(res.commandReply);
- assert.eq(res.commandReply.writeErrors, undefined);
- return res.metadata.$replData.lastOpVisible;
-}
-
-function doDirtyRead(lastOp) {
- var res = collSecondary.runCommand('find', {"readConcern": {"level": "local",
- "afterOpTime": lastOp}});
- assert.commandWorked(res);
- return new DBCommandCursor(secondary, res).toArray()[0].state;
-}
-
-function doCommittedRead(lastOp) {
- var res = collSecondary.runCommand('find', {"readConcern": {"level": "majority",
- "afterOpTime": lastOp}});
- assert.commandWorked(res);
- return new DBCommandCursor(secondary, res).toArray()[0].state;
-}
-
-// Do a write, wait for it to replicate, and ensure it is visible.
-var op0 = saveDoc(0);
-assert.eq(doDirtyRead(op0), 0);
-assert.eq(doCommittedRead(op0), 0);
-
-// Disable snapshotting on the secondary.
-secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'});
-
-// Do a write and ensure it is only visible to dirty reads
-var op1 = saveDoc(1);
-assert.eq(doDirtyRead(op1), 1);
-assert.eq(doCommittedRead(op0), 0);
-
-// Try the committed read again after sleeping to ensure it doesn't only work for queries
-// immediately after the write.
-sleep(1000);
-assert.eq(doCommittedRead(op0), 0);
-
-// Reenable snapshotting on the secondary and ensure that committed reads are able to see the new
-// state.
-secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'});
-assert.eq(doDirtyRead(op1), 1);
-assert.eq(doCommittedRead(op1), 1);
+ "use strict";
+
+ // Set up a set and grab things for later.
+ var name = "read_committed_on_secondary";
+ var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
+
+ if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ return;
+ }
+
+ var nodes = replTest.nodeList();
+ replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+ });
+
+ // Get connections and collection.
+ var primary = replTest.getPrimary();
+ var secondary = replTest.liveNodes.slaves[0];
+ var secondaryId = replTest.getNodeId(secondary);
+
+ var dbPrimary = primary.getDB(name);
+ var collPrimary = dbPrimary[name];
+
+ var dbSecondary = secondary.getDB(name);
+ var collSecondary = dbSecondary[name];
+
+ function saveDoc(state) {
+ var res = dbPrimary.runCommandWithMetadata(
+ 'update',
+ {
+ update: name,
+ writeConcern: {w: 2, wtimeout: 60 * 1000},
+ updates: [{q: {_id: 1}, u: {_id: 1, state: state}, upsert: true}],
+ },
+ {"$replData": 1});
+ assert.commandWorked(res.commandReply);
+ assert.eq(res.commandReply.writeErrors, undefined);
+ return res.metadata.$replData.lastOpVisible;
+ }
+
+ function doDirtyRead(lastOp) {
+ var res = collSecondary.runCommand(
+ 'find', {"readConcern": {"level": "local", "afterOpTime": lastOp}});
+ assert.commandWorked(res);
+ return new DBCommandCursor(secondary, res).toArray()[0].state;
+ }
+
+ function doCommittedRead(lastOp) {
+ var res = collSecondary.runCommand(
+ 'find', {"readConcern": {"level": "majority", "afterOpTime": lastOp}});
+ assert.commandWorked(res);
+ return new DBCommandCursor(secondary, res).toArray()[0].state;
+ }
+
+ // Do a write, wait for it to replicate, and ensure it is visible.
+ var op0 = saveDoc(0);
+ assert.eq(doDirtyRead(op0), 0);
+ assert.eq(doCommittedRead(op0), 0);
+
+ // Disable snapshotting on the secondary.
+ secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'alwaysOn'});
+
+ // Do a write and ensure it is only visible to dirty reads
+ var op1 = saveDoc(1);
+ assert.eq(doDirtyRead(op1), 1);
+ assert.eq(doCommittedRead(op0), 0);
+
+ // Try the committed read again after sleeping to ensure it doesn't only work for queries
+ // immediately after the write.
+ sleep(1000);
+ assert.eq(doCommittedRead(op0), 0);
+
+    // Re-enable snapshotting on the secondary and ensure that committed
+    // reads are able to see
+    // the new state.
+ secondary.adminCommand({configureFailPoint: 'disableSnapshotting', mode: 'off'});
+ assert.eq(doDirtyRead(op1), 1);
+ assert.eq(doCommittedRead(op1), 1);
}());
diff --git a/jstests/replsets/read_majority_two_arbs.js b/jstests/replsets/read_majority_two_arbs.js
index 22447975e4b..1995f907dfa 100644
--- a/jstests/replsets/read_majority_two_arbs.js
+++ b/jstests/replsets/read_majority_two_arbs.js
@@ -8,52 +8,54 @@
load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
-"use strict";
-
-// Set up a set and grab things for later.
-var name = "read_majority_two_arbs";
-var replTest = new ReplSetTest({name: name,
- nodes: 3,
- nodeOptions: {enableMajorityReadConcern: ''}});
-
-if (!startSetIfSupportsReadMajority(replTest)) {
- jsTest.log("skipping test since storage engine doesn't support committed reads");
- return;
-}
-
-var nodes = replTest.nodeList();
-replTest.initiate({"_id": name,
- "members": [
- {"_id": 0, "host": nodes[0]},
- {"_id": 1, "host": nodes[1], arbiterOnly: true},
- {"_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
-
-var primary = replTest.getPrimary();
-var db = primary.getDB(name);
-var t = db[name];
-
-function doRead(readConcern) {
- var res = assert.commandWorked(t.runCommand('find', readConcern));
- var docs = (new DBCommandCursor(db.getMongo(), res)).toArray();
- assert.gt(docs.length, 0, "no docs returned!");
- return docs[0].state;
-}
-
-function doDirtyRead() {
- return doRead({"readConcern": {"level": "local"}});
-}
-
-function doCommittedRead() {
- return doRead({"readConcern": {"level": "majority"}});
-}
-
-jsTest.log("doing write");
-assert.writeOK(t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 10*1000}}));
-jsTest.log("doing read");
-assert.eq(doDirtyRead(), 0);
-jsTest.log("doing committed read");
-assert.eq(doCommittedRead(), 0);
-jsTest.log("stopping replTest; test completed successfully");
-replTest.stopSet();
+ "use strict";
+
+ // Set up a set and grab things for later.
+ var name = "read_majority_two_arbs";
+ var replTest =
+ new ReplSetTest({name: name, nodes: 3, nodeOptions: {enableMajorityReadConcern: ''}});
+
+ if (!startSetIfSupportsReadMajority(replTest)) {
+ jsTest.log("skipping test since storage engine doesn't support committed reads");
+ return;
+ }
+
+ var nodes = replTest.nodeList();
+ replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], arbiterOnly: true},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+ });
+
+ var primary = replTest.getPrimary();
+ var db = primary.getDB(name);
+ var t = db[name];
+
+ function doRead(readConcern) {
+ var res = assert.commandWorked(t.runCommand('find', readConcern));
+ var docs = (new DBCommandCursor(db.getMongo(), res)).toArray();
+ assert.gt(docs.length, 0, "no docs returned!");
+ return docs[0].state;
+ }
+
+ function doDirtyRead() {
+ return doRead({"readConcern": {"level": "local"}});
+ }
+
+ function doCommittedRead() {
+ return doRead({"readConcern": {"level": "majority"}});
+ }
+
+ jsTest.log("doing write");
+ assert.writeOK(
+ t.save({_id: 1, state: 0}, {writeConcern: {w: "majority", wtimeout: 10 * 1000}}));
+ jsTest.log("doing read");
+ assert.eq(doDirtyRead(), 0);
+ jsTest.log("doing committed read");
+ assert.eq(doCommittedRead(), 0);
+ jsTest.log("stopping replTest; test completed successfully");
+ replTest.stopSet();
}());
diff --git a/jstests/replsets/reconfig.js b/jstests/replsets/reconfig.js
index c9dac4f6a9e..1351aa1d54b 100644
--- a/jstests/replsets/reconfig.js
+++ b/jstests/replsets/reconfig.js
@@ -2,10 +2,10 @@
* Simple test to ensure that an invalid reconfig fails, a valid one succeeds, and a reconfig won't
* succeed without force if force is needed.
*/
-(function () {
+(function() {
"use strict";
var numNodes = 5;
- var replTest = new ReplSetTest({ name: 'testSet', nodes: numNodes });
+ var replTest = new ReplSetTest({name: 'testSet', nodes: numNodes});
var nodes = replTest.startSet();
replTest.initiate();
@@ -23,10 +23,14 @@
jsTestLog("Invalid reconfig");
config.version++;
- var badMember = {_id: numNodes, host: "localhost:12345", priority: "High"};
+ var badMember = {
+ _id: numNodes,
+ host: "localhost:12345",
+ priority: "High"
+ };
config.members.push(badMember);
var invalidConfigCode = 93;
- assert.commandFailedWithCode(primary.adminCommand({replSetReconfig : config}),
+ assert.commandFailedWithCode(primary.adminCommand({replSetReconfig: config}),
invalidConfigCode);
jsTestLog("No force when needed.");
@@ -35,8 +39,7 @@
config.members[nodes.indexOf(secondary)].priority = 5;
var admin = secondary.getDB("admin");
var forceRequiredCode = 10107;
- assert.commandFailedWithCode(admin.runCommand({replSetReconfig: config}),
- forceRequiredCode);
+ assert.commandFailedWithCode(admin.runCommand({replSetReconfig: config}), forceRequiredCode);
jsTestLog("Force when appropriate");
assert.commandWorked(admin.runCommand({replSetReconfig: config, force: true}));
diff --git a/jstests/replsets/reconfig_prohibits_w0.js b/jstests/replsets/reconfig_prohibits_w0.js
index 9e96fc632ed..4b785d76e90 100644
--- a/jstests/replsets/reconfig_prohibits_w0.js
+++ b/jstests/replsets/reconfig_prohibits_w0.js
@@ -10,9 +10,7 @@ var nodes = replTest.nodeList();
var conns = replTest.startSet();
var admin = conns[0].getDB("admin");
-replTest.initiate({
- _id: 'prohibit_w0',
- members: [{_id: 0, host: nodes[0]}]});
+replTest.initiate({_id: 'prohibit_w0', members: [{_id: 0, host: nodes[0]}]});
function testReconfig(gleDefaults) {
var conf = admin.runCommand({replSetGetConfig: 1}).config;
@@ -28,13 +26,11 @@ function testReconfig(gleDefaults) {
/*
* Try to reconfig with w: 0 in getLastErrorDefaults.
*/
-testReconfig({
- getLastErrorDefaults: {w: 0}});
+testReconfig({getLastErrorDefaults: {w: 0}});
/*
* Try to reconfig with w: 0 and other options in getLastErrorDefaults.
*/
-testReconfig({
- getLastErrorDefaults: {w: 0, j: false, wtimeout: 100, fsync: true}});
+testReconfig({getLastErrorDefaults: {w: 0, j: false, wtimeout: 100, fsync: true}});
replTest.stopSet();
diff --git a/jstests/replsets/reconfig_tags.js b/jstests/replsets/reconfig_tags.js
index 3fa4cb71041..3c4d0e2616d 100644
--- a/jstests/replsets/reconfig_tags.js
+++ b/jstests/replsets/reconfig_tags.js
@@ -1,7 +1,7 @@
// test that reconfigging only tag changes is properly reflected in isMaster
-var replTest = new ReplSetTest({ nodes: 2 });
-replTest.startSet({ oplogSize: 10 });
+var replTest = new ReplSetTest({nodes: 2});
+replTest.startSet({oplogSize: 10});
replTest.initiate();
replTest.awaitSecondaryNodes();
@@ -12,9 +12,15 @@ var rsConfig = primary.getDB("local").system.replset.findOne();
jsTest.log('got rsconf ' + tojson(rsConfig));
rsConfig.members.forEach(function(member) {
if (member.host == primary.host) {
- member.tags = { dc: 'ny', tag: 'one' };
+ member.tags = {
+ dc: 'ny',
+ tag: 'one'
+ };
} else {
- member.tags = { dc: 'ny', tag: 'two' };
+ member.tags = {
+ dc: 'ny',
+ tag: 'two'
+ };
}
});
@@ -23,10 +29,9 @@ rsConfig.version++;
jsTest.log('new rsconf ' + tojson(rsConfig));
try {
- var res = primary.adminCommand({ replSetReconfig: rsConfig });
- jsTest.log('reconfig res: ' + tojson(res)); // Should not see this
-}
-catch(e) {
+ var res = primary.adminCommand({replSetReconfig: rsConfig});
+ jsTest.log('reconfig res: ' + tojson(res)); // Should not see this
+} catch (e) {
jsTest.log('replSetReconfig error: ' + e);
}
@@ -35,7 +40,7 @@ replTest.awaitSecondaryNodes();
var testDB = primary.getDB('test');
var newConn = new Mongo(primary.host);
-var isMaster = newConn.adminCommand({ isMaster: 1 });
+var isMaster = newConn.adminCommand({isMaster: 1});
assert(isMaster.tags != null, 'isMaster: ' + tojson(isMaster));
print('success: ' + tojson(isMaster));
diff --git a/jstests/replsets/reconfig_without_increased_queues.js b/jstests/replsets/reconfig_without_increased_queues.js
index 2ae45988fa4..d54ab6c253b 100644
--- a/jstests/replsets/reconfig_without_increased_queues.js
+++ b/jstests/replsets/reconfig_without_increased_queues.js
@@ -2,11 +2,11 @@
* Test which configures various configs (hidden/priorities/no-chaining) that replExec queues
* stay at reasonable/stable levels after repeated reconfigs/stepdowns
*/
-(function () {
+(function() {
"use strict";
var numNodes = 5;
var maxQueueSizeExpected = 11;
- var replTest = new ReplSetTest({ name: 'testSet', nodes: numNodes });
+ var replTest = new ReplSetTest({name: 'testSet', nodes: numNodes});
var nodes = replTest.startSet();
replTest.initiate();
@@ -53,15 +53,14 @@
return false;
}
return true;
- }, "queues too high", 13 * 1000 /*13 secs*/); // what we are looking for has a 10s timeout.
+ }, "queues too high", 13 * 1000 /*13 secs*/); // what we are looking for has a 10s timeout.
};
var reconfig = function(newConfig) {
newConfig.version += 1;
try {
assert.commandWorked(replTest.getPrimary().adminCommand({replSetReconfig: newConfig}));
- }
- catch (e) {
+ } catch (e) {
if (tojson(e).indexOf("error doing query: failed") < 0) {
throw e;
}
@@ -77,7 +76,7 @@
c.members[2].priority = 0;
reconfig(c);
- for(var i=0;i<50;i++) {
+ for (var i = 0; i < 50; i++) {
reconfig(c);
testQueues();
}
@@ -89,7 +88,7 @@
c.members[4].priority = 1000;
reconfig(c);
- for(var i=0;i<50;i++) {
+ for (var i = 0; i < 50; i++) {
reconfig(c);
testQueues();
}
diff --git a/jstests/replsets/reindex_secondary.js b/jstests/replsets/reindex_secondary.js
index 42c2149126c..8c812a068ad 100644
--- a/jstests/replsets/reindex_secondary.js
+++ b/jstests/replsets/reindex_secondary.js
@@ -1,4 +1,4 @@
-var replTest = new ReplSetTest( {name: 'reindexTest', nodes: 2} );
+var replTest = new ReplSetTest({name: 'reindexTest', nodes: 2});
var nodes = replTest.startSet();
@@ -8,23 +8,22 @@ var master = replTest.getPrimary();
replTest.awaitSecondaryNodes();
var slaves = replTest.liveNodes.slaves;
-assert( slaves.length == 1, "Expected 1 slave but length was " + slaves.length );
+assert(slaves.length == 1, "Expected 1 slave but length was " + slaves.length);
slave = slaves[0];
db = master.getDB("reindexTest");
slaveDb = slave.getDB("reindexTest");
// Setup index
-db.foo.insert({a:1000});
+db.foo.insert({a: 1000});
-db.foo.ensureIndex({a:1});
+db.foo.ensureIndex({a: 1});
replTest.awaitReplication();
assert.eq(2, db.foo.getIndexes().length, "Master didn't have proper indexes before reindex");
assert.eq(2, slaveDb.foo.getIndexes().length, "Slave didn't have proper indexes before reindex");
-
// Try to reindex secondary
slaveDb.foo.reIndex();
diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js
index eab87207508..cde4974677a 100644
--- a/jstests/replsets/remove1.js
+++ b/jstests/replsets/remove1.js
@@ -13,14 +13,14 @@ var name = "removeNodes";
var host = getHostName();
print("Start set with two nodes");
-var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var replTest = new ReplSetTest({name: name, nodes: 2});
var nodes = replTest.startSet();
replTest.initiate();
var master = replTest.getPrimary();
var secondary = replTest.getSecondary();
print("Initial sync");
-master.getDB("foo").bar.baz.insert({x:1});
+master.getDB("foo").bar.baz.insert({x: 1});
replTest.awaitReplication();
@@ -34,53 +34,53 @@ for (var i = 0; i < config.members.length; i++) {
}
config.version = 2;
-assert.eq(secondary.getDB("admin").runCommand({ping:1}).ok,
+assert.eq(secondary.getDB("admin").runCommand({ping: 1}).ok,
1,
"we should be connected to the secondary");
try {
- master.getDB("admin").runCommand({replSetReconfig:config});
-}
-catch(e) {
+ master.getDB("admin").runCommand({replSetReconfig: config});
+} catch (e) {
print(e);
}
// This tests that the secondary disconnects us when it picks up the new config.
-assert.soon(
- function() {
- try {
- secondary.getDB("admin").runCommand({ping:1});
- } catch (e) {
- return true;
- }
- return false;
+assert.soon(function() {
+ try {
+ secondary.getDB("admin").runCommand({ping: 1});
+ } catch (e) {
+ return true;
}
-);
+ return false;
+});
// Now we should successfully reconnect to the secondary.
-assert.eq(secondary.getDB("admin").runCommand({ping:1}).ok, 1,
+assert.eq(secondary.getDB("admin").runCommand({ping: 1}).ok,
+ 1,
"we aren't connected to the secondary");
reconnect(master);
assert.soon(function() {
- var c = master.getDB("local").system.replset.findOne();
- return c.version == 2;
+ var c = master.getDB("local").system.replset.findOne();
+ return c.version == 2;
});
print("Add it back as a secondary");
-config.members.push({_id:2, host : secondary.host});
+config.members.push({_id: 2, host: secondary.host});
config.version = 3;
// Need to keep retrying reconfig here, as it will not work at first due to the primary's
// perception that the secondary is still "down".
-assert.soon(function() { try {
- reconfig(replTest, config);
- return true;
-} catch (e) {
- return false;
-} });
+assert.soon(function() {
+ try {
+ reconfig(replTest, config);
+ return true;
+ } catch (e) {
+ return false;
+ }
+});
master = replTest.getPrimary();
-printjson(master.getDB("admin").runCommand({replSetGetStatus:1}));
+printjson(master.getDB("admin").runCommand({replSetGetStatus: 1}));
var newConfig = master.getDB("local").system.replset.findOne();
print("newConfig: " + tojson(newConfig));
assert.eq(newConfig.version, 3);
@@ -90,26 +90,24 @@ replTest.stop(secondary);
assert.soon(function() {
try {
- return master.getDB("admin").runCommand({isMaster : 1}).secondary;
- }
- catch(e) {
- print("trying to get master: "+e);
+ return master.getDB("admin").runCommand({isMaster: 1}).secondary;
+ } catch (e) {
+ print("trying to get master: " + e);
}
-},"waiting for primary to step down",(60*1000),1000);
+}, "waiting for primary to step down", (60 * 1000), 1000);
config.version = 4;
config.members.pop();
try {
- master.getDB("admin").runCommand({replSetReconfig : config, force : true});
-}
-catch(e) {
+ master.getDB("admin").runCommand({replSetReconfig: config, force: true});
+} catch (e) {
print(e);
}
reconnect(master);
assert.soon(function() {
- return master.getDB("admin").runCommand({isMaster : 1}).ismaster;
-},"waiting for old primary to accept reconfig and step up",(60*1000),1000);
+ return master.getDB("admin").runCommand({isMaster: 1}).ismaster;
+}, "waiting for old primary to accept reconfig and step up", (60 * 1000), 1000);
config = master.getDB("local").system.replset.findOne();
printjson(config);
diff --git a/jstests/replsets/repl_options.js b/jstests/replsets/repl_options.js
index 1d7a858a473..66a07787889 100644
--- a/jstests/replsets/repl_options.js
+++ b/jstests/replsets/repl_options.js
@@ -4,36 +4,29 @@ load('jstests/libs/command_line/test_parsed_options.js');
jsTest.log("Testing \"replSet\" command line option");
var expectedResult = {
- "parsed" : {
- "replication" : {
- "replSet" : "mycmdlinename"
- }
- }
+ "parsed": {"replication": {"replSet": "mycmdlinename"}}
};
-testGetCmdLineOptsMongod({ replSet : "mycmdlinename" }, expectedResult);
+testGetCmdLineOptsMongod({replSet: "mycmdlinename"}, expectedResult);
jsTest.log("Testing \"replication.replSetName\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/set_replsetname.json",
- "replication" : {
- "replSetName" : "myconfigname"
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/set_replsetname.json",
+ "replication": {"replSetName": "myconfigname"}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/set_replsetname.json" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/set_replsetname.json"},
expectedResult);
jsTest.log("Testing override of \"replication.replSetName\" config file option with \"replSet\"");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/set_replsetname.json",
- "replication" : {
- "replSet" : "mycmdlinename"
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/set_replsetname.json",
+ "replication": {"replSet": "mycmdlinename"}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/set_replsetname.json",
- replSet : "mycmdlinename" }, expectedResult);
+testGetCmdLineOptsMongod(
+ {config: "jstests/libs/config_files/set_replsetname.json", replSet: "mycmdlinename"},
+ expectedResult);
print(baseName + " succeeded.");
diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js
index 49bd10fd2d0..ed254f99758 100644
--- a/jstests/replsets/replset1.js
+++ b/jstests/replsets/replset1.js
@@ -2,20 +2,20 @@ var ssl_options1;
var ssl_options2;
var ssl_name;
load("jstests/replsets/rslib.js");
-var doTest = function( signal ) {
+var doTest = function(signal) {
// Test basic replica set functionality.
// -- Replication
// -- Failover
-
-
// Choose a name that is unique to the options specified.
// This is important because we are depending on a fresh replicaSetMonitor for each run;
- // each differently-named replica set gets its own monitor.
+ // each differently-named replica set gets its own monitor.
// n0 and n1 get the same SSL config since there are 3 nodes but only 2 different configs
- var replTest = new ReplSetTest( {name: 'testSet' + ssl_name, nodes:
- {n0: ssl_options1, n1: ssl_options1, n2: ssl_options2}});
+ var replTest = new ReplSetTest({
+ name: 'testSet' + ssl_name,
+ nodes: {n0: ssl_options1, n1: ssl_options1, n2: ssl_options2}
+ });
// call startSet() to start each mongod in the replica set
// this returns a list of nodes
@@ -50,45 +50,42 @@ var doTest = function( signal ) {
// and slaves in the set and wait until the change has replicated.
replTest.awaitReplication();
-
- var cppconn = new Mongo( replTest.getURL() ).getDB( "foo" );
- assert.eq( 1000 , cppconn.foo.findOne().a , "cppconn 1" );
+ var cppconn = new Mongo(replTest.getURL()).getDB("foo");
+ assert.eq(1000, cppconn.foo.findOne().a, "cppconn 1");
{
// check c++ finding other servers
var temp = replTest.getURL();
- temp = temp.substring( 0 , temp.lastIndexOf( "," ) );
- temp = new Mongo( temp ).getDB( "foo" );
- assert.eq( 1000 , temp.foo.findOne().a , "cppconn 1" );
+ temp = temp.substring(0, temp.lastIndexOf(","));
+ temp = new Mongo(temp).getDB("foo");
+ assert.eq(1000, temp.foo.findOne().a, "cppconn 1");
}
-
// Here's how to stop the master node
- var master_id = replTest.getNodeId( master );
- replTest.stop( master_id );
+ var master_id = replTest.getNodeId(master);
+ replTest.stop(master_id);
// Now let's see who the new master is:
var new_master = replTest.getPrimary();
// Is the new master the same as the old master?
- var new_master_id = replTest.getNodeId( new_master );
+ var new_master_id = replTest.getNodeId(new_master);
- assert( master_id != new_master_id, "Old master shouldn't be equal to new master." );
+ assert(master_id != new_master_id, "Old master shouldn't be equal to new master.");
reconnect(cppconn);
- assert.eq( 1000 , cppconn.foo.findOne().a , "cppconn 2" );
+ assert.eq(1000, cppconn.foo.findOne().a, "cppconn 2");
// Now let's write some documents to the new master
var bulk = new_master.getDB("bar").bar.initializeUnorderedBulkOp();
- for(var i=0; i<1000; i++) {
- bulk.insert({ a: i });
+ for (var i = 0; i < 1000; i++) {
+ bulk.insert({a: i});
}
bulk.execute();
// Here's how to restart the old master node:
var slave = replTest.restart(master_id);
-
// Now, let's make sure that the old master comes up as a slave
assert.soon(function() {
var res = slave.getDB("admin").runCommand({ismaster: 1});
@@ -99,23 +96,23 @@ var doTest = function( signal ) {
// And we need to make sure that the replset comes back up
assert.soon(function() {
var res = new_master.getDB("admin").runCommand({replSetGetStatus: 1});
- printjson( res );
+ printjson(res);
return res.myState == 1;
});
// And that both slave nodes have all the updates
new_master = replTest.getPrimary();
- assert.eq( 1000 , new_master.getDB( "bar" ).runCommand( { count:"bar"} ).n , "assumption 2");
+ assert.eq(1000, new_master.getDB("bar").runCommand({count: "bar"}).n, "assumption 2");
replTest.awaitSecondaryNodes();
replTest.awaitReplication();
var slaves = replTest.liveNodes.slaves;
- assert( slaves.length == 2, "Expected 2 slaves but length was " + slaves.length );
+ assert(slaves.length == 2, "Expected 2 slaves but length was " + slaves.length);
slaves.forEach(function(slave) {
slave.setSlaveOk();
var count = slave.getDB("bar").runCommand({count: "bar"});
- printjson( count );
- assert.eq( 1000 , count.n , "slave count wrong: " + slave );
+ printjson(count);
+ assert.eq(1000, count.n, "slave count wrong: " + slave);
});
// last error
@@ -126,33 +123,39 @@ var doTest = function( signal ) {
var db = master.getDB("foo");
var t = db.foo;
- var ts = slaves.map( function(z){ z.setSlaveOk(); return z.getDB( "foo" ).foo; } );
+ var ts = slaves.map(function(z) {
+ z.setSlaveOk();
+ return z.getDB("foo").foo;
+ });
t.save({a: 1000});
- t.ensureIndex( { a : 1 } );
+ t.ensureIndex({a: 1});
- var result = db.runCommand({getLastError : 1, w: 3 , wtimeout :30000 });
+ var result = db.runCommand({getLastError: 1, w: 3, wtimeout: 30000});
printjson(result);
var lastOp = result.lastOp;
- var lastOplogOp = master.getDB("local").oplog.rs.find().sort({$natural : -1}).limit(1).next();
+ var lastOplogOp = master.getDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
if (replTest.getReplSetConfigFromNode().protocolVersion != 1) {
assert.eq(lastOplogOp['ts'], lastOp);
- }
- else {
+ } else {
assert.eq(lastOplogOp['ts'], lastOp['ts']);
assert.eq(lastOplogOp['t'], lastOp['t']);
}
- ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } );
+ ts.forEach(function(z) {
+ assert.eq(2, z.getIndexKeys().length, "A " + z.getMongo());
+ });
t.reIndex();
- db.getLastError( 3 , 30000 );
- ts.forEach( function(z){ assert.eq( 2 , z.getIndexKeys().length , "A " + z.getMongo() ); } );
+ db.getLastError(3, 30000);
+ ts.forEach(function(z) {
+ assert.eq(2, z.getIndexKeys().length, "A " + z.getMongo());
+ });
// Shut down the set and finish the test.
- replTest.stopSet( signal );
+ replTest.stopSet(signal);
};
-doTest( 15 );
+doTest(15);
print("replset1.js SUCCESS");
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
index 45e37b8551e..b50a939242d 100644
--- a/jstests/replsets/replset2.js
+++ b/jstests/replsets/replset2.js
@@ -1,12 +1,12 @@
load("jstests/replsets/rslib.js");
-doTest = function (signal) {
+doTest = function(signal) {
// Test replication with write concern.
// Replica set testing API
// Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest({ name: 'testSet', nodes: 3, oplogSize: 5 });
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 3, oplogSize: 5});
// call startSet() to start each mongod in the replica set
// this returns a list of nodes
@@ -23,13 +23,15 @@ doTest = function (signal) {
var master = replTest.getPrimary();
// Wait for replication to a single node
- master.getDB(testDB).bar.insert({ n: 1 });
+ master.getDB(testDB).bar.insert({n: 1});
// Wait for states to become PRI,SEC,SEC
waitForAllMembers(master.getDB(testDB));
var slaves = replTest.liveNodes.slaves;
- slaves.forEach(function (slave) { slave.setSlaveOk(); });
+ slaves.forEach(function(slave) {
+ slave.setSlaveOk();
+ });
// Test write concern with multiple inserts.
print("\n\nreplset2.js **** Try inserting a multiple records -- first insert ****");
@@ -37,87 +39,88 @@ doTest = function (signal) {
printjson(master.getDB("admin").runCommand("replSetGetStatus"));
var bulk = master.getDB(testDB).foo.initializeUnorderedBulkOp();
- bulk.insert({ n: 1 });
- bulk.insert({ n: 2 });
- bulk.insert({ n: 3 });
+ bulk.insert({n: 1});
+ bulk.insert({n: 2});
+ bulk.insert({n: 3});
print("\nreplset2.js **** TEMP 1 ****");
printjson(master.getDB("admin").runCommand("replSetGetStatus"));
- assert.writeOK(bulk.execute({ w: 3, wtimeout: 25000 }));
+ assert.writeOK(bulk.execute({w: 3, wtimeout: 25000}));
print("replset2.js **** TEMP 1a ****");
- m1 = master.getDB(testDB).foo.findOne({ n: 1 });
+ m1 = master.getDB(testDB).foo.findOne({n: 1});
printjson(m1);
assert(m1['n'] == 1, "replset2.js Failed to save to master on multiple inserts");
print("replset2.js **** TEMP 1b ****");
- var s0 = slaves[0].getDB(testDB).foo.findOne({ n: 1 });
+ var s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
assert(s0['n'] == 1, "replset2.js Failed to replicate to slave 0 on multiple inserts");
- var s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 });
+ var s1 = slaves[1].getDB(testDB).foo.findOne({n: 1});
assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1 on multiple inserts");
// Test write concern with a simple insert
print("replset2.js **** Try inserting a single record ****");
master.getDB(testDB).dropDatabase();
- var options = { writeConcern: { w: 3, wtimeout: 10000 }};
- assert.writeOK(master.getDB(testDB).foo.insert({ n: 1 }, options));
+ var options = {
+ writeConcern: {w: 3, wtimeout: 10000}
+ };
+ assert.writeOK(master.getDB(testDB).foo.insert({n: 1}, options));
- m1 = master.getDB(testDB).foo.findOne({ n: 1 });
+ m1 = master.getDB(testDB).foo.findOne({n: 1});
printjson(m1);
assert(m1['n'] == 1, "replset2.js Failed to save to master");
- s0 = slaves[0].getDB(testDB).foo.findOne({ n: 1 });
+ s0 = slaves[0].getDB(testDB).foo.findOne({n: 1});
assert(s0['n'] == 1, "replset2.js Failed to replicate to slave 0");
- s1 = slaves[1].getDB(testDB).foo.findOne({ n: 1 });
+ s1 = slaves[1].getDB(testDB).foo.findOne({n: 1});
assert(s1['n'] == 1, "replset2.js Failed to replicate to slave 1");
print("replset2.js **** Try inserting many records ****");
try {
- var bigData = new Array(2000).toString();
- bulk = master.getDB(testDB).baz.initializeUnorderedBulkOp();
- for (var n = 0; n < 1000; n++) {
- bulk.insert({ n: n, data: bigData });
- }
- assert.writeOK(bulk.execute({ w: 3, wtimeout: 60000 }));
-
- print("replset2.js **** V1 ");
-
- var verifyReplication = function (nodeName, collection) {
- data = collection.findOne({ n: 1 });
- assert(data['n'] == 1, "replset2.js Failed to save to " + nodeName);
- data = collection.findOne({ n: 999 });
- assert(data['n'] == 999, "replset2.js Failed to save to " + nodeName);
- };
-
- print("replset2.js **** V2 ");
-
- verifyReplication("master", master.getDB(testDB).baz);
- verifyReplication("slave 0", slaves[0].getDB(testDB).baz);
- verifyReplication("slave 1", slaves[1].getDB(testDB).baz);
- }
- catch(e) {
- var errstr = "ERROR: " + e;
- errstr += "\nMaster oplog findOne:\n";
- errstr += tojson(
- master.getDB("local").oplog.rs.find().sort({"$natural":-1}).limit(1).next());
- errstr += "\nSlave 0 oplog findOne:\n";
- errstr += tojson(
- slaves[0].getDB("local").oplog.rs.find().sort({"$natural":-1}).limit(1).next());
- errstr += "\nSlave 1 oplog findOne:\n";
- errstr += tojson(
- slaves[1].getDB("local").oplog.rs.find().sort({"$natural":-1}).limit(1).next());
- assert(false, errstr);
+ var bigData = new Array(2000).toString();
+ bulk = master.getDB(testDB).baz.initializeUnorderedBulkOp();
+ for (var n = 0; n < 1000; n++) {
+ bulk.insert({n: n, data: bigData});
+ }
+ assert.writeOK(bulk.execute({w: 3, wtimeout: 60000}));
+
+ print("replset2.js **** V1 ");
+
+ var verifyReplication = function(nodeName, collection) {
+ data = collection.findOne({n: 1});
+ assert(data['n'] == 1, "replset2.js Failed to save to " + nodeName);
+ data = collection.findOne({n: 999});
+ assert(data['n'] == 999, "replset2.js Failed to save to " + nodeName);
+ };
+
+ print("replset2.js **** V2 ");
+
+ verifyReplication("master", master.getDB(testDB).baz);
+ verifyReplication("slave 0", slaves[0].getDB(testDB).baz);
+ verifyReplication("slave 1", slaves[1].getDB(testDB).baz);
+ } catch (e) {
+ var errstr = "ERROR: " + e;
+ errstr += "\nMaster oplog findOne:\n";
+ errstr +=
+ tojson(master.getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next());
+ errstr += "\nSlave 0 oplog findOne:\n";
+ errstr +=
+ tojson(slaves[0].getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next());
+ errstr += "\nSlave 1 oplog findOne:\n";
+ errstr +=
+ tojson(slaves[1].getDB("local").oplog.rs.find().sort({"$natural": -1}).limit(1).next());
+ assert(false, errstr);
}
replTest.stopSet(signal);
};
-doTest( 15 );
+doTest(15);
print("\nreplset2.js SUCCESS\n");
diff --git a/jstests/replsets/replset3.js b/jstests/replsets/replset3.js
index 6bb29a196ec..2121e395866 100644
--- a/jstests/replsets/replset3.js
+++ b/jstests/replsets/replset3.js
@@ -1,10 +1,10 @@
-var doTest = function (signal) {
+var doTest = function(signal) {
"use strict";
// Test replica set step down
// Replica set testing API
// Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
// call startSet() to start each mongod in the replica set
// this returns a list of nodes
@@ -19,7 +19,7 @@ var doTest = function (signal) {
// Write some data to master
// NOTE: this test fails unless we write some data.
- master.getDB("foo").foo.insert({ a: 1 }, { writeConcern: { w: 3, wtimeout: 20000 }});
+ master.getDB("foo").foo.insert({a: 1}, {writeConcern: {w: 3, wtimeout: 20000}});
var phase = 1;
@@ -27,7 +27,7 @@ var doTest = function (signal) {
// Step down master. Note: this may close our connection!
try {
- master.getDB("admin").runCommand({ replSetStepDown: true, force: 1 });
+ master.getDB("admin").runCommand({replSetStepDown: true, force: 1});
} catch (err) {
print("caught: " + err + " on stepdown");
}
@@ -36,9 +36,8 @@ var doTest = function (signal) {
try {
var new_master = replTest.getPrimary();
- }
- catch (err) {
- throw ("Could not elect new master before timeout.");
+ } catch (err) {
+ throw("Could not elect new master before timeout.");
}
print(phase++);
@@ -48,25 +47,27 @@ var doTest = function (signal) {
print(phase++);
// Make sure that slaves are still up
- var result = new_master.getDB("admin").runCommand({ replSetGetStatus: 1 });
+ var result = new_master.getDB("admin").runCommand({replSetGetStatus: 1});
assert(result['ok'] == 1, "Could not verify that slaves were still up:" + result);
print(phase++);
var slaves = replTest.liveNodes.slaves;
- assert.soon(function () {
+ assert.soon(function() {
try {
- var res = slaves[0].getDB("admin").runCommand({ replSetGetStatus: 1 });
- } catch (err) { }
+ var res = slaves[0].getDB("admin").runCommand({replSetGetStatus: 1});
+ } catch (err) {
+ }
return res.myState == 2;
}, "Slave 0 state not ready.");
print(phase++);
- assert.soon(function () {
+ assert.soon(function() {
try {
- var res = slaves[1].getDB("admin").runCommand({ replSetGetStatus: 1 });
- } catch (err) { }
+ var res = slaves[1].getDB("admin").runCommand({replSetGetStatus: 1});
+ } catch (err) {
+ }
return res.myState == 2;
}, "Slave 1 state not ready.");
@@ -75,4 +76,4 @@ var doTest = function (signal) {
replTest.stopSet(15);
};
-doTest( 15 );
+doTest(15);
diff --git a/jstests/replsets/replset4.js b/jstests/replsets/replset4.js
index 8aecb715130..e6df067d1ea 100644
--- a/jstests/replsets/replset4.js
+++ b/jstests/replsets/replset4.js
@@ -1,7 +1,7 @@
-doTest = function (signal) {
+doTest = function(signal) {
// Test orphaned master steps down
- var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
replTest.startSet();
replTest.initiate();
@@ -17,23 +17,20 @@ doTest = function (signal) {
print("replset4.js 1");
- assert.soon(
- function () {
- try {
- var result = master.getDB("admin").runCommand({ ismaster: 1 });
- return (result['ok'] == 1 && result['ismaster'] == false);
- } catch (e) {
- print("replset4.js caught " + e);
- return false;
- }
- },
- "Master fails to step down when orphaned."
- );
+ assert.soon(function() {
+ try {
+ var result = master.getDB("admin").runCommand({ismaster: 1});
+ return (result['ok'] == 1 && result['ismaster'] == false);
+ } catch (e) {
+ print("replset4.js caught " + e);
+ return false;
+ }
+ }, "Master fails to step down when orphaned.");
print("replset4.js worked, stopping");
replTest.stopSet(signal);
};
print("replset4.js");
-doTest( 15 );
+doTest(15);
print("replset4.js SUCCESS");
diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js
index 717a0c8153b..c0aee6e1154 100644
--- a/jstests/replsets/replset5.js
+++ b/jstests/replsets/replset5.js
@@ -1,17 +1,20 @@
// rs test getlasterrordefaults
load("jstests/replsets/rslib.js");
-(function () {
+(function() {
"use strict";
// Test write concern defaults
- var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
var nodes = replTest.startSet();
// Initiate set with default for write concern
var config = replTest.getReplSetConfig();
config.settings = {};
- config.settings.getLastErrorDefaults = { 'w': 3, 'wtimeout': 20000 };
+ config.settings.getLastErrorDefaults = {
+ 'w': 3,
+ 'wtimeout': 20000
+ };
config.settings.heartbeatTimeoutSecs = 15;
// Prevent node 2 from becoming primary, as we will attempt to set it to hidden later.
config.members[2].priority = 0;
@@ -24,18 +27,18 @@ load("jstests/replsets/rslib.js");
var testDB = "foo";
// Initial replication
- master.getDB("barDB").bar.save({ a: 1 });
+ master.getDB("barDB").bar.save({a: 1});
replTest.awaitReplication();
// These writes should be replicated immediately
var docNum = 5000;
var bulk = master.getDB(testDB).foo.initializeUnorderedBulkOp();
for (var n = 0; n < docNum; n++) {
- bulk.insert({ n: n });
+ bulk.insert({n: n});
}
// should use the configured last error defaults from above, that's what we're testing.
- //
+ //
// If you want to test failure, just add values for w and wtimeout (e.g. w=1)
// to the following command. This will override the default set above and
// prevent replication from happening in time for the count tests below.
diff --git a/jstests/replsets/replset6.js b/jstests/replsets/replset6.js
index a55c44aaea7..29adfa0ae3c 100644
--- a/jstests/replsets/replset6.js
+++ b/jstests/replsets/replset6.js
@@ -3,7 +3,7 @@
baseName = "jstests_replsets_replset6";
-var rt = new ReplSetTest({ name : "replset6tests" , nodes: 2 });
+var rt = new ReplSetTest({name: "replset6tests", nodes: 2});
var nodes = rt.startSet();
rt.initiate();
var m = rt.getPrimary();
@@ -11,42 +11,51 @@ rt.awaitSecondaryNodes();
var slaves = rt.liveNodes.slaves;
s = slaves[0];
s.setSlaveOk();
-admin = m.getDB( "admin" );
+admin = m.getDB("admin");
-debug = function( foo ) {}; // print( foo ); }
+debug = function(foo) {}; // print( foo ); }
// rename within db
-m.getDB( baseName ).one.save( { a: 1 } );
-assert.soon( function() { v = s.getDB( baseName ).one.findOne(); return v && 1 == v.a; } );
-
-assert.commandWorked( admin.runCommand( {renameCollection:"jstests_replsets_replset6.one", to:"jstests_replsets_replset6.two"} ) );
-assert.soon( function() {
- if ( -1 == s.getDB( baseName ).getCollectionNames().indexOf( "two" ) ) {
- debug( "no two coll" );
- debug( tojson( s.getDB( baseName ).getCollectionNames() ) );
- return false;
- }
- if ( !s.getDB( baseName ).two.findOne() ) {
- debug( "no two object" );
- return false;
- }
- return 1 == s.getDB( baseName ).two.findOne().a; });
-assert.eq( -1, s.getDB( baseName ).getCollectionNames().indexOf( "one" ) );
+m.getDB(baseName).one.save({a: 1});
+assert.soon(function() {
+ v = s.getDB(baseName).one.findOne();
+ return v && 1 == v.a;
+});
+
+assert.commandWorked(admin.runCommand(
+ {renameCollection: "jstests_replsets_replset6.one", to: "jstests_replsets_replset6.two"}));
+assert.soon(function() {
+ if (-1 == s.getDB(baseName).getCollectionNames().indexOf("two")) {
+ debug("no two coll");
+ debug(tojson(s.getDB(baseName).getCollectionNames()));
+ return false;
+ }
+ if (!s.getDB(baseName).two.findOne()) {
+ debug("no two object");
+ return false;
+ }
+ return 1 == s.getDB(baseName).two.findOne().a;
+});
+assert.eq(-1, s.getDB(baseName).getCollectionNames().indexOf("one"));
// rename to new db
first = baseName + "_first";
second = baseName + "_second";
-m.getDB( first ).one.save( { a: 1 } );
-assert.soon( function() { return s.getDB( first ).one.findOne() && 1 == s.getDB( first ).one.findOne().a; } );
-
-assert.commandWorked( admin.runCommand( {renameCollection:"jstests_replsets_replset6_first.one", to:"jstests_replsets_replset6_second.two"} ) );
-assert.soon( function() {
- return -1 != s.getDBNames().indexOf( second ) &&
- -1 != s.getDB( second ).getCollectionNames().indexOf( "two" ) &&
- s.getDB( second ).two.findOne() &&
- 1 == s.getDB( second ).two.findOne().a; } );
-assert.eq( -1, s.getDB( first ).getCollectionNames().indexOf( "one" ) );
-
+m.getDB(first).one.save({a: 1});
+assert.soon(function() {
+ return s.getDB(first).one.findOne() && 1 == s.getDB(first).one.findOne().a;
+});
+
+assert.commandWorked(admin.runCommand({
+ renameCollection: "jstests_replsets_replset6_first.one",
+ to: "jstests_replsets_replset6_second.two"
+}));
+assert.soon(function() {
+ return -1 != s.getDBNames().indexOf(second) &&
+ -1 != s.getDB(second).getCollectionNames().indexOf("two") &&
+ s.getDB(second).two.findOne() && 1 == s.getDB(second).two.findOne().a;
+});
+assert.eq(-1, s.getDB(first).getCollectionNames().indexOf("one"));
diff --git a/jstests/replsets/replset7.js b/jstests/replsets/replset7.js
index 1c63fd8f35f..8b13f2ed7e2 100644
--- a/jstests/replsets/replset7.js
+++ b/jstests/replsets/replset7.js
@@ -1,43 +1,42 @@
// test for SERVER-5040 - if documents move forward during an initial sync.
-var rt = new ReplSetTest( { name : "replset7tests" , nodes: 1 } );
+var rt = new ReplSetTest({name: "replset7tests", nodes: 1});
var nodes = rt.startSet();
rt.initiate();
var master = rt.getPrimary();
-var md = master.getDB( 'd' );
-var mdc = md[ 'c' ];
+var md = master.getDB('d');
+var mdc = md['c'];
// prep the data
var doccount = 5000;
var bulk = mdc.initializeUnorderedBulkOp();
-for( i = 0; i < doccount; ++i ) {
- bulk.insert( { _id:i, x:i } );
+for (i = 0; i < doccount; ++i) {
+ bulk.insert({_id: i, x: i});
}
assert.writeOK(bulk.execute());
-assert.commandWorked(mdc.ensureIndex( { x : 1 }, { unique: true } ));
+assert.commandWorked(mdc.ensureIndex({x: 1}, {unique: true}));
// add a secondary
var slave = rt.add();
rt.reInitiate();
-print ("initiation complete!");
-var sc = slave.getDB( 'd' )[ 'c' ];
+print("initiation complete!");
+var sc = slave.getDB('d')['c'];
slave.setSlaveOk();
// Wait for slave to start cloning.
-//assert.soon( function() { c = sc.find( { _id:1, x:1 } ); print( c ); return c > 0; } );
-
+// assert.soon( function() { c = sc.find( { _id:1, x:1 } ); print( c ); return c > 0; } );
// Move all documents to the end by growing it
bulk = mdc.initializeUnorderedBulkOp();
var bigStr = "ayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayayay" +
- "ayayayayayayayayayayayay";
+ "ayayayayayayayayayayayay";
for (i = 0; i < doccount; ++i) {
- bulk.find({ _id: i, x: i }).remove();
- bulk.insert({ _id: doccount + i, x: i, bigstring: bigStr });
+ bulk.find({_id: i, x: i}).remove();
+ bulk.insert({_id: doccount + i, x: i, bigstring: bigStr});
}
assert.writeOK(bulk.execute());
@@ -45,9 +44,8 @@ assert.writeOK(bulk.execute());
rt.awaitSecondaryNodes();
// Do we have an index?
-assert.eq(1, slave.getDB( 'd' )['c'].getIndexes().filter(function (doc) {
- return (doc.v === 1
- && JSON.stringify(doc.key) === JSON.stringify({x: 1})
- && doc.ns === 'd.c'
- && doc.name === 'x_1');
-}).length);
+assert.eq(1,
+ slave.getDB('d')['c'].getIndexes().filter(function(doc) {
+ return (doc.v === 1 && JSON.stringify(doc.key) === JSON.stringify({x: 1}) &&
+ doc.ns === 'd.c' && doc.name === 'x_1');
+ }).length);
diff --git a/jstests/replsets/replset8.js b/jstests/replsets/replset8.js
index ead9c50f066..69a16daa3a6 100644
--- a/jstests/replsets/replset8.js
+++ b/jstests/replsets/replset8.js
@@ -1,64 +1,64 @@
// test for SERVER-6303 - if documents move backward during an initial sync.
-var rt = new ReplSetTest( { name : "replset8tests" , nodes: 1 } );
+var rt = new ReplSetTest({name: "replset8tests", nodes: 1});
var nodes = rt.startSet();
rt.initiate();
var master = rt.getPrimary();
var bigstring = "a";
-var md = master.getDB( 'd' );
-var mdc = md[ 'c' ];
+var md = master.getDB('d');
+var mdc = md['c'];
// prep the data
// idea: create x documents of increasing size, then create x documents of size n.
-// delete first x documents. start initial sync (cloner). update all remaining
+// delete first x documents. start initial sync (cloner). update all remaining
// documents to be increasing size.
// this should result in the updates moving the docs backwards.
var doccount = 5000;
// Avoid empty extent issues
-mdc.insert( { _id:-1, x:"dummy" } );
+mdc.insert({_id: -1, x: "dummy"});
-print ("inserting bigstrings");
+print("inserting bigstrings");
var bulk = mdc.initializeUnorderedBulkOp();
-for( i = 0; i < doccount; ++i ) {
- bulk.insert( { _id:i, x:bigstring } );
+for (i = 0; i < doccount; ++i) {
+ bulk.insert({_id: i, x: bigstring});
bigstring += "a";
}
assert.writeOK(bulk.execute());
-print ("inserting x");
+print("inserting x");
bulk = mdc.initializeUnorderedBulkOp();
-for( i = doccount; i < doccount*2; ++i ) {
- bulk.insert( { _id:i, x:i } );
+for (i = doccount; i < doccount * 2; ++i) {
+ bulk.insert({_id: i, x: i});
}
assert.writeOK(bulk.execute());
-print ("deleting bigstrings");
+print("deleting bigstrings");
bulk = mdc.initializeUnorderedBulkOp();
-for( i = 0; i < doccount; ++i ) {
- bulk.find({ _id: i }).remove();
+for (i = 0; i < doccount; ++i) {
+ bulk.find({_id: i}).remove();
}
assert.writeOK(bulk.execute());
// add a secondary
var slave = rt.add();
rt.reInitiate();
-print ("initiation complete!");
-var sc = slave.getDB( 'd' )[ 'c' ];
+print("initiation complete!");
+var sc = slave.getDB('d')['c'];
slave.setSlaveOk();
sleep(25000);
-print ("updating documents backwards");
+print("updating documents backwards");
// Move all documents to the beginning by growing them to sizes that should
// fit the holes we made in phase 1
bulk = mdc.initializeUnorderedBulkOp();
-for (i = doccount*2; i > doccount; --i) {
- mdc.update( { _id:i, x:i }, { _id:i, x:bigstring } );
- bigstring = bigstring.slice(0, -1); // remove last char
+for (i = doccount * 2; i > doccount; --i) {
+ mdc.update({_id: i, x: i}, {_id: i, x: bigstring});
+ bigstring = bigstring.slice(0, -1); // remove last char
}
-print ("finished");
+print("finished");
// Wait for replication to catch up.
rt.awaitSecondaryNodes();
-assert.eq(doccount+1, slave.getDB( 'd' )['c'].count());
+assert.eq(doccount + 1, slave.getDB('d')['c'].count());
diff --git a/jstests/replsets/replset9.js b/jstests/replsets/replset9.js
index 8ae46863087..c1493908f12 100644
--- a/jstests/replsets/replset9.js
+++ b/jstests/replsets/replset9.js
@@ -1,47 +1,47 @@
-var rt = new ReplSetTest( { name : "replset9tests" , nodes: 1, oplogSize: 300 } );
+var rt = new ReplSetTest({name: "replset9tests", nodes: 1, oplogSize: 300});
var nodes = rt.startSet();
rt.initiate();
var master = rt.getPrimary();
var bigstring = Array(5000).toString();
-var md = master.getDB( 'd' );
-var mdc = md[ 'c' ];
+var md = master.getDB('d');
+var mdc = md['c'];
// idea: while cloner is running, update some docs and then immediately remove them.
// oplog will have ops referencing docs that no longer exist.
var doccount = 20000;
// Avoid empty extent issues
-mdc.insert( { _id:-1, x:"dummy" } );
+mdc.insert({_id: -1, x: "dummy"});
// Make this db big so that cloner takes a while.
-print ("inserting bigstrings");
+print("inserting bigstrings");
var bulk = mdc.initializeUnorderedBulkOp();
-for( i = 0; i < doccount; ++i ) {
- mdc.insert({ _id: i, x: bigstring });
+for (i = 0; i < doccount; ++i) {
+ mdc.insert({_id: i, x: bigstring});
}
assert.writeOK(bulk.execute());
// Insert some docs to update and remove
-print ("inserting x");
+print("inserting x");
bulk = mdc.initializeUnorderedBulkOp();
-for( i = doccount; i < doccount*2; ++i ) {
- bulk.insert({ _id: i, bs: bigstring, x: i });
+for (i = doccount; i < doccount * 2; ++i) {
+ bulk.insert({_id: i, bs: bigstring, x: i});
}
assert.writeOK(bulk.execute());
// add a secondary; start cloning
var slave = rt.add();
(function reinitiate() {
- var master = rt.nodes[0];
+ var master = rt.nodes[0];
var c = master.getDB("local")['system.replset'].findOne();
- var config = rt.getReplSetConfig();
+ var config = rt.getReplSetConfig();
config.version = c.version + 1;
- var admin = master.getDB("admin");
- var cmd = {};
- var cmdKey = 'replSetReconfig';
+ var admin = master.getDB("admin");
+ var cmd = {};
+ var cmdKey = 'replSetReconfig';
var timeout = timeout || 30000;
cmd[cmdKey] = config;
printjson(cmd);
@@ -53,21 +53,20 @@ var slave = rt.add();
}, "reinitiate replica set", timeout);
})();
-
-print ("initiation complete!");
-var sc = slave.getDB( 'd' )[ 'c' ];
+print("initiation complete!");
+var sc = slave.getDB('d')['c'];
slave.setSlaveOk();
master = rt.getPrimary();
-print ("updating and deleting documents");
+print("updating and deleting documents");
bulk = master.getDB('d')['c'].initializeUnorderedBulkOp();
-for (i = doccount*4; i > doccount; --i) {
- bulk.find({ _id: i }).update({ $inc: { x: 1 }});
- bulk.find({ _id: i }).remove();
- bulk.insert({ bs: bigstring });
+for (i = doccount * 4; i > doccount; --i) {
+ bulk.find({_id: i}).update({$inc: {x: 1}});
+ bulk.find({_id: i}).remove();
+ bulk.insert({bs: bigstring});
}
assert.writeOK(bulk.execute());
-print ("finished");
+print("finished");
// Wait for replication to catch up.
rt.awaitReplication(640000);
diff --git a/jstests/replsets/replsetadd_profile.js b/jstests/replsets/replsetadd_profile.js
index 810b6f5f144..641e7ca7cfd 100644
--- a/jstests/replsets/replsetadd_profile.js
+++ b/jstests/replsets/replsetadd_profile.js
@@ -8,8 +8,7 @@
// the only node is running at a profiling level of 2.
var collectionName = 'jstests_replsetadd_profile';
-var replTest = new ReplSetTest({name: 'ReplSetAddProfileTestSet',
- nodes: [{profile: 2}]});
+var replTest = new ReplSetTest({name: 'ReplSetAddProfileTestSet', nodes: [{profile: 2}]});
replTest.startSet();
replTest.initiate();
var master = replTest.getPrimary();
@@ -25,8 +24,9 @@ replTest.waitForState(replTest.nodes[1], ReplSetTest.State.SECONDARY, 60 * 1000)
replTest.awaitReplication();
var newNodeCollection = newNode.getDB('test').getCollection(collectionName);
-assert.eq(1, newNodeCollection.find({a: 1}).itcount(),
+assert.eq(1,
+ newNodeCollection.find({a: 1}).itcount(),
'expect documents to be present in slave after replication');
var signal = 15;
-replTest.stopSet( signal );
+replTest.stopSet(signal);
diff --git a/jstests/replsets/replsetarb2.js b/jstests/replsets/replsetarb2.js
index 16388c8b92b..8e1712749e4 100644
--- a/jstests/replsets/replsetarb2.js
+++ b/jstests/replsets/replsetarb2.js
@@ -3,16 +3,18 @@
(function() {
"use strict";
- var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
+ var replTest = new ReplSetTest({name: 'unicomplex', nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
- var r = replTest.initiate({"_id" : "unicomplex",
- "members" : [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1], "arbiterOnly" : true, "votes": 1},
- {"_id" : 2, "host" : nodes[2]}
- ]});
+ var r = replTest.initiate({
+ "_id": "unicomplex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], "arbiterOnly": true, "votes": 1},
+ {"_id": 2, "host": nodes[2]}
+ ]
+ });
// Make sure we have a master
var master = replTest.getPrimary();
@@ -24,7 +26,7 @@
return res.myState === 7;
}, "Aribiter failed to initialize.");
- var result = conns[1].getDB("admin").runCommand({isMaster : 1});
+ var result = conns[1].getDB("admin").runCommand({isMaster: 1});
assert(result.arbiterOnly);
assert(!result.passive);
diff --git a/jstests/replsets/replsetfreeze.js b/jstests/replsets/replsetfreeze.js
index 3467f4bd98f..2629a78fe15 100644
--- a/jstests/replsets/replsetfreeze.js
+++ b/jstests/replsets/replsetfreeze.js
@@ -10,13 +10,12 @@
* 9: check we get a new master within 30 seconds
*/
-
var w = 0;
var wait = function(f) {
w++;
var n = 0;
while (!f()) {
- if( n % 4 == 0 )
+ if (n % 4 == 0)
print("toostale.js waiting " + w);
if (++n == 4) {
print("" + f);
@@ -27,26 +26,29 @@ var wait = function(f) {
};
var reconnect = function(a) {
- wait(function() {
- try {
- a.getDB("foo").bar.stats();
- return true;
- } catch(e) {
- print(e);
- return false;
- }
+ wait(function() {
+ try {
+ a.getDB("foo").bar.stats();
+ return true;
+ } catch (e) {
+ print(e);
+ return false;
+ }
});
};
-
print("1: initialize set");
-var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 3} );
+var replTest = new ReplSetTest({name: 'unicomplex', nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-var config = {"_id" : "unicomplex", "members" : [
- {"_id" : 0, "host" : nodes[0] },
- {"_id" : 1, "host" : nodes[1] },
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]};
+var config = {
+ "_id": "unicomplex",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+};
var r = replTest.initiate(config);
var master = replTest.getPrimary();
var secondary = replTest.getSecondary();
@@ -55,55 +57,46 @@ replTest.awaitSecondaryNodes();
print("2: step down m1");
try {
- master.getDB("admin").runCommand({replSetStepDown : 1, force : 1});
-}
-catch(e) {
- print(e);
+ master.getDB("admin").runCommand({replSetStepDown: 1, force: 1});
+} catch (e) {
+ print(e);
}
reconnect(master);
-printjson( master.getDB("admin").runCommand({replSetGetStatus: 1}) );
+printjson(master.getDB("admin").runCommand({replSetGetStatus: 1}));
print("3: freeze set for 30 seconds");
var start = (new Date()).getTime();
-assert.commandWorked(secondary.getDB("admin").runCommand({replSetFreeze : 30}));
-assert.commandWorked(master.getDB("admin").runCommand({replSetFreeze : 30}));
-
+assert.commandWorked(secondary.getDB("admin").runCommand({replSetFreeze: 30}));
+assert.commandWorked(master.getDB("admin").runCommand({replSetFreeze: 30}));
print("4: check no one is master for 30 seconds");
-while ((new Date()).getTime() - start < (28 * 1000) ) { // we need less 30 since it takes some time to return... hacky
- var result = master.getDB("admin").runCommand({isMaster:1});
- assert.eq(result.ismaster, false);
- assert.eq(result.primary, undefined);
- sleep(1000);
+while ((new Date()).getTime() - start <
+ (28 * 1000)) { // we need less 30 since it takes some time to return... hacky
+ var result = master.getDB("admin").runCommand({isMaster: 1});
+ assert.eq(result.ismaster, false);
+ assert.eq(result.primary, undefined);
+ sleep(1000);
}
-
print("5: check for new master");
master = replTest.getPrimary();
-
print("6: step down new master");
try {
- master.getDB("admin").runCommand({replSetStepDown : 1, force : 1});
-}
-catch(e) {
- print(e);
+ master.getDB("admin").runCommand({replSetStepDown: 1, force: 1});
+} catch (e) {
+ print(e);
}
reconnect(master);
-
print("7: freeze for 30 seconds");
-master.getDB("admin").runCommand({replSetFreeze : 30});
+master.getDB("admin").runCommand({replSetFreeze: 30});
sleep(1000);
-
print("8: unfreeze");
-master.getDB("admin").runCommand({replSetFreeze : 0});
-
+master.getDB("admin").runCommand({replSetFreeze: 0});
print("9: check we get a new master within 30 seconds");
master = replTest.getPrimary();
-
-replTest.stopSet( 15 );
-
+replTest.stopSet(15);
diff --git a/jstests/replsets/replsethostnametrim.js b/jstests/replsets/replsethostnametrim.js
index c303ecdea0d..51edf551f96 100644
--- a/jstests/replsets/replsethostnametrim.js
+++ b/jstests/replsets/replsethostnametrim.js
@@ -1,6 +1,6 @@
// try reconfiguring with space at the end of the host:port
-var replTest = new ReplSetTest({ name: 'testSet', nodes: 1 });
+var replTest = new ReplSetTest({name: 'testSet', nodes: 1});
var nodes = replTest.startSet();
replTest.initiate();
@@ -9,13 +9,13 @@ var config = master.getDB("local").system.replset.findOne();
config.version++;
var origHost = config.members[0].host;
config.members[0].host = origHost + " ";
-var result = master.adminCommand({replSetReconfig : config});
+var result = master.adminCommand({replSetReconfig: config});
assert.eq(result.ok, 1, tojson(result));
-//print("current (bad) config:"); printjson(config);
+// print("current (bad) config:"); printjson(config);
-//check new config to make sure it doesn't have a space in the hostname
+// check new config to make sure it doesn't have a space in the hostname
config = master.getDB("local").system.replset.findOne();
assert.eq(origHost, config.members[0].host);
-//print("current (good) config:"); printjson(config);
+// print("current (good) config:"); printjson(config);
replTest.stopSet(); \ No newline at end of file
diff --git a/jstests/replsets/replsetprio1.js b/jstests/replsets/replsetprio1.js
index 37da3c6474d..d71c10383ea 100644
--- a/jstests/replsets/replsetprio1.js
+++ b/jstests/replsets/replsetprio1.js
@@ -2,15 +2,18 @@
(function() {
"use strict";
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
var nodenames = replTest.nodeList();
var nodes = replTest.startSet();
- replTest.initiate({"_id" : "testSet",
- "members" : [
- {"_id" : 0, "host" : nodenames[0], "priority" : 1},
- {"_id" : 1, "host" : nodenames[1], "priority" : 2},
- {"_id" : 2, "host" : nodenames[2], "priority" : 3}]});
+ replTest.initiate({
+ "_id": "testSet",
+ "members": [
+ {"_id": 0, "host": nodenames[0], "priority": 1},
+ {"_id": 1, "host": nodenames[1], "priority": 2},
+ {"_id": 2, "host": nodenames[2], "priority": 3}
+ ]
+ });
// 2 should be master (give this a while to happen, as 0 will be elected, then demoted)
replTest.waitForState(nodes[2], ReplSetTest.State.PRIMARY, 120000);
@@ -20,15 +23,15 @@
// 1 should eventually be master
replTest.waitForState(nodes[1], ReplSetTest.State.PRIMARY, 60000);
-
+
// do some writes on 1
var master = replTest.getPrimary();
- for (var i=0; i<1000; i++) {
- master.getDB("foo").bar.insert({i:i});
+ for (var i = 0; i < 1000; i++) {
+ master.getDB("foo").bar.insert({i: i});
}
- for (i=0; i<1000; i++) {
- master.getDB("bar").baz.insert({i:i});
+ for (i = 0; i < 1000; i++) {
+ master.getDB("bar").baz.insert({i: i});
}
// bring 2 back up, 2 should wait until caught up and then become master
@@ -37,8 +40,8 @@
// make sure nothing was rolled back
master = replTest.getPrimary();
- for (i=0; i<1000; i++) {
- assert(master.getDB("foo").bar.findOne({i:i}) != null, 'checking '+i);
- assert(master.getDB("bar").baz.findOne({i:i}) != null, 'checking '+i);
+ for (i = 0; i < 1000; i++) {
+ assert(master.getDB("foo").bar.findOne({i: i}) != null, 'checking ' + i);
+ assert(master.getDB("bar").baz.findOne({i: i}) != null, 'checking ' + i);
}
}());
diff --git a/jstests/replsets/replsetrestart1.js b/jstests/replsets/replsetrestart1.js
index cc3281f0af8..870cdcfe233 100644
--- a/jstests/replsets/replsetrestart1.js
+++ b/jstests/replsets/replsetrestart1.js
@@ -16,14 +16,14 @@
assert.eq(c1._id, c2._id, '_id same');
for (var i in c1.members) {
- assert(c2.members[i] !== undefined, 'field '+i+' exists in both configs');
+ assert(c2.members[i] !== undefined, 'field ' + i + ' exists in both configs');
assert.eq(c1.members[i]._id, c2.members[i]._id, 'id is equal in both configs');
assert.eq(c1.members[i].host, c2.members[i].host, 'host is equal in both configs');
}
};
// Create a new replica set test. Specify set name and the number of nodes you want.
- var replTest = new ReplSetTest( {name: 'testSet', nodes: 3} );
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
// call startSet() to start each mongod in the replica set
// this returns a list of nodes
@@ -43,23 +43,23 @@
var config1 = master.getDB("local").system.replset.findOne();
// Now we're going to shut down all nodes
- var mId = replTest.getNodeId( master );
+ var mId = replTest.getNodeId(master);
var s1 = replTest.liveNodes.slaves[0];
var s1Id = replTest.getNodeId(s1);
var s2 = replTest.liveNodes.slaves[1];
var s2Id = replTest.getNodeId(s2);
- replTest.stop( s1Id );
- replTest.stop( s2Id );
+ replTest.stop(s1Id);
+ replTest.stop(s2Id);
replTest.waitForState(s1, ReplSetTest.State.DOWN);
replTest.waitForState(s2, ReplSetTest.State.DOWN);
- replTest.stop( mId );
+ replTest.stop(mId);
// Now let's restart these nodes
- replTest.restart( mId );
- replTest.restart( s1Id );
- replTest.restart( s2Id );
+ replTest.restart(mId);
+ replTest.restart(s1Id);
+ replTest.restart(s2Id);
// Make sure that a new master comes up
master = replTest.getPrimary();
diff --git a/jstests/replsets/restore_term.js b/jstests/replsets/restore_term.js
index 0a0af27c08f..9e679fb67c7 100644
--- a/jstests/replsets/restore_term.js
+++ b/jstests/replsets/restore_term.js
@@ -10,59 +10,59 @@ load("jstests/replsets/rslib.js");
// storage engines.
// @tags: [requires_persistence]
(function() {
-"use strict";
+ "use strict";
-function getCurrentTerm(primary) {
- var res = primary.adminCommand({replSetGetStatus: 1});
- assert.commandWorked(res);
- return res.term;
-}
+ function getCurrentTerm(primary) {
+ var res = primary.adminCommand({replSetGetStatus: 1});
+ assert.commandWorked(res);
+ return res.term;
+ }
-var name = "restore_term";
-var rst = new ReplSetTest({name: name, nodes: 2});
+ var name = "restore_term";
+ var rst = new ReplSetTest({name: name, nodes: 2});
-rst.startSet();
-// Initiate the replset in protocol version 1.
-var conf = rst.getReplSetConfig();
-conf.settings = conf.settings || { };
-conf.settings.electionTimeoutMillis = 2000;
-conf.protocolVersion = 1;
-rst.initiate(conf);
-rst.awaitSecondaryNodes();
+ rst.startSet();
+ // Initiate the replset in protocol version 1.
+ var conf = rst.getReplSetConfig();
+ conf.settings = conf.settings || {};
+ conf.settings.electionTimeoutMillis = 2000;
+ conf.protocolVersion = 1;
+ rst.initiate(conf);
+ rst.awaitSecondaryNodes();
-var primary = rst.getPrimary();
-var primaryColl = primary.getDB("test").coll;
+ var primary = rst.getPrimary();
+ var primaryColl = primary.getDB("test").coll;
-// Current term may be greater than 1 if election race happens.
-var firstSuccessfulTerm = getCurrentTerm(primary);
-assert.gte(firstSuccessfulTerm, 1);
-assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: "majority"}}));
-assert.eq(getCurrentTerm(primary), firstSuccessfulTerm);
+ // Current term may be greater than 1 if election race happens.
+ var firstSuccessfulTerm = getCurrentTerm(primary);
+ assert.gte(firstSuccessfulTerm, 1);
+ assert.writeOK(primaryColl.insert({x: 1}, {writeConcern: {w: "majority"}}));
+ assert.eq(getCurrentTerm(primary), firstSuccessfulTerm);
-// Check that the insert op has the initial term.
-var latestOp = getLatestOp(primary);
-assert.eq(latestOp.op, "i");
-assert.eq(latestOp.t, firstSuccessfulTerm);
+ // Check that the insert op has the initial term.
+ var latestOp = getLatestOp(primary);
+ assert.eq(latestOp.op, "i");
+ assert.eq(latestOp.t, firstSuccessfulTerm);
-// Step down to increase the term.
-try {
- var res = primary.adminCommand({replSetStepDown: 0});
-} catch (err) {
- print("caught: " + err + " on stepdown");
-}
-rst.awaitSecondaryNodes();
-// The secondary became the new primary now with a higher term.
-// Since there's only one secondary who may run for election, the new term is higher by 1.
-assert.eq(getCurrentTerm(rst.getPrimary()), firstSuccessfulTerm + 1);
+ // Step down to increase the term.
+ try {
+ var res = primary.adminCommand({replSetStepDown: 0});
+ } catch (err) {
+ print("caught: " + err + " on stepdown");
+ }
+ rst.awaitSecondaryNodes();
+ // The secondary became the new primary now with a higher term.
+ // Since there's only one secondary who may run for election, the new term is higher by 1.
+ assert.eq(getCurrentTerm(rst.getPrimary()), firstSuccessfulTerm + 1);
-// Restart the replset and verify the term is the same.
-rst.stopSet(null /* signal */, true /* forRestart */);
-rst.startSet({restart: true});
-rst.awaitSecondaryNodes();
-primary = rst.getPrimary();
+ // Restart the replset and verify the term is the same.
+ rst.stopSet(null /* signal */, true /* forRestart */);
+ rst.startSet({restart: true});
+ rst.awaitSecondaryNodes();
+ primary = rst.getPrimary();
-assert.eq(primary.getDB("test").coll.find().itcount(), 1);
-// After restart, the new primary stands up with the newer term.
-assert.gte(getCurrentTerm(primary), firstSuccessfulTerm + 1);
+ assert.eq(primary.getDB("test").coll.find().itcount(), 1);
+ // After restart, the new primary stands up with the newer term.
+ assert.gte(getCurrentTerm(primary), firstSuccessfulTerm + 1);
})();
diff --git a/jstests/replsets/resync.js b/jstests/replsets/resync.js
index 032789649ba..ffcd0325951 100755..100644
--- a/jstests/replsets/resync.js
+++ b/jstests/replsets/resync.js
@@ -12,12 +12,14 @@
var nodes = replTest.nodeList();
var conns = replTest.startSet();
- var r = replTest.initiate({ "_id": "resync",
- "members": [
- {"_id": 0, "host": nodes[0], priority:1},
- {"_id": 1, "host": nodes[1], priority:0},
- {"_id": 2, "host": nodes[2], arbiterOnly:true}]
- });
+ var r = replTest.initiate({
+ "_id": "resync",
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 1},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+ });
var a_conn = conns[0];
// Make sure we have a master, and it is conns[0]
@@ -31,14 +33,14 @@
var BID = replTest.getNodeId(b_conn);
// create an oplog entry with an insert
- assert.writeOK( A.foo.insert({ x: 1 }, { writeConcern: { w: 2, wtimeout: 60000 }}));
+ assert.writeOK(A.foo.insert({x: 1}, {writeConcern: {w: 2, wtimeout: 60000}}));
assert.eq(B.foo.findOne().x, 1);
-
+
// run resync and wait for it to happen
- assert.commandWorked(b_conn.getDB("admin").runCommand({resync:1}));
+ assert.commandWorked(b_conn.getDB("admin").runCommand({resync: 1}));
replTest.awaitReplication();
replTest.awaitSecondaryNodes();
-
+
assert.eq(B.foo.findOne().x, 1);
replTest.stop(BID);
@@ -47,9 +49,8 @@
try {
// Collection scan to determine if the oplog entry from the first insert has been
// deleted yet.
- return oplog.find( { "o.x" : 1 } ).sort( { $natural : 1 } ).limit(10).itcount() == 0;
- }
- catch (except) {
+ return oplog.find({"o.x": 1}).sort({$natural: 1}).limit(10).itcount() == 0;
+ } catch (except) {
// An error is expected in the case that capped deletions blow away the position of the
// collection scan during a yield. In this case, we just try again.
var errorRegex = /CappedPositionLost/;
@@ -58,40 +59,39 @@
}
}
- // Make sure the oplog has rolled over on the primary and secondary that is up,
+ // Make sure the oplog has rolled over on the primary and secondary that is up,
// so when we bring up the other replica it is "too stale"
- for ( var cycleNumber = 0; cycleNumber < 10; cycleNumber++ ) {
+ for (var cycleNumber = 0; cycleNumber < 10; cycleNumber++) {
// insert enough to cycle oplog
var bulk = A.foo.initializeUnorderedBulkOp();
- for (var i=2; i < 10000; i++) {
- bulk.insert({x:i});
+ for (var i = 2; i < 10000; i++) {
+ bulk.insert({x: i});
}
// wait for secondary to also have its oplog cycle
- assert.writeOK(bulk.execute({ w: 1, wtimeout : 60000 }));
+ assert.writeOK(bulk.execute({w: 1, wtimeout: 60000}));
- if ( hasCycled() )
+ if (hasCycled())
break;
}
- assert( hasCycled() );
+ assert(hasCycled());
// bring node B and it will enter recovery mode because its newest oplog entry is too old
replTest.restart(BID);
-
+
// check that it is in recovery mode
assert.soon(function() {
try {
var result = b_conn.getDB("admin").runCommand({replSetGetStatus: 1});
return (result.members[1].stateStr === "RECOVERING");
- }
- catch ( e ) {
- print( e );
+ } catch (e) {
+ print(e);
}
}, "node didn't enter RECOVERING state");
// run resync and wait for it to happen
- assert.commandWorked(b_conn.getDB("admin").runCommand({resync:1}));
+ assert.commandWorked(b_conn.getDB("admin").runCommand({resync: 1}));
replTest.awaitReplication();
replTest.awaitSecondaryNodes();
assert.eq(B.foo.findOne().x, 1);
diff --git a/jstests/replsets/resync_with_write_load.js b/jstests/replsets/resync_with_write_load.js
index 392c7254bc5..07e0dc34084 100644
--- a/jstests/replsets/resync_with_write_load.js
+++ b/jstests/replsets/resync_with_write_load.js
@@ -1,8 +1,8 @@
/**
- * This test creates a 2 node replica set and then puts load on the primary with writes during
+ * This test creates a 2 node replica set and then puts load on the primary with writes during
* the resync in order to verify that all phases of the initial sync work correctly.
- *
- * We cannot test each phase of the initial sync directly but by providing constant writes we can
+ *
+ * We cannot test each phase of the initial sync directly but by providing constant writes we can
* assume that each individual phase will have data to work with, and therefore tested.
*/
var testName = "resync_with_write_load";
@@ -10,12 +10,14 @@ var replTest = new ReplSetTest({name: testName, nodes: 3, oplogSize: 100});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-var config = { "_id": testName,
- "members": [
- {"_id": 0, "host": nodes[0], priority:4},
- {"_id": 1, "host": nodes[1]},
- {"_id": 2, "host": nodes[2]}]
- };
+var config = {
+ "_id": testName,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 4},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2]}
+ ]
+};
var r = replTest.initiate(config);
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
// Make sure we have a master
@@ -33,44 +35,43 @@ assert(master == conns[0], "conns[0] assumed to be master");
assert(a_conn.host == master.host);
// create an oplog entry with an insert
-assert.writeOK( A.foo.insert({ x: 1 }, { writeConcern: { w: 1, wtimeout: 60000 }}));
+assert.writeOK(A.foo.insert({x: 1}, {writeConcern: {w: 1, wtimeout: 60000}}));
replTest.stop(BID);
print("******************** starting load for 30 secs *********************");
var work = function() {
- print("starting loadgen");
- var start=new Date().getTime();
-
- assert.writeOK(db.timeToStartTrigger.insert({_id:1}));
+ print("starting loadgen");
+ var start = new Date().getTime();
- while (true) {
- for (x=0; x < 100; x++) {
- db["a" + x].insert({a:x});
- }
-
- var runTime = (new Date().getTime() - start);
- if (runTime > 30000)
- break;
- else if (runTime < 5000) // back-off more during first 2 seconds
- sleep(50);
- else
- sleep(1);
+ assert.writeOK(db.timeToStartTrigger.insert({_id: 1}));
- }
- print("finshing loadgen");
- };
-//insert enough that resync node has to go through oplog replay in each step
+ while (true) {
+ for (x = 0; x < 100; x++) {
+ db["a" + x].insert({a: x});
+ }
+
+ var runTime = (new Date().getTime() - start);
+ if (runTime > 30000)
+ break;
+ else if (runTime < 5000) // back-off more during first 2 seconds
+ sleep(50);
+ else
+ sleep(1);
+ }
+ print("finshing loadgen");
+};
+// insert enough that resync node has to go through oplog replay in each step
var loadGen = startParallelShell(work, replTest.ports[0]);
// wait for document to appear to continue
assert.soon(function() {
try {
return 1 == master.getDB("test")["timeToStartTrigger"].count();
- } catch ( e ) {
- print( e );
+ } catch (e) {
+ print(e);
return false;
}
-}, "waited too long for start trigger", 90 * 1000 /* 90 secs */ );
+}, "waited too long for start trigger", 90 * 1000 /* 90 secs */);
print("*************** STARTING node without data ***************");
replTest.start(BID);
@@ -79,8 +80,8 @@ assert.soon(function() {
try {
var result = b_conn.getDB("admin").runCommand({replSetGetStatus: 1});
return true;
- } catch ( e ) {
- print( e );
+ } catch (e) {
+ print(e);
return false;
}
}, "node didn't come up");
@@ -97,8 +98,8 @@ try {
} catch (e) {
var aDBHash = A.runCommand("dbhash");
var bDBHash = B.runCommand("dbhash");
- assert.eq(aDBHash.md5, bDBHash.md5,
- "hashes differ: " + tojson(aDBHash) + " to " + tojson(bDBHash));
+ assert.eq(
+ aDBHash.md5, bDBHash.md5, "hashes differ: " + tojson(aDBHash) + " to " + tojson(bDBHash));
}
replTest.stopSet();
diff --git a/jstests/replsets/rollback.js b/jstests/replsets/rollback.js
index 6f8f0154481..d0c162ea98d 100644
--- a/jstests/replsets/rollback.js
+++ b/jstests/replsets/rollback.js
@@ -19,7 +19,7 @@
*/
load("jstests/replsets/rslib.js");
-(function () {
+(function() {
"use strict";
// helper function for verifying contents at the end of the test
var checkFinalResults = function(db) {
@@ -32,15 +32,17 @@ load("jstests/replsets/rslib.js");
assert.eq(8, x[4].q);
};
- var replTest = new ReplSetTest({ name: 'unicomplex', nodes: 3, oplogSize: 1, useBridge: true });
+ var replTest = new ReplSetTest({name: 'unicomplex', nodes: 3, oplogSize: 1, useBridge: true});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
- var r = replTest.initiate({ "_id": "unicomplex",
+ var r = replTest.initiate({
+ "_id": "unicomplex",
"members": [
- { "_id": 0, "host": nodes[0], "priority": 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
+ {"_id": 0, "host": nodes[0], "priority": 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
});
// Make sure we have a master
@@ -63,18 +65,18 @@ load("jstests/replsets/rslib.js");
if (new Date() % 2 == 0) {
jsTest.log("ROLLING OPLOG AS PART OF TEST (we only do this sometimes)");
var pass = 1;
- var first = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0];
- a.roll.insert({ x: 1 });
+ var first = a.getSisterDB("local").oplog.rs.find().sort({$natural: 1}).limit(1)[0];
+ a.roll.insert({x: 1});
while (1) {
var bulk = a.roll.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; i++) {
- bulk.find({}).update({ $inc: { x: 1 }});
+ bulk.find({}).update({$inc: {x: 1}});
}
- // unlikely secondary isn't keeping up, but let's avoid possible intermittent
+ // unlikely secondary isn't keeping up, but let's avoid possible intermittent
// issues with that.
- assert.writeOK(bulk.execute({ w: 2 }));
+ assert.writeOK(bulk.execute({w: 2}));
- var op = a.getSisterDB("local").oplog.rs.find().sort({ $natural: 1 }).limit(1)[0];
+ var op = a.getSisterDB("local").oplog.rs.find().sort({$natural: 1}).limit(1)[0];
if (tojson(op.h) != tojson(first.h)) {
printjson(op);
printjson(first);
@@ -83,14 +85,13 @@ load("jstests/replsets/rslib.js");
pass++;
}
jsTest.log("PASSES FOR OPLOG ROLL: " + pass);
- }
- else {
+ } else {
jsTest.log("NO ROLL");
}
- assert.writeOK(a.bar.insert({ q: 1, a: "foo" }));
- assert.writeOK(a.bar.insert({ q: 2, a: "foo", x: 1 }));
- assert.writeOK(a.bar.insert({ q: 3, bb: 9, a: "foo" }, { writeConcern: { w: 2 } }));
+ assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
+ assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
+ assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}, {writeConcern: {w: 2}}));
assert.eq(a.bar.count(), 3, "a.count");
assert.eq(b.bar.count(), 3, "b.count");
@@ -99,11 +100,17 @@ load("jstests/replsets/rslib.js");
conns[0].disconnect(conns[2]);
// Wait for election and drain mode to finish on node 1.
- assert.soon(function () { try { return B.isMaster().ismaster; } catch(e) { return false; } });
+ assert.soon(function() {
+ try {
+ return B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+ });
// These 97 documents will be rolled back eventually.
for (var i = 4; i <= 100; i++) {
- assert.writeOK(b.bar.insert({ q: i }));
+ assert.writeOK(b.bar.insert({q: i}));
}
assert.eq(100, b.bar.count(), "u.count");
@@ -113,13 +120,25 @@ load("jstests/replsets/rslib.js");
conns[0].reconnect(conns[2]);
jsTest.log("*************** B ****************");
- assert.soon(function () { try { return !B.isMaster().ismaster; } catch(e) { return false; } });
+ assert.soon(function() {
+ try {
+ return !B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+ });
jsTest.log("*************** A ****************");
- assert.soon(function () { try { return A.isMaster().ismaster; } catch(e) { return false; } });
+ assert.soon(function() {
+ try {
+ return A.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+ });
assert(a.bar.count() == 3, "t is 3");
- assert.writeOK(a.bar.insert({ q: 7 }));
- assert.writeOK(a.bar.insert({ q: 8 }));
+ assert.writeOK(a.bar.insert({q: 7}));
+ assert.writeOK(a.bar.insert({q: 8}));
// A is 1 2 3 7 8
// B is 1 2 3 4 5 6 ... 100
@@ -138,8 +157,10 @@ load("jstests/replsets/rslib.js");
var connectionsCreatedOnPrimaryAfterRollback = a.serverStatus().connections.totalCreated;
var connectionsCreatedOnPrimaryDuringRollback =
connectionsCreatedOnPrimaryAfterRollback - connectionsCreatedOnPrimaryBeforeRollback;
- jsTest.log('connections created during rollback = ' + connectionsCreatedOnPrimaryDuringRollback);
- assert.lt(connectionsCreatedOnPrimaryDuringRollback, 50,
+ jsTest.log('connections created during rollback = ' +
+ connectionsCreatedOnPrimaryDuringRollback);
+ assert.lt(connectionsCreatedOnPrimaryDuringRollback,
+ 50,
'excessive number of connections made by secondary to primary during rollback');
replTest.stopSet(15);
diff --git a/jstests/replsets/rollback2.js b/jstests/replsets/rollback2.js
index 3334eada6ce..911a9d3e128 100644
--- a/jstests/replsets/rollback2.js
+++ b/jstests/replsets/rollback2.js
@@ -2,7 +2,7 @@
* Basic test of a succesful replica set rollback for CRUD operations.
*
* This tests sets up a 3 node set, data-bearing nodes A and B and an arbiter.
- *
+ *
* 1. A is elected PRIMARY and receives several writes, which are propagated to B.
* 2. A is isolated from the rest of the set and B is elected PRIMARY.
* 3. B receives several operations, which will later be undone during rollback.
@@ -13,7 +13,7 @@
*/
load("jstests/replsets/rslib.js");
-(function () {
+(function() {
"use strict";
// helper function for verifying contents at the end of the test
var checkFinalResults = function(db) {
@@ -28,16 +28,18 @@ load("jstests/replsets/rslib.js");
};
var name = "rollback2js";
- var replTest = new ReplSetTest({ name: name, nodes: 3, useBridge: true });
+ var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
- replTest.initiate({ "_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}
- ]});
+ replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+ });
// Make sure we have a master and that that master is node A
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
@@ -56,17 +58,17 @@ load("jstests/replsets/rslib.js");
var b = b_conn.getDB("foo");
// initial data for both nodes
- assert.writeOK(a.bar.insert({ q:0}));
- assert.writeOK(a.bar.insert({ q: 1, a: "foo" }));
- assert.writeOK(a.bar.insert({ q: 2, a: "foo", x: 1 }));
- assert.writeOK(a.bar.insert({ q: 3, bb: 9, a: "foo" }));
- assert.writeOK(a.bar.insert({ q: 40, a: 1 }));
- assert.writeOK(a.bar.insert({ q: 40, a: 2 }));
- assert.writeOK(a.bar.insert({ q: 70, txt: 'willremove' }));
- a.createCollection("kap", { capped: true, size: 5000 });
- assert.writeOK(a.kap.insert({ foo: 1 }));
+ assert.writeOK(a.bar.insert({q: 0}));
+ assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
+ assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
+ assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}));
+ assert.writeOK(a.bar.insert({q: 40, a: 1}));
+ assert.writeOK(a.bar.insert({q: 40, a: 2}));
+ assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'}));
+ a.createCollection("kap", {capped: true, size: 5000});
+ assert.writeOK(a.kap.insert({foo: 1}));
// going back to empty on capped is a special case and must be tested
- a.createCollection("kap2", { capped: true, size: 5501 });
+ a.createCollection("kap2", {capped: true, size: 5501});
replTest.awaitReplication();
var timeout;
@@ -78,40 +80,52 @@ load("jstests/replsets/rslib.js");
// isolate A and wait for B to become master
conns[0].disconnect(conns[1]);
conns[0].disconnect(conns[2]);
- assert.soon(function () {
+ assert.soon(function() {
try {
return B.isMaster().ismaster;
- } catch(e) {
+ } catch (e) {
return false;
}
}, "node B did not become master as expected", timeout);
// do operations on B and B alone, these will be rolled back
- assert.writeOK(b.bar.insert({ q: 4 }));
- assert.writeOK(b.bar.update({ q: 3 }, { q: 3, rb: true }));
- assert.writeOK(b.bar.remove({ q: 40 })); // multi remove test
- assert.writeOK(b.bar.update({ q: 2 }, { q: 39, rb: true }));
+ assert.writeOK(b.bar.insert({q: 4}));
+ assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true}));
+ assert.writeOK(b.bar.remove({q: 40})); // multi remove test
+ assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true}));
// rolling back a delete will involve reinserting the item(s)
- assert.writeOK(b.bar.remove({ q: 1 }));
- assert.writeOK(b.bar.update({ q: 0 }, { $inc: { y: 1} }));
- assert.writeOK(b.kap.insert({ foo: 2 }));
- assert.writeOK(b.kap2.insert({ foo: 2 }));
+ assert.writeOK(b.bar.remove({q: 1}));
+ assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}}));
+ assert.writeOK(b.kap.insert({foo: 2}));
+ assert.writeOK(b.kap2.insert({foo: 2}));
// create a collection (need to roll back the whole thing)
- assert.writeOK(b.newcoll.insert({ a: true }));
+ assert.writeOK(b.newcoll.insert({a: true}));
// create a new empty collection (need to roll back the whole thing)
b.createCollection("abc");
// isolate B, bring A back into contact with the arbiter, then wait for A to become master
// insert new data into A so that B will need to rollback when it reconnects to A
conns[1].disconnect(conns[2]);
- assert.soon(function () { try { return !B.isMaster().ismaster; } catch(e) { return false; } });
+ assert.soon(function() {
+ try {
+ return !B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+ });
conns[0].reconnect(conns[2]);
- assert.soon(function () { try { return A.isMaster().ismaster; } catch(e) { return false; } });
+ assert.soon(function() {
+ try {
+ return A.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+ });
assert(a.bar.count() >= 1, "count check");
- assert.writeOK(a.bar.insert({ txt: 'foo' }));
- assert.writeOK(a.bar.remove({ q: 70 }));
- assert.writeOK(a.bar.update({ q: 0 }, { $inc: { y: 33} }));
+ assert.writeOK(a.bar.insert({txt: 'foo'}));
+ assert.writeOK(a.bar.remove({q: 70}));
+ assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));
// A is 1 2 3 7 8
// B is 1 2 3 4 5 6
diff --git a/jstests/replsets/rollback3.js b/jstests/replsets/rollback3.js
index 9abac9c3639..740f9caa383 100755..100644
--- a/jstests/replsets/rollback3.js
+++ b/jstests/replsets/rollback3.js
@@ -2,7 +2,7 @@
* Basic test of a succesful replica set rollback for DDL operations.
*
* This tests sets up a 3 node set, data-bearing nodes A and B and an arbiter.
- *
+ *
* 1. A is elected PRIMARY and receives several writes, which are propagated to B.
* 2. A is isolated from the rest of the set and B is elected PRIMARY.
* 3. B receives several operations, which will later be undone during rollback.
@@ -13,7 +13,7 @@
*/
load("jstests/replsets/rslib.js");
-(function () {
+(function() {
"use strict";
// helper function for verifying contents at the end of the test
var checkFinalResults = function(db) {
@@ -33,16 +33,18 @@ load("jstests/replsets/rslib.js");
};
var name = "rollback2js";
- var replTest = new ReplSetTest({ name: name, nodes: 3, useBridge: true });
+ var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
- replTest.initiate({ "_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}
- ]});
+ replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+ });
// Make sure we have a master and that that master is node A
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
@@ -61,42 +63,48 @@ load("jstests/replsets/rslib.js");
var b = b_conn.getDB("foo");
// initial data for both nodes
- assert.writeOK(a.b.insert({ x: 1 }));
- a.b.ensureIndex({ x: 1 });
- assert.writeOK(a.oldname.insert({ y: 1 }));
- assert.writeOK(a.oldname.insert({ y: 2 }));
- a.oldname.ensureIndex({ y: 1 },true);
- assert.writeOK(a.bar.insert({ q:0}));
- assert.writeOK(a.bar.insert({ q: 1, a: "foo" }));
- assert.writeOK(a.bar.insert({ q: 2, a: "foo", x: 1 }));
- assert.writeOK(a.bar.insert({ q: 3, bb: 9, a: "foo" }));
- assert.writeOK(a.bar.insert({ q: 40333333, a: 1 }));
+ assert.writeOK(a.b.insert({x: 1}));
+ a.b.ensureIndex({x: 1});
+ assert.writeOK(a.oldname.insert({y: 1}));
+ assert.writeOK(a.oldname.insert({y: 2}));
+ a.oldname.ensureIndex({y: 1}, true);
+ assert.writeOK(a.bar.insert({q: 0}));
+ assert.writeOK(a.bar.insert({q: 1, a: "foo"}));
+ assert.writeOK(a.bar.insert({q: 2, a: "foo", x: 1}));
+ assert.writeOK(a.bar.insert({q: 3, bb: 9, a: "foo"}));
+ assert.writeOK(a.bar.insert({q: 40333333, a: 1}));
for (var i = 0; i < 200; i++) {
- assert.writeOK(a.bar.insert({ i: i }));
+ assert.writeOK(a.bar.insert({i: i}));
}
- assert.writeOK(a.bar.insert({ q: 40, a: 2 }));
- assert.writeOK(a.bar.insert({ q: 70, txt: 'willremove' }));
- a.createCollection("kap", { capped: true, size: 5000 });
- assert.writeOK(a.kap.insert({ foo: 1 }));
+ assert.writeOK(a.bar.insert({q: 40, a: 2}));
+ assert.writeOK(a.bar.insert({q: 70, txt: 'willremove'}));
+ a.createCollection("kap", {capped: true, size: 5000});
+ assert.writeOK(a.kap.insert({foo: 1}));
replTest.awaitReplication();
// isolate A and wait for B to become master
conns[0].disconnect(conns[1]);
conns[0].disconnect(conns[2]);
- assert.soon(function () { try { return B.isMaster().ismaster; } catch(e) { return false; } });
-
+ assert.soon(function() {
+ try {
+ return B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+ });
+
// do operations on B and B alone, these will be rolled back
- assert.writeOK(b.bar.insert({ q: 4 }));
- assert.writeOK(b.bar.update({ q: 3 }, { q: 3, rb: true }));
- assert.writeOK(b.bar.remove({ q: 40 })); // multi remove test
- assert.writeOK(b.bar.update({ q: 2 }, { q: 39, rb: true }));
+ assert.writeOK(b.bar.insert({q: 4}));
+ assert.writeOK(b.bar.update({q: 3}, {q: 3, rb: true}));
+ assert.writeOK(b.bar.remove({q: 40})); // multi remove test
+ assert.writeOK(b.bar.update({q: 2}, {q: 39, rb: true}));
// rolling back a delete will involve reinserting the item(s)
- assert.writeOK(b.bar.remove({ q: 1 }));
- assert.writeOK(b.bar.update({ q: 0 }, { $inc: { y: 1} }));
- assert.writeOK(b.kap.insert({ foo: 2 }));
- assert.writeOK(b.kap2.insert({ foo: 2 }));
+ assert.writeOK(b.bar.remove({q: 1}));
+ assert.writeOK(b.bar.update({q: 0}, {$inc: {y: 1}}));
+ assert.writeOK(b.kap.insert({foo: 2}));
+ assert.writeOK(b.kap2.insert({foo: 2}));
// create a collection (need to roll back the whole thing)
- assert.writeOK(b.newcoll.insert({ a: true }));
+ assert.writeOK(b.newcoll.insert({a: true}));
// create a new empty collection (need to roll back the whole thing)
b.createCollection("abc");
// drop a collection - we'll need all its data back!
@@ -107,22 +115,34 @@ load("jstests/replsets/rslib.js");
b.oldname.renameCollection("newname");
b.newname.renameCollection("fooname");
assert(b.fooname.count() > 0, "count rename");
- // test roll back (drop) a whole database
+ // test roll back (drop) a whole database
var abc = b.getSisterDB("abc");
- assert.writeOK(abc.foo.insert({ x: 1 }));
- assert.writeOK(abc.bar.insert({ y: 999 }));
+ assert.writeOK(abc.foo.insert({x: 1}));
+ assert.writeOK(abc.bar.insert({y: 999}));
// isolate B, bring A back into contact with the arbiter, then wait for A to become master
// insert new data into A so that B will need to rollback when it reconnects to A
conns[1].disconnect(conns[2]);
- assert.soon(function () { try { return !B.isMaster().ismaster; } catch(e) { return false; } });
+ assert.soon(function() {
+ try {
+ return !B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+ });
conns[0].reconnect(conns[2]);
- assert.soon(function () { try { return A.isMaster().ismaster; } catch(e) { return false; } });
+ assert.soon(function() {
+ try {
+ return A.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+ });
assert(a.bar.count() >= 1, "count check");
- assert.writeOK(a.bar.insert({ txt: 'foo' }));
- assert.writeOK(a.bar.remove({ q: 70 }));
- assert.writeOK(a.bar.update({ q: 0 }, { $inc: { y: 33} }));
+ assert.writeOK(a.bar.insert({txt: 'foo'}));
+ assert.writeOK(a.bar.remove({q: 70}));
+ assert.writeOK(a.bar.update({q: 0}, {$inc: {y: 33}}));
// A is 1 2 3 7 8
// B is 1 2 3 4 5 6
diff --git a/jstests/replsets/rollback5.js b/jstests/replsets/rollback5.js
index 07f02419a59..e63b7ab34ea 100644
--- a/jstests/replsets/rollback5.js
+++ b/jstests/replsets/rollback5.js
@@ -9,16 +9,18 @@
// run on ephemeral storage engines.
// @tags: [requires_persistence]
-var replTest = new ReplSetTest({ name: 'rollback5', nodes: 3 });
+var replTest = new ReplSetTest({name: 'rollback5', nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-var r = replTest.initiate({ "_id": "rollback5",
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
+var r = replTest.initiate({
+ "_id": "rollback5",
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
// Make sure we have a master
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
@@ -37,26 +39,35 @@ assert(master == conns[0], "conns[0] assumed to be master");
assert(a_conn.host == master.host);
// Make sure we have an arbiter
-assert.soon(function () {
- res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 });
+assert.soon(function() {
+ res = conns[2].getDB("admin").runCommand({replSetGetStatus: 1});
return res.myState == 7;
}, "Arbiter failed to initialize.");
-var options = { writeConcern: { w: 2, wtimeout: 60000 }, upsert: true };
-assert.writeOK(A.foo.update({ key: 'value1' }, { $set: { req: 'req' }}, options));
+var options = {
+ writeConcern: {w: 2, wtimeout: 60000},
+ upsert: true
+};
+assert.writeOK(A.foo.update({key: 'value1'}, {$set: {req: 'req'}}, options));
replTest.stop(AID);
master = replTest.getPrimary();
assert(b_conn.host == master.host);
-options = { writeConcern: { w: 1, wtimeout: 60000 }, upsert: true };
-assert.writeOK(B.foo.update({key:'value1'}, {$set: {res: 'res'}}, options));
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
+assert.writeOK(B.foo.update({key: 'value1'}, {$set: {res: 'res'}}, options));
replTest.stop(BID);
replTest.restart(AID);
master = replTest.getPrimary();
assert(a_conn.host == master.host);
-options = { writeConcern: { w: 1, wtimeout: 60000 }, upsert: true };
-assert.writeOK(A.foo.update({ key: 'value2' }, { $set: { req: 'req' }}, options));
-replTest.restart(BID); // should rollback
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
+assert.writeOK(A.foo.update({key: 'value2'}, {$set: {req: 'req'}}, options));
+replTest.restart(BID); // should rollback
reconnect(B);
print("BEFORE------------------");
@@ -69,12 +80,12 @@ print("AFTER------------------");
printjson(A.foo.find().toArray());
assert.eq(2, A.foo.count());
-assert.eq('req', A.foo.findOne({key:'value1'}).req);
-assert.eq(null, A.foo.findOne({key:'value1'}).res);
+assert.eq('req', A.foo.findOne({key: 'value1'}).req);
+assert.eq(null, A.foo.findOne({key: 'value1'}).res);
reconnect(B);
assert.eq(2, B.foo.count());
-assert.eq('req', B.foo.findOne({key:'value1'}).req);
-assert.eq(null, B.foo.findOne({key:'value1'}).res);
+assert.eq('req', B.foo.findOne({key: 'value1'}).req);
+assert.eq(null, B.foo.findOne({key: 'value1'}).res);
// check here for rollback files
var rollbackDir = Bpath + "rollback/";
@@ -83,7 +94,6 @@ assert(pathExists(rollbackDir), "rollback directory was not created!");
print("rollback5.js SUCCESS");
replTest.stopSet(15);
-
function wait(f) {
var n = 0;
while (!f()) {
@@ -98,14 +108,13 @@ function wait(f) {
}
function reconnect(a) {
- wait(function() {
- try {
- a.bar.stats();
- return true;
- } catch(e) {
- print(e);
- return false;
- }
+ wait(function() {
+ try {
+ a.bar.stats();
+ return true;
+ } catch (e) {
+ print(e);
+ return false;
+ }
});
}
-
diff --git a/jstests/replsets/rollback_auth.js b/jstests/replsets/rollback_auth.js
index 77684550d3e..0c0b35b91ed 100644
--- a/jstests/replsets/rollback_auth.js
+++ b/jstests/replsets/rollback_auth.js
@@ -10,7 +10,7 @@
// run on ephemeral storage engines.
// @tags: [requires_persistence]
-(function () {
+(function() {
"use strict";
// helper function for verifying contents at the end of the test
var checkFinalResults = function(db) {
@@ -26,17 +26,17 @@
jsTestLog("Setting up replica set");
var name = "rollbackAuth";
- var replTest = new ReplSetTest({name: name,
- nodes: 3,
- keyFile: 'jstests/libs/key1' });
+ var replTest = new ReplSetTest({name: name, nodes: 3, keyFile: 'jstests/libs/key1'});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
- replTest.initiate({ "_id": "rollbackAuth",
- "members": [
- { "_id": 0, "host": nodes[0], "priority": 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}
- ]});
+ replTest.initiate({
+ "_id": "rollbackAuth",
+ "members": [
+ {"_id": 0, "host": nodes[0], "priority": 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+ });
// Make sure we have a master
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
@@ -53,45 +53,49 @@
assert.eq(a_conn, master);
// Make sure we have an arbiter
- assert.soon(function () {
- var res = conns[2].getDB("admin").runCommand({ replSetGetStatus: 1 });
- return res.myState == 7;
- }, "Arbiter failed to initialize.");
-
+ assert.soon(function() {
+ var res = conns[2].getDB("admin").runCommand({replSetGetStatus: 1});
+ return res.myState == 7;
+ }, "Arbiter failed to initialize.");
jsTestLog("Creating initial data");
// Create collections that will be used in test
A.createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
A.auth('admin', 'pwd');
- a.foo.insert({a:1});
- a.bar.insert({a:1});
- a.baz.insert({a:1});
- a.foobar.insert({a:1});
+ a.foo.insert({a: 1});
+ a.bar.insert({a: 1});
+ a.baz.insert({a: 1});
+ a.foobar.insert({a: 1});
// Set up user admin user
A.createUser({user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
- A.auth('userAdmin', 'pwd'); // Logs out of admin@admin user
+ A.auth('userAdmin', 'pwd'); // Logs out of admin@admin user
B.auth('userAdmin', 'pwd');
// Create a basic user and role
- A.createRole({role: 'replStatusRole', // To make awaitReplication() work
- roles: [],
- privileges: [{resource: {cluster: true}, actions: ['replSetGetStatus']},
- {resource: {db: 'local', collection: ''}, actions: ['find']},
- {resource: {db: 'local', collection: 'system.replset'},
- actions: ['find']}]});
- a.createRole({role: 'myRole', roles: [], privileges: [{resource: {db: 'test', collection: ''},
- actions: ['dbStats']}]});
- a.createUser({user: 'spencer',
- pwd: 'pwd',
- roles: ['myRole', {role: 'replStatusRole', db: 'admin'}]});
+ A.createRole({
+ role: 'replStatusRole', // To make awaitReplication() work
+ roles: [],
+ privileges: [
+ {resource: {cluster: true}, actions: ['replSetGetStatus']},
+ {resource: {db: 'local', collection: ''}, actions: ['find']},
+ {resource: {db: 'local', collection: 'system.replset'}, actions: ['find']}
+ ]
+ });
+ a.createRole({
+ role: 'myRole',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: ''}, actions: ['dbStats']}]
+ });
+ a.createUser(
+ {user: 'spencer', pwd: 'pwd', roles: ['myRole', {role: 'replStatusRole', db: 'admin'}]});
assert(a.auth('spencer', 'pwd'));
// wait for secondary to get this data
assert.soon(function() {
- return b.auth('spencer', 'pwd');
- });
+ return b.auth('spencer', 'pwd');
+ });
assert.commandWorked(a.runCommand({dbStats: 1}));
assert.commandFailedWithCode(a.runCommand({collStats: 'foo'}), authzErrorCode);
@@ -105,30 +109,34 @@
assert.commandFailedWithCode(b.runCommand({collStats: 'baz'}), authzErrorCode);
assert.commandFailedWithCode(b.runCommand({collStats: 'foobar'}), authzErrorCode);
-
jsTestLog("Doing writes that will eventually be rolled back");
// down A and wait for B to become master
replTest.stop(0);
- assert.soon(function () { try { return B.isMaster().ismaster; } catch(e) { return false; } },
- "B didn't become master",
- 60000,
- 1000);
+ assert.soon(function() {
+ try {
+ return B.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+ }, "B didn't become master", 60000, 1000);
printjson(b.adminCommand('replSetGetStatus'));
-
// Modify the the user and role in a way that will be rolled back.
- b.grantPrivilegesToRole('myRole',
- [{resource: {db: 'test', collection: 'foo'}, actions: ['collStats']}],
- {}); // Default write concern will wait for majority, which will time out.
- b.createRole({role: 'temporaryRole',
- roles: [],
- privileges: [{resource: {db: 'test', collection: 'bar'}, actions: ['collStats']}]},
- {}); // Default write concern will wait for majority, which will time out.
+ b.grantPrivilegesToRole(
+ 'myRole',
+ [{resource: {db: 'test', collection: 'foo'}, actions: ['collStats']}],
+ {}); // Default write concern will wait for majority, which will time out.
+ b.createRole(
+ {
+ role: 'temporaryRole',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: 'bar'}, actions: ['collStats']}]
+ },
+ {}); // Default write concern will wait for majority, which will time out.
b.grantRolesToUser('spencer',
['temporaryRole'],
- {}); // Default write concern will wait for majority, which will time out.
-
+ {}); // Default write concern will wait for majority, which will time out.
assert.commandWorked(b.runCommand({dbStats: 1}));
assert.commandWorked(b.runCommand({collStats: 'foo'}));
@@ -141,10 +149,13 @@
replTest.stop(1);
replTest.restart(0);
- assert.soon(function () { try { return A.isMaster().ismaster; } catch(e) { return false; } },
- "A didn't become master",
- 60000,
- 1000);
+ assert.soon(function() {
+ try {
+ return A.isMaster().ismaster;
+ } catch (e) {
+ return false;
+ }
+ }, "A didn't become master", 60000, 1000);
// A should not have the new data as it was down
assert.commandWorked(a.runCommand({dbStats: 1}));
@@ -158,18 +169,17 @@
A.auth('userAdmin', 'pwd');
// Default write concern will wait for majority, which would time out
// so we override it with an empty write concern
- a.grantPrivilegesToRole('myRole',
- [{resource: {db: 'test', collection: 'baz'}, actions: ['collStats']}],
- {});
-
- a.createRole({role: 'persistentRole',
- roles: [],
- privileges: [{resource: {db: 'test', collection: 'foobar'},
- actions: ['collStats']}]},
- {});
- a.grantRolesToUser('spencer',
- ['persistentRole'],
- {});
+ a.grantPrivilegesToRole(
+ 'myRole', [{resource: {db: 'test', collection: 'baz'}, actions: ['collStats']}], {});
+
+ a.createRole(
+ {
+ role: 'persistentRole',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: 'foobar'}, actions: ['collStats']}]
+ },
+ {});
+ a.grantRolesToUser('spencer', ['persistentRole'], {});
A.logout();
a.auth('spencer', 'pwd');
@@ -183,10 +193,12 @@
replTest.restart(1);
authutil.asCluster(replTest.nodes,
'jstests/libs/key1',
- function() { replTest.awaitReplication(); });
+ function() {
+ replTest.awaitReplication();
+ });
assert.soon(function() {
- return b.auth('spencer', 'pwd');
- });
+ return b.auth('spencer', 'pwd');
+ });
// Now both A and B should agree
checkFinalResults(a);
checkFinalResults(b);
diff --git a/jstests/replsets/rollback_cmd_unrollbackable.js b/jstests/replsets/rollback_cmd_unrollbackable.js
index a4c3a35b658..801d4c285a7 100644
--- a/jstests/replsets/rollback_cmd_unrollbackable.js
+++ b/jstests/replsets/rollback_cmd_unrollbackable.js
@@ -12,12 +12,14 @@ var name = "rollback_cmd_unrollbackable";
var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
var a_conn = conns[0];
var b_conn = conns[1];
var AID = replTest.getNodeId(a_conn);
@@ -28,7 +30,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
+var options = {
+ writeConcern: {w: 2, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
@@ -37,13 +42,18 @@ replTest.stop(AID);
// insert a fake oplog entry with a non-rollbackworthy command
master = replTest.getPrimary();
assert(b_conn.host === master.host, "b_conn assumed to be master");
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
// another insert to set minvalid ahead
assert.writeOK(b_conn.getDB(name).foo.insert({x: 123}));
var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0];
oplog_entry["ts"] = Timestamp(oplog_entry["ts"].t, oplog_entry["ts"].i + 1);
oplog_entry["op"] = "c";
-oplog_entry["o"] = {"replSetSyncFrom": 1};
+oplog_entry["o"] = {
+ "replSetSyncFrom": 1
+};
assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry));
// shut down B and bring back the original master
@@ -53,7 +63,10 @@ master = replTest.getPrimary();
assert(a_conn.host === master.host, "a_conn assumed to be master");
// do a write so that B will have to roll back
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options));
// restart B, which should attempt to rollback but then fassert.
@@ -64,4 +77,4 @@ assert.soon(function() {
return rawMongoProgramOutput().match(msg);
}, "Did not see a log entry about skipping the nonrollbackable command during rollback");
-replTest.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+replTest.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
diff --git a/jstests/replsets/rollback_collMod_PowerOf2Sizes.js b/jstests/replsets/rollback_collMod_PowerOf2Sizes.js
index bf7799895f9..deab19b2f09 100644
--- a/jstests/replsets/rollback_collMod_PowerOf2Sizes.js
+++ b/jstests/replsets/rollback_collMod_PowerOf2Sizes.js
@@ -7,78 +7,88 @@
// run on ephemeral storage engines.
// @tags: [requires_persistence]
(function() {
-"use strict";
+ "use strict";
-function getOptions(conn) {
- return conn.getDB(name).foo.exists().options;
-}
+ function getOptions(conn) {
+ return conn.getDB(name).foo.exists().options;
+ }
-// Set up a set and grab things for later.
-var name = "rollback_collMod_PowerOf2Sizes";
-var replTest = new ReplSetTest({name: name, nodes: 3});
-var nodes = replTest.nodeList();
-var conns = replTest.startSet();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0] },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
-// Get master and do an initial write.
-var master = replTest.getPrimary();
-var a_conn = master;
-var slaves = replTest.liveNodes.slaves;
-var b_conn = slaves[0];
-var AID = replTest.getNodeId(a_conn);
-var BID = replTest.getNodeId(b_conn);
+ // Set up a set and grab things for later.
+ var name = "rollback_collMod_PowerOf2Sizes";
+ var replTest = new ReplSetTest({name: name, nodes: 3});
+ var nodes = replTest.nodeList();
+ var conns = replTest.startSet();
+ replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+ });
+ // Get master and do an initial write.
+ var master = replTest.getPrimary();
+ var a_conn = master;
+ var slaves = replTest.liveNodes.slaves;
+ var b_conn = slaves[0];
+ var AID = replTest.getNodeId(a_conn);
+ var BID = replTest.getNodeId(b_conn);
-// Create collection with custom options.
-var originalCollectionOptions = {flags: 0,
- validator: {x: {$exists: 1}},
- validationLevel: "moderate",
- validationAction: "warn"};
-assert.commandWorked(a_conn.getDB(name).createCollection('foo', originalCollectionOptions));
+ // Create collection with custom options.
+ var originalCollectionOptions = {
+ flags: 0,
+ validator: {x: {$exists: 1}},
+ validationLevel: "moderate",
+ validationAction: "warn"
+ };
+ assert.commandWorked(a_conn.getDB(name).createCollection('foo', originalCollectionOptions));
-var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
-assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
+ var options = {
+ writeConcern: {w: 2, wtimeout: 60000},
+ upsert: true
+ };
+ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
-assert.eq(getOptions(a_conn), originalCollectionOptions);
-assert.eq(getOptions(b_conn), originalCollectionOptions);
+ assert.eq(getOptions(a_conn), originalCollectionOptions);
+ assert.eq(getOptions(b_conn), originalCollectionOptions);
-// Stop the slave so it never sees the collMod.
-replTest.stop(BID);
+ // Stop the slave so it never sees the collMod.
+ replTest.stop(BID);
-// Run the collMod only on A.
-assert.commandWorked(a_conn.getDB(name).runCommand({collMod: "foo",
- usePowerOf2Sizes: false,
- noPadding: true,
- validator: {a: 1},
- validationLevel: "moderate",
- validationAction: "warn"}));
-assert.eq(getOptions(a_conn), {flags: 2,
- validator: {a: 1},
- validationLevel: "moderate",
- validationAction: "warn"});
+ // Run the collMod only on A.
+ assert.commandWorked(a_conn.getDB(name).runCommand({
+ collMod: "foo",
+ usePowerOf2Sizes: false,
+ noPadding: true,
+ validator: {a: 1},
+ validationLevel: "moderate",
+ validationAction: "warn"
+ }));
+ assert.eq(
+ getOptions(a_conn),
+ {flags: 2, validator: {a: 1}, validationLevel: "moderate", validationAction: "warn"});
-// Shut down A and fail over to B.
-replTest.stop(AID);
-replTest.restart(BID);
-master = replTest.getPrimary();
-assert.eq(b_conn.host, master.host, "b_conn assumed to be master");
-b_conn = master;
+ // Shut down A and fail over to B.
+ replTest.stop(AID);
+ replTest.restart(BID);
+ master = replTest.getPrimary();
+ assert.eq(b_conn.host, master.host, "b_conn assumed to be master");
+ b_conn = master;
-// Do a write on B so that A will have to roll back.
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
-assert.writeOK(b_conn.getDB(name).foo.insert({x: 2}, options));
+ // Do a write on B so that A will have to roll back.
+ options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+ };
+ assert.writeOK(b_conn.getDB(name).foo.insert({x: 2}, options));
-// Restart A, which should rollback the collMod before becoming primary.
-replTest.restart(AID);
-try {
- b_conn.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60});
-}
-catch (e) {
- // Ignore network disconnect.
-}
-replTest.waitForState(a_conn, ReplSetTest.State.PRIMARY);
-assert.eq(getOptions(a_conn), originalCollectionOptions);
+ // Restart A, which should rollback the collMod before becoming primary.
+ replTest.restart(AID);
+ try {
+ b_conn.adminCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60});
+ } catch (e) {
+ // Ignore network disconnect.
+ }
+ replTest.waitForState(a_conn, ReplSetTest.State.PRIMARY);
+ assert.eq(getOptions(a_conn), originalCollectionOptions);
}());
diff --git a/jstests/replsets/rollback_collMod_fatal.js b/jstests/replsets/rollback_collMod_fatal.js
index 770165cf88c..c907213f05d 100644
--- a/jstests/replsets/rollback_collMod_fatal.js
+++ b/jstests/replsets/rollback_collMod_fatal.js
@@ -12,12 +12,14 @@ var name = "rollback_collMod_fatal";
var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
var a_conn = conns[0];
var b_conn = conns[1];
var AID = replTest.getNodeId(a_conn);
@@ -29,7 +31,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
+var options = {
+ writeConcern: {w: 2, wtimeout: 60000},
+ upsert: true
+};
a_conn.getDB(name).foo.ensureIndex({x: 1}, {expireAfterSeconds: 3600});
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
@@ -39,9 +44,8 @@ replTest.stop(AID);
// do a collMod altering TTL which should cause FATAL when rolled back
master = replTest.getPrimary();
assert(b_conn.host === master.host, "b_conn assumed to be master");
-assert.commandWorked(b_conn.getDB(name).runCommand({collMod: "foo",
- index: {keyPattern: {x:1},
- expireAfterSeconds: 10}}));
+assert.commandWorked(b_conn.getDB(name).runCommand(
+ {collMod: "foo", index: {keyPattern: {x: 1}, expireAfterSeconds: 10}}));
// shut down B and bring back the original master
replTest.stop(BID);
@@ -50,7 +54,10 @@ master = replTest.getPrimary();
assert(a_conn.host === master.host, "a_conn assumed to be master");
// do a write so that B will have to roll back
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options));
// restart B, which should attempt rollback but then fassert
@@ -60,4 +67,4 @@ assert.soon(function() {
return rawMongoProgramOutput().match("cannot rollback a collMod command");
}, "B failed to fassert");
-replTest.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+replTest.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
diff --git a/jstests/replsets/rollback_different_h.js b/jstests/replsets/rollback_different_h.js
index 948823aa894..4b9aede1bbc 100644
--- a/jstests/replsets/rollback_different_h.js
+++ b/jstests/replsets/rollback_different_h.js
@@ -23,12 +23,14 @@ var name = "rollback_different_h";
var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
var a_conn = conns[0];
var b_conn = conns[1];
var AID = replTest.getNodeId(a_conn);
@@ -40,7 +42,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
+var options = {
+ writeConcern: {w: 2, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
@@ -49,12 +54,15 @@ replTest.stop(AID);
// change the h value of the most recent entry on B
master = replTest.getPrimary();
assert(b_conn.host === master.host, "b_conn assumed to be master");
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0];
oplog_entry["ts"].t++;
oplog_entry["h"] = NumberLong(1);
res = b_conn.getDB("local").oplog.rs.insert(oplog_entry);
-assert( res.nInserted > 0, tojson( res ) );
+assert(res.nInserted > 0, tojson(res));
// another insert to set minvalid ahead
assert.writeOK(b_conn.getDB(name).foo.insert({x: 123}));
@@ -66,7 +74,10 @@ master = replTest.getPrimary();
assert(a_conn.host === master.host, "a_conn assumed to be master");
// do a write so that B will have to roll back
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options));
// restart B, which should rollback and get to the same state as A
@@ -81,8 +92,7 @@ assert.soon(function() {
}
}
return true;
- }
- catch (e) {
+ } catch (e) {
return false;
}
}, "collection on A and B did not match after rollback");
diff --git a/jstests/replsets/rollback_dropdb.js b/jstests/replsets/rollback_dropdb.js
index 5c47e6ab34b..c11b14ab06e 100644
--- a/jstests/replsets/rollback_dropdb.js
+++ b/jstests/replsets/rollback_dropdb.js
@@ -12,12 +12,14 @@ var name = "rollback_dropdb";
var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
var a_conn = conns[0];
var b_conn = conns[1];
var AID = replTest.getNodeId(a_conn);
@@ -29,7 +31,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
+var options = {
+ writeConcern: {w: 2, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
@@ -48,14 +53,18 @@ master = replTest.getPrimary();
assert(a_conn.host === master.host, "a_conn assumed to be master");
// do a write so that B will have to roll back
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options));
// restart B, which should attempt rollback but then fassert
clearRawMongoProgramOutput();
replTest.restart(BID);
assert.soon(function() {
- return rawMongoProgramOutput().match("rollback : can't rollback drop database full resync will be required");
+ return rawMongoProgramOutput().match(
+ "rollback : can't rollback drop database full resync will be required");
}, "B failed to fassert");
-replTest.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+replTest.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
diff --git a/jstests/replsets/rollback_empty_ns.js b/jstests/replsets/rollback_empty_ns.js
index 77116668971..f6a07319eb4 100644
--- a/jstests/replsets/rollback_empty_ns.js
+++ b/jstests/replsets/rollback_empty_ns.js
@@ -23,12 +23,14 @@ var name = "rollback_empty_ns";
var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
var a_conn = conns[0];
var b_conn = conns[1];
var AID = replTest.getNodeId(a_conn);
@@ -40,7 +42,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
+var options = {
+ writeConcern: {w: 2, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
@@ -49,7 +54,10 @@ replTest.stop(AID);
// insert a fake oplog entry with an empty ns
master = replTest.getPrimary();
assert(b_conn.host === master.host, "b_conn assumed to be master");
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
// another insert to set minvalid ahead
assert.writeOK(b_conn.getDB(name).foo.insert({x: 123}));
var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0];
@@ -64,7 +72,10 @@ master = replTest.getPrimary();
assert(a_conn.host === master.host, "a_conn assumed to be master");
// do a write so that B will have to roll back
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options));
// restart B, which should rollback and log a message about not rolling back empty ns'd oplog entry
@@ -74,8 +85,7 @@ assert.soon(function() {
try {
var log = b_conn.getDB("admin").adminCommand({getLog: "global"}).log;
return doesEntryMatch(log, msg);
- }
- catch (e) {
+ } catch (e) {
return false;
}
}, "Did not see a log entry about skipping the empty ns'd oplog entry during rollback");
diff --git a/jstests/replsets/rollback_empty_o.js b/jstests/replsets/rollback_empty_o.js
index dfc94519cb3..f3468fcde5e 100644
--- a/jstests/replsets/rollback_empty_o.js
+++ b/jstests/replsets/rollback_empty_o.js
@@ -23,12 +23,14 @@ var name = "rollback_empty_o";
var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
var a_conn = conns[0];
var b_conn = conns[1];
var AID = replTest.getNodeId(a_conn);
@@ -40,7 +42,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
+var options = {
+ writeConcern: {w: 2, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
@@ -49,7 +54,10 @@ replTest.stop(AID);
// insert a fake oplog entry with an empty o
master = replTest.getPrimary();
assert(b_conn.host === master.host, "b_conn assumed to be master");
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
// another insert to set minvalid ahead
assert.writeOK(b_conn.getDB(name).foo.insert({x: 123}));
var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0];
@@ -64,7 +72,10 @@ master = replTest.getPrimary();
assert(a_conn.host === master.host, "a_conn assumed to be master");
// do a write so that B will have to roll back
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options));
// restart B, which should rollback and log a message about not rolling back empty o'd oplog entry
@@ -74,8 +85,7 @@ assert.soon(function() {
try {
var log = b_conn.getDB("admin").adminCommand({getLog: "global"}).log;
return doesEntryMatch(log, msg);
- }
- catch (e) {
+ } catch (e) {
return false;
}
}, "Did not see a log entry about skipping the empty o'd oplog entry during rollback");
diff --git a/jstests/replsets/rollback_empty_o2.js b/jstests/replsets/rollback_empty_o2.js
index e1e5add816f..56eb8512575 100644
--- a/jstests/replsets/rollback_empty_o2.js
+++ b/jstests/replsets/rollback_empty_o2.js
@@ -23,12 +23,14 @@ var name = "rollback_empty_o2";
var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
var a_conn = conns[0];
var b_conn = conns[1];
var AID = replTest.getNodeId(a_conn);
@@ -40,7 +42,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
+var options = {
+ writeConcern: {w: 2, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
@@ -49,7 +54,10 @@ replTest.stop(AID);
// insert a fake oplog entry with an empty o2
master = replTest.getPrimary();
assert(b_conn.host === master.host, "b_conn assumed to be master");
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
// another insert to set minvalid ahead
assert.writeOK(b_conn.getDB(name).foo.insert({x: 123}));
var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0];
@@ -65,7 +73,10 @@ master = replTest.getPrimary();
assert(a_conn.host === master.host, "a_conn assumed to be master");
// do a write so that B will have to roll back
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options));
// restart B, which should rollback and log a message about not rolling back empty o2'd oplog entry
@@ -75,8 +86,7 @@ assert.soon(function() {
try {
var log = b_conn.getDB("admin").adminCommand({getLog: "global"}).log;
return doesEntryMatch(log, msg);
- }
- catch (e) {
+ } catch (e) {
return false;
}
}, "Did not see a log entry about skipping the empty o2'd oplog entry during rollback");
diff --git a/jstests/replsets/rollback_fake_cmd.js b/jstests/replsets/rollback_fake_cmd.js
index b624a8ea80e..175359121f8 100644
--- a/jstests/replsets/rollback_fake_cmd.js
+++ b/jstests/replsets/rollback_fake_cmd.js
@@ -23,12 +23,14 @@ var name = "rollback_fake_cmd";
var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
var a_conn = conns[0];
var b_conn = conns[1];
var AID = replTest.getNodeId(a_conn);
@@ -40,7 +42,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
+var options = {
+ writeConcern: {w: 2, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
@@ -49,13 +54,18 @@ replTest.stop(AID);
// insert a fake oplog entry with a nonexistent command
master = replTest.getPrimary();
assert(b_conn.host === master.host, "b_conn assumed to be master");
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
// another insert to set minvalid ahead
assert.writeOK(b_conn.getDB(name).foo.insert({x: 123}));
var oplog_entry = b_conn.getDB("local").oplog.rs.find().sort({$natural: -1})[0];
oplog_entry.ts = Timestamp(oplog_entry.ts.t, oplog_entry.ts.i + 1);
oplog_entry.op = "c";
-oplog_entry.o = {fake_command_name: 1};
+oplog_entry.o = {
+ fake_command_name: 1
+};
assert.writeOK(b_conn.getDB("local").oplog.rs.insert(oplog_entry));
jsTestLog('inserted oplog entry with invalid command: ' + tojson(oplog_entry));
@@ -66,7 +76,10 @@ master = replTest.getPrimary();
assert(a_conn.host === master.host, "a_conn assumed to be master");
// do a write so that B will have to roll back
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 2}, options));
// restart B, which should rollback and log a message about not rolling back the nonexistent cmd
@@ -77,4 +90,4 @@ assert.soon(function() {
return rawMongoProgramOutput().match(msg);
}, "Did not see a log entry about skipping the nonexistent command during rollback");
-replTest.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+replTest.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
diff --git a/jstests/replsets/rollback_index.js b/jstests/replsets/rollback_index.js
index 1a3197f9ed0..6fb3044b740 100644
--- a/jstests/replsets/rollback_index.js
+++ b/jstests/replsets/rollback_index.js
@@ -25,12 +25,14 @@ var name = "rollback_index";
var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
-replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1] },
- { "_id": 2, "host": nodes[2], arbiterOnly: true}]
- });
+replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], arbiterOnly: true}
+ ]
+});
var a_conn = conns[0];
var b_conn = conns[1];
var AID = replTest.getNodeId(a_conn);
@@ -42,7 +44,10 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
+var options = {
+ writeConcern: {w: 2, wtimeout: 60000},
+ upsert: true
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
@@ -52,7 +57,10 @@ replTest.stop(AID);
// cause errors when applying operations from the primary.
master = replTest.getPrimary();
assert(b_conn.host === master.host, "b_conn assumed to be master");
-options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000},
+ upsert: true
+};
// another insert to set minvalid ahead
assert.writeOK(b_conn.getDB(name).foo.insert({x: 123}));
assert.commandWorked(b_conn.getDB(name).foo.ensureIndex({x: 1}, {unique: true}));
@@ -66,7 +74,9 @@ assert(a_conn.host === master.host, "a_conn assumed to be master");
// Insert a document with the same value for 'x' that should be
// propagated successfully to B if the unique index was dropped successfully.
-options = {writeConcern: {w: 1, wtimeout: 60000}};
+options = {
+ writeConcern: {w: 1, wtimeout: 60000}
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
assert.eq(2, a_conn.getDB(name).foo.count(), 'invalid number of documents on A');
@@ -78,14 +88,18 @@ replTest.awaitReplication();
replTest.awaitSecondaryNodes();
// Perform a write that should succeed if there's no unique index on B.
-options = {writeConcern: {w: 'majority', wtimeout: 60000}};
+options = {
+ writeConcern: {w: 'majority', wtimeout: 60000}
+};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// Check collections and indexes.
-assert.eq(3, b_conn.getDB(name).foo.count(),
+assert.eq(3,
+ b_conn.getDB(name).foo.count(),
'Collection on B does not have the same number of documents as A');
-assert.eq(a_conn.getDB(name).foo.getIndexes().length, b_conn.getDB(name).foo.getIndexes().length,
+assert.eq(a_conn.getDB(name).foo.getIndexes().length,
+ b_conn.getDB(name).foo.getIndexes().length,
'Unique index not dropped during rollback: ' +
- tojson(b_conn.getDB(name).foo.getIndexes()));
+ tojson(b_conn.getDB(name).foo.getIndexes()));
replTest.stopSet();
diff --git a/jstests/replsets/rollback_too_new.js b/jstests/replsets/rollback_too_new.js
index 8f2d43bc8d7..e0a88e12f31 100644
--- a/jstests/replsets/rollback_too_new.js
+++ b/jstests/replsets/rollback_too_new.js
@@ -14,27 +14,31 @@
var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.nodeList();
var conns = replTest.startSet();
- replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0] },
- { "_id": 1, "host": nodes[1], arbiterOnly: true },
- { "_id": 2, "host": nodes[2], priority: 0 }],
- "settings": {
- "chainingAllowed": false
- }
- });
+ replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1], arbiterOnly: true},
+ {"_id": 2, "host": nodes[2], priority: 0}
+ ],
+ "settings": {"chainingAllowed": false}
+ });
var c_conn = conns[2];
var CID = replTest.getNodeId(c_conn);
// get master and do an initial write
var master = replTest.getPrimary();
- var options = {writeConcern: {w: 2, wtimeout: 60000}};
+ var options = {
+ writeConcern: {w: 2, wtimeout: 60000}
+ };
assert.writeOK(master.getDB(name).foo.insert({x: 1}, options));
// add an oplog entry from the distant future as the most recent entry on node C
var future_oplog_entry = conns[2].getDB("local").oplog.rs.find().sort({$natural: -1})[0];
future_oplog_entry["ts"] = new Timestamp(future_oplog_entry["ts"].getTime() + 200000, 1);
- options = {writeConcern: {w: 1, wtimeout: 60000}};
+ options = {
+ writeConcern: {w: 1, wtimeout: 60000}
+ };
assert.writeOK(conns[2].getDB("local").oplog.rs.insert(future_oplog_entry, options));
replTest.stop(CID);
@@ -56,6 +60,6 @@
}
}, "node C failed to fassert", 60 * 1000);
- replTest.stopSet(undefined, undefined, { allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+ replTest.stopSet(undefined, undefined, {allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
}());
diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js
index a9c3024b51e..b992464682d 100644
--- a/jstests/replsets/rslib.js
+++ b/jstests/replsets/rslib.js
@@ -8,207 +8,217 @@ var awaitOpTime;
var startSetIfSupportsReadMajority;
var waitUntilAllNodesCaughtUp;
-(function () {
-"use strict";
-var count = 0;
-var w = 0;
-
-wait = function(f,msg) {
- w++;
- var n = 0;
- while (!f()) {
- if( n % 4 == 0 )
- print("waiting " + w);
- if (++n == 4) {
- print("" + f);
- }
- if (n >= 200) {
- throw new Error('tried 200 times, giving up on ' + msg);
- }
- sleep(1000);
- }
-};
-
-/**
- * Use this to do something once every 4 iterations.
- *
- * <pre>
- * for (i=0; i<1000; i++) {
- * occasionally(function() { print("4 more iterations"); });
- * }
- * </pre>
- */
-occasionally = function(f, n) {
- var interval = n || 4;
- if (count % interval == 0) {
- f();
- }
- count++;
-};
-
-reconnect = function(a) {
- wait(function() {
- var db;
- try {
- // make this work with either dbs or connections
- if (typeof(a.getDB) == "function") {
- db = a.getDB('foo');
- }
- else {
- db = a;
- }
- db.bar.stats();
- if (jsTest.options().keyFile) { // SERVER-4241: Shell connections don't re-authenticate on reconnect
- return jsTest.authenticate(db.getMongo());
+(function() {
+ "use strict";
+ var count = 0;
+ var w = 0;
+
+ wait = function(f, msg) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if (n % 4 == 0)
+ print("waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ if (n >= 200) {
+ throw new Error('tried 200 times, giving up on ' + msg);
+ }
+ sleep(1000);
}
- return true;
- } catch(e) {
- print(e);
- return false;
- }
- });
-};
-
-
-getLatestOp = function(server) {
- server.getDB("admin").getMongo().setSlaveOk();
- var log = server.getDB("local")['oplog.rs'];
- var cursor = log.find({}).sort({'$natural': -1}).limit(1);
- if (cursor.hasNext()) {
- return cursor.next();
- }
- return null;
-};
-
-
-waitForAllMembers = function(master, timeout) {
- var failCount = 0;
-
- assert.soon( function() {
- var state = null;
- try {
- state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
- failCount = 0;
- } catch ( e ) {
- // Connection can get reset on replica set failover causing a socket exception
- print( "Calling replSetGetStatus failed" );
- print( e );
- return false;
+ };
+
+ /**
+ * Use this to do something once every 4 iterations.
+ *
+ * <pre>
+ * for (i=0; i<1000; i++) {
+ * occasionally(function() { print("4 more iterations"); });
+ * }
+ * </pre>
+ */
+ occasionally = function(f, n) {
+ var interval = n || 4;
+ if (count % interval == 0) {
+ f();
}
- occasionally(function() { printjson(state); }, 10);
-
- for (var m in state.members) {
- if (state.members[m].state != 1 && // PRIMARY
- state.members[m].state != 2 && // SECONDARY
- state.members[m].state != 7) { // ARBITER
+ count++;
+ };
+
+ reconnect = function(a) {
+ wait(function() {
+ var db;
+ try {
+ // make this work with either dbs or connections
+ if (typeof(a.getDB) == "function") {
+ db = a.getDB('foo');
+ } else {
+ db = a;
+ }
+ db.bar.stats();
+ if (jsTest.options().keyFile) { // SERVER-4241: Shell connections don't
+ // re-authenticate on reconnect
+ return jsTest.authenticate(db.getMongo());
+ }
+ return true;
+ } catch (e) {
+ print(e);
return false;
}
+ });
+ };
+
+ getLatestOp = function(server) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ var log = server.getDB("local")['oplog.rs'];
+ var cursor = log.find({}).sort({'$natural': -1}).limit(1);
+ if (cursor.hasNext()) {
+ return cursor.next();
}
- printjson( state );
- return true;
- }, "not all members ready", timeout || 60000);
-
- print( "All members are now in state PRIMARY, SECONDARY, or ARBITER" );
-};
-
-reconfig = function(rs, config, force) {
- "use strict";
- var admin = rs.getPrimary().getDB("admin");
- var e;
- var master;
- try {
- assert.commandWorked(admin.runCommand({replSetReconfig: config, force: force}));
- }
- catch (e) {
- if (tojson(e).indexOf( "error doing query: failed" ) < 0) {
- throw e;
- }
- }
-
- var master = rs.getPrimary().getDB("admin");
- waitForAllMembers(master);
-
- return master;
-};
-
-awaitOpTime = function (node, opTime) {
- var ts, ex;
- assert.soon(function () {
+ return null;
+ };
+
+ waitForAllMembers = function(master, timeout) {
+ var failCount = 0;
+
+ assert.soon(function() {
+ var state = null;
+ try {
+ state = master.getSisterDB("admin").runCommand({replSetGetStatus: 1});
+ failCount = 0;
+ } catch (e) {
+ // Connection can get reset on replica set failover causing a socket exception
+ print("Calling replSetGetStatus failed");
+ print(e);
+ return false;
+ }
+ occasionally(function() {
+ printjson(state);
+ }, 10);
+
+ for (var m in state.members) {
+ if (state.members[m].state != 1 && // PRIMARY
+ state.members[m].state != 2 && // SECONDARY
+ state.members[m].state != 7) { // ARBITER
+ return false;
+ }
+ }
+ printjson(state);
+ return true;
+ }, "not all members ready", timeout || 60000);
+
+ print("All members are now in state PRIMARY, SECONDARY, or ARBITER");
+ };
+
+ reconfig = function(rs, config, force) {
+ "use strict";
+ var admin = rs.getPrimary().getDB("admin");
+ var e;
+ var master;
try {
- // The following statement extracts the timestamp field from the most recent element of
- // the oplog, and stores it in "ts".
- ts = node.getDB("local")['oplog.rs'].find({}).sort({'$natural': -1}).limit(1).next().ts;
- if ((ts.t == opTime.t) && (ts.i == opTime.i)) {
- return true;
+ assert.commandWorked(admin.runCommand({replSetReconfig: config, force: force}));
+ } catch (e) {
+ if (tojson(e).indexOf("error doing query: failed") < 0) {
+ throw e;
}
- ex = null;
- return false;
}
- catch (ex) {
- return false;
- }
- }, function () {
- var message = "Node " + node + " only reached optime " + tojson(ts) + " not " +
- tojson(opTime);
- if (ex) {
- message += "; last attempt failed with exception " + tojson(ex);
- }
- return message;
- });
-};
-
-/**
- * Uses the results of running replSetGetStatus against an arbitrary replset node to wait until
- * all nodes in the set are replicated through the same optime.
- * 'rs' is an array of connections to replica set nodes. This function is useful when you
- * don't have a ReplSetTest object to use, otherwise ReplSetTest.awaitReplication is preferred.
- */
-waitUntilAllNodesCaughtUp = function(rs, timeout) {
- var rsStatus;
- var firstConflictingIndex;
- var ot;
- var otherOt;
- assert.soon(function () {
- rsStatus = rs[0].adminCommand('replSetGetStatus');
- if (rsStatus.ok != 1) {
- return false;
- }
- assert.eq(rs.length, rsStatus.members.length, tojson(rsStatus));
- ot = rsStatus.members[0].optime;
- for (var i = 1; i < rsStatus.members.length; ++i) {
- otherOt = rsStatus.members[i].optime;
- if (bsonWoCompare({ts: otherOt.ts}, {ts: ot.ts}) ||
- bsonWoCompare({t: otherOt.t}, {t: ot.t})) {
- firstConflictingIndex = i;
+
+ var master = rs.getPrimary().getDB("admin");
+ waitForAllMembers(master);
+
+ return master;
+ };
+
+ awaitOpTime = function(node, opTime) {
+ var ts, ex;
+ assert.soon(
+ function() {
+ try {
+ // The following statement extracts the timestamp field from the most recent
+ // element of
+ // the oplog, and stores it in "ts".
+ ts = node.getDB("local")['oplog.rs']
+ .find({})
+ .sort({'$natural': -1})
+ .limit(1)
+ .next()
+ .ts;
+ if ((ts.t == opTime.t) && (ts.i == opTime.i)) {
+ return true;
+ }
+ ex = null;
+ return false;
+ } catch (ex) {
+ return false;
+ }
+ },
+ function() {
+ var message = "Node " + node + " only reached optime " + tojson(ts) + " not " +
+ tojson(opTime);
+ if (ex) {
+ message += "; last attempt failed with exception " + tojson(ex);
+ }
+ return message;
+ });
+ };
+
+ /**
+ * Uses the results of running replSetGetStatus against an arbitrary replset node to wait until
+ * all nodes in the set are replicated through the same optime.
+ * 'rs' is an array of connections to replica set nodes. This function is useful when you
+ * don't have a ReplSetTest object to use, otherwise ReplSetTest.awaitReplication is preferred.
+ */
+ waitUntilAllNodesCaughtUp = function(rs, timeout) {
+ var rsStatus;
+ var firstConflictingIndex;
+ var ot;
+ var otherOt;
+ assert.soon(
+ function() {
+ rsStatus = rs[0].adminCommand('replSetGetStatus');
+ if (rsStatus.ok != 1) {
+ return false;
+ }
+ assert.eq(rs.length, rsStatus.members.length, tojson(rsStatus));
+ ot = rsStatus.members[0].optime;
+ for (var i = 1; i < rsStatus.members.length; ++i) {
+ otherOt = rsStatus.members[i].optime;
+ if (bsonWoCompare({ts: otherOt.ts}, {ts: ot.ts}) ||
+ bsonWoCompare({t: otherOt.t}, {t: ot.t})) {
+ firstConflictingIndex = i;
+ return false;
+ }
+ }
+ return true;
+ },
+ function() {
+ return "Optimes of members 0 (" + tojson(ot) + ") and " + firstConflictingIndex +
+ " (" + tojson(otherOt) + ") are different in " + tojson(rsStatus);
+ },
+ timeout);
+ };
+
+ /**
+ * Starts each node in the given replica set if the storage engine supports readConcern
+ *'majority'.
+ * Returns true if the replica set was started successfully and false otherwise.
+ *
+ * @param replSetTest - The instance of {@link ReplSetTest} to start
+ * @param options - The options passed to {@link ReplSetTest.startSet}
+ */
+ startSetIfSupportsReadMajority = function(replSetTest, options) {
+ try {
+ replSetTest.startSet(options);
+ } catch (e) {
+ var conn = MongoRunner.runMongod();
+ if (!conn.getDB("admin").serverStatus().storageEngine.supportsCommittedReads) {
+ MongoRunner.stopMongod(conn);
return false;
}
+ throw e;
}
return true;
- }, function () {
- return "Optimes of members 0 (" + tojson(ot) + ") and " + firstConflictingIndex + " (" +
- tojson(otherOt) + ") are different in " + tojson(rsStatus);
- }, timeout);
-};
-
-/**
- * Starts each node in the given replica set if the storage engine supports readConcern 'majority'.
- * Returns true if the replica set was started successfully and false otherwise.
- *
- * @param replSetTest - The instance of {@link ReplSetTest} to start
- * @param options - The options passed to {@link ReplSetTest.startSet}
- */
-startSetIfSupportsReadMajority = function (replSetTest, options) {
- try {
- replSetTest.startSet(options);
- } catch (e) {
- var conn = MongoRunner.runMongod();
- if (!conn.getDB("admin").serverStatus().storageEngine.supportsCommittedReads) {
- MongoRunner.stopMongod(conn);
- return false;
- }
- throw e;
- }
- return true;
-};
+ };
}());
diff --git a/jstests/replsets/server8070.js b/jstests/replsets/server8070.js
index 2b29a7592d0..e91e95e99a4 100644
--- a/jstests/replsets/server8070.js
+++ b/jstests/replsets/server8070.js
@@ -5,37 +5,27 @@
// helper to ensure two nodes are at the same place in the oplog
var waitForSameOplogPosition = function(db1, db2, errmsg) {
- assert.soon(
- function() {
- var last1 = db1.getSisterDB("local").oplog.rs.find().sort({$natural:-1}).limit(1)
- .next();
- var last2 = db2.getSisterDB("local").oplog.rs.find().sort({$natural:-1}).limit(1)
- .next();
- jsTest.log("primary: " + tojson(last1) + " secondary: " + tojson(last2));
-
- return ((last1.ts.t === last2.ts.t) && (last1.ts.i === last2.ts.i));
- },
- errmsg
- );
+ assert.soon(function() {
+ var last1 = db1.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ var last2 = db2.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ jsTest.log("primary: " + tojson(last1) + " secondary: " + tojson(last2));
+
+ return ((last1.ts.t === last2.ts.t) && (last1.ts.i === last2.ts.i));
+ }, errmsg);
};
// start set
var replSet = new ReplSetTest({name: 'testSet', nodes: 3});
replSet.startSet();
-replSet.initiate(
- {
- _id:'testSet',
- members:
- [
- {_id: 0, host: getHostName()+":"+replSet.ports[0]},
- {_id: 1, host: getHostName()+":"+replSet.ports[1], priority: 0},
- {_id: 2, host: getHostName()+":"+replSet.ports[2], priority: 0}
- ],
- settings: {
- chainingAllowed: false
- }
- }
-);
+replSet.initiate({
+ _id: 'testSet',
+ members: [
+ {_id: 0, host: getHostName() + ":" + replSet.ports[0]},
+ {_id: 1, host: getHostName() + ":" + replSet.ports[1], priority: 0},
+ {_id: 2, host: getHostName() + ":" + replSet.ports[2], priority: 0}
+ ],
+ settings: {chainingAllowed: false}
+});
// set up common points of access
var master = replSet.getPrimary();
@@ -46,12 +36,12 @@ var member2 = replSet.nodes[1].getDB("admin");
var member3 = replSet.nodes[2].getDB("admin");
// Do an initial write
-master.getDB("foo").bar.insert({x:1});
+master.getDB("foo").bar.insert({x: 1});
replSet.awaitReplication();
jsTest.log("Make sure 2 & 3 are syncing from the primary");
-member2.adminCommand({replSetSyncFrom : getHostName()+":"+replSet.ports[0]});
-member3.adminCommand({replSetSyncFrom : getHostName()+":"+replSet.ports[0]});
+member2.adminCommand({replSetSyncFrom: getHostName() + ":" + replSet.ports[0]});
+member3.adminCommand({replSetSyncFrom: getHostName() + ":" + replSet.ports[0]});
jsTest.log("Stop 2's replication");
member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
@@ -69,7 +59,7 @@ jsTest.log("Stop 3's replication");
member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
// logLevel 3 will allow us to see each op the secondary pulls from the primary so that we can
// determine whether or not all ops are actually being pulled
-member3.runCommand({setParameter: 1, logLevel:3});
+member3.runCommand({setParameter: 1, logLevel: 3});
jsTest.log("Start 2's replication");
member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
@@ -92,7 +82,7 @@ for (var i = 50; i < 75; i++) {
}
var primaryCollectionSize = primary.bar.find().itcount();
jsTest.log("primary collection size: " + primaryCollectionSize);
-var last = primary.getSisterDB("local").oplog.rs.find().sort({$natural:-1}).limit(1).next();
+var last = primary.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
jsTest.log("waiting a bit for the secondaries to get the write");
sleep(10000);
@@ -105,43 +95,40 @@ replSet.stop(0);
// which would check for 30 seconds that node 3 didn't try to sync from 2
sleep(30 * 1000);
jsTest.log("3 should not attempt to sync from 2, as it cannot clear its buffer");
-var syncingTo = member3.adminCommand({replSetGetStatus:1}).syncingTo;
-assert(syncingTo !== getHostName()+":"+replSet.ports[1], "node 3 is syncing from node 2 :(");
+var syncingTo = member3.adminCommand({replSetGetStatus: 1}).syncingTo;
+assert(syncingTo !== getHostName() + ":" + replSet.ports[1], "node 3 is syncing from node 2 :(");
jsTest.log("Pause 3's bgsync thread");
var rsBgSyncProduceResult3 =
- member3.runCommand({configureFailPoint: 'rsBgSyncProduce', mode: 'alwaysOn'});
+ member3.runCommand({configureFailPoint: 'rsBgSyncProduce', mode: 'alwaysOn'});
assert.eq(1, rsBgSyncProduceResult3.ok, "member 3 rsBgSyncProduce admin command failed");
// count documents in member 3
-assert.eq(26, member3.getSisterDB("foo").bar.find().itcount(),
+assert.eq(26,
+ member3.getSisterDB("foo").bar.find().itcount(),
"collection size incorrect on node 3 before applying ops 25-75");
jsTest.log("Allow 3 to apply ops 25-75");
var rsSyncApplyStopResult3 =
- member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+ member3.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
assert.eq(1, rsSyncApplyStopResult3.ok, "member 3 rsSyncApplyStop admin command failed");
-assert.soon(
- function() {
- var last3 = member3.getSisterDB("local").oplog.rs.find().sort({$natural:-1}).limit(1)
- .next();
- jsTest.log("primary: " + tojson(last, '', true) + " secondary: " + tojson(last3, '', true));
- jsTest.log("member 3 collection size: " + member3.getSisterDB("foo").bar.find().itcount());
- jsTest.log("curop: ");
- printjson(member3.getSisterDB("foo").currentOp(true));
- return ((last.ts.t === last3.ts.t) && (last.ts.i === last3.ts.i));
- },
- "Replication member 3 did not apply ops 25-75"
-);
+assert.soon(function() {
+ var last3 = member3.getSisterDB("local").oplog.rs.find().sort({$natural: -1}).limit(1).next();
+ jsTest.log("primary: " + tojson(last, '', true) + " secondary: " + tojson(last3, '', true));
+ jsTest.log("member 3 collection size: " + member3.getSisterDB("foo").bar.find().itcount());
+ jsTest.log("curop: ");
+ printjson(member3.getSisterDB("foo").currentOp(true));
+ return ((last.ts.t === last3.ts.t) && (last.ts.i === last3.ts.i));
+}, "Replication member 3 did not apply ops 25-75");
jsTest.log("Start 3's bgsync thread");
member3.runCommand({configureFailPoint: 'rsBgSyncProduce', mode: 'off'});
jsTest.log("Node 3 shouldn't hit rollback");
-var end = (new Date()).getTime()+10000;
+var end = (new Date()).getTime() + 10000;
while ((new Date()).getTime() < end) {
- assert('ROLLBACK' !== member3.runCommand({replSetGetStatus:1}).members[2].stateStr);
+ assert('ROLLBACK' !== member3.runCommand({replSetGetStatus: 1}).members[2].stateStr);
sleep(30);
}
diff --git a/jstests/replsets/server_status_metrics.js b/jstests/replsets/server_status_metrics.js
index 46add3f0cd2..553077cdc99 100644
--- a/jstests/replsets/server_status_metrics.js
+++ b/jstests/replsets/server_status_metrics.js
@@ -21,7 +21,7 @@ function testSecondaryMetrics(secondary, opCount, offset) {
assert(ss.metrics.repl.buffer.maxSizeBytes >= 0, "maxSize (bytes) missing");
assert(ss.metrics.repl.preload.docs.num >= 0, "preload.docs num missing");
- assert(ss.metrics.repl.preload.docs.totalMillis >= 0, "preload.docs time missing");
+ assert(ss.metrics.repl.preload.docs.totalMillis >= 0, "preload.docs time missing");
assert(ss.metrics.repl.preload.docs.num >= 0, "preload.indexes num missing");
assert(ss.metrics.repl.preload.indexes.totalMillis >= 0, "preload.indexes time missing");
@@ -30,7 +30,7 @@ function testSecondaryMetrics(secondary, opCount, offset) {
assert.eq(ss.metrics.repl.apply.ops, opCount + offset, "wrong number of applied ops");
}
-var rt = new ReplSetTest( { name : "server_status_metrics" , nodes: 2, oplogSize: 100 } );
+var rt = new ReplSetTest({name: "server_status_metrics", nodes: 2, oplogSize: 100});
rt.startSet();
rt.initiate();
@@ -41,24 +41,28 @@ var primary = rt.getPrimary();
var testDB = primary.getDB("test");
assert.commandWorked(testDB.createCollection('a'));
-assert.writeOK(testDB.b.insert({}, { writeConcern: { w: 2 }}));
+assert.writeOK(testDB.b.insert({}, {writeConcern: {w: 2}}));
var ss = secondary.getDB("test").serverStatus();
var secondaryBaseOplogInserts = ss.metrics.repl.apply.ops;
-//add test docs
+// add test docs
var bulk = testDB.a.initializeUnorderedBulkOp();
-for(x = 0; x < 1000; x++) {
+for (x = 0; x < 1000; x++) {
bulk.insert({});
}
-assert.writeOK(bulk.execute({ w: 2 }));
+assert.writeOK(bulk.execute({w: 2}));
-testSecondaryMetrics(secondary, 1000, secondaryBaseOplogInserts );
+testSecondaryMetrics(secondary, 1000, secondaryBaseOplogInserts);
-var options = { writeConcern: { w: 2 }, multi: true, upsert: true };
-assert.writeOK(testDB.a.update({}, { $set: { d: new Date() }}, options));
+var options = {
+ writeConcern: {w: 2},
+ multi: true,
+ upsert: true
+};
+assert.writeOK(testDB.a.update({}, {$set: {d: new Date()}}, options));
-testSecondaryMetrics(secondary, 2000, secondaryBaseOplogInserts );
+testSecondaryMetrics(secondary, 2000, secondaryBaseOplogInserts);
// Test getLastError.wtime and that it only records stats for w > 1, see SERVER-9005
var startMillis = testDB.serverStatus().metrics.getLastError.wtime.totalMillis;
@@ -66,20 +70,20 @@ var startNum = testDB.serverStatus().metrics.getLastError.wtime.num;
printjson(primary.getDB("test").serverStatus().metrics);
-assert.writeOK(testDB.a.insert({ x: 1 }, { writeConcern: { w: 1, wtimeout: 5000 }}));
+assert.writeOK(testDB.a.insert({x: 1}, {writeConcern: {w: 1, wtimeout: 5000}}));
assert.eq(testDB.serverStatus().metrics.getLastError.wtime.totalMillis, startMillis);
assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum);
-assert.writeOK(testDB.a.insert({ x: 1 }, { writeConcern: { w: -11, wtimeout: 5000 }}));
+assert.writeOK(testDB.a.insert({x: 1}, {writeConcern: {w: -11, wtimeout: 5000}}));
assert.eq(testDB.serverStatus().metrics.getLastError.wtime.totalMillis, startMillis);
assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum);
-assert.writeOK(testDB.a.insert({ x: 1 }, { writeConcern: { w: 2, wtimeout: 5000 }}));
+assert.writeOK(testDB.a.insert({x: 1}, {writeConcern: {w: 2, wtimeout: 5000}}));
assert(testDB.serverStatus().metrics.getLastError.wtime.totalMillis >= startMillis);
assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum + 1);
// Write will fail because there are only 2 nodes
-assert.writeError(testDB.a.insert({ x: 1 }, { writeConcern: { w: 3, wtimeout: 50 }}));
+assert.writeError(testDB.a.insert({x: 1}, {writeConcern: {w: 3, wtimeout: 50}}));
assert.eq(testDB.serverStatus().metrics.getLastError.wtime.num, startNum + 2);
printjson(primary.getDB("test").serverStatus().metrics);
diff --git a/jstests/replsets/server_status_repl.js b/jstests/replsets/server_status_repl.js
index 787cd2356c0..c00fcc8818a 100644
--- a/jstests/replsets/server_status_repl.js
+++ b/jstests/replsets/server_status_repl.js
@@ -1,4 +1,4 @@
-var rt = new ReplSetTest( { name : "server_status_repl" , nodes: 2} );
+var rt = new ReplSetTest({name: "server_status_repl", nodes: 2});
rt.startSet();
rt.initiate();
@@ -9,9 +9,9 @@ var primary = rt.getPrimary();
var testDB = primary.getDB("test");
assert.commandWorked(testDB.createCollection('a'));
-assert.writeOK(testDB.b.insert({}, { writeConcern: { w: 2 }}));
+assert.writeOK(testDB.b.insert({}, {writeConcern: {w: 2}}));
-var ss = primary.getDB("test").serverStatus({repl:1});
+var ss = primary.getDB("test").serverStatus({repl: 1});
assert.neq(ss.repl.replicationProgress, null, tojson(ss.repl));
rt.stopSet(); \ No newline at end of file
diff --git a/jstests/replsets/single_server_majority.js b/jstests/replsets/single_server_majority.js
index 3e3a6dc7bbf..c36f021c989 100644
--- a/jstests/replsets/single_server_majority.js
+++ b/jstests/replsets/single_server_majority.js
@@ -9,4 +9,4 @@ col = db.getCollection("single_server_majority");
col.drop();
// see if we can get a majority write on this single server
-assert.writeOK(col.save({ a: "test" }, { writeConcern: { w: 'majority' }})); \ No newline at end of file
+assert.writeOK(col.save({a: "test"}, {writeConcern: {w: 'majority'}})); \ No newline at end of file
diff --git a/jstests/replsets/sized_zero_capped.js b/jstests/replsets/sized_zero_capped.js
index 149cbaaf1af..41debd6d17c 100644
--- a/jstests/replsets/sized_zero_capped.js
+++ b/jstests/replsets/sized_zero_capped.js
@@ -8,12 +8,14 @@
var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.nodeList();
replTest.startSet();
- replTest.initiate({"_id": name,
- "members": [
- { "_id": 0, "host": nodes[0], priority: 3 },
- { "_id": 1, "host": nodes[1], priority: 0 },
- { "_id": 2, "host": nodes[2], priority: 0 }]
- });
+ replTest.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], priority: 3},
+ {"_id": 1, "host": nodes[1], priority: 0},
+ {"_id": 2, "host": nodes[2], priority: 0}
+ ]
+ });
var testDB = replTest.getPrimary().getDB(name);
testDB.createCollection(name, {capped: true, size: 0});
@@ -21,7 +23,7 @@
// ensure secondary is still up and responsive
var secondary = replTest.getSecondary();
- assert.commandWorked(secondary.getDB(name).runCommand({ping:1 }));
+ assert.commandWorked(secondary.getDB(name).runCommand({ping: 1}));
replTest.stopSet();
}());
diff --git a/jstests/replsets/slavedelay1.js b/jstests/replsets/slavedelay1.js
index ae97d3373bc..040c47d3e78 100644
--- a/jstests/replsets/slavedelay1.js
+++ b/jstests/replsets/slavedelay1.js
@@ -1,120 +1,121 @@
load("jstests/replsets/rslib.js");
-doTest = function( signal ) {
+doTest = function(signal) {
- var name = "slaveDelay";
- var host = getHostName();
+ var name = "slaveDelay";
+ var host = getHostName();
- var replTest = new ReplSetTest( {name: name, nodes: 3} );
+ var replTest = new ReplSetTest({name: name, nodes: 3});
- var nodes = replTest.startSet();
+ var nodes = replTest.startSet();
- /* set slaveDelay to 30 seconds */
- var config = replTest.getReplSetConfig();
- config.members[2].priority = 0;
- config.members[2].slaveDelay = 30;
+ /* set slaveDelay to 30 seconds */
+ var config = replTest.getReplSetConfig();
+ config.members[2].priority = 0;
+ config.members[2].slaveDelay = 30;
- replTest.initiate(config);
+ replTest.initiate(config);
- var master = replTest.getPrimary().getDB(name);
- var slaveConns = replTest.liveNodes.slaves;
- var slaves = [];
- for (var i in slaveConns) {
- var d = slaveConns[i].getDB(name);
- slaves.push(d);
- }
+ var master = replTest.getPrimary().getDB(name);
+ var slaveConns = replTest.liveNodes.slaves;
+ var slaves = [];
+ for (var i in slaveConns) {
+ var d = slaveConns[i].getDB(name);
+ slaves.push(d);
+ }
- waitForAllMembers(master);
+ waitForAllMembers(master);
- // insert a record
- assert.writeOK(master.foo.insert({ x: 1 }, { writeConcern: { w: 2 }}));
+ // insert a record
+ assert.writeOK(master.foo.insert({x: 1}, {writeConcern: {w: 2}}));
- var doc = master.foo.findOne();
- assert.eq(doc.x, 1);
+ var doc = master.foo.findOne();
+ assert.eq(doc.x, 1);
- // make sure slave has it
- var doc = slaves[0].foo.findOne();
- assert.eq(doc.x, 1);
+ // make sure slave has it
+ var doc = slaves[0].foo.findOne();
+ assert.eq(doc.x, 1);
- // make sure delayed slave doesn't have it
- for (var i=0; i<8; i++) {
- assert.eq(slaves[1].foo.findOne(), null);
- sleep(1000);
- }
+ // make sure delayed slave doesn't have it
+ for (var i = 0; i < 8; i++) {
+ assert.eq(slaves[1].foo.findOne(), null);
+ sleep(1000);
+ }
- // within 30 seconds delayed slave should have it
- assert.soon(function() {
- var z = slaves[1].foo.findOne();
- return z && z.x == 1;
- });
+ // within 30 seconds delayed slave should have it
+ assert.soon(function() {
+ var z = slaves[1].foo.findOne();
+ return z && z.x == 1;
+ });
+ /************* Part 2 *******************/
- /************* Part 2 *******************/
+ // how about if we add a new server? will it sync correctly?
+ conn = replTest.add();
- // how about if we add a new server? will it sync correctly?
- conn = replTest.add();
+ config = master.getSisterDB("local").system.replset.findOne();
+ printjson(config);
+ config.version++;
+ config.members.push({
+ _id: 3,
+ host: host + ":" + replTest.ports[replTest.ports.length - 1],
+ priority: 0,
+ slaveDelay: 30
+ });
- config = master.getSisterDB("local").system.replset.findOne();
- printjson(config);
- config.version++;
- config.members.push({_id: 3,
- host: host+":"+replTest.ports[replTest.ports.length-1],
- priority:0,
- slaveDelay:30});
+ master = reconfig(replTest, config);
+ master = master.getSisterDB(name);
- master = reconfig(replTest, config);
- master = master.getSisterDB(name);
+ // wait for the node to catch up
+ replTest.awaitReplication(90 * 1000);
- // wait for the node to catch up
- replTest.awaitReplication(90*1000);
+ assert.writeOK(master.foo.insert({_id: 123, x: 'foo'}, {writeConcern: {w: 2}}));
- assert.writeOK(master.foo.insert({ _id: 123, x: 'foo' }, { writeConcern: { w: 2 }}));
+ for (var i = 0; i < 8; i++) {
+ assert.eq(conn.getDB(name).foo.findOne({_id: 123}), null);
+ sleep(1000);
+ }
- for (var i=0; i<8; i++) {
- assert.eq(conn.getDB(name).foo.findOne({_id:123}), null);
- sleep(1000);
- }
+ assert.soon(function() {
+ var z = conn.getDB(name).foo.findOne({_id: 123});
+ return z != null && z.x == "foo";
+ });
- assert.soon(function() {
- var z = conn.getDB(name).foo.findOne({_id:123});
- return z != null && z.x == "foo";
- });
+ /************* Part 3 ******************/
- /************* Part 3 ******************/
+ print("reconfigure slavedelay");
- print("reconfigure slavedelay");
+ config.version++;
+ config.members[3].slaveDelay = 15;
- config.version++;
- config.members[3].slaveDelay = 15;
+ reconfig(replTest, config);
+ master = replTest.getPrimary().getDB(name);
+ assert.soon(function() {
+ return conn.getDB("local").system.replset.findOne().version == config.version;
+ });
- reconfig(replTest, config);
- master = replTest.getPrimary().getDB(name);
- assert.soon(function() {
- return conn.getDB("local").system.replset.findOne().version == config.version;
- });
+ // wait for node to become secondary
+ assert.soon(function() {
+ var result = conn.getDB("admin").isMaster();
+ printjson(result);
+ return result.secondary;
+ });
- // wait for node to become secondary
- assert.soon(function() {
- var result = conn.getDB("admin").isMaster();
- printjson(result);
- return result.secondary;
- });
+ print("testing insert");
+ master.foo.insert({_id: 124, "x": "foo"});
+ assert(master.foo.findOne({_id: 124}) != null);
- print("testing insert");
- master.foo.insert({_id : 124, "x" : "foo"});
- assert(master.foo.findOne({_id:124}) != null);
-
- for (var i=0; i<10; i++) {
- assert.eq(conn.getDB(name).foo.findOne({_id:124}), null);
- sleep(1000);
- }
-
- // the node should have the document in 15 seconds (20 for some safety against races)
- assert.soon(function() {
- return conn.getDB(name).foo.findOne({_id:124}) != null;
- }, 10*1000);
-
- replTest.stopSet();
+ for (var i = 0; i < 10; i++) {
+ assert.eq(conn.getDB(name).foo.findOne({_id: 124}), null);
+ sleep(1000);
+ }
+
+ // the node should have the document in 15 seconds (20 for some safety against races)
+ assert.soon(function() {
+ return conn.getDB(name).foo.findOne({_id: 124}) != null;
+ }, 10 * 1000);
+
+ replTest.stopSet();
};
doTest(15);
diff --git a/jstests/replsets/slavedelay3.js b/jstests/replsets/slavedelay3.js
index 5a19027a4ad..2ce6e9b2a80 100644
--- a/jstests/replsets/slavedelay3.js
+++ b/jstests/replsets/slavedelay3.js
@@ -1,7 +1,7 @@
load("jstests/replsets/rslib.js");
var name = 'slavedelay3';
-var replTest = new ReplSetTest({ name: name, nodes: 3, useBridge: true });
+var replTest = new ReplSetTest({name: name, nodes: 3, useBridge: true});
var nodes = replTest.startSet();
var config = replTest.getReplSetConfig();
// ensure member 0 is primary
@@ -24,21 +24,21 @@ for (var i in slaveConns) {
waitForAllMembers(master);
-
-
replTest.awaitReplication();
nodes[0].disconnect(nodes[2]);
-master.foo.insert({x:1});
+master.foo.insert({x: 1});
assert.commandWorked(nodes[1].getDB("admin").runCommand({"replSetSyncFrom": nodes[0].host}));
var res;
assert.soon(function() {
res = nodes[1].getDB("admin").runCommand({"replSetGetStatus": 1});
return res.syncingTo === nodes[0].host;
-}, "node 4 failed to start chaining: "+ tojson(res));
+}, "node 4 failed to start chaining: " + tojson(res));
// make sure the record still appears in the remote slave
-assert.soon( function() { return slave[1].foo.findOne() != null; } );
+assert.soon(function() {
+ return slave[1].foo.findOne() != null;
+});
replTest.stopSet();
diff --git a/jstests/replsets/stepdown.js b/jstests/replsets/stepdown.js
index cd574f725e6..4b736b3b93a 100644
--- a/jstests/replsets/stepdown.js
+++ b/jstests/replsets/stepdown.js
@@ -14,24 +14,9 @@ var errorWasDueToConnectionFailure = function(error) {
};
var replTest = new ReplSetTest({
- name : 'testSet',
- nodes : {
- "n0" : {
- rsConfig : {
- priority : 2
- }
- },
- "n1" : {},
- "n2" : {
- rsConfig : {
- votes : 1,
- priority : 0
- }
- }
- },
- nodeOptions : {
- verbose : 1
- }
+ name: 'testSet',
+ nodes: {"n0": {rsConfig: {priority: 2}}, "n1": {}, "n2": {rsConfig: {votes: 1, priority: 0}}},
+ nodeOptions: {verbose: 1}
});
var nodes = replTest.startSet();
replTest.initiate();
@@ -40,13 +25,13 @@ var master = replTest.getPrimary();
// do a write
print("\ndo a write");
-assert.writeOK(master.getDB("foo").bar.insert({x:1}));
+assert.writeOK(master.getDB("foo").bar.insert({x: 1}));
replTest.awaitReplication();
// lock secondaries
print("\nlock secondaries");
replTest.liveNodes.slaves.forEach(function(slave) {
- printjson(assert.commandWorked(slave.getDB("admin").runCommand({fsync : 1, lock : 1})));
+ printjson(assert.commandWorked(slave.getDB("admin").runCommand({fsync: 1, lock: 1})));
});
print("\nwaiting several seconds before stepdown");
@@ -55,7 +40,7 @@ sleep(2000);
for (var i = 0; i < 11; i++) {
// do another write
- assert.writeOK(master.getDB("foo").bar.insert({x:i}));
+ assert.writeOK(master.getDB("foo").bar.insert({x: i}));
sleep(1000);
}
@@ -66,10 +51,10 @@ printjson(assert.commandFailed(master.getDB("admin").runCommand({replSetStepDown
print("\n do stepdown that should work");
assert.throws(function() {
- assert.commandFailed(master.getDB("admin").runCommand({replSetStepDown:50, force:true}));
+ assert.commandFailed(master.getDB("admin").runCommand({replSetStepDown: 50, force: true}));
});
-var r2 = assert.commandWorked(master.getDB("admin").runCommand({ismaster : 1}));
+var r2 = assert.commandWorked(master.getDB("admin").runCommand({ismaster: 1}));
assert.eq(r2.ismaster, false);
assert.eq(r2.secondary, true);
@@ -79,7 +64,7 @@ replTest.liveNodes.slaves.forEach(function(slave) {
});
print("\nreset stepped down time");
-assert.commandWorked(master.getDB("admin").runCommand({replSetFreeze:0}));
+assert.commandWorked(master.getDB("admin").runCommand({replSetFreeze: 0}));
master = replTest.getPrimary();
print("\nawait");
@@ -99,12 +84,11 @@ assert.soon(function() {
master = replTest.getPrimary();
var firstMaster = master;
-print("\nmaster is now "+firstMaster);
+print("\nmaster is now " + firstMaster);
try {
- assert.commandWorked(master.getDB("admin").runCommand({replSetStepDown : 100, force : true}));
-}
-catch (e) {
+ assert.commandWorked(master.getDB("admin").runCommand({replSetStepDown: 100, force: true}));
+} catch (e) {
// ignore errors due to connection failures as we expect the master to close connections
// on stepdown
if (!errorWasDueToConnectionFailure(e)) {
@@ -128,17 +112,15 @@ master = replTest.liveNodes.master;
var slave = replTest.liveNodes.slaves[0];
try {
- slave.adminCommand({shutdown :1});
-}
-catch (e) {
+ slave.adminCommand({shutdown: 1});
+} catch (e) {
print(e);
}
-
master = replTest.getPrimary();
assert.soon(function() {
try {
- var result = master.getDB("admin").runCommand({replSetGetStatus:1});
+ var result = master.getDB("admin").runCommand({replSetGetStatus: 1});
for (var i in result.members) {
if (result.members[i].self) {
continue;
@@ -146,21 +128,19 @@ assert.soon(function() {
return result.members[i].health == 0;
}
- }
- catch (e) {
+ } catch (e) {
print("error getting status from master: " + e);
master = replTest.getPrimary();
return false;
}
}, 'make sure master knows that slave is down before proceeding');
+print("\nrunning shutdown without force on master: " + master);
-print("\nrunning shutdown without force on master: "+master);
-
-// this should fail because the master can't reach an up-to-date secondary (because the only
+// this should fail because the master can't reach an up-to-date secondary (because the only
// secondary is down)
var now = new Date();
-assert.commandFailed(master.getDB("admin").runCommand({shutdown : 1, timeoutSecs : 3}));
+assert.commandFailed(master.getDB("admin").runCommand({shutdown: 1, timeoutSecs: 3}));
// on windows, javascript and the server perceive time differently, to compensate here we use 2750ms
assert.gte((new Date()) - now, 2750);
@@ -168,20 +148,18 @@ print("\nsend shutdown command");
var currentMaster = replTest.getPrimary();
try {
- printjson(currentMaster.getDB("admin").runCommand({shutdown : 1, force : true}));
-}
-catch (e) {
+ printjson(currentMaster.getDB("admin").runCommand({shutdown: 1, force: true}));
+} catch (e) {
if (!errorWasDueToConnectionFailure(e)) {
throw e;
}
}
-print("checking "+currentMaster+" is actually shutting down");
+print("checking " + currentMaster + " is actually shutting down");
assert.soon(function() {
try {
currentMaster.findOne();
- }
- catch(e) {
+ } catch (e) {
return true;
}
return false;
diff --git a/jstests/replsets/stepdown3.js b/jstests/replsets/stepdown3.js
index 9bfda0ae82b..d0da019f7a1 100644
--- a/jstests/replsets/stepdown3.js
+++ b/jstests/replsets/stepdown3.js
@@ -3,58 +3,59 @@
// This test requires the fsync command to force a secondary to be stale.
// @tags: [requires_fsync]
(function() {
-'use strict';
-
-var replTest = new ReplSetTest({ name: 'testSet', nodes: 2 });
-var nodes = replTest.startSet();
-replTest.initiate();
-var master = replTest.getPrimary();
-
-// do a write to allow stepping down of the primary;
-// otherwise, the primary will refuse to step down
-print("\ndo a write");
-master.getDB("test").foo.insert({x:1});
-replTest.awaitReplication();
-
-// do another write, because the first one might be longer than 10 seconds ago
-// on the secondary (due to starting up), and we need to be within 10 seconds
-// to step down.
-var options = { writeConcern: { w: 2, wtimeout: 30000 }};
-assert.writeOK(master.getDB("test").foo.insert({ x: 2 }, options));
-// lock secondary, to pause replication
-print("\nlock secondary");
-var locked = replTest.liveNodes.slaves[0];
-printjson( locked.getDB("admin").runCommand({fsync : 1, lock : 1}) );
-
-// do a write
-print("\ndo a write");
-master.getDB("test").foo.insert({x:3});
-
-// step down the primary asyncronously
-print("stepdown");
-var command = "sleep(4000); tojson(db.adminCommand( { replSetStepDown : 60, force : 1 } ));";
-var awaitShell = startParallelShell(command, master.port);
-
-print("getlasterror; should assert or return an error, depending on timing");
-var gleFunction = function() {
- var result = master.getDB("test").runCommand({getLastError : 1, w: 2 , wtimeout :30000 });
- if (result.errmsg === "not master" ||
- result.code == ErrorCodes.NotMaster ||
- result.code == ErrorCodes.InterruptedDueToReplStateChange) {
- throw new Error("satisfy assert.throws()");
- }
- print("failed to throw exception; GLE returned: ");
+ 'use strict';
+
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
+ var nodes = replTest.startSet();
+ replTest.initiate();
+ var master = replTest.getPrimary();
+
+ // do a write to allow stepping down of the primary;
+ // otherwise, the primary will refuse to step down
+ print("\ndo a write");
+ master.getDB("test").foo.insert({x: 1});
+ replTest.awaitReplication();
+
+ // do another write, because the first one might be longer than 10 seconds ago
+ // on the secondary (due to starting up), and we need to be within 10 seconds
+ // to step down.
+ var options = {
+ writeConcern: {w: 2, wtimeout: 30000}
+ };
+ assert.writeOK(master.getDB("test").foo.insert({x: 2}, options));
+ // lock secondary, to pause replication
+ print("\nlock secondary");
+ var locked = replTest.liveNodes.slaves[0];
+ printjson(locked.getDB("admin").runCommand({fsync: 1, lock: 1}));
+
+ // do a write
+ print("\ndo a write");
+ master.getDB("test").foo.insert({x: 3});
+
+ // step down the primary asyncronously
+ print("stepdown");
+ var command = "sleep(4000); tojson(db.adminCommand( { replSetStepDown : 60, force : 1 } ));";
+ var awaitShell = startParallelShell(command, master.port);
+
+ print("getlasterror; should assert or return an error, depending on timing");
+ var gleFunction = function() {
+ var result = master.getDB("test").runCommand({getLastError: 1, w: 2, wtimeout: 30000});
+ if (result.errmsg === "not master" || result.code == ErrorCodes.NotMaster ||
+ result.code == ErrorCodes.InterruptedDueToReplStateChange) {
+ throw new Error("satisfy assert.throws()");
+ }
+ print("failed to throw exception; GLE returned: ");
+ printjson(result);
+ };
+ var result = assert.throws(gleFunction);
+ print("result of gle:");
printjson(result);
-};
-var result = assert.throws(gleFunction);
-print("result of gle:");
-printjson(result);
-var exitCode = awaitShell({checkExitSuccess: false});
-assert.neq(0, exitCode, "expected replSetStepDown to close the shell's connection");
+ var exitCode = awaitShell({checkExitSuccess: false});
+ assert.neq(0, exitCode, "expected replSetStepDown to close the shell's connection");
-// unlock and shut down
-printjson(locked.getDB("admin").fsyncUnlock());
-replTest.stopSet();
+ // unlock and shut down
+ printjson(locked.getDB("admin").fsyncUnlock());
+ replTest.stopSet();
})();
diff --git a/jstests/replsets/stepdown_catch_up_opt.js b/jstests/replsets/stepdown_catch_up_opt.js
index a5ccb456762..304927a7838 100644
--- a/jstests/replsets/stepdown_catch_up_opt.js
+++ b/jstests/replsets/stepdown_catch_up_opt.js
@@ -26,41 +26,32 @@
var stringNotIntCode = 14;
// Expect a failure with a string argument.
- assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 'STR'}),
- stringNotIntCode,
- 'Expected string argument to secondaryCatchupPeriodSecs to fail.'
- );
+ assert.commandFailedWithCode(primary.getDB('admin').runCommand(
+ {replSetStepDown: 10, secondaryCatchUpPeriodSecs: 'STR'}),
+ stringNotIntCode,
+ 'Expected string argument to secondaryCatchupPeriodSecs to fail.');
// Expect a failure with a longer secondaryCatchupPeriodSecs than the stepdown period.
assert.commandFailedWithCode(
primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 20}),
stepDownPeriodTooShortCode,
('Expected replSetStepDown to fail given a stepdown time shorter than' +
- ' secondaryCatchUpPeriodSecs')
- );
+ ' secondaryCatchUpPeriodSecs'));
jsTestLog('Stop secondary syncing.');
- assert.commandWorked(
- secondary.getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}
- ),
- 'Failed to configure rsSyncApplyStop failpoint.'
- );
+ assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
+ 'Failed to configure rsSyncApplyStop failpoint.');
function disableFailPoint() {
- assert.commandWorked(
- secondary.getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'off'}
- ),
- 'Failed to disable rsSyncApplyStop failpoint.'
- );
+ assert.commandWorked(secondary.getDB('admin')
+ .runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
+ 'Failed to disable rsSyncApplyStop failpoint.');
}
// If any of these assertions fail, we need to disable the fail point in order for the mongod to
// shut down.
try {
-
jsTestLog('Write to primary to make secondary out of sync.');
assert.writeOK(primary.getDB('test').foo.insert({i: 1}), 'Failed to insert document.');
sleep(1000);
@@ -69,10 +60,10 @@
jsTestLog('Try to step down.');
var startTime = new Date();
assert.commandFailedWithCode(
- primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 1}),
+ primary.getDB('admin')
+ .runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 1}),
noCaughtUpSecondariesCode,
- 'Expected replSetStepDown to fail, since no secondaries should be caught up.'
- );
+ 'Expected replSetStepDown to fail, since no secondaries should be caught up.');
var endTime = new Date();
// Ensure it took at least 1 second to time out. Adjust the timeout a little bit
@@ -80,8 +71,7 @@
assert.lte(0.95,
(endTime - startTime) / 1000,
'Expected replSetStepDown command to fail after 1 second.');
- }
- catch (err) {
+ } catch (err) {
disableFailPoint();
throw err;
}
diff --git a/jstests/replsets/stepdown_kill_other_ops.js b/jstests/replsets/stepdown_kill_other_ops.js
index 6770c9246c9..e55fce17be0 100644
--- a/jstests/replsets/stepdown_kill_other_ops.js
+++ b/jstests/replsets/stepdown_kill_other_ops.js
@@ -1,68 +1,72 @@
// SERVER-15310 Ensure that stepDown kills all other running operations
-(function () {
- "use strict";
- var name = "stepdownKillOps";
- var replSet = new ReplSetTest({name: name, nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate({"_id" : name,
- "members" : [
- {"_id" : 0, "host" : nodes[0], "priority" : 3},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
+(function() {
+ "use strict";
+ var name = "stepdownKillOps";
+ var replSet = new ReplSetTest({name: name, nodes: 3});
+ var nodes = replSet.nodeList();
+ replSet.startSet();
+ replSet.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], "priority": 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+ });
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
+ replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
- var primary = replSet.getPrimary();
- assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
- assert.writeOK(primary.getDB(name).foo.insert({x: 1}, {w: 2, wtimeout:10000}));
- replSet.awaitReplication();
+ var primary = replSet.getPrimary();
+ assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
+ assert.writeOK(primary.getDB(name).foo.insert({x: 1}, {w: 2, wtimeout: 10000}));
+ replSet.awaitReplication();
- jsTestLog("Sleeping 30 seconds so the SECONDARY will be considered electable");
- sleep(30000);
+ jsTestLog("Sleeping 30 seconds so the SECONDARY will be considered electable");
+ sleep(30000);
- // Run eval() in a separate thread to take the global write lock which would prevent stepdown
- // from completing if it failed to kill all running operations.
- jsTestLog("Running eval() to grab global write lock");
- var evalCmd = function() {
- db.eval(function() {
- for (var i = 0; i < 60; i++) {
- // Sleep in 1 second intervals so the javascript engine will notice when
- // it's killed
- sleep(1000);
- } });
- };
- var evalRunner = startParallelShell(evalCmd, primary.port);
+ // Run eval() in a separate thread to take the global write lock which would prevent stepdown
+ // from completing if it failed to kill all running operations.
+ jsTestLog("Running eval() to grab global write lock");
+ var evalCmd = function() {
+ db.eval(function() {
+ for (var i = 0; i < 60; i++) {
+ // Sleep in 1 second intervals so the javascript engine will notice when
+ // it's killed
+ sleep(1000);
+ }
+ });
+ };
+ var evalRunner = startParallelShell(evalCmd, primary.port);
- jsTestLog("Confirming that eval() is running and has the global lock");
- assert.soon(function() {
- var res = primary.getDB('admin').currentOp();
- for (var index in res.inprog) {
- var entry = res.inprog[index];
- if (entry["query"] && entry["query"]["$eval"]) {
- assert.eq("W", entry["locks"]["Global"]);
- return true;
- }
- }
- printjson(res);
- return false;
- }, "$eval never ran and grabbed the global write lock");
+ jsTestLog("Confirming that eval() is running and has the global lock");
+ assert.soon(function() {
+ var res = primary.getDB('admin').currentOp();
+ for (var index in res.inprog) {
+ var entry = res.inprog[index];
+ if (entry["query"] && entry["query"]["$eval"]) {
+ assert.eq("W", entry["locks"]["Global"]);
+ return true;
+ }
+ }
+ printjson(res);
+ return false;
+ }, "$eval never ran and grabbed the global write lock");
- jsTestLog("Stepping down");
- try {
- assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 30}));
- } catch (x) {
- // expected
- }
+ jsTestLog("Stepping down");
+ try {
+ assert.commandWorked(primary.getDB('admin').runCommand({replSetStepDown: 30}));
+ } catch (x) {
+ // expected
+ }
- jsTestLog("Waiting for former PRIMARY to become SECONDARY");
- replSet.waitForState(primary, ReplSetTest.State.SECONDARY, 30000);
+ jsTestLog("Waiting for former PRIMARY to become SECONDARY");
+ replSet.waitForState(primary, ReplSetTest.State.SECONDARY, 30000);
- var newPrimary = replSet.getPrimary();
- assert.neq(primary, newPrimary, "SECONDARY did not become PRIMARY");
+ var newPrimary = replSet.getPrimary();
+ assert.neq(primary, newPrimary, "SECONDARY did not become PRIMARY");
- var exitCode = evalRunner({checkExitSuccess: false});
- assert.neq(0, exitCode,
- "expected shell to exit abnormally due to JS execution being terminated");
- })();
+ var exitCode = evalRunner({checkExitSuccess: false});
+ assert.neq(
+ 0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
+})();
diff --git a/jstests/replsets/stepdown_killop.js b/jstests/replsets/stepdown_killop.js
index a14193112d1..c5fc593239b 100644
--- a/jstests/replsets/stepdown_killop.js
+++ b/jstests/replsets/stepdown_killop.js
@@ -8,98 +8,96 @@
// 5. Once a write is blocked, kill the stepDown operation
// 6. Writes should become unblocked and the primary should stay primary
-(function () {
- "use strict";
- var name = "interruptStepDown";
- var replSet = new ReplSetTest({name: name, nodes: 3});
- var nodes = replSet.nodeList();
- replSet.startSet();
- replSet.initiate({"_id" : name,
- "members" : [
- {"_id" : 0, "host" : nodes[0], "priority" : 3},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
+(function() {
+ "use strict";
+ var name = "interruptStepDown";
+ var replSet = new ReplSetTest({name: name, nodes: 3});
+ var nodes = replSet.nodeList();
+ replSet.startSet();
+ replSet.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], "priority": 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+ });
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
+ replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
- var secondary = replSet.getSecondary();
- jsTestLog('Disable replication on the SECONDARY ' + secondary.host);
- assert.commandWorked(
- secondary.getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}
- ),
- 'Failed to configure rsSyncApplyStop failpoint.'
- );
+ var secondary = replSet.getSecondary();
+ jsTestLog('Disable replication on the SECONDARY ' + secondary.host);
+ assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
+ 'Failed to configure rsSyncApplyStop failpoint.');
- replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
+ replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
- var primary = replSet.getPrimary();
- assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
+ var primary = replSet.getPrimary();
+ assert.eq(primary.host, nodes[0], "primary assumed to be node 0");
- // do a write then ask the PRIMARY to stepdown
- jsTestLog("Initiating stepdown");
- assert.writeOK(primary.getDB(name).foo.insert({myDoc: true, x: 1},
- {writeConcern: {w: 1, wtimeout: 60000}}));
- var stepDownCmd = function() {
- var res = db.getSiblingDB('admin').runCommand({replSetStepDown: 60,
- secondaryCatchUpPeriodSecs: 60});
- assert.commandFailedWithCode(res, 11601 /*interrupted*/);
- };
- var stepDowner = startParallelShell(stepDownCmd, primary.port);
- var stepDownOpID = -1;
+ // do a write then ask the PRIMARY to stepdown
+ jsTestLog("Initiating stepdown");
+ assert.writeOK(primary.getDB(name)
+ .foo.insert({myDoc: true, x: 1}, {writeConcern: {w: 1, wtimeout: 60000}}));
+ var stepDownCmd = function() {
+ var res = db.getSiblingDB('admin')
+ .runCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60});
+ assert.commandFailedWithCode(res, 11601 /*interrupted*/);
+ };
+ var stepDowner = startParallelShell(stepDownCmd, primary.port);
+ var stepDownOpID = -1;
- jsTestLog("Looking for stepdown in currentOp() output");
- assert.soon(function() {
- var res = primary.getDB('admin').currentOp(true);
- for (var index in res.inprog) {
- var entry = res.inprog[index];
- if (entry["query"] && entry["query"]["replSetStepDown"] === 60){
- stepDownOpID = entry.opid;
- return true;
- }
- }
- printjson(res);
- return false;
- }, "global shared lock not acquired");
+ jsTestLog("Looking for stepdown in currentOp() output");
+ assert.soon(function() {
+ var res = primary.getDB('admin').currentOp(true);
+ for (var index in res.inprog) {
+ var entry = res.inprog[index];
+ if (entry["query"] && entry["query"]["replSetStepDown"] === 60) {
+ stepDownOpID = entry.opid;
+ return true;
+ }
+ }
+ printjson(res);
+ return false;
+ }, "global shared lock not acquired");
- jsTestLog("Ensuring writes block on the stepdown");
- // Start repeatedly doing an update until one blocks waiting for the lock.
- // If the test is successful this thread will be terminated when we remove the document
- // being updated.
- var updateCmd = function() {
- while(true) {
- var res = db.getSiblingDB("interruptStepDown").foo.update({myDoc: true},
- {$inc: {x: 1}});
- assert.writeOK(res);
- if (res.nModified == 0) {
- quit(0);
- }
- else {
- printjson(res);
- }
+ jsTestLog("Ensuring writes block on the stepdown");
+ // Start repeatedly doing an update until one blocks waiting for the lock.
+ // If the test is successful this thread will be terminated when we remove the document
+ // being updated.
+ var updateCmd = function() {
+ while (true) {
+ var res =
+ db.getSiblingDB("interruptStepDown").foo.update({myDoc: true}, {$inc: {x: 1}});
+ assert.writeOK(res);
+ if (res.nModified == 0) {
+ quit(0);
+ } else {
+ printjson(res);
+ }
+ }
+ };
+ var writer = startParallelShell(updateCmd, primary.port);
+ assert.soon(function() {
+ var res = primary.getDB(name).currentOp();
+ for (var entry in res.inprog) {
+ if (res.inprog[entry]["waitingForLock"]) {
+ return true;
+ }
+ }
+ printjson(res);
+ return false;
+ }, "write never blocked on the global shared lock");
- }
- };
- var writer = startParallelShell(updateCmd, primary.port);
- assert.soon(function() {
- var res = primary.getDB(name).currentOp();
- for (var entry in res.inprog) {
- if (res.inprog[entry]["waitingForLock"]) {
- return true;
- }
- }
- printjson(res);
- return false;
- }, "write never blocked on the global shared lock");
+ // kill the stepDown and ensure that that unblocks writes to the db
+ jsTestLog("Killing stepdown");
+ primary.getDB('admin').killOp(stepDownOpID);
- // kill the stepDown and ensure that that unblocks writes to the db
- jsTestLog("Killing stepdown");
- primary.getDB('admin').killOp(stepDownOpID);
+ var exitCode = stepDowner();
+ assert.eq(0, exitCode);
- var exitCode = stepDowner();
- assert.eq(0, exitCode);
-
- assert.writeOK(primary.getDB(name).foo.remove({}));
- exitCode = writer();
- assert.eq(0, exitCode);
- })();
+ assert.writeOK(primary.getDB(name).foo.remove({}));
+ exitCode = writer();
+ assert.eq(0, exitCode);
+})();
diff --git a/jstests/replsets/stepdown_long_wait_time.js b/jstests/replsets/stepdown_long_wait_time.js
index db1821cf988..60e0fdb4247 100644
--- a/jstests/replsets/stepdown_long_wait_time.js
+++ b/jstests/replsets/stepdown_long_wait_time.js
@@ -7,45 +7,46 @@
// 5. Once a write is blocked, restart replication on the SECONDARY.
// 6. Wait for PRIMARY to StepDown.
-(function () {
+(function() {
"use strict";
var name = "stepDownWithLongWait";
var replSet = new ReplSetTest({name: name, nodes: 3});
var nodes = replSet.nodeList();
replSet.startSet();
- replSet.initiate({"_id" : name,
- "members" : [
- {"_id" : 0, "host" : nodes[0], "priority" : 3},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
+ replSet.initiate({
+ "_id": name,
+ "members": [
+ {"_id": 0, "host": nodes[0], "priority": 3},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+ });
replSet.waitForState(replSet.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var primary = replSet.getPrimary();
var secondary = replSet.getSecondary();
jsTestLog('Disable replication on the SECONDARY ' + secondary.host);
- assert.commandWorked(
- secondary.getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}
- ),
- 'Failed to configure rsSyncApplyStop failpoint.'
- );
+ assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'}),
+ 'Failed to configure rsSyncApplyStop failpoint.');
jsTestLog("do a write then ask the PRIMARY to stepdown");
- var options = {writeConcern: {w: 1, wtimeout: 60000}};
+ var options = {
+ writeConcern: {w: 1, wtimeout: 60000}
+ };
assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
var stepDownSecs = 60;
var secondaryCatchUpPeriodSecs = 60;
- var stepDownCmd = "db.getSiblingDB('admin').runCommand({" +
- "replSetStepDown: " + stepDownSecs + ", " +
- "secondaryCatchUpPeriodSecs: " + secondaryCatchUpPeriodSecs +
- "});";
+ var stepDownCmd = "db.getSiblingDB('admin').runCommand({" + "replSetStepDown: " + stepDownSecs +
+ ", " + "secondaryCatchUpPeriodSecs: " + secondaryCatchUpPeriodSecs + "});";
var stepDowner = startParallelShell(stepDownCmd, primary.port);
assert.soon(function() {
var res = primary.getDB('admin').currentOp(true);
for (var entry in res.inprog) {
- if (res.inprog[entry]["query"] && res.inprog[entry]["query"]["replSetStepDown"] === 60){
+ if (res.inprog[entry]["query"] &&
+ res.inprog[entry]["query"]["replSetStepDown"] === 60) {
return true;
}
}
@@ -60,8 +61,7 @@
var res = db.getSiblingDB("stepDownWithLongWait").foo.update({}, {$inc: {x: 1}});
jsTestLog('Unexpected successful update operation on the primary during step down: ' +
tojson(res));
- }
- catch (e) {
+ } catch (e) {
// Not important what error we get back. The client will probably be disconnected by
// the primary with a "error doing query: failed" message.
jsTestLog('Update operation returned with result: ' + tojson(e));
@@ -81,11 +81,8 @@
jsTestLog('Enable replication on the SECONDARY ' + secondary.host);
assert.commandWorked(
- secondary.getDB('admin').runCommand(
- {configureFailPoint: 'rsSyncApplyStop', mode: 'off'}
- ),
- 'Failed to disable rsSyncApplyStop failpoint.'
- );
+ secondary.getDB('admin').runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
+ 'Failed to disable rsSyncApplyStop failpoint.');
jsTestLog("Wait for PRIMARY " + primary.host + " to completely step down.");
replSet.waitForState(primary, ReplSetTest.State.SECONDARY, secondaryCatchUpPeriodSecs * 1000);
diff --git a/jstests/replsets/stepdown_wrt_electable.js b/jstests/replsets/stepdown_wrt_electable.js
index e6917ec7c3b..c929f2a2c56 100644
--- a/jstests/replsets/stepdown_wrt_electable.js
+++ b/jstests/replsets/stepdown_wrt_electable.js
@@ -1,11 +1,10 @@
// Test that replSetStepDown filters out non-electable nodes
-var replTest = new ReplSetTest({ name: 'testSet', nodes: 2 });
+var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
var nodes = replTest.startSet();
-
// setup config
var c = replTest.getReplSetConfig();
-c.members[1].priority = 0; // not electable
+c.members[1].priority = 0; // not electable
replTest.initiate(c);
var master = replTest.getPrimary();
@@ -14,11 +13,11 @@ var firstPrimary = testDB.isMaster().primary;
// do a write to allow stepping down of the primary;
// otherwise, the primary will refuse to step down
-testDB.foo.insert({x:1});
+testDB.foo.insert({x: 1});
replTest.awaitReplication();
// stepdown should fail since there is no-one to elect within 10 secs
-testDB.adminCommand({replSetStepDown:5});
+testDB.adminCommand({replSetStepDown: 5});
assert(master.getDB("a").isMaster().ismaster, "not master");
// step down the primary asyncronously so it doesn't kill this test
@@ -27,7 +26,7 @@ var exitCode = wait({checkExitSuccess: false});
assert.neq(0, exitCode, "expected replSetStepDown to close the shell's connection");
// check that the old primary is no longer master
-assert.soon( function() {
+assert.soon(function() {
try {
var isMaster = master.getDB("a").isMaster();
printjson(isMaster);
@@ -35,7 +34,7 @@ assert.soon( function() {
} catch (e) {
return false;
}
- }, "they shouldn't be master, but are");
+}, "they shouldn't be master, but are");
// stop
replTest.stopSet();
diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js
index d847127cae3..481f59a13d9 100644
--- a/jstests/replsets/sync2.js
+++ b/jstests/replsets/sync2.js
@@ -1,20 +1,22 @@
var replTest = new ReplSetTest({name: 'sync2', nodes: 5, useBridge: true});
var nodes = replTest.nodeList();
var conns = replTest.startSet({oplogSize: "2"});
-replTest.initiate({"_id": "sync2",
- "members": [
- {"_id": 0, host: nodes[0], priority: 2},
- {"_id": 1, host: nodes[1]},
- {"_id": 2, host: nodes[2]},
- {"_id": 3, host: nodes[3]},
- {"_id": 4, host: nodes[4]}]
- });
+replTest.initiate({
+ "_id": "sync2",
+ "members": [
+ {"_id": 0, host: nodes[0], priority: 2},
+ {"_id": 1, host: nodes[1]},
+ {"_id": 2, host: nodes[2]},
+ {"_id": 3, host: nodes[3]},
+ {"_id": 4, host: nodes[4]}
+ ]
+});
var master = replTest.getPrimary();
jsTestLog("Replica set test initialized");
// initial sync
-master.getDB("foo").bar.insert({x:1});
+master.getDB("foo").bar.insert({x: 1});
replTest.awaitReplication();
conns[0].disconnect(conns[4]);
@@ -33,18 +35,22 @@ assert.soon(function() {
replTest.awaitReplication();
jsTestLog("Checking that ops still replicate correctly");
-var option = { writeConcern: { w: 5, wtimeout: 30000 }};
+var option = {
+ writeConcern: {w: 5, wtimeout: 30000}
+};
// In PV0, this write can fail as a result of a bad spanning tree. If 2 was syncing from 4 prior to
// bridging, it will not change sync sources and receive the write in time. This was not a problem
// in 3.0 because the old version of mongobridge caused all the nodes to restart during
// partitioning, forcing the set to rebuild the spanning tree.
-assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }, option));
+assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option));
// 4 is connected to 3
conns[4].disconnect(conns[2]);
conns[4].reconnect(conns[3]);
-option = { writeConcern: { w: 5, wtimeout: 30000 }};
-assert.writeOK(master.getDB("foo").bar.insert({ x: 1 }, option));
+option = {
+ writeConcern: {w: 5, wtimeout: 30000}
+};
+assert.writeOK(master.getDB("foo").bar.insert({x: 1}, option));
replTest.stopSet();
diff --git a/jstests/replsets/sync_passive.js b/jstests/replsets/sync_passive.js
index 76db6a4f838..4899385563f 100644
--- a/jstests/replsets/sync_passive.js
+++ b/jstests/replsets/sync_passive.js
@@ -18,8 +18,8 @@ load("jstests/replsets/rslib.js");
var name = "sync_passive";
var host = getHostName();
-
-var replTest = new ReplSetTest( {name: name, nodes: 3} );
+
+var replTest = new ReplSetTest({name: name, nodes: 3});
var nodes = replTest.startSet();
@@ -27,7 +27,7 @@ var nodes = replTest.startSet();
var config = replTest.getReplSetConfig();
config.members[0].priority = 2;
config.members[2].priority = 0;
-
+
replTest.initiate(config);
replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
@@ -36,56 +36,46 @@ var server0 = master;
var server1 = replTest.liveNodes.slaves[0];
print("Initial sync");
-for (var i=0;i<100;i++) {
- master.foo.insert({x:i});
+for (var i = 0; i < 100; i++) {
+ master.foo.insert({x: i});
}
replTest.awaitReplication();
-
print("stop #1");
replTest.stop(1);
-
print("add some data");
-for (var i=0;i<1000;i++) {
- master.bar.insert({x:i});
+for (var i = 0; i < 1000; i++) {
+ master.bar.insert({x: i});
}
replTest.awaitReplication();
-
print("stop #0");
replTest.stop(0);
-
print("restart #1");
replTest.restart(1);
-
print("check sync");
replTest.awaitReplication(60 * 1000);
-
print("add data");
reconnect(server1);
master = replTest.getPrimary().getDB("test");
-for (var i=0;i<1000;i++) {
- master.bar.insert({x:i});
+for (var i = 0; i < 1000; i++) {
+ master.bar.insert({x: i});
}
replTest.awaitReplication();
-
print("kill #1");
replTest.stop(1);
-
print("restart #0");
replTest.restart(0);
reconnect(server0);
-
print("wait for sync");
replTest.awaitReplication();
-
print("bring #1 back up, make sure everything's okay");
replTest.restart(1);
diff --git a/jstests/replsets/system_profile.js b/jstests/replsets/system_profile.js
index 592accb43b8..5a40e594866 100644
--- a/jstests/replsets/system_profile.js
+++ b/jstests/replsets/system_profile.js
@@ -36,8 +36,8 @@
// emptycapped the collection
assert.commandWorked(primaryDB.runCommand({emptycapped: "system.profile"}));
- assert.eq(op, getLatestOp(),
- "oplog entry created when system.profile was emptied via emptycapped");
+ assert.eq(
+ op, getLatestOp(), "oplog entry created when system.profile was emptied via emptycapped");
assert(primaryDB.system.profile.drop());
// convertToCapped
diff --git a/jstests/replsets/tags.js b/jstests/replsets/tags.js
index 87bc0075109..55a0c4e2927 100644
--- a/jstests/replsets/tags.js
+++ b/jstests/replsets/tags.js
@@ -11,63 +11,62 @@
var port = replTest.ports;
replTest.initiate({
_id: name,
- members : [
+ members: [
{
- _id: 0,
- host: nodes[0],
- tags: {
- server: '0',
- dc: 'ny',
- ny: '1',
- rack: 'ny.rk1',
- },
+ _id: 0,
+ host: nodes[0],
+ tags: {
+ server: '0',
+ dc: 'ny',
+ ny: '1',
+ rack: 'ny.rk1',
+ },
},
{
- _id: 1,
- host: nodes[1],
- priority: 2,
- tags: {
- server: '1',
- dc: 'ny',
- ny: '2',
- rack: 'ny.rk1',
- },
+ _id: 1,
+ host: nodes[1],
+ priority: 2,
+ tags: {
+ server: '1',
+ dc: 'ny',
+ ny: '2',
+ rack: 'ny.rk1',
+ },
},
{
- _id: 2,
- host: nodes[2],
- priority: 3,
- tags: {
- server: '2',
- dc: 'ny',
- ny: '3',
- rack: 'ny.rk2',
- 2: 'this',
- },
+ _id: 2,
+ host: nodes[2],
+ priority: 3,
+ tags: {
+ server: '2',
+ dc: 'ny',
+ ny: '3',
+ rack: 'ny.rk2', 2: 'this',
+ },
},
{
- _id: 3,
- host: nodes[3],
- tags: {
- server: '3',
- dc: 'sf',
- sf: '1',
- rack: 'sf.rk1',
- },
+ _id: 3,
+ host: nodes[3],
+ tags: {
+ server: '3',
+ dc: 'sf',
+ sf: '1',
+ rack: 'sf.rk1',
+ },
},
{
- _id: 4,
- host: nodes[4],
- tags: {
- server: '4',
- dc: 'sf',
- sf: '2',
- rack: 'sf.rk2',
- },
+ _id: 4,
+ host: nodes[4],
+ tags: {
+ server: '4',
+ dc: 'sf',
+ sf: '2',
+ rack: 'sf.rk2',
+ },
},
],
- settings : {
- getLastErrorModes : {
+ settings: {
+ getLastErrorModes: {
'2 dc and 3 server': {
dc: 2,
server: 3,
@@ -99,7 +98,9 @@
jsTestLog('Node ' + nodeId + ' (' + replTest.nodes[nodeId].host + ') should be primary.');
replTest.waitForState(replTest.nodes[nodeId], ReplSetTest.State.PRIMARY, 60 * 1000);
primary = replTest.getPrimary();
- var writeConcern = {writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}};
+ var writeConcern = {
+ writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}
+ };
assert.writeOK(primary.getDB('foo').bar.insert({x: 100}, writeConcern));
return primary;
};
@@ -124,7 +125,9 @@
jsTestLog('partitions: nodes with each set of brackets [N1, N2, N3] form a complete network.');
jsTestLog('partitions: [0-1-2] [3] [4] (only nodes 0 and 1 can replicate from primary node 2');
- var doc = {x: 1};
+ var doc = {
+ x: 1
+ };
// This timeout should be shorter in duration than the server parameter maxSyncSourceLagSecs.
// Some writes are expected to block for this 'timeout' duration before failing.
@@ -137,15 +140,20 @@
primary = ensurePrimary(2, 3);
jsTestLog('Non-existent write concern should be rejected.');
- options = {writeConcern: {w: 'blahblah', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: 'blahblah', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
var result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
- assert.eq(ErrorCodes.UnknownReplWriteConcern, result.getWriteConcernError().code,
+ assert.eq(ErrorCodes.UnknownReplWriteConcern,
+ result.getWriteConcernError().code,
tojson(result.getWriteConcernError()));
jsTestLog('Write concern "3 or 4" should fail - 3 and 4 are not connected to the primary.');
- var options = {writeConcern: {w: '3 or 4', wtimeout: timeout}};
+ var options = {
+ writeConcern: {w: '3 or 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = primary.getDB('foo').bar.insert(doc, options);
assert.neq(null, result.getWriteConcernError());
@@ -158,12 +166,16 @@
jsTestLog('Write concern "3 or 4" should work - 4 is now connected to the primary ' +
primary.host + ' via node 1 ' + replTest.nodes[1].host);
- options = {writeConcern: {w: '3 or 4', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '3 or 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "3 and 4" should fail - 3 is not connected to the primary.');
- options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '3 and 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
@@ -178,23 +190,31 @@
jsTestLog('31003 should sync from 31004 (31024)');
jsTestLog('Write concern "3 and 4" should work - ' +
'nodes 3 and 4 are connected to primary via node 1.');
- options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '3 and 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" - writes to primary only.');
- options = {writeConcern: {w: '2', wtimeout: 0}};
+ options = {
+ writeConcern: {w: '2', wtimeout: 0}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "1 and 2"');
- options = {writeConcern: {w: '1 and 2', wtimeout: 0}};
+ options = {
+ writeConcern: {w: '1 and 2', wtimeout: 0}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2 dc and 3 server"');
primary = ensurePrimary(2, 5);
- options = {writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
@@ -211,7 +231,7 @@
// Is this necessary when we partition node 2 off from the rest of the nodes?
replTest.stop(2);
jsTestLog('partitions: [0-1] [2] [1-3-4] ' +
- '(all secondaries except down node 2 can replicate from new primary node 1)');
+ '(all secondaries except down node 2 can replicate from new primary node 1)');
// Node 1 with slightly higher priority will take over.
jsTestLog('1 must become primary here because otherwise the other members will take too ' +
@@ -219,13 +239,17 @@
primary = ensurePrimary(1, 4);
jsTestLog('Write concern "3 and 4" should still work with new primary node 1 ' + primary.host);
- options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '3 and 4', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" should fail because node 2 ' + replTest.nodes[2].host +
' is down.');
- options = {writeConcern: {w: '2', wtimeout: timeout}};
+ options = {
+ writeConcern: {w: '2', wtimeout: timeout}
+ };
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
diff --git a/jstests/replsets/tags2.js b/jstests/replsets/tags2.js
index e4d4ccd50e8..ff0e81fda97 100644
--- a/jstests/replsets/tags2.js
+++ b/jstests/replsets/tags2.js
@@ -1,49 +1,53 @@
// Change a write concern mode from 2 to 3 servers
var host = getHostName();
-var replTest = new ReplSetTest({ name: "rstag", nodes: 4 });
+var replTest = new ReplSetTest({name: "rstag", nodes: 4});
var nodes = replTest.startSet();
var ports = replTest.ports;
-var conf = {_id : "rstag", version: 1, members : [
- {_id : 0, host : host+":"+ports[0], tags : {"backup" : "A"}},
- {_id : 1, host : host+":"+ports[1], tags : {"backup" : "B"}},
- {_id : 2, host : host+":"+ports[2], tags : {"backup" : "C"}},
- {_id : 3, host : host+":"+ports[3], tags : {"backup" : "D"}, arbiterOnly : true}],
- settings : {getLastErrorModes : {
- backedUp : {backup : 2} }} };
+var conf = {
+ _id: "rstag",
+ version: 1,
+ members: [
+ {_id: 0, host: host + ":" + ports[0], tags: {"backup": "A"}},
+ {_id: 1, host: host + ":" + ports[1], tags: {"backup": "B"}},
+ {_id: 2, host: host + ":" + ports[2], tags: {"backup": "C"}},
+ {_id: 3, host: host + ":" + ports[3], tags: {"backup": "D"}, arbiterOnly: true}
+ ],
+ settings: {getLastErrorModes: {backedUp: {backup: 2}}}
+};
print("arbiters can't have tags");
-var result = nodes[0].getDB("admin").runCommand({replSetInitiate : conf});
+var result = nodes[0].getDB("admin").runCommand({replSetInitiate: conf});
printjson(result);
assert.eq(result.ok, 0);
conf.members.pop();
replTest.stop(3);
replTest.remove(3);
-replTest.initiate( conf );
+replTest.initiate(conf);
replTest.awaitReplication();
master = replTest.getPrimary();
var db = master.getDB("test");
-assert.writeOK(db.foo.insert({ x: 1 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }}));
+assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'backedUp', wtimeout: 20000}}));
conf.version = 2;
conf.settings.getLastErrorModes.backedUp.backup = 3;
-master.getDB("admin").runCommand( {replSetReconfig: conf} );
+master.getDB("admin").runCommand({replSetReconfig: conf});
replTest.awaitReplication();
master = replTest.getPrimary();
var db = master.getDB("test");
-assert.writeOK(db.foo.insert({ x: 2 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }}));
+assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'backedUp', wtimeout: 20000}}));
conf.version = 3;
conf.members[0].priorty = 3;
conf.members[2].priorty = 0;
-master.getDB("admin").runCommand( {replSetReconfig: conf} );
+master.getDB("admin").runCommand({replSetReconfig: conf});
master = replTest.getPrimary();
var db = master.getDB("test");
-assert.writeOK(db.foo.insert({ x: 3 }, { writeConcern: { w: 'backedUp', wtimeout: 20000 }}));
+assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'backedUp', wtimeout: 20000}}));
replTest.stopSet();
diff --git a/jstests/replsets/tags_with_reconfig.js b/jstests/replsets/tags_with_reconfig.js
index 8f1e01ce176..512a55fe771 100644
--- a/jstests/replsets/tags_with_reconfig.js
+++ b/jstests/replsets/tags_with_reconfig.js
@@ -3,35 +3,40 @@
// time. This would cause us to update stale items in the cache when secondaries
// reported their progress to a primary.
-
// Start a replica set with 3 nodes
var host = getHostName();
-var replTest = new ReplSetTest({ name: "tags_with_reconfig", nodes: 3 });
+var replTest = new ReplSetTest({name: "tags_with_reconfig", nodes: 3});
var nodes = replTest.startSet();
var ports = replTest.ports;
// Set tags and getLastErrorModes
-var conf = {_id : "tags_with_reconfig", version: 1, members : [
- {_id : 0, host : host+":"+ports[0], tags : {"dc" : "bbb"}},
- {_id : 1, host : host+":"+ports[1], tags : {"dc" : "bbb"}},
- {_id : 2, host : host+":"+ports[2], tags : {"dc" : "ccc"}}],
- settings : {getLastErrorModes : {
- anydc : {dc : 1},
- alldc : {dc : 2}, }} };
-
-
-replTest.initiate( conf );
+var conf = {
+ _id: "tags_with_reconfig",
+ version: 1,
+ members: [
+ {_id: 0, host: host + ":" + ports[0], tags: {"dc": "bbb"}},
+ {_id: 1, host: host + ":" + ports[1], tags: {"dc": "bbb"}},
+ {_id: 2, host: host + ":" + ports[2], tags: {"dc": "ccc"}}
+ ],
+ settings: {
+ getLastErrorModes: {
+ anydc: {dc: 1},
+ alldc: {dc: 2},
+ }
+ }
+};
+
+replTest.initiate(conf);
replTest.awaitReplication();
-
master = replTest.getPrimary();
var db = master.getDB("test");
// Insert a document with write concern : anydc
-assert.writeOK(db.foo.insert({ x: 1 }, { writeConcern: { w: 'anydc', wtimeout: 20000 }}));
+assert.writeOK(db.foo.insert({x: 1}, {writeConcern: {w: 'anydc', wtimeout: 20000}}));
// Insert a document with write concern : alldc
-assert.writeOK(db.foo.insert({ x: 2 }, { writeConcern: { w: 'alldc', wtimeout: 20000 }}));
+assert.writeOK(db.foo.insert({x: 2}, {writeConcern: {w: 'alldc', wtimeout: 20000}}));
// Add a new tag to the replica set
var config = master.getDB("local").system.replset.findOne();
@@ -41,9 +46,8 @@ config.version++;
config.members[0].tags.newtag = "newtag";
try {
- master.getDB("admin").runCommand({replSetReconfig : config});
-}
-catch(e) {
+ master.getDB("admin").runCommand({replSetReconfig: config});
+} catch (e) {
print(e);
}
@@ -53,14 +57,13 @@ replTest.awaitReplication();
var config = master.getDB("local").system.replset.findOne();
printjson(config);
-
master = replTest.getPrimary();
var db = master.getDB("test");
// Insert a document with write concern : anydc
-assert.writeOK(db.foo.insert({ x: 3 }, { writeConcern: { w: 'anydc', wtimeout: 20000 }}));
+assert.writeOK(db.foo.insert({x: 3}, {writeConcern: {w: 'anydc', wtimeout: 20000}}));
// Insert a document with write concern : alldc
-assert.writeOK(db.foo.insert({ x: 4 }, { writeConcern: { w: 'alldc', wtimeout: 20000 }}));
+assert.writeOK(db.foo.insert({x: 4}, {writeConcern: {w: 'alldc', wtimeout: 20000}}));
replTest.stopSet();
diff --git a/jstests/replsets/temp_namespace.js b/jstests/replsets/temp_namespace.js
index 07e5291da0e..4efd3e0b7b3 100644
--- a/jstests/replsets/temp_namespace.js
+++ b/jstests/replsets/temp_namespace.js
@@ -1,17 +1,20 @@
// SERVER-10927
// This is to make sure that temp collections get cleaned up on promotion to primary
-var replTest = new ReplSetTest({ name: 'testSet', nodes: 3 });
+var replTest = new ReplSetTest({name: 'testSet', nodes: 3});
var nodes = replTest.nodeList();
printjson(nodes);
// We need an arbiter to ensure that the primary doesn't step down when we restart the secondary
replTest.startSet();
-replTest.initiate({"_id" : "testSet",
- "members" : [
- {"_id" : 0, "host" : nodes[0]},
- {"_id" : 1, "host" : nodes[1]},
- {"_id" : 2, "host" : nodes[2], "arbiterOnly" : true}]});
+replTest.initiate({
+ "_id": "testSet",
+ "members": [
+ {"_id": 0, "host": nodes[0]},
+ {"_id": 1, "host": nodes[1]},
+ {"_id": 2, "host": nodes[2], "arbiterOnly": true}
+ ]
+});
var master = replTest.getPrimary();
var second = replTest.getSecondary();
@@ -24,63 +27,62 @@ var secondDB = second.getDB('test');
// set up collections
masterDB.runCommand({create: 'temp1', temp: true});
-masterDB.temp1.ensureIndex({x:1});
+masterDB.temp1.ensureIndex({x: 1});
masterDB.runCommand({create: 'temp2', temp: 1});
-masterDB.temp2.ensureIndex({x:1});
+masterDB.temp2.ensureIndex({x: 1});
masterDB.runCommand({create: 'keep1', temp: false});
masterDB.runCommand({create: 'keep2', temp: 0});
masterDB.runCommand({create: 'keep3'});
-assert.writeOK(masterDB.keep4.insert({}, { writeConcern: { w: 2 }}));
+assert.writeOK(masterDB.keep4.insert({}, {writeConcern: {w: 2}}));
// make sure they exist on primary and secondary
-function countCollection( mydb, nameFilter ) {
- var result = mydb.runCommand( "listCollections", { filter : { name : nameFilter } } );
- assert.commandWorked( result );
- return new DBCommandCursor( mydb.getMongo(), result ).itcount();
+function countCollection(mydb, nameFilter) {
+ var result = mydb.runCommand("listCollections", {filter: {name: nameFilter}});
+ assert.commandWorked(result);
+ return new DBCommandCursor(mydb.getMongo(), result).itcount();
}
-function countIndexesFor( mydb, nameFilter ) {
- var result = mydb.runCommand( "listCollections", { filter : { name : nameFilter } } );
- assert.commandWorked( result );
- var arr = new DBCommandCursor( mydb.getMongo(), result ).toArray();
+function countIndexesFor(mydb, nameFilter) {
+ var result = mydb.runCommand("listCollections", {filter: {name: nameFilter}});
+ assert.commandWorked(result);
+ var arr = new DBCommandCursor(mydb.getMongo(), result).toArray();
var total = 0;
- for ( var i = 0; i < arr.length; i++ ) {
+ for (var i = 0; i < arr.length; i++) {
var coll = arr[i];
- total += mydb.getCollection( coll.name ).getIndexes().length;
+ total += mydb.getCollection(coll.name).getIndexes().length;
}
return total;
}
-assert.eq(countCollection(masterDB,/temp\d$/), 2); // collections
-assert.eq(countIndexesFor(masterDB,/temp\d$/), 4); // indexes (2 _id + 2 x)
-assert.eq(countCollection(masterDB,/keep\d$/), 4);
+assert.eq(countCollection(masterDB, /temp\d$/), 2); // collections
+assert.eq(countIndexesFor(masterDB, /temp\d$/), 4); // indexes (2 _id + 2 x)
+assert.eq(countCollection(masterDB, /keep\d$/), 4);
-assert.eq(countCollection(secondDB,/temp\d$/), 2); // collections
-assert.eq(countIndexesFor(secondDB,/temp\d$/), 4); // indexes (2 _id + 2 x)
-assert.eq(countCollection(secondDB,/keep\d$/), 4);
+assert.eq(countCollection(secondDB, /temp\d$/), 2); // collections
+assert.eq(countIndexesFor(secondDB, /temp\d$/), 4); // indexes (2 _id + 2 x)
+assert.eq(countCollection(secondDB, /keep\d$/), 4);
// restart secondary and reconnect
-replTest.restart(secondId, {}, /*wait=*/true);
+replTest.restart(secondId, {}, /*wait=*/true);
// wait for the secondary to achieve secondary status
-assert.soon(function () {
- try {
- res = second.getDB("admin").runCommand({ replSetGetStatus: 1 });
- return res.myState == 2;
- }
- catch (e) {
- return false;
- }
- }, "took more than a minute for the secondary to become secondary again", 60*1000);
+assert.soon(function() {
+ try {
+ res = second.getDB("admin").runCommand({replSetGetStatus: 1});
+ return res.myState == 2;
+ } catch (e) {
+ return false;
+ }
+}, "took more than a minute for the secondary to become secondary again", 60 * 1000);
// make sure restarting secondary didn't drop collections
-assert.eq(countCollection(secondDB,/temp\d$/), 2); // collections
-assert.eq(countIndexesFor(secondDB,/temp\d$/), 4); // indexes (2 _id + 2 x)
-assert.eq(countCollection(secondDB,/keep\d$/), 4);
+assert.eq(countCollection(secondDB, /temp\d$/), 2); // collections
+assert.eq(countIndexesFor(secondDB, /temp\d$/), 4); // indexes (2 _id + 2 x)
+assert.eq(countCollection(secondDB, /keep\d$/), 4);
// step down primary and make sure former secondary (now primary) drops collections
try {
- master.adminCommand({replSetStepDown: 50, force : true});
+ master.adminCommand({replSetStepDown: 50, force: true});
} catch (e) {
// ignoring socket errors since they sometimes, but not always, fire after running that command.
}
@@ -89,16 +91,16 @@ assert.soon(function() {
printjson(secondDB.adminCommand("replSetGetStatus"));
printjson(secondDB.isMaster());
return secondDB.isMaster().ismaster;
-}, '', 75*1000); // must wait for secondary to be willing to promote self
+}, '', 75 * 1000); // must wait for secondary to be willing to promote self
-assert.eq(countCollection(secondDB,/temp\d$/), 0); // collections
-assert.eq(countIndexesFor(secondDB,/temp\d$/), 0); // indexes (2 _id + 2 x)
-assert.eq(countCollection(secondDB,/keep\d$/), 4);
+assert.eq(countCollection(secondDB, /temp\d$/), 0); // collections
+assert.eq(countIndexesFor(secondDB, /temp\d$/), 0); // indexes (2 _id + 2 x)
+assert.eq(countCollection(secondDB, /keep\d$/), 4);
// check that former primary dropped collections
replTest.awaitReplication();
-assert.eq(countCollection(masterDB,/temp\d$/), 0); // collections
-assert.eq(countIndexesFor(masterDB,/temp\d$/), 0); // indexes (2 _id + 2 x)
-assert.eq(countCollection(masterDB,/keep\d$/), 4);
+assert.eq(countCollection(masterDB, /temp\d$/), 0); // collections
+assert.eq(countIndexesFor(masterDB, /temp\d$/), 0); // indexes (2 _id + 2 x)
+assert.eq(countCollection(masterDB, /keep\d$/), 4);
replTest.stopSet();
diff --git a/jstests/replsets/test_command.js b/jstests/replsets/test_command.js
index d8d5eb42984..abaf10bc56d 100644
--- a/jstests/replsets/test_command.js
+++ b/jstests/replsets/test_command.js
@@ -2,7 +2,7 @@
// waitForMemberState - waits for node's state to become 'expectedState'.
// waitForDrainFinish - waits for primary to finish draining its applier queue.
-(function () {
+(function() {
'use strict';
var name = 'test_command';
var replSet = new ReplSetTest({name: name, nodes: 3});
@@ -19,14 +19,12 @@
// Stabilize replica set with node 0 as primary.
- assert.commandWorked(
- replSet.nodes[0].adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.PRIMARY,
- timeoutMillis: 60 * 1000,
- }),
- 'node 0' + replSet.nodes[0].host + ' failed to become primary'
- );
+ assert.commandWorked(replSet.nodes[0].adminCommand({
+ replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.PRIMARY,
+ timeoutMillis: 60 * 1000,
+ }),
+ 'node 0' + replSet.nodes[0].host + ' failed to become primary');
// We need the try/catch to handle that the node may have hung up the connection due
// to a state change.
@@ -42,7 +40,8 @@
replSetTest: 1,
waitForMemberState: ReplSetTest.State.SECONDARY,
timeoutMillis: 60 * 1000,
- }), 'node 1' + replSet.nodes[1].host + ' failed to become secondary');
+ }),
+ 'node 1' + replSet.nodes[1].host + ' failed to become secondary');
}
var primary = replSet.getPrimary();
@@ -50,20 +49,16 @@
// Check replication mode.
- assert.commandFailedWithCode(
- primary.getDB(name).runCommand({
- replSetTest: 1,
- }),
- ErrorCodes.Unauthorized,
- 'replSetTest should fail against non-admin database'
- );
+ assert.commandFailedWithCode(primary.getDB(name).runCommand({
+ replSetTest: 1,
+ }),
+ ErrorCodes.Unauthorized,
+ 'replSetTest should fail against non-admin database');
- assert.commandWorked(
- primary.adminCommand({
- replSetTest: 1,
- }),
- 'failed to check replication mode'
- );
+ assert.commandWorked(primary.adminCommand({
+ replSetTest: 1,
+ }),
+ 'failed to check replication mode');
// waitForMemberState tests.
@@ -74,8 +69,7 @@
timeoutMillis: 1000,
}),
ErrorCodes.TypeMismatch,
- 'replSetTest waitForMemberState should fail on non-numerical state'
- );
+ 'replSetTest waitForMemberState should fail on non-numerical state');
assert.commandFailedWithCode(
primary.adminCommand({
@@ -84,28 +78,23 @@
timeoutMillis: "what timeout",
}),
ErrorCodes.TypeMismatch,
- 'replSetTest waitForMemberState should fail on non-numerical timeout'
- );
-
- assert.commandFailedWithCode(
- primary.adminCommand({
- replSetTest: 1,
- waitForMemberState: 9999,
- timeoutMillis: 1000,
- }),
- ErrorCodes.BadValue,
- 'replSetTest waitForMemberState should fail on invalid state'
- );
-
- assert.commandFailedWithCode(
- primary.adminCommand({
- replSetTest: 1,
- waitForMemberState: ReplSetTest.State.PRIMARY,
- timeoutMillis: -1000,
- }),
- ErrorCodes.BadValue,
- 'replSetTest waitForMemberState should fail on negative timeout'
- );
+ 'replSetTest waitForMemberState should fail on non-numerical timeout');
+
+ assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForMemberState: 9999,
+ timeoutMillis: 1000,
+ }),
+ ErrorCodes.BadValue,
+ 'replSetTest waitForMemberState should fail on invalid state');
+
+ assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForMemberState: ReplSetTest.State.PRIMARY,
+ timeoutMillis: -1000,
+ }),
+ ErrorCodes.BadValue,
+ 'replSetTest waitForMemberState should fail on negative timeout');
assert.commandFailedWithCode(
primary.adminCommand({
@@ -114,9 +103,7 @@
timeoutMillis: 1000,
}),
ErrorCodes.ExceededTimeLimit,
- 'replSetTest waitForMemberState(SECONDARY) should time out on node 0 ' +
- primary.host
- );
+ 'replSetTest waitForMemberState(SECONDARY) should time out on node 0 ' + primary.host);
assert.commandWorked(
secondary.adminCommand({
@@ -124,9 +111,7 @@
waitForMemberState: ReplSetTest.State.SECONDARY,
timeoutMillis: 1000,
}),
- 'replSetTest waitForMemberState(SECONDARY) failed on node 1 ' +
- secondary.host
- );
+ 'replSetTest waitForMemberState(SECONDARY) failed on node 1 ' + secondary.host);
// waitForDrainFinish tests.
@@ -136,31 +121,24 @@
waitForDrainFinish: 'what state',
}),
ErrorCodes.TypeMismatch,
- 'replSetTest waitForDrainFinish should fail on non-numerical timeout'
- );
-
- assert.commandFailedWithCode(
- primary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: -1000,
- }),
- ErrorCodes.BadValue,
- 'replSetTest waitForDrainFinish should fail on negative timeout'
- );
-
- assert.commandWorked(
- primary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 1000,
- }),
- 'node 0' + primary.host + ' failed to wait for drain to finish'
- );
-
- assert.commandWorked(
- secondary.adminCommand({
- replSetTest: 1,
- waitForDrainFinish: 0,
- }),
- 'node 1' + primary.host + ' failed to wait for drain to finish'
- );
- })();
+ 'replSetTest waitForDrainFinish should fail on non-numerical timeout');
+
+ assert.commandFailedWithCode(primary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: -1000,
+ }),
+ ErrorCodes.BadValue,
+ 'replSetTest waitForDrainFinish should fail on negative timeout');
+
+ assert.commandWorked(primary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 1000,
+ }),
+ 'node 0' + primary.host + ' failed to wait for drain to finish');
+
+ assert.commandWorked(secondary.adminCommand({
+ replSetTest: 1,
+ waitForDrainFinish: 0,
+ }),
+ 'node 1' + primary.host + ' failed to wait for drain to finish');
+})();
diff --git a/jstests/replsets/toostale.js b/jstests/replsets/toostale.js
index 32c75e953b8..4f9e10ea94e 100644
--- a/jstests/replsets/toostale.js
+++ b/jstests/replsets/toostale.js
@@ -19,13 +19,12 @@
* 8: check s2.state == 3
*/
-
var w = 0;
var wait = function(f) {
w++;
var n = 0;
while (!f()) {
- if( n % 4 == 0 )
+ if (n % 4 == 0)
print("toostale.js waiting " + w);
if (++n == 4) {
print("" + f);
@@ -36,43 +35,45 @@ var wait = function(f) {
};
var reconnect = function(a) {
- wait(function() {
- try {
- a.bar.stats();
- return true;
- } catch(e) {
- print(e);
- return false;
- }
+ wait(function() {
+ try {
+ a.bar.stats();
+ return true;
+ } catch (e) {
+ print(e);
+ return false;
+ }
});
};
-
var name = "toostale";
-var replTest = new ReplSetTest({ name: name, nodes: 3, oplogSize: 5 });
+var replTest = new ReplSetTest({name: name, nodes: 3, oplogSize: 5});
var host = getHostName();
var nodes = replTest.startSet();
-replTest.initiate({_id : name, members : [
- {_id : 0, host : host+":"+replTest.ports[0], priority: 2},
- {_id : 1, host : host+":"+replTest.ports[1], arbiterOnly : true},
- {_id : 2, host : host+":"+replTest.ports[2], priority: 0}
-]});
+replTest.initiate({
+ _id: name,
+ members: [
+ {_id: 0, host: host + ":" + replTest.ports[0], priority: 2},
+ {_id: 1, host: host + ":" + replTest.ports[1], arbiterOnly: true},
+ {_id: 2, host: host + ":" + replTest.ports[2], priority: 0}
+ ]
+});
var master = replTest.getPrimary();
var mdb = master.getDB("foo");
-
print("1: initial insert");
mdb.foo.save({a: 1000});
-
print("2: initial sync");
replTest.awaitReplication();
print("3: stop s2");
replTest.stop(2);
print("waiting until the master knows the slave is blind");
-assert.soon(function() { return master.getDB("admin").runCommand({replSetGetStatus:1}).members[2].health == 0; });
+assert.soon(function() {
+ return master.getDB("admin").runCommand({replSetGetStatus: 1}).members[2].health == 0;
+});
print("okay");
print("4: overflow oplog");
@@ -80,49 +81,46 @@ reconnect(master.getDB("local"));
var count = master.getDB("local").oplog.rs.count();
var prevCount = -1;
while (count > prevCount) {
- print("inserting 1000");
- var bulk = mdb.bar.initializeUnorderedBulkOp();
- for (var i = 0; i < 1000; i++) {
- bulk.insert({ x: i, date: new Date(), str: "safkaldmfaksndfkjansfdjanfjkafa" });
- }
- assert.writeOK(bulk.execute());
+ print("inserting 1000");
+ var bulk = mdb.bar.initializeUnorderedBulkOp();
+ for (var i = 0; i < 1000; i++) {
+ bulk.insert({x: i, date: new Date(), str: "safkaldmfaksndfkjansfdjanfjkafa"});
+ }
+ assert.writeOK(bulk.execute());
- prevCount = count;
- replTest.awaitReplication();
- count = master.getDB("local").oplog.rs.count();
- print("count: "+count+" prev: "+prevCount);
+ prevCount = count;
+ replTest.awaitReplication();
+ count = master.getDB("local").oplog.rs.count();
+ print("count: " + count + " prev: " + prevCount);
}
-
print("5: restart s2");
replTest.restart(2);
print("waiting until the master knows the slave is not blind");
-assert.soon(function() { return master.getDB("admin").runCommand({replSetGetStatus:1}).members[2].health != 0; });
+assert.soon(function() {
+ return master.getDB("admin").runCommand({replSetGetStatus: 1}).members[2].health != 0;
+});
print("okay");
-
print("6: check s2.state == 3");
var goStale = function() {
- wait(function() {
- var status = master.getDB("admin").runCommand({replSetGetStatus:1});
- printjson(status);
- return status.members[2].state == 3;
+ wait(function() {
+ var status = master.getDB("admin").runCommand({replSetGetStatus: 1});
+ printjson(status);
+ return status.members[2].state == 3;
});
};
goStale();
-
print("7: restart s2");
replTest.stop(2);
replTest.restart(2);
-
print("8: check s2.state == 3");
assert.soon(function() {
- var status = master.getDB("admin").runCommand({replSetGetStatus:1});
+ var status = master.getDB("admin").runCommand({replSetGetStatus: 1});
printjson(status);
return status.members && status.members[2].state == 3;
});
replTest.stop(0);
-
diff --git a/jstests/replsets/two_initsync.js b/jstests/replsets/two_initsync.js
index bdb0c96bc5a..1f2b526d61e 100755..100644
--- a/jstests/replsets/two_initsync.js
+++ b/jstests/replsets/two_initsync.js
@@ -1,7 +1,7 @@
// test initial sync failing
// try running as :
-//
+//
// mongo --nodb two_initsync.js | tee out | grep -v ^m31
//
@@ -15,10 +15,10 @@ function pause(s) {
}
}
-function deb(obj) {
- if( debugging ) {
+function deb(obj) {
+ if (debugging) {
print("\n\n\n" + obj + "\n\n");
- }
+ }
}
w = 0;
@@ -27,7 +27,7 @@ function wait(f) {
w++;
var n = 0;
while (!f()) {
- if( n % 4 == 0 )
+ if (n % 4 == 0)
print("twoinitsync waiting " + w);
if (++n == 4) {
print("" + f);
@@ -37,26 +37,29 @@ function wait(f) {
}
}
-doTest = function (signal) {
- var replTest = new ReplSetTest({ name: 'testSet', nodes: 0 });
+doTest = function(signal) {
+ var replTest = new ReplSetTest({name: 'testSet', nodes: 0});
var first = replTest.add();
// Initiate replica set
- assert.soon(function () {
- var res = first.getDB("admin").runCommand({ replSetInitiate: null });
+ assert.soon(function() {
+ var res = first.getDB("admin").runCommand({replSetInitiate: null});
return res['ok'] == 1;
});
// Get status
- assert.soon(function () {
- var result = first.getDB("admin").runCommand({ replSetGetStatus: true });
+ assert.soon(function() {
+ var result = first.getDB("admin").runCommand({replSetGetStatus: true});
return result['ok'] == 1;
});
var a = replTest.getPrimary().getDB("two");
for (var i = 0; i < 20000; i++)
- a.coll.insert({ i: i, s: "a b" });
+ a.coll.insert({
+ i: i,
+ s: "a b"
+ });
// Start a second node
var second = replTest.add();
@@ -68,11 +71,13 @@ doTest = function (signal) {
var b = second.getDB("admin");
// attempt to interfere with the initial sync
- b._adminCommand({ replSetTest: 1, forceInitialSyncFailure: 1 });
+ b._adminCommand({replSetTest: 1, forceInitialSyncFailure: 1});
// wait(function () { return a._adminCommand("replSetGetStatus").members.length == 2; });
- wait(function () { return b.isMaster().secondary || b.isMaster().ismaster; });
+ wait(function() {
+ return b.isMaster().secondary || b.isMaster().ismaster;
+ });
print("b.isMaster:");
printjson(b.isMaster());
@@ -82,13 +87,16 @@ doTest = function (signal) {
print("b.isMaster:");
printjson(b.isMaster());
- wait(function () { var c = b.getSisterDB("two").coll.count(); print(c); return c == 20000; });
+ wait(function() {
+ var c = b.getSisterDB("two").coll.count();
+ print(c);
+ return c == 20000;
+ });
print("two_initsync.js SUCCESS");
replTest.stopSet(signal);
};
-
print("two_initsync.js");
-doTest( 15 );
+doTest(15);
diff --git a/jstests/replsets/two_nodes_priority_take_over.js b/jstests/replsets/two_nodes_priority_take_over.js
index 403c9ba8464..f6e62fe681d 100644
--- a/jstests/replsets/two_nodes_priority_take_over.js
+++ b/jstests/replsets/two_nodes_priority_take_over.js
@@ -5,60 +5,57 @@
// TODO: We have to disable this test until SERVER-21456 is fixed, due to the
// race of tagging and closing connections on stepdown.
if (false) {
-
-load("jstests/replsets/rslib.js");
-
-(function() {
-
-"use strict";
-var name = "two_nodes_priority_take_over";
-var rst = new ReplSetTest({name: name, nodes: 2});
-
-rst.startSet();
-var conf = rst.getReplSetConfig();
-conf.members[0].priority = 2;
-conf.members[1].priority = 1;
-rst.initiate(conf);
-rst.awaitSecondaryNodes();
-// Set verbosity for replication on all nodes.
-var verbosity = {
- "setParameter" : 1,
- "logComponentVerbosity" : {
- "verbosity": 4,
- "storage" : { "verbosity" : 1 }
- }
-};
-rst.nodes.forEach(function (node) {node.adminCommand(verbosity);});
-
-// The first node will be the primary at the beginning.
-rst.waitForState(rst.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
-
-// Get the term when replset is stable.
-var res = rst.getPrimary().adminCommand("replSetGetStatus");
-assert.commandWorked(res);
-var stableTerm = res.term;
-
-// Reconfig to change priorities. The current primary remains the same until
-// the higher priority node takes over.
-var conf = rst.getReplSetConfig();
-conf.members[0].priority = 1;
-conf.members[1].priority = 2;
-conf.version = 2;
-reconfig(rst, conf);
-
-// The second node will take over the primary.
-rst.waitForState(rst.nodes[1], ReplSetTest.State.PRIMARY, 60 * 1000);
-
-res = rst.getPrimary().adminCommand("replSetGetStatus");
-assert.commandWorked(res);
-var newTerm = res.term;
-
-// Priority takeover should happen smoothly without failed election as there is
-// no current candidate. If vote requests failed (wrongly) for some reason,
-// nodes have to start new elections, which increase the term unnecessarily.
-if (rst.getReplSetConfigFromNode().protocolVersion == 1) {
- assert.eq(newTerm, stableTerm + 1);
-}
-})();
-
+ load("jstests/replsets/rslib.js");
+
+ (function() {
+
+ "use strict";
+ var name = "two_nodes_priority_take_over";
+ var rst = new ReplSetTest({name: name, nodes: 2});
+
+ rst.startSet();
+ var conf = rst.getReplSetConfig();
+ conf.members[0].priority = 2;
+ conf.members[1].priority = 1;
+ rst.initiate(conf);
+ rst.awaitSecondaryNodes();
+ // Set verbosity for replication on all nodes.
+ var verbosity = {
+ "setParameter": 1,
+ "logComponentVerbosity": {"verbosity": 4, "storage": {"verbosity": 1}}
+ };
+ rst.nodes.forEach(function(node) {
+ node.adminCommand(verbosity);
+ });
+
+ // The first node will be the primary at the beginning.
+ rst.waitForState(rst.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
+
+ // Get the term when replset is stable.
+ var res = rst.getPrimary().adminCommand("replSetGetStatus");
+ assert.commandWorked(res);
+ var stableTerm = res.term;
+
+ // Reconfig to change priorities. The current primary remains the same until
+ // the higher priority node takes over.
+ var conf = rst.getReplSetConfig();
+ conf.members[0].priority = 1;
+ conf.members[1].priority = 2;
+ conf.version = 2;
+ reconfig(rst, conf);
+
+ // The second node will take over the primary.
+ rst.waitForState(rst.nodes[1], ReplSetTest.State.PRIMARY, 60 * 1000);
+
+ res = rst.getPrimary().adminCommand("replSetGetStatus");
+ assert.commandWorked(res);
+ var newTerm = res.term;
+
+ // Priority takeover should happen smoothly without failed election as there is
+ // no current candidate. If vote requests failed (wrongly) for some reason,
+ // nodes have to start new elections, which increase the term unnecessarily.
+ if (rst.getReplSetConfigFromNode().protocolVersion == 1) {
+ assert.eq(newTerm, stableTerm + 1);
+ }
+ })();
}
diff --git a/jstests/replsets/zero_vote_arbiter.js b/jstests/replsets/zero_vote_arbiter.js
index cba292d6fb0..bc7552ef47b 100644
--- a/jstests/replsets/zero_vote_arbiter.js
+++ b/jstests/replsets/zero_vote_arbiter.js
@@ -17,12 +17,7 @@ var InvalidReplicaSetConfig = 93;
var arbiterConn = replTest.add();
var admin = replTest.getPrimary().getDB("admin");
var conf = admin.runCommand({replSetGetConfig: 1}).config;
- conf.members.push({
- _id: 3,
- host: arbiterConn.host,
- arbiterOnly: true,
- votes: 0
- });
+ conf.members.push({_id: 3, host: arbiterConn.host, arbiterOnly: true, votes: 0});
conf.version++;
jsTestLog('Add arbiter with zero votes:');
@@ -60,7 +55,6 @@ var InvalidReplicaSetConfig = 93;
replTest.stopSet();
})();
-
/*
* replSetInitiate with a 0-vote arbiter.
*/
@@ -96,12 +90,7 @@ var InvalidReplicaSetConfig = 93;
var arbiterConn = replTest.add();
var admin = replTest.getPrimary().getDB("admin");
var conf = admin.runCommand({replSetGetConfig: 1}).config;
- conf.members.push({
- _id: 7,
- host: arbiterConn.host,
- arbiterOnly: true,
- votes: 0
- });
+ conf.members.push({_id: 7, host: arbiterConn.host, arbiterOnly: true, votes: 0});
conf.version++;
jsTestLog('Add arbiter with zero votes:');
diff --git a/jstests/sharding/SERVER-7379.js b/jstests/sharding/SERVER-7379.js
index c637f10c6b4..bdf311cbf6e 100644
--- a/jstests/sharding/SERVER-7379.js
+++ b/jstests/sharding/SERVER-7379.js
@@ -1,46 +1,48 @@
-var st = new ShardingTest({ shards: 2 });
+var st = new ShardingTest({shards: 2});
-st.adminCommand({ enablesharding: "test" });
+st.adminCommand({enablesharding: "test"});
st.ensurePrimaryShard('test', 'shard0001');
-st.adminCommand({ shardcollection: "test.offerChange", key: { "categoryId": 1, "store": 1, "_id": 1 } });
+st.adminCommand(
+ {shardcollection: "test.offerChange", key: {"categoryId": 1, "store": 1, "_id": 1}});
var db = st.s.getDB('test');
var offerChange = db.getCollection('offerChange');
-var testDoc = { "_id": 123, "categoryId": 9881, "store": "NEW" };
+var testDoc = {
+ "_id": 123,
+ "categoryId": 9881,
+ "store": "NEW"
+};
offerChange.remove({}, false);
offerChange.insert(testDoc);
-assert.writeError(offerChange.update({ _id: 123 }, { $set: { store: "NEWEST" } }, true, false));
+assert.writeError(offerChange.update({_id: 123}, {$set: {store: "NEWEST"}}, true, false));
var doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
offerChange.insert(testDoc);
-assert.writeError(offerChange.update({ _id: 123 },
- { _id: 123, categoryId: 9881, store: "NEWEST" },
- true, false));
+assert.writeError(
+ offerChange.update({_id: 123}, {_id: 123, categoryId: 9881, store: "NEWEST"}, true, false));
doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
offerChange.insert(testDoc);
-assert.writeError(offerChange.save({ "_id": 123, "categoryId": 9881, "store": "NEWEST" }));
+assert.writeError(offerChange.save({"_id": 123, "categoryId": 9881, "store": "NEWEST"}));
doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
offerChange.insert(testDoc);
-assert.writeError(offerChange.update({ _id: 123, store: "NEW" },
- { _id: 123, categoryId: 9881, store: "NEWEST" },
- true, false));
+assert.writeError(offerChange.update(
+ {_id: 123, store: "NEW"}, {_id: 123, categoryId: 9881, store: "NEWEST"}, true, false));
doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
offerChange.insert(testDoc);
-assert.writeError(offerChange.update({ _id: 123, categoryId: 9881 },
- { _id: 123, categoryId: 9881, store: "NEWEST" },
- true, false));
+assert.writeError(offerChange.update(
+ {_id: 123, categoryId: 9881}, {_id: 123, categoryId: 9881, store: "NEWEST"}, true, false));
doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
diff --git a/jstests/sharding/add_invalid_shard.js b/jstests/sharding/add_invalid_shard.js
index 7dfa6d0f819..357cf252356 100644
--- a/jstests/sharding/add_invalid_shard.js
+++ b/jstests/sharding/add_invalid_shard.js
@@ -3,47 +3,47 @@
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards: 1 });
+ var st = new ShardingTest({shards: 1});
-var configDB = st.s.getDB('config');
-var shardDoc = configDB.shards.findOne();
+ var configDB = st.s.getDB('config');
+ var shardDoc = configDB.shards.findOne();
-// Can't add mongos as shard.
-assert.commandFailed(st.admin.runCommand({ addshard: st.s.host }));
+ // Can't add mongos as shard.
+ assert.commandFailed(st.admin.runCommand({addshard: st.s.host}));
-// Can't add config servers as shard.
-assert.commandFailed(st.admin.runCommand({ addshard: st._configDB }));
+ // Can't add config servers as shard.
+ assert.commandFailed(st.admin.runCommand({addshard: st._configDB}));
-var replTest = new ReplSetTest({ nodes: 2 });
-replTest.startSet({ oplogSize: 10 });
-replTest.initiate();
+ var replTest = new ReplSetTest({nodes: 2});
+ replTest.startSet({oplogSize: 10});
+ replTest.initiate();
-var rsConnStr = replTest.getURL();
-// Can't add replSet as shard if the name doesn't match the replSet config.
-assert.commandFailed(st.admin.runCommand({ addshard: "prefix_" + rsConnStr }));
+ var rsConnStr = replTest.getURL();
+ // Can't add replSet as shard if the name doesn't match the replSet config.
+ assert.commandFailed(st.admin.runCommand({addshard: "prefix_" + rsConnStr}));
-assert.commandWorked(st.admin.runCommand({ addshard: rsConnStr, name: 'dummyRS' }));
+ assert.commandWorked(st.admin.runCommand({addshard: rsConnStr, name: 'dummyRS'}));
-// Cannot add the same replSet shard host twice.
-assert.commandFailed(st.admin.runCommand({ addshard: rsConnStr }));
+ // Cannot add the same replSet shard host twice.
+ assert.commandFailed(st.admin.runCommand({addshard: rsConnStr}));
-// Cannot add the same replSet shard host twice even when using a unique shard name.
-assert.commandFailed(st.admin.runCommand({ addshard: rsConnStr, name: 'dupRS' }));
+ // Cannot add the same replSet shard host twice even when using a unique shard name.
+ assert.commandFailed(st.admin.runCommand({addshard: rsConnStr, name: 'dupRS'}));
-// Cannot add the same replSet shard host twice even when using an valid variant of the replSet
-// connection string.
-var truncatedRSConnStr = rsConnStr.substring(0, rsConnStr.indexOf(','));
-assert.commandFailed(st.admin.runCommand({ addshard: truncatedRSConnStr, name: 'dupRS' }));
+ // Cannot add the same replSet shard host twice even when using an valid variant of the replSet
+ // connection string.
+ var truncatedRSConnStr = rsConnStr.substring(0, rsConnStr.indexOf(','));
+ assert.commandFailed(st.admin.runCommand({addshard: truncatedRSConnStr, name: 'dupRS'}));
-// Cannot add the same stand alone shard host twice.
-assert.commandFailed(st.admin.runCommand({ addshard: shardDoc.host }));
+ // Cannot add the same stand alone shard host twice.
+ assert.commandFailed(st.admin.runCommand({addshard: shardDoc.host}));
-// Cannot add the same stand alone shard host twice even with a unique shard name.
-assert.commandFailed(st.admin.runCommand({ addshard: shardDoc.host, name: 'dupShard' }));
+ // Cannot add the same stand alone shard host twice even with a unique shard name.
+ assert.commandFailed(st.admin.runCommand({addshard: shardDoc.host, name: 'dupShard'}));
-replTest.stopSet();
-st.stop();
+ replTest.stopSet();
+ st.stop();
})();
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index db8818b1e0f..1bea66e21c6 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -1,72 +1,80 @@
(function() {
-'use strict';
+ 'use strict';
-var s = new ShardingTest({name: "add_shard1", shards: 1, useHostname: false });
+ var s = new ShardingTest({name: "add_shard1", shards: 1, useHostname: false});
-// Create a shard and add a database; if the database is not duplicated the mongod should accept
-// it as shard
-var conn1 = MongoRunner.runMongod({});
-var db1 = conn1.getDB("testDB");
+ // Create a shard and add a database; if the database is not duplicated the mongod should accept
+ // it as shard
+ var conn1 = MongoRunner.runMongod({});
+ var db1 = conn1.getDB("testDB");
-var numObjs = 3;
-for (var i = 0; i < numObjs; i++){
- assert.writeOK(db1.foo.save({ a : i }));
-}
+ var numObjs = 3;
+ for (var i = 0; i < numObjs; i++) {
+ assert.writeOK(db1.foo.save({a: i}));
+ }
-var configDB = s.s.getDB('config');
-assert.eq(null, configDB.databases.findOne({ _id: 'testDB' }));
+ var configDB = s.s.getDB('config');
+ assert.eq(null, configDB.databases.findOne({_id: 'testDB'}));
-var newShard = "myShard";
-assert.commandWorked(s.admin.runCommand({ addshard: "localhost:" + conn1.port,
- name: newShard,
- maxSize: 1024 }));
+ var newShard = "myShard";
+ assert.commandWorked(
+ s.admin.runCommand({addshard: "localhost:" + conn1.port, name: newShard, maxSize: 1024}));
-assert.neq(null, configDB.databases.findOne({ _id: 'testDB' }));
+ assert.neq(null, configDB.databases.findOne({_id: 'testDB'}));
-var newShardDoc = configDB.shards.findOne({ _id: newShard });
-assert.eq(1024, newShardDoc.maxSize);
+ var newShardDoc = configDB.shards.findOne({_id: newShard});
+ assert.eq(1024, newShardDoc.maxSize);
-// a mongod with an existing database name should not be allowed to become a shard
-var conn2 = MongoRunner.runMongod({});
+ // a mongod with an existing database name should not be allowed to become a shard
+ var conn2 = MongoRunner.runMongod({});
-var db2 = conn2.getDB("otherDB");
-assert.writeOK(db2.foo.save({ a: 1 }));
+ var db2 = conn2.getDB("otherDB");
+ assert.writeOK(db2.foo.save({a: 1}));
-var db3 = conn2.getDB("testDB");
-assert.writeOK(db3.foo.save({ a: 1 }));
+ var db3 = conn2.getDB("testDB");
+ assert.writeOK(db3.foo.save({a: 1}));
-s.config.databases.find().forEach(printjson);
+ s.config.databases.find().forEach(printjson);
-var rejectedShard = "rejectedShard";
-assert(!s.admin.runCommand({ addshard: "localhost:" + conn2.port, name : rejectedShard }).ok,
- "accepted mongod with duplicate db");
+ var rejectedShard = "rejectedShard";
+ assert(!s.admin.runCommand({addshard: "localhost:" + conn2.port, name: rejectedShard}).ok,
+ "accepted mongod with duplicate db");
-// Check that all collection that were local to the mongod's are accessible through the mongos
-var sdb1 = s.getDB("testDB");
-assert.eq(numObjs, sdb1.foo.count(), "wrong count for database that existed before addshard");
+    // Check that all collections that were local to the mongods are accessible through the mongos
+ var sdb1 = s.getDB("testDB");
+ assert.eq(numObjs, sdb1.foo.count(), "wrong count for database that existed before addshard");
-var sdb2 = s.getDB("otherDB");
-assert.eq(0, sdb2.foo.count(), "database of rejected shard appears through mongos");
+ var sdb2 = s.getDB("otherDB");
+ assert.eq(0, sdb2.foo.count(), "database of rejected shard appears through mongos");
-// make sure we can move a DB from the original mongod to a previoulsy existing shard
-assert.eq(s.normalize(s.config.databases.findOne({ _id : "testDB" }).primary), newShard, "DB primary is wrong");
+    // make sure we can move a DB from the original mongod to a previously existing shard
+ assert.eq(s.normalize(s.config.databases.findOne({_id: "testDB"}).primary),
+ newShard,
+ "DB primary is wrong");
-var origShard = s.getNonPrimaries("testDB")[0];
-s.adminCommand({ moveprimary : "testDB", to : origShard });
-assert.eq(s.normalize(s.config.databases.findOne({ _id : "testDB" }).primary), origShard, "DB primary didn't move");
-assert.eq(numObjs, sdb1.foo.count(), "wrong count after moving datbase that existed before addshard");
+ var origShard = s.getNonPrimaries("testDB")[0];
+ s.adminCommand({moveprimary: "testDB", to: origShard});
+ assert.eq(s.normalize(s.config.databases.findOne({_id: "testDB"}).primary),
+ origShard,
+ "DB primary didn't move");
+ assert.eq(
+ numObjs, sdb1.foo.count(), "wrong count after moving datbase that existed before addshard");
-// make sure we can shard the original collections
-sdb1.foo.ensureIndex({ a : 1 }, { unique : true }); // can't shard populated collection without an index
-s.adminCommand({ enablesharding : "testDB" });
-s.adminCommand({ shardcollection : "testDB.foo", key: { a : 1 } });
-s.adminCommand({ split : "testDB.foo", middle: { a : Math.floor(numObjs/2) } });
-assert.eq(2, s.config.chunks.count(), "wrong chunk number after splitting collection that existed before");
-assert.eq(numObjs, sdb1.foo.count(), "wrong count after splitting collection that existed before");
+ // make sure we can shard the original collections
+ sdb1.foo.ensureIndex({a: 1},
+ {unique: true}); // can't shard populated collection without an index
+ s.adminCommand({enablesharding: "testDB"});
+ s.adminCommand({shardcollection: "testDB.foo", key: {a: 1}});
+ s.adminCommand({split: "testDB.foo", middle: {a: Math.floor(numObjs / 2)}});
+ assert.eq(2,
+ s.config.chunks.count(),
+ "wrong chunk number after splitting collection that existed before");
+ assert.eq(
+ numObjs, sdb1.foo.count(), "wrong count after splitting collection that existed before");
-MongoRunner.stopMongod(conn1);
-MongoRunner.stopMongod(conn2);
+ MongoRunner.stopMongod(conn1);
+ MongoRunner.stopMongod(conn2);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index 2bd57cf1da4..7af23a4ab5b 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -1,129 +1,129 @@
(function() {
-// Don't start any shards, yet
-var s = new ShardingTest({name: "add_shard2",
- shards: 1,
- mongos: 1,
- other: {useHostname : true} });
-
-// Start two new instances, which will be used for shards
-var conn1 = MongoRunner.runMongod({useHostname: true});
-var conn2 = MongoRunner.runMongod({useHostname: true});
-
-var rs1 = new ReplSetTest( { "name" : "add_shard2_rs1", nodes : 3 } );
-rs1.startSet();
-rs1.initiate();
-var master1 = rs1.getPrimary();
-
-var rs2 = new ReplSetTest( { "name" : "add_shard2_rs2", nodes : 3 } );
-rs2.startSet();
-rs2.initiate();
-var master2 = rs2.getPrimary();
-
-// replica set with set name = 'config'
-var rs3 = new ReplSetTest({ 'name': 'config', nodes: 3 });
-rs3.startSet();
-rs3.initiate();
-
-// replica set with set name = 'admin'
-var rs4 = new ReplSetTest({ 'name': 'admin', nodes: 3 });
-rs4.startSet();
-rs4.initiate();
-
-// replica set with configsvr: true should *not* be allowed to be added as a shard
-var rs5 = new ReplSetTest({name: 'csrs',
- nodes: 3,
- nodeOptions: {configsvr: "",
- journal: "",
- storageEngine: "wiredTiger"}});
-rs5.startSet();
-var conf = rs5.getReplSetConfig();
-conf.configsvr = true;
-rs5.initiate(conf);
-
-
-// step 1. name given. maxSize zero means no limit. Make sure it is allowed.
-assert.commandWorked(s.admin.runCommand({ addshard: getHostName() + ":" + conn1.port,
- name: "bar",
- maxSize: 0 }));
-var shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000"]}});
-assert(shard, "shard wasn't found");
-assert.eq("bar", shard._id, "shard has incorrect name");
-
-// step 2. replica set
-assert(s.admin.runCommand(
- {"addshard" : "add_shard2_rs1/" + getHostName() + ":" + master1.port}).ok,
- "failed to add shard in step 2");
-shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000", "bar"]}});
-assert(shard, "shard wasn't found");
-assert.eq("add_shard2_rs1", shard._id, "t2 name");
-
-// step 3. replica set w/ name given
-assert(s.admin.runCommand({"addshard" : "add_shard2_rs2/" + getHostName() + ":" + master2.port,
- "name" : "myshard"}).ok,
- "failed to add shard in step 4");
-shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000", "bar", "add_shard2_rs1"]}});
-assert(shard, "shard wasn't found");
-assert.eq("myshard", shard._id, "t3 name");
-
-// step 4. no name given
-assert(s.admin.runCommand({"addshard" : getHostName()+":" + conn2.port}).ok,
- "failed to add shard in step 4");
-shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000", "bar", "add_shard2_rs1", "myshard"]}});
-assert(shard, "shard wasn't found");
-assert.eq("shard0001", shard._id, "t4 name");
-
-assert.eq(s.getDB("config").shards.count(), 5, "unexpected number of shards");
-
-// step 5. replica set w/ a wrong host
-var portWithoutHostRunning = allocatePort();
-assert(!s.admin.runCommand({
- addshard: "add_shard2_rs2/NonExistingHost:" + portWithoutHostRunning
- }).ok,
- "accepted bad hostname in step 5");
-
-// step 6. replica set w/ mixed wrong/right hosts
-assert(!s.admin.runCommand({
- addshard: "add_shard2_rs2/" + getHostName() + ":" + master2.port +
- ",foo:" + portWithoutHostRunning
- }).ok,
- "accepted bad hostname in step 6");
-
-// Cannot add invalid stand alone host.
-assert.commandFailed(s.admin.runCommand({ addshard: 'dummy:12345' }));
-
-//
-// SERVER-17231 Adding replica set w/ set name = 'config'
-//
-var configReplURI = 'config/' + getHostName() + ':' + rs3.getPrimary().port;
-
-assert(!s.admin.runCommand({ 'addshard': configReplURI }).ok,
- 'accepted replica set shard with set name "config"');
-// but we should be allowed to add that replica set using a different shard name
-assert(s.admin.runCommand({ 'addshard': configReplURI, name: 'not_config' }).ok,
- 'unable to add replica set using valid replica set name');
-
-shard = s.getDB('config').shards.findOne({ '_id': 'not_config' });
-assert(shard, 'shard with name "not_config" not found');
-
-//
-// SERVER-17232 Try inserting into shard with name 'admin'
-//
-assert(s.admin.runCommand({ 'addshard': 'admin/' + getHostName() + ':' + rs4.getPrimary().port}).ok,
- 'adding replica set with name "admin" should work');
-var wRes = s.getDB('test').foo.insert({ x: 1 });
-assert(!wRes.hasWriteError() && wRes.nInserted === 1,
- 'failed to insert document into "test.foo" unsharded collection');
-
-// SERVER-19545 Should not be able to add config server replsets as shards.
-assert.commandFailed(s.admin.runCommand({addshard: rs5.getURL()}));
-
-s.stop();
-
-rs1.stopSet();
-rs2.stopSet();
-rs3.stopSet();
-rs4.stopSet();
-rs5.stopSet();
+ // Don't start any shards, yet
+ var s =
+ new ShardingTest({name: "add_shard2", shards: 1, mongos: 1, other: {useHostname: true}});
+
+ // Start two new instances, which will be used for shards
+ var conn1 = MongoRunner.runMongod({useHostname: true});
+ var conn2 = MongoRunner.runMongod({useHostname: true});
+
+ var rs1 = new ReplSetTest({"name": "add_shard2_rs1", nodes: 3});
+ rs1.startSet();
+ rs1.initiate();
+ var master1 = rs1.getPrimary();
+
+ var rs2 = new ReplSetTest({"name": "add_shard2_rs2", nodes: 3});
+ rs2.startSet();
+ rs2.initiate();
+ var master2 = rs2.getPrimary();
+
+ // replica set with set name = 'config'
+ var rs3 = new ReplSetTest({'name': 'config', nodes: 3});
+ rs3.startSet();
+ rs3.initiate();
+
+ // replica set with set name = 'admin'
+ var rs4 = new ReplSetTest({'name': 'admin', nodes: 3});
+ rs4.startSet();
+ rs4.initiate();
+
+ // replica set with configsvr: true should *not* be allowed to be added as a shard
+ var rs5 = new ReplSetTest({
+ name: 'csrs',
+ nodes: 3,
+ nodeOptions: {configsvr: "", journal: "", storageEngine: "wiredTiger"}
+ });
+ rs5.startSet();
+ var conf = rs5.getReplSetConfig();
+ conf.configsvr = true;
+ rs5.initiate(conf);
+
+ // step 1. name given. maxSize zero means no limit. Make sure it is allowed.
+ assert.commandWorked(
+ s.admin.runCommand({addshard: getHostName() + ":" + conn1.port, name: "bar", maxSize: 0}));
+ var shard = s.getDB("config").shards.findOne({"_id": {"$nin": ["shard0000"]}});
+ assert(shard, "shard wasn't found");
+ assert.eq("bar", shard._id, "shard has incorrect name");
+
+ // step 2. replica set
+ assert(
+ s.admin.runCommand({"addshard": "add_shard2_rs1/" + getHostName() + ":" + master1.port}).ok,
+ "failed to add shard in step 2");
+ shard = s.getDB("config").shards.findOne({"_id": {"$nin": ["shard0000", "bar"]}});
+ assert(shard, "shard wasn't found");
+ assert.eq("add_shard2_rs1", shard._id, "t2 name");
+
+ // step 3. replica set w/ name given
+ assert(s.admin.runCommand({
+ "addshard": "add_shard2_rs2/" + getHostName() + ":" + master2.port,
+ "name": "myshard"
+ }).ok,
+ "failed to add shard in step 4");
+ shard = s.getDB("config")
+ .shards.findOne({"_id": {"$nin": ["shard0000", "bar", "add_shard2_rs1"]}});
+ assert(shard, "shard wasn't found");
+ assert.eq("myshard", shard._id, "t3 name");
+
+ // step 4. no name given
+ assert(s.admin.runCommand({"addshard": getHostName() + ":" + conn2.port}).ok,
+ "failed to add shard in step 4");
+ shard = s.getDB("config").shards.findOne(
+ {"_id": {"$nin": ["shard0000", "bar", "add_shard2_rs1", "myshard"]}});
+ assert(shard, "shard wasn't found");
+ assert.eq("shard0001", shard._id, "t4 name");
+
+ assert.eq(s.getDB("config").shards.count(), 5, "unexpected number of shards");
+
+ // step 5. replica set w/ a wrong host
+ var portWithoutHostRunning = allocatePort();
+ assert(!s.admin.runCommand(
+ {addshard: "add_shard2_rs2/NonExistingHost:" + portWithoutHostRunning}).ok,
+ "accepted bad hostname in step 5");
+
+ // step 6. replica set w/ mixed wrong/right hosts
+ assert(!s.admin.runCommand({
+ addshard: "add_shard2_rs2/" + getHostName() + ":" + master2.port + ",foo:" +
+ portWithoutHostRunning
+ }).ok,
+ "accepted bad hostname in step 6");
+
+ // Cannot add invalid stand alone host.
+ assert.commandFailed(s.admin.runCommand({addshard: 'dummy:12345'}));
+
+ //
+ // SERVER-17231 Adding replica set w/ set name = 'config'
+ //
+ var configReplURI = 'config/' + getHostName() + ':' + rs3.getPrimary().port;
+
+ assert(!s.admin.runCommand({'addshard': configReplURI}).ok,
+ 'accepted replica set shard with set name "config"');
+ // but we should be allowed to add that replica set using a different shard name
+ assert(s.admin.runCommand({'addshard': configReplURI, name: 'not_config'}).ok,
+ 'unable to add replica set using valid replica set name');
+
+ shard = s.getDB('config').shards.findOne({'_id': 'not_config'});
+ assert(shard, 'shard with name "not_config" not found');
+
+ //
+ // SERVER-17232 Try inserting into shard with name 'admin'
+ //
+ assert(
+ s.admin.runCommand({'addshard': 'admin/' + getHostName() + ':' + rs4.getPrimary().port}).ok,
+ 'adding replica set with name "admin" should work');
+ var wRes = s.getDB('test').foo.insert({x: 1});
+ assert(!wRes.hasWriteError() && wRes.nInserted === 1,
+ 'failed to insert document into "test.foo" unsharded collection');
+
+ // SERVER-19545 Should not be able to add config server replsets as shards.
+ assert.commandFailed(s.admin.runCommand({addshard: rs5.getURL()}));
+
+ s.stop();
+
+ rs1.stopSet();
+ rs2.stopSet();
+ rs3.stopSet();
+ rs4.stopSet();
+ rs5.stopSet();
})();
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
index 2a66cbc74fe..de2c8a17c10 100644
--- a/jstests/sharding/addshard4.js
+++ b/jstests/sharding/addshard4.js
@@ -1,60 +1,61 @@
// A replica set's passive nodes should be okay to add as part of a shard config
(function() {
-var s = new ShardingTest({ name: "addshard4",
- shards: 2,
- mongos: 1,
- other: {useHostname : true} });
+ var s = new ShardingTest({name: "addshard4", shards: 2, mongos: 1, other: {useHostname: true}});
-var r = new ReplSetTest({name: "addshard4", nodes: 3});
-r.startSet();
+ var r = new ReplSetTest({name: "addshard4", nodes: 3});
+ r.startSet();
-var config = r.getReplSetConfig();
-config.members[2].priority = 0;
+ var config = r.getReplSetConfig();
+ config.members[2].priority = 0;
-r.initiate(config);
-//Wait for replica set to be fully initialized - could take some time
-//to pre-allocate files on slow systems
-r.awaitReplication();
+ r.initiate(config);
+ // Wait for replica set to be fully initialized - could take some time
+ // to pre-allocate files on slow systems
+ r.awaitReplication();
-var master = r.getPrimary();
+ var master = r.getPrimary();
-var members = config.members.map(function(elem) { return elem.host; });
-var shardName = "addshard4/"+members.join(",");
-var invalidShardName = "addshard4/foobar";
+ var members = config.members.map(function(elem) {
+ return elem.host;
+ });
+ var shardName = "addshard4/" + members.join(",");
+ var invalidShardName = "addshard4/foobar";
-print("adding shard "+shardName);
+ print("adding shard " + shardName);
-// First try adding shard with the correct replica set name but incorrect hostname
-// This will make sure that the metadata for this replica set name is cleaned up
-// so that the set can be added correctly when it has the proper hostnames.
-assert.throws(function() {s.adminCommand({"addshard" : invalidShardName});});
+ // First try adding shard with the correct replica set name but incorrect hostname
+ // This will make sure that the metadata for this replica set name is cleaned up
+ // so that the set can be added correctly when it has the proper hostnames.
+ assert.throws(function() {
+ s.adminCommand({"addshard": invalidShardName});
+ });
-var result = s.adminCommand({"addshard" : shardName});
+ var result = s.adminCommand({"addshard": shardName});
-printjson(result);
-assert.eq(result, true);
+ printjson(result);
+ assert.eq(result, true);
-r = new ReplSetTest({name : "addshard42", nodes : 3});
-r.startSet();
+ r = new ReplSetTest({name: "addshard42", nodes: 3});
+ r.startSet();
-config = r.getReplSetConfig();
-config.members[2].arbiterOnly = true;
+ config = r.getReplSetConfig();
+ config.members[2].arbiterOnly = true;
-r.initiate(config);
-// Wait for replica set to be fully initialized - could take some time
-// to pre-allocate files on slow systems
-r.awaitReplication();
+ r.initiate(config);
+ // Wait for replica set to be fully initialized - could take some time
+ // to pre-allocate files on slow systems
+ r.awaitReplication();
-master = r.getPrimary();
+ master = r.getPrimary();
-print("adding shard addshard42");
+ print("adding shard addshard42");
-result = s.adminCommand({"addshard" : "addshard42/"+config.members[2].host});
+ result = s.adminCommand({"addshard": "addshard42/" + config.members[2].host});
-printjson(result);
-assert.eq(result, true);
+ printjson(result);
+ assert.eq(result, true);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/addshard5.js b/jstests/sharding/addshard5.js
index bf24943972e..c420c90de51 100644
--- a/jstests/sharding/addshard5.js
+++ b/jstests/sharding/addshard5.js
@@ -3,55 +3,56 @@
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 2, mongos: 1 });
+ var st = new ShardingTest({shards: 2, mongos: 1});
-var mongos = st.s;
-var admin = mongos.getDB('admin');
-var config = mongos.getDB('config');
-var coll = mongos.getCollection('foo.bar');
+ var mongos = st.s;
+ var admin = mongos.getDB('admin');
+ var config = mongos.getDB('config');
+ var coll = mongos.getCollection('foo.bar');
-// Get all the shard info and connections
-var shards = [];
-config.shards.find().sort({ _id: 1 }).forEach(function(doc) {
- shards.push(Object.merge(doc, { conn: new Mongo(doc.host) }));
-});
+ // Get all the shard info and connections
+ var shards = [];
+ config.shards.find().sort({_id: 1}).forEach(function(doc) {
+ shards.push(Object.merge(doc, {conn: new Mongo(doc.host)}));
+ });
-// Shard collection
-assert.commandWorked(mongos.adminCommand({ enableSharding: coll.getDB() + ''}));
+ // Shard collection
+ assert.commandWorked(mongos.adminCommand({enableSharding: coll.getDB() + ''}));
-// Just to be sure what primary we start from
-st.ensurePrimaryShard(coll.getDB().getName(), shards[0]._id);
-assert.commandWorked(mongos.adminCommand({ shardCollection: coll + '', key: { _id: 1 } }));
+ // Just to be sure what primary we start from
+ st.ensurePrimaryShard(coll.getDB().getName(), shards[0]._id);
+ assert.commandWorked(mongos.adminCommand({shardCollection: coll + '', key: {_id: 1}}));
-// Insert one document
-assert.writeOK(coll.insert({ hello: 'world'}));
+ // Insert one document
+ assert.writeOK(coll.insert({hello: 'world'}));
-// Migrate the collection to and from shard1 so shard0 loads the shard1 host
-assert.commandWorked(mongos.adminCommand(
- { moveChunk: coll + '', find: { _id: 0 }, to: shards[1]._id, _waitForDelete: true }));
-assert.commandWorked(mongos.adminCommand(
- { moveChunk: coll + '', find: { _id: 0 }, to: shards[0]._id, _waitForDelete: true }));
+ // Migrate the collection to and from shard1 so shard0 loads the shard1 host
+ assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 0}, to: shards[1]._id, _waitForDelete: true}));
+ assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 0}, to: shards[0]._id, _waitForDelete: true}));
-// Drop and re-add shard with the same name but a new host.
-assert.commandWorked(mongos.adminCommand({ removeShard: shards[1]._id }));
-assert.commandWorked(mongos.adminCommand({ removeShard: shards[1]._id }));
+ // Drop and re-add shard with the same name but a new host.
+ assert.commandWorked(mongos.adminCommand({removeShard: shards[1]._id}));
+ assert.commandWorked(mongos.adminCommand({removeShard: shards[1]._id}));
-var shard2 = MongoRunner.runMongod({});
-assert.commandWorked(mongos.adminCommand({ addShard: shard2.host, name: shards[1]._id }));
+ var shard2 = MongoRunner.runMongod({});
+ assert.commandWorked(mongos.adminCommand({addShard: shard2.host, name: shards[1]._id}));
-jsTest.log('Shard was dropped and re-added with same name...');
-st.printShardingStatus();
+ jsTest.log('Shard was dropped and re-added with same name...');
+ st.printShardingStatus();
-shards[0].conn.getDB('admin').runCommand({ setParameter: 1, traceExceptions: true });
-shard2.getDB('admin').runCommand({ setParameter: 1, traceExceptions: true });
+ shards[0].conn.getDB('admin').runCommand({setParameter: 1, traceExceptions: true});
+ shard2.getDB('admin').runCommand({setParameter: 1, traceExceptions: true});
-// Try a migration
-assert.commandWorked(mongos.adminCommand({ moveChunk: coll + '', find: { _id: 0 }, to: shards[1]._id }));
+ // Try a migration
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: coll + '', find: {_id: 0}, to: shards[1]._id}));
-assert.eq('world', shard2.getCollection(coll + '').findOne().hello);
+ assert.eq('world', shard2.getCollection(coll + '').findOne().hello);
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/all_config_hosts_down.js b/jstests/sharding/all_config_hosts_down.js
index 5827480dca4..3abd0d14feb 100644
--- a/jstests/sharding/all_config_hosts_down.js
+++ b/jstests/sharding/all_config_hosts_down.js
@@ -3,43 +3,42 @@
// Should fail sanely
//
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards : 1, mongos : 1 });
+ var st = new ShardingTest({shards: 1, mongos: 1});
-var mongos = st.s;
-var coll = mongos.getCollection( "foo.bar" );
+ var mongos = st.s;
+ var coll = mongos.getCollection("foo.bar");
-jsTestLog( "Stopping config servers" );
-for (var i = 0; i < st._configServers.length; i++) {
- MongoRunner.stopMongod(st._configServers[i]);
-}
-
-// Make sure mongos has no database info currently loaded
-mongos.getDB( "admin" ).runCommand({ flushRouterConfig : 1 });
-
-jsTestLog( "Config flushed and config servers down!" );
-
-// Throws transport error first and subsequent times when loading config data, not no primary
-for( var i = 0; i < 2; i++ ){
- try {
- coll.findOne();
- // Should always throw
- assert( false );
+ jsTestLog("Stopping config servers");
+ for (var i = 0; i < st._configServers.length; i++) {
+ MongoRunner.stopMongod(st._configServers[i]);
}
- catch( e ) {
- printjson( e );
-
- // Make sure we get a transport error, and not a no-primary error
- assert(e.code == 8002 || // SCCC config down, for v3.0 compatibility.
- e.code == 10276 || // Transport error
- e.code == 13328 || // Connect error
- e.code == ErrorCodes.HostUnreachable ||
- e.code == ErrorCodes.FailedToSatisfyReadPreference ||
- e.code == ErrorCodes.ReplicaSetNotFound);
+
+ // Make sure mongos has no database info currently loaded
+ mongos.getDB("admin").runCommand({flushRouterConfig: 1});
+
+ jsTestLog("Config flushed and config servers down!");
+
+ // Throws transport error first and subsequent times when loading config data, not no primary
+ for (var i = 0; i < 2; i++) {
+ try {
+ coll.findOne();
+ // Should always throw
+ assert(false);
+ } catch (e) {
+ printjson(e);
+
+ // Make sure we get a transport error, and not a no-primary error
+ assert(e.code == 8002 || // SCCC config down, for v3.0 compatibility.
+ e.code == 10276 || // Transport error
+ e.code == 13328 || // Connect error
+ e.code == ErrorCodes.HostUnreachable ||
+ e.code == ErrorCodes.FailedToSatisfyReadPreference ||
+ e.code == ErrorCodes.ReplicaSetNotFound);
+ }
}
-}
-st.stop();
+ st.stop();
}());
diff --git a/jstests/sharding/all_config_servers_blackholed_from_mongos.js b/jstests/sharding/all_config_servers_blackholed_from_mongos.js
index cf5ec266093..c3ed68e97de 100644
--- a/jstests/sharding/all_config_servers_blackholed_from_mongos.js
+++ b/jstests/sharding/all_config_servers_blackholed_from_mongos.js
@@ -1,44 +1,41 @@
// Ensures that if the config servers are blackholed from the point of view of MongoS, metadata
// operations do not get stuck forever.
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({
- name: 'all_config_servers_blackholed_from_mongos',
- shards: 2,
- mongos: 1,
- useBridge: true,
-});
+ var st = new ShardingTest({
+ name: 'all_config_servers_blackholed_from_mongos',
+ shards: 2,
+ mongos: 1,
+ useBridge: true,
+ });
-var testDB = st.s.getDB('BlackHoleDB');
+ var testDB = st.s.getDB('BlackHoleDB');
-assert.commandWorked(testDB.adminCommand({ enableSharding: 'BlackHoleDB' }));
-assert.commandWorked(testDB.adminCommand({
- shardCollection: testDB.ShardedColl.getFullName(),
- key: { _id: 1 }
-}));
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'BlackHoleDB'}));
+ assert.commandWorked(
+ testDB.adminCommand({shardCollection: testDB.ShardedColl.getFullName(), key: {_id: 1}}));
-assert.writeOK(testDB.ShardedColl.insert({ a: 1 }));
+ assert.writeOK(testDB.ShardedColl.insert({a: 1}));
-jsTest.log('Making all the config servers appear as a blackhole to mongos');
-st._configServers.forEach(function(configSvr) {
- configSvr.discardMessagesFrom(st.s, 1.0);
-});
+ jsTest.log('Making all the config servers appear as a blackhole to mongos');
+ st._configServers.forEach(function(configSvr) {
+ configSvr.discardMessagesFrom(st.s, 1.0);
+ });
-assert.commandWorked(testDB.adminCommand({ flushRouterConfig: 1 }));
+ assert.commandWorked(testDB.adminCommand({flushRouterConfig: 1}));
-// This shouldn't stall
-jsTest.log('Doing read operation on the sharded collection');
-assert.throws(function() {
- testDB.ShardedColl.find({}).itcount();
-});
+ // This shouldn't stall
+ jsTest.log('Doing read operation on the sharded collection');
+ assert.throws(function() {
+ testDB.ShardedColl.find({}).itcount();
+ });
-// This should fail, because the primary is not available
-jsTest.log('Doing write operation on a new database and collection');
-assert.writeError(st.s.getDB('NonExistentDB').TestColl.insert({
- _id: 0,
- value: 'This value will never be inserted' }));
+ // This should fail, because the primary is not available
+ jsTest.log('Doing write operation on a new database and collection');
+ assert.writeError(st.s.getDB('NonExistentDB')
+ .TestColl.insert({_id: 0, value: 'This value will never be inserted'}));
-st.stop();
+ st.stop();
}());
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
index c5d63fcae59..4fd60c3f21d 100644
--- a/jstests/sharding/array_shard_key.js
+++ b/jstests/sharding/array_shard_key.js
@@ -1,114 +1,111 @@
// Ensure you can't shard on an array key
-var st = new ShardingTest({ name : jsTestName(), shards : 3 });
+var st = new ShardingTest({name: jsTestName(), shards: 3});
var mongos = st.s0;
-var coll = mongos.getCollection( jsTestName() + ".foo" );
+var coll = mongos.getCollection(jsTestName() + ".foo");
-st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } );
+st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
-printjson( mongos.getDB("config").chunks.find().toArray() );
+printjson(mongos.getDB("config").chunks.find().toArray());
st.printShardingStatus();
-print( "1: insert some invalid data" );
+print("1: insert some invalid data");
var value = null;
-// Insert an object with invalid array key
-assert.writeError(coll.insert({ i : [ 1, 2 ] }));
+// Insert an object with invalid array key
+assert.writeError(coll.insert({i: [1, 2]}));
// Insert an object with all the right fields, but an invalid array val for _id
-assert.writeError(coll.insert({ _id : [ 1, 2 ] , i : 3}));
+assert.writeError(coll.insert({_id: [1, 2], i: 3}));
// Insert an object with valid array key
-assert.writeOK(coll.insert({ i : 1 }));
+assert.writeOK(coll.insert({i: 1}));
// Update the value with valid other field
-value = coll.findOne({ i : 1 });
-assert.writeOK(coll.update( value, { $set : { j : 2 } } ));
+value = coll.findOne({i: 1});
+assert.writeOK(coll.update(value, {$set: {j: 2}}));
// Update the value with invalid other fields
-value = coll.findOne({ i : 1 });
-assert.writeError(coll.update( value, Object.merge( value, { i : [ 3 ] } ) ));
+value = coll.findOne({i: 1});
+assert.writeError(coll.update(value, Object.merge(value, {i: [3]})));
// Multi-update the value with invalid other fields
-value = coll.findOne({ i : 1 });
-assert.writeError(coll.update( value, Object.merge( value, { i : [ 3, 4 ] } ), false, true));
+value = coll.findOne({i: 1});
+assert.writeError(coll.update(value, Object.merge(value, {i: [3, 4]}), false, true));
// Multi-update the value with other fields (won't work, but no error)
-value = coll.findOne({ i : 1 });
-assert.writeOK(coll.update( Object.merge( value, { i : [ 1, 1 ] } ), { $set : { k : 4 } }, false, true));
+value = coll.findOne({i: 1});
+assert.writeOK(coll.update(Object.merge(value, {i: [1, 1]}), {$set: {k: 4}}, false, true));
// Query the value with other fields (won't work, but no error)
-value = coll.findOne({ i : 1 });
-coll.find( Object.merge( value, { i : [ 1, 1 ] } ) ).toArray();
+value = coll.findOne({i: 1});
+coll.find(Object.merge(value, {i: [1, 1]})).toArray();
// Can't remove using multikey, but shouldn't error
-value = coll.findOne({ i : 1 });
-coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4 ] } ) );
+value = coll.findOne({i: 1});
+coll.remove(Object.extend(value, {i: [1, 2, 3, 4]}));
// Can't remove using multikey, but shouldn't error
-value = coll.findOne({ i : 1 });
-assert.writeOK(coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4, 5 ] } ) ));
-assert.eq( coll.find().itcount(), 1 );
+value = coll.findOne({i: 1});
+assert.writeOK(coll.remove(Object.extend(value, {i: [1, 2, 3, 4, 5]})));
+assert.eq(coll.find().itcount(), 1);
-value = coll.findOne({ i : 1 });
-assert.writeOK(coll.remove( Object.extend( value, { i : 1 } ) ));
-assert.eq( coll.find().itcount(), 0 );
+value = coll.findOne({i: 1});
+assert.writeOK(coll.remove(Object.extend(value, {i: 1})));
+assert.eq(coll.find().itcount(), 0);
-coll.ensureIndex({ _id : 1, i : 1, j: 1 });
+coll.ensureIndex({_id: 1, i: 1, j: 1});
// Can insert document that will make index into a multi-key as long as it's not part of shard key.
coll.remove({});
-assert.writeOK(coll.insert({ i: 1, j: [1, 2] }));
-assert.eq( coll.find().itcount(), 1 );
+assert.writeOK(coll.insert({i: 1, j: [1, 2]}));
+assert.eq(coll.find().itcount(), 1);
// Same is true for updates.
coll.remove({});
-coll.insert({ _id: 1, i: 1 });
-assert.writeOK(coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] }));
-assert.eq( coll.find().itcount(), 1 );
+coll.insert({_id: 1, i: 1});
+assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}));
+assert.eq(coll.find().itcount(), 1);
// Same for upserts.
coll.remove({});
-assert.writeOK(coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] }, true));
-assert.eq( coll.find().itcount(), 1 );
+assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}, true));
+assert.eq(coll.find().itcount(), 1);
-printjson( "Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey" );
+printjson("Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey");
// Insert a bunch of data then shard over key which is an array
-var coll = mongos.getCollection( "" + coll + "2" );
-for( var i = 0; i < 10; i++ ){
+var coll = mongos.getCollection("" + coll + "2");
+for (var i = 0; i < 10; i++) {
// TODO : does not check weird cases like [ i, i ]
- assert.writeOK(coll.insert({ i : [ i, i + 1 ] }));
+ assert.writeOK(coll.insert({i: [i, i + 1]}));
}
-coll.ensureIndex({ _id : 1, i : 1 });
+coll.ensureIndex({_id: 1, i: 1});
try {
- st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } );
-}
-catch( e ){
- print( "Correctly threw error on sharding with multikey index." );
+ st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
+} catch (e) {
+ print("Correctly threw error on sharding with multikey index.");
}
st.printShardingStatus();
// Insert a bunch of data then shard over key which is not an array
-var coll = mongos.getCollection( "" + coll + "3" );
-for( var i = 0; i < 10; i++ ){
+var coll = mongos.getCollection("" + coll + "3");
+for (var i = 0; i < 10; i++) {
// TODO : does not check weird cases like [ i, i ]
- assert.writeOK(coll.insert({ i : i }));
+ assert.writeOK(coll.insert({i: i}));
}
-coll.ensureIndex({ _id : 1, i : 1 });
+coll.ensureIndex({_id: 1, i: 1});
-st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } );
+st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
st.printShardingStatus();
-
-
// Finish
st.stop();
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index 8d45d4b2de3..7b8d55ee075 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -1,338 +1,366 @@
// Tests administrative sharding operations and map-reduce work or fail as expected, when key-based
// authentication is used
(function() {
-'use strict';
-
-var adminUser = {
- db : "admin",
- username : "foo",
- password : "bar"
-};
-
-var testUser = {
- db : "test",
- username : "bar",
- password : "baz"
-};
-
-var testUserReadOnly = {
- db : "test",
- username : "sad",
- password : "bat"
-};
-
-function login(userObj, thingToUse) {
- if (!thingToUse) {
- thingToUse = s;
- }
+ 'use strict';
+
+ var adminUser = {
+ db: "admin",
+ username: "foo",
+ password: "bar"
+ };
+
+ var testUser = {
+ db: "test",
+ username: "bar",
+ password: "baz"
+ };
+
+ var testUserReadOnly = {
+ db: "test",
+ username: "sad",
+ password: "bat"
+ };
+
+ function login(userObj, thingToUse) {
+ if (!thingToUse) {
+ thingToUse = s;
+ }
- thingToUse.getDB(userObj.db).auth(userObj.username, userObj.password);
-}
-
-function logout(userObj, thingToUse) {
- if (!thingToUse)
- thingToUse = s;
-
- s.getDB(userObj.db).runCommand({logout:1});
-}
-
-function getShardName(rsTest) {
- var master = rsTest.getPrimary();
- var config = master.getDB("local").system.replset.findOne();
- var members = config.members.map(function(elem) { return elem.host; });
- return config._id+"/"+members.join(",");
-}
-
-var s = new ShardingTest({ name: "auth",
- mongos: 1,
- shards: 0,
- other: {
- extraOptions: { "keyFile": "jstests/libs/key1" },
- noChunkSize: true, }
- });
-
-if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
- print('Skipping test on 32-bit platforms');
- return;
-}
-
-print("Configuration: Add user " + tojson(adminUser));
-s.getDB(adminUser.db).createUser({user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles});
-login(adminUser);
-
-// Set the chunk size, disable the secondary throttle (so the test doesn't run so slow)
-assert.writeOK(s.getDB( "config" ).settings.update({ _id: "chunksize" },
- { $set: { value : 1 } },
- { upsert: true }));
-assert.writeOK(s.getDB( "config" ).settings.update(
- { _id: "balancer" },
- { $set: { "_secondaryThrottle" : false,
- "_waitForDelete" : true } },
- { upsert: true }));
-
-printjson(s.getDB("config").settings.find().toArray());
-
-print("Restart mongos with different auth options");
-s.restartMongos(0, { v: 2,
- configdb: s._configDB,
- keyFile: "jstests/libs/key1",
- chunkSize: 1 });
-login(adminUser);
-
-var d1 = new ReplSetTest({ name : "d1", nodes : 3, useHostName : true });
-d1.startSet({keyFile : "jstests/libs/key2" });
-d1.initiate();
-
-print("d1 initiated");
-var shardName = authutil.asCluster(d1.nodes,
- "jstests/libs/key2",
- function() { return getShardName(d1); });
-
-print("adding shard w/out auth "+shardName);
-logout(adminUser);
-
-var result = s.getDB("admin").runCommand({addShard : shardName});
-printjson(result);
-assert.eq(result.code, 13);
-
-login(adminUser);
-
-print("adding shard w/wrong key "+shardName);
-
-var thrown = false;
-try {
- result = s.adminCommand({addShard : shardName});
-}
-catch(e) {
- thrown = true;
- printjson(e);
-}
-assert(thrown);
+ thingToUse.getDB(userObj.db).auth(userObj.username, userObj.password);
+ }
-print("start rs w/correct key");
+ function logout(userObj, thingToUse) {
+ if (!thingToUse)
+ thingToUse = s;
-d1.stopSet();
-d1.startSet({keyFile : "jstests/libs/key1" });
-d1.initiate();
+ s.getDB(userObj.db).runCommand({logout: 1});
+ }
-var master = d1.getPrimary();
+ function getShardName(rsTest) {
+ var master = rsTest.getPrimary();
+ var config = master.getDB("local").system.replset.findOne();
+ var members = config.members.map(function(elem) {
+ return elem.host;
+ });
+ return config._id + "/" + members.join(",");
+ }
-print("adding shard w/auth " + shardName);
+ var s = new ShardingTest({
+ name: "auth",
+ mongos: 1,
+ shards: 0,
+ other: {
+ extraOptions: {"keyFile": "jstests/libs/key1"},
+ noChunkSize: true,
+ }
+ });
-result = s.getDB("admin").runCommand({addShard : shardName});
-assert.eq(result.ok, 1, tojson(result));
+ if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
+ print('Skipping test on 32-bit platforms');
+ return;
+ }
-s.getDB("admin").runCommand({enableSharding : "test"});
-s.getDB("admin").runCommand({shardCollection : "test.foo", key : {x : 1}});
+ print("Configuration: Add user " + tojson(adminUser));
+ s.getDB(adminUser.db)
+ .createUser(
+ {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
+ login(adminUser);
+
+ // Set the chunk size, disable the secondary throttle (so the test doesn't run so slow)
+ assert.writeOK(
+ s.getDB("config").settings.update({_id: "chunksize"}, {$set: {value: 1}}, {upsert: true}));
+ assert.writeOK(s.getDB("config").settings.update(
+ {_id: "balancer"},
+ {$set: {"_secondaryThrottle": false, "_waitForDelete": true}},
+ {upsert: true}));
+
+ printjson(s.getDB("config").settings.find().toArray());
+
+ print("Restart mongos with different auth options");
+ s.restartMongos(0, {v: 2, configdb: s._configDB, keyFile: "jstests/libs/key1", chunkSize: 1});
+ login(adminUser);
+
+ var d1 = new ReplSetTest({name: "d1", nodes: 3, useHostName: true});
+ d1.startSet({keyFile: "jstests/libs/key2"});
+ d1.initiate();
+
+ print("d1 initiated");
+ var shardName = authutil.asCluster(d1.nodes,
+ "jstests/libs/key2",
+ function() {
+ return getShardName(d1);
+ });
+
+ print("adding shard w/out auth " + shardName);
+ logout(adminUser);
+
+ var result = s.getDB("admin").runCommand({addShard: shardName});
+ printjson(result);
+ assert.eq(result.code, 13);
+
+ login(adminUser);
+
+ print("adding shard w/wrong key " + shardName);
+
+ var thrown = false;
+ try {
+ result = s.adminCommand({addShard: shardName});
+ } catch (e) {
+ thrown = true;
+ printjson(e);
+ }
+ assert(thrown);
-d1.waitForState( d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000 );
+ print("start rs w/correct key");
-s.getDB(testUser.db).createUser({user: testUser.username,
- pwd: testUser.password,
- roles: jsTest.basicUserRoles});
-s.getDB(testUserReadOnly.db).createUser({user: testUserReadOnly.username,
- pwd: testUserReadOnly.password,
- roles: jsTest.readOnlyUserRoles});
+ d1.stopSet();
+ d1.startSet({keyFile: "jstests/libs/key1"});
+ d1.initiate();
-logout(adminUser);
+ var master = d1.getPrimary();
-print("query try");
-var e = assert.throws(function() {
- s.s.getDB("foo").bar.findOne();
-});
-printjson(e);
+ print("adding shard w/auth " + shardName);
-print("cmd try");
-assert.eq(0, s.s.getDB("foo").runCommand({listDatabases:1}).ok);
+ result = s.getDB("admin").runCommand({addShard: shardName});
+ assert.eq(result.ok, 1, tojson(result));
-print("insert try 1");
-s.getDB("test").foo.insert({x:1});
+ s.getDB("admin").runCommand({enableSharding: "test"});
+ s.getDB("admin").runCommand({shardCollection: "test.foo", key: {x: 1}});
-login(testUser);
-assert.eq(s.getDB("test").foo.findOne(), null);
+ d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
-print("insert try 2");
-assert.writeOK(s.getDB("test").foo.insert({ x: 1 }));
-assert.eq( 1 , s.getDB( "test" ).foo.find().itcount() , tojson(result) );
+ s.getDB(testUser.db)
+ .createUser(
+ {user: testUser.username, pwd: testUser.password, roles: jsTest.basicUserRoles});
+ s.getDB(testUserReadOnly.db)
+ .createUser({
+ user: testUserReadOnly.username,
+ pwd: testUserReadOnly.password,
+ roles: jsTest.readOnlyUserRoles
+ });
-logout(testUser);
+ logout(adminUser);
-var d2 = new ReplSetTest({name : "d2", nodes : 3, useHostName : true });
-d2.startSet({keyFile : "jstests/libs/key1" });
-d2.initiate();
-d2.awaitSecondaryNodes();
+ print("query try");
+ var e = assert.throws(function() {
+ s.s.getDB("foo").bar.findOne();
+ });
+ printjson(e);
-shardName = authutil.asCluster(d2.nodes, "jstests/libs/key1",
- function() { return getShardName(d2); });
+ print("cmd try");
+ assert.eq(0, s.s.getDB("foo").runCommand({listDatabases: 1}).ok);
-print("adding shard "+shardName);
-login(adminUser);
-print("logged in");
-result = s.getDB("admin").runCommand({addShard : shardName});
+ print("insert try 1");
+ s.getDB("test").foo.insert({x: 1});
-ReplSetTest.awaitRSClientHosts(s.s, d1.nodes, {ok: true });
-ReplSetTest.awaitRSClientHosts(s.s, d2.nodes, {ok: true });
+ login(testUser);
+ assert.eq(s.getDB("test").foo.findOne(), null);
-s.getDB("test").foo.remove({});
+ print("insert try 2");
+ assert.writeOK(s.getDB("test").foo.insert({x: 1}));
+ assert.eq(1, s.getDB("test").foo.find().itcount(), tojson(result));
-var num = 10000;
-var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
-for (i=0; i<num; i++) {
- bulk.insert({ _id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market" });
-}
-assert.writeOK(bulk.execute());
+ logout(testUser);
-s.startBalancer(60000);
+ var d2 = new ReplSetTest({name: "d2", nodes: 3, useHostName: true});
+ d2.startSet({keyFile: "jstests/libs/key1"});
+ d2.initiate();
+ d2.awaitSecondaryNodes();
-assert.soon(function() {
- var d1Chunks = s.getDB("config").chunks.count({shard : "d1"});
- var d2Chunks = s.getDB("config").chunks.count({shard : "d2"});
- var totalChunks = s.getDB("config").chunks.count({ns : "test.foo"});
+ shardName = authutil.asCluster(d2.nodes,
+ "jstests/libs/key1",
+ function() {
+ return getShardName(d2);
+ });
- print("chunks: " + d1Chunks+" "+d2Chunks+" "+totalChunks);
+ print("adding shard " + shardName);
+ login(adminUser);
+ print("logged in");
+ result = s.getDB("admin").runCommand({addShard: shardName});
- return d1Chunks > 0 && d2Chunks > 0 && (d1Chunks + d2Chunks == totalChunks);
- },
- "Chunks failed to balance",
- 60000,
- 5000);
+ ReplSetTest.awaitRSClientHosts(s.s, d1.nodes, {ok: true});
+ ReplSetTest.awaitRSClientHosts(s.s, d2.nodes, {ok: true});
-//SERVER-3645
-//assert.eq(s.getDB("test").foo.count(), num+1);
-var numDocs = s.getDB("test").foo.find().itcount();
-if (numDocs != num) {
- // Missing documents. At this point we're already in a failure mode, the code in this statement
- // is to get a better idea how/why it's failing.
+ s.getDB("test").foo.remove({});
- var numDocsSeen = 0;
- var lastDocNumber = -1;
- var missingDocNumbers = [];
- var docs = s.getDB("test").foo.find().sort({x:1}).toArray();
- for (var i = 0; i < docs.length; i++) {
- if (docs[i].x != lastDocNumber + 1) {
- for (var missing = lastDocNumber + 1; missing < docs[i].x; missing++) {
- missingDocNumbers.push(missing);
+ var num = 10000;
+ var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
+ for (i = 0; i < num; i++) {
+ bulk.insert(
+ {_id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market"});
+ }
+ assert.writeOK(bulk.execute());
+
+ s.startBalancer(60000);
+
+ assert.soon(function() {
+ var d1Chunks = s.getDB("config").chunks.count({shard: "d1"});
+ var d2Chunks = s.getDB("config").chunks.count({shard: "d2"});
+ var totalChunks = s.getDB("config").chunks.count({ns: "test.foo"});
+
+ print("chunks: " + d1Chunks + " " + d2Chunks + " " + totalChunks);
+
+ return d1Chunks > 0 && d2Chunks > 0 && (d1Chunks + d2Chunks == totalChunks);
+ }, "Chunks failed to balance", 60000, 5000);
+
+ // SERVER-3645
+ // assert.eq(s.getDB("test").foo.count(), num+1);
+ var numDocs = s.getDB("test").foo.find().itcount();
+ if (numDocs != num) {
+ // Missing documents. At this point we're already in a failure mode, the code in this
+ // statement
+ // is to get a better idea how/why it's failing.
+
+ var numDocsSeen = 0;
+ var lastDocNumber = -1;
+ var missingDocNumbers = [];
+ var docs = s.getDB("test").foo.find().sort({x: 1}).toArray();
+ for (var i = 0; i < docs.length; i++) {
+ if (docs[i].x != lastDocNumber + 1) {
+ for (var missing = lastDocNumber + 1; missing < docs[i].x; missing++) {
+ missingDocNumbers.push(missing);
+ }
}
+ lastDocNumber = docs[i].x;
+ numDocsSeen++;
}
- lastDocNumber = docs[i].x;
- numDocsSeen++;
- }
- assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()");
- assert.eq(num - numDocs, missingDocNumbers.length);
+ assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()");
+ assert.eq(num - numDocs, missingDocNumbers.length);
- load('jstests/libs/trace_missing_docs.js');
+ load('jstests/libs/trace_missing_docs.js');
- for ( var i = 0; i < missingDocNumbers.length; i++ ) {
- jsTest.log( "Tracing doc: " + missingDocNumbers[i] );
- traceMissingDoc( s.getDB( "test" ).foo, { _id : missingDocNumbers[i],
- x : missingDocNumbers[i] } );
+ for (var i = 0; i < missingDocNumbers.length; i++) {
+ jsTest.log("Tracing doc: " + missingDocNumbers[i]);
+ traceMissingDoc(s.getDB("test").foo,
+ {_id: missingDocNumbers[i], x: missingDocNumbers[i]});
+ }
+
+ assert(false,
+ "Number of docs found does not equal the number inserted. Missing docs: " +
+ missingDocNumbers);
}
- assert(false, "Number of docs found does not equal the number inserted. Missing docs: " + missingDocNumbers);
-}
+ // We're only sure we aren't duplicating documents iff there's no balancing going on here
+ // This call also waits for any ongoing balancing to stop
+ s.stopBalancer(60000);
-// We're only sure we aren't duplicating documents iff there's no balancing going on here
-// This call also waits for any ongoing balancing to stop
-s.stopBalancer(60000);
+ var cursor = s.getDB("test").foo.find({x: {$lt: 500}});
-var cursor = s.getDB("test").foo.find({x:{$lt : 500}});
+ var count = 0;
+ while (cursor.hasNext()) {
+ cursor.next();
+ count++;
+ }
-var count = 0;
-while (cursor.hasNext()) {
- cursor.next();
- count++;
-}
+ assert.eq(count, 500);
+
+ logout(adminUser);
+
+ d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
+ d2.waitForState(d2.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
+
+ authutil.asCluster(d1.nodes,
+ "jstests/libs/key1",
+ function() {
+ d1.awaitReplication(120000);
+ });
+ authutil.asCluster(d2.nodes,
+ "jstests/libs/key1",
+ function() {
+ d2.awaitReplication(120000);
+ });
+
+ // add admin on shard itself, hack to prevent localhost auth bypass
+ d1.getPrimary()
+ .getDB(adminUser.db)
+ .createUser(
+ {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 60000});
+ d2.getPrimary()
+ .getDB(adminUser.db)
+ .createUser(
+ {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 60000});
+
+ login(testUser);
+ print("testing map reduce");
+
+ // Sharded map reduce can be tricky since all components talk to each other. For example
+ // SERVER-4114 is triggered when 1 mongod connects to another for final reduce it's not
+ // properly tested here since addresses are localhost, which is more permissive.
+ var res = s.getDB("test").runCommand({
+ mapreduce: "foo",
+ map: function() {
+ emit(this.x, 1);
+ },
+ reduce: function(key, values) {
+ return values.length;
+ },
+ out: "mrout"
+ });
+ printjson(res);
+ assert.commandWorked(res);
+
+ // Check that dump doesn't get stuck with auth
+ var x = runMongoProgram("mongodump",
+ "--host",
+ s.s.host,
+ "-d",
+ testUser.db,
+ "-u",
+ testUser.username,
+ "-p",
+ testUser.password,
+ "--authenticationMechanism",
+ "SCRAM-SHA-1");
+ print("result: " + x);
+
+ // Test read only users
+ print("starting read only tests");
+
+ var readOnlyS = new Mongo(s.getDB("test").getMongo().host);
+ var readOnlyDB = readOnlyS.getDB("test");
+
+ print(" testing find that should fail");
+ assert.throws(function() {
+ readOnlyDB.foo.findOne();
+ });
-assert.eq(count, 500);
+ print(" logging in");
+ login(testUserReadOnly, readOnlyS);
-logout(adminUser);
+ print(" testing find that should work");
+ readOnlyDB.foo.findOne();
-d1.waitForState( d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000 );
-d2.waitForState( d2.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000 );
+ print(" testing write that should fail");
+ assert.writeError(readOnlyDB.foo.insert({eliot: 1}));
-authutil.asCluster(d1.nodes, "jstests/libs/key1", function() { d1.awaitReplication(120000); });
-authutil.asCluster(d2.nodes, "jstests/libs/key1", function() { d2.awaitReplication(120000); });
+ print(" testing read command (should succeed)");
+ assert.commandWorked(readOnlyDB.runCommand({count: "foo"}));
-// add admin on shard itself, hack to prevent localhost auth bypass
-d1.getPrimary().getDB(adminUser.db).createUser({user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 60000});
-d2.getPrimary().getDB(adminUser.db).createUser({user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 60000});
+ print("make sure currentOp/killOp fail");
+ assert.commandFailed(readOnlyDB.currentOp());
+ assert.commandFailed(readOnlyDB.killOp(123));
-login(testUser);
-print( "testing map reduce" );
+ // fsyncUnlock doesn't work in mongos anyway, so no need check authorization for it
+ /*
+ broken because of SERVER-4156
+ print( " testing write command (should fail)" );
+ assert.commandFailed(readOnlyDB.runCommand(
+ {mapreduce : "foo",
+ map : function() { emit(this.y, 1); },
+ reduce : function(key, values) { return values.length; },
+ out:"blarg"
+ }));
+ */
-// Sharded map reduce can be tricky since all components talk to each other. For example
-// SERVER-4114 is triggered when 1 mongod connects to another for final reduce it's not
-// properly tested here since addresses are localhost, which is more permissive.
-var res = s.getDB("test").runCommand(
- {mapreduce : "foo",
- map : function() { emit(this.x, 1); },
- reduce : function(key, values) { return values.length; },
- out:"mrout"
- });
-printjson(res);
-assert.commandWorked(res);
-
-// Check that dump doesn't get stuck with auth
-var x = runMongoProgram("mongodump",
- "--host", s.s.host,
- "-d", testUser.db,
- "-u", testUser.username,
- "-p", testUser.password,
- "--authenticationMechanism", "SCRAM-SHA-1");
-print("result: " + x);
-
-// Test read only users
-print( "starting read only tests" );
-
-var readOnlyS = new Mongo( s.getDB( "test" ).getMongo().host );
-var readOnlyDB = readOnlyS.getDB( "test" );
-
-print( " testing find that should fail" );
-assert.throws( function(){ readOnlyDB.foo.findOne(); } );
-
-print( " logging in" );
-login( testUserReadOnly , readOnlyS );
-
-print( " testing find that should work" );
-readOnlyDB.foo.findOne();
-
-print( " testing write that should fail" );
-assert.writeError(readOnlyDB.foo.insert({ eliot: 1 }));
-
-print( " testing read command (should succeed)" );
-assert.commandWorked(readOnlyDB.runCommand({count : "foo"}));
-
-print("make sure currentOp/killOp fail");
-assert.commandFailed(readOnlyDB.currentOp());
-assert.commandFailed(readOnlyDB.killOp(123));
-
-// fsyncUnlock doesn't work in mongos anyway, so no need check authorization for it
-/*
-broken because of SERVER-4156
-print( " testing write command (should fail)" );
-assert.commandFailed(readOnlyDB.runCommand(
- {mapreduce : "foo",
- map : function() { emit(this.y, 1); },
- reduce : function(key, values) { return values.length; },
- out:"blarg"
- }));
-*/
-
-print( " testing logout (should succeed)" );
-assert.commandWorked(readOnlyDB.runCommand({logout : 1}));
-
-print("make sure currentOp/killOp fail again");
-assert.commandFailed(readOnlyDB.currentOp());
-assert.commandFailed(readOnlyDB.killOp(123));
-
-s.stop();
+ print(" testing logout (should succeed)");
+ assert.commandWorked(readOnlyDB.runCommand({logout: 1}));
+
+ print("make sure currentOp/killOp fail again");
+ assert.commandFailed(readOnlyDB.currentOp());
+ assert.commandFailed(readOnlyDB.killOp(123));
+
+ s.stop();
})();
diff --git a/jstests/sharding/auth2.js b/jstests/sharding/auth2.js
index e58657e8dba..e26c58dccf1 100644
--- a/jstests/sharding/auth2.js
+++ b/jstests/sharding/auth2.js
@@ -1,6 +1,10 @@
-var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunkSize: 1, verbose : 2,
- other : { nopreallocj : 1, verbose : 2, useHostname : true,
- configOptions : { verbose : 2 }}});
+var st = new ShardingTest({
+ keyFile: 'jstests/libs/key1',
+ shards: 2,
+ chunkSize: 1,
+ verbose: 2,
+ other: {nopreallocj: 1, verbose: 2, useHostname: true, configOptions: {verbose: 2}}
+});
var mongos = st.s;
var adminDB = mongos.getDB('admin');
@@ -8,13 +12,12 @@ var db = mongos.getDB('test');
adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
-jsTestLog( "Add user was successful" );
-
+jsTestLog("Add user was successful");
// Test for SERVER-6549, make sure that repeatedly logging in always passes.
-for ( var i = 0; i < 100; i++ ) {
- adminDB = new Mongo( mongos.host ).getDB('admin');
- assert( adminDB.auth('admin', 'password'), "Auth failed on attempt #: " + i );
+for (var i = 0; i < 100; i++) {
+ adminDB = new Mongo(mongos.host).getDB('admin');
+ assert(adminDB.auth('admin', 'password'), "Auth failed on attempt #: " + i);
}
st.stop();
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index 4de15e2f58a..cb1887d4aae 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -3,292 +3,310 @@
*/
var doTest = function() {
-var rsOpts = { oplogSize: 10, useHostname : false };
-var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunksize : 2,
- rs : rsOpts, other : { nopreallocj : 1, useHostname : false }});
-
-var mongos = st.s;
-var adminDB = mongos.getDB( 'admin' );
-var configDB = mongos.getDB( 'config' );
-var testDB = mongos.getDB( 'test' );
-
-jsTestLog('Setting up initial users');
-var rwUser = 'rwUser';
-var roUser = 'roUser';
-var password = 'password';
-var expectedDocs = 1000;
-
-adminDB.createUser({user: rwUser, pwd: password, roles: jsTest.adminUserRoles});
-
-assert( adminDB.auth( rwUser, password ) );
-
-// Secondaries should be up here, since we awaitReplication in the ShardingTest, but we *don't*
-// wait for the mongos to explicitly detect them.
-ReplSetTest.awaitRSClientHosts( mongos, st.rs0.getSecondaries(), { ok : true, secondary : true });
-ReplSetTest.awaitRSClientHosts( mongos, st.rs1.getSecondaries(), { ok : true, secondary : true });
-
-testDB.createUser({user: rwUser, pwd: password, roles: jsTest.basicUserRoles});
-testDB.createUser({user: roUser, pwd: password, roles: jsTest.readOnlyUserRoles});
-
-authenticatedConn = new Mongo( mongos.host );
-authenticatedConn.getDB( 'admin' ).auth( rwUser, password );
-
-// Add user to shards to prevent localhost connections from having automatic full access
-st.rs0.getPrimary().getDB( 'admin' ).createUser({user: 'user',
- pwd: 'password',
- roles: jsTest.basicUserRoles},
- {w: 3, wtimeout: 30000});
-st.rs1.getPrimary().getDB( 'admin' ).createUser({user: 'user',
- pwd: 'password',
- roles: jsTest.basicUserRoles},
- {w: 3, wtimeout: 30000} );
-
-
-
-jsTestLog('Creating initial data');
-
-st.adminCommand( { enablesharding : "test" } );
-st.ensurePrimaryShard('test', 'test-rs0');
-st.adminCommand( { shardcollection : "test.foo" , key : { i : 1, j : 1 } } );
-
-// Balancer is stopped by default, so no moveChunks will interfere with the splits we're testing
-
-var str = 'a';
-while ( str.length < 8000 ) {
- str += str;
-}
-
-var bulk = testDB.foo.initializeUnorderedBulkOp();
-for ( var i = 0; i < 100; i++ ) {
- for ( var j = 0; j < 10; j++ ) {
- bulk.insert({i:i, j:j, str:str});
+ var rsOpts = {
+ oplogSize: 10,
+ useHostname: false
+ };
+ var st = new ShardingTest({
+ keyFile: 'jstests/libs/key1',
+ shards: 2,
+ chunksize: 2,
+ rs: rsOpts,
+ other: {nopreallocj: 1, useHostname: false}
+ });
+
+ var mongos = st.s;
+ var adminDB = mongos.getDB('admin');
+ var configDB = mongos.getDB('config');
+ var testDB = mongos.getDB('test');
+
+ jsTestLog('Setting up initial users');
+ var rwUser = 'rwUser';
+ var roUser = 'roUser';
+ var password = 'password';
+ var expectedDocs = 1000;
+
+ adminDB.createUser({user: rwUser, pwd: password, roles: jsTest.adminUserRoles});
+
+ assert(adminDB.auth(rwUser, password));
+
+ // Secondaries should be up here, since we awaitReplication in the ShardingTest, but we *don't*
+ // wait for the mongos to explicitly detect them.
+ ReplSetTest.awaitRSClientHosts(mongos, st.rs0.getSecondaries(), {ok: true, secondary: true});
+ ReplSetTest.awaitRSClientHosts(mongos, st.rs1.getSecondaries(), {ok: true, secondary: true});
+
+ testDB.createUser({user: rwUser, pwd: password, roles: jsTest.basicUserRoles});
+ testDB.createUser({user: roUser, pwd: password, roles: jsTest.readOnlyUserRoles});
+
+ authenticatedConn = new Mongo(mongos.host);
+ authenticatedConn.getDB('admin').auth(rwUser, password);
+
+ // Add user to shards to prevent localhost connections from having automatic full access
+ st.rs0.getPrimary().getDB('admin').createUser(
+ {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
+ st.rs1.getPrimary().getDB('admin').createUser(
+ {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
+
+ jsTestLog('Creating initial data');
+
+ st.adminCommand({enablesharding: "test"});
+ st.ensurePrimaryShard('test', 'test-rs0');
+ st.adminCommand({shardcollection: "test.foo", key: {i: 1, j: 1}});
+
+ // Balancer is stopped by default, so no moveChunks will interfere with the splits we're testing
+
+ var str = 'a';
+ while (str.length < 8000) {
+ str += str;
}
-}
-assert.writeOK(bulk.execute({ w: "majority"}));
-
-assert.eq(expectedDocs, testDB.foo.count());
-
-// Wait for the balancer to start back up
-assert.writeOK(configDB.settings.update({_id: 'balancer'}, {$set: {_waitForDelete: true}}, true));
-st.startBalancer();
-
-// Make sure we've done at least some splitting, so the balancer will work
-assert.gt( configDB.chunks.find({ ns : 'test.foo' }).count(), 2 );
-
-// Make sure we eventually balance all the chunks we've created
-assert.soon( function() {
- var x = st.chunkDiff( "foo", "test" );
- print( "chunk diff: " + x );
- return x < 2 && configDB.locks.findOne({ _id : 'test.foo' }).state == 0;
-}, "no balance happened", 5 * 60 * 1000 );
-
-var map = function() { emit (this.i, this.j); };
-var reduce = function( key, values ) {
- var jCount = 0;
- values.forEach( function(j) { jCount += j; } );
- return jCount;
-};
-
-var checkCommandSucceeded = function( db, cmdObj ) {
- print( "Running command that should succeed: " );
- printjson( cmdObj );
- resultObj = db.runCommand( cmdObj );
- printjson( resultObj );
- assert ( resultObj.ok );
- return resultObj;
-};
-
-var checkCommandFailed = function( db, cmdObj ) {
- print( "Running command that should fail: " );
- printjson( cmdObj );
- resultObj = db.runCommand( cmdObj );
- printjson( resultObj );
- assert ( !resultObj.ok );
- return resultObj;
-};
-var checkReadOps = function( hasReadAuth ) {
- if ( hasReadAuth ) {
- print( "Checking read operations, should work" );
- assert.eq( expectedDocs, testDB.foo.find().itcount() );
- assert.eq( expectedDocs, testDB.foo.count() );
- // NOTE: This is an explicit check that GLE can be run with read prefs, not the result of
- // above.
- assert.eq( null, testDB.runCommand({getlasterror : 1}).err );
- checkCommandSucceeded( testDB, {dbstats : 1} );
- checkCommandSucceeded( testDB, {collstats : 'foo'} );
-
- // inline map-reduce works read-only
- var res = checkCommandSucceeded( testDB, {mapreduce : 'foo', map : map, reduce : reduce,
- out : {inline : 1}});
- assert.eq( 100, res.results.length );
- assert.eq( 45, res.results[0].value );
-
- res = checkCommandSucceeded( testDB,
- {aggregate:'foo',
- pipeline: [ {$project : {j : 1}},
- {$group : {_id : 'j', sum : {$sum : '$j'}}}]} );
- assert.eq( 4500, res.result[0].sum );
- } else {
- print( "Checking read operations, should fail" );
- assert.throws( function() { testDB.foo.find().itcount(); } );
- checkCommandFailed( testDB, {dbstats : 1} );
- checkCommandFailed( testDB, {collstats : 'foo'} );
- checkCommandFailed( testDB, {mapreduce : 'foo', map : map, reduce : reduce,
- out : { inline : 1 }} );
- checkCommandFailed( testDB, {aggregate:'foo',
- pipeline: [ {$project : {j : 1}},
- {$group : {_id : 'j', sum : {$sum : '$j'}}}]} );
+ var bulk = testDB.foo.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++) {
+ for (var j = 0; j < 10; j++) {
+ bulk.insert({i: i, j: j, str: str});
+ }
}
-};
-
-var checkWriteOps = function( hasWriteAuth ) {
- if ( hasWriteAuth ) {
- print( "Checking write operations, should work" );
- testDB.foo.insert({a : 1, i : 1, j : 1});
- res = checkCommandSucceeded( testDB, { findAndModify: "foo", query: {a:1, i:1, j:1},
- update: {$set: {b:1}}});
- assert.eq(1, res.value.a);
- assert.eq(null, res.value.b);
- assert.eq(1, testDB.foo.findOne({a:1}).b);
- testDB.foo.remove({a : 1});
- assert.eq( null, testDB.runCommand({getlasterror : 1}).err );
- checkCommandSucceeded( testDB, {reIndex:'foo'} );
- checkCommandSucceeded( testDB, {repairDatabase : 1} );
- checkCommandSucceeded( testDB, {mapreduce : 'foo', map : map, reduce : reduce,
- out : 'mrOutput'} );
- assert.eq( 100, testDB.mrOutput.count() );
- assert.eq( 45, testDB.mrOutput.findOne().value );
-
- checkCommandSucceeded( testDB, {drop : 'foo'} );
- assert.eq( 0, testDB.foo.count() );
- testDB.foo.insert({a:1});
- assert.eq( 1, testDB.foo.count() );
- checkCommandSucceeded( testDB, {dropDatabase : 1} );
- assert.eq( 0, testDB.foo.count() );
- checkCommandSucceeded( testDB, {create : 'baz'} );
- } else {
- print( "Checking write operations, should fail" );
- testDB.foo.insert({a : 1, i : 1, j : 1});
- assert.eq(0, authenticatedConn.getDB('test').foo.count({a : 1, i : 1, j : 1}));
- checkCommandFailed( testDB, { findAndModify: "foo", query: {a:1, i:1, j:1},
- update: {$set: {b:1}}} );
- checkCommandFailed( testDB, {reIndex:'foo'} );
- checkCommandFailed( testDB, {repairDatabase : 1} );
- checkCommandFailed( testDB, {mapreduce : 'foo', map : map, reduce : reduce,
- out : 'mrOutput'} );
- checkCommandFailed( testDB, {drop : 'foo'} );
- checkCommandFailed( testDB, {dropDatabase : 1} );
- passed = true;
- try {
- // For some reason when create fails it throws an exception instead of just returning ok:0
- res = testDB.runCommand( {create : 'baz'} );
- if ( !res.ok ) {
+ assert.writeOK(bulk.execute({w: "majority"}));
+
+ assert.eq(expectedDocs, testDB.foo.count());
+
+ // Wait for the balancer to start back up
+ assert.writeOK(
+ configDB.settings.update({_id: 'balancer'}, {$set: {_waitForDelete: true}}, true));
+ st.startBalancer();
+
+ // Make sure we've done at least some splitting, so the balancer will work
+ assert.gt(configDB.chunks.find({ns: 'test.foo'}).count(), 2);
+
+ // Make sure we eventually balance all the chunks we've created
+ assert.soon(function() {
+ var x = st.chunkDiff("foo", "test");
+ print("chunk diff: " + x);
+ return x < 2 && configDB.locks.findOne({_id: 'test.foo'}).state == 0;
+ }, "no balance happened", 5 * 60 * 1000);
+
+ var map = function() {
+ emit(this.i, this.j);
+ };
+ var reduce = function(key, values) {
+ var jCount = 0;
+ values.forEach(function(j) {
+ jCount += j;
+ });
+ return jCount;
+ };
+
+ var checkCommandSucceeded = function(db, cmdObj) {
+ print("Running command that should succeed: ");
+ printjson(cmdObj);
+ resultObj = db.runCommand(cmdObj);
+ printjson(resultObj);
+ assert(resultObj.ok);
+ return resultObj;
+ };
+
+ var checkCommandFailed = function(db, cmdObj) {
+ print("Running command that should fail: ");
+ printjson(cmdObj);
+ resultObj = db.runCommand(cmdObj);
+ printjson(resultObj);
+ assert(!resultObj.ok);
+ return resultObj;
+ };
+
+ var checkReadOps = function(hasReadAuth) {
+ if (hasReadAuth) {
+ print("Checking read operations, should work");
+ assert.eq(expectedDocs, testDB.foo.find().itcount());
+ assert.eq(expectedDocs, testDB.foo.count());
+ // NOTE: This is an explicit check that GLE can be run with read prefs, not the result
+ // of
+ // above.
+ assert.eq(null, testDB.runCommand({getlasterror: 1}).err);
+ checkCommandSucceeded(testDB, {dbstats: 1});
+ checkCommandSucceeded(testDB, {collstats: 'foo'});
+
+ // inline map-reduce works read-only
+ var res = checkCommandSucceeded(
+ testDB, {mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
+ assert.eq(100, res.results.length);
+ assert.eq(45, res.results[0].value);
+
+ res = checkCommandSucceeded(
+ testDB,
+ {
+ aggregate: 'foo',
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
+ });
+ assert.eq(4500, res.result[0].sum);
+ } else {
+ print("Checking read operations, should fail");
+ assert.throws(function() {
+ testDB.foo.find().itcount();
+ });
+ checkCommandFailed(testDB, {dbstats: 1});
+ checkCommandFailed(testDB, {collstats: 'foo'});
+ checkCommandFailed(testDB,
+ {mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
+ checkCommandFailed(
+ testDB,
+ {
+ aggregate: 'foo',
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
+ });
+ }
+ };
+
+ var checkWriteOps = function(hasWriteAuth) {
+ if (hasWriteAuth) {
+ print("Checking write operations, should work");
+ testDB.foo.insert({a: 1, i: 1, j: 1});
+ res = checkCommandSucceeded(
+ testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
+ assert.eq(1, res.value.a);
+ assert.eq(null, res.value.b);
+ assert.eq(1, testDB.foo.findOne({a: 1}).b);
+ testDB.foo.remove({a: 1});
+ assert.eq(null, testDB.runCommand({getlasterror: 1}).err);
+ checkCommandSucceeded(testDB, {reIndex: 'foo'});
+ checkCommandSucceeded(testDB, {repairDatabase: 1});
+ checkCommandSucceeded(testDB,
+ {mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
+ assert.eq(100, testDB.mrOutput.count());
+ assert.eq(45, testDB.mrOutput.findOne().value);
+
+ checkCommandSucceeded(testDB, {drop: 'foo'});
+ assert.eq(0, testDB.foo.count());
+ testDB.foo.insert({a: 1});
+ assert.eq(1, testDB.foo.count());
+ checkCommandSucceeded(testDB, {dropDatabase: 1});
+ assert.eq(0, testDB.foo.count());
+ checkCommandSucceeded(testDB, {create: 'baz'});
+ } else {
+ print("Checking write operations, should fail");
+ testDB.foo.insert({a: 1, i: 1, j: 1});
+ assert.eq(0, authenticatedConn.getDB('test').foo.count({a: 1, i: 1, j: 1}));
+ checkCommandFailed(
+ testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
+ checkCommandFailed(testDB, {reIndex: 'foo'});
+ checkCommandFailed(testDB, {repairDatabase: 1});
+ checkCommandFailed(testDB,
+ {mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
+ checkCommandFailed(testDB, {drop: 'foo'});
+ checkCommandFailed(testDB, {dropDatabase: 1});
+ passed = true;
+ try {
+ // For some reason when create fails it throws an exception instead of just
+ // returning ok:0
+ res = testDB.runCommand({create: 'baz'});
+ if (!res.ok) {
+ passed = false;
+ }
+ } catch (e) {
+ // expected
+ printjson(e);
passed = false;
}
- } catch (e) {
- // expected
- printjson(e);
- passed = false;
+ assert(!passed);
}
- assert( !passed );
- }
-};
-
-var checkAdminOps = function( hasAuth ) {
- if ( hasAuth ) {
- checkCommandSucceeded( adminDB, {getCmdLineOpts : 1} );
- checkCommandSucceeded( adminDB, {serverStatus : 1} );
- checkCommandSucceeded( adminDB, {listShards : 1} );
- checkCommandSucceeded( adminDB, {whatsmyuri : 1} );
- checkCommandSucceeded( adminDB, {isdbgrid : 1} );
- checkCommandSucceeded( adminDB, {ismaster : 1} );
- checkCommandSucceeded( adminDB, {split : 'test.foo', find : {i : 1, j : 1}} );
- chunk = configDB.chunks.findOne({ shard : st.rs0.name });
- checkCommandSucceeded( adminDB, {moveChunk : 'test.foo', find : chunk.min,
- to : st.rs1.name, _waitForDelete : true} );
- } else {
- checkCommandFailed( adminDB, {getCmdLineOpts : 1} );
- checkCommandFailed( adminDB, {serverStatus : 1} );
- checkCommandFailed( adminDB, {listShards : 1} );
- // whatsmyuri, isdbgrid, and ismaster don't require any auth
- checkCommandSucceeded( adminDB, {whatsmyuri : 1} );
- checkCommandSucceeded( adminDB, {isdbgrid : 1} );
- checkCommandSucceeded( adminDB, {ismaster : 1} );
- checkCommandFailed( adminDB, {split : 'test.foo', find : {i : 1, j : 1}} );
- chunkKey = { i : { $minKey : 1 }, j : { $minKey : 1 } };
- checkCommandFailed( adminDB, {moveChunk : 'test.foo', find : chunkKey,
- to : st.rs1.name, _waitForDelete : true} );
-
- }
-};
-
-var checkRemoveShard = function( hasWriteAuth ) {
- if ( hasWriteAuth ) {
- // start draining
- checkCommandSucceeded( adminDB, { removeshard : st.rs1.name } );
- // Wait for shard to be completely removed
- checkRemoveShard = function() {
- res = checkCommandSucceeded( adminDB, { removeshard : st.rs1.name } );
- return res.msg == 'removeshard completed successfully';
- };
- assert.soon( checkRemoveShard , "failed to remove shard" );
- } else {
- checkCommandFailed( adminDB, { removeshard : st.rs1.name } );
- }
-};
-
-var checkAddShard = function( hasWriteAuth ) {
- if ( hasWriteAuth ) {
- checkCommandSucceeded( adminDB, { addshard : st.rs1.getURL() } );
- } else {
- checkCommandFailed( adminDB, { addshard : st.rs1.getURL() } );
- }
-};
-
-
-st.stopBalancer();
-
-jsTestLog("Checking admin commands with admin auth credentials");
-checkAdminOps( true );
-assert( adminDB.logout().ok );
-
-jsTestLog("Checking admin commands with no auth credentials");
-checkAdminOps( false );
-
-jsTestLog("Checking commands with no auth credentials");
-checkReadOps( false );
-checkWriteOps( false );
-
-// Authenticate as read-only user
-jsTestLog("Checking commands with read-only auth credentials");
-assert( testDB.auth( roUser, password ) );
-checkReadOps( true );
-checkWriteOps( false );
-
-// Authenticate as read-write user
-jsTestLog("Checking commands with read-write auth credentials");
-assert( testDB.auth( rwUser, password ) );
-checkReadOps( true );
-checkWriteOps( true );
-
-
-jsTestLog("Check drainging/removing a shard");
-assert( testDB.logout().ok );
-checkRemoveShard( false );
-assert( adminDB.auth( rwUser, password ) );
-assert( testDB.dropDatabase().ok );
-checkRemoveShard( true );
-st.printShardingStatus();
-
-jsTestLog("Check adding a shard");
-assert( adminDB.logout().ok );
-checkAddShard( false );
-assert( adminDB.auth( rwUser, password ) );
-checkAddShard( true );
-st.printShardingStatus();
+ };
+
+ var checkAdminOps = function(hasAuth) {
+ if (hasAuth) {
+ checkCommandSucceeded(adminDB, {getCmdLineOpts: 1});
+ checkCommandSucceeded(adminDB, {serverStatus: 1});
+ checkCommandSucceeded(adminDB, {listShards: 1});
+ checkCommandSucceeded(adminDB, {whatsmyuri: 1});
+ checkCommandSucceeded(adminDB, {isdbgrid: 1});
+ checkCommandSucceeded(adminDB, {ismaster: 1});
+ checkCommandSucceeded(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
+ chunk = configDB.chunks.findOne({shard: st.rs0.name});
+ checkCommandSucceeded(
+ adminDB,
+ {moveChunk: 'test.foo', find: chunk.min, to: st.rs1.name, _waitForDelete: true});
+ } else {
+ checkCommandFailed(adminDB, {getCmdLineOpts: 1});
+ checkCommandFailed(adminDB, {serverStatus: 1});
+ checkCommandFailed(adminDB, {listShards: 1});
+ // whatsmyuri, isdbgrid, and ismaster don't require any auth
+ checkCommandSucceeded(adminDB, {whatsmyuri: 1});
+ checkCommandSucceeded(adminDB, {isdbgrid: 1});
+ checkCommandSucceeded(adminDB, {ismaster: 1});
+ checkCommandFailed(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
+ chunkKey = {
+ i: {$minKey: 1},
+ j: {$minKey: 1}
+ };
+ checkCommandFailed(
+ adminDB,
+ {moveChunk: 'test.foo', find: chunkKey, to: st.rs1.name, _waitForDelete: true});
+ }
+ };
+
+ var checkRemoveShard = function(hasWriteAuth) {
+ if (hasWriteAuth) {
+ // start draining
+ checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
+ // Wait for shard to be completely removed
+ checkRemoveShard = function() {
+ res = checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
+ return res.msg == 'removeshard completed successfully';
+ };
+ assert.soon(checkRemoveShard, "failed to remove shard");
+ } else {
+ checkCommandFailed(adminDB, {removeshard: st.rs1.name});
+ }
+ };
-st.stop();
+ var checkAddShard = function(hasWriteAuth) {
+ if (hasWriteAuth) {
+ checkCommandSucceeded(adminDB, {addshard: st.rs1.getURL()});
+ } else {
+ checkCommandFailed(adminDB, {addshard: st.rs1.getURL()});
+ }
+ };
+
+ st.stopBalancer();
+
+ jsTestLog("Checking admin commands with admin auth credentials");
+ checkAdminOps(true);
+ assert(adminDB.logout().ok);
+
+ jsTestLog("Checking admin commands with no auth credentials");
+ checkAdminOps(false);
+
+ jsTestLog("Checking commands with no auth credentials");
+ checkReadOps(false);
+ checkWriteOps(false);
+
+ // Authenticate as read-only user
+ jsTestLog("Checking commands with read-only auth credentials");
+ assert(testDB.auth(roUser, password));
+ checkReadOps(true);
+ checkWriteOps(false);
+
+ // Authenticate as read-write user
+ jsTestLog("Checking commands with read-write auth credentials");
+ assert(testDB.auth(rwUser, password));
+ checkReadOps(true);
+ checkWriteOps(true);
+
+ jsTestLog("Check draining/removing a shard");
+ assert(testDB.logout().ok);
+ checkRemoveShard(false);
+ assert(adminDB.auth(rwUser, password));
+ assert(testDB.dropDatabase().ok);
+ checkRemoveShard(true);
+ st.printShardingStatus();
+
+ jsTestLog("Check adding a shard");
+ assert(adminDB.logout().ok);
+ checkAddShard(false);
+ assert(adminDB.auth(rwUser, password));
+ checkAddShard(true);
+ st.printShardingStatus();
+
+ st.stop();
};
doTest();
diff --git a/jstests/sharding/authConnectionHook.js b/jstests/sharding/authConnectionHook.js
index 4356180107d..516b0d34554 100644
--- a/jstests/sharding/authConnectionHook.js
+++ b/jstests/sharding/authConnectionHook.js
@@ -1,7 +1,12 @@
-// Test for SERVER-8786 - if the first operation on an authenticated shard is moveChunk, it breaks the cluster.
-var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunkSize: 1, verbose : 2,
- other : { nopreallocj : 1, verbose : 2, useHostname : true,
- configOptions : { verbose : 2 }}});
+// Test for SERVER-8786 - if the first operation on an authenticated shard is moveChunk, it breaks
+// the cluster.
+var st = new ShardingTest({
+ keyFile: 'jstests/libs/key1',
+ shards: 2,
+ chunkSize: 1,
+ verbose: 2,
+ other: {nopreallocj: 1, verbose: 2, useHostname: true, configOptions: {verbose: 2}}
+});
var mongos = st.s;
var adminDB = mongos.getDB('admin');
@@ -11,18 +16,18 @@ adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles
adminDB.auth('admin', 'password');
-adminDB.runCommand({enableSharding : "test"});
+adminDB.runCommand({enableSharding: "test"});
st.ensurePrimaryShard('test', 'shard0001');
-adminDB.runCommand({shardCollection : "test.foo", key : {x : 1}});
+adminDB.runCommand({shardCollection: "test.foo", key: {x: 1}});
for (var i = 0; i < 100; i++) {
- db.foo.insert({x:i});
+ db.foo.insert({x: i});
}
-adminDB.runCommand({split: "test.foo", middle: {x:50}});
-var curShard = st.getShard("test.foo", {x:75});
+adminDB.runCommand({split: "test.foo", middle: {x: 50}});
+var curShard = st.getShard("test.foo", {x: 75});
var otherShard = st.getOther(curShard).name;
-adminDB.runCommand({moveChunk: "test.foo", find: {x:25}, to: otherShard, _waitForDelete:true});
+adminDB.runCommand({moveChunk: "test.foo", find: {x: 25}, to: otherShard, _waitForDelete: true});
st.printShardingStatus();
@@ -30,16 +35,13 @@ MongoRunner.stopMongod(st.shard0);
st.shard0 = MongoRunner.runMongod({restart: st.shard0});
// May fail the first couple times due to socket exceptions
-assert.soon( function() {
- var res = adminDB.runCommand({moveChunk: "test.foo",
- find: {x:75},
- to: otherShard});
- printjson(res);
- return res.ok;
- });
-
-
-printjson(db.foo.findOne({x:25}));
-printjson(db.foo.findOne({x:75}));
+assert.soon(function() {
+ var res = adminDB.runCommand({moveChunk: "test.foo", find: {x: 75}, to: otherShard});
+ printjson(res);
+ return res.ok;
+});
+
+printjson(db.foo.findOne({x: 25}));
+printjson(db.foo.findOne({x: 75}));
st.stop();
diff --git a/jstests/sharding/auth_add_shard.js b/jstests/sharding/auth_add_shard.js
index 8435c768c4f..4f0fec6de83 100644
--- a/jstests/sharding/auth_add_shard.js
+++ b/jstests/sharding/auth_add_shard.js
@@ -2,98 +2,102 @@
// The purpose of this test is to test authentication when adding/removing a shard. The test sets
// up a sharded system, then adds/removes a shard.
(function() {
-'use strict';
+ 'use strict';
-// login method to login into the database
-function login(userObj) {
- var authResult = mongos.getDB(userObj.db).auth(userObj.username, userObj.password);
- printjson(authResult);
-}
+ // login method to login into the database
+ function login(userObj) {
+ var authResult = mongos.getDB(userObj.db).auth(userObj.username, userObj.password);
+ printjson(authResult);
+ }
-// admin user object
-var adminUser = { db: "admin", username: "foo", password: "bar" };
+ // admin user object
+ var adminUser = {
+ db: "admin",
+ username: "foo",
+ password: "bar"
+ };
-//set up a 2 shard cluster with keyfile
-var st = new ShardingTest({ name: "auth_add_shard1", shards: 1,
- mongos: 1, keyFile: "jstests/libs/key1" });
+ // set up a 2 shard cluster with keyfile
+ var st = new ShardingTest(
+ {name: "auth_add_shard1", shards: 1, mongos: 1, keyFile: "jstests/libs/key1"});
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
-print("1 shard system setup");
+ print("1 shard system setup");
-//add the admin user
-print("adding user");
-mongos.getDB(adminUser.db).createUser({ user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles});
+ // add the admin user
+ print("adding user");
+ mongos.getDB(adminUser.db)
+ .createUser(
+ {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
-//login as admin user
-login(adminUser);
+ // login as admin user
+ login(adminUser);
-assert.eq(1, st.config.shards.count() , "initial server count wrong");
+ assert.eq(1, st.config.shards.count(), "initial server count wrong");
-//start a mongod with NO keyfile
-var conn = MongoRunner.runMongod({});
-print(conn);
+ // start a mongod with NO keyfile
+ var conn = MongoRunner.runMongod({});
+ print(conn);
-// --------------- Test 1 --------------------
-// Add shard to the existing cluster (should fail because it was added without a keyfile)
-printjson(assert.commandFailed(admin.runCommand({ addShard: conn.host })));
+ // --------------- Test 1 --------------------
+ // Add shard to the existing cluster (should fail because it was added without a keyfile)
+ printjson(assert.commandFailed(admin.runCommand({addShard: conn.host})));
-// stop mongod
-MongoRunner.stopMongod(conn);
+ // stop mongod
+ MongoRunner.stopMongod(conn);
-//--------------- Test 2 --------------------
-//start mongod again, this time with keyfile
-var conn = MongoRunner.runMongod({keyFile: "jstests/libs/key1"});
-//try adding the new shard
-assert.commandWorked(admin.runCommand({ addShard: conn.host }));
+ //--------------- Test 2 --------------------
+ // start mongod again, this time with keyfile
+ var conn = MongoRunner.runMongod({keyFile: "jstests/libs/key1"});
+ // try adding the new shard
+ assert.commandWorked(admin.runCommand({addShard: conn.host}));
-//Add some data
-var db = mongos.getDB("foo");
-var collA = mongos.getCollection("foo.bar");
+ // Add some data
+ var db = mongos.getDB("foo");
+ var collA = mongos.getCollection("foo.bar");
-// enable sharding on a collection
-assert.commandWorked(admin.runCommand({ enableSharding: "" + collA.getDB() }));
-st.ensurePrimaryShard("foo", "shard0000");
+ // enable sharding on a collection
+ assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
+ st.ensurePrimaryShard("foo", "shard0000");
-assert.commandWorked(admin.runCommand({ shardCollection: "" + collA, key: { _id: 1 } }));
+ assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));
-// add data to the sharded collection
-for (var i = 0; i < 4; i++) {
- db.bar.save({ _id: i });
- assert.commandWorked(admin.runCommand({ split: "" + collA, middle: { _id: i } }));
-}
+ // add data to the sharded collection
+ for (var i = 0; i < 4; i++) {
+ db.bar.save({_id: i});
+ assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
+ }
-// move a chunk
-assert.commandWorked(admin.runCommand({ moveChunk: "foo.bar", find: { _id: 1 }, to: "shard0001" }));
+ // move a chunk
+ assert.commandWorked(admin.runCommand({moveChunk: "foo.bar", find: {_id: 1}, to: "shard0001"}));
-//verify the chunk was moved
-admin.runCommand({ flushRouterConfig: 1 });
+ // verify the chunk was moved
+ admin.runCommand({flushRouterConfig: 1});
-var config = mongos.getDB("config");
-st.printShardingStatus(true);
+ var config = mongos.getDB("config");
+ st.printShardingStatus(true);
-// start balancer before removing the shard
-st.startBalancer();
+ // start balancer before removing the shard
+ st.startBalancer();
-//--------------- Test 3 --------------------
-// now drain the shard
-assert.commandWorked(admin.runCommand({removeShard: conn.host}));
+ //--------------- Test 3 --------------------
+ // now drain the shard
+ assert.commandWorked(admin.runCommand({removeShard: conn.host}));
-// give it some time to drain
-assert.soon(function() {
- var result = admin.runCommand({removeShard: conn.host});
- printjson(result);
+ // give it some time to drain
+ assert.soon(function() {
+ var result = admin.runCommand({removeShard: conn.host});
+ printjson(result);
- return result.ok && result.state == "completed";
-}, "failed to drain shard completely", 5 * 60 * 1000);
+ return result.ok && result.state == "completed";
+ }, "failed to drain shard completely", 5 * 60 * 1000);
-assert.eq(1, st.config.shards.count() , "removed server still appears in count");
+ assert.eq(1, st.config.shards.count(), "removed server still appears in count");
-MongoRunner.stopMongod(conn);
+ MongoRunner.stopMongod(conn);
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/auth_copydb.js b/jstests/sharding/auth_copydb.js
index 6ecb45ac201..8c73214019e 100644
--- a/jstests/sharding/auth_copydb.js
+++ b/jstests/sharding/auth_copydb.js
@@ -1,44 +1,41 @@
// Tests the copydb command on mongos with auth
var runTest = function() {
+ var st = new ShardingTest({shards: 1, mongos: 1, keyFile: "jstests/libs/key1"});
+ var mongos = st.s0;
+ var destAdminDB = mongos.getDB('admin');
+ var destTestDB = mongos.getDB('test');
-var st = new ShardingTest({ shards : 1,
- mongos : 1,
- keyFile : "jstests/libs/key1"});
-var mongos = st.s0;
-var destAdminDB = mongos.getDB('admin');
-var destTestDB = mongos.getDB('test');
+ var sourceMongodConn = MongoRunner.runMongod({});
+ var sourceTestDB = sourceMongodConn.getDB('test');
-var sourceMongodConn = MongoRunner.runMongod({});
-var sourceTestDB = sourceMongodConn.getDB('test');
+ sourceTestDB.foo.insert({a: 1});
-sourceTestDB.foo.insert({a:1});
+ destAdminDB.createUser({
+ user: 'admin',
+ pwd: 'password',
+ roles: jsTest.adminUserRoles
+ }); // Turns on access control enforcement
-destAdminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles}); // Turns on access control enforcement
+ jsTestLog("Running copydb that should fail");
+ var res = destAdminDB.runCommand(
+ {copydb: 1, fromhost: sourceMongodConn.host, fromdb: 'test', todb: 'test'});
+ printjson(res);
+ assert.commandFailed(res);
-jsTestLog("Running copydb that should fail");
-var res = destAdminDB.runCommand({copydb:1,
- fromhost: sourceMongodConn.host,
- fromdb:'test',
- todb:'test'});
-printjson(res);
-assert.commandFailed(res);
+ destAdminDB.auth('admin', 'password');
+ assert.eq(0, destTestDB.foo.count()); // Be extra sure the copydb didn't secretly succeed.
-destAdminDB.auth('admin', 'password');
-assert.eq(0, destTestDB.foo.count()); // Be extra sure the copydb didn't secretly succeed.
+ jsTestLog("Running copydb that should succeed");
+ res = destAdminDB.runCommand(
+ {copydb: 1, fromhost: sourceMongodConn.host, fromdb: 'test', todb: 'test'});
+ printjson(res);
+ assert.commandWorked(res);
-jsTestLog("Running copydb that should succeed");
-res = destAdminDB.runCommand({copydb:1,
- fromhost: sourceMongodConn.host,
- fromdb:'test',
- todb:'test'});
-printjson(res);
-assert.commandWorked(res);
+ assert.eq(1, destTestDB.foo.count());
+ assert.eq(1, destTestDB.foo.findOne().a);
-assert.eq(1, destTestDB.foo.count());
-assert.eq(1, destTestDB.foo.findOne().a);
-
-st.stop();
+ st.stop();
};
diff --git a/jstests/sharding/auth_repl.js b/jstests/sharding/auth_repl.js
index 568cbc4a5ac..9e1ddb06873 100644
--- a/jstests/sharding/auth_repl.js
+++ b/jstests/sharding/auth_repl.js
@@ -1,5 +1,5 @@
-var replTest = new ReplSetTest({ nodes: 3, useHostName : false, keyFile: 'jstests/libs/key1' });
-replTest.startSet({ oplogSize: 10 });
+var replTest = new ReplSetTest({nodes: 3, useHostName: false, keyFile: 'jstests/libs/key1'});
+replTest.startSet({oplogSize: 10});
replTest.initiate();
replTest.awaitSecondaryNodes();
@@ -33,7 +33,7 @@ priTestDB.createUser({user: 'a', pwd: 'a', roles: jsTest.basicUserRoles},
assert.eq(1, testDB.auth('a', 'a'));
jsTest.log('Sending an authorized query that should be ok');
-assert.writeOK(testColl.insert({ x: 1 }, { writeConcern: { w: nodeCount }}));
+assert.writeOK(testColl.insert({x: 1}, {writeConcern: {w: nodeCount}}));
conn.setSlaveOk(true);
doc = testColl.findOne();
@@ -114,10 +114,10 @@ for (var x = 0; x < nodeCount; x++) {
}
}
-assert(secNodeIdx >= 0); // test sanity check
+assert(secNodeIdx >= 0); // test sanity check
// Kill the cached secondary
-replTest.stop(secNodeIdx, 15, { auth: { user: 'user', pwd: 'user' }});
+replTest.stop(secNodeIdx, 15, {auth: {user: 'user', pwd: 'user'}});
assert(testDB.logout().ok);
@@ -129,4 +129,3 @@ queryToSecShouldFail();
queryToPriShouldFail();
replTest.stopSet();
-
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index 599aed242b5..a01314fe405 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -18,25 +18,27 @@
*
* @return {boolean} true if query was routed to a secondary node.
*/
-function doesRouteToSec( coll, query ) {
- var explain = coll.find( query ).explain();
+function doesRouteToSec(coll, query) {
+ var explain = coll.find(query).explain();
assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
- var conn = new Mongo( serverInfo.host + ":" + serverInfo.port.toString());
- var cmdRes = conn.getDB( 'admin' ).runCommand({ isMaster: 1 });
+ var conn = new Mongo(serverInfo.host + ":" + serverInfo.port.toString());
+ var cmdRes = conn.getDB('admin').runCommand({isMaster: 1});
jsTest.log('isMaster: ' + tojson(cmdRes));
return cmdRes.secondary;
}
-var rsOpts = { oplogSize: 50 };
-var st = new ShardingTest({ keyFile: 'jstests/libs/key1', shards: 1,
- rs: rsOpts, other: { nopreallocj: 1 }});
+var rsOpts = {
+ oplogSize: 50
+};
+var st = new ShardingTest(
+ {keyFile: 'jstests/libs/key1', shards: 1, rs: rsOpts, other: {nopreallocj: 1}});
var mongos = st.s;
var replTest = st.rs0;
-var testDB = mongos.getDB( 'AAAAA' );
+var testDB = mongos.getDB('AAAAA');
var coll = testDB.user;
var nodeCount = replTest.nodes.length;
@@ -45,69 +47,65 @@ var nodeCount = replTest.nodes.length;
* connections to access the server from localhost connections if there
* is no admin user.
*/
-var adminDB = mongos.getDB( 'admin' );
+var adminDB = mongos.getDB('admin');
adminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles});
-adminDB.auth( 'user', 'password' );
-var priAdminDB = replTest.getPrimary().getDB( 'admin' );
-priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles},
+adminDB.auth('user', 'password');
+var priAdminDB = replTest.getPrimary().getDB('admin');
+priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles},
{w: 3, wtimeout: 30000});
coll.drop();
-coll.setSlaveOk( true );
+coll.setSlaveOk(true);
/* Secondaries should be up here, but they can still be in RECOVERY
* state, which will make the ReplicaSetMonitor mark them as
* ok = false and not eligible for slaveOk queries.
*/
-ReplSetTest.awaitRSClientHosts( mongos, replTest.getSecondaries(),
- { ok : true, secondary : true });
+ReplSetTest.awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
var bulk = coll.initializeUnorderedBulkOp();
-for ( var x = 0; x < 20; x++ ) {
- bulk.insert({ v: x, k: 10 });
+for (var x = 0; x < 20; x++) {
+ bulk.insert({v: x, k: 10});
}
-assert.writeOK(bulk.execute({ w: nodeCount }));
+assert.writeOK(bulk.execute({w: nodeCount}));
/* Although mongos never caches query results, try to do a different query
* everytime just to be sure.
*/
var vToFind = 0;
-jsTest.log( 'First query to SEC' );
-assert( doesRouteToSec( coll, { v: vToFind++ }));
+jsTest.log('First query to SEC');
+assert(doesRouteToSec(coll, {v: vToFind++}));
var SIG_TERM = 15;
-replTest.stopSet( SIG_TERM, true, { auth: { user: 'user', pwd: 'password' }});
+replTest.stopSet(SIG_TERM, true, {auth: {user: 'user', pwd: 'password'}});
-for ( var n = 0; n < nodeCount; n++ ) {
- replTest.restart( n, rsOpts );
+for (var n = 0; n < nodeCount; n++) {
+ replTest.restart(n, rsOpts);
}
replTest.awaitSecondaryNodes();
-coll.setSlaveOk( true );
+coll.setSlaveOk(true);
/* replSetMonitor does not refresh the nodes information when getting secondaries.
* A node that is previously labeled as secondary can now be a primary, so we
* wait for the replSetMonitorWatcher thread to refresh the nodes information.
*/
-ReplSetTest.awaitRSClientHosts( mongos, replTest.getSecondaries(),
- { ok : true, secondary : true });
+ReplSetTest.awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
//
-// We also need to wait for the primary, it's possible that the mongos may think a node is a
+// We also need to wait for the primary, it's possible that the mongos may think a node is a
// secondary but it actually changed to a primary before we send our final query.
//
-ReplSetTest.awaitRSClientHosts( mongos, replTest.getPrimary(),
- { ok : true, ismaster : true });
+ReplSetTest.awaitRSClientHosts(mongos, replTest.getPrimary(), {ok: true, ismaster: true});
// Recheck if we can still query secondaries after refreshing connections.
-jsTest.log( 'Final query to SEC' );
-assert( doesRouteToSec( coll, { v: vToFind++ }));
+jsTest.log('Final query to SEC');
+assert(doesRouteToSec(coll, {v: vToFind++}));
// Cleanup auth so Windows will be able to shutdown gracefully
-priAdminDB = replTest.getPrimary().getDB( 'admin' );
-priAdminDB.auth( 'user', 'password' );
-priAdminDB.dropUser( 'user' );
+priAdminDB = replTest.getPrimary().getDB('admin');
+priAdminDB.auth('user', 'password');
+priAdminDB.dropUser('user');
st.stop();
-
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index c827b4948b3..6484c729474 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -3,118 +3,117 @@
(function() {
-//
-// User document declarations. All users in this test are added to the admin database.
-//
-
-var adminUser = {
- user: "admin",
- pwd: "a",
- roles: [ "readWriteAnyDatabase",
- "dbAdminAnyDatabase",
- "userAdminAnyDatabase",
- "clusterAdmin" ]
-};
-
-var test1User = {
- user: "test",
- pwd: "a",
- roles: [{role: 'readWrite', db: 'test1', hasRole: true, canDelegate: false}]
-};
-
-function assertRemove(collection, pattern) {
- assert.writeOK(collection.remove(pattern));
-}
-
-function assertInsert(collection, obj) {
- assert.writeOK(collection.insert(obj));
-}
-
-var cluster = new ShardingTest({ name: "authmr",
- shards: 1,
- mongos: 1,
- other: {
- extraOptions: { keyFile: "jstests/libs/key1" }
- }
- });
-
-// Set up the test data.
-(function() {
- var adminDB = cluster.getDB('admin');
- var test1DB = adminDB.getSiblingDB('test1');
- var test2DB = adminDB.getSiblingDB('test2');
- var ex;
- try {
- adminDB.createUser(adminUser);
- assert(adminDB.auth(adminUser.user, adminUser.pwd));
-
- adminDB.dropUser(test1User.user);
- adminDB.createUser(test1User);
-
- assertInsert(test1DB.foo, { a: 1 });
- assertInsert(test1DB.foo, { a: 2 });
- assertInsert(test1DB.foo, { a: 3 });
- assertInsert(test1DB.foo, { a: 4 });
- assertInsert(test2DB.foo, { x: 1 });
- }
- finally {
- adminDB.logout();
- }
-}());
-
-assert.throws(function() {
- var adminDB = cluster.getDB('admin');
- var test1DB;
- var test2DB;
- assert(adminDB.auth(test1User.user, test1User.pwd));
- try {
- test1DB = adminDB.getSiblingDB("test1");
- test2DB = adminDB.getSiblingDB("test2");
-
- // Sanity check. test1User can count (read) test1, but not test2.
- assert.eq(test1DB.foo.count(), 4);
- assert.throws(test2DB.foo.count);
-
- test1DB.foo.mapReduce(
- function () {
- emit(0, this.a);
- var t2 = new Mongo().getDB("test2");
- t2.ad.insert(this);
- },
- function (k, vs) {
- var t2 = new Mongo().getDB("test2");
- t2.reductio.insert(this);
-
- return Array.sum(vs);
- },
- { out: "bar",
- finalize: function (k, v) {
- for (k in this) {
- if (this.hasOwnProperty(k))
- print(k + "=" + v);
- }
- var t2 = new Mongo().getDB("test2");
- t2.absurdum.insert({ key: k, value: v });
- }
- });
+ //
+ // User document declarations. All users in this test are added to the admin database.
+ //
+
+ var adminUser = {
+ user: "admin",
+ pwd: "a",
+ roles: [
+ "readWriteAnyDatabase",
+ "dbAdminAnyDatabase",
+ "userAdminAnyDatabase",
+ "clusterAdmin"
+ ]
+ };
+
+ var test1User = {
+ user: "test",
+ pwd: "a",
+ roles: [{role: 'readWrite', db: 'test1', hasRole: true, canDelegate: false}]
+ };
+
+ function assertRemove(collection, pattern) {
+ assert.writeOK(collection.remove(pattern));
}
- finally {
- adminDB.logout();
- }
-});
-(function() {
- var adminDB = cluster.getDB('admin');
- assert(adminDB.auth(adminUser.user, adminUser.pwd));
- try {
- var test2DB = cluster.getDB('test2');
- assert.eq(test2DB.reductio.count(), 0, "reductio");
- assert.eq(test2DB.ad.count(), 0, "ad");
- assert.eq(test2DB.absurdum.count(), 0, "absurdum");
- }
- finally {
- adminDB.logout();
+ function assertInsert(collection, obj) {
+ assert.writeOK(collection.insert(obj));
}
-}());
+
+ var cluster = new ShardingTest({
+ name: "authmr",
+ shards: 1,
+ mongos: 1,
+ other: {extraOptions: {keyFile: "jstests/libs/key1"}}
+ });
+
+ // Set up the test data.
+ (function() {
+ var adminDB = cluster.getDB('admin');
+ var test1DB = adminDB.getSiblingDB('test1');
+ var test2DB = adminDB.getSiblingDB('test2');
+ var ex;
+ try {
+ adminDB.createUser(adminUser);
+ assert(adminDB.auth(adminUser.user, adminUser.pwd));
+
+ adminDB.dropUser(test1User.user);
+ adminDB.createUser(test1User);
+
+ assertInsert(test1DB.foo, {a: 1});
+ assertInsert(test1DB.foo, {a: 2});
+ assertInsert(test1DB.foo, {a: 3});
+ assertInsert(test1DB.foo, {a: 4});
+ assertInsert(test2DB.foo, {x: 1});
+ } finally {
+ adminDB.logout();
+ }
+ }());
+
+ assert.throws(function() {
+ var adminDB = cluster.getDB('admin');
+ var test1DB;
+ var test2DB;
+ assert(adminDB.auth(test1User.user, test1User.pwd));
+ try {
+ test1DB = adminDB.getSiblingDB("test1");
+ test2DB = adminDB.getSiblingDB("test2");
+
+ // Sanity check. test1User can count (read) test1, but not test2.
+ assert.eq(test1DB.foo.count(), 4);
+ assert.throws(test2DB.foo.count);
+
+ test1DB.foo.mapReduce(
+ function() {
+ emit(0, this.a);
+ var t2 = new Mongo().getDB("test2");
+ t2.ad.insert(this);
+ },
+ function(k, vs) {
+ var t2 = new Mongo().getDB("test2");
+ t2.reductio.insert(this);
+
+ return Array.sum(vs);
+ },
+ {
+ out: "bar",
+ finalize: function(k, v) {
+ for (k in this) {
+ if (this.hasOwnProperty(k))
+ print(k + "=" + v);
+ }
+ var t2 = new Mongo().getDB("test2");
+ t2.absurdum.insert({key: k, value: v});
+ }
+ });
+ } finally {
+ adminDB.logout();
+ }
+ });
+
+ (function() {
+ var adminDB = cluster.getDB('admin');
+ assert(adminDB.auth(adminUser.user, adminUser.pwd));
+ try {
+ var test2DB = cluster.getDB('test2');
+ assert.eq(test2DB.reductio.count(), 0, "reductio");
+ assert.eq(test2DB.ad.count(), 0, "ad");
+ assert.eq(test2DB.absurdum.count(), 0, "absurdum");
+ } finally {
+ adminDB.logout();
+ }
+ }());
})();
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index 3d3d0d8a605..df27078784b 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -3,85 +3,84 @@
(function() {
-//
-// User document declarations. All users in this test are added to the admin database.
-//
+ //
+ // User document declarations. All users in this test are added to the admin database.
+ //
-var adminUser = {
- user: "admin",
- pwd: "a",
- roles: [ "readWriteAnyDatabase",
- "dbAdminAnyDatabase",
- "userAdminAnyDatabase",
- "clusterAdmin" ]
-};
+ var adminUser = {
+ user: "admin",
+ pwd: "a",
+ roles: [
+ "readWriteAnyDatabase",
+ "dbAdminAnyDatabase",
+ "userAdminAnyDatabase",
+ "clusterAdmin"
+ ]
+ };
-var test1Reader = {
- user: "test",
- pwd: "a",
- roles: [{role: 'read', db: 'test1', hasRole:true, canDelegate: false}]
-};
+ var test1Reader = {
+ user: "test",
+ pwd: "a",
+ roles: [{role: 'read', db: 'test1', hasRole: true, canDelegate: false}]
+ };
-function assertRemove(collection, pattern) {
- assert.writeOK(collection.remove(pattern));
-}
+ function assertRemove(collection, pattern) {
+ assert.writeOK(collection.remove(pattern));
+ }
-function assertInsert(collection, obj) {
- assert.writeOK(collection.insert(obj));
-}
+ function assertInsert(collection, obj) {
+ assert.writeOK(collection.insert(obj));
+ }
-var cluster = new ShardingTest({ name: "authwhere",
- shards: 1,
- mongos: 1,
- other: {
- extraOptions: { keyFile: "jstests/libs/key1" }
- }
- });
+ var cluster = new ShardingTest({
+ name: "authwhere",
+ shards: 1,
+ mongos: 1,
+ other: {extraOptions: {keyFile: "jstests/libs/key1"}}
+ });
-// Set up the test data.
-(function() {
- var adminDB = cluster.getDB('admin');
- var test1DB = adminDB.getSiblingDB('test1');
- var test2DB = adminDB.getSiblingDB('test2');
- var ex;
- try {
- adminDB.createUser(adminUser);
- assert(adminDB.auth(adminUser.user, adminUser.pwd));
+ // Set up the test data.
+ (function() {
+ var adminDB = cluster.getDB('admin');
+ var test1DB = adminDB.getSiblingDB('test1');
+ var test2DB = adminDB.getSiblingDB('test2');
+ var ex;
+ try {
+ adminDB.createUser(adminUser);
+ assert(adminDB.auth(adminUser.user, adminUser.pwd));
- adminDB.dropUser(test1Reader.user);
- adminDB.createUser(test1Reader);
+ adminDB.dropUser(test1Reader.user);
+ adminDB.createUser(test1Reader);
- assertInsert(test1DB.foo, { a: 1 });
- assertInsert(test2DB.foo, { x: 1 });
- }
- finally {
- adminDB.logout();
- }
-}());
+ assertInsert(test1DB.foo, {a: 1});
+ assertInsert(test2DB.foo, {x: 1});
+ } finally {
+ adminDB.logout();
+ }
+ }());
-(function() {
- var adminDB = cluster.getDB('admin');
- var test1DB;
- var test2DB;
- assert(adminDB.auth(test1Reader.user, test1Reader.pwd));
- try {
- test1DB = adminDB.getSiblingDB("test1");
- test2DB = adminDB.getSiblingDB("test2");
+ (function() {
+ var adminDB = cluster.getDB('admin');
+ var test1DB;
+ var test2DB;
+ assert(adminDB.auth(test1Reader.user, test1Reader.pwd));
+ try {
+ test1DB = adminDB.getSiblingDB("test1");
+ test2DB = adminDB.getSiblingDB("test2");
- // Sanity check. test1Reader can count (read) test1, but not test2.
- assert.eq(test1DB.foo.count(), 1);
- assert.throws(test2DB.foo.count);
+ // Sanity check. test1Reader can count (read) test1, but not test2.
+ assert.eq(test1DB.foo.count(), 1);
+ assert.throws(test2DB.foo.count);
- // Cannot examine second database from a where clause.
- assert.throws(test1DB.foo.count, ["db.getSiblingDB('test2').foo.count() == 1"]);
+ // Cannot examine second database from a where clause.
+ assert.throws(test1DB.foo.count, ["db.getSiblingDB('test2').foo.count() == 1"]);
- // Cannot write test1 via tricky where clause.
- assert.throws(test1DB.foo.count, ["db.foo.insert({b: 1})"]);
- assert.eq(test1DB.foo.count(), 1);
- }
- finally {
- adminDB.logout();
- }
-}());
+ // Cannot write test1 via tricky where clause.
+ assert.throws(test1DB.foo.count, ["db.foo.insert({b: 1})"]);
+ assert.eq(test1DB.foo.count(), 1);
+ } finally {
+ adminDB.logout();
+ }
+ }());
})();
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
index 214e5aae1ed..3fbcef78e82 100644
--- a/jstests/sharding/auto1.js
+++ b/jstests/sharding/auto1.js
@@ -1,80 +1,79 @@
(function() {
-var s = new ShardingTest({ name: "auto1",
- shards: 2,
- mongos: 1,
- other: { enableBalancer : 1 } });
+ var s = new ShardingTest({name: "auto1", shards: 2, mongos: 1, other: {enableBalancer: 1}});
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-bigString = "";
-while ( bigString.length < 1024 * 50 )
- bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+ bigString = "";
+ while (bigString.length < 1024 * 50)
+ bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
-db = s.getDB( "test" );
-coll = db.foo;
+ db = s.getDB("test");
+ coll = db.foo;
-var i=0;
+ var i = 0;
-var bulk = coll.initializeUnorderedBulkOp();
-for ( ; i<100; i++ ){
- bulk.insert( { num : i , s : bigString } );
-}
-assert.writeOK( bulk.execute() );
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (; i < 100; i++) {
+ bulk.insert({num: i, s: bigString});
+ }
+ assert.writeOK(bulk.execute());
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
+ primary = s.getPrimaryShard("test").getDB("test");
-counts = [];
+ counts = [];
-s.printChunks();
-counts.push( s.config.chunks.count() );
-assert.eq(100, db.foo.find().itcount());
+ s.printChunks();
+ counts.push(s.config.chunks.count());
+ assert.eq(100, db.foo.find().itcount());
-print( "datasize: " + tojson( s.getPrimaryShard( "test" ).getDB( "admin" )
- .runCommand( { datasize : "test.foo" } ) ) );
+ print("datasize: " +
+ tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
-bulk = coll.initializeUnorderedBulkOp();
-for ( ; i<200; i++ ){
- bulk.insert( { num : i , s : bigString } );
-}
-assert.writeOK( bulk.execute() );
+ bulk = coll.initializeUnorderedBulkOp();
+ for (; i < 200; i++) {
+ bulk.insert({num: i, s: bigString});
+ }
+ assert.writeOK(bulk.execute());
-s.printChunks();
-s.printChangeLog();
-counts.push( s.config.chunks.count() );
+ s.printChunks();
+ s.printChangeLog();
+ counts.push(s.config.chunks.count());
-bulk = coll.initializeUnorderedBulkOp();
-for ( ; i<400; i++ ){
- bulk.insert( { num : i , s : bigString } );
-}
-assert.writeOK( bulk.execute() );
+ bulk = coll.initializeUnorderedBulkOp();
+ for (; i < 400; i++) {
+ bulk.insert({num: i, s: bigString});
+ }
+ assert.writeOK(bulk.execute());
-s.printChunks();
-s.printChangeLog();
-counts.push( s.config.chunks.count() );
+ s.printChunks();
+ s.printChangeLog();
+ counts.push(s.config.chunks.count());
-bulk = coll.initializeUnorderedBulkOp();
-for ( ; i<700; i++ ){
- bulk.insert( { num : i , s : bigString } );
-}
-assert.writeOK( bulk.execute() );
+ bulk = coll.initializeUnorderedBulkOp();
+ for (; i < 700; i++) {
+ bulk.insert({num: i, s: bigString});
+ }
+ assert.writeOK(bulk.execute());
-s.printChunks();
-s.printChangeLog();
-counts.push( s.config.chunks.count() );
+ s.printChunks();
+ s.printChangeLog();
+ counts.push(s.config.chunks.count());
-assert( counts[counts.length-1] > counts[0] , "counts 1 : " + tojson( counts ) );
-sorted = counts.slice(0);
-// Sort doesn't sort numbers correctly by default, resulting in fail
-sorted.sort( function(a, b){ return a - b; } );
-assert.eq( counts , sorted , "counts 2 : " + tojson( counts ) );
+ assert(counts[counts.length - 1] > counts[0], "counts 1 : " + tojson(counts));
+ sorted = counts.slice(0);
+ // Sort doesn't sort numbers correctly by default, resulting in fail
+ sorted.sort(function(a, b) {
+ return a - b;
+ });
+ assert.eq(counts, sorted, "counts 2 : " + tojson(counts));
-print( counts );
+ print(counts);
-printjson( db.stats() );
+ printjson(db.stats());
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
index 81f0c1f17ea..3d21559f8d6 100644
--- a/jstests/sharding/auto2.js
+++ b/jstests/sharding/auto2.js
@@ -1,151 +1,152 @@
(function() {
-var s = new ShardingTest({ name: "auto2",
- shards: 2,
- mongos: 2 });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
-
-var bigString = "";
-while (bigString.length < 1024 * 50) {
- bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
-}
-
-var db = s.getDB("test" );
-var coll = db.foo;
-
-var i = 0;
-for (var j = 0; j < 30; j++) {
- print("j:" + j + " : " +
- Date.timeFunc(function() {
- var bulk = coll.initializeUnorderedBulkOp();
- for (var k = 0; k < 100; k++) {
- bulk.insert({ num : i, s : bigString });
- i++;
- }
- assert.writeOK(bulk.execute());
- }));
-}
+ var s = new ShardingTest({name: "auto2", shards: 2, mongos: 2});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-s.startBalancer();
+ var bigString = "";
+ while (bigString.length < 1024 * 50) {
+ bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+ }
-assert.eq( i , j * 100 , "setup" );
+ var db = s.getDB("test");
+ var coll = db.foo;
+
+ var i = 0;
+ for (var j = 0; j < 30; j++) {
+ print("j:" + j + " : " +
+ Date.timeFunc(function() {
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var k = 0; k < 100; k++) {
+ bulk.insert({num: i, s: bigString});
+ i++;
+ }
+ assert.writeOK(bulk.execute());
+ }));
+ }
-// Until SERVER-9715 is fixed, the sync command must be run on a diff connection
-new Mongo(s.s.host).adminCommand("connpoolsync");
+ s.startBalancer();
-print("done inserting data" );
+ assert.eq(i, j * 100, "setup");
-print("datasize: " + tojson( s.getPrimaryShard("test" ).getDB("admin" )
- .runCommand( { datasize : "test.foo" } ) ) );
-s.printChunks();
+ // Until SERVER-9715 is fixed, the sync command must be run on a diff connection
+ new Mongo(s.s.host).adminCommand("connpoolsync");
-function doCountsGlobal(){
- counta = s._connections[0].getDB("test" ).foo.count();
- countb = s._connections[1].getDB("test" ).foo.count();
- return counta + countb;
-}
+ print("done inserting data");
-// Wait for the chunks to distribute
-assert.soon( function(){
- doCountsGlobal();
- print("Counts: " + counta + countb);
+ print("datasize: " +
+ tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
+ s.printChunks();
- return counta > 0 && countb > 0;
-});
+ function doCountsGlobal() {
+ counta = s._connections[0].getDB("test").foo.count();
+ countb = s._connections[1].getDB("test").foo.count();
+ return counta + countb;
+ }
+ // Wait for the chunks to distribute
+ assert.soon(function() {
+ doCountsGlobal();
+ print("Counts: " + counta + countb);
-print("checkpoint B" );
+ return counta > 0 && countb > 0;
+ });
-var missing = [];
+ print("checkpoint B");
-for ( i=0; i<j*100; i++ ){
- var x = coll.findOne( { num : i } );
- if ( ! x ){
- missing.push( i );
- print("can't find: " + i );
- sleep( 5000 );
- x = coll.findOne( { num : i } );
- if ( ! x ){
- print("still can't find: " + i );
+ var missing = [];
- for ( var zzz=0; zzz<s._connections.length; zzz++ ){
- if ( s._connections[zzz].getDB("test" ).foo.findOne( { num : i } ) ){
- print("found on wrong server: " + s._connections[zzz] );
+ for (i = 0; i < j * 100; i++) {
+ var x = coll.findOne({num: i});
+ if (!x) {
+ missing.push(i);
+ print("can't find: " + i);
+ sleep(5000);
+ x = coll.findOne({num: i});
+ if (!x) {
+ print("still can't find: " + i);
+
+ for (var zzz = 0; zzz < s._connections.length; zzz++) {
+ if (s._connections[zzz].getDB("test").foo.findOne({num: i})) {
+ print("found on wrong server: " + s._connections[zzz]);
+ }
}
}
-
}
}
-}
-
-s.printChangeLog();
-
-print("missing: " + tojson( missing ) );
-assert.soon( function(z){ return doCountsGlobal() == j * 100; } , "from each a:" + counta + " b:" + countb + " i:" + i );
-print("checkpoint B.a" );
-s.printChunks();
-assert.eq( j * 100 , coll.find().limit(100000000).itcount() , "itcount A" );
-assert.eq( j * 100 , counta + countb , "from each 2 a:" + counta + " b:" + countb + " i:" + i );
-assert( missing.length == 0 , "missing : " + tojson( missing ) );
-
-print("checkpoint C" );
-
-assert( Array.unique( s.config.chunks.find().toArray().map( function(z){ return z.shard; } ) ).length == 2 , "should be using both servers" );
-
-for ( i=0; i<100; i++ ){
- cursor = coll.find().batchSize(5);
- cursor.next();
- cursor = null;
- gc();
-}
-
-print("checkpoint D");
-
-// test not-sharded cursors
-db = s.getDB("test2" );
-t = db.foobar;
-for ( i =0; i<100; i++ )
- t.save( { _id : i } );
-for ( i=0; i<100; i++ ){
- t.find().batchSize( 2 ).next();
- assert.lt(0 , db.serverStatus().metrics.cursor.open.total, "cursor1");
- gc();
-}
-
-for ( i=0; i<100; i++ ){
- gc();
-}
-assert.eq(0, db.serverStatus().metrics.cursor.open.total, "cursor2");
-
-// Stop the balancer, otherwise it may grab some connections from the pool for itself
-s.stopBalancer();
-
-print("checkpoint E");
-
-assert( t.findOne() , "check close 0" );
-
-for (i = 0; i < 20; i++) {
- var conn = new Mongo( db.getMongo().host );
- temp2 = conn.getDB("test2" ).foobar;
- assert.eq( conn._fullNameSpace , t._fullNameSpace , "check close 1" );
- assert( temp2.findOne() , "check close 2" );
- conn = null;
- gc();
-}
-
-print("checkpoint F");
-
-assert.throws(function() {
- s.getDB("test" ).foo.find().sort({ s : 1 }).forEach(function(x) {
- printjsononeline(x.substring(0, x.length > 30 ? 30 : x.length));
+
+ s.printChangeLog();
+
+ print("missing: " + tojson(missing));
+ assert.soon(function(z) {
+ return doCountsGlobal() == j * 100;
+ }, "from each a:" + counta + " b:" + countb + " i:" + i);
+ print("checkpoint B.a");
+ s.printChunks();
+ assert.eq(j * 100, coll.find().limit(100000000).itcount(), "itcount A");
+ assert.eq(j * 100, counta + countb, "from each 2 a:" + counta + " b:" + countb + " i:" + i);
+ assert(missing.length == 0, "missing : " + tojson(missing));
+
+ print("checkpoint C");
+
+ assert(Array.unique(s.config.chunks.find().toArray().map(function(z) {
+ return z.shard;
+ })).length == 2,
+ "should be using both servers");
+
+ for (i = 0; i < 100; i++) {
+ cursor = coll.find().batchSize(5);
+ cursor.next();
+ cursor = null;
+ gc();
+ }
+
+ print("checkpoint D");
+
+ // test not-sharded cursors
+ db = s.getDB("test2");
+ t = db.foobar;
+ for (i = 0; i < 100; i++)
+ t.save({_id: i});
+ for (i = 0; i < 100; i++) {
+ t.find().batchSize(2).next();
+ assert.lt(0, db.serverStatus().metrics.cursor.open.total, "cursor1");
+ gc();
+ }
+
+ for (i = 0; i < 100; i++) {
+ gc();
+ }
+ assert.eq(0, db.serverStatus().metrics.cursor.open.total, "cursor2");
+
+ // Stop the balancer, otherwise it may grab some connections from the pool for itself
+ s.stopBalancer();
+
+ print("checkpoint E");
+
+ assert(t.findOne(), "check close 0");
+
+ for (i = 0; i < 20; i++) {
+ var conn = new Mongo(db.getMongo().host);
+ temp2 = conn.getDB("test2").foobar;
+ assert.eq(conn._fullNameSpace, t._fullNameSpace, "check close 1");
+ assert(temp2.findOne(), "check close 2");
+ conn = null;
+ gc();
+ }
+
+ print("checkpoint F");
+
+ assert.throws(function() {
+ s.getDB("test").foo.find().sort({s: 1}).forEach(function(x) {
+ printjsononeline(x.substring(0, x.length > 30 ? 30 : x.length));
+ });
});
-});
-print("checkpoint G");
+ print("checkpoint G");
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/auto_rebalance.js b/jstests/sharding/auto_rebalance.js
index f994404ccc5..826e979d68b 100644
--- a/jstests/sharding/auto_rebalance.js
+++ b/jstests/sharding/auto_rebalance.js
@@ -2,56 +2,46 @@
// shards
(function() {
-'use strict';
-
-var st = new ShardingTest({ name: 'auto_rebalance_rs',
- mongos: 1,
- shards: 2,
- chunksize: 1,
- rs: {
- nodes: 3
- }
- });
-
-assert.writeOK(st.getDB( "config" ).settings.update(
- { _id: "balancer" },
- { $set: { "_secondaryThrottle" : false } },
- { upsert: true }));
-
-st.getDB("admin").runCommand({enableSharding : "TestDB_auto_rebalance_rs"});
-st.getDB("admin").runCommand({shardCollection : "TestDB_auto_rebalance_rs.foo", key : {x : 1}});
-
-var dbTest = st.getDB("TestDB_auto_rebalance_rs");
-
-var num = 100000;
-var bulk = dbTest.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < num; i++) {
- bulk.insert({ _id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market" });
-}
-assert.writeOK(bulk.execute());
-
-// Wait for the rebalancing to kick in
-st.startBalancer(60000);
-
-assert.soon(function() {
- var s1Chunks = st.getDB("config").chunks.count({shard : "auto_rebalance_rs-rs0"});
- var s2Chunks = st.getDB("config").chunks.count({shard : "auto_rebalance_rs-rs1"});
- var total = st.getDB("config").chunks.count({ns : "TestDB_auto_rebalance_rs.foo"});
-
- print("chunks: " + s1Chunks + " " + s2Chunks + " " + total);
-
- return s1Chunks > 0 && s2Chunks > 0 && (s1Chunks + s2Chunks == total);
- },
- "Chunks failed to balance",
- 60000,
- 5000);
-
-// Ensure the range deleter quiesces
-st.rs0.awaitReplication(120000);
-st.rs1.awaitReplication(120000);
-
-// TODO: mongod only exits with MongoRunner.EXIT_ABRUPT in sharding_legacy_op_query_WT
-// this should be fixed by SERVER-22176
-st.stop({ allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+ 'use strict';
+
+ var st = new ShardingTest(
+ {name: 'auto_rebalance_rs', mongos: 1, shards: 2, chunksize: 1, rs: {nodes: 3}});
+
+ assert.writeOK(st.getDB("config").settings.update(
+ {_id: "balancer"}, {$set: {"_secondaryThrottle": false}}, {upsert: true}));
+
+ st.getDB("admin").runCommand({enableSharding: "TestDB_auto_rebalance_rs"});
+ st.getDB("admin").runCommand({shardCollection: "TestDB_auto_rebalance_rs.foo", key: {x: 1}});
+
+ var dbTest = st.getDB("TestDB_auto_rebalance_rs");
+
+ var num = 100000;
+ var bulk = dbTest.foo.initializeUnorderedBulkOp();
+ for (var i = 0; i < num; i++) {
+ bulk.insert(
+ {_id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market"});
+ }
+ assert.writeOK(bulk.execute());
+
+ // Wait for the rebalancing to kick in
+ st.startBalancer(60000);
+
+ assert.soon(function() {
+ var s1Chunks = st.getDB("config").chunks.count({shard: "auto_rebalance_rs-rs0"});
+ var s2Chunks = st.getDB("config").chunks.count({shard: "auto_rebalance_rs-rs1"});
+ var total = st.getDB("config").chunks.count({ns: "TestDB_auto_rebalance_rs.foo"});
+
+ print("chunks: " + s1Chunks + " " + s2Chunks + " " + total);
+
+ return s1Chunks > 0 && s2Chunks > 0 && (s1Chunks + s2Chunks == total);
+ }, "Chunks failed to balance", 60000, 5000);
+
+ // Ensure the range deleter quiesces
+ st.rs0.awaitReplication(120000);
+ st.rs1.awaitReplication(120000);
+
+ // TODO: mongod only exits with MongoRunner.EXIT_ABRUPT in sharding_legacy_op_query_WT
+ // this should be fixed by SERVER-22176
+ st.stop({allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
})();
diff --git a/jstests/sharding/autodiscover_config_rs_from_secondary.js b/jstests/sharding/autodiscover_config_rs_from_secondary.js
index d0c4e84d8bd..5d7aa1f7dca 100644
--- a/jstests/sharding/autodiscover_config_rs_from_secondary.js
+++ b/jstests/sharding/autodiscover_config_rs_from_secondary.js
@@ -3,44 +3,44 @@
(function() {
-'use strict';
-
-var rst = new ReplSetTest({name : "configRS",
- nodes: 3,
- nodeOptions: {configsvr: "", storageEngine: "wiredTiger"}});
-rst.startSet();
-var conf = rst.getReplSetConfig();
-conf.members[1].priority = 0;
-conf.members[2].priority = 0;
-rst.initiate(conf);
-
-var seedList = rst.name + "/" + rst.nodes[1].host; // node 1 is guaranteed to not be primary
-{
- // Ensure that mongos can start up when given the CSRS secondary, discover the primary, and
- // perform writes to the config servers.
+ 'use strict';
+
+ var rst = new ReplSetTest(
+ {name: "configRS", nodes: 3, nodeOptions: {configsvr: "", storageEngine: "wiredTiger"}});
+ rst.startSet();
+ var conf = rst.getReplSetConfig();
+ conf.members[1].priority = 0;
+ conf.members[2].priority = 0;
+ rst.initiate(conf);
+
+ var seedList = rst.name + "/" + rst.nodes[1].host; // node 1 is guaranteed to not be primary
+ {
+ // Ensure that mongos can start up when given the CSRS secondary, discover the primary, and
+ // perform writes to the config servers.
+ var mongos = MongoRunner.runMongos({configdb: seedList});
+ var admin = mongos.getDB('admin');
+ assert.writeOK(admin.foo.insert({a: 1}));
+ assert.eq(1, admin.foo.findOne().a);
+ MongoRunner.stopMongos(mongos);
+ }
+
+ // Wait for replication to all config server replica set members to ensure that mongos
+ // doesn't read from a stale config server when trying to verify if the initial cluster metadata
+ // has been properly written.
+ rst.awaitReplication();
+ // Now take down the one electable node
+ rst.stop(0);
+ rst.awaitNoPrimary();
+
+ // Start a mongos when there is no primary
var mongos = MongoRunner.runMongos({configdb: seedList});
+ // Take down the one node the mongos knew about to ensure that it autodiscovered the one
+ // remaining
+ // config server
+ rst.stop(1);
+
var admin = mongos.getDB('admin');
- assert.writeOK(admin.foo.insert({a:1}));
+ mongos.setSlaveOk(true);
assert.eq(1, admin.foo.findOne().a);
- MongoRunner.stopMongos(mongos);
-}
-
-// Wait for replication to all config server replica set members to ensure that mongos
-// doesn't read from a stale config server when trying to verify if the initial cluster metadata
-// has been properly written.
-rst.awaitReplication();
-// Now take down the one electable node
-rst.stop(0);
-rst.awaitNoPrimary();
-
-// Start a mongos when there is no primary
-var mongos = MongoRunner.runMongos({configdb: seedList});
-// Take down the one node the mongos knew about to ensure that it autodiscovered the one remaining
-// config server
-rst.stop(1);
-
-var admin = mongos.getDB('admin');
-mongos.setSlaveOk(true);
-assert.eq(1, admin.foo.findOne().a);
})();
diff --git a/jstests/sharding/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
index cb5e1260cb9..c4d415ce0de 100644
--- a/jstests/sharding/autosplit_heuristics.js
+++ b/jstests/sharding/autosplit_heuristics.js
@@ -3,83 +3,81 @@
// works as expected even after splitting.
//
-var st = new ShardingTest({ shards : 1,
- mongos : 1,
- other : { mongosOptions : { chunkSize : 1, verbose : 2 }}});
+var st =
+ new ShardingTest({shards: 1, mongos: 1, other: {mongosOptions: {chunkSize: 1, verbose: 2}}});
-// The balancer is by default stopped, thus it will NOT interfere unpredictably with the chunk
+// The balancer is by default stopped, thus it will NOT interfere unpredictably with the chunk
// moves/splits depending on the timing.
// Test is not valid for debug build, heuristics get all mangled by debug reload behavior
-var isDebugBuild = st.s0.getDB( "admin" ).serverBuildInfo().debug;
+var isDebugBuild = st.s0.getDB("admin").serverBuildInfo().debug;
-if ( !isDebugBuild ) {
+if (!isDebugBuild) {
+ var mongos = st.s0;
+ var config = mongos.getDB("config");
+ var admin = mongos.getDB("admin");
+ var coll = mongos.getCollection("foo.hashBar");
-var mongos = st.s0;
-var config = mongos.getDB("config");
-var admin = mongos.getDB("admin");
-var coll = mongos.getCollection("foo.hashBar");
+ printjson(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ printjson(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-printjson(admin.runCommand({ enableSharding : coll.getDB() + "" }));
-printjson(admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }));
+ var numChunks = 10;
-var numChunks = 10;
+ // Split off the low and high chunks, to get non-special-case behavior
+ printjson(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+ printjson(admin.runCommand({split: coll + "", middle: {_id: numChunks + 1}}));
-// Split off the low and high chunks, to get non-special-case behavior
-printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) );
-printjson( admin.runCommand({ split : coll + "", middle : { _id : numChunks + 1 } }) );
+ // Split all the other chunks, and an extra chunk
+ // We need the extra chunk to compensate for the fact that the chunk differ resets the highest
+ // chunk's (i.e. the last-split-chunk's) data count on reload.
+ for (var i = 1; i < numChunks + 1; i++) {
+ printjson(admin.runCommand({split: coll + "", middle: {_id: i}}));
+ }
-// Split all the other chunks, and an extra chunk
-// We need the extra chunk to compensate for the fact that the chunk differ resets the highest
-// chunk's (i.e. the last-split-chunk's) data count on reload.
-for (var i = 1; i < numChunks + 1; i++) {
- printjson( admin.runCommand({ split : coll + "", middle : { _id : i } }) );
-}
-
-jsTest.log("Setup collection...");
-st.printShardingStatus(true);
+ jsTest.log("Setup collection...");
+ st.printShardingStatus(true);
-var approxSize = Object.bsonsize({ _id : 0.0 });
+ var approxSize = Object.bsonsize({_id: 0.0});
-jsTest.log("Starting inserts of approx size: " + approxSize + "...");
+ jsTest.log("Starting inserts of approx size: " + approxSize + "...");
-var chunkSizeBytes = 1024 * 1024;
+ var chunkSizeBytes = 1024 * 1024;
-// We insert slightly more than the max number of docs per chunk, to test
-// if resetting the chunk size happens during reloads. If the size is
-// reset, we'd expect to split less, since the first split would then
-// disable further splits (statistically, since the decision is randomized).
-// We choose 1.4 since split attempts happen about once every 1/5 chunksize,
-// and we want to be sure we def get a split attempt at a full chunk.
-var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize);
-var totalInserts = insertsForSplit * numChunks;
+ // We insert slightly more than the max number of docs per chunk, to test
+ // if resetting the chunk size happens during reloads. If the size is
+ // reset, we'd expect to split less, since the first split would then
+ // disable further splits (statistically, since the decision is randomized).
+ // We choose 1.4 since split attempts happen about once every 1/5 chunksize,
+ // and we want to be sure we def get a split attempt at a full chunk.
+ var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize);
+ var totalInserts = insertsForSplit * numChunks;
-printjson({ chunkSizeBytes : chunkSizeBytes,
- insertsForSplit : insertsForSplit,
- totalInserts : totalInserts });
+ printjson({
+ chunkSizeBytes: chunkSizeBytes,
+ insertsForSplit: insertsForSplit,
+ totalInserts: totalInserts
+ });
-// Insert enough docs to trigger splits into all chunks
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < totalInserts; i++) {
- bulk.insert({ _id : i % numChunks + (i / totalInserts) });
-}
-assert.writeOK(bulk.execute());
+ // Insert enough docs to trigger splits into all chunks
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < totalInserts; i++) {
+ bulk.insert({_id: i % numChunks + (i / totalInserts)});
+ }
+ assert.writeOK(bulk.execute());
-jsTest.log("Inserts completed...");
+ jsTest.log("Inserts completed...");
-st.printShardingStatus(true);
-printjson(coll.stats());
+ st.printShardingStatus(true);
+ printjson(coll.stats());
-// Check that all chunks (except the two extreme chunks)
-// have been split at least once + 1 extra chunk as reload buffer
-assert.gte(config.chunks.count(), numChunks * 2 + 3);
+ // Check that all chunks (except the two extreme chunks)
+ // have been split at least once + 1 extra chunk as reload buffer
+ assert.gte(config.chunks.count(), numChunks * 2 + 3);
-jsTest.log("DONE!");
+ jsTest.log("DONE!");
-}
-else {
- jsTest.log( "Disabled test in debug builds." );
+} else {
+ jsTest.log("Disabled test in debug builds.");
}
st.stop();
-
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index 85d029fce72..433e8167829 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -3,61 +3,59 @@
//
(function() {
-"use strict";
-
-// The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
-// from stepping down during migrations on slow evergreen builders.
-var s = new ShardingTest({ shards: 2,
- other: {
- chunkSize: 1,
- rs0: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- },
- rs1: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- }
- } });
-
-var db = s.getDB( "test" );
-var bulk = db.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < 2100; i++) {
- bulk.insert({ _id: i, x: i });
-}
-assert.writeOK(bulk.execute());
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'test-rs0');
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-
-for ( i=0; i<20; i++ )
- s.adminCommand( { split : "test.foo" , middle : { _id : i * 100 } } );
-
-assert.eq( 2100, db.foo.find().itcount() );
-var coll = db.foo;
-coll.setSlaveOk();
-
-var dbPrimaryShardId = s.getPrimaryShardIdForDatabase( "test" );
-var other = s.config.shards.findOne( { _id : { $ne : dbPrimaryShardId } } );
-
-for ( i=0; i<20; i++ ) {
- // Needs to waitForDelete because we'll be performing a slaveOk query,
- // and secondaries don't have a chunk manager so it doesn't know how to
- // filter out docs it doesn't own.
- assert(s.adminCommand({ moveChunk: "test.foo",
- find: { _id: i * 100 },
- to : other._id,
- _secondaryThrottle: true,
- writeConcern: { w: 2 },
- _waitForDelete: true }));
- assert.eq( 2100, coll.find().itcount() );
-}
-
-s.stop();
+ "use strict";
+
+ // The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
+ // from stepping down during migrations on slow evergreen builders.
+ var s = new ShardingTest({
+ shards: 2,
+ other: {
+ chunkSize: 1,
+ rs0: {
+ nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ },
+ rs1: {
+ nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ }
+ }
+ });
+
+ var db = s.getDB("test");
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (var i = 0; i < 2100; i++) {
+ bulk.insert({_id: i, x: i});
+ }
+ assert.writeOK(bulk.execute());
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'test-rs0');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+
+ for (i = 0; i < 20; i++)
+ s.adminCommand({split: "test.foo", middle: {_id: i * 100}});
+
+ assert.eq(2100, db.foo.find().itcount());
+ var coll = db.foo;
+ coll.setSlaveOk();
+
+ var dbPrimaryShardId = s.getPrimaryShardIdForDatabase("test");
+ var other = s.config.shards.findOne({_id: {$ne: dbPrimaryShardId}});
+
+ for (i = 0; i < 20; i++) {
+ // Needs to waitForDelete because we'll be performing a slaveOk query,
+ // and secondaries don't have a chunk manager so it doesn't know how to
+ // filter out docs it doesn't own.
+ assert(s.adminCommand({
+ moveChunk: "test.foo",
+ find: {_id: i * 100},
+ to: other._id,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ _waitForDelete: true
+ }));
+ assert.eq(2100, coll.find().itcount());
+ }
+
+ s.stop();
}());
diff --git a/jstests/sharding/balance_tags1.js b/jstests/sharding/balance_tags1.js
index 19d55bb0270..c1177ac4661 100644
--- a/jstests/sharding/balance_tags1.js
+++ b/jstests/sharding/balance_tags1.js
@@ -1,27 +1,24 @@
// Test balancing all chunks off of one shard
-var st = new ShardingTest({ name: "balance_tags1",
- shards: 3,
- mongos: 1,
- other: { chunkSize: 1,
- enableBalancer : true } });
+var st = new ShardingTest(
+ {name: "balance_tags1", shards: 3, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-st.adminCommand({ enablesharding: "test" });
+st.adminCommand({enablesharding: "test"});
st.ensurePrimaryShard('test', 'shard0001');
var db = st.getDB("test");
var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < 21; i++) {
- bulk.insert({ _id: i, x: i });
+ bulk.insert({_id: i, x: i});
}
assert.writeOK(bulk.execute());
-assert.commandWorked(st.s.adminCommand({ shardCollection: 'test.foo', key: { _id : 1 } }));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
st.stopBalancer();
for (i = 0; i < 20; i++) {
- st.adminCommand({ split : "test.foo", middle : { _id : i } });
+ st.adminCommand({split: "test.foo", middle: {_id: i}});
}
st.startBalancer();
@@ -30,39 +27,32 @@ st.printShardingStatus();
// Wait for the initial balance to happen
assert.soon(function() {
- var counts = st.chunkCounts("foo");
- printjson(counts);
- return counts["shard0000"] == 7 &&
- counts["shard0001"] == 7 &&
- counts["shard0002"] == 7;
- },
- "balance 1 didn't happen",
- 1000 * 60 * 10,
- 1000);
+ var counts = st.chunkCounts("foo");
+ printjson(counts);
+ return counts["shard0000"] == 7 && counts["shard0001"] == 7 && counts["shard0002"] == 7;
+}, "balance 1 didn't happen", 1000 * 60 * 10, 1000);
// Quick test of some shell helpers and setting up state
sh.addShardTag("shard0000", "a");
-assert.eq([ "a" ] , st.config.shards.findOne({ _id : "shard0000" }).tags);
+assert.eq(["a"], st.config.shards.findOne({_id: "shard0000"}).tags);
sh.addShardTag("shard0000", "b");
-assert.eq([ "a" , "b" ], st.config.shards.findOne({ _id : "shard0000" }).tags);
+assert.eq(["a", "b"], st.config.shards.findOne({_id: "shard0000"}).tags);
sh.removeShardTag("shard0000", "b");
-assert.eq([ "a" ], st.config.shards.findOne( { _id : "shard0000" } ).tags);
+assert.eq(["a"], st.config.shards.findOne({_id: "shard0000"}).tags);
-sh.addShardTag("shard0001" , "a");
-sh.addTagRange("test.foo" , { _id : -1 } , { _id : 1000 } , "a");
+sh.addShardTag("shard0001", "a");
+sh.addTagRange("test.foo", {_id: -1}, {_id: 1000}, "a");
st.printShardingStatus();
// At this point, everything should drain off shard 2, which does not have the tag
assert.soon(function() {
- var counts = st.chunkCounts("foo");
- printjson(counts);
- return counts["shard0002"] == 0;
- },
- "balance 2 didn't happen",
- 1000 * 60 * 10 , 1000);
+ var counts = st.chunkCounts("foo");
+ printjson(counts);
+ return counts["shard0002"] == 0;
+}, "balance 2 didn't happen", 1000 * 60 * 10, 1000);
st.printShardingStatus();
diff --git a/jstests/sharding/balance_tags2.js b/jstests/sharding/balance_tags2.js
index 0bcedf97e33..8c54b2f3fc6 100644
--- a/jstests/sharding/balance_tags2.js
+++ b/jstests/sharding/balance_tags2.js
@@ -1,27 +1,24 @@
// Test balancing all chunks to one shard by tagging the full shard-key range on that collection
-var s = new ShardingTest({ name: "balance_tags2",
- shards: 3,
- mongos: 1,
- other: { chunkSize: 1,
- enableBalancer : true } });
+var s = new ShardingTest(
+ {name: "balance_tags2", shards: 3, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-s.adminCommand({ enablesharding: "test" });
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
var db = s.getDB("test");
var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < 21; i++) {
- bulk.insert({ _id: i, x: i });
+ bulk.insert({_id: i, x: i});
}
assert.writeOK(bulk.execute());
-sh.shardCollection("test.foo", { _id : 1 });
+sh.shardCollection("test.foo", {_id: 1});
sh.stopBalancer();
for (i = 0; i < 20; i++) {
- sh.splitAt("test.foo", {_id : i});
+ sh.splitAt("test.foo", {_id: i});
}
sh.startBalancer();
@@ -30,35 +27,26 @@ s.printShardingStatus(true);
// Wait for the initial balance to happen
assert.soon(function() {
- var counts = s.chunkCounts("foo");
- printjson(counts);
- return counts["shard0000"] == 7 &&
- counts["shard0001"] == 7 &&
- counts["shard0002"] == 7;
- },
- "balance 1 didn't happen",
- 1000 * 60 * 10,
- 1000);
+ var counts = s.chunkCounts("foo");
+ printjson(counts);
+ return counts["shard0000"] == 7 && counts["shard0001"] == 7 && counts["shard0002"] == 7;
+}, "balance 1 didn't happen", 1000 * 60 * 10, 1000);
// Tag one shard
-sh.addShardTag("shard0000" , "a");
-assert.eq([ "a" ] , s.config.shards.findOne({ _id : "shard0000" }).tags);
+sh.addShardTag("shard0000", "a");
+assert.eq(["a"], s.config.shards.findOne({_id: "shard0000"}).tags);
// Tag the whole collection (ns) to one shard
-sh.addTagRange("test.foo", { _id : MinKey }, { _id : MaxKey }, "a");
+sh.addTagRange("test.foo", {_id: MinKey}, {_id: MaxKey}, "a");
// Wait for things to move to that one shard
s.printShardingStatus(true);
assert.soon(function() {
- var counts = s.chunkCounts("foo");
- printjson(counts);
- return counts["shard0001"] == 0 &&
- counts["shard0002"] == 0;
- },
- "balance 2 didn't happen",
- 1000 * 60 * 10,
- 1000);
+ var counts = s.chunkCounts("foo");
+ printjson(counts);
+ return counts["shard0001"] == 0 && counts["shard0002"] == 0;
+}, "balance 2 didn't happen", 1000 * 60 * 10, 1000);
s.printShardingStatus(true);
diff --git a/jstests/sharding/basic_drop_coll.js b/jstests/sharding/basic_drop_coll.js
index af9008cfcac..568d2da9443 100644
--- a/jstests/sharding/basic_drop_coll.js
+++ b/jstests/sharding/basic_drop_coll.js
@@ -3,52 +3,51 @@
* cleanuped up properly.
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards: 2 });
+ var st = new ShardingTest({shards: 2});
-var testDB = st.s.getDB('test');
+ var testDB = st.s.getDB('test');
-// Test dropping an unsharded collection.
+ // Test dropping an unsharded collection.
-assert.writeOK(testDB.bar.insert({ x: 1 }));
-assert.neq(null, testDB.bar.findOne({ x: 1 }));
+ assert.writeOK(testDB.bar.insert({x: 1}));
+ assert.neq(null, testDB.bar.findOne({x: 1}));
-assert.commandWorked(testDB.runCommand({ drop: 'bar' }));
-assert.eq(null, testDB.bar.findOne({ x: 1 }));
+ assert.commandWorked(testDB.runCommand({drop: 'bar'}));
+ assert.eq(null, testDB.bar.findOne({x: 1}));
-// Test dropping a sharded collection.
+ // Test dropping a sharded collection.
-assert.commandWorked(st.s.adminCommand({ enableSharding: 'test' }));
-st.ensurePrimaryShard('test', 'shard0000');
-st.s.adminCommand({ shardCollection: 'test.user', key: { _id: 1 }});
-st.s.adminCommand({ split: 'test.user', middle: { _id: 0 }});
-assert.commandWorked(st.s.adminCommand({ moveChunk: 'test.user',
- find: { _id: 0 },
- to: 'shard0001' }));
+ assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0000');
+ st.s.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
+ st.s.adminCommand({split: 'test.user', middle: {_id: 0}});
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: 'shard0001'}));
-assert.writeOK(testDB.user.insert({ _id: 10 }));
-assert.writeOK(testDB.user.insert({ _id: -10 }));
+ assert.writeOK(testDB.user.insert({_id: 10}));
+ assert.writeOK(testDB.user.insert({_id: -10}));
-assert.neq(null, st.d0.getDB('test').user.findOne({ _id: -10 }));
-assert.neq(null, st.d1.getDB('test').user.findOne({ _id: 10 }));
+ assert.neq(null, st.d0.getDB('test').user.findOne({_id: -10}));
+ assert.neq(null, st.d1.getDB('test').user.findOne({_id: 10}));
-var configDB = st.s.getDB('config');
-var collDoc = configDB.collections.findOne({ _id: 'test.user' });
-assert(!collDoc.dropped);
+ var configDB = st.s.getDB('config');
+ var collDoc = configDB.collections.findOne({_id: 'test.user'});
+ assert(!collDoc.dropped);
-assert.eq(2, configDB.chunks.count({ ns: 'test.user' }));
+ assert.eq(2, configDB.chunks.count({ns: 'test.user'}));
-assert.commandWorked(testDB.runCommand({ drop: 'user' }));
+ assert.commandWorked(testDB.runCommand({drop: 'user'}));
-assert.eq(null, st.d0.getDB('test').user.findOne());
-assert.eq(null, st.d1.getDB('test').user.findOne());
+ assert.eq(null, st.d0.getDB('test').user.findOne());
+ assert.eq(null, st.d1.getDB('test').user.findOne());
-collDoc = configDB.collections.findOne({ _id: 'test.user' });
-assert(collDoc.dropped);
+ collDoc = configDB.collections.findOne({_id: 'test.user'});
+ assert(collDoc.dropped);
-assert.eq(0, configDB.chunks.count({ ns: 'test.user' }));
+ assert.eq(0, configDB.chunks.count({ns: 'test.user'}));
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/basic_sharding_params.js b/jstests/sharding/basic_sharding_params.js
index 7b23666e702..f21c4d17784 100644
--- a/jstests/sharding/basic_sharding_params.js
+++ b/jstests/sharding/basic_sharding_params.js
@@ -1,31 +1,27 @@
// Test of complex sharding initialization
function shardingTestUsingObjects() {
- var st = new ShardingTest( {
-
- mongos : { s0 : { verbose : 6 }, s1 : { verbose : 5 } },
- config : { c0 : { verbose : 4 } },
- shards : { d0 : { verbose : 3 },
- rs1 : {
- nodes : { d0 : { verbose : 2 },
- a1 : { verbose : 1 } } }
- }
- } );
+ var st = new ShardingTest({
+
+ mongos: {s0: {verbose: 6}, s1: {verbose: 5}},
+ config: {c0: {verbose: 4}},
+ shards: {d0: {verbose: 3}, rs1: {nodes: {d0: {verbose: 2}, a1: {verbose: 1}}}}
+ });
var s0 = st.s0;
- assert.eq( s0, st._mongos[0] );
+ assert.eq(s0, st._mongos[0]);
var s1 = st.s1;
- assert.eq( s1, st._mongos[1] );
+ assert.eq(s1, st._mongos[1]);
var c0 = st.c0;
- assert.eq( c0, st._configServers[0] );
+ assert.eq(c0, st._configServers[0]);
var d0 = st.d0;
- assert.eq( d0, st._connections[0] );
+ assert.eq(d0, st._connections[0]);
var rs1 = st.rs1;
- assert.eq( rs1, st._rsObjects[1] );
+ assert.eq(rs1, st._rsObjects[1]);
var rs1_d0 = rs1.nodes[0];
var rs1_a1 = rs1.nodes[1];
@@ -41,26 +37,26 @@ function shardingTestUsingObjects() {
}
function shardingTestUsingArrays() {
- var st = new ShardingTest( {
- mongos : [{ verbose : 5 }, { verbose : 4 } ],
- config : [{ verbose : 3 }],
- shards : [{ verbose : 2 }, { verbose : 1 } ]
+ var st = new ShardingTest({
+ mongos: [{verbose: 5}, {verbose: 4}],
+ config: [{verbose: 3}],
+ shards: [{verbose: 2}, {verbose: 1}]
});
var s0 = st.s0;
- assert.eq( s0, st._mongos[0] );
+ assert.eq(s0, st._mongos[0]);
var s1 = st.s1;
- assert.eq( s1, st._mongos[1] );
+ assert.eq(s1, st._mongos[1]);
var c0 = st.c0;
- assert.eq( c0, st._configServers[0] );
+ assert.eq(c0, st._configServers[0]);
var d0 = st.d0;
- assert.eq( d0, st._connections[0] );
+ assert.eq(d0, st._connections[0]);
var d1 = st.d1;
- assert.eq( d1, st._connections[1] );
+ assert.eq(d1, st._connections[1]);
assert(s0.commandLine.hasOwnProperty("vvvvv"));
assert(s1.commandLine.hasOwnProperty("vvvv"));
@@ -73,4 +69,3 @@ function shardingTestUsingArrays() {
shardingTestUsingObjects();
shardingTestUsingArrays();
-
diff --git a/jstests/sharding/basic_split.js b/jstests/sharding/basic_split.js
index 0c5f7e0e416..8ddff04007c 100644
--- a/jstests/sharding/basic_split.js
+++ b/jstests/sharding/basic_split.js
@@ -2,91 +2,92 @@
* Perform basic tests for the split command against mongos.
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards: 1, other: { chunkSize: 1 }});
-var configDB = st.s.getDB('config');
+ var st = new ShardingTest({shards: 1, other: {chunkSize: 1}});
+ var configDB = st.s.getDB('config');
-// split on invalid ns.
-assert.commandFailed(configDB.adminCommand({ split: 'user', key: { _id: 1 }}));
+ // split on invalid ns.
+ assert.commandFailed(configDB.adminCommand({split: 'user', key: {_id: 1}}));
-// split on unsharded collection (db is not sharding enabled).
-assert.commandFailed(configDB.adminCommand({ split: 'test.user', key: { _id: 1 }}));
+ // split on unsharded collection (db is not sharding enabled).
+ assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}));
-configDB.adminCommand({ enableSharding: 'test' });
+ configDB.adminCommand({enableSharding: 'test'});
-// split on unsharded collection (db is sharding enabled).
-assert.commandFailed(configDB.adminCommand({ split: 'test.user', key: { _id: 1 }}));
+ // split on unsharded collection (db is sharding enabled).
+ assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}));
-assert.commandWorked(configDB.adminCommand({ shardCollection: 'test.user', key: { _id: 1 }}));
+ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
-assert.eq(null, configDB.chunks.findOne({ ns: 'test.user', min: { _id: 0 }}));
+ assert.eq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
-assert.commandWorked(configDB.adminCommand({ split: 'test.user', middle: { _id: 0 }}));
-assert.neq(null, configDB.chunks.findOne({ ns: 'test.user', min: { _id: 0 }}));
+ assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
+ assert.neq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
-// Cannot split on existing chunk boundary.
-assert.commandFailed(configDB.adminCommand({ split: 'test.user', middle: { _id: 0 }}));
+ // Cannot split on existing chunk boundary.
+ assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
-// Attempt to split on a value that is not the shard key.
-assert.commandFailed(configDB.adminCommand({ split: 'test.user', middle: { x: 100 }}));
-assert.commandFailed(configDB.adminCommand({ split: 'test.user', find: { x: 100 }}));
-assert.commandFailed(configDB.adminCommand({ split: 'test.user',
- bounds: [{ x: MinKey }, { x: MaxKey }]}));
+ // Attempt to split on a value that is not the shard key.
+ assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {x: 100}}));
+ assert.commandFailed(configDB.adminCommand({split: 'test.user', find: {x: 100}}));
+ assert.commandFailed(
+ configDB.adminCommand({split: 'test.user', bounds: [{x: MinKey}, {x: MaxKey}]}));
-// Insert documents large enough to fill up a chunk, but do it directly in the shard in order
-// to bypass the auto-split logic.
-var kiloDoc = new Array(1024).join('x');
-var testDB = st.d0.getDB('test');
-var bulk = testDB.user.initializeUnorderedBulkOp();
-for (var x = -1200; x < 1200; x++) {
- bulk.insert({ _id: x, val: kiloDoc });
-}
-assert.writeOK(bulk.execute());
+ // Insert documents large enough to fill up a chunk, but do it directly in the shard in order
+ // to bypass the auto-split logic.
+ var kiloDoc = new Array(1024).join('x');
+ var testDB = st.d0.getDB('test');
+ var bulk = testDB.user.initializeUnorderedBulkOp();
+ for (var x = -1200; x < 1200; x++) {
+ bulk.insert({_id: x, val: kiloDoc});
+ }
+ assert.writeOK(bulk.execute());
-assert.eq(1, configDB.chunks.find({ ns: 'test.user', min: { $gte: { _id: 0 }}}).itcount());
+ assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
-// Errors if bounds do not correspond to existing chunk boundaries.
-assert.commandFailed(configDB.adminCommand({ split: 'test.user',
- bounds: [{ _id: 0 }, { _id: 1000 }]}));
-assert.eq(1, configDB.chunks.find({ ns: 'test.user', min: { $gte: { _id: 0 }}}).itcount());
+ // Errors if bounds do not correspond to existing chunk boundaries.
+ assert.commandFailed(
+ configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: 1000}]}));
+ assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
-assert.commandWorked(configDB.adminCommand({ split: 'test.user',
- bounds: [{ _id: 0 }, { _id: MaxKey }]}));
-assert.gt(configDB.chunks.find({ ns: 'test.user', min: { $gte: { _id: 0 }}}).itcount(), 1);
+ assert.commandWorked(
+ configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: MaxKey}]}));
+ assert.gt(configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount(), 1);
-assert.eq(1, configDB.chunks.find({ ns: 'test.user', min: { $lt: { _id: 0 }}}).itcount());
-assert.commandWorked(configDB.adminCommand({ split: 'test.user', find: { _id: -1 }}));
-assert.gt(configDB.chunks.find({ ns: 'test.user', min: { $lt: { _id: 0 }}}).itcount(), 1);
+ assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount());
+ assert.commandWorked(configDB.adminCommand({split: 'test.user', find: {_id: -1}}));
+ assert.gt(configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount(), 1);
-//
-// Compound Key
-//
+ //
+ // Compound Key
+ //
-assert.commandWorked(configDB.adminCommand({ shardCollection: 'test.compound', key: { x: 1, y: 1 }}));
+ assert.commandWorked(
+ configDB.adminCommand({shardCollection: 'test.compound', key: {x: 1, y: 1}}));
-assert.eq(null, configDB.chunks.findOne({ ns: 'test.compound', min: { x: 0, y: 0 }}));
-assert.commandWorked(configDB.adminCommand({ split: 'test.compound', middle: { x: 0, y: 0 }}));
-assert.neq(null, configDB.chunks.findOne({ ns: 'test.compound', min: { x: 0, y: 0 }}));
+ assert.eq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
+ assert.commandWorked(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
+ assert.neq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
-// cannot split on existing chunk boundary.
-assert.commandFailed(configDB.adminCommand({ split: 'test.compound', middle: { x: 0, y: 0 }}));
+ // cannot split on existing chunk boundary.
+ assert.commandFailed(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
-bulk = testDB.compound.initializeUnorderedBulkOp();
-for (x = -1200; x < 1200; x++) {
- bulk.insert({ x: x, y: x, val: kiloDoc });
-}
-assert.writeOK(bulk.execute());
+ bulk = testDB.compound.initializeUnorderedBulkOp();
+ for (x = -1200; x < 1200; x++) {
+ bulk.insert({x: x, y: x, val: kiloDoc});
+ }
+ assert.writeOK(bulk.execute());
-assert.eq(1, configDB.chunks.find({ ns: 'test.compound', min: { $gte: { x: 0, y: 0 }}}).itcount());
-assert.commandWorked(configDB.adminCommand({ split: 'test.compound',
- bounds: [{ x: 0, y: 0 }, { x: MaxKey, y: MaxKey }]}));
-assert.gt(configDB.chunks.find({ ns: 'test.compound', min: { $gte: { x: 0, y: 0 }}}).itcount(), 1);
+ assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount());
+ assert.commandWorked(configDB.adminCommand(
+ {split: 'test.compound', bounds: [{x: 0, y: 0}, {x: MaxKey, y: MaxKey}]}));
+ assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount(), 1);
-assert.eq(1, configDB.chunks.find({ ns: 'test.compound', min: { $lt: { x: 0, y: 0 }}}).itcount());
-assert.commandWorked(configDB.adminCommand({ split: 'test.compound', find: { x: -1, y: -1 }}));
-assert.gt(configDB.chunks.find({ ns: 'test.compound', min: { $lt: { x: 0, y: 0 }}}).itcount(), 1);
+ assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount());
+ assert.commandWorked(configDB.adminCommand({split: 'test.compound', find: {x: -1, y: -1}}));
+ assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount(), 1);
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js
index 2b88228477b..c82035af8c6 100644
--- a/jstests/sharding/batch_write_command_sharded.js
+++ b/jstests/sharding/batch_write_command_sharded.js
@@ -4,247 +4,239 @@
// *only* mongos-specific tests.
//
(function() {
-"use strict";
-
-// Only reason for using localhost name is to make the test consistent with naming host so it
-// will be easier to check for the host name inside error objects.
-var options = {useHostname: false};
-var st = new ShardingTest({shards: 2, mongos: 1, config: 3, other: options});
-st.stopBalancer();
-
-var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
-var shards = config.shards.find().toArray();
-var configConnStr = st._configDB;
-
-jsTest.log("Starting sharding batch write tests...");
-
-var request;
-var result;
-
-// NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
-
-//
-//
-// Mongos _id autogeneration tests for sharded collections
-
-var coll = mongos.getCollection("foo.bar");
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB().toString() }));
-st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-assert.commandWorked(admin.runCommand({ shardCollection : coll.toString(),
- key : { _id : 1 } }));
-
-//
-// Basic insert no _id
-coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{ a : 1 }] } );
-printjson( result = coll.runCommand(request) );
-assert(result.ok);
-assert.eq(1, result.n);
-assert.eq(1, coll.count());
-
-//
-// Multi insert some _ids
-coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{ _id : 0, a : 1 }, { a : 2 }] } );
-printjson( result = coll.runCommand(request) );
-assert(result.ok);
-assert.eq(2, result.n);
-assert.eq(2, coll.count());
-assert.eq(1, coll.count({ _id : 0 }));
-
-//
-// Ensure generating many _ids don't push us over limits
-var maxDocSize = (16 * 1024 * 1024) / 1000;
-var baseDocSize = Object.bsonsize({ a : 1, data : "" });
-var dataSize = maxDocSize - baseDocSize;
-
-var data = "";
-for (var i = 0; i < dataSize; i++)
- data += "x";
-
-var documents = [];
-for (var i = 0; i < 1000; i++) documents.push({ a : i, data : data });
-
-assert.commandWorked(coll.getMongo().getDB("admin").runCommand({ setParameter : 1, logLevel : 4 }));
-coll.remove({});
-request = { insert : coll.getName(),
- documents: documents };
-printjson( result = coll.runCommand(request) );
-assert(result.ok);
-assert.eq(1000, result.n);
-assert.eq(1000, coll.count());
-
-//
-//
-// Config server upserts (against admin db, for example) require _id test
-var adminColl = admin.getCollection(coll.getName());
-
-//
-// Without _id
-adminColl.remove({});
-printjson( request = {update : adminColl.getName(),
- updates : [{ q : { a : 1 }, u : { a : 1 }, upsert : true }]});
-var result = adminColl.runCommand(request);
-assert.commandWorked(result);
-assert.eq(1, result.n);
-assert.eq(1, adminColl.count());
-
-//
-// With _id
-adminColl.remove({});
-printjson( request = {update : adminColl.getName(),
- updates : [{ q : { _id : 1, a : 1 }, u : { a : 1 }, upsert : true }]});
-assert.commandWorked(adminColl.runCommand(request));
-assert.eq(1, result.n);
-assert.eq(1, adminColl.count());
-
-//
-//
-// Stale config progress tests
-// Set up a new collection across two shards, then revert the chunks to an earlier state to put
-// mongos and mongod permanently out of sync.
-
-// START SETUP
-var brokenColl = mongos.getCollection( "broken.coll" );
-assert.commandWorked(admin.runCommand({ enableSharding : brokenColl.getDB().toString() }));
-printjson(admin.runCommand({ movePrimary : brokenColl.getDB().toString(), to : shards[0]._id }));
-assert.commandWorked(admin.runCommand({ shardCollection : brokenColl.toString(),
- key : { _id : 1 } }));
-assert.commandWorked(admin.runCommand({ split : brokenColl.toString(),
- middle : { _id : 0 } }));
-
-var oldChunks = config.chunks.find().toArray();
-
-// Start a new mongos and bring it up-to-date with the chunks so far
-
-var staleMongos = MongoRunner.runMongos({ configdb : configConnStr });
-brokenColl = staleMongos.getCollection(brokenColl.toString());
-assert.writeOK(brokenColl.insert({ hello : "world" }));
-
-// Modify the chunks to make shards at a higher version
-
-assert.commandWorked(admin.runCommand({ moveChunk : brokenColl.toString(),
- find : { _id : 0 },
- to : shards[1]._id }));
-
-// Rewrite the old chunks back to the config server
-
-assert.writeOK(config.chunks.remove({}));
-for ( var i = 0; i < oldChunks.length; i++ )
- assert.writeOK(config.chunks.insert(oldChunks[i]));
-
-// Stale mongos can no longer bring itself up-to-date!
-// END SETUP
-
-//
-// Config server insert, repeatedly stale
-printjson( request = {insert : brokenColl.getName(),
- documents: [{_id:-1}]} );
-printjson( result = brokenColl.runCommand(request) );
-assert(result.ok);
-assert.eq(0, result.n);
-assert.eq(1, result.writeErrors.length);
-assert.eq(0, result.writeErrors[0].index);
-assert.eq(result.writeErrors[0].code, 82); // No Progress Made
-
-//
-// Config server insert to other shard, repeatedly stale
-printjson( request = {insert : brokenColl.getName(),
- documents: [{_id:1}]} );
-printjson( result = brokenColl.runCommand(request) );
-assert(result.ok);
-assert.eq(0, result.n);
-assert.eq(1, result.writeErrors.length);
-assert.eq(0, result.writeErrors[0].index);
-assert.eq(result.writeErrors[0].code, 82); // No Progress Made
-
-//
-//
-// Tests against config server
-var configColl = config.getCollection( "batch_write_protocol_sharded" );
-
-//
-// Basic config server insert
-configColl.remove({});
-printjson( request = {insert : configColl.getName(),
- documents: [{a:1}]} );
-var result = configColl.runCommand(request);
-assert.commandWorked(result);
-assert.eq(1, result.n);
-
-st.configRS.awaitReplication();
-assert.eq(1, st.config0.getCollection(configColl + "").count());
-assert.eq(1, st.config1.getCollection(configColl + "").count());
-assert.eq(1, st.config2.getCollection(configColl + "").count());
-
-//
-// Basic config server update
-configColl.remove({});
-configColl.insert({a:1});
-printjson( request = {update : configColl.getName(),
- updates: [{q: {a:1}, u: {$set: {b:2}}}]} );
-printjson( result = configColl.runCommand(request) );
-assert(result.ok);
-assert.eq(1, result.n);
-
-st.configRS.awaitReplication();
-assert.eq(1, st.config0.getCollection(configColl + "").count({b:2}));
-assert.eq(1, st.config1.getCollection(configColl + "").count({b:2}));
-assert.eq(1, st.config2.getCollection(configColl + "").count({b:2}));
-
-//
-// Basic config server delete
-configColl.remove({});
-configColl.insert({a:1});
-printjson( request = {'delete' : configColl.getName(),
- deletes: [{q: {a:1}, limit: 0}]} );
-printjson( result = configColl.runCommand(request) );
-assert(result.ok);
-assert.eq(1, result.n);
-
-st.configRS.awaitReplication();
-assert.eq(0, st.config0.getCollection(configColl + "").count());
-assert.eq(0, st.config1.getCollection(configColl + "").count());
-assert.eq(0, st.config2.getCollection(configColl + "").count());
-
-MongoRunner.stopMongod(st.config1);
-MongoRunner.stopMongod(st.config2);
-st.configRS.awaitNoPrimary();
-
-// Config server insert with no config PRIMARY
-configColl.remove({});
-printjson( request = {insert : configColl.getName(),
- documents: [{a:1}]} );
-printjson( result = configColl.runCommand(request) );
-assert(!result.ok);
-assert(result.errmsg != null);
-
-
-// Config server insert with no config PRIMARY
-configColl.remove({});
-configColl.insert({a:1});
-printjson( request = {update : configColl.getName(),
- updates: [{q: {a:1}, u: {$set: {b:2}}}]} );
-printjson( result = configColl.runCommand(request) );
-assert(!result.ok);
-assert(result.errmsg != null);
-
-// Config server insert with no config PRIMARY
-configColl.remove({});
-configColl.insert({a:1});
-printjson( request = {delete : configColl.getName(),
- deletes: [{q: {a:1}, limit: 0}]} );
-printjson( result = configColl.runCommand(request) );
-assert(!result.ok);
-assert(result.errmsg != null);
-
-jsTest.log("DONE!");
-
-MongoRunner.stopMongos( staleMongos );
-st.stop();
+ "use strict";
+
+ // The only reason for using the localhost name is to make the test consistent with host
+ // naming, so it will be easier to check for the host name inside error objects.
+ var options = {
+ useHostname: false
+ };
+ var st = new ShardingTest({shards: 2, mongos: 1, config: 3, other: options});
+ st.stopBalancer();
+
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var shards = config.shards.find().toArray();
+ var configConnStr = st._configDB;
+
+ jsTest.log("Starting sharding batch write tests...");
+
+ var request;
+ var result;
+
+ // NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
+
+ //
+ //
+ // Mongos _id autogeneration tests for sharded collections
+
+ var coll = mongos.getCollection("foo.bar");
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+ st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+ assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
+
+ //
+ // Basic insert no _id
+ coll.remove({});
+ printjson(request = {insert: coll.getName(), documents: [{a: 1}]});
+ printjson(result = coll.runCommand(request));
+ assert(result.ok);
+ assert.eq(1, result.n);
+ assert.eq(1, coll.count());
+
+ //
+ // Multi insert some _ids
+ coll.remove({});
+ printjson(request = {insert: coll.getName(), documents: [{_id: 0, a: 1}, {a: 2}]});
+ printjson(result = coll.runCommand(request));
+ assert(result.ok);
+ assert.eq(2, result.n);
+ assert.eq(2, coll.count());
+ assert.eq(1, coll.count({_id: 0}));
+
+ //
+ // Ensure that generating many _ids doesn't push us over limits
+ var maxDocSize = (16 * 1024 * 1024) / 1000;
+ var baseDocSize = Object.bsonsize({a: 1, data: ""});
+ var dataSize = maxDocSize - baseDocSize;
+
+ var data = "";
+ for (var i = 0; i < dataSize; i++)
+ data += "x";
+
+ var documents = [];
+ for (var i = 0; i < 1000; i++)
+ documents.push({a: i, data: data});
+
+ assert.commandWorked(coll.getMongo().getDB("admin").runCommand({setParameter: 1, logLevel: 4}));
+ coll.remove({});
+ request = {
+ insert: coll.getName(),
+ documents: documents
+ };
+ printjson(result = coll.runCommand(request));
+ assert(result.ok);
+ assert.eq(1000, result.n);
+ assert.eq(1000, coll.count());
+
+ //
+ //
+ // Config server upserts (against admin db, for example) require _id test
+ var adminColl = admin.getCollection(coll.getName());
+
+ //
+ // Without _id
+ adminColl.remove({});
+ printjson(
+ request = {update: adminColl.getName(), updates: [{q: {a: 1}, u: {a: 1}, upsert: true}]});
+ var result = adminColl.runCommand(request);
+ assert.commandWorked(result);
+ assert.eq(1, result.n);
+ assert.eq(1, adminColl.count());
+
+ //
+ // With _id
+ adminColl.remove({});
+ printjson(request = {
+ update: adminColl.getName(),
+ updates: [{q: {_id: 1, a: 1}, u: {a: 1}, upsert: true}]
+ });
+ assert.commandWorked(adminColl.runCommand(request));
+ assert.eq(1, result.n);
+ assert.eq(1, adminColl.count());
+
+ //
+ //
+ // Stale config progress tests
+ // Set up a new collection across two shards, then revert the chunks to an earlier state to put
+ // mongos and mongod permanently out of sync.
+
+ // START SETUP
+ var brokenColl = mongos.getCollection("broken.coll");
+ assert.commandWorked(admin.runCommand({enableSharding: brokenColl.getDB().toString()}));
+ printjson(admin.runCommand({movePrimary: brokenColl.getDB().toString(), to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({shardCollection: brokenColl.toString(), key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: brokenColl.toString(), middle: {_id: 0}}));
+
+ var oldChunks = config.chunks.find().toArray();
+
+ // Start a new mongos and bring it up-to-date with the chunks so far
+
+ var staleMongos = MongoRunner.runMongos({configdb: configConnStr});
+ brokenColl = staleMongos.getCollection(brokenColl.toString());
+ assert.writeOK(brokenColl.insert({hello: "world"}));
+
+ // Modify the chunks to make shards at a higher version
+
+ assert.commandWorked(
+ admin.runCommand({moveChunk: brokenColl.toString(), find: {_id: 0}, to: shards[1]._id}));
+
+ // Rewrite the old chunks back to the config server
+
+ assert.writeOK(config.chunks.remove({}));
+ for (var i = 0; i < oldChunks.length; i++)
+ assert.writeOK(config.chunks.insert(oldChunks[i]));
+
+ // Stale mongos can no longer bring itself up-to-date!
+ // END SETUP
+
+ //
+ // Config server insert, repeatedly stale
+ printjson(request = {insert: brokenColl.getName(), documents: [{_id: -1}]});
+ printjson(result = brokenColl.runCommand(request));
+ assert(result.ok);
+ assert.eq(0, result.n);
+ assert.eq(1, result.writeErrors.length);
+ assert.eq(0, result.writeErrors[0].index);
+ assert.eq(result.writeErrors[0].code, 82); // No Progress Made
+
+ //
+ // Config server insert to other shard, repeatedly stale
+ printjson(request = {insert: brokenColl.getName(), documents: [{_id: 1}]});
+ printjson(result = brokenColl.runCommand(request));
+ assert(result.ok);
+ assert.eq(0, result.n);
+ assert.eq(1, result.writeErrors.length);
+ assert.eq(0, result.writeErrors[0].index);
+ assert.eq(result.writeErrors[0].code, 82); // No Progress Made
+
+ //
+ //
+ // Tests against config server
+ var configColl = config.getCollection("batch_write_protocol_sharded");
+
+ //
+ // Basic config server insert
+ configColl.remove({});
+ printjson(request = {insert: configColl.getName(), documents: [{a: 1}]});
+ var result = configColl.runCommand(request);
+ assert.commandWorked(result);
+ assert.eq(1, result.n);
+
+ st.configRS.awaitReplication();
+ assert.eq(1, st.config0.getCollection(configColl + "").count());
+ assert.eq(1, st.config1.getCollection(configColl + "").count());
+ assert.eq(1, st.config2.getCollection(configColl + "").count());
+
+ //
+ // Basic config server update
+ configColl.remove({});
+ configColl.insert({a: 1});
+ printjson(request = {update: configColl.getName(), updates: [{q: {a: 1}, u: {$set: {b: 2}}}]});
+ printjson(result = configColl.runCommand(request));
+ assert(result.ok);
+ assert.eq(1, result.n);
+
+ st.configRS.awaitReplication();
+ assert.eq(1, st.config0.getCollection(configColl + "").count({b: 2}));
+ assert.eq(1, st.config1.getCollection(configColl + "").count({b: 2}));
+ assert.eq(1, st.config2.getCollection(configColl + "").count({b: 2}));
+
+ //
+ // Basic config server delete
+ configColl.remove({});
+ configColl.insert({a: 1});
+ printjson(request = {'delete': configColl.getName(), deletes: [{q: {a: 1}, limit: 0}]});
+ printjson(result = configColl.runCommand(request));
+ assert(result.ok);
+ assert.eq(1, result.n);
+
+ st.configRS.awaitReplication();
+ assert.eq(0, st.config0.getCollection(configColl + "").count());
+ assert.eq(0, st.config1.getCollection(configColl + "").count());
+ assert.eq(0, st.config2.getCollection(configColl + "").count());
+
+ MongoRunner.stopMongod(st.config1);
+ MongoRunner.stopMongod(st.config2);
+ st.configRS.awaitNoPrimary();
+
+ // Config server insert with no config PRIMARY
+ configColl.remove({});
+ printjson(request = {insert: configColl.getName(), documents: [{a: 1}]});
+ printjson(result = configColl.runCommand(request));
+ assert(!result.ok);
+ assert(result.errmsg != null);
+
+ // Config server insert with no config PRIMARY
+ configColl.remove({});
+ configColl.insert({a: 1});
+ printjson(request = {update: configColl.getName(), updates: [{q: {a: 1}, u: {$set: {b: 2}}}]});
+ printjson(result = configColl.runCommand(request));
+ assert(!result.ok);
+ assert(result.errmsg != null);
+
+ // Config server insert with no config PRIMARY
+ configColl.remove({});
+ configColl.insert({a: 1});
+ printjson(request = {delete: configColl.getName(), deletes: [{q: {a: 1}, limit: 0}]});
+ printjson(result = configColl.runCommand(request));
+ assert(!result.ok);
+ assert(result.errmsg != null);
+
+ jsTest.log("DONE!");
+
+ MongoRunner.stopMongos(staleMongos);
+ st.stop();
}()); \ No newline at end of file
diff --git a/jstests/sharding/bouncing_count.js b/jstests/sharding/bouncing_count.js
index d2df8c92984..62ff26c08a7 100644
--- a/jstests/sharding/bouncing_count.js
+++ b/jstests/sharding/bouncing_count.js
@@ -1,58 +1,57 @@
// Tests whether new sharding is detected on insert by mongos
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 10, mongos: 3 });
+ var st = new ShardingTest({shards: 10, mongos: 3});
-var mongosA = st.s0;
-var mongosB = st.s1;
-var mongosC = st.s2;
+ var mongosA = st.s0;
+ var mongosB = st.s1;
+ var mongosC = st.s2;
-var admin = mongosA.getDB("admin");
-var config = mongosA.getDB("config");
+ var admin = mongosA.getDB("admin");
+ var config = mongosA.getDB("config");
-var collA = mongosA.getCollection("foo.bar");
-var collB = mongosB.getCollection("" + collA);
-var collC = mongosB.getCollection("" + collA);
+ var collA = mongosA.getCollection("foo.bar");
+ var collB = mongosB.getCollection("" + collA);
+ var collC = mongosB.getCollection("" + collA);
-var shards = config.shards.find().sort({ _id: 1 }).toArray();
+ var shards = config.shards.find().sort({_id: 1}).toArray();
-assert.commandWorked(admin.runCommand({ enableSharding: "" + collA.getDB() }));
-st.ensurePrimaryShard(collA.getDB().getName(), shards[1]._id);
-assert.commandWorked(admin.runCommand({ shardCollection: "" + collA, key: { _id: 1 } }));
+ assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
+ st.ensurePrimaryShard(collA.getDB().getName(), shards[1]._id);
+ assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));
-jsTestLog("Splitting up the collection...");
+ jsTestLog("Splitting up the collection...");
-// Split up the collection
-for(var i = 0; i < shards.length; i++){
- assert.commandWorked(admin.runCommand({ split: "" + collA, middle: { _id: i } }));
- assert.commandWorked(
- admin.runCommand({ moveChunk: "" + collA, find: { _id: i }, to: shards[i]._id }));
-}
+ // Split up the collection
+ for (var i = 0; i < shards.length; i++) {
+ assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: "" + collA, find: {_id: i}, to: shards[i]._id}));
+ }
-mongosB.getDB("admin").runCommand({ flushRouterConfig: 1 });
-mongosC.getDB("admin").runCommand({ flushRouterConfig: 1 });
+ mongosB.getDB("admin").runCommand({flushRouterConfig: 1});
+ mongosC.getDB("admin").runCommand({flushRouterConfig: 1});
-printjson(collB.count());
-printjson(collC.count());
+ printjson(collB.count());
+ printjson(collC.count());
-// Change up all the versions...
-for(var i = 0; i < shards.length; i++){
- assert.commandWorked(admin.runCommand({ moveChunk: "" + collA,
- find: { _id: i },
- to: shards[ (i + 1) % shards.length ]._id }));
-}
+ // Change up all the versions...
+ for (var i = 0; i < shards.length; i++) {
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: "" + collA, find: {_id: i}, to: shards[(i + 1) % shards.length]._id}));
+ }
-// Make sure mongos A is up-to-date
-mongosA.getDB("admin").runCommand({ flushRouterConfig: 1 });
+ // Make sure mongos A is up-to-date
+ mongosA.getDB("admin").runCommand({flushRouterConfig: 1});
-st.printShardingStatus(true);
+ st.printShardingStatus(true);
-jsTestLog("Running count!");
+ jsTestLog("Running count!");
-printjson(collB.count());
-printjson(collC.find().toArray());
+ printjson(collB.count());
+ printjson(collC.find().toArray());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/bulk_insert.js b/jstests/sharding/bulk_insert.js
index 306c2a82020..715660fa67f 100644
--- a/jstests/sharding/bulk_insert.js
+++ b/jstests/sharding/bulk_insert.js
@@ -1,327 +1,285 @@
// Tests bulk inserts to mongos
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards : 2, mongos : 2 });
+ var st = new ShardingTest({shards: 2, mongos: 2});
-var mongos = st.s;
-var staleMongos = st.s1;
-var config = mongos.getDB("config");
-var admin = mongos.getDB("admin");
-var shards = config.shards.find().toArray();
+ var mongos = st.s;
+ var staleMongos = st.s1;
+ var config = mongos.getDB("config");
+ var admin = mongos.getDB("admin");
+ var shards = config.shards.find().toArray();
-for (var i = 0; i < shards.length; i++) {
- shards[i].conn = new Mongo(shards[i].host);
-}
+ for (var i = 0; i < shards.length; i++) {
+ shards[i].conn = new Mongo(shards[i].host);
+ }
-var collSh = mongos.getCollection(jsTestName() + ".collSharded");
-var collUn = mongos.getCollection(jsTestName() + ".collUnsharded");
-var collDi = shards[0].conn.getCollection(jsTestName() + ".collDirect");
+ var collSh = mongos.getCollection(jsTestName() + ".collSharded");
+ var collUn = mongos.getCollection(jsTestName() + ".collUnsharded");
+ var collDi = shards[0].conn.getCollection(jsTestName() + ".collDirect");
-jsTest.log('Checking write to config collections...');
-assert.writeOK(admin.TestColl.insert({ SingleDoc: 1 }));
-assert.writeError(admin.TestColl.insert([ { Doc1: 1 }, { Doc2: 1 } ]));
+ jsTest.log('Checking write to config collections...');
+ assert.writeOK(admin.TestColl.insert({SingleDoc: 1}));
+ assert.writeError(admin.TestColl.insert([{Doc1: 1}, {Doc2: 1}]));
-jsTest.log("Setting up collections...");
+ jsTest.log("Setting up collections...");
-assert.commandWorked(admin.runCommand({ enableSharding : collSh.getDB() + "" }));
-st.ensurePrimaryShard(collSh.getDB() + "", shards[0]._id);
+ assert.commandWorked(admin.runCommand({enableSharding: collSh.getDB() + ""}));
+ st.ensurePrimaryShard(collSh.getDB() + "", shards[0]._id);
-assert.commandWorked(admin.runCommand({ movePrimary : collUn.getDB() + "",
- to : shards[1]._id}));
+ assert.commandWorked(admin.runCommand({movePrimary: collUn.getDB() + "", to: shards[1]._id}));
-printjson(collSh.ensureIndex({ukey : 1}, {unique : true}));
-printjson(collUn.ensureIndex({ukey : 1}, {unique : true}));
-printjson(collDi.ensureIndex({ukey : 1}, {unique : true}));
+ printjson(collSh.ensureIndex({ukey: 1}, {unique: true}));
+ printjson(collUn.ensureIndex({ukey: 1}, {unique: true}));
+ printjson(collDi.ensureIndex({ukey: 1}, {unique: true}));
-assert.commandWorked(admin.runCommand({ shardCollection : collSh + "",
- key : {ukey : 1} }));
-assert.commandWorked(admin.runCommand({ split : collSh + "",
- middle : {ukey : 0} }));
-assert.commandWorked(admin.runCommand({ moveChunk: collSh + "",
- find: { ukey: 0 },
- to: shards[0]._id,
- _waitForDelete: true }));
+ assert.commandWorked(admin.runCommand({shardCollection: collSh + "", key: {ukey: 1}}));
+ assert.commandWorked(admin.runCommand({split: collSh + "", middle: {ukey: 0}}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: shards[0]._id, _waitForDelete: true}));
-var resetColls = function() {
- assert.writeOK(collSh.remove({}));
- assert.writeOK(collUn.remove({}));
- assert.writeOK(collDi.remove({}));
-};
+ var resetColls = function() {
+ assert.writeOK(collSh.remove({}));
+ assert.writeOK(collUn.remove({}));
+ assert.writeOK(collDi.remove({}));
+ };
-var isDupKeyError = function(err) {
- return /dup key/.test(err + "");
-};
+ var isDupKeyError = function(err) {
+ return /dup key/.test(err + "");
+ };
-jsTest.log("Collections created.");
-st.printShardingStatus();
+ jsTest.log("Collections created.");
+ st.printShardingStatus();
-//
-// BREAK-ON-ERROR
-//
+ //
+ // BREAK-ON-ERROR
+ //
-jsTest.log("Bulk insert (no ContinueOnError) to single shard...");
+ jsTest.log("Bulk insert (no ContinueOnError) to single shard...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 1}];
+ resetColls();
+ var inserts = [{ukey: 0}, {ukey: 1}];
-assert.writeOK(collSh.insert(inserts));
-assert.eq(2, collSh.find().itcount());
+ assert.writeOK(collSh.insert(inserts));
+ assert.eq(2, collSh.find().itcount());
-assert.writeOK(collUn.insert(inserts));
-assert.eq(2, collUn.find().itcount());
+ assert.writeOK(collUn.insert(inserts));
+ assert.eq(2, collUn.find().itcount());
-assert.writeOK(collDi.insert(inserts));
-assert.eq(2, collDi.find().itcount());
+ assert.writeOK(collDi.insert(inserts));
+ assert.eq(2, collDi.find().itcount());
-jsTest.log("Bulk insert (no COE) with mongos error...");
+ jsTest.log("Bulk insert (no COE) with mongos error...");
-resetColls();
-var inserts = [{ukey : 0},
- {hello : "world"},
- {ukey : 1}];
+ resetColls();
+ var inserts = [{ukey: 0}, {hello: "world"}, {ukey: 1}];
-assert.writeError(collSh.insert(inserts));
-assert.eq(1, collSh.find().itcount());
+ assert.writeError(collSh.insert(inserts));
+ assert.eq(1, collSh.find().itcount());
-jsTest.log("Bulk insert (no COE) with mongod error...");
+ jsTest.log("Bulk insert (no COE) with mongod error...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 0},
- {ukey : 1}];
+ resetColls();
+ var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
-assert.writeError(collSh.insert(inserts));
-assert.eq(1, collSh.find().itcount());
+ assert.writeError(collSh.insert(inserts));
+ assert.eq(1, collSh.find().itcount());
-assert.writeError(collUn.insert(inserts));
-assert.eq(1, collUn.find().itcount());
+ assert.writeError(collUn.insert(inserts));
+ assert.eq(1, collUn.find().itcount());
-assert.writeError(collDi.insert(inserts));
-assert.eq(1, collDi.find().itcount());
+ assert.writeError(collDi.insert(inserts));
+ assert.eq(1, collDi.find().itcount());
-jsTest.log("Bulk insert (no COE) with mongod and mongos error...");
+ jsTest.log("Bulk insert (no COE) with mongod and mongos error...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 0},
- {ukey : 1},
- {hello : "world"}];
+ resetColls();
+ var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}, {hello: "world"}];
-var res = assert.writeError(collSh.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(1, collSh.find().itcount());
+ var res = assert.writeError(collSh.insert(inserts));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(1, collSh.find().itcount());
-res = assert.writeError(collUn.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(1, collUn.find().itcount());
+ res = assert.writeError(collUn.insert(inserts));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(1, collUn.find().itcount());
-res = assert.writeError(collDi.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(1, collDi.find().itcount());
+ res = assert.writeError(collDi.insert(inserts));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(1, collDi.find().itcount());
-jsTest.log("Bulk insert (no COE) on second shard...");
+ jsTest.log("Bulk insert (no COE) on second shard...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : -1}];
+ resetColls();
+ var inserts = [{ukey: 0}, {ukey: -1}];
-assert.writeOK(collSh.insert(inserts));
-assert.eq(2, collSh.find().itcount());
+ assert.writeOK(collSh.insert(inserts));
+ assert.eq(2, collSh.find().itcount());
-assert.writeOK(collUn.insert(inserts));
-assert.eq(2, collUn.find().itcount());
+ assert.writeOK(collUn.insert(inserts));
+ assert.eq(2, collUn.find().itcount());
-assert.writeOK(collDi.insert(inserts));
-assert.eq(2, collDi.find().itcount());
+ assert.writeOK(collDi.insert(inserts));
+ assert.eq(2, collDi.find().itcount());
-jsTest.log("Bulk insert to second shard (no COE) with mongos error...");
+ jsTest.log("Bulk insert to second shard (no COE) with mongos error...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 1}, // switches shards
- {ukey : -1},
- {hello : "world"}];
+ resetColls();
+ var inserts = [
+ {ukey: 0},
+ {ukey: 1}, // switches shards
+ {ukey: -1},
+ {hello: "world"}
+ ];
-assert.writeError(collSh.insert(inserts));
-assert.eq(3, collSh.find().itcount());
+ assert.writeError(collSh.insert(inserts));
+ assert.eq(3, collSh.find().itcount());
-jsTest.log("Bulk insert to second shard (no COE) with mongod error...");
+ jsTest.log("Bulk insert to second shard (no COE) with mongod error...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 1},
- {ukey : -1},
- {ukey : -2},
- {ukey : -2}];
+ resetColls();
+ var inserts = [{ukey: 0}, {ukey: 1}, {ukey: -1}, {ukey: -2}, {ukey: -2}];
-assert.writeError(collSh.insert(inserts));
-assert.eq(4, collSh.find().itcount());
+ assert.writeError(collSh.insert(inserts));
+ assert.eq(4, collSh.find().itcount());
-assert.writeError(collUn.insert(inserts));
-assert.eq(4, collUn.find().itcount());
+ assert.writeError(collUn.insert(inserts));
+ assert.eq(4, collUn.find().itcount());
-assert.writeError(collDi.insert(inserts));
-assert.eq(4, collDi.find().itcount());
+ assert.writeError(collDi.insert(inserts));
+ assert.eq(4, collDi.find().itcount());
-jsTest.log("Bulk insert to third shard (no COE) with mongod and mongos error...");
+ jsTest.log("Bulk insert to third shard (no COE) with mongod and mongos error...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 1},
- {ukey : -2},
- {ukey : -3},
- {ukey : 4},
- {ukey : 4},
- {hello : "world"}];
+ resetColls();
+ var inserts =
+ [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {ukey: 4}, {ukey: 4}, {hello: "world"}];
-res = assert.writeError(collSh.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(5, collSh.find().itcount());
+ res = assert.writeError(collSh.insert(inserts));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(5, collSh.find().itcount());
-res = assert.writeError(collUn.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(5, collUn.find().itcount());
+ res = assert.writeError(collUn.insert(inserts));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(5, collUn.find().itcount());
-res = assert.writeError(collDi.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(5, collDi.find().itcount());
+ res = assert.writeError(collDi.insert(inserts));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(5, collDi.find().itcount());
-//
-// CONTINUE-ON-ERROR
-//
+ //
+ // CONTINUE-ON-ERROR
+ //
-jsTest.log("Bulk insert (yes COE) with mongos error...");
+ jsTest.log("Bulk insert (yes COE) with mongos error...");
-resetColls();
-var inserts = [{ukey : 0},
- {hello : "world"},
- {ukey : 1}];
+ resetColls();
+ var inserts = [{ukey: 0}, {hello: "world"}, {ukey: 1}];
-assert.writeError(collSh.insert(inserts, 1)); // COE
-assert.eq(2, collSh.find().itcount());
+ assert.writeError(collSh.insert(inserts, 1)); // COE
+ assert.eq(2, collSh.find().itcount());
-jsTest.log("Bulk insert (yes COE) with mongod error...");
-
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 0},
- {ukey : 1}];
+ jsTest.log("Bulk insert (yes COE) with mongod error...");
-assert.writeError(collSh.insert(inserts, 1));
-assert.eq(2, collSh.find().itcount());
+ resetColls();
+ var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
-assert.writeError(collUn.insert(inserts, 1));
-assert.eq(2, collUn.find().itcount());
-
-assert.writeError(collDi.insert(inserts, 1));
-assert.eq(2, collDi.find().itcount());
-
-jsTest
- .log("Bulk insert to third shard (yes COE) with mongod and mongos error...");
-
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 1},
- {ukey : -2},
- {ukey : -3},
- {ukey : 4},
- {ukey : 4},
- {hello : "world"}];
-
-// Last error here is mongos error
-res = assert.writeError(collSh.insert(inserts, 1));
-assert(!isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
-assert.eq(5, collSh.find().itcount());
-
-// Extra insert goes through, since mongos error "doesn't count"
-res = assert.writeError(collUn.insert(inserts, 1));
-assert.eq(6, res.nInserted, res.toString());
-assert.eq(6, collUn.find().itcount());
-
-res = assert.writeError(collDi.insert(inserts, 1));
-assert.eq(6, res.nInserted, res.toString());
-assert.eq(6, collDi.find().itcount());
-
-jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error "
- + "(mongos error first)...");
-
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 1},
- {ukey : -2},
- {ukey : -3},
- {hello : "world"},
- {ukey : 4},
- {ukey : 4}];
-
-// Last error here is mongos error
-res = assert.writeError(collSh.insert(inserts, 1));
-assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
-assert.eq(5, collSh.find().itcount());
-
-// Extra insert goes through, since mongos error "doesn't count"
-res = assert.writeError(collUn.insert(inserts, 1));
-assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
-assert.eq(6, collUn.find().itcount());
-
-res = assert.writeError(collDi.insert(inserts, 1));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(6, collDi.find().itcount());
-
-//
-// Test when WBL has to be invoked mid-insert
-//
-
-jsTest.log("Testing bulk insert (no COE) with WBL...");
-resetColls();
-
-var inserts = [{ukey : 1},
- {ukey : -1}];
-
-var staleCollSh = staleMongos.getCollection(collSh + "");
-assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
-
-assert.commandWorked(admin.runCommand({ moveChunk : collSh + "",
- find : {ukey : 0},
- to : shards[1]._id,
- _waitForDelete: true }));
-assert.commandWorked(admin.runCommand({ moveChunk : collSh + "",
- find : {ukey : 0},
- to : shards[0]._id,
- _waitForDelete: true}));
-
-assert.writeOK(staleCollSh.insert(inserts));
-
-//
-// Test when the objects to be bulk inserted are 10MB, and so can't be inserted
-// together with WBL.
-//
-
-jsTest.log("Testing bulk insert (no COE) with WBL and large objects...");
-resetColls();
-
-var data10MB = 'x'.repeat(10 * 1024 * 1024);
-var inserts = [{ukey : 1, data : data10MB},
- {ukey : 2, data : data10MB},
- {ukey : -1, data : data10MB},
- {ukey : -2, data : data10MB}];
-
-staleCollSh = staleMongos.getCollection(collSh + "");
-assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
-
-assert.commandWorked(admin.runCommand({ moveChunk : collSh + "",
- find : {ukey : 0},
- to : shards[1]._id,
- _waitForDelete: true }));
-assert.commandWorked(admin.runCommand({ moveChunk : collSh + "",
- find : {ukey : 0},
- to : shards[0]._id,
- _waitForDelete: true }));
-
-assert.writeOK(staleCollSh.insert(inserts));
-
-st.stop();
+ assert.writeError(collSh.insert(inserts, 1));
+ assert.eq(2, collSh.find().itcount());
+
+ assert.writeError(collUn.insert(inserts, 1));
+ assert.eq(2, collUn.find().itcount());
+
+ assert.writeError(collDi.insert(inserts, 1));
+ assert.eq(2, collDi.find().itcount());
+
+ jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error...");
+
+ resetColls();
+ var inserts =
+ [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {ukey: 4}, {ukey: 4}, {hello: "world"}];
+
+ // Last error here is mongos error
+ res = assert.writeError(collSh.insert(inserts, 1));
+ assert(!isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg),
+ res.toString());
+ assert.eq(5, collSh.find().itcount());
+
+ // Extra insert goes through, since mongos error "doesn't count"
+ res = assert.writeError(collUn.insert(inserts, 1));
+ assert.eq(6, res.nInserted, res.toString());
+ assert.eq(6, collUn.find().itcount());
+
+ res = assert.writeError(collDi.insert(inserts, 1));
+ assert.eq(6, res.nInserted, res.toString());
+ assert.eq(6, collDi.find().itcount());
+
+ jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error " +
+ "(mongos error first)...");
+
+ resetColls();
+ var inserts =
+ [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {hello: "world"}, {ukey: 4}, {ukey: 4}];
+
+ // Last error here is mongos error
+ res = assert.writeError(collSh.insert(inserts, 1));
+ assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+ assert.eq(5, collSh.find().itcount());
+
+ // Extra insert goes through, since mongos error "doesn't count"
+ res = assert.writeError(collUn.insert(inserts, 1));
+ assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+ assert.eq(6, collUn.find().itcount());
+
+ res = assert.writeError(collDi.insert(inserts, 1));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(6, collDi.find().itcount());
+
+ //
+ // Test when WBL has to be invoked mid-insert
+ //
+
+ jsTest.log("Testing bulk insert (no COE) with WBL...");
+ resetColls();
+
+ var inserts = [{ukey: 1}, {ukey: -1}];
+
+ var staleCollSh = staleMongos.getCollection(collSh + "");
+ assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
+
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: shards[1]._id, _waitForDelete: true}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: shards[0]._id, _waitForDelete: true}));
+
+ assert.writeOK(staleCollSh.insert(inserts));
+
+ //
+ // Test when the objects to be bulk inserted are 10MB, and so can't be inserted
+ // together with WBL.
+ //
+
+ jsTest.log("Testing bulk insert (no COE) with WBL and large objects...");
+ resetColls();
+
+ var data10MB = 'x'.repeat(10 * 1024 * 1024);
+ var inserts = [
+ {ukey: 1, data: data10MB},
+ {ukey: 2, data: data10MB},
+ {ukey: -1, data: data10MB},
+ {ukey: -2, data: data10MB}
+ ];
+
+ staleCollSh = staleMongos.getCollection(collSh + "");
+ assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
+
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: shards[1]._id, _waitForDelete: true}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: shards[0]._id, _waitForDelete: true}));
+
+ assert.writeOK(staleCollSh.insert(inserts));
+
+ st.stop();
})();
diff --git a/jstests/sharding/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js
index 7d42d8b41a2..943fe270ba0 100644
--- a/jstests/sharding/bulk_shard_insert.js
+++ b/jstests/sharding/bulk_shard_insert.js
@@ -1,81 +1,81 @@
// Test bulk inserts with sharding
(function() {
-// Setup randomized test
-var seed = new Date().getTime();
-// seed = 0
+ // Setup randomized test
+ var seed = new Date().getTime();
+ // seed = 0
-Random.srand( seed );
-print( "Seeded with " + seed );
+ Random.srand(seed);
+ print("Seeded with " + seed);
-var st = new ShardingTest({ name : jsTestName(), shards : 4, chunkSize: 1 });
+ var st = new ShardingTest({name: jsTestName(), shards: 4, chunkSize: 1});
-// Setup sharded collection
-var mongos = st.s0;
-var db = mongos.getDB( jsTestName() );
-var coll = db.coll;
-st.shardColl( coll, { _id : 1 }, false );
+ // Setup sharded collection
+ var mongos = st.s0;
+ var db = mongos.getDB(jsTestName());
+ var coll = db.coll;
+ st.shardColl(coll, {_id: 1}, false);
-// Insert lots of bulk documents
-var numDocs = 1000000;
+ // Insert lots of bulk documents
+ var numDocs = 1000000;
-var bulkSize = Math.floor( Random.rand() * 1000 ) + 2;
-bulkSize = 4000;
-var docSize = 128; /* bytes */
-print( "\n\n\nBulk size is " + bulkSize );
+ var bulkSize = Math.floor(Random.rand() * 1000) + 2;
+ bulkSize = 4000;
+ var docSize = 128; /* bytes */
+ print("\n\n\nBulk size is " + bulkSize);
-var data = "x";
-while( Object.bsonsize({ x : data }) < docSize ){
- data += data;
-}
+ var data = "x";
+ while (Object.bsonsize({x: data}) < docSize) {
+ data += data;
+ }
-print( "\n\n\nDocument size is " + Object.bsonsize({ x : data }) );
+ print("\n\n\nDocument size is " + Object.bsonsize({x: data}));
-var docsInserted = 0;
-var balancerOn = false;
+ var docsInserted = 0;
+ var balancerOn = false;
-while (docsInserted < numDocs) {
- var currBulkSize = ( numDocs - docsInserted > bulkSize ) ? bulkSize : ( numDocs - docsInserted );
-
- var bulk = [];
- for( var i = 0; i < currBulkSize; i++ ){
- bulk.push({hi: "there", at: docsInserted, i: i, x: data});
- }
-
- assert.writeOK(coll.insert( bulk ));
-
- if( Math.floor( docsInserted / 10000 ) != Math.floor( ( docsInserted + currBulkSize ) / 10000 ) ){
- print( "Inserted " + (docsInserted + currBulkSize) + " documents." );
- st.printShardingStatus();
- }
-
- docsInserted += currBulkSize;
-
- if( docsInserted > numDocs / 2 && ! balancerOn ){
- print( "Turning on balancer after half documents inserted." );
- st.startBalancer();
- balancerOn = true;
- }
-}
+ while (docsInserted < numDocs) {
+ var currBulkSize =
+ (numDocs - docsInserted > bulkSize) ? bulkSize : (numDocs - docsInserted);
+
+ var bulk = [];
+ for (var i = 0; i < currBulkSize; i++) {
+ bulk.push({hi: "there", at: docsInserted, i: i, x: data});
+ }
-// Check we inserted all the documents
-st.printShardingStatus();
+ assert.writeOK(coll.insert(bulk));
+
+ if (Math.floor(docsInserted / 10000) != Math.floor((docsInserted + currBulkSize) / 10000)) {
+ print("Inserted " + (docsInserted + currBulkSize) + " documents.");
+ st.printShardingStatus();
+ }
+
+ docsInserted += currBulkSize;
+
+ if (docsInserted > numDocs / 2 && !balancerOn) {
+ print("Turning on balancer after half documents inserted.");
+ st.startBalancer();
+ balancerOn = true;
+ }
+ }
-var count = coll.find().count();
-var itcount = count; //coll.find().itcount()
+ // Check we inserted all the documents
+ st.printShardingStatus();
-print("Inserted " + docsInserted + " count : " + count + " itcount : " + itcount);
+ var count = coll.find().count();
+ var itcount = count; // coll.find().itcount()
-st.startBalancer();
+ print("Inserted " + docsInserted + " count : " + count + " itcount : " + itcount);
-var count = coll.find().count();
-var itcount = coll.find().itcount();
+ st.startBalancer();
-print( "Inserted " + docsInserted + " count : " + count + " itcount : " + itcount );
+ var count = coll.find().count();
+ var itcount = coll.find().itcount();
+ print("Inserted " + docsInserted + " count : " + count + " itcount : " + itcount);
-// SERVER-3645
-// assert.eq( docsInserted, count )
-assert.eq(docsInserted, itcount);
+ // SERVER-3645
+ // assert.eq( docsInserted, count )
+ assert.eq(docsInserted, itcount);
})();
diff --git a/jstests/sharding/cleanup_orphaned.js b/jstests/sharding/cleanup_orphaned.js
index bbe383b94ce..a63991f7a23 100644
--- a/jstests/sharding/cleanup_orphaned.js
+++ b/jstests/sharding/cleanup_orphaned.js
@@ -9,7 +9,9 @@ testCleanupOrphaned({
shardKey: {_id: 1},
keyGen: function() {
var ids = [];
- for (var i = -50; i < 50; i++) { ids.push({_id: i}); }
+ for (var i = -50; i < 50; i++) {
+ ids.push({_id: i});
+ }
return ids;
}
});
diff --git a/jstests/sharding/cleanup_orphaned_auth.js b/jstests/sharding/cleanup_orphaned_auth.js
index e1364f4ad12..0b50742ad70 100644
--- a/jstests/sharding/cleanup_orphaned_auth.js
+++ b/jstests/sharding/cleanup_orphaned_auth.js
@@ -2,58 +2,55 @@
// Tests of cleanupOrphaned command permissions.
//
-(function() {
-"use strict";
+(function() {
+ "use strict";
-function assertUnauthorized(res, msg){
- if (assert._debug && msg) print("in assert for: " + msg);
+ function assertUnauthorized(res, msg) {
+ if (assert._debug && msg)
+ print("in assert for: " + msg);
- if (res.ok == 0 && res.errmsg.startsWith('not authorized'))
- return;
+ if (res.ok == 0 && res.errmsg.startsWith('not authorized'))
+ return;
- var finalMsg = "command worked when it should have been unauthorized: " + tojson(res);
- if (msg) { finalMsg += " : " + msg; }
- doassert(finalMsg);
-}
+ var finalMsg = "command worked when it should have been unauthorized: " + tojson(res);
+ if (msg) {
+ finalMsg += " : " + msg;
+ }
+ doassert(finalMsg);
+ }
-var st = new ShardingTest({
- auth: true,
- keyFile: 'jstests/libs/key1',
- other: {useHostname: false}
-});
+ var st =
+ new ShardingTest({auth: true, keyFile: 'jstests/libs/key1', other: {useHostname: false}});
-var shardAdmin = st.shard0.getDB('admin');
-shardAdmin.createUser({user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
-shardAdmin.auth('admin', 'x');
+ var shardAdmin = st.shard0.getDB('admin');
+ shardAdmin.createUser(
+ {user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
+ shardAdmin.auth('admin', 'x');
-var mongos = st.s0;
-var mongosAdmin = mongos.getDB('admin');
-var coll = mongos.getCollection('foo.bar');
+ var mongos = st.s0;
+ var mongosAdmin = mongos.getDB('admin');
+ var coll = mongos.getCollection('foo.bar');
-mongosAdmin.createUser({user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
-mongosAdmin.auth('admin', 'x');
+ mongosAdmin.createUser(
+ {user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
+ mongosAdmin.auth('admin', 'x');
-assert.commandWorked(mongosAdmin.runCommand({
- enableSharding: coll.getDB().getName()
-}));
+ assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()}));
-assert.commandWorked(mongosAdmin.runCommand({
- shardCollection: coll.getFullName(),
- key: {_id: 'hashed'}
-}));
+ assert.commandWorked(
+ mongosAdmin.runCommand({shardCollection: coll.getFullName(), key: {_id: 'hashed'}}));
+ // cleanupOrphaned requires auth as admin user.
+ assert.commandWorked(shardAdmin.logout());
+ assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
-// cleanupOrphaned requires auth as admin user.
-assert.commandWorked(shardAdmin.logout());
-assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
+ var fooDB = st.shard0.getDB('foo');
+ shardAdmin.auth('admin', 'x');
+ fooDB.createUser({user: 'user', pwd: 'x', roles: ['readWrite', 'dbAdmin']});
+ shardAdmin.logout();
+ fooDB.auth('user', 'x');
+ assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
-var fooDB = st.shard0.getDB('foo');
-shardAdmin.auth('admin', 'x');
-fooDB.createUser({user:'user', pwd:'x', roles:['readWrite', 'dbAdmin']});
-shardAdmin.logout();
-fooDB.auth('user', 'x');
-assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
-
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/cleanup_orphaned_basic.js b/jstests/sharding/cleanup_orphaned_basic.js
index 387863ee75d..3ed9015941b 100644
--- a/jstests/sharding/cleanup_orphaned_basic.js
+++ b/jstests/sharding/cleanup_orphaned_basic.js
@@ -3,142 +3,112 @@
// command fail.
//
-(function() {
-"use strict";
-
-/*****************************************************************************
- * Unsharded mongod.
- ****************************************************************************/
-
-// cleanupOrphaned fails against unsharded mongod.
-var mongod = MongoRunner.runMongod();
-assert.commandFailed(mongod.getDB('admin').runCommand({cleanupOrphaned: 'foo.bar'}));
-
-/*****************************************************************************
- * Bad invocations of cleanupOrphaned command.
- ****************************************************************************/
-
-var st = new ShardingTest({
- other: {
- rs: true,
- rsOptions: {nodes: 2}
- }
-});
-
-var mongos = st.s0;
-var shards = mongos.getCollection('config.shards').find().toArray();
-var mongosAdmin = mongos.getDB('admin');
-var dbName = 'foo';
-var collectionName = 'bar';
-var ns = dbName + '.' + collectionName;
-var coll = mongos.getCollection(ns);
-
-// cleanupOrphaned fails against mongos ('no such command'): it must be run
-// on mongod.
-assert.commandFailed(mongosAdmin.runCommand({cleanupOrphaned: ns}));
-
-// cleanupOrphaned must be run on admin DB.
-var shardFooDB = st.shard0.getDB(dbName);
-assert.commandFailed(shardFooDB.runCommand({cleanupOrphaned: ns}));
-
-// Must be run on primary.
-var secondaryAdmin = st.rs0.getSecondary().getDB('admin');
-var response = secondaryAdmin.runCommand({cleanupOrphaned: ns});
-print('cleanupOrphaned on secondary:');
-printjson(response);
-assert.commandFailed(response);
-
-var shardAdmin = st.shard0.getDB('admin');
-var badNS = ' \\/."*<>:|?';
-assert.commandFailed(shardAdmin.runCommand({cleanupOrphaned: badNS}));
-
-// cleanupOrphaned works on sharded collection.
-assert.commandWorked(mongosAdmin.runCommand({
- enableSharding: coll.getDB().getName()
-}));
-
-st.ensurePrimaryShard(coll.getDB().getName(), shards[0]._id);
-
-assert.commandWorked(mongosAdmin.runCommand({
- shardCollection: ns,
- key: {_id: 1}
-}));
-
-assert.commandWorked(shardAdmin.runCommand({cleanupOrphaned: ns}));
-
-/*****************************************************************************
- * Empty shard.
- ****************************************************************************/
-
-// Ping shard[1] so it will be aware that it is sharded. Otherwise cleanupOrphaned
-// may fail.
-assert.commandWorked(mongosAdmin.runCommand({
- moveChunk: coll.getFullName(),
- find: {_id: 1},
- to: shards[1]._id
-}));
-
-assert.commandWorked(mongosAdmin.runCommand({
- moveChunk: coll.getFullName(),
- find: {_id: 1},
- to: shards[0]._id
-}));
-
-// Collection's home is shard0, there are no chunks assigned to shard1.
-st.shard1.getCollection(ns).insert({});
-assert.eq(null, st.shard1.getDB(dbName).getLastError());
-assert.eq(1, st.shard1.getCollection(ns).count());
-response = st.shard1.getDB('admin').runCommand({cleanupOrphaned: ns});
-assert.commandWorked(response);
-assert.eq({_id: {$maxKey:1}}, response.stoppedAtKey);
-assert.eq(
- 0, st.shard1.getCollection(ns).count(),
- "cleanupOrphaned didn't delete orphan on empty shard.");
-
-/*****************************************************************************
- * Bad startingFromKeys.
- ****************************************************************************/
-
-// startingFromKey of MaxKey.
-response = shardAdmin.runCommand({
- cleanupOrphaned: ns,
- startingFromKey: {_id: MaxKey}
-});
-assert.commandWorked(response);
-assert.eq(null, response.stoppedAtKey);
-
-// startingFromKey doesn't match number of fields in shard key.
-assert.commandFailed(shardAdmin.runCommand({
- cleanupOrphaned: ns,
- startingFromKey: {someKey: 'someValue', someOtherKey: 1}
-}));
-
-// startingFromKey matches number of fields in shard key but not field names.
-assert.commandFailed(shardAdmin.runCommand({
- cleanupOrphaned: ns,
- startingFromKey: {someKey: 'someValue'}
-}));
-
-var coll2 = mongos.getCollection('foo.baz');
-
-assert.commandWorked(mongosAdmin.runCommand({
- shardCollection: coll2.getFullName(),
- key: {a:1, b:1}
-}));
-
-
-// startingFromKey doesn't match number of fields in shard key.
-assert.commandFailed(shardAdmin.runCommand({
- cleanupOrphaned: coll2.getFullName(),
- startingFromKey: {someKey: 'someValue'}
-}));
-
-// startingFromKey matches number of fields in shard key but not field names.
-assert.commandFailed(shardAdmin.runCommand({
- cleanupOrphaned: coll2.getFullName(),
- startingFromKey: {a: 'someValue', c: 1}
-}));
-
-st.stop();
+(function() {
+ "use strict";
+
+ /*****************************************************************************
+ * Unsharded mongod.
+ ****************************************************************************/
+
+ // cleanupOrphaned fails against unsharded mongod.
+ var mongod = MongoRunner.runMongod();
+ assert.commandFailed(mongod.getDB('admin').runCommand({cleanupOrphaned: 'foo.bar'}));
+
+ /*****************************************************************************
+ * Bad invocations of cleanupOrphaned command.
+ ****************************************************************************/
+
+ var st = new ShardingTest({other: {rs: true, rsOptions: {nodes: 2}}});
+
+ var mongos = st.s0;
+ var shards = mongos.getCollection('config.shards').find().toArray();
+ var mongosAdmin = mongos.getDB('admin');
+ var dbName = 'foo';
+ var collectionName = 'bar';
+ var ns = dbName + '.' + collectionName;
+ var coll = mongos.getCollection(ns);
+
+ // cleanupOrphaned fails against mongos ('no such command'): it must be run
+ // on mongod.
+ assert.commandFailed(mongosAdmin.runCommand({cleanupOrphaned: ns}));
+
+ // cleanupOrphaned must be run on admin DB.
+ var shardFooDB = st.shard0.getDB(dbName);
+ assert.commandFailed(shardFooDB.runCommand({cleanupOrphaned: ns}));
+
+ // Must be run on primary.
+ var secondaryAdmin = st.rs0.getSecondary().getDB('admin');
+ var response = secondaryAdmin.runCommand({cleanupOrphaned: ns});
+ print('cleanupOrphaned on secondary:');
+ printjson(response);
+ assert.commandFailed(response);
+
+ var shardAdmin = st.shard0.getDB('admin');
+ var badNS = ' \\/."*<>:|?';
+ assert.commandFailed(shardAdmin.runCommand({cleanupOrphaned: badNS}));
+
+ // cleanupOrphaned works on sharded collection.
+ assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()}));
+
+ st.ensurePrimaryShard(coll.getDB().getName(), shards[0]._id);
+
+ assert.commandWorked(mongosAdmin.runCommand({shardCollection: ns, key: {_id: 1}}));
+
+ assert.commandWorked(shardAdmin.runCommand({cleanupOrphaned: ns}));
+
+ /*****************************************************************************
+ * Empty shard.
+ ****************************************************************************/
+
+ // Ping shard[1] so it will be aware that it is sharded. Otherwise cleanupOrphaned
+ // may fail.
+ assert.commandWorked(mongosAdmin.runCommand(
+ {moveChunk: coll.getFullName(), find: {_id: 1}, to: shards[1]._id}));
+
+ assert.commandWorked(mongosAdmin.runCommand(
+ {moveChunk: coll.getFullName(), find: {_id: 1}, to: shards[0]._id}));
+
+ // Collection's home is shard0, there are no chunks assigned to shard1.
+ st.shard1.getCollection(ns).insert({});
+ assert.eq(null, st.shard1.getDB(dbName).getLastError());
+ assert.eq(1, st.shard1.getCollection(ns).count());
+ response = st.shard1.getDB('admin').runCommand({cleanupOrphaned: ns});
+ assert.commandWorked(response);
+ assert.eq({_id: {$maxKey: 1}}, response.stoppedAtKey);
+ assert.eq(0,
+ st.shard1.getCollection(ns).count(),
+ "cleanupOrphaned didn't delete orphan on empty shard.");
+
+ /*****************************************************************************
+ * Bad startingFromKeys.
+ ****************************************************************************/
+
+ // startingFromKey of MaxKey.
+ response = shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: {_id: MaxKey}});
+ assert.commandWorked(response);
+ assert.eq(null, response.stoppedAtKey);
+
+ // startingFromKey doesn't match number of fields in shard key.
+ assert.commandFailed(shardAdmin.runCommand(
+ {cleanupOrphaned: ns, startingFromKey: {someKey: 'someValue', someOtherKey: 1}}));
+
+ // startingFromKey matches number of fields in shard key but not field names.
+ assert.commandFailed(
+ shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: {someKey: 'someValue'}}));
+
+ var coll2 = mongos.getCollection('foo.baz');
+
+ assert.commandWorked(
+ mongosAdmin.runCommand({shardCollection: coll2.getFullName(), key: {a: 1, b: 1}}));
+
+ // startingFromKey doesn't match number of fields in shard key.
+ assert.commandFailed(shardAdmin.runCommand(
+ {cleanupOrphaned: coll2.getFullName(), startingFromKey: {someKey: 'someValue'}}));
+
+ // startingFromKey matches number of fields in shard key but not field names.
+ assert.commandFailed(shardAdmin.runCommand(
+ {cleanupOrphaned: coll2.getFullName(), startingFromKey: {a: 'someValue', c: 1}}));
+
+ st.stop();
})();
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
index 1948a4bed5c..db8b6d22010 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
@@ -1,162 +1,150 @@
//
// Tests cleanupOrphaned concurrent with moveChunk.
-// Inserts orphan documents to the donor and recipient shards during the moveChunk and
+// Inserts orphan documents to the donor and recipient shards during the moveChunk and
// verifies that cleanupOrphaned removes orphans.
//
load('./jstests/libs/chunk_manipulation_util.js');
load('./jstests/libs/cleanup_orphaned_util.js');
-(function() {
-"use strict";
-
-var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-var st = new ShardingTest({shards: 2, other: { separateConfig: true }});
-
-var mongos = st.s0,
- admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
- dbName = 'foo',
- ns = dbName + '.bar',
- coll = mongos.getCollection(ns),
- donor = st.shard0,
- recipient = st.shard1,
- donorColl = donor.getCollection(ns),
- recipientColl = st.shard1.getCollection(ns);
-
-// Three chunks of 10 documents each, with ids -20, -18, -16, ..., 38.
-// Donor: [minKey, 0) [0, 20)
-// Recipient: [20, maxKey)
-assert.commandWorked( admin.runCommand({enableSharding: dbName}) );
-printjson( admin.runCommand({movePrimary: dbName, to: shards[0]._id}) );
-assert.commandWorked( admin.runCommand({shardCollection: ns, key: {_id: 1}}) );
-assert.commandWorked( admin.runCommand({split: ns, middle: {_id: 0}}) );
-assert.commandWorked( admin.runCommand({split: ns, middle: {_id: 20}}) );
-assert.commandWorked( admin.runCommand({moveChunk: ns,
- find: {_id: 20},
- to: shards[1]._id,
- _waitForDelete: true}) );
-
-jsTest.log('Inserting 40 docs into shard 0....');
-for (var i = -20; i < 20; i += 2) coll.insert({_id: i});
-assert.eq(null, coll.getDB().getLastError());
-assert.eq(20, donorColl.count());
-
-jsTest.log('Inserting 25 docs into shard 1....');
-for (i = 20; i < 40; i += 2) coll.insert({_id: i});
-assert.eq(null, coll.getDB().getLastError());
-assert.eq(10, recipientColl.count());
-
-//
-// Start a moveChunk in the background. Move chunk [0, 20), which has 10 docs,
-// from shard 0 to shard 1. Pause it at some points in the donor's and
-// recipient's work flows, and test cleanupOrphaned on shard 0 and shard 1.
-//
-
-jsTest.log('setting failpoint startedMoveChunk');
-pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
-pauseMigrateAtStep(recipient, migrateStepNames.cloned);
-var joinMoveChunk = moveChunkParallel(
- staticMongod,
- st.s0.host,
- {_id: 0},
- null,
- coll.getFullName(),
- shards[1]._id);
-
-waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
-waitForMigrateStep(recipient, migrateStepNames.cloned);
-// Recipient has run _recvChunkStart and begun its migration thread; docs have
-// been cloned and chunk [0, 20) is noted as 'pending' on recipient.
-
-// Donor: [minKey, 0) [0, 20)
-// Recipient (pending): [0, 20)
-// Recipient: [20, maxKey)
-
-// Create orphans. I'll show an orphaned doc on donor with _id 26 like {26}:
-//
-// Donor: [minKey, 0) [0, 20) {26}
-// Recipient (pending): [0, 20)
-// Recipient: {-1} [20, maxKey)
-donorColl.insert([{_id: 26}]);
-assert.eq(null, donorColl.getDB().getLastError());
-assert.eq(21, donorColl.count());
-recipientColl.insert([{_id: -1}]);
-assert.eq(null, recipientColl.getDB().getLastError());
-assert.eq(21, recipientColl.count());
-
-cleanupOrphaned(donor, ns, 2);
-assert.eq(20, donorColl.count());
-cleanupOrphaned(recipient, ns, 2);
-assert.eq(20, recipientColl.count());
-
-jsTest.log('Inserting document on donor side');
-// Inserted a new document (not an orphan) with id 19, which belongs in the
-// [0, 20) chunk.
-donorColl.insert({_id: 19});
-assert.eq(null, coll.getDB().getLastError());
-assert.eq(21, donorColl.count());
-
-// Recipient transfers this modification.
-jsTest.log('Let migrate proceed to transferredMods');
-pauseMigrateAtStep(recipient, migrateStepNames.catchup);
-unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
-waitForMigrateStep(recipient, migrateStepNames.catchup);
-jsTest.log('Done letting migrate proceed to transferredMods');
-
-assert.eq(
- 21, recipientColl.count(),
- "Recipient didn't transfer inserted document.");
-
-cleanupOrphaned(donor, ns, 2);
-assert.eq(21, donorColl.count());
-cleanupOrphaned(recipient, ns, 2);
-assert.eq(21, recipientColl.count());
-
-// Create orphans.
-donorColl.insert([{_id: 26}]);
-assert.eq(null, donorColl.getDB().getLastError());
-assert.eq(22, donorColl.count());
-recipientColl.insert([{_id: -1}]);
-assert.eq(null, recipientColl.getDB().getLastError());
-assert.eq(22, recipientColl.count());
-
-cleanupOrphaned(donor, ns, 2);
-assert.eq(21, donorColl.count());
-cleanupOrphaned(recipient, ns, 2);
-assert.eq(21, recipientColl.count());
-
-// Recipient has been waiting for donor to call _recvChunkCommit.
-pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
-unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
-proceedToMigrateStep(recipient, migrateStepNames.steady);
-proceedToMigrateStep(recipient, migrateStepNames.done);
-
-// Create orphans.
-donorColl.insert([{_id: 26}]);
-assert.eq(null, donorColl.getDB().getLastError());
-assert.eq(22, donorColl.count());
-recipientColl.insert([{_id: -1}]);
-assert.eq(null, recipientColl.getDB().getLastError());
-assert.eq(22, recipientColl.count());
-
-// cleanupOrphaned should still fail on donor, but should work on the recipient
-cleanupOrphaned(donor, ns, 2);
-assert.eq(10, donorColl.count());
-cleanupOrphaned(recipient, ns, 2);
-assert.eq(21, recipientColl.count());
-
-// Let migration thread complete.
-unpauseMigrateAtStep(recipient, migrateStepNames.done);
-unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
-joinMoveChunk();
-
-// Donor has finished post-move delete.
-cleanupOrphaned(donor, ns, 2); // this is necessary for the count to not be 11
-assert.eq(10, donorColl.count());
-assert.eq(21, recipientColl.count());
-assert.eq(31, coll.count());
-
-st.stop();
+(function() {
+ "use strict";
+
+ var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+ var st = new ShardingTest({shards: 2, other: {separateConfig: true}});
+
+ var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(), dbName = 'foo',
+ ns = dbName + '.bar', coll = mongos.getCollection(ns), donor = st.shard0,
+ recipient = st.shard1, donorColl = donor.getCollection(ns),
+ recipientColl = st.shard1.getCollection(ns);
+
+ // Three chunks of 10 documents each, with ids -20, -18, -16, ..., 38.
+ // Donor: [minKey, 0) [0, 20)
+ // Recipient: [20, maxKey)
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ printjson(admin.runCommand({movePrimary: dbName, to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 20}}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 20}, to: shards[1]._id, _waitForDelete: true}));
+
+ jsTest.log('Inserting 40 docs into shard 0....');
+ for (var i = -20; i < 20; i += 2)
+ coll.insert({_id: i});
+ assert.eq(null, coll.getDB().getLastError());
+ assert.eq(20, donorColl.count());
+
+ jsTest.log('Inserting 25 docs into shard 1....');
+ for (i = 20; i < 40; i += 2)
+ coll.insert({_id: i});
+ assert.eq(null, coll.getDB().getLastError());
+ assert.eq(10, recipientColl.count());
+
+ //
+ // Start a moveChunk in the background. Move chunk [0, 20), which has 10 docs,
+ // from shard 0 to shard 1. Pause it at some points in the donor's and
+ // recipient's work flows, and test cleanupOrphaned on shard 0 and shard 1.
+ //
+
+ jsTest.log('setting failpoint startedMoveChunk');
+ pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+ pauseMigrateAtStep(recipient, migrateStepNames.cloned);
+ var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {_id: 0}, null, coll.getFullName(), shards[1]._id);
+
+ waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
+ waitForMigrateStep(recipient, migrateStepNames.cloned);
+ // Recipient has run _recvChunkStart and begun its migration thread; docs have
+ // been cloned and chunk [0, 20) is noted as 'pending' on recipient.
+
+ // Donor: [minKey, 0) [0, 20)
+ // Recipient (pending): [0, 20)
+ // Recipient: [20, maxKey)
+
+ // Create orphans. I'll show an orphaned doc on donor with _id 26 like {26}:
+ //
+ // Donor: [minKey, 0) [0, 20) {26}
+ // Recipient (pending): [0, 20)
+ // Recipient: {-1} [20, maxKey)
+ donorColl.insert([{_id: 26}]);
+ assert.eq(null, donorColl.getDB().getLastError());
+ assert.eq(21, donorColl.count());
+ recipientColl.insert([{_id: -1}]);
+ assert.eq(null, recipientColl.getDB().getLastError());
+ assert.eq(21, recipientColl.count());
+
+ cleanupOrphaned(donor, ns, 2);
+ assert.eq(20, donorColl.count());
+ cleanupOrphaned(recipient, ns, 2);
+ assert.eq(20, recipientColl.count());
+
+ jsTest.log('Inserting document on donor side');
+ // Inserted a new document (not an orphan) with id 19, which belongs in the
+ // [0, 20) chunk.
+ donorColl.insert({_id: 19});
+ assert.eq(null, coll.getDB().getLastError());
+ assert.eq(21, donorColl.count());
+
+ // Recipient transfers this modification.
+ jsTest.log('Let migrate proceed to transferredMods');
+ pauseMigrateAtStep(recipient, migrateStepNames.catchup);
+ unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
+ waitForMigrateStep(recipient, migrateStepNames.catchup);
+ jsTest.log('Done letting migrate proceed to transferredMods');
+
+ assert.eq(21, recipientColl.count(), "Recipient didn't transfer inserted document.");
+
+ cleanupOrphaned(donor, ns, 2);
+ assert.eq(21, donorColl.count());
+ cleanupOrphaned(recipient, ns, 2);
+ assert.eq(21, recipientColl.count());
+
+ // Create orphans.
+ donorColl.insert([{_id: 26}]);
+ assert.eq(null, donorColl.getDB().getLastError());
+ assert.eq(22, donorColl.count());
+ recipientColl.insert([{_id: -1}]);
+ assert.eq(null, recipientColl.getDB().getLastError());
+ assert.eq(22, recipientColl.count());
+
+ cleanupOrphaned(donor, ns, 2);
+ assert.eq(21, donorColl.count());
+ cleanupOrphaned(recipient, ns, 2);
+ assert.eq(21, recipientColl.count());
+
+ // Recipient has been waiting for donor to call _recvChunkCommit.
+ pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+ unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+ proceedToMigrateStep(recipient, migrateStepNames.steady);
+ proceedToMigrateStep(recipient, migrateStepNames.done);
+
+ // Create orphans.
+ donorColl.insert([{_id: 26}]);
+ assert.eq(null, donorColl.getDB().getLastError());
+ assert.eq(22, donorColl.count());
+ recipientColl.insert([{_id: -1}]);
+ assert.eq(null, recipientColl.getDB().getLastError());
+ assert.eq(22, recipientColl.count());
+
+ // cleanupOrphaned should still fail on donor, but should work on the recipient
+ cleanupOrphaned(donor, ns, 2);
+ assert.eq(10, donorColl.count());
+ cleanupOrphaned(recipient, ns, 2);
+ assert.eq(21, recipientColl.count());
+
+ // Let migration thread complete.
+ unpauseMigrateAtStep(recipient, migrateStepNames.done);
+ unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+ joinMoveChunk();
+
+ // Donor has finished post-move delete.
+ cleanupOrphaned(donor, ns, 2); // this is necessary for the count to not be 11
+ assert.eq(10, donorColl.count());
+ assert.eq(21, recipientColl.count());
+ assert.eq(31, coll.count());
+
+ st.stop();
})();
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
index d4d523bbd06..58ea9e806fd 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
@@ -1,129 +1,124 @@
//
//
// Tests cleanupOrphaned concurrent with moveChunk with a hashed shard key.
-// Inserts orphan documents to the donor and recipient shards during the moveChunk and
+// Inserts orphan documents to the donor and recipient shards during the moveChunk and
// verifies that cleanupOrphaned removes orphans.
//
load('./jstests/libs/chunk_manipulation_util.js');
load('./jstests/libs/cleanup_orphaned_util.js');
-(function() {
-"use strict";
-
-var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-var st = new ShardingTest({ shards: 2, other: { separateConfig: true } });
-
-var mongos = st.s0,
- admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
- dbName = 'foo',
- ns = dbName + '.bar',
- coll = mongos.getCollection(ns);
-
-assert.commandWorked( admin.runCommand({enableSharding: dbName}) );
-printjson(admin.runCommand({movePrimary: dbName, to: shards[0]._id}));
-assert.commandWorked( admin.runCommand({shardCollection: ns, key: {key: 'hashed'}}) );
-
-// Makes four chunks by default, two on each shard.
-var chunks = st.config.chunks.find().sort({min: 1}).toArray();
-assert.eq(4, chunks.length);
-
-var chunkWithDoc = chunks[1];
-print('Trying to make doc that hashes to this chunk: '
- + tojson(chunkWithDoc));
-
-var found = false;
-for (var i = 0; i < 10000; i++) {
- var doc = {key: ObjectId()},
- hash = mongos.adminCommand({_hashBSONElement: doc.key}).out;
-
- print('doc.key ' + doc.key + ' hashes to ' + hash);
-
- if (mongos.getCollection('config.chunks').findOne({
- _id: chunkWithDoc._id,
- 'min.key': {$lte: hash},
- 'max.key': {$gt: hash}
- })) {
- found = true;
- break;
- }
-}
+(function() {
+ "use strict";
-assert(found, "Couldn't make doc that belongs to chunk 1.");
-print('Doc: ' + tojson(doc));
-coll.insert(doc);
-assert.eq(null, coll.getDB().getLastError());
+ var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+ var st = new ShardingTest({shards: 2, other: {separateConfig: true}});
-//
-// Start a moveChunk in the background from shard 0 to shard 1. Pause it at
-// some points in the donor's and recipient's work flows, and test
-// cleanupOrphaned.
-//
+ var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(), dbName = 'foo',
+ ns = dbName + '.bar', coll = mongos.getCollection(ns);
+
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ printjson(admin.runCommand({movePrimary: dbName, to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({shardCollection: ns, key: {key: 'hashed'}}));
+
+ // Makes four chunks by default, two on each shard.
+ var chunks = st.config.chunks.find().sort({min: 1}).toArray();
+ assert.eq(4, chunks.length);
+
+ var chunkWithDoc = chunks[1];
+ print('Trying to make doc that hashes to this chunk: ' + tojson(chunkWithDoc));
+
+ var found = false;
+ for (var i = 0; i < 10000; i++) {
+ var doc =
+ {
+ key: ObjectId()
+ },
+ hash = mongos.adminCommand({_hashBSONElement: doc.key}).out;
+
+ print('doc.key ' + doc.key + ' hashes to ' + hash);
+
+ if (mongos.getCollection('config.chunks')
+ .findOne(
+ {_id: chunkWithDoc._id, 'min.key': {$lte: hash}, 'max.key': {$gt: hash}})) {
+ found = true;
+ break;
+ }
+ }
+
+ assert(found, "Couldn't make doc that belongs to chunk 1.");
+ print('Doc: ' + tojson(doc));
+ coll.insert(doc);
+ assert.eq(null, coll.getDB().getLastError());
+
+ //
+ // Start a moveChunk in the background from shard 0 to shard 1. Pause it at
+ // some points in the donor's and recipient's work flows, and test
+ // cleanupOrphaned.
+ //
+
+ var donor, recip;
+ if (chunkWithDoc.shard == st.shard0.shardName) {
+ donor = st.shard0;
+ recip = st.shard1;
+ } else {
+ recip = st.shard0;
+ donor = st.shard1;
+ }
-var donor, recip;
-if (chunkWithDoc.shard == st.shard0.shardName) {
- donor = st.shard0;
- recip = st.shard1;
-} else {
- recip = st.shard0;
- donor = st.shard1;
-}
-
-jsTest.log('setting failpoint startedMoveChunk');
-pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
-pauseMigrateAtStep(recip, migrateStepNames.cloned);
-
-var joinMoveChunk = moveChunkParallel(
- staticMongod,
- st.s0.host,
- null,
- [chunkWithDoc.min, chunkWithDoc.max], // bounds
- coll.getFullName(),
- recip.shardName);
-
-waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
-waitForMigrateStep(recip, migrateStepNames.cloned);
-proceedToMigrateStep(recip, migrateStepNames.catchup);
-// recipient has run _recvChunkStart and begun its migration thread;
-// 'doc' has been cloned and chunkWithDoc is noted as 'pending' on recipient.
-
-var donorColl = donor.getCollection(ns),
- recipColl = recip.getCollection(ns);
-
-assert.eq(1, donorColl.count());
-assert.eq(1, recipColl.count());
-
-// cleanupOrphaned should go through two iterations, since the default chunk
-// setup leaves two unowned ranges on each shard.
-cleanupOrphaned(donor, ns, 2);
-cleanupOrphaned(recip, ns, 2);
-assert.eq(1, donorColl.count());
-assert.eq(1, recipColl.count());
-
-// recip has been waiting for donor to call _recvChunkCommit.
-pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
-unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
-proceedToMigrateStep(recip, migrateStepNames.steady);
-proceedToMigrateStep(recip, migrateStepNames.done);
-
-// cleanupOrphaned removes migrated data from donor. The donor would
-// otherwise clean them up itself, in the post-move delete phase.
-cleanupOrphaned(donor, ns, 2);
-assert.eq(0, donorColl.count());
-cleanupOrphaned(recip, ns, 2);
-assert.eq(1, recipColl.count());
-
-// Let migration thread complete.
-unpauseMigrateAtStep(recip, migrateStepNames.done);
-unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
-joinMoveChunk();
-
-// donor has finished post-move delete.
-assert.eq(0, donorColl.count());
-assert.eq(1, recipColl.count());
-assert.eq(1, coll.count());
-
-st.stop();
+ jsTest.log('setting failpoint startedMoveChunk');
+ pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+ pauseMigrateAtStep(recip, migrateStepNames.cloned);
+
+ var joinMoveChunk = moveChunkParallel(staticMongod,
+ st.s0.host,
+ null,
+ [chunkWithDoc.min, chunkWithDoc.max], // bounds
+ coll.getFullName(),
+ recip.shardName);
+
+ waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
+ waitForMigrateStep(recip, migrateStepNames.cloned);
+ proceedToMigrateStep(recip, migrateStepNames.catchup);
+ // recipient has run _recvChunkStart and begun its migration thread;
+ // 'doc' has been cloned and chunkWithDoc is noted as 'pending' on recipient.
+
+ var donorColl = donor.getCollection(ns), recipColl = recip.getCollection(ns);
+
+ assert.eq(1, donorColl.count());
+ assert.eq(1, recipColl.count());
+
+ // cleanupOrphaned should go through two iterations, since the default chunk
+ // setup leaves two unowned ranges on each shard.
+ cleanupOrphaned(donor, ns, 2);
+ cleanupOrphaned(recip, ns, 2);
+ assert.eq(1, donorColl.count());
+ assert.eq(1, recipColl.count());
+
+ // recip has been waiting for donor to call _recvChunkCommit.
+ pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+ unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+ proceedToMigrateStep(recip, migrateStepNames.steady);
+ proceedToMigrateStep(recip, migrateStepNames.done);
+
+ // cleanupOrphaned removes migrated data from donor. The donor would
+ // otherwise clean them up itself, in the post-move delete phase.
+ cleanupOrphaned(donor, ns, 2);
+ assert.eq(0, donorColl.count());
+ cleanupOrphaned(recip, ns, 2);
+ assert.eq(1, recipColl.count());
+
+ // Let migration thread complete.
+ unpauseMigrateAtStep(recip, migrateStepNames.done);
+ unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+ joinMoveChunk();
+
+ // donor has finished post-move delete.
+ assert.eq(0, donorColl.count());
+ assert.eq(1, recipColl.count());
+ assert.eq(1, coll.count());
+
+ st.stop();
})();
diff --git a/jstests/sharding/cleanup_orphaned_cmd_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
index 00294087885..48a08cd43f6 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
@@ -2,71 +2,78 @@
// Tests cleanup of orphaned data in hashed sharded coll via the orphaned data cleanup command
//
-(function() {
-"use strict";
-
-var st = new ShardingTest({ shards : 2, mongos : 1, other : { shardOptions : { verbose : 2 } } });
-
-var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
-
-assert.commandWorked( admin.runCommand({ enableSharding : coll.getDB() + "" }) );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-assert.commandWorked( admin.runCommand({ shardCollection : coll + "", key : { _id : "hashed" } }) );
-
-// Create two orphaned data holes, one bounded by min or max on each shard
-
-assert.commandWorked( admin.runCommand({ split : coll + "", middle : { _id : NumberLong(-100) } }) );
-assert.commandWorked( admin.runCommand({ split : coll + "", middle : { _id : NumberLong(-50) } }) );
-assert.commandWorked( admin.runCommand({ split : coll + "", middle : { _id : NumberLong(50) } }) );
-assert.commandWorked( admin.runCommand({ split : coll + "", middle : { _id : NumberLong(100) } }) );
-assert.commandWorked( admin.runCommand({ moveChunk : coll + "", bounds : [{ _id : NumberLong(-100) },
- { _id : NumberLong(-50) }],
- to : shards[1]._id,
- _waitForDelete : true }) );
-assert.commandWorked( admin.runCommand({ moveChunk : coll + "", bounds : [{ _id : NumberLong(50) },
- { _id : NumberLong(100) }],
- to : shards[0]._id,
- _waitForDelete : true }) );
-st.printShardingStatus();
-
-jsTest.log( "Inserting some docs on each shard, so 1/2 will be orphaned..." );
-
-for ( var s = 0; s < 2; s++ ) {
- var shardColl = ( s == 0 ? st.shard0 : st.shard1 ).getCollection( coll + "" );
- var bulk = shardColl.initializeUnorderedBulkOp();
- for ( var i = 0; i < 100; i++ ) bulk.insert({ _id : i });
- assert.writeOK(bulk.execute());
-}
-
-assert.eq( 200, st.shard0.getCollection( coll + "" ).find().itcount() +
- st.shard1.getCollection( coll + "" ).find().itcount() );
-assert.eq( 100, coll.find().itcount() );
-
-jsTest.log( "Cleaning up orphaned data in hashed coll..." );
-
-for ( var s = 0; s < 2; s++ ) {
- var shardAdmin = ( s == 0 ? st.shard0 : st.shard1 ).getDB( "admin" );
-
- var result = shardAdmin.runCommand({ cleanupOrphaned : coll + "" });
- while ( result.ok && result.stoppedAtKey ) {
- printjson( result );
- result = shardAdmin.runCommand({ cleanupOrphaned : coll + "",
- startingFromKey : result.stoppedAtKey });
+(function() {
+ "use strict";
+
+ var st = new ShardingTest({shards: 2, mongos: 1, other: {shardOptions: {verbose: 2}}});
+
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
+ var coll = mongos.getCollection("foo.bar");
+
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: "hashed"}}));
+
+ // Create two orphaned data holes, one bounded by min or max on each shard
+
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-100)}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-50)}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(50)}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(100)}}));
+ assert.commandWorked(admin.runCommand({
+ moveChunk: coll + "",
+ bounds: [{_id: NumberLong(-100)}, {_id: NumberLong(-50)}],
+ to: shards[1]._id,
+ _waitForDelete: true
+ }));
+ assert.commandWorked(admin.runCommand({
+ moveChunk: coll + "",
+ bounds: [{_id: NumberLong(50)}, {_id: NumberLong(100)}],
+ to: shards[0]._id,
+ _waitForDelete: true
+ }));
+ st.printShardingStatus();
+
+ jsTest.log("Inserting some docs on each shard, so 1/2 will be orphaned...");
+
+ for (var s = 0; s < 2; s++) {
+ var shardColl = (s == 0 ? st.shard0 : st.shard1).getCollection(coll + "");
+ var bulk = shardColl.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++)
+ bulk.insert({_id: i});
+ assert.writeOK(bulk.execute());
}
-
- printjson( result );
- assert( result.ok );
-}
-assert.eq( 100, st.shard0.getCollection( coll + "" ).find().itcount() +
- st.shard1.getCollection( coll + "" ).find().itcount() );
-assert.eq( 100, coll.find().itcount() );
+ assert.eq(200,
+ st.shard0.getCollection(coll + "").find().itcount() +
+ st.shard1.getCollection(coll + "").find().itcount());
+ assert.eq(100, coll.find().itcount());
-jsTest.log( "DONE!" );
+ jsTest.log("Cleaning up orphaned data in hashed coll...");
-st.stop();
+ for (var s = 0; s < 2; s++) {
+ var shardAdmin = (s == 0 ? st.shard0 : st.shard1).getDB("admin");
+
+ var result = shardAdmin.runCommand({cleanupOrphaned: coll + ""});
+ while (result.ok && result.stoppedAtKey) {
+ printjson(result);
+ result = shardAdmin.runCommand(
+ {cleanupOrphaned: coll + "", startingFromKey: result.stoppedAtKey});
+ }
+
+ printjson(result);
+ assert(result.ok);
+ }
+
+ assert.eq(100,
+ st.shard0.getCollection(coll + "").find().itcount() +
+ st.shard1.getCollection(coll + "").find().itcount());
+ assert.eq(100, coll.find().itcount());
+
+ jsTest.log("DONE!");
+
+ st.stop();
})();
diff --git a/jstests/sharding/cleanup_orphaned_cmd_prereload.js b/jstests/sharding/cleanup_orphaned_cmd_prereload.js
index 3dfc68ca9a3..7155baea970 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_prereload.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_prereload.js
@@ -2,94 +2,90 @@
// Tests failed cleanup of orphaned data when we have pending chunks
//
-var st = new ShardingTest({ shards: 2 });
+var st = new ShardingTest({shards: 2});
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB("admin");
+var shards = mongos.getCollection("config.shards").find().toArray();
+var coll = mongos.getCollection("foo.bar");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
-jsTest.log( "Moving some chunks to shard1..." );
+jsTest.log("Moving some chunks to shard1...");
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 1 } }).ok );
+assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {_id: 1}}).ok);
-assert( admin.runCommand({ moveChunk : coll + "",
- find : { _id : 0 },
- to : shards[1]._id,
- _waitForDelete : true }).ok );
-assert( admin.runCommand({ moveChunk : coll + "",
- find : { _id : 1 },
- to : shards[1]._id,
- _waitForDelete : true }).ok );
+assert(admin.runCommand(
+ {moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id, _waitForDelete: true})
+ .ok);
+assert(admin.runCommand(
+ {moveChunk: coll + "", find: {_id: 1}, to: shards[1]._id, _waitForDelete: true})
+ .ok);
-var metadata = st.shard1.getDB( "admin" )
- .runCommand({ getShardVersion : coll + "", fullMetadata : true }).metadata;
+var metadata =
+ st.shard1.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
-printjson( metadata );
+printjson(metadata);
-assert.eq( metadata.pending[0][0]._id, 1 );
-assert.eq( metadata.pending[0][1]._id, MaxKey );
+assert.eq(metadata.pending[0][0]._id, 1);
+assert.eq(metadata.pending[0][1]._id, MaxKey);
-jsTest.log( "Ensuring we won't remove orphaned data in pending chunk..." );
+jsTest.log("Ensuring we won't remove orphaned data in pending chunk...");
-assert( !st.shard1.getDB( "admin" )
- .runCommand({ cleanupOrphaned : coll + "", startingFromKey : { _id : 1 } }).stoppedAtKey );
+assert(!st.shard1.getDB("admin")
+ .runCommand({cleanupOrphaned: coll + "", startingFromKey: {_id: 1}})
+ .stoppedAtKey);
-jsTest.log( "Moving some chunks back to shard0 after empty..." );
+jsTest.log("Moving some chunks back to shard0 after empty...");
-admin.runCommand({ moveChunk : coll + "",
- find : { _id : -1 },
- to : shards[1]._id,
- _waitForDelete : true });
+admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: shards[1]._id, _waitForDelete: true});
-var metadata = st.shard0.getDB( "admin" )
- .runCommand({ getShardVersion : coll + "", fullMetadata : true }).metadata;
+var metadata =
+ st.shard0.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
-printjson( metadata );
+printjson(metadata);
-assert.eq( metadata.shardVersion.t, 0 );
-assert.neq( metadata.collVersion.t, 0 );
-assert.eq( metadata.pending.length, 0 );
+assert.eq(metadata.shardVersion.t, 0);
+assert.neq(metadata.collVersion.t, 0);
+assert.eq(metadata.pending.length, 0);
-assert( admin.runCommand({ moveChunk : coll + "",
- find : { _id : 1 },
- to : shards[0]._id,
- _waitForDelete : true }).ok );
+assert(admin.runCommand(
+ {moveChunk: coll + "", find: {_id: 1}, to: shards[0]._id, _waitForDelete: true})
+ .ok);
-var metadata = st.shard0.getDB( "admin" )
- .runCommand({ getShardVersion : coll + "", fullMetadata : true }).metadata;
+var metadata =
+ st.shard0.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
-printjson( metadata );
-assert.eq( metadata.shardVersion.t, 0 );
-assert.neq( metadata.collVersion.t, 0 );
-assert.eq( metadata.pending[0][0]._id, 1 );
-assert.eq( metadata.pending[0][1]._id, MaxKey );
+printjson(metadata);
+assert.eq(metadata.shardVersion.t, 0);
+assert.neq(metadata.collVersion.t, 0);
+assert.eq(metadata.pending[0][0]._id, 1);
+assert.eq(metadata.pending[0][1]._id, MaxKey);
-jsTest.log( "Ensuring again we won't remove orphaned data in pending chunk..." );
+jsTest.log("Ensuring again we won't remove orphaned data in pending chunk...");
-assert( !st.shard0.getDB( "admin" )
- .runCommand({ cleanupOrphaned : coll + "", startingFromKey : { _id : 1 } }).stoppedAtKey );
+assert(!st.shard0.getDB("admin")
+ .runCommand({cleanupOrphaned: coll + "", startingFromKey: {_id: 1}})
+ .stoppedAtKey);
-jsTest.log( "Checking that pending chunk is promoted on reload..." );
+jsTest.log("Checking that pending chunk is promoted on reload...");
-assert.eq( null, coll.findOne({ _id : 1 }) );
+assert.eq(null, coll.findOne({_id: 1}));
-var metadata = st.shard0.getDB( "admin" )
- .runCommand({ getShardVersion : coll + "", fullMetadata : true }).metadata;
+var metadata =
+ st.shard0.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
-printjson( metadata );
-assert.neq( metadata.shardVersion.t, 0 );
-assert.neq( metadata.collVersion.t, 0 );
-assert.eq( metadata.chunks[0][0]._id, 1 );
-assert.eq( metadata.chunks[0][1]._id, MaxKey );
+printjson(metadata);
+assert.neq(metadata.shardVersion.t, 0);
+assert.neq(metadata.collVersion.t, 0);
+assert.eq(metadata.chunks[0][0]._id, 1);
+assert.eq(metadata.chunks[0][1]._id, MaxKey);
st.printShardingStatus();
-jsTest.log( "DONE!" );
+jsTest.log("DONE!");
st.stop();
diff --git a/jstests/sharding/coll_epoch_test0.js b/jstests/sharding/coll_epoch_test0.js
index d294a7e0998..0ec0a5d3201 100644
--- a/jstests/sharding/coll_epoch_test0.js
+++ b/jstests/sharding/coll_epoch_test0.js
@@ -1,45 +1,51 @@
// Tests whether a split and a migrate in a sharded cluster preserve the epoch
-var st = new ShardingTest( { shards : 2, mongos : 1 } );
+var st = new ShardingTest({shards: 2, mongos: 1});
// Balancer is by default stopped, thus it will not interfere
-var config = st.s.getDB( "config" );
-var admin = st.s.getDB( "admin" );
-var coll = st.s.getCollection( "foo.bar" );
+var config = st.s.getDB("config");
+var admin = st.s.getDB("admin");
+var coll = st.s.getCollection("foo.bar");
// First enable sharding
-admin.runCommand({ enableSharding : coll.getDB() + "" });
+admin.runCommand({enableSharding: coll.getDB() + ""});
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
+admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
-var primary = config.databases.find({ _id : coll.getDB() + "" }).primary;
+var primary = config.databases.find({_id: coll.getDB() + ""}).primary;
var notPrimary = null;
-config.shards.find().forEach( function( doc ){ if( doc._id != primary ) notPrimary = doc._id; } );
+config.shards.find().forEach(function(doc) {
+ if (doc._id != primary)
+ notPrimary = doc._id;
+});
var createdEpoch = null;
-var checkEpochs = function(){
- config.chunks.find({ ns : coll + "" }).forEach( function( chunk ){
-
- // Make sure the epochs exist, are non-zero, and are consistent
- assert( chunk.lastmodEpoch );
- print( chunk.lastmodEpoch + "" );
- assert.neq( chunk.lastmodEpoch + "", "000000000000000000000000" );
- if( createdEpoch == null ) createdEpoch = chunk.lastmodEpoch;
- else assert.eq( createdEpoch, chunk.lastmodEpoch );
-
- });
-};
+var checkEpochs = function() {
+ config.chunks.find({ns: coll + ""})
+ .forEach(function(chunk) {
+
+ // Make sure the epochs exist, are non-zero, and are consistent
+ assert(chunk.lastmodEpoch);
+ print(chunk.lastmodEpoch + "");
+ assert.neq(chunk.lastmodEpoch + "", "000000000000000000000000");
+ if (createdEpoch == null)
+ createdEpoch = chunk.lastmodEpoch;
+ else
+ assert.eq(createdEpoch, chunk.lastmodEpoch);
+
+ });
+};
checkEpochs();
// Now do a split
-printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) );
+printjson(admin.runCommand({split: coll + "", middle: {_id: 0}}));
// Check all the chunks for epochs
checkEpochs();
// Now do a migrate
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : notPrimary }) );
+printjson(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: notPrimary}));
// Check all the chunks for epochs
checkEpochs();
diff --git a/jstests/sharding/coll_epoch_test1.js b/jstests/sharding/coll_epoch_test1.js
index fa24a035da7..28962732c25 100644
--- a/jstests/sharding/coll_epoch_test1.js
+++ b/jstests/sharding/coll_epoch_test1.js
@@ -1,6 +1,7 @@
-// Tests various cases of dropping and recreating collections in the same namespace with multiple mongoses
+// Tests various cases of dropping and recreating collections in the same namespace with multiple
+// mongoses
-var st = new ShardingTest({ shards : 3, mongos : 3, verbose : 1 });
+var st = new ShardingTest({shards: 3, mongos: 3, verbose: 1});
// Balancer is by default stopped, thus it will not interfere
// Use separate mongoses for admin, inserting data, and validating results, so no
@@ -8,15 +9,15 @@ var st = new ShardingTest({ shards : 3, mongos : 3, verbose : 1 });
var insertMongos = st.s2;
var staleMongos = st.s1;
-var config = st.s.getDB( "config" );
-var admin = st.s.getDB( "admin" );
-var coll = st.s.getCollection( "foo.bar" );
+var config = st.s.getDB("config");
+var admin = st.s.getDB("admin");
+var coll = st.s.getCollection("foo.bar");
-insertMongos.getDB( "admin" ).runCommand({ setParameter : 1, traceExceptions : true });
+insertMongos.getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
var shards = {};
-config.shards.find().forEach( function( doc ){
- shards[ doc._id ] = new Mongo( doc.host );
+config.shards.find().forEach(function(doc) {
+ shards[doc._id] = new Mongo(doc.host);
});
//
@@ -24,53 +25,54 @@ config.shards.find().forEach( function( doc ){
// in the background
//
-jsTest.log( "Enabling sharding for the first time..." );
+jsTest.log("Enabling sharding for the first time...");
-admin.runCommand({ enableSharding : coll.getDB() + "" });
+admin.runCommand({enableSharding: coll.getDB() + ""});
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
+admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
-var bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ ) {
- bulk.insert({ _id : i, test : "a" });
+var bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({_id: i, test: "a"});
}
-assert.writeOK( bulk.execute() );
-assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "a" }).itcount() );
+assert.writeOK(bulk.execute());
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "a"}).itcount());
coll.drop();
//
-// Test that inserts and queries go to the correct shard even when the collection has been
+// Test that inserts and queries go to the correct shard even when the collection has been
// re-sharded in the background
//
-jsTest.log( "Re-enabling sharding with a different key..." );
+jsTest.log("Re-enabling sharding with a different key...");
-admin.runCommand({ enableSharding : coll.getDB() + "" });
+admin.runCommand({enableSharding: coll.getDB() + ""});
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-coll.ensureIndex({ notId : 1 });
-admin.runCommand({ shardCollection : coll + "", key : { notId : 1 } });
+coll.ensureIndex({notId: 1});
+admin.runCommand({shardCollection: coll + "", key: {notId: 1}});
-bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ ) {
- bulk.insert({ notId : i, test : "b" });
+bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({notId: i, test: "b"});
}
-assert.writeOK( bulk.execute() );
-assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "b" }).itcount() );
-assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a" ] } }).itcount() );
+assert.writeOK(bulk.execute());
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "b"}).itcount());
+assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a"]}}).itcount());
coll.drop();
//
-// Test that inserts and queries go to the correct shard even when the collection has been
+// Test that inserts and queries go to the correct shard even when the collection has been
// unsharded and moved to a different primary
//
-jsTest.log( "Re-creating unsharded collection from a sharded collection on different primary..." );
+jsTest.log("Re-creating unsharded collection from a sharded collection on different primary...");
-var getOtherShard = function( shard ){
- for( id in shards ){
- if( id != shard ) return id;
+var getOtherShard = function(shard) {
+ for (id in shards) {
+ if (id != shard)
+ return id;
}
};
@@ -81,40 +83,42 @@ if (st.configRS) {
// the most recent config data.
st.configRS.awaitLastOpCommitted();
}
-jsTest.log( "moved primary..." );
+jsTest.log("moved primary...");
-bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ )
- bulk.insert({ test : "c" });
-assert.writeOK( bulk.execute() );
+bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++)
+ bulk.insert({test: "c"});
+assert.writeOK(bulk.execute());
-assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "c" }).itcount() );
-assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a", "b" ] } }).itcount() );
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "c"}).itcount());
+assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a", "b"]}}).itcount());
coll.drop();
//
-// Test that inserts and queries go to correct shard even when the collection has been unsharded,
+// Test that inserts and queries go to correct shard even when the collection has been unsharded,
// resharded, and moved to a different primary
//
-jsTest.log( "Re-creating sharded collection with different primary..." );
+jsTest.log("Re-creating sharded collection with different primary...");
-admin.runCommand({ enableSharding : coll.getDB() + "" });
-admin.runCommand({ movePrimary : coll.getDB() + "",
- to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) });
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
+admin.runCommand({enableSharding: coll.getDB() + ""});
+admin.runCommand({
+ movePrimary: coll.getDB() + "",
+ to: getOtherShard(config.databases.findOne({_id: coll.getDB() + ""}).primary)
+});
+admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
-bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ )
- bulk.insert({ test : "d" });
-assert.writeOK( bulk.execute() );
+bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++)
+ bulk.insert({test: "d"});
+assert.writeOK(bulk.execute());
-assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "d" }).itcount() );
-assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a", "b", "c" ] } }).itcount() );
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "d"}).itcount());
+assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a", "b", "c"]}}).itcount());
coll.drop();
-jsTest.log( "Done!" );
+jsTest.log("Done!");
st.stop();
diff --git a/jstests/sharding/coll_epoch_test2.js b/jstests/sharding/coll_epoch_test2.js
index 09109ebce43..dbed610cad6 100644
--- a/jstests/sharding/coll_epoch_test2.js
+++ b/jstests/sharding/coll_epoch_test2.js
@@ -1,10 +1,10 @@
// Tests that resharding a collection is detected correctly by all operation types
-//
+//
// The idea here is that a collection may be resharded / unsharded at any point, and any type of
// operation on a mongos may be active when it happens. All operations should handle gracefully.
//
-var st = new ShardingTest({ shards : 2, mongos : 5, verbose : 1 });
+var st = new ShardingTest({shards: 2, mongos: 5, verbose: 1});
// Balancer is by default stopped, thus it will not interfere
// Use separate mongos for reading, updating, inserting, removing data
@@ -13,40 +13,44 @@ var updateMongos = st.s2;
var insertMongos = st.s3;
var removeMongos = st.s4;
-var config = st.s.getDB( "config" );
-var admin = st.s.getDB( "admin" );
-var coll = st.s.getCollection( "foo.bar" );
+var config = st.s.getDB("config");
+var admin = st.s.getDB("admin");
+var coll = st.s.getCollection("foo.bar");
-insertMongos.getDB( "admin" ).runCommand({ setParameter : 1, traceExceptions : true });
+insertMongos.getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
var shards = {};
-config.shards.find().forEach( function( doc ){
- shards[ doc._id ] = new Mongo( doc.host );
+config.shards.find().forEach(function(doc) {
+ shards[doc._id] = new Mongo(doc.host);
});
//
// Set up a sharded collection
//
-jsTest.log( "Enabling sharding for the first time..." );
+jsTest.log("Enabling sharding for the first time...");
-admin.runCommand({ enableSharding : coll.getDB() + "" });
+admin.runCommand({enableSharding: coll.getDB() + ""});
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
-
-assert.writeOK(coll.insert({ hello : "world" }));
-
-jsTest.log( "Sharding collection across multiple shards..." );
-
-var getOtherShard = function( shard ){
- for( id in shards ){
- if( id != shard ) return id;
+admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
+
+assert.writeOK(coll.insert({hello: "world"}));
+
+jsTest.log("Sharding collection across multiple shards...");
+
+var getOtherShard = function(shard) {
+ for (id in shards) {
+ if (id != shard)
+ return id;
}
};
-
-printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) );
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 },
- to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) }) );
+
+printjson(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+printjson(admin.runCommand({
+ moveChunk: coll + "",
+ find: {_id: 0},
+ to: getOtherShard(config.databases.findOne({_id: coll.getDB() + ""}).primary)
+}));
st.printShardingStatus();
@@ -54,11 +58,11 @@ st.printShardingStatus();
// Force all mongoses to load the current status of the cluster
//
-jsTest.log( "Loading this status in all mongoses..." );
+jsTest.log("Loading this status in all mongoses...");
-for( var i = 0; i < st._mongos.length; i++ ){
- printjson( st._mongos[i].getDB( "admin" ).runCommand({ flushRouterConfig : 1 }) );
- assert.neq( null, st._mongos[i].getCollection( coll + "" ).findOne() );
+for (var i = 0; i < st._mongos.length; i++) {
+ printjson(st._mongos[i].getDB("admin").runCommand({flushRouterConfig: 1}));
+ assert.neq(null, st._mongos[i].getCollection(coll + "").findOne());
}
//
@@ -66,57 +70,60 @@ for( var i = 0; i < st._mongos.length; i++ ){
// versions are the same, but the split is at a different point.
//
-jsTest.log( "Rebuilding sharded collection with different split..." );
+jsTest.log("Rebuilding sharded collection with different split...");
coll.drop();
-var droppedCollDoc = config.collections.findOne({ _id: coll.getFullName() });
+var droppedCollDoc = config.collections.findOne({_id: coll.getFullName()});
assert(droppedCollDoc != null);
assert.eq(true, droppedCollDoc.dropped);
assert(droppedCollDoc.lastmodEpoch != null);
assert(droppedCollDoc.lastmodEpoch.equals(new ObjectId("000000000000000000000000")),
"epoch not zero: " + droppedCollDoc.lastmodEpoch);
-admin.runCommand({ enableSharding : coll.getDB() + "" });
+admin.runCommand({enableSharding: coll.getDB() + ""});
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
+admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ ) bulk.insert({ _id : i });
+for (var i = 0; i < 100; i++)
+ bulk.insert({_id: i});
assert.writeOK(bulk.execute());
-printjson( admin.runCommand({ split : coll + "", middle : { _id : 200 } }) );
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 200 },
- to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) }) );
+printjson(admin.runCommand({split: coll + "", middle: {_id: 200}}));
+printjson(admin.runCommand({
+ moveChunk: coll + "",
+ find: {_id: 200},
+ to: getOtherShard(config.databases.findOne({_id: coll.getDB() + ""}).primary)
+}));
//
// Make sure all operations on mongoses aren't tricked by the change
-//
-
-jsTest.log( "Checking other mongoses for detection of change..." );
+//
+
+jsTest.log("Checking other mongoses for detection of change...");
-jsTest.log( "Checking find..." );
+jsTest.log("Checking find...");
// Ensure that finding an element works when resharding
-assert.neq( null, readMongos.getCollection( coll + "" ).findOne({ _id : 1 }) );
+assert.neq(null, readMongos.getCollection(coll + "").findOne({_id: 1}));
-jsTest.log( "Checking update...");
+jsTest.log("Checking update...");
// Ensure that updating an element finds the right location
-assert.writeOK(updateMongos.getCollection( coll + "" ).update({ _id : 1 },
- { $set : { updated : true } }));
-assert.neq( null, coll.findOne({ updated : true }) );
+assert.writeOK(updateMongos.getCollection(coll + "").update({_id: 1}, {$set: {updated: true}}));
+assert.neq(null, coll.findOne({updated: true}));
-jsTest.log( "Checking insert..." );
+jsTest.log("Checking insert...");
// Ensure that inserting an element finds the right shard
-assert.writeOK(insertMongos.getCollection( coll + "" ).insert({ _id : 101 }));
-assert.neq( null, coll.findOne({ _id : 101 }) );
+assert.writeOK(insertMongos.getCollection(coll + "").insert({_id: 101}));
+assert.neq(null, coll.findOne({_id: 101}));
-jsTest.log( "Checking remove..." );
+jsTest.log("Checking remove...");
// Ensure that removing an element finds the right shard, verified by the mongos doing the sharding
-assert.writeOK(removeMongos.getCollection( coll + "" ).remove({ _id : 2 }));
-assert.eq( null, coll.findOne({ _id : 2 }) );
+assert.writeOK(removeMongos.getCollection(coll + "").remove({_id: 2}));
+assert.eq(null, coll.findOne({_id: 2}));
coll.drop();
-jsTest.log( "Done!" );
+jsTest.log("Done!");
st.stop();
diff --git a/jstests/sharding/conf_server_write_concern.js b/jstests/sharding/conf_server_write_concern.js
index 500061d4ca1..c4e08939548 100644
--- a/jstests/sharding/conf_server_write_concern.js
+++ b/jstests/sharding/conf_server_write_concern.js
@@ -1,24 +1,21 @@
/**
* Test write concern with w parameter when writing directly to the config servers works as expected
*/
-function writeToConfigTest(){
+function writeToConfigTest() {
jsTestLog("Testing data writes to config server with write concern");
- var st = new ShardingTest({ shards: 2 });
- var confDB = st.s.getDB( 'config' );
+ var st = new ShardingTest({shards: 2});
+ var confDB = st.s.getDB('config');
- assert.writeOK(confDB.settings.update({ _id: 'balancer' },
- { $set: { stopped: true }},
- { writeConcern: { w: 'majority' }}));
+ assert.writeOK(confDB.settings.update(
+ {_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 'majority'}}));
// w:1 should still work - it gets automatically upconverted to w:majority
- assert.writeOK(confDB.settings.update({ _id: 'balancer' },
- { $set: { stopped: true }},
- { writeConcern: { w: 1 }}));
+ assert.writeOK(confDB.settings.update(
+ {_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 1}}));
// Write concerns other than w:1 and w:majority should fail.
- assert.writeError(confDB.settings.update({ _id: 'balancer' },
- { $set: { stopped: true }},
- { writeConcern: { w: 2 }}));
+ assert.writeError(confDB.settings.update(
+ {_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 2}}));
st.stop();
}
@@ -27,40 +24,43 @@ function writeToConfigTest(){
* Test write concern with w parameter will not cause an error when writes to mongos
* would trigger writes to config servers (in this test, split chunks is used).
*/
-function configTest(){
+function configTest() {
jsTestLog("Testing metadata writes to config server with write concern");
- var st = new ShardingTest({ shards: 1, rs: true, other: { chunkSize: 1 }});
-
+ var st = new ShardingTest({shards: 1, rs: true, other: {chunkSize: 1}});
+
var mongos = st.s;
- var testDB = mongos.getDB( 'test' );
+ var testDB = mongos.getDB('test');
var coll = testDB.user;
-
- testDB.adminCommand({ enableSharding: testDB.getName() });
- testDB.adminCommand({ shardCollection: coll.getFullName(), key: { x: 1 }});
-
+
+ testDB.adminCommand({enableSharding: testDB.getName()});
+ testDB.adminCommand({shardCollection: coll.getFullName(), key: {x: 1}});
+
var chunkCount = function() {
- return mongos.getDB( 'config' ).chunks.find().count();
+ return mongos.getDB('config').chunks.find().count();
};
-
+
var initChunks = chunkCount();
var currChunks = initChunks;
var gleObj = null;
var x = 0;
- var largeStr = new Array(1024*128).toString();
+ var largeStr = new Array(1024 * 128).toString();
- assert.soon(function() {
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({x: x++, largeStr: largeStr});
- }
- assert.writeOK(bulk.execute({w: 'majority', wtimeout: 60 * 1000}));
- currChunks = chunkCount();
- return currChunks > initChunks;
- }, function() { return "currChunks: " + currChunks + ", initChunks: " + initChunks; });
+ assert.soon(
+ function() {
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++) {
+ bulk.insert({x: x++, largeStr: largeStr});
+ }
+ assert.writeOK(bulk.execute({w: 'majority', wtimeout: 60 * 1000}));
+ currChunks = chunkCount();
+ return currChunks > initChunks;
+ },
+ function() {
+ return "currChunks: " + currChunks + ", initChunks: " + initChunks;
+ });
st.stop();
}
writeToConfigTest();
configTest();
-
diff --git a/jstests/sharding/config_rs_change.js b/jstests/sharding/config_rs_change.js
index ac75751ee91..62e6a8f99e4 100644
--- a/jstests/sharding/config_rs_change.js
+++ b/jstests/sharding/config_rs_change.js
@@ -3,9 +3,7 @@
// of the config replset config during startup.
var configRS = new ReplSetTest({name: "configRS", nodes: 1, useHostName: true});
-configRS.startSet({ configsvr: '',
- journal: "",
- storageEngine: 'wiredTiger' });
+configRS.startSet({configsvr: '', journal: "", storageEngine: 'wiredTiger'});
var replConfig = configRS.getReplSetConfig();
replConfig.configsvr = true;
configRS.initiate(replConfig);
diff --git a/jstests/sharding/config_rs_no_primary.js b/jstests/sharding/config_rs_no_primary.js
index 9fce3421474..11d9a8e41aa 100644
--- a/jstests/sharding/config_rs_no_primary.js
+++ b/jstests/sharding/config_rs_no_primary.js
@@ -1,48 +1,54 @@
// Tests operation of the cluster when the config servers have no primary and thus the cluster
// metadata is in read-only mode.
(function() {
-"use strict";
-
-var st = new ShardingTest({shards: 1,
- other: {c0: {}, // Make sure 1st config server is primary
- c1: {rsConfig: {priority: 0}},
- c2: {rsConfig: {priority: 0}}}});
-
-assert.eq(st.config0, st.configRS.getPrimary());
-
-// Create the "test" database while the cluster metadata is still writeable.
-st.s.getDB('test').foo.insert({a:1});
-
-// Take down two of the config servers so the remaining one goes into SECONDARY state.
-st.configRS.stop(1);
-st.configRS.stop(2);
-st.configRS.awaitNoPrimary();
-
-jsTestLog("Starting a new mongos when the config servers have no primary which should work");
-var mongos2 = MongoRunner.runMongos({configdb: st.configRS.getURL()});
-assert.neq(null, mongos2);
-
-var testOps = function(mongos) {
- jsTestLog("Doing ops that don't require metadata writes and thus should succeed against: " +
- mongos);
- var initialCount = mongos.getDB('test').foo.count();
- assert.writeOK(mongos.getDB('test').foo.insert({a:1}));
- assert.eq(initialCount + 1, mongos.getDB('test').foo.count());
-
- assert.throws(function() {mongos.getDB('config').shards.findOne();});
- mongos.setSlaveOk(true);
- var shardDoc = mongos.getDB('config').shards.findOne();
- mongos.setSlaveOk(false);
- assert.neq(null, shardDoc);
-
- jsTestLog("Doing ops that require metadata writes and thus should fail against: " + mongos);
- assert.writeError(mongos.getDB("newDB").foo.insert({a:1}));
- assert.commandFailed(mongos.getDB('admin').runCommand({shardCollection: "test.foo",
- key: {a:1}}));
-};
-
-testOps(mongos2);
-testOps(st.s);
-
-st.stop();
+ "use strict";
+
+ var st = new ShardingTest({
+ shards: 1,
+ other: {
+ c0: {}, // Make sure 1st config server is primary
+ c1: {rsConfig: {priority: 0}},
+ c2: {rsConfig: {priority: 0}}
+ }
+ });
+
+ assert.eq(st.config0, st.configRS.getPrimary());
+
+ // Create the "test" database while the cluster metadata is still writeable.
+ st.s.getDB('test').foo.insert({a: 1});
+
+ // Take down two of the config servers so the remaining one goes into SECONDARY state.
+ st.configRS.stop(1);
+ st.configRS.stop(2);
+ st.configRS.awaitNoPrimary();
+
+ jsTestLog("Starting a new mongos when the config servers have no primary which should work");
+ var mongos2 = MongoRunner.runMongos({configdb: st.configRS.getURL()});
+ assert.neq(null, mongos2);
+
+ var testOps = function(mongos) {
+ jsTestLog("Doing ops that don't require metadata writes and thus should succeed against: " +
+ mongos);
+ var initialCount = mongos.getDB('test').foo.count();
+ assert.writeOK(mongos.getDB('test').foo.insert({a: 1}));
+ assert.eq(initialCount + 1, mongos.getDB('test').foo.count());
+
+ assert.throws(function() {
+ mongos.getDB('config').shards.findOne();
+ });
+ mongos.setSlaveOk(true);
+ var shardDoc = mongos.getDB('config').shards.findOne();
+ mongos.setSlaveOk(false);
+ assert.neq(null, shardDoc);
+
+ jsTestLog("Doing ops that require metadata writes and thus should fail against: " + mongos);
+ assert.writeError(mongos.getDB("newDB").foo.insert({a: 1}));
+ assert.commandFailed(
+ mongos.getDB('admin').runCommand({shardCollection: "test.foo", key: {a: 1}}));
+ };
+
+ testOps(mongos2);
+ testOps(st.s);
+
+ st.stop();
}()); \ No newline at end of file
diff --git a/jstests/sharding/conn_pool_stats.js b/jstests/sharding/conn_pool_stats.js
index 8cbf8788ae7..872c20602a5 100644
--- a/jstests/sharding/conn_pool_stats.js
+++ b/jstests/sharding/conn_pool_stats.js
@@ -4,7 +4,7 @@
var cluster = new ShardingTest({shards: 2});
// Run the connPoolStats command
-stats = cluster.s.getDB("admin").runCommand({connPoolStats : 1});
+stats = cluster.s.getDB("admin").runCommand({connPoolStats: 1});
// Validate output
printjson(stats);
diff --git a/jstests/sharding/copydb_from_mongos.js b/jstests/sharding/copydb_from_mongos.js
index aa6ac16b465..66db42407ca 100644
--- a/jstests/sharding/copydb_from_mongos.js
+++ b/jstests/sharding/copydb_from_mongos.js
@@ -1,26 +1,22 @@
(function() {
-var st = new ShardingTest({ shards: 1 });
+ var st = new ShardingTest({shards: 1});
-var testDB = st.s.getDB('test');
-assert.writeOK(testDB.foo.insert({ a: 1 }));
+ var testDB = st.s.getDB('test');
+ assert.writeOK(testDB.foo.insert({a: 1}));
-var res = testDB.adminCommand({ copydb: 1,
- fromhost: st.s.host,
- fromdb: 'test',
- todb: 'test_copy' });
-assert.commandWorked(res);
+ var res =
+ testDB.adminCommand({copydb: 1, fromhost: st.s.host, fromdb: 'test', todb: 'test_copy'});
+ assert.commandWorked(res);
-var copy = st.s.getDB('test_copy');
-assert.eq(1, copy.foo.count());
-assert.eq(1, copy.foo.findOne().a);
+ var copy = st.s.getDB('test_copy');
+ assert.eq(1, copy.foo.count());
+ assert.eq(1, copy.foo.findOne().a);
-// Test invalid todb database name.
-assert.commandFailed(testDB.adminCommand({ copydb: 1,
- fromhost: st.s.host,
- fromdb: 'test_copy',
- todb: 'test/copy' }));
+ // Test invalid todb database name.
+ assert.commandFailed(testDB.adminCommand(
+ {copydb: 1, fromhost: st.s.host, fromdb: 'test_copy', todb: 'test/copy'}));
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index a79d3ebfdd4..4686d317f6d 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -1,175 +1,181 @@
(function() {
-var s = new ShardingTest({ name: "count1", shards: 2 });
-var db = s.getDB( "test" );
-
-// ************** Test Set #1 *************
-// Basic counts on "bar" collections, not yet sharded
-
-db.bar.save( { n : 1 } );
-db.bar.save( { n : 2 } );
-db.bar.save( { n : 3 } );
-
-assert.eq( 3 , db.bar.find().count() , "bar 1" );
-assert.eq( 1 , db.bar.find( { n : 1 } ).count() , "bar 2" );
-
-//************** Test Set #2 *************
-// Basic counts on sharded "foo" collection.
-// 1. Create foo collection, insert 6 docs
-// 2. Divide into three chunks
-// 3. Test counts before chunk migrations
-// 4. Manually move chunks. Now each shard should have 3 docs.
-// 5. i. Test basic counts on foo
-// ii. Test counts with limit
-// iii. Test counts with skip
-// iv. Test counts with skip + limit
-// v. Test counts with skip + limit + sorting
-// 6. Insert 10 more docs. Further limit/skip testing with a find query
-// 7. test invalid queries/values
-
-// part 1
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
-
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-secondary = s.getOther( primary ).getDB( "test" );
-
-assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
-
-db.foo.save( { _id : 1 , name : "eliot" } );
-db.foo.save( { _id : 2 , name : "sara" } );
-db.foo.save( { _id : 3 , name : "bob" } );
-db.foo.save( { _id : 4 , name : "joe" } );
-db.foo.save( { _id : 5 , name : "mark" } );
-db.foo.save( { _id : 6 , name : "allan" } );
-
-assert.eq( 6 , db.foo.find().count() , "basic count" );
-
-// part 2
-s.adminCommand({ split: "test.foo", middle: { name: "allan" }});
-s.adminCommand({ split: "test.foo", middle: { name: "sara" }});
-s.adminCommand({ split: "test.foo", middle: { name: "eliot" }});
-
-// MINKEY->allan,bob->eliot,joe,mark->sara,MAXKEY
-
-s.printChunks();
-
-// part 3
-assert.eq( 6 , db.foo.find().count() , "basic count after split " );
-assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "basic count after split sorted " );
-
-// part 4
-s.adminCommand( { movechunk : "test.foo" , find : { name : "eliot" } , to : secondary.getMongo().name , _waitForDelete : true } );
-
-assert.eq( 3 , primary.foo.find().toArray().length , "primary count" );
-assert.eq( 3 , secondary.foo.find().toArray().length , "secondary count" );
-assert.eq( 3 , primary.foo.find().sort( { name : 1 } ).toArray().length , "primary count sorted" );
-assert.eq( 3 , secondary.foo.find().sort( { name : 1 } ).toArray().length , "secondary count sorted" );
-
-// part 5
-// Some redundant tests, but better safe than sorry. These are fast tests, anyway.
-
-// i.
-assert.eq( 6 , db.foo.find().count() , "total count after move" );
-assert.eq( 6 , db.foo.find().toArray().length , "total count after move" );
-assert.eq( 6 , db.foo.find().sort( { name : 1 } ).toArray().length , "total count() sorted" );
-assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "total count with count() after move" );
-
-// ii.
-assert.eq( 2 , db.foo.find().limit(2).count(true) );
-assert.eq( 2 , db.foo.find().limit(-2).count(true) );
-assert.eq( 6 , db.foo.find().limit(100).count(true) );
-assert.eq( 6 , db.foo.find().limit(-100).count(true) );
-assert.eq( 6 , db.foo.find().limit(0).count(true) );
-
-// iii.
-assert.eq( 6 , db.foo.find().skip(0).count(true) );
-assert.eq( 5 , db.foo.find().skip(1).count(true) );
-assert.eq( 4 , db.foo.find().skip(2).count(true) );
-assert.eq( 3 , db.foo.find().skip(3).count(true) );
-assert.eq( 2 , db.foo.find().skip(4).count(true) );
-assert.eq( 1 , db.foo.find().skip(5).count(true) );
-assert.eq( 0 , db.foo.find().skip(6).count(true) );
-assert.eq( 0 , db.foo.find().skip(7).count(true) );
-
-// iv.
-assert.eq( 2 , db.foo.find().limit(2).skip(1).count(true) );
-assert.eq( 2 , db.foo.find().limit(-2).skip(1).count(true) );
-assert.eq( 5 , db.foo.find().limit(100).skip(1).count(true) );
-assert.eq( 5 , db.foo.find().limit(-100).skip(1).count(true) );
-assert.eq( 5 , db.foo.find().limit(0).skip(1).count(true) );
-
-assert.eq( 0 , db.foo.find().limit(2).skip(10).count(true) );
-assert.eq( 0 , db.foo.find().limit(-2).skip(10).count(true) );
-assert.eq( 0 , db.foo.find().limit(100).skip(10).count(true) );
-assert.eq( 0 , db.foo.find().limit(-100).skip(10).count(true) );
-assert.eq( 0 , db.foo.find().limit(0).skip(10).count(true) );
-
-assert.eq( 2 , db.foo.find().limit(2).itcount() , "LS1" );
-assert.eq( 2 , db.foo.find().skip(2).limit(2).itcount() , "LS2" );
-assert.eq( 1 , db.foo.find().skip(5).limit(2).itcount() , "LS3" );
-assert.eq( 6 , db.foo.find().limit(2).count() , "LSC1" );
-assert.eq( 2 , db.foo.find().limit(2).size() , "LSC2" );
-assert.eq( 2 , db.foo.find().skip(2).limit(2).size() , "LSC3" );
-assert.eq( 1 , db.foo.find().skip(5).limit(2).size() , "LSC4" );
-assert.eq( 4 , db.foo.find().skip(1).limit(4).size() , "LSC5" );
-assert.eq( 5 , db.foo.find().skip(1).limit(6).size() , "LSC6" );
-
-// SERVER-3567 older negative limit tests
-assert.eq( 2 , db.foo.find().limit(2).itcount() , "N1" );
-assert.eq( 2 , db.foo.find().limit(-2).itcount() , "N2" );
-assert.eq( 2 , db.foo.find().skip(4).limit(2).itcount() , "N3" );
-assert.eq( 2 , db.foo.find().skip(4).limit(-2).itcount() , "N4" );
-
-// v.
-function nameString( c ){
- var s = "";
- while ( c.hasNext() ){
- var o = c.next();
- if ( s.length > 0 )
- s += ",";
- s += o.name;
+ var s = new ShardingTest({name: "count1", shards: 2});
+ var db = s.getDB("test");
+
+ // ************** Test Set #1 *************
+ // Basic counts on "bar" collections, not yet sharded
+
+ db.bar.save({n: 1});
+ db.bar.save({n: 2});
+ db.bar.save({n: 3});
+
+ assert.eq(3, db.bar.find().count(), "bar 1");
+ assert.eq(1, db.bar.find({n: 1}).count(), "bar 2");
+
+ //************** Test Set #2 *************
+ // Basic counts on sharded "foo" collection.
+ // 1. Create foo collection, insert 6 docs
+ // 2. Divide into three chunks
+ // 3. Test counts before chunk migrations
+ // 4. Manually move chunks. Now each shard should have 3 docs.
+ // 5. i. Test basic counts on foo
+ // ii. Test counts with limit
+ // iii. Test counts with skip
+ // iv. Test counts with skip + limit
+ // v. Test counts with skip + limit + sorting
+ // 6. Insert 10 more docs. Further limit/skip testing with a find query
+ // 7. test invalid queries/values
+
+ // part 1
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
+
+ primary = s.getPrimaryShard("test").getDB("test");
+ secondary = s.getOther(primary).getDB("test");
+
+ assert.eq(1, s.config.chunks.count(), "sanity check A");
+
+ db.foo.save({_id: 1, name: "eliot"});
+ db.foo.save({_id: 2, name: "sara"});
+ db.foo.save({_id: 3, name: "bob"});
+ db.foo.save({_id: 4, name: "joe"});
+ db.foo.save({_id: 5, name: "mark"});
+ db.foo.save({_id: 6, name: "allan"});
+
+ assert.eq(6, db.foo.find().count(), "basic count");
+
+ // part 2
+ s.adminCommand({split: "test.foo", middle: {name: "allan"}});
+ s.adminCommand({split: "test.foo", middle: {name: "sara"}});
+ s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
+
+ // MINKEY->allan,bob->eliot,joe,mark->sara,MAXKEY
+
+ s.printChunks();
+
+ // part 3
+ assert.eq(6, db.foo.find().count(), "basic count after split ");
+ assert.eq(6, db.foo.find().sort({name: 1}).count(), "basic count after split sorted ");
+
+ // part 4
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {name: "eliot"},
+ to: secondary.getMongo().name,
+ _waitForDelete: true
+ });
+
+ assert.eq(3, primary.foo.find().toArray().length, "primary count");
+ assert.eq(3, secondary.foo.find().toArray().length, "secondary count");
+ assert.eq(3, primary.foo.find().sort({name: 1}).toArray().length, "primary count sorted");
+ assert.eq(3, secondary.foo.find().sort({name: 1}).toArray().length, "secondary count sorted");
+
+ // part 5
+ // Some redundant tests, but better safe than sorry. These are fast tests, anyway.
+
+ // i.
+ assert.eq(6, db.foo.find().count(), "total count after move");
+ assert.eq(6, db.foo.find().toArray().length, "total count after move");
+ assert.eq(6, db.foo.find().sort({name: 1}).toArray().length, "total count() sorted");
+ assert.eq(6, db.foo.find().sort({name: 1}).count(), "total count with count() after move");
+
+ // ii.
+ assert.eq(2, db.foo.find().limit(2).count(true));
+ assert.eq(2, db.foo.find().limit(-2).count(true));
+ assert.eq(6, db.foo.find().limit(100).count(true));
+ assert.eq(6, db.foo.find().limit(-100).count(true));
+ assert.eq(6, db.foo.find().limit(0).count(true));
+
+ // iii.
+ assert.eq(6, db.foo.find().skip(0).count(true));
+ assert.eq(5, db.foo.find().skip(1).count(true));
+ assert.eq(4, db.foo.find().skip(2).count(true));
+ assert.eq(3, db.foo.find().skip(3).count(true));
+ assert.eq(2, db.foo.find().skip(4).count(true));
+ assert.eq(1, db.foo.find().skip(5).count(true));
+ assert.eq(0, db.foo.find().skip(6).count(true));
+ assert.eq(0, db.foo.find().skip(7).count(true));
+
+ // iv.
+ assert.eq(2, db.foo.find().limit(2).skip(1).count(true));
+ assert.eq(2, db.foo.find().limit(-2).skip(1).count(true));
+ assert.eq(5, db.foo.find().limit(100).skip(1).count(true));
+ assert.eq(5, db.foo.find().limit(-100).skip(1).count(true));
+ assert.eq(5, db.foo.find().limit(0).skip(1).count(true));
+
+ assert.eq(0, db.foo.find().limit(2).skip(10).count(true));
+ assert.eq(0, db.foo.find().limit(-2).skip(10).count(true));
+ assert.eq(0, db.foo.find().limit(100).skip(10).count(true));
+ assert.eq(0, db.foo.find().limit(-100).skip(10).count(true));
+ assert.eq(0, db.foo.find().limit(0).skip(10).count(true));
+
+ assert.eq(2, db.foo.find().limit(2).itcount(), "LS1");
+ assert.eq(2, db.foo.find().skip(2).limit(2).itcount(), "LS2");
+ assert.eq(1, db.foo.find().skip(5).limit(2).itcount(), "LS3");
+ assert.eq(6, db.foo.find().limit(2).count(), "LSC1");
+ assert.eq(2, db.foo.find().limit(2).size(), "LSC2");
+ assert.eq(2, db.foo.find().skip(2).limit(2).size(), "LSC3");
+ assert.eq(1, db.foo.find().skip(5).limit(2).size(), "LSC4");
+ assert.eq(4, db.foo.find().skip(1).limit(4).size(), "LSC5");
+ assert.eq(5, db.foo.find().skip(1).limit(6).size(), "LSC6");
+
+ // SERVER-3567 older negative limit tests
+ assert.eq(2, db.foo.find().limit(2).itcount(), "N1");
+ assert.eq(2, db.foo.find().limit(-2).itcount(), "N2");
+ assert.eq(2, db.foo.find().skip(4).limit(2).itcount(), "N3");
+ assert.eq(2, db.foo.find().skip(4).limit(-2).itcount(), "N4");
+
+ // v.
+ function nameString(c) {
+ var s = "";
+ while (c.hasNext()) {
+ var o = c.next();
+ if (s.length > 0)
+ s += ",";
+ s += o.name;
+ }
+ return s;
}
- return s;
-}
-assert.eq( "allan,bob,eliot,joe,mark,sara" , nameString( db.foo.find().sort( { name : 1 } ) ) , "sort 1" );
-assert.eq( "sara,mark,joe,eliot,bob,allan" , nameString( db.foo.find().sort( { name : -1 } ) ) , "sort 2" );
-
-assert.eq( "allan,bob" , nameString( db.foo.find().sort( { name : 1 } ).limit(2) ) , "LSD1" );
-assert.eq( "bob,eliot" , nameString( db.foo.find().sort( { name : 1 } ).skip(1).limit(2) ) , "LSD2" );
-assert.eq( "joe,mark" , nameString( db.foo.find().sort( { name : 1 } ).skip(3).limit(2) ) , "LSD3" );
-
-assert.eq( "eliot,sara" , nameString( db.foo.find().sort( { _id : 1 } ).limit(2) ) , "LSE1" );
-assert.eq( "sara,bob" , nameString( db.foo.find().sort( { _id : 1 } ).skip(1).limit(2) ) , "LSE2" );
-assert.eq( "joe,mark" , nameString( db.foo.find().sort( { _id : 1 } ).skip(3).limit(2) ) , "LSE3" );
-
-// part 6
-for ( i=0; i<10; i++ ){
- db.foo.save( { _id : 7 + i , name : "zzz" + i } );
-}
-
-assert.eq( 10 , db.foo.find( { name : { $gt : "z" } } ).itcount() , "LSF1" );
-assert.eq( 10 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).itcount() , "LSF2" );
-assert.eq( 5 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).skip(5).itcount() , "LSF3" );
-assert.eq( 3 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).skip(5).limit(3).itcount() , "LSF4" );
-
-// part 7
-// Make sure count command returns error for invalid queries
-var badCmdResult = db.runCommand({ count: 'foo', query: { $c: { $abc: 3 }}});
-assert( ! badCmdResult.ok , "invalid query syntax didn't return error" );
-assert( badCmdResult.errmsg.length > 0 , "no error msg for invalid query" );
-
-// Negative skip values should return error
-var negSkipResult = db.runCommand({ count: 'foo', skip : -2 });
-assert( ! negSkipResult.ok , "negative skip value shouldn't work" );
-assert( negSkipResult.errmsg.length > 0 , "no error msg for negative skip" );
-
-// Negative skip values with positive limit should return error
-var negSkipLimitResult = db.runCommand({ count: 'foo', skip : -2, limit : 1 });
-assert( ! negSkipLimitResult.ok , "negative skip value with limit shouldn't work" );
-assert( negSkipLimitResult.errmsg.length > 0 , "no error msg for negative skip" );
-
-s.stop();
+ assert.eq("allan,bob,eliot,joe,mark,sara", nameString(db.foo.find().sort({name: 1})), "sort 1");
+ assert.eq(
+ "sara,mark,joe,eliot,bob,allan", nameString(db.foo.find().sort({name: -1})), "sort 2");
+
+ assert.eq("allan,bob", nameString(db.foo.find().sort({name: 1}).limit(2)), "LSD1");
+ assert.eq("bob,eliot", nameString(db.foo.find().sort({name: 1}).skip(1).limit(2)), "LSD2");
+ assert.eq("joe,mark", nameString(db.foo.find().sort({name: 1}).skip(3).limit(2)), "LSD3");
+
+ assert.eq("eliot,sara", nameString(db.foo.find().sort({_id: 1}).limit(2)), "LSE1");
+ assert.eq("sara,bob", nameString(db.foo.find().sort({_id: 1}).skip(1).limit(2)), "LSE2");
+ assert.eq("joe,mark", nameString(db.foo.find().sort({_id: 1}).skip(3).limit(2)), "LSE3");
+
+ // part 6
+ for (i = 0; i < 10; i++) {
+ db.foo.save({_id: 7 + i, name: "zzz" + i});
+ }
+
+ assert.eq(10, db.foo.find({name: {$gt: "z"}}).itcount(), "LSF1");
+ assert.eq(10, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).itcount(), "LSF2");
+ assert.eq(5, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).skip(5).itcount(), "LSF3");
+ assert.eq(3, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).skip(5).limit(3).itcount(), "LSF4");
+
+ // part 7
+ // Make sure count command returns error for invalid queries
+ var badCmdResult = db.runCommand({count: 'foo', query: {$c: {$abc: 3}}});
+ assert(!badCmdResult.ok, "invalid query syntax didn't return error");
+ assert(badCmdResult.errmsg.length > 0, "no error msg for invalid query");
+
+ // Negative skip values should return error
+ var negSkipResult = db.runCommand({count: 'foo', skip: -2});
+ assert(!negSkipResult.ok, "negative skip value shouldn't work");
+ assert(negSkipResult.errmsg.length > 0, "no error msg for negative skip");
+
+ // Negative skip values with positive limit should return error
+ var negSkipLimitResult = db.runCommand({count: 'foo', skip: -2, limit: 1});
+ assert(!negSkipLimitResult.ok, "negative skip value with limit shouldn't work");
+ assert(negSkipLimitResult.errmsg.length > 0, "no error msg for negative skip");
+
+ s.stop();
})();
diff --git a/jstests/sharding/count2.js b/jstests/sharding/count2.js
index cbef67a2db9..8b1346fd0d4 100644
--- a/jstests/sharding/count2.js
+++ b/jstests/sharding/count2.js
@@ -1,56 +1,56 @@
(function() {
-var s1 = new ShardingTest({ name: "count2",
- shards: 2,
- mongos: 2 });
-var s2 = s1._mongos[1];
+ var s1 = new ShardingTest({name: "count2", shards: 2, mongos: 2});
+ var s2 = s1._mongos[1];
-s1.adminCommand( { enablesharding: "test" } );
-s1.ensurePrimaryShard('test', 'shard0001');
-s1.adminCommand( { shardcollection: "test.foo" , key : { name : 1 } } );
+ s1.adminCommand({enablesharding: "test"});
+ s1.ensurePrimaryShard('test', 'shard0001');
+ s1.adminCommand({shardcollection: "test.foo", key: {name: 1}});
-var db1 = s1.getDB( "test" ).foo;
-var db2 = s2.getDB( "test" ).foo;
+ var db1 = s1.getDB("test").foo;
+ var db2 = s2.getDB("test").foo;
-assert.eq( 1, s1.config.chunks.count(), "sanity check A");
+ assert.eq(1, s1.config.chunks.count(), "sanity check A");
-db1.save( { name : "aaa" } );
-db1.save( { name : "bbb" } );
-db1.save( { name : "ccc" } );
-db1.save( { name : "ddd" } );
-db1.save( { name : "eee" } );
-db1.save( { name : "fff" } );
+ db1.save({name: "aaa"});
+ db1.save({name: "bbb"});
+ db1.save({name: "ccc"});
+ db1.save({name: "ddd"});
+ db1.save({name: "eee"});
+ db1.save({name: "fff"});
-s1.adminCommand( { split : "test.foo" , middle : { name : "ddd" } } );
+ s1.adminCommand({split: "test.foo", middle: {name: "ddd"}});
-assert.eq( 3, db1.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "initial count mongos1" );
-assert.eq( 3, db2.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "initial count mongos2" );
+ assert.eq(3, db1.count({name: {$gte: "aaa", $lt: "ddd"}}), "initial count mongos1");
+ assert.eq(3, db2.count({name: {$gte: "aaa", $lt: "ddd"}}), "initial count mongos2");
-s1.printChunks( "test.foo" );
+ s1.printChunks("test.foo");
-s1.adminCommand( { movechunk : "test.foo",
- find : { name : "aaa" },
- to : s1.getOther( s1.getPrimaryShard( "test" ) ).name,
- _waitForDelete : true });
+ s1.adminCommand({
+ movechunk: "test.foo",
+ find: {name: "aaa"},
+ to: s1.getOther(s1.getPrimaryShard("test")).name,
+ _waitForDelete: true
+ });
-assert.eq( 3, db1.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "post count mongos1" );
+ assert.eq(3, db1.count({name: {$gte: "aaa", $lt: "ddd"}}), "post count mongos1");
-// The second mongos still thinks its shard mapping is valid and accepts a cound
-print( "before sleep: " + Date() );
-sleep( 2000 );
-print( "after sleep: " + Date() );
-s1.printChunks( "test.foo" );
-assert.eq( 3, db2.find( { name : { $gte: "aaa" , $lt: "ddd" } } ).count() , "post count mongos2" );
+ // The second mongos still thinks its shard mapping is valid and accepts a count
+ print("before sleep: " + Date());
+ sleep(2000);
+ print("after sleep: " + Date());
+ s1.printChunks("test.foo");
+ assert.eq(3, db2.find({name: {$gte: "aaa", $lt: "ddd"}}).count(), "post count mongos2");
-db2.findOne();
+ db2.findOne();
-assert.eq( 3, db2.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) );
+ assert.eq(3, db2.count({name: {$gte: "aaa", $lt: "ddd"}}));
-assert.eq( 4, db2.find().limit( 4 ).count( true ));
-assert.eq( 4, db2.find().limit( -4 ).count( true ));
-assert.eq( 6, db2.find().limit( 0 ).count( true ));
-assert.eq( 6, db2.getDB().runCommand({ count: db2.getName(), limit: 0 }).n );
+ assert.eq(4, db2.find().limit(4).count(true));
+ assert.eq(4, db2.find().limit(-4).count(true));
+ assert.eq(6, db2.find().limit(0).count(true));
+ assert.eq(6, db2.getDB().runCommand({count: db2.getName(), limit: 0}).n);
-s1.stop();
+ s1.stop();
})();
diff --git a/jstests/sharding/count_config_servers.js b/jstests/sharding/count_config_servers.js
index 86517073336..ed8bf19cf10 100644
--- a/jstests/sharding/count_config_servers.js
+++ b/jstests/sharding/count_config_servers.js
@@ -3,62 +3,62 @@
* This test fails when run with authentication due to SERVER-6327
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({name: 'sync_conn_cmd', shards: 0});
-st.s.setSlaveOk(true);
+ var st = new ShardingTest({name: 'sync_conn_cmd', shards: 0});
+ st.s.setSlaveOk(true);
-var configDB = st.config;
-var coll = configDB.test;
+ var configDB = st.config;
+ var coll = configDB.test;
-for( var x = 0; x < 10; x++ ){
- assert.writeOK(coll.insert({ v: x }));
-}
+ for (var x = 0; x < 10; x++) {
+ assert.writeOK(coll.insert({v: x}));
+ }
-if (st.configRS) {
- // Make sure the inserts are replicated to all config servers.
- st.configRS.awaitReplication();
-}
+ if (st.configRS) {
+ // Make sure the inserts are replicated to all config servers.
+ st.configRS.awaitReplication();
+ }
-var testNormalCount = function(){
- var cmdRes = configDB.runCommand({ count: coll.getName() });
- assert( cmdRes.ok );
- assert.eq( 10, cmdRes.n );
-};
+ var testNormalCount = function() {
+ var cmdRes = configDB.runCommand({count: coll.getName()});
+ assert(cmdRes.ok);
+ assert.eq(10, cmdRes.n);
+ };
-var testCountWithQuery = function(){
- var cmdRes = configDB.runCommand({ count: coll.getName(), query: { v: { $gt: 6 }}});
- assert( cmdRes.ok );
- assert.eq( 3, cmdRes.n );
-};
+ var testCountWithQuery = function() {
+ var cmdRes = configDB.runCommand({count: coll.getName(), query: {v: {$gt: 6}}});
+ assert(cmdRes.ok);
+ assert.eq(3, cmdRes.n);
+ };
-// Use invalid query operator to make the count return error
-var testInvalidCount = function(){
- var cmdRes = configDB.runCommand({ count: coll.getName(), query: { $c: { $abc: 3 }}});
- assert( !cmdRes.ok );
- assert( cmdRes.errmsg.length > 0 );
-};
+ // Use invalid query operator to make the count return error
+ var testInvalidCount = function() {
+ var cmdRes = configDB.runCommand({count: coll.getName(), query: {$c: {$abc: 3}}});
+ assert(!cmdRes.ok);
+ assert(cmdRes.errmsg.length > 0);
+ };
-// Test with all config servers up
-testNormalCount();
-testCountWithQuery();
-testInvalidCount();
+ // Test with all config servers up
+ testNormalCount();
+ testCountWithQuery();
+ testInvalidCount();
-// Test with the first config server down
-MongoRunner.stopMongod(st.c0);
+ // Test with the first config server down
+ MongoRunner.stopMongod(st.c0);
-testNormalCount();
-testCountWithQuery();
-testInvalidCount();
+ testNormalCount();
+ testCountWithQuery();
+ testInvalidCount();
-// Test with the first and second config server down
-MongoRunner.stopMongod(st.c1);
-jsTest.log( 'Second server is down' );
+ // Test with the first and second config server down
+ MongoRunner.stopMongod(st.c1);
+ jsTest.log('Second server is down');
-testNormalCount();
-testCountWithQuery();
-testInvalidCount();
+ testNormalCount();
+ testCountWithQuery();
+ testInvalidCount();
-st.stop();
+ st.stop();
}());
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index eaf39a18352..70f0d7091d9 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -1,74 +1,70 @@
// Tests count and distinct using slaveOk. Also tests a scenario querying a set where only one
// secondary is up.
(function() {
-'use strict';
-
-var st = new ShardingTest({ name: "countSlaveOk",
- shards: 1,
- mongos: 1,
- other: { rs: true,
- rs0: { nodes: 2 } } });
-
-var rst = st._rs[0].test;
-
-// Insert data into replica set
-var conn = new Mongo(st.s.host);
-conn.setLogLevel(3);
-
-var coll = conn.getCollection('test.countSlaveOk');
-coll.drop();
-
-var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < 300; i++ ){
- bulk.insert({ i: i % 10 });
-}
-assert.writeOK(bulk.execute());
-
-var connA = conn;
-var connB = new Mongo( st.s.host );
-var connC = new Mongo( st.s.host );
-
-st.printShardingStatus();
-
-// Wait for client to update itself and replication to finish
-rst.awaitReplication();
-
-var primary = rst.getPrimary();
-var sec = rst.getSecondary();
-
-// Data now inserted... stop the master, since only two in set, other will still be secondary
-rst.stop(rst.getPrimary());
-printjson( rst.status() );
-
-// Wait for the mongos to recognize the slave
-ReplSetTest.awaitRSClientHosts( conn, sec, { ok : true, secondary : true } );
-
-// Make sure that mongos realizes that primary is already down
-ReplSetTest.awaitRSClientHosts( conn, primary, { ok : false });
-
-// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
-// master is down
-conn.setSlaveOk();
-
-// count using the command path
-assert.eq( 30, coll.find({ i : 0 }).count() );
-// count using the query path
-assert.eq( 30, coll.find({ i : 0 }).itcount() );
-assert.eq( 10, coll.distinct("i").length );
-
-try {
- conn.setSlaveOk( false );
- // Should throw exception, since not slaveOk'd
- coll.find({ i : 0 }).count();
-
- print( "Should not reach here!" );
- assert( false );
-
-}
-catch( e ){
- print( "Non-slaveOk'd connection failed." );
-}
-
-st.stop();
+ 'use strict';
+
+ var st = new ShardingTest(
+ {name: "countSlaveOk", shards: 1, mongos: 1, other: {rs: true, rs0: {nodes: 2}}});
+
+ var rst = st._rs[0].test;
+
+ // Insert data into replica set
+ var conn = new Mongo(st.s.host);
+ conn.setLogLevel(3);
+
+ var coll = conn.getCollection('test.countSlaveOk');
+ coll.drop();
+
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 300; i++) {
+ bulk.insert({i: i % 10});
+ }
+ assert.writeOK(bulk.execute());
+
+ var connA = conn;
+ var connB = new Mongo(st.s.host);
+ var connC = new Mongo(st.s.host);
+
+ st.printShardingStatus();
+
+ // Wait for client to update itself and replication to finish
+ rst.awaitReplication();
+
+ var primary = rst.getPrimary();
+ var sec = rst.getSecondary();
+
+ // Data now inserted... stop the master, since only two in set, other will still be secondary
+ rst.stop(rst.getPrimary());
+ printjson(rst.status());
+
+ // Wait for the mongos to recognize the slave
+ ReplSetTest.awaitRSClientHosts(conn, sec, {ok: true, secondary: true});
+
+ // Make sure that mongos realizes that primary is already down
+ ReplSetTest.awaitRSClientHosts(conn, primary, {ok: false});
+
+ // Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+ // master is down
+ conn.setSlaveOk();
+
+ // count using the command path
+ assert.eq(30, coll.find({i: 0}).count());
+ // count using the query path
+ assert.eq(30, coll.find({i: 0}).itcount());
+ assert.eq(10, coll.distinct("i").length);
+
+ try {
+ conn.setSlaveOk(false);
+ // Should throw exception, since not slaveOk'd
+ coll.find({i: 0}).count();
+
+ print("Should not reach here!");
+ assert(false);
+
+ } catch (e) {
+ print("Non-slaveOk'd connection failed.");
+ }
+
+ st.stop();
})();
diff --git a/jstests/sharding/covered_shard_key_indexes.js b/jstests/sharding/covered_shard_key_indexes.js
index e5cd1ce93b0..307dc241d9f 100644
--- a/jstests/sharding/covered_shard_key_indexes.js
+++ b/jstests/sharding/covered_shard_key_indexes.js
@@ -6,147 +6,155 @@
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
-var st = new ShardingTest({ shards : 1 });
+var st = new ShardingTest({shards: 1});
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB("admin");
+var shards = mongos.getCollection("config.shards").find().toArray();
+var coll = mongos.getCollection("foo.bar");
//
//
// Tests with _id : 1 shard key
-assert(admin.runCommand({ enableSharding : coll.getDB() + "" }).ok);
-printjson(admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }));
-assert(admin.runCommand({ shardCollection : coll + "", key : { _id : 1 }}).ok);
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({ _id : true, a : true, b : true }));
+assert.writeOK(coll.insert({_id: true, a: true, b: true}));
-assert.commandWorked(st.shard0.adminCommand({ setParameter: 1,
- logComponentVerbosity: { query: { verbosity: 5 }}}));
+assert.commandWorked(
+ st.shard0.adminCommand({setParameter: 1, logComponentVerbosity: {query: {verbosity: 5}}}));
//
// Index without shard key query - not covered
-assert.commandWorked(coll.ensureIndex({ a : 1 }));
-assert.eq(1, coll.find({ a : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({ a : true }, { _id : 1, a : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1, coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
//
// Index with shard key query - covered when projecting
assert.commandWorked(coll.dropIndexes());
-assert.commandWorked(coll.ensureIndex({ a : 1, _id : 1 }));
-assert.eq(1, coll.find({ a : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(0, coll.find({ a : true }, { _id : 1, a : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.ensureIndex({a: 1, _id: 1}));
+assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0, coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
//
// Compound index with shard key query - covered when projecting
assert.commandWorked(coll.dropIndexes());
-assert.commandWorked(coll.ensureIndex({ a : 1, b : 1, _id : 1 }));
-assert.eq(1, coll.find({ a : true, b : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(0, coll.find({ a : true, b : true }, { _id : 1, a : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1, _id: 1}));
+assert.eq(1, coll.find({a: true, b: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0,
+ coll.find({a: true, b: true}, {_id: 1, a: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
//
//
// Tests with _id : hashed shard key
coll.drop();
-assert(admin.runCommand({ shardCollection : coll + "", key : { _id : "hashed" }}).ok);
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: "hashed"}}).ok);
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({ _id : true, a : true, b : true }));
+assert.writeOK(coll.insert({_id: true, a: true, b: true}));
//
// Index without shard key query - not covered
-assert.commandWorked(coll.ensureIndex({ a : 1 }));
-assert.eq(1, coll.find({ a : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({ a : true }, { _id : 0, a : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1, coll.find({a: true}, {_id: 0, a: 1}).explain(true).executionStats.totalDocsExamined);
//
// Index with shard key query - can't be covered since hashed index
-assert.commandWorked(coll.dropIndex({ a : 1 }));
-assert.eq(1, coll.find({ _id : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({ _id : true }, { _id : 0 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.dropIndex({a: 1}));
+assert.eq(1, coll.find({_id: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1, coll.find({_id: true}, {_id: 0}).explain(true).executionStats.totalDocsExamined);
//
//
// Tests with compound shard key
coll.drop();
-assert(admin.runCommand({ shardCollection : coll + "", key : { a : 1, b : 1 }}).ok);
+assert(admin.runCommand({shardCollection: coll + "", key: {a: 1, b: 1}}).ok);
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({ _id : true, a : true, b : true, c : true, d : true }));
+assert.writeOK(coll.insert({_id: true, a: true, b: true, c: true, d: true}));
//
// Index without shard key query - not covered
-assert.commandWorked(coll.ensureIndex({ c : 1 }));
-assert.eq(1, coll.find({ c : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({ c : true }, { _id : 0, a : 1, b : 1, c : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.ensureIndex({c: 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1,
+ coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
//
// Index with shard key query - covered when projecting
-assert.commandWorked(coll.dropIndex({ c : 1 }));
-assert.commandWorked(coll.ensureIndex({ c : 1, b : 1, a : 1 }));
-assert.eq(1, coll.find({ c : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(0, coll.find({ c : true }, { _id : 0, a : 1, b : 1, c : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.dropIndex({c: 1}));
+assert.commandWorked(coll.ensureIndex({c: 1, b: 1, a: 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0,
+ coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
//
// Compound index with shard key query - covered when projecting
-assert.commandWorked(coll.dropIndex({ c : 1, b : 1, a : 1 }));
-assert.commandWorked(coll.ensureIndex({ c : 1, d : 1, a : 1, b : 1, _id : 1 }));
-assert.eq(1, coll.find({ c : true, d : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(0, coll.find({ c : true, d : true }, { a : 1, b : 1, c : 1, d : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.dropIndex({c: 1, b: 1, a: 1}));
+assert.commandWorked(coll.ensureIndex({c: 1, d: 1, a: 1, b: 1, _id: 1}));
+assert.eq(1, coll.find({c: true, d: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0,
+ coll.find({c: true, d: true}, {a: 1, b: 1, c: 1, d: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
//
//
// Tests with nested shard key
coll.drop();
-assert(admin.runCommand({ shardCollection : coll + "", key : { 'a.b' : 1 }}).ok);
+assert(admin.runCommand({shardCollection: coll + "", key: {'a.b': 1}}).ok);
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({ _id : true, a : { b : true }, c : true }));
+assert.writeOK(coll.insert({_id: true, a: {b: true}, c: true}));
//
// Index without shard key query - not covered
-assert.commandWorked(coll.ensureIndex({ c : 1 }));
-assert.eq(1, coll.find({ c : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({ c : true }, { _id : 0, 'a.b' : 1, c : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.ensureIndex({c: 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
//
// Index with shard key query - nested query not covered even when projecting
-assert.commandWorked(coll.dropIndex({ c : 1 }));
-assert.commandWorked(coll.ensureIndex({ c : 1, 'a.b' : 1 }));
-assert.eq(1, coll.find({ c : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({ c : true }, { _id : 0, 'a.b' : 1, c : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.dropIndex({c: 1}));
+assert.commandWorked(coll.ensureIndex({c: 1, 'a.b': 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
//
//
// Tests with bad data with no shard key
coll.drop();
-assert(admin.runCommand({ shardCollection : coll + "", key : { a : 1 }}).ok);
+assert(admin.runCommand({shardCollection: coll + "", key: {a: 1}}).ok);
st.printShardingStatus();
// Insert some bad data manually
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : "bad data", c : true }));
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: "bad data", c: true}));
//
// Index without shard key query - not covered but succeeds
-assert.commandWorked(coll.ensureIndex({ c : 1 }));
-var explain = coll.find({ c : true }).explain(true).executionStats;
+assert.commandWorked(coll.ensureIndex({c: 1}));
+var explain = coll.find({c: true}).explain(true).executionStats;
assert.eq(0, explain.nReturned);
assert.eq(1, explain.totalDocsExamined);
assert.eq(1, getChunkSkips(explain.executionStages.shards[0].executionStages));
@@ -155,9 +163,9 @@ assert.eq(1, getChunkSkips(explain.executionStages.shards[0].executionStages));
// Index with shard key query - covered and succeeds and returns result
// NOTE: This is weird and only a result of the fact that we don't have a dedicated "does not exist"
// value for indexes
-assert.commandWorked(coll.ensureIndex({ c : 1, a : 1 }));
-jsTest.log(tojson(coll.find({ c : true }, { _id : 0, a : 1, c : 1 }).toArray()));
-var explain = coll.find({ c : true }, { _id : 0, a : 1, c : 1 }).explain(true).executionStats;
+assert.commandWorked(coll.ensureIndex({c: 1, a: 1}));
+jsTest.log(tojson(coll.find({c: true}, {_id: 0, a: 1, c: 1}).toArray()));
+var explain = coll.find({c: true}, {_id: 0, a: 1, c: 1}).explain(true).executionStats;
assert.eq(1, explain.nReturned);
assert.eq(0, explain.totalDocsExamined);
assert.eq(0, getChunkSkips(explain.executionStages.shards[0].executionStages));
diff --git a/jstests/sharding/create_idx_empty_primary.js b/jstests/sharding/create_idx_empty_primary.js
index b6eeacb8cd1..f8beffa7e52 100644
--- a/jstests/sharding/create_idx_empty_primary.js
+++ b/jstests/sharding/create_idx_empty_primary.js
@@ -2,34 +2,33 @@
* Test to make sure that the createIndex command gets sent to all shards.
*/
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 2 });
-assert.commandWorked(st.s.adminCommand({ enablesharding: 'test' }));
-st.ensurePrimaryShard('test', 'shard0001');
+ var st = new ShardingTest({shards: 2});
+ assert.commandWorked(st.s.adminCommand({enablesharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0001');
-var testDB = st.s.getDB('test');
-assert.commandWorked(testDB.adminCommand({ shardcollection: 'test.user', key: { _id: 1 }}));
+ var testDB = st.s.getDB('test');
+ assert.commandWorked(testDB.adminCommand({shardcollection: 'test.user', key: {_id: 1}}));
-// Move only chunk out of primary shard.
-assert.commandWorked(testDB.adminCommand({ movechunk: 'test.user',
- find: { _id: 0 },
- to: 'shard0000' }));
+ // Move only chunk out of primary shard.
+ assert.commandWorked(
+ testDB.adminCommand({movechunk: 'test.user', find: {_id: 0}, to: 'shard0000'}));
-assert.writeOK(testDB.user.insert({ _id: 0 }));
+ assert.writeOK(testDB.user.insert({_id: 0}));
-var res = testDB.user.ensureIndex({ i: 1 });
-assert.commandWorked(res);
+ var res = testDB.user.ensureIndex({i: 1});
+ assert.commandWorked(res);
-var indexes = testDB.user.getIndexes();
-assert.eq(2, indexes.length);
+ var indexes = testDB.user.getIndexes();
+ assert.eq(2, indexes.length);
-indexes = st.d0.getDB('test').user.getIndexes();
-assert.eq(2, indexes.length);
+ indexes = st.d0.getDB('test').user.getIndexes();
+ assert.eq(2, indexes.length);
-indexes = st.d1.getDB('test').user.getIndexes();
-assert.eq(2, indexes.length);
+ indexes = st.d1.getDB('test').user.getIndexes();
+ assert.eq(2, indexes.length);
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 8b799ce1aa6..3aee9ff2cb5 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -2,70 +2,71 @@
// checks that cursors survive a chunk's move
(function() {
-var s = new ShardingTest({ name: "sharding_cursor1", shards: 2 });
-s.config.settings.find().forEach( printjson );
+ var s = new ShardingTest({name: "sharding_cursor1", shards: 2});
+ s.config.settings.find().forEach(printjson);
-// create a sharded 'test.foo', for the moment with just one chunk
-s.adminCommand( { enablesharding: "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection: "test.foo", key: { _id: 1 } } );
+ // create a sharded 'test.foo', for the moment with just one chunk
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-db = s.getDB( "test" );
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-secondary = s.getOther( primary ).getDB( "test" );
+ db = s.getDB("test");
+ primary = s.getPrimaryShard("test").getDB("test");
+ secondary = s.getOther(primary).getDB("test");
-var numObjs = 30;
-var bulk = db.foo.initializeUnorderedBulkOp();
-for (i=0; i < numObjs; i++){
- bulk.insert({ _id: i });
-}
-assert.writeOK(bulk.execute());
-assert.eq( 1, s.config.chunks.count() , "test requires collection to have one chunk initially" );
+ var numObjs = 30;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (i = 0; i < numObjs; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
+ assert.eq(1, s.config.chunks.count(), "test requires collection to have one chunk initially");
-// we'll split the collection in two and move the second chunk while three cursors are open
-// cursor1 still has more data in the first chunk, the one that didn't move
-// cursor2 buffered the last obj of the first chunk
-// cursor3 buffered data that was moved on the second chunk
-var cursor1 = db.foo.find().batchSize( 3 );
-assert.eq( 3 , cursor1.objsLeftInBatch() );
-var cursor2 = db.foo.find().batchSize( 5 );
-assert.eq( 5 , cursor2.objsLeftInBatch() );
-var cursor3 = db.foo.find().batchSize( 7 );
-assert.eq( 7 , cursor3.objsLeftInBatch() );
+ // we'll split the collection in two and move the second chunk while three cursors are open
+ // cursor1 still has more data in the first chunk, the one that didn't move
+ // cursor2 buffered the last obj of the first chunk
+ // cursor3 buffered data that was moved on the second chunk
+ var cursor1 = db.foo.find().batchSize(3);
+ assert.eq(3, cursor1.objsLeftInBatch());
+ var cursor2 = db.foo.find().batchSize(5);
+ assert.eq(5, cursor2.objsLeftInBatch());
+ var cursor3 = db.foo.find().batchSize(7);
+ assert.eq(7, cursor3.objsLeftInBatch());
-s.adminCommand( { split: "test.foo" , middle : { _id : 5 } } );
-s.adminCommand( { movechunk : "test.foo" , find : { _id : 5 } , to : secondary.getMongo().name } );
-assert.eq( 2, s.config.chunks.count() );
+ s.adminCommand({split: "test.foo", middle: {_id: 5}});
+ s.adminCommand({movechunk: "test.foo", find: {_id: 5}, to: secondary.getMongo().name});
+ assert.eq(2, s.config.chunks.count());
-// the cursors should not have been affected
-assert.eq( numObjs , cursor1.itcount() , "c1" );
-assert.eq( numObjs , cursor2.itcount() , "c2" );
-assert.eq( numObjs , cursor3.itcount() , "c3" );
+ // the cursors should not have been affected
+ assert.eq(numObjs, cursor1.itcount(), "c1");
+ assert.eq(numObjs, cursor2.itcount(), "c2");
+ assert.eq(numObjs, cursor3.itcount(), "c3");
-// Test that a cursor with a 1 second timeout eventually times out.
-gc(); gc();
-var cur = db.foo.find().batchSize( 2 );
-assert( cur.next() , "T1" );
-assert( cur.next() , "T2" );
-assert.commandWorked(s.admin.runCommand({
- setParameter: 1,
- cursorTimeoutMillis: 1000 // 1 second.
-}));
+ // Test that a cursor with a 1 second timeout eventually times out.
+ gc();
+ gc();
+ var cur = db.foo.find().batchSize(2);
+ assert(cur.next(), "T1");
+ assert(cur.next(), "T2");
+ assert.commandWorked(s.admin.runCommand({
+ setParameter: 1,
+ cursorTimeoutMillis: 1000 // 1 second.
+ }));
-assert.soon(function() {
- try {
- cur.next();
- cur.next();
- print("cursor still alive");
- return false;
- }
- catch (e) {
- return true;
- }
-}, "cursor failed to time out", /*timeout*/30000, /*interval*/5000);
+ assert.soon(function() {
+ try {
+ cur.next();
+ cur.next();
+ print("cursor still alive");
+ return false;
+ } catch (e) {
+ return true;
+ }
+ }, "cursor failed to time out", /*timeout*/ 30000, /*interval*/ 5000);
-gc(); gc();
+ gc();
+ gc();
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/cursor_cleanup.js b/jstests/sharding/cursor_cleanup.js
index 4eb200b87e3..5d0ce46f532 100644
--- a/jstests/sharding/cursor_cleanup.js
+++ b/jstests/sharding/cursor_cleanup.js
@@ -2,21 +2,21 @@
// Tests cleanup of sharded and unsharded cursors
//
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+var st = new ShardingTest({shards: 2, mongos: 1});
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
var shards = config.shards.find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
-var collUnsharded = mongos.getCollection( "foo.baz" );
+var coll = mongos.getCollection("foo.bar");
+var collUnsharded = mongos.getCollection("foo.baz");
// Shard collection
-printjson(admin.runCommand({ enableSharding : coll.getDB() + "" }));
-printjson(admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }));
-printjson(admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }));
-printjson(admin.runCommand({ split : coll + "", middle : { _id : 0 } }));
-printjson(admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id }));
+printjson(admin.runCommand({enableSharding: coll.getDB() + ""}));
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+printjson(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+printjson(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+printjson(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
jsTest.log("Collection set up...");
st.printShardingStatus(true);
@@ -26,8 +26,8 @@ jsTest.log("Insert enough data to overwhelm a query batch.");
var bulk = coll.initializeUnorderedBulkOp();
var bulk2 = collUnsharded.initializeUnorderedBulkOp();
for (var i = -150; i < 150; i++) {
- bulk.insert({ _id : i });
- bulk2.insert({ _id : i });
+ bulk.insert({_id: i});
+ bulk2.insert({_id: i});
}
assert.writeOK(bulk.execute());
assert.writeOK(bulk2.execute());
diff --git a/jstests/sharding/delete_during_migrate.js b/jstests/sharding/delete_during_migrate.js
index e44b4cd4078..982b0c00787 100644
--- a/jstests/sharding/delete_during_migrate.js
+++ b/jstests/sharding/delete_during_migrate.js
@@ -3,42 +3,44 @@
// starts. Protect against that by making chunk very large.
// start up a new sharded cluster
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+var st = new ShardingTest({shards: 2, mongos: 1});
// Balancer is by default stopped, thus we have manual control
var dbname = "testDB";
var coll = "foo";
var ns = dbname + "." + coll;
var s = st.s0;
-var t = s.getDB( dbname ).getCollection( coll );
+var t = s.getDB(dbname).getCollection(coll);
-s.adminCommand({ enablesharding: dbname });
+s.adminCommand({enablesharding: dbname});
st.ensurePrimaryShard(dbname, 'shard0001');
// Create fresh collection with lots of docs
t.drop();
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 200000; i++) {
- bulk.insert({ a: i });
+ bulk.insert({a: i});
}
assert.writeOK(bulk.execute());
// enable sharding of the collection. Only 1 chunk.
-t.ensureIndex( { a : 1 } );
-s.adminCommand( { shardcollection : ns , key: { a : 1 } } );
+t.ensureIndex({a: 1});
+s.adminCommand({shardcollection: ns, key: {a: 1}});
// start a parallel shell that deletes things
-startMongoProgramNoConnect( "mongo" ,
- "--host" , getHostName() ,
- "--port" , st.s0.port ,
- "--eval" , "db." + coll + ".remove({});" ,
- dbname );
+startMongoProgramNoConnect("mongo",
+ "--host",
+ getHostName(),
+ "--port",
+ st.s0.port,
+ "--eval",
+ "db." + coll + ".remove({});",
+ dbname);
// migrate while deletions are happening
-var moveResult = s.adminCommand( { moveChunk : ns ,
- find : { a : 1 } ,
- to : st.getOther( st.getPrimaryShard( dbname ) ).name } );
+var moveResult = s.adminCommand(
+ {moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name});
// check if migration worked
-assert( moveResult.ok , "migration didn't work while doing deletes" );
+assert(moveResult.ok, "migration didn't work while doing deletes");
st.stop();
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index 9b8500f01d1..a8f5469ed2f 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -1,24 +1,25 @@
(function() {
-var s = new ShardingTest({ name: "diffservers1", shards: 2 });
+ var s = new ShardingTest({name: "diffservers1", shards: 2});
-assert.eq( 2 , s.config.shards.count() , "server count wrong" );
-assert.eq( 0 , s._connections[0].getDB( "config" ).shards.count() , "shouldn't be here" );
-assert.eq( 0 , s._connections[1].getDB( "config" ).shards.count() , "shouldn't be here" );
+ assert.eq(2, s.config.shards.count(), "server count wrong");
+ assert.eq(0, s._connections[0].getDB("config").shards.count(), "shouldn't be here");
+ assert.eq(0, s._connections[1].getDB("config").shards.count(), "shouldn't be here");
-test1 = s.getDB( "test1" ).foo;
-test1.save( { a : 1 } );
-test1.save( { a : 2 } );
-test1.save( { a : 3 } );
-assert.eq( 3 , test1.count() );
+ test1 = s.getDB("test1").foo;
+ test1.save({a: 1});
+ test1.save({a: 2});
+ test1.save({a: 3});
+ assert.eq(3, test1.count());
-assert( ! s.admin.runCommand( { addshard: "sdd$%" } ).ok , "bad hostname" );
+ assert(!s.admin.runCommand({addshard: "sdd$%"}).ok, "bad hostname");
-var portWithoutHostRunning = allocatePort();
-assert(!s.admin.runCommand({addshard: "127.0.0.1:" + portWithoutHostRunning}).ok, "host not up");
-assert(!s.admin.runCommand({ addshard: "10.0.0.1:" + portWithoutHostRunning}).ok,
- "allowed shard in IP when config is localhost" );
+ var portWithoutHostRunning = allocatePort();
+ assert(!s.admin.runCommand({addshard: "127.0.0.1:" + portWithoutHostRunning}).ok,
+ "host not up");
+ assert(!s.admin.runCommand({addshard: "10.0.0.1:" + portWithoutHostRunning}).ok,
+ "allowed shard in IP when config is localhost");
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/disable_autosplit.js b/jstests/sharding/disable_autosplit.js
index ec3b593ea24..c6b7b7d5e1f 100644
--- a/jstests/sharding/disable_autosplit.js
+++ b/jstests/sharding/disable_autosplit.js
@@ -1,34 +1,32 @@
// Tests disabling of autosplit from mongos
(function() {
-'use strict';
+ 'use strict';
-var chunkSize = 1; // In MB
+ var chunkSize = 1; // In MB
-var st = new ShardingTest({ shards: 1,
- mongos: 1,
- other: { chunksize: chunkSize,
- mongosOptions: { noAutoSplit: "" } } });
+ var st = new ShardingTest(
+ {shards: 1, mongos: 1, other: {chunksize: chunkSize, mongosOptions: {noAutoSplit: ""}}});
-var data = "x";
-while(data.length < chunkSize * 1024 * 1024) {
- data += data;
-}
+ var data = "x";
+ while (data.length < chunkSize * 1024 * 1024) {
+ data += data;
+ }
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var config = mongos.getDB("config");
-var coll = mongos.getCollection("foo.bar");
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var coll = mongos.getCollection("foo.bar");
-assert.commandWorked(admin.runCommand({ enableSharding: coll.getDB() + "" }));
-assert.commandWorked(admin.runCommand({ shardCollection: coll + "", key: { _id: 1 } }));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-for(var i = 0; i < 20; i++) {
- coll.insert({ data: data });
-}
+ for (var i = 0; i < 20; i++) {
+ coll.insert({data: data});
+ }
-// Make sure we haven't split
-assert.eq(1, config.chunks.find({ ns: coll + "" }).count());
+ // Make sure we haven't split
+ assert.eq(1, config.chunks.find({ns: coll + ""}).count());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/drop_configdb.js b/jstests/sharding/drop_configdb.js
index 8ac87648dfa..63a3b533597 100644
--- a/jstests/sharding/drop_configdb.js
+++ b/jstests/sharding/drop_configdb.js
@@ -1,35 +1,35 @@
// Test that dropping the config database is completely disabled via
// mongos and via mongod, if started with --configsvr
(function() {
-"use strict";
+ "use strict";
-var getConfigsvrToWriteTo = function(st) {
- if (st.configRS) {
- return st.configRS.getPrimary();
- } else {
- return st._configServers[0];
- }
-};
+ var getConfigsvrToWriteTo = function(st) {
+ if (st.configRS) {
+ return st.configRS.getPrimary();
+ } else {
+ return st._configServers[0];
+ }
+ };
-var st = new ShardingTest({ shards : 2 });
-var mongos = st.s;
-var config = getConfigsvrToWriteTo(st).getDB('config');
+ var st = new ShardingTest({shards: 2});
+ var mongos = st.s;
+ var config = getConfigsvrToWriteTo(st).getDB('config');
-// Try to drop config db via configsvr
+ // Try to drop config db via configsvr
-print ( "1: Try to drop config database via configsvr" );
-assert.eq(0, config.dropDatabase().ok);
-assert.eq("Cannot drop 'config' database if mongod started with --configsvr",
- config.dropDatabase().errmsg);
+ print("1: Try to drop config database via configsvr");
+ assert.eq(0, config.dropDatabase().ok);
+ assert.eq("Cannot drop 'config' database if mongod started with --configsvr",
+ config.dropDatabase().errmsg);
-// Try to drop config db via mongos
-var config = mongos.getDB( "config" );
+ // Try to drop config db via mongos
+ var config = mongos.getDB("config");
-print ( "1: Try to drop config database via mongos" );
-assert.eq(0, config.dropDatabase().ok);
+ print("1: Try to drop config database via mongos");
+ assert.eq(0, config.dropDatabase().ok);
-// 20 = ErrorCodes::IllegalOperation
-assert.eq(20, config.dropDatabase().code);
+ // 20 = ErrorCodes::IllegalOperation
+ assert.eq(20, config.dropDatabase().code);
-st.stop();
+ st.stop();
}()); \ No newline at end of file
diff --git a/jstests/sharding/drop_sharded_db.js b/jstests/sharding/drop_sharded_db.js
index 1c3e95460c2..962ff84fc40 100644
--- a/jstests/sharding/drop_sharded_db.js
+++ b/jstests/sharding/drop_sharded_db.js
@@ -1,66 +1,69 @@
// Tests the dropping of a sharded database SERVER-3471 SERVER-1726
(function() {
-var st = new ShardingTest({ shards: 2 });
+ var st = new ShardingTest({shards: 2});
-var mongos = st.s0;
-var config = mongos.getDB("config");
+ var mongos = st.s0;
+ var config = mongos.getDB("config");
-var dbA = mongos.getDB("DropSharded_A");
-var dbB = mongos.getDB("DropSharded_B");
-var dbC = mongos.getDB("DropSharded_C");
+ var dbA = mongos.getDB("DropSharded_A");
+ var dbB = mongos.getDB("DropSharded_B");
+ var dbC = mongos.getDB("DropSharded_C");
-// Dropping a database that doesn't exist will result in an info field in the response.
-var res = assert.commandWorked(dbA.dropDatabase());
-assert.eq('database does not exist', res.info);
+ // Dropping a database that doesn't exist will result in an info field in the response.
+ var res = assert.commandWorked(dbA.dropDatabase());
+ assert.eq('database does not exist', res.info);
-var numDocs = 3000;
-var numColls = 10;
-for (var i = 0; i < numDocs; i++) {
- dbA.getCollection("data" + (i % numColls)).insert({ _id: i });
- dbB.getCollection("data" + (i % numColls)).insert({ _id: i });
- dbC.getCollection("data" + (i % numColls)).insert({ _id: i });
-}
+ var numDocs = 3000;
+ var numColls = 10;
+ for (var i = 0; i < numDocs; i++) {
+ dbA.getCollection("data" + (i % numColls)).insert({_id: i});
+ dbB.getCollection("data" + (i % numColls)).insert({_id: i});
+ dbC.getCollection("data" + (i % numColls)).insert({_id: i});
+ }
-var key = { _id: 1 };
-for (var i = 0; i < numColls; i++) {
- st.shardColl(dbA.getCollection("data" + i), key);
- st.shardColl(dbB.getCollection("data" + i), key);
- st.shardColl(dbC.getCollection("data" + i), key);
-}
+ var key = {
+ _id: 1
+ };
+ for (var i = 0; i < numColls; i++) {
+ st.shardColl(dbA.getCollection("data" + i), key);
+ st.shardColl(dbB.getCollection("data" + i), key);
+ st.shardColl(dbC.getCollection("data" + i), key);
+ }
-// Insert a document to an unsharded collection and make sure that the document is there.
-assert.writeOK(dbA.unsharded.insert({ dummy: 1 }));
-var shardName = config.databases.findOne({ _id: dbA.getName() }).primary;
-var shardHostConn = new Mongo(config.shards.findOne({ _id: shardName }).host);
-var dbAOnShard = shardHostConn.getDB(dbA.getName());
-assert.neq(null, dbAOnShard.unsharded.findOne({ dummy: 1 }));
+ // Insert a document to an unsharded collection and make sure that the document is there.
+ assert.writeOK(dbA.unsharded.insert({dummy: 1}));
+ var shardName = config.databases.findOne({_id: dbA.getName()}).primary;
+ var shardHostConn = new Mongo(config.shards.findOne({_id: shardName}).host);
+ var dbAOnShard = shardHostConn.getDB(dbA.getName());
+ assert.neq(null, dbAOnShard.unsharded.findOne({dummy: 1}));
-// Drop the non-suffixed db and ensure that it is the only one that was dropped.
-dbA.dropDatabase();
-var dbs = mongos.getDBNames();
-for (var i = 0; i < dbs.length; i++) {
- assert.neq(dbs, "" + dbA);
-}
+ // Drop the non-suffixed db and ensure that it is the only one that was dropped.
+ dbA.dropDatabase();
+ var dbs = mongos.getDBNames();
+ for (var i = 0; i < dbs.length; i++) {
+ assert.neq(dbs, "" + dbA);
+ }
-assert.eq(0, config.databases.count({ _id: dbA.getName() }));
-assert.eq(1, config.databases.count({ _id: dbB.getName() }));
-assert.eq(1, config.databases.count({ _id: dbC.getName() }));
+ assert.eq(0, config.databases.count({_id: dbA.getName()}));
+ assert.eq(1, config.databases.count({_id: dbB.getName()}));
+ assert.eq(1, config.databases.count({_id: dbC.getName()}));
-// 10 dropped collections
-assert.eq(numColls, config.collections.count({ _id: RegExp("^" + dbA + "\\..*"), dropped: true }));
+ // 10 dropped collections
+ assert.eq(numColls,
+ config.collections.count({_id: RegExp("^" + dbA + "\\..*"), dropped: true}));
-// 20 active (dropped is missing)
-assert.eq(numColls, config.collections.count({ _id: RegExp("^" + dbB + "\\..*") }));
-assert.eq(numColls, config.collections.count({ _id: RegExp("^" + dbC + "\\..*") }));
+ // 20 active (dropped is missing)
+ assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbB + "\\..*")}));
+ assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbC + "\\..*")}));
-for (var i = 0; i < numColls; i++) {
- assert.eq(numDocs / numColls, dbB.getCollection("data" + (i % numColls)).find().itcount());
- assert.eq(numDocs / numColls, dbC.getCollection("data" + (i % numColls)).find().itcount());
-}
+ for (var i = 0; i < numColls; i++) {
+ assert.eq(numDocs / numColls, dbB.getCollection("data" + (i % numColls)).find().itcount());
+ assert.eq(numDocs / numColls, dbC.getCollection("data" + (i % numColls)).find().itcount());
+ }
-// Check that the unsharded collection should have been dropped.
-assert.eq(null, dbAOnShard.unsharded.findOne());
+ // Check that the unsharded collection should have been dropped.
+ assert.eq(null, dbAOnShard.unsharded.findOne());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/dump_coll_metadata.js b/jstests/sharding/dump_coll_metadata.js
index 7fe91e0a75c..eb60af37cb4 100644
--- a/jstests/sharding/dump_coll_metadata.js
+++ b/jstests/sharding/dump_coll_metadata.js
@@ -2,56 +2,56 @@
// Tests that we can dump collection metadata via getShardVersion()
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+ var st = new ShardingTest({shards: 2, mongos: 1});
-var mongos = st.s0;
-var coll = mongos.getCollection( "foo.bar" );
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var shardAdmin = st.shard0.getDB( "admin" );
+ var mongos = st.s0;
+ var coll = mongos.getCollection("foo.bar");
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
+ var shardAdmin = st.shard0.getDB("admin");
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB() + "" }));
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
-assert.commandWorked(admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-assert.commandWorked(shardAdmin.runCommand({ getShardVersion : coll + "" }));
+ assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + ""}));
-// Make sure we have chunks information on the shard after the shard collection call
-var result =
- assert.commandWorked(shardAdmin.runCommand({ getShardVersion : coll + "", fullMetadata : true }));
-printjson(result);
-var metadata = result.metadata;
+ // Make sure we have chunks information on the shard after the shard collection call
+ var result = assert.commandWorked(
+ shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true}));
+ printjson(result);
+ var metadata = result.metadata;
-assert.eq( metadata.chunks.length, 1 );
-assert.eq( metadata.pending.length, 0 );
-assert( metadata.chunks[0][0]._id + "" == MinKey + "" );
-assert( metadata.chunks[0][1]._id + "" == MaxKey + "" );
-assert( metadata.shardVersion + "" == result.global + "" );
+ assert.eq(metadata.chunks.length, 1);
+ assert.eq(metadata.pending.length, 0);
+ assert(metadata.chunks[0][0]._id + "" == MinKey + "");
+ assert(metadata.chunks[0][1]._id + "" == MaxKey + "");
+ assert(metadata.shardVersion + "" == result.global + "");
-// Make sure a collection with no metadata still returns the metadata field
-assert( shardAdmin.runCommand({ getShardVersion : coll + "xyz", fullMetadata : true })
- .metadata != undefined );
+ // Make sure a collection with no metadata still returns the metadata field
+ assert(shardAdmin.runCommand({getShardVersion: coll + "xyz", fullMetadata: true}).metadata !=
+ undefined);
-// Make sure we get multiple chunks after a split
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+ // Make sure we get multiple chunks after a split
+ assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
-assert( shardAdmin.runCommand({ getShardVersion : coll + "" }).ok );
-printjson( shardAdmin.runCommand({ getShardVersion : coll + "", fullMetadata : true }) );
+ assert(shardAdmin.runCommand({getShardVersion: coll + ""}).ok);
+ printjson(shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true}));
-// Make sure we have chunks info
-result = shardAdmin.runCommand({ getShardVersion : coll + "", fullMetadata : true });
-metadata = result.metadata;
+ // Make sure we have chunks info
+ result = shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true});
+ metadata = result.metadata;
-assert.eq( metadata.chunks.length, 2 );
-assert.eq( metadata.pending.length, 0 );
-assert( metadata.chunks[0][0]._id + "" == MinKey + "" );
-assert( metadata.chunks[0][1]._id == 0 );
-assert( metadata.chunks[1][0]._id == 0 );
-assert( metadata.chunks[1][1]._id + "" == MaxKey + "" );
-assert( metadata.shardVersion + "" == result.global + "" );
+ assert.eq(metadata.chunks.length, 2);
+ assert.eq(metadata.pending.length, 0);
+ assert(metadata.chunks[0][0]._id + "" == MinKey + "");
+ assert(metadata.chunks[0][1]._id == 0);
+ assert(metadata.chunks[1][0]._id == 0);
+ assert(metadata.chunks[1][1]._id + "" == MaxKey + "");
+ assert(metadata.shardVersion + "" == result.global + "");
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/empty_cluster_init.js b/jstests/sharding/empty_cluster_init.js
index e1251440c35..dfbc0604fe7 100644
--- a/jstests/sharding/empty_cluster_init.js
+++ b/jstests/sharding/empty_cluster_init.js
@@ -1,13 +1,11 @@
//
// Tests initialization of an empty cluster with multiple mongoses.
-// Starts a bunch of mongoses in parallel, and ensures that there's only a single config
+// Starts a bunch of mongoses in parallel, and ensures that there's only a single config
// version initialization.
//
-var configRS = new ReplSetTest({ name: "configRS", nodes: 3, useHostName: true });
-configRS.startSet({ configsvr: '',
- journal: "",
- storageEngine: 'wiredTiger' });
+var configRS = new ReplSetTest({name: "configRS", nodes: 3, useHostName: true});
+configRS.startSet({configsvr: '', journal: "", storageEngine: 'wiredTiger'});
var replConfig = configRS.getReplSetConfig();
replConfig.configsvr = true;
configRS.initiate(replConfig);
@@ -20,9 +18,8 @@ jsTest.log("Starting first set of mongoses in parallel...");
var mongoses = [];
for (var i = 0; i < 3; i++) {
- var mongos = MongoRunner.runMongos({ binVersion: "latest",
- configdb: configRS.getURL(),
- waitForConnect : false });
+ var mongos = MongoRunner.runMongos(
+ {binVersion: "latest", configdb: configRS.getURL(), waitForConnect: false});
mongoses.push(mongos);
}
@@ -33,13 +30,12 @@ assert.soon(function() {
try {
mongosConn = new Mongo(mongoses[0].host);
return true;
- }
- catch (e) {
+ } catch (e) {
print("Waiting for connect...");
printjson(e);
return false;
}
-}, "Mongos " + mongoses[0].host + " did not start.", 5 * 60 * 1000 );
+}, "Mongos " + mongoses[0].host + " did not start.", 5 * 60 * 1000);
var version = mongosConn.getCollection("config.version").findOne();
@@ -50,9 +46,8 @@ var version = mongosConn.getCollection("config.version").findOne();
jsTest.log("Starting second set of mongoses...");
for (var i = 0; i < 3; i++) {
- var mongos = MongoRunner.runMongos({ binVersion: "latest",
- configdb: configRS.getURL(),
- waitForConnect: false });
+ var mongos = MongoRunner.runMongos(
+ {binVersion: "latest", configdb: configRS.getURL(), waitForConnect: false});
mongoses.push(mongos);
}
@@ -61,8 +56,7 @@ assert.soon(function() {
try {
mongosConn = new Mongo(mongoses[mongoses.length - 1].host);
return true;
- }
- catch (e) {
+ } catch (e) {
print("Waiting for connect...");
printjson(e);
return false;
@@ -84,8 +78,7 @@ assert(version.clusterId);
assert.eq(undefined, version.excluding);
var oplog = configRS.getPrimary().getDB('local').oplog.rs;
-var updates = oplog.find({ ns: "config.version" }).toArray();
+var updates = oplog.find({ns: "config.version"}).toArray();
assert.eq(1, updates.length, 'ops to config.version: ' + tojson(updates));
configRS.stopSet(15);
-
diff --git a/jstests/sharding/empty_doc_results.js b/jstests/sharding/empty_doc_results.js
index 6128bffd37b..be63f509532 100644
--- a/jstests/sharding/empty_doc_results.js
+++ b/jstests/sharding/empty_doc_results.js
@@ -2,25 +2,25 @@
// Verifies that mongos correctly handles empty documents when all fields are projected out
//
-var options = { mongosOptions : { binVersion : "" },
- shardOptions : { binVersion : "" } };
+var options = {
+ mongosOptions: {binVersion: ""},
+ shardOptions: {binVersion: ""}
+};
-var st = new ShardingTest({ shards : 2, other : options });
+var st = new ShardingTest({shards: 2, other: options});
var mongos = st.s0;
var coll = mongos.getCollection("foo.bar");
var admin = mongos.getDB("admin");
var shards = mongos.getDB("config").shards.find().toArray();
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB().getName() }));
-printjson(admin.runCommand({ movePrimary : coll.getDB().getName(), to : shards[0]._id }));
-assert.commandWorked(admin.runCommand({ shardCollection: coll.getFullName(),
- key: { _id : 1 } }));
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
+printjson(admin.runCommand({movePrimary: coll.getDB().getName(), to: shards[0]._id}));
+assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
-assert.commandWorked(admin.runCommand({ split : coll.getFullName(), middle : { _id : 0 } }));
-assert.commandWorked(admin.runCommand({ moveChunk : coll.getFullName(),
- find : { _id : 0 },
- to : shards[1]._id }));
+assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: shards[1]._id}));
st.printShardingStatus();
@@ -36,10 +36,10 @@ for (var i = -50; i < 50; i++) {
//
// Ensure projecting out all fields still returns the same number of documents
assert.eq(100, coll.find({}).itcount());
-assert.eq(100, coll.find({}).sort({ positiveId : 1 }).itcount());
-assert.eq(100, coll.find({}, { _id : 0, positiveId : 0 }).itcount());
+assert.eq(100, coll.find({}).sort({positiveId: 1}).itcount());
+assert.eq(100, coll.find({}, {_id: 0, positiveId: 0}).itcount());
// Can't remove sort key from projection (SERVER-11877) but some documents will still be empty
-assert.eq(100, coll.find({}, { _id : 0 }).sort({ positiveId : 1 }).itcount());
+assert.eq(100, coll.find({}, {_id: 0}).sort({positiveId: 1}).itcount());
//
//
@@ -50,8 +50,7 @@ var assertLast50Positive = function(sortedDocs) {
for (var i = 0; i < sortedDocs.length; ++i) {
if (sortedDocs[i].positiveId) {
positiveCount++;
- }
- else {
+ } else {
// Make sure only the last set of documents have "positiveId" set
assert.eq(positiveCount, 0);
}
@@ -59,8 +58,8 @@ var assertLast50Positive = function(sortedDocs) {
assert.eq(positiveCount, 50);
};
-assertLast50Positive(coll.find({}).sort({ positiveId : 1 }).toArray());
-assertLast50Positive(coll.find({}, { _id : 0 }).sort({ positiveId : 1 }).toArray());
+assertLast50Positive(coll.find({}).sort({positiveId: 1}).toArray());
+assertLast50Positive(coll.find({}, {_id: 0}).sort({positiveId: 1}).toArray());
jsTest.log("DONE!");
st.stop(); \ No newline at end of file
diff --git a/jstests/sharding/enable_sharding_basic.js b/jstests/sharding/enable_sharding_basic.js
index eb58df59d57..531a2efe57f 100644
--- a/jstests/sharding/enable_sharding_basic.js
+++ b/jstests/sharding/enable_sharding_basic.js
@@ -3,53 +3,51 @@
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({mongos:1, shards:2});
+ var st = new ShardingTest({mongos: 1, shards: 2});
-var mongos = st.s0;
+ var mongos = st.s0;
-// enableSharing can run only on mongos.
-assert.commandFailedWithCode(st.d0.getDB('admin').runCommand({enableSharding : 'db'}),
- ErrorCodes.CommandNotFound);
+ // enableSharing can run only on mongos.
+ assert.commandFailedWithCode(st.d0.getDB('admin').runCommand({enableSharding: 'db'}),
+ ErrorCodes.CommandNotFound);
-// enableSharing can run only against the admin database.
-assert.commandFailedWithCode(mongos.getDB('test').runCommand({enableSharding : 'db'}),
- ErrorCodes.Unauthorized);
+ // enableSharing can run only against the admin database.
+ assert.commandFailedWithCode(mongos.getDB('test').runCommand({enableSharding: 'db'}),
+ ErrorCodes.Unauthorized);
-// Can't shard 'config' database.
-assert.commandFailed(mongos.adminCommand({enableSharding : 'config'}));
+ // Can't shard 'config' database.
+ assert.commandFailed(mongos.adminCommand({enableSharding: 'config'}));
-// Can't shard 'local' database.
-assert.commandFailed(mongos.adminCommand({enableSharding : 'local'}));
+ // Can't shard 'local' database.
+ assert.commandFailed(mongos.adminCommand({enableSharding: 'local'}));
-// Can't shard 'admin' database.
-assert.commandFailed(mongos.adminCommand({enableSharding : 'admin'}));
+ // Can't shard 'admin' database.
+ assert.commandFailed(mongos.adminCommand({enableSharding: 'admin'}));
-// Can't shard db with the name that just differ on case.
-assert.commandWorked(mongos.adminCommand({enableSharding : 'db'}));
-assert.eq(mongos.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
+ // Can't shard db with the name that just differ on case.
+ assert.commandWorked(mongos.adminCommand({enableSharding: 'db'}));
+ assert.eq(mongos.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
-assert.commandFailedWithCode(mongos.adminCommand({enableSharding : 'DB'}),
- ErrorCodes.DatabaseDifferCase);
+ assert.commandFailedWithCode(mongos.adminCommand({enableSharding: 'DB'}),
+ ErrorCodes.DatabaseDifferCase);
-// Can't shard invalid db name.
-assert.commandFailed(mongos.adminCommand({enableSharding : 'a.b'}));
-assert.commandFailed(mongos.adminCommand({enableSharding : ''}));
+ // Can't shard invalid db name.
+ assert.commandFailed(mongos.adminCommand({enableSharding: 'a.b'}));
+ assert.commandFailed(mongos.adminCommand({enableSharding: ''}));
-// Can't shard already sharded database.
-assert.commandFailedWithCode(mongos.adminCommand({enableSharding : 'db'}),
- ErrorCodes.AlreadyInitialized);
-assert.eq(mongos.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
+ // Can't shard already sharded database.
+ assert.commandFailedWithCode(mongos.adminCommand({enableSharding: 'db'}),
+ ErrorCodes.AlreadyInitialized);
+ assert.eq(mongos.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
-// Verify config.databases metadata.
-assert.writeOK(mongos.getDB('unsharded').foo.insert({aKey: "aValue"}));
-assert.eq(mongos.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, false);
-assert.commandWorked(mongos.adminCommand({enableSharding : 'unsharded'}));
-assert.eq(mongos.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, true);
+ // Verify config.databases metadata.
+ assert.writeOK(mongos.getDB('unsharded').foo.insert({aKey: "aValue"}));
+ assert.eq(mongos.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, false);
+ assert.commandWorked(mongos.adminCommand({enableSharding: 'unsharded'}));
+ assert.eq(mongos.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, true);
-st.stop();
+ st.stop();
})();
-
-
diff --git a/jstests/sharding/error_propagation.js b/jstests/sharding/error_propagation.js
index 9948da66190..27336b5efb5 100644
--- a/jstests/sharding/error_propagation.js
+++ b/jstests/sharding/error_propagation.js
@@ -10,14 +10,14 @@
var db = st.getDB('test');
db.setSlaveOk(true);
- assert.writeOK(db.foo.insert({a:1}, {writeConcern: {w:3}}));
- assert.commandWorked(db.runCommand({aggregate: 'foo',
- pipeline: [{$project: {total: {'$add': ['$a', 1]}}}]}));
+ assert.writeOK(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
+ assert.commandWorked(
+ db.runCommand({aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}]}));
- assert.writeOK(db.foo.insert({a: [1, 2]}, {writeConcern: {w:3}}));
+ assert.writeOK(db.foo.insert({a: [1, 2]}, {writeConcern: {w: 3}}));
- var res = db.runCommand({aggregate: 'foo',
- pipeline: [{$project: {total: {'$add': ['$a', 1]}}}]});
+ var res =
+ db.runCommand({aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}]});
assert.commandFailed(res);
assert.eq("$add only supports numeric or date types, not Array", res.errmsg, printjson(res));
}());
diff --git a/jstests/sharding/exact_shard_key_target.js b/jstests/sharding/exact_shard_key_target.js
index e21e6be95b1..885647ec96e 100644
--- a/jstests/sharding/exact_shard_key_target.js
+++ b/jstests/sharding/exact_shard_key_target.js
@@ -4,70 +4,72 @@
// SERVER-14138
//
-var st = new ShardingTest({ shards : 2, verbose : 4 });
+var st = new ShardingTest({shards: 2, verbose: 4});
var mongos = st.s0;
var coll = mongos.getCollection("foo.bar");
var admin = mongos.getDB("admin");
var shards = mongos.getDB("config").shards.find().toArray();
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB().getName() }));
-printjson(admin.runCommand({ movePrimary : coll.getDB().getName(), to : shards[0]._id }));
-assert.commandWorked(admin.runCommand({ shardCollection: coll.getFullName(),
- key: { "a.b": 1 } }));
-assert.commandWorked(admin.runCommand({ split: coll.getFullName(), middle: { "a.b": 0 } }));
-assert.commandWorked(admin.runCommand({ moveChunk: coll.getFullName(),
- find: { "a.b": 0 },
- to: shards[1]._id }));
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
+printjson(admin.runCommand({movePrimary: coll.getDB().getName(), to: shards[0]._id}));
+assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {"a.b": 1}}));
+assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {"a.b": 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {"a.b": 0}, to: shards[1]._id}));
st.printShardingStatus();
//
// JustOne remove
coll.remove({});
-assert.writeOK(coll.insert({ _id : 1, a : { b : -1 } }));
-assert.writeOK(coll.insert({ _id : 2, a : { b : 1 } }));
+assert.writeOK(coll.insert({_id: 1, a: {b: -1}}));
+assert.writeOK(coll.insert({_id: 2, a: {b: 1}}));
// Need orphaned data to see the impact
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 3, a : { b : 1 } }));
-assert.eq(1, coll.remove({ a : { b : 1 } }, { justOne : true }).nRemoved);
-assert.eq(2, st.shard0.getCollection(coll.toString()).count() +
- st.shard1.getCollection(coll.toString()).count() );
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 3, a: {b: 1}}));
+assert.eq(1, coll.remove({a: {b: 1}}, {justOne: true}).nRemoved);
+assert.eq(2,
+ st.shard0.getCollection(coll.toString()).count() +
+ st.shard1.getCollection(coll.toString()).count());
//
// Non-multi update
coll.remove({});
-assert.writeOK(coll.insert({ _id : 1, a : { b : 1 } }));
-assert.writeOK(coll.insert({ _id : 2, a : { b : -1 } }));
+assert.writeOK(coll.insert({_id: 1, a: {b: 1}}));
+assert.writeOK(coll.insert({_id: 2, a: {b: -1}}));
// Need orphaned data to see the impact
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 3, a : { b : 1 } }));
-assert.eq(1, coll.update({ a : { b : 1 } }, { $set : { updated : true } },
- { multi : false }).nMatched);
-assert.eq(1, st.shard0.getCollection(coll.toString()).count({ updated : true }) +
- st.shard1.getCollection(coll.toString()).count({ updated : true }) );
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 3, a: {b: 1}}));
+assert.eq(1, coll.update({a: {b: 1}}, {$set: {updated: true}}, {multi: false}).nMatched);
+assert.eq(1,
+ st.shard0.getCollection(coll.toString()).count({updated: true}) +
+ st.shard1.getCollection(coll.toString()).count({updated: true}));
//
// Successive upserts (save()-style)
coll.remove({});
-assert.writeOK(coll.update({ _id : 1 }, { _id : 1, a : { b : 1 } }, { upsert : true }));
-assert.writeOK(coll.update({ _id : 1 }, { _id : 1, a : { b : 1 } }, { upsert : true }));
-assert.eq(1, st.shard0.getCollection(coll.toString()).count() +
- st.shard1.getCollection(coll.toString()).count() );
+assert.writeOK(coll.update({_id: 1}, {_id: 1, a: {b: 1}}, {upsert: true}));
+assert.writeOK(coll.update({_id: 1}, {_id: 1, a: {b: 1}}, {upsert: true}));
+assert.eq(1,
+ st.shard0.getCollection(coll.toString()).count() +
+ st.shard1.getCollection(coll.toString()).count());
//
// Successive upserts (replacement-style)
coll.remove({});
-assert.writeOK(coll.update({ a : { b : 1 } }, { a : { b : 1 } }, { upsert : true }));
-assert.writeOK(coll.update({ a : { b : 1 } }, { a : { b : 1 } }, { upsert : true }));
-assert.eq(1, st.shard0.getCollection(coll.toString()).count() +
- st.shard1.getCollection(coll.toString()).count() );
+assert.writeOK(coll.update({a: {b: 1}}, {a: {b: 1}}, {upsert: true}));
+assert.writeOK(coll.update({a: {b: 1}}, {a: {b: 1}}, {upsert: true}));
+assert.eq(1,
+ st.shard0.getCollection(coll.toString()).count() +
+ st.shard1.getCollection(coll.toString()).count());
//
// Successive upserts ($op-style)
coll.remove({});
-assert.writeOK(coll.update({ a : { b : 1 } }, { $set : { upserted : true } }, { upsert : true }));
-assert.writeOK(coll.update({ a : { b : 1 } }, { $set : { upserted : true } }, { upsert : true }));
-assert.eq(1, st.shard0.getCollection(coll.toString()).count() +
- st.shard1.getCollection(coll.toString()).count() );
+assert.writeOK(coll.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true}));
+assert.writeOK(coll.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true}));
+assert.eq(1,
+ st.shard0.getCollection(coll.toString()).count() +
+ st.shard1.getCollection(coll.toString()).count());
jsTest.log("DONE!");
st.stop();
diff --git a/jstests/sharding/explain_cmd.js b/jstests/sharding/explain_cmd.js
index fa81dbf6b79..767e26c7eb2 100644
--- a/jstests/sharding/explain_cmd.js
+++ b/jstests/sharding/explain_cmd.js
@@ -23,10 +23,8 @@ db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}});
for (var i = 1; i <= 2; i++) {
assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: i}}));
- var shardName = "shard000" + (i-1);
- printjson(db.adminCommand({moveChunk: collSharded.getFullName(),
- find: {a: i},
- to: shardName}));
+ var shardName = "shard000" + (i - 1);
+ printjson(db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: i}, to: shardName}));
}
// Put data on each shard.
@@ -40,13 +38,8 @@ st.printShardingStatus();
assert.eq(3, collSharded.count({b: 1}));
// Explain the scatter-gather count.
-explain = db.runCommand({
- explain: {
- count: collSharded.getName(),
- query: {b: 1}
- },
- verbosity: "allPlansExecution"
-});
+explain = db.runCommand(
+ {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
// Validate some basic properties of the result.
printjson(explain);
@@ -58,10 +51,7 @@ assert.eq(2, explain.executionStats.executionStages.shards.length);
// An explain of a command that doesn't exist should fail gracefully.
explain = db.runCommand({
- explain: {
- nonexistent: collSharded.getName(),
- query: {b: 1}
- },
+ explain: {nonexistent: collSharded.getName(), query: {b: 1}},
verbosity: "allPlansExecution"
});
printjson(explain);
@@ -86,8 +76,8 @@ explain = db.runCommand({
ns: collUnsharded.getName(),
key: "a",
cond: "b",
- $reduce: function (curr, result) { },
- initial: { }
+ $reduce: function(curr, result) {},
+ initial: {}
}
},
verbosity: "allPlansExecution"
@@ -109,8 +99,8 @@ explain = db.runCommand({
ns: collSharded.getName(),
key: "a",
cond: "b",
- $reduce: function (curr, result) { },
- initial: { }
+ $reduce: function(curr, result) {},
+ initial: {}
}
},
verbosity: "allPlansExecution"
@@ -122,12 +112,7 @@ assert.commandFailed(explain);
// Explain a delete operation and verify that it hits all shards without the shard key
explain = db.runCommand({
- explain: {
- delete: collSharded.getName(),
- deletes: [
- {q: {b: 1}, limit: 0}
- ]
- },
+ explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
verbosity: "allPlansExecution"
});
assert.commandWorked(explain, tojson(explain));
@@ -140,12 +125,7 @@ assert.eq(3, collSharded.count({b: 1}));
// Explain a delete operation and verify that it hits only one shard with the shard key
explain = db.runCommand({
- explain: {
- delete: collSharded.getName(),
- deletes: [
- {q: {a: 1}, limit: 0}
- ]
- },
+ explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]},
verbosity: "allPlansExecution"
});
assert.commandWorked(explain, tojson(explain));
@@ -156,23 +136,15 @@ assert.eq(3, collSharded.count({b: 1}));
// Check that we fail gracefully if we try to do an explain of a write batch that has more
// than one operation in it.
explain = db.runCommand({
- explain: {
- delete: collSharded.getName(),
- deletes: [
- {q: {a: 1}, limit: 1},
- {q: {a: 2}, limit: 1}
- ]
- },
+ explain:
+ {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}]},
verbosity: "allPlansExecution"
});
assert.commandFailed(explain, tojson(explain));
// Explain a multi upsert operation and verify that it hits all shards
explain = db.runCommand({
- explain: {
- update: collSharded.getName(),
- updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]
- },
+ explain: {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]},
verbosity: "allPlansExecution"
});
assert.commandWorked(explain, tojson(explain));
@@ -186,10 +158,7 @@ assert.eq(0, collSharded.count({b: 10}));
// Explain an upsert operation and verify that it hits only a single shard
explain = db.runCommand({
- explain: {
- update: collSharded.getName(),
- updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]
- },
+ explain: {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]},
verbosity: "allPlansExecution"
});
assert.commandWorked(explain, tojson(explain));
@@ -199,11 +168,7 @@ assert.eq(0, collSharded.count({a: 10}));
// Explain an upsert operation which cannot be targeted, ensure an error is thrown
explain = db.runCommand({
- explain: {
- update: collSharded.getName(),
- updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]
- },
+ explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]},
verbosity: "allPlansExecution"
});
assert.commandFailed(explain, tojson(explain));
-
diff --git a/jstests/sharding/explain_find_and_modify_sharded.js b/jstests/sharding/explain_find_and_modify_sharded.js
index 2833e2c6e03..7c1b10321c2 100644
--- a/jstests/sharding/explain_find_and_modify_sharded.js
+++ b/jstests/sharding/explain_find_and_modify_sharded.js
@@ -12,7 +12,9 @@
st.stopBalancer();
var testDB = st.s.getDB('test');
- var shardKey = {a: 1};
+ var shardKey = {
+ a: 1
+ };
// Create a collection with an index on the intended shard key.
var shardedColl = testDB.getCollection(collName);
@@ -25,33 +27,21 @@
assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
var res = testDB.adminCommand({movePrimary: testDB.getName(), to: 'shard0000'});
assert(res.ok || res.errmsg == "it is already the primary");
- assert.commandWorked(testDB.adminCommand({
- shardCollection: shardedColl.getFullName(),
- key: shardKey
- }));
+ assert.commandWorked(
+ testDB.adminCommand({shardCollection: shardedColl.getFullName(), key: shardKey}));
// Split and move the chunks so that
// chunk { "a" : { "$minKey" : 1 } } -->> { "a" : 10 } is on shard0000
// chunk { "a" : 10 } -->> { "a" : { "$maxKey" : 1 } } is on shard0001
- assert.commandWorked(testDB.adminCommand({
- split: shardedColl.getFullName(),
- middle: {a: 10}
- }));
- assert.commandWorked(testDB.adminCommand({
- moveChunk: shardedColl.getFullName(),
- find: {a: 10},
- to: 'shard0001'
- }));
+ assert.commandWorked(testDB.adminCommand({split: shardedColl.getFullName(), middle: {a: 10}}));
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {a: 10}, to: 'shard0001'}));
var res;
// Queries that do not involve the shard key are invalid.
res = testDB.runCommand({
- explain: {
- findAndModify: collName,
- query: {b: 1},
- remove: true
- },
+ explain: {findAndModify: collName, query: {b: 1}, remove: true},
verbosity: 'queryPlanner'
});
assert.commandFailed(res);
@@ -82,12 +72,7 @@
// Test that the explain command is routed to "shard0000" when targeting the lower chunk range.
res = testDB.runCommand({
- explain: {
- findAndModify: collName,
- query: {a: 0},
- update: {$inc: {b: 7}},
- upsert: true
- },
+ explain: {findAndModify: collName, query: {a: 0}, update: {$inc: {b: 7}}, upsert: true},
verbosity: 'queryPlanner'
});
assert.commandWorked(res);
@@ -95,11 +80,7 @@
// Test that the explain command is routed to "shard0001" when targeting the higher chunk range.
res = testDB.runCommand({
- explain: {
- findAndModify: collName,
- query: {a: 20, c: 5},
- remove: true
- },
+ explain: {findAndModify: collName, query: {a: 20, c: 5}, remove: true},
verbosity: 'executionStats'
});
assert.commandWorked(res);
diff --git a/jstests/sharding/explain_read_pref.js b/jstests/sharding/explain_read_pref.js
index eab0a190ad4..cdf1d1e74a4 100644
--- a/jstests/sharding/explain_read_pref.js
+++ b/jstests/sharding/explain_read_pref.js
@@ -12,18 +12,16 @@ var assertCorrectTargeting = function(explain, isMongos, secExpected) {
var serverInfo;
if (isMongos) {
serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
- }
- else {
+ } else {
serverInfo = explain.serverInfo;
}
var explainDestConn = new Mongo(serverInfo.host + ':' + serverInfo.port);
- var isMaster = explainDestConn.getDB('admin').runCommand({ isMaster: 1 });
+ var isMaster = explainDestConn.getDB('admin').runCommand({isMaster: 1});
if (secExpected) {
assert(isMaster.secondary);
- }
- else {
+ } else {
assert(isMaster.ismaster);
}
};
@@ -34,34 +32,34 @@ var testAllModes = function(conn, isMongos) {
// { tag: 'two' } so we can test the interaction of modes and tags. Test
// a bunch of combinations.
[
- // mode, tagSets, expectedHost
- ['primary', undefined, false],
- ['primary', [{}], false],
+ // mode, tagSets, expectedHost
+ ['primary', undefined, false],
+ ['primary', [{}], false],
- ['primaryPreferred', undefined, false],
- ['primaryPreferred', [{tag: 'one'}], false],
- // Correctly uses primary and ignores the tag
- ['primaryPreferred', [{tag: 'two'}], false],
+ ['primaryPreferred', undefined, false],
+ ['primaryPreferred', [{tag: 'one'}], false],
+ // Correctly uses primary and ignores the tag
+ ['primaryPreferred', [{tag: 'two'}], false],
- ['secondary', undefined, true],
- ['secondary', [{tag: 'two'}], true],
- ['secondary', [{tag: 'doesntexist'}, {}], true],
- ['secondary', [{tag: 'doesntexist'}, {tag:'two'}], true],
+ ['secondary', undefined, true],
+ ['secondary', [{tag: 'two'}], true],
+ ['secondary', [{tag: 'doesntexist'}, {}], true],
+ ['secondary', [{tag: 'doesntexist'}, {tag: 'two'}], true],
- ['secondaryPreferred', undefined, true],
- ['secondaryPreferred', [{tag: 'one'}], false],
- ['secondaryPreferred', [{tag: 'two'}], true],
+ ['secondaryPreferred', undefined, true],
+ ['secondaryPreferred', [{tag: 'one'}], false],
+ ['secondaryPreferred', [{tag: 'two'}], true],
- // We don't have a way to alter ping times so we can't predict where an
- // untagged 'nearest' command should go, hence only test with tags.
- ['nearest', [{tag: 'one'}], false],
- ['nearest', [{tag: 'two'}], true]
+ // We don't have a way to alter ping times so we can't predict where an
+ // untagged 'nearest' command should go, hence only test with tags.
+ ['nearest', [{tag: 'one'}], false],
+ ['nearest', [{tag: 'two'}], true]
].forEach(function(args) {
var mode = args[0], tagSets = args[1], secExpected = args[2];
var testDB = conn.getDB('TestDB');
- conn.setSlaveOk(false); // purely rely on readPref
+ conn.setSlaveOk(false); // purely rely on readPref
jsTest.log('Testing mode: ' + mode + ', tag sets: ' + tojson(tagSets));
// .explain().find()
@@ -85,11 +83,8 @@ var testAllModes = function(conn, isMongos) {
assertCorrectTargeting(explain, isMongos, secExpected);
// .explain().group()
- explain = testDB.user.explain().group({
- key: {_id: 1},
- reduce: function(curr, result) {},
- initial: {}
- });
+ explain = testDB.user.explain().group(
+ {key: {_id: 1}, reduce: function(curr, result) {}, initial: {}});
assertCorrectTargeting(explain, isMongos, secExpected);
} finally {
// Restore old read pref.
@@ -98,7 +93,7 @@ var testAllModes = function(conn, isMongos) {
});
};
-var st = new ShardingTest({ shards: { rs0: { nodes: 2 }}});
+var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
st.stopBalancer();
ReplSetTest.awaitRSClientHosts(st.s, st.rs0.nodes);
@@ -106,8 +101,14 @@ ReplSetTest.awaitRSClientHosts(st.s, st.rs0.nodes);
// Tag primary with { dc: 'ny', tag: 'one' }, secondary with { dc: 'ny', tag: 'two' }
var primary = st.rs0.getPrimary();
var secondary = st.rs0.getSecondary();
-var PRIMARY_TAG = { dc: 'ny', tag: 'one' };
-var SECONDARY_TAG = { dc: 'ny', tag: 'two' };
+var PRIMARY_TAG = {
+ dc: 'ny',
+ tag: 'one'
+};
+var SECONDARY_TAG = {
+ dc: 'ny',
+ tag: 'two'
+};
var rsConfig = primary.getDB("local").system.replset.findOne();
jsTest.log('got rsconf ' + tojson(rsConfig));
@@ -124,9 +125,8 @@ rsConfig.version++;
jsTest.log('new rsconf ' + tojson(rsConfig));
try {
- primary.adminCommand({ replSetReconfig: rsConfig });
-}
-catch(e) {
+ primary.adminCommand({replSetReconfig: rsConfig});
+} catch (e) {
jsTest.log('replSetReconfig error: ' + e);
}
@@ -135,10 +135,9 @@ st.rs0.awaitSecondaryNodes();
// Force mongos to reconnect after our reconfig and also create the test database
assert.soon(function() {
try {
- st.s.getDB('TestDB').runCommand({ create: 'TestColl' });
+ st.s.getDB('TestDB').runCommand({create: 'TestColl'});
return true;
- }
- catch (x) {
+ } catch (x) {
// Intentionally caused an error that forces mongos's monitor to refresh.
jsTest.log('Caught exception while doing dummy command: ' + tojson(x));
return false;
@@ -154,8 +153,8 @@ jsTest.log('got rsconf ' + tojson(rsConfig));
var replConn = new Mongo(st.rs0.getURL());
// Make sure replica set connection is ready
-_awaitRSHostViaRSMonitor(primary.name, { ok: true, tags: PRIMARY_TAG }, st.rs0.name);
-_awaitRSHostViaRSMonitor(secondary.name, { ok: true, tags: SECONDARY_TAG }, st.rs0.name);
+_awaitRSHostViaRSMonitor(primary.name, {ok: true, tags: PRIMARY_TAG}, st.rs0.name);
+_awaitRSHostViaRSMonitor(secondary.name, {ok: true, tags: SECONDARY_TAG}, st.rs0.name);
testAllModes(replConn, false);
diff --git a/jstests/sharding/fair_balancer_round.js b/jstests/sharding/fair_balancer_round.js
index 8373d6fb0d6..90fc345c8cb 100644
--- a/jstests/sharding/fair_balancer_round.js
+++ b/jstests/sharding/fair_balancer_round.js
@@ -2,9 +2,11 @@
// Tests that a balancer round loads newly sharded collection data
//
-var options = {mongosOptions : {verbose : 1}};
+var options = {
+ mongosOptions: {verbose: 1}
+};
-var st = new ShardingTest({shards : 2, mongos : 2, other : options});
+var st = new ShardingTest({shards: 2, mongos: 2, other: options});
// Stop balancer initially
st.stopBalancer();
@@ -14,27 +16,26 @@ var staleMongos = st.s1;
var coll = mongos.getCollection("foo.bar");
// Shard collection through first mongos
-assert(mongos.adminCommand({enableSharding : coll.getDB() + ""}).ok);
+assert(mongos.adminCommand({enableSharding: coll.getDB() + ""}).ok);
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-assert(mongos.adminCommand({shardCollection : coll + "", key : {_id : 1}}).ok);
+assert(mongos.adminCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
// Create a bunch of chunks
var numSplits = 20;
-for ( var i = 0; i < numSplits; i++) {
- assert(mongos.adminCommand({split : coll + "", middle : {_id : i}}).ok);
+for (var i = 0; i < numSplits; i++) {
+ assert(mongos.adminCommand({split: coll + "", middle: {_id: i}}).ok);
}
// Stop the first mongos who setup the cluster.
st.stopMongos(0);
// Start balancer, which lets the stale mongos balance
-assert.writeOK(staleMongos.getDB("config").settings.update({_id: "balancer"},
- {$set: {stopped: false}},
- true));
+assert.writeOK(staleMongos.getDB("config")
+ .settings.update({_id: "balancer"}, {$set: {stopped: false}}, true));
// Make sure we eventually start moving chunks
assert.soon(function() {
- return staleMongos.getCollection("config.changelog").count({what : /moveChunk/}) > 0;
+ return staleMongos.getCollection("config.changelog").count({what: /moveChunk/}) > 0;
}, "no balance happened", 5 * 60 * 1000);
jsTest.log("DONE!");
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index 727f3cb2f09..dc547ddad74 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -1,204 +1,244 @@
(function() {
-var s = new ShardingTest({ name: "features1", shards: 2, mongos: 1 });
+ var s = new ShardingTest({name: "features1", shards: 2, mongos: 1});
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
-// ---- can't shard system namespaces ----
+ // ---- can't shard system namespaces ----
-assert( ! s.admin.runCommand( { shardcollection : "test.system.blah" , key : { num : 1 } } ).ok , "shard system namespace" );
+ assert(!s.admin.runCommand({shardcollection: "test.system.blah", key: {num: 1}}).ok,
+ "shard system namespace");
-// ---- setup test.foo -----
+ // ---- setup test.foo -----
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+ s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-db = s.getDB( "test" );
+ db = s.getDB("test");
-a = s._connections[0].getDB( "test" );
-b = s._connections[1].getDB( "test" );
+ a = s._connections[0].getDB("test");
+ b = s._connections[1].getDB("test");
-db.foo.ensureIndex( { y : 1 } );
+ db.foo.ensureIndex({y: 1});
-s.adminCommand( { split : "test.foo" , middle : { num : 10 } } );
-s.adminCommand( { movechunk : "test.foo",
- find : { num : 20 },
- to : s.getOther( s.getPrimaryShard( "test" ) ).name } );
+ s.adminCommand({split: "test.foo", middle: {num: 10}});
+ s.adminCommand(
+ {movechunk: "test.foo", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name});
-db.foo.save( { num : 5 } );
-db.foo.save( { num : 15 } );
+ db.foo.save({num: 5});
+ db.foo.save({num: 15});
-s.sync();
+ s.sync();
-// ---- make sure shard key index is everywhere ----
+ // ---- make sure shard key index is everywhere ----
-assert.eq( 3 , a.foo.getIndexKeys().length , "a index 1" );
-assert.eq( 3 , b.foo.getIndexKeys().length , "b index 1" );
+ assert.eq(3, a.foo.getIndexKeys().length, "a index 1");
+ assert.eq(3, b.foo.getIndexKeys().length, "b index 1");
-// ---- make sure if you add an index it goes everywhere ------
+ // ---- make sure if you add an index it goes everywhere ------
-db.foo.ensureIndex( { x : 1 } );
+ db.foo.ensureIndex({x: 1});
-s.sync();
+ s.sync();
-assert.eq( 4 , a.foo.getIndexKeys().length , "a index 2" );
-assert.eq( 4 , b.foo.getIndexKeys().length , "b index 2" );
+ assert.eq(4, a.foo.getIndexKeys().length, "a index 2");
+ assert.eq(4, b.foo.getIndexKeys().length, "b index 2");
-// ---- no unique indexes ------
+ // ---- no unique indexes ------
-db.foo.ensureIndex( { z : 1 } , true );
+ db.foo.ensureIndex({z: 1}, true);
-s.sync();
+ s.sync();
-assert.eq( 4 , a.foo.getIndexKeys().length , "a index 3" );
-assert.eq( 4 , b.foo.getIndexKeys().length , "b index 3" );
+ assert.eq(4, a.foo.getIndexKeys().length, "a index 3");
+ assert.eq(4, b.foo.getIndexKeys().length, "b index 3");
-db.foo.ensureIndex( { num : 1 , bar : 1 } , true );
-s.sync();
-assert.eq( 5 , b.foo.getIndexKeys().length , "c index 3" );
+ db.foo.ensureIndex({num: 1, bar: 1}, true);
+ s.sync();
+ assert.eq(5, b.foo.getIndexKeys().length, "c index 3");
-// ---- can't shard thing with unique indexes
+ // ---- can't shard thing with unique indexes
-db.foo2.ensureIndex( { a : 1 } );
-s.sync();
-printjson( db.foo2.getIndexes() );
-assert( s.admin.runCommand( { shardcollection : "test.foo2" , key : { num : 1 } } ).ok , "shard with index" );
+ db.foo2.ensureIndex({a: 1});
+ s.sync();
+ printjson(db.foo2.getIndexes());
+ assert(s.admin.runCommand({shardcollection: "test.foo2", key: {num: 1}}).ok,
+ "shard with index");
-db.foo3.ensureIndex( { a : 1 } , true );
-s.sync();
-printjson( db.foo3.getIndexes() );
-assert( ! s.admin.runCommand( { shardcollection : "test.foo3" , key : { num : 1 } } ).ok , "shard with unique index" );
+ db.foo3.ensureIndex({a: 1}, true);
+ s.sync();
+ printjson(db.foo3.getIndexes());
+ assert(!s.admin.runCommand({shardcollection: "test.foo3", key: {num: 1}}).ok,
+ "shard with unique index");
-db.foo7.ensureIndex( { num : 1 , a : 1 } , true );
-s.sync();
-printjson( db.foo7.getIndexes() );
-assert( s.admin.runCommand( { shardcollection : "test.foo7" , key : { num : 1 } } ).ok , "shard with ok unique index" );
+ db.foo7.ensureIndex({num: 1, a: 1}, true);
+ s.sync();
+ printjson(db.foo7.getIndexes());
+ assert(s.admin.runCommand({shardcollection: "test.foo7", key: {num: 1}}).ok,
+ "shard with ok unique index");
+ // ----- eval -----
-// ----- eval -----
+ db.foo2.save({num: 5, a: 7});
+ db.foo3.save({num: 5, a: 8});
-db.foo2.save( { num : 5 , a : 7 } );
-db.foo3.save( { num : 5 , a : 8 } );
+ assert.eq(1, db.foo3.count(), "eval pre1");
+ assert.eq(1, db.foo2.count(), "eval pre2");
-assert.eq( 1 , db.foo3.count() , "eval pre1" );
-assert.eq( 1 , db.foo2.count() , "eval pre2" );
+ assert.eq(8,
+ db.eval(function() {
+ return db.foo3.findOne().a;
+ }),
+ "eval 1 ");
+ assert.throws(function() {
+ db.eval(function() {
+ return db.foo2.findOne().a;
+ });
+ }, null, "eval 2");
-assert.eq( 8 , db.eval( function(){ return db.foo3.findOne().a; } ), "eval 1 " );
-assert.throws( function(){ db.eval( function(){ return db.foo2.findOne().a; } ); } , null , "eval 2" );
-
-assert.eq( 1 , db.eval( function(){ return db.foo3.count(); } ), "eval 3 " );
-assert.throws( function(){ db.eval( function(){ return db.foo2.count(); } ); } , null , "eval 4" );
-
-// ----- "eval" new command name SERVER-5588 -----
-var result;
-result = db.runCommand({eval: function () { return db.foo3.count(); } }); // non-sharded collection
-assert.eq(1, result.ok, "eval should work for non-sharded collection in cluster");
-
-result = db.runCommand({eval: function () { return db.foo2.count(); } }); // sharded collection
-assert.eq(0, result.ok, "eval should not work for sharded collection in cluster");
-
-
-// ---- unique shard key ----
-
-assert( s.admin.runCommand( { shardcollection : "test.foo4" , key : { num : 1 } , unique : true } ).ok , "shard with index and unique" );
-s.adminCommand( { split : "test.foo4" , middle : { num : 10 } } );
-
-s.admin.runCommand({ movechunk: "test.foo4", find: { num: 20 },
- to: s.getOther( s.getPrimaryShard( "test" ) ).name });
-
-assert.writeOK(db.foo4.save( { num : 5 } ));
-assert.writeOK(db.foo4.save( { num : 15 } ));
-s.sync();
-assert.eq( 1 , a.foo4.count() , "ua1" );
-assert.eq( 1 , b.foo4.count() , "ub1" );
-
-assert.eq( 2 , a.foo4.getIndexes().length , "ua2" );
-assert.eq( 2 , b.foo4.getIndexes().length , "ub2" );
-
-assert( a.foo4.getIndexes()[1].unique , "ua3" );
-assert( b.foo4.getIndexes()[1].unique , "ub3" );
-
-assert.eq( 2 , db.foo4.count() , "uc1" );
-db.foo4.save( { num : 7 } );
-assert.eq( 3 , db.foo4.count() , "uc2" );
-assert.writeError(db.foo4.save( { num : 7 } ));
-assert.eq( 3 , db.foo4.count() , "uc4" );
-
-// --- don't let you convertToCapped ----
-assert( ! db.foo4.isCapped() , "ca1" );
-assert( ! a.foo4.isCapped() , "ca2" );
-assert( ! b.foo4.isCapped() , "ca3" );
-assert( ! db.foo4.convertToCapped( 30000 ).ok , "ca30" );
-assert( ! db.foo4.isCapped() , "ca4" );
-assert( ! a.foo4.isCapped() , "ca5" );
-assert( ! b.foo4.isCapped() , "ca6" );
-
-// make sure i didn't break anything
-db.foo4a.save( { a : 1 } );
-assert( ! db.foo4a.isCapped() , "ca7" );
-db.foo4a.convertToCapped( 30000 );
-assert( db.foo4a.isCapped() , "ca8" );
-
-// --- don't let you shard a capped collection
-
-db.createCollection("foo5", {capped:true, size:30000});
-assert( db.foo5.isCapped() , "cb1" );
-var res = s.admin.runCommand( { shardcollection : "test.foo5" , key : { num : 1 } } );
-assert( !res.ok , "shard capped: " + tojson( res ) );
-
-
-// ----- group ----
-
-db.foo6.save( { a : 1 } );
-db.foo6.save( { a : 3 } );
-db.foo6.save( { a : 3 } );
-db.foo6.ensureIndex( { a : 1 } );
-s.sync();
-printjson( db.foo6.getIndexes() );
-
-assert.eq( 2 , db.foo6.group( { key : { a : 1 } , initial : { count : 0 } ,
- reduce : function(z,prev){ prev.count++; } } ).length );
-
-assert.eq( 3 , db.foo6.find().count() );
-assert( s.admin.runCommand( { shardcollection : "test.foo6" , key : { a : 1 } } ).ok );
-assert.eq( 3 , db.foo6.find().count() );
-
-s.adminCommand( { split : "test.foo6" , middle : { a : 2 } } );
-
-//movechunk commands are wrapped in assert.soon
-//Sometimes the TO-side shard isn't immediately ready, this
-//causes problems on slow hosts.
-//Remove when SERVER-10232 is fixed
-
-assert.soon( function() {
- var cmdRes = s.admin.runCommand( { movechunk : "test.foo6",
- find : { a : 3 },
- to : s.getOther( s.getPrimaryShard( "test" ) ).name } );
- return cmdRes.ok;
-}, 'move chunk test.foo6', 60000, 1000 );
-
-assert.throws( function(){ db.foo6.group( { key : { a : 1 } , initial : { count : 0 } , reduce : function(z,prev){ prev.count++; } } ); } );
-
-
-// ---- can't shard non-empty collection without index -----
-
-assert.writeOK(db.foo8.save( { a : 1 } ));
-assert( ! s.admin.runCommand( { shardcollection : "test.foo8" , key : { a : 1 } } ).ok , "non-empty collection" );
-
-
-// ---- can't shard non-empty collection with null values in shard key ----
-
-assert.writeOK(db.foo9.save( { b : 1 } ));
-db.foo9.ensureIndex( { a : 1 } );
-assert( ! s.admin.runCommand( { shardcollection : "test.foo9" , key : { a : 1 } } ).ok , "entry with null value" );
-
-
-// --- listDatabases ---
-
-r = db.getMongo().getDBs();
-assert.eq(2, r.databases.length, tojson(r));
-assert.eq( "number", typeof(r.totalSize) , "listDatabases 2 : " + tojson( r ) );
-
-s.stop();
+ assert.eq(1,
+ db.eval(function() {
+ return db.foo3.count();
+ }),
+ "eval 3 ");
+ assert.throws(function() {
+ db.eval(function() {
+ return db.foo2.count();
+ });
+ }, null, "eval 4");
+
+ // ----- "eval" new command name SERVER-5588 -----
+ var result;
+ result = db.runCommand({
+ eval: function() {
+ return db.foo3.count();
+ }
+ }); // non-sharded collection
+ assert.eq(1, result.ok, "eval should work for non-sharded collection in cluster");
+
+ result = db.runCommand({
+ eval: function() {
+ return db.foo2.count();
+ }
+ }); // sharded collection
+ assert.eq(0, result.ok, "eval should not work for sharded collection in cluster");
+
+ // ---- unique shard key ----
+
+ assert(s.admin.runCommand({shardcollection: "test.foo4", key: {num: 1}, unique: true}).ok,
+ "shard with index and unique");
+ s.adminCommand({split: "test.foo4", middle: {num: 10}});
+
+ s.admin.runCommand(
+ {movechunk: "test.foo4", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name});
+
+ assert.writeOK(db.foo4.save({num: 5}));
+ assert.writeOK(db.foo4.save({num: 15}));
+ s.sync();
+ assert.eq(1, a.foo4.count(), "ua1");
+ assert.eq(1, b.foo4.count(), "ub1");
+
+ assert.eq(2, a.foo4.getIndexes().length, "ua2");
+ assert.eq(2, b.foo4.getIndexes().length, "ub2");
+
+ assert(a.foo4.getIndexes()[1].unique, "ua3");
+ assert(b.foo4.getIndexes()[1].unique, "ub3");
+
+ assert.eq(2, db.foo4.count(), "uc1");
+ db.foo4.save({num: 7});
+ assert.eq(3, db.foo4.count(), "uc2");
+ assert.writeError(db.foo4.save({num: 7}));
+ assert.eq(3, db.foo4.count(), "uc4");
+
+ // --- don't let you convertToCapped ----
+ assert(!db.foo4.isCapped(), "ca1");
+ assert(!a.foo4.isCapped(), "ca2");
+ assert(!b.foo4.isCapped(), "ca3");
+ assert(!db.foo4.convertToCapped(30000).ok, "ca30");
+ assert(!db.foo4.isCapped(), "ca4");
+ assert(!a.foo4.isCapped(), "ca5");
+ assert(!b.foo4.isCapped(), "ca6");
+
+ // make sure i didn't break anything
+ db.foo4a.save({a: 1});
+ assert(!db.foo4a.isCapped(), "ca7");
+ db.foo4a.convertToCapped(30000);
+ assert(db.foo4a.isCapped(), "ca8");
+
+ // --- don't let you shard a capped collection
+
+ db.createCollection("foo5", {capped: true, size: 30000});
+ assert(db.foo5.isCapped(), "cb1");
+ var res = s.admin.runCommand({shardcollection: "test.foo5", key: {num: 1}});
+ assert(!res.ok, "shard capped: " + tojson(res));
+
+ // ----- group ----
+
+ db.foo6.save({a: 1});
+ db.foo6.save({a: 3});
+ db.foo6.save({a: 3});
+ db.foo6.ensureIndex({a: 1});
+ s.sync();
+ printjson(db.foo6.getIndexes());
+
+ assert.eq(2,
+ db.foo6.group({
+ key: {a: 1},
+ initial: {count: 0},
+ reduce: function(z, prev) {
+ prev.count++;
+ }
+ }).length);
+
+ assert.eq(3, db.foo6.find().count());
+ assert(s.admin.runCommand({shardcollection: "test.foo6", key: {a: 1}}).ok);
+ assert.eq(3, db.foo6.find().count());
+
+ s.adminCommand({split: "test.foo6", middle: {a: 2}});
+
+ // movechunk commands are wrapped in assert.soon
+ // Sometimes the TO-side shard isn't immediately ready, this
+ // causes problems on slow hosts.
+ // Remove when SERVER-10232 is fixed
+
+ assert.soon(function() {
+ var cmdRes = s.admin.runCommand({
+ movechunk: "test.foo6",
+ find: {a: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name
+ });
+ return cmdRes.ok;
+ }, 'move chunk test.foo6', 60000, 1000);
+
+ assert.throws(function() {
+ db.foo6.group({
+ key: {a: 1},
+ initial: {count: 0},
+ reduce: function(z, prev) {
+ prev.count++;
+ }
+ });
+ });
+
+ // ---- can't shard non-empty collection without index -----
+
+ assert.writeOK(db.foo8.save({a: 1}));
+ assert(!s.admin.runCommand({shardcollection: "test.foo8", key: {a: 1}}).ok,
+ "non-empty collection");
+
+ // ---- can't shard non-empty collection with null values in shard key ----
+
+ assert.writeOK(db.foo9.save({b: 1}));
+ db.foo9.ensureIndex({a: 1});
+ assert(!s.admin.runCommand({shardcollection: "test.foo9", key: {a: 1}}).ok,
+ "entry with null value");
+
+ // --- listDatabases ---
+
+ r = db.getMongo().getDBs();
+ assert.eq(2, r.databases.length, tojson(r));
+ assert.eq("number", typeof(r.totalSize), "listDatabases 2 : " + tojson(r));
+
+ s.stop();
})();
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index f632e24e80c..010289ac1cc 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -1,202 +1,210 @@
(function() {
-var s = new ShardingTest({ name: "features2", shards: 2, mongos: 1 });
+ var s = new ShardingTest({name: "features2", shards: 2, mongos: 1});
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
-a = s._connections[0].getDB( "test" );
-b = s._connections[1].getDB( "test" );
+ a = s._connections[0].getDB("test");
+ b = s._connections[1].getDB("test");
-db = s.getDB( "test" );
+ db = s.getDB("test");
-// ---- distinct ----
+ // ---- distinct ----
-db.foo.save( { x : 1 } );
-db.foo.save( { x : 2 } );
-db.foo.save( { x : 3 } );
-db.foo.ensureIndex( { x : 1 } );
+ db.foo.save({x: 1});
+ db.foo.save({x: 2});
+ db.foo.save({x: 3});
+ db.foo.ensureIndex({x: 1});
-assert.eq( "1,2,3" , db.foo.distinct( "x" ) , "distinct 1" );
-assert( a.foo.distinct("x").length == 3 || b.foo.distinct("x").length == 3 , "distinct 2" );
-assert( a.foo.distinct("x").length == 0 || b.foo.distinct("x").length == 0 , "distinct 3" );
+ assert.eq("1,2,3", db.foo.distinct("x"), "distinct 1");
+ assert(a.foo.distinct("x").length == 3 || b.foo.distinct("x").length == 3, "distinct 2");
+ assert(a.foo.distinct("x").length == 0 || b.foo.distinct("x").length == 0, "distinct 3");
-assert.eq( 1 , s.onNumShards( "foo" ) , "A1" );
+ assert.eq(1, s.onNumShards("foo"), "A1");
-s.shardColl( "foo" , { x : 1 } , { x : 2 } , { x : 3 }, null, true /* waitForDelete */ );
+ s.shardColl("foo", {x: 1}, {x: 2}, {x: 3}, null, true /* waitForDelete */);
-assert.eq( 2 , s.onNumShards( "foo" ) , "A2" );
+ assert.eq(2, s.onNumShards("foo"), "A2");
-assert.eq( "1,2,3" , db.foo.distinct( "x" ) , "distinct 4" );
+ assert.eq("1,2,3", db.foo.distinct("x"), "distinct 4");
-// ----- delete ---
+ // ----- delete ---
-assert.eq( 3 , db.foo.count() , "D1" );
+ assert.eq(3, db.foo.count(), "D1");
-db.foo.remove( { x : 3 } );
-assert.eq( 2 , db.foo.count() , "D2" );
+ db.foo.remove({x: 3});
+ assert.eq(2, db.foo.count(), "D2");
-db.foo.save( { x : 3 } );
-assert.eq( 3 , db.foo.count() , "D3" );
+ db.foo.save({x: 3});
+ assert.eq(3, db.foo.count(), "D3");
-db.foo.remove( { x : { $gt : 2 } } );
-assert.eq( 2 , db.foo.count() , "D4" );
+ db.foo.remove({x: {$gt: 2}});
+ assert.eq(2, db.foo.count(), "D4");
-db.foo.remove( { x : { $gt : -1 } } );
-assert.eq( 0 , db.foo.count() , "D5" );
+ db.foo.remove({x: {$gt: -1}});
+ assert.eq(0, db.foo.count(), "D5");
-db.foo.save( { x : 1 } );
-db.foo.save( { x : 2 } );
-db.foo.save( { x : 3 } );
-assert.eq( 3 , db.foo.count() , "D6" );
-db.foo.remove( {} );
-assert.eq( 0 , db.foo.count() , "D7" );
+ db.foo.save({x: 1});
+ db.foo.save({x: 2});
+ db.foo.save({x: 3});
+ assert.eq(3, db.foo.count(), "D6");
+ db.foo.remove({});
+ assert.eq(0, db.foo.count(), "D7");
-// --- _id key ---
+ // --- _id key ---
-db.foo2.save( { _id : new ObjectId() } );
-db.foo2.save( { _id : new ObjectId() } );
-db.foo2.save( { _id : new ObjectId() } );
+ db.foo2.save({_id: new ObjectId()});
+ db.foo2.save({_id: new ObjectId()});
+ db.foo2.save({_id: new ObjectId()});
-assert.eq( 1 , s.onNumShards( "foo2" ) , "F1" );
+ assert.eq(1, s.onNumShards("foo2"), "F1");
-printjson( db.foo2.getIndexes() );
-s.adminCommand( { shardcollection : "test.foo2" , key : { _id : 1 } } );
+ printjson(db.foo2.getIndexes());
+ s.adminCommand({shardcollection: "test.foo2", key: {_id: 1}});
-assert.eq( 3 , db.foo2.count() , "F2" );
-db.foo2.insert( {} );
-assert.eq( 4 , db.foo2.count() , "F3" );
+ assert.eq(3, db.foo2.count(), "F2");
+ db.foo2.insert({});
+ assert.eq(4, db.foo2.count(), "F3");
-// --- map/reduce
+ // --- map/reduce
-db.mr.save( { x : 1 , tags : [ "a" , "b" ] } );
-db.mr.save( { x : 2 , tags : [ "b" , "c" ] } );
-db.mr.save( { x : 3 , tags : [ "c" , "a" ] } );
-db.mr.save( { x : 4 , tags : [ "b" , "c" ] } );
-db.mr.ensureIndex( { x : 1 } );
+ db.mr.save({x: 1, tags: ["a", "b"]});
+ db.mr.save({x: 2, tags: ["b", "c"]});
+ db.mr.save({x: 3, tags: ["c", "a"]});
+ db.mr.save({x: 4, tags: ["b", "c"]});
+ db.mr.ensureIndex({x: 1});
-m = function(){
- this.tags.forEach(
- function(z){
- emit( z , { count : 1 } );
+ m = function() {
+ this.tags.forEach(function(z) {
+ emit(z, {count: 1});
+ });
+ };
+
+ r = function(key, values) {
+ var total = 0;
+ for (var i = 0; i < values.length; i++) {
+ total += values[i].count;
}
- );
-};
+ return {
+ count: total
+ };
+ };
+
+ doMR = function(n) {
+ print(n);
+
+ // on-disk
+
+ var res = db.mr.mapReduce(m, r, "smr1_out");
+ printjson(res);
+ assert.eq(4, res.counts.input, "MR T0 " + n);
+
+ var x = db[res.result];
+ assert.eq(3, x.find().count(), "MR T1 " + n);
+
+ var z = {};
+ x.find().forEach(function(a) {
+ z[a._id] = a.value.count;
+ });
+ assert.eq(3, Object.keySet(z).length, "MR T2 " + n);
+ assert.eq(2, z.a, "MR T3 " + n);
+ assert.eq(3, z.b, "MR T4 " + n);
+ assert.eq(3, z.c, "MR T5 " + n);
+
+ x.drop();
+
+ // inline
+
+ var res = db.mr.mapReduce(m, r, {out: {inline: 1}});
+ printjson(res);
+ assert.eq(4, res.counts.input, "MR T6 " + n);
+
+ var z = {};
+ res.find().forEach(function(a) {
+ z[a._id] = a.value.count;
+ });
+ printjson(z);
+ assert.eq(3, Object.keySet(z).length, "MR T7 " + n);
+ assert.eq(2, z.a, "MR T8 " + n);
+ assert.eq(3, z.b, "MR T9 " + n);
+ assert.eq(3, z.c, "MR TA " + n);
+
+ };
+
+ doMR("before");
-r = function( key , values ){
- var total = 0;
- for ( var i=0; i<values.length; i++ ){
- total += values[i].count;
+ assert.eq(1, s.onNumShards("mr"), "E1");
+ s.shardColl("mr", {x: 1}, {x: 2}, {x: 3}, null, true /* waitForDelete */);
+ assert.eq(2, s.onNumShards("mr"), "E1");
+
+ doMR("after");
+
+ s.adminCommand({split: 'test.mr', middle: {x: 3}});
+ s.adminCommand({split: 'test.mr', middle: {x: 4}});
+ s.adminCommand({movechunk: 'test.mr', find: {x: 3}, to: s.getPrimaryShard('test').name});
+
+ doMR("after extra split");
+
+ cmd = {
+ mapreduce: "mr",
+ map: "emit( ",
+ reduce: "fooz + ",
+ out: "broken1"
+ };
+
+ x = db.runCommand(cmd);
+ y = s._connections[0].getDB("test").runCommand(cmd);
+
+ printjson(x);
+ printjson(y);
+
+ // count
+
+ db.countaa.save({"regex": /foo/i});
+ db.countaa.save({"regex": /foo/i});
+ db.countaa.save({"regex": /foo/i});
+ assert.eq(3, db.countaa.count(), "counta1");
+ assert.eq(3, db.countaa.find().itcount(), "counta1");
+
+ x = null;
+ y = null;
+ try {
+ x = db.runCommand("forceerror");
+ } catch (e) {
+ x = e;
}
- return { count : total };
-};
-
-doMR = function( n ){
- print(n);
-
- // on-disk
-
- var res = db.mr.mapReduce( m , r , "smr1_out" );
- printjson( res );
- assert.eq( 4 , res.counts.input , "MR T0 " + n );
-
- var x = db[res.result];
- assert.eq( 3 , x.find().count() , "MR T1 " + n );
-
- var z = {};
- x.find().forEach( function(a){ z[a._id] = a.value.count; } );
- assert.eq( 3 , Object.keySet( z ).length , "MR T2 " + n );
- assert.eq( 2 , z.a , "MR T3 " + n );
- assert.eq( 3 , z.b , "MR T4 " + n );
- assert.eq( 3 , z.c , "MR T5 " + n );
-
- x.drop();
-
- // inline
-
- var res = db.mr.mapReduce( m , r , { out : { inline : 1 } } );
- printjson( res );
- assert.eq( 4 , res.counts.input , "MR T6 " + n );
-
- var z = {};
- res.find().forEach( function(a){ z[a._id] = a.value.count; } );
- printjson( z );
- assert.eq( 3 , Object.keySet( z ).length , "MR T7 " + n ) ;
- assert.eq( 2 , z.a , "MR T8 " + n );
- assert.eq( 3 , z.b , "MR T9 " + n );
- assert.eq( 3 , z.c , "MR TA " + n );
-
-};
-
-doMR( "before" );
-
-assert.eq( 1 , s.onNumShards( "mr" ) , "E1" );
-s.shardColl( "mr" , { x : 1 } , { x : 2 } , { x : 3 }, null, true /* waitForDelete */ );
-assert.eq( 2 , s.onNumShards( "mr" ) , "E1" );
-
-doMR( "after" );
-
-s.adminCommand({split:'test.mr' , middle:{x:3}} );
-s.adminCommand({split:'test.mr' , middle:{x:4}} );
-s.adminCommand({movechunk:'test.mr', find:{x:3}, to: s.getPrimaryShard('test').name } );
-
-doMR( "after extra split" );
-
-cmd = { mapreduce : "mr" , map : "emit( " , reduce : "fooz + " , out : "broken1" };
-
-x = db.runCommand( cmd );
-y = s._connections[0].getDB( "test" ).runCommand( cmd );
-
-printjson( x );
-printjson( y );
-
-// count
-
-db.countaa.save({"regex" : /foo/i});
-db.countaa.save({"regex" : /foo/i});
-db.countaa.save({"regex" : /foo/i});
-assert.eq( 3 , db.countaa.count() , "counta1" );
-assert.eq( 3 , db.countaa.find().itcount() , "counta1" );
-
-x = null; y = null;
-try {
- x = db.runCommand( "forceerror" );
-}
-catch ( e ){
- x = e;
-}
-try {
- y = s._connections[0].getDB( "test" ).runCommand( "forceerror" );
-}
-catch ( e ){
- y = e;
-}
-
-// As the forceerror command is written, it doesnt set a code in the reply.
-// OP_COMMAND changes will add a code of 121 (CommandFailed) if a failing command
-// does not set one, so this comparison fails as "undefined" != 121.
-//
-// TODO: Uncomment this line when OP_COMMAND is implemented in mongos (SERVER-18292)
-// as then MongoS should set code 121 as well.
-//
-// assert.eq( x.code , y.code , "assert format" )
-assert.eq( x.errmsg , y.errmsg , "assert format" );
-assert.eq( x.ok , y.ok , "assert format" );
-
-// isMaster and query-wrapped-command
-isMaster = db.runCommand({isMaster:1});
-assert( isMaster.ismaster );
-assert.eq( 'isdbgrid', isMaster.msg );
-delete isMaster.localTime;
-
-im2 = db.runCommand({query: {isMaster:1}});
-delete im2.localTime;
-assert.eq( isMaster, im2 );
-
-im2 = db.runCommand({$query: {isMaster:1}});
-delete im2.localTime;
-assert.eq( isMaster, im2 );
-
-s.stop();
+ try {
+ y = s._connections[0].getDB("test").runCommand("forceerror");
+ } catch (e) {
+ y = e;
+ }
+
+ // As the forceerror command is written, it doesnt set a code in the reply.
+ // OP_COMMAND changes will add a code of 121 (CommandFailed) if a failing command
+ // does not set one, so this comparison fails as "undefined" != 121.
+ //
+ // TODO: Uncomment this line when OP_COMMAND is implemented in mongos (SERVER-18292)
+ // as then MongoS should set code 121 as well.
+ //
+ // assert.eq( x.code , y.code , "assert format" )
+ assert.eq(x.errmsg, y.errmsg, "assert format");
+ assert.eq(x.ok, y.ok, "assert format");
+
+ // isMaster and query-wrapped-command
+ isMaster = db.runCommand({isMaster: 1});
+ assert(isMaster.ismaster);
+ assert.eq('isdbgrid', isMaster.msg);
+ delete isMaster.localTime;
+
+ im2 = db.runCommand({query: {isMaster: 1}});
+ delete im2.localTime;
+ assert.eq(isMaster, im2);
+
+ im2 = db.runCommand({$query: {isMaster: 1}});
+ delete im2.localTime;
+ assert.eq(isMaster, im2);
+
+ s.stop();
})();
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index 03a5cedc25a..0697e875d58 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -6,149 +6,149 @@
// - Verifies a $where query can be killed on multiple DBs
// - Tests fsync and fsync+lock permissions on sharded db
(function() {
-'use strict';
-
-var s = new ShardingTest({shards: 2, mongos: 1 });
-var dbForTest = s.getDB("test");
-dbForTest.foo.drop();
-
-var numDocs = 10000;
-
-// shard test.foo and add a split point
-s.adminCommand({enablesharding: "test"});
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({shardcollection : "test.foo", key: {_id: 1}});
-s.adminCommand({split : "test.foo", middle: {_id: numDocs/2}});
-
-// move a chunk range to the non-primary shard
-s.adminCommand({moveChunk: "test.foo", find: {_id: 3},
- to: s.getNonPrimaries("test")[0], _waitForDelete: true});
-
-// restart balancer
-s.startBalancer();
-
-// insert 10k small documents into the sharded collection
-var bulk = dbForTest.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < numDocs; i++) {
- bulk.insert({ _id: i });
-}
-assert.writeOK(bulk.execute());
-
-var x = dbForTest.foo.stats();
-
-// verify the colleciton has been sharded and documents are evenly distributed
-assert.eq("test.foo", x.ns, "namespace mismatch");
-assert(x.sharded, "collection is not sharded");
-assert.eq(numDocs, x.count, "total count");
-assert.eq(numDocs / 2, x.shards.shard0000.count, "count on shard0000");
-assert.eq(numDocs / 2, x.shards.shard0001.count, "count on shard0001");
-assert(x.totalIndexSize > 0);
-
-// insert one doc into a non-sharded collection
-dbForTest.bar.insert({x: 1});
-var x = dbForTest.bar.stats();
-assert.eq(1, x.count, "XXX1");
-assert.eq("test.bar", x.ns, "XXX2");
-assert(!x.sharded, "XXX3: " + tojson(x));
-
-// fork shell and start querying the data
-var start = new Date();
-
-var whereKillSleepTime = 1000;
-var parallelCommand =
- "db.foo.find(function() { " +
- " sleep(" + whereKillSleepTime + "); " +
- " return false; " +
- "}).itcount(); ";
-
-// fork a parallel shell, but do not wait for it to start
-print("about to fork new shell at: " + Date());
-var awaitShell = startParallelShell(parallelCommand, s.s.port);
-print("done forking shell at: " + Date());
-
-// Get all current $where operations
-function getInProgWhereOps() {
- var inprog = dbForTest.currentOp().inprog;
-
- // Find all the where queries
- var myProcs = [];
- inprog.forEach(function(op) {
- if (op.query && op.query.filter && op.query.filter.$where) {
- myProcs.push(op);
- }
+ 'use strict';
+
+ var s = new ShardingTest({shards: 2, mongos: 1});
+ var dbForTest = s.getDB("test");
+ dbForTest.foo.drop();
+
+ var numDocs = 10000;
+
+ // shard test.foo and add a split point
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ s.adminCommand({split: "test.foo", middle: {_id: numDocs / 2}});
+
+ // move a chunk range to the non-primary shard
+ s.adminCommand({
+ moveChunk: "test.foo",
+ find: {_id: 3},
+ to: s.getNonPrimaries("test")[0],
+ _waitForDelete: true
});
- if (myProcs.length == 0) {
- print('No $where operations found: ' + tojson(inprog));
- }
- else {
- print('Found ' + myProcs.length + ' $where operations: ' + tojson(myProcs));
- }
-
- return myProcs;
-}
+ // restart balancer
+ s.startBalancer();
-var curOpState = 0; // 0 = not found, 1 = killed
-var killTime = null;
-var mine;
+ // insert 10k small documents into the sharded collection
+ var bulk = dbForTest.foo.initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
+
+ var x = dbForTest.foo.stats();
+
+    // verify the collection has been sharded and documents are evenly distributed
+ assert.eq("test.foo", x.ns, "namespace mismatch");
+ assert(x.sharded, "collection is not sharded");
+ assert.eq(numDocs, x.count, "total count");
+ assert.eq(numDocs / 2, x.shards.shard0000.count, "count on shard0000");
+ assert.eq(numDocs / 2, x.shards.shard0001.count, "count on shard0001");
+ assert(x.totalIndexSize > 0);
+
+ // insert one doc into a non-sharded collection
+ dbForTest.bar.insert({x: 1});
+ var x = dbForTest.bar.stats();
+ assert.eq(1, x.count, "XXX1");
+ assert.eq("test.bar", x.ns, "XXX2");
+ assert(!x.sharded, "XXX3: " + tojson(x));
+
+ // fork shell and start querying the data
+ var start = new Date();
+
+ var whereKillSleepTime = 1000;
+ var parallelCommand = "db.foo.find(function() { " + " sleep(" + whereKillSleepTime + "); " +
+ " return false; " + "}).itcount(); ";
+
+ // fork a parallel shell, but do not wait for it to start
+ print("about to fork new shell at: " + Date());
+ var awaitShell = startParallelShell(parallelCommand, s.s.port);
+ print("done forking shell at: " + Date());
+
+ // Get all current $where operations
+ function getInProgWhereOps() {
+ var inprog = dbForTest.currentOp().inprog;
+
+ // Find all the where queries
+ var myProcs = [];
+ inprog.forEach(function(op) {
+ if (op.query && op.query.filter && op.query.filter.$where) {
+ myProcs.push(op);
+ }
+ });
-assert.soon(function() {
- // Get all the current operations
- mine = getInProgWhereOps();
+ if (myProcs.length == 0) {
+ print('No $where operations found: ' + tojson(inprog));
+ } else {
+ print('Found ' + myProcs.length + ' $where operations: ' + tojson(myProcs));
+ }
- // Wait for the queries to start (one per shard, so 2 total)
- if (curOpState == 0 && mine.length == 2) {
- // queries started
- curOpState = 1;
- // kill all $where
- mine.forEach(function(z) {
- printjson(dbForTest.getSisterDB("admin").killOp(z.opid));
- });
- killTime = new Date();
- }
- // Wait for killed queries to end
- else if (curOpState == 1 && mine.length == 0) {
- // Queries ended
- curOpState = 2;
- return true;
+ return myProcs;
}
-}, "Couldn't kill the $where operations.", 2 * 60 * 1000);
+ var curOpState = 0; // 0 = not found, 1 = killed
+ var killTime = null;
+ var mine;
+
+ assert.soon(function() {
+ // Get all the current operations
+ mine = getInProgWhereOps();
+
+ // Wait for the queries to start (one per shard, so 2 total)
+ if (curOpState == 0 && mine.length == 2) {
+ // queries started
+ curOpState = 1;
+ // kill all $where
+ mine.forEach(function(z) {
+ printjson(dbForTest.getSisterDB("admin").killOp(z.opid));
+ });
+ killTime = new Date();
+ }
+ // Wait for killed queries to end
+ else if (curOpState == 1 && mine.length == 0) {
+ // Queries ended
+ curOpState = 2;
+ return true;
+ }
+
+ }, "Couldn't kill the $where operations.", 2 * 60 * 1000);
-print("after loop: " + Date());
-assert(killTime, "timed out waiting too kill last mine:" + tojson(mine));
+ print("after loop: " + Date());
+ assert(killTime, "timed out waiting too kill last mine:" + tojson(mine));
-assert.eq( 2 , curOpState , "failed killing" );
+ assert.eq(2, curOpState, "failed killing");
-killTime = new Date().getTime() - killTime.getTime();
-print("killTime: " + killTime);
-print("time if run full: " + (numDocs * whereKillSleepTime));
-assert.gt(whereKillSleepTime * numDocs / 20, killTime, "took too long to kill");
+ killTime = new Date().getTime() - killTime.getTime();
+ print("killTime: " + killTime);
+ print("time if run full: " + (numDocs * whereKillSleepTime));
+ assert.gt(whereKillSleepTime * numDocs / 20, killTime, "took too long to kill");
-// wait for the parallel shell we spawned to complete
-var exitCode = awaitShell({checkExitSuccess: false});
-assert.neq(0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
+ // wait for the parallel shell we spawned to complete
+ var exitCode = awaitShell({checkExitSuccess: false});
+ assert.neq(
+ 0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
-var end = new Date();
-print("elapsed: " + (end.getTime() - start.getTime()));
+ var end = new Date();
+ print("elapsed: " + (end.getTime() - start.getTime()));
-// test fsync command on non-admin db
-x = dbForTest.runCommand("fsync");
-assert(!x.ok , "fsync on non-admin namespace should fail : " + tojson(x));
-assert(x.code == 13,
- "fsync on non-admin succeeded, but should have failed: " + tojson(x));
+ // test fsync command on non-admin db
+ x = dbForTest.runCommand("fsync");
+ assert(!x.ok, "fsync on non-admin namespace should fail : " + tojson(x));
+ assert(x.code == 13, "fsync on non-admin succeeded, but should have failed: " + tojson(x));
-// test fsync on admin db
-x = dbForTest._adminCommand("fsync");
-assert(x.ok == 1, "fsync failed: " + tojson(x));
-if ( x.all.shard0000 > 0 ) {
- assert(x.numFiles > 0, "fsync failed: " + tojson(x));
-}
+ // test fsync on admin db
+ x = dbForTest._adminCommand("fsync");
+ assert(x.ok == 1, "fsync failed: " + tojson(x));
+ if (x.all.shard0000 > 0) {
+ assert(x.numFiles > 0, "fsync failed: " + tojson(x));
+ }
-// test fsync+lock on admin db
-x = dbForTest._adminCommand({"fsync" :1, lock:true});
-assert(!x.ok, "lock should fail: " + tojson(x));
+ // test fsync+lock on admin db
+ x = dbForTest._adminCommand({"fsync": 1, lock: true});
+ assert(!x.ok, "lock should fail: " + tojson(x));
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/find_and_modify_after_multi_write.js b/jstests/sharding/find_and_modify_after_multi_write.js
index c8081ce9119..004fe8d8ead 100644
--- a/jstests/sharding/find_and_modify_after_multi_write.js
+++ b/jstests/sharding/find_and_modify_after_multi_write.js
@@ -1,93 +1,74 @@
(function() {
-"use strict";
-
-/**
- * Test that a targetted findAndModify will be properly routed after executing a write that
- * does not perform any shard version checks.
- */
-var runTest = function(writeFunc) {
- var st = new ShardingTest({ shards: 2, mongos: 2 });
-
- var testDB = st.s.getDB('test');
- testDB.dropDatabase();
-
- assert.commandWorked(testDB.adminCommand({ enableSharding: 'test' }));
- st.ensurePrimaryShard('test', 'shard0000');
-
- assert.commandWorked(testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }}));
- assert.commandWorked(testDB.adminCommand({ split: 'test.user', middle: { x: 0 }}));
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0001',
- _waitForDelete: true }));
-
- var testDB2 = st.s1.getDB('test');
- testDB2.user.insert({ x: 123456 });
-
- // Move chunk to bump version on a different mongos.
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000',
- _waitForDelete: true }));
-
- // Issue a targetted findAndModify and check that it was upserted to the right shard.
- assert.commandWorked(testDB2.runCommand({
- findAndModify: 'user',
- query: { x: 100 },
- update: { $set: { y: 1 }},
- upsert: true
- }));
-
- assert.neq(null, st.d0.getDB('test').user.findOne({ x: 100 }));
- assert.eq(null, st.d1.getDB('test').user.findOne({ x: 100 }));
-
- // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
- // incremented to 3
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0001',
- _waitForDelete: true }));
-
- assert.commandWorked(testDB2.runCommand({
- findAndModify: 'user',
- query: { x: 200 },
- update: { $set: { y: 1 }},
- upsert: true
- }));
-
- assert.eq(null, st.d0.getDB('test').user.findOne({ x: 200 }));
- assert.neq(null, st.d1.getDB('test').user.findOne({ x: 200 }));
-
- // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
- // incremented to 4
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000',
- _waitForDelete: true }));
-
- // Ensure that write commands with multi version do not reset the connection shard version to
- // ignored.
- writeFunc(testDB2);
-
- assert.commandWorked(testDB2.runCommand({
- findAndModify: 'user',
- query: { x: 300 },
- update: { $set: { y: 1 }},
- upsert: true
- }));
-
- assert.neq(null, st.d0.getDB('test').user.findOne({ x: 300 }));
- assert.eq(null, st.d1.getDB('test').user.findOne({ x: 300 }));
-
- st.stop();
-};
-
-runTest(function(db) {
- db.user.update({}, { $inc: { y: 987654 }}, false, true);
-});
-
-runTest(function(db) {
- db.user.remove({ y: 'noMatch' }, false);
-});
+ "use strict";
+
+ /**
+     * Test that a targeted findAndModify will be properly routed after executing a write that
+ * does not perform any shard version checks.
+ */
+ var runTest = function(writeFunc) {
+ var st = new ShardingTest({shards: 2, mongos: 2});
+
+ var testDB = st.s.getDB('test');
+ testDB.dropDatabase();
+
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0000');
+
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true}));
+
+ var testDB2 = st.s1.getDB('test');
+ testDB2.user.insert({x: 123456});
+
+ // Move chunk to bump version on a different mongos.
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0000', _waitForDelete: true}));
+
+        // Issue a targeted findAndModify and check that it was upserted to the right shard.
+ assert.commandWorked(testDB2.runCommand(
+ {findAndModify: 'user', query: {x: 100}, update: {$set: {y: 1}}, upsert: true}));
+
+ assert.neq(null, st.d0.getDB('test').user.findOne({x: 100}));
+ assert.eq(null, st.d1.getDB('test').user.findOne({x: 100}));
+
+ // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
+ // incremented to 3
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true}));
+
+ assert.commandWorked(testDB2.runCommand(
+ {findAndModify: 'user', query: {x: 200}, update: {$set: {y: 1}}, upsert: true}));
+
+ assert.eq(null, st.d0.getDB('test').user.findOne({x: 200}));
+ assert.neq(null, st.d1.getDB('test').user.findOne({x: 200}));
+
+ // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
+ // incremented to 4
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0000', _waitForDelete: true}));
+
+ // Ensure that write commands with multi version do not reset the connection shard version
+ // to
+ // ignored.
+ writeFunc(testDB2);
+
+ assert.commandWorked(testDB2.runCommand(
+ {findAndModify: 'user', query: {x: 300}, update: {$set: {y: 1}}, upsert: true}));
+
+ assert.neq(null, st.d0.getDB('test').user.findOne({x: 300}));
+ assert.eq(null, st.d1.getDB('test').user.findOne({x: 300}));
+
+ st.stop();
+ };
+
+ runTest(function(db) {
+ db.user.update({}, {$inc: {y: 987654}}, false, true);
+ });
+
+ runTest(function(db) {
+ db.user.remove({y: 'noMatch'}, false);
+ });
})();
diff --git a/jstests/sharding/find_getmore_cmd.js b/jstests/sharding/find_getmore_cmd.js
index f47201825ee..52fc6085507 100644
--- a/jstests/sharding/find_getmore_cmd.js
+++ b/jstests/sharding/find_getmore_cmd.js
@@ -28,11 +28,8 @@
st.ensurePrimaryShard(db.getName(), "shard0000");
db.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}});
assert.commandWorked(db.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(db.adminCommand({
- moveChunk: coll.getFullName(),
- find: {_id: 1},
- to: "shard0001"
- }));
+ assert.commandWorked(
+ db.adminCommand({moveChunk: coll.getFullName(), find: {_id: 1}, to: "shard0001"}));
// Find with no options.
cmdRes = db.runCommand({find: coll.getName()});
@@ -131,11 +128,8 @@
// User projection on $sortKey is illegal.
cmdRes = db.runCommand({find: coll.getName(), projection: {$sortKey: 1}, sort: {_id: 1}});
assert.commandFailed(cmdRes);
- cmdRes = db.runCommand({
- find: coll.getName(),
- projection: {$sortKey: {$meta: 'sortKey'}},
- sort: {_id: 1}
- });
+ cmdRes = db.runCommand(
+ {find: coll.getName(), projection: {$sortKey: {$meta: 'sortKey'}}, sort: {_id: 1}});
assert.commandFailed(cmdRes);
// User should be able to issue a sortKey meta-projection, as long as it's not on the reserved
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index 14b5786a379..08eb6602370 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -1,64 +1,66 @@
(function() {
-var s = new ShardingTest({ name: "find_and_modify_sharded", shards: 2 });
+ var s = new ShardingTest({name: "find_and_modify_sharded", shards: 2});
-s.adminCommand( { enablesharding : "test" } );
-db = s.getDB( "test" );
-s.ensurePrimaryShard('test', 'shard0001');
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-secondary = s.getOther( primary ).getDB( "test" );
+ s.adminCommand({enablesharding: "test"});
+ db = s.getDB("test");
+ s.ensurePrimaryShard('test', 'shard0001');
+ primary = s.getPrimaryShard("test").getDB("test");
+ secondary = s.getOther(primary).getDB("test");
-numObjs = 20;
+ numObjs = 20;
-// Turn balancer off - with small numbers of chunks the balancer tries to correct all imbalances, not just < 8
-s.s.getDB( "config" ).settings.update({ _id : "balancer" }, { $set : { stopped : true } }, true );
+ // Turn balancer off - with small numbers of chunks the balancer tries to correct all
+ // imbalances, not just < 8
+ s.s.getDB("config").settings.update({_id: "balancer"}, {$set: {stopped: true}}, true);
-s.adminCommand( { shardcollection : "test.stuff" , key : {_id:1} } );
+ s.adminCommand({shardcollection: "test.stuff", key: {_id: 1}});
-// pre-split the collection so to avoid interference from balancer
-s.adminCommand( { split: "test.stuff" , middle : { _id : numObjs/2 } } );
-s.adminCommand( { movechunk : "test.stuff" , find : { _id : numObjs/2 } , to : secondary.getMongo().name } ) ;
+    // pre-split the collection to avoid interference from balancer
+ s.adminCommand({split: "test.stuff", middle: {_id: numObjs / 2}});
+ s.adminCommand(
+ {movechunk: "test.stuff", find: {_id: numObjs / 2}, to: secondary.getMongo().name});
-var bulk = db.stuff.initializeUnorderedBulkOp();
-for (var i=0; i < numObjs; i++){
- bulk.insert({_id: i});
-}
-assert.writeOK(bulk.execute());
+ var bulk = db.stuff.initializeUnorderedBulkOp();
+ for (var i = 0; i < numObjs; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
-// put two docs in each chunk (avoid the split in 0, since there are no docs less than 0)
-for (var i=2; i < numObjs; i+=2){
- if (i == numObjs/2)
- continue;
- s.adminCommand( { split: "test.stuff" , middle : {_id: i} } );
-}
+ // put two docs in each chunk (avoid the split in 0, since there are no docs less than 0)
+ for (var i = 2; i < numObjs; i += 2) {
+ if (i == numObjs / 2)
+ continue;
+ s.adminCommand({split: "test.stuff", middle: {_id: i}});
+ }
-s.printChunks();
-assert.eq( numObjs/2, s.config.chunks.count(), "split failed" );
-assert.eq( numObjs/4, s.config.chunks.count({ shard: "shard0000" }) );
-assert.eq( numObjs/4, s.config.chunks.count({ shard: "shard0001" }) );
+ s.printChunks();
+ assert.eq(numObjs / 2, s.config.chunks.count(), "split failed");
+ assert.eq(numObjs / 4, s.config.chunks.count({shard: "shard0000"}));
+ assert.eq(numObjs / 4, s.config.chunks.count({shard: "shard0001"}));
-// update
-for (var i=0; i < numObjs; i++){
- assert.eq(db.stuff.count({b:1}), i, "2 A");
+ // update
+ for (var i = 0; i < numObjs; i++) {
+ assert.eq(db.stuff.count({b: 1}), i, "2 A");
- var out = db.stuff.findAndModify({query: {_id:i, b:null}, update: {$set: {b:1}}});
- assert.eq(out._id, i, "2 E");
+ var out = db.stuff.findAndModify({query: {_id: i, b: null}, update: {$set: {b: 1}}});
+ assert.eq(out._id, i, "2 E");
- assert.eq(db.stuff.count({b:1}), i+1, "2 B");
-}
+ assert.eq(db.stuff.count({b: 1}), i + 1, "2 B");
+ }
-// remove
-for (var i=0; i < numObjs; i++){
- assert.eq(db.stuff.count(), numObjs - i, "3 A");
- assert.eq(db.stuff.count({_id: i}), 1, "3 B");
+ // remove
+ for (var i = 0; i < numObjs; i++) {
+ assert.eq(db.stuff.count(), numObjs - i, "3 A");
+ assert.eq(db.stuff.count({_id: i}), 1, "3 B");
- var out = db.stuff.findAndModify({remove: true, query: {_id:i}});
+ var out = db.stuff.findAndModify({remove: true, query: {_id: i}});
- assert.eq(db.stuff.count(), numObjs - i - 1, "3 C");
- assert.eq(db.stuff.count({_id: i}), 0, "3 D");
- assert.eq(out._id, i, "3 E");
-}
+ assert.eq(db.stuff.count(), numObjs - i - 1, "3 C");
+ assert.eq(db.stuff.count({_id: i}), 0, "3 D");
+ assert.eq(out._id, i, "3 E");
+ }
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/findandmodify2.js b/jstests/sharding/findandmodify2.js
index 2d0b8c6a7fc..afa727e77b9 100644
--- a/jstests/sharding/findandmodify2.js
+++ b/jstests/sharding/findandmodify2.js
@@ -1,10 +1,11 @@
-var s = new ShardingTest({ name: "find_and_modify_sharded_2", shards: 2, mongos: 1, other: { chunkSize: 1 }});
-s.adminCommand( { enablesharding : "test" } );
+var s = new ShardingTest(
+ {name: "find_and_modify_sharded_2", shards: 2, mongos: 1, other: {chunkSize: 1}});
+s.adminCommand({enablesharding: "test"});
-var db = s.getDB( "test" );
+var db = s.getDB("test");
s.ensurePrimaryShard('test', 'shard0001');
-var primary = s.getPrimaryShard( "test" ).getDB( "test" );
-var secondary = s.getOther( primary ).getDB( "test" );
+var primary = s.getPrimaryShard("test").getDB("test");
+var secondary = s.getOther(primary).getDB("test");
var n = 100;
var collection = "stuff";
@@ -18,7 +19,7 @@ var col_fam_upsert = col_fam + '_upsert';
var big = "x";
print("---------- Creating large payload...");
-for(var i=0;i<15;i++) {
+for (var i = 0; i < 15; i++) {
big += big;
}
print("---------- Done.");
@@ -37,46 +38,39 @@ s.adminCommand({shardcollection: 'test.' + col_fam_upsert, key: {_id: 1}});
// update via findAndModify
function via_fam() {
- for (var i=0; i<n; i++){
- db[col_fam].save({ _id: i });
- }
-
- for (var i=0; i<n; i++){
- db[col_fam].findAndModify({query: {_id: i}, update: { $set:
- { big: big }
- }});
- }
+ for (var i = 0; i < n; i++) {
+ db[col_fam].save({_id: i});
+ }
+
+ for (var i = 0; i < n; i++) {
+ db[col_fam].findAndModify({query: {_id: i}, update: {$set: {big: big}}});
+ }
}
// upsert via findAndModify
function via_fam_upsert() {
- for (var i=0; i<n; i++){
- db[col_fam_upsert].findAndModify({query: {_id: i}, update: { $set:
- { big: big }
- }, upsert: true});
- }
+ for (var i = 0; i < n; i++) {
+ db[col_fam_upsert].findAndModify(
+ {query: {_id: i}, update: {$set: {big: big}}, upsert: true});
+ }
}
// update data using basic update
function via_update() {
- for (var i=0; i<n; i++){
- db[col_update].save({ _id: i });
- }
-
- for (var i=0; i<n; i++){
- db[col_update].update({_id: i}, { $set:
- { big: big }
- });
- }
+ for (var i = 0; i < n; i++) {
+ db[col_update].save({_id: i});
+ }
+
+ for (var i = 0; i < n; i++) {
+ db[col_update].update({_id: i}, {$set: {big: big}});
+ }
}
// upsert data using basic update
function via_update_upsert() {
- for (var i=0; i<n; i++){
- db[col_update_upsert].update({_id: i}, { $set:
- { big: big }
- }, true);
- }
+ for (var i = 0; i < n; i++) {
+ db[col_update_upsert].update({_id: i}, {$set: {big: big}}, true);
+ }
}
print("---------- Update via findAndModify...");
@@ -98,14 +92,21 @@ print("---------- Done.");
print("---------- Printing chunks:");
s.printChunks();
-
print("---------- Verifying that both codepaths resulted in splits...");
-assert.gte( s.config.chunks.count({ "ns": "test." + col_fam }), minChunks, "findAndModify update code path didn't result in splits" );
-assert.gte( s.config.chunks.count({ "ns": "test." + col_fam_upsert }), minChunks, "findAndModify upsert code path didn't result in splits" );
-assert.gte( s.config.chunks.count({ "ns": "test." + col_update }), minChunks, "update code path didn't result in splits" );
-assert.gte( s.config.chunks.count({ "ns": "test." + col_update_upsert }), minChunks, "upsert code path didn't result in splits" );
-
-printjson( db[col_update].stats() );
+assert.gte(s.config.chunks.count({"ns": "test." + col_fam}),
+ minChunks,
+ "findAndModify update code path didn't result in splits");
+assert.gte(s.config.chunks.count({"ns": "test." + col_fam_upsert}),
+ minChunks,
+ "findAndModify upsert code path didn't result in splits");
+assert.gte(s.config.chunks.count({"ns": "test." + col_update}),
+ minChunks,
+ "update code path didn't result in splits");
+assert.gte(s.config.chunks.count({"ns": "test." + col_update_upsert}),
+ minChunks,
+ "upsert code path didn't result in splits");
+
+printjson(db[col_update].stats());
// ensure that all chunks are smaller than chunksize
// make sure not teensy
diff --git a/jstests/sharding/forget_mr_temp_ns.js b/jstests/sharding/forget_mr_temp_ns.js
index d972aa7dbe1..fd950bcf43c 100644
--- a/jstests/sharding/forget_mr_temp_ns.js
+++ b/jstests/sharding/forget_mr_temp_ns.js
@@ -2,44 +2,47 @@
// Tests whether we forget M/R's temporary namespaces for sharded output
//
-var st = new ShardingTest({ shards : 1, mongos : 1 });
+var st = new ShardingTest({shards: 1, mongos: 1});
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var coll = mongos.getCollection( "foo.bar" );
-var outputColl = mongos.getCollection( (coll.getDB() + "") + ".mrOutput" );
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
+var outputColl = mongos.getCollection((coll.getDB() + "") + ".mrOutput");
var bulk = coll.initializeUnorderedBulkOp();
-for ( var i = 0; i < 10; i++ ) {
- bulk.insert({ _id : i, even : (i % 2 == 0) });
+for (var i = 0; i < 10; i++) {
+ bulk.insert({_id: i, even: (i % 2 == 0)});
}
assert.writeOK(bulk.execute());
-var map = function() { emit( this.even, 1 ); };
-var reduce = function( key, values ) { return Array.sum(values); };
+var map = function() {
+ emit(this.even, 1);
+};
+var reduce = function(key, values) {
+ return Array.sum(values);
+};
-out = coll.mapReduce( map, reduce, { out: { reduce : outputColl.getName(), sharded: true } } );
+out = coll.mapReduce(map, reduce, {out: {reduce: outputColl.getName(), sharded: true}});
-printjson( out );
-printjson( outputColl.find().toArray() );
+printjson(out);
+printjson(outputColl.find().toArray());
-var mongodThreadStats = st.shard0.getDB( "admin" ).runCommand({ shardConnPoolStats : 1 }).threads;
-var mongosThreadStats = admin.runCommand({ shardConnPoolStats : 1 }).threads;
+var mongodThreadStats = st.shard0.getDB("admin").runCommand({shardConnPoolStats: 1}).threads;
+var mongosThreadStats = admin.runCommand({shardConnPoolStats: 1}).threads;
-printjson( mongodThreadStats );
-printjson( mongosThreadStats );
+printjson(mongodThreadStats);
+printjson(mongosThreadStats);
-var checkForSeenNS = function( threadStats, regex ) {
- for ( var i = 0; i < threadStats.length; i++ ) {
+var checkForSeenNS = function(threadStats, regex) {
+ for (var i = 0; i < threadStats.length; i++) {
var seenNSes = threadStats[i].seenNS;
- for ( var j = 0; j < seenNSes.length; j++ ) {
- assert( !( regex.test( seenNSes ) ) );
+ for (var j = 0; j < seenNSes.length; j++) {
+ assert(!(regex.test(seenNSes)));
}
}
};
-checkForSeenNS( mongodThreadStats, /^foo.tmp/ );
-checkForSeenNS( mongosThreadStats, /^foo.tmp/ );
+checkForSeenNS(mongodThreadStats, /^foo.tmp/);
+checkForSeenNS(mongosThreadStats, /^foo.tmp/);
st.stop();
-
diff --git a/jstests/sharding/fts_score_sort_sharded.js b/jstests/sharding/fts_score_sort_sharded.js
index daab28ff81e..e6bf01503be 100644
--- a/jstests/sharding/fts_score_sort_sharded.js
+++ b/jstests/sharding/fts_score_sort_sharded.js
@@ -13,29 +13,27 @@ var cursor;
// Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}.
//
assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
-admin.runCommand({movePrimary: coll.getDB().getName(),
- to: "shard0000"});
-assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(),
- key: {_id: 1}}));
+admin.runCommand({movePrimary: coll.getDB().getName(), to: "shard0000"});
+assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
-assert.commandWorked(admin.runCommand({moveChunk: coll.getFullName(),
- find: {_id: 0},
- to: "shard0001"}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: "shard0001"}));
//
// Insert documents into collection and create text index.
//
-assert.writeOK(coll.insert({ _id: 1, a: "pizza" }));
-assert.writeOK(coll.insert({ _id: -1, a: "pizza pizza" }));
-assert.writeOK(coll.insert({ _id: 2, a: "pizza pizza pizza" }));
-assert.writeOK(coll.insert({ _id: -2, a: "pizza pizza pizza pizza"}));
-assert.commandWorked(coll.ensureIndex({ a: "text" }));
+assert.writeOK(coll.insert({_id: 1, a: "pizza"}));
+assert.writeOK(coll.insert({_id: -1, a: "pizza pizza"}));
+assert.writeOK(coll.insert({_id: 2, a: "pizza pizza pizza"}));
+assert.writeOK(coll.insert({_id: -2, a: "pizza pizza pizza pizza"}));
+assert.commandWorked(coll.ensureIndex({a: "text"}));
//
// Execute query with sort on document score, verify results are in correct order.
//
-var results = coll.find({$text: {$search: "pizza"}},
- {s: {$meta: "textScore"}}).sort({s: {$meta: "textScore"}}).toArray();
+var results = coll.find({$text: {$search: "pizza"}}, {s: {$meta: "textScore"}})
+ .sort({s: {$meta: "textScore"}})
+ .toArray();
assert.eq(results.length, 4);
assert.eq(results[0]._id, -2);
assert.eq(results[1]._id, 2);
@@ -48,18 +46,26 @@ assert.eq(results[3]._id, 1);
// Projection not specified at all.
cursor = coll.find({$text: {$search: "pizza"}}).sort({s: {$meta: "textScore"}});
-assert.throws(function() { cursor.next(); });
+assert.throws(function() {
+ cursor.next();
+});
// Projection specified with incorrect field name.
-cursor = coll.find({$text: {$search: "pizza"}},
- {t: {$meta: "textScore"}}).sort({s: {$meta: "textScore"}});
-assert.throws(function() { cursor.next(); });
+cursor = coll.find({$text: {$search: "pizza"}}, {t: {$meta: "textScore"}})
+ .sort({s: {$meta: "textScore"}});
+assert.throws(function() {
+ cursor.next();
+});
// Projection specified on correct field but with wrong sort.
cursor = coll.find({$text: {$search: "pizza"}}, {s: 1}).sort({s: {$meta: "textScore"}});
-assert.throws(function() { cursor.next(); });
+assert.throws(function() {
+ cursor.next();
+});
cursor = coll.find({$text: {$search: "pizza"}}, {s: -1}).sort({s: {$meta: "textScore"}});
-assert.throws(function() { cursor.next(); });
+assert.throws(function() {
+ cursor.next();
+});
// TODO Test sort on compound key.
diff --git a/jstests/sharding/geo_near_random1.js b/jstests/sharding/geo_near_random1.js
index bff63ca2b18..0229c84555c 100644
--- a/jstests/sharding/geo_near_random1.js
+++ b/jstests/sharding/geo_near_random1.js
@@ -1,46 +1,52 @@
// This tests all points using $near
(function() {
-load("jstests/libs/geo_near_random.js");
-
-var testName = "geo_near_random1";
-var s = new ShardingTest({ name: testName, shards: 3 });
-
-db = s.getDB("test"); // global db
-
-var test = new GeoNearRandomTest(testName);
-
-s.adminCommand({enablesharding:'test'});
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({shardcollection: ('test.' + testName), key: {_id:1} });
-
-test.insertPts(50);
-
-for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
- s.adminCommand({split: ('test.' + testName), middle: {_id: i} });
- try {
- s.adminCommand({ moveChunk: ('test.' + testName), find: { _id: i-1 },
- to: ('shard000' + (i%3)), _waitForDelete: true });
- } catch (e) {
- // ignore this error
- if (! e.message.match(/that chunk is already on that shard/)){
- throw e;
+ load("jstests/libs/geo_near_random.js");
+
+ var testName = "geo_near_random1";
+ var s = new ShardingTest({name: testName, shards: 3});
+
+ db = s.getDB("test"); // global db
+
+ var test = new GeoNearRandomTest(testName);
+
+ s.adminCommand({enablesharding: 'test'});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}});
+
+ test.insertPts(50);
+
+ for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) {
+ s.adminCommand({split: ('test.' + testName), middle: {_id: i}});
+ try {
+ s.adminCommand({
+ moveChunk: ('test.' + testName),
+ find: {_id: i - 1},
+ to: ('shard000' + (i % 3)),
+ _waitForDelete: true
+ });
+ } catch (e) {
+ // ignore this error
+ if (!e.message.match(/that chunk is already on that shard/)) {
+ throw e;
+ }
}
}
-}
-// Turn balancer back on, for actual tests
-// s.startBalancer() // SERVER-13365
+ // Turn balancer back on, for actual tests
+ // s.startBalancer() // SERVER-13365
-printShardingSizes();
+ printShardingSizes();
-var opts = {sharded: true};
-test.testPt([0,0], opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
+ var opts = {
+ sharded: true
+ };
+ test.testPt([0, 0], opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/geo_near_random2.js b/jstests/sharding/geo_near_random2.js
index cc566607113..cdf8543274a 100644
--- a/jstests/sharding/geo_near_random2.js
+++ b/jstests/sharding/geo_near_random2.js
@@ -1,50 +1,59 @@
// This tests 1% of all points using $near and $nearSphere
(function() {
-load("jstests/libs/geo_near_random.js");
-
-var testName = "geo_near_random2";
-var s = new ShardingTest({ name: testName, shards: 3 });
-
-db = s.getDB("test"); // global db
-
-var test = new GeoNearRandomTest(testName);
-
-s.adminCommand({enablesharding:'test'});
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({shardcollection: ('test.' + testName), key: {_id:1} });
-
-test.insertPts(5000);
-
-for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
- s.adminCommand({split: ('test.' + testName), middle: {_id: i} });
- try {
- s.adminCommand({moveChunk: ('test.' + testName), find: {_id: i-1}, to: ('shard000' + (i%3)), _waitForDelete : true });
- } catch (e) {
- // ignore this error
- if (! e.message.match(/that chunk is already on that shard/)){
- throw e;
+ load("jstests/libs/geo_near_random.js");
+
+ var testName = "geo_near_random2";
+ var s = new ShardingTest({name: testName, shards: 3});
+
+ db = s.getDB("test"); // global db
+
+ var test = new GeoNearRandomTest(testName);
+
+ s.adminCommand({enablesharding: 'test'});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}});
+
+ test.insertPts(5000);
+
+ for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) {
+ s.adminCommand({split: ('test.' + testName), middle: {_id: i}});
+ try {
+ s.adminCommand({
+ moveChunk: ('test.' + testName),
+ find: {_id: i - 1},
+ to: ('shard000' + (i % 3)),
+ _waitForDelete: true
+ });
+ } catch (e) {
+ // ignore this error
+ if (!e.message.match(/that chunk is already on that shard/)) {
+ throw e;
+ }
}
}
-}
-
-//Turn balancer back on, for actual tests
-// s.startBalancer(); // SERVER-13365
-
-opts = {sphere:0, nToTest:test.nPts*0.01, sharded:true};
-test.testPt([0,0], opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
-
-opts.sphere = 1;
-test.testPt([0,0], opts);
-test.testPt(test.mkPt(0.8), opts);
-test.testPt(test.mkPt(0.8), opts);
-test.testPt(test.mkPt(0.8), opts);
-test.testPt(test.mkPt(0.8), opts);
-
-s.stop();
+
+ // Turn balancer back on, for actual tests
+ // s.startBalancer(); // SERVER-13365
+
+ opts = {
+ sphere: 0,
+ nToTest: test.nPts * 0.01,
+ sharded: true
+ };
+ test.testPt([0, 0], opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
+
+ opts.sphere = 1;
+ test.testPt([0, 0], opts);
+ test.testPt(test.mkPt(0.8), opts);
+ test.testPt(test.mkPt(0.8), opts);
+ test.testPt(test.mkPt(0.8), opts);
+ test.testPt(test.mkPt(0.8), opts);
+
+ s.stop();
})();
diff --git a/jstests/sharding/geo_shardedgeonear.js b/jstests/sharding/geo_shardedgeonear.js
index d00c6a057a3..54bda17cf16 100644
--- a/jstests/sharding/geo_shardedgeonear.js
+++ b/jstests/sharding/geo_shardedgeonear.js
@@ -9,38 +9,48 @@ function test(db, sharded, indexType) {
if (sharded) {
var shards = [];
var config = shardedDB.getSiblingDB("config");
- config.shards.find().forEach(function(shard) { shards.push(shard._id); });
+ config.shards.find().forEach(function(shard) {
+ shards.push(shard._id);
+ });
- shardedDB.adminCommand({shardCollection: shardedDB[coll].getFullName(), key: {rand:1}});
- for (var i=1; i < 10; i++) {
+ shardedDB.adminCommand({shardCollection: shardedDB[coll].getFullName(), key: {rand: 1}});
+ for (var i = 1; i < 10; i++) {
// split at 0.1, 0.2, ... 0.9
- shardedDB.adminCommand({split: shardedDB[coll].getFullName(), middle: {rand: i/10}});
- shardedDB.adminCommand({moveChunk: shardedDB[coll].getFullName(), find: {rand: i/10},
- to: shards[i%shards.length]});
+ shardedDB.adminCommand({split: shardedDB[coll].getFullName(), middle: {rand: i / 10}});
+ shardedDB.adminCommand({
+ moveChunk: shardedDB[coll].getFullName(),
+ find: {rand: i / 10},
+ to: shards[i % shards.length]
+ });
}
assert.eq(config.chunks.count({'ns': shardedDB[coll].getFullName()}), 10);
}
Random.setRandomSeed();
- var numPts = 10*1000;
- for (var i=0; i < numPts; i++) {
+ var numPts = 10 * 1000;
+ for (var i = 0; i < numPts; i++) {
var lat = 90 - Random.rand() * 180;
var lng = 180 - Random.rand() * 360;
- assert.writeOK(db[coll].insert({rand:Math.random(), loc: [lng, lat]}));
+ assert.writeOK(db[coll].insert({rand: Math.random(), loc: [lng, lat]}));
}
assert.eq(db[coll].count(), numPts);
- assert.commandWorked(db[coll].ensureIndex({ loc: indexType }));
+ assert.commandWorked(db[coll].ensureIndex({loc: indexType}));
- var queryPoint = [0,0];
- geoCmd = {geoNear: coll, near: queryPoint, spherical: true, includeLocs: true};
+ var queryPoint = [0, 0];
+ geoCmd = {
+ geoNear: coll,
+ near: queryPoint,
+ spherical: true,
+ includeLocs: true
+ };
assert.commandWorked(db.runCommand(geoCmd), tojson({sharded: sharded, indexType: indexType}));
}
-var sharded = new ShardingTest({ shards: 3, mongos: 1 });
+var sharded = new ShardingTest({shards: 3, mongos: 1});
sharded.stopBalancer();
-sharded.adminCommand( { enablesharding : "test" } );
+sharded.adminCommand({enablesharding: "test"});
var shardedDB = sharded.getDB('test');
sharded.ensurePrimaryShard('test', 'shard0001');
printjson(shardedDB);
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
index 0c7175f8fed..0d9221ed896 100644
--- a/jstests/sharding/group_slaveok.js
+++ b/jstests/sharding/group_slaveok.js
@@ -1,63 +1,68 @@
// Tests group using slaveOk
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ name: "groupSlaveOk",
- shards: 1,
- mongos: 1,
- other: { rs: true,
- rs0: { nodes: 2 } } });
+ var st = new ShardingTest(
+ {name: "groupSlaveOk", shards: 1, mongos: 1, other: {rs: true, rs0: {nodes: 2}}});
-var rst = st._rs[0].test;
+ var rst = st._rs[0].test;
-// Insert data into replica set
-var conn = new Mongo(st.s.host);
-conn.setLogLevel(3);
+ // Insert data into replica set
+ var conn = new Mongo(st.s.host);
+ conn.setLogLevel(3);
-var coll = conn.getCollection("test.groupSlaveOk");
-coll.drop();
+ var coll = conn.getCollection("test.groupSlaveOk");
+ coll.drop();
-var bulk = coll.initializeUnorderedBulkOp();
-for(var i = 0; i < 300; i++) {
- bulk.insert({ i: i % 10 });
-}
-assert.writeOK(bulk.execute());
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 300; i++) {
+ bulk.insert({i: i % 10});
+ }
+ assert.writeOK(bulk.execute());
-// Wait for client to update itself and replication to finish
-rst.awaitReplication();
+ // Wait for client to update itself and replication to finish
+ rst.awaitReplication();
-var primary = rst.getPrimary();
-var sec = rst.getSecondary();
+ var primary = rst.getPrimary();
+ var sec = rst.getSecondary();
-// Data now inserted... stop the master, since only two in set, other will still be secondary
-rst.stop(rst.getPrimary());
-printjson(rst.status());
+ // Data now inserted... stop the master, since only two in set, other will still be secondary
+ rst.stop(rst.getPrimary());
+ printjson(rst.status());
-// Wait for the mongos to recognize the slave
-ReplSetTest.awaitRSClientHosts(conn, sec, { ok: true, secondary: true });
+ // Wait for the mongos to recognize the slave
+ ReplSetTest.awaitRSClientHosts(conn, sec, {ok: true, secondary: true});
-// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
-// master is down
-conn.setSlaveOk();
+ // Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+ // master is down
+ conn.setSlaveOk();
-// Should not throw exception, since slaveOk'd
-assert.eq(10, coll.group({ key: { i: true } ,
- reduce: function(obj, ctx) { ctx.count += 1; },
- initial: { count: 0 } }).length);
+ // Should not throw exception, since slaveOk'd
+ assert.eq(10,
+ coll.group({
+ key: {i: true},
+ reduce: function(obj, ctx) {
+ ctx.count += 1;
+ },
+ initial: {count: 0}
+ }).length);
-try {
- conn.setSlaveOk(false);
- var res = coll.group({ key: { i: true },
- reduce: function(obj, ctx) { ctx.count += 1; },
- initial: { count: 0 } });
+ try {
+ conn.setSlaveOk(false);
+ var res = coll.group({
+ key: {i: true},
+ reduce: function(obj, ctx) {
+ ctx.count += 1;
+ },
+ initial: {count: 0}
+ });
- print("Should not reach here! Group result: " + tojson(res));
- assert(false);
-}
-catch(e){
- print("Non-slaveOk'd connection failed." + tojson(e));
-}
+ print("Should not reach here! Group result: " + tojson(res));
+ assert(false);
+ } catch (e) {
+ print("Non-slaveOk'd connection failed." + tojson(e));
+ }
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/hash_basic.js b/jstests/sharding/hash_basic.js
index d7599488695..e4bf6ded27b 100644
--- a/jstests/sharding/hash_basic.js
+++ b/jstests/sharding/hash_basic.js
@@ -1,25 +1,27 @@
-var st = new ShardingTest({ shards: 2, chunkSize: 1 });
+var st = new ShardingTest({shards: 2, chunkSize: 1});
var testDB = st.s.getDB('test');
-testDB.adminCommand({ enableSharding: 'test' });
+testDB.adminCommand({enableSharding: 'test'});
st.ensurePrimaryShard('test', 'shard0001');
-testDB.adminCommand({ shardCollection: 'test.user', key: { x: 'hashed' }});
+testDB.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}});
var configDB = st.s.getDB('config');
var chunkCountBefore = configDB.chunks.count();
assert.gt(chunkCountBefore, 1);
for (var x = 0; x < 1000; x++) {
- testDB.user.insert({ x: x });
+ testDB.user.insert({x: x});
}
// For debugging
(function() {
- var chunkList = configDB.chunks.find().sort({ min: -1 }).toArray();
- chunkList.forEach(function(chunk) { chunk.count = 0; });
+ var chunkList = configDB.chunks.find().sort({min: -1}).toArray();
+ chunkList.forEach(function(chunk) {
+ chunk.count = 0;
+ });
for (var x = 0; x < 1000; x++) {
- var hashVal = testDB.adminCommand({ _hashBSONElement: x }).out;
+ var hashVal = testDB.adminCommand({_hashBSONElement: x}).out;
var countSet = false;
for (var y = 0; y < chunkList.length - 2; y++) {
@@ -28,8 +30,7 @@ for (var x = 0; x < 1000; x++) {
countSet = true;
chunkDoc.count++;
- print('doc in chunk: x [' + x + '], h[' + hashVal +
- '], min[' + chunkDoc.min.x +
+ print('doc in chunk: x [' + x + '], h[' + hashVal + '], min[' + chunkDoc.min.x +
'], max[' + chunkDoc.max.x + ']');
break;
}
@@ -37,8 +38,7 @@ for (var x = 0; x < 1000; x++) {
if (!countSet) {
chunkDoc = chunkList[chunkList.length - 1];
- print('doc in chunk: x [' + x + '], h[' + hashVal +
- '], min[' + chunkDoc.min.x +
+ print('doc in chunk: x [' + x + '], h[' + hashVal + '], min[' + chunkDoc.min.x +
'], max[' + chunkDoc.max.x + ']');
chunkDoc.count++;
}
@@ -49,40 +49,40 @@ for (var x = 0; x < 1000; x++) {
});
});
-var chunkDoc = configDB.chunks.find().sort({ min: 1 }).next();
+var chunkDoc = configDB.chunks.find().sort({min: 1}).next();
var min = chunkDoc.min;
var max = chunkDoc.max;
// Assumption: There are documents in the MinKey chunk, otherwise, splitVector will
// fail. Note: This chunk will have 267 documents if collection was presplit to 4.
-var cmdRes = testDB.adminCommand({ split: 'test.user', bounds: [ min, max ]});
-assert(cmdRes.ok, 'split on bounds failed on chunk[' + tojson(chunkDoc) +
- ']: ' + tojson(cmdRes));
+var cmdRes = testDB.adminCommand({split: 'test.user', bounds: [min, max]});
+assert(cmdRes.ok, 'split on bounds failed on chunk[' + tojson(chunkDoc) + ']: ' + tojson(cmdRes));
-chunkDoc = configDB.chunks.find().sort({ min: 1 }).skip(1).next();
+chunkDoc = configDB.chunks.find().sort({min: 1}).skip(1).next();
var middle = chunkDoc.min + 1000000;
-cmdRes = testDB.adminCommand({ split: 'test.user', middle: { x: middle }});
+cmdRes = testDB.adminCommand({split: 'test.user', middle: {x: middle}});
assert(cmdRes.ok, 'split failed with middle [' + middle + ']: ' + tojson(cmdRes));
-cmdRes = testDB.adminCommand({ split: 'test.user', find: { x: 7 }});
+cmdRes = testDB.adminCommand({split: 'test.user', find: {x: 7}});
assert(cmdRes.ok, 'split failed with find: ' + tojson(cmdRes));
-var chunkList = configDB.chunks.find().sort({ min: 1 }).toArray();
+var chunkList = configDB.chunks.find().sort({min: 1}).toArray();
assert.eq(chunkCountBefore + 3, chunkList.length);
chunkList.forEach(function(chunkToMove) {
- var toShard = configDB.shards.findOne({ _id: { $ne: chunkToMove.shard }})._id;
+ var toShard = configDB.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
- print(jsTestName() + " - moving chunk " + chunkToMove._id + " from shard " +
- chunkToMove.shard + " to " + toShard + "...");
+ print(jsTestName() + " - moving chunk " + chunkToMove._id + " from shard " + chunkToMove.shard +
+ " to " + toShard + "...");
- var cmdRes = testDB.adminCommand({ moveChunk: 'test.user',
- bounds: [ chunkToMove.min, chunkToMove.max ],
- to: toShard, _waitForDelete: true });
- print(jsTestName() + " - result from moving chunk " + chunkToMove._id + ": " +
- tojson(cmdRes));
+ var cmdRes = testDB.adminCommand({
+ moveChunk: 'test.user',
+ bounds: [chunkToMove.min, chunkToMove.max],
+ to: toShard,
+ _waitForDelete: true
+ });
+ print(jsTestName() + " - result from moving chunk " + chunkToMove._id + ": " + tojson(cmdRes));
});
st.stop();
-
diff --git a/jstests/sharding/hash_shard1.js b/jstests/sharding/hash_shard1.js
index dc07f3e20f5..10ab1b1308b 100644
--- a/jstests/sharding/hash_shard1.js
+++ b/jstests/sharding/hash_shard1.js
@@ -1,13 +1,13 @@
// Basic test of sharding with a hashed shard key
// - Test basic migrations with moveChunk, using different chunk specification methods
-var s = new ShardingTest( { name : jsTestName() , shards : 3 , mongos : 1, verbose : 1 } );
+var s = new ShardingTest({name: jsTestName(), shards: 3, mongos: 1, verbose: 1});
var dbname = "test";
var coll = "foo";
var ns = dbname + "." + coll;
-var db = s.getDB( dbname );
-var t = db.getCollection( coll );
-db.adminCommand( { enablesharding : dbname } );
+var db = s.getDB(dbname);
+var t = db.getCollection(coll);
+db.adminCommand({enablesharding: dbname});
s.ensurePrimaryShard(dbname, 'shard0001');
// for simplicity start by turning off balancer
@@ -15,50 +15,46 @@ s.stopBalancer();
// shard a fresh collection using a hashed shard key
t.drop();
-var res = db.adminCommand( { shardcollection : ns , key : { a : "hashed" } } );
-assert.gt( s.config.chunks.count({ns:ns}), 3);
-assert.eq( res.ok , 1 , "shardcollection didn't work" );
+var res = db.adminCommand({shardcollection: ns, key: {a: "hashed"}});
+assert.gt(s.config.chunks.count({ns: ns}), 3);
+assert.eq(res.ok, 1, "shardcollection didn't work");
s.printShardingStatus();
// insert stuff
var numitems = 1000;
-for(i = 0; i < numitems; i++ ){
- t.insert( { a: i } );
+for (i = 0; i < numitems; i++) {
+ t.insert({a: i});
}
// check they all got inserted
-assert.eq( t.find().count() , numitems , "count off after inserts" );
-printjson( t.find().explain() );
+assert.eq(t.find().count(), numitems, "count off after inserts");
+printjson(t.find().explain());
// find a chunk that's not on shard0000
-var chunk = s.config.chunks.findOne( {shard : {$ne : "shard0000"} } );
-assert.neq(chunk, null, "all chunks on shard0000!");
+var chunk = s.config.chunks.findOne({shard: {$ne: "shard0000"}});
+assert.neq(chunk, null, "all chunks on shard0000!");
printjson(chunk);
// try to move the chunk using an invalid specification method. should fail.
-var res = db.adminCommand( { movechunk : ns ,
- find : { a : 0 } ,
- bounds : [ chunk.min , chunk.max ] ,
- to: "shard0000" } );
-assert.eq( res.ok , 0 , "moveChunk shouldn't work with invalid specification method");
+var res = db.adminCommand(
+ {movechunk: ns, find: {a: 0}, bounds: [chunk.min, chunk.max], to: "shard0000"});
+assert.eq(res.ok, 0, "moveChunk shouldn't work with invalid specification method");
// now move a chunk using the lower/upper bound method. should work.
-var res = db.adminCommand( { movechunk : ns ,
- bounds : [ chunk.min , chunk.max ] ,
- to: "shard0000" } );
-printjson( res );
-assert.eq( res.ok , 1 , "movechunk using lower/upper bound method didn't work " );
+var res = db.adminCommand({movechunk: ns, bounds: [chunk.min, chunk.max], to: "shard0000"});
+printjson(res);
+assert.eq(res.ok, 1, "movechunk using lower/upper bound method didn't work ");
// check count still correct.
-assert.eq( t.find().itcount() , numitems , "count off after migrate" );
-printjson( t.find().explain() );
+assert.eq(t.find().itcount(), numitems, "count off after migrate");
+printjson(t.find().explain());
// move a chunk using the find method
-var res = db.adminCommand( { movechunk : ns , find : { a : 2 } , to: "shard0002" } );
-printjson( res );
-assert.eq( res.ok , 1 , "movechunk using find query didn't work" );
+var res = db.adminCommand({movechunk: ns, find: {a: 2}, to: "shard0002"});
+printjson(res);
+assert.eq(res.ok, 1, "movechunk using find query didn't work");
// check counts still correct
-assert.eq( t.find().itcount() , numitems , "count off after migrate" );
-printjson( t.find().explain() );
+assert.eq(t.find().itcount(), numitems, "count off after migrate");
+printjson(t.find().explain());
s.stop();
diff --git a/jstests/sharding/hash_shard_non_empty.js b/jstests/sharding/hash_shard_non_empty.js
index 47e2aa37e1b..35c7572bb75 100644
--- a/jstests/sharding/hash_shard_non_empty.js
+++ b/jstests/sharding/hash_shard_non_empty.js
@@ -1,23 +1,22 @@
// Hash sharding on a non empty collection should not pre-split.
-var s = new ShardingTest({ name : jsTestName(), shards : 3, mongos : 1, verbose : 1 });
+var s = new ShardingTest({name: jsTestName(), shards: 3, mongos: 1, verbose: 1});
var dbname = "test";
var coll = "foo";
var db = s.getDB(dbname);
-db.adminCommand({ enablesharding : dbname });
+db.adminCommand({enablesharding: dbname});
s.ensurePrimaryShard('test', 'shard0001');
-//for simplicity turn off balancer
+// for simplicity turn off balancer
s.stopBalancer();
-db.getCollection(coll).insert({ a : 1 });
+db.getCollection(coll).insert({a: 1});
-db.getCollection(coll).ensureIndex({ a: "hashed"});
-var res = db.adminCommand({ shardcollection : dbname + "." + coll, key : { a : "hashed" } });
+db.getCollection(coll).ensureIndex({a: "hashed"});
+var res = db.adminCommand({shardcollection: dbname + "." + coll, key: {a: "hashed"}});
assert.eq(res.ok, 1, "shardcollection didn't work");
s.printShardingStatus();
var numChunks = s.config.chunks.count();
-assert.eq(numChunks, 1 , "sharding non-empty collection should not pre-split");
+assert.eq(numChunks, 1, "sharding non-empty collection should not pre-split");
s.stop();
-
diff --git a/jstests/sharding/hash_shard_num_chunks.js b/jstests/sharding/hash_shard_num_chunks.js
index ad32a080c70..b34ee7ecc92 100644
--- a/jstests/sharding/hash_shard_num_chunks.js
+++ b/jstests/sharding/hash_shard_num_chunks.js
@@ -2,39 +2,38 @@
(function() {
-var s = new ShardingTest({ shards: 3, mongos: 1 });
-s.stopBalancer();
+ var s = new ShardingTest({shards: 3, mongos: 1});
+ s.stopBalancer();
-var dbname = "test";
-var coll = "foo";
-var db = s.getDB(dbname);
+ var dbname = "test";
+ var coll = "foo";
+ var db = s.getDB(dbname);
-assert.commandWorked(db.adminCommand({ enablesharding: dbname }));
-s.ensurePrimaryShard(dbname, 'shard0001');
+ assert.commandWorked(db.adminCommand({enablesharding: dbname}));
+ s.ensurePrimaryShard(dbname, 'shard0001');
-assert.commandWorked(db.adminCommand({ shardcollection: dbname + "." + coll,
- key: { a: "hashed" },
- numInitialChunks: 500 }));
+ assert.commandWorked(db.adminCommand(
+ {shardcollection: dbname + "." + coll, key: {a: "hashed"}, numInitialChunks: 500}));
-s.printShardingStatus();
+ s.printShardingStatus();
-var numChunks = s.config.chunks.count();
-assert.eq(numChunks, 500 , "should be exactly 500 chunks");
+ var numChunks = s.config.chunks.count();
+ assert.eq(numChunks, 500, "should be exactly 500 chunks");
-var shards = s.config.shards.find();
-shards.forEach(
- // check that each shard has one third the numInitialChunks
- function (shard){
- var numChunksOnShard = s.config.chunks.find({"shard": shard._id}).count();
- assert.gte(numChunksOnShard, Math.floor(500/3));
- }
-);
+ var shards = s.config.shards.find();
+ shards.forEach(
+ // check that each shard has one third the numInitialChunks
+ function(shard) {
+ var numChunksOnShard = s.config.chunks.find({"shard": shard._id}).count();
+ assert.gte(numChunksOnShard, Math.floor(500 / 3));
+ });
-// Check that the collection gets dropped correctly (which doesn't happen if pre-splitting fails to
-// create the collection on all shards).
-res = db.runCommand({ "drop": coll });
-assert.eq(res.ok, 1, "couldn't drop empty, pre-split collection");
 +    // Check that the collection gets dropped correctly (which doesn't happen
 +    // if pre-splitting fails to create the
 +    // collection on all shards).
+ res = db.runCommand({"drop": coll});
+ assert.eq(res.ok, 1, "couldn't drop empty, pre-split collection");
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/hash_shard_unique_compound.js b/jstests/sharding/hash_shard_unique_compound.js
index 25acfb87f48..3d82c2452b5 100644
--- a/jstests/sharding/hash_shard_unique_compound.js
+++ b/jstests/sharding/hash_shard_unique_compound.js
@@ -3,26 +3,26 @@
// 1.) shard collection on hashed "a", ensure unique index {a:1, b:1}
// 2.) reverse order
-var s = new ShardingTest( { name : jsTestName() , shards : 1 , mongos : 1, verbose : 1 } );
+var s = new ShardingTest({name: jsTestName(), shards: 1, mongos: 1, verbose: 1});
var dbName = "test";
var collName = "foo";
var ns = dbName + "." + collName;
-var db = s.getDB( dbName );
-var coll = db.getCollection( collName );
+var db = s.getDB(dbName);
+var coll = db.getCollection(collName);
// Enable sharding on DB
-var res = db.adminCommand( { enablesharding : dbName } );
+var res = db.adminCommand({enablesharding: dbName});
// for simplicity start by turning off balancer
var res = s.stopBalancer();
// shard a fresh collection using a hashed shard key
coll.drop();
-assert.commandWorked(db.adminCommand( { shardcollection : ns , key : { a : "hashed" } } ));
+assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}));
s.printShardingStatus();
// Create unique index
-assert.commandWorked(coll.ensureIndex({ a: 1, b: 1 }, { unique: true }));
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
jsTest.log("------ indexes -------");
jsTest.log(tojson(coll.getIndexes()));
@@ -31,11 +31,11 @@ jsTest.log(tojson(coll.getIndexes()));
jsTest.log("------ dropping sharded collection to start part 2 -------");
coll.drop();
-//Create unique index
-assert.commandWorked(coll.ensureIndex({ a: 1, b: 1 }, { unique: true }));
+// Create unique index
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
// shard a fresh collection using a hashed shard key
-assert.commandWorked(db.adminCommand( { shardcollection : ns , key : { a : "hashed" } } ),
+assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}),
"shardcollection didn't worked 2");
s.printShardingStatus();
diff --git a/jstests/sharding/hash_single_shard.js b/jstests/sharding/hash_single_shard.js
index 7dc9d6df078..8018a1ab640 100644
--- a/jstests/sharding/hash_single_shard.js
+++ b/jstests/sharding/hash_single_shard.js
@@ -1,14 +1,14 @@
// Test hashed presplit with 1 shard.
-var st = new ShardingTest({ shards: 1 });
+var st = new ShardingTest({shards: 1});
var testDB = st.getDB('test');
-//create hashed shard key and enable sharding
-testDB.adminCommand({ enablesharding: "test" });
-testDB.adminCommand({ shardCollection: "test.collection", key: { a: "hashed" }});
+// create hashed shard key and enable sharding
+testDB.adminCommand({enablesharding: "test"});
+testDB.adminCommand({shardCollection: "test.collection", key: {a: "hashed"}});
-//check the number of initial chunks.
-assert.eq(2, st.getDB('config').chunks.count(),
- 'Using hashed shard key but failing to do correct presplitting');
+// check the number of initial chunks.
+assert.eq(2,
+ st.getDB('config').chunks.count(),
+ 'Using hashed shard key but failing to do correct presplitting');
st.stop();
-
diff --git a/jstests/sharding/hash_skey_split.js b/jstests/sharding/hash_skey_split.js
index a1f0060feae..fe8cef3e0d3 100644
--- a/jstests/sharding/hash_skey_split.js
+++ b/jstests/sharding/hash_skey_split.js
@@ -1,26 +1,25 @@
(function() {
-var st = new ShardingTest({ shards: 2 });
+ var st = new ShardingTest({shards: 2});
-var configDB = st.s.getDB('config');
-assert.commandWorked(configDB.adminCommand({ enableSharding: 'test' }));
+ var configDB = st.s.getDB('config');
+ assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
-st.ensurePrimaryShard('test', 'shard0001');
-assert.commandWorked(configDB.adminCommand({ shardCollection: 'test.user',
- key: { x: 'hashed' },
- numInitialChunks: 2 }));
+ st.ensurePrimaryShard('test', 'shard0001');
+ assert.commandWorked(configDB.adminCommand(
+ {shardCollection: 'test.user', key: {x: 'hashed'}, numInitialChunks: 2}));
-var metadata = st.d0.getDB('admin').runCommand({ getShardVersion: 'test.user',
- fullMetadata: true });
-var chunks = metadata.metadata.chunks.length > 0 ?
- metadata.metadata.chunks : metadata.metadata.pending;
-assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0, tojson(metadata));
+ var metadata =
+ st.d0.getDB('admin').runCommand({getShardVersion: 'test.user', fullMetadata: true});
+ var chunks =
+ metadata.metadata.chunks.length > 0 ? metadata.metadata.chunks : metadata.metadata.pending;
+ assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0, tojson(metadata));
-metadata = st.d1.getDB('admin').runCommand({ getShardVersion: 'test.user',
- fullMetadata: true });
-chunks = metadata.metadata.chunks.length > 0 ? metadata.metadata.chunks : metadata.metadata.pending;
-assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0, tojson(metadata));
+ metadata = st.d1.getDB('admin').runCommand({getShardVersion: 'test.user', fullMetadata: true});
+ chunks =
+ metadata.metadata.chunks.length > 0 ? metadata.metadata.chunks : metadata.metadata.pending;
+ assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0, tojson(metadata));
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/idhack_sharded.js b/jstests/sharding/idhack_sharded.js
index 8c45f5d0f00..a5a8ae5df5c 100644
--- a/jstests/sharding/idhack_sharded.js
+++ b/jstests/sharding/idhack_sharded.js
@@ -10,8 +10,8 @@ assert.commandWorked(coll.getDB().adminCommand({enableSharding: coll.getDB().get
coll.getDB().adminCommand({movePrimary: coll.getDB().getName(), to: "shard0000"});
assert.commandWorked(coll.getDB().adminCommand({shardCollection: coll.getFullName(), key: {x: 1}}));
assert.commandWorked(coll.getDB().adminCommand({split: coll.getFullName(), middle: {x: 0}}));
-assert.commandWorked(coll.getDB().adminCommand({moveChunk: coll.getFullName(), find: {x: 0},
- to: "shard0001"}));
+assert.commandWorked(
+ coll.getDB().adminCommand({moveChunk: coll.getFullName(), find: {x: 0}, to: "shard0001"}));
//
// Test that idhack queries with projections that remove the shard key return correct results.
diff --git a/jstests/sharding/implicit_db_creation.js b/jstests/sharding/implicit_db_creation.js
index 19c48a5f03b..ed202ff2ea5 100644
--- a/jstests/sharding/implicit_db_creation.js
+++ b/jstests/sharding/implicit_db_creation.js
@@ -2,45 +2,45 @@
* This tests the basic cases for implicit database creation in a sharded cluster.
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards: 2 });
-var configDB = st.s.getDB('config');
+ var st = new ShardingTest({shards: 2});
+ var configDB = st.s.getDB('config');
-assert.eq(null, configDB.databases.findOne());
+ assert.eq(null, configDB.databases.findOne());
-var testDB = st.s.getDB('test');
+ var testDB = st.s.getDB('test');
-// Test that reads will not result into a new config.databases entry.
-assert.eq(null, testDB.user.findOne());
-assert.eq(null, configDB.databases.findOne({ _id: 'test' }));
+ // Test that reads will not result into a new config.databases entry.
+ assert.eq(null, testDB.user.findOne());
+ assert.eq(null, configDB.databases.findOne({_id: 'test'}));
-assert.writeOK(testDB.user.insert({ x: 1 }));
+ assert.writeOK(testDB.user.insert({x: 1}));
-var testDBDoc = configDB.databases.findOne();
-assert.eq('test', testDBDoc._id, tojson(testDBDoc));
+ var testDBDoc = configDB.databases.findOne();
+ assert.eq('test', testDBDoc._id, tojson(testDBDoc));
-// Test that inserting to another collection in the same database will not modify the existing
-// config.databases entry.
-assert.writeOK(testDB.bar.insert({ y: 1 }));
-assert.eq(testDBDoc, configDB.databases.findOne());
+ // Test that inserting to another collection in the same database will not modify the existing
+ // config.databases entry.
+ assert.writeOK(testDB.bar.insert({y: 1}));
+ assert.eq(testDBDoc, configDB.databases.findOne());
-st.s.adminCommand({ enableSharding: 'foo' });
-var fooDBDoc = configDB.databases.findOne({ _id: 'foo' });
+ st.s.adminCommand({enableSharding: 'foo'});
+ var fooDBDoc = configDB.databases.findOne({_id: 'foo'});
-assert.neq(null, fooDBDoc);
-assert(fooDBDoc.partitioned);
+ assert.neq(null, fooDBDoc);
+ assert(fooDBDoc.partitioned);
-var newShardConn = MongoRunner.runMongod({});
-var unshardedDB = newShardConn.getDB('unshardedDB');
+ var newShardConn = MongoRunner.runMongod({});
+ var unshardedDB = newShardConn.getDB('unshardedDB');
-unshardedDB.user.insert({ z: 1 });
+ unshardedDB.user.insert({z: 1});
-assert.commandWorked(st.s.adminCommand({ addShard: newShardConn.name }));
+ assert.commandWorked(st.s.adminCommand({addShard: newShardConn.name}));
-assert.neq(null, configDB.databases.findOne({ _id: 'unshardedDB' }));
+ assert.neq(null, configDB.databases.findOne({_id: 'unshardedDB'}));
-MongoRunner.stopMongod(newShardConn.port);
-st.stop();
+ MongoRunner.stopMongod(newShardConn.port);
+ st.stop();
})();
diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js
index 83aa8839986..1c7e8c73447 100644
--- a/jstests/sharding/in_memory_sort_limit.js
+++ b/jstests/sharding/in_memory_sort_limit.js
@@ -2,44 +2,48 @@
// doesn't cause the in-memory sort limit to be reached, then make sure the same limit also doesn't
// cause the in-memory sort limit to be reached when running through a mongos.
(function() {
- "use strict";
-
- var st = new ShardingTest({ shards: 2 });
- var db = st.s.getDB('test');
- var mongosCol = db.getCollection('skip');
- db.adminCommand({ enableSharding: 'test' });
- st.ensurePrimaryShard('test', 'shard0001');
- db.adminCommand({ shardCollection: 'test.skip', key: { _id: 1 }});
-
- var filler = new Array(10000).toString();
- var bulk = [];
- // create enough data to exceed 32MB in-memory sort limit.
- for (var i = 0; i < 20000; i++) {
- bulk.push({x:i, str:filler});
- }
- assert.writeOK(mongosCol.insert(bulk));
-
- // Make sure that at least 1 chunk is on another shard so that mongos doesn't treat this as a
- // single-shard query (which doesn't exercise the bug).
- st.startBalancer();
- st.awaitBalance('skip', 'test');
-
- var docCount = mongosCol.count();
- var shardCol = st.shard0.getDB('test').getCollection('skip');
- var passLimit = 2000;
- var failLimit = 4000;
- jsTestLog("Test no error with limit of " + passLimit + " on mongod");
- assert.eq(passLimit, shardCol.find().sort({x:1}).limit(passLimit).itcount());
-
- jsTestLog("Test error with limit of " + failLimit + " on mongod");
- assert.throws( function() {shardCol.find().sort({x:1}).limit(failLimit).itcount(); } );
-
- jsTestLog("Test no error with limit of " + passLimit + " on mongos");
- assert.eq(passLimit, mongosCol.find().sort({x:1}).limit(passLimit).itcount());
-
- jsTestLog("Test error with limit of " + failLimit + " on mongos");
- assert.throws( function() {mongosCol.find().sort({x:1}).limit(failLimit).itcount(); } );
-
- st.stop();
-
- })();
+ "use strict";
+
+ var st = new ShardingTest({shards: 2});
+ var db = st.s.getDB('test');
+ var mongosCol = db.getCollection('skip');
+ db.adminCommand({enableSharding: 'test'});
+ st.ensurePrimaryShard('test', 'shard0001');
+ db.adminCommand({shardCollection: 'test.skip', key: {_id: 1}});
+
+ var filler = new Array(10000).toString();
+ var bulk = [];
+ // create enough data to exceed 32MB in-memory sort limit.
+ for (var i = 0; i < 20000; i++) {
+ bulk.push({x: i, str: filler});
+ }
+ assert.writeOK(mongosCol.insert(bulk));
+
+ // Make sure that at least 1 chunk is on another shard so that mongos doesn't treat this as a
+ // single-shard query (which doesn't exercise the bug).
+ st.startBalancer();
+ st.awaitBalance('skip', 'test');
+
+ var docCount = mongosCol.count();
+ var shardCol = st.shard0.getDB('test').getCollection('skip');
+ var passLimit = 2000;
+ var failLimit = 4000;
+ jsTestLog("Test no error with limit of " + passLimit + " on mongod");
+ assert.eq(passLimit, shardCol.find().sort({x: 1}).limit(passLimit).itcount());
+
+ jsTestLog("Test error with limit of " + failLimit + " on mongod");
+ assert.throws(function() {
+ shardCol.find().sort({x: 1}).limit(failLimit).itcount();
+ });
+
+ jsTestLog("Test no error with limit of " + passLimit + " on mongos");
+ assert.eq(passLimit, mongosCol.find().sort({x: 1}).limit(passLimit).itcount());
+
+ jsTestLog("Test error with limit of " + failLimit + " on mongos");
+ assert.throws(function() {
+ mongosCol.find().sort({x: 1}).limit(failLimit).itcount();
+ });
+
+ st.stop();
+
+})();
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
index 7f0cea49cf0..766bd96e260 100644
--- a/jstests/sharding/index1.js
+++ b/jstests/sharding/index1.js
@@ -1,391 +1,359 @@
// SERVER-2326 - make sure that sharding only works with unique indices
(function() {
-var s = new ShardingTest({ name: "shard_index", shards: 2, mongos: 1 });
-
-// Regenerate fully because of SERVER-2782
-for ( var i = 0; i < 22; i++ ) {
-
- var coll = s.admin._mongo.getDB( "test" ).getCollection( "foo" + i );
- coll.drop();
-
- var bulk = coll.initializeUnorderedBulkOp();
- for ( var j = 0; j < 300; j++ ) {
- bulk.insert({ num: j, x: 1 });
- }
- assert.writeOK(bulk.execute());
-
- if (i == 0) {
- s.adminCommand({ enablesharding: "" + coll._db });
- s.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
- }
-
- print("\n\n\n\n\nTest # " + i);
-
- if ( i == 0 ) {
-
- // Unique index exists, but not the right one.
- coll.ensureIndex( { num : 1 }, { unique : true } );
- coll.ensureIndex( { x : 1 } );
-
- passed = false;
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } );
- passed = true;
- } catch (e) {
- print( e );
- }
- assert( !passed, "Should not shard collection when another unique index exists!");
-
- }
- if ( i == 1 ) {
-
- // Unique index exists as prefix, also index exists
- coll.ensureIndex( { x : 1 } );
- coll.ensureIndex( { x : 1, num : 1 }, { unique : true } );
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } });
- }
- catch(e){
- print(e);
- assert( false, "Should be able to shard non-unique index without unique option.");
- }
-
- }
- if ( i == 2 ) {
- // Non-unique index exists as prefix, also index exists. No unique index.
- coll.ensureIndex( { x : 1 } );
- coll.ensureIndex( { x : 1, num : 1 } );
-
- passed = false;
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } });
- passed = true;
-
- }
- catch( e ){
- print(e);
- assert( !passed, "Should be able to shard collection with no unique index if unique not specified.");
- }
- }
- if ( i == 3 ) {
-
- // Unique index exists as prefix, also unique index exists
- coll.ensureIndex( { num : 1 }, { unique : true });
- coll.ensureIndex( { num : 1 , x : 1 }, { unique : true } );
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true });
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard collection with unique prefix index.");
- }
-
- }
- if ( i == 4 ) {
-
- // Unique index exists as id, also unique prefix index exists
- coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } );
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { _id : 1 }, unique : true });
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard collection with unique id index.");
- }
-
- }
- if ( i == 5 ) {
-
- // Unique index exists as id, also unique prefix index exists
- coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } );
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { _id : 1, num : 1 }, unique : true });
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard collection with unique combination id index.");
- }
-
- }
- if ( i == 6 ) {
-
- coll.remove({});
-
- // Unique index does not exist, also unique prefix index exists
- coll.ensureIndex( { num : 1, _id : 1 }, { unique : true } );
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true });
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard collection with no unique index but with a unique prefix index.");
- }
-
- printjson( coll.getIndexes() );
-
- // Make sure the index created is unique!
- assert.eq( 1, coll.getIndexes().filter( function(z) { return friendlyEqual( z.key, { num : 1 } ) && z.unique; } ).length );
-
- }
- if ( i == 7 ) {
- coll.remove({});
-
- // No index exists
-
- try{
- assert.eq( coll.find().itcount(), 0 );
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 } });
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard collection with no index on shard key.");
- }
- }
- if ( i == 8 ) {
- coll.remove({});
-
- // No index exists
-
- passed = false;
- try{
- assert.eq( coll.find().itcount(), 0 );
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true });
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( passed, "Should be able to shard collection with unique flag but with no unique index on shard key, if coll empty.");
-
- printjson( coll.getIndexes() );
-
- // Make sure the index created is unique!
- assert.eq( 1, coll.getIndexes().filter( function(z) { return friendlyEqual( z.key, { num : 1 } ) && z.unique; } ).length );
- }
- if ( i == 9 ) {
-
- // Unique index exists on a different field as well
- coll.ensureIndex( { num : 1 }, { unique : true } );
- coll.ensureIndex( { x : 1 } );
-
- passed = false;
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } );
- passed = true;
- } catch (e) {
- print( e );
- }
- assert( !passed, "Should not shard collection when another unique index exists!" );
- }
- if ( i == 10 ){
-
- //try sharding non-empty collection without any index
- passed = false;
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( !passed , "Should not be able to shard without index");
-
- //now add containing index and try sharding by prefix
- coll.ensureIndex( {num : 1, x : 1} );
-
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( passed , "Should be able to shard collection with prefix of existing index");
-
- printjson( coll.getIndexes() );
-
- //make sure no extra index is created
- assert.eq( 2, coll.getIndexes().length );
- }
- if ( i == 11 ){
- coll.remove({});
-
- //empty collection with useful index. check new index not created
- coll.ensureIndex( {num : 1, x : 1} );
-
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( passed , "Should be able to shard collection with prefix of existing index");
-
- printjson( coll.getIndexes() );
-
- //make sure no extra index is created
- assert.eq( 2, coll.getIndexes().length );
- }
- if ( i == 12 ){
-
- //check multikey values for x make index unusable for shard key
- coll.save({num : 100 , x : [2,3] });
- coll.ensureIndex( {num : 1, x : 1} );
-
- passed = false;
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( !passed , "Should not be able to shard collection with mulikey index");
- }
- if ( i == 13 ){
-
- coll.save({ num : [100, 200], x : 10});
- coll.ensureIndex( { num : 1, x : 1} );
-
- passed = false;
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( !passed , "Should not be able to shard collection with mulikey index");
-
- }
- if ( i == 14 ){
-
- coll.save({ num : 100, x : 10, y : [1,2]});
- coll.ensureIndex( { num : 1, x : 1, y : 1} );
-
- passed = false;
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( !passed , "Should not be able to shard collection with mulikey index");
-
- }
- if ( i == 15 ) {
-
- // try sharding with a hashed index
- coll.ensureIndex( { num : "hashed"} );
-
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : "hashed" } } );
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard collection with hashed index.");
- }
- }
- if ( i == 16 ) {
-
- // create hashed index, but try to declare it unique when sharding
- coll.ensureIndex( { num : "hashed"} );
-
- passed = false;
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { num : "hashed" }, unique : true});
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( !passed , "Should not be able to declare hashed shard key unique.");
-
- }
- if ( i == 17 ) {
-
- // create hashed index, but unrelated unique index present
- coll.ensureIndex( { x : "hashed" } );
- coll.ensureIndex( { num : 1 }, { unique : true} );
-
- passed = false;
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : "hashed" } } );
- passed = true;
- }
- catch (e) {
- print( e );
- }
- assert( !passed, "Should not be able to shard on hashed index with another unique index" );
-
- }
- if ( i == 18 ) {
-
- // create hashed index, and a regular unique index exists on same field
- coll.ensureIndex( { num : "hashed" } );
- coll.ensureIndex( { num : 1 }, { unique : true } );
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { num : "hashed" } } );
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard coll with hashed and regular unique index");
- }
- }
- if ( i == 19 ) {
- // Create sparse index.
- coll.ensureIndex( { x : 1 }, { sparse : true } );
-
- passed = false;
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } );
- passed = true;
- }
- catch ( e ) {
- print( e );
- }
- assert( !passed, "Should not be able to shard coll with sparse index" );
- }
- if ( i == 20 ) {
- // Create partial index.
- coll.ensureIndex( { x : 1 }, { filter: { num : { $gt : 1 } } } );
-
- passed = false;
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } );
- passed = true;
- }
- catch ( e ) {
- print( e );
- }
- assert( !passed, "Should not be able to shard coll with partial index" );
- }
- if ( i == 21 ) {
- // Ensure that a collection with a normal index and a partial index can be sharded, where
- // both are prefixed by the shard key.
-
- coll.ensureIndex( { x : 1, num : 1 }, { filter: { num : { $gt : 1 } } } );
- coll.ensureIndex( { x : 1, num : -1 } );
-
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } );
- }
- catch ( e ) {
- print( e );
- assert( false, "Should be able to shard coll with regular and partial index");
+ var s = new ShardingTest({name: "shard_index", shards: 2, mongos: 1});
+
+ // Regenerate fully because of SERVER-2782
+ for (var i = 0; i < 22; i++) {
+ var coll = s.admin._mongo.getDB("test").getCollection("foo" + i);
+ coll.drop();
+
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var j = 0; j < 300; j++) {
+ bulk.insert({num: j, x: 1});
+ }
+ assert.writeOK(bulk.execute());
+
+ if (i == 0) {
+ s.adminCommand({enablesharding: "" + coll._db});
+ s.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+ }
+
+ print("\n\n\n\n\nTest # " + i);
+
+ if (i == 0) {
+ // Unique index exists, but not the right one.
+ coll.ensureIndex({num: 1}, {unique: true});
+ coll.ensureIndex({x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not shard collection when another unique index exists!");
+ }
+ if (i == 1) {
+ // Unique index exists as prefix, also index exists
+ coll.ensureIndex({x: 1});
+ coll.ensureIndex({x: 1, num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard non-unique index without unique option.");
+ }
+ }
+ if (i == 2) {
+ // Non-unique index exists as prefix, also index exists. No unique index.
+ coll.ensureIndex({x: 1});
+ coll.ensureIndex({x: 1, num: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+
+ } catch (e) {
+ print(e);
+ assert(
+ !passed,
+ "Should be able to shard collection with no unique index if unique not specified.");
+ }
+ }
+ if (i == 3) {
+ // Unique index exists as prefix, also unique index exists
+ coll.ensureIndex({num: 1}, {unique: true});
+ coll.ensureIndex({num: 1, x: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with unique prefix index.");
+ }
+ }
+ if (i == 4) {
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex({_id: 1, num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {_id: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with unique id index.");
+ }
+ }
+ if (i == 5) {
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex({_id: 1, num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {_id: 1, num: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(false,
+ "Should be able to shard collection with unique combination id index.");
+ }
+ }
+ if (i == 6) {
+ coll.remove({});
+
+ // Unique index does not exist, also unique prefix index exists
+ coll.ensureIndex({num: 1, _id: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(
+ false,
+ "Should be able to shard collection with no unique index but with a unique prefix index.");
+ }
+
+ printjson(coll.getIndexes());
+
+ // Make sure the index created is unique!
+ assert.eq(1,
+ coll.getIndexes().filter(function(z) {
+ return friendlyEqual(z.key, {num: 1}) && z.unique;
+ }).length);
+ }
+ if (i == 7) {
+ coll.remove({});
+
+ // No index exists
+
+ try {
+ assert.eq(coll.find().itcount(), 0);
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with no index on shard key.");
+ }
+ }
+ if (i == 8) {
+ coll.remove({});
+
+ // No index exists
+
+ passed = false;
+ try {
+ assert.eq(coll.find().itcount(), 0);
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(
+ passed,
+ "Should be able to shard collection with unique flag but with no unique index on shard key, if coll empty.");
+
+ printjson(coll.getIndexes());
+
+ // Make sure the index created is unique!
+ assert.eq(1,
+ coll.getIndexes().filter(function(z) {
+ return friendlyEqual(z.key, {num: 1}) && z.unique;
+ }).length);
+ }
+ if (i == 9) {
+ // Unique index exists on a different field as well
+ coll.ensureIndex({num: 1}, {unique: true});
+ coll.ensureIndex({x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not shard collection when another unique index exists!");
+ }
+ if (i == 10) {
+ // try sharding non-empty collection without any index
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not be able to shard without index");
+
+ // now add containing index and try sharding by prefix
+ coll.ensureIndex({num: 1, x: 1});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(passed, "Should be able to shard collection with prefix of existing index");
+
+ printjson(coll.getIndexes());
+
+ // make sure no extra index is created
+ assert.eq(2, coll.getIndexes().length);
+ }
+ if (i == 11) {
+ coll.remove({});
+
+ // empty collection with useful index. check new index not created
+ coll.ensureIndex({num: 1, x: 1});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(passed, "Should be able to shard collection with prefix of existing index");
+
+ printjson(coll.getIndexes());
+
+ // make sure no extra index is created
+ assert.eq(2, coll.getIndexes().length);
+ }
+ if (i == 12) {
+ // check multikey values for x make index unusable for shard key
+ coll.save({num: 100, x: [2, 3]});
+ coll.ensureIndex({num: 1, x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not be able to shard collection with mulikey index");
+ }
+ if (i == 13) {
+ coll.save({num: [100, 200], x: 10});
+ coll.ensureIndex({num: 1, x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not be able to shard collection with mulikey index");
+ }
+ if (i == 14) {
+ coll.save({num: 100, x: 10, y: [1, 2]});
+ coll.ensureIndex({num: 1, x: 1, y: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not be able to shard collection with mulikey index");
+ }
+ if (i == 15) {
+ // try sharding with a hashed index
+ coll.ensureIndex({num: "hashed"});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with hashed index.");
+ }
+ }
+ if (i == 16) {
+ // create hashed index, but try to declare it unique when sharding
+ coll.ensureIndex({num: "hashed"});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}, unique: true});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not be able to declare hashed shard key unique.");
+ }
+ if (i == 17) {
+ // create hashed index, but unrelated unique index present
+ coll.ensureIndex({x: "hashed"});
+ coll.ensureIndex({num: 1}, {unique: true});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: "hashed"}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed,
+ "Should not be able to shard on hashed index with another unique index");
+ }
+ if (i == 18) {
+ // create hashed index, and a regular unique index exists on same field
+ coll.ensureIndex({num: "hashed"});
+ coll.ensureIndex({num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard coll with hashed and regular unique index");
+ }
+ }
+ if (i == 19) {
+ // Create sparse index.
+ coll.ensureIndex({x: 1}, {sparse: true});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not be able to shard coll with sparse index");
+ }
+ if (i == 20) {
+ // Create partial index.
+ coll.ensureIndex({x: 1}, {filter: {num: {$gt: 1}}});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not be able to shard coll with partial index");
+ }
+ if (i == 21) {
+ // Ensure that a collection with a normal index and a partial index can be sharded,
+ // where
+ // both are prefixed by the shard key.
+
+ coll.ensureIndex({x: 1, num: 1}, {filter: {num: {$gt: 1}}});
+ coll.ensureIndex({x: 1, num: -1});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard coll with regular and partial index");
+ }
}
}
-}
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/inserts_consistent.js b/jstests/sharding/inserts_consistent.js
index 7da92837ad7..b4ee71bbe7a 100644
--- a/jstests/sharding/inserts_consistent.js
+++ b/jstests/sharding/inserts_consistent.js
@@ -1,8 +1,8 @@
// Test write re-routing on version mismatch.
-var st = new ShardingTest({ shards : 2, mongos : 2, verbose : 2 });
+var st = new ShardingTest({shards: 2, mongos: 2, verbose: 2});
-jsTest.log( "Doing test setup..." );
+jsTest.log("Doing test setup...");
// Stop balancer, since it'll just get in the way of this
st.stopBalancer();
@@ -10,75 +10,75 @@ st.stopBalancer();
var mongos = st.s;
var admin = mongos.getDB("admin");
var config = mongos.getDB("config");
-var coll = st.s.getCollection( jsTest.name() + ".coll" );
+var coll = st.s.getCollection(jsTest.name() + ".coll");
-st.shardColl( coll, { _id : 1 }, { _id : 0 }, false );
+st.shardColl(coll, {_id: 1}, {_id: 0}, false);
-jsTest.log( "Refreshing second mongos..." );
+jsTest.log("Refreshing second mongos...");
var mongosB = st.s1;
var adminB = mongosB.getDB("admin");
-var collB = mongosB.getCollection( coll + "" );
+var collB = mongosB.getCollection(coll + "");
// Make sure mongosB knows about the coll
-assert.eq( 0, collB.find().itcount() );
+assert.eq(0, collB.find().itcount());
// printjson( adminB.runCommand({ flushRouterConfig : 1 }) )
-jsTest.log( "Moving chunk to create stale mongos..." );
+jsTest.log("Moving chunk to create stale mongos...");
-var otherShard = config.chunks.findOne({ _id : sh._collRE( coll ) }).shard;
-for( var i = 0; i < st._shardNames.length; i++ ){
- if( otherShard != st._shardNames[i] ){
+var otherShard = config.chunks.findOne({_id: sh._collRE(coll)}).shard;
+for (var i = 0; i < st._shardNames.length; i++) {
+ if (otherShard != st._shardNames[i]) {
otherShard = st._shardNames[i];
break;
}
}
-print( "Other shard : " + otherShard );
+print("Other shard : " + otherShard);
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : otherShard }) );
+printjson(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: otherShard}));
-jsTest.log( "Inserting docs that needs to be retried..." );
+jsTest.log("Inserting docs that needs to be retried...");
var nextId = -1;
-for( var i = 0; i < 2; i++ ){
- printjson( "Inserting " + nextId );
- assert.writeOK(collB.insert({ _id : nextId--, hello : "world" }));
+for (var i = 0; i < 2; i++) {
+ printjson("Inserting " + nextId);
+ assert.writeOK(collB.insert({_id: nextId--, hello: "world"}));
}
-jsTest.log( "Inserting doc which successfully goes through..." );
+jsTest.log("Inserting doc which successfully goes through...");
// Do second write
-assert.writeOK(collB.insert({ _id : nextId--, goodbye : "world" }));
+assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
// Assert that write went through
-assert.eq( coll.find().itcount(), 3 );
+assert.eq(coll.find().itcount(), 3);
-jsTest.log( "Now try moving the actual chunk we're writing to..." );
+jsTest.log("Now try moving the actual chunk we're writing to...");
// Now move the actual chunk we're writing to
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : -1 }, to : otherShard }) );
+printjson(admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: otherShard}));
-jsTest.log( "Inserting second docs to get written back..." );
+jsTest.log("Inserting second docs to get written back...");
// Will fail entirely if too many of these, waiting for write to get applied can get too long.
-for( var i = 0; i < 2; i++ ){
- collB.insert({ _id : nextId--, hello : "world" });
+for (var i = 0; i < 2; i++) {
+ collB.insert({_id: nextId--, hello: "world"});
}
// Refresh server
-printjson( adminB.runCommand({ flushRouterConfig : 1 }) );
+printjson(adminB.runCommand({flushRouterConfig: 1}));
-jsTest.log( "Inserting second doc which successfully goes through..." );
+jsTest.log("Inserting second doc which successfully goes through...");
// Do second write
-assert.writeOK(collB.insert({ _id : nextId--, goodbye : "world" }));
+assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
-jsTest.log( "All docs written this time!" );
+jsTest.log("All docs written this time!");
// Assert that writes went through.
-assert.eq( coll.find().itcount(), 6 );
+assert.eq(coll.find().itcount(), 6);
-jsTest.log( "DONE" );
+jsTest.log("DONE");
-st.stop();
+st.stop();
diff --git a/jstests/sharding/ismaster.js b/jstests/sharding/ismaster.js
index 3f6005c4807..b3500cf6009 100644
--- a/jstests/sharding/ismaster.js
+++ b/jstests/sharding/ismaster.js
@@ -1,27 +1,38 @@
-var st = new ShardingTest({shards:1, mongos:1});
+var st = new ShardingTest({shards: 1, mongos: 1});
var res = st.s0.getDB("admin").runCommand("ismaster");
// check that the fields that should be there are there and have proper values
-assert( res.maxBsonObjectSize &&
- isNumber(res.maxBsonObjectSize) &&
- res.maxBsonObjectSize > 0, "maxBsonObjectSize possibly missing:" + tojson(res));
-assert( res.maxMessageSizeBytes &&
- isNumber(res.maxMessageSizeBytes) &&
- res.maxBsonObjectSize > 0, "maxMessageSizeBytes possibly missing:" + tojson(res));
+assert(res.maxBsonObjectSize && isNumber(res.maxBsonObjectSize) && res.maxBsonObjectSize > 0,
+ "maxBsonObjectSize possibly missing:" + tojson(res));
+assert(res.maxMessageSizeBytes && isNumber(res.maxMessageSizeBytes) && res.maxBsonObjectSize > 0,
+ "maxMessageSizeBytes possibly missing:" + tojson(res));
assert(res.ismaster, "ismaster missing or false:" + tojson(res));
assert(res.localTime, "localTime possibly missing:" + tojson(res));
assert(res.msg && res.msg == "isdbgrid", "msg possibly missing or wrong:" + tojson(res));
-var unwantedFields = ["setName", "setVersion", "secondary", "hosts", "passives", "arbiters",
- "primary", "aribterOnly", "passive", "slaveDelay", "hidden", "tags",
- "buildIndexes", "me"];
+var unwantedFields = [
+ "setName",
+ "setVersion",
+ "secondary",
+ "hosts",
+ "passives",
+ "arbiters",
+ "primary",
+ "aribterOnly",
+ "passive",
+ "slaveDelay",
+ "hidden",
+ "tags",
+ "buildIndexes",
+ "me"
+];
// check that the fields that shouldn't be there are not there
var badFields = [];
for (field in res) {
- if (!res.hasOwnProperty(field)){
+ if (!res.hasOwnProperty(field)) {
continue;
}
if (Array.contains(unwantedFields, field)) {
badFields.push(field);
}
}
-assert(badFields.length === 0, "\nthe result:\n" + tojson(res)
- + "\ncontained fields it shouldn't have: " + badFields);
+assert(badFields.length === 0,
+ "\nthe result:\n" + tojson(res) + "\ncontained fields it shouldn't have: " + badFields);
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index 22b4004635e..1e8a3a9fdb2 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -1,57 +1,53 @@
(function() {
-var s = new ShardingTest({ name: "jumbo1",
- shards: 2,
- mongos: 1,
- other: { chunkSize: 1 } });
+ var s = new ShardingTest({name: "jumbo1", shards: 2, mongos: 1, other: {chunkSize: 1}});
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {x: 1}});
-db = s.getDB( "test" );
+ db = s.getDB("test");
-big = "";
-while ( big.length < 10000 )
- big += ".";
+ big = "";
+ while (big.length < 10000)
+ big += ".";
-x = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-for ( ; x < 500; x++ )
- bulk.insert( { x : x , big : big } );
+ x = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (; x < 500; x++)
+ bulk.insert({x: x, big: big});
-for ( i=0; i<500; i++ )
- bulk.insert( { x : x , big : big } );
+ for (i = 0; i < 500; i++)
+ bulk.insert({x: x, big: big});
-for ( ; x < 2000; x++ )
- bulk.insert( { x : x , big : big } );
+ for (; x < 2000; x++)
+ bulk.insert({x: x, big: big});
-assert.writeOK( bulk.execute() );
+ assert.writeOK(bulk.execute());
-s.printShardingStatus(true);
-
-res = sh.moveChunk( "test.foo" , { x : 0 } , "shard0001" );
-if ( ! res.ok )
- res = sh.moveChunk( "test.foo" , { x : 0 } , "shard0000" );
+ s.printShardingStatus(true);
-s.printShardingStatus(true);
+ res = sh.moveChunk("test.foo", {x: 0}, "shard0001");
+ if (!res.ok)
+ res = sh.moveChunk("test.foo", {x: 0}, "shard0000");
-sh.setBalancerState( true );
+ s.printShardingStatus(true);
-function diff1(){
- var x = s.chunkCounts( "foo" );
- printjson( x );
- return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
-}
+ sh.setBalancerState(true);
-assert.soon( function(){
- var d = diff1();
- print( "diff: " + d );
- s.printShardingStatus(true);
- return d < 5;
-} , "balance didn't happen" , 1000 * 60 * 5 , 5000 );
+ function diff1() {
+ var x = s.chunkCounts("foo");
+ printjson(x);
+ return Math.max(x.shard0000, x.shard0001) - Math.min(x.shard0000, x.shard0001);
+ }
+ assert.soon(function() {
+ var d = diff1();
+ print("diff: " + d);
+ s.printShardingStatus(true);
+ return d < 5;
+ }, "balance didn't happen", 1000 * 60 * 5, 5000);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index 85cde37ba1d..93ce1b4d64a 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -1,169 +1,254 @@
(function() {
-'use strict';
-
-// Values have to be sorted - you must have exactly 6 values in each array
-var types = [
- { name: "string", values: [ "allan", "bob", "eliot", "joe", "mark", "sara" ], keyfield: "k" },
- { name: "double", values: [ 1.2, 3.5, 4.5, 4.6, 6.7, 9.9 ], keyfield: "a" },
- { name: "date", values: [ new Date(1000000), new Date(2000000), new Date(3000000), new Date(4000000), new Date(5000000), new Date(6000000) ], keyfield: "a" },
- { name: "string_id", values: [ "allan", "bob", "eliot", "joe", "mark", "sara" ], keyfield: "_id" },
- { name: "embedded 1", values: [ "allan", "bob", "eliot", "joe", "mark", "sara" ], keyfield: "a.b" },
- { name: "embedded 2", values: [ "allan", "bob", "eliot", "joe", "mark", "sara" ], keyfield: "a.b.c" },
- { name: "object", values: [ {a:1, b:1.2}, {a:1, b:3.5}, {a:1, b:4.5}, {a:2, b:1.2}, {a:2, b:3.5}, {a:2, b:4.5} ], keyfield: "o" },
- { name: "compound", values: [ {a:1, b:1.2}, {a:1, b:3.5}, {a:1, b:4.5}, {a:2, b:1.2}, {a:2, b:3.5}, {a:2, b:4.5} ], keyfield: "o", compound: true },
- { name: "oid_id", values: [ ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId() ], keyfield: "_id" },
- { name: "oid_other", values: [ ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId() ], keyfield: "o" },
+ 'use strict';
+
+ // Values have to be sorted - you must have exactly 6 values in each array
+ var types = [
+ {name: "string", values: ["allan", "bob", "eliot", "joe", "mark", "sara"], keyfield: "k"},
+ {name: "double", values: [1.2, 3.5, 4.5, 4.6, 6.7, 9.9], keyfield: "a"},
+ {
+ name: "date",
+ values: [
+ new Date(1000000),
+ new Date(2000000),
+ new Date(3000000),
+ new Date(4000000),
+ new Date(5000000),
+ new Date(6000000)
+ ],
+ keyfield: "a"
+ },
+ {
+ name: "string_id",
+ values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
+ keyfield: "_id"
+ },
+ {
+ name: "embedded 1",
+ values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
+ keyfield: "a.b"
+ },
+ {
+ name: "embedded 2",
+ values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
+ keyfield: "a.b.c"
+ },
+ {
+ name: "object",
+ values: [
+ {a: 1, b: 1.2},
+ {a: 1, b: 3.5},
+ {a: 1, b: 4.5},
+ {a: 2, b: 1.2},
+ {a: 2, b: 3.5},
+ {a: 2, b: 4.5}
+ ],
+ keyfield: "o"
+ },
+ {
+ name: "compound",
+ values: [
+ {a: 1, b: 1.2},
+ {a: 1, b: 3.5},
+ {a: 1, b: 4.5},
+ {a: 2, b: 1.2},
+ {a: 2, b: 3.5},
+ {a: 2, b: 4.5}
+ ],
+ keyfield: "o",
+ compound: true
+ },
+ {
+ name: "oid_id",
+ values: [ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId()],
+ keyfield: "_id"
+ },
+ {
+ name: "oid_other",
+ values: [ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId()],
+ keyfield: "o"
+ },
];
-var s = new ShardingTest({ name: "key_many", shards: 2 });
+ var s = new ShardingTest({name: "key_many", shards: 2});
-assert.commandWorked(s.s0.adminCommand({ enableSharding: 'test' }));
-s.ensurePrimaryShard('test', 'shard0001');
+ assert.commandWorked(s.s0.adminCommand({enableSharding: 'test'}));
+ s.ensurePrimaryShard('test', 'shard0001');
-var db = s.getDB('test');
-var primary = s.getPrimaryShard("test").getDB("test");
-var secondary = s.getOther(primary).getDB("test");
+ var db = s.getDB('test');
+ var primary = s.getPrimaryShard("test").getDB("test");
+ var secondary = s.getOther(primary).getDB("test");
-var curT;
+ var curT;
-function makeObjectDotted(v) {
- var o = {};
- if (curT.compound) {
- var prefix = curT.keyfield + '.';
- if (typeof(v) == 'object') {
- for (var key in v)
- o[prefix + key] = v[key];
+ function makeObjectDotted(v) {
+ var o = {};
+ if (curT.compound) {
+ var prefix = curT.keyfield + '.';
+ if (typeof(v) == 'object') {
+ for (var key in v)
+ o[prefix + key] = v[key];
+ } else {
+ for (var key in curT.values[0])
+ o[prefix + key] = v;
+ }
} else {
- for (var key in curT.values[0])
- o[prefix + key] = v;
+ o[curT.keyfield] = v;
}
- } else {
- o[curT.keyfield] = v;
+ return o;
}
- return o;
-}
-function makeObject(v) {
- var o = {};
- var p = o;
+ function makeObject(v) {
+ var o = {};
+ var p = o;
- var keys = curT.keyfield.split('.');
- for(var i=0; i<keys.length-1; i++) {
- p[keys[i]] = {};
- p = p[keys[i]];
- }
-
- p[keys[i]] = v;
+ var keys = curT.keyfield.split('.');
+ for (var i = 0; i < keys.length - 1; i++) {
+ p[keys[i]] = {};
+ p = p[keys[i]];
+ }
- return o;
-}
+ p[keys[i]] = v;
-function makeInQuery() {
- if (curT.compound) {
- // cheating a bit...
- return {'o.a': { $in: [1, 2] }};
- } else {
- return makeObjectDotted({$in: curT.values});
+ return o;
}
-}
-function getKey(o) {
- var keys = curT.keyfield.split('.');
- for(var i = 0; i < keys.length; i++) {
- o = o[keys[i]];
+ function makeInQuery() {
+ if (curT.compound) {
+ // cheating a bit...
+ return {
+ 'o.a': {$in: [1, 2]}
+ };
+ } else {
+ return makeObjectDotted({$in: curT.values});
+ }
}
- return o;
-}
-
-Random.setRandomSeed();
-for (var i = 0; i < types.length; i++) {
- curT = types[i];
-
- print("\n\n#### Now Testing " + curT.name + " ####\n\n");
-
- var shortName = "foo_" + curT.name;
- var longName = "test." + shortName;
-
- var c = db[shortName];
- s.adminCommand({ shardcollection: longName, key: makeObjectDotted(1) });
-
- assert.eq(1, s.config.chunks.find({ ns: longName }).count(), curT.name + " sanity check A");
-
- var unsorted = Array.shuffle(Object.extend([], curT.values));
- c.insert(makeObject(unsorted[0]));
- for (var x = 1; x < unsorted.length; x++) {
- c.save(makeObject(unsorted[x]));
+ function getKey(o) {
+ var keys = curT.keyfield.split('.');
+ for (var i = 0; i < keys.length; i++) {
+ o = o[keys[i]];
+ }
+ return o;
}
- assert.eq(6, c.find().count(), curT.name + " basic count");
+ Random.setRandomSeed();
- s.adminCommand({ split: longName, middle: makeObjectDotted(curT.values[0]) });
- s.adminCommand({ split: longName, middle: makeObjectDotted(curT.values[2]) });
- s.adminCommand({ split: longName, middle: makeObjectDotted(curT.values[5]) });
+ for (var i = 0; i < types.length; i++) {
+ curT = types[i];
- s.adminCommand({ movechunk: longName,
- find: makeObjectDotted(curT.values[2]),
- to: secondary.getMongo().name,
- _waitForDelete: true });
+ print("\n\n#### Now Testing " + curT.name + " ####\n\n");
- s.printChunks();
+ var shortName = "foo_" + curT.name;
+ var longName = "test." + shortName;
- assert.eq(3, primary[shortName].find().toArray().length, curT.name + " primary count");
- assert.eq(3, secondary[shortName].find().toArray().length, curT.name + " secondary count");
+ var c = db[shortName];
+ s.adminCommand({shardcollection: longName, key: makeObjectDotted(1)});
- assert.eq(6, c.find().toArray().length, curT.name + " total count");
- assert.eq(6, c.find().sort(makeObjectDotted(1)).toArray().length, curT.name + " total count sorted");
+ assert.eq(1, s.config.chunks.find({ns: longName}).count(), curT.name + " sanity check A");
- assert.eq(6, c.find().sort(makeObjectDotted(1)).count(), curT.name + " total count with count()");
-
- assert.eq(2, c.find({$or:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).count(), curT.name + " $or count()");
- assert.eq(2, c.find({$or:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).itcount(), curT.name + " $or itcount()");
- assert.eq(4, c.find({$nor:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).count(), curT.name + " $nor count()");
- assert.eq(4, c.find({$nor:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).itcount(), curT.name + " $nor itcount()");
-
- var stats = c.stats();
- printjson(stats);
- assert.eq(6, stats.count, curT.name + " total count with stats()");
+ var unsorted = Array.shuffle(Object.extend([], curT.values));
+ c.insert(makeObject(unsorted[0]));
+ for (var x = 1; x < unsorted.length; x++) {
+ c.save(makeObject(unsorted[x]));
+ }
- var count = 0;
- for (var shard in stats.shards) {
- count += stats.shards[shard].count;
+ assert.eq(6, c.find().count(), curT.name + " basic count");
+
+ s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[0])});
+ s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[2])});
+ s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[5])});
+
+ s.adminCommand({
+ movechunk: longName,
+ find: makeObjectDotted(curT.values[2]),
+ to: secondary.getMongo().name,
+ _waitForDelete: true
+ });
+
+ s.printChunks();
+
+ assert.eq(3, primary[shortName].find().toArray().length, curT.name + " primary count");
+ assert.eq(3, secondary[shortName].find().toArray().length, curT.name + " secondary count");
+
+ assert.eq(6, c.find().toArray().length, curT.name + " total count");
+ assert.eq(6,
+ c.find().sort(makeObjectDotted(1)).toArray().length,
+ curT.name + " total count sorted");
+
+ assert.eq(
+ 6, c.find().sort(makeObjectDotted(1)).count(), curT.name + " total count with count()");
+
+ assert.eq(
+ 2,
+ c.find({$or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
+ .count(),
+ curT.name + " $or count()");
+ assert.eq(
+ 2,
+ c.find({$or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
+ .itcount(),
+ curT.name + " $or itcount()");
+ assert.eq(
+ 4,
+ c.find({$nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
+ .count(),
+ curT.name + " $nor count()");
+ assert.eq(
+ 4,
+ c.find({$nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
+ .itcount(),
+ curT.name + " $nor itcount()");
+
+ var stats = c.stats();
+ printjson(stats);
+ assert.eq(6, stats.count, curT.name + " total count with stats()");
+
+ var count = 0;
+ for (var shard in stats.shards) {
+ count += stats.shards[shard].count;
+ }
+ assert.eq(6, count, curT.name + " total count with stats() sum");
+
+ assert.eq(curT.values,
+ c.find().sort(makeObjectDotted(1)).toArray().map(getKey),
+ curT.name + " sort 1");
+ assert.eq(curT.values,
+ c.find(makeInQuery()).sort(makeObjectDotted(1)).toArray().map(getKey),
+ curT.name + " sort 1 - $in");
+ assert.eq(curT.values.reverse(),
+ c.find().sort(makeObjectDotted(-1)).toArray().map(getKey),
+ curT.name + " sort 2");
+
+ assert.eq(0, c.find({xx: 17}).sort({zz: 1}).count(), curT.name + " xx 0a ");
+ assert.eq(0, c.find({xx: 17}).sort(makeObjectDotted(1)).count(), curT.name + " xx 0b ");
+ assert.eq(0, c.find({xx: 17}).count(), curT.name + " xx 0c ");
+ assert.eq(0, c.find({xx: {$exists: true}}).count(), curT.name + " xx 1 ");
+
+ c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}});
+ assert.eq(1, c.find({xx: {$exists: true}}).count(), curT.name + " xx 2 ");
+ assert.eq(curT.values[3], getKey(c.findOne({xx: 17})), curT.name + " xx 3 ");
+
+ assert.writeOK(
+ c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}}, {upsert: true}));
+
+ assert.commandWorked(c.ensureIndex({_id: 1}, {unique: true}));
+
+ // multi update
+ var mysum = 0;
+ c.find().forEach(function(z) {
+ mysum += z.xx || 0;
+ });
+ assert.eq(17, mysum, curT.name + " multi update pre");
+
+ c.update({}, {$inc: {xx: 1}}, false, true);
+
+ var mysum = 0;
+ c.find().forEach(function(z) {
+ mysum += z.xx || 0;
+ });
+ assert.eq(23, mysum, curT.name + " multi update");
}
- assert.eq(6, count, curT.name + " total count with stats() sum");
-
- assert.eq(curT.values, c.find().sort(makeObjectDotted(1)).toArray().map(getKey), curT.name + " sort 1");
- assert.eq(curT.values, c.find(makeInQuery()).sort(makeObjectDotted(1)).toArray().map(getKey), curT.name + " sort 1 - $in");
- assert.eq(curT.values.reverse(), c.find().sort(makeObjectDotted(-1)).toArray().map(getKey), curT.name + " sort 2");
-
- assert.eq(0, c.find({ xx: 17 }).sort({ zz: 1 }).count(), curT.name + " xx 0a ");
- assert.eq(0, c.find({ xx: 17 }).sort(makeObjectDotted(1)).count(), curT.name + " xx 0b ");
- assert.eq(0, c.find({ xx: 17 }).count(), curT.name + " xx 0c ");
- assert.eq(0, c.find({ xx: { $exists: true } }).count(), curT.name + " xx 1 ");
-
- c.update(makeObjectDotted(curT.values[3]), { $set: { xx: 17 } });
- assert.eq(1, c.find({ xx: { $exists: true } }).count(), curT.name + " xx 2 ");
- assert.eq(curT.values[3], getKey(c.findOne({ xx: 17 })), curT.name + " xx 3 ");
-
- assert.writeOK(
- c.update(makeObjectDotted(curT.values[3]), { $set: { xx: 17 }}, { upsert: true }));
-
- assert.commandWorked(c.ensureIndex({ _id: 1 }, { unique: true }));
-
- // multi update
- var mysum = 0;
- c.find().forEach(function(z) {
- mysum += z.xx || 0;
- });
- assert.eq(17, mysum, curT.name + " multi update pre");
-
- c.update({}, { $inc: { xx: 1 } }, false, true);
-
- var mysum = 0;
- c.find().forEach(function(z) { mysum += z.xx || 0; });
- assert.eq(23, mysum, curT.name + " multi update");
-}
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index c5b2e88b694..414e056bf1f 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -1,51 +1,71 @@
(function() {
-var s = new ShardingTest({ name: "keystring", shards: 2 });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
-
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-seconday = s.getOther( primary ).getDB( "test" );
-
-assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
-
-var db = s.getDB( "test" );
-
-db.foo.save( { name : "eliot" } );
-db.foo.save( { name : "sara" } );
-db.foo.save( { name : "bob" } );
-db.foo.save( { name : "joe" } );
-db.foo.save( { name : "mark" } );
-db.foo.save( { name : "allan" } );
-
-assert.eq( 6 , db.foo.find().count() , "basic count" );
-
-s.adminCommand({ split: "test.foo", middle: { name: "allan" }});
-s.adminCommand({ split: "test.foo", middle: { name: "sara" }});
-s.adminCommand({ split: "test.foo", middle: { name: "eliot" }});
-
-s.adminCommand( { movechunk : "test.foo" , find : { name : "eliot" } , to : seconday.getMongo().name, _waitForDelete : true } );
-
-s.printChunks();
-
-assert.eq( 3 , primary.foo.find().toArray().length , "primary count" );
-assert.eq( 3 , seconday.foo.find().toArray().length , "secondary count" );
-
-assert.eq( 6 , db.foo.find().toArray().length , "total count" );
-assert.eq( 6 , db.foo.find().sort( { name : 1 } ).toArray().length , "total count sorted" );
-
-assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "total count with count()" );
-
-assert.eq( "allan,bob,eliot,joe,mark,sara" , db.foo.find().sort( { name : 1 } ).toArray().map( function(z){ return z.name; } ) , "sort 1" );
-assert.eq( "sara,mark,joe,eliot,bob,allan" , db.foo.find().sort( { name : -1 } ).toArray().map( function(z){ return z.name; } ) , "sort 2" );
-
-// make sure we can't foce a split on an extreme key
-// [allan->joe)
-assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { name : "allan" } } ); } );
-assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { name : "eliot" } } ); } );
-
-s.stop();
+ var s = new ShardingTest({name: "keystring", shards: 2});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
+
+ primary = s.getPrimaryShard("test").getDB("test");
+ seconday = s.getOther(primary).getDB("test");
+
+ assert.eq(1, s.config.chunks.count(), "sanity check A");
+
+ var db = s.getDB("test");
+
+ db.foo.save({name: "eliot"});
+ db.foo.save({name: "sara"});
+ db.foo.save({name: "bob"});
+ db.foo.save({name: "joe"});
+ db.foo.save({name: "mark"});
+ db.foo.save({name: "allan"});
+
+ assert.eq(6, db.foo.find().count(), "basic count");
+
+ s.adminCommand({split: "test.foo", middle: {name: "allan"}});
+ s.adminCommand({split: "test.foo", middle: {name: "sara"}});
+ s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
+
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {name: "eliot"},
+ to: seconday.getMongo().name,
+ _waitForDelete: true
+ });
+
+ s.printChunks();
+
+ assert.eq(3, primary.foo.find().toArray().length, "primary count");
+ assert.eq(3, seconday.foo.find().toArray().length, "secondary count");
+
+ assert.eq(6, db.foo.find().toArray().length, "total count");
+ assert.eq(6, db.foo.find().sort({name: 1}).toArray().length, "total count sorted");
+
+ assert.eq(6, db.foo.find().sort({name: 1}).count(), "total count with count()");
+
+ assert.eq("allan,bob,eliot,joe,mark,sara",
+ db.foo.find().sort({name: 1}).toArray().map(function(z) {
+ return z.name;
+ }),
+ "sort 1");
+ assert.eq("sara,mark,joe,eliot,bob,allan",
+ db.foo.find()
+ .sort({name: -1})
+ .toArray()
+ .map(function(z) {
+ return z.name;
+ }),
+ "sort 2");
+
+    // make sure we can't force a split on an extreme key
+ // [allan->joe)
+ assert.throws(function() {
+ s.adminCommand({split: "test.foo", middle: {name: "allan"}});
+ });
+ assert.throws(function() {
+ s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
+ });
+
+ s.stop();
})();
diff --git a/jstests/sharding/lagged_config_secondary.js b/jstests/sharding/lagged_config_secondary.js
index a2756ad7e70..5c28f79f24f 100644
--- a/jstests/sharding/lagged_config_secondary.js
+++ b/jstests/sharding/lagged_config_secondary.js
@@ -2,37 +2,36 @@
* Test that mongos times out when the config server replica set only contains nodes that
* are behind the majority opTime.
*/
-(function(){
-var st = new ShardingTest({ shards: 1 });
+(function() {
+ var st = new ShardingTest({shards: 1});
-var configSecondaryList = st.configRS.getSecondaries();
-var configSecondaryToKill = configSecondaryList[0];
-var delayedConfigSecondary = configSecondaryList[1];
+ var configSecondaryList = st.configRS.getSecondaries();
+ var configSecondaryToKill = configSecondaryList[0];
+ var delayedConfigSecondary = configSecondaryList[1];
-delayedConfigSecondary.getDB('admin').adminCommand({ configureFailPoint: 'rsSyncApplyStop',
- mode: 'alwaysOn' });
+ delayedConfigSecondary.getDB('admin')
+ .adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
-var testDB = st.s.getDB('test');
-testDB.adminCommand({ enableSharding: 'test' });
-testDB.adminCommand({ shardCollection: 'test.user', key: { _id: 1 }});
+ var testDB = st.s.getDB('test');
+ testDB.adminCommand({enableSharding: 'test'});
+ testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
-testDB.user.insert({ _id: 1 });
+ testDB.user.insert({_id: 1});
-st.configRS.stopMaster();
-MongoRunner.stopMongod(configSecondaryToKill.port);
+ st.configRS.stopMaster();
+ MongoRunner.stopMongod(configSecondaryToKill.port);
-// Clears all cached info so mongos will be forced to query from the config.
-st.s.adminCommand({ flushRouterConfig: 1 });
+ // Clears all cached info so mongos will be forced to query from the config.
+ st.s.adminCommand({flushRouterConfig: 1});
-var exception = assert.throws(function() {
- testDB.user.findOne();
-});
+ var exception = assert.throws(function() {
+ testDB.user.findOne();
+ });
-assert.eq(ErrorCodes.ExceededTimeLimit, exception.code);
+ assert.eq(ErrorCodes.ExceededTimeLimit, exception.code);
-var msg = 'Command on database config timed out waiting for read concern to be satisfied.';
-assert.soon(
- function() {
+ var msg = 'Command on database config timed out waiting for read concern to be satisfied.';
+ assert.soon(function() {
var logMessages =
assert.commandWorked(delayedConfigSecondary.adminCommand({getLog: 'global'})).log;
for (var i = 0; i < logMessages.length; i++) {
@@ -41,12 +40,8 @@ assert.soon(
}
}
return false;
- },
- 'Did not see any log entries containing the following message: ' + msg,
- 60000,
- 300
-);
+ }, 'Did not see any log entries containing the following message: ' + msg, 60000, 300);
-st.stop();
+ st.stop();
}());
diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js
index ae02733395b..3318142ecac 100644
--- a/jstests/sharding/large_chunk.js
+++ b/jstests/sharding/large_chunk.js
@@ -1,65 +1,66 @@
// Where we test operations dealing with large chunks
(function() {
-// Starts a new sharding environment limiting the chunksize to 1GB (highest value allowed).
-// Note that early splitting will start with a 1/4 of max size currently.
-var s = new ShardingTest({ name: 'large_chunk',
- shards: 2,
- other: { chunkSize: 1024 } });
+ // Starts a new sharding environment limiting the chunksize to 1GB (highest value allowed).
+ // Note that early splitting will start with a 1/4 of max size currently.
+ var s = new ShardingTest({name: 'large_chunk', shards: 2, other: {chunkSize: 1024}});
-// take the balancer out of the equation
-s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
-s.config.settings.find().forEach(printjson);
+ // take the balancer out of the equation
+ s.config.settings.update({_id: "balancer"}, {$set: {stopped: true}}, true);
+ s.config.settings.find().forEach(printjson);
-db = s.getDB( "test" );
+ db = s.getDB("test");
-//
-// Step 1 - Test moving a large chunk
-//
+ //
+ // Step 1 - Test moving a large chunk
+ //
-// Turn on sharding on the 'test.foo' collection and generate a large chunk
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
+ // Turn on sharding on the 'test.foo' collection and generate a large chunk
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
-bigString = "";
-while ( bigString.length < 10000 )
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+ bigString = "";
+ while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-inserted = 0;
-num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while ( inserted < ( 400 * 1024 * 1024 ) ){
- bulk.insert({ _id: num++, s: bigString });
- inserted += bigString.length;
-}
-assert.writeOK(bulk.execute());
+ inserted = 0;
+ num = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (inserted < (400 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+ }
+ assert.writeOK(bulk.execute());
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-assert.eq( 1 , s.config.chunks.count() , "step 1 - need one large chunk" );
+ assert.eq(1, s.config.chunks.count(), "step 1 - need one large chunk");
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-secondary = s.getOther( primary ).getDB( "test" );
+ primary = s.getPrimaryShard("test").getDB("test");
+ secondary = s.getOther(primary).getDB("test");
-// Make sure that we don't move that chunk if it goes past what we consider the maximum chunk size
-print("Checkpoint 1a");
-max = 200 * 1024 * 1024;
-assert.throws(function() {
- s.adminCommand({ movechunk: "test.foo",
- find: { _id: 1 },
- to: secondary.getMongo().name,
- maxChunkSizeBytes: max });
+ // Make sure that we don't move that chunk if it goes past what we consider the maximum chunk
+ // size
+ print("Checkpoint 1a");
+ max = 200 * 1024 * 1024;
+ assert.throws(function() {
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {_id: 1},
+ to: secondary.getMongo().name,
+ maxChunkSizeBytes: max
+ });
});
-// Move the chunk
-print("checkpoint 1b");
-before = s.config.chunks.find().toArray();
-s.adminCommand( { movechunk : "test.foo" , find : { _id : 1 } , to : secondary.getMongo().name } );
-after = s.config.chunks.find().toArray();
-assert.neq( before[0].shard , after[0].shard , "move chunk did not work" );
+ // Move the chunk
+ print("checkpoint 1b");
+ before = s.config.chunks.find().toArray();
+ s.adminCommand({movechunk: "test.foo", find: {_id: 1}, to: secondary.getMongo().name});
+ after = s.config.chunks.find().toArray();
+ assert.neq(before[0].shard, after[0].shard, "move chunk did not work");
-s.config.changelog.find().forEach( printjson );
+ s.config.changelog.find().forEach(printjson);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/large_skip_one_shard.js b/jstests/sharding/large_skip_one_shard.js
index 49e6551dec0..99c73eb99b3 100644
--- a/jstests/sharding/large_skip_one_shard.js
+++ b/jstests/sharding/large_skip_one_shard.js
@@ -1,35 +1,30 @@
/**
* Tests that a sharded query targeted to a single shard will use passed-in skip.
*/
-var st = new ShardingTest({ shards : 2, mongos : 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
var mongos = st.s0;
-var shards = mongos.getDB( "config" ).shards.find().toArray();
+var shards = mongos.getDB("config").shards.find().toArray();
-var admin = mongos.getDB( "admin" );
-var collSharded = mongos.getCollection( "testdb.collSharded" );
-var collUnSharded = mongos.getCollection( "testdb.collUnSharded" );
+var admin = mongos.getDB("admin");
+var collSharded = mongos.getCollection("testdb.collSharded");
+var collUnSharded = mongos.getCollection("testdb.collUnSharded");
// Set up a sharded and unsharded collection
-assert( admin.runCommand({ enableSharding : collSharded.getDB() + "" }).ok );
-printjson( admin.runCommand({ movePrimary : collSharded.getDB() + "", to : shards[0]._id }) );
-assert( admin.runCommand({ shardCollection : collSharded + "", key : { _id : 1 } }).ok );
-assert( admin.runCommand({ split : collSharded + "", middle : { _id : 0 } }).ok );
-assert( admin.runCommand({ moveChunk : collSharded + "",
- find : { _id : 0 },
- to : shards[1]._id }).ok );
-
-function testSelectWithSkip(coll){
+assert(admin.runCommand({enableSharding: collSharded.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: collSharded.getDB() + "", to: shards[0]._id}));
+assert(admin.runCommand({shardCollection: collSharded + "", key: {_id: 1}}).ok);
+assert(admin.runCommand({split: collSharded + "", middle: {_id: 0}}).ok);
+assert(admin.runCommand({moveChunk: collSharded + "", find: {_id: 0}, to: shards[1]._id}).ok);
+function testSelectWithSkip(coll) {
for (var i = -100; i < 100; i++) {
- assert.writeOK(coll.insert({ _id : i }));
+ assert.writeOK(coll.insert({_id: i}));
}
// Run a query which only requires 5 results from a single shard
- var explain = coll.find({ _id : { $gt : 1 }}).sort({ _id : 1 })
- .skip(90)
- .limit(5)
- .explain("executionStats");
+ var explain =
+ coll.find({_id: {$gt: 1}}).sort({_id: 1}).skip(90).limit(5).explain("executionStats");
assert.lt(explain.executionStats.nReturned, 90);
}
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
index 5e7f1c32ab3..055b5c8b788 100644
--- a/jstests/sharding/limit_push.js
+++ b/jstests/sharding/limit_push.js
@@ -2,53 +2,61 @@
// See: http://jira.mongodb.org/browse/SERVER-1896
(function() {
-var s = new ShardingTest({ name: "limit_push", shards: 2, mongos: 1 });
-var db = s.getDB( "test" );
-
-// Create some data
-for (i=0; i < 100; i++) { db.limit_push.insert({ _id : i, x: i}); }
-db.limit_push.ensureIndex( { x : 1 } );
-assert.eq( 100 , db.limit_push.find().length() , "Incorrect number of documents" );
-
-// Shard the collection
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.limit_push" , key : { x : 1 } } );
-
-// Now split the and move the data between the shards
-s.adminCommand( { split : "test.limit_push", middle : { x : 50 }} );
-s.adminCommand( { moveChunk: "test.limit_push",
- find : { x : 51},
- to : s.getOther( s.getPrimaryShard( "test" ) ).name,
- _waitForDelete : true });
-
-// Check that the chunck have split correctly
-assert.eq( 2 , s.config.chunks.count() , "wrong number of chunks");
-
-// The query is asking for the maximum value below a given value
-// db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
-q = { x : { $lt : 60} };
-
-// Make sure the basic queries are correct
-assert.eq( 60 , db.limit_push.find( q ).count() , "Did not find 60 documents" );
-//rs = db.limit_push.find( q ).sort( { x:-1} ).limit(1)
-//assert.eq( rs , { _id : "1" , x : 59 } , "Did not find document with value 59" );
-
-// Now make sure that the explain shos that each shard is returning a single document as indicated
-// by the "n" element for each shard
-exp = db.limit_push.find( q ).sort( { x:-1} ).limit(1).explain("executionStats");
-printjson( exp );
-
-var execStages = exp.executionStats.executionStages;
-assert.eq("SHARD_MERGE_SORT", execStages.stage, "Expected SHARD_MERGE_SORT as root stage");
-
-var k = 0;
-for (var j in execStages.shards) {
- assert.eq( 1 , execStages.shards[j].executionStages.nReturned,
- "'n' is not 1 from shard000" + k.toString());
- k++;
-}
-
-s.stop();
+ var s = new ShardingTest({name: "limit_push", shards: 2, mongos: 1});
+ var db = s.getDB("test");
+
+ // Create some data
+ for (i = 0; i < 100; i++) {
+ db.limit_push.insert({_id: i, x: i});
+ }
+ db.limit_push.ensureIndex({x: 1});
+ assert.eq(100, db.limit_push.find().length(), "Incorrect number of documents");
+
+ // Shard the collection
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.limit_push", key: {x: 1}});
+
+    // Now split and move the data between the shards
+ s.adminCommand({split: "test.limit_push", middle: {x: 50}});
+ s.adminCommand({
+ moveChunk: "test.limit_push",
+ find: {x: 51},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+ });
+
+    // Check that the chunks have split correctly
+ assert.eq(2, s.config.chunks.count(), "wrong number of chunks");
+
+ // The query is asking for the maximum value below a given value
+ // db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
+ q = {
+ x: {$lt: 60}
+ };
+
+ // Make sure the basic queries are correct
+ assert.eq(60, db.limit_push.find(q).count(), "Did not find 60 documents");
+ // rs = db.limit_push.find( q ).sort( { x:-1} ).limit(1)
+ // assert.eq( rs , { _id : "1" , x : 59 } , "Did not find document with value 59" );
+
+    // Now make sure that the explain shows that each shard is returning a single document as
+ // indicated
+ // by the "n" element for each shard
+ exp = db.limit_push.find(q).sort({x: -1}).limit(1).explain("executionStats");
+ printjson(exp);
+
+ var execStages = exp.executionStats.executionStages;
+ assert.eq("SHARD_MERGE_SORT", execStages.stage, "Expected SHARD_MERGE_SORT as root stage");
+
+ var k = 0;
+ for (var j in execStages.shards) {
+ assert.eq(1,
+ execStages.shards[j].executionStages.nReturned,
+ "'n' is not 1 from shard000" + k.toString());
+ k++;
+ }
+
+ s.stop();
})();
diff --git a/jstests/sharding/listDatabases.js b/jstests/sharding/listDatabases.js
index 240081d642d..f6281a2b025 100644
--- a/jstests/sharding/listDatabases.js
+++ b/jstests/sharding/listDatabases.js
@@ -1,13 +1,13 @@
// tests that listDatabases doesn't show config db on a shard, even if it is there
-var test = new ShardingTest({shards: 1, mongos: 1, other: {chunksize:1}});
+var test = new ShardingTest({shards: 1, mongos: 1, other: {chunksize: 1}});
var mongos = test.s0;
var mongod = test.shard0;
-//grab the config db instance by name
-var getDBSection = function (dbsArray, dbToFind) {
- for(var pos in dbsArray) {
+// grab the config db instance by name
+var getDBSection = function(dbsArray, dbToFind) {
+ for (var pos in dbsArray) {
if (dbsArray[pos].name && dbsArray[pos].name === dbToFind)
return dbsArray[pos];
}
@@ -16,16 +16,16 @@ var getDBSection = function (dbsArray, dbToFind) {
var dbInConfigEntryCheck = function(dbEntry) {
assert.neq(null, dbEntry);
- assert(!dbEntry.shards); // db should not be in shard.
+ assert(!dbEntry.shards); // db should not be in shard.
assert.neq(null, dbEntry.sizeOnDisk);
assert.eq(false, dbEntry.empty);
};
-assert.writeOK(mongos.getDB("blah").foo.insert({ _id: 1 }));
-assert.writeOK(mongos.getDB("foo").foo.insert({ _id: 1 }));
-assert.writeOK(mongos.getDB("raw").foo.insert({ _id: 1 }));
+assert.writeOK(mongos.getDB("blah").foo.insert({_id: 1}));
+assert.writeOK(mongos.getDB("foo").foo.insert({_id: 1}));
+assert.writeOK(mongos.getDB("raw").foo.insert({_id: 1}));
-//verify that the config db is not on a shard
+// verify that the config db is not on a shard
var res = mongos.adminCommand("listDatabases");
var dbArray = res.databases;
dbInConfigEntryCheck(getDBSection(dbArray, "config"));
@@ -39,26 +39,26 @@ var localSection = getDBSection(dbArray, 'local');
assert(!localSection);
// add doc in admin db on the config server.
-assert.writeOK(mongos.getDB('admin').test.insert({ _id: 1 }));
+assert.writeOK(mongos.getDB('admin').test.insert({_id: 1}));
res = mongos.adminCommand("listDatabases");
dbArray = res.databases;
dbInConfigEntryCheck(getDBSection(dbArray, "config"));
dbInConfigEntryCheck(getDBSection(dbArray, 'admin'));
-//add doc in config/admin db on the shard
-mongod.getDB("config").foo.insert({_id:1});
-mongod.getDB("admin").foo.insert({_id:1});
+// add doc in config/admin db on the shard
+mongod.getDB("config").foo.insert({_id: 1});
+mongod.getDB("admin").foo.insert({_id: 1});
-//add doc in admin db (via mongos)
-mongos.getDB("admin").foo.insert({_id:1});
+// add doc in admin db (via mongos)
+mongos.getDB("admin").foo.insert({_id: 1});
-//verify that the config db is not on a shard
+// verify that the config db is not on a shard
res = mongos.adminCommand("listDatabases");
dbArray = res.databases;
-//check config db
+// check config db
assert(getDBSection(dbArray, "config"), "config db not found! 2");
assert(!getDBSection(dbArray, "config").shards, "config db is on a shard! 2");
-//check admin db
+// check admin db
assert(getDBSection(dbArray, "admin"), "admin db not found! 2");
assert(!getDBSection(dbArray, "admin").shards, "admin db is on a shard! 2");
diff --git a/jstests/sharding/listshards.js b/jstests/sharding/listshards.js
index b4c87eda7ab..77ecb53695d 100644
--- a/jstests/sharding/listshards.js
+++ b/jstests/sharding/listshards.js
@@ -2,69 +2,67 @@
// Test the listShards command by adding stand-alone and replica-set shards to a cluster
//
(function() {
-'use strict';
+ 'use strict';
-function checkShardName(shardName, shardsArray) {
- var found = false;
- shardsArray.forEach(function(shardObj) {
- if (shardObj._id === shardName) {
- found = true;
- return;
- }
- });
- return found;
-}
+ function checkShardName(shardName, shardsArray) {
+ var found = false;
+ shardsArray.forEach(function(shardObj) {
+ if (shardObj._id === shardName) {
+ found = true;
+ return;
+ }
+ });
+ return found;
+ }
-var shardTest = new ShardingTest({ name: 'listShardsTest',
- shards: 1,
- mongos: 1,
- other: { useHostname: true } });
+ var shardTest = new ShardingTest(
+ {name: 'listShardsTest', shards: 1, mongos: 1, other: {useHostname: true}});
-var mongos = shardTest.s0;
-var res = mongos.adminCommand('listShards');
-assert.commandWorked(res, 'listShards command failed');
-var shardsArray = res.shards;
-assert.eq(shardsArray.length, 1);
+ var mongos = shardTest.s0;
+ var res = mongos.adminCommand('listShards');
+ assert.commandWorked(res, 'listShards command failed');
+ var shardsArray = res.shards;
+ assert.eq(shardsArray.length, 1);
-// add standalone mongod
-var standaloneShard = MongoRunner.runMongod({useHostName: true});
-res = shardTest.admin.runCommand({ addShard: standaloneShard.host, name: 'standalone' });
-assert.commandWorked(res, 'addShard command failed');
-res = mongos.adminCommand('listShards');
-assert.commandWorked(res, 'listShards command failed');
-shardsArray = res.shards;
-assert.eq(shardsArray.length, 2);
-assert(checkShardName('standalone', shardsArray),
- 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
+ // add standalone mongod
+ var standaloneShard = MongoRunner.runMongod({useHostName: true});
+ res = shardTest.admin.runCommand({addShard: standaloneShard.host, name: 'standalone'});
+ assert.commandWorked(res, 'addShard command failed');
+ res = mongos.adminCommand('listShards');
+ assert.commandWorked(res, 'listShards command failed');
+ shardsArray = res.shards;
+ assert.eq(shardsArray.length, 2);
+ assert(checkShardName('standalone', shardsArray),
+ 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
-// add replica set named 'repl'
-var rs1 = new ReplSetTest({ name: 'repl', nodes: 1, useHostName: true});
-rs1.startSet();
-rs1.initiate();
-res = shardTest.admin.runCommand({ addShard: rs1.getURL()});
-assert.commandWorked(res, 'addShard command failed');
-res = mongos.adminCommand('listShards');
-assert.commandWorked(res, 'listShards command failed');
-shardsArray = res.shards;
-assert.eq(shardsArray.length, 3);
-assert(checkShardName('repl', shardsArray),
- 'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
+ // add replica set named 'repl'
+ var rs1 = new ReplSetTest({name: 'repl', nodes: 1, useHostName: true});
+ rs1.startSet();
+ rs1.initiate();
+ res = shardTest.admin.runCommand({addShard: rs1.getURL()});
+ assert.commandWorked(res, 'addShard command failed');
+ res = mongos.adminCommand('listShards');
+ assert.commandWorked(res, 'listShards command failed');
+ shardsArray = res.shards;
+ assert.eq(shardsArray.length, 3);
+ assert(checkShardName('repl', shardsArray),
+ 'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
-// remove 'repl' shard
-assert.soon(function() {
- var res = shardTest.admin.runCommand({ removeShard: 'repl' });
- assert.commandWorked(res, 'removeShard command failed');
- return res.state === 'completed';
-}, 'failed to remove the replica set shard');
+ // remove 'repl' shard
+ assert.soon(function() {
+ var res = shardTest.admin.runCommand({removeShard: 'repl'});
+ assert.commandWorked(res, 'removeShard command failed');
+ return res.state === 'completed';
+ }, 'failed to remove the replica set shard');
-res = mongos.adminCommand('listShards');
-assert.commandWorked(res, 'listShards command failed');
-shardsArray = res.shards;
-assert.eq(shardsArray.length, 2);
-assert(!checkShardName('repl', shardsArray),
- 'listShards command returned removed replica set shard: ' + tojson(shardsArray));
+ res = mongos.adminCommand('listShards');
+ assert.commandWorked(res, 'listShards command failed');
+ shardsArray = res.shards;
+ assert.eq(shardsArray.length, 2);
+ assert(!checkShardName('repl', shardsArray),
+ 'listShards command returned removed replica set shard: ' + tojson(shardsArray));
-rs1.stopSet();
-shardTest.stop();
+ rs1.stopSet();
+ shardTest.stop();
})();
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 857b33fad9e..448d40c5649 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -1,7 +1,7 @@
-//SERVER-6591: Localhost authentication exception doesn't work right on sharded cluster
+// SERVER-6591: Localhost authentication exception doesn't work right on sharded cluster
//
-//This test is to ensure that localhost authentication works correctly against a sharded
-//cluster whether they are hosted with "localhost" or a hostname.
+// This test is to ensure that localhost authentication works correctly against a sharded
+// cluster whether they are hosted with "localhost" or a hostname.
var replSetName = "replsets_server-6591";
var keyfile = "jstests/libs/key1";
@@ -15,7 +15,7 @@ var createUser = function(mongo) {
};
var addUsersToEachShard = function(st) {
- for(i = 0; i < numShards; i++) {
+ for (i = 0; i < numShards; i++) {
print("============ adding a user to shard " + i);
var d = st["shard" + i];
d.getDB("admin").createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
@@ -23,23 +23,21 @@ var addUsersToEachShard = function(st) {
};
var addShard = function(st, shouldPass) {
- var m = MongoRunner.runMongod({ auth: "", keyFile: keyfile, useHostname: false });
- var res = st.getDB("admin").runCommand({ addShard: m.host });
+ var m = MongoRunner.runMongod({auth: "", keyFile: keyfile, useHostname: false});
+ var res = st.getDB("admin").runCommand({addShard: m.host});
if (shouldPass) {
assert.commandWorked(res, "Add shard");
- }
- else {
+ } else {
assert.commandFailed(res, "Add shard");
}
return m.port;
};
-
var findEmptyShard = function(st, ns) {
- var counts = st.chunkCounts( "foo" );
+ var counts = st.chunkCounts("foo");
- for(shard in counts){
- if(counts[shard] == 0) {
+ for (shard in counts) {
+ if (counts[shard] == 0) {
return shard;
}
}
@@ -52,18 +50,26 @@ var assertCannotRunCommands = function(mongo, st) {
// CRUD
var test = mongo.getDB("test");
- assert.throws( function() { test.system.users.findOne(); });
- assert.writeError(test.foo.save({ _id: 0 }));
- assert.throws( function() { test.foo.findOne({_id:0}); });
- assert.writeError(test.foo.update({ _id: 0 }, { $set: { x: 20 }}));
- assert.writeError(test.foo.remove({ _id: 0 }));
+ assert.throws(function() {
+ test.system.users.findOne();
+ });
+ assert.writeError(test.foo.save({_id: 0}));
+ assert.throws(function() {
+ test.foo.findOne({_id: 0});
+ });
+ assert.writeError(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeError(test.foo.remove({_id: 0}));
// Multi-shard
assert.throws(function() {
test.foo.mapReduce(
- function() { emit(1, 1); },
- function(id, count) { return Array.sum(count); },
- { out: "other" });
+ function() {
+ emit(1, 1);
+ },
+ function(id, count) {
+ return Array.sum(count);
+ },
+ {out: "other"});
});
// Config
@@ -75,40 +81,46 @@ var assertCannotRunCommands = function(mongo, st) {
var res = mongo.getDB("admin").runCommand({
moveChunk: "test.foo",
find: {_id: 1},
- to: "shard0000" // Arbitrary shard.
+ to: "shard0000" // Arbitrary shard.
});
assert.commandFailedWithCode(res, authorizeErrorCode, "moveChunk");
- assert.commandFailedWithCode(mongo.getDB("test").copyDatabase("admin", "admin2"),
- authorizeErrorCode, "copyDatabase");
+ assert.commandFailedWithCode(
+ mongo.getDB("test").copyDatabase("admin", "admin2"), authorizeErrorCode, "copyDatabase");
// Create collection
- assert.commandFailedWithCode(mongo.getDB("test").createCollection(
- "log", { capped: true, size: 5242880, max: 5000 } ),
- authorizeErrorCode, "createCollection");
+ assert.commandFailedWithCode(
+ mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
+ authorizeErrorCode,
+ "createCollection");
// Set/Get system parameters
- var params = [{ param: "journalCommitInterval", val: 200 },
- { param: "logLevel", val: 2 },
- { param: "logUserIds", val: 1 },
- { param: "notablescan", val: 1 },
- { param: "quiet", val: 1 },
- { param: "replApplyBatchSize", val: 10 },
- { param: "replIndexPrefetch", val: "none" },
- { param: "syncdelay", val: 30 },
- { param: "traceExceptions", val: true },
- { param: "sslMode", val: "preferSSL" },
- { param: "clusterAuthMode", val: "sendX509" },
- { param: "userCacheInvalidationIntervalSecs", val: 300 }
- ];
+ var params = [
+ {param: "journalCommitInterval", val: 200},
+ {param: "logLevel", val: 2},
+ {param: "logUserIds", val: 1},
+ {param: "notablescan", val: 1},
+ {param: "quiet", val: 1},
+ {param: "replApplyBatchSize", val: 10},
+ {param: "replIndexPrefetch", val: "none"},
+ {param: "syncdelay", val: 30},
+ {param: "traceExceptions", val: true},
+ {param: "sslMode", val: "preferSSL"},
+ {param: "clusterAuthMode", val: "sendX509"},
+ {param: "userCacheInvalidationIntervalSecs", val: 300}
+ ];
params.forEach(function(p) {
- var cmd = { setParameter: 1 };
+ var cmd = {
+ setParameter: 1
+ };
cmd[p.param] = p.val;
- assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
- authorizeErrorCode, "setParameter: "+p.param);
+ assert.commandFailedWithCode(
+ mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "setParameter: " + p.param);
});
params.forEach(function(p) {
- var cmd = { getParameter: 1 };
+ var cmd = {
+ getParameter: 1
+ };
cmd[p.param] = 1;
- assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
- authorizeErrorCode, "getParameter: "+p.param);
+ assert.commandFailedWithCode(
+ mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "getParameter: " + p.param);
});
};
@@ -121,27 +133,26 @@ var assertCanRunCommands = function(mongo, st) {
// this will throw if it fails
test.system.users.findOne();
- assert.writeOK(test.foo.save({ _id: 0 }));
- assert.writeOK(test.foo.update({ _id: 0 }, { $set: { x: 20 }}));
- assert.writeOK(test.foo.remove({ _id: 0 }));
+ assert.writeOK(test.foo.save({_id: 0}));
+ assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeOK(test.foo.remove({_id: 0}));
// Multi-shard
test.foo.mapReduce(
- function() { emit(1, 1); },
- function(id, count) { return Array.sum(count); },
- { out: "other" }
- );
+ function() {
+ emit(1, 1);
+ },
+ function(id, count) {
+ return Array.sum(count);
+ },
+ {out: "other"});
// Config
// this will throw if it fails
mongo.getDB("config").shards.findOne();
to = findEmptyShard(st, "test.foo");
- var res = mongo.getDB("admin").runCommand({
- moveChunk: "test.foo",
- find: {_id: 1},
- to: to
- });
+ var res = mongo.getDB("admin").runCommand({moveChunk: "test.foo", find: {_id: 1}, to: to});
assert.commandWorked(res);
};
@@ -154,12 +165,12 @@ var setupSharding = function(shardingTest) {
var mongo = shardingTest.s;
print("============ enabling sharding on test.foo.");
- mongo.getDB("admin").runCommand({enableSharding : "test"});
+ mongo.getDB("admin").runCommand({enableSharding: "test"});
shardingTest.ensurePrimaryShard('test', 'shard0001');
- mongo.getDB("admin").runCommand({shardCollection : "test.foo", key : {_id : 1}});
+ mongo.getDB("admin").runCommand({shardCollection: "test.foo", key: {_id: 1}});
var test = mongo.getDB("test");
- for(i = 1; i < 20; i++) {
+ for (i = 1; i < 20; i++) {
test.foo.insert({_id: i});
}
};
@@ -167,13 +178,13 @@ var setupSharding = function(shardingTest) {
var start = function() {
return new ShardingTest({
auth: "",
- keyFile: keyfile,
- shards: numShards,
- chunksize: 1,
- other : {
- nopreallocj: 1,
- useHostname: false // Must use localhost to take advantage of the localhost auth bypass
- }
+ keyFile: keyfile,
+ shards: numShards,
+ chunksize: 1,
+ other: {
+ nopreallocj: 1,
+ useHostname: false // Must use localhost to take advantage of the localhost auth bypass
+ }
});
};
@@ -185,31 +196,25 @@ var shutdown = function(st) {
// ShardingTest.stop does not have a way to provide auth
// information. Therefore, we'll do this manually for now.
- for(i = 0; i < st._mongos.length; i++) {
+ for (i = 0; i < st._mongos.length; i++) {
var port = st["s" + i].port;
- MongoRunner.stopMongos(
- port,
- /*signal*/false,
- { auth : { user: username, pwd: password }}
- );
+ MongoRunner.stopMongos(port,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
}
- for(i = 0; i < st._connections.length; i++) {
+ for (i = 0; i < st._connections.length; i++) {
var port = st["shard" + i].port;
- MongoRunner.stopMongod(
- port,
- /*signal*/false,
- { auth : { user: username, pwd: password }}
- );
+ MongoRunner.stopMongod(port,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
}
- for(i = 0; i < st._configServers.length; i++) {
+ for (i = 0; i < st._configServers.length; i++) {
var c = st["config" + i].port;
- MongoRunner.stopMongod(
- port,
- /*signal*/false,
- { auth : { user: username, pwd: password }}
- );
+ MongoRunner.stopMongod(port,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
}
st.stop();
diff --git a/jstests/sharding/major_version_check.js b/jstests/sharding/major_version_check.js
index cef05411e0c..1b4e1906379 100644
--- a/jstests/sharding/major_version_check.js
+++ b/jstests/sharding/major_version_check.js
@@ -2,53 +2,52 @@
// Tests that only a correct major-version is needed to connect to a shard via mongos
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards : 1, mongos : 2 });
+ var st = new ShardingTest({shards: 1, mongos: 2});
-var mongos = st.s0;
-var staleMongos = st.s1;
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
-var coll = mongos.getCollection( "foo.bar" );
+ var mongos = st.s0;
+ var staleMongos = st.s1;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var coll = mongos.getCollection("foo.bar");
-// Shard collection
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB() + "" }));
-assert.commandWorked(admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }));
+ // Shard collection
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-// Make sure our stale mongos is up-to-date with no splits
-staleMongos.getCollection( coll + "" ).findOne();
+ // Make sure our stale mongos is up-to-date with no splits
+ staleMongos.getCollection(coll + "").findOne();
-// Run one split
-assert.commandWorked(admin.runCommand({ split : coll + "", middle : { _id : 0 } }));
+ // Run one split
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
-// Make sure our stale mongos is not up-to-date with the split
-printjson( admin.runCommand({ getShardVersion : coll + "" }) );
-printjson( staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }) );
+ // Make sure our stale mongos is not up-to-date with the split
+ printjson(admin.runCommand({getShardVersion: coll + ""}));
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-// Compare strings b/c timestamp comparison is a bit weird
-assert.eq( Timestamp( 1, 2 ),
- admin.runCommand({ getShardVersion : coll + "" }).version );
-assert.eq( Timestamp( 1, 0 ),
- staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }).version );
+ // Compare strings b/c timestamp comparison is a bit weird
+ assert.eq(Timestamp(1, 2), admin.runCommand({getShardVersion: coll + ""}).version);
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-// See if our stale mongos is required to catch up to run a findOne on an existing connection
-staleMongos.getCollection( coll + "" ).findOne();
+ // See if our stale mongos is required to catch up to run a findOne on an existing connection
+ staleMongos.getCollection(coll + "").findOne();
-printjson( staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }) );
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-assert.eq( Timestamp( 1, 0 ),
- staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }).version );
-
-// See if our stale mongos is required to catch up to run a findOne on a new connection
-staleMongos = new Mongo( staleMongos.host );
-staleMongos.getCollection( coll + "" ).findOne();
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-printjson( staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }) );
+ // See if our stale mongos is required to catch up to run a findOne on a new connection
+ staleMongos = new Mongo(staleMongos.host);
+ staleMongos.getCollection(coll + "").findOne();
-assert.eq( Timestamp( 1, 0 ),
- staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }).version );
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-st.stop();
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+
+ st.stop();
})();
diff --git a/jstests/sharding/mapReduce_inSharded.js b/jstests/sharding/mapReduce_inSharded.js
index ff1c76a3534..110be371ba9 100644
--- a/jstests/sharding/mapReduce_inSharded.js
+++ b/jstests/sharding/mapReduce_inSharded.js
@@ -6,27 +6,29 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
};
-var st = new ShardingTest({ shards : 2,
- verbose : 1,
- mongos : 1,
- other : { chunkSize: 1, enableBalancer: true }});
+var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-st.adminCommand( { enablesharding : "mrShard" } );
+st.adminCommand({enablesharding: "mrShard"});
st.ensurePrimaryShard('mrShard', 'shard0001');
-st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } );
+st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-var db = st.getDB( "mrShard" );
+var db = st.getDB("mrShard");
var bulk = db.srcSharded.initializeUnorderedBulkOp();
for (j = 0; j < 100; j++) {
for (i = 0; i < 512; i++) {
- bulk.insert({ j: j, i: i });
+ bulk.insert({j: j, i: i});
}
}
assert.writeOK(bulk.execute());
-function map() { emit(this.i, 1); }
-function reduce(key, values) { return Array.sum(values); }
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
// sharded src
var suffix = "InSharded";
@@ -34,26 +36,27 @@ var suffix = "InSharded";
var out = db.srcSharded.mapReduce(map, reduce, "mrBasic" + suffix);
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix } });
+out = db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix}});
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { merge: "mrMerge" + suffix } });
+out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix}});
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { reduce: "mrReduce" + suffix } });
+out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix}});
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { inline: 1 } });
+out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
verifyOutput(out);
assert(out.results != 'undefined', "no results for inline");
-out = db.srcSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix, db: "mrShardOtherDB" } });
+out = db.srcSharded.mapReduce(
+ map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB"}});
verifyOutput(out);
out = db.runCommand({
- mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
+ mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
map: map,
reduce: reduce,
out: "mrBasic" + "srcSharded",
- });
+});
verifyOutput(out);
diff --git a/jstests/sharding/mapReduce_inSharded_outSharded.js b/jstests/sharding/mapReduce_inSharded_outSharded.js
index 1cfce046732..d1aba2599f0 100644
--- a/jstests/sharding/mapReduce_inSharded_outSharded.js
+++ b/jstests/sharding/mapReduce_inSharded_outSharded.js
@@ -6,51 +6,55 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
};
-var st = new ShardingTest({ shards : 2,
- verbose : 1,
- mongos : 1,
- other : { chunkSize: 1, enableBalancer: true }});
+var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-st.adminCommand( { enablesharding : "mrShard" } );
+st.adminCommand({enablesharding: "mrShard"});
st.ensurePrimaryShard('mrShard', 'shard0001');
-st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } );
+st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-var db = st.getDB( "mrShard" );
+var db = st.getDB("mrShard");
var bulk = db.srcSharded.initializeUnorderedBulkOp();
for (j = 0; j < 100; j++) {
for (i = 0; i < 512; i++) {
- bulk.insert({ j: j, i: i });
+ bulk.insert({j: j, i: i});
}
}
assert.writeOK(bulk.execute());
-function map() { emit(this.i, 1); }
-function reduce(key, values) { return Array.sum(values); }
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
// sharded src sharded dst
var suffix = "InShardedOutSharded";
-var out = db.srcSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix, sharded: true } });
+var out =
+ db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix, sharded: true}});
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { merge: "mrMerge" + suffix, sharded: true } });
+out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix, sharded: true}});
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { reduce: "mrReduce" + suffix, sharded: true } });
+out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix, sharded: true}});
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { inline: 1 }});
+out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
verifyOutput(out);
assert(out.results != 'undefined', "no results for inline");
-out = db.srcSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true } });
+out = db.srcSharded.mapReduce(
+ map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true}});
verifyOutput(out);
out = db.runCommand({
- mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
+ mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
map: map,
reduce: reduce,
out: "mrBasic" + "srcSharded",
- });
+});
verifyOutput(out);
diff --git a/jstests/sharding/mapReduce_nonSharded.js b/jstests/sharding/mapReduce_nonSharded.js
index 4e36335047b..40fb098931b 100644
--- a/jstests/sharding/mapReduce_nonSharded.js
+++ b/jstests/sharding/mapReduce_nonSharded.js
@@ -6,27 +6,29 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
};
-var st = new ShardingTest({ shards : 2,
- verbose : 1,
- mongos : 1,
- other : { chunkSize: 1, enableBalancer: true }});
+var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-st.adminCommand( { enablesharding : "mrShard" } );
+st.adminCommand({enablesharding: "mrShard"});
st.ensurePrimaryShard('mrShard', 'shard0001');
-st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } );
+st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-var db = st.getDB( "mrShard" );
+var db = st.getDB("mrShard");
var bulk = db.srcNonSharded.initializeUnorderedBulkOp();
for (j = 0; j < 100; j++) {
for (i = 0; i < 512; i++) {
- bulk.insert({ j: j, i: i });
+ bulk.insert({j: j, i: i});
}
}
assert.writeOK(bulk.execute());
-function map() { emit(this.i, 1); }
-function reduce(key, values) { return Array.sum(values); }
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
// non-sharded in/out
var suffix = "";
@@ -34,26 +36,27 @@ var suffix = "";
out = db.srcNonSharded.mapReduce(map, reduce, "mrBasic" + suffix);
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix } });
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix}});
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { merge: "mrMerge" + suffix } });
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix}});
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { reduce: "mrReduce" + suffix } });
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix}});
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { inline: 1 }});
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {inline: 1}});
verifyOutput(out);
assert(out.results != 'undefined', "no results for inline");
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix, db: "mrShardOtherDB" } });
+out = db.srcNonSharded.mapReduce(
+ map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB"}});
verifyOutput(out);
out = db.runCommand({
- mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
+ mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
map: map,
reduce: reduce,
out: "mrBasic" + "srcNonSharded",
- });
+});
verifyOutput(out);
diff --git a/jstests/sharding/mapReduce_outSharded.js b/jstests/sharding/mapReduce_outSharded.js
index 5ab50c4c877..34cde2b63ef 100644
--- a/jstests/sharding/mapReduce_outSharded.js
+++ b/jstests/sharding/mapReduce_outSharded.js
@@ -6,51 +6,55 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
};
-var st = new ShardingTest({ shards : 2,
- verbose : 1,
- mongos : 1,
- other : { chunkSize: 1, enableBalancer: true }});
+var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-st.adminCommand( { enablesharding : "mrShard" } );
+st.adminCommand({enablesharding: "mrShard"});
st.ensurePrimaryShard('mrShard', 'shard0001');
-st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } );
+st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-var db = st.getDB( "mrShard" );
+var db = st.getDB("mrShard");
var bulk = db.srcNonSharded.initializeUnorderedBulkOp();
for (j = 0; j < 100; j++) {
for (i = 0; i < 512; i++) {
- bulk.insert({ j: j, i: i });
+ bulk.insert({j: j, i: i});
}
}
assert.writeOK(bulk.execute());
-function map() { emit(this.i, 1); }
-function reduce(key, values) { return Array.sum(values); }
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
// non sharded src sharded dst
var suffix = "OutSharded";
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix, sharded: true } });
+out =
+ db.srcNonSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix, sharded: true}});
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { merge: "mrMerge" + suffix, sharded: true } });
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix, sharded: true}});
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { reduce: "mrReduce" + suffix, sharded: true } });
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix, sharded: true}});
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { inline: 1 }});
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {inline: 1}});
verifyOutput(out);
assert(out.results != 'undefined', "no results for inline");
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true } });
+out = db.srcNonSharded.mapReduce(
+ map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true}});
verifyOutput(out);
out = db.runCommand({
- mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
+ mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
map: map,
reduce: reduce,
out: "mrBasic" + "srcNonSharded",
- });
+});
verifyOutput(out);
diff --git a/jstests/sharding/map_reduce_validation.js b/jstests/sharding/map_reduce_validation.js
index 436ff395ece..7ccbf6130ee 100644
--- a/jstests/sharding/map_reduce_validation.js
+++ b/jstests/sharding/map_reduce_validation.js
@@ -1,29 +1,49 @@
-var st = new ShardingTest({ shards: 1 });
+var st = new ShardingTest({shards: 1});
var testDB = st.s.getDB('test');
-var mapFunc = function() { emit(this.x, 1); };
-var reduceFunc = function(key, values) { return values.length; };
+var mapFunc = function() {
+ emit(this.x, 1);
+};
+var reduceFunc = function(key, values) {
+ return values.length;
+};
-assert.commandFailed(testDB.runCommand({ mapReduce: 'user',
- map: mapFunc,
- reduce: reduceFunc,
- out: { inline: 1, sharded: true }}));
+assert.commandFailed(testDB.runCommand(
+ {mapReduce: 'user', map: mapFunc, reduce: reduceFunc, out: {inline: 1, sharded: true}}));
testDB.bar.insert({i: 1});
-assert.commandFailed(testDB.runCommand({ mapReduce: 'bar',
- map: function() {emit(this.i, this.i*3);},
- reduce: function(key, values) {return Array.sum(values);},
- out: { replace: "foo", db: "admin" }}));
+assert.commandFailed(testDB.runCommand({
+ mapReduce: 'bar',
+ map: function() {
+ emit(this.i, this.i * 3);
+ },
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: {replace: "foo", db: "admin"}
+}));
-assert.commandFailed(testDB.runCommand({ mapReduce: 'bar',
- map: function() {emit(this.i, this.i*3);},
- reduce: function(key, values) {return Array.sum(values);},
- out: { replace: "foo", db: "config" }}));
+assert.commandFailed(testDB.runCommand({
+ mapReduce: 'bar',
+ map: function() {
+ emit(this.i, this.i * 3);
+ },
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: {replace: "foo", db: "config"}
+}));
-assert.commandWorked(testDB.runCommand({ mapReduce: 'bar',
- map: function() {emit(this.i, this.i*3);},
- reduce: function(key, values) {return Array.sum(values);},
- out: { replace: "foo", db: "test" }}));
+assert.commandWorked(testDB.runCommand({
+ mapReduce: 'bar',
+ map: function() {
+ emit(this.i, this.i * 3);
+ },
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: {replace: "foo", db: "test"}
+}));
st.stop();
diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js
index 1a000ea3dde..7194c98750c 100644
--- a/jstests/sharding/max_time_ms_sharded.js
+++ b/jstests/sharding/max_time_ms_sharded.js
@@ -5,213 +5,236 @@
// Note that mongos does not time out commands or query ops (which remains responsibility of mongod,
// pending development of an interrupt framework for mongos).
(function() {
-'use strict';
-
-var st = new ShardingTest({shards: 2});
-
-var mongos = st.s0;
-var shards = [st.shard0, st.shard1];
-var coll = mongos.getCollection("foo.bar");
-var admin = mongos.getDB("admin");
-var exceededTimeLimit = 50; // ErrorCodes::ExceededTimeLimit
-var cursor;
-var res;
-
-// Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which forces mongod to
-// throw if it receives an operation with a max time. See fail point declaration for complete
-// description.
-var configureMaxTimeAlwaysTimeOut = function(mode) {
- assert.commandWorked(shards[0].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
- assert.commandWorked(shards[1].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
-};
-
-// Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which prohibits mongod
-// from enforcing time limits. See fail point declaration for complete description.
-var configureMaxTimeNeverTimeOut = function(mode) {
- assert.commandWorked(shards[0].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
- assert.commandWorked(shards[1].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
-};
-
-//
-// Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}.
-//
-assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
-admin.runCommand({movePrimary: coll.getDB().getName(),
- to: "shard0000"});
-assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(),
- key: {_id: 1}}));
-assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
-assert.commandWorked(admin.runCommand({moveChunk: coll.getFullName(),
- find: {_id: 0},
- to: "shard0001"}));
-
-//
-// Insert 100 documents into sharded collection, such that each shard owns 50.
-//
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = -50; i < 50; i++) {
- bulk.insert({ _id: i });
-}
-assert.writeOK(bulk.execute());
-assert.eq(50, shards[0].getCollection(coll.getFullName()).count());
-assert.eq(50, shards[1].getCollection(coll.getFullName()).count());
-
-
-//
-// Test that mongos correctly forwards max time to shards for sharded queries. Uses
-// maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
-//
-
-// Positive test.
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-cursor = coll.find();
-cursor.maxTimeMS(60*1000);
-assert.throws(function() { cursor.next(); },
- [],
- "expected query to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-
-// Negative test.
-configureMaxTimeAlwaysTimeOut("off");
-cursor = coll.find();
-cursor.maxTimeMS(60*1000);
-assert.doesNotThrow(function() { cursor.next(); },
- [],
- "expected query to not hit time limit in mongod");
-
-//
-// Test that mongos correctly times out max time sharded getmore operations. Uses
-// maxTimeNeverTimeOut to ensure mongod doesn't enforce a time limit.
-//
-// TODO: This is unimplemented. A test for this functionality should be written as
-// part of the work for SERVER-19410.
-//
-
-configureMaxTimeNeverTimeOut("alwaysOn");
-
-// Positive test. TODO: see above.
-
-// Negative test. ~10s operation, with a high (1-day) limit.
-cursor = coll.find({$where: function() { sleep(100); return true; }});
-cursor.batchSize(2);
-cursor.maxTimeMS(1000*60*60*24);
-assert.doesNotThrow(function() { cursor.next(); },
- [],
- "did not expect mongos to time out first batch of query");
-assert.doesNotThrow(function() { cursor.itcount(); },
- [],
- "did not expect getmore ops to hit the time limit");
-
-configureMaxTimeNeverTimeOut("off");
-
-//
-// Test that mongos correctly forwards max time to shards for sharded commands. Uses
-// maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
-//
-
-// Positive test for "validate".
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-res = coll.runCommand("validate", {maxTimeMS: 60*1000});
-assert.commandFailed(res,
- "expected validate to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-assert.eq(res["code"],
- exceededTimeLimit,
- "expected code " + exceededTimeLimit + " from validate, instead got: " + tojson(res));
-
-// Negative test for "validate".
-configureMaxTimeAlwaysTimeOut("off");
-assert.commandWorked(coll.runCommand("validate", {maxTimeMS: 60*1000}),
- "expected validate to not hit time limit in mongod");
-
-// Positive test for "count".
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-res = coll.runCommand("count", {maxTimeMS: 60*1000});
-assert.commandFailed(res,
- "expected count to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-assert.eq(res["code"],
- exceededTimeLimit,
- "expected code " + exceededTimeLimit + " from count , instead got: " + tojson(res));
-
-// Negative test for "count".
-configureMaxTimeAlwaysTimeOut("off");
-assert.commandWorked(coll.runCommand("count", {maxTimeMS: 60*1000}),
- "expected count to not hit time limit in mongod");
-
-// Positive test for "collStats".
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-res = coll.runCommand("collStats", {maxTimeMS: 60*1000});
-assert.commandFailed(res,
- "expected collStats to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-assert.eq(res["code"],
- exceededTimeLimit,
- "expected code " + exceededTimeLimit + " from collStats, instead got: " + tojson(res));
-
-// Negative test for "collStats".
-configureMaxTimeAlwaysTimeOut("off");
-assert.commandWorked(coll.runCommand("collStats", {maxTimeMS: 60*1000}),
- "expected collStats to not hit time limit in mongod");
-
-// Positive test for "mapReduce".
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-res = coll.runCommand("mapReduce", {map: function() { emit(0, 0); },
- reduce: function(key, values) { return 0; },
- out: {inline: 1},
- maxTimeMS: 60*1000});
-assert.commandFailed(res,
- "expected mapReduce to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-assert.eq(res["code"],
- exceededTimeLimit,
- "expected code " + exceededTimeLimit + " from mapReduce, instead got: " + tojson(res));
-
-// Negative test for "mapReduce".
-configureMaxTimeAlwaysTimeOut("off");
-assert.commandWorked(coll.runCommand("mapReduce", {map: function() { emit(0, 0); },
- reduce: function(key, values) { return 0; },
- out: {inline: 1},
- maxTimeMS: 60*1000}),
- "expected mapReduce to not hit time limit in mongod");
-
-// Positive test for "aggregate".
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-res = coll.runCommand("aggregate", {pipeline: [],
- maxTimeMS: 60*1000});
-assert.commandFailed(res,
- "expected aggregate to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-assert.eq(res["code"],
- exceededTimeLimit,
- "expected code " + exceededTimeLimit + " from aggregate , instead got: " + tojson(res));
-
-// Negative test for "aggregate".
-configureMaxTimeAlwaysTimeOut("off");
-assert.commandWorked(coll.runCommand("aggregate", {pipeline: [],
- maxTimeMS: 60*1000}),
- "expected aggregate to not hit time limit in mongod");
-
-// Positive test for "moveChunk".
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-res = admin.runCommand({moveChunk: coll.getFullName(),
- find: {_id: 0},
- to: "shard0000",
- maxTimeMS: 1000*60*60*24});
-assert.commandFailed(res,
- "expected moveChunk to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-assert.eq(res["code"],
- exceededTimeLimit,
- "expected code " + exceededTimeLimit + " from moveChunk, instead got: " + tojson(res));
-
-// Negative test for "moveChunk".
-configureMaxTimeAlwaysTimeOut("off");
-assert.commandWorked(admin.runCommand({moveChunk: coll.getFullName(),
- find: {_id: 0},
- to: "shard0000",
- maxTimeMS: 1000*60*60*24}),
- "expected moveChunk to not hit time limit in mongod");
-
-// TODO Test additional commmands.
-
-st.stop();
+ 'use strict';
+
+ var st = new ShardingTest({shards: 2});
+
+ var mongos = st.s0;
+ var shards = [st.shard0, st.shard1];
+ var coll = mongos.getCollection("foo.bar");
+ var admin = mongos.getDB("admin");
+ var exceededTimeLimit = 50; // ErrorCodes::ExceededTimeLimit
+ var cursor;
+ var res;
+
+ // Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which forces mongod
+ // to
+ // throw if it receives an operation with a max time. See fail point declaration for complete
+ // description.
+ var configureMaxTimeAlwaysTimeOut = function(mode) {
+ assert.commandWorked(shards[0].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
+ assert.commandWorked(shards[1].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
+ };
+
+ // Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which prohibits
+ // mongod
+ // from enforcing time limits. See fail point declaration for complete description.
+ var configureMaxTimeNeverTimeOut = function(mode) {
+ assert.commandWorked(shards[0].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
+ assert.commandWorked(shards[1].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
+ };
+
+ //
+ // Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}.
+ //
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
+ admin.runCommand({movePrimary: coll.getDB().getName(), to: "shard0000"});
+ assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: "shard0001"}));
+
+ //
+ // Insert 100 documents into sharded collection, such that each shard owns 50.
+ //
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = -50; i < 50; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
+ assert.eq(50, shards[0].getCollection(coll.getFullName()).count());
+ assert.eq(50, shards[1].getCollection(coll.getFullName()).count());
+
+ //
+ // Test that mongos correctly forwards max time to shards for sharded queries. Uses
+ // maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
+ //
+
+ // Positive test.
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ cursor = coll.find();
+ cursor.maxTimeMS(60 * 1000);
+ assert.throws(function() {
+ cursor.next();
+ }, [], "expected query to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+
+ // Negative test.
+ configureMaxTimeAlwaysTimeOut("off");
+ cursor = coll.find();
+ cursor.maxTimeMS(60 * 1000);
+ assert.doesNotThrow(function() {
+ cursor.next();
+ }, [], "expected query to not hit time limit in mongod");
+
+ //
+ // Test that mongos correctly times out max time sharded getmore operations. Uses
+ // maxTimeNeverTimeOut to ensure mongod doesn't enforce a time limit.
+ //
+ // TODO: This is unimplemented. A test for this functionality should be written as
+ // part of the work for SERVER-19410.
+ //
+
+ configureMaxTimeNeverTimeOut("alwaysOn");
+
+ // Positive test. TODO: see above.
+
+ // Negative test. ~10s operation, with a high (1-day) limit.
+ cursor = coll.find({
+ $where: function() {
+ sleep(100);
+ return true;
+ }
+ });
+ cursor.batchSize(2);
+ cursor.maxTimeMS(1000 * 60 * 60 * 24);
+ assert.doesNotThrow(function() {
+ cursor.next();
+ }, [], "did not expect mongos to time out first batch of query");
+ assert.doesNotThrow(function() {
+ cursor.itcount();
+ }, [], "did not expect getmore ops to hit the time limit");
+
+ configureMaxTimeNeverTimeOut("off");
+
+ //
+ // Test that mongos correctly forwards max time to shards for sharded commands. Uses
+ // maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
+ //
+
+ // Positive test for "validate".
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ res = coll.runCommand("validate", {maxTimeMS: 60 * 1000});
+ assert.commandFailed(
+ res, "expected validate to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+ assert.eq(res["code"],
+ exceededTimeLimit,
+ "expected code " + exceededTimeLimit + " from validate, instead got: " + tojson(res));
+
+ // Negative test for "validate".
+ configureMaxTimeAlwaysTimeOut("off");
+ assert.commandWorked(coll.runCommand("validate", {maxTimeMS: 60 * 1000}),
+ "expected validate to not hit time limit in mongod");
+
+ // Positive test for "count".
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ res = coll.runCommand("count", {maxTimeMS: 60 * 1000});
+ assert.commandFailed(res,
+ "expected count to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+ assert.eq(res["code"],
+ exceededTimeLimit,
+ "expected code " + exceededTimeLimit + " from count , instead got: " + tojson(res));
+
+ // Negative test for "count".
+ configureMaxTimeAlwaysTimeOut("off");
+ assert.commandWorked(coll.runCommand("count", {maxTimeMS: 60 * 1000}),
+ "expected count to not hit time limit in mongod");
+
+ // Positive test for "collStats".
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ res = coll.runCommand("collStats", {maxTimeMS: 60 * 1000});
+ assert.commandFailed(
+ res, "expected collStats to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+ assert.eq(
+ res["code"],
+ exceededTimeLimit,
+ "expected code " + exceededTimeLimit + " from collStats, instead got: " + tojson(res));
+
+ // Negative test for "collStats".
+ configureMaxTimeAlwaysTimeOut("off");
+ assert.commandWorked(coll.runCommand("collStats", {maxTimeMS: 60 * 1000}),
+ "expected collStats to not hit time limit in mongod");
+
+ // Positive test for "mapReduce".
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ res = coll.runCommand("mapReduce",
+ {
+ map: function() {
+ emit(0, 0);
+ },
+ reduce: function(key, values) {
+ return 0;
+ },
+ out: {inline: 1},
+ maxTimeMS: 60 * 1000
+ });
+ assert.commandFailed(
+ res, "expected mapReduce to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+ assert.eq(
+ res["code"],
+ exceededTimeLimit,
+ "expected code " + exceededTimeLimit + " from mapReduce, instead got: " + tojson(res));
+
+ // Negative test for "mapReduce".
+ configureMaxTimeAlwaysTimeOut("off");
+ assert.commandWorked(coll.runCommand("mapReduce",
+ {
+ map: function() {
+ emit(0, 0);
+ },
+ reduce: function(key, values) {
+ return 0;
+ },
+ out: {inline: 1},
+ maxTimeMS: 60 * 1000
+ }),
+ "expected mapReduce to not hit time limit in mongod");
+
+ // Positive test for "aggregate".
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ res = coll.runCommand("aggregate", {pipeline: [], maxTimeMS: 60 * 1000});
+ assert.commandFailed(
+ res, "expected aggregate to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+ assert.eq(
+ res["code"],
+ exceededTimeLimit,
+ "expected code " + exceededTimeLimit + " from aggregate , instead got: " + tojson(res));
+
+ // Negative test for "aggregate".
+ configureMaxTimeAlwaysTimeOut("off");
+ assert.commandWorked(coll.runCommand("aggregate", {pipeline: [], maxTimeMS: 60 * 1000}),
+ "expected aggregate to not hit time limit in mongod");
+
+ // Positive test for "moveChunk".
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ res = admin.runCommand({
+ moveChunk: coll.getFullName(),
+ find: {_id: 0},
+ to: "shard0000",
+ maxTimeMS: 1000 * 60 * 60 * 24
+ });
+ assert.commandFailed(
+ res, "expected moveChunk to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+ assert.eq(
+ res["code"],
+ exceededTimeLimit,
+ "expected code " + exceededTimeLimit + " from moveChunk, instead got: " + tojson(res));
+
+ // Negative test for "moveChunk".
+ configureMaxTimeAlwaysTimeOut("off");
+ assert.commandWorked(admin.runCommand({
+ moveChunk: coll.getFullName(),
+ find: {_id: 0},
+ to: "shard0000",
+ maxTimeMS: 1000 * 60 * 60 * 24
+ }),
+ "expected moveChunk to not hit time limit in mongod");
+
+ // TODO Test additional commmands.
+
+ st.stop();
})();
diff --git a/jstests/sharding/merge_chunks_basic.js b/jstests/sharding/merge_chunks_basic.js
index b8ad0040182..8cdcf05f61a 100644
--- a/jstests/sharding/merge_chunks_basic.js
+++ b/jstests/sharding/merge_chunks_basic.js
@@ -3,62 +3,58 @@
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({mongos:1, shards:2});
+ var st = new ShardingTest({mongos: 1, shards: 2});
-var mongos = st.s0;
+ var mongos = st.s0;
-var kDbName = 'db';
+ var kDbName = 'db';
-var shards = mongos.getCollection('config.shards').find().toArray();
+ var shards = mongos.getCollection('config.shards').find().toArray();
-var shard0 = shards[0]._id;
-var shard1 = shards[1]._id;
+ var shard0 = shards[0]._id;
+ var shard1 = shards[1]._id;
-var ns = kDbName + ".foo";
+ var ns = kDbName + ".foo";
-assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
-st.ensurePrimaryShard(kDbName, shard0);
+ st.ensurePrimaryShard(kDbName, shard0);
-// Fail if invalid namespace.
-assert.commandFailed(mongos.adminCommand({mergeChunks: '', bounds: [ {a: -1}, {a: 1} ]}));
+ // Fail if invalid namespace.
+ assert.commandFailed(mongos.adminCommand({mergeChunks: '', bounds: [{a: -1}, {a: 1}]}));
-// Fail if database does not exist.
-assert.commandFailed(mongos.adminCommand({mergeChunks: 'a.b', bounds: [ {a: -1}, {a: 1} ]}));
+ // Fail if database does not exist.
+ assert.commandFailed(mongos.adminCommand({mergeChunks: 'a.b', bounds: [{a: -1}, {a: 1}]}));
-// Fail if collection is unsharded.
-assert.commandFailed(mongos.adminCommand({mergeChunks: kDbName + '.xxx',
- bounds: [ {a: -1}, {a: 1} ]}));
+ // Fail if collection is unsharded.
+ assert.commandFailed(
+ mongos.adminCommand({mergeChunks: kDbName + '.xxx', bounds: [{a: -1}, {a: 1}]}));
-// Errors if either bounds is not a valid shard key.
-assert.eq(0, mongos.getDB('config').chunks.count({ns: ns}));
-assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {a: 1}}));
-assert.eq(1, mongos.getDB('config').chunks.count({ns: ns}));
-assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 0}}));
-assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: -1}}));
-assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 1}}));
+ // Errors if either bounds is not a valid shard key.
+ assert.eq(0, mongos.getDB('config').chunks.count({ns: ns}));
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {a: 1}}));
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns}));
+ assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 0}}));
+ assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: -1}}));
+ assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 1}}));
-
-assert.commandFailed(mongos.adminCommand({mergeChunks: ns,
- bounds: [ {x: -1}, {a: 1} ]}));
+ assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{x: -1}, {a: 1}]}));
+ // Fail if a wrong key.
+ assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {x: 1}]}));
-// Fail if a wrong key.
-assert.commandFailed(mongos.adminCommand({mergeChunks: ns,
- bounds: [ {a: -1}, {x: 1} ]}));
+ // Fail if chunks do not contain a bound.
+ assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 10}]}));
-// Fail if chunks do not contain a bound.
-assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 10}]}));
+ // Validate metadata.
+ // There are four chunks [{$minKey, -1}, {-1, 0}, {0, 1}, {1, $maxKey}]
+ assert.eq(4, mongos.getDB('config').chunks.count({ns: ns}));
+ assert.commandWorked(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 1}]}));
+ assert.eq(3, mongos.getDB('config').chunks.count({ns: ns}));
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, min: {a: -1}, max: {a: 1}}));
-// Validate metadata.
-// There are four chunks [{$minKey, -1}, {-1, 0}, {0, 1}, {1, $maxKey}]
-assert.eq(4, mongos.getDB('config').chunks.count({ns: ns}));
-assert.commandWorked(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 1}]}));
-assert.eq(3, mongos.getDB('config').chunks.count({ns: ns}));
-assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, min: {a: -1}, max: {a: 1}}));
-
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/merge_chunks_test.js b/jstests/sharding/merge_chunks_test.js
index 2a853bbd1d9..6b00a6532ea 100644
--- a/jstests/sharding/merge_chunks_test.js
+++ b/jstests/sharding/merge_chunks_test.js
@@ -2,121 +2,105 @@
// Tests that merging chunks via mongos works/doesn't work with different chunk configurations
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards : 2, mongos : 2 });
+ var st = new ShardingTest({shards: 2, mongos: 2});
-var mongos = st.s0;
-var staleMongos = st.s1;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
+ var mongos = st.s0;
+ var staleMongos = st.s1;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
+ var coll = mongos.getCollection("foo.bar");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
+ assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+ printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+ assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
-// Create ranges MIN->0,0->10,(hole),20->40,40->50,50->90,(hole),100->110,110->MAX on first shard
-jsTest.log( "Creating ranges..." );
+ // Create ranges MIN->0,0->10,(hole),20->40,40->50,50->90,(hole),100->110,110->MAX on first
+ // shard
+ jsTest.log("Creating ranges...");
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 10 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 20 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 40 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 50 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 90 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 100 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 110 } }).ok );
+ assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 10}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 20}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 40}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 50}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 90}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 100}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 110}}).ok);
-assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 10 }, to : shards[1]._id }).ok );
-assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 90 }, to : shards[1]._id }).ok );
+ assert(admin.runCommand({moveChunk: coll + "", find: {_id: 10}, to: shards[1]._id}).ok);
+ assert(admin.runCommand({moveChunk: coll + "", find: {_id: 90}, to: shards[1]._id}).ok);
-st.printShardingStatus();
+ st.printShardingStatus();
-// Insert some data into each of the consolidated ranges
-assert.writeOK(coll.insert({ _id : 0 }));
-assert.writeOK(coll.insert({ _id : 40 }));
-assert.writeOK(coll.insert({ _id : 110 }));
+ // Insert some data into each of the consolidated ranges
+ assert.writeOK(coll.insert({_id: 0}));
+ assert.writeOK(coll.insert({_id: 40}));
+ assert.writeOK(coll.insert({_id: 110}));
-var staleCollection = staleMongos.getCollection( coll + "" );
+ var staleCollection = staleMongos.getCollection(coll + "");
-jsTest.log( "Trying merges that should fail..." );
+ jsTest.log("Trying merges that should fail...");
-// S0: min->0, 0->10, 20->40, 40->50, 50->90, 100->110, 110->max
-// S1: 10->20, 90->100
+ // S0: min->0, 0->10, 20->40, 40->50, 50->90, 100->110, 110->max
+ // S1: 10->20, 90->100
-// Make sure merging non-exact chunks is invalid
+ // Make sure merging non-exact chunks is invalid
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : MinKey }, { _id : 5 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 5 }, { _id : 10 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 15 }, { _id : 50 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 20 }, { _id : 55 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 105 }, { _id : MaxKey }] }).ok );
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 5}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 5}, {_id: 10}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 15}, {_id: 50}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 55}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 105}, {_id: MaxKey}]}).ok);
-// Make sure merging single chunks is invalid
+ // Make sure merging single chunks is invalid
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : MinKey }, { _id : 0 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 20 }, { _id : 40 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 110 }, { _id : MaxKey }] }).ok );
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 0}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 40}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 110}, {_id: MaxKey}]}).ok);
-// Make sure merging over holes is invalid
+ // Make sure merging over holes is invalid
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 0 }, { _id : 40 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 40 }, { _id : 110 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 50 }, { _id : 110 }] }).ok );
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 0}, {_id: 40}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 40}, {_id: 110}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 50}, {_id: 110}]}).ok);
-// Make sure merging between shards is invalid
+ // Make sure merging between shards is invalid
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 0 }, { _id : 20 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 10 }, { _id : 40 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 40 }, { _id : 100 }] }).ok );
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 0}, {_id: 20}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 10}, {_id: 40}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 40}, {_id: 100}]}).ok);
-assert.eq( 3, staleCollection.find().itcount() );
+ assert.eq(3, staleCollection.find().itcount());
-jsTest.log( "Trying merges that should succeed..." );
+ jsTest.log("Trying merges that should succeed...");
-assert( admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : MinKey }, { _id : 10 }] }).ok );
+ assert(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 10}]}).ok);
-assert.eq( 3, staleCollection.find().itcount() );
+ assert.eq(3, staleCollection.find().itcount());
-// S0: min->10, 20->40, 40->50, 50->90, 100->110, 110->max
-// S1: 10->20, 90->100
+ // S0: min->10, 20->40, 40->50, 50->90, 100->110, 110->max
+ // S1: 10->20, 90->100
-// Make sure merging three chunks is valid.
+ // Make sure merging three chunks is valid.
-jsTest.log(tojson( admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 20 }, { _id : 90 }] }) ));
+ jsTest.log(tojson(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 90}]})));
-// S0: min->10, 20->90, 100->110, 110->max
-// S1: 10->20, 90->100
+ // S0: min->10, 20->90, 100->110, 110->max
+ // S1: 10->20, 90->100
-assert.eq( 3, staleCollection.find().itcount() );
+ assert.eq(3, staleCollection.find().itcount());
-assert( admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 100 }, { _id : MaxKey }] }).ok );
+ assert(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 100}, {_id: MaxKey}]}).ok);
-assert.eq( 3, staleCollection.find().itcount() );
+ assert.eq(3, staleCollection.find().itcount());
-// S0: min->10, 20->90, 100->max
-// S1: 10->20, 90->100
+ // S0: min->10, 20->90, 100->max
+ // S1: 10->20, 90->100
-st.printShardingStatus();
+ st.printShardingStatus();
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/merge_chunks_test_with_md_ops.js b/jstests/sharding/merge_chunks_test_with_md_ops.js
index ddcdfbaf0c2..591413a109c 100644
--- a/jstests/sharding/merge_chunks_test_with_md_ops.js
+++ b/jstests/sharding/merge_chunks_test_with_md_ops.js
@@ -1,57 +1,55 @@
// Tests that merging chunks does not prevent cluster from doing other metadata ops
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 2 });
+ var st = new ShardingTest({shards: 2});
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var shards = mongos.getCollection("config.shards").find().toArray();
-var coll = mongos.getCollection("foo.bar");
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
+ var coll = mongos.getCollection("foo.bar");
-assert.commandWorked(admin.runCommand({ enableSharding: coll.getDB() + "" }));
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
-assert.commandWorked(admin.runCommand({ shardCollection: coll + "", key: { _id: 1 } }));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-st.printShardingStatus();
+ st.printShardingStatus();
-// Split and merge the first chunk repeatedly
-jsTest.log("Splitting and merging repeatedly...");
+ // Split and merge the first chunk repeatedly
+ jsTest.log("Splitting and merging repeatedly...");
-for (var i = 0; i < 5; i++) {
- assert.commandWorked(admin.runCommand({ split: coll + "", middle: { _id: i } }));
- assert.commandWorked(admin.runCommand({ mergeChunks: coll + "",
- bounds: [ { _id: MinKey }, { _id: MaxKey } ] }));
- printjson(mongos.getDB("config").chunks.find().toArray());
-}
+ for (var i = 0; i < 5; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
+ assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+ printjson(mongos.getDB("config").chunks.find().toArray());
+ }
-// Move the first chunk to the other shard
-jsTest.log("Moving to another shard...");
+ // Move the first chunk to the other shard
+ jsTest.log("Moving to another shard...");
-assert.commandWorked(admin.runCommand({ moveChunk: coll + "",
- find: { _id: 0 },
- to: shards[1]._id }));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
-// Split and merge the chunk repeatedly
-jsTest.log("Splitting and merging repeatedly (again)...");
+ // Split and merge the chunk repeatedly
+ jsTest.log("Splitting and merging repeatedly (again)...");
-for (var i = 0; i < 5; i++) {
- assert.commandWorked(admin.runCommand({ split: coll + "", middle: { _id: i } }));
- assert.commandWorked(admin.runCommand({ mergeChunks: coll + "",
- bounds: [{ _id: MinKey }, { _id: MaxKey }] }));
- printjson(mongos.getDB("config").chunks.find().toArray());
-}
+ for (var i = 0; i < 5; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
+ assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+ printjson(mongos.getDB("config").chunks.find().toArray());
+ }
-// Move the chunk back to the original shard
-jsTest.log("Moving to original shard...");
+ // Move the chunk back to the original shard
+ jsTest.log("Moving to original shard...");
-assert.commandWorked(admin.runCommand({ moveChunk: coll + "",
- find: { _id: 0 },
- to: shards[0]._id }));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[0]._id}));
-st.printShardingStatus();
+ st.printShardingStatus();
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 73a292033bc..8895d14c0d6 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -1,80 +1,80 @@
(function() {
-var s = new ShardingTest({ name: "migrateBig",
- shards: 2,
- other: { chunkSize: 1 } });
+ var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}});
-s.config.settings.update( { _id: "balancer" }, { $set : { _waitForDelete : true } } , true);
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
+ s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {x: 1}});
-db = s.getDB( "test" );
-coll = db.foo;
+ db = s.getDB("test");
+ coll = db.foo;
-big = "";
-while ( big.length < 10000 )
- big += "eliot";
+ big = "";
+ while (big.length < 10000)
+ big += "eliot";
-var bulk = coll.initializeUnorderedBulkOp();
-for ( x=0; x<100; x++ ) {
- bulk.insert( { x : x , big : big } );
-}
-assert.writeOK(bulk.execute());
-
-s.printShardingStatus();
-
-s.adminCommand( { split : "test.foo" , middle : { x : 30 } } );
-s.adminCommand( { split : "test.foo" , middle : { x : 66 } } );
-s.adminCommand( { movechunk : "test.foo" ,
- find : { x : 90 } ,
- to : s.getOther( s.getPrimaryShard( "test" ) ).name } );
-
-s.printShardingStatus();
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (x = 0; x < 100; x++) {
+ bulk.insert({x: x, big: big});
+ }
+ assert.writeOK(bulk.execute());
-print( "YO : " + s.getPrimaryShard( "test" ).host );
-direct = new Mongo( s.getPrimaryShard( "test" ).host );
-print( "direct : " + direct );
+ s.printShardingStatus();
-directDB = direct.getDB( "test" );
+ s.adminCommand({split: "test.foo", middle: {x: 30}});
+ s.adminCommand({split: "test.foo", middle: {x: 66}});
+ s.adminCommand(
+ {movechunk: "test.foo", find: {x: 90}, to: s.getOther(s.getPrimaryShard("test")).name});
-for ( done=0; done<2*1024*1024; done+=big.length ){
- assert.writeOK(directDB.foo.insert( { x : 50 + Math.random() , big : big } ));
-}
+ s.printShardingStatus();
-s.printShardingStatus();
+ print("YO : " + s.getPrimaryShard("test").host);
+ direct = new Mongo(s.getPrimaryShard("test").host);
+ print("direct : " + direct);
-assert.throws( function(){
- s.adminCommand({ movechunk : "test.foo" ,
- find : { x : 50 } ,
- to : s.getOther( s.getPrimaryShard( "test" ) ).name });
-}, [], "move should fail" );
+ directDB = direct.getDB("test");
-for ( i=0; i<20; i+= 2 ) {
- try {
- s.adminCommand( { split : "test.foo" , middle : { x : i } } );
+ for (done = 0; done < 2 * 1024 * 1024; done += big.length) {
+ assert.writeOK(directDB.foo.insert({x: 50 + Math.random(), big: big}));
}
- catch ( e ) {
- // we may have auto split on some of these
- // which is ok
- print(e);
+
+ s.printShardingStatus();
+
+ assert.throws(function() {
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {x: 50},
+ to: s.getOther(s.getPrimaryShard("test")).name
+ });
+ }, [], "move should fail");
+
+ for (i = 0; i < 20; i += 2) {
+ try {
+ s.adminCommand({split: "test.foo", middle: {x: i}});
+ } catch (e) {
+ // we may have auto split on some of these
+ // which is ok
+ print(e);
+ }
}
-}
-s.printShardingStatus();
+ s.printShardingStatus();
-s.config.settings.update( { _id: "balancer" }, { $set : { stopped: false } } , true );
+ s.config.settings.update({_id: "balancer"}, {$set: {stopped: false}}, true);
-assert.soon( function(){
- var x = s.chunkDiff( "foo" , "test" );
- print( "chunk diff: " + x );
- return x < 2;
-}, "no balance happened" , 8 * 60 * 1000 , 2000 );
+ assert.soon(function() {
+ var x = s.chunkDiff("foo", "test");
+ print("chunk diff: " + x);
+ return x < 2;
+ }, "no balance happened", 8 * 60 * 1000, 2000);
-assert.soon( function(){ return !s.isAnyBalanceInFlight(); } );
+ assert.soon(function() {
+ return !s.isAnyBalanceInFlight();
+ });
-assert.eq( coll.count() , coll.find().itcount() );
+ assert.eq(coll.count(), coll.find().itcount());
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/migrateBig_balancer.js b/jstests/sharding/migrateBig_balancer.js
index 5512eb883db..cd44a225a62 100644
--- a/jstests/sharding/migrateBig_balancer.js
+++ b/jstests/sharding/migrateBig_balancer.js
@@ -1,68 +1,72 @@
(function() {
-var st = new ShardingTest({ name: 'migrateBig_balancer',
- shards: 2,
- other: { enableBalancer: true } });
-var mongos = st.s;
+ var st =
+ new ShardingTest({name: 'migrateBig_balancer', shards: 2, other: {enableBalancer: true}});
+ var mongos = st.s;
-var admin = mongos.getDB("admin");
-db = mongos.getDB("test");
-var coll = db.getCollection("stuff");
+ var admin = mongos.getDB("admin");
+ db = mongos.getDB("test");
+ var coll = db.getCollection("stuff");
-assert.commandWorked(admin.runCommand({ enablesharding : coll.getDB().getName() }));
-st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+ assert.commandWorked(admin.runCommand({enablesharding: coll.getDB().getName()}));
+ st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-var data = "x";
-var nsq = 16;
-var n = 255;
+ var data = "x";
+ var nsq = 16;
+ var n = 255;
-for( var i = 0; i < nsq; i++ ) data += data;
+ for (var i = 0; i < nsq; i++)
+ data += data;
-dataObj = {};
-for( var i = 0; i < n; i++ ) dataObj["data-" + i] = data;
+ dataObj = {};
+ for (var i = 0; i < n; i++)
+ dataObj["data-" + i] = data;
-var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < 40; i++ ) {
- bulk.insert({ data: dataObj });
-}
-assert.writeOK(bulk.execute());
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 40; i++) {
+ bulk.insert({data: dataObj});
+ }
+ assert.writeOK(bulk.execute());
-assert.eq( 40 , coll.count() , "prep1" );
+ assert.eq(40, coll.count(), "prep1");
-printjson( coll.stats() );
+ printjson(coll.stats());
-admin.printShardingStatus();
+ admin.printShardingStatus();
-admin.runCommand({ shardcollection : "" + coll, key : { _id : 1 } });
+ admin.runCommand({shardcollection: "" + coll, key: {_id: 1}});
-assert.lt( 5 , mongos.getDB( "config" ).chunks.find( { ns : "test.stuff" } ).count() , "not enough chunks" );
+ assert.lt(
+ 5, mongos.getDB("config").chunks.find({ns: "test.stuff"}).count(), "not enough chunks");
-assert.soon(
- function() {
- // On *extremely* slow or variable systems, we've seen migrations fail in the critical section and
+ assert.soon(function() {
+ // On *extremely* slow or variable systems, we've seen migrations fail in the critical
+ // section and
// kill the server. Do an explicit check for this. SERVER-8781
// TODO: Remove once we can better specify what systems to run what tests on.
try {
- assert.commandWorked(st.shard0.getDB("admin").runCommand({ ping: 1 }));
- assert.commandWorked(st.shard1.getDB("admin").runCommand({ ping: 1 }));
- }
- catch(e) {
+ assert.commandWorked(st.shard0.getDB("admin").runCommand({ping: 1}));
+ assert.commandWorked(st.shard1.getDB("admin").runCommand({ping: 1}));
+ } catch (e) {
print("An error occurred contacting a shard during balancing," +
" this may be due to slow disk I/O, aborting test.");
throw e;
}
-
- res = mongos.getDB( "config" ).chunks.group( { cond : { ns : "test.stuff" } ,
- key : { shard : 1 } ,
- reduce : function( doc , out ){ out.nChunks++; } ,
- initial : { nChunks : 0 } } );
-
- printjson( res );
- return res.length > 1 && Math.abs( res[0].nChunks - res[1].nChunks ) <= 3;
-
- } ,
- "never migrated" , 10 * 60 * 1000 , 1000 );
-
-st.stop();
+
+ res = mongos.getDB("config").chunks.group({
+ cond: {ns: "test.stuff"},
+ key: {shard: 1},
+ reduce: function(doc, out) {
+ out.nChunks++;
+ },
+ initial: {nChunks: 0}
+ });
+
+ printjson(res);
+ return res.length > 1 && Math.abs(res[0].nChunks - res[1].nChunks) <= 3;
+
+ }, "never migrated", 10 * 60 * 1000, 1000);
+
+ st.stop();
})();
diff --git a/jstests/sharding/migrate_overwrite_id.js b/jstests/sharding/migrate_overwrite_id.js
index 26afd8258ac..1d5bc2f3236 100644
--- a/jstests/sharding/migrate_overwrite_id.js
+++ b/jstests/sharding/migrate_overwrite_id.js
@@ -2,40 +2,40 @@
// Tests that a migration does not overwrite duplicate _ids on data transfer
//
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+var st = new ShardingTest({shards: 2, mongos: 1});
st.stopBalancer();
var mongos = st.s0;
-var shards = mongos.getDB( "config" ).shards.find().toArray();
+var shards = mongos.getDB("config").shards.find().toArray();
shards[0].conn = st.shard0;
shards[1].conn = st.shard1;
-var admin = mongos.getDB( "admin" );
-var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-assert( admin.runCommand({ shardCollection : coll + "", key : { skey : 1 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { skey : 0 } }).ok );
-assert( admin.runCommand({ moveChunk : coll + "", find : { skey : 0 }, to : shards[1]._id }).ok );
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+assert(admin.runCommand({shardCollection: coll + "", key: {skey: 1}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {skey: 0}}).ok);
+assert(admin.runCommand({moveChunk: coll + "", find: {skey: 0}, to: shards[1]._id}).ok);
var id = 12345;
-jsTest.log( "Inserting a document with id : 12345 into both shards with diff shard key..." );
+jsTest.log("Inserting a document with id : 12345 into both shards with diff shard key...");
-assert.writeOK(coll.insert({ _id : id, skey : -1 }));
-assert.writeOK(coll.insert({ _id : id, skey : 1 }));
+assert.writeOK(coll.insert({_id: id, skey: -1}));
+assert.writeOK(coll.insert({_id: id, skey: 1}));
-printjson( shards[0].conn.getCollection( coll + "" ).find({ _id : id }).toArray() );
-printjson( shards[1].conn.getCollection( coll + "" ).find({ _id : id }).toArray() );
-assert.eq( 2, coll.find({ _id : id }).itcount() );
+printjson(shards[0].conn.getCollection(coll + "").find({_id: id}).toArray());
+printjson(shards[1].conn.getCollection(coll + "").find({_id: id}).toArray());
+assert.eq(2, coll.find({_id: id}).itcount());
-jsTest.log( "Moving both chunks to same shard..." );
+jsTest.log("Moving both chunks to same shard...");
-var result = admin.runCommand({ moveChunk : coll + "", find : { skey : -1 }, to : shards[1]._id });
-printjson( result );
+var result = admin.runCommand({moveChunk: coll + "", find: {skey: -1}, to: shards[1]._id});
+printjson(result);
-printjson( shards[0].conn.getCollection( coll + "" ).find({ _id : id }).toArray() );
-printjson( shards[1].conn.getCollection( coll + "" ).find({ _id : id }).toArray() );
-assert.eq( 2, coll.find({ _id : id }).itcount() );
+printjson(shards[0].conn.getCollection(coll + "").find({_id: id}).toArray());
+printjson(shards[1].conn.getCollection(coll + "").find({_id: id}).toArray());
+assert.eq(2, coll.find({_id: id}).itcount());
st.stop();
diff --git a/jstests/sharding/migration_failure.js b/jstests/sharding/migration_failure.js
index 0b9c950908b..97ab7ddf967 100644
--- a/jstests/sharding/migration_failure.js
+++ b/jstests/sharding/migration_failure.js
@@ -1,20 +1,20 @@
//
-// Tests that migration failures before and after commit correctly roll back
+// Tests that migration failures before and after commit correctly roll back
// when possible
//
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+var st = new ShardingTest({shards: 2, mongos: 1});
st.stopBalancer();
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB("admin");
+var shards = mongos.getCollection("config.shards").find().toArray();
+var coll = mongos.getCollection("foo.bar");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
st.printShardingStatus();
@@ -23,58 +23,46 @@ jsTest.log("Testing failed migrations...");
var version = null;
var failVersion = null;
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({
- configureFailPoint : 'failMigrationCommit', mode : 'alwaysOn' }));
+assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failMigrationCommit', mode: 'alwaysOn'}));
-version = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+version = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()});
-assert.commandFailed( admin.runCommand({ moveChunk : coll + "",
- find : { _id : 0 },
- to : shards[1]._id }) );
+assert.commandFailed(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
-failVersion = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+failVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()});
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({
- configureFailPoint : 'failMigrationCommit', mode : 'off' }));
+assert.commandWorked(st.shard0.getDB("admin")
+ .runCommand({configureFailPoint: 'failMigrationCommit', mode: 'off'}));
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({
- configureFailPoint : 'failMigrationConfigWritePrepare', mode : 'alwaysOn' }));
+assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failMigrationConfigWritePrepare', mode: 'alwaysOn'}));
-version = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+version = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()});
-assert.commandFailed( admin.runCommand({ moveChunk : coll + "",
- find : { _id : 0 },
- to : shards[1]._id }) );
+assert.commandFailed(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
-failVersion = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+failVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()});
assert.eq(version.global, failVersion.global);
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({
- configureFailPoint : 'failMigrationConfigWritePrepare', mode : 'off' }));
+assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failMigrationConfigWritePrepare', mode: 'off'}));
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({
- configureFailPoint : 'failApplyChunkOps', mode : 'alwaysOn' }));
+assert.commandWorked(st.shard0.getDB("admin")
+ .runCommand({configureFailPoint: 'failApplyChunkOps', mode: 'alwaysOn'}));
-version = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+version = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()});
-assert.commandWorked( admin.runCommand({ moveChunk : coll + "",
- find : { _id : 0 },
- to : shards[1]._id }) );
+assert.commandWorked(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
-failVersion = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+failVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()});
assert.neq(version.global, failVersion.global);
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({
- configureFailPoint : 'failApplyChunkOps', mode : 'off' }));
+assert.commandWorked(st.shard0.getDB("admin")
+ .runCommand({configureFailPoint: 'failApplyChunkOps', mode: 'off'}));
-jsTest.log( "DONE!" );
+jsTest.log("DONE!");
st.stop();
diff --git a/jstests/sharding/migration_ignore_interrupts.js b/jstests/sharding/migration_ignore_interrupts.js
index 80724895810..04b92088b3b 100644
--- a/jstests/sharding/migration_ignore_interrupts.js
+++ b/jstests/sharding/migration_ignore_interrupts.js
@@ -7,331 +7,303 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
-"use strict";
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// Starting setup
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Shard0:
-// coll1: [0, 10) [10, 20) [20, 30)
-// coll2: [0, 10) [10, 20)
-// Shard1:
-// Shard2:
-
-var staticMongod1 = MongoRunner.runMongod({}); // For startParallelOps.
-var staticMongod2 = MongoRunner.runMongod({}); // For startParallelOps.
-
-var st = new ShardingTest({ shards : 4, mongos : 1 });
-
-var mongos = st.s0,
- admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
- dbName = "testDB",
- ns1 = dbName + ".foo",
- coll1 = mongos.getCollection(ns1),
- ns2 = dbName + ".baz",
- coll2 = mongos.getCollection(ns2),
- shard0 = st.shard0,
- shard1 = st.shard1,
- shard2 = st.shard2,
- shard0Coll1 = shard0.getCollection(ns1),
- shard0Coll2 = shard0.getCollection(ns2),
- shard1Coll1 = shard1.getCollection(ns1),
- shard1Coll2 = shard1.getCollection(ns2),
- shard2Coll1 = shard2.getCollection(ns1),
- shard2Coll2 = shard2.getCollection(ns2);
-
-assert.commandWorked(admin.runCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, shards[0]._id);
-
-assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
-assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 10}}));
-assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 20}}));
-assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
-assert.commandWorked(admin.runCommand({split: ns2, middle: {a: 10}}));
-
-assert.writeOK(coll1.insert({a: 0}));
-assert.writeOK(coll1.insert({a: 10}));
-assert.writeOK(coll1.insert({a: 20}));
-assert.eq(3, shard0Coll1.count());
-assert.eq(3, coll1.count());
-assert.writeOK(coll2.insert({a: 0}));
-assert.writeOK(coll2.insert({a: 10}));
-assert.eq(2, shard0Coll2.count());
-assert.eq(2, coll2.count());
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// 1. When a migration is in process from shard0 to shard1 on coll1, shard2 is unable to
-// start a migration with either shard in the following cases:
-// 1. coll1 shard2 to shard0 -- coll1 is already locked.
-// 2. coll1 shard2 to shard1 -- coll1 is already locked.
-// 3. coll1 shard1 to shard2 -- coll1 is already locked.
-// 4. coll2 shard2 to shard1 -- shard1 can't receive two chunks simultaneously.
-// 5. coll2 shard0 to shard2 -- shard0 can't send two chunks simultaneously.
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Shard0:
-// coll1: [0, 10)
-// coll2: [0, 10)
-// Shard1:
-// coll1: [20, 30)
-// Shard2:
-// coll1: [10, 20)
-// coll2: [10, 20)
-
-assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[2]._id}));
-assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[2]._id}));
-assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[1]._id}));
-assert.eq(1, shard0Coll1.count());
-assert.eq(1, shard0Coll2.count());
-assert.eq(1, shard1Coll1.count());
-assert.eq(0, shard1Coll2.count());
-assert.eq(1, shard2Coll1.count());
-assert.eq(1, shard2Coll2.count());
-
-// Start a migration between shard0 and shard1 on coll1 and then pause it
-pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
-var joinMoveChunk1 = moveChunkParallel(
- staticMongod1,
- st.s0.host,
- {a: 0},
- null,
- coll1.getFullName(),
- shards[1]._id);
-waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
-
-jsTest.log('Attempting to interrupt migration....');
-// Test 1.1
-assert.commandFailed(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[0]._id}),
- "(1.1) coll1 lock should have prevented simultaneous migrations in the collection.");
-// Test 1.2
-assert.commandFailed(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[1]._id}),
- "(1.2) coll1 lock should have prevented simultaneous migrations in the collection.");
-// Test 1.3
-assert.commandFailed(admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[2]._id}),
- "(1.3) coll1 lock should have prevented simultaneous migrations in the collection.");
-// Test 1.4
-assert.commandFailed(admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[1]._id}),
- "(1.4) A shard should not be able to be the recipient of two ongoing migrations");
-// Test 1.5
-assert.commandFailed(admin.runCommand({moveChunk: ns2, find: {a: 0}, to: shards[2]._id}),
- "(1.5) A shard should not be able to be the donor for two ongoing migrations.");
-
-// Finish migration
-unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
-assert.doesNotThrow(function() {
- joinMoveChunk1();
-});
-assert.eq(0, shard0Coll1.count());
-assert.eq(2, shard1Coll1.count());
-
-// Reset setup
-assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 0}, to: shards[0]._id}));
-assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[0]._id}));
-assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[0]._id}));
-assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[0]._id}));
-assert.eq(3, shard0Coll1.count());
-assert.eq(2, shard0Coll2.count());
-assert.eq(0, shard1Coll1.count());
-assert.eq(0, shard1Coll2.count());
-assert.eq(0, shard2Coll1.count());
-assert.eq(0, shard2Coll2.count());
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// 2. When a migration between shard0 and shard1 is about to enter the commit phase, a
-// commit command from shard2 (different migration session ID) is rejected.
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Shard0:
-// coll1: [0, 10) [10, 20) [20, 30)
-// coll2: [0, 10) [10, 20)
-// Shard1:
-// Shard2:
-
-// Start a migration between shard0 and shard1 on coll1, pause in steady state before commit
-pauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
-joinMoveChunk1 = moveChunkParallel(
- staticMongod1,
- st.s0.host,
- {a: 0},
- null,
- coll1.getFullName(),
- shards[1]._id);
-waitForMoveChunkStep(shard0, moveChunkStepNames.reachedSteadyState);
-
-jsTest.log('Sending false commit command....');
-assert.commandFailed(shard2.adminCommand(
- {'_recvChunkCommit': 1, 'sessionId': "fake-migration-session-id"}));
-
-jsTest.log("Checking migration recipient is still in steady state, waiting for commit....");
-var res = shard1.adminCommand('_recvChunkStatus');
-assert.commandWorked(res);
-assert.eq(true, res.state === "steady", "False commit command succeeded");
-
-// Finish migration
-unpauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
-assert.doesNotThrow(function() {
- joinMoveChunk1();
-});
-assert.eq(2, shard0Coll1.count());
-assert.eq(1, shard1Coll1.count());
-
-// Reset setup
-assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 0}, to: shards[0]._id}));
-assert.eq(3, shard0Coll1.count());
-assert.eq(2, shard0Coll2.count());
-assert.eq(0, shard1Coll1.count());
-assert.eq(0, shard1Coll2.count());
-assert.eq(0, shard2Coll1.count());
-assert.eq(0, shard2Coll2.count());
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// 3. If a donor aborts a migration to a recipient, the recipient does not realize the
-// migration has been aborted, and the donor moves on to a new migration, the original
-// recipient will then fail to clone documents from the donor.
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Shard0:
-// coll1: [0, 10) [10, 20) [20, 30)
-// coll2: [0, 10) [10, 20)
-// Shard1:
-// Shard2:
-
-// Start coll1 migration to shard1: pause recipient after delete step, donor before interrupt check
-pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
-pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
-joinMoveChunk1 = moveChunkParallel(
- staticMongod1,
- st.s0.host,
- {a: 0},
- null,
- coll1.getFullName(),
- shards[1]._id);
-waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
-
-// Abort migration on donor side, recipient is unaware
-var inProgressOps = admin.currentOp().inprog;
-var abortedMigration = false;
-for (var op in inProgressOps) {
- if (inProgressOps[op].query.moveChunk) {
- admin.killOp(inProgressOps[op].opid);
- abortedMigration = true;
+ "use strict";
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+ // Starting setup
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Shard0:
+ // coll1: [0, 10) [10, 20) [20, 30)
+ // coll2: [0, 10) [10, 20)
+ // Shard1:
+ // Shard2:
+
+ var staticMongod1 = MongoRunner.runMongod({}); // For startParallelOps.
+ var staticMongod2 = MongoRunner.runMongod({}); // For startParallelOps.
+
+ var st = new ShardingTest({shards: 4, mongos: 1});
+
+ var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(), dbName = "testDB",
+ ns1 = dbName + ".foo", coll1 = mongos.getCollection(ns1), ns2 = dbName + ".baz",
+ coll2 = mongos.getCollection(ns2), shard0 = st.shard0, shard1 = st.shard1,
+ shard2 = st.shard2, shard0Coll1 = shard0.getCollection(ns1),
+ shard0Coll2 = shard0.getCollection(ns2), shard1Coll1 = shard1.getCollection(ns1),
+ shard1Coll2 = shard1.getCollection(ns2), shard2Coll1 = shard2.getCollection(ns1),
+ shard2Coll2 = shard2.getCollection(ns2);
+
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ st.ensurePrimaryShard(dbName, shards[0]._id);
+
+ assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
+ assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 10}}));
+ assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 20}}));
+ assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
+ assert.commandWorked(admin.runCommand({split: ns2, middle: {a: 10}}));
+
+ assert.writeOK(coll1.insert({a: 0}));
+ assert.writeOK(coll1.insert({a: 10}));
+ assert.writeOK(coll1.insert({a: 20}));
+ assert.eq(3, shard0Coll1.count());
+ assert.eq(3, coll1.count());
+ assert.writeOK(coll2.insert({a: 0}));
+ assert.writeOK(coll2.insert({a: 10}));
+ assert.eq(2, shard0Coll2.count());
+ assert.eq(2, coll2.count());
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+ // 1. When a migration is in process from shard0 to shard1 on coll1, shard2 is unable to
+ // start a migration with either shard in the following cases:
+ // 1. coll1 shard2 to shard0 -- coll1 is already locked.
+ // 2. coll1 shard2 to shard1 -- coll1 is already locked.
+ // 3. coll1 shard1 to shard2 -- coll1 is already locked.
+ // 4. coll2 shard2 to shard1 -- shard1 can't receive two chunks simultaneously.
+ // 5. coll2 shard0 to shard2 -- shard0 can't send two chunks simultaneously.
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Shard0:
+ // coll1: [0, 10)
+ // coll2: [0, 10)
+ // Shard1:
+ // coll1: [20, 30)
+ // Shard2:
+ // coll1: [10, 20)
+ // coll2: [10, 20)
+
+ assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[2]._id}));
+ assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[2]._id}));
+ assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[1]._id}));
+ assert.eq(1, shard0Coll1.count());
+ assert.eq(1, shard0Coll2.count());
+ assert.eq(1, shard1Coll1.count());
+ assert.eq(0, shard1Coll2.count());
+ assert.eq(1, shard2Coll1.count());
+ assert.eq(1, shard2Coll2.count());
+
+ // Start a migration between shard0 and shard1 on coll1 and then pause it
+ pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+ var joinMoveChunk1 = moveChunkParallel(
+ staticMongod1, st.s0.host, {a: 0}, null, coll1.getFullName(), shards[1]._id);
+ waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
+
+ jsTest.log('Attempting to interrupt migration....');
+ // Test 1.1
+ assert.commandFailed(
+ admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[0]._id}),
+ "(1.1) coll1 lock should have prevented simultaneous migrations in the collection.");
+ // Test 1.2
+ assert.commandFailed(
+ admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[1]._id}),
+ "(1.2) coll1 lock should have prevented simultaneous migrations in the collection.");
+ // Test 1.3
+ assert.commandFailed(
+ admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[2]._id}),
+ "(1.3) coll1 lock should have prevented simultaneous migrations in the collection.");
+ // Test 1.4
+ assert.commandFailed(
+ admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[1]._id}),
+ "(1.4) A shard should not be able to be the recipient of two ongoing migrations");
+ // Test 1.5
+ assert.commandFailed(
+ admin.runCommand({moveChunk: ns2, find: {a: 0}, to: shards[2]._id}),
+ "(1.5) A shard should not be able to be the donor for two ongoing migrations.");
+
+ // Finish migration
+ unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+ assert.doesNotThrow(function() {
+ joinMoveChunk1();
+ });
+ assert.eq(0, shard0Coll1.count());
+ assert.eq(2, shard1Coll1.count());
+
+ // Reset setup
+ assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 0}, to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[0]._id}));
+ assert.eq(3, shard0Coll1.count());
+ assert.eq(2, shard0Coll2.count());
+ assert.eq(0, shard1Coll1.count());
+ assert.eq(0, shard1Coll2.count());
+ assert.eq(0, shard2Coll1.count());
+ assert.eq(0, shard2Coll2.count());
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+ // 2. When a migration between shard0 and shard1 is about to enter the commit phase, a
+ // commit command from shard2 (different migration session ID) is rejected.
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Shard0:
+ // coll1: [0, 10) [10, 20) [20, 30)
+ // coll2: [0, 10) [10, 20)
+ // Shard1:
+ // Shard2:
+
+ // Start a migration between shard0 and shard1 on coll1, pause in steady state before commit
+ pauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
+ joinMoveChunk1 = moveChunkParallel(
+ staticMongod1, st.s0.host, {a: 0}, null, coll1.getFullName(), shards[1]._id);
+ waitForMoveChunkStep(shard0, moveChunkStepNames.reachedSteadyState);
+
+ jsTest.log('Sending false commit command....');
+ assert.commandFailed(
+ shard2.adminCommand({'_recvChunkCommit': 1, 'sessionId': "fake-migration-session-id"}));
+
+ jsTest.log("Checking migration recipient is still in steady state, waiting for commit....");
+ var res = shard1.adminCommand('_recvChunkStatus');
+ assert.commandWorked(res);
+ assert.eq(true, res.state === "steady", "False commit command succeeded");
+
+ // Finish migration
+ unpauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
+ assert.doesNotThrow(function() {
+ joinMoveChunk1();
+ });
+ assert.eq(2, shard0Coll1.count());
+ assert.eq(1, shard1Coll1.count());
+
+ // Reset setup
+ assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 0}, to: shards[0]._id}));
+ assert.eq(3, shard0Coll1.count());
+ assert.eq(2, shard0Coll2.count());
+ assert.eq(0, shard1Coll1.count());
+ assert.eq(0, shard1Coll2.count());
+ assert.eq(0, shard2Coll1.count());
+ assert.eq(0, shard2Coll2.count());
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+ // 3. If a donor aborts a migration to a recipient, the recipient does not realize the
+ // migration has been aborted, and the donor moves on to a new migration, the original
+ // recipient will then fail to clone documents from the donor.
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Shard0:
+ // coll1: [0, 10) [10, 20) [20, 30)
+ // coll2: [0, 10) [10, 20)
+ // Shard1:
+ // Shard2:
+
+ // Start coll1 migration to shard1: pause recipient after delete step, donor before interrupt
+ // check
+ pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+ pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+ joinMoveChunk1 = moveChunkParallel(
+ staticMongod1, st.s0.host, {a: 0}, null, coll1.getFullName(), shards[1]._id);
+ waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
+
+ // Abort migration on donor side, recipient is unaware
+ var inProgressOps = admin.currentOp().inprog;
+ var abortedMigration = false;
+ for (var op in inProgressOps) {
+ if (inProgressOps[op].query.moveChunk) {
+ admin.killOp(inProgressOps[op].opid);
+ abortedMigration = true;
+ }
}
-}
-assert.eq(true, abortedMigration, "Failed to abort migration, current running ops: " +
- tojson(inProgressOps));
-unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
-assert.throws(function() {
- joinMoveChunk1();
-});
-
-// Start coll2 migration to shard2, pause recipient after delete step
-pauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
-var joinMoveChunk2 = moveChunkParallel(
- staticMongod2,
- st.s0.host,
- {a: 0},
- null,
- coll2.getFullName(),
- shards[2]._id);
-waitForMigrateStep(shard2, migrateStepNames.deletedPriorDataInRange);
-
-jsTest.log('Releasing coll1 migration recipient, whose clone command should fail....');
-unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
-assert.eq(3, shard0Coll1.count(), "donor shard0 completed a migration that it aborted");
-assert.eq(0, shard1Coll1.count(), "shard1 cloned documents despite donor migration abortion");
-
-jsTest.log('Finishing coll2 migration, which should succeed....');
-unpauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
-assert.doesNotThrow(function() {
- joinMoveChunk2();
-});
-assert.eq(1, shard0Coll2.count(), "donor shard0 failed to complete a migration " +
- "after aborting a prior migration");
-assert.eq(1, shard2Coll2.count(), "shard2 failed to complete migration");
-
-// Reset setup
-assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 0}, to: shards[0]._id}));
-assert.eq(3, shard0Coll1.count());
-assert.eq(2, shard0Coll2.count());
-assert.eq(0, shard1Coll1.count());
-assert.eq(0, shard1Coll2.count());
-assert.eq(0, shard2Coll1.count());
-assert.eq(0, shard2Coll2.count());
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// 4. If a donor aborts a migration to a recipient, the recipient does not realize the
-// migration has been aborted, and the donor moves on to a new migration, the original
-// recipient will then fail to retrieve transferMods from the donor's xfermods log.
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Shard0:
-// coll1: [0, 10) [10, 20) [20, 30)
-// coll2: [0, 10) [10, 20)
-// Shard1:
-// Shard2:
-
-// Start coll1 migration to shard1: pause recipient after cloning, donor before interrupt check
-pauseMigrateAtStep(shard1, migrateStepNames.cloned);
-pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
-joinMoveChunk1 = moveChunkParallel(
- staticMongod1,
- st.s0.host,
- {a: 0},
- null,
- coll1.getFullName(),
- shards[1]._id);
-waitForMigrateStep(shard1, migrateStepNames.cloned);
-
-// Abort migration on donor side, recipient is unaware
-inProgressOps = admin.currentOp().inprog;
-abortedMigration = false;
-for (var op in inProgressOps) {
- if (inProgressOps[op].query.moveChunk) {
- admin.killOp(inProgressOps[op].opid);
- abortedMigration = true;
+ assert.eq(true,
+ abortedMigration,
+ "Failed to abort migration, current running ops: " + tojson(inProgressOps));
+ unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+ assert.throws(function() {
+ joinMoveChunk1();
+ });
+
+ // Start coll2 migration to shard2, pause recipient after delete step
+ pauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
+ var joinMoveChunk2 = moveChunkParallel(
+ staticMongod2, st.s0.host, {a: 0}, null, coll2.getFullName(), shards[2]._id);
+ waitForMigrateStep(shard2, migrateStepNames.deletedPriorDataInRange);
+
+ jsTest.log('Releasing coll1 migration recipient, whose clone command should fail....');
+ unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+ assert.eq(3, shard0Coll1.count(), "donor shard0 completed a migration that it aborted");
+ assert.eq(0, shard1Coll1.count(), "shard1 cloned documents despite donor migration abortion");
+
+ jsTest.log('Finishing coll2 migration, which should succeed....');
+ unpauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
+ assert.doesNotThrow(function() {
+ joinMoveChunk2();
+ });
+ assert.eq(1,
+ shard0Coll2.count(),
+ "donor shard0 failed to complete a migration " + "after aborting a prior migration");
+ assert.eq(1, shard2Coll2.count(), "shard2 failed to complete migration");
+
+ // Reset setup
+ assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 0}, to: shards[0]._id}));
+ assert.eq(3, shard0Coll1.count());
+ assert.eq(2, shard0Coll2.count());
+ assert.eq(0, shard1Coll1.count());
+ assert.eq(0, shard1Coll2.count());
+ assert.eq(0, shard2Coll1.count());
+ assert.eq(0, shard2Coll2.count());
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+ // 4. If a donor aborts a migration to a recipient, the recipient does not realize the
+ // migration has been aborted, and the donor moves on to a new migration, the original
+ // recipient will then fail to retrieve transferMods from the donor's xfermods log.
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Shard0:
+ // coll1: [0, 10) [10, 20) [20, 30)
+ // coll2: [0, 10) [10, 20)
+ // Shard1:
+ // Shard2:
+
+ // Start coll1 migration to shard1: pause recipient after cloning, donor before interrupt check
+ pauseMigrateAtStep(shard1, migrateStepNames.cloned);
+ pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+ joinMoveChunk1 = moveChunkParallel(
+ staticMongod1, st.s0.host, {a: 0}, null, coll1.getFullName(), shards[1]._id);
+ waitForMigrateStep(shard1, migrateStepNames.cloned);
+
+ // Abort migration on donor side, recipient is unaware
+ inProgressOps = admin.currentOp().inprog;
+ abortedMigration = false;
+ for (var op in inProgressOps) {
+ if (inProgressOps[op].query.moveChunk) {
+ admin.killOp(inProgressOps[op].opid);
+ abortedMigration = true;
+ }
}
-}
-assert.eq(true, abortedMigration, "Failed to abort migration, current running ops: " +
- tojson(inProgressOps));
-unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
-assert.throws(function() {
- joinMoveChunk1();
-});
-
-// Start coll2 migration to shard2, pause recipient after cloning step
-pauseMigrateAtStep(shard2, migrateStepNames.cloned);
-var joinMoveChunk2 = moveChunkParallel(
- staticMongod2,
- st.s0.host,
- {a: 0},
- null,
- coll2.getFullName(),
- shards[2]._id);
-waitForMigrateStep(shard2, migrateStepNames.cloned);
-
-// Populate donor (shard0) xfermods log.
-assert.writeOK(coll2.insert({a: 1}));
-assert.writeOK(coll2.insert({a: 2}));
-assert.eq(4, coll2.count(), "Failed to insert documents into coll2");
-assert.eq(4, shard0Coll2.count());
-
-jsTest.log('Releasing coll1 migration recipient, whose transferMods command should fail....');
-unpauseMigrateAtStep(shard1, migrateStepNames.cloned);
-assert.eq(3, shard0Coll1.count(), "donor shard0 completed a migration that it aborted");
-assert.eq(1, shard1Coll1.count(), "shard1 accessed the xfermods log despite " +
- "donor migration abortion");
-
-jsTest.log('Finishing coll2 migration, which should succeed....');
-unpauseMigrateAtStep(shard2, migrateStepNames.cloned);
-assert.doesNotThrow(function() {
- joinMoveChunk2();
-});
-assert.eq(1, shard0Coll2.count(), "donor shard0 failed to complete a migration " +
- "after aborting a prior migration");
-assert.eq(3, shard2Coll2.count(), "shard2 failed to complete migration");
-
-st.stop();
+ assert.eq(true,
+ abortedMigration,
+ "Failed to abort migration, current running ops: " + tojson(inProgressOps));
+ unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+ assert.throws(function() {
+ joinMoveChunk1();
+ });
+
+ // Start coll2 migration to shard2, pause recipient after cloning step
+ pauseMigrateAtStep(shard2, migrateStepNames.cloned);
+ var joinMoveChunk2 = moveChunkParallel(
+ staticMongod2, st.s0.host, {a: 0}, null, coll2.getFullName(), shards[2]._id);
+ waitForMigrateStep(shard2, migrateStepNames.cloned);
+
+ // Populate donor (shard0) xfermods log.
+ assert.writeOK(coll2.insert({a: 1}));
+ assert.writeOK(coll2.insert({a: 2}));
+ assert.eq(4, coll2.count(), "Failed to insert documents into coll2");
+ assert.eq(4, shard0Coll2.count());
+
+ jsTest.log('Releasing coll1 migration recipient, whose transferMods command should fail....');
+ unpauseMigrateAtStep(shard1, migrateStepNames.cloned);
+ assert.eq(3, shard0Coll1.count(), "donor shard0 completed a migration that it aborted");
+ assert.eq(1,
+ shard1Coll1.count(),
+ "shard1 accessed the xfermods log despite " + "donor migration abortion");
+
+ jsTest.log('Finishing coll2 migration, which should succeed....');
+ unpauseMigrateAtStep(shard2, migrateStepNames.cloned);
+ assert.doesNotThrow(function() {
+ joinMoveChunk2();
+ });
+ assert.eq(1,
+ shard0Coll2.count(),
+ "donor shard0 failed to complete a migration " + "after aborting a prior migration");
+ assert.eq(3, shard2Coll2.count(), "shard2 failed to complete migration");
+
+ st.stop();
})();
diff --git a/jstests/sharding/migration_sets_fromMigrate_flag.js b/jstests/sharding/migration_sets_fromMigrate_flag.js
index c9143aac67c..55dbca8b5fa 100644
--- a/jstests/sharding/migration_sets_fromMigrate_flag.js
+++ b/jstests/sharding/migration_sets_fromMigrate_flag.js
@@ -17,150 +17,151 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
-"use strict";
-
-var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
-/**
- * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
- */
-
-var st = new ShardingTest({ shards : 2, mongos : 1, rs : { nodes : 3 } });
-st.stopBalancer();
-
-var mongos = st.s0,
- admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
- dbName = "testDB",
- ns = dbName + ".foo",
- coll = mongos.getCollection(ns),
- donor = st.shard0,
- recipient = st.shard1,
- donorColl = donor.getCollection(ns),
- recipientColl = recipient.getCollection(ns),
- donorLocal = donor.getDB('local'),
- recipientLocal = recipient.getDB('local');
-
-// Two chunks
-// Donor: [0, 2) [2, 5)
-// Recipient:
-jsTest.log('Enable sharding of the collection and pre-split into two chunks....');
-
-assert.commandWorked(admin.runCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, shards[0]._id);
-assert.commandWorked(donorColl.createIndex({_id: 1}));
-assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
-assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 2}}));
-
-// 6 documents,
-// donor: 2 in the first chunk, 3 in the second.
-// recipient: 1 document (shardkey overlaps with a doc in second chunk of donor shard)
-jsTest.log('Inserting 5 docs into donor shard, 1 doc into the recipient shard....');
-
-for (var i = 0; i < 5; ++i)
- assert.writeOK(coll.insert({_id: i}));
-assert.eq(5, donorColl.count());
-
-for (var i = 2; i < 3; ++i)
- assert.writeOK(recipientColl.insert({_id: i}));
-assert.eq(1, recipientColl.count());
-
-/**
- * Set failpoint: recipient will pause migration after cloning chunk data from donor,
- * before checking transfer mods log on donor.
- */
-
-jsTest.log('setting recipient failpoint cloned');
-pauseMigrateAtStep(recipient, migrateStepNames.cloned);
-
-/**
- * Start moving chunk [2, 5) from donor shard to recipient shard, run in the background.
- */
-
-// Donor: [0, 2)
-// Recipient: [2, 5)
-jsTest.log('Starting chunk migration, pause after cloning...');
-
-var joinMoveChunk = moveChunkParallel(
- staticMongod,
- st.s0.host,
- {_id: 2},
- null,
- coll.getFullName(),
- shards[1]._id);
-
-/**
- * Wait for recipient to finish cloning.
- * THEN update 1 document {_id: 3} on donor within the currently migrating chunk.
- * AND delete 1 document {_id: 4} on donor within the currently migrating chunk.
- */
-
-waitForMigrateStep(recipient, migrateStepNames.cloned);
-
-jsTest.log('Update 1 doc and delete 1 doc on donor within the currently migrating chunk...');
-
-assert.writeOK(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
-assert.writeOK(coll.remove({_id: 4}));
-
-/**
- * Finish migration. Unpause recipient migration, wait for it to collect
- * the transfer mods log from donor and finish migration.
- */
-
-jsTest.log('Continuing and finishing migration...');
-unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
-joinMoveChunk();
-
-/**
- * Check documents are where they should be: 2 docs in donor chunk, 2 docs in recipient chunk
- * (because third doc in recipient shard's chunk got deleted on the donor shard during migration).
- */
-
-jsTest.log('Checking that documents are on the shards they should be...');
-
-assert.eq(2, recipientColl.count(), "Recipient shard doesn't have exactly 2 documents!");
-assert.eq(2, donorColl.count(), "Donor shard doesn't have exactly 2 documents!");
-assert.eq(4, coll.count(), "Collection total is not 4!");
-
-/**
- * Check that the fromMigrate flag has been set correctly in donor and recipient oplogs,
- */
-
-jsTest.log('Checking donor and recipient oplogs for correct fromMigrate flags...');
-
-var donorOplogRes = donorLocal.oplog.rs.find(
- {op: 'd', fromMigrate: true, 'o._id': 2}).count();
-assert.eq(1, donorOplogRes, "fromMigrate flag wasn't set on the donor shard's oplog for " +
- "migrating delete op on {_id: 2}! Test #2 failed.");
-
-donorOplogRes = donorLocal.oplog.rs.find(
- {op: 'd', fromMigrate: {$exists: false}, 'o._id': 4}).count();
-assert.eq(1, donorOplogRes, "Real delete of {_id: 4} on donor shard incorrectly set the " +
- "fromMigrate flag in the oplog! Test #5 failed.");
-
-var recipientOplogRes = recipientLocal.oplog.rs.find(
- {op: 'i', fromMigrate: true, 'o._id': 2}).count();
-assert.eq(1, recipientOplogRes, "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for migrating insert op on {_id: 2}! Test #3 failed.");
-
-recipientOplogRes = recipientLocal.oplog.rs.find(
- {op: 'd', fromMigrate: true, 'o._id': 2}).count();
-assert.eq(1, recipientOplogRes, "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for delete op on the old {_id: 2} that overlapped " +
- "with the chunk about to be copied! Test #1 failed.");
-
-recipientOplogRes = recipientLocal.oplog.rs.find(
- {op: 'u', fromMigrate: true, 'o._id': 3}).count();
-assert.eq(1, recipientOplogRes, "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for update op on {_id: 3}! Test #4 failed.");
-
-recipientOplogRes = recipientLocal.oplog.rs.find(
- {op: 'd', fromMigrate: true, 'o._id': 4}).count();
-assert.eq(1, recipientOplogRes, "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for delete op on {_id: 4} that occurred during " +
- "migration! Test #5 failed.");
-
-jsTest.log('DONE!');
-st.stop();
+ "use strict";
+
+ var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+ /**
+ * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
+ */
+
+ var st = new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 3}});
+ st.stopBalancer();
+
+ var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(), dbName = "testDB",
+ ns = dbName + ".foo", coll = mongos.getCollection(ns), donor = st.shard0,
+ recipient = st.shard1, donorColl = donor.getCollection(ns),
+ recipientColl = recipient.getCollection(ns), donorLocal = donor.getDB('local'),
+ recipientLocal = recipient.getDB('local');
+
+ // Two chunks
+ // Donor: [0, 2) [2, 5)
+ // Recipient:
+ jsTest.log('Enable sharding of the collection and pre-split into two chunks....');
+
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ st.ensurePrimaryShard(dbName, shards[0]._id);
+ assert.commandWorked(donorColl.createIndex({_id: 1}));
+ assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 2}}));
+
+ // 6 documents,
+ // donor: 2 in the first chunk, 3 in the second.
+ // recipient: 1 document (shardkey overlaps with a doc in second chunk of donor shard)
+ jsTest.log('Inserting 5 docs into donor shard, 1 doc into the recipient shard....');
+
+ for (var i = 0; i < 5; ++i)
+ assert.writeOK(coll.insert({_id: i}));
+ assert.eq(5, donorColl.count());
+
+ for (var i = 2; i < 3; ++i)
+ assert.writeOK(recipientColl.insert({_id: i}));
+ assert.eq(1, recipientColl.count());
+
+ /**
+ * Set failpoint: recipient will pause migration after cloning chunk data from donor,
+ * before checking transfer mods log on donor.
+ */
+
+ jsTest.log('setting recipient failpoint cloned');
+ pauseMigrateAtStep(recipient, migrateStepNames.cloned);
+
+ /**
+ * Start moving chunk [2, 5) from donor shard to recipient shard, run in the background.
+ */
+
+ // Donor: [0, 2)
+ // Recipient: [2, 5)
+ jsTest.log('Starting chunk migration, pause after cloning...');
+
+ var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {_id: 2}, null, coll.getFullName(), shards[1]._id);
+
+ /**
+ * Wait for recipient to finish cloning.
+ * THEN update 1 document {_id: 3} on donor within the currently migrating chunk.
+ * AND delete 1 document {_id: 4} on donor within the currently migrating chunk.
+ */
+
+ waitForMigrateStep(recipient, migrateStepNames.cloned);
+
+ jsTest.log('Update 1 doc and delete 1 doc on donor within the currently migrating chunk...');
+
+ assert.writeOK(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
+ assert.writeOK(coll.remove({_id: 4}));
+
+ /**
+ * Finish migration. Unpause recipient migration, wait for it to collect
+ * the transfer mods log from donor and finish migration.
+ */
+
+ jsTest.log('Continuing and finishing migration...');
+ unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
+ joinMoveChunk();
+
+ /**
+ * Check documents are where they should be: 2 docs in donor chunk, 2 docs in recipient chunk
+ * (because third doc in recipient shard's chunk got deleted on the donor shard during
+ * migration).
+ */
+
+ jsTest.log('Checking that documents are on the shards they should be...');
+
+ assert.eq(2, recipientColl.count(), "Recipient shard doesn't have exactly 2 documents!");
+ assert.eq(2, donorColl.count(), "Donor shard doesn't have exactly 2 documents!");
+ assert.eq(4, coll.count(), "Collection total is not 4!");
+
+ /**
+ * Check that the fromMigrate flag has been set correctly in donor and recipient oplogs,
+ */
+
+ jsTest.log('Checking donor and recipient oplogs for correct fromMigrate flags...');
+
+ var donorOplogRes = donorLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
+ assert.eq(1,
+ donorOplogRes,
+ "fromMigrate flag wasn't set on the donor shard's oplog for " +
+ "migrating delete op on {_id: 2}! Test #2 failed.");
+
+ donorOplogRes =
+ donorLocal.oplog.rs.find({op: 'd', fromMigrate: {$exists: false}, 'o._id': 4}).count();
+ assert.eq(1,
+ donorOplogRes,
+ "Real delete of {_id: 4} on donor shard incorrectly set the " +
+ "fromMigrate flag in the oplog! Test #5 failed.");
+
+ var recipientOplogRes =
+ recipientLocal.oplog.rs.find({op: 'i', fromMigrate: true, 'o._id': 2}).count();
+ assert.eq(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for migrating insert op on {_id: 2}! Test #3 failed.");
+
+ recipientOplogRes =
+ recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
+ assert.eq(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for delete op on the old {_id: 2} that overlapped " +
+ "with the chunk about to be copied! Test #1 failed.");
+
+ recipientOplogRes =
+ recipientLocal.oplog.rs.find({op: 'u', fromMigrate: true, 'o._id': 3}).count();
+ assert.eq(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for update op on {_id: 3}! Test #4 failed.");
+
+ recipientOplogRes =
+ recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 4}).count();
+ assert.eq(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for delete op on {_id: 4} that occurred during " +
+ "migration! Test #5 failed.");
+
+ jsTest.log('DONE!');
+ st.stop();
})();
diff --git a/jstests/sharding/migration_with_source_ops.js b/jstests/sharding/migration_with_source_ops.js
index f518530c866..31b6fff75e9 100644
--- a/jstests/sharding/migration_with_source_ops.js
+++ b/jstests/sharding/migration_with_source_ops.js
@@ -18,135 +18,129 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
-"use strict";
-
-var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
-/**
- * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
- */
-
-var st = new ShardingTest({ shards : 2, mongos : 1 });
-st.stopBalancer();
-
-var mongos = st.s0,
- admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
- dbName = "testDB",
- ns = dbName + ".foo",
- coll = mongos.getCollection(ns),
- donor = st.shard0,
- recipient = st.shard1,
- donorColl = donor.getCollection(ns),
- recipientColl = recipient.getCollection(ns);
-
-/**
- * Exable sharding, and split collection into two chunks.
- */
-
-// Two chunks
-// Donor: [0, 20) [20, 40)
-// Recipient:
-jsTest.log('Enabling sharding of the collection and pre-splitting into two chunks....');
-assert.commandWorked(admin.runCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, shards[0]._id);
-assert.commandWorked(admin.runCommand({shardCollection: ns, key: {a: 1}}));
-assert.commandWorked(admin.runCommand({split: ns, middle: {a: 20}}));
-
-/**
- * Insert data into collection
- */
-
-// 10 documents in each chunk on the donor
-jsTest.log('Inserting 20 docs into donor shard, 10 in each chunk....');
-for (var i = 0; i < 10; ++i)
- assert.writeOK(coll.insert({a: i}));
-for (var i = 20; i < 30; ++i)
- assert.writeOK(coll.insert({a: i}));
-assert.eq(20, coll.count());
-
-/**
- * Set failpoints. Recipient will crash if an out of chunk range data op is
- * received from donor. Recipient will pause migration after cloning chunk data from donor,
- * before checking transfer mods log on donor.
- */
-
-jsTest.log('Setting failpoint failMigrationReceivedOutOfRangeOperation');
-assert.commandWorked(recipient.getDB('admin').runCommand(
+ "use strict";
+
+ var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+ /**
+ * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
+ */
+
+ var st = new ShardingTest({shards: 2, mongos: 1});
+ st.stopBalancer();
+
+ var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(), dbName = "testDB",
+ ns = dbName + ".foo", coll = mongos.getCollection(ns), donor = st.shard0,
+ recipient = st.shard1, donorColl = donor.getCollection(ns),
+ recipientColl = recipient.getCollection(ns);
+
+ /**
+     * Enable sharding, and split collection into two chunks.
+ */
+
+ // Two chunks
+ // Donor: [0, 20) [20, 40)
+ // Recipient:
+ jsTest.log('Enabling sharding of the collection and pre-splitting into two chunks....');
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ st.ensurePrimaryShard(dbName, shards[0]._id);
+ assert.commandWorked(admin.runCommand({shardCollection: ns, key: {a: 1}}));
+ assert.commandWorked(admin.runCommand({split: ns, middle: {a: 20}}));
+
+ /**
+ * Insert data into collection
+ */
+
+ // 10 documents in each chunk on the donor
+ jsTest.log('Inserting 20 docs into donor shard, 10 in each chunk....');
+ for (var i = 0; i < 10; ++i)
+ assert.writeOK(coll.insert({a: i}));
+ for (var i = 20; i < 30; ++i)
+ assert.writeOK(coll.insert({a: i}));
+ assert.eq(20, coll.count());
+
+ /**
+ * Set failpoints. Recipient will crash if an out of chunk range data op is
+ * received from donor. Recipient will pause migration after cloning chunk data from donor,
+ * before checking transfer mods log on donor.
+ */
+
+ jsTest.log('Setting failpoint failMigrationReceivedOutOfRangeOperation');
+ assert.commandWorked(recipient.getDB('admin').runCommand(
{configureFailPoint: 'failMigrationReceivedOutOfRangeOperation', mode: 'alwaysOn'}));
-jsTest.log('Setting chunk migration recipient failpoint so that it pauses after bulk clone step');
-pauseMigrateAtStep(recipient, migrateStepNames.cloned);
-
-/**
- * Start a moveChunk in the background. Move chunk [20, 40), which has 10 docs in the
- * range, from shard 0 (donor) to shard 1 (recipient). Migration will pause after
- * cloning step (when it reaches the recipient failpoint).
- */
-
-// Donor: [0, 20)
-// Recipient: [20, 40)
-jsTest.log('Starting migration, pause after cloning...');
-var joinMoveChunk = moveChunkParallel(
- staticMongod,
- st.s0.host,
- {a: 20},
- null,
- coll.getFullName(),
- shards[1]._id);
-
-/**
- * Wait for recipient to finish cloning step.
- * THEN delete 10 documents on the donor shard, 5 in the migrating chunk and 5 in the remaining chunk.
- * AND insert 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining chunk.
- * AND update 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining chunk.
- *
- * This will populate the migration transfer mods log, which the recipient will collect when it
- * is unpaused.
- */
-
-waitForMigrateStep(recipient, migrateStepNames.cloned);
-
-jsTest.log('Deleting 5 docs from each chunk, migrating chunk and remaining chunk...');
-assert.writeOK(coll.remove({$and : [ {a: {$gte: 5}}, {a: {$lt: 25}} ]}));
-
-jsTest.log('Inserting 1 in the migrating chunk range and 1 in the remaining chunk range...');
-assert.writeOK(coll.insert({a: 10}));
-assert.writeOK(coll.insert({a: 30}));
-
-jsTest.log('Updating 1 in the migrating chunk range and 1 in the remaining chunk range...');
-assert.writeOK(coll.update({a: 0}, {a: 0, updatedData: "updated"}));
-assert.writeOK(coll.update({a: 25}, {a: 25, updatedData: "updated"}));
-
-/**
- * Finish migration. Unpause recipient migration, wait for it to collect
- * the new ops from the donor shard's migration transfer mods log, and finish.
- */
-
-jsTest.log('Continuing and finishing migration...');
-unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
-joinMoveChunk();
-
-/**
- * Check documents are where they should be: 6 docs in each shard's respective chunk.
- */
-
-jsTest.log('Checking that documents are on the shards they should be...');
-assert.eq(6, donorColl.count());
-assert.eq(6, recipientColl.count());
-assert.eq(12, coll.count());
-
-/**
- * Check that the updated documents are where they should be, one on each shard.
- */
-
-jsTest.log('Checking that documents were updated correctly...');
-var donorCollUpdatedNum = donorColl.find({updatedData: "updated"}).count();
-assert.eq(1, donorCollUpdatedNum, "Update failed on donor shard during migration!");
-var recipientCollUpdatedNum = recipientColl.find({updatedData: "updated"}).count();
-assert.eq(1, recipientCollUpdatedNum, "Update failed on recipient shard during migration!");
-
-jsTest.log('DONE!');
-st.stop();
+ jsTest.log(
+ 'Setting chunk migration recipient failpoint so that it pauses after bulk clone step');
+ pauseMigrateAtStep(recipient, migrateStepNames.cloned);
+
+ /**
+ * Start a moveChunk in the background. Move chunk [20, 40), which has 10 docs in the
+ * range, from shard 0 (donor) to shard 1 (recipient). Migration will pause after
+ * cloning step (when it reaches the recipient failpoint).
+ */
+
+ // Donor: [0, 20)
+ // Recipient: [20, 40)
+ jsTest.log('Starting migration, pause after cloning...');
+ var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 20}, null, coll.getFullName(), shards[1]._id);
+
+ /**
+ * Wait for recipient to finish cloning step.
+ * THEN delete 10 documents on the donor shard, 5 in the migrating chunk and 5 in the remaining
+ *chunk.
+ * AND insert 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining
+ *chunk.
+ * AND update 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining
+ *chunk.
+ *
+ * This will populate the migration transfer mods log, which the recipient will collect when it
+ * is unpaused.
+ */
+
+ waitForMigrateStep(recipient, migrateStepNames.cloned);
+
+ jsTest.log('Deleting 5 docs from each chunk, migrating chunk and remaining chunk...');
+ assert.writeOK(coll.remove({$and: [{a: {$gte: 5}}, {a: {$lt: 25}}]}));
+
+ jsTest.log('Inserting 1 in the migrating chunk range and 1 in the remaining chunk range...');
+ assert.writeOK(coll.insert({a: 10}));
+ assert.writeOK(coll.insert({a: 30}));
+
+ jsTest.log('Updating 1 in the migrating chunk range and 1 in the remaining chunk range...');
+ assert.writeOK(coll.update({a: 0}, {a: 0, updatedData: "updated"}));
+ assert.writeOK(coll.update({a: 25}, {a: 25, updatedData: "updated"}));
+
+ /**
+ * Finish migration. Unpause recipient migration, wait for it to collect
+ * the new ops from the donor shard's migration transfer mods log, and finish.
+ */
+
+ jsTest.log('Continuing and finishing migration...');
+ unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
+ joinMoveChunk();
+
+ /**
+ * Check documents are where they should be: 6 docs in each shard's respective chunk.
+ */
+
+ jsTest.log('Checking that documents are on the shards they should be...');
+ assert.eq(6, donorColl.count());
+ assert.eq(6, recipientColl.count());
+ assert.eq(12, coll.count());
+
+ /**
+ * Check that the updated documents are where they should be, one on each shard.
+ */
+
+ jsTest.log('Checking that documents were updated correctly...');
+ var donorCollUpdatedNum = donorColl.find({updatedData: "updated"}).count();
+ assert.eq(1, donorCollUpdatedNum, "Update failed on donor shard during migration!");
+ var recipientCollUpdatedNum = recipientColl.find({updatedData: "updated"}).count();
+ assert.eq(1, recipientCollUpdatedNum, "Update failed on recipient shard during migration!");
+
+ jsTest.log('DONE!');
+ st.stop();
})();
diff --git a/jstests/sharding/min_optime_recovery.js b/jstests/sharding/min_optime_recovery.js
index b3e1b1c45cc..d77f1e2ad42 100644
--- a/jstests/sharding/min_optime_recovery.js
+++ b/jstests/sharding/min_optime_recovery.js
@@ -7,81 +7,81 @@
* @tags: [requires_persistence]
*/
(function() {
-"use strict";
+ "use strict";
-var runTest = function(withRecovery) {
- var st = new ShardingTest({ shards: 2 });
+ var runTest = function(withRecovery) {
+ var st = new ShardingTest({shards: 2});
- var testDB = st.s.getDB('test');
- testDB.adminCommand({ enableSharding: 'test' });
- st.ensurePrimaryShard('test', 'shard0000');
- testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+ var testDB = st.s.getDB('test');
+ testDB.adminCommand({enableSharding: 'test'});
+ st.ensurePrimaryShard('test', 'shard0000');
+ testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- var opTimeBeforeMigrate = null;
- if (st.configRS) {
- var priConn = st.configRS.getPrimary();
- var replStatus = priConn.getDB('admin').runCommand({ replSetGetStatus: 1 });
- replStatus.members.forEach(function(memberState) {
- if (memberState.state == 1) { // if primary
- opTimeBeforeMigrate = memberState.optime;
+ var opTimeBeforeMigrate = null;
+ if (st.configRS) {
+ var priConn = st.configRS.getPrimary();
+ var replStatus = priConn.getDB('admin').runCommand({replSetGetStatus: 1});
+ replStatus.members.forEach(function(memberState) {
+ if (memberState.state == 1) { // if primary
+ opTimeBeforeMigrate = memberState.optime;
- assert.neq(null, opTimeBeforeMigrate);
- assert.neq(null, opTimeBeforeMigrate.ts);
- assert.neq(null, opTimeBeforeMigrate.t);
- }
- });
- }
+ assert.neq(null, opTimeBeforeMigrate);
+ assert.neq(null, opTimeBeforeMigrate.ts);
+ assert.neq(null, opTimeBeforeMigrate.t);
+ }
+ });
+ }
- testDB.adminCommand({ moveChunk: 'test.user', find: { x: 0 }, to: 'shard0001' });
+ testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0001'});
- var shardAdmin = st.d0.getDB('admin');
- var doc = shardAdmin.system.version.findOne();
+ var shardAdmin = st.d0.getDB('admin');
+ var doc = shardAdmin.system.version.findOne();
- if (st.configRS) {
- assert.neq(null, doc);
- assert.eq('minOpTimeRecovery', doc._id);
- assert.eq(st.configRS.getURL(), doc.configsvrConnectionString);
- assert.eq('shard0000', doc.shardName);
- assert.gt(doc.minOpTime.ts.getTime(), 0);
- }
- else {
- assert.eq(null, doc);
- }
+ if (st.configRS) {
+ assert.neq(null, doc);
+ assert.eq('minOpTimeRecovery', doc._id);
+ assert.eq(st.configRS.getURL(), doc.configsvrConnectionString);
+ assert.eq('shard0000', doc.shardName);
+ assert.gt(doc.minOpTime.ts.getTime(), 0);
+ } else {
+ assert.eq(null, doc);
+ }
- var restartCmdLineOptions = Object.merge(st.d0.fullOptions, {
- setParameter: 'recoverShardingState=' + (withRecovery? 'true' : 'false'),
- restart: true
- });
+ var restartCmdLineOptions = Object.merge(
+ st.d0.fullOptions,
+ {
+ setParameter: 'recoverShardingState=' + (withRecovery ? 'true' : 'false'),
+ restart: true
+ });
- // Restart the shard that donated a chunk to trigger the optime recovery logic.
- st.stopMongod(0);
- var newMongod = MongoRunner.runMongod(restartCmdLineOptions);
- var shardingSection = newMongod.getDB('admin').runCommand({ serverStatus: 1 }).sharding;
+ // Restart the shard that donated a chunk to trigger the optime recovery logic.
+ st.stopMongod(0);
+ var newMongod = MongoRunner.runMongod(restartCmdLineOptions);
+ var shardingSection = newMongod.getDB('admin').runCommand({serverStatus: 1}).sharding;
- if (st.configRS && withRecovery) {
- assert.neq(null, shardingSection);
+ if (st.configRS && withRecovery) {
+ assert.neq(null, shardingSection);
- // Confirm that the config server string points to an actual config server replica set.
- var configConnStr = shardingSection.configsvrConnectionString;
- var configConn = new Mongo(configConnStr);
- var configIsMaster = configConn.getDB('admin').runCommand({ isMaster: 1 });
- assert.gt(configConnStr.indexOf('/'), 0);
- assert.eq(1, configIsMaster.configsvr); // If it's a shard, this field won't exist.
+ // Confirm that the config server string points to an actual config server replica set.
+ var configConnStr = shardingSection.configsvrConnectionString;
+ var configConn = new Mongo(configConnStr);
+ var configIsMaster = configConn.getDB('admin').runCommand({isMaster: 1});
+ assert.gt(configConnStr.indexOf('/'), 0);
+ assert.eq(1, configIsMaster.configsvr); // If it's a shard, this field won't exist.
- var configOpTimeObj = shardingSection.lastSeenConfigServerOpTime;
- assert.neq(null, configOpTimeObj);
- assert.gte(configOpTimeObj.ts.getTime(), opTimeBeforeMigrate.ts.getTime());
- assert.gte(configOpTimeObj.t, opTimeBeforeMigrate.t);
- }
- else {
- assert.eq(null, shardingSection);
- }
+ var configOpTimeObj = shardingSection.lastSeenConfigServerOpTime;
+ assert.neq(null, configOpTimeObj);
+ assert.gte(configOpTimeObj.ts.getTime(), opTimeBeforeMigrate.ts.getTime());
+ assert.gte(configOpTimeObj.t, opTimeBeforeMigrate.t);
+ } else {
+ assert.eq(null, shardingSection);
+ }
- MongoRunner.stopMongod(newMongod.port);
- st.stop();
-};
+ MongoRunner.stopMongod(newMongod.port);
+ st.stop();
+ };
-runTest(true);
-runTest(false);
+ runTest(true);
+ runTest(false);
})();
diff --git a/jstests/sharding/missing_key.js b/jstests/sharding/missing_key.js
index 2eebc0d0912..588d85e1a95 100644
--- a/jstests/sharding/missing_key.js
+++ b/jstests/sharding/missing_key.js
@@ -1,40 +1,39 @@
// Test that the shardCollection command fails when a preexisting document lacks a shard key field.
// SERVER-8772
-var st = new ShardingTest( { shards: 1 } );
+var st = new ShardingTest({shards: 1});
st.stopBalancer();
-var db = st.s.getDB( 'testDb' );
+var db = st.s.getDB('testDb');
var coll = db.testColl;
-coll.insert( { x:1, z:1 } );
-coll.insert( { y:1, z:1 } );
-db.adminCommand( { enableSharding:'testDb' } );
+coll.insert({x: 1, z: 1});
+coll.insert({y: 1, z: 1});
+db.adminCommand({enableSharding: 'testDb'});
/**
* Assert that the shardCollection command fails, with a preexisting index on the provided
* 'shardKey'.
*/
-function assertInvalidShardKey( shardKey ) {
-
+function assertInvalidShardKey(shardKey) {
// Manually create a shard key index.
coll.dropIndexes();
- coll.ensureIndex( shardKey );
+ coll.ensureIndex(shardKey);
// Ensure that the shard key index identifies 'x' as present in one document and absent in the
// other.
- assert.eq( 1, coll.find( { x:1 } ).hint( shardKey ).itcount() );
- assert.eq( 1, coll.find( { x:{ $exists:false } } ).hint( shardKey ).itcount() );
+ assert.eq(1, coll.find({x: 1}).hint(shardKey).itcount());
+ assert.eq(1, coll.find({x: {$exists: false}}).hint(shardKey).itcount());
// Assert that the shardCollection command fails with the provided 'shardKey'.
- assert.commandFailed( db.adminCommand( { shardCollection:'testDb.testColl', key:shardKey } ),
- 'shardCollection should have failed on key ' + tojson( shardKey ) );
+ assert.commandFailed(db.adminCommand({shardCollection: 'testDb.testColl', key: shardKey}),
+ 'shardCollection should have failed on key ' + tojson(shardKey));
}
// Test single, compound, and hashed shard keys.
-assertInvalidShardKey( { x:1 } );
-assertInvalidShardKey( { x:1, y:1 } );
-assertInvalidShardKey( { y:1, x:1 } );
-assertInvalidShardKey( { x:'hashed' } );
+assertInvalidShardKey({x: 1});
+assertInvalidShardKey({x: 1, y: 1});
+assertInvalidShardKey({y: 1, x: 1});
+assertInvalidShardKey({x: 'hashed'});
st.stop();
diff --git a/jstests/sharding/mongos_no_detect_sharding.js b/jstests/sharding/mongos_no_detect_sharding.js
index 1e5c8832ac0..608b2ca0bdb 100644
--- a/jstests/sharding/mongos_no_detect_sharding.js
+++ b/jstests/sharding/mongos_no_detect_sharding.js
@@ -1,44 +1,42 @@
// Tests whether new sharding is detected on insert by mongos
(function() {
-var st = new ShardingTest({ name: "mongos_no_detect_sharding",
- shards: 1,
- mongos: 2 });
+ var st = new ShardingTest({name: "mongos_no_detect_sharding", shards: 1, mongos: 2});
-var mongos = st.s;
-var config = mongos.getDB("config");
+ var mongos = st.s;
+ var config = mongos.getDB("config");
-print( "Creating unsharded connection..." );
+ print("Creating unsharded connection...");
-var mongos2 = st._mongos[1];
+ var mongos2 = st._mongos[1];
-var coll = mongos2.getCollection( "test.foo" );
-coll.insert({ i : 0 });
+ var coll = mongos2.getCollection("test.foo");
+ coll.insert({i: 0});
-print( "Sharding collection..." );
+ print("Sharding collection...");
-var admin = mongos.getDB("admin");
+ var admin = mongos.getDB("admin");
-assert.eq( coll.getShardVersion().ok, 0 );
+ assert.eq(coll.getShardVersion().ok, 0);
-admin.runCommand({ enableSharding : "test" });
-admin.runCommand({ shardCollection : "test.foo", key : { _id : 1 } });
+ admin.runCommand({enableSharding: "test"});
+ admin.runCommand({shardCollection: "test.foo", key: {_id: 1}});
-print( "Seeing if data gets inserted unsharded..." );
-print( "No splits occur here!" );
+ print("Seeing if data gets inserted unsharded...");
+ print("No splits occur here!");
-// Insert a bunch of data which should trigger a split
-var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ ){
- bulk.insert({ i : i + 1 });
-}
-assert.writeOK(bulk.execute());
+ // Insert a bunch of data which should trigger a split
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++) {
+ bulk.insert({i: i + 1});
+ }
+ assert.writeOK(bulk.execute());
-st.printShardingStatus( true );
+ st.printShardingStatus(true);
-assert.eq( coll.getShardVersion().ok, 1 );
-assert.eq( 101, coll.find().itcount() );
+ assert.eq(coll.getShardVersion().ok, 1);
+ assert.eq(101, coll.find().itcount());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/mongos_no_replica_set_refresh.js b/jstests/sharding/mongos_no_replica_set_refresh.js
index 48aa3ca27f9..3d9af893b55 100644
--- a/jstests/sharding/mongos_no_replica_set_refresh.js
+++ b/jstests/sharding/mongos_no_replica_set_refresh.js
@@ -1,92 +1,100 @@
// Tests whether new sharding is detected on insert by mongos
load("jstests/replsets/rslib.js");
-(function () {
-'use strict';
-
-var st = new ShardingTest({ name: 'mongos_no_replica_set_refresh',
- shards: 1,
- mongos: 1,
- other: {
- rs0: {
- nodes: [
- {},
- {rsConfig: {priority: 0}},
- {rsConfig: {priority: 0}},
- ],
- }
- } });
-
-var rsObj = st.rs0;
-assert.commandWorked(
- rsObj.nodes[0].adminCommand({
+(function() {
+ 'use strict';
+
+ var st = new ShardingTest({
+ name: 'mongos_no_replica_set_refresh',
+ shards: 1,
+ mongos: 1,
+ other: {
+ rs0: {
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}, ],
+ }
+ }
+ });
+
+ var rsObj = st.rs0;
+ assert.commandWorked(rsObj.nodes[0].adminCommand({
replSetTest: 1,
waitForMemberState: ReplSetTest.State.PRIMARY,
timeoutMillis: 60 * 1000,
}),
- 'node 0 ' + rsObj.nodes[0].host + ' failed to become primary'
-);
+ 'node 0 ' + rsObj.nodes[0].host + ' failed to become primary');
-var mongos = st.s;
-var config = mongos.getDB("config");
+ var mongos = st.s;
+ var config = mongos.getDB("config");
-printjson( mongos.getCollection("foo.bar").findOne() );
+ printjson(mongos.getCollection("foo.bar").findOne());
-jsTestLog( "Reconfiguring replica set..." );
+ jsTestLog("Reconfiguring replica set...");
-var rsConfig = rsObj.getReplSetConfigFromNode(0);
+ var rsConfig = rsObj.getReplSetConfigFromNode(0);
-// Now remove the last node in the config.
-var removedNode = rsConfig.members.pop();
-rsConfig.version++;
-reconfig(rsObj, rsConfig);
+ // Now remove the last node in the config.
+ var removedNode = rsConfig.members.pop();
+ rsConfig.version++;
+ reconfig(rsObj, rsConfig);
-// Wait for the election round to complete
-rsObj.getPrimary();
+ // Wait for the election round to complete
+ rsObj.getPrimary();
-var numRSHosts = function(){
- var result = assert.commandWorked(rsObj.nodes[0].adminCommand({ismaster : 1}));
- jsTestLog('Nodes in ' + rsObj.name + ': ' + tojson(result));
- return result.hosts.length + result.passives.length;
-};
+ var numRSHosts = function() {
+ var result = assert.commandWorked(rsObj.nodes[0].adminCommand({ismaster: 1}));
+ jsTestLog('Nodes in ' + rsObj.name + ': ' + tojson(result));
+ return result.hosts.length + result.passives.length;
+ };
-assert.soon( function(){ return numRSHosts() < 3; } );
+ assert.soon(function() {
+ return numRSHosts() < 3;
+ });
-var numMongosHosts = function(){
- var commandResult = assert.commandWorked(mongos.adminCommand("connPoolStats"));
- var result = commandResult.replicaSets[rsObj.name];
- jsTestLog('Nodes in ' + rsObj.name + ' connected to mongos: ' + tojson(result));
- return result.hosts.length;
-};
+ var numMongosHosts = function() {
+ var commandResult = assert.commandWorked(mongos.adminCommand("connPoolStats"));
+ var result = commandResult.replicaSets[rsObj.name];
+ jsTestLog('Nodes in ' + rsObj.name + ' connected to mongos: ' + tojson(result));
+ return result.hosts.length;
+ };
-// Wait for ReplicaSetMonitor to refresh; it should discover that the set now has only 2 hosts.
-assert.soon( function(){ return numMongosHosts() < 3; } );
+ // Wait for ReplicaSetMonitor to refresh; it should discover that the set now has only 2 hosts.
+ assert.soon(function() {
+ return numMongosHosts() < 3;
+ });
-jsTestLog( "Mongos successfully detected change..." );
+ jsTestLog("Mongos successfully detected change...");
-var configServerURL = function(){
- var result = config.shards.find().toArray()[0];
- printjson( result );
- return result.host;
-};
+ var configServerURL = function() {
+ var result = config.shards.find().toArray()[0];
+ printjson(result);
+ return result.host;
+ };
-assert.soon( function(){ return configServerURL().indexOf( removedNode.host ) < 0; } );
+ assert.soon(function() {
+ return configServerURL().indexOf(removedNode.host) < 0;
+ });
-jsTestLog( "Now test adding new replica set servers..." );
+ jsTestLog("Now test adding new replica set servers...");
-config.shards.update({ _id : rsObj.name }, { $set : { host : rsObj.name + "/" + rsObj.nodes[0].host } });
-printjson( config.shards.find().toArray() );
+ config.shards.update({_id: rsObj.name}, {$set: {host: rsObj.name + "/" + rsObj.nodes[0].host}});
+ printjson(config.shards.find().toArray());
-rsConfig.members.push(removedNode);
-rsConfig.version++;
-reconfig(rsObj, rsConfig);
+ rsConfig.members.push(removedNode);
+ rsConfig.version++;
+ reconfig(rsObj, rsConfig);
-assert.soon( function(){ return numRSHosts() > 2; } );
+ assert.soon(function() {
+ return numRSHosts() > 2;
+ });
-assert.soon( function(){ return numMongosHosts() > 2; } );
+ assert.soon(function() {
+ return numMongosHosts() > 2;
+ });
-assert.soon( function(){ return configServerURL().indexOf( removedNode.host ) >= 0; } );
+ assert.soon(function() {
+ return configServerURL().indexOf(removedNode.host) >= 0;
+ });
-st.stop();
+ st.stop();
}());
diff --git a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
index f4dd12b0ecc..8eaf9653f11 100644
--- a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
@@ -11,48 +11,47 @@
// (connection connected after shard change).
//
-var options = { rs : true,
- rsOptions : { nodes : 2 },
- keyFile : "jstests/libs/key1" };
+var options = {
+ rs: true,
+ rsOptions: {nodes: 2},
+ keyFile: "jstests/libs/key1"
+};
-var st = new ShardingTest({shards : 3, mongos : 1, other : options});
+var st = new ShardingTest({shards: 3, mongos: 1, other: options});
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
+var admin = mongos.getDB("admin");
jsTest.log("Setting up initial admin user...");
var adminUser = "adminUser";
var password = "password";
// Create a user
-admin.createUser({ user : adminUser, pwd : password, roles: [ "root" ] });
+admin.createUser({user: adminUser, pwd: password, roles: ["root"]});
// There's an admin user now, so we need to login to do anything
// Login as admin user
admin.auth(adminUser, password);
st.stopBalancer();
-var shards = mongos.getDB( "config" ).shards.find().toArray();
+var shards = mongos.getDB("config").shards.find().toArray();
-assert.commandWorked( admin.runCommand({ setParameter : 1, traceExceptions : true }) );
+assert.commandWorked(admin.runCommand({setParameter: 1, traceExceptions: true}));
-var collSharded = mongos.getCollection( "fooSharded.barSharded" );
-var collUnsharded = mongos.getCollection( "fooUnsharded.barUnsharded" );
+var collSharded = mongos.getCollection("fooSharded.barSharded");
+var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");
// Create the unsharded database with shard0 primary
-assert.writeOK(collUnsharded.insert({ some : "doc" }));
+assert.writeOK(collUnsharded.insert({some: "doc"}));
assert.writeOK(collUnsharded.remove({}));
-printjson( admin.runCommand({ movePrimary : collUnsharded.getDB().toString(),
- to : shards[0]._id }) );
+printjson(admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: shards[0]._id}));
// Create the sharded database with shard1 primary
-assert.commandWorked( admin.runCommand({ enableSharding : collSharded.getDB().toString() }) );
-printjson( admin.runCommand({ movePrimary : collSharded.getDB().toString(), to : shards[1]._id }) );
-assert.commandWorked( admin.runCommand({ shardCollection : collSharded.toString(),
- key : { _id : 1 } }) );
-assert.commandWorked( admin.runCommand({ split : collSharded.toString(), middle : { _id : 0 } }) );
-assert.commandWorked( admin.runCommand({ moveChunk : collSharded.toString(),
- find : { _id : -1 },
- to : shards[0]._id }) );
+assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
+printjson(admin.runCommand({movePrimary: collSharded.getDB().toString(), to: shards[1]._id}));
+assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: collSharded.toString(), find: {_id: -1}, to: shards[0]._id}));
st.printShardingStatus();
var shardedDBUser = "shardedDBUser";
@@ -61,16 +60,14 @@ var unshardedDBUser = "unshardedDBUser";
jsTest.log("Setting up database users...");
// Create db users
-collSharded.getDB().createUser({ user : shardedDBUser,
- pwd : password, roles : [ "readWrite" ] });
-collUnsharded.getDB().createUser({ user : unshardedDBUser,
- pwd : password, roles : [ "readWrite" ] });
+collSharded.getDB().createUser({user: shardedDBUser, pwd: password, roles: ["readWrite"]});
+collUnsharded.getDB().createUser({user: unshardedDBUser, pwd: password, roles: ["readWrite"]});
admin.logout();
-function authDBUsers( conn ) {
- conn.getDB( collSharded.getDB().toString() ).auth(shardedDBUser, password);
- conn.getDB( collUnsharded.getDB().toString() ).auth(unshardedDBUser, password);
+function authDBUsers(conn) {
+ conn.getDB(collSharded.getDB().toString()).auth(shardedDBUser, password);
+ conn.getDB(collUnsharded.getDB().toString()).auth(unshardedDBUser, password);
return conn;
}
@@ -80,65 +77,67 @@ function authDBUsers( conn ) {
jsTest.log("Inserting initial data...");
-var mongosConnActive = authDBUsers( new Mongo( mongos.host ) );
+var mongosConnActive = authDBUsers(new Mongo(mongos.host));
authDBUsers(mongosConnActive);
var mongosConnIdle = null;
var mongosConnNew = null;
-var wc = {writeConcern: {w: 2, wtimeout: 60000}};
+var wc = {
+ writeConcern: {w: 2, wtimeout: 60000}
+};
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 }, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
jsTest.log("Stopping primary of third shard...");
-mongosConnIdle = authDBUsers( new Mongo( mongos.host ) );
+mongosConnIdle = authDBUsers(new Mongo(mongos.host));
st.rs2.stop(st.rs2.getPrimary());
jsTest.log("Testing active connection with third primary down...");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 }, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
jsTest.log("Testing idle connection with third primary down...");
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 }, wc));
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 }, wc));
-assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 }, wc));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
+assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
jsTest.log("Testing new connections with third primary down...");
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 }, wc));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 }, wc));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 }, wc));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
-gc(); // Clean up new connections
+gc(); // Clean up new connections
jsTest.log("Stopping primary of second shard...");
mongosConnActive.setSlaveOk();
-mongosConnIdle = authDBUsers( new Mongo( mongos.host ) );
+mongosConnIdle = authDBUsers(new Mongo(mongos.host));
mongosConnIdle.setSlaveOk();
// Need to save this node for later
@@ -148,142 +147,137 @@ st.rs1.stop(st.rs1.getPrimary());
jsTest.log("Testing active connection with second primary down...");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 }, wc));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 }, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}, wc));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
jsTest.log("Testing idle connection with second primary down...");
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 }, wc));
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 }, wc));
-assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 }, wc));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}, wc));
+assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
jsTest.log("Testing new connections with second primary down...");
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 }, wc));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 }, wc));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 }, wc));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}, wc));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
-gc(); // Clean up new connections
+gc(); // Clean up new connections
jsTest.log("Stopping primary of first shard...");
mongosConnActive.setSlaveOk();
-mongosConnIdle = authDBUsers( new Mongo( mongos.host ) );
+mongosConnIdle = authDBUsers(new Mongo(mongos.host));
mongosConnIdle.setSlaveOk();
st.rs0.stop(st.rs0.getPrimary());
jsTest.log("Testing active connection with first primary down...");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -8 }));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 8 }));
-assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 8 }));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -8}));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 8}));
+assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 8}));
jsTest.log("Testing idle connection with first primary down...");
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -9 }));
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 9 }));
-assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 9 }));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -9}));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 9}));
+assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 9}));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
jsTest.log("Testing new connections with first primary down...");
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -10 }));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 10 }));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 10 }));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -10}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 10}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 10}));
-gc(); // Clean up new connections
+gc(); // Clean up new connections
jsTest.log("Stopping second shard...");
mongosConnActive.setSlaveOk();
-mongosConnIdle = authDBUsers( new Mongo( mongos.host ) );
+mongosConnIdle = authDBUsers(new Mongo(mongos.host));
mongosConnIdle.setSlaveOk();
st.rs1.stop(rs1Secondary);
jsTest.log("Testing active connection with second shard down...");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -11 }));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 11 }));
-assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 11 }));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -11}));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 11}));
+assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 11}));
jsTest.log("Testing idle connection with second shard down...");
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -12 }));
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 12 }));
-assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 12 }));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -12}));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 12}));
+assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 12}));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
jsTest.log("Testing new connections with second shard down...");
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -13 }));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 13 }));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 13 }));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -13}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 13}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 13}));
-gc(); // Clean up new connections
+gc(); // Clean up new connections
jsTest.log("DONE!");
st.stop();
-
-
-
-
-
diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
index b5117439925..e24566605ce 100644
--- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
@@ -11,406 +11,406 @@
// (connection connected after shard change).
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 3, mongos: 1, other: { rs: true, rsOptions: { nodes: 2 } } });
+ var st = new ShardingTest({shards: 3, mongos: 1, other: {rs: true, rsOptions: {nodes: 2}}});
-var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getDB( "config" ).shards.find().toArray();
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getDB("config").shards.find().toArray();
-assert.commandWorked( admin.runCommand({ setParameter : 1, traceExceptions : true }) );
+ assert.commandWorked(admin.runCommand({setParameter: 1, traceExceptions: true}));
-var collSharded = mongos.getCollection( "fooSharded.barSharded" );
-var collUnsharded = mongos.getCollection( "fooUnsharded.barUnsharded" );
+ var collSharded = mongos.getCollection("fooSharded.barSharded");
+ var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");
-// Create the unsharded database
-assert.writeOK(collUnsharded.insert({ some : "doc" }));
-assert.writeOK(collUnsharded.remove({}));
-printjson( admin.runCommand({ movePrimary : collUnsharded.getDB().toString(),
- to : shards[0]._id }) );
+ // Create the unsharded database
+ assert.writeOK(collUnsharded.insert({some: "doc"}));
+ assert.writeOK(collUnsharded.remove({}));
+ printjson(admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: shards[0]._id}));
-// Create the sharded database
-assert.commandWorked( admin.runCommand({ enableSharding : collSharded.getDB().toString() }) );
-printjson( admin.runCommand({ movePrimary : collSharded.getDB().toString(), to : shards[0]._id }) );
-assert.commandWorked( admin.runCommand({ shardCollection : collSharded.toString(),
- key : { _id : 1 } }) );
-assert.commandWorked( admin.runCommand({ split : collSharded.toString(), middle : { _id : 0 } }) );
-assert.commandWorked( admin.runCommand({ moveChunk : collSharded.toString(),
- find : { _id : 0 },
- to : shards[1]._id }) );
+ // Create the sharded database
+ assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
+ printjson(admin.runCommand({movePrimary: collSharded.getDB().toString(), to: shards[0]._id}));
+ assert.commandWorked(
+ admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: collSharded.toString(), find: {_id: 0}, to: shards[1]._id}));
-st.printShardingStatus();
+ st.printShardingStatus();
-//
-// Setup is complete
-//
+ //
+ // Setup is complete
+ //
-jsTest.log("Inserting initial data...");
+ jsTest.log("Inserting initial data...");
-var mongosConnActive = new Mongo( mongos.host );
-var mongosConnIdle = null;
-var mongosConnNew = null;
+ var mongosConnActive = new Mongo(mongos.host);
+ var mongosConnIdle = null;
+ var mongosConnNew = null;
-var wc = {writeConcern: {w: 2, wtimeout: 60000}};
+ var wc = {
+ writeConcern: {w: 2, wtimeout: 60000}
+ };
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 }, wc));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
+ assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
-jsTest.log("Stopping primary of third shard...");
+ jsTest.log("Stopping primary of third shard...");
-mongosConnIdle = new Mongo( mongos.host );
+ mongosConnIdle = new Mongo(mongos.host);
-st.rs2.stop(st.rs2.getPrimary());
+ st.rs2.stop(st.rs2.getPrimary());
-jsTest.log("Testing active connection with third primary down...");
+ jsTest.log("Testing active connection with third primary down...");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 }, wc));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
+ assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
-jsTest.log("Testing idle connection with third primary down...");
+ jsTest.log("Testing idle connection with third primary down...");
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 }, wc));
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 }, wc));
-assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 }, wc));
+ assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
+ assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
+ assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-jsTest.log("Testing new connections with third primary down...");
+ jsTest.log("Testing new connections with third primary down...");
-mongosConnNew = new Mongo( mongos.host );
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = new Mongo( mongos.host );
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-mongosConnNew = new Mongo( mongos.host );
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
-
-mongosConnNew = new Mongo( mongos.host );
-assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 }, wc));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 }, wc));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 }, wc));
-
-gc(); // Clean up new connections
-
-jsTest.log("Stopping primary of second shard...");
-
-mongosConnIdle = new Mongo( mongos.host );
-
-// Need to save this node for later
-var rs1Secondary = st.rs1.getSecondary();
-
-st.rs1.stop(st.rs1.getPrimary());
-
-jsTest.log("Testing active connection with second primary down...");
-
-// Reads with read prefs
-mongosConnActive.setSlaveOk();
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnActive.setSlaveOk(false);
-
-mongosConnActive.setReadPref("primary");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.throws(function() {
- mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 });
-});
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-//Ensure read prefs override slaveOK
-mongosConnActive.setSlaveOk();
-mongosConnActive.setReadPref("primary");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.throws(function() {
- mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 });
-});
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnActive.setSlaveOk(false);
-
-mongosConnActive.setReadPref("secondary");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-mongosConnActive.setReadPref("primaryPreferred");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-mongosConnActive.setReadPref("secondaryPreferred");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-mongosConnActive.setReadPref("nearest");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-// Writes
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 }, wc));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 }, wc));
-
-jsTest.log("Testing idle connection with second primary down...");
-
-// Writes
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 }, wc));
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 }, wc));
-assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 }, wc));
-
-// Reads with read prefs
-mongosConnIdle.setSlaveOk();
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
-mongosConnIdle.setSlaveOk(false);
-
-mongosConnIdle.setReadPref("primary");
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.throws(function() {
- mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 });
-});
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-// Ensure read prefs override slaveOK
-mongosConnIdle.setSlaveOk();
-mongosConnIdle.setReadPref("primary");
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.throws(function() {
- mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 });
-});
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnIdle.setSlaveOk(false);
-
-mongosConnIdle.setReadPref("secondary");
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-mongosConnIdle.setReadPref("primaryPreferred");
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-mongosConnIdle.setReadPref("secondaryPreferred");
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-mongosConnIdle.setReadPref("nearest");
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-jsTest.log("Testing new connections with second primary down...");
-
-// Reads with read prefs
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("primary");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("primary");
-assert.throws(function() {
- mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 });
-});
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("primary");
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-// Ensure read prefs override slaveok
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-mongosConnNew.setReadPref("primary");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-mongosConnNew.setReadPref("primary");
-assert.throws(function() {
- mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 });
-});
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-mongosConnNew.setReadPref("primary");
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("secondary");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("secondary");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("secondary");
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("primaryPreferred");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("primaryPreferred");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("primaryPreferred");
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("secondaryPreferred");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("secondaryPreferred");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("secondaryPreferred");
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("nearest");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("nearest");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("nearest");
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-// Writes
-mongosConnNew = new Mongo( mongos.host );
-assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 }, wc));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 }, wc));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 }, wc));
-
-gc(); // Clean up new connections
-
-jsTest.log("Stopping primary of first shard...");
-
-mongosConnIdle = new Mongo( mongos.host );
-
-st.rs0.stop(st.rs0.getPrimary());
-
-jsTest.log("Testing active connection with first primary down...");
-
-mongosConnActive.setSlaveOk();
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -8 }));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 8 }));
-assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 8 }));
-
-jsTest.log("Testing idle connection with first primary down...");
-
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -9 }));
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 9 }));
-assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 9 }));
-
-mongosConnIdle.setSlaveOk();
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
-
-jsTest.log("Testing new connections with first primary down...");
-
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+ mongosConnNew = new Mongo(mongos.host);
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
+
+ gc(); // Clean up new connections
+
+ jsTest.log("Stopping primary of second shard...");
+
+ mongosConnIdle = new Mongo(mongos.host);
+
+ // Need to save this node for later
+ var rs1Secondary = st.rs1.getSecondary();
+
+ st.rs1.stop(st.rs1.getPrimary());
+
+ jsTest.log("Testing active connection with second primary down...");
+
+ // Reads with read prefs
+ mongosConnActive.setSlaveOk();
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+ mongosConnActive.setSlaveOk(false);
+
+ mongosConnActive.setReadPref("primary");
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.throws(function() {
+ mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
+ });
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ // Ensure read prefs override slaveOK
+ mongosConnActive.setSlaveOk();
+ mongosConnActive.setReadPref("primary");
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.throws(function() {
+ mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
+ });
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+ mongosConnActive.setSlaveOk(false);
+
+ mongosConnActive.setReadPref("secondary");
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnActive.setReadPref("primaryPreferred");
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnActive.setReadPref("secondaryPreferred");
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnActive.setReadPref("nearest");
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ // Writes
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
+ assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}, wc));
+ assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
+
+ jsTest.log("Testing idle connection with second primary down...");
+
+ // Writes
+ assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
+ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}, wc));
+ assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
+
+ // Reads with read prefs
+ mongosConnIdle.setSlaveOk();
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+ mongosConnIdle.setSlaveOk(false);
+
+ mongosConnIdle.setReadPref("primary");
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.throws(function() {
+ mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
+ });
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ // Ensure read prefs override slaveOK
+ mongosConnIdle.setSlaveOk();
+ mongosConnIdle.setReadPref("primary");
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.throws(function() {
+ mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
+ });
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+ mongosConnIdle.setSlaveOk(false);
+
+ mongosConnIdle.setReadPref("secondary");
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnIdle.setReadPref("primaryPreferred");
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnIdle.setReadPref("secondaryPreferred");
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnIdle.setReadPref("nearest");
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ jsTest.log("Testing new connections with second primary down...");
+
+ // Reads with read prefs
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("primary");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("primary");
+ assert.throws(function() {
+ mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
+ });
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("primary");
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ // Ensure read prefs override slaveok
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ mongosConnNew.setReadPref("primary");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ mongosConnNew.setReadPref("primary");
+ assert.throws(function() {
+ mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
+ });
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ mongosConnNew.setReadPref("primary");
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("secondary");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("secondary");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("secondary");
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("primaryPreferred");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("primaryPreferred");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("primaryPreferred");
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("secondaryPreferred");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("secondaryPreferred");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("secondaryPreferred");
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("nearest");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("nearest");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("nearest");
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ // Writes
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}, wc));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
+
+ gc(); // Clean up new connections
+
+ jsTest.log("Stopping primary of first shard...");
+
+ mongosConnIdle = new Mongo(mongos.host);
+
+ st.rs0.stop(st.rs0.getPrimary());
+
+ jsTest.log("Testing active connection with first primary down...");
+
+ mongosConnActive.setSlaveOk();
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -8}));
+ assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 8}));
+ assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 8}));
+
+ jsTest.log("Testing idle connection with first primary down...");
+
+ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -9}));
+ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 9}));
+ assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 9}));
+
+ mongosConnIdle.setSlaveOk();
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ jsTest.log("Testing new connections with first primary down...");
+
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -10 }));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 10 }));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 10 }));
-
-gc(); // Clean up new connections
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -10}));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 10}));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 10}));
+
+ gc(); // Clean up new connections
-jsTest.log("Stopping second shard...");
+ jsTest.log("Stopping second shard...");
-mongosConnIdle = new Mongo( mongos.host );
+ mongosConnIdle = new Mongo(mongos.host);
-st.rs1.stop(rs1Secondary);
-
-jsTest.log("Testing active connection with second shard down...");
+ st.rs1.stop(rs1Secondary);
+
+ jsTest.log("Testing active connection with second shard down...");
-mongosConnActive.setSlaveOk();
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
+ mongosConnActive.setSlaveOk();
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -11 }));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 11 }));
-assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 11 }));
+ assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -11}));
+ assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 11}));
+ assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 11}));
-jsTest.log("Testing idle connection with second shard down...");
+ jsTest.log("Testing idle connection with second shard down...");
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -12 }));
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 12 }));
-assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 12 }));
+ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -12}));
+ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 12}));
+ assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 12}));
-mongosConnIdle.setSlaveOk();
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+ mongosConnIdle.setSlaveOk();
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-jsTest.log("Testing new connections with second shard down...");
+ jsTest.log("Testing new connections with second shard down...");
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -13 }));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 13 }));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 13 }));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -13}));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 13}));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 13}));
-gc(); // Clean up new connections
+ gc(); // Clean up new connections
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/mongos_shard_failure_tolerance.js b/jstests/sharding/mongos_shard_failure_tolerance.js
index 9b26bbd7eb8..73455666635 100644
--- a/jstests/sharding/mongos_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_shard_failure_tolerance.js
@@ -11,131 +11,130 @@
// (connection connected after shard change).
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 3, mongos: 1 });
+ var st = new ShardingTest({shards: 3, mongos: 1});
-var admin = st.s0.getDB("admin");
+ var admin = st.s0.getDB("admin");
-var collSharded = st.s0.getCollection("fooSharded.barSharded");
-var collUnsharded = st.s0.getCollection("fooUnsharded.barUnsharded");
+ var collSharded = st.s0.getCollection("fooSharded.barSharded");
+ var collUnsharded = st.s0.getCollection("fooUnsharded.barUnsharded");
-assert.commandWorked(admin.runCommand({ enableSharding: collSharded.getDB().toString() }));
-st.ensurePrimaryShard(collSharded.getDB().toString(), st.shard0.shardName);
+ assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
+ st.ensurePrimaryShard(collSharded.getDB().toString(), st.shard0.shardName);
-assert.commandWorked(admin.runCommand({ shardCollection: collSharded.toString(),
- key: { _id: 1 } }));
-assert.commandWorked(admin.runCommand({ split: collSharded.toString(), middle: { _id: 0 } }));
-assert.commandWorked(admin.runCommand({ moveChunk: collSharded.toString(),
- find: { _id: 0 },
- to: st.shard1.shardName }));
+ assert.commandWorked(
+ admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));
-// Create the unsharded database
-assert.writeOK(collUnsharded.insert({ some: "doc" }));
-assert.writeOK(collUnsharded.remove({}));
-assert.commandWorked(
- admin.runCommand({ movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName }));
+ // Create the unsharded database
+ assert.writeOK(collUnsharded.insert({some: "doc"}));
+ assert.writeOK(collUnsharded.remove({}));
+ assert.commandWorked(admin.runCommand(
+ {movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
-//
-// Setup is complete
-//
+ //
+ // Setup is complete
+ //
-jsTest.log("Inserting initial data...");
+ jsTest.log("Inserting initial data...");
-var mongosConnActive = new Mongo(st.s0.host);
-var mongosConnIdle = null;
-var mongosConnNew = null;
+ var mongosConnActive = new Mongo(st.s0.host);
+ var mongosConnIdle = null;
+ var mongosConnNew = null;
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({ _id: -1 }));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({ _id: 1 }));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({ _id: 1 }));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}));
+ assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}));
-jsTest.log("Stopping third shard...");
+ jsTest.log("Stopping third shard...");
-mongosConnIdle = new Mongo(st.s0.host);
+ mongosConnIdle = new Mongo(st.s0.host);
-MongoRunner.stopMongod(st.shard2);
+ MongoRunner.stopMongod(st.shard2);
-jsTest.log("Testing active connection...");
+ jsTest.log("Testing active connection...");
-assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({ _id: -1 }));
-assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({ _id: 1 }));
-assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({ _id: 1 }));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({ _id: -2 }));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({ _id: 2 }));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({ _id: 2 }));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}));
+ assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}));
-jsTest.log("Testing idle connection...");
+ jsTest.log("Testing idle connection...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({ _id: -3 }));
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({ _id: 3 }));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({ _id: 3 }));
+ assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}));
+ assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}));
+ assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}));
-assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({ _id: -1 }));
-assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({ _id: 1 }));
-assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({ _id: 1 }));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-jsTest.log("Testing new connections...");
+ jsTest.log("Testing new connections...");
-mongosConnNew = new Mongo(st.s0.host);
-assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({ _id: -1 }));
-mongosConnNew = new Mongo(st.s0.host);
-assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({ _id: 1 }));
-mongosConnNew = new Mongo(st.s0.host);
-assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({ _id: 1 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({ _id: -4 }));
-mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({ _id: 4 }));
-mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({ _id: 4 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}));
-gc(); // Clean up new connections
+ gc(); // Clean up new connections
-jsTest.log("Stopping second shard...");
+ jsTest.log("Stopping second shard...");
-mongosConnIdle = new Mongo(st.s0.host);
+ mongosConnIdle = new Mongo(st.s0.host);
-MongoRunner.stopMongod(st.shard1);
+ MongoRunner.stopMongod(st.shard1);
-jsTest.log("Testing active connection...");
+ jsTest.log("Testing active connection...");
-assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({ _id: -1 }));
-assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({ _id: 1 }));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({ _id: -5 }));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}));
-assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({ _id: 5 }));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({ _id: 5 }));
+ assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}));
+ assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}));
-jsTest.log("Testing idle connection...");
+ jsTest.log("Testing idle connection...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({ _id: -6 }));
-assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({ _id: 6 }));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({ _id: 6 }));
+ assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}));
+ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}));
+ assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}));
-assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({ _id: -1 }));
-assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({ _id: 1 }));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-jsTest.log("Testing new connections...");
+ jsTest.log("Testing new connections...");
-mongosConnNew = new Mongo(st.s0.host);
-assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({ _id: -1 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
-mongosConnNew = new Mongo(st.s0.host);
-assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({ _id: 1 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({ _id: -7 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}));
-mongosConnNew = new Mongo(st.s0.host);
-assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({ _id: 7 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}));
-mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({ _id: 7 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}));
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/mongos_validate_backoff.js b/jstests/sharding/mongos_validate_backoff.js
index ef057c04ef2..f78dae0677e 100644
--- a/jstests/sharding/mongos_validate_backoff.js
+++ b/jstests/sharding/mongos_validate_backoff.js
@@ -1,60 +1,60 @@
-// Ensures that single mongos shard-key errors are fast, but slow down when many are triggered
+// Ensures that single mongos shard-key errors are fast, but slow down when many are triggered
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards : 1, mongos : 1 });
+ var st = new ShardingTest({shards: 1, mongos: 1});
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var coll = mongos.getCollection("foo.bar");
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var coll = mongos.getCollection("foo.bar");
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB() + "" }));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
-coll.ensureIndex({ shardKey : 1 });
-assert.commandWorked(admin.runCommand({ shardCollection : coll + "", key : { shardKey : 1 } }));
+ coll.ensureIndex({shardKey: 1});
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {shardKey: 1}}));
-var timeBadInsert = function() {
- var start = new Date().getTime();
+ var timeBadInsert = function() {
+ var start = new Date().getTime();
- // Bad insert, no shard key
- assert.writeError(coll.insert({ hello : "world" }));
+ // Bad insert, no shard key
+ assert.writeError(coll.insert({hello: "world"}));
- var end = new Date().getTime();
+ var end = new Date().getTime();
- return end - start;
-};
+ return end - start;
+ };
-// We need to work at least twice in order to check resetting the counter
-var successNeeded = 2;
-var success = 0;
+ // We need to work at least twice in order to check resetting the counter
+ var successNeeded = 2;
+ var success = 0;
-// Loop over this test a few times, to ensure that the error counters get reset if we don't have
-// bad inserts over a long enough time.
-for (var test = 0; test < 5; test++) {
- var firstWait = timeBadInsert();
- var lastWait = 0;
+ // Loop over this test a few times, to ensure that the error counters get reset if we don't have
+ // bad inserts over a long enough time.
+ for (var test = 0; test < 5; test++) {
+ var firstWait = timeBadInsert();
+ var lastWait = 0;
- for(var i = 0; i < 20; i++) {
- printjson(lastWait = timeBadInsert());
- }
-
- // As a heuristic test, we want to make sure that the error wait after sleeping is much less
- // than the error wait after a lot of errors.
- if (lastWait > firstWait * 2 * 2) {
- success++;
- }
-
- if (success >= successNeeded) {
- break;
- }
+ for (var i = 0; i < 20; i++) {
+ printjson(lastWait = timeBadInsert());
+ }
- // Abort if we've failed too many times
- assert.lt(test, 4);
+ // As a heuristic test, we want to make sure that the error wait after sleeping is much less
+ // than the error wait after a lot of errors.
+ if (lastWait > firstWait * 2 * 2) {
+ success++;
+ }
- // Sleeping for long enough to reset our exponential counter
- sleep(3000);
-}
+ if (success >= successNeeded) {
+ break;
+ }
+
+ // Abort if we've failed too many times
+ assert.lt(test, 4);
+
+ // Sleeping for long enough to reset our exponential counter
+ sleep(3000);
+ }
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/mongos_validate_writes.js b/jstests/sharding/mongos_validate_writes.js
index b0843cd5cdb..34d40b61172 100644
--- a/jstests/sharding/mongos_validate_writes.js
+++ b/jstests/sharding/mongos_validate_writes.js
@@ -4,7 +4,7 @@
// Note that this is *unsafe* with broadcast removes and updates
//
-var st = new ShardingTest({ shards : 2, mongos : 3, other : { shardOptions : { verbose : 2 } } });
+var st = new ShardingTest({shards: 2, mongos: 3, other: {shardOptions: {verbose: 2}}});
st.stopBalancer();
var mongos = st.s0;
@@ -12,22 +12,22 @@ var staleMongosA = st.s1;
var staleMongosB = st.s2;
// Additional logging
-printjson( mongos.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
-printjson( staleMongosA.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
-printjson( staleMongosB.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
-printjson( st._connections[0].getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
-printjson( st._connections[1].getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
-
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
-var coll = mongos.getCollection( "foo.bar" );
-var staleCollA = staleMongosA.getCollection( coll + "" );
-var staleCollB = staleMongosB.getCollection( coll + "" );
-
-printjson( admin.runCommand({ enableSharding : coll.getDB() + "" }) );
+printjson(mongos.getDB("admin").runCommand({setParameter: 1, logLevel: 2}));
+printjson(staleMongosA.getDB("admin").runCommand({setParameter: 1, logLevel: 2}));
+printjson(staleMongosB.getDB("admin").runCommand({setParameter: 1, logLevel: 2}));
+printjson(st._connections[0].getDB("admin").runCommand({setParameter: 1, logLevel: 2}));
+printjson(st._connections[1].getDB("admin").runCommand({setParameter: 1, logLevel: 2}));
+
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = mongos.getCollection("foo.bar");
+var staleCollA = staleMongosA.getCollection(coll + "");
+var staleCollB = staleMongosB.getCollection(coll + "");
+
+printjson(admin.runCommand({enableSharding: coll.getDB() + ""}));
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-coll.ensureIndex({ a : 1 });
-printjson( admin.runCommand({ shardCollection : coll + "", key : { a : 1 } }) );
+coll.ensureIndex({a: 1});
+printjson(admin.runCommand({shardCollection: coll + "", key: {a: 1}}));
// Let the stale mongos see the collection state
staleCollA.findOne();
@@ -35,57 +35,57 @@ staleCollB.findOne();
// Change the collection sharding state
coll.drop();
-coll.ensureIndex({ b : 1 });
-printjson( admin.runCommand({ shardCollection : coll + "", key : { b : 1 } }) );
+coll.ensureIndex({b: 1});
+printjson(admin.runCommand({shardCollection: coll + "", key: {b: 1}}));
// Make sure that we can successfully insert, even though we have stale state
-assert.writeOK(staleCollA.insert({ b : "b" }));
+assert.writeOK(staleCollA.insert({b: "b"}));
// Make sure we unsuccessfully insert with old info
-assert.writeError(staleCollB.insert({ a : "a" }));
+assert.writeError(staleCollB.insert({a: "a"}));
// Change the collection sharding state
coll.drop();
-coll.ensureIndex({ c : 1 });
-printjson( admin.runCommand({ shardCollection : coll + "", key : { c : 1 } }) );
+coll.ensureIndex({c: 1});
+printjson(admin.runCommand({shardCollection: coll + "", key: {c: 1}}));
// Make sure we can successfully upsert, even though we have stale state
-assert.writeOK(staleCollA.update({ c : "c" }, { c : "c" }, true ));
+assert.writeOK(staleCollA.update({c: "c"}, {c: "c"}, true));
// Make sure we unsuccessfully upsert with old info
-assert.writeError(staleCollB.update({ b : "b" }, { b : "b" }, true ));
+assert.writeError(staleCollB.update({b: "b"}, {b: "b"}, true));
// Change the collection sharding state
coll.drop();
-coll.ensureIndex({ d : 1 });
-printjson( admin.runCommand({ shardCollection : coll + "", key : { d : 1 } }) );
+coll.ensureIndex({d: 1});
+printjson(admin.runCommand({shardCollection: coll + "", key: {d: 1}}));
// Make sure we can successfully update, even though we have stale state
-assert.writeOK(coll.insert({ d : "d" }));
+assert.writeOK(coll.insert({d: "d"}));
-assert.writeOK(staleCollA.update({ d : "d" }, { $set : { x : "x" } }, false, false ));
-assert.eq( staleCollA.findOne().x, "x" );
+assert.writeOK(staleCollA.update({d: "d"}, {$set: {x: "x"}}, false, false));
+assert.eq(staleCollA.findOne().x, "x");
// Make sure we unsuccessfully update with old info
-assert.writeError(staleCollB.update({ c : "c" }, { $set : { x : "y" } }, false, false ));
-assert.eq( staleCollB.findOne().x, "x" );
+assert.writeError(staleCollB.update({c: "c"}, {$set: {x: "y"}}, false, false));
+assert.eq(staleCollB.findOne().x, "x");
// Change the collection sharding state
coll.drop();
-coll.ensureIndex({ e : 1 });
+coll.ensureIndex({e: 1});
// Deletes need to be across two shards to trigger an error - this is probably an exceptional case
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : "shard0000" }) );
-printjson( admin.runCommand({ shardCollection : coll + "", key : { e : 1 } }) );
-printjson( admin.runCommand({ split : coll + "", middle : { e : 0 } }) );
-printjson( admin.runCommand({ moveChunk : coll + "", find : { e : 0 }, to : "shard0001" }) );
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: "shard0000"}));
+printjson(admin.runCommand({shardCollection: coll + "", key: {e: 1}}));
+printjson(admin.runCommand({split: coll + "", middle: {e: 0}}));
+printjson(admin.runCommand({moveChunk: coll + "", find: {e: 0}, to: "shard0001"}));
// Make sure we can successfully remove, even though we have stale state
-assert.writeOK(coll.insert({ e : "e" }));
+assert.writeOK(coll.insert({e: "e"}));
-assert.writeOK(staleCollA.remove({ e : "e" }, true));
-assert.eq( null, staleCollA.findOne() );
+assert.writeOK(staleCollA.remove({e: "e"}, true));
+assert.eq(null, staleCollA.findOne());
// Make sure we unsuccessfully remove with old info
-assert.writeError(staleCollB.remove({ d : "d" }, true ));
+assert.writeError(staleCollB.remove({d: "d"}, true));
st.stop();
diff --git a/jstests/sharding/movePrimary1.js b/jstests/sharding/movePrimary1.js
index cd0478b1a1e..25217879e6d 100644
--- a/jstests/sharding/movePrimary1.js
+++ b/jstests/sharding/movePrimary1.js
@@ -1,52 +1,54 @@
(function() {
-var s = new ShardingTest({ name: "movePrimary1", shards: 2 });
-
-initDB = function( name ){
- var db = s.getDB( name );
- var c = db.foo;
- c.save( { a : 1 } );
- c.save( { a : 2 } );
- c.save( { a : 3 } );
- assert.eq( 3 , c.count() );
-
- return s.getPrimaryShard( name );
-};
-
-from = initDB( "test1" );
-to = s.getOther( from );
-
-assert.eq( 3 , from.getDB( "test1" ).foo.count() , "from doesn't have data before move" );
-assert.eq( 0 , to.getDB( "test1" ).foo.count() , "to has data before move" );
-
-assert.eq( s.normalize( s.config.databases.findOne( { _id : "test1" } ).primary ) ,
- s.normalize( from.name ) , "not in db correctly to start" );
-s.printShardingStatus();
-oldShardName = s.config.databases.findOne( {_id: "test1"} ).primary;
-s.admin.runCommand( { moveprimary : "test1" , to : to.name } );
-s.printShardingStatus();
-assert.eq( s.normalize( s.config.databases.findOne( { _id : "test1" } ).primary ),
- s.normalize( to.name ) , "to in config db didn't change after first move" );
-
-assert.eq( 0 , from.getDB( "test1" ).foo.count() , "from still has data after move" );
-assert.eq( 3 , to.getDB( "test1" ).foo.count() , "to doesn't have data after move" );
-
-// move back, now using shard name instead of server address
-s.admin.runCommand( { moveprimary : "test1" , to : oldShardName } );
-s.printShardingStatus();
-assert.eq( s.normalize( s.config.databases.findOne( { _id : "test1" } ).primary ),
- oldShardName , "to in config db didn't change after second move" );
-
-assert.eq( 3 , from.getDB( "test1" ).foo.count() , "from doesn't have data after move back" );
-assert.eq( 0 , to.getDB( "test1" ).foo.count() , "to has data after move back" );
-
-// attempting to move primary DB to non-existent shard should error out with appropriate code
-var res = s.admin.runCommand({ movePrimary: 'test1', to: 'dontexist' });
-assert.commandFailed(res,
- 'attempting to use non-existent shard as primary should error out');
-// ErrorCodes::ShardNotFound === 70
-assert.eq(res.code, 70, 'ShardNotFound code not used');
-
-s.stop();
+ var s = new ShardingTest({name: "movePrimary1", shards: 2});
+
+ initDB = function(name) {
+ var db = s.getDB(name);
+ var c = db.foo;
+ c.save({a: 1});
+ c.save({a: 2});
+ c.save({a: 3});
+ assert.eq(3, c.count());
+
+ return s.getPrimaryShard(name);
+ };
+
+ from = initDB("test1");
+ to = s.getOther(from);
+
+ assert.eq(3, from.getDB("test1").foo.count(), "from doesn't have data before move");
+ assert.eq(0, to.getDB("test1").foo.count(), "to has data before move");
+
+ assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
+ s.normalize(from.name),
+ "not in db correctly to start");
+ s.printShardingStatus();
+ oldShardName = s.config.databases.findOne({_id: "test1"}).primary;
+ s.admin.runCommand({moveprimary: "test1", to: to.name});
+ s.printShardingStatus();
+ assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
+ s.normalize(to.name),
+ "to in config db didn't change after first move");
+
+ assert.eq(0, from.getDB("test1").foo.count(), "from still has data after move");
+ assert.eq(3, to.getDB("test1").foo.count(), "to doesn't have data after move");
+
+ // move back, now using shard name instead of server address
+ s.admin.runCommand({moveprimary: "test1", to: oldShardName});
+ s.printShardingStatus();
+ assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
+ oldShardName,
+ "to in config db didn't change after second move");
+
+ assert.eq(3, from.getDB("test1").foo.count(), "from doesn't have data after move back");
+ assert.eq(0, to.getDB("test1").foo.count(), "to has data after move back");
+
+ // attempting to move primary DB to non-existent shard should error out with appropriate code
+ var res = s.admin.runCommand({movePrimary: 'test1', to: 'dontexist'});
+ assert.commandFailed(res, 'attempting to use non-existent shard as primary should error out');
+ // ErrorCodes::ShardNotFound === 70
+ assert.eq(res.code, 70, 'ShardNotFound code not used');
+
+ s.stop();
})();
diff --git a/jstests/sharding/move_chunk_basic.js b/jstests/sharding/move_chunk_basic.js
index 35703b6baa6..354a222da10 100644
--- a/jstests/sharding/move_chunk_basic.js
+++ b/jstests/sharding/move_chunk_basic.js
@@ -3,94 +3,95 @@
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({mongos:1, shards:2});
+ var st = new ShardingTest({mongos: 1, shards: 2});
-var mongos = st.s0;
+ var mongos = st.s0;
-var kDbName = 'db';
+ var kDbName = 'db';
-var shards = mongos.getCollection('config.shards').find().toArray();
+ var shards = mongos.getCollection('config.shards').find().toArray();
-var shard0 = shards[0]._id;
-var shard1 = shards[1]._id;
+ var shard0 = shards[0]._id;
+ var shard1 = shards[1]._id;
-function testHashed() {
- var ns = kDbName + '.fooHashed';
+ function testHashed() {
+ var ns = kDbName + '.fooHashed';
- // Errors if either bounds is not a valid shard key
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
+ // Errors if either bounds is not a valid shard key
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
- var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
- assert(aChunk);
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max-1],
- to: shard1}));
+ var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
+ assert(aChunk);
+ assert.commandFailed(mongos.adminCommand(
+ {moveChunk: ns, bounds: [aChunk.min, aChunk.max - 1], to: shard1}));
- // Fail if find and bounds are both set.
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {_id: 1},
- bounds: [aChunk.min, aChunk.max], to: shard1}));
+ // Fail if find and bounds are both set.
+ assert.commandFailed(mongos.adminCommand(
+ {moveChunk: ns, find: {_id: 1}, bounds: [aChunk.min, aChunk.max], to: shard1}));
- // Using find on collections with hash shard keys should not crash
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {_id: 1}, to: shard1}));
+ // Using find on collections with hash shard keys should not crash
+ assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {_id: 1}, to: shard1}));
- // Fail if chunk is already at shard
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max],
- to: shard0}));
+ // Fail if chunk is already at shard
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard0}));
- assert.commandWorked(mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max],
- to: shard1}));
- assert.eq(0, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard0}));
- assert.eq(1, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard1}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
+ assert.eq(0, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard0}));
+ assert.eq(1, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard1}));
- mongos.getDB(kDbName).fooHashed.drop();
-}
+ mongos.getDB(kDbName).fooHashed.drop();
+ }
-function testNotHashed(keyDoc) {
- var ns = kDbName + '.foo';
+ function testNotHashed(keyDoc) {
+ var ns = kDbName + '.foo';
- // Fail if find is not a valid shard key.
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
+ // Fail if find is not a valid shard key.
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- var chunkId = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0})._id;
+ var chunkId = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0})._id;
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {xxx: 1}, to: shard1}));
- assert.eq(shard0, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
+ assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {xxx: 1}, to: shard1}));
+ assert.eq(shard0, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
- assert.commandWorked(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: shard1}));
- assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
+ assert.commandWorked(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: shard1}));
+ assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
- // Fail if to shard does not exists
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: 'WrongShard'}));
+ // Fail if to shard does not exists
+ assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: 'WrongShard'}));
- // Fail if chunk is already at shard
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: shard1}));
- assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
+ // Fail if chunk is already at shard
+ assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: shard1}));
+ assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
- mongos.getDB(kDbName).foo.drop();
-}
+ mongos.getDB(kDbName).foo.drop();
+ }
-assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
-st.ensurePrimaryShard(kDbName, shard0);
+ st.ensurePrimaryShard(kDbName, shard0);
-// Fail if invalid namespace.
-var res = assert.commandFailed(mongos.adminCommand({moveChunk: '', find: {_id: 1}, to: shard1}));
-assert.eq( res.info);
+ // Fail if invalid namespace.
+ var res =
+ assert.commandFailed(mongos.adminCommand({moveChunk: '', find: {_id: 1}, to: shard1}));
+ assert.eq(res.info);
-// Fail if database does not exist.
-assert.commandFailed(mongos.adminCommand({moveChunk: 'a.b', find: {_id: 1}, to: shard1}));
+ // Fail if database does not exist.
+ assert.commandFailed(mongos.adminCommand({moveChunk: 'a.b', find: {_id: 1}, to: shard1}));
-// Fail if collection is unsharded.
-assert.commandFailed(mongos.adminCommand({moveChunk: kDbName + '.xxx',
- find: {_id: 1}, to: shard1}));
+ // Fail if collection is unsharded.
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: kDbName + '.xxx', find: {_id: 1}, to: shard1}));
-testHashed();
+ testHashed();
-testNotHashed({a:1});
+ testNotHashed({a: 1});
-testNotHashed({a:1, b:1});
+ testNotHashed({a: 1, b: 1});
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/move_chunk_missing_idx.js b/jstests/sharding/move_chunk_missing_idx.js
index 6171bb539a7..ae3da051e29 100644
--- a/jstests/sharding/move_chunk_missing_idx.js
+++ b/jstests/sharding/move_chunk_missing_idx.js
@@ -3,44 +3,39 @@
* have the index and is not empty.
*/
-var st = new ShardingTest({ shards: 2 });
+var st = new ShardingTest({shards: 2});
var testDB = st.s.getDB('test');
-testDB.adminCommand({ enableSharding: 'test' });
-testDB.adminCommand({ movePrimary: 'test', to: 'shard0001' });
-testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+testDB.adminCommand({enableSharding: 'test'});
+testDB.adminCommand({movePrimary: 'test', to: 'shard0001'});
+testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
// Test procedure:
// 1. Create index (index should now be in primary shard).
// 2. Split chunk into 3 parts.
// 3. Move 1 chunk to 2nd shard - should have no issues
-testDB.user.ensureIndex({ a: 1, b: 1 });
+testDB.user.ensureIndex({a: 1, b: 1});
-testDB.adminCommand({ split: 'test.user', middle: { x: 0 }});
-testDB.adminCommand({ split: 'test.user', middle: { x: 10 }});
+testDB.adminCommand({split: 'test.user', middle: {x: 0}});
+testDB.adminCommand({split: 'test.user', middle: {x: 10}});
// Collection does not exist, no chunk, index missing case at destination case.
-assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000' }));
+assert.commandWorked(testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'}));
// Drop index since last moveChunk created this.
-st.d0.getDB('test').user.dropIndex({ a: 1, b: 1 });
+st.d0.getDB('test').user.dropIndex({a: 1, b: 1});
// Collection exist but empty, index missing at destination case.
-assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 10 },
- to: 'shard0000' }));
+assert.commandWorked(testDB.adminCommand({moveChunk: 'test.user', find: {x: 10}, to: 'shard0000'}));
// Drop index since last moveChunk created this.
-st.d0.getDB('test').user.dropIndex({ a: 1, b: 1 });
+st.d0.getDB('test').user.dropIndex({a: 1, b: 1});
// Collection not empty, index missing at destination case.
-testDB.user.insert({ x: 10 });
-assert.commandFailed(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: -10 },
- to: 'shard0000' }));
+testDB.user.insert({x: 10});
+assert.commandFailed(
+ testDB.adminCommand({moveChunk: 'test.user', find: {x: -10}, to: 'shard0000'}));
st.stop();
diff --git a/jstests/sharding/move_primary_basic.js b/jstests/sharding/move_primary_basic.js
index 5e4a9f06a62..288d4fb03e5 100644
--- a/jstests/sharding/move_primary_basic.js
+++ b/jstests/sharding/move_primary_basic.js
@@ -3,58 +3,60 @@
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({mongos:1, shards:2});
+ var st = new ShardingTest({mongos: 1, shards: 2});
-var mongos = st.s0;
+ var mongos = st.s0;
-var kDbName = 'db';
+ var kDbName = 'db';
-var shards = mongos.getCollection('config.shards').find().toArray();
+ var shards = mongos.getCollection('config.shards').find().toArray();
-var shard0 = shards[0]._id;
-var shard1 = shards[1]._id;
+ var shard0 = shards[0]._id;
+ var shard1 = shards[1]._id;
-assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
-st.ensurePrimaryShard(kDbName, shard0);
-assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+ st.ensurePrimaryShard(kDbName, shard0);
+ assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
-// Can run only on mongos.
-assert.commandFailedWithCode(st.d0.getDB('admin').runCommand({movePrimary : kDbName, to: shard0}),
- ErrorCodes.CommandNotFound);
+ // Can run only on mongos.
+ assert.commandFailedWithCode(
+ st.d0.getDB('admin').runCommand({movePrimary: kDbName, to: shard0}),
+ ErrorCodes.CommandNotFound);
-// Can run only against the admin database.
-assert.commandFailedWithCode(mongos.getDB('test').runCommand({movePrimary : kDbName, to: shard0}),
- ErrorCodes.Unauthorized);
+ // Can run only against the admin database.
+ assert.commandFailedWithCode(
+ mongos.getDB('test').runCommand({movePrimary: kDbName, to: shard0}),
+ ErrorCodes.Unauthorized);
-// Can't movePrimary for 'config' database.
-assert.commandFailed(mongos.adminCommand({movePrimary : 'config', to: shard0}));
+ // Can't movePrimary for 'config' database.
+ assert.commandFailed(mongos.adminCommand({movePrimary: 'config', to: shard0}));
-// Can't movePrimary for 'local' database.
-assert.commandFailed(mongos.adminCommand({movePrimary : 'local', to: shard0}));
+ // Can't movePrimary for 'local' database.
+ assert.commandFailed(mongos.adminCommand({movePrimary: 'local', to: shard0}));
-// Can't movePrimary for 'admin' database.
-assert.commandFailed(mongos.adminCommand({movePrimary : 'admin', to: shard0}));
+ // Can't movePrimary for 'admin' database.
+ assert.commandFailed(mongos.adminCommand({movePrimary: 'admin', to: shard0}));
-// Can't movePrimary for invalid db name.
-assert.commandFailed(mongos.adminCommand({movePrimary : 'a.b', to: shard0}));
-assert.commandFailed(mongos.adminCommand({movePrimary : '', to: shard0}));
+ // Can't movePrimary for invalid db name.
+ assert.commandFailed(mongos.adminCommand({movePrimary: 'a.b', to: shard0}));
+ assert.commandFailed(mongos.adminCommand({movePrimary: '', to: shard0}));
-// Fail if shard does not exist or empty.
-assert.commandFailed(mongos.adminCommand({movePrimary : kDbName, to: 'Unknown'}));
-assert.commandFailed(mongos.adminCommand({movePrimary : kDbName, to: ''}));
-assert.commandFailed(mongos.adminCommand({movePrimary : kDbName}));
+ // Fail if shard does not exist or empty.
+ assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: 'Unknown'}));
+ assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: ''}));
+ assert.commandFailed(mongos.adminCommand({movePrimary: kDbName}));
-// Fail if moveShard to already primary and verify metadata changes.
-assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+ // Fail if moveShard to already primary and verify metadata changes.
+ assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
-assert.commandWorked(mongos.adminCommand({movePrimary : kDbName, to: shard1}));
-assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+ assert.commandWorked(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
+ assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
-assert.commandFailed(mongos.adminCommand({movePrimary : kDbName, to: shard1}));
-assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+ assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
+ assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/move_stale_mongos.js b/jstests/sharding/move_stale_mongos.js
index beaa7d34987..a7d1f647f61 100644
--- a/jstests/sharding/move_stale_mongos.js
+++ b/jstests/sharding/move_stale_mongos.js
@@ -16,9 +16,8 @@ var curShardIndex = 0;
for (var i = 0; i < 100; i += 10) {
assert.commandWorked(st.s0.getDB('admin').runCommand({split: testNs, middle: {_id: i}}));
var nextShardIndex = (curShardIndex + 1) % shards.length;
- assert.commandWorked(st.s1.getDB('admin').runCommand({moveChunk: testNs,
- find: {_id: i + 5},
- to: shards[nextShardIndex]}));
+ assert.commandWorked(st.s1.getDB('admin').runCommand(
+ {moveChunk: testNs, find: {_id: i + 5}, to: shards[nextShardIndex]}));
curShardIndex = nextShardIndex;
}
diff --git a/jstests/sharding/movechunk_include.js b/jstests/sharding/movechunk_include.js
index e8821be922b..7c20f0d675f 100644
--- a/jstests/sharding/movechunk_include.js
+++ b/jstests/sharding/movechunk_include.js
@@ -1,41 +1,43 @@
function setupMoveChunkTest(st) {
- //Stop Balancer
+ // Stop Balancer
st.stopBalancer();
- var testdb = st.getDB( "test" );
+ var testdb = st.getDB("test");
var testcoll = testdb.foo;
- st.adminCommand( { enablesharding : "test" } );
+ st.adminCommand({enablesharding: "test"});
st.ensurePrimaryShard('test', 'shard0001');
- st.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+ st.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
var str = "";
- while ( str.length < 10000 ) {
+ while (str.length < 10000) {
str += "asdasdsdasdasdasdas";
}
var data = 0;
var num = 0;
- //Insert till you get to 10MB of data
+ // Insert till you get to 10MB of data
var bulk = testcoll.initializeUnorderedBulkOp();
- while ( data < ( 1024 * 1024 * 10 ) ) {
- bulk.insert({ _id: num++, s: str });
+ while (data < (1024 * 1024 * 10)) {
+ bulk.insert({_id: num++, s: str});
data += str.length;
}
assert.writeOK(bulk.execute());
- var stats = st.chunkCounts( "foo" );
+ var stats = st.chunkCounts("foo");
var to = "";
- for ( shard in stats ){
- if ( stats[shard] == 0 ) {
+ for (shard in stats) {
+ if (stats[shard] == 0) {
to = shard;
break;
}
}
- var result = st.adminCommand( { movechunk : "test.foo" ,
- find : { _id : 1 } ,
- to : to ,
- _waitForDelete : true} ); //some tests need this...
- assert(result, "movechunk failed: " + tojson( result ) );
+ var result = st.adminCommand({
+ movechunk: "test.foo",
+ find: {_id: 1},
+ to: to,
+ _waitForDelete: true
+ }); // some tests need this...
+ assert(result, "movechunk failed: " + tojson(result));
}
diff --git a/jstests/sharding/movechunk_with_default_paranoia.js b/jstests/sharding/movechunk_with_default_paranoia.js
index 250816a15b8..a6f4704ec90 100644
--- a/jstests/sharding/movechunk_with_default_paranoia.js
+++ b/jstests/sharding/movechunk_with_default_paranoia.js
@@ -2,14 +2,17 @@
* This test checks that moveParanoia defaults to off (ie the moveChunk directory will not
* be created).
*/
-var st = new ShardingTest( { shards:2, mongos:1 , other : { chunkSize: 1 }});
+var st = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1}});
load("jstests/sharding/movechunk_include.js");
setupMoveChunkTest(st);
var shards = [st.shard0, st.shard1];
-for(i in shards) {
+for (i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
- var hasMoveChunkDir = 0 != ls(dbpath).filter(function(a) {return null != a.match("moveChunk");}).length;
+ var hasMoveChunkDir = 0 !=
+ ls(dbpath).filter(function(a) {
+ return null != a.match("moveChunk");
+ }).length;
assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath));
}
st.stop();
diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js
index f643e3aae0a..96348d827bf 100644
--- a/jstests/sharding/movechunk_with_moveParanoia.js
+++ b/jstests/sharding/movechunk_with_moveParanoia.js
@@ -1,20 +1,20 @@
/**
* This test sets moveParanoia flag and then check that the directory is created with the moved data
*/
-var st = new ShardingTest( { shards: 2,
- mongos:1,
- other : {
- chunkSize: 1,
- shardOptions: { moveParanoia:"" }}});
+var st = new ShardingTest(
+ {shards: 2, mongos: 1, other: {chunkSize: 1, shardOptions: {moveParanoia: ""}}});
load("jstests/sharding/movechunk_include.js");
setupMoveChunkTest(st);
var shards = [st.shard0, st.shard1];
var foundMoveChunk = false;
-for(i in shards) {
+for (i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
- var hasMoveChunkDir = 0 != ls(dbpath).filter(function(a) {return null != a.match("moveChunk");}).length;
+ var hasMoveChunkDir = 0 !=
+ ls(dbpath).filter(function(a) {
+ return null != a.match("moveChunk");
+ }).length;
foundMoveChunk = foundMoveChunk || hasMoveChunkDir;
}
diff --git a/jstests/sharding/movechunk_with_noMoveParanoia.js b/jstests/sharding/movechunk_with_noMoveParanoia.js
index 0e2f6bc2248..ae8ef5899a8 100644
--- a/jstests/sharding/movechunk_with_noMoveParanoia.js
+++ b/jstests/sharding/movechunk_with_noMoveParanoia.js
@@ -1,19 +1,19 @@
/**
* This test sets moveParanoia flag and then check that the directory is created with the moved data
*/
-var st = new ShardingTest( { shards: 2,
- mongos:1,
- other : {
- chunkSize: 1,
- shardOptions: { noMoveParanoia:"" }}});
+var st = new ShardingTest(
+ {shards: 2, mongos: 1, other: {chunkSize: 1, shardOptions: {noMoveParanoia: ""}}});
load("jstests/sharding/movechunk_include.js");
setupMoveChunkTest(st);
var shards = [st.shard0, st.shard1];
-for(i in shards) {
+for (i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
- var hasMoveChunkDir = 0 != ls(dbpath).filter(function(a) {return null != a.match("moveChunk");}).length;
+ var hasMoveChunkDir = 0 !=
+ ls(dbpath).filter(function(a) {
+ return null != a.match("moveChunk");
+ }).length;
assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath));
}
st.stop();
diff --git a/jstests/sharding/moveprimary_ignore_sharded.js b/jstests/sharding/moveprimary_ignore_sharded.js
index 9b608279c77..f73f50939cc 100644
--- a/jstests/sharding/moveprimary_ignore_sharded.js
+++ b/jstests/sharding/moveprimary_ignore_sharded.js
@@ -1,5 +1,5 @@
// Checks that movePrimary doesn't move collections detected as sharded when it begins moving
-var st = new ShardingTest({ shards : 2, mongos : 2, verbose : 1 });
+var st = new ShardingTest({shards: 2, mongos: 2, verbose: 1});
// Stop balancer, otherwise mongosB may load information about the database non-deterministically
st.stopBalancer();
@@ -7,51 +7,51 @@ st.stopBalancer();
var mongosA = st.s0;
var mongosB = st.s1;
-var adminA = mongosA.getDB( "admin" );
-var adminB = mongosB.getDB( "admin" );
+var adminA = mongosA.getDB("admin");
+var adminB = mongosB.getDB("admin");
-var configA = mongosA.getDB( "config" );
-var configB = mongosB.getDB( "config" );
+var configA = mongosA.getDB("config");
+var configB = mongosB.getDB("config");
// Populate some data
-assert.writeOK(mongosA.getCollection("foo.coll0").insert({ hello : "world" }));
-assert.writeOK(mongosA.getCollection("bar.coll0").insert({ hello : "world" }));
-assert.writeOK(mongosA.getCollection("foo.coll1").insert({ hello : "world" }));
-assert.writeOK(mongosA.getCollection("bar.coll1").insert({ hello : "world" }));
-assert.writeOK(mongosA.getCollection("foo.coll2").insert({ hello : "world" }));
-assert.writeOK(mongosA.getCollection("bar.coll2").insert({ hello : "world" }));
+assert.writeOK(mongosA.getCollection("foo.coll0").insert({hello: "world"}));
+assert.writeOK(mongosA.getCollection("bar.coll0").insert({hello: "world"}));
+assert.writeOK(mongosA.getCollection("foo.coll1").insert({hello: "world"}));
+assert.writeOK(mongosA.getCollection("bar.coll1").insert({hello: "world"}));
+assert.writeOK(mongosA.getCollection("foo.coll2").insert({hello: "world"}));
+assert.writeOK(mongosA.getCollection("bar.coll2").insert({hello: "world"}));
// Enable sharding
-printjson( adminA.runCommand({ enableSharding : "foo" }) );
+printjson(adminA.runCommand({enableSharding: "foo"}));
st.ensurePrimaryShard('foo', 'shard0001');
-printjson( adminA.runCommand({ enableSharding : "bar" }) );
+printjson(adminA.runCommand({enableSharding: "bar"}));
st.ensurePrimaryShard('bar', 'shard0000');
// Setup three collections per-db
// 0 : not sharded
// 1 : sharded
// 2 : sharded but not seen as sharded by mongosB
-printjson( adminA.runCommand({ shardCollection : "foo.coll1", key : { _id : 1 } }) );
-printjson( adminA.runCommand({ shardCollection : "foo.coll2", key : { _id : 1 } }) );
-printjson( adminA.runCommand({ shardCollection : "bar.coll1", key : { _id : 1 } }) );
-printjson( adminA.runCommand({ shardCollection : "bar.coll2", key : { _id : 1 } }) );
+printjson(adminA.runCommand({shardCollection: "foo.coll1", key: {_id: 1}}));
+printjson(adminA.runCommand({shardCollection: "foo.coll2", key: {_id: 1}}));
+printjson(adminA.runCommand({shardCollection: "bar.coll1", key: {_id: 1}}));
+printjson(adminA.runCommand({shardCollection: "bar.coll2", key: {_id: 1}}));
// All collections are now on primary shard
-var fooPrimaryShard = configA.databases.findOne({ _id : "foo" }).primary;
-var barPrimaryShard = configA.databases.findOne({ _id : "bar" }).primary;
+var fooPrimaryShard = configA.databases.findOne({_id: "foo"}).primary;
+var barPrimaryShard = configA.databases.findOne({_id: "bar"}).primary;
var shards = configA.shards.find().toArray();
-var fooPrimaryShard = fooPrimaryShard == shards[0]._id ? shards[0] : shards[1];
-var fooOtherShard = fooPrimaryShard._id == shards[0]._id ? shards[1] : shards[0];
-var barPrimaryShard = barPrimaryShard == shards[0]._id ? shards[0] : shards[1];
-var barOtherShard = barPrimaryShard._id == shards[0]._id ? shards[1] : shards[0];
+var fooPrimaryShard = fooPrimaryShard == shards[0]._id ? shards[0] : shards[1];
+var fooOtherShard = fooPrimaryShard._id == shards[0]._id ? shards[1] : shards[0];
+var barPrimaryShard = barPrimaryShard == shards[0]._id ? shards[0] : shards[1];
+var barOtherShard = barPrimaryShard._id == shards[0]._id ? shards[1] : shards[0];
st.printShardingStatus();
-jsTest.log( "Running movePrimary for foo through mongosA ..." );
+jsTest.log("Running movePrimary for foo through mongosA ...");
// MongosA should already know about all the collection states
-printjson( adminA.runCommand({ movePrimary : "foo", to : fooOtherShard._id }) );
+printjson(adminA.runCommand({movePrimary: "foo", to: fooOtherShard._id}));
if (st.configRS) {
// If we are in CSRS mode need to make sure that mongosB will actually get the most recent
@@ -59,31 +59,30 @@ if (st.configRS) {
st.configRS.awaitLastOpCommitted();
}
-
// All collections still correctly sharded / unsharded
-assert.neq( null, mongosA.getCollection("foo.coll0").findOne() );
-assert.neq( null, mongosA.getCollection("foo.coll1").findOne() );
-assert.neq( null, mongosA.getCollection("foo.coll2").findOne() );
+assert.neq(null, mongosA.getCollection("foo.coll0").findOne());
+assert.neq(null, mongosA.getCollection("foo.coll1").findOne());
+assert.neq(null, mongosA.getCollection("foo.coll2").findOne());
-assert.neq( null, mongosB.getCollection("foo.coll0").findOne() );
-assert.neq( null, mongosB.getCollection("foo.coll1").findOne() );
-assert.neq( null, mongosB.getCollection("foo.coll2").findOne() );
+assert.neq(null, mongosB.getCollection("foo.coll0").findOne());
+assert.neq(null, mongosB.getCollection("foo.coll1").findOne());
+assert.neq(null, mongosB.getCollection("foo.coll2").findOne());
-function realCollectionCount( mydb ) {
+function realCollectionCount(mydb) {
var num = 0;
- mydb.getCollectionNames().forEach( function(z) {
- if ( z.startsWith( "coll" ) )
+ mydb.getCollectionNames().forEach(function(z) {
+ if (z.startsWith("coll"))
num++;
- } );
+ });
return num;
}
// All collections sane
-assert.eq( 2, realCollectionCount( new Mongo( fooPrimaryShard.host ).getDB( "foo" ) ) );
-assert.eq( 1, realCollectionCount( new Mongo( fooOtherShard.host ).getDB( "foo" ) ) );
+assert.eq(2, realCollectionCount(new Mongo(fooPrimaryShard.host).getDB("foo")));
+assert.eq(1, realCollectionCount(new Mongo(fooOtherShard.host).getDB("foo")));
-jsTest.log( "Running movePrimary for bar through mongosB ..." );
-printjson( adminB.runCommand({ movePrimary : "bar", to : barOtherShard._id }) );
+jsTest.log("Running movePrimary for bar through mongosB ...");
+printjson(adminB.runCommand({movePrimary: "bar", to: barOtherShard._id}));
// We need to flush the cluster config on mongosA, so it can discover that database 'bar' got
// moved. Otherwise since the collections are not sharded, we have no way of discovering this.
@@ -93,19 +92,19 @@ if (st.configRS) {
// the most recent config data.
st.configRS.awaitLastOpCommitted();
}
-assert.commandWorked(adminA.runCommand({ flushRouterConfig : 1 }));
+assert.commandWorked(adminA.runCommand({flushRouterConfig: 1}));
// All collections still correctly sharded / unsharded
-assert.neq( null, mongosA.getCollection("bar.coll0").findOne() );
-assert.neq( null, mongosA.getCollection("bar.coll1").findOne() );
-assert.neq( null, mongosA.getCollection("bar.coll2").findOne() );
+assert.neq(null, mongosA.getCollection("bar.coll0").findOne());
+assert.neq(null, mongosA.getCollection("bar.coll1").findOne());
+assert.neq(null, mongosA.getCollection("bar.coll2").findOne());
-assert.neq( null, mongosB.getCollection("bar.coll0").findOne() );
-assert.neq( null, mongosB.getCollection("bar.coll1").findOne() );
-assert.neq( null, mongosB.getCollection("bar.coll2").findOne() );
+assert.neq(null, mongosB.getCollection("bar.coll0").findOne());
+assert.neq(null, mongosB.getCollection("bar.coll1").findOne());
+assert.neq(null, mongosB.getCollection("bar.coll2").findOne());
// All collections sane
-assert.eq( 2, realCollectionCount( new Mongo( barPrimaryShard.host ).getDB( "bar" ) ) );
-assert.eq( 1, realCollectionCount( new Mongo( barOtherShard.host ).getDB( "bar" ) ) );
+assert.eq(2, realCollectionCount(new Mongo(barPrimaryShard.host).getDB("bar")));
+assert.eq(1, realCollectionCount(new Mongo(barOtherShard.host).getDB("bar")));
st.stop();
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index a4dac1db3d2..ab3bae28d74 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -4,17 +4,21 @@
// collection input twice the size of the first and outputs it to the new sharded
// collection created in the first pass.
-var st = new ShardingTest({ shards: 2, other: { chunkSize: 1 }});
+var st = new ShardingTest({shards: 2, other: {chunkSize: 1}});
var config = st.getDB("config");
-st.adminCommand( { enablesharding: "test" } );
-st.getDB("admin").runCommand( { movePrimary: "test", to: "shard0001"});
-st.adminCommand( { shardcollection: "test.foo", key: { "a": 1 } } );
+st.adminCommand({enablesharding: "test"});
+st.getDB("admin").runCommand({movePrimary: "test", to: "shard0001"});
+st.adminCommand({shardcollection: "test.foo", key: {"a": 1}});
-var testDB = st.getDB( "test" );
+var testDB = st.getDB("test");
-function map2() { emit(this.i, { count: 1, y: this.y }); }
-function reduce2(key, values) { return values[0]; }
+function map2() {
+ emit(this.i, {count: 1, y: this.y});
+}
+function reduce2(key, values) {
+ return values[0];
+}
var numDocs = 0;
var numBatch = 5000;
@@ -24,17 +28,17 @@ var str = new Array(1024).join('a');
// M/R is strange in that it chooses the output shards based on currently sharded
// collections in the database. The upshot is that we need a sharded collection on
// both shards in order to ensure M/R will output to two shards.
-st.adminCommand({ split: 'test.foo', middle: { a: numDocs + numBatch / 2 }});
-st.adminCommand({ moveChunk: 'test.foo', find: { a: numDocs }, to: 'shard0000' });
+st.adminCommand({split: 'test.foo', middle: {a: numDocs + numBatch / 2}});
+st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: 'shard0000'});
// Add some more data for input so that chunks will get split further
for (var splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
- testDB.adminCommand({ split: 'test.foo', middle: { a: splitPoint }});
+ testDB.adminCommand({split: 'test.foo', middle: {a: splitPoint}});
}
var bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numBatch; ++i) {
- bulk.insert({ a: numDocs + i, y: str, i: numDocs + i });
+ bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
}
assert.writeOK(bulk.execute());
@@ -46,55 +50,58 @@ var res = testDB.foo.mapReduce(map2, reduce2, {out: {replace: "mrShardedOut", sh
jsTest.log("MapReduce results:" + tojson(res));
var reduceOutputCount = res.counts.output;
-assert.eq(numDocs, reduceOutputCount,
- "MapReduce FAILED: res.counts.output = " + reduceOutputCount +
- ", should be " + numDocs);
+assert.eq(numDocs,
+ reduceOutputCount,
+ "MapReduce FAILED: res.counts.output = " + reduceOutputCount + ", should be " + numDocs);
jsTest.log("Checking that all MapReduce output documents are in output collection");
var outColl = testDB["mrShardedOut"];
var outCollCount = outColl.find().itcount();
-assert.eq(numDocs, outCollCount,
- "MapReduce FAILED: outColl.find().itcount() = " + outCollCount +
- ", should be " + numDocs +
- ": this may happen intermittently until resolution of SERVER-3627");
+assert.eq(numDocs,
+ outCollCount,
+ "MapReduce FAILED: outColl.find().itcount() = " + outCollCount + ", should be " +
+ numDocs + ": this may happen intermittently until resolution of SERVER-3627");
// Make sure it's sharded and split
var newNumChunks = config.chunks.count({ns: testDB.mrShardedOut._fullName});
-assert.gt(newNumChunks, 1,
+assert.gt(newNumChunks,
+ 1,
"Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
// Check that there are no "jumbo" chunks.
var objSize = Object.bsonsize(testDB.mrShardedOut.findOne());
-var docsPerChunk = 1024 * 1024 / objSize * 1.1; // 1MB chunk size + allowance
+var docsPerChunk = 1024 * 1024 / objSize * 1.1; // 1MB chunk size + allowance
st.printShardingStatus(true);
-config.chunks.find({ ns: testDB.mrShardedOut.getFullName() }).forEach(function(chunkDoc) {
- var count = testDB.mrShardedOut.find({ _id: { $gte: chunkDoc.min._id,
- $lt: chunkDoc.max._id }}).itcount();
- assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
-});
+config.chunks.find({ns: testDB.mrShardedOut.getFullName()})
+ .forEach(function(chunkDoc) {
+ var count =
+ testDB.mrShardedOut.find({_id: {$gte: chunkDoc.min._id, $lt: chunkDoc.max._id}})
+ .itcount();
+ assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
+ });
// Check that chunks for the newly created sharded output collection are well distributed.
-var shard0Chunks = config.chunks.find({ ns: testDB.mrShardedOut._fullName,
- shard: 'shard0000' }).count();
-var shard1Chunks = config.chunks.find({ ns: testDB.mrShardedOut._fullName,
- shard: 'shard0001' }).count();
+var shard0Chunks =
+ config.chunks.find({ns: testDB.mrShardedOut._fullName, shard: 'shard0000'}).count();
+var shard1Chunks =
+ config.chunks.find({ns: testDB.mrShardedOut._fullName, shard: 'shard0001'}).count();
assert.lte(Math.abs(shard0Chunks - shard1Chunks), 1);
jsTest.log('Starting second pass');
-st.adminCommand({ split: 'test.foo', middle: { a: numDocs + numBatch / 2 }});
-st.adminCommand({ moveChunk: 'test.foo', find: { a: numDocs }, to: 'shard0000' });
+st.adminCommand({split: 'test.foo', middle: {a: numDocs + numBatch / 2}});
+st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: 'shard0000'});
// Add some more data for input so that chunks will get split further
for (splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
- testDB.adminCommand({ split: 'test.foo', middle: { a: numDocs + splitPoint }});
+ testDB.adminCommand({split: 'test.foo', middle: {a: numDocs + splitPoint}});
}
bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numBatch; ++i) {
- bulk.insert({ a: numDocs + i, y: str, i: numDocs + i });
+ bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
}
assert.writeOK(bulk.execute());
jsTest.log("No errors on insert batch.");
@@ -106,21 +113,22 @@ res = testDB.foo.mapReduce(map2, reduce2, {out: {replace: "mrShardedOut", sharde
jsTest.log("MapReduce results:" + tojson(res));
reduceOutputCount = res.counts.output;
-assert.eq(numDocs, reduceOutputCount,
- "MapReduce FAILED: res.counts.output = " + reduceOutputCount +
- ", should be " + numDocs);
+assert.eq(numDocs,
+ reduceOutputCount,
+ "MapReduce FAILED: res.counts.output = " + reduceOutputCount + ", should be " + numDocs);
jsTest.log("Checking that all MapReduce output documents are in output collection");
outColl = testDB["mrShardedOut"];
outCollCount = outColl.find().itcount();
-assert.eq(numDocs, outCollCount,
- "MapReduce FAILED: outColl.find().itcount() = " + outCollCount +
- ", should be " + numDocs +
- ": this may happen intermittently until resolution of SERVER-3627");
+assert.eq(numDocs,
+ outCollCount,
+ "MapReduce FAILED: outColl.find().itcount() = " + outCollCount + ", should be " +
+ numDocs + ": this may happen intermittently until resolution of SERVER-3627");
// Make sure it's sharded and split
newNumChunks = config.chunks.count({ns: testDB.mrShardedOut._fullName});
-assert.gt(newNumChunks, 1,
+assert.gt(newNumChunks,
+ 1,
"Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
st.printShardingStatus(true);
@@ -138,4 +146,3 @@ config.chunks.find({ ns: testDB.mrShardedOut.getFullName() }).forEach(function(c
// to balance chunks.
st.stop();
-
diff --git a/jstests/sharding/mrShardedOutputAuth.js b/jstests/sharding/mrShardedOutputAuth.js
index ed88e34aeed..acbb01f6794 100644
--- a/jstests/sharding/mrShardedOutputAuth.js
+++ b/jstests/sharding/mrShardedOutputAuth.js
@@ -6,91 +6,86 @@
(function() {
-function doMapReduce(connection, outputDb) {
- // clean output db and run m/r
- outputDb.numbers_out.drop();
- printjson(connection.getDB('input').runCommand(
- {
- mapreduce : "numbers",
- map : function() {
- emit(this.num, {count:1});
+ function doMapReduce(connection, outputDb) {
+ // clean output db and run m/r
+ outputDb.numbers_out.drop();
+ printjson(connection.getDB('input').runCommand({
+ mapreduce: "numbers",
+ map: function() {
+ emit(this.num, {count: 1});
},
- reduce : function(k, values) {
+ reduce: function(k, values) {
var result = {};
- values.forEach( function(value) {
+ values.forEach(function(value) {
result.count = 1;
});
return result;
},
- out : {
- merge : "numbers_out",
- sharded : true,
- db : "output"
- },
- verbose : true,
- query : {}
- }
- ));
-}
-
-function assertSuccess(configDb, outputDb) {
- assert.eq(outputDb.numbers_out.count(), 50, "map/reduce failed");
- assert( ! configDb.collections.findOne().dropped, "no sharded collections");
-}
-
-function assertFailure(configDb, outputDb) {
- assert.eq(outputDb.numbers_out.count(), 0, "map/reduce should not have succeeded");
-}
-
-
-var st = new ShardingTest({ name: "mrShardedOutputAuth",
- shards: 1,
- mongos: 1,
- other: { extraOptions : {"keyFile" : "jstests/libs/key1"} } });
-
-// Setup the users to the input, output and admin databases
-var mongos = st.s;
-var adminDb = mongos.getDB("admin");
-adminDb.createUser({user: "user", pwd: "pass", roles: jsTest.adminUserRoles});
-
-var authenticatedConn = new Mongo(mongos.host);
-authenticatedConn.getDB('admin').auth("user", "pass");
-adminDb = authenticatedConn.getDB("admin");
-
-var configDb = authenticatedConn.getDB("config");
-
-var inputDb = authenticatedConn.getDB("input");
-inputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
-
-var outputDb = authenticatedConn.getDB("output");
-outputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
-
-// Setup the input db
-inputDb.numbers.drop();
-for (var i = 0; i < 50; i++) {
- inputDb.numbers.insert({ num : i });
-}
-assert.eq(inputDb.numbers.count(), 50);
-
-// Setup a connection authenticated to both input and output db
-var inputOutputAuthConn = new Mongo(mongos.host);
-inputOutputAuthConn.getDB('input').auth("user", "pass");
-inputOutputAuthConn.getDB('output').auth("user", "pass");
-doMapReduce(inputOutputAuthConn, outputDb);
-assertSuccess(configDb, outputDb);
-
-// setup a connection authenticated to only input db
-var inputAuthConn = new Mongo(mongos.host);
-inputAuthConn.getDB('input').auth("user", "pass");
-doMapReduce(inputAuthConn, outputDb);
-assertFailure(configDb, outputDb);
-
-// setup a connection authenticated to only output db
-var outputAuthConn = new Mongo(mongos.host);
-outputAuthConn.getDB('output').auth("user", "pass");
-doMapReduce(outputAuthConn, outputDb);
-assertFailure(configDb, outputDb);
-
-st.stop();
+ out: {merge: "numbers_out", sharded: true, db: "output"},
+ verbose: true,
+ query: {}
+ }));
+ }
+
+ function assertSuccess(configDb, outputDb) {
+ assert.eq(outputDb.numbers_out.count(), 50, "map/reduce failed");
+ assert(!configDb.collections.findOne().dropped, "no sharded collections");
+ }
+
+ function assertFailure(configDb, outputDb) {
+ assert.eq(outputDb.numbers_out.count(), 0, "map/reduce should not have succeeded");
+ }
+
+ var st = new ShardingTest({
+ name: "mrShardedOutputAuth",
+ shards: 1,
+ mongos: 1,
+ other: {extraOptions: {"keyFile": "jstests/libs/key1"}}
+ });
+
+ // Setup the users to the input, output and admin databases
+ var mongos = st.s;
+ var adminDb = mongos.getDB("admin");
+ adminDb.createUser({user: "user", pwd: "pass", roles: jsTest.adminUserRoles});
+
+ var authenticatedConn = new Mongo(mongos.host);
+ authenticatedConn.getDB('admin').auth("user", "pass");
+ adminDb = authenticatedConn.getDB("admin");
+
+ var configDb = authenticatedConn.getDB("config");
+
+ var inputDb = authenticatedConn.getDB("input");
+ inputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
+
+ var outputDb = authenticatedConn.getDB("output");
+ outputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
+
+ // Setup the input db
+ inputDb.numbers.drop();
+ for (var i = 0; i < 50; i++) {
+ inputDb.numbers.insert({num: i});
+ }
+ assert.eq(inputDb.numbers.count(), 50);
+
+ // Setup a connection authenticated to both input and output db
+ var inputOutputAuthConn = new Mongo(mongos.host);
+ inputOutputAuthConn.getDB('input').auth("user", "pass");
+ inputOutputAuthConn.getDB('output').auth("user", "pass");
+ doMapReduce(inputOutputAuthConn, outputDb);
+ assertSuccess(configDb, outputDb);
+
+ // setup a connection authenticated to only input db
+ var inputAuthConn = new Mongo(mongos.host);
+ inputAuthConn.getDB('input').auth("user", "pass");
+ doMapReduce(inputAuthConn, outputDb);
+ assertFailure(configDb, outputDb);
+
+ // setup a connection authenticated to only output db
+ var outputAuthConn = new Mongo(mongos.host);
+ outputAuthConn.getDB('output').auth("user", "pass");
+ doMapReduce(outputAuthConn, outputDb);
+ assertFailure(configDb, outputDb);
+
+ st.stop();
})();
diff --git a/jstests/sharding/mr_and_agg_versioning.js b/jstests/sharding/mr_and_agg_versioning.js
index 0167a23554d..e2d1c6f7869 100644
--- a/jstests/sharding/mr_and_agg_versioning.js
+++ b/jstests/sharding/mr_and_agg_versioning.js
@@ -1,64 +1,65 @@
// Test that map reduce and aggregate properly handle shard versioning.
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({shards: 2, mongos: 3});
+ var st = new ShardingTest({shards: 2, mongos: 3});
-var dbName = jsTest.name();
-var collName = dbName + ".coll";
-var numDocs = 50000;
-var numKeys = 1000;
+ var dbName = jsTest.name();
+ var collName = dbName + ".coll";
+ var numDocs = 50000;
+ var numKeys = 1000;
-st.s.adminCommand({enableSharding: dbName});
-st.ensurePrimaryShard(dbName, 'shard0000');
-st.s.adminCommand({shardCollection: collName, key: {key: 1}});
+ st.s.adminCommand({enableSharding: dbName});
+ st.ensurePrimaryShard(dbName, 'shard0000');
+ st.s.adminCommand({shardCollection: collName, key: {key: 1}});
-// Load chunk data to the stale mongoses before moving a chunk
-var staleMongos1 = st.s1;
-var staleMongos2 = st.s2;
-staleMongos1.getCollection(collName).find().itcount();
-staleMongos2.getCollection(collName).find().itcount();
+ // Load chunk data to the stale mongoses before moving a chunk
+ var staleMongos1 = st.s1;
+ var staleMongos2 = st.s2;
+ staleMongos1.getCollection(collName).find().itcount();
+ staleMongos2.getCollection(collName).find().itcount();
-st.s.adminCommand({split: collName, middle: {key: numKeys/2}});
-st.s.adminCommand({moveChunk: collName, find: {key: 0}, to: 'shard0001'});
+ st.s.adminCommand({split: collName, middle: {key: numKeys / 2}});
+ st.s.adminCommand({moveChunk: collName, find: {key: 0}, to: 'shard0001'});
-var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp();
-for(var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, key: (i % numKeys), value: i % numKeys});
-}
-assert.writeOK(bulk.execute());
+ var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, key: (i % numKeys), value: i % numKeys});
+ }
+ assert.writeOK(bulk.execute());
-// Add orphaned documents directly to the shards to ensure they are properly filtered out.
-st.shard0.getCollection(collName).insert({_id: 0, key: 0, value: 0});
-st.shard1.getCollection(collName).insert({_id: numDocs, key: numKeys, value: numKeys});
+ // Add orphaned documents directly to the shards to ensure they are properly filtered out.
+ st.shard0.getCollection(collName).insert({_id: 0, key: 0, value: 0});
+ st.shard1.getCollection(collName).insert({_id: numDocs, key: numKeys, value: numKeys});
-jsTest.log("Doing mapReduce");
+ jsTest.log("Doing mapReduce");
-var map = function(){ emit( this.key, this.value ); };
-var reduce = function(k, values){
- var total = 0;
- for(var i = 0; i < values.length; i++) {
- total += values[i];
- }
- return total;
-};
-function validateOutput(output) {
- assert.eq(output.length, numKeys, tojson(output));
- for(var i = 0; i < output.length; i++) {
- assert.eq(output[i]._id * (numDocs/numKeys), output[i].value, tojson(output));
+ var map = function() {
+ emit(this.key, this.value);
+ };
+ var reduce = function(k, values) {
+ var total = 0;
+ for (var i = 0; i < values.length; i++) {
+ total += values[i];
+ }
+ return total;
+ };
+ function validateOutput(output) {
+ assert.eq(output.length, numKeys, tojson(output));
+ for (var i = 0; i < output.length; i++) {
+ assert.eq(output[i]._id * (numDocs / numKeys), output[i].value, tojson(output));
+ }
}
-}
-var res = staleMongos1.getCollection(collName).mapReduce(map, reduce, {out: {inline: 1}});
-validateOutput(res.results);
+ var res = staleMongos1.getCollection(collName).mapReduce(map, reduce, {out: {inline: 1}});
+ validateOutput(res.results);
-jsTest.log("Doing aggregation");
+ jsTest.log("Doing aggregation");
-res = staleMongos2.getCollection(collName).aggregate([
- {'$group': {_id: "$key", value: {"$sum": "$value"}}},
- {'$sort': {_id: 1}}]);
-validateOutput(res.toArray());
+ res = staleMongos2.getCollection(collName).aggregate(
+ [{'$group': {_id: "$key", value: {"$sum": "$value"}}}, {'$sort': {_id: 1}}]);
+ validateOutput(res.toArray());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/mr_noscripting.js b/jstests/sharding/mr_noscripting.js
index a7663d54ccc..a2940d51c43 100644
--- a/jstests/sharding/mr_noscripting.js
+++ b/jstests/sharding/mr_noscripting.js
@@ -1,17 +1,17 @@
var shardOpts = [
- { noscripting: '' },
- { } // just use default params
+ {noscripting: ''},
+ {} // just use default params
];
-var st = new ShardingTest({ shards: shardOpts, other: { nopreallocj: 1 }});
+var st = new ShardingTest({shards: shardOpts, other: {nopreallocj: 1}});
var mongos = st.s;
-st.shardColl('bar', { x: 1 });
+st.shardColl('bar', {x: 1});
var testDB = mongos.getDB('test');
var coll = testDB.bar;
-coll.insert({ x: 1 });
+coll.insert({x: 1});
var map = function() {
emit(this.x, 1);
@@ -21,21 +21,19 @@ var reduce = function(key, values) {
return 1;
};
-var mrResult = testDB.runCommand({ mapreduce: 'bar', map: map, reduce: reduce,
- out: { inline: 1 }});
+var mrResult = testDB.runCommand({mapreduce: 'bar', map: map, reduce: reduce, out: {inline: 1}});
assert.eq(0, mrResult.ok, 'mr result: ' + tojson(mrResult));
// Confirm that mongos did not crash
-assert(testDB.adminCommand({ serverStatus: 1 }).ok);
+assert(testDB.adminCommand({serverStatus: 1}).ok);
// Confirm that the rest of the shards did not crash
-mongos.getDB('config').shards.find().forEach(function (shardDoc){
+mongos.getDB('config').shards.find().forEach(function(shardDoc) {
var shardConn = new Mongo(shardDoc.host);
var adminDB = shardConn.getDB('admin');
- var cmdResult = adminDB.runCommand({ serverStatus: 1 });
+ var cmdResult = adminDB.runCommand({serverStatus: 1});
- assert(cmdResult.ok, 'serverStatus on ' + shardDoc.host +
- ' failed, result: ' + tojson(cmdResult));
+ assert(cmdResult.ok,
+ 'serverStatus on ' + shardDoc.host + ' failed, result: ' + tojson(cmdResult));
});
-
diff --git a/jstests/sharding/mr_shard_version.js b/jstests/sharding/mr_shard_version.js
index 481feb7f268..fc2f7f02e4b 100644
--- a/jstests/sharding/mr_shard_version.js
+++ b/jstests/sharding/mr_shard_version.js
@@ -1,89 +1,100 @@
// Test for SERVER-4158 (version changes during mapreduce)
(function() {
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+ var st = new ShardingTest({shards: 2, mongos: 1});
-//Stop balancer, since it'll just get in the way of these
-st.stopBalancer();
+ // Stop balancer, since it'll just get in the way of these
+ st.stopBalancer();
-var coll = st.s.getCollection( jsTest.name() + ".coll" );
+ var coll = st.s.getCollection(jsTest.name() + ".coll");
-var numDocs = 50000;
-var numKeys = 1000;
-var numTests = 3;
+ var numDocs = 50000;
+ var numKeys = 1000;
+ var numTests = 3;
-var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < numDocs; i++ ){
- bulk.insert({ _id: i, key: "" + ( i % numKeys ), value: i % numKeys });
-}
-assert.writeOK(bulk.execute());
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, key: "" + (i % numKeys), value: i % numKeys});
+ }
+ assert.writeOK(bulk.execute());
+
+ assert.eq(numDocs, coll.find().itcount());
+
+ var halfId = coll.find().itcount() / 2;
+
+ // Shard collection in half
+ st.shardColl(coll, {_id: 1}, {_id: halfId});
+
+ st.printShardingStatus();
+
+ jsTest.log("Collection now initialized with keys and values...");
+
+ jsTest.log("Starting migrations...");
+
+ var migrateOp = {
+ op: "command",
+ ns: "admin",
+ command: {moveChunk: "" + coll}
+ };
+
+ var checkMigrate = function() {
+ print("Result of migrate : ");
+ printjson(this);
+ };
+
+ var ops = {};
+ for (var i = 0; i < st._connections.length; i++) {
+ for (var j = 0; j < 2; j++) {
+ ops["" + (i * 2 + j)] = {
+ op: "command",
+ ns: "admin",
+ command: {
+ moveChunk: "" + coll,
+ find: {_id: (j == 0 ? 0 : halfId)},
+ to: st._connections[i].shardName
+ },
+ check: checkMigrate
+ };
+ }
+ }
-assert.eq( numDocs, coll.find().itcount() );
+ var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
-var halfId = coll.find().itcount() / 2;
+ jsTest.log("Starting m/r...");
-// Shard collection in half
-st.shardColl( coll, { _id : 1 }, { _id : halfId } );
+ var map = function() {
+ emit(this.key, this.value);
+ };
+ var reduce = function(k, values) {
+ var total = 0;
+ for (var i = 0; i < values.length; i++)
+ total += values[i];
+ return total;
+ };
-st.printShardingStatus();
+ var outputColl = st.s.getCollection(jsTest.name() + ".mrOutput");
-jsTest.log( "Collection now initialized with keys and values..." );
+ jsTest.log("Output coll : " + outputColl);
-jsTest.log( "Starting migrations..." );
+ for (var t = 0; t < numTests; t++) {
+ var results = coll.mapReduce(map, reduce, {out: {replace: outputColl.getName()}});
-var migrateOp = { op : "command", ns : "admin", command : { moveChunk : "" + coll } };
+ // Assert that the results are actually correct, all keys have values of (numDocs / numKeys)
+ // x key
+ var output = outputColl.find().sort({_id: 1}).toArray();
-var checkMigrate = function(){ print( "Result of migrate : " ); printjson( this ); };
+ // printjson( output )
-var ops = {};
-for( var i = 0; i < st._connections.length; i++ ){
- for( var j = 0; j < 2; j++ ){
- ops[ "" + (i * 2 + j) ] = { op : "command", ns : "admin",
- command : { moveChunk : "" + coll,
- find : { _id : ( j == 0 ? 0 : halfId ) },
- to : st._connections[i].shardName },
- check : checkMigrate };
+ assert.eq(output.length, numKeys);
+ printjson(output);
+ for (var i = 0; i < output.length; i++)
+ assert.eq(parseInt(output[i]._id) * (numDocs / numKeys), output[i].value);
}
-}
-
-var bid = benchStart({ ops : ops,
- host : st.s.host,
- parallel : 1,
- handleErrors : false });
-
-jsTest.log( "Starting m/r..." );
-
-var map = function(){ emit( this.key, this.value ); };
-var reduce = function(k, values){
- var total = 0;
- for( var i = 0; i < values.length; i++ ) total += values[i];
- return total;
-};
-
-var outputColl = st.s.getCollection( jsTest.name() + ".mrOutput" );
-
-jsTest.log( "Output coll : " + outputColl );
-
-for( var t = 0; t < numTests; t++ ){
-
- var results = coll.mapReduce( map, reduce, { out : { replace : outputColl.getName() } });
-
- // Assert that the results are actually correct, all keys have values of (numDocs / numKeys) x key
- var output = outputColl.find().sort({ _id : 1 }).toArray();
-
- // printjson( output )
-
- assert.eq( output.length, numKeys );
- printjson( output );
- for( var i = 0; i < output.length; i++ )
- assert.eq( parseInt( output[i]._id ) * ( numDocs / numKeys ), output[i].value );
-
-}
-
-jsTest.log( "Finishing parallel migrations..." );
-
-printjson( benchFinish( bid ) );
-
-st.stop();
+
+ jsTest.log("Finishing parallel migrations...");
+
+ printjson(benchFinish(bid));
+
+ st.stop();
})();
diff --git a/jstests/sharding/multi_coll_drop.js b/jstests/sharding/multi_coll_drop.js
index c4c2362bf44..96d939e7b99 100644
--- a/jstests/sharding/multi_coll_drop.js
+++ b/jstests/sharding/multi_coll_drop.js
@@ -1,46 +1,46 @@
// Tests the dropping and re-adding of a collection
(function() {
-var st = new ShardingTest({ name: "multidrop", shards: 1, mongos: 2 });
+ var st = new ShardingTest({name: "multidrop", shards: 1, mongos: 2});
-var mA = st.s0;
-var mB = st.s1;
+ var mA = st.s0;
+ var mB = st.s1;
-var coll = mA.getCollection('multidrop.coll');
-var collB = mB.getCollection('multidrop.coll');
+ var coll = mA.getCollection('multidrop.coll');
+ var collB = mB.getCollection('multidrop.coll');
-jsTestLog( "Shard and split collection..." );
+ jsTestLog("Shard and split collection...");
-var admin = mA.getDB( "admin" );
-admin.runCommand({ enableSharding : coll.getDB() + "" });
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
+ var admin = mA.getDB("admin");
+ admin.runCommand({enableSharding: coll.getDB() + ""});
+ admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
-for( var i = -100; i < 100; i++ ){
- admin.runCommand({ split : coll + "", middle : { _id : i } });
-}
+ for (var i = -100; i < 100; i++) {
+ admin.runCommand({split: coll + "", middle: {_id: i}});
+ }
-jsTestLog( "Create versioned connection for each mongos..." );
+ jsTestLog("Create versioned connection for each mongos...");
-coll.find().itcount();
-collB.find().itcount();
+ coll.find().itcount();
+ collB.find().itcount();
-jsTestLog( "Dropping sharded collection..." );
-coll.drop();
+ jsTestLog("Dropping sharded collection...");
+ coll.drop();
-jsTestLog( "Recreating collection..." );
+ jsTestLog("Recreating collection...");
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
-for( var i = -10; i < 10; i++ ){
- admin.runCommand({ split : coll + "", middle : { _id : i } });
-}
+ admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
+ for (var i = -10; i < 10; i++) {
+ admin.runCommand({split: coll + "", middle: {_id: i}});
+ }
-jsTestLog( "Retrying connections..." );
+ jsTestLog("Retrying connections...");
-coll.find().itcount();
-collB.find().itcount();
+ coll.find().itcount();
+ collB.find().itcount();
-jsTestLog( "Done." );
+ jsTestLog("Done.");
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index 43be2ecd9da..9184ce9e807 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -1,73 +1,78 @@
// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
(function() {
-var s1 = new ShardingTest({ name: "multi_mongos1", shards: 2, mongos: 2 });
-s2 = s1._mongos[1];
+ var s1 = new ShardingTest({name: "multi_mongos1", shards: 2, mongos: 2});
+ s2 = s1._mongos[1];
-s1.adminCommand( { enablesharding : "test" } );
-s1.ensurePrimaryShard('test', 'shard0001');
-s1.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+ s1.adminCommand({enablesharding: "test"});
+ s1.ensurePrimaryShard('test', 'shard0001');
+ s1.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-s1.config.databases.find().forEach( printjson );
+ s1.config.databases.find().forEach(printjson);
-// test queries
+ // test queries
-s1.getDB('test').existing.insert({_id:1});
-assert.eq(1, s1.getDB('test').existing.count({_id:1}));
-assert.eq(1, s2.getDB('test').existing.count({_id:1}));
+ s1.getDB('test').existing.insert({_id: 1});
+ assert.eq(1, s1.getDB('test').existing.count({_id: 1}));
+ assert.eq(1, s2.getDB('test').existing.count({_id: 1}));
-// Balancer is by default stopped, thus it will not interfere with manual chunk moves.
+ // Balancer is by default stopped, thus it will not interfere with manual chunk moves.
-s2.adminCommand( { shardcollection : "test.existing" , key : { _id : 1 } } );
-assert.commandWorked(s2.adminCommand({ split: "test.existing", middle: { _id: 5 }}));
+ s2.adminCommand({shardcollection: "test.existing", key: {_id: 1}});
+ assert.commandWorked(s2.adminCommand({split: "test.existing", middle: {_id: 5}}));
-res = s2.getDB( "admin" ).runCommand( { moveChunk: "test.existing" ,
- find : { _id : 1 } ,
- to : s1.getOther( s1.getPrimaryShard( "test" ) ).name } );
+ res = s2.getDB("admin").runCommand({
+ moveChunk: "test.existing",
+ find: {_id: 1},
+ to: s1.getOther(s1.getPrimaryShard("test")).name
+ });
-assert.eq(1 , res.ok, tojson(res));
+ assert.eq(1, res.ok, tojson(res));
-s1.startBalancer();
+ s1.startBalancer();
-printjson( s2.adminCommand( {"getShardVersion" : "test.existing" } ) );
-printjson( new Mongo(s1.getPrimaryShard( "test" ).name).getDB( "admin" )
- .adminCommand( {"getShardVersion" : "test.existing" } ) );
+ printjson(s2.adminCommand({"getShardVersion": "test.existing"}));
+ printjson(new Mongo(s1.getPrimaryShard("test").name)
+ .getDB("admin")
+ .adminCommand({"getShardVersion": "test.existing"}));
-assert.eq(1, s1.getDB('test').existing.count({_id:1})); // SERVER-2828
-assert.eq(1, s2.getDB('test').existing.count({_id:1}));
+ assert.eq(1, s1.getDB('test').existing.count({_id: 1})); // SERVER-2828
+ assert.eq(1, s2.getDB('test').existing.count({_id: 1}));
-// test stats
+ // test stats
-s1.getDB('test').existing2.insert({_id:1});
-assert.eq(1, s1.getDB('test').existing2.count({_id:1}));
-assert.eq(1, s2.getDB('test').existing2.count({_id:1}));
+ s1.getDB('test').existing2.insert({_id: 1});
+ assert.eq(1, s1.getDB('test').existing2.count({_id: 1}));
+ assert.eq(1, s2.getDB('test').existing2.count({_id: 1}));
-s2.adminCommand( { shardcollection : "test.existing2" , key : { _id : 1 } } );
-assert.commandWorked(s2.adminCommand({ split: "test.existing2", middle: { _id: 5 }}));
+ s2.adminCommand({shardcollection: "test.existing2", key: {_id: 1}});
+ assert.commandWorked(s2.adminCommand({split: "test.existing2", middle: {_id: 5}}));
-var res = s1.getDB('test').existing2.stats();
-printjson( res );
-assert.eq(true, res.sharded); //SERVER-2828
-assert.eq(true, s2.getDB('test').existing2.stats().sharded);
+ var res = s1.getDB('test').existing2.stats();
+ printjson(res);
+ assert.eq(true, res.sharded); // SERVER-2828
+ assert.eq(true, s2.getDB('test').existing2.stats().sharded);
-// test admin commands
+ // test admin commands
-s1.getDB('test').existing3.insert({_id:1});
-assert.eq(1, s1.getDB('test').existing3.count({_id:1}));
-assert.eq(1, s2.getDB('test').existing3.count({_id:1}));
+ s1.getDB('test').existing3.insert({_id: 1});
+ assert.eq(1, s1.getDB('test').existing3.count({_id: 1}));
+ assert.eq(1, s2.getDB('test').existing3.count({_id: 1}));
-s1.stopBalancer();
+ s1.stopBalancer();
-s2.adminCommand( { shardcollection : "test.existing3" , key : { _id : 1 } } );
-assert.commandWorked(s2.adminCommand({ split: "test.existing3", middle: { _id: 5 }}));
+ s2.adminCommand({shardcollection: "test.existing3", key: {_id: 1}});
+ assert.commandWorked(s2.adminCommand({split: "test.existing3", middle: {_id: 5}}));
-res = s1.getDB( "admin" ).runCommand( { moveChunk: "test.existing3",
- find : { _id : 1 },
- to : s1.getOther( s1.getPrimaryShard( "test" ) ).name } );
-assert.eq(1 , res.ok, tojson(res));
+ res = s1.getDB("admin").runCommand({
+ moveChunk: "test.existing3",
+ find: {_id: 1},
+ to: s1.getOther(s1.getPrimaryShard("test")).name
+ });
+ assert.eq(1, res.ok, tojson(res));
-s1.startBalancer();
+ s1.startBalancer();
-s1.stop();
+ s1.stop();
})();
diff --git a/jstests/sharding/multi_mongos2a.js b/jstests/sharding/multi_mongos2a.js
index 3dea44fc4c5..829ce0de194 100644
--- a/jstests/sharding/multi_mongos2a.js
+++ b/jstests/sharding/multi_mongos2a.js
@@ -1,36 +1,35 @@
// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
(function() {
-var s1 = new ShardingTest({ name: "multi_mongos2a",
- shards: 2,
- mongos: 2 });
-s2 = s1._mongos[1];
+ var s1 = new ShardingTest({name: "multi_mongos2a", shards: 2, mongos: 2});
+ s2 = s1._mongos[1];
-s1.adminCommand( { enablesharding : "test" } );
-s1.ensurePrimaryShard('test', 'shard0001');
-s1.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+ s1.adminCommand({enablesharding: "test"});
+ s1.ensurePrimaryShard('test', 'shard0001');
+ s1.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-s1.config.databases.find().forEach( printjson );
+ s1.config.databases.find().forEach(printjson);
-s1.getDB('test').existing.insert({_id:1});
-assert.eq(1, s1.getDB('test').existing.count({_id:1}));
-assert.eq(1, s2.getDB('test').existing.count({_id:1}));
+ s1.getDB('test').existing.insert({_id: 1});
+ assert.eq(1, s1.getDB('test').existing.count({_id: 1}));
+ assert.eq(1, s2.getDB('test').existing.count({_id: 1}));
-s2.adminCommand( { shardcollection : "test.existing" , key : { _id : 1 } } );
-assert.eq(true, s2.getDB('test').existing.stats().sharded);
+ s2.adminCommand({shardcollection: "test.existing", key: {_id: 1}});
+ assert.eq(true, s2.getDB('test').existing.stats().sharded);
+ res = s2.getDB("admin").runCommand({
+ moveChunk: "test.existing",
+ find: {_id: 1},
+ to: s1.getOther(s1.getPrimaryShard("test")).name
+ });
-res = s2.getDB( "admin" ).runCommand( { moveChunk: "test.existing",
- find : { _id : 1 },
- to : s1.getOther( s1.getPrimaryShard( "test" ) ).name } );
+ assert.eq(1, res.ok, tojson(res));
-assert.eq(1 , res.ok, tojson(res));
+ s1.adminCommand({flushRouterConfig: 1});
-s1.adminCommand( { flushRouterConfig : 1 } );
+ assert.eq(1, s1.getDB('test').existing.count({_id: 1})); // SERVER-2828
+ assert.eq(1, s2.getDB('test').existing.count({_id: 1}));
-assert.eq(1, s1.getDB('test').existing.count({_id:1})); // SERVER-2828
-assert.eq(1, s2.getDB('test').existing.count({_id:1}));
-
-s1.stop();
+ s1.stop();
})();
diff --git a/jstests/sharding/multi_write_target.js b/jstests/sharding/multi_write_target.js
index 3de867ed5ea..9c4f37430da 100644
--- a/jstests/sharding/multi_write_target.js
+++ b/jstests/sharding/multi_write_target.js
@@ -2,72 +2,72 @@
// Tests that multi-writes (update/delete) target *all* shards and not just shards in the collection
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 3, mongos: 2 });
+ var st = new ShardingTest({shards: 3, mongos: 2});
-var admin = st.s0.getDB( "admin" );
-var coll = st.s0.getCollection( "foo.bar" );
+ var admin = st.s0.getDB("admin");
+ var coll = st.s0.getCollection("foo.bar");
-assert.commandWorked(admin.runCommand({ enableSharding: coll.getDB() + "" }));
-st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
-assert.commandWorked(admin.runCommand({ shardCollection: coll + "", key: { skey: 1 } }));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {skey: 1}}));
-assert.commandWorked(admin.runCommand({ split: coll + "", middle: { skey: 0 } }));
-assert.commandWorked(admin.runCommand({ split: coll + "", middle: { skey: 100 } }));
-assert.commandWorked(
- admin.runCommand({ moveChunk: coll + "", find: { skey: 0 }, to: st.shard1.shardName }));
-assert.commandWorked(
- admin.runCommand({ moveChunk: coll + "", find: { skey: 100 }, to: st.shard2.shardName }));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {skey: 0}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {skey: 100}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {skey: 0}, to: st.shard1.shardName}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {skey: 100}, to: st.shard2.shardName}));
-jsTest.log("Testing multi-update...");
+ jsTest.log("Testing multi-update...");
-// Put data on all shards
-assert.writeOK(st.s0.getCollection(coll.toString()).insert({ _id: 0, skey: -1, x: 1 }));
-assert.writeOK(st.s0.getCollection(coll.toString()).insert({ _id: 1, skey: 1, x: 1 }));
-assert.writeOK(st.s0.getCollection(coll.toString()).insert({ _id: 0, skey: 100, x: 1 }));
+ // Put data on all shards
+ assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
+ assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
+ assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: 100, x: 1}));
-// Non-multi-update doesn't work without shard key
-assert.writeError(coll.update({ x: 1 }, { $set: { updated: true } }, { multi: false }));
-assert.writeOK(coll.update({ x: 1 }, { $set: { updated: true } }, { multi: true }));
+ // Non-multi-update doesn't work without shard key
+ assert.writeError(coll.update({x: 1}, {$set: {updated: true}}, {multi: false}));
+ assert.writeOK(coll.update({x: 1}, {$set: {updated: true}}, {multi: true}));
-// Ensure update goes to *all* shards
-assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({ updated: true }));
-assert.neq(null, st.shard1.getCollection(coll.toString()).findOne({ updated: true }));
-assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({ updated: true }));
+ // Ensure update goes to *all* shards
+ assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updated: true}));
+ assert.neq(null, st.shard1.getCollection(coll.toString()).findOne({updated: true}));
+ assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updated: true}));
-// _id update works, and goes to all shards even on the stale mongos
-var staleColl = st.s1.getCollection('foo.bar');
-assert.writeOK(staleColl.update({ _id: 0 }, { $set: { updatedById: true } }, { multi: false }));
+ // _id update works, and goes to all shards even on the stale mongos
+ var staleColl = st.s1.getCollection('foo.bar');
+ assert.writeOK(staleColl.update({_id: 0}, {$set: {updatedById: true}}, {multi: false}));
-// Ensure _id update goes to *all* shards
-assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({ updatedById: true }));
-assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({ updatedById: true }));
+ // Ensure _id update goes to *all* shards
+ assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updatedById: true}));
+ assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updatedById: true}));
-jsTest.log("Testing multi-delete...");
+ jsTest.log("Testing multi-delete...");
-// non-multi-delete doesn't work without shard key
-assert.writeError(coll.remove({ x: 1 }, { justOne: true }));
+ // non-multi-delete doesn't work without shard key
+ assert.writeError(coll.remove({x: 1}, {justOne: true}));
-assert.writeOK(coll.remove({ x: 1 }, { justOne: false }));
+ assert.writeOK(coll.remove({x: 1}, {justOne: false}));
-// Ensure delete goes to *all* shards
-assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({ x: 1 }));
-assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({ x: 1 }));
-assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({ x: 1 }));
+ // Ensure delete goes to *all* shards
+ assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
+ assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({x: 1}));
+ assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
-// Put more on all shards
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id: 0, skey: -1, x: 1 }));
-assert.writeOK(st.shard1.getCollection(coll.toString()).insert({ _id: 1, skey: 1, x: 1 }));
-// Data not in chunks
-assert.writeOK(st.shard2.getCollection(coll.toString()).insert({ _id: 0, x: 1 }));
+ // Put more on all shards
+ assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
+ assert.writeOK(st.shard1.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
+ // Data not in chunks
+ assert.writeOK(st.shard2.getCollection(coll.toString()).insert({_id: 0, x: 1}));
-assert.writeOK(coll.remove({ _id: 0 }, { justOne: true }));
+ assert.writeOK(coll.remove({_id: 0}, {justOne: true}));
-// Ensure _id delete goes to *all* shards
-assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({ x: 1 }));
-assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({ x: 1 }));
+ // Ensure _id delete goes to *all* shards
+ assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
+ assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/names.js b/jstests/sharding/names.js
index 28612681e46..33a337e5656 100644
--- a/jstests/sharding/names.js
+++ b/jstests/sharding/names.js
@@ -1,50 +1,52 @@
// Test that having replica set names the same as the names of other shards works fine
(function() {
-var st = new ShardingTest({ name: "HostNames",
- shards: 0,
- mongos: 2,
- other: { rs : true } });
+ var st = new ShardingTest({name: "HostNames", shards: 0, mongos: 2, other: {rs: true}});
-var rsA = new ReplSetTest({ nodes : 2, name : "rsA" });
-var rsB = new ReplSetTest({ nodes : 2, name : "rsB" });
+ var rsA = new ReplSetTest({nodes: 2, name: "rsA"});
+ var rsB = new ReplSetTest({nodes: 2, name: "rsB"});
-rsA.startSet();
-rsB.startSet();
-rsA.initiate();
-rsB.initiate();
-rsA.getPrimary();
-rsB.getPrimary();
+ rsA.startSet();
+ rsB.startSet();
+ rsA.initiate();
+ rsB.initiate();
+ rsA.getPrimary();
+ rsB.getPrimary();
-var mongos = st.s;
-var config = mongos.getDB("config");
-var admin = mongos.getDB("admin");
+ var mongos = st.s;
+ var config = mongos.getDB("config");
+ var admin = mongos.getDB("admin");
-assert( admin.runCommand({ addShard : rsA.getURL(), name : rsB.name }).ok );
-printjson( config.shards.find().toArray() );
+ assert(admin.runCommand({addShard: rsA.getURL(), name: rsB.name}).ok);
+ printjson(config.shards.find().toArray());
-assert( admin.runCommand({ addShard : rsB.getURL(), name : rsA.name }).ok );
-printjson( config.shards.find().toArray() );
+ assert(admin.runCommand({addShard: rsB.getURL(), name: rsA.name}).ok);
+ printjson(config.shards.find().toArray());
-assert.eq(2, config.shards.count(), "Error adding a shard");
-assert.eq(rsB.getURL(), config.shards.findOne({_id:rsA.name})["host"], "Wrong host for shard rsA");
-assert.eq(rsA.getURL(), config.shards.findOne({_id:rsB.name})["host"], "Wrong host for shard rsB");
+ assert.eq(2, config.shards.count(), "Error adding a shard");
+ assert.eq(
+ rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA");
+ assert.eq(
+ rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB");
-// Remove shard
-assert( admin.runCommand( { removeshard: rsA.name } ).ok , "failed to start draining shard" );
-assert( admin.runCommand( { removeshard: rsA.name } ).ok , "failed to remove shard" );
+ // Remove shard
+ assert(admin.runCommand({removeshard: rsA.name}).ok, "failed to start draining shard");
+ assert(admin.runCommand({removeshard: rsA.name}).ok, "failed to remove shard");
-assert.eq(1, config.shards.count(), "Error removing a shard");
-assert.eq(rsA.getURL(), config.shards.findOne({_id:rsB.name})["host"], "Wrong host for shard rsB 2");
+ assert.eq(1, config.shards.count(), "Error removing a shard");
+ assert.eq(
+ rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB 2");
-// Re-add shard
-assert( admin.runCommand({ addShard : rsB.getURL(), name : rsA.name }).ok );
-printjson( config.shards.find().toArray() );
+ // Re-add shard
+ assert(admin.runCommand({addShard: rsB.getURL(), name: rsA.name}).ok);
+ printjson(config.shards.find().toArray());
-assert.eq(2, config.shards.count(), "Error re-adding a shard");
-assert.eq(rsB.getURL(), config.shards.findOne({_id:rsA.name})["host"], "Wrong host for shard rsA 3");
-assert.eq(rsA.getURL(), config.shards.findOne({_id:rsB.name})["host"], "Wrong host for shard rsB 3");
+ assert.eq(2, config.shards.count(), "Error re-adding a shard");
+ assert.eq(
+ rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA 3");
+ assert.eq(
+ rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB 3");
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/noUpdateButN1inAnotherCollection.js b/jstests/sharding/noUpdateButN1inAnotherCollection.js
index 27c40e9056b..a95ee5924a0 100644
--- a/jstests/sharding/noUpdateButN1inAnotherCollection.js
+++ b/jstests/sharding/noUpdateButN1inAnotherCollection.js
@@ -1,57 +1,57 @@
-function debug( str ) {
- print( "---\n" + str + "\n-----" );
+function debug(str) {
+ print("---\n" + str + "\n-----");
}
var name = "badNonUpdate";
debug("Starting sharded cluster test stuff");
-var s = new ShardingTest({ name: name, shards: 2, mongos : 2 });
+var s = new ShardingTest({name: name, shards: 2, mongos: 2});
-var mongosA=s.s0;
-var mongosB=s.s1;
+var mongosA = s.s0;
+var mongosB = s.s1;
ns = "test.coll";
ns2 = "test.coll2";
-adminSA = mongosA.getDB( "admin" );
-adminSA.runCommand({ enableSharding : "test"});
+adminSA = mongosA.getDB("admin");
+adminSA.runCommand({enableSharding: "test"});
-adminSA.runCommand( { moveprimary : "test", to : "shard0000" } );
-adminSA.runCommand( { moveprimary : "test2", to : "shard0001" } );
+adminSA.runCommand({moveprimary: "test", to: "shard0000"});
+adminSA.runCommand({moveprimary: "test2", to: "shard0001"});
-adminSA.runCommand({ shardCollection : ns, key : { _id : 1 } });
+adminSA.runCommand({shardCollection: ns, key: {_id: 1}});
try {
- s.stopBalancer();
-} catch (e) {
- print("coundn't stop balancer via command");
+ s.stopBalancer();
+} catch (e) {
+ print("coundn't stop balancer via command");
}
-adminSA.settings.update({ _id: 'balancer' }, { $set: { stopped: true }});
+adminSA.settings.update({_id: 'balancer'}, {$set: {stopped: true}});
-var db = mongosA.getDB( "test" );
+var db = mongosA.getDB("test");
var coll = db.coll;
var coll2 = db.coll2;
numDocs = 10;
for (var i = 1; i < numDocs; i++) {
- coll.insert({_id:i, control:0});
- coll2.insert({_id:i, control:0});
+ coll.insert({_id: i, control: 0});
+ coll2.insert({_id: i, control: 0});
}
debug("Inserted docs, now split chunks");
-adminSA.runCommand( { split: ns, find : { _id : 3} });
-adminSA.runCommand( { movechunk: ns, find : { _id : 10}, to: "shard0001" });
+adminSA.runCommand({split: ns, find: {_id: 3}});
+adminSA.runCommand({movechunk: ns, find: {_id: 10}, to: "shard0001"});
var command = 'printjson(db.coll.update({ _id: 9 }, { $set: { a: "9" }}, true));';
// without this first query through mongo, the second time doesn't "fail"
debug("Try query first time");
-runMongoProgram( "mongo", "--quiet", "--port", "" + s._mongos[1].port, "--eval", command );
+runMongoProgram("mongo", "--quiet", "--port", "" + s._mongos[1].port, "--eval", command);
-var res = mongosB.getDB("test").coll2.update({ _id: 0 }, { $set: { c: "333" }});
-assert.eq( 0, res.nModified );
+var res = mongosB.getDB("test").coll2.update({_id: 0}, {$set: {c: "333"}});
+assert.eq(0, res.nModified);
s.stop();
diff --git a/jstests/sharding/no_empty_reset.js b/jstests/sharding/no_empty_reset.js
index bda63ee9edc..61fe5905cc0 100644
--- a/jstests/sharding/no_empty_reset.js
+++ b/jstests/sharding/no_empty_reset.js
@@ -1,63 +1,64 @@
// Tests that an empty shard can't be the cause of a chunk reset
-var st = new ShardingTest({ shards : 2, mongos : 2 });
+var st = new ShardingTest({shards: 2, mongos: 2});
// Don't balance since we're manually moving chunks
st.stopBalancer();
-var coll = st.s.getCollection( jsTestName() + ".coll" );
+var coll = st.s.getCollection(jsTestName() + ".coll");
-for( var i = -10; i < 10; i++ )
- coll.insert({ _id : i });
-
-st.shardColl( coll, { _id : 1 }, { _id : 0 } );
+for (var i = -10; i < 10; i++)
+ coll.insert({_id: i});
-jsTestLog( "Sharded setup complete" );
+st.shardColl(coll, {_id: 1}, {_id: 0});
+
+jsTestLog("Sharded setup complete");
st.printShardingStatus();
-jsTestLog( "Setting initial versions for each mongos..." );
+jsTestLog("Setting initial versions for each mongos...");
coll.find().itcount();
-var collB = st.s1.getCollection( "" + coll );
+var collB = st.s1.getCollection("" + coll);
collB.find().itcount();
-jsTestLog( "Migrating via first mongos..." );
+jsTestLog("Migrating via first mongos...");
-var fullShard = st.getShard( coll, { _id : 1 } );
-var emptyShard = st.getShard( coll, { _id : -1 } );
+var fullShard = st.getShard(coll, {_id: 1});
+var emptyShard = st.getShard(coll, {_id: -1});
-var admin = st.s.getDB( "admin" );
+var admin = st.s.getDB("admin");
assert.soon(
- function () {
- var result = admin.runCommand( { moveChunk: "" + coll,
- find: { _id: -1 },
- to: fullShard.shardName,
- _waitForDelete: true } );
+ function() {
+ var result = admin.runCommand({
+ moveChunk: "" + coll,
+ find: {_id: -1},
+ to: fullShard.shardName,
+ _waitForDelete: true
+ });
jsTestLog('moveChunk result = ' + tojson(result));
return result.ok;
},
- "Setup FAILURE: Unable to move chunk from " + emptyShard.shardName +
- " to " + fullShard.shardName
-);
+ "Setup FAILURE: Unable to move chunk from " + emptyShard.shardName + " to " +
+ fullShard.shardName);
-jsTestLog( "Resetting shard version via first mongos..." );
+jsTestLog("Resetting shard version via first mongos...");
coll.find().itcount();
-jsTestLog( "Making sure we don't insert into the wrong shard..." );
+jsTestLog("Making sure we don't insert into the wrong shard...");
-collB.insert({ _id : -11 });
+collB.insert({_id: -11});
-var emptyColl = emptyShard.getCollection( "" + coll );
+var emptyColl = emptyShard.getCollection("" + coll);
-print( emptyColl );
-print( emptyShard );
-print( emptyShard.shardName );
+print(emptyColl);
+print(emptyShard);
+print(emptyShard.shardName);
st.printShardingStatus();
-assert.eq( 0, emptyColl.find().itcount() );
+assert.eq(0, emptyColl.find().itcount());
jsTestLog("DONE!");
st.stop();
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
index a05cfa2d396..cc332d65757 100644
--- a/jstests/sharding/parallel.js
+++ b/jstests/sharding/parallel.js
@@ -1,47 +1,57 @@
// This test fails when run with authentication because benchRun with auth is broken: SERVER-6388
(function() {
-"use strict";
-
-var numShards = 3;
-var s = new ShardingTest({name: "parallel", shards: numShards, mongos: 2});
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-
-var db = s.getDB( "test" );
-
-var N = 10000;
-
-for (var i=0; i<N; i+=(N/12)) {
- s.adminCommand({split: "test.foo", middle: {_id: i}});
- s.s.getDB('admin').runCommand({moveChunk: "test.foo",
- find: {_id: i},
- to: "shard000" + Math.floor(Math.random() * numShards)});
-}
-
-s.startBalancer();
-
-var bulk = db.foo.initializeUnorderedBulkOp();
-for ( i=0; i<N; i++ )
- bulk.insert({ _id: i });
-assert.writeOK(bulk.execute());
-
-var doCommand = function( dbname , cmd ) {
- x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
- host : db.getMongo().host , parallel : 2 , seconds : 2 } );
- printjson(x);
- x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
- host : s._mongos[1].host , parallel : 2 , seconds : 2 } );
+ "use strict";
+
+ var numShards = 3;
+ var s = new ShardingTest({name: "parallel", shards: numShards, mongos: 2});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+
+ var db = s.getDB("test");
+
+ var N = 10000;
+
+ for (var i = 0; i < N; i += (N / 12)) {
+ s.adminCommand({split: "test.foo", middle: {_id: i}});
+ s.s.getDB('admin').runCommand({
+ moveChunk: "test.foo",
+ find: {_id: i},
+ to: "shard000" + Math.floor(Math.random() * numShards)
+ });
+ }
+
+ s.startBalancer();
+
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (i = 0; i < N; i++)
+ bulk.insert({_id: i});
+ assert.writeOK(bulk.execute());
+
+ var doCommand = function(dbname, cmd) {
+ x = benchRun({
+ ops: [{op: "findOne", ns: dbname + ".$cmd", query: cmd}],
+ host: db.getMongo().host,
+ parallel: 2,
+ seconds: 2
+ });
+ printjson(x);
+ x = benchRun({
+ ops: [{op: "findOne", ns: dbname + ".$cmd", query: cmd}],
+ host: s._mongos[1].host,
+ parallel: 2,
+ seconds: 2
+ });
+ printjson(x);
+ };
+
+ doCommand("test", {dbstats: 1});
+ doCommand("config", {dbstats: 1});
+
+ var x = s.getDB("config").stats();
+ assert(x.ok, tojson(x));
printjson(x);
-};
-
-doCommand( "test" , { dbstats : 1 } );
-doCommand( "config" , { dbstats : 1 } );
-
-var x = s.getDB( "config" ).stats();
-assert( x.ok , tojson(x) );
-printjson(x);
-s.stop();
+ s.stop();
}()); \ No newline at end of file
diff --git a/jstests/sharding/pending_chunk.js b/jstests/sharding/pending_chunk.js
index 3455699d9e0..21107fe370d 100644
--- a/jstests/sharding/pending_chunk.js
+++ b/jstests/sharding/pending_chunk.js
@@ -2,88 +2,77 @@
// Tests pending chunk metadata.
//
-(function() {
-"use strict";
-
-var st = new ShardingTest({ shards: 2, mongos: 2, other: { separateConfig: true } });
-
-var mongos = st.s0;
-var admin = mongos.getDB('admin');
-var shards = mongos.getCollection('config.shards').find().toArray();
-var coll = mongos.getCollection('foo.bar');
-var ns = coll.getFullName();
-var dbName = coll.getDB().getName();
-var shard0 = st.shard0, shard1 = st.shard1;
-
-assert.commandWorked(admin.runCommand({enableSharding: dbName}));
-printjson(admin.runCommand({movePrimary: dbName, to: shards[0]._id}));
-assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
-
-jsTest.log('Moving some chunks to shard1...');
-
-assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
-assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1}}));
-assert.commandWorked(admin.runCommand({moveChunk: ns,
- find: {_id: 0},
- to: shards[1]._id,
- _waitForDelete: true}));
-assert.commandWorked(admin.runCommand({moveChunk: ns,
- find: {_id: 1},
- to: shards[1]._id,
- _waitForDelete: true}));
-
-
-function getMetadata(shard) {
- var admin = shard.getDB('admin'),
- metadata = admin.runCommand({
- getShardVersion: ns, fullMetadata: true
- }).metadata;
-
- jsTest.log('Got metadata: ' + tojson(metadata));
- return metadata;
-}
-
-var metadata = getMetadata(shard1);
-assert.eq(metadata.pending[0][0]._id, 1);
-assert.eq(metadata.pending[0][1]._id, MaxKey);
-
-jsTest.log('Moving some chunks back to shard0 after empty...');
-
-assert.commandWorked(admin.runCommand({moveChunk: ns,
- find: {_id: -1},
- to: shards[1]._id,
- _waitForDelete: true}));
-
-metadata = getMetadata(shard0);
-assert.eq(metadata.shardVersion.t, 0);
-assert.neq(metadata.collVersion.t, 0);
-assert.eq(metadata.pending.length, 0);
-
-assert.commandWorked(admin.runCommand({moveChunk: ns,
- find: {_id: 1},
- to: shards[0]._id,
- _waitForDelete: true}));
-
-metadata = getMetadata(shard0);
-assert.eq(metadata.shardVersion.t, 0);
-assert.neq(metadata.collVersion.t, 0);
-assert.eq(metadata.pending[0][0]._id, 1);
-assert.eq(metadata.pending[0][1]._id, MaxKey);
-
-// The pending chunk should be promoted to a real chunk when shard0 reloads
-// its config.
-jsTest.log('Checking that pending chunk is promoted on reload...');
-
-assert.eq(null, coll.findOne({_id: 1}));
-
-metadata = getMetadata(shard0);
-assert.neq(metadata.shardVersion.t, 0);
-assert.neq(metadata.collVersion.t, 0);
-assert.eq(metadata.chunks[0][0]._id, 1);
-assert.eq(metadata.chunks[0][1]._id, MaxKey);
-
-st.printShardingStatus();
-
-st.stop();
+(function() {
+ "use strict";
+
+ var st = new ShardingTest({shards: 2, mongos: 2, other: {separateConfig: true}});
+
+ var mongos = st.s0;
+ var admin = mongos.getDB('admin');
+ var shards = mongos.getCollection('config.shards').find().toArray();
+ var coll = mongos.getCollection('foo.bar');
+ var ns = coll.getFullName();
+ var dbName = coll.getDB().getName();
+ var shard0 = st.shard0, shard1 = st.shard1;
+
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ printjson(admin.runCommand({movePrimary: dbName, to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
+
+ jsTest.log('Moving some chunks to shard1...');
+
+ assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1}}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 0}, to: shards[1]._id, _waitForDelete: true}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 1}, to: shards[1]._id, _waitForDelete: true}));
+
+ function getMetadata(shard) {
+ var admin = shard.getDB('admin'),
+ metadata = admin.runCommand({getShardVersion: ns, fullMetadata: true}).metadata;
+
+ jsTest.log('Got metadata: ' + tojson(metadata));
+ return metadata;
+ }
+
+ var metadata = getMetadata(shard1);
+ assert.eq(metadata.pending[0][0]._id, 1);
+ assert.eq(metadata.pending[0][1]._id, MaxKey);
+
+ jsTest.log('Moving some chunks back to shard0 after empty...');
+
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: -1}, to: shards[1]._id, _waitForDelete: true}));
+
+ metadata = getMetadata(shard0);
+ assert.eq(metadata.shardVersion.t, 0);
+ assert.neq(metadata.collVersion.t, 0);
+ assert.eq(metadata.pending.length, 0);
+
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 1}, to: shards[0]._id, _waitForDelete: true}));
+
+ metadata = getMetadata(shard0);
+ assert.eq(metadata.shardVersion.t, 0);
+ assert.neq(metadata.collVersion.t, 0);
+ assert.eq(metadata.pending[0][0]._id, 1);
+ assert.eq(metadata.pending[0][1]._id, MaxKey);
+
+ // The pending chunk should be promoted to a real chunk when shard0 reloads
+ // its config.
+ jsTest.log('Checking that pending chunk is promoted on reload...');
+
+ assert.eq(null, coll.findOne({_id: 1}));
+
+ metadata = getMetadata(shard0);
+ assert.neq(metadata.shardVersion.t, 0);
+ assert.neq(metadata.collVersion.t, 0);
+ assert.eq(metadata.chunks[0][0]._id, 1);
+ assert.eq(metadata.chunks[0][1]._id, MaxKey);
+
+ st.printShardingStatus();
+
+ st.stop();
})();
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index 6e906add822..8ac414113df 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -7,16 +7,16 @@
// Insert docs with same val for 'skey' but different vals for 'extra'.
// Move chunks around and check that [min,max) chunk boundaries are properly obeyed.
-var s = new ShardingTest({ name : jsTestName(), shards : 2 });
+var s = new ShardingTest({name: jsTestName(), shards: 2});
-var db = s.getDB( "test" );
-var admin = s.getDB( "admin" );
-var config = s.getDB( "config" );
+var db = s.getDB("test");
+var admin = s.getDB("admin");
+var config = s.getDB("config");
var shards = config.shards.find().toArray();
-var shard0 = new Mongo( shards[0].host );
-var shard1 = new Mongo( shards[1].host );
+var shard0 = new Mongo(shards[0].host);
+var shard1 = new Mongo(shards[1].host);
-s.adminCommand( { enablesharding : "test" } );
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
//******************Part 1********************
@@ -24,67 +24,75 @@ s.ensurePrimaryShard('test', 'shard0001');
var coll = db.foo;
var longStr = 'a';
-while ( longStr.length < 1024 * 128 ) { longStr += longStr; }
+while (longStr.length < 1024 * 128) {
+ longStr += longStr;
+}
var bulk = coll.initializeUnorderedBulkOp();
-for( i=0 ; i<100; i++){
- bulk.insert({ num: i, str: longStr });
- bulk.insert({ num: i+100, x: i, str: longStr });
+for (i = 0; i < 100; i++) {
+ bulk.insert({num: i, str: longStr});
+ bulk.insert({num: i + 100, x: i, str: longStr});
}
assert.writeOK(bulk.execute());
-//no usable index yet, should throw
-assert.throws( function(){ s.adminCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } ); } );
+// no usable index yet, should throw
+assert.throws(function() {
+ s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
+});
-//create usable index
-assert.commandWorked(coll.ensureIndex({ num: 1, x: 1 }));
+// create usable index
+assert.commandWorked(coll.ensureIndex({num: 1, x: 1}));
-//usable index, but doc with empty 'num' value, so still should throw
-assert.writeOK(coll.insert({ x: -5 }));
-assert.throws( function(){ s.adminCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } ); } );
+// usable index, but doc with empty 'num' value, so still should throw
+assert.writeOK(coll.insert({x: -5}));
+assert.throws(function() {
+ s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
+});
-//remove the bad doc. now should finally succeed
-assert.writeOK(coll.remove({ x: -5 }));
-var result1 = admin.runCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } );
-printjson( result1 );
-assert.eq( 1, result1.ok , "sharding didn't succeed");
+// remove the bad doc. now should finally succeed
+assert.writeOK(coll.remove({x: -5}));
+var result1 = admin.runCommand({shardCollection: coll.getFullName(), key: {num: 1}});
+printjson(result1);
+assert.eq(1, result1.ok, "sharding didn't succeed");
-//make sure extra index is not created
-assert.eq( 2, coll.getIndexes().length );
+// make sure extra index is not created
+assert.eq(2, coll.getIndexes().length);
// make sure balancing happens
-s.awaitBalance( coll.getName(), db.getName() );
+s.awaitBalance(coll.getName(), db.getName());
// Make sure our initial balance cleanup doesn't interfere with later migrations.
-assert.soon( function(){
- print( "Waiting for migration cleanup to occur..." );
+assert.soon(function() {
+ print("Waiting for migration cleanup to occur...");
return coll.count() == coll.find().itcount();
});
s.stopBalancer();
-//test splitting
-var result2 = admin.runCommand( { split : coll.getFullName() , middle : { num : 50 } } );
-printjson( result2 );
-assert.eq( 1, result2.ok , "splitting didn't succeed");
-
-//test moving
-var result3 = admin.runCommand({ movechunk: coll.getFullName(),
- find: { num: 20 },
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true });
-printjson( result3 );
-assert.eq( 1, result3.ok , "moveChunk didn't succeed");
-
+// test splitting
+var result2 = admin.runCommand({split: coll.getFullName(), middle: {num: 50}});
+printjson(result2);
+assert.eq(1, result2.ok, "splitting didn't succeed");
+
+// test moving
+var result3 = admin.runCommand({
+ movechunk: coll.getFullName(),
+ find: {num: 20},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+printjson(result3);
+assert.eq(1, result3.ok, "moveChunk didn't succeed");
//******************Part 2********************
// Migrations and splits will still work on a sharded collection that only has multi key
// index.
-db.user.ensureIndex({ num: 1, x: 1 });
-db.adminCommand({ shardCollection: 'test.user', key: { num: 1 }});
+db.user.ensureIndex({num: 1, x: 1});
+db.adminCommand({shardCollection: 'test.user', key: {num: 1}});
var indexCount = db.user.getIndexes().length;
-assert.eq(2, indexCount, // indexes for _id_ and num_1_x_1
+assert.eq(2,
+ indexCount, // indexes for _id_ and num_1_x_1
'index count not expected: ' + tojson(db.user.getIndexes()));
var array = [];
@@ -93,37 +101,45 @@ for (var item = 0; item < 50; item++) {
}
for (var docs = 0; docs < 1000; docs++) {
- db.user.insert({ num: docs, x: array });
+ db.user.insert({num: docs, x: array});
}
assert.eq(1000, db.user.find().itcount());
-var result4 = admin.runCommand({ movechunk: 'test.user', find: { num: 70 },
- to: s.getOther(s.getPrimaryShard("test")).name, _waitForDelete: true });
+var result4 = admin.runCommand({
+ movechunk: 'test.user',
+ find: {num: 70},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
assert.commandWorked(result4);
-var expectedShardCount = { shard0000: 0, shard0001: 0 };
-config.chunks.find({ ns: 'test.user' }).forEach(function(chunkDoc) {
- var min = chunkDoc.min.num;
- var max = chunkDoc.max.num;
-
- if (min < 0 || min == MinKey) {
- min = 0;
- }
+var expectedShardCount = {
+ shard0000: 0,
+ shard0001: 0
+};
+config.chunks.find({ns: 'test.user'})
+ .forEach(function(chunkDoc) {
+ var min = chunkDoc.min.num;
+ var max = chunkDoc.max.num;
+
+ if (min < 0 || min == MinKey) {
+ min = 0;
+ }
- if (max > 1000 || max == MaxKey) {
- max = 1000;
- }
+ if (max > 1000 || max == MaxKey) {
+ max = 1000;
+ }
- if (max > 0) {
- expectedShardCount[chunkDoc.shard] += (max - min);
- }
-});
+ if (max > 0) {
+ expectedShardCount[chunkDoc.shard] += (max - min);
+ }
+ });
assert.eq(expectedShardCount['shard0000'], shard0.getDB('test').user.find().count());
assert.eq(expectedShardCount['shard0001'], shard1.getDB('test').user.find().count());
-result4 = admin.runCommand({ split: 'test.user', middle: { num: 70 }});
+result4 = admin.runCommand({split: 'test.user', middle: {num: 70}});
assert.commandWorked(result4);
assert.eq(expectedShardCount['shard0000'], shard0.getDB('test').user.find().count());
@@ -134,62 +150,59 @@ assert.eq(expectedShardCount['shard0001'], shard1.getDB('test').user.find().coun
// Check chunk boundaries obeyed when using prefix shard key.
// This test repeats with shard key as the prefix of different longer indices.
-for( i=0; i < 3; i++ ){
-
+for (i = 0; i < 3; i++) {
// setup new collection on shard0
var coll2 = db.foo2;
coll2.drop();
- if ( s.getPrimaryShardIdForDatabase( coll2.getDB() ) != shards[0]._id ) {
- var moveRes = admin.runCommand( { movePrimary : coll2.getDB() + "", to : shards[0]._id } );
- assert.eq( moveRes.ok , 1 , "primary not moved correctly" );
+ if (s.getPrimaryShardIdForDatabase(coll2.getDB()) != shards[0]._id) {
+ var moveRes = admin.runCommand({movePrimary: coll2.getDB() + "", to: shards[0]._id});
+ assert.eq(moveRes.ok, 1, "primary not moved correctly");
}
// declare a longer index
- if ( i == 0 ) {
- assert.commandWorked( coll2.ensureIndex( { skey : 1, extra : 1 } ));
- }
- else if ( i == 1 ) {
- assert.commandWorked( coll2.ensureIndex( { skey : 1, extra : -1 } ));
- }
- else if ( i == 2 ) {
- assert.commandWorked( coll2.ensureIndex( { skey : 1, extra : 1 , superfluous : -1 } ));
+ if (i == 0) {
+ assert.commandWorked(coll2.ensureIndex({skey: 1, extra: 1}));
+ } else if (i == 1) {
+ assert.commandWorked(coll2.ensureIndex({skey: 1, extra: -1}));
+ } else if (i == 2) {
+ assert.commandWorked(coll2.ensureIndex({skey: 1, extra: 1, superfluous: -1}));
}
// then shard collection on prefix
- var shardRes = admin.runCommand( { shardCollection : coll2 + "", key : { skey : 1 } } );
- assert.eq( shardRes.ok , 1 , "collection not sharded" );
+ var shardRes = admin.runCommand({shardCollection: coll2 + "", key: {skey: 1}});
+ assert.eq(shardRes.ok, 1, "collection not sharded");
// insert docs with same value for skey
bulk = coll2.initializeUnorderedBulkOp();
- for( var i = 0; i < 5; i++ ){
- for( var j = 0; j < 5; j++ ){
- bulk.insert( { skey : 0, extra : i , superfluous : j } );
+ for (var i = 0; i < 5; i++) {
+ for (var j = 0; j < 5; j++) {
+ bulk.insert({skey: 0, extra: i, superfluous: j});
}
}
- assert.writeOK( bulk.execute() );
+ assert.writeOK(bulk.execute());
// split on that key, and check it makes 2 chunks
- var splitRes = admin.runCommand( { split : coll2 + "", middle : { skey : 0 } } );
- assert.eq( splitRes.ok , 1 , "split didn't work" );
- assert.eq( config.chunks.find( { ns : coll2.getFullName() } ).count() , 2 );
+ var splitRes = admin.runCommand({split: coll2 + "", middle: {skey: 0}});
+ assert.eq(splitRes.ok, 1, "split didn't work");
+ assert.eq(config.chunks.find({ns: coll2.getFullName()}).count(), 2);
// movechunk should move ALL docs since they have same value for skey
- moveRes = admin.runCommand({ moveChunk: coll2 + "", find: { skey: 0 },
- to: shards[1]._id, _waitForDelete: true });
- assert.eq( moveRes.ok , 1 , "movechunk didn't work" );
+ moveRes = admin.runCommand(
+ {moveChunk: coll2 + "", find: {skey: 0}, to: shards[1]._id, _waitForDelete: true});
+ assert.eq(moveRes.ok, 1, "movechunk didn't work");
// Make sure our migration eventually goes through before testing individual shards
- assert.soon( function(){
- print( "Waiting for migration cleanup to occur..." );
+ assert.soon(function() {
+ print("Waiting for migration cleanup to occur...");
return coll2.count() == coll2.find().itcount();
});
-
+
// check no orphaned docs on the shards
- assert.eq( 0 , shard0.getCollection( coll2 + "" ).find().itcount() );
- assert.eq( 25 , shard1.getCollection( coll2 + "" ).find().itcount() );
+ assert.eq(0, shard0.getCollection(coll2 + "").find().itcount());
+ assert.eq(25, shard1.getCollection(coll2 + "").find().itcount());
// and check total
- assert.eq( 25 , coll2.find().itcount() , "bad total number of docs after move" );
+ assert.eq(25, coll2.find().itcount(), "bad total number of docs after move");
s.printShardingStatus();
}
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
index d5efef30c34..b59dc4aa901 100644
--- a/jstests/sharding/presplit.js
+++ b/jstests/sharding/presplit.js
@@ -1,43 +1,40 @@
(function() {
-var s = new ShardingTest({ name: "presplit",
- shards: 2,
- mongos: 1,
- other: { chunkSize : 1 } });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-
-// Insert enough data in 'test.foo' to fill several chunks, if it was sharded.
-bigString = "";
-while ( bigString.length < 10000 ){
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-}
-
-db = s.getDB( "test" );
-inserted = 0;
-num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while ( inserted < ( 20 * 1024 * 1024 ) ){
- bulk.insert({ _id: num++, s: bigString });
- inserted += bigString.length;
-}
-assert.writeOK(bulk.execute());
-
-// Make sure that there's only one chunk holding all the data.
-s.printChunks();
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-assert.eq( 0 , s.config.chunks.count() , "single chunk assertion" );
-assert.eq( num , primary.foo.count() );
-
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-
-// Make sure the collection's original chunk got split
-s.printChunks();
-assert.lt( 20 , s.config.chunks.count() , "many chunks assertion" );
-assert.eq( num , primary.foo.count() );
-
-s.printChangeLog();
-s.stop();
+ var s = new ShardingTest({name: "presplit", shards: 2, mongos: 1, other: {chunkSize: 1}});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+
+ // Insert enough data in 'test.foo' to fill several chunks, if it was sharded.
+ bigString = "";
+ while (bigString.length < 10000) {
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+ }
+
+ db = s.getDB("test");
+ inserted = 0;
+ num = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (inserted < (20 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+ }
+ assert.writeOK(bulk.execute());
+
+ // Make sure that there's only one chunk holding all the data.
+ s.printChunks();
+ primary = s.getPrimaryShard("test").getDB("test");
+ assert.eq(0, s.config.chunks.count(), "single chunk assertion");
+ assert.eq(num, primary.foo.count());
+
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+
+ // Make sure the collection's original chunk got split
+ s.printChunks();
+ assert.lt(20, s.config.chunks.count(), "many chunks assertion");
+ assert.eq(num, primary.foo.count());
+
+ s.printChangeLog();
+ s.stop();
})();
diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js
index 2bac7da381a..05e6eca0d4f 100644
--- a/jstests/sharding/printShardingStatus.js
+++ b/jstests/sharding/printShardingStatus.js
@@ -2,242 +2,236 @@
// contains important information that it should, like the major section
// headings and the names of sharded collections and their shard keys.
+(function() {
-(function () {
+ var st = new ShardingTest({shards: 1, mongos: 2, config: 1, other: {smallfiles: true}});
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
-var st = new ShardingTest({ shards: 1, mongos: 2, config: 1, other: { smallfiles: true } });
-
-var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-
-
-function grabStatusOutput(configdb, verbose) {
- var res = print.captureAllOutput( function () {
+ function grabStatusOutput(configdb, verbose) {
+ var res = print.captureAllOutput(function() {
return printShardingStatus(configdb, verbose);
- } );
- var output = res.output.join("\n");
- jsTestLog(output);
- return output;
-}
-
-function assertPresentInOutput(output, content, what) {
- assert(output.includes(content), what + " \"" + content + "\" NOT present in output of "
- + "printShardingStatus() (but it should be)");
-}
-
-function assertNotPresentInOutput(output, content, what) {
- assert( ! output.includes(content), what + " \"" + content + "\" IS present in output of "
- + "printShardingStatus() (but it should not be)");
-}
-
-
-
-////////////////////////
-// Basic tests
-////////////////////////
-
-var dbName = "thisIsTheDatabase";
-var collName = "thisIsTheCollection";
-var shardKeyName = "thisIsTheShardKey";
-var nsName = dbName + "." + collName;
-
-assert.commandWorked( admin.runCommand({ enableSharding: dbName }) );
-var key = {};
-key[shardKeyName] = 1;
-assert.commandWorked( admin.runCommand({ shardCollection: nsName, key: key }) );
-
-
-function testBasic(output) {
- assertPresentInOutput(output, "shards:", "section header");
- assertPresentInOutput(output, "databases:", "section header");
- assertPresentInOutput(output, "balancer:", "section header");
- assertPresentInOutput(output, "active mongoses:", "section header");
- assertNotPresentInOutput(output, "most recently active mongoses:", "section header");
-
- assertPresentInOutput(output, dbName, "database");
- assertPresentInOutput(output, collName, "collection");
- assertPresentInOutput(output, shardKeyName, "shard key");
-}
-
-function testBasicNormalOnly(output) {
- assertPresentInOutput(output, tojson(version) + " : 2\n", "active mongos version");
-}
-
-function testBasicVerboseOnly(output) {
- assertPresentInOutput(output, '"mongoVersion" : ' + tojson(version), "active mongos version");
- assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "active mongos hostname");
- assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "active mongos hostname");
-}
-
-var buildinfo = assert.commandWorked( mongos.adminCommand("buildinfo") );
-var serverStatus1 = assert.commandWorked( mongos.adminCommand("serverStatus") );
-var serverStatus2 = assert.commandWorked( st.s1.adminCommand("serverStatus") );
-var version = buildinfo.version;
-var s1Host = serverStatus1.host;
-var s2Host = serverStatus2.host;
-
-
-// Normal, active mongoses
-var outputNormal = grabStatusOutput(st.config, false);
-testBasic(outputNormal);
-testBasicNormalOnly(outputNormal);
-
-var outputVerbose = grabStatusOutput(st.config, true);
-testBasic(outputVerbose);
-testBasicVerboseOnly(outputVerbose);
-
-
-// Take a copy of the config db, in order to test the harder-to-setup cases below.
-// TODO: Replace this manual copy with copydb once SERVER-13080 is fixed.
-var config = mongos.getDB("config");
-var configCopy = mongos.getDB("configCopy");
-config.getCollectionInfos().forEach( function (c) {
- // Create collection with options.
- assert.commandWorked( configCopy.createCollection(c.name, c.options) );
- // Clone the docs.
- config.getCollection(c.name).find().snapshot().forEach( function (d) {
- assert.writeOK( configCopy.getCollection(c.name).insert(d) );
- } );
- // Build the indexes.
- config.getCollection(c.name).getIndexes().forEach( function (i) {
- var key = i.key;
- delete i.key;
- delete i.ns;
- delete i.v;
- assert.commandWorked( configCopy.getCollection(c.name).ensureIndex(key, i) );
- } );
-} );
-
-
-// Inactive mongoses
-// Make the first ping be older than now by 1 second more than the threshold
-// Make the second ping be older still by the same amount again
-var pingAdjustMs = 60000 + 1000;
-var then = new Date();
-then.setTime(then.getTime() - pingAdjustMs);
-configCopy.mongos.update( { _id: s1Host }, { $set: { ping: then } } );
-then.setTime(then.getTime() - pingAdjustMs);
-configCopy.mongos.update( { _id: s2Host }, { $set: { ping: then } } );
-
-var output = grabStatusOutput(configCopy, false);
-assertPresentInOutput(output, "most recently active mongoses:", "section header");
-assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
-
-var output = grabStatusOutput(configCopy, true);
-assertPresentInOutput(output, "most recently active mongoses:", "section header");
-assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "recent mongos hostname");
-assertNotPresentInOutput(output, '"_id" : ' + tojson(s2Host), "old mongos hostname");
-
-
-// Older mongoses
-configCopy.mongos.remove( { _id: s1Host } );
-
-var output = grabStatusOutput(configCopy, false);
-assertPresentInOutput(output, "most recently active mongoses:", "section header");
-assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
-
-var output = grabStatusOutput(configCopy, true);
-assertPresentInOutput(output, "most recently active mongoses:", "section header");
-assertNotPresentInOutput(output, '"_id" : ' + tojson(s1Host), "removed mongos hostname");
-assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "recent mongos hostname");
-
-
-// No mongoses at all
-configCopy.mongos.remove({});
-
-var output = grabStatusOutput(configCopy, false);
-assertPresentInOutput(output, "most recently active mongoses:\n\tnone", "no mongoses");
-
-var output = grabStatusOutput(configCopy, true);
-assertPresentInOutput(output, "most recently active mongoses:\n\tnone", "no mongoses (verbose)");
-
-
-assert( mongos.getDB(dbName).dropDatabase() );
-
-
-
-////////////////////////
-// Extended tests
-////////////////////////
-
-var testCollDetailsNum = 0;
-function testCollDetails(args) {
- if (args === undefined || typeof(args) != "object") {
- args = {};
+ });
+ var output = res.output.join("\n");
+ jsTestLog(output);
+ return output;
}
- var getCollName = function (x) { return "test.test" + x.zeroPad(4); };
- var collName = getCollName(testCollDetailsNum);
-
- var cmdObj = { shardCollection: collName, key: { _id: 1 } };
- if (args.unique) {
- cmdObj.unique = true;
+ function assertPresentInOutput(output, content, what) {
+ assert(output.includes(content),
+ what + " \"" + content + "\" NOT present in output of " +
+ "printShardingStatus() (but it should be)");
}
- assert.commandWorked( admin.runCommand(cmdObj) );
- if (args.hasOwnProperty("unique")) {
- assert.writeOK( mongos.getDB("config").collections.update({ _id : collName },
- { $set : { "unique" : args.unique } }) );
- }
- if (args.hasOwnProperty("noBalance")) {
- assert.writeOK( mongos.getDB("config").collections.update({ _id : collName },
- { $set : { "noBalance" : args.noBalance } }) );
+ function assertNotPresentInOutput(output, content, what) {
+ assert(!output.includes(content),
+ what + " \"" + content + "\" IS present in output of " +
+ "printShardingStatus() (but it should not be)");
}
- var output = grabStatusOutput(st.config);
-
- assertPresentInOutput(output, collName, "collection");
- // If any of the previous collection names are present, then their optional indicators
- // might also be present. This might taint the results when we go searching through
- // the output.
- // This also means that earlier collNames can't be a prefix of later collNames.
- for (var i = 0; i < testCollDetailsNum; i++) {
- assertNotPresentInOutput(output, getCollName(i), "previous collection");
+ ////////////////////////
+ // Basic tests
+ ////////////////////////
+
+ var dbName = "thisIsTheDatabase";
+ var collName = "thisIsTheCollection";
+ var shardKeyName = "thisIsTheShardKey";
+ var nsName = dbName + "." + collName;
+
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ var key = {};
+ key[shardKeyName] = 1;
+ assert.commandWorked(admin.runCommand({shardCollection: nsName, key: key}));
+
+ function testBasic(output) {
+ assertPresentInOutput(output, "shards:", "section header");
+ assertPresentInOutput(output, "databases:", "section header");
+ assertPresentInOutput(output, "balancer:", "section header");
+ assertPresentInOutput(output, "active mongoses:", "section header");
+ assertNotPresentInOutput(output, "most recently active mongoses:", "section header");
+
+ assertPresentInOutput(output, dbName, "database");
+ assertPresentInOutput(output, collName, "collection");
+ assertPresentInOutput(output, shardKeyName, "shard key");
}
- assertPresentInOutput(output, "unique: " + (!!args.unique), "unique shard key indicator");
- if (args.hasOwnProperty("unique") && typeof(args.unique) != "boolean") {
- // non-bool: actual value must be shown
- assertPresentInOutput(output, tojson(args.unique), "unique shard key indicator (non bool)");
+ function testBasicNormalOnly(output) {
+ assertPresentInOutput(output, tojson(version) + " : 2\n", "active mongos version");
}
- assertPresentInOutput(output,
- "balancing: " + (!args.noBalance),
- "balancing indicator (inverse of noBalance)");
- if (args.hasOwnProperty("noBalance") && typeof(args.noBalance) != "boolean") {
- // non-bool: actual value must be shown
- assertPresentInOutput(output, tojson(args.noBalance), "noBalance indicator (non bool)");
+ function testBasicVerboseOnly(output) {
+ assertPresentInOutput(
+ output, '"mongoVersion" : ' + tojson(version), "active mongos version");
+ assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "active mongos hostname");
+ assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "active mongos hostname");
}
- assert( mongos.getCollection(collName).drop() );
-
- testCollDetailsNum++;
-}
-
-assert.commandWorked( admin.runCommand({ enableSharding: "test" }) );
-
-// Defaults
-testCollDetails({ });
+ var buildinfo = assert.commandWorked(mongos.adminCommand("buildinfo"));
+ var serverStatus1 = assert.commandWorked(mongos.adminCommand("serverStatus"));
+ var serverStatus2 = assert.commandWorked(st.s1.adminCommand("serverStatus"));
+ var version = buildinfo.version;
+ var s1Host = serverStatus1.host;
+ var s2Host = serverStatus2.host;
+
+ // Normal, active mongoses
+ var outputNormal = grabStatusOutput(st.config, false);
+ testBasic(outputNormal);
+ testBasicNormalOnly(outputNormal);
+
+ var outputVerbose = grabStatusOutput(st.config, true);
+ testBasic(outputVerbose);
+ testBasicVerboseOnly(outputVerbose);
+
+ // Take a copy of the config db, in order to test the harder-to-setup cases below.
+ // TODO: Replace this manual copy with copydb once SERVER-13080 is fixed.
+ var config = mongos.getDB("config");
+ var configCopy = mongos.getDB("configCopy");
+ config.getCollectionInfos().forEach(function(c) {
+ // Create collection with options.
+ assert.commandWorked(configCopy.createCollection(c.name, c.options));
+ // Clone the docs.
+ config.getCollection(c.name).find().snapshot().forEach(function(d) {
+ assert.writeOK(configCopy.getCollection(c.name).insert(d));
+ });
+ // Build the indexes.
+ config.getCollection(c.name).getIndexes().forEach(function(i) {
+ var key = i.key;
+ delete i.key;
+ delete i.ns;
+ delete i.v;
+ assert.commandWorked(configCopy.getCollection(c.name).ensureIndex(key, i));
+ });
+ });
+
+ // Inactive mongoses
+ // Make the first ping be older than now by 1 second more than the threshold
+ // Make the second ping be older still by the same amount again
+ var pingAdjustMs = 60000 + 1000;
+ var then = new Date();
+ then.setTime(then.getTime() - pingAdjustMs);
+ configCopy.mongos.update({_id: s1Host}, {$set: {ping: then}});
+ then.setTime(then.getTime() - pingAdjustMs);
+ configCopy.mongos.update({_id: s2Host}, {$set: {ping: then}});
+
+ var output = grabStatusOutput(configCopy, false);
+ assertPresentInOutput(output, "most recently active mongoses:", "section header");
+ assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
+
+ var output = grabStatusOutput(configCopy, true);
+ assertPresentInOutput(output, "most recently active mongoses:", "section header");
+ assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "recent mongos hostname");
+ assertNotPresentInOutput(output, '"_id" : ' + tojson(s2Host), "old mongos hostname");
+
+ // Older mongoses
+ configCopy.mongos.remove({_id: s1Host});
+
+ var output = grabStatusOutput(configCopy, false);
+ assertPresentInOutput(output, "most recently active mongoses:", "section header");
+ assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
+
+ var output = grabStatusOutput(configCopy, true);
+ assertPresentInOutput(output, "most recently active mongoses:", "section header");
+ assertNotPresentInOutput(output, '"_id" : ' + tojson(s1Host), "removed mongos hostname");
+ assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "recent mongos hostname");
+
+ // No mongoses at all
+ configCopy.mongos.remove({});
+
+ var output = grabStatusOutput(configCopy, false);
+ assertPresentInOutput(output, "most recently active mongoses:\n\tnone", "no mongoses");
+
+ var output = grabStatusOutput(configCopy, true);
+ assertPresentInOutput(
+ output, "most recently active mongoses:\n\tnone", "no mongoses (verbose)");
+
+ assert(mongos.getDB(dbName).dropDatabase());
+
+ ////////////////////////
+ // Extended tests
+ ////////////////////////
+
+ var testCollDetailsNum = 0;
+ function testCollDetails(args) {
+ if (args === undefined || typeof(args) != "object") {
+ args = {};
+ }
+
+ var getCollName = function(x) {
+ return "test.test" + x.zeroPad(4);
+ };
+ var collName = getCollName(testCollDetailsNum);
+
+ var cmdObj = {
+ shardCollection: collName,
+ key: {_id: 1}
+ };
+ if (args.unique) {
+ cmdObj.unique = true;
+ }
+ assert.commandWorked(admin.runCommand(cmdObj));
+
+ if (args.hasOwnProperty("unique")) {
+ assert.writeOK(mongos.getDB("config").collections.update(
+ {_id: collName}, {$set: {"unique": args.unique}}));
+ }
+ if (args.hasOwnProperty("noBalance")) {
+ assert.writeOK(mongos.getDB("config").collections.update(
+ {_id: collName}, {$set: {"noBalance": args.noBalance}}));
+ }
+
+ var output = grabStatusOutput(st.config);
+
+ assertPresentInOutput(output, collName, "collection");
+ // If any of the previous collection names are present, then their optional indicators
+ // might also be present. This might taint the results when we go searching through
+ // the output.
+ // This also means that earlier collNames can't be a prefix of later collNames.
+ for (var i = 0; i < testCollDetailsNum; i++) {
+ assertNotPresentInOutput(output, getCollName(i), "previous collection");
+ }
+
+ assertPresentInOutput(output, "unique: " + (!!args.unique), "unique shard key indicator");
+ if (args.hasOwnProperty("unique") && typeof(args.unique) != "boolean") {
+ // non-bool: actual value must be shown
+ assertPresentInOutput(
+ output, tojson(args.unique), "unique shard key indicator (non bool)");
+ }
+
+ assertPresentInOutput(output,
+ "balancing: " + (!args.noBalance),
+ "balancing indicator (inverse of noBalance)");
+ if (args.hasOwnProperty("noBalance") && typeof(args.noBalance) != "boolean") {
+ // non-bool: actual value must be shown
+ assertPresentInOutput(output, tojson(args.noBalance), "noBalance indicator (non bool)");
+ }
+
+ assert(mongos.getCollection(collName).drop());
+
+ testCollDetailsNum++;
+ }
-// Expected values
-testCollDetails({ unique: false, noBalance: false });
-testCollDetails({ unique: true, noBalance: true });
+ assert.commandWorked(admin.runCommand({enableSharding: "test"}));
-// Unexpected truthy values
-testCollDetails({ unique: "truthy unique value 1", noBalance: "truthy noBalance value 1" });
-testCollDetails({ unique: 1, noBalance: 1 });
-testCollDetails({ unique: -1, noBalance: -1 });
-testCollDetails({ unique: {}, noBalance: {} });
+ // Defaults
+ testCollDetails({});
-// Unexpected falsy values
-testCollDetails({ unique: "", noBalance: "" });
-testCollDetails({ unique: 0, noBalance: 0 });
+ // Expected values
+ testCollDetails({unique: false, noBalance: false});
+ testCollDetails({unique: true, noBalance: true});
-assert( mongos.getDB("test").dropDatabase() );
+ // Unexpected truthy values
+ testCollDetails({unique: "truthy unique value 1", noBalance: "truthy noBalance value 1"});
+ testCollDetails({unique: 1, noBalance: 1});
+ testCollDetails({unique: -1, noBalance: -1});
+ testCollDetails({unique: {}, noBalance: {}});
+ // Unexpected falsy values
+ testCollDetails({unique: "", noBalance: ""});
+ testCollDetails({unique: 0, noBalance: 0});
+ assert(mongos.getDB("test").dropDatabase());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/query_after_multi_write.js b/jstests/sharding/query_after_multi_write.js
index 74867dfd970..a952484435c 100644
--- a/jstests/sharding/query_after_multi_write.js
+++ b/jstests/sharding/query_after_multi_write.js
@@ -1,70 +1,63 @@
(function() {
-"use strict";
-
-/**
- * Test that queries will be properly routed after executing a write that does not
- * perform any shard version checks.
- */
-var runTest = function(writeFunc) {
- var st = new ShardingTest({ shards: 2, mongos: 2 });
-
- var testDB = st.s.getDB('test');
- testDB.dropDatabase();
-
- assert.commandWorked(testDB.adminCommand({ enableSharding: 'test' }));
- st.ensurePrimaryShard('test', 'shard0000');
-
- assert.commandWorked(testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }}));
- assert.commandWorked(testDB.adminCommand({ split: 'test.user', middle: { x: 0 }}));
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0001',
- _waitForDelete: true }));
-
- var testDB2 = st.s1.getDB('test');
- testDB2.user.insert({ x: 123456 });
-
- // Move chunk to bump version on a different mongos.
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000',
- _waitForDelete: true }));
-
- // Issue a query and make sure it gets routed to the right shard.
- assert.neq(null, testDB2.user.findOne({ x: 123456 }));
-
- // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
- // incremented to 3
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0001',
- _waitForDelete: true }));
-
- // Issue a query and make sure it gets routed to the right shard again.
- assert.neq(null, testDB2.user.findOne({ x: 123456 }));
-
- // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
- // incremented to 4
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000',
- _waitForDelete: true }));
-
- // Ensure that write commands with multi version do not reset the connection shard version to
- // ignored.
- writeFunc(testDB2);
-
- assert.neq(null, testDB2.user.findOne({ x: 123456 }));
-
- st.stop();
-};
-
-runTest(function(db) {
- db.user.update({}, { $inc: { y: 987654 }}, false, true);
-});
-
-runTest(function(db) {
- db.user.remove({ y: 'noMatch' }, false);
-});
+ "use strict";
+
+ /**
+ * Test that queries will be properly routed after executing a write that does not
+ * perform any shard version checks.
+ */
+ var runTest = function(writeFunc) {
+ var st = new ShardingTest({shards: 2, mongos: 2});
+
+ var testDB = st.s.getDB('test');
+ testDB.dropDatabase();
+
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0000');
+
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true}));
+
+ var testDB2 = st.s1.getDB('test');
+ testDB2.user.insert({x: 123456});
+
+ // Move chunk to bump version on a different mongos.
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0000', _waitForDelete: true}));
+
+ // Issue a query and make sure it gets routed to the right shard.
+ assert.neq(null, testDB2.user.findOne({x: 123456}));
+
+ // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
+ // incremented to 3
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true}));
+
+ // Issue a query and make sure it gets routed to the right shard again.
+ assert.neq(null, testDB2.user.findOne({x: 123456}));
+
+ // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
+ // incremented to 4
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0000', _waitForDelete: true}));
+
+ // Ensure that write commands with multi version do not reset the connection shard version
+ // to
+ // ignored.
+ writeFunc(testDB2);
+
+ assert.neq(null, testDB2.user.findOne({x: 123456}));
+
+ st.stop();
+ };
+
+ runTest(function(db) {
+ db.user.update({}, {$inc: {y: 987654}}, false, true);
+ });
+
+ runTest(function(db) {
+ db.user.remove({y: 'noMatch'}, false);
+ });
})();
diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js
index dea4cf92258..c6b08b8b7c0 100644
--- a/jstests/sharding/query_config.js
+++ b/jstests/sharding/query_config.js
@@ -9,13 +9,14 @@
};
var getListIndexesCursor = function(coll, options, subsequentBatchSize) {
- return new DBCommandCursor(coll.getDB().getMongo(),
- coll.runCommand("listIndexes", options),
- subsequentBatchSize);
+ return new DBCommandCursor(
+ coll.getDB().getMongo(), coll.runCommand("listIndexes", options), subsequentBatchSize);
};
var arrayGetNames = function(array) {
- return array.map(function(spec) { return spec.name; });
+ return array.map(function(spec) {
+ return spec.name;
+ });
};
var cursorGetCollectionNames = function(cursor) {
@@ -23,7 +24,9 @@
};
var sortArrayByName = function(array) {
- return array.sort(function(a, b) { return a.name > b.name; });
+ return array.sort(function(a, b) {
+ return a.name > b.name;
+ });
};
var cursorGetIndexNames = function(cursor) {
@@ -31,13 +34,15 @@
};
var sortArrayById = function(array) {
- return array.sort(function(a, b) { return a._id > b._id; });
+ return array.sort(function(a, b) {
+ return a._id > b._id;
+ });
};
var dropCollectionIfExists = function(coll) {
try {
coll.drop();
- } catch(err) {
+ } catch (err) {
assert.eq(err.code, ErrorCodes.NamespaceNotFound);
}
};
@@ -51,15 +56,17 @@
// testKeys and testCollNames are parallel arrays, testKeys contains the shard key of the
// corresponding collection whose name is in testCollNames.
var testCollNames = ["4a1", "1a12", "3a1b1", "2a1b1c1", "b1", "b1c1", "d1"];
- var testKeys = [{a: 1}, {a: 1}, {a: 1, b: 1}, {a: 1, b: 1, c: 1}, {b: 1}, {b: 1, c: 1},
- {d: 1}];
+ var testKeys =
+ [{a: 1}, {a: 1}, {a: 1, b: 1}, {a: 1, b: 1, c: 1}, {b: 1}, {b: 1, c: 1}, {d: 1}];
var testDB = st.s.getDB("test");
assert.commandWorked(st.s.adminCommand({enablesharding: testDB.getName()}));
- var testNamespaces = testCollNames.map(function(e) { return testDB.getName() + "." + e; });
+ var testNamespaces = testCollNames.map(function(e) {
+ return testDB.getName() + "." + e;
+ });
for (var i = 0; i < testKeys.length; i++) {
- assert.commandWorked(st.s.adminCommand({shardcollection: testNamespaces[i],
- key: testKeys[i]}));
+ assert.commandWorked(
+ st.s.adminCommand({shardcollection: testNamespaces[i], key: testKeys[i]}));
}
return testNamespaces;
@@ -72,8 +79,19 @@
var testListConfigCollections = function(st) {
// This test depends on all the collections in the configCollList being in the config
// database.
- var configCollList = ["changelog", "chunks", "collections", "databases", "lockpings",
- "locks", "mongos", "settings", "shards", "tags", "version"];
+ var configCollList = [
+ "changelog",
+ "chunks",
+ "collections",
+ "databases",
+ "lockpings",
+ "locks",
+ "mongos",
+ "settings",
+ "shards",
+ "tags",
+ "version"
+ ];
var configDB = st.s.getDB("config");
var userAddedColl = configDB.userAddedColl;
var cursor;
@@ -134,8 +152,8 @@
// Find query.
cursor = configDB.collections.find({"key.a": 1}, {dropped: 1, "key.a": 1, "key.c": 1})
- .sort({"_id": 1})
- .batchSize(2);
+ .sort({"_id": 1})
+ .batchSize(2);
assert.eq(cursor.objsLeftInBatch(), 2);
assert.eq(cursor.next(), {_id: testNamespaces[1], dropped: false, key: {a: 1}});
assert.eq(cursor.next(), {_id: testNamespaces[3], dropped: false, key: {a: 1, c: 1}});
@@ -146,9 +164,11 @@
assert(!cursor.hasNext());
// Aggregate query.
- cursor = configDB.collections.aggregate([{$match: {"key.b": 1}},
- {$sort: {"_id": 1}},
- {$project: {"keyb":"$key.b", "keyc":"$key.c"}}],
+ cursor = configDB.collections.aggregate([
+ {$match: {"key.b": 1}},
+ {$sort: {"_id": 1}},
+ {$project: {"keyb": "$key.b", "keyc": "$key.c"}}
+ ],
{cursor: {batchSize: 2}});
assert.eq(cursor.objsLeftInBatch(), 2);
assert.eq(cursor.next(), {_id: testNamespaces[3], keyb: 1, keyc: 1});
@@ -175,7 +195,7 @@
var result;
// Get shard names.
- cursor = configDB.shards.find().sort({_id:1});
+ cursor = configDB.shards.find().sort({_id: 1});
var shard1 = cursor.next()._id;
var shard2 = cursor.next()._id;
assert(!cursor.hasNext());
@@ -183,8 +203,8 @@
st.ensurePrimaryShard(testDB.getName(), shard1);
// Setup.
- assert.commandWorked(st.s.adminCommand({shardcollection: testColl.getFullName(),
- key: {e: 1}}));
+ assert.commandWorked(
+ st.s.adminCommand({shardcollection: testColl.getFullName(), key: {e: 1}}));
for (var i = 0; i < testCollData.length; i++) {
assert.writeOK(testColl.insert(testCollData[i]));
}
@@ -192,19 +212,16 @@
assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 6}}));
assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 8}}));
assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 11}}));
- assert.commandWorked(st.s.adminCommand({movechunk: testColl.getFullName(),
- find: {e: 1},
- to: shard2}));
- assert.commandWorked(st.s.adminCommand({movechunk: testColl.getFullName(),
- find: {e: 9},
- to: shard2}));
- assert.commandWorked(st.s.adminCommand({movechunk: testColl.getFullName(),
- find: {e: 12},
- to: shard2}));
+ assert.commandWorked(
+ st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 1}, to: shard2}));
+ assert.commandWorked(
+ st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 9}, to: shard2}));
+ assert.commandWorked(
+ st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 12}, to: shard2}));
// Find query.
cursor = configDB.chunks.find({ns: testColl.getFullName()},
- {_id:0, min:1, max:1, shard:1}).sort({"min.e":1});
+ {_id: 0, min: 1, max: 1, shard: 1}).sort({"min.e": 1});
assert.eq(cursor.next(), {min: {e: {"$minKey": 1}}, "max": {"e": 2}, shard: shard2});
assert.eq(cursor.next(), {min: {e: 2}, max: {e: 6}, shard: shard1});
assert.eq(cursor.next(), {min: {e: 6}, max: {e: 8}, shard: shard1});
@@ -219,13 +236,20 @@
assert.eq(configDB.chunks.distinct("shard").sort(), [shard1, shard2]);
// Group query.
- result = configDB.chunks.group({key: {shard: 1},
- cond: {ns: testColl.getFullName()},
- reduce: function(curr, res) { res.chunks++; },
- initial: {chunks: 0},
- finalize: function(res) { res._id = res.shard; }});
- assert.eq(sortArrayById(result), [{shard: shard1, chunks: 2, _id: shard1},
- {shard: shard2, chunks: 3, _id: shard2}]);
+ result = configDB.chunks.group({
+ key: {shard: 1},
+ cond: {ns: testColl.getFullName()},
+ reduce: function(curr, res) {
+ res.chunks++;
+ },
+ initial: {chunks: 0},
+ finalize: function(res) {
+ res._id = res.shard;
+ }
+ });
+ assert.eq(
+ sortArrayById(result),
+ [{shard: shard1, chunks: 2, _id: shard1}, {shard: shard2, chunks: 3, _id: shard2}]);
// Map reduce query.
var mapFunction = function() {
@@ -234,12 +258,14 @@
}
};
var reduceFunction = function(key, values) {
- return {chunks: values.length};
+ return {
+ chunks: values.length
+ };
};
result = configDB.chunks.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
assert.eq(result.ok, 1);
- assert.eq(sortArrayById(result.results), [{_id: shard1, value: {chunks: 2}},
- {_id: shard2, value: {chunks: 3}}]);
+ assert.eq(sortArrayById(result.results),
+ [{_id: shard1, value: {chunks: 2}}, {_id: shard2, value: {chunks: 3}}]);
};
/**
@@ -247,13 +273,15 @@
*/
var queryUserCreated = function(database) {
var userColl = database.userColl;
- var userCollData = [{_id: 1, g: 1, c: 4, s: "c", u: [1, 2]},
- {_id: 2, g: 1, c: 5, s: "b", u: [1]},
- {_id: 3, g: 2, c: 16, s: "g", u: [3]},
- {_id: 4, g: 2, c: 1, s: "a", u: [2, 4]},
- {_id: 5, g: 2, c: 18, s: "d", u: [3]},
- {_id: 6, g: 3, c: 11, s: "e", u: [2, 3]},
- {_id: 7, g: 3, c: 2, s: "f", u: [1]}];
+ var userCollData = [
+ {_id: 1, g: 1, c: 4, s: "c", u: [1, 2]},
+ {_id: 2, g: 1, c: 5, s: "b", u: [1]},
+ {_id: 3, g: 2, c: 16, s: "g", u: [3]},
+ {_id: 4, g: 2, c: 1, s: "a", u: [2, 4]},
+ {_id: 5, g: 2, c: 18, s: "d", u: [3]},
+ {_id: 6, g: 3, c: 11, s: "e", u: [2, 3]},
+ {_id: 7, g: 3, c: 2, s: "f", u: [1]}
+ ];
var userCollIndexes = ["_id_", "s_1"];
var cursor;
var cursorArray;
@@ -294,10 +322,12 @@
assert(!cursor.hasNext());
// Aggregate query.
- cursor = userColl.aggregate([{$match: {c: {$gt: 1}}},
- {$unwind: "$u"},
- {$group: {_id: "$u", sum: {$sum: "$c"}}},
- {$sort: {_id: 1}}],
+ cursor = userColl.aggregate([
+ {$match: {c: {$gt: 1}}},
+ {$unwind: "$u"},
+ {$group: {_id: "$u", sum: {$sum: "$c"}}},
+ {$sort: {_id: 1}}
+ ],
{cursor: {batchSize: 2}});
assert.eq(cursor.objsLeftInBatch(), 2);
assert.eq(cursor.next(), {_id: 1, sum: 11});
@@ -317,26 +347,36 @@
assert.eq(userColl.distinct("g").sort(), [1, 2, 3]);
// Group query.
- result = userColl.group({key: {g: 1},
- reduce: function(curr, res) { res.prod *= curr.c; },
- initial: {prod: 1},
- finalize: function(res) { res._id = res.g; }});
- assert.eq(sortArrayById(result), [{g: 1, prod: 20, _id: 1},
- {g: 2, prod: 288, _id: 2},
- {g: 3, prod: 22, _id: 3}]);
+ result = userColl.group({
+ key: {g: 1},
+ reduce: function(curr, res) {
+ res.prod *= curr.c;
+ },
+ initial: {prod: 1},
+ finalize: function(res) {
+ res._id = res.g;
+ }
+ });
+ assert.eq(sortArrayById(result),
+ [{g: 1, prod: 20, _id: 1}, {g: 2, prod: 288, _id: 2}, {g: 3, prod: 22, _id: 3}]);
// Map reduce query.
var mapFunction = function() {
emit(this.g, 1);
};
var reduceFunction = function(key, values) {
- return {count: values.length};
+ return {
+ count: values.length
+ };
};
result = userColl.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
assert.eq(result.ok, 1);
- assert.eq(sortArrayById(result.results), [{_id: 1, value: {count: 2}},
- {_id: 2, value: {count: 3}},
- {_id: 3, value: {count: 2}}]);
+ assert.eq(sortArrayById(result.results),
+ [
+ {_id: 1, value: {count: 2}},
+ {_id: 2, value: {count: 3}},
+ {_id: 3, value: {count: 2}}
+ ]);
assert(userColl.drop());
};
diff --git a/jstests/sharding/query_sharded.js b/jstests/sharding/query_sharded.js
index 2a4089c69cf..7cb698c4477 100644
--- a/jstests/sharding/query_sharded.js
+++ b/jstests/sharding/query_sharded.js
@@ -2,9 +2,7 @@
// Tests mongos-only query behavior
//
-var st = new ShardingTest({shards : 1,
- mongos : 1,
- verbose : 0});
+var st = new ShardingTest({shards: 1, mongos: 1, verbose: 0});
var mongos = st.s0;
var coll = mongos.getCollection("foo.bar");
@@ -13,22 +11,26 @@ var coll = mongos.getCollection("foo.bar");
//
// Ensure we can't use exhaust option through mongos
coll.remove({});
-assert.writeOK(coll.insert({a : 'b'}));
+assert.writeOK(coll.insert({a: 'b'}));
var query = coll.find({});
assert.neq(null, query.next());
query = coll.find({}).addOption(DBQuery.Option.exhaust);
-assert.throws(function(){ query.next(); });
+assert.throws(function() {
+ query.next();
+});
//
//
// Ensure we can't trick mongos by inserting exhaust option on a command through mongos
coll.remove({});
-assert.writeOK(coll.insert({a : 'b'}));
+assert.writeOK(coll.insert({a: 'b'}));
var cmdColl = mongos.getCollection(coll.getDB().toString() + ".$cmd");
-var cmdQuery = cmdColl.find({ ping : 1 }).limit(1);
+var cmdQuery = cmdColl.find({ping: 1}).limit(1);
assert.commandWorked(cmdQuery.next());
-cmdQuery = cmdColl.find({ ping : 1 }).limit(1).addOption(DBQuery.Option.exhaust);
-assert.throws(function(){ cmdQuery.next(); });
+cmdQuery = cmdColl.find({ping: 1}).limit(1).addOption(DBQuery.Option.exhaust);
+assert.throws(function() {
+ cmdQuery.next();
+});
jsTest.log("DONE!");
diff --git a/jstests/sharding/read_after_optime.js b/jstests/sharding/read_after_optime.js
index 442e8cc8ef6..e78e7394a84 100644
--- a/jstests/sharding/read_after_optime.js
+++ b/jstests/sharding/read_after_optime.js
@@ -36,24 +36,17 @@
var pingIntervalSeconds = 10;
var timeoutResult = assert.commandFailedWithCode(
runFindCommand(new Timestamp(lastOp.ts.getTime() + pingIntervalSeconds * 5, 0)),
- ErrorCodes.ExceededTimeLimit
- );
+ ErrorCodes.ExceededTimeLimit);
assert.gt(timeoutResult.waitedMS, 500);
var msg = 'Command on database local timed out waiting for read concern to be satisfied.';
- assert.soon(
- function() {
- var logMessages =
- assert.commandWorked(primaryConn.adminCommand({getLog: 'global'})).log;
- for (var i = 0; i < logMessages.length; i++) {
- if (logMessages[i].indexOf(msg) != -1) {
- return true;
- }
+ assert.soon(function() {
+ var logMessages = assert.commandWorked(primaryConn.adminCommand({getLog: 'global'})).log;
+ for (var i = 0; i < logMessages.length; i++) {
+ if (logMessages[i].indexOf(msg) != -1) {
+ return true;
}
- return false;
- },
- 'Did not see any log entries containing the following message: ' + msg,
- 60000,
- 300
- );
+ }
+ return false;
+ }, 'Did not see any log entries containing the following message: ' + msg, 60000, 300);
})();
diff --git a/jstests/sharding/read_does_not_create_namespaces.js b/jstests/sharding/read_does_not_create_namespaces.js
index db3c098c0fc..8ee48576ba1 100644
--- a/jstests/sharding/read_does_not_create_namespaces.js
+++ b/jstests/sharding/read_does_not_create_namespaces.js
@@ -2,15 +2,15 @@
// cause entries to be created in the catalog.
(function() {
-var shardingTest = new ShardingTest({ name: 'read_does_not_create_namespaces', shards: 1 });
-var db = shardingTest.getDB('NonExistentDB');
+ var shardingTest = new ShardingTest({name: 'read_does_not_create_namespaces', shards: 1});
+ var db = shardingTest.getDB('NonExistentDB');
-assert.isnull(db.nonExistentColl.findOne({}));
+ assert.isnull(db.nonExistentColl.findOne({}));
-// Neither the database nor the collection should have been created
-assert.isnull(shardingTest.getDB('config').databases.findOne({ _id: 'NonExistentDB' }));
-assert.eq(-1, shardingTest.shard0.getDBNames().indexOf('NonExistentDB'));
+ // Neither the database nor the collection should have been created
+ assert.isnull(shardingTest.getDB('config').databases.findOne({_id: 'NonExistentDB'}));
+ assert.eq(-1, shardingTest.shard0.getDBNames().indexOf('NonExistentDB'));
-shardingTest.stop();
+ shardingTest.stop();
})();
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index 7b92eb0d1b4..aadd8903344 100755..100644
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -3,19 +3,14 @@
* can be found in dbtests/replica_set_monitor_test.cpp.
*/
-var PRI_TAG = { dc: 'ny' };
-var SEC_TAGS = [
- { dc: 'sf', s: "1" },
- { dc: 'ma', s: "2" },
- { dc: 'eu', s: "3" },
- { dc: 'jp', s: "4" }
-];
+var PRI_TAG = {
+ dc: 'ny'
+};
+var SEC_TAGS = [{dc: 'sf', s: "1"}, {dc: 'ma', s: "2"}, {dc: 'eu', s: "3"}, {dc: 'jp', s: "4"}];
var NODES = SEC_TAGS.length + 1;
var doTest = function(useDollarQuerySyntax) {
- var st = new ShardingTest({ shards: {
- rs0: { nodes: NODES, oplogSize: 10, useHostName: true }
- }});
+ var st = new ShardingTest({shards: {rs0: {nodes: NODES, oplogSize: 10, useHostName: true}}});
var replTest = st.rs0;
var primaryNode = replTest.getPrimary();
@@ -25,25 +20,24 @@ var doTest = function(useDollarQuerySyntax) {
return;
}
- var setupConf = function(){
- var replConf = primaryNode.getDB( 'local' ).system.replset.findOne();
+ var setupConf = function() {
+ var replConf = primaryNode.getDB('local').system.replset.findOne();
replConf.version = (replConf.version || 0) + 1;
var secIdx = 0;
- for ( var x = 0; x < NODES; x++ ){
+ for (var x = 0; x < NODES; x++) {
var node = replConf.members[x];
- if ( node.host == primaryNode.name ){
+ if (node.host == primaryNode.name) {
node.tags = PRI_TAG;
- }
- else {
+ } else {
node.tags = SEC_TAGS[secIdx++];
node.priority = 0;
}
}
try {
- primaryNode.getDB( 'admin' ).runCommand({ replSetReconfig: replConf });
+ primaryNode.getDB('admin').runCommand({replSetReconfig: replConf});
} catch (x) {
jsTest.log('Exception expected because reconfiguring would close all conn, got ' + x);
}
@@ -51,25 +45,25 @@ var doTest = function(useDollarQuerySyntax) {
return replConf;
};
- var checkTag = function( nodeToCheck, tag ){
- for ( var idx = 0; idx < NODES; idx++ ){
+ var checkTag = function(nodeToCheck, tag) {
+ for (var idx = 0; idx < NODES; idx++) {
var node = replConf.members[idx];
- if ( node.host == nodeToCheck ){
- jsTest.log( 'node[' + node.host + '], Tag: ' + tojson( node['tags'] ));
- jsTest.log( 'tagToCheck: ' + tojson( tag ));
+ if (node.host == nodeToCheck) {
+ jsTest.log('node[' + node.host + '], Tag: ' + tojson(node['tags']));
+ jsTest.log('tagToCheck: ' + tojson(tag));
var nodeTag = node['tags'];
- for ( var key in tag ){
- assert.eq( tag[key], nodeTag[key] );
+ for (var key in tag) {
+ assert.eq(tag[key], nodeTag[key]);
}
return;
}
}
- assert( false, 'node ' + nodeToCheck + ' not part of config!' );
+ assert(false, 'node ' + nodeToCheck + ' not part of config!');
};
var replConf = setupConf();
@@ -77,17 +71,16 @@ var doTest = function(useDollarQuerySyntax) {
var conn = st.s;
// Wait until the ReplicaSetMonitor refreshes its view and see the tags
- ReplSetTest.awaitRSClientHosts( conn, primaryNode,
- { ok: true, tags: PRI_TAG }, replTest.name );
+ ReplSetTest.awaitRSClientHosts(conn, primaryNode, {ok: true, tags: PRI_TAG}, replTest.name);
replTest.awaitReplication();
jsTest.log('New rs config: ' + tojson(primaryNode.getDB('local').system.replset.findOne()));
- jsTest.log( 'connpool: ' + tojson(conn.getDB('admin').runCommand({ connPoolStats: 1 })));
+ jsTest.log('connpool: ' + tojson(conn.getDB('admin').runCommand({connPoolStats: 1})));
- var coll = conn.getDB( 'test' ).user;
+ var coll = conn.getDB('test').user;
assert.soon(function() {
- var res = coll.insert({ x: 1 }, { writeConcern: { w: NODES }});
+ var res = coll.insert({x: 1}, {writeConcern: {w: NODES}});
if (!res.hasWriteError()) {
return true;
}
@@ -110,10 +103,10 @@ var doTest = function(useDollarQuerySyntax) {
readPrefObj.tags = readPrefTags;
}
- return coll.find({ $query: {}, $readPreference: readPrefObj,
- $explain: true }).limit(-1).next();
- }
- else {
+ return coll.find({$query: {}, $readPreference: readPrefObj, $explain: true})
+ .limit(-1)
+ .next();
+ } else {
return coll.find().readPref(readPrefMode, readPrefTags).explain("executionStats");
}
};
@@ -127,26 +120,26 @@ var doTest = function(useDollarQuerySyntax) {
// Read pref should work without slaveOk
var explain = getExplain("secondary");
var explainServer = getExplainServer(explain);
- assert.neq( primaryNode.name, explainServer );
+ assert.neq(primaryNode.name, explainServer);
conn.setSlaveOk();
// It should also work with slaveOk
explain = getExplain("secondary");
explainServer = getExplainServer(explain);
- assert.neq( primaryNode.name, explainServer );
+ assert.neq(primaryNode.name, explainServer);
// Check that $readPreference does not influence the actual query
- assert.eq( 1, explain.executionStats.nReturned );
+ assert.eq(1, explain.executionStats.nReturned);
- explain = getExplain("secondaryPreferred", [{ s: "2" }]);
+ explain = getExplain("secondaryPreferred", [{s: "2"}]);
explainServer = getExplainServer(explain);
- checkTag( explainServer, { s: "2" });
- assert.eq( 1, explain.executionStats.nReturned );
+ checkTag(explainServer, {s: "2"});
+ assert.eq(1, explain.executionStats.nReturned);
// Cannot use tags with primaryOnly
- assert.throws( function() {
- getExplain("primary", [{ s: "2" }]);
+ assert.throws(function() {
+ getExplain("primary", [{s: "2"}]);
});
// Ok to use empty tags on primaryOnly
@@ -159,44 +152,43 @@ var doTest = function(useDollarQuerySyntax) {
assert.eq(primaryNode.name, explainServer);
// Check that mongos will try the next tag if nothing matches the first
- explain = getExplain("secondary", [{ z: "3" }, { dc: "jp" }]);
+ explain = getExplain("secondary", [{z: "3"}, {dc: "jp"}]);
explainServer = getExplainServer(explain);
- checkTag( explainServer, { dc: "jp" });
- assert.eq( 1, explain.executionStats.nReturned );
+ checkTag(explainServer, {dc: "jp"});
+ assert.eq(1, explain.executionStats.nReturned);
// Check that mongos will fallback to primary if none of tags given matches
- explain = getExplain("secondaryPreferred", [{ z: "3" }, { dc: "ph" }]);
+ explain = getExplain("secondaryPreferred", [{z: "3"}, {dc: "ph"}]);
explainServer = getExplainServer(explain);
// Call getPrimary again since the primary could have changed after the restart.
assert.eq(replTest.getPrimary().name, explainServer);
- assert.eq( 1, explain.executionStats.nReturned );
+ assert.eq(1, explain.executionStats.nReturned);
// Kill all members except one
var stoppedNodes = [];
- for ( var x = 0; x < NODES - 1; x++ ){
- replTest.stop( x );
- stoppedNodes.push( replTest.nodes[x] );
+ for (var x = 0; x < NODES - 1; x++) {
+ replTest.stop(x);
+ stoppedNodes.push(replTest.nodes[x]);
}
// Wait for ReplicaSetMonitor to realize nodes are down
- ReplSetTest.awaitRSClientHosts( conn, stoppedNodes, { ok: false }, replTest.name );
+ ReplSetTest.awaitRSClientHosts(conn, stoppedNodes, {ok: false}, replTest.name);
// Wait for the last node to be in steady state -> secondary (not recovering)
var lastNode = replTest.nodes[NODES - 1];
- ReplSetTest.awaitRSClientHosts( conn, lastNode,
- { ok: true, secondary: true }, replTest.name );
+ ReplSetTest.awaitRSClientHosts(conn, lastNode, {ok: true, secondary: true}, replTest.name);
- jsTest.log( 'connpool: ' + tojson(conn.getDB('admin').runCommand({ connPoolStats: 1 })));
+ jsTest.log('connpool: ' + tojson(conn.getDB('admin').runCommand({connPoolStats: 1})));
// Test to make sure that connection is ok, in prep for priOnly test
explain = getExplain("nearest");
explainServer = getExplainServer(explain);
- assert.eq( explainServer, replTest.nodes[NODES - 1].name );
- assert.eq( 1, explain.executionStats.nReturned );
+ assert.eq(explainServer, replTest.nodes[NODES - 1].name);
+ assert.eq(1, explain.executionStats.nReturned);
// Should assert if request with priOnly but no primary
- assert.throws( function(){
- getExplain("primary");
+ assert.throws(function() {
+ getExplain("primary");
});
st.stop();
diff --git a/jstests/sharding/read_pref_cmd.js b/jstests/sharding/read_pref_cmd.js
index 9df6cc96221..1e4aa48ee25 100644
--- a/jstests/sharding/read_pref_cmd.js
+++ b/jstests/sharding/read_pref_cmd.js
@@ -7,8 +7,8 @@ var NODE_COUNT = 2;
*/
var setUp = function() {
var configDB = st.s.getDB('config');
- configDB.adminCommand({ enableSharding: 'test' });
- configDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
// Each time we drop the 'test' DB we have to re-enable profiling
st.rs0.nodes.forEach(function(node) {
@@ -38,7 +38,7 @@ var tearDown = function() {
*/
var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secExpected) {
var testDB = conn.getDB('test');
- conn.setSlaveOk(false); // purely rely on readPref
+ conn.setSlaveOk(false); // purely rely on readPref
jsTest.log('Testing mode: ' + mode + ', tag sets: ' + tojson(tagSets));
conn.setReadPref(mode, tagSets);
@@ -60,7 +60,9 @@ var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secEx
assert(cmdResult.ok);
var testedAtLeastOnce = false;
- var query = { op: 'command' };
+ var query = {
+ op: 'command'
+ };
Object.extend(query, profileQuery);
hostList.forEach(function(node) {
@@ -71,12 +73,11 @@ var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secEx
if (secOk && secExpected) {
// The command obeys read prefs and we expect to run
// commands on secondaries with this mode and tag sets
- assert(testDB.adminCommand({ isMaster: 1 }).secondary);
- }
- else {
+ assert(testDB.adminCommand({isMaster: 1}).secondary);
+ } else {
// The command does not obey read prefs, or we expect to run
// commands on primary with this mode or tag sets
- assert(testDB.adminCommand({ isMaster: 1 }).ismaster);
+ assert(testDB.adminCommand({isMaster: 1}).ismaster);
}
testedAtLeastOnce = true;
@@ -100,70 +101,75 @@ var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secEx
};
// Test command that can be sent to secondary
- cmdTest({ distinct: 'user', key: 'x', query: { x: 1 }}, true,
- formatProfileQuery({ distinct: 'user' }));
+ cmdTest(
+ {distinct: 'user', key: 'x', query: {x: 1}}, true, formatProfileQuery({distinct: 'user'}));
// Test command that can't be sent to secondary
- cmdTest({ create: 'mrIn' }, false, formatProfileQuery({ create: 'mrIn' }));
+ cmdTest({create: 'mrIn'}, false, formatProfileQuery({create: 'mrIn'}));
// Make sure mrIn is propagated to secondaries before proceeding
- testDB.runCommand({ getLastError: 1, w: NODE_COUNT });
+ testDB.runCommand({getLastError: 1, w: NODE_COUNT});
var mapFunc = function(doc) {};
- var reduceFunc = function(key, values) { return values; };
+ var reduceFunc = function(key, values) {
+ return values;
+ };
// Test inline mapReduce on sharded collection.
// Note that in sharded map reduce, it will output the result in a temp collection
// even if out is inline.
if (isMongos) {
- cmdTest({ mapreduce: 'user', map: mapFunc, reduce: reduceFunc, out: { inline: 1 }},
- false, formatProfileQuery({ mapreduce: 'user', shardedFirstPass: true }));
+ cmdTest({mapreduce: 'user', map: mapFunc, reduce: reduceFunc, out: {inline: 1}},
+ false,
+ formatProfileQuery({mapreduce: 'user', shardedFirstPass: true}));
}
// Test inline mapReduce on unsharded collection.
- cmdTest({ mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: { inline: 1 }}, true,
- formatProfileQuery({ mapreduce: 'mrIn', 'out.inline': 1 }));
+ cmdTest({mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: {inline: 1}},
+ true,
+ formatProfileQuery({mapreduce: 'mrIn', 'out.inline': 1}));
// Test non-inline mapReduce on sharded collection.
if (isMongos) {
- cmdTest({ mapreduce: 'user', map: mapFunc, reduce: reduceFunc,
- out: { replace: 'mrOut' }}, false,
- formatProfileQuery({ mapreduce: 'user', shardedFirstPass: true }));
+ cmdTest({mapreduce: 'user', map: mapFunc, reduce: reduceFunc, out: {replace: 'mrOut'}},
+ false,
+ formatProfileQuery({mapreduce: 'user', shardedFirstPass: true}));
}
// Test non-inline mapReduce on unsharded collection.
- cmdTest({ mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: { replace: 'mrOut' }},
- false, formatProfileQuery({ mapreduce: 'mrIn', 'out.replace': 'mrOut' }));
+ cmdTest({mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: {replace: 'mrOut'}},
+ false,
+ formatProfileQuery({mapreduce: 'mrIn', 'out.replace': 'mrOut'}));
// Test other commands that can be sent to secondary.
- cmdTest({ count: 'user' }, true, formatProfileQuery({ count: 'user' }));
- cmdTest({ group: { key: { x: true }, '$reduce': function(a, b) {}, ns: 'mrIn',
- initial: { x: 0 }}}, true, formatProfileQuery({ 'group.ns': 'mrIn' }));
+ cmdTest({count: 'user'}, true, formatProfileQuery({count: 'user'}));
+ cmdTest({group: {key: {x: true}, '$reduce': function(a, b) {}, ns: 'mrIn', initial: {x: 0}}},
+ true,
+ formatProfileQuery({'group.ns': 'mrIn'}));
- cmdTest({ collStats: 'user' }, true, formatProfileQuery({ count: 'user' }));
- cmdTest({ dbStats: 1 }, true, formatProfileQuery({ dbStats: 1 }));
+ cmdTest({collStats: 'user'}, true, formatProfileQuery({count: 'user'}));
+ cmdTest({dbStats: 1}, true, formatProfileQuery({dbStats: 1}));
- testDB.user.ensureIndex({ loc: '2d' });
- testDB.user.ensureIndex({ position: 'geoHaystack', type:1 }, { bucketSize: 10 });
- testDB.runCommand({ getLastError: 1, w: NODE_COUNT });
- cmdTest({ geoNear: 'user', near: [1, 1] }, true,
- formatProfileQuery({ geoNear: 'user' }));
+ testDB.user.ensureIndex({loc: '2d'});
+ testDB.user.ensureIndex({position: 'geoHaystack', type: 1}, {bucketSize: 10});
+ testDB.runCommand({getLastError: 1, w: NODE_COUNT});
+ cmdTest({geoNear: 'user', near: [1, 1]}, true, formatProfileQuery({geoNear: 'user'}));
// Mongos doesn't implement geoSearch; test it only with ReplicaSetConnection.
if (!isMongos) {
- cmdTest(
- {
- geoSearch: 'user', near: [1, 1],
- search: { type: 'restaurant'}, maxDistance: 10
- }, true, formatProfileQuery({ geoSearch: 'user'}));
+ cmdTest({geoSearch: 'user', near: [1, 1], search: {type: 'restaurant'}, maxDistance: 10},
+ true,
+ formatProfileQuery({geoSearch: 'user'}));
}
// Test on sharded
- cmdTest({ aggregate: 'user', pipeline: [{ $project: { x: 1 }}] }, true,
- formatProfileQuery({ aggregate: 'user' }));
+ cmdTest({aggregate: 'user', pipeline: [{$project: {x: 1}}]},
+ true,
+ formatProfileQuery({aggregate: 'user'}));
// Test on non-sharded
- cmdTest({ aggregate: 'mrIn', pipeline: [{ $project: { x: 1 }}] }, true,
- formatProfileQuery({ aggregate: 'mrIn' }));
+ cmdTest({aggregate: 'mrIn', pipeline: [{$project: {x: 1}}]},
+ true,
+ formatProfileQuery({aggregate: 'mrIn'}));
};
/**
@@ -187,20 +193,20 @@ var testBadMode = function(conn, hostList, isMongos, mode, tagSets) {
// Test that a command that could be routed to a secondary fails with bad mode / tags.
if (isMongos) {
// Command result should have ok: 0.
- cmdResult = testDB.runReadCommand({ distinct: 'user', key: 'x' });
+ cmdResult = testDB.runReadCommand({distinct: 'user', key: 'x'});
jsTest.log('cmd result: ' + tojson(cmdResult));
assert(!cmdResult.ok);
} else {
try {
// conn should throw error
- testDB.runReadCommand({ distinct: 'user', key: 'x' });
+ testDB.runReadCommand({distinct: 'user', key: 'x'});
failureMsg = "Unexpected success running distinct!";
- }
- catch (e) {
+ } catch (e) {
jsTest.log(e);
}
- if (failureMsg) throw failureMsg;
+ if (failureMsg)
+ throw failureMsg;
}
};
@@ -210,28 +216,28 @@ var testAllModes = function(conn, hostList, isMongos) {
// { tag: 'two' } so we can test the interaction of modes and tags. Test
// a bunch of combinations.
[
- // mode, tagSets, expectedHost
- ['primary', undefined, false],
- ['primary', [], false],
+ // mode, tagSets, expectedHost
+ ['primary', undefined, false],
+ ['primary', [], false],
- ['primaryPreferred', undefined, false],
- ['primaryPreferred', [{tag: 'one'}], false],
- // Correctly uses primary and ignores the tag
- ['primaryPreferred', [{tag: 'two'}], false],
+ ['primaryPreferred', undefined, false],
+ ['primaryPreferred', [{tag: 'one'}], false],
+ // Correctly uses primary and ignores the tag
+ ['primaryPreferred', [{tag: 'two'}], false],
- ['secondary', undefined, true],
- ['secondary', [{tag: 'two'}], true],
- ['secondary', [{tag: 'doesntexist'}, {}], true],
- ['secondary', [{tag: 'doesntexist'}, {tag:'two'}], true],
+ ['secondary', undefined, true],
+ ['secondary', [{tag: 'two'}], true],
+ ['secondary', [{tag: 'doesntexist'}, {}], true],
+ ['secondary', [{tag: 'doesntexist'}, {tag: 'two'}], true],
- ['secondaryPreferred', undefined, true],
- ['secondaryPreferred', [{tag: 'one'}], false],
- ['secondaryPreferred', [{tag: 'two'}], true],
+ ['secondaryPreferred', undefined, true],
+ ['secondaryPreferred', [{tag: 'one'}], false],
+ ['secondaryPreferred', [{tag: 'two'}], true],
- // We don't have a way to alter ping times so we can't predict where an
- // untagged 'nearest' command should go, hence only test with tags.
- ['nearest', [{tag: 'one'}], false],
- ['nearest', [{tag: 'two'}], true]
+ // We don't have a way to alter ping times so we can't predict where an
+ // untagged 'nearest' command should go, hence only test with tags.
+ ['nearest', [{tag: 'one'}], false],
+ ['nearest', [{tag: 'two'}], true]
].forEach(function(args) {
var mode = args[0], tagSets = args[1], secExpected = args[2];
@@ -242,17 +248,17 @@ var testAllModes = function(conn, hostList, isMongos) {
});
[
- // Tags not allowed with primary
- ['primary', [{dc: 'doesntexist'}]],
- ['primary', [{dc: 'ny'}]],
- ['primary', [{dc: 'one'}]],
+ // Tags not allowed with primary
+ ['primary', [{dc: 'doesntexist'}]],
+ ['primary', [{dc: 'ny'}]],
+ ['primary', [{dc: 'one'}]],
- // No matching node
- ['secondary', [{tag: 'one'}]],
- ['nearest', [{tag: 'doesntexist'}]],
+ // No matching node
+ ['secondary', [{tag: 'one'}]],
+ ['nearest', [{tag: 'doesntexist'}]],
- ['invalid-mode', undefined],
- ['secondary', ['misformatted-tags']]
+ ['invalid-mode', undefined],
+ ['secondary', ['misformatted-tags']]
].forEach(function(args) {
var mode = args[0], tagSets = args[1];
@@ -263,8 +269,8 @@ var testAllModes = function(conn, hostList, isMongos) {
});
};
-var st = new ShardingTest({shards : {rs0 : {nodes : NODE_COUNT, verbose : 1}},
- other : {mongosOptions : {verbose : 3}}});
+var st = new ShardingTest(
+ {shards: {rs0: {nodes: NODE_COUNT, verbose: 1}}, other: {mongosOptions: {verbose: 3}}});
st.stopBalancer();
ReplSetTest.awaitRSClientHosts(st.s, st.rs0.nodes);
@@ -272,8 +278,14 @@ ReplSetTest.awaitRSClientHosts(st.s, st.rs0.nodes);
// Tag primary with { dc: 'ny', tag: 'one' }, secondary with { dc: 'ny', tag: 'two' }
var primary = st.rs0.getPrimary();
var secondary = st.rs0.getSecondary();
-var PRIMARY_TAG = { dc: 'ny', tag: 'one' };
-var SECONDARY_TAG = { dc: 'ny', tag: 'two' };
+var PRIMARY_TAG = {
+ dc: 'ny',
+ tag: 'one'
+};
+var SECONDARY_TAG = {
+ dc: 'ny',
+ tag: 'two'
+};
var rsConfig = primary.getDB("local").system.replset.findOne();
jsTest.log('got rsconf ' + tojson(rsConfig));
@@ -287,13 +299,11 @@ rsConfig.members.forEach(function(member) {
rsConfig.version++;
-
jsTest.log('new rsconf ' + tojson(rsConfig));
try {
- primary.adminCommand({ replSetReconfig: rsConfig });
-}
-catch(e) {
+ primary.adminCommand({replSetReconfig: rsConfig});
+} catch (e) {
jsTest.log('replSetReconfig error: ' + e);
}
@@ -302,10 +312,9 @@ st.rs0.awaitSecondaryNodes();
// Force mongos to reconnect after our reconfig
assert.soon(function() {
try {
- st.s.getDB('foo').runCommand({ create: 'foo' });
+ st.s.getDB('foo').runCommand({create: 'foo'});
return true;
- }
- catch (x) {
+ } catch (x) {
// Intentionally caused an error that forces mongos's monitor to refresh.
jsTest.log('Caught exception while doing dummy command: ' + tojson(x));
return false;
@@ -321,8 +330,8 @@ jsTest.log('got rsconf ' + tojson(rsConfig));
var replConn = new Mongo(st.rs0.getURL());
// Make sure replica set connection is ready
-_awaitRSHostViaRSMonitor(primary.name, { ok: true, tags: PRIMARY_TAG }, st.rs0.name);
-_awaitRSHostViaRSMonitor(secondary.name, { ok: true, tags: SECONDARY_TAG }, st.rs0.name);
+_awaitRSHostViaRSMonitor(primary.name, {ok: true, tags: PRIMARY_TAG}, st.rs0.name);
+_awaitRSHostViaRSMonitor(secondary.name, {ok: true, tags: SECONDARY_TAG}, st.rs0.name);
testAllModes(replConn, st.rs0.nodes, false);
diff --git a/jstests/sharding/read_pref_multi_mongos_stale_config.js b/jstests/sharding/read_pref_multi_mongos_stale_config.js
index 3333e3678ae..42c54f82819 100644
--- a/jstests/sharding/read_pref_multi_mongos_stale_config.js
+++ b/jstests/sharding/read_pref_multi_mongos_stale_config.js
@@ -2,9 +2,11 @@
// Tests that a mongos will correctly retry a stale shard version when read preference is used
//
-var st = new ShardingTest({shards : {rs0 : {quiet : ''}, rs1 : {quiet : ''}},
- mongos : 2,
- other : {mongosOptions : {verbose : 2}}});
+var st = new ShardingTest({
+ shards: {rs0: {quiet: ''}, rs1: {quiet: ''}},
+ mongos: 2,
+ other: {mongosOptions: {verbose: 2}}
+});
var testDB1 = st.s0.getDB('test');
var testDB2 = st.s1.getDB('test');
@@ -12,28 +14,27 @@ var testDB2 = st.s1.getDB('test');
// Trigger a query on mongos 1 so it will have a view of test.user as being unsharded.
testDB1.user.findOne();
-testDB2.adminCommand({ enableSharding: 'test' });
-testDB2.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+testDB2.adminCommand({enableSharding: 'test'});
+testDB2.adminCommand({shardCollection: 'test.user', key: {x: 1}});
-testDB2.adminCommand({ split: 'test.user', middle: { x: 100 }});
+testDB2.adminCommand({split: 'test.user', middle: {x: 100}});
var configDB2 = st.s1.getDB('config');
-var chunkToMove = configDB2.chunks.find().sort({ min: 1 }).next();
-var toShard = configDB2.shards.findOne({ _id: { $ne: chunkToMove.shard }})._id;
-testDB2.adminCommand({ moveChunk: 'test.user', to: toShard, find: { x: 50 }});
+var chunkToMove = configDB2.chunks.find().sort({min: 1}).next();
+var toShard = configDB2.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
+testDB2.adminCommand({moveChunk: 'test.user', to: toShard, find: {x: 50}});
// Insert a document into each chunk
-assert.writeOK(testDB2.user.insert({ x: 30 }));
-assert.writeOK(testDB2.user.insert({ x: 130 }));
+assert.writeOK(testDB2.user.insert({x: 30}));
+assert.writeOK(testDB2.user.insert({x: 130}));
// The testDB1 mongos does not know the chunk has been moved, and will retry
-var cursor = testDB1.user.find({ x: 30 }).readPref('primary');
+var cursor = testDB1.user.find({x: 30}).readPref('primary');
assert(cursor.hasNext());
assert.eq(30, cursor.next().x);
-cursor = testDB1.user.find({ x: 130 }).readPref('primary');
+cursor = testDB1.user.find({x: 130}).readPref('primary');
assert(cursor.hasNext());
assert.eq(130, cursor.next().x);
st.stop();
-
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
index 936f8856903..cd66a1b81f5 100644
--- a/jstests/sharding/recovering_slaveok.js
+++ b/jstests/sharding/recovering_slaveok.js
@@ -2,127 +2,123 @@
// RECOVERING state, and don't break
(function() {
-'use strict';
+ 'use strict';
-var shardTest = new ShardingTest({ name: "recovering_slaveok",
- shards: 2,
- mongos: 2,
- other: { rs: true } });
+ var shardTest =
+ new ShardingTest({name: "recovering_slaveok", shards: 2, mongos: 2, other: {rs: true}});
-var mongos = shardTest.s0;
-var mongosSOK = shardTest.s1;
-mongosSOK.setSlaveOk();
+ var mongos = shardTest.s0;
+ var mongosSOK = shardTest.s1;
+ mongosSOK.setSlaveOk();
-var admin = mongos.getDB("admin");
-var config = mongos.getDB("config");
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
-var dbase = mongos.getDB("test");
-var coll = dbase.getCollection("foo");
-var dbaseSOk = mongosSOK.getDB( "" + dbase );
-var collSOk = mongosSOK.getCollection( "" + coll );
+ var dbase = mongos.getDB("test");
+ var coll = dbase.getCollection("foo");
+ var dbaseSOk = mongosSOK.getDB("" + dbase);
+ var collSOk = mongosSOK.getCollection("" + coll);
-var rsA = shardTest._rs[0].test;
-var rsB = shardTest._rs[1].test;
+ var rsA = shardTest._rs[0].test;
+ var rsB = shardTest._rs[1].test;
-assert.writeOK(rsA.getPrimary().getDB( "test_a" ).dummy.insert({ x : 1 }));
-assert.writeOK(rsB.getPrimary().getDB( "test_b" ).dummy.insert({ x : 1 }));
+ assert.writeOK(rsA.getPrimary().getDB("test_a").dummy.insert({x: 1}));
+ assert.writeOK(rsB.getPrimary().getDB("test_b").dummy.insert({x: 1}));
-rsA.awaitReplication();
-rsB.awaitReplication();
+ rsA.awaitReplication();
+ rsB.awaitReplication();
-print("1: initial insert");
+ print("1: initial insert");
-coll.save({ _id : -1, a : "a", date : new Date() });
-coll.save({ _id : 1, b : "b", date : new Date() });
+ coll.save({_id: -1, a: "a", date: new Date()});
+ coll.save({_id: 1, b: "b", date: new Date()});
-print("2: shard collection");
+ print("2: shard collection");
-shardTest.shardColl(coll, /* shardBy */ { _id : 1 }, /* splitAt */ { _id : 0 });
+ shardTest.shardColl(coll, /* shardBy */ {_id: 1}, /* splitAt */ {_id: 0});
-print("3: test normal and slaveOk queries");
+ print("3: test normal and slaveOk queries");
-// Make shardA and rsA the same
-var shardA = shardTest.getShard(coll, { _id : -1 });
-var shardAColl = shardA.getCollection( "" + coll );
-var shardB = shardTest.getShard(coll, { _id : 1 });
+ // Make shardA and rsA the same
+ var shardA = shardTest.getShard(coll, {_id: -1});
+ var shardAColl = shardA.getCollection("" + coll);
+ var shardB = shardTest.getShard(coll, {_id: 1});
-if (shardA.name == rsB.getURL()) {
- var swap = rsB;
- rsB = rsA;
- rsA = swap;
-}
+ if (shardA.name == rsB.getURL()) {
+ var swap = rsB;
+ rsB = rsA;
+ rsA = swap;
+ }
-rsA.awaitReplication();
-rsB.awaitReplication();
+ rsA.awaitReplication();
+ rsB.awaitReplication();
-// Because of async migration cleanup, we need to wait for this condition to be true
-assert.soon(function() { return coll.find().itcount() == collSOk.find().itcount(); });
+ // Because of async migration cleanup, we need to wait for this condition to be true
+ assert.soon(function() {
+ return coll.find().itcount() == collSOk.find().itcount();
+ });
-assert.eq(shardAColl.find().itcount(), 1);
-assert.eq(shardAColl.findOne()._id, -1);
+ assert.eq(shardAColl.find().itcount(), 1);
+ assert.eq(shardAColl.findOne()._id, -1);
-print("5: make one of the secondaries RECOVERING");
+ print("5: make one of the secondaries RECOVERING");
-var secs = rsA.getSecondaries();
-var goodSec = secs[0];
-var badSec = secs[1];
+ var secs = rsA.getSecondaries();
+ var goodSec = secs[0];
+ var badSec = secs[1];
-assert.commandWorked(badSec.adminCommand("replSetMaintenance"));
-rsA.waitForState(badSec, ReplSetTest.State.RECOVERING);
+ assert.commandWorked(badSec.adminCommand("replSetMaintenance"));
+ rsA.waitForState(badSec, ReplSetTest.State.RECOVERING);
-print("6: stop non-RECOVERING secondary");
+ print("6: stop non-RECOVERING secondary");
-rsA.stop(goodSec);
+ rsA.stop(goodSec);
-print("7: check our regular and slaveOk query");
+ print("7: check our regular and slaveOk query");
-assert.eq(2, coll.find().itcount());
-assert.eq(2, collSOk.find().itcount());
+ assert.eq(2, coll.find().itcount());
+ assert.eq(2, collSOk.find().itcount());
-print("8: restart both our secondaries clean");
+ print("8: restart both our secondaries clean");
-rsA.restart(rsA.getSecondaries(),
- { remember : true, startClean : true },
- undefined,
- 5 * 60 * 1000);
+ rsA.restart(rsA.getSecondaries(), {remember: true, startClean: true}, undefined, 5 * 60 * 1000);
-print("9: wait for recovery");
+ print("9: wait for recovery");
-rsA.waitForState(rsA.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000 );
+ rsA.waitForState(rsA.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
-print("10: check our regular and slaveOk query");
+ print("10: check our regular and slaveOk query");
-// We need to make sure our nodes are considered accessible from mongos - otherwise we fail
-// See SERVER-7274
-ReplSetTest.awaitRSClientHosts(coll.getMongo(), rsA.nodes, { ok : true });
-ReplSetTest.awaitRSClientHosts(coll.getMongo(), rsB.nodes, { ok : true });
+ // We need to make sure our nodes are considered accessible from mongos - otherwise we fail
+ // See SERVER-7274
+ ReplSetTest.awaitRSClientHosts(coll.getMongo(), rsA.nodes, {ok: true});
+ ReplSetTest.awaitRSClientHosts(coll.getMongo(), rsB.nodes, {ok: true});
-// We need to make sure at least one secondary is accessible from mongos - otherwise we fail
-// See SERVER-7699
-ReplSetTest.awaitRSClientHosts(collSOk.getMongo(), [rsA.getSecondaries()[0]],
- { secondary : true, ok : true });
-ReplSetTest.awaitRSClientHosts(collSOk.getMongo(), [rsB.getSecondaries()[0]],
- { secondary : true, ok : true });
+ // We need to make sure at least one secondary is accessible from mongos - otherwise we fail
+ // See SERVER-7699
+ ReplSetTest.awaitRSClientHosts(
+ collSOk.getMongo(), [rsA.getSecondaries()[0]], {secondary: true, ok: true});
+ ReplSetTest.awaitRSClientHosts(
+ collSOk.getMongo(), [rsB.getSecondaries()[0]], {secondary: true, ok: true});
-print("SlaveOK Query...");
-var sOKCount = collSOk.find().itcount();
+ print("SlaveOK Query...");
+ var sOKCount = collSOk.find().itcount();
-var collCount = null;
-try{
- print("Normal query...");
- collCount = coll.find().itcount();
-}
-catch(e){
- printjson(e);
+ var collCount = null;
+ try {
+ print("Normal query...");
+ collCount = coll.find().itcount();
+ } catch (e) {
+ printjson(e);
- // There may have been a stepdown caused by step 8, so we run this twice in a row. The first
- // time can error out.
- print("Error may have been caused by stepdown, try again.");
- collCount = coll.find().itcount();
-}
+ // There may have been a stepdown caused by step 8, so we run this twice in a row. The first
+ // time can error out.
+ print("Error may have been caused by stepdown, try again.");
+ collCount = coll.find().itcount();
+ }
-assert.eq(collCount, sOKCount);
+ assert.eq(collCount, sOKCount);
-shardTest.stop();
+ shardTest.stop();
})();
diff --git a/jstests/sharding/regex_targeting.js b/jstests/sharding/regex_targeting.js
index 5b6f9e02a79..7dd927d8aab 100644
--- a/jstests/sharding/regex_targeting.js
+++ b/jstests/sharding/regex_targeting.js
@@ -2,11 +2,13 @@
// This checks to make sure that sharded regex queries behave the same as unsharded regex queries
//
-var options = { mongosOptions : { binVersion : "" },
- shardOptions : { binVersion : "" },
- configOptions : { binVersion : "" } };
+var options = {
+ mongosOptions: {binVersion: ""},
+ shardOptions: {binVersion: ""},
+ configOptions: {binVersion: ""}
+};
-var st = new ShardingTest({ shards : 2, other : options });
+var st = new ShardingTest({shards: 2, other: options});
st.stopBalancer();
var mongos = st.s0;
@@ -23,149 +25,134 @@ var collCompound = mongos.getCollection("foo.barCompound");
var collNested = mongos.getCollection("foo.barNested");
var collHashed = mongos.getCollection("foo.barHashed");
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB().toString() }));
-admin.runCommand({ movePrimary : coll.getDB().toString(), to : shards[0]._id });
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id});
//
// Split the collection so that "abcde-0" and "abcde-1" go on different shards when possible
//
-assert.commandWorked(admin.runCommand({ shardCollection : collSharded.toString(),
- key: { a : 1 } }));
-assert.commandWorked(admin.runCommand({ split : collSharded.toString(),
- middle : { a : "abcde-1" } }));
-assert.commandWorked(admin.runCommand({ moveChunk : collSharded.toString(),
- find : { a : 0 },
- to : shards[1]._id,
- _waitForDelete : true }));
-
-assert.commandWorked(admin.runCommand({ shardCollection : collCompound.toString(),
- key: { a : 1, b : 1 } }));
-assert.commandWorked(admin.runCommand({ split : collCompound.toString(),
- middle : { a : "abcde-1", b : 0 } }));
-assert.commandWorked(admin.runCommand({ moveChunk : collCompound.toString(),
- find : { a : 0, b : 0 },
- to : shards[1]._id,
- _waitForDelete : true }));
-
-assert.commandWorked(admin.runCommand({ shardCollection : collNested.toString(),
- key : { 'a.b' : 1 } }));
-assert.commandWorked(admin.runCommand({ split : collNested.toString(),
- middle : { 'a.b' : "abcde-1" } }));
-assert.commandWorked(admin.runCommand({ moveChunk : collNested.toString(),
- find : { a : { b : 0 } },
- to : shards[1]._id,
- _waitForDelete : true }));
-
-assert.commandWorked(admin.runCommand({ shardCollection : collHashed.toString(),
- key: { hash : "hashed" } }));
+assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {a: 1}}));
+assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {a: "abcde-1"}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: collSharded.toString(), find: {a: 0}, to: shards[1]._id, _waitForDelete: true}));
+
+assert.commandWorked(
+ admin.runCommand({shardCollection: collCompound.toString(), key: {a: 1, b: 1}}));
+assert.commandWorked(
+ admin.runCommand({split: collCompound.toString(), middle: {a: "abcde-1", b: 0}}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: collCompound.toString(),
+ find: {a: 0, b: 0},
+ to: shards[1]._id,
+ _waitForDelete: true
+}));
+
+assert.commandWorked(admin.runCommand({shardCollection: collNested.toString(), key: {'a.b': 1}}));
+assert.commandWorked(admin.runCommand({split: collNested.toString(), middle: {'a.b': "abcde-1"}}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: collNested.toString(),
+ find: {a: {b: 0}},
+ to: shards[1]._id,
+ _waitForDelete: true
+}));
+
+assert.commandWorked(
+ admin.runCommand({shardCollection: collHashed.toString(), key: {hash: "hashed"}}));
st.printShardingStatus();
//
//
// Cannot insert regex _id
-assert.writeError(coll.insert({ _id : /regex value/ }));
-assert.writeError(collSharded.insert({ _id : /regex value/, a : 0 }));
-assert.writeError(collCompound.insert({ _id : /regex value/, a : 0, b : 0 }));
-assert.writeError(collNested.insert({ _id : /regex value/, a : { b : 0 } }));
-assert.writeError(collHashed.insert({ _id : /regex value/, hash : 0 }));
-
+assert.writeError(coll.insert({_id: /regex value/}));
+assert.writeError(collSharded.insert({_id: /regex value/, a: 0}));
+assert.writeError(collCompound.insert({_id: /regex value/, a: 0, b: 0}));
+assert.writeError(collNested.insert({_id: /regex value/, a: {b: 0}}));
+assert.writeError(collHashed.insert({_id: /regex value/, hash: 0}));
//
//
// (For now) we can insert a regex shard key
-assert.writeOK(collSharded.insert({ a : /regex value/ }));
-assert.writeOK(collCompound.insert({ a : /regex value/, b : "other value" }));
-assert.writeOK(collNested.insert({ a : { b : /regex value/ } }));
-assert.writeOK(collHashed.insert({ hash : /regex value/ }));
-
+assert.writeOK(collSharded.insert({a: /regex value/}));
+assert.writeOK(collCompound.insert({a: /regex value/, b: "other value"}));
+assert.writeOK(collNested.insert({a: {b: /regex value/}}));
+assert.writeOK(collHashed.insert({hash: /regex value/}));
//
//
// Query by regex should hit all matching keys, across all shards if applicable
coll.remove({});
-assert.writeOK(coll.insert({ a : "abcde-0" }));
-assert.writeOK(coll.insert({ a : "abcde-1" }));
-assert.writeOK(coll.insert({ a : /abcde.*/ }));
-assert.eq(coll.find().itcount(), coll.find({ a : /abcde.*/ }).itcount());
+assert.writeOK(coll.insert({a: "abcde-0"}));
+assert.writeOK(coll.insert({a: "abcde-1"}));
+assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.eq(coll.find().itcount(), coll.find({a: /abcde.*/}).itcount());
collSharded.remove({});
-assert.writeOK(collSharded.insert({ a : "abcde-0" }));
-assert.writeOK(collSharded.insert({ a : "abcde-1" }));
-assert.writeOK(collSharded.insert({ a : /abcde.*/ }));
-assert.eq(collSharded.find().itcount(), collSharded.find({ a : /abcde.*/ }).itcount());
+assert.writeOK(collSharded.insert({a: "abcde-0"}));
+assert.writeOK(collSharded.insert({a: "abcde-1"}));
+assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.eq(collSharded.find().itcount(), collSharded.find({a: /abcde.*/}).itcount());
collCompound.remove({});
-assert.writeOK(collCompound.insert({ a : "abcde-0", b : 0 }));
-assert.writeOK(collCompound.insert({ a : "abcde-1", b : 0 }));
-assert.writeOK(collCompound.insert({ a : /abcde.*/, b : 0 }));
-assert.eq(collCompound.find().itcount(), collCompound.find({ a : /abcde.*/ }).itcount());
+assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
+assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
+assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.eq(collCompound.find().itcount(), collCompound.find({a: /abcde.*/}).itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({ a : { b : "abcde-0" } }));
-assert.writeOK(collNested.insert({ a : { b : "abcde-1" } }));
-assert.writeOK(collNested.insert({ a : { b : /abcde.*/ } }));
-assert.eq(collNested.find().itcount(), collNested.find({ 'a.b' : /abcde.*/ }).itcount());
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.eq(collNested.find().itcount(), collNested.find({'a.b': /abcde.*/}).itcount());
collHashed.remove({});
while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({ hash : "abcde-" + ObjectId().toString() }));
+ assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
}
-assert.writeOK(collHashed.insert({ hash : /abcde.*/ }));
-assert.eq(collHashed.find().itcount(), collHashed.find({ hash : /abcde.*/ }).itcount());
-
+assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.eq(collHashed.find().itcount(), collHashed.find({hash: /abcde.*/}).itcount());
//
//
// Update by regex should hit all matching keys, across all shards if applicable
coll.remove({});
-assert.writeOK(coll.insert({ a : "abcde-0" }));
-assert.writeOK(coll.insert({ a : "abcde-1" }));
-assert.writeOK(coll.insert({ a : /abcde.*/ }));
-assert.writeOK(coll.update({ a : /abcde.*/ },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(coll.find().itcount(), coll.find({ updated : true }).itcount());
+assert.writeOK(coll.insert({a: "abcde-0"}));
+assert.writeOK(coll.insert({a: "abcde-1"}));
+assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.writeOK(coll.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(coll.find().itcount(), coll.find({updated: true}).itcount());
collSharded.remove({});
-assert.writeOK(collSharded.insert({ a : "abcde-0" }));
-assert.writeOK(collSharded.insert({ a : "abcde-1" }));
-assert.writeOK(collSharded.insert({ a : /abcde.*/ }));
-assert.writeOK(collSharded.update({ a : /abcde.*/ },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(collSharded.find().itcount(), collSharded.find({ updated : true }).itcount());
+assert.writeOK(collSharded.insert({a: "abcde-0"}));
+assert.writeOK(collSharded.insert({a: "abcde-1"}));
+assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.writeOK(collSharded.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collSharded.find().itcount(), collSharded.find({updated: true}).itcount());
collCompound.remove({});
-assert.writeOK(collCompound.insert({ a : "abcde-0", b : 0 }));
-assert.writeOK(collCompound.insert({ a : "abcde-1", b : 0 }));
-assert.writeOK(collCompound.insert({ a : /abcde.*/, b : 0 }));
-assert.writeOK(collCompound.update({ a : /abcde.*/ },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(collCompound.find().itcount(), collCompound.find({ updated : true }).itcount());
+assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
+assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
+assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.writeOK(collCompound.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collCompound.find().itcount(), collCompound.find({updated: true}).itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({ a : { b : "abcde-0" } }));
-assert.writeOK(collNested.insert({ a : { b : "abcde-1" } }));
-assert.writeOK(collNested.insert({ a : { b : /abcde.*/ } }));
-assert.writeOK(collNested.update({ 'a.b' : /abcde.*/ },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(collNested.find().itcount(), collNested.find({ updated : true }).itcount());
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.writeOK(collNested.update({'a.b': /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collNested.find().itcount(), collNested.find({updated: true}).itcount());
collHashed.remove({});
while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({ hash : "abcde-" + ObjectId().toString() }));
+ assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
}
-assert.writeOK(collHashed.insert({ hash : /abcde.*/ }));
-assert.writeOK(collHashed.update({ hash : /abcde.*/ },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(collHashed.find().itcount(), collHashed.find({ updated : true }).itcount());
+assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.writeOK(collHashed.update({hash: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collHashed.find().itcount(), collHashed.find({updated: true}).itcount());
//
//
@@ -174,18 +161,19 @@ assert.eq(collHashed.find().itcount(), collHashed.find({ updated : true }).itcou
collSharded.remove({});
collCompound.remove({});
collNested.remove({});
-assert.writeError(collSharded.update({ a : /abcde.*/ }, { $set : { a : /abcde.*/ } },
- { upsert : true }));
-assert.writeError(collCompound.update({ a : /abcde.*/ }, { $set : { a : /abcde.*/, b : 1 } },
- { upsert : true }));
+assert.writeError(collSharded.update({a: /abcde.*/}, {$set: {a: /abcde.*/}}, {upsert: true}));
+assert.writeError(collCompound.update({a: /abcde.*/},
+ {$set: {a: /abcde.*/, b: 1}},
+ {upsert: true}));
// Exact regex in query never equality
-assert.writeError(collNested.update({ 'a.b' : /abcde.*/ }, { $set : { 'a.b' : /abcde.*/ } },
- { upsert : true }));
+assert.writeError(collNested.update({'a.b': /abcde.*/},
+ {$set: {'a.b': /abcde.*/}},
+ {upsert: true}));
// Even nested regexes are not extracted in queries
-assert.writeError(collNested.update({ a : { b : /abcde.*/ } }, { $set : { 'a.b' : /abcde.*/ } },
- { upsert : true }));
-assert.writeError(collNested.update({ c : 1 }, { $set : { 'a.b' : /abcde.*/ } },
- { upsert : true }));
+assert.writeError(collNested.update({a: {b: /abcde.*/}},
+ {$set: {'a.b': /abcde.*/}},
+ {upsert: true}));
+assert.writeError(collNested.update({c: 1}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
//
//
@@ -194,84 +182,74 @@ assert.writeError(collNested.update({ c : 1 }, { $set : { 'a.b' : /abcde.*/ } },
collSharded.remove({});
collCompound.remove({});
collNested.remove({});
-assert.writeOK(collSharded.update({ a : /abcde.*/ }, { a : /abcde.*/ }, { upsert : true }));
-assert.writeOK(collCompound.update({ a : /abcde.*/ }, { a : /abcde.*/, b : 1 }, { upsert : true }));
-assert.writeOK(collNested.update({ 'a.b' : /abcde.*/ }, { a : { b : /abcde.*/ } },
- { upsert : true }));
-assert.writeOK(collNested.update({ a : { b : /abcde.*/ } }, { a : { b : /abcde.*/ } },
- { upsert : true }));
-assert.writeOK(collNested.update({ c : 1 }, { a : { b : /abcde.*/ } },
- { upsert : true }));
+assert.writeOK(collSharded.update({a: /abcde.*/}, {a: /abcde.*/}, {upsert: true}));
+assert.writeOK(collCompound.update({a: /abcde.*/}, {a: /abcde.*/, b: 1}, {upsert: true}));
+assert.writeOK(collNested.update({'a.b': /abcde.*/}, {a: {b: /abcde.*/}}, {upsert: true}));
+assert.writeOK(collNested.update({a: {b: /abcde.*/}}, {a: {b: /abcde.*/}}, {upsert: true}));
+assert.writeOK(collNested.update({c: 1}, {a: {b: /abcde.*/}}, {upsert: true}));
//
//
// Remove by regex should hit all matching keys, across all shards if applicable
coll.remove({});
-assert.writeOK(coll.insert({ a : "abcde-0" }));
-assert.writeOK(coll.insert({ a : "abcde-1" }));
-assert.writeOK(coll.insert({ a : /abcde.*/ }));
-assert.writeOK(coll.remove({ a : /abcde.*/ }));
+assert.writeOK(coll.insert({a: "abcde-0"}));
+assert.writeOK(coll.insert({a: "abcde-1"}));
+assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.writeOK(coll.remove({a: /abcde.*/}));
assert.eq(0, coll.find({}).itcount());
-
collSharded.remove({});
-assert.writeOK(collSharded.insert({ a : "abcde-0" }));
-assert.writeOK(collSharded.insert({ a : "abcde-1" }));
-assert.writeOK(collSharded.insert({ a : /abcde.*/ }));
-assert.writeOK(collSharded.remove({ a : /abcde.*/ }));
+assert.writeOK(collSharded.insert({a: "abcde-0"}));
+assert.writeOK(collSharded.insert({a: "abcde-1"}));
+assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.writeOK(collSharded.remove({a: /abcde.*/}));
assert.eq(0, collSharded.find({}).itcount());
collCompound.remove({});
-assert.writeOK(collCompound.insert({ a : "abcde-0", b : 0 }));
-assert.writeOK(collCompound.insert({ a : "abcde-1", b : 0 }));
-assert.writeOK(collCompound.insert({ a : /abcde.*/, b : 0 }));
-assert.writeOK(collCompound.remove({ a : /abcde.*/ }));
+assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
+assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
+assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.writeOK(collCompound.remove({a: /abcde.*/}));
assert.eq(0, collCompound.find({}).itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({ a : { b : "abcde-0" } }));
-assert.writeOK(collNested.insert({ a : { b : "abcde-1" } }));
-assert.writeOK(collNested.insert({ a : { b : /abcde.*/ } }));
-assert.writeOK(collNested.remove({ 'a.b' : /abcde.*/ }));
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.writeOK(collNested.remove({'a.b': /abcde.*/}));
assert.eq(0, collNested.find({}).itcount());
collHashed.remove({});
while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({ hash : "abcde-" + ObjectId().toString() }));
+ assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
}
-assert.writeOK(collHashed.insert({ hash : /abcde.*/ }));
-assert.writeOK(collHashed.remove({ hash : /abcde.*/ }));
+assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.writeOK(collHashed.remove({hash: /abcde.*/}));
assert.eq(0, collHashed.find({}).itcount());
-
//
//
// Query/Update/Remove by nested regex is different depending on how the nested regex is specified
coll.remove({});
-assert.writeOK(coll.insert({ a : { b : "abcde-0" } }));
-assert.writeOK(coll.insert({ a : { b : "abcde-1" } }));
-assert.writeOK(coll.insert({ a : { b : /abcde.*/ } }));
-assert.eq(1, coll.find({ a : { b : /abcde.*/ } }).itcount());
-assert.writeOK(coll.update({ a : { b : /abcde.*/ } },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(1, coll.find({ updated : true }).itcount());
-assert.writeOK(coll.remove({ a : { b : /abcde.*/ } }));
+assert.writeOK(coll.insert({a: {b: "abcde-0"}}));
+assert.writeOK(coll.insert({a: {b: "abcde-1"}}));
+assert.writeOK(coll.insert({a: {b: /abcde.*/}}));
+assert.eq(1, coll.find({a: {b: /abcde.*/}}).itcount());
+assert.writeOK(coll.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
+assert.eq(1, coll.find({updated: true}).itcount());
+assert.writeOK(coll.remove({a: {b: /abcde.*/}}));
assert.eq(2, coll.find().itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({ a : { b : "abcde-0" } }));
-assert.writeOK(collNested.insert({ a : { b : "abcde-1" } }));
-assert.writeOK(collNested.insert({ a : { b : /abcde.*/ } }));
-assert.eq(1, collNested.find({ a : { b : /abcde.*/ } }).itcount());
-assert.writeOK(collNested.update({ a : { b : /abcde.*/ } },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(1, collNested.find({ updated : true }).itcount());
-assert.writeOK(collNested.remove({ a : { b : /abcde.*/ } }));
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.eq(1, collNested.find({a: {b: /abcde.*/}}).itcount());
+assert.writeOK(collNested.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
+assert.eq(1, collNested.find({updated: true}).itcount());
+assert.writeOK(collNested.remove({a: {b: /abcde.*/}}));
assert.eq(2, collNested.find().itcount());
jsTest.log("DONE!");
st.stop();
-
diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js
index 22443aae938..8dd315ffa2a 100644
--- a/jstests/sharding/remove1.js
+++ b/jstests/sharding/remove1.js
@@ -1,32 +1,34 @@
(function() {
-var s = new ShardingTest({ name: "remove_shard1", shards: 2 });
+ var s = new ShardingTest({name: "remove_shard1", shards: 2});
-assert.eq( 2, s.config.shards.count() , "initial server count wrong" );
+ assert.eq(2, s.config.shards.count(), "initial server count wrong");
-assert.writeOK(s.config.databases.insert({ _id: 'needToMove',
- partitioned: false,
- primary: 'shard0000'}));
+ assert.writeOK(
+ s.config.databases.insert({_id: 'needToMove', partitioned: false, primary: 'shard0000'}));
-// Returns an error when trying to remove a shard that doesn't exist.
-assert.commandFailed(s.admin.runCommand({ removeshard: "shardz" }));
+ // Returns an error when trying to remove a shard that doesn't exist.
+ assert.commandFailed(s.admin.runCommand({removeshard: "shardz"}));
-// first remove puts in draining mode, the second tells me a db needs to move, the third actually removes
-assert( s.admin.runCommand( { removeshard: "shard0000" } ).ok , "failed to start draining shard" );
-assert( !s.admin.runCommand( { removeshard: "shard0001" } ).ok , "allowed two draining shards" );
-assert.eq( s.admin.runCommand( { removeshard: "shard0000" } ).dbsToMove, ['needToMove'] , "didn't show db to move" );
-s.getDB('needToMove').dropDatabase();
-assert( s.admin.runCommand( { removeshard: "shard0000" } ).ok , "failed to remove shard" );
-assert.eq( 1, s.config.shards.count() , "removed server still appears in count" );
+ // first remove puts in draining mode, the second tells me a db needs to move, the third
+ // actually removes
+ assert(s.admin.runCommand({removeshard: "shard0000"}).ok, "failed to start draining shard");
+ assert(!s.admin.runCommand({removeshard: "shard0001"}).ok, "allowed two draining shards");
+ assert.eq(s.admin.runCommand({removeshard: "shard0000"}).dbsToMove,
+ ['needToMove'],
+ "didn't show db to move");
+ s.getDB('needToMove').dropDatabase();
+ assert(s.admin.runCommand({removeshard: "shard0000"}).ok, "failed to remove shard");
+ assert.eq(1, s.config.shards.count(), "removed server still appears in count");
-assert( !s.admin.runCommand( { removeshard: "shard0001" } ).ok , "allowed removing last shard" );
+ assert(!s.admin.runCommand({removeshard: "shard0001"}).ok, "allowed removing last shard");
-// should create a shard0002 shard
-var conn = MongoRunner.runMongod({});
-assert( s.admin.runCommand( { addshard: conn.host } ).ok, "failed to add shard" );
-assert.eq( 2, s.config.shards.count(), "new server does not appear in count" );
+ // should create a shard0002 shard
+ var conn = MongoRunner.runMongod({});
+ assert(s.admin.runCommand({addshard: conn.host}).ok, "failed to add shard");
+ assert.eq(2, s.config.shards.count(), "new server does not appear in count");
-MongoRunner.stopMongod(conn);
-s.stop();
+ MongoRunner.stopMongod(conn);
+ s.stop();
})();
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index 08af78404dd..b8c8d2f1b9e 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -1,153 +1,148 @@
// Test that removing and re-adding shard works correctly.
seedString = function(replTest) {
- members = replTest.getReplSetConfig().members.map(function(elem) { return elem.host; });
+ members = replTest.getReplSetConfig().members.map(function(elem) {
+ return elem.host;
+ });
return replTest.name + '/' + members.join(',');
};
removeShard = function(st, replTest) {
- print( "Removing shard with name: " + replTest.name );
- res = st.admin.runCommand( { removeshard: replTest.name } );
+ print("Removing shard with name: " + replTest.name);
+ res = st.admin.runCommand({removeshard: replTest.name});
printjson(res);
- assert( res.ok , "failed to start draining shard" );
+ assert(res.ok, "failed to start draining shard");
checkRemoveShard = function() {
- res = st.admin.runCommand( { removeshard: replTest.name } );
+ res = st.admin.runCommand({removeshard: replTest.name});
printjson(res);
return res.ok && res.msg == 'removeshard completed successfully';
};
- assert.soon( checkRemoveShard, "failed to remove shard", 5 * 60000 );
+ assert.soon(checkRemoveShard, "failed to remove shard", 5 * 60000);
// Need to wait for migration to be over... only works for inline deletes
checkNSLock = function() {
- printjson( st.s.getDB( "config" ).locks.find().toArray() );
+ printjson(st.s.getDB("config").locks.find().toArray());
return !st.isAnyBalanceInFlight();
};
- assert.soon( checkNSLock, "migrations did not end?" );
-
- sleep( 2000 );
-
- var directdb = replTest.getPrimary().getDB( "admin" );
- assert.soon( function(){
- var res = directdb.currentOp( { desc: /^clean/ } );
- print( "eliot: " + replTest.getPrimary() + "\t" + tojson(res) );
- return res.inprog.length == 0;
- }, "never clean", 5 * 60 * 1000, 1000 );
-
- replTest.getPrimary().getDB( coll.getDB().getName() ).dropDatabase();
- print( "Shard removed successfully" );
+ assert.soon(checkNSLock, "migrations did not end?");
+
+ sleep(2000);
+
+ var directdb = replTest.getPrimary().getDB("admin");
+ assert.soon(function() {
+ var res = directdb.currentOp({desc: /^clean/});
+ print("eliot: " + replTest.getPrimary() + "\t" + tojson(res));
+ return res.inprog.length == 0;
+ }, "never clean", 5 * 60 * 1000, 1000);
+
+ replTest.getPrimary().getDB(coll.getDB().getName()).dropDatabase();
+ print("Shard removed successfully");
};
addShard = function(st, replTest) {
seed = seedString(replTest);
- print( "Adding shard with seed: " + seed );
+ print("Adding shard with seed: " + seed);
try {
- assert.eq(true, st.adminCommand({ addshard : seed }));
+ assert.eq(true, st.adminCommand({addshard: seed}));
} catch (e) {
print("First attempt to addShard failed, trying again");
// transport error on first attempt is expected. Make sure second attempt goes through
- assert.eq(true, st.adminCommand({ addshard : seed }));
+ assert.eq(true, st.adminCommand({addshard: seed}));
}
- ReplSetTest.awaitRSClientHosts( new Mongo( st.s.host ),
- replTest.getSecondaries(),
- {ok : true, secondary : true} );
+ ReplSetTest.awaitRSClientHosts(
+ new Mongo(st.s.host), replTest.getSecondaries(), {ok: true, secondary: true});
- assert.soon( function() {
- var x = st.chunkDiff( coll.getName() , coll.getDB().getName() );
- print( "chunk diff: " + x );
+ assert.soon(function() {
+ var x = st.chunkDiff(coll.getName(), coll.getDB().getName());
+ print("chunk diff: " + x);
return x < 2;
- } , "no balance happened", 30 * 60 * 1000 );
+ }, "no balance happened", 30 * 60 * 1000);
try {
- assert.eq( 300, coll.find().itcount() );
+ assert.eq(300, coll.find().itcount());
} catch (e) {
// Expected. First query might get transport error and need to reconnect.
printjson(e);
- assert.eq( 300, coll.find().itcount() );
+ assert.eq(300, coll.find().itcount());
}
- print( "Shard added successfully" );
+ print("Shard added successfully");
};
-var st = new ShardingTest({ shards: {
- rs0: { nodes: 2 },
- rs1: { nodes: 2 }
- },
- other: {
- chunkSize: 1,
- enableBalancer: true
- }});
+var st = new ShardingTest(
+ {shards: {rs0: {nodes: 2}, rs1: {nodes: 2}}, other: {chunkSize: 1, enableBalancer: true}});
// Pending resolution of SERVER-8598, we need to wait for deletion after chunk migrations to avoid
// a pending delete re-creating a database after it was dropped.
-st.s.getDB("config").settings.update( { _id: "balancer" },
- { $set : { _waitForDelete : true } },
- true );
+st.s.getDB("config").settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
var rst0 = st._rs[0].test;
var rst1 = st._rs[1].test;
-var conn = new Mongo( st.s.host );
-var coll = conn.getCollection( "test.remove2" );
+var conn = new Mongo(st.s.host);
+var coll = conn.getCollection("test.remove2");
coll.drop();
-// Decrease how long it will take for rst0 to time out its ReplicaSetMonitor for rst1 when rs1 is shut down
-for( var i = 0; i < rst0.nodes.length; i++ ) {
+// Decrease how long it will take for rst0 to time out its ReplicaSetMonitor for rst1 when rs1 is
+// shut down
+for (var i = 0; i < rst0.nodes.length; i++) {
node = rst0.nodes[i];
- res = node.getDB('admin').runCommand({ setParameter : 1, replMonitorMaxFailedChecks : 1 });
- printjson( res );
- assert( res.ok );
+ res = node.getDB('admin').runCommand({setParameter: 1, replMonitorMaxFailedChecks: 1});
+ printjson(res);
+ assert(res.ok);
}
-st.admin.runCommand({ enableSharding : coll.getDB().getName() });
+st.admin.runCommand({enableSharding: coll.getDB().getName()});
st.ensurePrimaryShard(coll.getDB().getName(), 'test-rs0');
-st.admin.runCommand({ shardCollection : coll.getFullName(), key: { i : 1 }});
+st.admin.runCommand({shardCollection: coll.getFullName(), key: {i: 1}});
// Setup initial data
var str = 'a';
-while( str.length < 1024 * 16 ) {
+while (str.length < 1024 * 16) {
str += str;
}
var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < 300; i++ ){
- bulk.insert({ i: i % 10, str: str });
+for (var i = 0; i < 300; i++) {
+ bulk.insert({i: i % 10, str: str});
}
assert.writeOK(bulk.execute());
-assert.eq( 300, coll.find().itcount() );
+assert.eq(300, coll.find().itcount());
-assert.soon( function() {
- var x = st.chunkDiff( 'remove2' , "test" ); print( "chunk diff: " + x ); return x < 2;
-} , "no balance happened", 30 * 60 * 1000 );
+assert.soon(function() {
+ var x = st.chunkDiff('remove2', "test");
+ print("chunk diff: " + x);
+ return x < 2;
+}, "no balance happened", 30 * 60 * 1000);
-assert.eq( 300, coll.find().itcount() );
+assert.eq(300, coll.find().itcount());
st.printShardingStatus();
// Remove shard and add it back in, without shutting it down.
-jsTestLog( "Attempting to remove shard and add it back in" );
-removeShard( st, rst1 );
-addShard(st, rst1 );
-
+jsTestLog("Attempting to remove shard and add it back in");
+removeShard(st, rst1);
+addShard(st, rst1);
// Remove shard, restart set, then add it back in.
-jsTestLog( "Attempting to remove shard, restart the set, and then add it back in" );
+jsTestLog("Attempting to remove shard, restart the set, and then add it back in");
originalSeed = seedString(rst1);
-removeShard( st, rst1 );
+removeShard(st, rst1);
rst1.stopSet();
-print( "Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out" );
-sleep( 20000 ); // 1 failed check should take 10 seconds, sleep for 20 just to be safe
+print("Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out");
+sleep(20000); // 1 failed check should take 10 seconds, sleep for 20 just to be safe
rst1.startSet();
rst1.initiate();
rst1.awaitReplication();
-assert.eq( originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before" );
-addShard( st, rst1 );
+assert.eq(originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before");
+addShard(st, rst1);
-
-// Shut down shard and wait for its ReplicaSetMonitor to be cleaned up, then start it back up and use it.
+// Shut down shard and wait for its ReplicaSetMonitor to be cleaned up, then start it back up and
+// use it.
// TODO: test this both with AND without waiting for the ReplicaSetMonitor to be cleaned up.
// This part doesn't pass, even without cleaning up the ReplicaSetMonitor - see SERVER-5900.
/*printjson( conn.getDB('admin').runCommand({movePrimary : 'test2', to : rst1.name}) );
@@ -179,41 +174,39 @@ if ( !gle.ok ) {
assert.eq( 1, conn.getDB('test2').foo.find().itcount() );
assert( conn.getDB('test2').dropDatabase().ok );*/
-
// Remove shard and add a new shard with the same replica set and shard name, but different ports.
-jsTestLog( "Attempt removing shard and adding a new shard with the same Replica Set name" );
-removeShard( st, rst1 );
+jsTestLog("Attempt removing shard and adding a new shard with the same Replica Set name");
+removeShard(st, rst1);
rst1.stopSet();
-print( "Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out" );
-sleep( 20000 );
-
+print("Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out");
+sleep(20000);
-var rst2 = new ReplSetTest({name : rst1.name, nodes : 2, useHostName : true});
+var rst2 = new ReplSetTest({name: rst1.name, nodes: 2, useHostName: true});
rst2.startSet();
rst2.initiate();
rst2.awaitReplication();
-addShard( st, rst2 );
-printjson( st.admin.runCommand({movePrimary : 'test2', to : rst2.name}) );
+addShard(st, rst2);
+printjson(st.admin.runCommand({movePrimary: 'test2', to: rst2.name}));
-assert.eq( 300, coll.find().itcount() );
-conn.getDB('test2').foo.insert({a:1});
-assert.eq( 1, conn.getDB('test2').foo.find().itcount() );
+assert.eq(300, coll.find().itcount());
+conn.getDB('test2').foo.insert({a: 1});
+assert.eq(1, conn.getDB('test2').foo.find().itcount());
// Can't shut down with rst2 in the set or ShardingTest will fail trying to cleanup on shutdown.
// Have to take out rst2 and put rst1 back into the set so that it can clean up.
-jsTestLog( "Putting ShardingTest back to state it expects" );
-printjson( st.admin.runCommand({movePrimary : 'test2', to : rst0.name}) );
-removeShard( st, rst2 );
+jsTestLog("Putting ShardingTest back to state it expects");
+printjson(st.admin.runCommand({movePrimary: 'test2', to: rst0.name}));
+removeShard(st, rst2);
rst2.stopSet();
rst1.startSet();
rst1.initiate();
rst1.awaitReplication();
-assert.eq( originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before" );
-addShard( st, rst1 );
+assert.eq(originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before");
+addShard(st, rst1);
-jsTestLog( "finishing!" );
+jsTestLog("finishing!");
// this should be fixed by SERVER-22176
-st.stop({ allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+st.stop({allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
diff --git a/jstests/sharding/remove3.js b/jstests/sharding/remove3.js
index 1ca64fc3d10..fdbaeb4d142 100644
--- a/jstests/sharding/remove3.js
+++ b/jstests/sharding/remove3.js
@@ -1,48 +1,44 @@
// Validates the remove/drain shard functionality when there is data on the shard being removed
(function() {
-'use strict';
-
-var st = new ShardingTest({ name: "remove_shard3", shards: 2, mongos: 2 });
-
-assert.commandWorked(st.s0.adminCommand({ enableSharding: 'TestDB' }));
-st.ensurePrimaryShard('TestDB', 'shard0000');
-assert.commandWorked(st.s0.adminCommand({ shardCollection: 'TestDB.Coll', key: { _id: 1 } }));
-assert.commandWorked(st.s0.adminCommand({ split: 'TestDB.Coll', middle: { _id: 0 } }));
-
-// Insert some documents and make sure there are docs on both shards
-st.s0.getDB('TestDB').Coll.insert({ _id: -1, value: 'Negative value' });
-st.s0.getDB('TestDB').Coll.insert({ _id: 1, value: 'Positive value' });
-
-assert.commandWorked(st.s0.adminCommand({ moveChunk: 'TestDB.Coll',
- find: { _id: 1 },
- to: 'shard0001',
- _waitForDelete: true }));
-
-// Make sure both mongos instances know of the latest metadata
-assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
-assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
-
-// Remove shard0001
-var removeRes;
-removeRes = assert.commandWorked(st.s0.adminCommand({ removeShard: 'shard0001' }));
-assert.eq('started', removeRes.state);
-removeRes = assert.commandWorked(st.s0.adminCommand({ removeShard: 'shard0001' }));
-assert.eq('ongoing', removeRes.state);
-
-// Move the one chunk off shard0001
-assert.commandWorked(st.s0.adminCommand({ moveChunk: 'TestDB.Coll',
- find: { _id: 1 },
- to: 'shard0000',
- _waitForDelete: true }));
-
-// Remove shard must succeed now
-removeRes = assert.commandWorked(st.s0.adminCommand({ removeShard: 'shard0001' }));
-assert.eq('completed', removeRes.state);
-
-// Make sure both mongos instance refresh their metadata and do not reference the missing shard
-assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
-assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
-
-st.stop();
-
+ 'use strict';
+
+ var st = new ShardingTest({name: "remove_shard3", shards: 2, mongos: 2});
+
+ assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+ st.ensurePrimaryShard('TestDB', 'shard0000');
+ assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll', key: {_id: 1}}));
+ assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll', middle: {_id: 0}}));
+
+ // Insert some documents and make sure there are docs on both shards
+ st.s0.getDB('TestDB').Coll.insert({_id: -1, value: 'Negative value'});
+ st.s0.getDB('TestDB').Coll.insert({_id: 1, value: 'Positive value'});
+
+ assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: 'shard0001', _waitForDelete: true}));
+
+ // Make sure both mongos instances know of the latest metadata
+ assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
+ assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
+
+ // Remove shard0001
+ var removeRes;
+ removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: 'shard0001'}));
+ assert.eq('started', removeRes.state);
+ removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: 'shard0001'}));
+ assert.eq('ongoing', removeRes.state);
+
+ // Move the one chunk off shard0001
+ assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: 'shard0000', _waitForDelete: true}));
+
+ // Remove shard must succeed now
+ removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: 'shard0001'}));
+ assert.eq('completed', removeRes.state);
+
+ // Make sure both mongos instance refresh their metadata and do not reference the missing shard
+ assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
+ assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
+
+ st.stop();
+
})();
diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js
index 116af4592ae..e8518a1e6bf 100644
--- a/jstests/sharding/rename.js
+++ b/jstests/sharding/rename.js
@@ -1,63 +1,58 @@
(function() {
-'use strict';
+ 'use strict';
-var s = new ShardingTest({ name: "rename",
- shards: 2,
- mongos: 1,
- rs: { oplogSize: 10 } });
+ var s = new ShardingTest({name: "rename", shards: 2, mongos: 1, rs: {oplogSize: 10}});
-var db = s.getDB("test");
-var replTest = s.rs0;
+ var db = s.getDB("test");
+ var replTest = s.rs0;
-db.foo.insert({ _id: 1 });
-db.foo.renameCollection('bar');
-assert.isnull(db.getLastError(), '1.0');
-assert.eq(db.bar.findOne(), { _id: 1 }, '1.1');
-assert.eq(db.bar.count(), 1, '1.2');
-assert.eq(db.foo.count(), 0, '1.3');
+ db.foo.insert({_id: 1});
+ db.foo.renameCollection('bar');
+ assert.isnull(db.getLastError(), '1.0');
+ assert.eq(db.bar.findOne(), {_id: 1}, '1.1');
+ assert.eq(db.bar.count(), 1, '1.2');
+ assert.eq(db.foo.count(), 0, '1.3');
-db.foo.insert({ _id: 2 });
-db.foo.renameCollection('bar', true);
-assert.isnull(db.getLastError(), '2.0');
-assert.eq(db.bar.findOne(), { _id: 2 }, '2.1');
-assert.eq(db.bar.count(), 1, '2.2');
-assert.eq(db.foo.count(), 0, '2.3');
+ db.foo.insert({_id: 2});
+ db.foo.renameCollection('bar', true);
+ assert.isnull(db.getLastError(), '2.0');
+ assert.eq(db.bar.findOne(), {_id: 2}, '2.1');
+ assert.eq(db.bar.count(), 1, '2.2');
+ assert.eq(db.foo.count(), 0, '2.3');
-s.adminCommand({ enablesharding: "test" });
-s.getDB('admin').runCommand({ movePrimary: 'test', to: 'rename-rs0' });
+ s.adminCommand({enablesharding: "test"});
+ s.getDB('admin').runCommand({movePrimary: 'test', to: 'rename-rs0'});
-jsTest.log("Testing write concern (1)");
+ jsTest.log("Testing write concern (1)");
-db.foo.insert({ _id: 3 });
-db.foo.renameCollection('bar', true);
+ db.foo.insert({_id: 3});
+ db.foo.renameCollection('bar', true);
-var ans = db.runCommand({ getLastError: 1, w: 3 });
-printjson(ans);
-assert.isnull(ans.err, '3.0');
+ var ans = db.runCommand({getLastError: 1, w: 3});
+ printjson(ans);
+ assert.isnull(ans.err, '3.0');
-assert.eq(db.bar.findOne(), { _id: 3 }, '3.1');
-assert.eq(db.bar.count(), 1, '3.2');
-assert.eq(db.foo.count(), 0, '3.3');
+ assert.eq(db.bar.findOne(), {_id: 3}, '3.1');
+ assert.eq(db.bar.count(), 1, '3.2');
+ assert.eq(db.foo.count(), 0, '3.3');
-// Ensure write concern works by shutting down 1 node in a replica set shard
-jsTest.log("Testing write concern (2)");
+ // Ensure write concern works by shutting down 1 node in a replica set shard
+ jsTest.log("Testing write concern (2)");
-// Kill any node. Don't care if it's a primary or secondary.
-replTest.stop(0);
+ // Kill any node. Don't care if it's a primary or secondary.
+ replTest.stop(0);
-replTest.awaitSecondaryNodes();
-ReplSetTest.awaitRSClientHosts(s.s,
- replTest.getPrimary(),
- { ok: true, ismaster: true },
- replTest.name);
+ replTest.awaitSecondaryNodes();
+ ReplSetTest.awaitRSClientHosts(
+ s.s, replTest.getPrimary(), {ok: true, ismaster: true}, replTest.name);
-assert.writeOK(db.foo.insert({ _id: 4 }));
-assert.commandWorked(db.foo.renameCollection('bar', true));
+ assert.writeOK(db.foo.insert({_id: 4}));
+ assert.commandWorked(db.foo.renameCollection('bar', true));
-ans = db.runCommand({ getLastError: 1, w: 3, wtimeout: 5000 });
-assert.eq(ans.err, "timeout", 'gle: ' + tojson(ans));
+ ans = db.runCommand({getLastError: 1, w: 3, wtimeout: 5000});
+ assert.eq(ans.err, "timeout", 'gle: ' + tojson(ans));
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/rename_across_mongos.js b/jstests/sharding/rename_across_mongos.js
index 5d5dc1fcaf8..e9c435ecff1 100644
--- a/jstests/sharding/rename_across_mongos.js
+++ b/jstests/sharding/rename_across_mongos.js
@@ -1,29 +1,29 @@
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ name: 'rename_across_mongos', shards: 1, mongos: 2 });
-var dbName = 'RenameDB';
+ var st = new ShardingTest({name: 'rename_across_mongos', shards: 1, mongos: 2});
+ var dbName = 'RenameDB';
-st.s0.getDB(dbName).dropDatabase();
-st.s1.getDB(dbName).dropDatabase();
+ st.s0.getDB(dbName).dropDatabase();
+ st.s1.getDB(dbName).dropDatabase();
-// Create collection on first mongos and insert a document
-assert.commandWorked(st.s0.getDB(dbName).runCommand({ create: 'CollNameBeforeRename' }));
-assert.writeOK(st.s0.getDB(dbName).CollNameBeforeRename.insert({ Key: 1, Value: 1 }));
+ // Create collection on first mongos and insert a document
+ assert.commandWorked(st.s0.getDB(dbName).runCommand({create: 'CollNameBeforeRename'}));
+ assert.writeOK(st.s0.getDB(dbName).CollNameBeforeRename.insert({Key: 1, Value: 1}));
-if (st.configRS) {
- // Ensure that the second mongos will see the newly created database metadata when
- // it tries to do the collection rename.
- st.configRS.awaitLastOpCommitted();
-}
+ if (st.configRS) {
+ // Ensure that the second mongos will see the newly created database metadata when
+ // it tries to do the collection rename.
+ st.configRS.awaitLastOpCommitted();
+ }
-// Rename collection on second mongos and ensure the document is found
-assert.commandWorked(
- st.s1.getDB(dbName).CollNameBeforeRename.renameCollection('CollNameAfterRename'));
-assert.eq([{ Key: 1, Value: 1 }],
- st.s1.getDB(dbName).CollNameAfterRename.find({}, { _id: false }).toArray());
+ // Rename collection on second mongos and ensure the document is found
+ assert.commandWorked(
+ st.s1.getDB(dbName).CollNameBeforeRename.renameCollection('CollNameAfterRename'));
+ assert.eq([{Key: 1, Value: 1}],
+ st.s1.getDB(dbName).CollNameAfterRename.find({}, {_id: false}).toArray());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/repl_monitor_refresh.js b/jstests/sharding/repl_monitor_refresh.js
index 6767a165d9d..28e86ec8d32 100644
--- a/jstests/sharding/repl_monitor_refresh.js
+++ b/jstests/sharding/repl_monitor_refresh.js
@@ -3,73 +3,71 @@
* become invalid when a replica set reconfig happens.
*/
(function() {
-"use strict";
-
-var NODE_COUNT = 3;
-var st = new ShardingTest({ shards: { rs0: { nodes: NODE_COUNT, oplogSize: 10 }}});
-var replTest = st.rs0;
-var mongos = st.s;
-
-var shardDoc = mongos.getDB('config').shards.findOne();
-assert.eq(NODE_COUNT, shardDoc.host.split(',').length); // seed list should contain all nodes
-
-/* Make sure that the first node is not the primary (by making the second one primary).
- * We need to do this since the ReplicaSetMonitor iterates over the nodes one
- * by one and you can't remove a node that is currently the primary.
- */
-var connPoolStats = mongos.getDB('admin').runCommand({ connPoolStats: 1 });
-var targetHostName = connPoolStats['replicaSets'][replTest.name].hosts[1].addr;
-
-var priConn = replTest.getPrimary();
-var confDoc = priConn.getDB("local").system.replset.findOne();
-
-for (var idx = 0; idx < confDoc.members.length; idx++) {
- if (confDoc.members[idx].host == targetHostName) {
- confDoc.members[idx].priority = 100;
- }
- else {
- confDoc.members[idx].priority = 1;
+ "use strict";
+
+ var NODE_COUNT = 3;
+ var st = new ShardingTest({shards: {rs0: {nodes: NODE_COUNT, oplogSize: 10}}});
+ var replTest = st.rs0;
+ var mongos = st.s;
+
+ var shardDoc = mongos.getDB('config').shards.findOne();
+ assert.eq(NODE_COUNT, shardDoc.host.split(',').length); // seed list should contain all nodes
+
+ /* Make sure that the first node is not the primary (by making the second one primary).
+ * We need to do this since the ReplicaSetMonitor iterates over the nodes one
+ * by one and you can't remove a node that is currently the primary.
+ */
+ var connPoolStats = mongos.getDB('admin').runCommand({connPoolStats: 1});
+ var targetHostName = connPoolStats['replicaSets'][replTest.name].hosts[1].addr;
+
+ var priConn = replTest.getPrimary();
+ var confDoc = priConn.getDB("local").system.replset.findOne();
+
+ for (var idx = 0; idx < confDoc.members.length; idx++) {
+ if (confDoc.members[idx].host == targetHostName) {
+ confDoc.members[idx].priority = 100;
+ } else {
+ confDoc.members[idx].priority = 1;
+ }
}
-}
-confDoc.version++;
+ confDoc.version++;
-jsTest.log('Changing conf to ' + tojson(confDoc));
+ jsTest.log('Changing conf to ' + tojson(confDoc));
-try {
- priConn.getDB('admin').adminCommand({ replSetReconfig: confDoc });
-} catch (x) {
- print('Expected exception because of reconfig' + x);
-}
+ try {
+ priConn.getDB('admin').adminCommand({replSetReconfig: confDoc});
+ } catch (x) {
+ print('Expected exception because of reconfig' + x);
+ }
-ReplSetTest.awaitRSClientHosts(mongos, { host: targetHostName },
- { ok: true, ismaster: true });
+ ReplSetTest.awaitRSClientHosts(mongos, {host: targetHostName}, {ok: true, ismaster: true});
-// Remove first node from set
-confDoc.members.shift();
-confDoc.version++;
+ // Remove first node from set
+ confDoc.members.shift();
+ confDoc.version++;
-try {
- replTest.getPrimary().getDB('admin').adminCommand({ replSetReconfig: confDoc });
-} catch (x) {
- print('Expected exception because of reconfig: ' + x);
-}
+ try {
+ replTest.getPrimary().getDB('admin').adminCommand({replSetReconfig: confDoc});
+ } catch (x) {
+ print('Expected exception because of reconfig: ' + x);
+ }
-assert.soon(function() {
- var connPoolStats = mongos.getDB('admin').runCommand('connPoolStats');
- var replView = connPoolStats.replicaSets[replTest.name].hosts;
- jsTest.log('current replView: ' + tojson(replView));
+ assert.soon(function() {
+ var connPoolStats = mongos.getDB('admin').runCommand('connPoolStats');
+ var replView = connPoolStats.replicaSets[replTest.name].hosts;
+ jsTest.log('current replView: ' + tojson(replView));
- return replView.length == NODE_COUNT - 1;
-});
+ return replView.length == NODE_COUNT - 1;
+ });
-assert.soon(function() {
- shardDoc = mongos.getDB('config').shards.findOne();
- jsTest.log('shardDoc: ' + tojson(shardDoc));
- // seed list should contain one less node
- return shardDoc.host.split(',').length == NODE_COUNT - 1;
-});
+ assert.soon(function() {
+ shardDoc = mongos.getDB('config').shards.findOne();
+ jsTest.log('shardDoc: ' + tojson(shardDoc));
+ // seed list should contain one less node
+ return shardDoc.host.split(',').length == NODE_COUNT - 1;
+ });
-st.stop();
+ st.stop();
}()); \ No newline at end of file
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index 43602ae26ed..1d52ac47abc 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -13,16 +13,18 @@
* was able to refresh before proceeding to check.
*/
-var rsOpt = { oplogSize: 10 };
-var st = new ShardingTest({ shards: 1, rs: rsOpt });
+var rsOpt = {
+ oplogSize: 10
+};
+var st = new ShardingTest({shards: 1, rs: rsOpt});
var mongos = st.s;
var replTest = st.rs0;
var adminDB = mongos.getDB('admin');
-//adminDB.runCommand({ addShard: replTest.getURL() });
+// adminDB.runCommand({ addShard: replTest.getURL() });
-adminDB.runCommand({ enableSharding: 'test' });
-adminDB.runCommand({ shardCollection: 'test.user', key: { x: 1 }});
+adminDB.runCommand({enableSharding: 'test'});
+adminDB.runCommand({shardCollection: 'test.user', key: {x: 1}});
/* The cluster now has the shard information. Then kill the replica set so
* when mongos restarts and tries to create a ReplSetMonitor for that shard,
@@ -30,13 +32,13 @@ adminDB.runCommand({ shardCollection: 'test.user', key: { x: 1 }});
*/
replTest.stopSet();
st.restartMongos(0);
-mongos = st.s; // refresh mongos with the new one
+mongos = st.s; // refresh mongos with the new one
var coll = mongos.getDB('test').user;
var verifyInsert = function() {
var beforeCount = coll.find().count();
- coll.insert({ x: 1 });
+ coll.insert({x: 1});
var afterCount = coll.find().count();
assert.eq(beforeCount + 1, afterCount);
@@ -45,15 +47,14 @@ var verifyInsert = function() {
jsTest.log('Insert to a downed replSet');
assert.throws(verifyInsert);
-replTest.startSet({ oplogSize: 10 });
+replTest.startSet({oplogSize: 10});
replTest.initiate();
replTest.awaitSecondaryNodes();
jsTest.log('Insert to an online replSet');
// Verify that the replSetMonitor can reach the restarted set.
-ReplSetTest.awaitRSClientHosts(mongos, replTest.nodes, { ok: true });
+ReplSetTest.awaitRSClientHosts(mongos, replTest.nodes, {ok: true});
verifyInsert();
st.stop();
-
diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js
index d2519f0ae5e..a8eca975283 100644
--- a/jstests/sharding/return_partial_shards_down.js
+++ b/jstests/sharding/return_partial_shards_down.js
@@ -2,9 +2,7 @@
// Tests that zero results are correctly returned with returnPartial and shards down
//
-var st = new ShardingTest({shards : 3,
- mongos : 1,
- other : {mongosOptions : {verbose : 2}}});
+var st = new ShardingTest({shards: 3, mongos: 1, other: {mongosOptions: {verbose: 2}}});
// Stop balancer, we're doing our own manual chunk distribution
st.stopBalancer();
@@ -14,42 +12,31 @@ var config = mongos.getDB("config");
var admin = mongos.getDB("admin");
var shards = config.shards.find().toArray();
-for ( var i = 0; i < shards.length; i++) {
+for (var i = 0; i < shards.length; i++) {
shards[i].conn = new Mongo(shards[i].host);
}
var collOneShard = mongos.getCollection("foo.collOneShard");
var collAllShards = mongos.getCollection("foo.collAllShards");
-printjson(admin.runCommand({enableSharding : collOneShard.getDB() + ""}));
-printjson(admin.runCommand({movePrimary : collOneShard.getDB() + "",
- to : shards[0]._id}));
+printjson(admin.runCommand({enableSharding: collOneShard.getDB() + ""}));
+printjson(admin.runCommand({movePrimary: collOneShard.getDB() + "", to: shards[0]._id}));
-printjson(admin.runCommand({shardCollection : collOneShard + "",
- key : {_id : 1}}));
-printjson(admin.runCommand({shardCollection : collAllShards + "",
- key : {_id : 1}}));
+printjson(admin.runCommand({shardCollection: collOneShard + "", key: {_id: 1}}));
+printjson(admin.runCommand({shardCollection: collAllShards + "", key: {_id: 1}}));
// Split and move the "both shard" collection to both shards
-printjson(admin.runCommand({split : collAllShards + "",
- middle : {_id : 0}}));
-printjson(admin.runCommand({split : collAllShards + "",
- middle : {_id : 1000}}));
-printjson(admin.runCommand({moveChunk : collAllShards + "",
- find : {_id : 0},
- to : shards[1]._id}));
-printjson(admin.runCommand({moveChunk : collAllShards + "",
- find : {_id : 1000},
- to : shards[2]._id}));
+printjson(admin.runCommand({split: collAllShards + "", middle: {_id: 0}}));
+printjson(admin.runCommand({split: collAllShards + "", middle: {_id: 1000}}));
+printjson(admin.runCommand({moveChunk: collAllShards + "", find: {_id: 0}, to: shards[1]._id}));
+printjson(admin.runCommand({moveChunk: collAllShards + "", find: {_id: 1000}, to: shards[2]._id}));
// Collections are now distributed correctly
jsTest.log("Collections now distributed correctly.");
st.printShardingStatus();
-var inserts = [{_id : -1},
- {_id : 1},
- {_id : 1000}];
+var inserts = [{_id: -1}, {_id: 1}, {_id: 1000}];
collOneShard.insert(inserts);
assert.writeOK(collAllShards.insert(inserts));
diff --git a/jstests/sharding/rs_stepdown_and_pooling.js b/jstests/sharding/rs_stepdown_and_pooling.js
index 3cc97bfe147..928bd515635 100644
--- a/jstests/sharding/rs_stepdown_and_pooling.js
+++ b/jstests/sharding/rs_stepdown_and_pooling.js
@@ -2,115 +2,110 @@
// Tests what happens when a replica set primary goes down with pooled connections.
//
(function() {
-"use strict";
-
-var st = new ShardingTest({shards : {rs0 : {nodes : 2}}, mongos : 1});
-
-// Stop balancer to eliminate weird conn stuff
-st.stopBalancer();
-
-var mongos = st.s0;
-var coll = mongos.getCollection("foo.bar");
-var db = coll.getDB();
-
-//Test is not valid for Win32
-var is32Bits = ( db.serverBuildInfo().bits == 32 );
-if ( is32Bits && _isWindows() ) {
-
- // Win32 doesn't provide the polling interface we need to implement the check tested here
- jsTest.log( "Test is not valid on Win32 platform." );
-
-}
-else {
-
- // Non-Win32 platform
-
- var primary = st.rs0.getPrimary();
- var secondary = st.rs0.getSecondary();
-
- jsTest.log("Creating new connections...");
-
- // Create a bunch of connections to the primary node through mongos.
- // jstest ->(x10)-> mongos ->(x10)-> primary
- var conns = [];
- for ( var i = 0; i < 50; i++) {
- conns.push(new Mongo(mongos.host));
- conns[i].getCollection(coll + "").findOne();
- }
-
- jsTest.log("Returning the connections back to the pool.");
-
- for ( var i = 0; i < conns.length; i++ ) {
- conns[i] = null;
- }
- // Make sure we return connections back to the pool
- gc();
-
- // Don't make test fragile by linking to format of shardConnPoolStats, but this is useful if
- // something goes wrong.
- var connPoolStats = mongos.getDB("admin").runCommand({ shardConnPoolStats : 1 });
- printjson( connPoolStats );
-
- jsTest.log("Stepdown primary and then step back up...");
-
- var stepDown = function(node, timeSecs) {
- var result = null;
- try {
- result = node.getDB("admin").runCommand({ replSetStepDown : timeSecs, force : true });
- // Should not get here
- } catch (e) {
- printjson(e);
+ "use strict";
+
+ var st = new ShardingTest({shards: {rs0: {nodes: 2}}, mongos: 1});
+
+ // Stop balancer to eliminate weird conn stuff
+ st.stopBalancer();
+
+ var mongos = st.s0;
+ var coll = mongos.getCollection("foo.bar");
+ var db = coll.getDB();
+
+ // Test is not valid for Win32
+ var is32Bits = (db.serverBuildInfo().bits == 32);
+ if (is32Bits && _isWindows()) {
+ // Win32 doesn't provide the polling interface we need to implement the check tested here
+ jsTest.log("Test is not valid on Win32 platform.");
+
+ } else {
+ // Non-Win32 platform
+
+ var primary = st.rs0.getPrimary();
+ var secondary = st.rs0.getSecondary();
+
+ jsTest.log("Creating new connections...");
+
+ // Create a bunch of connections to the primary node through mongos.
+ // jstest ->(x10)-> mongos ->(x10)-> primary
+ var conns = [];
+ for (var i = 0; i < 50; i++) {
+ conns.push(new Mongo(mongos.host));
+ conns[i].getCollection(coll + "").findOne();
}
-
- if (result != null) printjson(result);
- assert.eq(null, result);
- };
-
- stepDown(primary, 0);
-
- jsTest.log("Waiting for mongos to acknowledge stepdown...");
-
- ReplSetTest.awaitRSClientHosts( mongos,
- secondary,
- { ismaster : true },
- st.rs0,
- 2 * 60 * 1000 ); // slow hosts can take longer to recognize sd
-
- jsTest.log("Stepping back up...");
-
- stepDown(secondary, 10000);
-
- jsTest.log("Waiting for mongos to acknowledge step up...");
-
- ReplSetTest.awaitRSClientHosts( mongos,
- primary,
- { ismaster : true },
- st.rs0,
- 2 * 60 * 1000 );
-
- jsTest.log("Waiting for socket timeout time...");
-
- // Need to wait longer than the socket polling time.
- sleep(2 * 5000);
-
- jsTest.log("Run queries using new connections.");
-
- var numErrors = 0;
- for ( var i = 0; i < conns.length; i++) {
- var newConn = new Mongo(mongos.host);
- try {
- printjson(newConn.getCollection("foo.bar").findOne());
- } catch (e) {
- printjson(e);
- numErrors++;
+
+ jsTest.log("Returning the connections back to the pool.");
+
+ for (var i = 0; i < conns.length; i++) {
+ conns[i] = null;
}
- }
-
- assert.eq(0, numErrors);
+ // Make sure we return connections back to the pool
+ gc();
+
+ // Don't make test fragile by linking to format of shardConnPoolStats, but this is useful if
+ // something goes wrong.
+ var connPoolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
+ printjson(connPoolStats);
+
+ jsTest.log("Stepdown primary and then step back up...");
+
+ var stepDown = function(node, timeSecs) {
+ var result = null;
+ try {
+ result = node.getDB("admin").runCommand({replSetStepDown: timeSecs, force: true});
+ // Should not get here
+ } catch (e) {
+ printjson(e);
+ }
+
+ if (result != null)
+ printjson(result);
+ assert.eq(null, result);
+ };
+
+ stepDown(primary, 0);
+
+ jsTest.log("Waiting for mongos to acknowledge stepdown...");
+
+ ReplSetTest.awaitRSClientHosts(
+ mongos,
+ secondary,
+ {ismaster: true},
+ st.rs0,
+ 2 * 60 * 1000); // slow hosts can take longer to recognize sd
+
+ jsTest.log("Stepping back up...");
+
+ stepDown(secondary, 10000);
+
+ jsTest.log("Waiting for mongos to acknowledge step up...");
+
+ ReplSetTest.awaitRSClientHosts(mongos, primary, {ismaster: true}, st.rs0, 2 * 60 * 1000);
+
+ jsTest.log("Waiting for socket timeout time...");
+
+ // Need to wait longer than the socket polling time.
+ sleep(2 * 5000);
+
+ jsTest.log("Run queries using new connections.");
+
+ var numErrors = 0;
+ for (var i = 0; i < conns.length; i++) {
+ var newConn = new Mongo(mongos.host);
+ try {
+ printjson(newConn.getCollection("foo.bar").findOne());
+ } catch (e) {
+ printjson(e);
+ numErrors++;
+ }
+ }
+
+ assert.eq(0, numErrors);
-} // End Win32 check
+ } // End Win32 check
-jsTest.log("DONE!");
+ jsTest.log("DONE!");
-st.stop();
+ st.stop();
}()); \ No newline at end of file
diff --git a/jstests/sharding/secondary_query_routing.js b/jstests/sharding/secondary_query_routing.js
index 8b9649a23ad..ff0dfcb22d9 100644
--- a/jstests/sharding/secondary_query_routing.js
+++ b/jstests/sharding/secondary_query_routing.js
@@ -4,35 +4,35 @@
*/
(function() {
-var rsOpts = { nodes: 2 };
-var st = new ShardingTest({ mongos: 2, shards: { rs0: rsOpts, rs1: rsOpts }});
+ var rsOpts = {
+ nodes: 2
+ };
+ var st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
-st.s0.adminCommand({ enableSharding: 'test' });
+ st.s0.adminCommand({enableSharding: 'test'});
-st.ensurePrimaryShard('test', 'test-rs0');
-st.s0.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
-st.s0.adminCommand({ split: 'test.user', middle: { x: 0 }});
+ st.ensurePrimaryShard('test', 'test-rs0');
+ st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+ st.s0.adminCommand({split: 'test.user', middle: {x: 0}});
-st.s1.setReadPref('secondary');
-var testDB = st.s1.getDB('test');
-// This establishes the shard version Mongos #1's view.
-testDB.user.insert({ x: 1 });
+ st.s1.setReadPref('secondary');
+ var testDB = st.s1.getDB('test');
+ // This establishes the shard version Mongos #1's view.
+ testDB.user.insert({x: 1});
-// Mongos #0 bumps up the version without Mongos #1 knowledge.
-// Note: moveChunk has implicit { w: 2 } write concern.
-st.s0.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'test-rs1',
- _waitForDelete: true });
+ // Mongos #0 bumps up the version without Mongos #1 knowledge.
+ // Note: moveChunk has implicit { w: 2 } write concern.
+ st.s0.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'test-rs1', _waitForDelete: true});
-// Clear all the connections to make sure that Mongos #1 will attempt to establish
-// the shard version.
-assert.commandWorked(testDB.adminCommand({ connPoolSync: 1 }));
+ // Clear all the connections to make sure that Mongos #1 will attempt to establish
+ // the shard version.
+ assert.commandWorked(testDB.adminCommand({connPoolSync: 1}));
-// Mongos #1 performs a query to the secondary.
-var res = testDB.runReadCommand({ count: 'user', query: { x: 1 }});
-assert(res.ok);
-assert.eq(1, res.n, tojson(res));
+ // Mongos #1 performs a query to the secondary.
+ var res = testDB.runReadCommand({count: 'user', query: {x: 1}});
+ assert(res.ok);
+ assert.eq(1, res.n, tojson(res));
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/server_status.js b/jstests/sharding/server_status.js
index 094cb3ca433..b8e59b22275 100644
--- a/jstests/sharding/server_status.js
+++ b/jstests/sharding/server_status.js
@@ -4,48 +4,47 @@
*/
(function() {
-"use strict";
-
-var st = new ShardingTest({ shards: 1 });
-
-var testDB = st.s.getDB('test');
-testDB.adminCommand({ enableSharding: 'test' });
-testDB.adminCommand({ shardCollection: 'test.user', key: { _id: 1 }});
-
-// Initialize shard metadata in shards
-testDB.user.insert({ x: 1 });
-
-var checkShardingServerStatus = function(doc, isCSRS) {
- var shardingSection = doc.sharding;
- assert.neq(shardingSection, null);
-
- var configConnStr = shardingSection.configsvrConnectionString;
- var configConn = new Mongo(configConnStr);
- var configIsMaster = configConn.getDB('admin').runCommand({ isMaster: 1 });
-
- var configOpTimeObj = shardingSection.lastSeenConfigServerOpTime;
-
- if (isCSRS) {
- assert.gt(configConnStr.indexOf('/'), 0);
- assert.eq(1, configIsMaster.configsvr); // If it's a shard, this field won't exist.
- assert.neq(null, configOpTimeObj);
- assert.neq(null, configOpTimeObj.ts);
- assert.neq(null, configOpTimeObj.t);
- }
- else {
- assert.eq(-1, configConnStr.indexOf('/'));
- assert.gt(configConnStr.indexOf(','), 0);
- assert.eq(0, configIsMaster.configsvr);
- assert.eq(null, configOpTimeObj);
- }
-};
-
-var mongosServerStatus = testDB.adminCommand({ serverStatus: 1 });
-var isCSRS = st.configRS != null;
-checkShardingServerStatus(mongosServerStatus, isCSRS);
-
-var mongodServerStatus = st.d0.getDB('admin').runCommand({ serverStatus: 1 });
-checkShardingServerStatus(mongodServerStatus, isCSRS);
-
-st.stop();
+ "use strict";
+
+ var st = new ShardingTest({shards: 1});
+
+ var testDB = st.s.getDB('test');
+ testDB.adminCommand({enableSharding: 'test'});
+ testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
+
+ // Initialize shard metadata in shards
+ testDB.user.insert({x: 1});
+
+ var checkShardingServerStatus = function(doc, isCSRS) {
+ var shardingSection = doc.sharding;
+ assert.neq(shardingSection, null);
+
+ var configConnStr = shardingSection.configsvrConnectionString;
+ var configConn = new Mongo(configConnStr);
+ var configIsMaster = configConn.getDB('admin').runCommand({isMaster: 1});
+
+ var configOpTimeObj = shardingSection.lastSeenConfigServerOpTime;
+
+ if (isCSRS) {
+ assert.gt(configConnStr.indexOf('/'), 0);
+ assert.eq(1, configIsMaster.configsvr); // If it's a shard, this field won't exist.
+ assert.neq(null, configOpTimeObj);
+ assert.neq(null, configOpTimeObj.ts);
+ assert.neq(null, configOpTimeObj.t);
+ } else {
+ assert.eq(-1, configConnStr.indexOf('/'));
+ assert.gt(configConnStr.indexOf(','), 0);
+ assert.eq(0, configIsMaster.configsvr);
+ assert.eq(null, configOpTimeObj);
+ }
+ };
+
+ var mongosServerStatus = testDB.adminCommand({serverStatus: 1});
+ var isCSRS = st.configRS != null;
+ checkShardingServerStatus(mongosServerStatus, isCSRS);
+
+ var mongodServerStatus = st.d0.getDB('admin').runCommand({serverStatus: 1});
+ checkShardingServerStatus(mongodServerStatus, isCSRS);
+
+ st.stop();
})();
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index faf852c6044..3b97bbc0306 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -4,44 +4,51 @@
s = new ShardingTest({name: "shard1", shards: 2});
-db = s.getDB( "test" );
-db.foo.insert( { num : 1 , name : "eliot" } );
-db.foo.insert( { num : 2 , name : "sara" } );
-db.foo.insert( { num : -1 , name : "joe" } );
-db.foo.ensureIndex( { num : 1 } );
-assert.eq( 3 , db.foo.find().length() , "A" );
-
-shardCommand = { shardcollection : "test.foo" , key : { num : 1 } };
-
-assert.throws( function(){ s.adminCommand( shardCommand ); } );
-
-s.adminCommand( { enablesharding : "test" } );
+db = s.getDB("test");
+db.foo.insert({num: 1, name: "eliot"});
+db.foo.insert({num: 2, name: "sara"});
+db.foo.insert({num: -1, name: "joe"});
+db.foo.ensureIndex({num: 1});
+assert.eq(3, db.foo.find().length(), "A");
+
+shardCommand = {
+ shardcollection: "test.foo",
+ key: {num: 1}
+};
+
+assert.throws(function() {
+ s.adminCommand(shardCommand);
+});
+
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
-assert.eq( 3 , db.foo.find().length() , "after partitioning count failed" );
+assert.eq(3, db.foo.find().length(), "after partitioning count failed");
-s.adminCommand( shardCommand );
+s.adminCommand(shardCommand);
-assert.throws( function(){ s.adminCommand({ shardCollection: 'test', key: { x: 1 }}); });
-assert.throws( function(){ s.adminCommand({ shardCollection: '.foo', key: { x: 1 }}); });
+assert.throws(function() {
+ s.adminCommand({shardCollection: 'test', key: {x: 1}});
+});
+assert.throws(function() {
+ s.adminCommand({shardCollection: '.foo', key: {x: 1}});
+});
-var cconfig = s.config.collections.findOne( { _id : "test.foo" } );
-assert( cconfig , "why no collection entry for test.foo" );
+var cconfig = s.config.collections.findOne({_id: "test.foo"});
+assert(cconfig, "why no collection entry for test.foo");
delete cconfig.lastmod;
delete cconfig.dropped;
delete cconfig.lastmodEpoch;
-assert.eq(cconfig,
- { _id : "test.foo" , key : { num : 1 } , unique : false },
- "Sharded content mismatch");
+assert.eq(cconfig, {_id: "test.foo", key: {num: 1}, unique: false}, "Sharded content mismatch");
-s.config.collections.find().forEach( printjson );
+s.config.collections.find().forEach(printjson);
-assert.eq( 1 , s.config.chunks.count() , "num chunks A");
+assert.eq(1, s.config.chunks.count(), "num chunks A");
si = s.config.chunks.findOne();
-assert( si );
-assert.eq( si.ns , "test.foo" );
+assert(si);
+assert.eq(si.ns, "test.foo");
-assert.eq( 3 , db.foo.find().length() , "after sharding, no split count failed" );
+assert.eq(3, db.foo.find().length(), "after sharding, no split count failed");
s.stop();
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index f4946e13573..abe91508650 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -4,18 +4,18 @@
* test basic sharding
*/
-placeCheck = function( num ){
- print("shard2 step: " + num );
+placeCheck = function(num) {
+ print("shard2 step: " + num);
};
-printAll = function(){
- print( "****************" );
- db.foo.find().forEach( printjsononeline );
- print( "++++++++++++++++++" );
- primary.foo.find().forEach( printjsononeline );
- print( "++++++++++++++++++" );
- secondary.foo.find().forEach( printjsononeline );
- print( "---------------------" );
+printAll = function() {
+ print("****************");
+ db.foo.find().forEach(printjsononeline);
+ print("++++++++++++++++++");
+ primary.foo.find().forEach(printjsononeline);
+ print("++++++++++++++++++");
+ secondary.foo.find().forEach(printjsononeline);
+ print("---------------------");
};
s = new ShardingTest({name: "shard2", shards: 2});
@@ -24,205 +24,221 @@ s = new ShardingTest({name: "shard2", shards: 2});
// it moves small #s of chunks too
s.stopBalancer();
-db = s.getDB( "test" );
+db = s.getDB("test");
-s.adminCommand( { enablesharding : "test" } );
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
-assert.eq( 1 , s.config.chunks.count() , "sanity check 1" );
+s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+assert.eq(1, s.config.chunks.count(), "sanity check 1");
-s.adminCommand( { split : "test.foo" , middle : { num : 0 } } );
-assert.eq( 2 , s.config.chunks.count() , "should be 2 shards" );
+s.adminCommand({split: "test.foo", middle: {num: 0}});
+assert.eq(2, s.config.chunks.count(), "should be 2 shards");
chunks = s.config.chunks.find().toArray();
-assert.eq( chunks[0].shard , chunks[1].shard , "server should be the same after a split" );
+assert.eq(chunks[0].shard, chunks[1].shard, "server should be the same after a split");
+db.foo.save({num: 1, name: "eliot"});
+db.foo.save({num: 2, name: "sara"});
+db.foo.save({num: -1, name: "joe"});
-db.foo.save( { num : 1 , name : "eliot" } );
-db.foo.save( { num : 2 , name : "sara" } );
-db.foo.save( { num : -1 , name : "joe" } );
+assert.eq(3,
+ s.getPrimaryShard("test").getDB("test").foo.find().length(),
+ "not right directly to db A");
+assert.eq(3, db.foo.find().length(), "not right on shard");
-assert.eq( 3 , s.getPrimaryShard( "test" ).getDB( "test" ).foo.find().length(),
- "not right directly to db A" );
-assert.eq( 3 , db.foo.find().length() , "not right on shard" );
+primary = s.getPrimaryShard("test").getDB("test");
+secondary = s.getOther(primary).getDB("test");
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-secondary = s.getOther( primary ).getDB( "test" );
+assert.eq(3, primary.foo.find().length(), "primary wrong B");
+assert.eq(0, secondary.foo.find().length(), "secondary wrong C");
+assert.eq(3, db.foo.find().sort({num: 1}).length());
-assert.eq( 3 , primary.foo.find().length() , "primary wrong B" );
-assert.eq( 0 , secondary.foo.find().length() , "secondary wrong C" );
-assert.eq( 3 , db.foo.find().sort( { num : 1 } ).length() );
-
-placeCheck( 2 );
+placeCheck(2);
// NOTE: at this point we have 2 shard on 1 server
// test move shard
-assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : primary.getMongo().name, _waitForDelete : true } ); } );
-assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : "adasd", _waitForDelete : true } ); } );
-
-s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : secondary.getMongo().name, _waitForDelete : true } );
-assert.eq( 2 , secondary.foo.find().length() , "secondary should have 2 after move shard" );
-assert.eq( 1 , primary.foo.find().length() , "primary should only have 1 after move shard" );
-
-assert.eq( 2 , s.config.chunks.count() , "still should have 2 shards after move not:" + s.getChunksString() );
+assert.throws(function() {
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 1},
+ to: primary.getMongo().name,
+ _waitForDelete: true
+ });
+});
+assert.throws(function() {
+ s.adminCommand({movechunk: "test.foo", find: {num: 1}, to: "adasd", _waitForDelete: true});
+});
+
+s.adminCommand(
+ {movechunk: "test.foo", find: {num: 1}, to: secondary.getMongo().name, _waitForDelete: true});
+assert.eq(2, secondary.foo.find().length(), "secondary should have 2 after move shard");
+assert.eq(1, primary.foo.find().length(), "primary should only have 1 after move shard");
+
+assert.eq(2,
+ s.config.chunks.count(),
+ "still should have 2 shards after move not:" + s.getChunksString());
chunks = s.config.chunks.find().toArray();
-assert.neq( chunks[0].shard , chunks[1].shard , "servers should NOT be the same after the move" );
+assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same after the move");
-placeCheck( 3 );
+placeCheck(3);
// test inserts go to right server/shard
-assert.writeOK(db.foo.save( { num : 3 , name : "bob" } ));
-assert.eq( 1 , primary.foo.find().length() , "after move insert go wrong place?" );
-assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
+assert.writeOK(db.foo.save({num: 3, name: "bob"}));
+assert.eq(1, primary.foo.find().length(), "after move insert go wrong place?");
+assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
-assert.writeOK(db.foo.save( { num : -2 , name : "funny man" } ));
-assert.eq( 2 , primary.foo.find().length() , "after move insert go wrong place?" );
-assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
+assert.writeOK(db.foo.save({num: -2, name: "funny man"}));
+assert.eq(2, primary.foo.find().length(), "after move insert go wrong place?");
+assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
+assert.writeOK(db.foo.save({num: 0, name: "funny guy"}));
+assert.eq(2, primary.foo.find().length(), "boundary A");
+assert.eq(4, secondary.foo.find().length(), "boundary B");
-assert.writeOK(db.foo.save( { num : 0 , name : "funny guy" } ));
-assert.eq( 2 , primary.foo.find().length() , "boundary A" );
-assert.eq( 4 , secondary.foo.find().length() , "boundary B" );
-
-placeCheck( 4 );
+placeCheck(4);
// findOne
-assert.eq( "eliot" , db.foo.findOne( { num : 1 } ).name );
-assert.eq( "funny man" , db.foo.findOne( { num : -2 } ).name );
+assert.eq("eliot", db.foo.findOne({num: 1}).name);
+assert.eq("funny man", db.foo.findOne({num: -2}).name);
// getAll
-function sumQuery( c ){
+function sumQuery(c) {
var sum = 0;
- c.toArray().forEach(
- function(z){
- sum += z.num;
- }
- );
+ c.toArray().forEach(function(z) {
+ sum += z.num;
+ });
return sum;
}
-assert.eq( 6 , db.foo.find().length() , "sharded query 1" );
-assert.eq( 3 , sumQuery( db.foo.find() ) , "sharded query 2" );
+assert.eq(6, db.foo.find().length(), "sharded query 1");
+assert.eq(3, sumQuery(db.foo.find()), "sharded query 2");
-placeCheck( 5 );
+placeCheck(5);
// sort by num
-assert.eq( 3 , sumQuery( db.foo.find().sort( { num : 1 } ) ) , "sharding query w/sort 1" );
-assert.eq( 3 , sumQuery( db.foo.find().sort( { num : -1 } ) ) , "sharding query w/sort 2" );
+assert.eq(3, sumQuery(db.foo.find().sort({num: 1})), "sharding query w/sort 1");
+assert.eq(3, sumQuery(db.foo.find().sort({num: -1})), "sharding query w/sort 2");
-assert.eq( "funny man" , db.foo.find().sort( { num : 1 } )[0].name , "sharding query w/sort 3 order wrong" );
-assert.eq( -2 , db.foo.find().sort( { num : 1 } )[0].num , "sharding query w/sort 4 order wrong" );
+assert.eq("funny man", db.foo.find().sort({num: 1})[0].name, "sharding query w/sort 3 order wrong");
+assert.eq(-2, db.foo.find().sort({num: 1})[0].num, "sharding query w/sort 4 order wrong");
-assert.eq( "bob" , db.foo.find().sort( { num : -1 } )[0].name , "sharding query w/sort 5 order wrong" );
-assert.eq( 3 , db.foo.find().sort( { num : -1 } )[0].num , "sharding query w/sort 6 order wrong" );
+assert.eq("bob", db.foo.find().sort({num: -1})[0].name, "sharding query w/sort 5 order wrong");
+assert.eq(3, db.foo.find().sort({num: -1})[0].num, "sharding query w/sort 6 order wrong");
-placeCheck( 6 );
+placeCheck(6);
// sory by name
-function getNames( c ){
- return c.toArray().map( function(z){ return z.name; } );
+function getNames(c) {
+ return c.toArray().map(function(z) {
+ return z.name;
+ });
}
-correct = getNames( db.foo.find() ).sort();
-assert.eq( correct , getNames( db.foo.find().sort( { name : 1 } ) ) );
+correct = getNames(db.foo.find()).sort();
+assert.eq(correct, getNames(db.foo.find().sort({name: 1})));
correct = correct.reverse();
-assert.eq( correct , getNames( db.foo.find().sort( { name : -1 } ) ) );
-
-assert.eq( 3 , sumQuery( db.foo.find().sort( { name : 1 } ) ) , "sharding query w/non-shard sort 1" );
-assert.eq( 3 , sumQuery( db.foo.find().sort( { name : -1 } ) ) , "sharding query w/non-shard sort 2" );
+assert.eq(correct, getNames(db.foo.find().sort({name: -1})));
+assert.eq(3, sumQuery(db.foo.find().sort({name: 1})), "sharding query w/non-shard sort 1");
+assert.eq(3, sumQuery(db.foo.find().sort({name: -1})), "sharding query w/non-shard sort 2");
// sort by num multiple shards per server
-s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
-assert.eq( "funny man" , db.foo.find().sort( { num : 1 } )[0].name , "sharding query w/sort and another split 1 order wrong" );
-assert.eq( "bob" , db.foo.find().sort( { num : -1 } )[0].name , "sharding query w/sort and another split 2 order wrong" );
-assert.eq( "funny man" , db.foo.find( { num : { $lt : 100 } } ).sort( { num : 1 } ).arrayAccess(0).name , "sharding query w/sort and another split 3 order wrong" );
-
-placeCheck( 7 );
-
-db.foo.find().sort( { _id : 1 } ).forEach( function(z){ print( z._id ); } );
+s.adminCommand({split: "test.foo", middle: {num: 2}});
+assert.eq("funny man",
+ db.foo.find().sort({num: 1})[0].name,
+ "sharding query w/sort and another split 1 order wrong");
+assert.eq("bob",
+ db.foo.find().sort({num: -1})[0].name,
+ "sharding query w/sort and another split 2 order wrong");
+assert.eq("funny man",
+ db.foo.find({num: {$lt: 100}}).sort({num: 1}).arrayAccess(0).name,
+ "sharding query w/sort and another split 3 order wrong");
+
+placeCheck(7);
+
+db.foo.find().sort({_id: 1}).forEach(function(z) {
+ print(z._id);
+});
zzz = db.foo.find().explain("executionStats").executionStats;
-assert.eq( 0 , zzz.totalKeysExamined , "EX1a" );
-assert.eq( 6 , zzz.nReturned , "EX1b" );
-assert.eq( 6 , zzz.totalDocsExamined , "EX1c" );
+assert.eq(0, zzz.totalKeysExamined, "EX1a");
+assert.eq(6, zzz.nReturned, "EX1b");
+assert.eq(6, zzz.totalDocsExamined, "EX1c");
-zzz = db.foo.find().hint( { _id : 1 } ).sort( { _id : 1 } )
- .explain("executionStats").executionStats;
-assert.eq( 6 , zzz.totalKeysExamined , "EX2a" );
-assert.eq( 6 , zzz.nReturned , "EX2b" );
-assert.eq( 6 , zzz.totalDocsExamined , "EX2c" );
+zzz = db.foo.find().hint({_id: 1}).sort({_id: 1}).explain("executionStats").executionStats;
+assert.eq(6, zzz.totalKeysExamined, "EX2a");
+assert.eq(6, zzz.nReturned, "EX2b");
+assert.eq(6, zzz.totalDocsExamined, "EX2c");
// getMore
-assert.eq( 4 , db.foo.find().limit(-4).toArray().length , "getMore 1" );
-function countCursor( c ){
+assert.eq(4, db.foo.find().limit(-4).toArray().length, "getMore 1");
+function countCursor(c) {
var num = 0;
- while ( c.hasNext() ){
+ while (c.hasNext()) {
c.next();
num++;
}
return num;
}
-assert.eq( 6 , countCursor( db.foo.find()._exec() ) , "getMore 2" );
-assert.eq( 6 , countCursor( db.foo.find().batchSize(1)._exec() ) , "getMore 3" );
+assert.eq(6, countCursor(db.foo.find()._exec()), "getMore 2");
+assert.eq(6, countCursor(db.foo.find().batchSize(1)._exec()), "getMore 3");
// find by non-shard-key
-db.foo.find().forEach(
- function(z){
- var y = db.foo.findOne( { _id : z._id } );
- assert( y , "_id check 1 : " + tojson( z ) );
- assert.eq( z.num , y.num , "_id check 2 : " + tojson( z ) );
- }
-);
+db.foo.find().forEach(function(z) {
+ var y = db.foo.findOne({_id: z._id});
+ assert(y, "_id check 1 : " + tojson(z));
+ assert.eq(z.num, y.num, "_id check 2 : " + tojson(z));
+});
// update
-person = db.foo.findOne( { num : 3 } );
-assert.eq( "bob" , person.name , "update setup 1" );
+person = db.foo.findOne({num: 3});
+assert.eq("bob", person.name, "update setup 1");
person.name = "bob is gone";
-db.foo.update( { num : 3 } , person );
-person = db.foo.findOne( { num : 3 } );
-assert.eq( "bob is gone" , person.name , "update test B" );
+db.foo.update({num: 3}, person);
+person = db.foo.findOne({num: 3});
+assert.eq("bob is gone", person.name, "update test B");
// remove
-assert( db.foo.findOne( { num : 3 } ) != null , "remove test A" );
-db.foo.remove( { num : 3 } );
-assert.isnull( db.foo.findOne( { num : 3 } ) , "remove test B" );
+assert(db.foo.findOne({num: 3}) != null, "remove test A");
+db.foo.remove({num: 3});
+assert.isnull(db.foo.findOne({num: 3}), "remove test B");
-db.foo.save( { num : 3 , name : "eliot2" } );
-person = db.foo.findOne( { num : 3 } );
-assert( person , "remove test C" );
-assert.eq( person.name , "eliot2" );
+db.foo.save({num: 3, name: "eliot2"});
+person = db.foo.findOne({num: 3});
+assert(person, "remove test C");
+assert.eq(person.name, "eliot2");
-db.foo.remove( { _id : person._id } );
-assert.isnull( db.foo.findOne( { num : 3 } ) , "remove test E" );
+db.foo.remove({_id: person._id});
+assert.isnull(db.foo.findOne({num: 3}), "remove test E");
-placeCheck( 8 );
+placeCheck(8);
// more update stuff
printAll();
total = db.foo.find().count();
-var res = assert.writeOK(db.foo.update( {}, { $inc: { x: 1 } }, false, true ));
+var res = assert.writeOK(db.foo.update({}, {$inc: {x: 1}}, false, true));
printAll();
-assert.eq( total , res.nModified, res.toString() );
-
+assert.eq(total, res.nModified, res.toString());
-res = db.foo.update( { num : -1 } , { $inc : { x : 1 } } , false , true );
-assert.eq( 1, res.nModified, res.toString() );
+res = db.foo.update({num: -1}, {$inc: {x: 1}}, false, true);
+assert.eq(1, res.nModified, res.toString());
// ---- move all to the secondary
-assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
+assert.eq(2, s.onNumShards("foo"), "on 2 shards");
-secondary.foo.insert( { num : -3 } );
+secondary.foo.insert({num: -3});
-s.adminCommand( { movechunk : "test.foo" , find : { num : -2 } , to : secondary.getMongo().name, _waitForDelete : true } );
-assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shards" );
+s.adminCommand(
+ {movechunk: "test.foo", find: {num: -2}, to: secondary.getMongo().name, _waitForDelete: true});
+assert.eq(1, s.onNumShards("foo"), "on 1 shards");
-s.adminCommand( { movechunk : "test.foo" , find : { num : -2 } , to : primary.getMongo().name, _waitForDelete : true } );
-assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards again" );
-assert.eq( 3 , s.config.chunks.count() , "only 3 chunks" );
+s.adminCommand(
+ {movechunk: "test.foo", find: {num: -2}, to: primary.getMongo().name, _waitForDelete: true});
+assert.eq(2, s.onNumShards("foo"), "on 2 shards again");
+assert.eq(3, s.config.chunks.count(), "only 3 chunks");
-print( "YO : " + tojson( db.runCommand( "serverStatus" ) ) );
+print("YO : " + tojson(db.runCommand("serverStatus")));
s.stop();
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 3b68d330eca..926b350c7e9 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -1,186 +1,194 @@
(function() {
-// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
-
-var s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: { enableBalancer: true }});
-
-s2 = s._mongos[1];
-
-db = s.getDB( "test" );
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
-if (s.configRS) {
- // Ensure that the second mongos will see the movePrimary
- s.configRS.awaitLastOpCommitted();
-}
-
-assert( sh.getBalancerState() , "A1" );
-sh.setBalancerState(false);
-assert( ! sh.getBalancerState() , "A2" );
-sh.setBalancerState(true);
-assert( sh.getBalancerState() , "A3" );
-sh.setBalancerState(false);
-assert( ! sh.getBalancerState() , "A4" );
-
-s.config.databases.find().forEach( printjson );
-
-a = s.getDB( "test" ).foo;
-b = s2.getDB( "test" ).foo;
-
-primary = s.getPrimaryShard( "test" ).getDB( "test" ).foo;
-secondary = s.getOther( primary.name ).getDB( "test" ).foo;
-
-a.save( { num : 1 } );
-a.save( { num : 2 } );
-a.save( { num : 3 } );
-
-assert.eq( 3 , a.find().toArray().length , "normal A" );
-assert.eq( 3 , b.find().toArray().length , "other A" );
-
-assert.eq( 3 , primary.count() , "p1" );
-assert.eq( 0 , secondary.count() , "s1" );
-
-assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shards" );
-
-s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
-s.adminCommand( { movechunk : "test.foo",
- find : { num : 3 },
- to : s.getOther( s.getPrimaryShard( "test" ) ).name,
- _waitForDelete : true } );
-
-assert( primary.find().toArray().length > 0 , "blah 1" );
-assert( secondary.find().toArray().length > 0 , "blah 2" );
-assert.eq( 3 , primary.find().itcount() + secondary.find().itcount() , "blah 3" );
-
-assert.eq( 3 , a.find().toArray().length , "normal B" );
-assert.eq( 3 , b.find().toArray().length , "other B" );
-
-printjson( primary._db._adminCommand( "shardingState" ) );
-
-// --- filtering ---
-
-function doCounts( name , total , onlyItCounts ){
- total = total || ( primary.count() + secondary.count() );
- if ( ! onlyItCounts )
- assert.eq( total , a.count() , name + " count" );
- assert.eq( total , a.find().sort( { n : 1 } ).itcount() , name + " itcount - sort n" );
- assert.eq( total , a.find().itcount() , name + " itcount" );
- assert.eq( total , a.find().sort( { _id : 1 } ).itcount() , name + " itcount - sort _id" );
- return total;
-}
-
-var total = doCounts( "before wrong save" );
-assert.writeOK(secondary.insert( { _id : 111 , num : -3 } ));
-doCounts( "after wrong save" , total , true );
-e = a.find().explain("executionStats").executionStats;
-assert.eq( 3 , e.nReturned , "ex1" );
-assert.eq( 0 , e.totalKeysExamined , "ex2" );
-assert.eq( 4 , e.totalDocsExamined , "ex3" );
-
-var chunkSkips = 0;
-for (var shard in e.executionStages.shards) {
- var theShard = e.executionStages.shards[shard];
- chunkSkips += getChunkSkips(theShard.executionStages);
-}
-assert.eq( 1 , chunkSkips , "ex4" );
-
-// SERVER-4612
-// make sure idhack obeys chunks
-x = a.findOne( { _id : 111 } );
-assert( ! x , "idhack didn't obey chunk boundaries " + tojson(x) );
-
-// --- move all to 1 ---
-print( "MOVE ALL TO 1" );
-
-assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
-s.printCollectionInfo( "test.foo" );
-
-assert( a.findOne( { num : 1 } ) );
-assert( b.findOne( { num : 1 } ) );
-
-print( "GOING TO MOVE" );
-assert( a.findOne( { num : 1 } ) , "pre move 1" );
-s.printCollectionInfo( "test.foo" );
-myto = s.getOther( s.getPrimaryShard( "test" ) ).name;
-print( "counts before move: " + tojson( s.shardCounts( "foo" ) ) );
-s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : myto, _waitForDelete : true } );
-print( "counts after move: " + tojson( s.shardCounts( "foo" ) ) );
-s.printCollectionInfo( "test.foo" );
-assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shard again" );
-assert( a.findOne( { num : 1 } ) , "post move 1" );
-assert( b.findOne( { num : 1 } ) , "post move 2" );
-
-print( "*** drop" );
-
-s.printCollectionInfo( "test.foo" , "before drop" );
-a.drop();
-s.printCollectionInfo( "test.foo" , "after drop" );
-
-assert.eq( 0 , a.count() , "a count after drop" );
-assert.eq( 0 , b.count() , "b count after drop" );
-
-s.printCollectionInfo( "test.foo" , "after counts" );
-
-assert.eq( 0 , primary.count() , "p count after drop" );
-assert.eq( 0 , secondary.count() , "s count after drop" );
-
-print( "*** dropDatabase setup" );
-
-s.printShardingStatus();
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
-a.save( { num : 2 } );
-a.save( { num : 3 } );
-s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
-s.adminCommand( { movechunk : "test.foo" ,
- find : { num : 3 } ,
- to : s.getOther( s.getPrimaryShard( "test" ) ).name,
- _waitForDelete : true } );
-s.printShardingStatus();
-
-s.printCollectionInfo( "test.foo" , "after dropDatabase setup" );
-doCounts( "after dropDatabase setup2" );
-s.printCollectionInfo( "test.foo" , "after dropDatabase setup3" );
-
-print( "*** ready to call dropDatabase" );
-res = s.getDB( "test" ).dropDatabase();
-assert.eq( 1 , res.ok , "dropDatabase failed : " + tojson( res ) );
-// Waiting for SERVER-2253
-assert.eq( 0 , s.config.databases.count( { _id: "test" } ) , "database 'test' was dropped but still appears in configDB" );
-
-s.printShardingStatus();
-s.printCollectionInfo( "test.foo" , "after dropDatabase call 1" );
-assert.eq( 0 , doCounts( "after dropDatabase called" ) );
-
-// ---- retry commands SERVER-1471 ----
-
-s.adminCommand( { enablesharding : "test2" } );
-s.ensurePrimaryShard('test2', 'shard0000');
-s.adminCommand( { shardcollection : "test2.foo" , key : { num : 1 } } );
-dba = s.getDB( "test2" );
-dbb = s2.getDB( "test2" );
-dba.foo.save( { num : 1 } );
-dba.foo.save( { num : 2 } );
-dba.foo.save( { num : 3 } );
-
-assert.eq( 1 , s.onNumShards( "foo" , "test2" ) , "B on 1 shards" );
-assert.eq( 3 , dba.foo.count() , "Ba" );
-assert.eq( 3 , dbb.foo.count() , "Bb" );
-
-s.adminCommand( { split : "test2.foo" , middle : { num : 2 } } );
-s.adminCommand( { movechunk : "test2.foo",
- find : { num : 3 } ,
- to : s.getOther( s.getPrimaryShard( "test2" ) ).name,
- _waitForDelete : true } );
-
-assert.eq( 2 , s.onNumShards( "foo" , "test2" ) , "B on 2 shards" );
-
-x = dba.foo.stats();
-printjson( x );
-y = dbb.foo.stats();
-printjson( y );
-
-s.stop();
+ // Include helpers for analyzing explain output.
+ load("jstests/libs/analyze_plan.js");
+
+ var s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: {enableBalancer: true}});
+
+ s2 = s._mongos[1];
+
+ db = s.getDB("test");
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+ if (s.configRS) {
+ // Ensure that the second mongos will see the movePrimary
+ s.configRS.awaitLastOpCommitted();
+ }
+
+ assert(sh.getBalancerState(), "A1");
+ sh.setBalancerState(false);
+ assert(!sh.getBalancerState(), "A2");
+ sh.setBalancerState(true);
+ assert(sh.getBalancerState(), "A3");
+ sh.setBalancerState(false);
+ assert(!sh.getBalancerState(), "A4");
+
+ s.config.databases.find().forEach(printjson);
+
+ a = s.getDB("test").foo;
+ b = s2.getDB("test").foo;
+
+ primary = s.getPrimaryShard("test").getDB("test").foo;
+ secondary = s.getOther(primary.name).getDB("test").foo;
+
+ a.save({num: 1});
+ a.save({num: 2});
+ a.save({num: 3});
+
+ assert.eq(3, a.find().toArray().length, "normal A");
+ assert.eq(3, b.find().toArray().length, "other A");
+
+ assert.eq(3, primary.count(), "p1");
+ assert.eq(0, secondary.count(), "s1");
+
+ assert.eq(1, s.onNumShards("foo"), "on 1 shards");
+
+ s.adminCommand({split: "test.foo", middle: {num: 2}});
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+ });
+
+ assert(primary.find().toArray().length > 0, "blah 1");
+ assert(secondary.find().toArray().length > 0, "blah 2");
+ assert.eq(3, primary.find().itcount() + secondary.find().itcount(), "blah 3");
+
+ assert.eq(3, a.find().toArray().length, "normal B");
+ assert.eq(3, b.find().toArray().length, "other B");
+
+ printjson(primary._db._adminCommand("shardingState"));
+
+ // --- filtering ---
+
+ function doCounts(name, total, onlyItCounts) {
+ total = total || (primary.count() + secondary.count());
+ if (!onlyItCounts)
+ assert.eq(total, a.count(), name + " count");
+ assert.eq(total, a.find().sort({n: 1}).itcount(), name + " itcount - sort n");
+ assert.eq(total, a.find().itcount(), name + " itcount");
+ assert.eq(total, a.find().sort({_id: 1}).itcount(), name + " itcount - sort _id");
+ return total;
+ }
+
+ var total = doCounts("before wrong save");
+ assert.writeOK(secondary.insert({_id: 111, num: -3}));
+ doCounts("after wrong save", total, true);
+ e = a.find().explain("executionStats").executionStats;
+ assert.eq(3, e.nReturned, "ex1");
+ assert.eq(0, e.totalKeysExamined, "ex2");
+ assert.eq(4, e.totalDocsExamined, "ex3");
+
+ var chunkSkips = 0;
+ for (var shard in e.executionStages.shards) {
+ var theShard = e.executionStages.shards[shard];
+ chunkSkips += getChunkSkips(theShard.executionStages);
+ }
+ assert.eq(1, chunkSkips, "ex4");
+
+ // SERVER-4612
+ // make sure idhack obeys chunks
+ x = a.findOne({_id: 111});
+ assert(!x, "idhack didn't obey chunk boundaries " + tojson(x));
+
+ // --- move all to 1 ---
+ print("MOVE ALL TO 1");
+
+ assert.eq(2, s.onNumShards("foo"), "on 2 shards");
+ s.printCollectionInfo("test.foo");
+
+ assert(a.findOne({num: 1}));
+ assert(b.findOne({num: 1}));
+
+ print("GOING TO MOVE");
+ assert(a.findOne({num: 1}), "pre move 1");
+ s.printCollectionInfo("test.foo");
+ myto = s.getOther(s.getPrimaryShard("test")).name;
+ print("counts before move: " + tojson(s.shardCounts("foo")));
+ s.adminCommand({movechunk: "test.foo", find: {num: 1}, to: myto, _waitForDelete: true});
+ print("counts after move: " + tojson(s.shardCounts("foo")));
+ s.printCollectionInfo("test.foo");
+ assert.eq(1, s.onNumShards("foo"), "on 1 shard again");
+ assert(a.findOne({num: 1}), "post move 1");
+ assert(b.findOne({num: 1}), "post move 2");
+
+ print("*** drop");
+
+ s.printCollectionInfo("test.foo", "before drop");
+ a.drop();
+ s.printCollectionInfo("test.foo", "after drop");
+
+ assert.eq(0, a.count(), "a count after drop");
+ assert.eq(0, b.count(), "b count after drop");
+
+ s.printCollectionInfo("test.foo", "after counts");
+
+ assert.eq(0, primary.count(), "p count after drop");
+ assert.eq(0, secondary.count(), "s count after drop");
+
+ print("*** dropDatabase setup");
+
+ s.printShardingStatus();
+ s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+ a.save({num: 2});
+ a.save({num: 3});
+ s.adminCommand({split: "test.foo", middle: {num: 2}});
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+ });
+ s.printShardingStatus();
+
+ s.printCollectionInfo("test.foo", "after dropDatabase setup");
+ doCounts("after dropDatabase setup2");
+ s.printCollectionInfo("test.foo", "after dropDatabase setup3");
+
+ print("*** ready to call dropDatabase");
+ res = s.getDB("test").dropDatabase();
+ assert.eq(1, res.ok, "dropDatabase failed : " + tojson(res));
+ // Waiting for SERVER-2253
+ assert.eq(0,
+ s.config.databases.count({_id: "test"}),
+ "database 'test' was dropped but still appears in configDB");
+
+ s.printShardingStatus();
+ s.printCollectionInfo("test.foo", "after dropDatabase call 1");
+ assert.eq(0, doCounts("after dropDatabase called"));
+
+ // ---- retry commands SERVER-1471 ----
+
+ s.adminCommand({enablesharding: "test2"});
+ s.ensurePrimaryShard('test2', 'shard0000');
+ s.adminCommand({shardcollection: "test2.foo", key: {num: 1}});
+ dba = s.getDB("test2");
+ dbb = s2.getDB("test2");
+ dba.foo.save({num: 1});
+ dba.foo.save({num: 2});
+ dba.foo.save({num: 3});
+
+ assert.eq(1, s.onNumShards("foo", "test2"), "B on 1 shards");
+ assert.eq(3, dba.foo.count(), "Ba");
+ assert.eq(3, dbb.foo.count(), "Bb");
+
+ s.adminCommand({split: "test2.foo", middle: {num: 2}});
+ s.adminCommand({
+ movechunk: "test2.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test2")).name,
+ _waitForDelete: true
+ });
+
+ assert.eq(2, s.onNumShards("foo", "test2"), "B on 2 shards");
+
+ x = dba.foo.stats();
+ printjson(x);
+ y = dbb.foo.stats();
+ printjson(y);
+
+ s.stop();
})();
diff --git a/jstests/sharding/shard4.js b/jstests/sharding/shard4.js
index bf91b816607..76b9394cb19 100644
--- a/jstests/sharding/shard4.js
+++ b/jstests/sharding/shard4.js
@@ -4,53 +4,57 @@ s = new ShardingTest({name: "shard4", shards: 2, mongos: 2});
s2 = s._mongos[1];
-s.adminCommand( { enablesharding : "test" } );
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
if (s.configRS) {
// Ensure that the second mongos will see the movePrimary
s.configRS.awaitLastOpCommitted();
}
-s.getDB( "test" ).foo.save( { num : 1 } );
-s.getDB( "test" ).foo.save( { num : 2 } );
-s.getDB( "test" ).foo.save( { num : 3 } );
-s.getDB( "test" ).foo.save( { num : 4 } );
-s.getDB( "test" ).foo.save( { num : 5 } );
-s.getDB( "test" ).foo.save( { num : 6 } );
-s.getDB( "test" ).foo.save( { num : 7 } );
-
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal A" );
-assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );
-
-s.adminCommand( { split : "test.foo" , middle : { num : 4 } } );
-s.adminCommand( { movechunk : "test.foo",
- find : { num : 3 },
- to : s.getOther( s.getPrimaryShard( "test" ) ).name,
- _waitForDelete : true } );
-
-assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "blah 1" );
-assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "blah 2" );
-assert.eq( 7 , s._connections[0].getDB( "test" ).foo.find().toArray().length +
- s._connections[1].getDB( "test" ).foo.find().toArray().length , "blah 3" );
-
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B" );
-assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" );
-
-s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
+s.getDB("test").foo.save({num: 1});
+s.getDB("test").foo.save({num: 2});
+s.getDB("test").foo.save({num: 3});
+s.getDB("test").foo.save({num: 4});
+s.getDB("test").foo.save({num: 5});
+s.getDB("test").foo.save({num: 6});
+s.getDB("test").foo.save({num: 7});
+
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal A");
+assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other A");
+
+s.adminCommand({split: "test.foo", middle: {num: 4}});
+s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+
+assert(s._connections[0].getDB("test").foo.find().toArray().length > 0, "blah 1");
+assert(s._connections[1].getDB("test").foo.find().toArray().length > 0, "blah 2");
+assert.eq(7,
+ s._connections[0].getDB("test").foo.find().toArray().length +
+ s._connections[1].getDB("test").foo.find().toArray().length,
+ "blah 3");
+
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal B");
+assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other B");
+
+s.adminCommand({split: "test.foo", middle: {num: 2}});
s.printChunks();
-print( "* A" );
+print("* A");
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 1" );
-assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B 2" );
-print( "* B" );
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 3" );
-assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B 4" );
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal B 1");
+assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other B 2");
+print("* B");
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal B 3");
+assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other B 4");
-for ( var i=0; i<10; i++ ){
- print( "* C " + i );
- assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B " + i );
+for (var i = 0; i < 10; i++) {
+ print("* C " + i);
+ assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other B " + i);
}
s.stop();
diff --git a/jstests/sharding/shard5.js b/jstests/sharding/shard5.js
index c88cd355d73..c4f05d610cd 100644
--- a/jstests/sharding/shard5.js
+++ b/jstests/sharding/shard5.js
@@ -2,59 +2,61 @@
// tests write passthrough
-s = new ShardingTest({name: "shard5", shards: 2, mongos:2});
+s = new ShardingTest({name: "shard5", shards: 2, mongos: 2});
s.stopBalancer();
s2 = s._mongos[1];
-s.adminCommand( { enablesharding : "test" } );
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
if (s.configRS) {
// Ensure that the second mongos will see the movePrimary
s.configRS.awaitLastOpCommitted();
}
-s.getDB( "test" ).foo.save( { num : 1 } );
-s.getDB( "test" ).foo.save( { num : 2 } );
-s.getDB( "test" ).foo.save( { num : 3 } );
-s.getDB( "test" ).foo.save( { num : 4 } );
-s.getDB( "test" ).foo.save( { num : 5 } );
-s.getDB( "test" ).foo.save( { num : 6 } );
-s.getDB( "test" ).foo.save( { num : 7 } );
-
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal A" );
-assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );
-
-s.adminCommand( { split : "test.foo" , middle : { num : 4 } } );
-s.adminCommand( { movechunk : "test.foo",
- find : { num : 3 },
- to : s.getOther( s.getPrimaryShard( "test" ) ).name,
- _waitForDelete : true } );
-
-assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "blah 1" );
-assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "blah 2" );
-assert.eq( 7 , s._connections[0].getDB( "test" ).foo.find().toArray().length +
- s._connections[1].getDB( "test" ).foo.find().toArray().length , "blah 3" );
-
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B" );
-assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" );
-
-s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
+s.getDB("test").foo.save({num: 1});
+s.getDB("test").foo.save({num: 2});
+s.getDB("test").foo.save({num: 3});
+s.getDB("test").foo.save({num: 4});
+s.getDB("test").foo.save({num: 5});
+s.getDB("test").foo.save({num: 6});
+s.getDB("test").foo.save({num: 7});
+
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal A");
+assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other A");
+
+s.adminCommand({split: "test.foo", middle: {num: 4}});
+s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+
+assert(s._connections[0].getDB("test").foo.find().toArray().length > 0, "blah 1");
+assert(s._connections[1].getDB("test").foo.find().toArray().length > 0, "blah 2");
+assert.eq(7,
+ s._connections[0].getDB("test").foo.find().toArray().length +
+ s._connections[1].getDB("test").foo.find().toArray().length,
+ "blah 3");
+
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal B");
+assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other B");
+
+s.adminCommand({split: "test.foo", middle: {num: 2}});
s.printChunks();
-print( "* A" );
-
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 1" );
+print("* A");
-s2.getDB( "test" ).foo.save( { num : 2 } );
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal B 1");
-assert.soon(
- function(){
- return 8 == s2.getDB( "test" ).foo.find().toArray().length;
- } , "other B 2" , 5000 , 100 );
+s2.getDB("test").foo.save({num: 2});
-assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
+assert.soon(function() {
+ return 8 == s2.getDB("test").foo.find().toArray().length;
+}, "other B 2", 5000, 100);
+assert.eq(2, s.onNumShards("foo"), "on 2 shards");
s.stop();
diff --git a/jstests/sharding/shard6.js b/jstests/sharding/shard6.js
index abc0b5adb31..2e0643189d1 100644
--- a/jstests/sharding/shard6.js
+++ b/jstests/sharding/shard6.js
@@ -4,118 +4,122 @@ summary = "";
s = new ShardingTest({name: "shard6", shards: 2});
-s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+s.config.settings.update({_id: "balancer"}, {$set: {stopped: true}}, true);
-s.adminCommand( { enablesharding : "test" } );
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.data" , key : { num : 1 } } );
+s.adminCommand({shardcollection: "test.data", key: {num: 1}});
-db = s.getDB( "test" );
+db = s.getDB("test");
-function poolStats( where ){
+function poolStats(where) {
var total = 0;
var msg = "poolStats " + where + " ";
- var x = db.runCommand( "connPoolStats" ).hosts;
- for ( var h in x ){
+ var x = db.runCommand("connPoolStats").hosts;
+ for (var h in x) {
var z = x[h];
msg += z.created + " ";
total += z.created;
}
- printjson( x );
- print( "****\n" + msg + "\n*****" );
+ printjson(x);
+ print("****\n" + msg + "\n*****");
summary += msg + "\n";
return [total, x.length];
}
-poolStats( "at start" );
+poolStats("at start");
// we want a lot of data, so lets make a 50k string to cheat :)
bigString = "";
-while ( bigString.length < 50000 )
+while (bigString.length < 50000)
bigString += "this is a big string. ";
// ok, now lets insert a some data
var num = 0;
-for ( ; num<100; num++ ){
- db.data.save( { num : num , bigString : bigString } );
+for (; num < 100; num++) {
+ db.data.save({num: num, bigString: bigString});
}
-assert.eq( 100 , db.data.find().toArray().length , "basic find after setup" );
+assert.eq(100, db.data.find().toArray().length, "basic find after setup");
-connBefore = poolStats( "setup done" );
+connBefore = poolStats("setup done");
// limit
-assert.eq( 77 , db.data.find().limit(77).itcount() , "limit test 1" );
-assert.eq( 1 , db.data.find().limit(1).itcount() , "limit test 2" );
-for ( var i=1; i<10; i++ ){
- assert.eq( i , db.data.find().limit(i).itcount() , "limit test 3a : " + i );
- assert.eq( i , db.data.find().skip(i).limit(i).itcount() , "limit test 3b : " + i );
- poolStats( "after loop : " + i );
+assert.eq(77, db.data.find().limit(77).itcount(), "limit test 1");
+assert.eq(1, db.data.find().limit(1).itcount(), "limit test 2");
+for (var i = 1; i < 10; i++) {
+ assert.eq(i, db.data.find().limit(i).itcount(), "limit test 3a : " + i);
+ assert.eq(i, db.data.find().skip(i).limit(i).itcount(), "limit test 3b : " + i);
+ poolStats("after loop : " + i);
}
// we do not want the number of connections from mongos to mongod to increase
// but it may have because of the background replica set monitor, and that case is ok.
// This is due to SERVER-22564.
-limitTestAfterConns = poolStats( "limit test done" );
+limitTestAfterConns = poolStats("limit test done");
// only check the number of connections is the same if the number of hosts we are connected to
// remains the same. TODO: remove host count check after SERVER-22564 is fixed.
-if( limitTestAfterConns[1] == connBefore[1]) {
- assert.eq( connBefore[0] , limitTestAfterConns[0], "limit test conns" );
+if (limitTestAfterConns[1] == connBefore[1]) {
+ assert.eq(connBefore[0], limitTestAfterConns[0], "limit test conns");
}
-function assertOrder( start , num ){
- var a = db.data.find().skip(start).limit(num).sort( { num : 1 } ).map( function(z){ return z.num; } );
+function assertOrder(start, num) {
+ var a = db.data.find().skip(start).limit(num).sort({num: 1}).map(function(z) {
+ return z.num;
+ });
var c = [];
- for ( var i=0; i<num; i++ )
- c.push( start + i );
- assert.eq( c , a , "assertOrder start: " + start + " num: " + num );
+ for (var i = 0; i < num; i++)
+ c.push(start + i);
+ assert.eq(c, a, "assertOrder start: " + start + " num: " + num);
}
-assertOrder( 0 , 10 );
-assertOrder( 5 , 10 );
+assertOrder(0, 10);
+assertOrder(5, 10);
-poolStats( "after checking order" );
+poolStats("after checking order");
-function doItCount( skip , sort , batchSize ){
+function doItCount(skip, sort, batchSize) {
var c = db.data.find();
- if ( skip )
- c.skip( skip );
- if ( sort )
- c.sort( sort );
- if ( batchSize )
- c.batchSize( batchSize );
+ if (skip)
+ c.skip(skip);
+ if (sort)
+ c.sort(sort);
+ if (batchSize)
+ c.batchSize(batchSize);
return c.itcount();
}
-function checkItCount( batchSize ){
- assert.eq( 5 , doItCount( num - 5 , null , batchSize ) , "skip 1 " + batchSize );
- assert.eq( 5 , doItCount( num - 5 , { num : 1 } , batchSize ) , "skip 2 " + batchSize );
- assert.eq( 5 , doItCount( num - 5 , { _id : 1 } , batchSize ) , "skip 3 " + batchSize );
- assert.eq( 0 , doItCount( num + 5 , { num : 1 } , batchSize ) , "skip 4 " + batchSize );
- assert.eq( 0 , doItCount( num + 5 , { _id : 1 } , batchSize ) , "skip 5 " + batchSize );
+function checkItCount(batchSize) {
+ assert.eq(5, doItCount(num - 5, null, batchSize), "skip 1 " + batchSize);
+ assert.eq(5, doItCount(num - 5, {num: 1}, batchSize), "skip 2 " + batchSize);
+ assert.eq(5, doItCount(num - 5, {_id: 1}, batchSize), "skip 3 " + batchSize);
+ assert.eq(0, doItCount(num + 5, {num: 1}, batchSize), "skip 4 " + batchSize);
+ assert.eq(0, doItCount(num + 5, {_id: 1}, batchSize), "skip 5 " + batchSize);
}
-poolStats( "before checking itcount" );
+poolStats("before checking itcount");
-checkItCount( 0 );
-checkItCount( 2 );
+checkItCount(0);
+checkItCount(2);
-poolStats( "after checking itcount" );
+poolStats("after checking itcount");
// --- test save support ---
o = db.data.findOne();
o.x = 16;
-db.data.save( o );
-o = db.data.findOne( { _id : o._id } );
-assert.eq( 16 , o.x , "x1 - did save fail? " + tojson(o) );
+db.data.save(o);
+o = db.data.findOne({_id: o._id});
+assert.eq(16, o.x, "x1 - did save fail? " + tojson(o));
-poolStats( "at end" );
+poolStats("at end");
-print( summary );
+print(summary);
-assert.throws( function(){ s.adminCommand( { enablesharding : "admin" } ); } );
+assert.throws(function() {
+ s.adminCommand({enablesharding: "admin"});
+});
s.stop();
diff --git a/jstests/sharding/shard7.js b/jstests/sharding/shard7.js
index e371ba0ffb7..ce0ce708784 100644
--- a/jstests/sharding/shard7.js
+++ b/jstests/sharding/shard7.js
@@ -3,49 +3,52 @@
s = new ShardingTest({name: 'shard7', shards: 2});
-db = s.admin._mongo.getDB( 'test' );
-c = db[ 'foo' ];
+db = s.admin._mongo.getDB('test');
+c = db['foo'];
c.drop();
-s.adminCommand( { enablesharding: '' + db } );
+s.adminCommand({enablesharding: '' + db});
s.ensurePrimaryShard(db.getName(), 'shard0001');
-s.adminCommand( { shardcollection: '' + c, key: { a:1,b:1 } } );
+s.adminCommand({shardcollection: '' + c, key: {a: 1, b: 1}});
// Check query operation with some satisfiable and unsatisfiable queries.
-assert.eq( 0, c.find({a:1}).itcount() );
-assert.eq( 0, c.find({a:1,b:1}).itcount() );
-assert.eq( 0, c.find({a:{$gt:4,$lt:2}}).itcount() );
-assert.eq( 0, c.find({a:1,b:{$gt:4,$lt:2}}).itcount() );
-assert.eq( 0, c.find({a:{$gt:0,$lt:2},b:{$gt:4,$lt:2}}).itcount() );
-assert.eq( 0, c.find({b:{$gt:4,$lt:2}}).itcount() );
-assert.eq( 0, c.find({a:{$in:[]}}).itcount() );
-assert.eq( 0, c.find({a:1,b:{$in:[]}}).itcount() );
+assert.eq(0, c.find({a: 1}).itcount());
+assert.eq(0, c.find({a: 1, b: 1}).itcount());
+assert.eq(0, c.find({a: {$gt: 4, $lt: 2}}).itcount());
+assert.eq(0, c.find({a: 1, b: {$gt: 4, $lt: 2}}).itcount());
+assert.eq(0, c.find({a: {$gt: 0, $lt: 2}, b: {$gt: 4, $lt: 2}}).itcount());
+assert.eq(0, c.find({b: {$gt: 4, $lt: 2}}).itcount());
+assert.eq(0, c.find({a: {$in: []}}).itcount());
+assert.eq(0, c.find({a: 1, b: {$in: []}}).itcount());
-assert.eq( 0, c.find({$or:[{a:{$gt:0,$lt:10}},{a:12}]}).itcount() );
-assert.eq( 0, c.find({$or:[{a:{$gt:0,$lt:10}},{a:5}]}).itcount() );
-assert.eq( 0, c.find({$or:[{a:1,b:{$gt:0,$lt:10}},{a:1,b:5}]}).itcount() );
+assert.eq(0, c.find({$or: [{a: {$gt: 0, $lt: 10}}, {a: 12}]}).itcount());
+assert.eq(0, c.find({$or: [{a: {$gt: 0, $lt: 10}}, {a: 5}]}).itcount());
+assert.eq(0, c.find({$or: [{a: 1, b: {$gt: 0, $lt: 10}}, {a: 1, b: 5}]}).itcount());
// Check other operations that use getShardsForQuery.
-unsatisfiable = {a:1,b:{$gt:4,$lt:2}};
-
-assert.eq( 0, c.count(unsatisfiable) );
-assert.eq( [], c.distinct('a',unsatisfiable) );
-
-aggregate = c.aggregate( { $match:unsatisfiable } );
-assert.eq( 0, aggregate.toArray().length );
-
-c.save( {a:null,b:null} );
-c.save( {a:1,b:1} );
-assert.writeOK( c.remove( unsatisfiable ));
-assert.eq( 2, c.count() );
-assert.writeOK( c.update( unsatisfiable, {$set:{c:1}}, false, true ));
-assert.eq( 2, c.count() );
-assert.eq( 0, c.count( {c:1} ) );
-
-c.ensureIndex( {loc:'2d'} );
-c.save( {a:2,b:2,loc:[0,0]} );
-near = db.runCommand( {geoNear:'foo', near:[0,0], query:unsatisfiable} );
-assert.commandWorked( near );
-assert.eq( 0, near.results.length );
+unsatisfiable = {
+ a: 1,
+ b: {$gt: 4, $lt: 2}
+};
+
+assert.eq(0, c.count(unsatisfiable));
+assert.eq([], c.distinct('a', unsatisfiable));
+
+aggregate = c.aggregate({$match: unsatisfiable});
+assert.eq(0, aggregate.toArray().length);
+
+c.save({a: null, b: null});
+c.save({a: 1, b: 1});
+assert.writeOK(c.remove(unsatisfiable));
+assert.eq(2, c.count());
+assert.writeOK(c.update(unsatisfiable, {$set: {c: 1}}, false, true));
+assert.eq(2, c.count());
+assert.eq(0, c.count({c: 1}));
+
+c.ensureIndex({loc: '2d'});
+c.save({a: 2, b: 2, loc: [0, 0]});
+near = db.runCommand({geoNear: 'foo', near: [0, 0], query: unsatisfiable});
+assert.commandWorked(near);
+assert.eq(0, near.results.length);
diff --git a/jstests/sharding/shard_collection_basic.js b/jstests/sharding/shard_collection_basic.js
index d83ae01a39c..769c2fc8163 100644
--- a/jstests/sharding/shard_collection_basic.js
+++ b/jstests/sharding/shard_collection_basic.js
@@ -3,179 +3,153 @@
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({mongos:1, shards:2});
-var kDbName = 'db';
-var mongos = st.s0;
+ var st = new ShardingTest({mongos: 1, shards: 2});
+ var kDbName = 'db';
+ var mongos = st.s0;
-function testAndClenaupWithKeyNoIndexFailed(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
+ function testAndClenaupWithKeyNoIndexFailed(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- var ns = kDbName + '.foo';
- assert.commandFailed(mongos.adminCommand({
- shardCollection: ns,
- key: keyDoc
- }));
+ var ns = kDbName + '.foo';
+ assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
- mongos.getDB(kDbName).dropDatabase();
-}
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
+ mongos.getDB(kDbName).dropDatabase();
+ }
-function testAndClenaupWithKeyOK(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(keyDoc));
+ function testAndClenaupWithKeyOK(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+ assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(keyDoc));
- var ns = kDbName + '.foo';
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
+ var ns = kDbName + '.foo';
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
- assert.commandWorked(mongos.adminCommand({
- shardCollection: ns,
- key: keyDoc
- }));
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
- mongos.getDB(kDbName).dropDatabase();
-}
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
+ mongos.getDB(kDbName).dropDatabase();
+ }
-function testAndClenaupWithKeyNoIndexOK(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
+ function testAndClenaupWithKeyNoIndexOK(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- var ns = kDbName + '.foo';
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
+ var ns = kDbName + '.foo';
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
- assert.commandWorked(mongos.adminCommand({
- shardCollection: ns,
- key: keyDoc
- }));
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
- mongos.getDB(kDbName).dropDatabase();
-}
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
+ mongos.getDB(kDbName).dropDatabase();
+ }
-// Fail if db is not sharded.
-assert.commandFailed(mongos.adminCommand({ shardCollection: kDbName + '.foo', key: {_id:1} }));
+ // Fail if db is not sharded.
+ assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-// Fail if db is not sharding enabled.
-assert.commandFailed(mongos.adminCommand({ shardCollection: kDbName + '.foo', key: {_id:1} }));
+ // Fail if db is not sharding enabled.
+ assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
-// Verify wrong arguments errors.
-assert.commandFailed(mongos.adminCommand({ shardCollection: 'foo', key: {_id:1} }));
+ // Verify wrong arguments errors.
+ assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: {_id: 1}}));
-assert.commandFailed(
- mongos.adminCommand({ shardCollection: 'foo', key: "aaa" })
-);
+ assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: "aaa"}));
-// shardCollection may only be run against admin database.
-assert.commandFailed(
- mongos.getDB('test').runCommand({ shardCollection: kDbName + '.foo', key: {_id:1} }));
+ // shardCollection may only be run against admin database.
+ assert.commandFailed(
+ mongos.getDB('test').runCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-// Can't shard if key is not specified.
-assert.commandFailed(mongos.adminCommand({
- shardCollection: kDbName + '.foo' }));
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ // Can't shard if key is not specified.
+ assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo'}));
-assert.commandFailed(mongos.adminCommand({
- shardCollection: kDbName + '.foo',
- key: {}
-}));
+ assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {}}));
-// Verify key format
-assert.commandFailed(mongos.adminCommand({
- shardCollection: kDbName + '.foo',
- key: {aKey: "hahahashed"}
-}));
+ // Verify key format
+ assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: "hahahashed"}}));
-// Error if a collection is already sharded.
-assert.commandWorked(mongos.adminCommand({
- shardCollection: kDbName + '.foo',
- key: {_id:1}
-}));
+ // Error if a collection is already sharded.
+ assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-assert.commandFailed(mongos.adminCommand({ shardCollection: kDbName + '.foo', key: {_id:1} }));
+ assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-mongos.getDB(kDbName).dropDatabase();
+ mongos.getDB(kDbName).dropDatabase();
-// Shard empty collections no index required.
-testAndClenaupWithKeyNoIndexOK({_id:1});
-testAndClenaupWithKeyNoIndexOK({_id:'hashed'});
+ // Shard empty collections no index required.
+ testAndClenaupWithKeyNoIndexOK({_id: 1});
+ testAndClenaupWithKeyNoIndexOK({_id: 'hashed'});
-// Shard by a plain key.
-testAndClenaupWithKeyNoIndexOK({a:1});
+ // Shard by a plain key.
+ testAndClenaupWithKeyNoIndexOK({a: 1});
-// Cant shard collection with data and no index on the shard key.
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-testAndClenaupWithKeyNoIndexFailed({a:1});
+ // Cant shard collection with data and no index on the shard key.
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ testAndClenaupWithKeyNoIndexFailed({a: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-testAndClenaupWithKeyOK({a:1});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ testAndClenaupWithKeyOK({a: 1});
-// Shard by a hashed key.
-testAndClenaupWithKeyNoIndexOK({a:'hashed'});
+ // Shard by a hashed key.
+ testAndClenaupWithKeyNoIndexOK({a: 'hashed'});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-testAndClenaupWithKeyNoIndexFailed({a:'hashed'});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-testAndClenaupWithKeyOK({a:'hashed'});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ testAndClenaupWithKeyOK({a: 'hashed'});
-// Shard by a compound key.
-testAndClenaupWithKeyNoIndexOK({x:1, y:1});
+ // Shard by a compound key.
+ testAndClenaupWithKeyNoIndexOK({x: 1, y: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
-testAndClenaupWithKeyNoIndexFailed({x:1, y:1});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
+ testAndClenaupWithKeyNoIndexFailed({x: 1, y: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
-testAndClenaupWithKeyOK({x:1, y:1});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
+ testAndClenaupWithKeyOK({x: 1, y: 1});
-testAndClenaupWithKeyNoIndexFailed({x:'hashed', y:1});
-testAndClenaupWithKeyNoIndexFailed({x:'hashed', y:'hashed'});
+ testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 1});
+ testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 'hashed'});
-// Shard by a key component.
-testAndClenaupWithKeyOK({'z.x':1});
-testAndClenaupWithKeyOK({'z.x':'hashed'});
+ // Shard by a key component.
+ testAndClenaupWithKeyOK({'z.x': 1});
+ testAndClenaupWithKeyOK({'z.x': 'hashed'});
-// Can't shard by a multikey.
-assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a:1}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1,2,3,4,5], b:1}));
-testAndClenaupWithKeyNoIndexFailed({a:1});
+ // Can't shard by a multikey.
+ assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
+ testAndClenaupWithKeyNoIndexFailed({a: 1});
-assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a:1, b:1}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1,2,3,4,5], b:1}));
-testAndClenaupWithKeyNoIndexFailed({a:1, b:1});
+ assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1, b: 1}));
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
+ testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-testAndClenaupWithKeyNoIndexFailed({a:'hashed'});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-testAndClenaupWithKeyOK({a:'hashed'});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ testAndClenaupWithKeyOK({a: 'hashed'});
-// Cant shard by a parallel arrays.
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1,2,3,4,5], b: [1,2,3,4,5]}));
-testAndClenaupWithKeyNoIndexFailed({a:1, b:1});
+ // Cant shard by a parallel arrays.
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: [1, 2, 3, 4, 5]}));
+ testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
-assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
-// Can't shard on unique hashed key.
-assert.commandFailed(mongos.adminCommand({
- shardCollection: kDbName + '.foo',
- key: {aKey:"hashed"},
- unique: true
-}));
+ // Can't shard on unique hashed key.
+ assert.commandFailed(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {aKey: "hashed"}, unique: true}));
-// If shardCollection has unique:true it must have a unique index.
-assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({aKey:1}));
+ // If shardCollection has unique:true it must have a unique index.
+ assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({aKey: 1}));
-assert.commandFailed(mongos.adminCommand({
- shardCollection: kDbName + '.foo',
- key: {aKey:1},
- unique: true
-}));
+ assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: 1}, unique: true}));
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index 9473ae62a03..a1b328cfe1f 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -1,42 +1,38 @@
(function() {
-var s = new ShardingTest({ name: "shard_existing",
- shards: 2,
- mongos: 1,
- other: { chunkSize: 1 } });
+ var s = new ShardingTest({name: "shard_existing", shards: 2, mongos: 1, other: {chunkSize: 1}});
-db = s.getDB( "test" );
+ db = s.getDB("test");
-var stringSize = 10000;
-var numDocs = 2000;
+ var stringSize = 10000;
+ var numDocs = 2000;
+ // we want a lot of data, so lets make a string to cheat :)
+ var bigString = new Array(stringSize).toString();
+ var docSize = Object.bsonsize({_id: numDocs, s: bigString});
+ var totalSize = docSize * numDocs;
+ print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSize);
-// we want a lot of data, so lets make a string to cheat :)
-var bigString = new Array(stringSize).toString();
-var docSize = Object.bsonsize({ _id: numDocs, s: bigString });
-var totalSize = docSize * numDocs;
-print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSize);
+ var bulk = db.data.initializeUnorderedBulkOp();
+ for (i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, s: bigString});
+ }
+ assert.writeOK(bulk.execute());
-var bulk = db.data.initializeUnorderedBulkOp();
-for (i=0; i<numDocs; i++) {
- bulk.insert({_id: i, s: bigString});
-}
-assert.writeOK(bulk.execute());
+ var avgObjSize = db.data.stats().avgObjSize;
+ var dataSize = db.data.stats().size;
+ assert.lte(totalSize, dataSize);
-var avgObjSize = db.data.stats().avgObjSize;
-var dataSize = db.data.stats().size;
-assert.lte(totalSize, dataSize);
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ res = s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
+ printjson(res);
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-res = s.adminCommand( { shardcollection : "test.data" , key : { _id : 1 } } );
-printjson(res);
+ // number of chunks should be approx equal to the total data size / half the chunk size
+ var numChunks = s.config.chunks.find().itcount();
+ var guess = Math.ceil(dataSize / (512 * 1024 + avgObjSize));
+ assert(Math.abs(numChunks - guess) < 2, "not right number of chunks");
-// number of chunks should be approx equal to the total data size / half the chunk size
-var numChunks = s.config.chunks.find().itcount();
-var guess = Math.ceil(dataSize / (512*1024 + avgObjSize));
-assert( Math.abs( numChunks - guess ) < 2, "not right number of chunks" );
-
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index 8df1b9caa8f..09ea5b5ec46 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -18,11 +18,11 @@
// Spin up a sharded cluster, but do not add the shards
var shardingTestConfig = {
- name : baseName,
- mongos : 1,
- shards : 1,
- rs : { nodes : replNodes },
- other : { manualAddShard : true }
+ name: baseName,
+ mongos: 1,
+ shards: 1,
+ rs: {nodes: replNodes},
+ other: {manualAddShard: true}
};
var shardingTest = new ShardingTest(shardingTestConfig);
@@ -33,7 +33,7 @@
var testDBReplSet1 = replSet1.getPrimary().getDB(testDBName);
var bulk = testDBReplSet1.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
- bulk.insert({ x: i, text: textString });
+ bulk.insert({x: i, text: textString});
}
assert.writeOK(bulk.execute());
@@ -42,44 +42,41 @@
var testDB = mongosConn.getDB(testDBName);
// Add replSet1 as only shard
- mongosConn.adminCommand({ addshard : replSet1.getURL() });
+ mongosConn.adminCommand({addshard: replSet1.getURL()});
// Enable sharding on test db and its collection foo
- assert.commandWorked(mongosConn.getDB('admin').runCommand({ enablesharding : testDBName }));
- testDB[testCollName].ensureIndex({ x : 1 });
- assert.commandWorked(mongosConn.getDB('admin').
- runCommand({ shardcollection : testDBName + '.' + testCollName,
- key : { x : 1 }}));
+ assert.commandWorked(mongosConn.getDB('admin').runCommand({enablesharding: testDBName}));
+ testDB[testCollName].ensureIndex({x: 1});
+ assert.commandWorked(mongosConn.getDB('admin').runCommand(
+ {shardcollection: testDBName + '.' + testCollName, key: {x: 1}}));
// Test case where GLE should return an error
- testDB.foo.insert({_id:'a', x:1});
- assert.writeError(testDB.foo.insert({ _id: 'a', x: 1 },
- { writeConcern: { w: 2, wtimeout: 30000 }}));
+ testDB.foo.insert({_id: 'a', x: 1});
+ assert.writeError(testDB.foo.insert({_id: 'a', x: 1}, {writeConcern: {w: 2, wtimeout: 30000}}));
// Add more data
bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = numDocs; i < 2 * numDocs; i++) {
- bulk.insert({ x: i, text: textString });
+ bulk.insert({x: i, text: textString});
}
- assert.writeOK(bulk.execute({ w: replNodes, wtimeout: 30000 }));
+ assert.writeOK(bulk.execute({w: replNodes, wtimeout: 30000}));
// Take down two nodes and make sure slaveOk reads still work
replSet1.stop(1);
replSet1.stop(2);
- testDB.getMongo().adminCommand({ setParameter : 1, logLevel : 1 });
+ testDB.getMongo().adminCommand({setParameter: 1, logLevel: 1});
testDB.getMongo().setSlaveOk();
print("trying some queries");
- assert.soon(function() { try {
- testDB.foo.find().next();
- }
- catch(e) {
- print(e);
- return false;
- }
- return true;
- }, "Queries took too long to complete correctly.",
- 2 * 60 * 1000);
-
+ assert.soon(function() {
+ try {
+ testDB.foo.find().next();
+ } catch (e) {
+ print(e);
+ return false;
+ }
+ return true;
+ }, "Queries took too long to complete correctly.", 2 * 60 * 1000);
+
// Shutdown cluster
shardingTest.stop();
diff --git a/jstests/sharding/shard_key_immutable.js b/jstests/sharding/shard_key_immutable.js
index c05ff17e365..76a648d8811 100644
--- a/jstests/sharding/shard_key_immutable.js
+++ b/jstests/sharding/shard_key_immutable.js
@@ -6,7 +6,8 @@
*
* To enforce this invariant, we have the following mongos rule:
*
- * - Upserts must always contain the full shard key and must only be targeted* to the applicable shard.
+ * - Upserts must always contain the full shard key and must only be targeted* to the applicable
+ *shard.
*
* and the following mongod rules:
*
@@ -15,16 +16,20 @@
* match this value.
* - Updates must not modify shard keys.
*
- * *Updates are targeted by the update query if $op-style, or the update document if replacement-style.
+ * *Updates are targeted by the update query if $op-style, or the update document if
+ *replacement-style.
*
- * NOTE: The above is enough to ensure that shard keys do not change. It is not enough to ensure
- * uniqueness of an upserted document based on the upsert query. This is necessary due to the save()
+ * NOTE: The above is enough to ensure that shard keys do not change. It is not enough to ensure
+ * uniqueness of an upserted document based on the upsert query. This is necessary due to the
+ *save()
* style operation:
- * db.coll.update({ _id : xxx }, { _id : xxx, shard : xxx, key : xxx, other : xxx }, { upsert : true })
+ * db.coll.update({ _id : xxx }, { _id : xxx, shard : xxx, key : xxx, other : xxx }, { upsert : true
+ *})
*
- * TODO: Minimize the impact of this hole by disallowing anything but save-style upserts of this form.
+ * TODO: Minimize the impact of this hole by disallowing anything but save-style upserts of this
+ *form.
* Save-style upserts of this form are not safe (duplicate _ids can be created) but the user is
- * explicitly responsible for this for the _id field.
+ * explicitly responsible for this for the _id field.
*
* In addition, there is an rule where non-multi updates can only affect 0 or 1 documents.
*
@@ -37,12 +42,12 @@
* - $ op updates have multiUpdate flag set to true.
*/
-var st = new ShardingTest({ shards: 2 });
+var st = new ShardingTest({shards: 2});
-st.adminCommand({ enablesharding: "test" });
+st.adminCommand({enablesharding: "test"});
st.ensurePrimaryShard('test', 'shard0001');
-st.adminCommand({ shardcollection: "test.col0", key: { a: 1, b: 1 }});
-st.adminCommand({ shardcollection: "test.col1", key: { 'x.a': 1 }});
+st.adminCommand({shardcollection: "test.col0", key: {a: 1, b: 1}});
+st.adminCommand({shardcollection: "test.col1", key: {'x.a': 1}});
var db = st.s.getDB('test');
var compoundColl = db.getCollection('col0');
@@ -53,104 +58,104 @@ var dotColl = db.getCollection('col1');
//
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { a: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({}, {a: 1}, false));
var doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
doc = compoundColl.findOne();
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { a: 1, b: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({}, {a: 1, b: 1}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({}, { a: 100, b: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({}, {a: 100, b: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { a: 100, b: 100, _id: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({}, {a: 100, b: 100, _id: 1}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({}, {$set: {a: 1, b: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({}, { $set: { a: 100, b: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({}, {$set: {a: 100, b: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Cannot modify _id
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1, _id: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({}, {$set: {a: 1, b: 1, _id: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({}, { $set: { c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({}, {$set: {c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'doc did not change: ' + tojson(doc));
//
// Empty query upsert
//
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { a: 1 }, true));
+assert.writeError(compoundColl.update({}, {a: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({}, { a: 1, b: 1 }, true));
+assert.writeOK(compoundColl.update({}, {a: 1, b: 1}, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 1, b: 1 }), 'doc not upserted properly: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 1, b: 1}), 'doc not upserted properly: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({}, { a: 1, b: 1, _id: 1 }, true));
+assert.writeOK(compoundColl.update({}, {a: 1, b: 1, _id: 1}, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 1, b: 1 }), 'doc not upserted properly: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 1, b: 1}), 'doc not upserted properly: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { $set: { a: 1 }}, true, true));
+assert.writeError(compoundColl.update({}, {$set: {a: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1 }}, true, true));
+assert.writeError(compoundColl.update({}, {$set: {a: 1, b: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1, _id: 1 }}, true, true));
+assert.writeError(compoundColl.update({}, {$set: {a: 1, b: 1, _id: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { $set: { c: 1 }}, true, true));
+assert.writeError(compoundColl.update({}, {$set: {c: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
@@ -159,154 +164,154 @@ assert(doc == null, 'doc upserted: ' + tojson(doc));
//
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { a: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {a: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { a: 2 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {a: 2}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { a: 100, b: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {a: 100, b: 1}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { a: 100, b: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100}, {a: 100, b: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { a: 100, b: 100, _id: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {a: 100, b: 100, _id: 1}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $set: { a: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100}, {$set: {a: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { b: 200 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {$set: {b: 200}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $set: { b: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100}, {$set: {b: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 200 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {$set: {a: 100, b: 200}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100}, {$set: {a: 100, b: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {$set: {a: 100, b: 100, _id: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $set: { c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100}, {$set: {c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'doc did not change: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $rename: { c: 'a' }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100}, {$rename: {c: 'a'}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
//
// Partial skey query upsert
//
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { a: 100 }, true));
+assert.writeError(compoundColl.update({a: 100}, {a: 100}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { a: 2 }, true));
+assert.writeError(compoundColl.update({a: 100}, {a: 2}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { a: 1, b: 1 }, true));
+assert.writeError(compoundColl.update({a: 100}, {a: 1, b: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { a: 1, b: 1, _id: 1 }, true));
+assert.writeError(compoundColl.update({a: 100}, {a: 1, b: 1, _id: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 1 }}, true, true));
+assert.writeError(compoundColl.update({a: 100}, {$set: {a: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { b: 1 }}, true, true));
+assert.writeError(compoundColl.update({a: 100}, {$set: {b: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 1 }}, true, true));
+assert.writeError(compoundColl.update({a: 100}, {$set: {a: 100, b: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 1, _id: 1 }}, true, true));
+assert.writeError(compoundColl.update({a: 100}, {$set: {a: 100, b: 1, _id: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { c: 1 }}, true, true));
+assert.writeError(compoundColl.update({a: 100}, {$set: {c: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { $rename: { c: 'a' }}, true, true));
+assert.writeError(compoundColl.update({a: 100}, {$rename: {c: 'a'}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
@@ -315,162 +320,162 @@ assert(doc == null, 'doc upserted: ' + tojson(doc));
//
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { b: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {b: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { b: 2 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {b: 2}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { a: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {a: 1}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { a: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {a: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {a: 1, b: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ b: 100 }, { a: 100, b: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({b: 100}, {a: 100, b: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 1, _id: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {a: 1, b: 1, _id: 1}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { b: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {$set: {b: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {$set: {a: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ b: 100 }, { $set: { a: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({b: 100}, {$set: {a: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {$set: {a: 1, b: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({b: 100}, {$set: {a: 100, b: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {$set: {a: 100, b: 100, _id: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ b: 100 }, { $set: { c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({b: 100}, {$set: {c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'doc did not change: ' + tojson(doc));
//
// Not prefix of skey query upsert
//
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { b: 100 }, true));
+assert.writeError(compoundColl.update({b: 100}, {b: 100}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { b: 2 }, true));
+assert.writeError(compoundColl.update({b: 100}, {b: 2}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { a: 1 }, true));
+assert.writeError(compoundColl.update({b: 100}, {a: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 1 }, true));
+assert.writeError(compoundColl.update({b: 100}, {a: 1, b: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 1, _id: 1 }, true));
+assert.writeError(compoundColl.update({b: 100}, {a: 1, b: 1, _id: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { b: 1 }}, true, true));
+assert.writeError(compoundColl.update({b: 100}, {$set: {b: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1 }}, true, true));
+assert.writeError(compoundColl.update({b: 100}, {$set: {a: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 1 }}, true, true));
+assert.writeError(compoundColl.update({b: 100}, {$set: {a: 1, b: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 1, _id: 1 }}, true, true));
+assert.writeError(compoundColl.update({b: 100}, {$set: {a: 1, b: 1, _id: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { c: 1 }}, true, true));
+assert.writeError(compoundColl.update({b: 100}, {$set: {c: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
@@ -479,211 +484,212 @@ assert(doc == null, 'doc upserted: ' + tojson(doc));
//
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { a: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100, b: 100}, {a: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, c: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {a: 100, b: 100, c: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 100 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 100}), 'doc did not change: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, _id: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100, b: 100}, {a: 100, b: 100, _id: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { b: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100, b: 100}, {b: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { b: 100, c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {b: 100, c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'doc did not change: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {a: 100, b: 100, c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'doc did not change: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(
+ compoundColl.update({a: 100, b: 100}, {$set: {a: 100, b: 100, _id: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 2, c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100, b: 100}, {$set: {a: 100, b: 2, c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'doc did not change: ' + tojson(doc));
//
// Full skey query upsert
//
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { a: 100 }, true));
+assert.writeError(compoundColl.update({a: 100, b: 100}, {a: 100}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, c: 1 }, true));
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {a: 100, b: 100, c: 1}, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'wrong doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'wrong doc: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, _id: 100 }, true));
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {a: 100, b: 100, _id: 100}, true));
doc = compoundColl.findOne();
-assert(friendlyEqual(doc, { _id: 100, a: 100, b: 100 }), 'wrong doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {_id: 100, a: 100, b: 100}), 'wrong doc: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { b: 100 }, true));
+assert.writeError(compoundColl.update({a: 100, b: 100}, {b: 100}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { b: 100, c: 1 }}, true, true));
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {b: 100, c: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, c: 1 }}, true, true));
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {a: 100, b: 100, c: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
// Can upsert with new _id
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, true, true));
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {a: 100, b: 100, _id: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 2, c: 1 }}, true, true));
+assert.writeError(compoundColl.update({a: 100, b: 100}, {$set: {a: 100, b: 2, c: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { c: 1 }}, true, true));
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {c: 1}}, true, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'wrong doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'wrong doc: ' + tojson(doc));
//
// _id query update
//
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { a: 1 }));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeError(compoundColl.update({_id: 1}, {a: 1}));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Special case for _id. This is for making save method work.
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ _id: 1 }, { a: 100, b: 100 }));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeOK(compoundColl.update({_id: 1}, {a: 100, b: 100}));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { a: 1, b: 1 }));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeError(compoundColl.update({_id: 1}, {a: 1, b: 1}));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1 }}, false, true));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeError(compoundColl.update({_id: 1}, {$set: {a: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ _id: 1 }, { $set: { a: 100 }}, false, true));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeOK(compoundColl.update({_id: 1}, {$set: {a: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { b: 1 }}, false, true));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeError(compoundColl.update({_id: 1}, {$set: {b: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ _id: 1 }, { $set: { b: 100 }}, false, true));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeOK(compoundColl.update({_id: 1}, {$set: {b: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1, b: 1 }}, false, true));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeError(compoundColl.update({_id: 1}, {$set: {a: 1, b: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
//
// _id query upsert
//
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ _id: 1 }, { a: 1 }, true));
+assert.writeError(compoundColl.update({_id: 1}, {a: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ _id: 1 }, { a: 1, b: 1 }, true));
+assert.writeOK(compoundColl.update({_id: 1}, {a: 1, b: 1}, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 1, b: 1 }), 'bad doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 1, b: 1}), 'bad doc: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1 }}, true, true));
+assert.writeError(compoundColl.update({_id: 1}, {$set: {a: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { b: 1 }}, true, true));
+assert.writeError(compoundColl.update({_id: 1}, {$set: {b: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1, b: 1 }}, true, true));
+assert.writeError(compoundColl.update({_id: 1}, {$set: {a: 1, b: 1}}, true, true));
assert.eq(0, compoundColl.count(), 'doc should not be inserted');
//
@@ -691,112 +697,112 @@ assert.eq(0, compoundColl.count(), 'doc should not be inserted');
//
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { x: { a: 100, b: 2 }}));
+dotColl.insert({x: {a: 100}});
+assert.writeOK(dotColl.update({'x.a': 100}, {x: {a: 100, b: 2}}));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100, b: 2}}), 'doc did not change: ' + tojson(doc));
// Dotted field names in the resulting objects should not be allowed.
// This check currently resides in the client drivers.
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
+dotColl.insert({x: {a: 100}});
assert.throws(function() {
- dotColl.update({ 'x.a': 100 }, { x: { 'a.z': 100 }});
+ dotColl.update({'x.a': 100}, {x: {'a.z': 100}});
});
// Dotted field names in the resulting objects should not be allowed.
// This check currently resides in the client drivers.
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
+dotColl.insert({x: {a: 100}});
assert.throws(function() {
- dotColl.update({ 'x.a': 100 }, { 'x.a': 100 });
+ dotColl.update({'x.a': 100}, {'x.a': 100});
});
// Dotted field names in the resulting objects should not be allowed.
// This check currently resides in the client drivers.
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
+dotColl.insert({x: {a: 100}});
assert.throws(function() {
- dotColl.update({ 'x.a': 100 }, { 'x.a.z': 100 });
+ dotColl.update({'x.a': 100}, {'x.a.z': 100});
});
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { x: 100 }));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {x: 100}));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { x: { b: 100 }}));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {x: {b: 100}}));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 100, b: 2 }}}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeOK(dotColl.update({'x.a': 100}, {$set: {x: {a: 100, b: 2}}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100, b: 2}}), 'doc did not change: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 2 }}}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: {a: 2}}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { b: 100 }}}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: {b: 100}}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 100, b: 2 }}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeOK(dotColl.update({'x.a': 100}, {$set: {'x.a': 100, b: 2}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }, b: 2 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}, b: 2}), 'doc did not change: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { 'a.z': 100 }}}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: {'a.z': 100}}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a.z': 100 }}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {'x.a.z': 100}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: 100 }}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: 100}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 200 }}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeOK(dotColl.update({'x.a': 100}, {$set: {'x.b': 200}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100, b: 200 }}), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100, b: 200}}), 'doc did not change: ' + tojson(doc));
//
// Dotted query upsert
//
dotColl.remove({}, false);
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { x: { a: 100, b: 2 }}, true));
+assert.writeOK(dotColl.update({'x.a': 100}, {x: {a: 100, b: 2}}, true));
doc = dotColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
@@ -804,80 +810,79 @@ assert(doc != null, 'doc was not upserted: ' + tojson(doc));
// This check currently resides in the client drivers.
dotColl.remove({}, false);
assert.throws(function() {
- dotColl.update({ 'x.a': 100 }, { x: { 'a.z': 100 }}, true);
+ dotColl.update({'x.a': 100}, {x: {'a.z': 100}}, true);
});
// Dotted field names in the resulting objects should not be allowed.
// This check currently resides in the client drivers.
dotColl.remove({}, false);
assert.throws(function() {
- dotColl.update({ 'x.a': 100 }, { 'x.a': 100 }, true);
+ dotColl.update({'x.a': 100}, {'x.a': 100}, true);
});
// Dotted field names in the resulting objects should not be allowed.
// This check currently resides in the client drivers.
dotColl.remove({}, false);
assert.throws(function() {
- dotColl.update({ 'x.a': 100 }, { 'x.a.z': 100 }, true);
+ dotColl.update({'x.a': 100}, {'x.a.z': 100}, true);
});
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { x: 100 }, true));
+assert.writeError(dotColl.update({'x.a': 100}, {x: 100}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { x: { b: 100 }}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {x: {b: 100}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 100, b: 2 }}}, true));
+assert.writeOK(dotColl.update({'x.a': 100}, {$set: {x: {a: 100, b: 2}}}, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'bad doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100, b: 2}}), 'bad doc: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 2 }}}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: {a: 2}}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { b: 100 }}}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: {b: 100}}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 100, b: 3 }}, true));
+assert.writeOK(dotColl.update({'x.a': 100}, {$set: {'x.a': 100, b: 3}}, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }, b: 3 }), 'bad doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}, b: 3}), 'bad doc: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 2 }}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {'x.a': 2}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { 'a.z': 100 }}}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: {'a.z': 100}}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a.z': 100 }}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {'x.a.z': 100}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: 100 }}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: 100}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 2 }}, true));
+assert.writeOK(dotColl.update({'x.a': 100}, {$set: {'x.b': 2}}, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'bad doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100, b: 2}}), 'bad doc: ' + tojson(doc));
st.stop();
-
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
index 0524a210918..9a63a2cfdce 100644
--- a/jstests/sharding/shard_keycount.js
+++ b/jstests/sharding/shard_keycount.js
@@ -1,50 +1,46 @@
// Tests splitting a chunk twice
(function() {
-var s = new ShardingTest({ name: "shard_keycount",
- shards: 2,
- mongos: 1,
- other:{ chunkSize: 1 } });
+ var s = new ShardingTest({name: "shard_keycount", shards: 2, mongos: 1, other: {chunkSize: 1}});
-dbName = "test";
-collName = "foo";
-ns = dbName + "." + collName;
-
-db = s.getDB( dbName );
+ dbName = "test";
+ collName = "foo";
+ ns = dbName + "." + collName;
-for(var i = 0; i < 10; i++){
- db.foo.insert({ _id : i });
-}
+ db = s.getDB(dbName);
-// Enable sharding on DB
-s.adminCommand( { enablesharding : dbName } );
-s.ensurePrimaryShard(dbName, 'shard0001');
+ for (var i = 0; i < 10; i++) {
+ db.foo.insert({_id: i});
+ }
-// Enable sharding on collection
-s.adminCommand( { shardcollection : ns, key : { _id : 1 } } );
+ // Enable sharding on DB
+ s.adminCommand({enablesharding: dbName});
+ s.ensurePrimaryShard(dbName, 'shard0001');
+ // Enable sharding on collection
+ s.adminCommand({shardcollection: ns, key: {_id: 1}});
-// Split into two chunks
-s.adminCommand({ split : ns, find : { _id : 3 } });
+ // Split into two chunks
+ s.adminCommand({split: ns, find: {_id: 3}});
-coll = db.getCollection( collName );
+ coll = db.getCollection(collName);
-// Split chunk again
-s.adminCommand({ split : ns, find : { _id : 3 } });
+ // Split chunk again
+ s.adminCommand({split: ns, find: {_id: 3}});
-coll.update({ _id : 3 }, { _id : 3 });
+ coll.update({_id: 3}, {_id: 3});
-// Split chunk again
-s.adminCommand({ split : ns, find : { _id : 3 } });
+ // Split chunk again
+ s.adminCommand({split: ns, find: {_id: 3}});
-coll.update({ _id : 3 }, { _id : 3 });
+ coll.update({_id: 3}, {_id: 3});
-// Split chunk again
-// FAILS since the key count is based on the full index, not the chunk itself
-// i.e. Split point calc'd is 5 key offset (10 documents), but only four docs
-// in chunk with bounds _id : 0 => 5
-s.adminCommand({ split : ns, find : { _id : 3 } });
+ // Split chunk again
+ // FAILS since the key count is based on the full index, not the chunk itself
+ // i.e. Split point calc'd is 5 key offset (10 documents), but only four docs
+ // in chunk with bounds _id : 0 => 5
+ s.adminCommand({split: ns, find: {_id: 3}});
-s.stop();
+ s.stop();
});
diff --git a/jstests/sharding/shard_kill_and_pooling.js b/jstests/sharding/shard_kill_and_pooling.js
index 13ae6e41e2c..6b8397f9e37 100644
--- a/jstests/sharding/shard_kill_and_pooling.js
+++ b/jstests/sharding/shard_kill_and_pooling.js
@@ -8,93 +8,89 @@
// Run through the same test twice, once with a hard -9 kill, once with a regular shutdown
-for ( var test = 0; test < 2; test++ ) {
-
-var killWith = (test == 0 ? 15 : 9);
-
-var st = new ShardingTest({shards : 2, mongos : 1});
-
-// Stop balancer to eliminate weird conn stuff
-st.stopBalancer();
-
-var mongos = st.s0;
-var coll = mongos.getCollection("foo.bar");
-var db = coll.getDB();
-
-//Test is not valid for Win32
-var is32Bits = ( db.serverBuildInfo().bits == 32 );
-if ( is32Bits && _isWindows() ) {
-
- // Win32 doesn't provide the polling interface we need to implement the check tested here
- jsTest.log( "Test is not valid on Win32 platform." );
-
-}
-else {
-
- // Non-Win32 platform
-
- assert.writeOK(coll.insert({ hello: "world" }));
-
- jsTest.log("Creating new connections...");
-
- // Create a bunch of connections to the primary node through mongos.
- // jstest ->(x10)-> mongos ->(x10)-> primary
- var conns = [];
- for ( var i = 0; i < 50; i++) {
- conns.push(new Mongo(mongos.host));
- assert.neq( null, conns[i].getCollection(coll + "").findOne() );
- }
-
- jsTest.log("Returning the connections back to the pool.");
-
- for ( var i = 0; i < conns.length; i++ ) {
- conns[i] = null;
- }
- // Make sure we return connections back to the pool
- gc();
-
- // Don't make test fragile by linking to format of shardConnPoolStats, but this is useful if
- // something goes wrong.
- var connPoolStats = mongos.getDB("admin").runCommand({ shardConnPoolStats : 1 });
- printjson( connPoolStats );
-
- jsTest.log("Shutdown shard " + (killWith == 9 ? "uncleanly" : "" ) + "...");
-
- // Flush writes to disk, since sometimes we're killing uncleanly
- assert( mongos.getDB( "admin" ).runCommand({ fsync : 1 }).ok );
-
- MongoRunner.stopMongod( st.shard0, killWith );
-
- jsTest.log("Restart shard...");
-
- st.shard0 = MongoRunner.runMongod({ restart : st.shard0, forceLock : true });
-
- jsTest.log("Waiting for socket timeout time...");
-
- // Need to wait longer than the socket polling time.
- sleep(2 * 5000);
-
- jsTest.log("Run queries using new connections.");
-
- var numErrors = 0;
- for ( var i = 0; i < conns.length; i++) {
- var newConn = new Mongo(mongos.host);
- try {
- assert.neq( null, newConn.getCollection("foo.bar").findOne() );
- } catch (e) {
- printjson(e);
- numErrors++;
+for (var test = 0; test < 2; test++) {
+ var killWith = (test == 0 ? 15 : 9);
+
+ var st = new ShardingTest({shards: 2, mongos: 1});
+
+ // Stop balancer to eliminate weird conn stuff
+ st.stopBalancer();
+
+ var mongos = st.s0;
+ var coll = mongos.getCollection("foo.bar");
+ var db = coll.getDB();
+
+ // Test is not valid for Win32
+ var is32Bits = (db.serverBuildInfo().bits == 32);
+ if (is32Bits && _isWindows()) {
+ // Win32 doesn't provide the polling interface we need to implement the check tested here
+ jsTest.log("Test is not valid on Win32 platform.");
+
+ } else {
+ // Non-Win32 platform
+
+ assert.writeOK(coll.insert({hello: "world"}));
+
+ jsTest.log("Creating new connections...");
+
+ // Create a bunch of connections to the primary node through mongos.
+ // jstest ->(x10)-> mongos ->(x10)-> primary
+ var conns = [];
+ for (var i = 0; i < 50; i++) {
+ conns.push(new Mongo(mongos.host));
+ assert.neq(null, conns[i].getCollection(coll + "").findOne());
+ }
+
+ jsTest.log("Returning the connections back to the pool.");
+
+ for (var i = 0; i < conns.length; i++) {
+ conns[i] = null;
}
- }
-
- assert.eq(0, numErrors);
+ // Make sure we return connections back to the pool
+ gc();
+
+ // Don't make test fragile by linking to format of shardConnPoolStats, but this is useful if
+ // something goes wrong.
+ var connPoolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
+ printjson(connPoolStats);
+
+ jsTest.log("Shutdown shard " + (killWith == 9 ? "uncleanly" : "") + "...");
+
+ // Flush writes to disk, since sometimes we're killing uncleanly
+ assert(mongos.getDB("admin").runCommand({fsync: 1}).ok);
+
+ MongoRunner.stopMongod(st.shard0, killWith);
+
+ jsTest.log("Restart shard...");
+
+ st.shard0 = MongoRunner.runMongod({restart: st.shard0, forceLock: true});
+
+ jsTest.log("Waiting for socket timeout time...");
+
+ // Need to wait longer than the socket polling time.
+ sleep(2 * 5000);
+
+ jsTest.log("Run queries using new connections.");
+
+ var numErrors = 0;
+ for (var i = 0; i < conns.length; i++) {
+ var newConn = new Mongo(mongos.host);
+ try {
+ assert.neq(null, newConn.getCollection("foo.bar").findOne());
+ } catch (e) {
+ printjson(e);
+ numErrors++;
+ }
+ }
+
+ assert.eq(0, numErrors);
-} // End Win32 check
+ } // End Win32 check
-st.stop();
+ st.stop();
-jsTest.log("DONE test " + test);
+ jsTest.log("DONE test " + test);
-} // End test loop
+} // End test loop
jsTest.log("DONE!");
diff --git a/jstests/sharding/shard_targeting.js b/jstests/sharding/shard_targeting.js
index ce8537d6fb4..183adac8f1e 100644
--- a/jstests/sharding/shard_targeting.js
+++ b/jstests/sharding/shard_targeting.js
@@ -5,63 +5,61 @@
// shards if the shard key happens to be one of the fields in the command object.
(function() {
-var s = new ShardingTest({ name: "shard_targeting", shards: 2 });
-s.adminCommand({ enablesharding : "test" });
-s.ensurePrimaryShard('test', 'shard0001');
+ var s = new ShardingTest({name: "shard_targeting", shards: 2});
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
-var db = s.getDB("test");
-var res;
+ var db = s.getDB("test");
+ var res;
-//
-// Target count command
-//
+ //
+ // Target count command
+ //
-// Shard key is the same with command name.
-s.shardColl("foo", {count: 1}, { count: "" });
+ // Shard key is the same with command name.
+ s.shardColl("foo", {count: 1}, {count: ""});
-for (var i=0; i<50; i++) {
- db.foo.insert({count: i}); // chunk [MinKey, ""), including numbers
- db.foo.insert({count: "" + i}); // chunk ["", MaxKey]
-}
+ for (var i = 0; i < 50; i++) {
+ db.foo.insert({count: i}); // chunk [MinKey, ""), including numbers
+ db.foo.insert({count: "" + i}); // chunk ["", MaxKey]
+ }
-var theOtherShard = s.getOther( s.getPrimaryShard( "test" ) ).name;
-s.printShardingStatus();
+ var theOtherShard = s.getOther(s.getPrimaryShard("test")).name;
+ s.printShardingStatus();
-// Count documents on both shards
+ // Count documents on both shards
-// "count" commnad with "query" option { }.
-assert.eq(db.foo.count(), 100);
-// Optional "query" option is not given.
-res = db.foo.runCommand("count");
-assert.eq(res.n, 100);
+    // "count" command with "query" option { }.
+ assert.eq(db.foo.count(), 100);
+ // Optional "query" option is not given.
+ res = db.foo.runCommand("count");
+ assert.eq(res.n, 100);
-//
-// Target mapreduce command
-//
-db.foo.drop();
-// Shard key is the same with command name.
-s.shardColl("foo", {mapReduce: 1}, { mapReduce: "" });
+ //
+ // Target mapreduce command
+ //
+ db.foo.drop();
+ // Shard key is the same with command name.
+ s.shardColl("foo", {mapReduce: 1}, {mapReduce: ""});
-for (var i=0; i<50; i++) {
- db.foo.insert({mapReduce: i}); // to the chunk including number
- db.foo.insert({mapReduce: "" + i}); // to the chunk including string
-}
+ for (var i = 0; i < 50; i++) {
+ db.foo.insert({mapReduce: i}); // to the chunk including number
+ db.foo.insert({mapReduce: "" + i}); // to the chunk including string
+ }
-s.printShardingStatus();
+ s.printShardingStatus();
-function m() { emit("total", 1); }
-function r(k, v) { return Array.sum(v); }
-res = db.foo.runCommand(
-{
- mapReduce: "foo",
- map: m,
- reduce: r,
- out: { inline: 1 }
-});
+ function m() {
+ emit("total", 1);
+ }
+ function r(k, v) {
+ return Array.sum(v);
+ }
+ res = db.foo.runCommand({mapReduce: "foo", map: m, reduce: r, out: {inline: 1}});
-// Count documents on both shards
-assert.eq(res.results[0].value, 100);
+ // Count documents on both shards
+ assert.eq(res.results[0].value, 100);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/shard_with_special_db_names.js b/jstests/sharding/shard_with_special_db_names.js
index cb1ae66a04c..e218a08609b 100644
--- a/jstests/sharding/shard_with_special_db_names.js
+++ b/jstests/sharding/shard_with_special_db_names.js
@@ -1,30 +1,28 @@
-(function(){
+(function() {
-var s = new ShardingTest({ name: "shard_with_special_db_names",
- shards: 2,
- mongos: 2 });
-var specialDB = "[a-z]+";
-var specialNS = specialDB + ".special";
+ var s = new ShardingTest({name: "shard_with_special_db_names", shards: 2, mongos: 2});
+ var specialDB = "[a-z]+";
+ var specialNS = specialDB + ".special";
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.data" , key : { num : 1 } } );
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.data", key: {num: 1}});
-// Test that the database will not complain "cannot have 2 database names that
-// differs on case"
-s.adminCommand( { enablesharding : specialDB } );
-s.ensurePrimaryShard(specialDB, 'shard0000');
-s.adminCommand( { shardcollection : specialNS, key : { num : 1 } } );
+ // Test that the database will not complain "cannot have 2 database names that
+ // differs on case"
+ s.adminCommand({enablesharding: specialDB});
+ s.ensurePrimaryShard(specialDB, 'shard0000');
+ s.adminCommand({shardcollection: specialNS, key: {num: 1}});
-var exists = s.getDB("config").collections.find( { _id: specialNS } ).count();
-assert.eq( exists, 1 );
+ var exists = s.getDB("config").collections.find({_id: specialNS}).count();
+ assert.eq(exists, 1);
-// Test that drop database properly cleans up config
-s.getDB(specialDB).dropDatabase();
+ // Test that drop database properly cleans up config
+ s.getDB(specialDB).dropDatabase();
-var cursor = s.getDB("config").collections.find( { _id: specialNS } );
+ var cursor = s.getDB("config").collections.find({_id: specialNS});
-assert.eq( cursor.count(), 1 );
-assert( cursor.next()["dropped"] );
+ assert.eq(cursor.count(), 1);
+ assert(cursor.next()["dropped"]);
})();
diff --git a/jstests/sharding/sharded_limit_batchsize.js b/jstests/sharding/sharded_limit_batchsize.js
index 57303ed530a..cd636b56cdc 100644
--- a/jstests/sharding/sharded_limit_batchsize.js
+++ b/jstests/sharding/sharded_limit_batchsize.js
@@ -2,118 +2,111 @@
// of limit and batchSize with sort return the correct results, and do not issue
// unnecessary getmores (see SERVER-14299).
(function() {
-'use strict';
-
-/**
- * Test the correctness of queries with sort and batchSize on a sharded cluster,
- * running the queries against collection 'coll'.
- */
-function testBatchSize(coll) {
- //Roll the cursor over the second batch and make sure it's correctly sized
- assert.eq(20, coll.find().sort({x: 1}).batchSize(3).itcount());
- assert.eq(15, coll.find().sort({x: 1}).batchSize(3).skip(5).itcount());
-}
-
-/**
- * Test the correctness of queries with sort and limit on a sharded cluster,
- * running the queries against collection 'coll'.
- */
-function testLimit(coll) {
- var cursor = coll.find().sort({x: 1}).limit(3);
- assert.eq(-10, cursor.next()["_id"]);
- assert.eq(-9, cursor.next()["_id"]);
- assert.eq(-8, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- assert.eq(13, coll.find().sort({x: 1}).limit(13).itcount());
-
- cursor = coll.find().sort({x: 1}).skip(5).limit(2);
- assert.eq(-5, cursor.next()["_id"]);
- assert.eq(-4, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- cursor = coll.find().sort({x: 1}).skip(9).limit(2);
- assert.eq(-1, cursor.next()["_id"]);
- assert.eq(1, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- cursor = coll.find().sort({x: 1}).skip(11).limit(2);
- assert.eq(2, cursor.next()["_id"]);
- assert.eq(3, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-}
-
-//
-// Create a two-shard cluster. Have an unsharded collection and a sharded collection.
-//
-
-var st = new ShardingTest({
- shards: 2,
- other: {shardOptions: {setParameter: "enableTestCommands=1"}}
-});
-
-var db = st.s.getDB("test");
-var shardedCol = db.getCollection("sharded_limit_batchsize");
-var unshardedCol = db.getCollection("unsharded_limit_batchsize");
-shardedCol.drop();
-unshardedCol.drop();
-
-// Enable sharding and pre-split the sharded collection.
-assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
-st.ensurePrimaryShard(db.getName(), "shard0000");
-db.adminCommand({shardCollection: shardedCol.getFullName(), key: {_id: 1}});
-assert.commandWorked(db.adminCommand({split: shardedCol.getFullName(), middle: {_id: 0}}));
-assert.commandWorked(db.adminCommand({moveChunk: shardedCol.getFullName(),
- find: {_id: 0},
- to: "shard0001"}));
-
-// Write 10 documents to shard 0, and 10 documents to shard 1 inside the sharded collection.
-// Write 20 documents which all go to the primary shard in the unsharded collection.
-for (var i=1; i<=10; ++i) {
- // These go to shard 1.
- assert.writeOK(shardedCol.insert({_id: i, x: i}));
-
- // These go to shard 0.
- assert.writeOK(shardedCol.insert({_id: -i, x: -i}));
-
- // These go to shard 0 inside the non-sharded collection.
- assert.writeOK(unshardedCol.insert({_id: i, x: i}));
- assert.writeOK(unshardedCol.insert({_id: -i, x: -i}));
-}
-
-//
-// Run tests for batch size. These should issue getmores.
-//
-
-jsTest.log("Running batchSize tests against sharded collection.");
-st.shard0.adminCommand({setParameter: 1, logLevel : 1});
-testBatchSize(shardedCol);
-st.shard0.adminCommand({setParameter: 1, logLevel : 0});
-
-jsTest.log("Running batchSize tests against non-sharded collection.");
-testBatchSize(unshardedCol);
-
-//
-// Run tests for limit. These should *not* issue getmores. We confirm this
-// by enabling the getmore failpoint on the shards.
-//
-
-assert.commandWorked(st.shard0.getDB("test").adminCommand({
- configureFailPoint: "failReceivedGetmore",
- mode: "alwaysOn"
-}));
-
-assert.commandWorked(st.shard1.getDB("test").adminCommand({
- configureFailPoint: "failReceivedGetmore",
- mode: "alwaysOn"
-}));
-
-jsTest.log("Running limit tests against sharded collection.");
-testLimit(shardedCol, st.shard0);
-
-jsTest.log("Running limit tests against non-sharded collection.");
-testLimit(unshardedCol, st.shard0);
-
-st.stop();
+ 'use strict';
+
+ /**
+ * Test the correctness of queries with sort and batchSize on a sharded cluster,
+ * running the queries against collection 'coll'.
+ */
+ function testBatchSize(coll) {
+ // Roll the cursor over the second batch and make sure it's correctly sized
+ assert.eq(20, coll.find().sort({x: 1}).batchSize(3).itcount());
+ assert.eq(15, coll.find().sort({x: 1}).batchSize(3).skip(5).itcount());
+ }
+
+ /**
+ * Test the correctness of queries with sort and limit on a sharded cluster,
+ * running the queries against collection 'coll'.
+ */
+ function testLimit(coll) {
+ var cursor = coll.find().sort({x: 1}).limit(3);
+ assert.eq(-10, cursor.next()["_id"]);
+ assert.eq(-9, cursor.next()["_id"]);
+ assert.eq(-8, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ assert.eq(13, coll.find().sort({x: 1}).limit(13).itcount());
+
+ cursor = coll.find().sort({x: 1}).skip(5).limit(2);
+ assert.eq(-5, cursor.next()["_id"]);
+ assert.eq(-4, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ cursor = coll.find().sort({x: 1}).skip(9).limit(2);
+ assert.eq(-1, cursor.next()["_id"]);
+ assert.eq(1, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ cursor = coll.find().sort({x: 1}).skip(11).limit(2);
+ assert.eq(2, cursor.next()["_id"]);
+ assert.eq(3, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+ }
+
+ //
+ // Create a two-shard cluster. Have an unsharded collection and a sharded collection.
+ //
+
+ var st = new ShardingTest(
+ {shards: 2, other: {shardOptions: {setParameter: "enableTestCommands=1"}}});
+
+ var db = st.s.getDB("test");
+ var shardedCol = db.getCollection("sharded_limit_batchsize");
+ var unshardedCol = db.getCollection("unsharded_limit_batchsize");
+ shardedCol.drop();
+ unshardedCol.drop();
+
+ // Enable sharding and pre-split the sharded collection.
+ assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+ st.ensurePrimaryShard(db.getName(), "shard0000");
+ db.adminCommand({shardCollection: shardedCol.getFullName(), key: {_id: 1}});
+ assert.commandWorked(db.adminCommand({split: shardedCol.getFullName(), middle: {_id: 0}}));
+ assert.commandWorked(
+ db.adminCommand({moveChunk: shardedCol.getFullName(), find: {_id: 0}, to: "shard0001"}));
+
+ // Write 10 documents to shard 0, and 10 documents to shard 1 inside the sharded collection.
+ // Write 20 documents which all go to the primary shard in the unsharded collection.
+ for (var i = 1; i <= 10; ++i) {
+ // These go to shard 1.
+ assert.writeOK(shardedCol.insert({_id: i, x: i}));
+
+ // These go to shard 0.
+ assert.writeOK(shardedCol.insert({_id: -i, x: -i}));
+
+ // These go to shard 0 inside the non-sharded collection.
+ assert.writeOK(unshardedCol.insert({_id: i, x: i}));
+ assert.writeOK(unshardedCol.insert({_id: -i, x: -i}));
+ }
+
+ //
+ // Run tests for batch size. These should issue getmores.
+ //
+
+ jsTest.log("Running batchSize tests against sharded collection.");
+ st.shard0.adminCommand({setParameter: 1, logLevel: 1});
+ testBatchSize(shardedCol);
+ st.shard0.adminCommand({setParameter: 1, logLevel: 0});
+
+ jsTest.log("Running batchSize tests against non-sharded collection.");
+ testBatchSize(unshardedCol);
+
+ //
+ // Run tests for limit. These should *not* issue getmores. We confirm this
+ // by enabling the getmore failpoint on the shards.
+ //
+
+ assert.commandWorked(st.shard0.getDB("test").adminCommand(
+ {configureFailPoint: "failReceivedGetmore", mode: "alwaysOn"}));
+
+ assert.commandWorked(st.shard1.getDB("test").adminCommand(
+ {configureFailPoint: "failReceivedGetmore", mode: "alwaysOn"}));
+
+ jsTest.log("Running limit tests against sharded collection.");
+ testLimit(shardedCol, st.shard0);
+
+ jsTest.log("Running limit tests against non-sharded collection.");
+ testLimit(unshardedCol, st.shard0);
+
+ st.stop();
})();
diff --git a/jstests/sharding/sharded_profile.js b/jstests/sharding/sharded_profile.js
index 4877cb8f1ca..0a8d8424e35 100644
--- a/jstests/sharding/sharded_profile.js
+++ b/jstests/sharding/sharded_profile.js
@@ -3,33 +3,33 @@
(function() {
-var st = new ShardingTest({ shards: 1, mongos: 2 });
-st.stopBalancer();
+ var st = new ShardingTest({shards: 1, mongos: 2});
+ st.stopBalancer();
-var admin = st.s0.getDB('admin');
-var shards = st.s0.getCollection('config.shards').find().toArray();
-var coll = st.s0.getCollection('foo.bar');
+ var admin = st.s0.getDB('admin');
+ var shards = st.s0.getCollection('config.shards').find().toArray();
+ var coll = st.s0.getCollection('foo.bar');
-assert(admin.runCommand({ enableSharding: coll.getDB() + '' }).ok);
-assert(admin.runCommand({ shardCollection: coll + '', key: { _id: 1 } }).ok);
+ assert(admin.runCommand({enableSharding: coll.getDB() + ''}).ok);
+ assert(admin.runCommand({shardCollection: coll + '', key: {_id: 1}}).ok);
-st.printShardingStatus();
+ st.printShardingStatus();
-jsTest.log('Turning on profiling on ' + st.shard0);
+ jsTest.log('Turning on profiling on ' + st.shard0);
-st.shard0.getDB(coll.getDB().toString()).setProfilingLevel(2);
+ st.shard0.getDB(coll.getDB().toString()).setProfilingLevel(2);
-var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
+ var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
-var inserts = [{ _id: 0 }, { _id: 1 }, { _id: 2 }];
+ var inserts = [{_id: 0}, {_id: 1}, {_id: 2}];
-assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
+ assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
-profileEntry = profileColl.findOne();
-assert.neq(null, profileEntry);
-printjson(profileEntry);
-assert.eq(profileEntry.query.documents, inserts);
+ profileEntry = profileColl.findOne();
+ assert.neq(null, profileEntry);
+ printjson(profileEntry);
+ assert.eq(profileEntry.query.documents, inserts);
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index 57df8648559..2d45e829492 100644
--- a/jstests/sharding/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -1,62 +1,71 @@
(function() {
-var s = new ShardingTest({ name: "slow_sharding_balance1",
- shards: 2,
- mongos: 1,
- other: { chunkSize: 1, enableBalancer : true } });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-
-s.config.settings.find().forEach( printjson );
-
-db = s.getDB( "test" );
-
-bigString = "";
-while ( bigString.length < 10000 )
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-
-inserted = 0;
-num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while ( inserted < ( 20 * 1024 * 1024 ) ){
- bulk.insert({ _id: num++, s: bigString });
- inserted += bigString.length;
-}
-assert.writeOK(bulk.execute());
-
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-assert.lt( 20 , s.config.chunks.count() , "setup2" );
-
-function diff1(){
- var x = s.chunkCounts( "foo" );
- printjson( x );
- return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
-}
-
-function sum(){
- var x = s.chunkCounts( "foo" );
- return x.shard0000 + x.shard0001;
-}
-
-assert.lt( 20 , diff1() , "big differential here" );
-print( diff1() );
-
-assert.soon( function(){
- var d = diff1();
- return d < 5;
-// Make sure there's enough time here, since balancing can sleep for 15s or so between balances.
-} , "balance didn't happen" , 1000 * 60 * 5 , 5000 );
-
-var chunkCount = sum();
-s.adminCommand( { removeshard: "shard0000" } );
-
-assert.soon( function(){
- printjson(s.chunkCounts( "foo" ));
- s.config.shards.find().forEach(function(z){printjson(z);});
- return chunkCount == s.config.chunks.count({shard: "shard0001"});
-} , "removeshard didn't happen" , 1000 * 60 * 3 , 5000 );
-
-s.stop();
+ var s = new ShardingTest({
+ name: "slow_sharding_balance1",
+ shards: 2,
+ mongos: 1,
+ other: {chunkSize: 1, enableBalancer: true}
+ });
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+
+ s.config.settings.find().forEach(printjson);
+
+ db = s.getDB("test");
+
+ bigString = "";
+ while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+ inserted = 0;
+ num = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (inserted < (20 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+ }
+ assert.writeOK(bulk.execute());
+
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ assert.lt(20, s.config.chunks.count(), "setup2");
+
+ function diff1() {
+ var x = s.chunkCounts("foo");
+ printjson(x);
+ return Math.max(x.shard0000, x.shard0001) - Math.min(x.shard0000, x.shard0001);
+ }
+
+ function sum() {
+ var x = s.chunkCounts("foo");
+ return x.shard0000 + x.shard0001;
+ }
+
+ assert.lt(20, diff1(), "big differential here");
+ print(diff1());
+
+ assert.soon(
+ function() {
+ var d = diff1();
+ return d < 5;
+ // Make sure there's enough time here, since balancing can sleep for 15s or so between
+ // balances.
+ },
+ "balance didn't happen",
+ 1000 * 60 * 5,
+ 5000);
+
+ var chunkCount = sum();
+ s.adminCommand({removeshard: "shard0000"});
+
+ assert.soon(function() {
+ printjson(s.chunkCounts("foo"));
+ s.config.shards.find().forEach(function(z) {
+ printjson(z);
+ });
+ return chunkCount == s.config.chunks.count({shard: "shard0001"});
+ }, "removeshard didn't happen", 1000 * 60 * 3, 5000);
+
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index e7ad317e0f5..37c84ed8ded 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -3,74 +3,73 @@
*/
(function() {
-"use strict";
+ "use strict";
-var MaxSizeMB = 1;
+ var MaxSizeMB = 1;
-var s = new ShardingTest({ shards: 2, other: { chunkSize: 1, manualAddShard: true }});
-var db = s.getDB( "test" );
-s.stopBalancer();
+ var s = new ShardingTest({shards: 2, other: {chunkSize: 1, manualAddShard: true}});
+ var db = s.getDB("test");
+ s.stopBalancer();
-var names = s.getConnNames();
-assert.eq(2, names.length);
-s.adminCommand({ addshard: names[0] });
-s.adminCommand({ addshard: names[1], maxSize: MaxSizeMB });
+ var names = s.getConnNames();
+ assert.eq(2, names.length);
+ s.adminCommand({addshard: names[0]});
+ s.adminCommand({addshard: names[1], maxSize: MaxSizeMB});
-s.adminCommand({ enablesharding: "test" });
-var res = db.adminCommand({ movePrimary: 'test', to: names[0] });
-assert(res.ok || res.errmsg == "it is already the primary");
+ s.adminCommand({enablesharding: "test"});
+ var res = db.adminCommand({movePrimary: 'test', to: names[0]});
+ assert(res.ok || res.errmsg == "it is already the primary");
+ var bigString = "";
+ while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-var bigString = "";
-while ( bigString.length < 10000 )
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+ var inserted = 0;
+ var num = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (inserted < (40 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+ }
+ assert.writeOK(bulk.execute());
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ assert.gt(s.config.chunks.count(), 10);
-var inserted = 0;
-var num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while ( inserted < ( 40 * 1024 * 1024 ) ){
- bulk.insert({ _id: num++, s: bigString });
- inserted += bigString.length;
-}
-assert.writeOK(bulk.execute());
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-assert.gt(s.config.chunks.count(), 10);
+ var getShardSize = function(conn) {
+ var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
+ return listDatabases.totalSize;
+ };
-var getShardSize = function(conn) {
- var listDatabases = conn.getDB('admin').runCommand({ listDatabases: 1 });
- return listDatabases.totalSize;
-};
+ var shardConn = new Mongo(names[1]);
-var shardConn = new Mongo(names[1]);
+ // Make sure that shard doesn't have any documents.
+ assert.eq(0, shardConn.getDB('test').foo.find().itcount());
-// Make sure that shard doesn't have any documents.
-assert.eq(0, shardConn.getDB('test').foo.find().itcount());
+ var maxSizeBytes = MaxSizeMB * 1024 * 1024;
-var maxSizeBytes = MaxSizeMB * 1024 * 1024;
+ // Fill the shard with documents to exceed the max size so the balancer won't move
+ // chunks to this shard.
+ var localColl = shardConn.getDB('local').padding;
+ while (getShardSize(shardConn) < maxSizeBytes) {
+ var localBulk = localColl.initializeUnorderedBulkOp();
-// Fill the shard with documents to exceed the max size so the balancer won't move
-// chunks to this shard.
-var localColl = shardConn.getDB('local').padding;
-while (getShardSize(shardConn) < maxSizeBytes) {
- var localBulk = localColl.initializeUnorderedBulkOp();
+ for (var x = 0; x < 20; x++) {
+ localBulk.insert({x: x, val: bigString});
+ }
+ assert.writeOK(localBulk.execute());
- for (var x = 0; x < 20; x++) {
- localBulk.insert({ x: x, val: bigString });
+ // Force the storage engine to flush files to disk so shardSize will get updated.
+ assert.commandWorked(shardConn.getDB('admin').runCommand({fsync: 1}));
}
- assert.writeOK(localBulk.execute());
-
- // Force the storage engine to flush files to disk so shardSize will get updated.
- assert.commandWorked(shardConn.getDB('admin').runCommand({ fsync: 1 }));
-}
-s.startBalancer();
+ s.startBalancer();
-// Wait until balancer finishes at least one balancing round.
-assert(s.waitForBalancerRound(), "Balancer is not running: it never pinged config.mongos");
+ // Wait until balancer finishes at least one balancing round.
+ assert(s.waitForBalancerRound(), "Balancer is not running: it never pinged config.mongos");
-var chunkCounts = s.chunkCounts('foo', 'test');
-assert.eq(0, chunkCounts.shard0001);
+ var chunkCounts = s.chunkCounts('foo', 'test');
+ assert.eq(0, chunkCounts.shard0001);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
index 51e5765b19e..876709ace4a 100644
--- a/jstests/sharding/sharding_balance3.js
+++ b/jstests/sharding/sharding_balance3.js
@@ -1,68 +1,69 @@
-// Simple test to make sure things get balanced
+// Simple test to make sure things get balanced
(function() {
-var s = new ShardingTest({ name: "slow_sharding_balance3",
- shards: 2,
- mongos: 1,
- other: { chunkSize: 1, enableBalancer : true } });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-
-s.config.settings.find().forEach( printjson );
-
-db = s.getDB( "test" );
-
-bigString = "";
-while ( bigString.length < 10000 )
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-
-inserted = 0;
-num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while ( inserted < ( 40 * 1024 * 1024 ) ){
- bulk.insert({ _id: num++, s: bigString });
- inserted += bigString.length;
-}
-assert.writeOK(bulk.execute());
-
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-assert.lt( 20 , s.config.chunks.count() , "setup2" );
-
-function diff1(){
- var x = s.chunkCounts( "foo" );
- printjson( x );
- return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
-}
-
-assert.lt( 10 , diff1() );
-
-// Wait for balancer to kick in.
-var initialDiff = diff1();
-assert.soon(function() {
- return diff1() != initialDiff;
- }, "Balancer did not kick in");
-
-print("* A");
-print( "disabling the balancer" );
-s.config.settings.update( { _id : "balancer" }, { $set : { stopped : true } } , true );
-s.config.settings.find().forEach( printjson );
-print("* B");
-
-
-print( diff1() );
-
-var currDiff = diff1();
-var waitTime = 0;
-var startTime = Date.now();
-while ( waitTime < ( 1000 * 60 ) ) {
- // Wait for 60 seconds to ensure balancer did not run
- assert.eq( currDiff, diff1(), "balance with stopped flag should not have happened" );
- sleep( 5000 );
- waitTime = Date.now() - startTime;
-}
-
-s.stop();
+ var s = new ShardingTest({
+ name: "slow_sharding_balance3",
+ shards: 2,
+ mongos: 1,
+ other: {chunkSize: 1, enableBalancer: true}
+ });
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+
+ s.config.settings.find().forEach(printjson);
+
+ db = s.getDB("test");
+
+ bigString = "";
+ while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+ inserted = 0;
+ num = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (inserted < (40 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+ }
+ assert.writeOK(bulk.execute());
+
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ assert.lt(20, s.config.chunks.count(), "setup2");
+
+ function diff1() {
+ var x = s.chunkCounts("foo");
+ printjson(x);
+ return Math.max(x.shard0000, x.shard0001) - Math.min(x.shard0000, x.shard0001);
+ }
+
+ assert.lt(10, diff1());
+
+ // Wait for balancer to kick in.
+ var initialDiff = diff1();
+ assert.soon(function() {
+ return diff1() != initialDiff;
+ }, "Balancer did not kick in");
+
+ print("* A");
+ print("disabling the balancer");
+ s.config.settings.update({_id: "balancer"}, {$set: {stopped: true}}, true);
+ s.config.settings.find().forEach(printjson);
+ print("* B");
+
+ print(diff1());
+
+ var currDiff = diff1();
+ var waitTime = 0;
+ var startTime = Date.now();
+ while (waitTime < (1000 * 60)) {
+ // Wait for 60 seconds to ensure balancer did not run
+ assert.eq(currDiff, diff1(), "balance with stopped flag should not have happened");
+ sleep(5000);
+ waitTime = Date.now() - startTime;
+ }
+
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index 3a89efce5c7..d78e94d407f 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -1,140 +1,140 @@
// Check that doing updates done during a migrate all go to the right place
(function() {
-var s = new ShardingTest({ name: "slow_sharding_balance4",
- shards: 2,
- mongos: 1,
- other: { chunkSize: 1 } });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-assert.eq( 1 , s.config.chunks.count() , "setup1" );
-
-s.config.settings.find().forEach( printjson );
-
-db = s.getDB( "test" );
-
-bigString = "";
-while ( bigString.length < 10000 )
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-
-N = 3000;
-
-num = 0;
-
-counts = {};
-
-//
-// TODO: Rewrite to make much clearer.
-//
-// The core behavior of this test is to add a bunch of documents to a sharded collection, then
-// incrementally update each document and make sure the counts in the document match our update
-// counts while balancing occurs (doUpdate()). Every once in a while we also check (check())
-// our counts via a query.
-//
-// If during a chunk migration an update is missed, we trigger an assertion and fail.
-//
-
-
-function doUpdate( bulk, includeString, optionalId ){
- var up = { $inc : { x : 1 } };
- if ( includeString )
- up["$set"] = { s : bigString };
- var myid = optionalId == undefined ? Random.randInt( N ) : optionalId;
- bulk.find({ _id : myid }).upsert().update( up );
-
- counts[myid] = ( counts[myid] ? counts[myid] : 0 ) + 1;
- return myid;
-}
-
-Random.setRandomSeed();
-// Initially update all documents from 1 to N, otherwise later checks can fail because no document
-// previously existed
-var bulk = db.foo.initializeUnorderedBulkOp();
-for ( i = 0; i < N; i++ ){
- doUpdate( bulk, true, i );
-}
-
-for ( i=0; i<N*9; i++ ){
- doUpdate( bulk, false );
-}
-assert.writeOK(bulk.execute());
-
-for ( var i=0; i<50; i++ ){
- s.printChunks( "test.foo" );
- if ( check( "initial:" + i , true ) )
- break;
- sleep( 5000 );
-}
-check( "initial at end" );
-
-
-assert.lt( 20 , s.config.chunks.count() , "setup2" );
-
-function check( msg , dontAssert ){
- for ( var x in counts ){
- var e = counts[x];
- var z = db.foo.findOne( { _id : parseInt( x ) } );
-
- if ( z && z.x == e )
- continue;
-
- if ( dontAssert ){
- if ( z )
- delete z.s;
- print( "not asserting for key failure: " + x + " want: " + e + " got: " + tojson(z) );
- return false;
- }
-
- s.s.getDB("admin").runCommand({ setParameter : 1, logLevel : 2 });
-
- printjson( db.foo.findOne( { _id : parseInt( x ) } ) );
+ var s = new ShardingTest(
+ {name: "slow_sharding_balance4", shards: 2, mongos: 1, other: {chunkSize: 1}});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ assert.eq(1, s.config.chunks.count(), "setup1");
+
+ s.config.settings.find().forEach(printjson);
+
+ db = s.getDB("test");
+
+ bigString = "";
+ while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+ N = 3000;
+
+ num = 0;
+
+ counts = {};
+
+ //
+ // TODO: Rewrite to make much clearer.
+ //
+ // The core behavior of this test is to add a bunch of documents to a sharded collection, then
+ // incrementally update each document and make sure the counts in the document match our update
+ // counts while balancing occurs (doUpdate()). Every once in a while we also check (check())
+ // our counts via a query.
+ //
+ // If during a chunk migration an update is missed, we trigger an assertion and fail.
+ //
+
+ function doUpdate(bulk, includeString, optionalId) {
+ var up = {
+ $inc: {x: 1}
+ };
+ if (includeString)
+ up["$set"] = {
+ s: bigString
+ };
+ var myid = optionalId == undefined ? Random.randInt(N) : optionalId;
+ bulk.find({_id: myid}).upsert().update(up);
+
+ counts[myid] = (counts[myid] ? counts[myid] : 0) + 1;
+ return myid;
+ }
- var y = db.foo.findOne( { _id : parseInt( x ) } );
+ Random.setRandomSeed();
+ // Initially update all documents from 1 to N, otherwise later checks can fail because no
+ // document
+ // previously existed
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (i = 0; i < N; i++) {
+ doUpdate(bulk, true, i);
+ }
- if ( y ){
- delete y.s;
- }
+ for (i = 0; i < N * 9; i++) {
+ doUpdate(bulk, false);
+ }
+ assert.writeOK(bulk.execute());
- s.printChunks( "test.foo" );
-
- assert( z , "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg );
- assert.eq( e , z.x , "count for : " + x + " y:" + tojson(y) + " " + msg );
+ for (var i = 0; i < 50; i++) {
+ s.printChunks("test.foo");
+ if (check("initial:" + i, true))
+ break;
+ sleep(5000);
}
+ check("initial at end");
+
+ assert.lt(20, s.config.chunks.count(), "setup2");
+
+ function check(msg, dontAssert) {
+ for (var x in counts) {
+ var e = counts[x];
+ var z = db.foo.findOne({_id: parseInt(x)});
+
+ if (z && z.x == e)
+ continue;
+
+ if (dontAssert) {
+ if (z)
+ delete z.s;
+ print("not asserting for key failure: " + x + " want: " + e + " got: " + tojson(z));
+ return false;
+ }
- return true;
-}
+ s.s.getDB("admin").runCommand({setParameter: 1, logLevel: 2});
-function diff1(){
-
- jsTest.log("Running diff1...");
+ printjson(db.foo.findOne({_id: parseInt(x)}));
- bulk = db.foo.initializeUnorderedBulkOp();
- var myid = doUpdate( bulk, false );
- var res = assert.writeOK(bulk.execute());
+ var y = db.foo.findOne({_id: parseInt(x)});
- assert.eq( 1, res.nModified,
- "diff myid: " + myid + " 2: " + res.toString() + "\n" +
- " correct count is: " + counts[myid] +
- " db says count is: " + tojson(db.foo.findOne({ _id: myid })) );
+ if (y) {
+ delete y.s;
+ }
- var x = s.chunkCounts( "foo" );
- if ( Math.random() > .999 )
- printjson( x );
- return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
-}
+ s.printChunks("test.foo");
+
+ assert(z, "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg);
+ assert.eq(e, z.x, "count for : " + x + " y:" + tojson(y) + " " + msg);
+ }
+
+ return true;
+ }
+
+ function diff1() {
+ jsTest.log("Running diff1...");
+
+ bulk = db.foo.initializeUnorderedBulkOp();
+ var myid = doUpdate(bulk, false);
+ var res = assert.writeOK(bulk.execute());
+
+ assert.eq(1,
+ res.nModified,
+ "diff myid: " + myid + " 2: " + res.toString() + "\n" + " correct count is: " +
+ counts[myid] + " db says count is: " + tojson(db.foo.findOne({_id: myid})));
+
+ var x = s.chunkCounts("foo");
+ if (Math.random() > .999)
+ printjson(x);
+ return Math.max(x.shard0000, x.shard0001) - Math.min(x.shard0000, x.shard0001);
+ }
-assert.lt( 20 , diff1() ,"initial load" );
-print( diff1() );
+ assert.lt(20, diff1(), "initial load");
+ print(diff1());
-s.startBalancer();
+ s.startBalancer();
-assert.soon( function(){
- var d = diff1();
- return d < 5;
-} , "balance didn't happen" , 1000 * 60 * 20 , 1 );
+ assert.soon(function() {
+ var d = diff1();
+ return d < 5;
+ }, "balance didn't happen", 1000 * 60 * 20, 1);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js
index 0edeb2a0ac9..584181cdce2 100644
--- a/jstests/sharding/sharding_migrate_cursor1.js
+++ b/jstests/sharding/sharding_migrate_cursor1.js
@@ -1,81 +1,85 @@
// SERVER-2068
(function() {
-var chunkSize = 25;
+ var chunkSize = 25;
-var s = new ShardingTest({ name: "migrate_cursor1",
- shards: 2,
- mongos: 1,
- other: { chunkSize : chunkSize } });
+ var s = new ShardingTest(
+ {name: "migrate_cursor1", shards: 2, mongos: 1, other: {chunkSize: chunkSize}});
-s.adminCommand( { enablesharding : "test" } );
-db = s.getDB( "test" );
-s.ensurePrimaryShard('test', 'shard0001');
-t = db.foo;
+ s.adminCommand({enablesharding: "test"});
+ db = s.getDB("test");
+ s.ensurePrimaryShard('test', 'shard0001');
+ t = db.foo;
-bigString = "";
-stringSize = 1024;
+ bigString = "";
+ stringSize = 1024;
-while ( bigString.length < stringSize )
- bigString += "asdasdas";
+ while (bigString.length < stringSize)
+ bigString += "asdasdas";
-stringSize = bigString.length;
-docsPerChunk = Math.ceil( ( chunkSize * 1024 * 1024 ) / ( stringSize - 12 ) );
-numChunks = 5;
-numDocs = 20 * docsPerChunk;
+ stringSize = bigString.length;
+ docsPerChunk = Math.ceil((chunkSize * 1024 * 1024) / (stringSize - 12));
+ numChunks = 5;
+ numDocs = 20 * docsPerChunk;
-print( "stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs );
+ print("stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs);
-var bulk = t.initializeUnorderedBulkOp();
-for (var i = 0; i < numDocs; i++){
- bulk.insert({ _id: i, s: bigString });
-}
-assert.writeOK(bulk.execute());
+ var bulk = t.initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, s: bigString});
+ }
+ assert.writeOK(bulk.execute());
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-assert.lt( numChunks , s.config.chunks.find().count() , "initial 1" );
+ assert.lt(numChunks, s.config.chunks.find().count(), "initial 1");
-primary = s.getPrimaryShard( "test" ).getDB( "test" ).foo;
-secondaryName = s.getOther( primary.name );
-secondary = secondaryName.getDB( "test" ).foo;
+ primary = s.getPrimaryShard("test").getDB("test").foo;
+ secondaryName = s.getOther(primary.name);
+ secondary = secondaryName.getDB("test").foo;
-assert.eq( numDocs , primary.count() , "initial 2" );
-assert.eq( 0 , secondary.count() , "initial 3" );
-assert.eq( numDocs , t.count() , "initial 4" );
+ assert.eq(numDocs, primary.count(), "initial 2");
+ assert.eq(0, secondary.count(), "initial 3");
+ assert.eq(numDocs, t.count(), "initial 4");
-x = primary.find( { _id : { $lt : 500 } } ).batchSize(2);
-x.next(); // 1. Create an open cursor
+ x = primary.find({_id: {$lt: 500}}).batchSize(2);
+ x.next(); // 1. Create an open cursor
-print("start moving chunks...");
+ print("start moving chunks...");
-// 2. Move chunk from s0 to s1 without waiting for deletion.
-// Command returns, but the deletion on s0 will block due to the open cursor.
-s.adminCommand( { moveChunk : "test.foo" , find : { _id : 0 } , to : secondaryName.name } );
+ // 2. Move chunk from s0 to s1 without waiting for deletion.
+ // Command returns, but the deletion on s0 will block due to the open cursor.
+ s.adminCommand({moveChunk: "test.foo", find: {_id: 0}, to: secondaryName.name});
-// 3. Start second moveChunk command from s0 to s1.
-// This moveChunk should not observe the above deletion as a 'mod', transfer it to s1 and cause deletion on s1.
-// This moveChunk will wait for deletion.
-join = startParallelShell( "db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " + docsPerChunk * 3 + " } , to : '" + secondaryName.name + "', _waitForDelete: true } )" );
-assert.soon( function(){ return db.x.count() > 0; } , "XXX" , 30000 , 1 );
+ // 3. Start second moveChunk command from s0 to s1.
+ // This moveChunk should not observe the above deletion as a 'mod', transfer it to s1 and cause
+ // deletion on s1.
+ // This moveChunk will wait for deletion.
+ join = startParallelShell(
+ "db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " +
+ docsPerChunk * 3 + " } , to : '" + secondaryName.name + "', _waitForDelete: true } )");
+ assert.soon(function() {
+ return db.x.count() > 0;
+ }, "XXX", 30000, 1);
-// 4. Close the cursor to enable chunk deletion.
-print( "itcount: " + x.itcount() );
+ // 4. Close the cursor to enable chunk deletion.
+ print("itcount: " + x.itcount());
-x = null;
-for ( i=0; i<5; i++ ) gc();
+ x = null;
+ for (i = 0; i < 5; i++)
+ gc();
-print( "cursor should be gone" );
+ print("cursor should be gone");
-// 5. Waiting for the second moveChunk to finish its deletion.
-// Note the deletion for the first moveChunk may not be finished.
-join();
+ // 5. Waiting for the second moveChunk to finish its deletion.
+ // Note the deletion for the first moveChunk may not be finished.
+ join();
-//assert.soon( function(){ return numDocs == t.count(); } , "at end 1" )
-// 6. Check the total number of docs on both shards to make sure no doc is lost.
-// Use itcount() to ignore orphan docments.
-assert.eq( numDocs , t.find().itcount() , "at end 2" );
+ // assert.soon( function(){ return numDocs == t.count(); } , "at end 1" )
+ // 6. Check the total number of docs on both shards to make sure no doc is lost.
 +    // Use itcount() to ignore orphan documents.
+ assert.eq(numDocs, t.find().itcount(), "at end 2");
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_multiple_ns_rs.js b/jstests/sharding/sharding_multiple_ns_rs.js
index f83d744527e..f3465e3b10d 100644
--- a/jstests/sharding/sharding_multiple_ns_rs.js
+++ b/jstests/sharding/sharding_multiple_ns_rs.js
@@ -1,57 +1,55 @@
(function() {
-var s = new ShardingTest({ name: "Sharding multiple ns",
- shards: 1,
- mongos: 1,
- other: { rs : true , chunkSize: 1 } });
+ var s = new ShardingTest(
+ {name: "Sharding multiple ns", shards: 1, mongos: 1, other: {rs: true, chunkSize: 1}});
-s.adminCommand( { enablesharding : "test" } );
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+ s.adminCommand({enablesharding: "test"});
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-db = s.getDB( "test" );
+ db = s.getDB("test");
-var bulk = db.foo.initializeUnorderedBulkOp();
-var bulk2 = db.bar.initializeUnorderedBulkOp();
-for ( i=0; i<100; i++ ) {
- bulk.insert({ _id: i, x: i });
- bulk2.insert({ _id: i, x: i });
-}
-assert.writeOK(bulk.execute());
-assert.writeOK(bulk2.execute());
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ var bulk2 = db.bar.initializeUnorderedBulkOp();
+ for (i = 0; i < 100; i++) {
+ bulk.insert({_id: i, x: i});
+ bulk2.insert({_id: i, x: i});
+ }
+ assert.writeOK(bulk.execute());
+ assert.writeOK(bulk2.execute());
-sh.splitAt( "test.foo" , { _id : 50 } );
+ sh.splitAt("test.foo", {_id: 50});
-other = new Mongo( s.s.name );
-dbother = other.getDB( "test" );
+ other = new Mongo(s.s.name);
+ dbother = other.getDB("test");
-assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x );
-assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
+ assert.eq(5, db.foo.findOne({_id: 5}).x);
+ assert.eq(5, dbother.foo.findOne({_id: 5}).x);
-assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x );
-assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
+ assert.eq(5, db.bar.findOne({_id: 5}).x);
+ assert.eq(5, dbother.bar.findOne({_id: 5}).x);
-s._rs[0].test.awaitReplication();
-s._rs[0].test.stopMaster(15);
+ s._rs[0].test.awaitReplication();
+ s._rs[0].test.stopMaster(15);
-// Wait for the primary to come back online...
-var primary = s._rs[0].test.getPrimary();
+ // Wait for the primary to come back online...
+ var primary = s._rs[0].test.getPrimary();
-// Wait for the mongos to recognize the new primary...
-ReplSetTest.awaitRSClientHosts( db.getMongo(), primary, { ismaster : true } );
+ // Wait for the mongos to recognize the new primary...
+ ReplSetTest.awaitRSClientHosts(db.getMongo(), primary, {ismaster: true});
-assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x );
-assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x );
+ assert.eq(5, db.foo.findOne({_id: 5}).x);
+ assert.eq(5, db.bar.findOne({_id: 5}).x);
-s.adminCommand( { shardcollection : "test.bar" , key : { _id : 1 } } );
-sh.splitAt( "test.bar" , { _id : 50 } );
+ s.adminCommand({shardcollection: "test.bar", key: {_id: 1}});
+ sh.splitAt("test.bar", {_id: 50});
-yetagain = new Mongo( s.s.name );
-assert.eq( 5 , yetagain.getDB( "test" ).bar.findOne( { _id : 5 } ).x );
-assert.eq( 5 , yetagain.getDB( "test" ).foo.findOne( { _id : 5 } ).x );
+ yetagain = new Mongo(s.s.name);
+ assert.eq(5, yetagain.getDB("test").bar.findOne({_id: 5}).x);
+ assert.eq(5, yetagain.getDB("test").foo.findOne({_id: 5}).x);
-assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
-assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
+ assert.eq(5, dbother.bar.findOne({_id: 5}).x);
+ assert.eq(5, dbother.foo.findOne({_id: 5}).x);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_options.js b/jstests/sharding/sharding_options.js
index 7fe34a0c140..0841967b18e 100644
--- a/jstests/sharding/sharding_options.js
+++ b/jstests/sharding/sharding_options.js
@@ -2,142 +2,99 @@ var baseName = "jstests_sharding_sharding_options";
load('jstests/libs/command_line/test_parsed_options.js');
-
-
// Move Paranoia
jsTest.log("Testing \"moveParanoia\" command line option");
var expectedResult = {
- "parsed" : {
- "sharding" : {
- "archiveMovedChunks" : true
- }
- }
+ "parsed": {"sharding": {"archiveMovedChunks": true}}
};
-testGetCmdLineOptsMongod({ moveParanoia : "" }, expectedResult);
+testGetCmdLineOptsMongod({moveParanoia: ""}, expectedResult);
jsTest.log("Testing \"noMoveParanoia\" command line option");
expectedResult = {
- "parsed" : {
- "sharding" : {
- "archiveMovedChunks" : false
- }
- }
+ "parsed": {"sharding": {"archiveMovedChunks": false}}
};
-testGetCmdLineOptsMongod({ noMoveParanoia : "" }, expectedResult);
+testGetCmdLineOptsMongod({noMoveParanoia: ""}, expectedResult);
jsTest.log("Testing \"sharding.archiveMovedChunks\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/enable_paranoia.json",
- "sharding" : {
- "archiveMovedChunks" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/enable_paranoia.json",
+ "sharding": {"archiveMovedChunks": true}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/enable_paranoia.json" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_paranoia.json"},
expectedResult);
-
-
// Sharding Role
jsTest.log("Testing \"configsvr\" command line option");
var expectedResult = {
- "parsed" : {
- "sharding" : {
- "clusterRole" : "configsvr"
- },
- "storage" : {
- "journal" : {
- "enabled" : true
- }
- }
- }
+ "parsed":
+ {"sharding": {"clusterRole": "configsvr"}, "storage": {"journal": {"enabled": true}}}
};
-testGetCmdLineOptsMongod({ configsvr : "", journal: "" }, expectedResult);
+testGetCmdLineOptsMongod({configsvr: "", journal: ""}, expectedResult);
jsTest.log("Testing \"shardsvr\" command line option");
expectedResult = {
- "parsed" : {
- "sharding" : {
- "clusterRole" : "shardsvr"
- }
- }
+ "parsed": {"sharding": {"clusterRole": "shardsvr"}}
};
-testGetCmdLineOptsMongod({ shardsvr : "" }, expectedResult);
+testGetCmdLineOptsMongod({shardsvr: ""}, expectedResult);
jsTest.log("Testing \"sharding.clusterRole\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/set_shardingrole.json",
- "sharding" : {
- "clusterRole" : "configsvr"
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/set_shardingrole.json",
+ "sharding": {"clusterRole": "configsvr"}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/set_shardingrole.json" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/set_shardingrole.json"},
expectedResult);
-
-
// Auto Splitting
jsTest.log("Testing \"noAutoSplit\" command line option");
var expectedResult = {
- "parsed" : {
- "sharding" : {
- "autoSplit" : false
- }
- }
+ "parsed": {"sharding": {"autoSplit": false}}
};
-testGetCmdLineOptsMongos({ noAutoSplit : "" }, expectedResult);
+testGetCmdLineOptsMongos({noAutoSplit: ""}, expectedResult);
jsTest.log("Testing \"sharding.autoSplit\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/enable_autosplit.json",
- "sharding" : {
- "autoSplit" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/enable_autosplit.json",
+ "sharding": {"autoSplit": true}
}
};
-testGetCmdLineOptsMongos({ config : "jstests/libs/config_files/enable_autosplit.json" },
+testGetCmdLineOptsMongos({config: "jstests/libs/config_files/enable_autosplit.json"},
expectedResult);
// Test that we preserve switches explicitly set to false in config files. See SERVER-13439.
jsTest.log("Testing explicitly disabled \"moveParanoia\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_moveparanoia.ini",
- "sharding" : {
- "archiveMovedChunks" : false
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_moveparanoia.ini",
+ "sharding": {"archiveMovedChunks": false}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_moveparanoia.ini" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_moveparanoia.ini"},
expectedResult);
jsTest.log("Testing explicitly disabled \"noMoveParanoia\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_nomoveparanoia.ini",
- "sharding" : {
- "archiveMovedChunks" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_nomoveparanoia.ini",
+ "sharding": {"archiveMovedChunks": true}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_nomoveparanoia.ini" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_nomoveparanoia.ini"},
expectedResult);
jsTest.log("Testing explicitly disabled \"noAutoSplit\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_noautosplit.ini",
- "sharding" : {
- "autoSplit" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_noautosplit.ini",
+ "sharding": {"autoSplit": true}
}
};
-testGetCmdLineOptsMongos({ config : "jstests/libs/config_files/disable_noautosplit.ini" },
+testGetCmdLineOptsMongos({config: "jstests/libs/config_files/disable_noautosplit.ini"},
expectedResult);
-
print(baseName + " succeeded.");
diff --git a/jstests/sharding/sharding_rs1.js b/jstests/sharding/sharding_rs1.js
index 4da40b344d1..3ab25906838 100644
--- a/jstests/sharding/sharding_rs1.js
+++ b/jstests/sharding/sharding_rs1.js
@@ -1,60 +1,59 @@
// tests sharding with replica sets
(function() {
-'use strict';
+ 'use strict';
-var s = new ShardingTest({ shards: 3,
- other: { rs: true, chunkSize: 1, enableBalancer: true }});
+ var s = new ShardingTest({shards: 3, other: {rs: true, chunkSize: 1, enableBalancer: true}});
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'test-rs0');
-s.config.settings.update( { _id: "balancer" }, { $set : { _waitForDelete : true } } , true );
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'test-rs0');
+ s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
-var db = s.getDB("test");
+ var db = s.getDB("test");
-var bigString = "X".repeat(256 * 1024);
+ var bigString = "X".repeat(256 * 1024);
-var insertedBytes = 0;
-var num = 0;
+ var insertedBytes = 0;
+ var num = 0;
-// Insert 10 MB of data to result in 10+ chunks
-var bulk = db.foo.initializeUnorderedBulkOp();
-while (insertedBytes < (10 * 1024 * 1024)) {
- bulk.insert({ _id: num++, s: bigString, x: Math.random() });
- insertedBytes += bigString.length;
-}
-assert.writeOK(bulk.execute({w: 3}));
+ // Insert 10 MB of data to result in 10+ chunks
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (insertedBytes < (10 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString, x: Math.random()});
+ insertedBytes += bigString.length;
+ }
+ assert.writeOK(bulk.execute({w: 3}));
-assert.commandWorked(s.s.adminCommand({ shardcollection: "test.foo" , key: { _id: 1 } }));
+ assert.commandWorked(s.s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
-jsTest.log("Waiting for balance to complete");
-s.awaitBalance('foo', 'test', 3 * 60 * 1000);
+ jsTest.log("Waiting for balance to complete");
+ s.awaitBalance('foo', 'test', 3 * 60 * 1000);
-jsTest.log("Stopping balancer");
-s.stopBalancer();
+ jsTest.log("Stopping balancer");
+ s.stopBalancer();
-jsTest.log("Balancer stopped, checking dbhashes");
-s._rs.forEach(function(rsNode) {
- rsNode.test.awaitReplication();
+ jsTest.log("Balancer stopped, checking dbhashes");
+ s._rs.forEach(function(rsNode) {
+ rsNode.test.awaitReplication();
- var dbHashes = rsNode.test.getHashes("test");
- print(rsNode.url + ': ' + tojson(dbHashes));
+ var dbHashes = rsNode.test.getHashes("test");
+ print(rsNode.url + ': ' + tojson(dbHashes));
- for (var j = 0; j < dbHashes.slaves.length; j++) {
- assert.eq(dbHashes.master.md5,
- dbHashes.slaves[j].md5,
- "hashes not same for: " + rsNode.url + " slave: " + j);
- }
-});
+ for (var j = 0; j < dbHashes.slaves.length; j++) {
+ assert.eq(dbHashes.master.md5,
+ dbHashes.slaves[j].md5,
+ "hashes not same for: " + rsNode.url + " slave: " + j);
+ }
+ });
-assert.eq( num , db.foo.find().count() , "C1" );
-assert.eq( num , db.foo.find().itcount() , "C2" );
-assert.eq( num , db.foo.find().sort( { _id : 1 } ).itcount() , "C3" );
-assert.eq( num , db.foo.find().sort( { _id : -1 } ).itcount() , "C4" );
+ assert.eq(num, db.foo.find().count(), "C1");
+ assert.eq(num, db.foo.find().itcount(), "C2");
+ assert.eq(num, db.foo.find().sort({_id: 1}).itcount(), "C3");
+ assert.eq(num, db.foo.find().sort({_id: -1}).itcount(), "C4");
-db.foo.ensureIndex( { x : 1 } );
-assert.eq( num , db.foo.find().sort( { x : 1 } ).itcount() , "C5" );
-assert.eq( num , db.foo.find().sort( { x : -1 } ).itcount() , "C6" );
+ db.foo.ensureIndex({x: 1});
+ assert.eq(num, db.foo.find().sort({x: 1}).itcount(), "C5");
+ assert.eq(num, db.foo.find().sort({x: -1}).itcount(), "C6");
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index affe175eaa4..7c323ac5d44 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -10,247 +10,244 @@
//
(function() {
-'use strict';
-
-// The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
-// from stepping down during migrations on slow evergreen builders.
-var s = new ShardingTest({ shards: 2,
- other: {
- chunkSize: 1,
- rs0: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- },
- rs1: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- }
- } });
-
-var db = s.getDB("test");
-var t = db.foo;
-
-s.adminCommand({ enablesharding: "test" });
-s.ensurePrimaryShard('test', 'test-rs0');
-
-// -------------------------------------------------------------------------------------------
-// ---------- test that config server updates when replica set config changes ----------------
-// -------------------------------------------------------------------------------------------
-
-
-db.foo.save({ _id: 5,x: 17 });
-assert.eq(1, db.foo.count());
-
-s.config.databases.find().forEach(printjson);
-s.config.shards.find().forEach(printjson);
-
-var dbPrimaryShardId = s.getPrimaryShardIdForDatabase("test");
-
-function countNodes(){
- var x = s.config.shards.findOne({ _id: dbPrimaryShardId });
- return x.host.split(",").length;
-}
-
-assert.eq(2, countNodes(), "A1");
-
-var rs = s.getRSEntry(dbPrimaryShardId);
-rs.test.add();
-try {
- rs.test.reInitiate();
-}
-catch (e){
- // this os ok as rs's may close connections on a change of master
- print(e);
-}
-
-assert.soon(
- function(){
+ 'use strict';
+
+ // The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
+ // from stepping down during migrations on slow evergreen builders.
+ var s = new ShardingTest({
+ shards: 2,
+ other: {
+ chunkSize: 1,
+ rs0: {
+ nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ },
+ rs1: {
+ nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ }
+ }
+ });
+
+ var db = s.getDB("test");
+ var t = db.foo;
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'test-rs0');
+
+ // -------------------------------------------------------------------------------------------
+ // ---------- test that config server updates when replica set config changes ----------------
+ // -------------------------------------------------------------------------------------------
+
+ db.foo.save({_id: 5, x: 17});
+ assert.eq(1, db.foo.count());
+
+ s.config.databases.find().forEach(printjson);
+ s.config.shards.find().forEach(printjson);
+
+ var dbPrimaryShardId = s.getPrimaryShardIdForDatabase("test");
+
+ function countNodes() {
+ var x = s.config.shards.findOne({_id: dbPrimaryShardId});
+ return x.host.split(",").length;
+ }
+
+ assert.eq(2, countNodes(), "A1");
+
+ var rs = s.getRSEntry(dbPrimaryShardId);
+ rs.test.add();
+ try {
+ rs.test.reInitiate();
+ } catch (e) {
+ // this os ok as rs's may close connections on a change of master
+ print(e);
+ }
+
+ assert.soon(function() {
try {
printjson(rs.test.getPrimary().getDB("admin").runCommand("isMaster"));
s.config.shards.find().forEach(printjsononeline);
return countNodes() == 3;
- }
- catch (e){
+ } catch (e) {
print(e);
}
}, "waiting for config server to update", 180 * 1000, 1000);
-// cleanup after adding node
-for (var i = 0; i < 5; i++) {
- try {
- db.foo.findOne();
- }
- catch (e) {
-
+ // cleanup after adding node
+ for (var i = 0; i < 5; i++) {
+ try {
+ db.foo.findOne();
+ } catch (e) {
+ }
}
-}
-
-jsTest.log("Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...");
-rs.test.awaitReplication();
-// Make sure we wait for secondaries here - otherwise a secondary could come online later and be used for the
-// count command before being fully replicated
-jsTest.log("Awaiting secondary status of all nodes");
-rs.test.waitForState(rs.test.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
-
-// -------------------------------------------------------------------------------------------
-// ---------- test routing to slaves ----------------
-// -------------------------------------------------------------------------------------------
-
-// --- not sharded ----
-var m = new Mongo(s.s.name);
-var ts = m.getDB("test").foo;
+ jsTest.log(
+ "Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...");
+ rs.test.awaitReplication();
+ // Make sure we wait for secondaries here - otherwise a secondary could come online later and be
+ // used for the
+ // count command before being fully replicated
+ jsTest.log("Awaiting secondary status of all nodes");
+ rs.test.waitForState(rs.test.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
-var before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ // -------------------------------------------------------------------------------------------
+ // ---------- test routing to slaves ----------------
+ // -------------------------------------------------------------------------------------------
-for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne().x, "B1");
-}
+ // --- not sharded ----
-m.setSlaveOk();
+ var m = new Mongo(s.s.name);
+ var ts = m.getDB("test").foo;
-for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne().x, "B2");
-}
+ var before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-var after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-
-printjson(before);
-printjson(after);
-
-assert.lte(before.query + 10, after.query, "B3");
+ for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne().x, "B1");
+ }
-// --- add more data ----
+ m.setSlaveOk();
-db.foo.ensureIndex({ x: 1 });
+ for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne().x, "B2");
+ }
-var bulk = db.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < 100; i++) {
- if (i == 17) continue;
- bulk.insert({ x: i });
-}
-assert.writeOK(bulk.execute({ w: 3 }));
+ var after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-// Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
-// replication for this and future tests to pass
-rs.test.awaitReplication();
+ printjson(before);
+ printjson(after);
-assert.eq(100, ts.count(), "B4");
-assert.eq(100, ts.find().itcount(), "B5");
-assert.eq(100, ts.find().batchSize(5).itcount(), "B6");
+ assert.lte(before.query + 10, after.query, "B3");
-t.find().batchSize(3).next();
-gc(); gc(); gc();
+ // --- add more data ----
-// --- sharded ----
+ db.foo.ensureIndex({x: 1});
-assert.eq(100, db.foo.count(), "C1");
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++) {
+ if (i == 17)
+ continue;
+ bulk.insert({x: i});
+ }
+ assert.writeOK(bulk.execute({w: 3}));
-assert.commandWorked(s.s0.adminCommand({ shardcollection: "test.foo", key: { x: 1 } }));
+ // Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
+ // replication for this and future tests to pass
+ rs.test.awaitReplication();
-// We're doing some manual chunk stuff, so stop the balancer first
-s.stopBalancer();
+ assert.eq(100, ts.count(), "B4");
+ assert.eq(100, ts.find().itcount(), "B5");
+ assert.eq(100, ts.find().batchSize(5).itcount(), "B6");
-assert.eq(100, t.count(), "C2");
-assert.commandWorked(s.s0.adminCommand({ split: "test.foo", middle: { x: 50 } }));
+ t.find().batchSize(3).next();
+ gc();
+ gc();
+ gc();
-s.printShardingStatus();
+ // --- sharded ----
-var other = s.config.shards.findOne({ _id: { $ne: dbPrimaryShardId } });
-assert.commandWorked(s.getDB('admin').runCommand({ moveChunk: "test.foo",
- find: { x: 10 },
- to: other._id,
- _secondaryThrottle: true,
- writeConcern: { w: 2 },
- _waitForDelete: true }));
-assert.eq(100, t.count(), "C3");
+ assert.eq(100, db.foo.count(), "C1");
-assert.eq(50, rs.test.getPrimary().getDB("test").foo.count(), "C4");
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
-// by non-shard key
+ // We're doing some manual chunk stuff, so stop the balancer first
+ s.stopBalancer();
-m = new Mongo(s.s.name);
-ts = m.getDB("test").foo;
+ assert.eq(100, t.count(), "C2");
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 50}}));
-before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ s.printShardingStatus();
-for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne({ _id: 5 }).x, "D1");
-}
+ var other = s.config.shards.findOne({_id: {$ne: dbPrimaryShardId}});
+ assert.commandWorked(s.getDB('admin').runCommand({
+ moveChunk: "test.foo",
+ find: {x: 10},
+ to: other._id,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ _waitForDelete: true
+ }));
+ assert.eq(100, t.count(), "C3");
-m.setSlaveOk();
-for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne({ _id: 5 }).x, "D2");
-}
+ assert.eq(50, rs.test.getPrimary().getDB("test").foo.count(), "C4");
-after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ // by non-shard key
-assert.lte(before.query + 10, after.query, "D3");
+ m = new Mongo(s.s.name);
+ ts = m.getDB("test").foo;
-// by shard key
+ before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-m = new Mongo(s.s.name);
-m.forceWriteMode("commands");
+ for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne({_id: 5}).x, "D1");
+ }
-s.printShardingStatus();
+ m.setSlaveOk();
+ for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne({_id: 5}).x, "D2");
+ }
-ts = m.getDB("test").foo;
+ after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ assert.lte(before.query + 10, after.query, "D3");
-for (var i = 0; i < 10; i++) {
- assert.eq(57, ts.findOne({ x: 57 }).x, "E1");
-}
+ // by shard key
-m.setSlaveOk();
-for (var i = 0; i < 10; i++) {
- assert.eq(57, ts.findOne({ x: 57 }).x, "E2");
-}
+ m = new Mongo(s.s.name);
+ m.forceWriteMode("commands");
-after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ s.printShardingStatus();
-assert.lte(before.query + 10, after.query, "E3");
+ ts = m.getDB("test").foo;
-assert.eq(100, ts.count(), "E4");
-assert.eq(100, ts.find().itcount(), "E5");
-printjson(ts.find().batchSize(5).explain());
+ before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-// fsyncLock the secondaries
-rs.test.getSecondaries().forEach(function(secondary) {
- assert.commandWorked(secondary.getDB("test").fsyncLock());
-});
-// Modify data only on the primary replica of the primary shard.
-// { x: 60 } goes to the shard of "rs", which is the primary shard.
-assert.writeOK(ts.insert({ primaryOnly: true, x: 60 }));
-// Read from secondary through mongos, the doc is not there due to replication delay or fsync.
-// But we can guarantee not to read from primary.
-assert.eq(0, ts.find({ primaryOnly: true, x: 60 }).itcount());
-// Unlock the secondaries
-rs.test.getSecondaries().forEach(function(secondary) {
- secondary.getDB("test").fsyncUnlock();
-});
-// Clean up the data
-assert.writeOK(ts.remove({ primaryOnly: true, x: 60 }, { writeConcern: { w: 3 }}));
+ for (var i = 0; i < 10; i++) {
+ assert.eq(57, ts.findOne({x: 57}).x, "E1");
+ }
-for (var i = 0; i < 10; i++) {
- m = new Mongo(s.s.name);
m.setSlaveOk();
- ts = m.getDB("test").foo;
- assert.eq(100, ts.find().batchSize(5).itcount(), "F2." + i);
-}
+ for (var i = 0; i < 10; i++) {
+ assert.eq(57, ts.findOne({x: 57}).x, "E2");
+ }
-for (var i = 0; i < 10; i++) {
- m = new Mongo(s.s.name);
- ts = m.getDB("test").foo;
- assert.eq(100, ts.find().batchSize(5).itcount(), "F3." + i);
-}
+ after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+
+ assert.lte(before.query + 10, after.query, "E3");
+
+ assert.eq(100, ts.count(), "E4");
+ assert.eq(100, ts.find().itcount(), "E5");
+ printjson(ts.find().batchSize(5).explain());
+
+ // fsyncLock the secondaries
+ rs.test.getSecondaries().forEach(function(secondary) {
+ assert.commandWorked(secondary.getDB("test").fsyncLock());
+ });
+ // Modify data only on the primary replica of the primary shard.
+ // { x: 60 } goes to the shard of "rs", which is the primary shard.
+ assert.writeOK(ts.insert({primaryOnly: true, x: 60}));
+ // Read from secondary through mongos, the doc is not there due to replication delay or fsync.
+ // But we can guarantee not to read from primary.
+ assert.eq(0, ts.find({primaryOnly: true, x: 60}).itcount());
+ // Unlock the secondaries
+ rs.test.getSecondaries().forEach(function(secondary) {
+ secondary.getDB("test").fsyncUnlock();
+ });
+ // Clean up the data
+ assert.writeOK(ts.remove({primaryOnly: true, x: 60}, {writeConcern: {w: 3}}));
+
+ for (var i = 0; i < 10; i++) {
+ m = new Mongo(s.s.name);
+ m.setSlaveOk();
+ ts = m.getDB("test").foo;
+ assert.eq(100, ts.find().batchSize(5).itcount(), "F2." + i);
+ }
+
+ for (var i = 0; i < 10; i++) {
+ m = new Mongo(s.s.name);
+ ts = m.getDB("test").foo;
+ assert.eq(100, ts.find().batchSize(5).itcount(), "F3." + i);
+ }
-printjson(db.adminCommand("getShardMap"));
+ printjson(db.adminCommand("getShardMap"));
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_state_after_stepdown.js b/jstests/sharding/sharding_state_after_stepdown.js
index 319f00cceaf..f5d9896a0d0 100644
--- a/jstests/sharding/sharding_state_after_stepdown.js
+++ b/jstests/sharding/sharding_state_after_stepdown.js
@@ -6,158 +6,181 @@
// @tags: [requires_persistence]
(function() {
-'use strict';
-
-var st = new ShardingTest({ shards: 2,
- mongos: 1,
- other: {
- rs: true,
- rsOptions: { nodes : 1 }
- }
- });
-
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var shards = mongos.getCollection("config.shards").find().toArray();
-
-var coll = mongos.getCollection("foo.bar");
-var collSharded = mongos.getCollection("foo.barSharded");
-
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB() + "" }));
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
-assert.commandWorked(admin.runCommand({ shardCollection : collSharded.toString(),
- key : { _id : 1 } }));
-assert.commandWorked(admin.runCommand({ moveChunk : collSharded.toString(),
- find : { _id : 0 },
- to : shards[1]._id }));
-
-assert.writeOK(coll.insert({ some : "data" }));
-assert.writeOK(collSharded.insert({ some : "data" }));
-assert.eq(2, mongos.adminCommand({ getShardVersion : collSharded.toString() }).version.t);
-
-st.printShardingStatus();
-
-// Restart both primaries to reset our sharding data
-var restartPrimaries = function() {
- var rs0Primary = st.rs0.getPrimary();
- var rs1Primary = st.rs1.getPrimary();
-
- st.rs0.stop(rs0Primary);
- st.rs1.stop(rs1Primary);
-
- ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], { ok : false });
-
- st.rs0.start(rs0Primary, { restart : true });
- st.rs1.start(rs1Primary, { restart : true });
-
- ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], { ismaster : true });
-};
-
-restartPrimaries();
-
-// Sharding data gets initialized either when shards are hit by an unsharded query or if some
-// metadata operation was run before the step down, which wrote a minOpTime recovery record (CSRS
-// only). In this case we did a moveChunk above from shard0 to shard1, so we will have this record
-// on shard0.
-if (st.configRS) {
- assert.neq("",
- st.rs0.getPrimary().adminCommand({ getShardVersion: coll.toString() }).configServer);
-}
-else {
- assert.eq("",
- st.rs0.getPrimary().adminCommand({ getShardVersion: coll.toString() }).configServer);
-}
-assert.eq("",
- st.rs1.getPrimary().adminCommand({ getShardVersion : coll.toString() }).configServer);
-
-// Doing a find only accesses the primary (rs0), which is already recovered. Ensure that the
-// secondary still has no sharding knowledge.
-assert.neq(null, coll.findOne({}));
-assert.eq("",
- st.rs1.getPrimary().adminCommand({ getShardVersion : coll.toString() }).configServer);
-
-//
-//
-// Sharding data initialized when shards are hit by a sharded query
-assert.neq(null, collSharded.findOne({}));
-assert.neq("",
- st.rs0.getPrimary().adminCommand({ getShardVersion : coll.toString() }).configServer);
-assert.neq("",
- st.rs1.getPrimary().adminCommand({ getShardVersion : coll.toString() }).configServer);
-
-
-// Stepdown both primaries to reset our sharding data
-var stepDownPrimaries = function() {
-
- var rs0Primary = st.rs0.getPrimary();
- var rs1Primary = st.rs1.getPrimary();
-
- try {
- rs0Primary.adminCommand({ replSetStepDown : 1000 * 1000, force : true });
- assert(false);
- }
- catch(ex) {
- // Expected connection exception, will check for stepdown later
- }
-
- try {
- rs1Primary.adminCommand({ replSetStepDown : 1000 * 1000, force : true });
- assert(false);
- }
- catch(ex) {
- // Expected connection exception, will check for stepdown later
+ 'use strict';
+
+ var st = new ShardingTest({shards: 2, mongos: 1, other: {rs: true, rsOptions: {nodes: 1}}});
+
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
+
+ var coll = mongos.getCollection("foo.bar");
+ var collSharded = mongos.getCollection("foo.barSharded");
+
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(
+ admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: collSharded.toString(), find: {_id: 0}, to: shards[1]._id}));
+
+ assert.writeOK(coll.insert({some: "data"}));
+ assert.writeOK(collSharded.insert({some: "data"}));
+ assert.eq(2, mongos.adminCommand({getShardVersion: collSharded.toString()}).version.t);
+
+ st.printShardingStatus();
+
+ // Restart both primaries to reset our sharding data
+ var restartPrimaries = function() {
+ var rs0Primary = st.rs0.getPrimary();
+ var rs1Primary = st.rs1.getPrimary();
+
+ st.rs0.stop(rs0Primary);
+ st.rs1.stop(rs1Primary);
+
+ ReplSetTest.awaitRSClientHosts(mongos,
+ [rs0Primary, rs1Primary],
+ {
+ ok:
+ false
+ });
+
+ st.rs0.start(rs0Primary, {restart: true});
+ st.rs1.start(rs1Primary, {restart: true});
+
+ ReplSetTest.awaitRSClientHosts(mongos,
+ [rs0Primary, rs1Primary],
+ {
+ ismaster:
+ true
+ });
+ };
+
+ restartPrimaries();
+
+ // Sharding data gets initialized either when shards are hit by an unsharded query or if some
+ // metadata operation was run before the step down, which wrote a minOpTime recovery record
+ // (CSRS
+ // only). In this case we did a moveChunk above from shard0 to shard1, so we will have this
+ // record
+ // on shard0.
+ if (st.configRS) {
+ assert.neq(
+ "", st.rs0.getPrimary().adminCommand({getShardVersion: coll.toString()}).configServer);
+ } else {
+ assert.eq(
+ "", st.rs0.getPrimary().adminCommand({getShardVersion: coll.toString()}).configServer);
}
+ assert.eq("",
+ st.rs1.getPrimary().adminCommand({getShardVersion: coll.toString()}).configServer);
- ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], { secondary : true });
-
- assert.commandWorked(new Mongo(rs0Primary.host).adminCommand({ replSetFreeze : 0 }));
- assert.commandWorked(new Mongo(rs1Primary.host).adminCommand({ replSetFreeze : 0 }));
-
- rs0Primary = st.rs0.getPrimary();
- rs1Primary = st.rs1.getPrimary();
-
- // Flush connections to avoid transient issues with conn pooling
- assert.commandWorked(rs0Primary.adminCommand({ connPoolSync : true }));
- assert.commandWorked(rs1Primary.adminCommand({ connPoolSync : true }));
-
- ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], { ismaster : true });
-};
-
-stepDownPrimaries();
-
-//
-//
-// No sharding metadata until shards are hit by a metadata operation
-assert.eq({},
- st.rs0.getPrimary().adminCommand(
- { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
-assert.eq({},
- st.rs1.getPrimary().adminCommand(
- { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
-
-//
-//
-// Metadata commands should enable sharding data implicitly
-assert.commandWorked(mongos.adminCommand({ split : collSharded.toString(), middle : { _id : 0 }}));
-assert.eq({},
- st.rs0.getPrimary().adminCommand(
- { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
-assert.neq({},
- st.rs1.getPrimary().adminCommand(
- { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
+ // Doing a find only accesses the primary (rs0), which is already recovered. Ensure that the
+ // secondary still has no sharding knowledge.
+ assert.neq(null, coll.findOne({}));
+ assert.eq("",
+ st.rs1.getPrimary().adminCommand({getShardVersion: coll.toString()}).configServer);
-//
-//
-// MoveChunk command should enable sharding data implicitly on TO-shard
-assert.commandWorked(mongos.adminCommand({ moveChunk : collSharded.toString(), find : { _id : 0 },
- to : shards[0]._id }));
-assert.neq({},
- st.rs0.getPrimary().adminCommand(
- { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
-assert.neq({},
- st.rs1.getPrimary().adminCommand(
- { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
-
-st.stop();
+ //
+ //
+ // Sharding data initialized when shards are hit by a sharded query
+ assert.neq(null, collSharded.findOne({}));
+ assert.neq("",
+ st.rs0.getPrimary().adminCommand({getShardVersion: coll.toString()}).configServer);
+ assert.neq("",
+ st.rs1.getPrimary().adminCommand({getShardVersion: coll.toString()}).configServer);
+
+ // Stepdown both primaries to reset our sharding data
+ var stepDownPrimaries = function() {
+
+ var rs0Primary = st.rs0.getPrimary();
+ var rs1Primary = st.rs1.getPrimary();
+
+ try {
+ rs0Primary.adminCommand({replSetStepDown: 1000 * 1000, force: true});
+ assert(false);
+ } catch (ex) {
+ // Expected connection exception, will check for stepdown later
+ }
+
+ try {
+ rs1Primary.adminCommand({replSetStepDown: 1000 * 1000, force: true});
+ assert(false);
+ } catch (ex) {
+ // Expected connection exception, will check for stepdown later
+ }
+
+ ReplSetTest.awaitRSClientHosts(mongos,
+ [rs0Primary, rs1Primary],
+ {
+ secondary:
+ true
+ });
+
+ assert.commandWorked(new Mongo(rs0Primary.host).adminCommand({replSetFreeze: 0}));
+ assert.commandWorked(new Mongo(rs1Primary.host).adminCommand({replSetFreeze: 0}));
+
+ rs0Primary = st.rs0.getPrimary();
+ rs1Primary = st.rs1.getPrimary();
+
+ // Flush connections to avoid transient issues with conn pooling
+ assert.commandWorked(rs0Primary.adminCommand({connPoolSync: true}));
+ assert.commandWorked(rs1Primary.adminCommand({connPoolSync: true}));
+
+ ReplSetTest.awaitRSClientHosts(mongos,
+ [rs0Primary, rs1Primary],
+ {
+ ismaster:
+ true
+ });
+ };
+
+ stepDownPrimaries();
+
+ //
+ //
+ // No sharding metadata until shards are hit by a metadata operation
+ assert.eq({},
+ st.rs0.getPrimary().adminCommand({
+ getShardVersion: collSharded.toString(),
+ fullMetadata: true
+ }).metadata);
+ assert.eq({},
+ st.rs1.getPrimary().adminCommand({
+ getShardVersion: collSharded.toString(),
+ fullMetadata: true
+ }).metadata);
+
+ //
+ //
+ // Metadata commands should enable sharding data implicitly
+ assert.commandWorked(mongos.adminCommand({split: collSharded.toString(), middle: {_id: 0}}));
+ assert.eq({},
+ st.rs0.getPrimary().adminCommand({
+ getShardVersion: collSharded.toString(),
+ fullMetadata: true
+ }).metadata);
+ assert.neq({},
+ st.rs1.getPrimary().adminCommand({
+ getShardVersion: collSharded.toString(),
+ fullMetadata: true
+ }).metadata);
+
+ //
+ //
+ // MoveChunk command should enable sharding data implicitly on TO-shard
+ assert.commandWorked(mongos.adminCommand(
+ {moveChunk: collSharded.toString(), find: {_id: 0}, to: shards[0]._id}));
+ assert.neq({},
+ st.rs0.getPrimary().adminCommand({
+ getShardVersion: collSharded.toString(),
+ fullMetadata: true
+ }).metadata);
+ assert.neq({},
+ st.rs1.getPrimary().adminCommand({
+ getShardVersion: collSharded.toString(),
+ fullMetadata: true
+ }).metadata);
+
+ st.stop();
})();
diff --git a/jstests/sharding/sharding_system_namespaces.js b/jstests/sharding/sharding_system_namespaces.js
index 27ba183dd64..f721c07634d 100644
--- a/jstests/sharding/sharding_system_namespaces.js
+++ b/jstests/sharding/sharding_system_namespaces.js
@@ -11,7 +11,7 @@
// P.S. wiredtiger options are not valid for MMAPv1, but MMAPv1 will
// keep and ignore them.
-var st = new ShardingTest({ shards : 2 });
+var st = new ShardingTest({shards: 2});
var db = st.s.getDB("test");
var coll = db.sharding_system_namespaces;
@@ -24,48 +24,39 @@ var storageEngines = st.shard0.getDB("local").serverBuildInfo().storageEngines;
print("Supported storage engines: " + storageEngines);
if (Array.contains(storageEngines, "wiredTiger")) {
-
function checkCollectionOptions(database) {
- var collectionsInfos = database.getCollectionInfos();
- printjson(collectionsInfos);
- var info = collectionsInfos.filter(function(c) {
- return c.name == "sharding_system_namespaces";
- })[0];
- assert.eq(info.options.storageEngine.wiredTiger.configString, "block_compressor=zlib");
+ var collectionsInfos = database.getCollectionInfos();
+ printjson(collectionsInfos);
+ var info = collectionsInfos.filter(function(c) {
+ return c.name == "sharding_system_namespaces";
+ })[0];
+ assert.eq(info.options.storageEngine.wiredTiger.configString, "block_compressor=zlib");
}
db.createCollection("sharding_system_namespaces",
- {
- storageEngine: {
- wiredTiger: { configString: "block_compressor=zlib" }
- }
- });
+ {storageEngine: {wiredTiger: {configString: "block_compressor=zlib"}}});
checkCollectionOptions(db);
- assert.commandWorked(db.adminCommand({ enableSharding: 'test' }));
+ assert.commandWorked(db.adminCommand({enableSharding: 'test'}));
st.ensurePrimaryShard('test', 'shard0001');
- assert.commandWorked(db.adminCommand({ shardCollection: coll + '', key: { x: 1 }}));
+ assert.commandWorked(db.adminCommand({shardCollection: coll + '', key: {x: 1}}));
coll.insert({x: 0});
coll.insert({x: 10});
- assert.commandWorked(db.adminCommand({ split: coll + '', middle: { x: 5 }}));
+ assert.commandWorked(db.adminCommand({split: coll + '', middle: {x: 5}}));
st.printShardingStatus();
var primaryShard = st.getPrimaryShard("test");
- anotherShard = st.getOther( primaryShard );
- assert.commandWorked(db.adminCommand({
- movechunk: coll + '',
- find: { x: 5 },
- to: anotherShard.name
- }));
+ anotherShard = st.getOther(primaryShard);
+ assert.commandWorked(
+ db.adminCommand({movechunk: coll + '', find: {x: 5}, to: anotherShard.name}));
st.printShardingStatus();
checkCollectionOptions(anotherShard.getDB("test"));
-}
-else {
+} else {
print("Skipping test. wiredTiger engine not supported by mongod binary.");
}
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index 2ff8e1a4daf..57bae9dc390 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -1,106 +1,110 @@
(function() {
-var s = new ShardingTest({ name: "sort1",
- shards: 2,
- mongos: 2 });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.data" , key : { 'sub.num' : 1 } } );
-
-db = s.getDB( "test" );
-
-N = 100;
-
-forward = [];
-backward = [];
-for ( i=0; i<N; i++ ){
- db.data.insert( { _id : i , sub: {num : i , x : N - i }} );
- forward.push( i );
- backward.push( ( N - 1 ) - i );
-}
-
-s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 33 } } );
-s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 66 } } );
-
-s.adminCommand({ movechunk : "test.data",
- find : { 'sub.num' : 50 },
- to : s.getOther( s.getPrimaryShard( "test" ) ).name,
- waitForDelete : true });
-
-assert.lte( 3 , s.config.chunks.find().itcount() , "A1" );
-
-temp = s.config.chunks.find().sort( { min : 1 } ).toArray();
-temp.forEach( printjsononeline );
-
-z = 0;
-for ( ; z<temp.length; z++ )
- if ( temp[z].min["sub.num"] <= 50 && temp[z].max["sub.num"] > 50 )
- break;
-
-assert.eq( temp[z-1].shard , temp[z+1].shard , "A2" );
-assert.neq( temp[z-1].shard , temp[z].shard , "A3" );
-
-temp = db.data.find().sort( { 'sub.num' : 1 } ).toArray();
-assert.eq( N , temp.length , "B1" );
-for ( i=0; i<100; i++ ){
- assert.eq( i , temp[i].sub.num , "B2" );
-}
-
-
-db.data.find().sort( { 'sub.num' : 1 } ).toArray();
-s.getPrimaryShard("test").getDB( "test" ).data.find().sort( { 'sub.num' : 1 } ).toArray();
-
-a = Date.timeFunc( function(){ z = db.data.find().sort( { 'sub.num' : 1 } ).toArray(); } , 200 );
-assert.eq( 100 , z.length , "C1" );
-b = 1.5 * Date.timeFunc( function(){
- z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num' : 1}).toArray();
- }, 200 );
-assert.eq( 67 , z.length , "C2" );
-
-print( "a: " + a + " b:" + b + " mongos slow down: " + Math.ceil( 100 * ( ( a - b ) / b ) ) + "%" );
-
-// -- secondary index sorting
-
-function getSorted( by , dir , proj ){
- var s = {};
- s[by] = dir || 1;
- printjson( s );
- var cur = db.data.find( {} , proj || {} ).sort( s );
- return terse( cur.map( function(z){ return z.sub.num; } ) );
-}
-
-function terse( a ){
- var s = "";
- for ( var i=0; i<a.length; i++ ){
- if ( i > 0 )
- s += ",";
- s += a[i];
+ var s = new ShardingTest({name: "sort1", shards: 2, mongos: 2});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.data", key: {'sub.num': 1}});
+
+ db = s.getDB("test");
+
+ N = 100;
+
+ forward = [];
+ backward = [];
+ for (i = 0; i < N; i++) {
+ db.data.insert({_id: i, sub: {num: i, x: N - i}});
+ forward.push(i);
+ backward.push((N - 1) - i);
+ }
+
+ s.adminCommand({split: "test.data", middle: {'sub.num': 33}});
+ s.adminCommand({split: "test.data", middle: {'sub.num': 66}});
+
+ s.adminCommand({
+ movechunk: "test.data",
+ find: {'sub.num': 50},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ waitForDelete: true
+ });
+
+ assert.lte(3, s.config.chunks.find().itcount(), "A1");
+
+ temp = s.config.chunks.find().sort({min: 1}).toArray();
+ temp.forEach(printjsononeline);
+
+ z = 0;
+ for (; z < temp.length; z++)
+ if (temp[z].min["sub.num"] <= 50 && temp[z].max["sub.num"] > 50)
+ break;
+
+ assert.eq(temp[z - 1].shard, temp[z + 1].shard, "A2");
+ assert.neq(temp[z - 1].shard, temp[z].shard, "A3");
+
+ temp = db.data.find().sort({'sub.num': 1}).toArray();
+ assert.eq(N, temp.length, "B1");
+ for (i = 0; i < 100; i++) {
+ assert.eq(i, temp[i].sub.num, "B2");
+ }
+
+ db.data.find().sort({'sub.num': 1}).toArray();
+ s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
+
+ a = Date.timeFunc(function() {
+ z = db.data.find().sort({'sub.num': 1}).toArray();
+ }, 200);
+ assert.eq(100, z.length, "C1");
+ b = 1.5 *
+ Date.timeFunc(function() {
+ z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
+ }, 200);
+ assert.eq(67, z.length, "C2");
+
+ print("a: " + a + " b:" + b + " mongos slow down: " + Math.ceil(100 * ((a - b) / b)) + "%");
+
+ // -- secondary index sorting
+
+ function getSorted(by, dir, proj) {
+ var s = {};
+ s[by] = dir || 1;
+ printjson(s);
+ var cur = db.data.find({}, proj || {}).sort(s);
+ return terse(cur.map(function(z) {
+ return z.sub.num;
+ }));
+ }
+
+ function terse(a) {
+ var s = "";
+ for (var i = 0; i < a.length; i++) {
+ if (i > 0)
+ s += ",";
+ s += a[i];
+ }
+ return s;
}
- return s;
-}
-forward = terse(forward);
-backward = terse(backward);
+ forward = terse(forward);
+ backward = terse(backward);
-assert.eq( forward , getSorted( "sub.num" , 1 ) , "D1" );
-assert.eq( backward , getSorted( "sub.num" , -1 ) , "D2" );
+ assert.eq(forward, getSorted("sub.num", 1), "D1");
+ assert.eq(backward, getSorted("sub.num", -1), "D2");
-assert.eq( backward , getSorted( "sub.x" , 1 ) , "D3" );
-assert.eq( forward , getSorted( "sub.x" , -1 ) , "D4" );
+ assert.eq(backward, getSorted("sub.x", 1), "D3");
+ assert.eq(forward, getSorted("sub.x", -1), "D4");
-assert.eq( backward , getSorted( "sub.x" , 1 , { 'sub.num' : 1 } ) , "D5" );
-assert.eq( forward , getSorted( "sub.x" , -1 , { 'sub.num' : 1 } ) , "D6" );
+ assert.eq(backward, getSorted("sub.x", 1, {'sub.num': 1}), "D5");
+ assert.eq(forward, getSorted("sub.x", -1, {'sub.num': 1}), "D6");
-assert.eq( backward , getSorted( "sub.x" , 1 , { 'sub' : 1 } ) , "D7" );
-assert.eq( forward , getSorted( "sub.x" , -1 , { 'sub' : 1 } ) , "D8" );
+ assert.eq(backward, getSorted("sub.x", 1, {'sub': 1}), "D7");
+ assert.eq(forward, getSorted("sub.x", -1, {'sub': 1}), "D8");
-assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0 } ) , "D9" );
-assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0 } ) , "D10" );
+ assert.eq(backward, getSorted("sub.x", 1, {'_id': 0}), "D9");
+ assert.eq(forward, getSorted("sub.x", -1, {'_id': 0}), "D10");
-assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0, 'sub.num':1 } ) , "D11" );
-assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0, 'sub.num':1 } ) , "D12" );
+ assert.eq(backward, getSorted("sub.x", 1, {'_id': 0, 'sub.num': 1}), "D11");
+ assert.eq(forward, getSorted("sub.x", -1, {'_id': 0, 'sub.num': 1}), "D12");
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/split_chunk.js b/jstests/sharding/split_chunk.js
index 0f3a33f324d..96368ff8023 100644
--- a/jstests/sharding/split_chunk.js
+++ b/jstests/sharding/split_chunk.js
@@ -5,11 +5,11 @@
* either the upper or lower bound of the entire shard key space.
*/
-var st = new ShardingTest({ shards: 1 });
+var st = new ShardingTest({shards: 1});
st.stopBalancer();
var testDB = st.s.getDB('test');
-testDB.adminCommand({ enableSharding: 'test' });
+testDB.adminCommand({enableSharding: 'test'});
var callSplit = function(db, minKey, maxKey, splitPoints) {
var res = st.s.adminCommand({getShardVersion: "test.user"});
@@ -20,111 +20,115 @@ var callSplit = function(db, minKey, maxKey, splitPoints) {
from: 'shard0000',
min: minKey,
max: maxKey,
- keyPattern: { x: 1 },
+ keyPattern: {x: 1},
splitKeys: splitPoints,
shardVersion: shardVersion,
});
};
var tests = [
-//
-// Lower extreme chunk tests.
-//
-
-// All chunks have 1 doc.
-//
-// Expected doc counts for new chunks:
-// [ MinKey, -2 ): 1
-// [ -2, -1 ): 1
-// [ -1, 0): 1
-//
-function(db) {
- var res = callSplit(db, { x: MinKey }, { x: 0 }, [{ x: -2 }, { x: -1 }]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, { x: MinKey }) == 0, tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, { x: -2 }) == 0, tojson(res.shouldMigrate.max));
-},
-
-// One chunk has single doc, extreme doesn't.
-//
-// Expected doc counts for new chunks:
-// [ MinKey, -1 ): 2
-// [ -1, 0): 1
-//
-function(db) {
- var res = callSplit(db, { x: MinKey }, { x: 0 }, [{ x: -1 }]);
- assert.commandWorked(res);
- assert.eq(res.shouldMigrate, null, tojson(res));
-},
-
-// Only extreme has single doc.
-//
-// Expected doc counts for new chunks:
-// [ MinKey, -2 ): 1
-// [ -2, 0): 2
-//
-function(db) {
- var res = callSplit(db, { x: MinKey }, { x: 0 }, [{ x: -2 }]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, { x: MinKey }) == 0, tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, { x: -2 }) == 0, tojson(res.shouldMigrate.max));
-},
-
-//
-// Upper extreme chunk tests.
-//
-
-// All chunks have 1 doc.
-//
-// Expected doc counts for new chunks:
-// [ 0, 1 ): 1
-// [ 1, 2 ): 1
-// [ 2, MaxKey): 1
-//
-function(db) {
- var res = callSplit(db, { x: 0 }, { x: MaxKey }, [{ x: 1 }, { x: 2 }]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, { x: 2 }) == 0, tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, { x: MaxKey }) == 0, tojson(res.shouldMigrate.max));
-},
-
-// One chunk has single doc, extreme doesn't.
-//
-// Expected doc counts for new chunks:
-// [ 0, 1 ): 1
-// [ 1, MaxKey): 2
-//
-function(db) {
- var res = callSplit(db, { x: 0 }, { x: MaxKey }, [{ x: 1 }]);
- assert.commandWorked(res);
- assert.eq(res.shouldMigrate, null, tojson(res));
-},
-
-// Only extreme has single doc.
-//
-// Expected doc counts for new chunks:
-// [ 0, 2 ): 2
-// [ 2, MaxKey): 1
-//
-function(db) {
- var res = callSplit(db, { x: 0 }, { x: MaxKey }, [{ x: 2 }]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, { x: 2 }) == 0, tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, { x: MaxKey }) == 0, tojson(res.shouldMigrate.max));
-},
+ //
+ // Lower extreme chunk tests.
+ //
+
+ // All chunks have 1 doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ MinKey, -2 ): 1
+ // [ -2, -1 ): 1
+ // [ -1, 0): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -2}, {x: -1}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: MinKey}) == 0,
+ tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: -2}) == 0, tojson(res.shouldMigrate.max));
+ },
+
+ // One chunk has single doc, extreme doesn't.
+ //
+ // Expected doc counts for new chunks:
+ // [ MinKey, -1 ): 2
+ // [ -1, 0): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -1}]);
+ assert.commandWorked(res);
+ assert.eq(res.shouldMigrate, null, tojson(res));
+ },
+
+ // Only extreme has single doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ MinKey, -2 ): 1
+ // [ -2, 0): 2
+ //
+ function(db) {
+ var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -2}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: MinKey}) == 0,
+ tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: -2}) == 0, tojson(res.shouldMigrate.max));
+ },
+
+ //
+ // Upper extreme chunk tests.
+ //
+
+ // All chunks have 1 doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ 0, 1 ): 1
+ // [ 1, 2 ): 1
+ // [ 2, MaxKey): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 1}, {x: 2}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: 2}) == 0, tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}) == 0,
+ tojson(res.shouldMigrate.max));
+ },
+
+ // One chunk has single doc, extreme doesn't.
+ //
+ // Expected doc counts for new chunks:
+ // [ 0, 1 ): 1
+ // [ 1, MaxKey): 2
+ //
+ function(db) {
+ var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 1}]);
+ assert.commandWorked(res);
+ assert.eq(res.shouldMigrate, null, tojson(res));
+ },
+
+ // Only extreme has single doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ 0, 2 ): 2
+ // [ 2, MaxKey): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 2}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: 2}) == 0, tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}) == 0,
+ tojson(res.shouldMigrate.max));
+ },
];
tests.forEach(function(test) {
// setup
- testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
- testDB.adminCommand({ split: 'test.user', middle: { x: 0 }});
+ testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+ testDB.adminCommand({split: 'test.user', middle: {x: 0}});
for (var x = -3; x < 3; x++) {
- testDB.user.insert({ x: x });
+ testDB.user.insert({x: x});
}
// run test
@@ -135,4 +139,3 @@ tests.forEach(function(test) {
});
st.stop();
-
diff --git a/jstests/sharding/split_large_key.js b/jstests/sharding/split_large_key.js
index a0cdcd61d67..5a8fe060c67 100644
--- a/jstests/sharding/split_large_key.js
+++ b/jstests/sharding/split_large_key.js
@@ -1,68 +1,71 @@
// Test for splitting a chunk with a very large shard key value should not be allowed
// and does not corrupt the config.chunks metadata.
(function() {
-'use strict';
+ 'use strict';
-function verifyChunk(keys, expectFail) {
- // If split failed then there's only 1 chunk
- // With a min & max for the shardKey
- if (expectFail) {
- assert.eq(1, configDB.chunks.find().count(), "Chunks count no split");
- var chunkDoc = configDB.chunks.findOne();
- assert.eq(0, bsonWoCompare(chunkDoc.min, keys.min), "Chunks min");
- assert.eq(0, bsonWoCompare(chunkDoc.max, keys.max), "Chunks max");
- } else {
- assert.eq(2, configDB.chunks.find().count(), "Chunks count split");
+ function verifyChunk(keys, expectFail) {
+ // If split failed then there's only 1 chunk
+ // With a min & max for the shardKey
+ if (expectFail) {
+ assert.eq(1, configDB.chunks.find().count(), "Chunks count no split");
+ var chunkDoc = configDB.chunks.findOne();
+ assert.eq(0, bsonWoCompare(chunkDoc.min, keys.min), "Chunks min");
+ assert.eq(0, bsonWoCompare(chunkDoc.max, keys.max), "Chunks max");
+ } else {
+ assert.eq(2, configDB.chunks.find().count(), "Chunks count split");
+ }
}
-}
-// Tests
-// - name: Name of test, used in collection name
-// - key: key to test
-// - keyFieldSize: size of each key field
-// - expectFail: true/false, true if key is too large to pre-split
-var tests = [
- {name: "Key size small", key: {x: 1}, keyFieldSize: 100, expectFail: false},
- {name: "Key size 512", key: {x: 1}, keyFieldSize: 512, expectFail: true},
- {name: "Key size 2000", key: {x: 1}, keyFieldSize: 2000, expectFail: true},
- {name: "Compound key size small", key: {x: 1, y: 1}, keyFieldSize: 100, expectFail: false},
- {name: "Compound key size 512", key: {x: 1, y: 1}, keyFieldSize: 256, expectFail: true},
- {name: "Compound key size 10000", key: {x: 1, y: 1}, keyFieldSize: 5000, expectFail: true},
-];
+ // Tests
+ // - name: Name of test, used in collection name
+ // - key: key to test
+ // - keyFieldSize: size of each key field
+ // - expectFail: true/false, true if key is too large to pre-split
+ var tests = [
+ {name: "Key size small", key: {x: 1}, keyFieldSize: 100, expectFail: false},
+ {name: "Key size 512", key: {x: 1}, keyFieldSize: 512, expectFail: true},
+ {name: "Key size 2000", key: {x: 1}, keyFieldSize: 2000, expectFail: true},
+ {name: "Compound key size small", key: {x: 1, y: 1}, keyFieldSize: 100, expectFail: false},
+ {name: "Compound key size 512", key: {x: 1, y: 1}, keyFieldSize: 256, expectFail: true},
+ {name: "Compound key size 10000", key: {x: 1, y: 1}, keyFieldSize: 5000, expectFail: true},
+ ];
-var st = new ShardingTest({ shards: 1 });
-var configDB = st.s.getDB('config');
+ var st = new ShardingTest({shards: 1});
+ var configDB = st.s.getDB('config');
-assert.commandWorked(configDB.adminCommand({ enableSharding: 'test' }));
+ assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
-tests.forEach(function(test){
- var collName = "split_large_key_" + test.name;
- var midKey = {};
- var chunkKeys = {min: {}, max: {}};
- for (var k in test.key) {
- // new Array with join creates string length 1 less than size, so add 1
- midKey[k] = new Array(test.keyFieldSize+1).join('a');
- // min & max keys for each field in the index
- chunkKeys.min[k] = MinKey;
- chunkKeys.max[k] = MaxKey;
- }
+ tests.forEach(function(test) {
+ var collName = "split_large_key_" + test.name;
+ var midKey = {};
+ var chunkKeys = {
+ min: {},
+ max: {}
+ };
+ for (var k in test.key) {
+ // new Array with join creates string length 1 less than size, so add 1
+ midKey[k] = new Array(test.keyFieldSize + 1).join('a');
+ // min & max keys for each field in the index
+ chunkKeys.min[k] = MinKey;
+ chunkKeys.max[k] = MaxKey;
+ }
- assert.commandWorked(
- configDB.adminCommand({ shardCollection: "test." + collName, key: test.key }));
+ assert.commandWorked(
+ configDB.adminCommand({shardCollection: "test." + collName, key: test.key}));
- var res = configDB.adminCommand({ split: "test."+collName, middle: midKey});
- if (test.expectFail) {
- assert(!res.ok, "Split: " + collName);
- assert(res.errmsg !== null, "Split errmsg: " + collName);
- } else {
- assert(res.ok, "Split: " + collName + " " + res.errmsg);
- }
+ var res = configDB.adminCommand({split: "test." + collName, middle: midKey});
+ if (test.expectFail) {
+ assert(!res.ok, "Split: " + collName);
+ assert(res.errmsg !== null, "Split errmsg: " + collName);
+ } else {
+ assert(res.ok, "Split: " + collName + " " + res.errmsg);
+ }
- verifyChunk(chunkKeys, test.expectFail);
+ verifyChunk(chunkKeys, test.expectFail);
- st.s0.getCollection("test." + collName).drop();
-});
+ st.s0.getCollection("test." + collName).drop();
+ });
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/split_with_force.js b/jstests/sharding/split_with_force.js
index 117d17361e0..c66d2f145eb 100644
--- a/jstests/sharding/split_with_force.js
+++ b/jstests/sharding/split_with_force.js
@@ -2,60 +2,62 @@
// Tests autosplit locations with force : true
//
-var options = { chunkSize: 1, // MB
- mongosOptions : { noAutoSplit : "" }
- };
+var options = {
+ chunkSize: 1, // MB
+ mongosOptions: {noAutoSplit: ""}
+};
-var st = new ShardingTest({ shards : 1, mongos : 1, other : options });
+var st = new ShardingTest({shards: 1, mongos: 1, other: options});
st.stopBalancer();
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
-var shardAdmin = st.shard0.getDB( "admin" );
-var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var shardAdmin = st.shard0.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
-jsTest.log( "Insert a bunch of data into a chunk of the collection..." );
+jsTest.log("Insert a bunch of data into a chunk of the collection...");
var bulk = coll.initializeUnorderedBulkOp();
-for ( var i = 0; i < (250 * 1000) + 10; i++ ) {
- bulk.insert({ _id : i });
+for (var i = 0; i < (250 * 1000) + 10; i++) {
+ bulk.insert({_id: i});
}
assert.writeOK(bulk.execute());
-jsTest.log( "Insert a bunch of data into the rest of the collection..." );
+jsTest.log("Insert a bunch of data into the rest of the collection...");
bulk = coll.initializeUnorderedBulkOp();
-for ( var i = 1; i <= (250 * 1000); i++ ) {
- bulk.insert({ _id: -i });
+for (var i = 1; i <= (250 * 1000); i++) {
+ bulk.insert({_id: -i});
}
assert.writeOK(bulk.execute());
-jsTest.log( "Get split points of the chunk using force : true..." );
+jsTest.log("Get split points of the chunk using force : true...");
var maxChunkSizeBytes = 1024 * 1024;
-var splitKeys = shardAdmin.runCommand({ splitVector : coll + "",
- keyPattern : { _id : 1 },
- min : { _id : 0 },
- max : { _id : MaxKey },
- force : true
- }).splitKeys;
+var splitKeys = shardAdmin.runCommand({
+ splitVector: coll + "",
+ keyPattern: {_id: 1},
+ min: {_id: 0},
+ max: {_id: MaxKey},
+ force: true
+}).splitKeys;
-printjson( splitKeys );
-printjson( coll.stats() );
+printjson(splitKeys);
+printjson(coll.stats());
st.printShardingStatus();
-jsTest.log( "Make sure our split is approximately in half..." );
+jsTest.log("Make sure our split is approximately in half...");
-assert.eq( splitKeys.length, 1 );
+assert.eq(splitKeys.length, 1);
var splitKey = splitKeys[0]._id;
-assert.gt( splitKey, ((250 * 1000) / 2) - 50 );
-assert.lt( splitKey, ((250 * 1000) / 2) + 50 );
+assert.gt(splitKey, ((250 * 1000) / 2) - 50);
+assert.lt(splitKey, ((250 * 1000) / 2) + 50);
st.stop();
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index 86fb4667132..ad14f8642cb 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -2,68 +2,70 @@
// Tests autosplit locations with force : true, for small collections
//
-var options = { chunkSize: 1, // MB
- mongosOptions : { noAutoSplit : "" }
- };
+var options = {
+ chunkSize: 1, // MB
+ mongosOptions: {noAutoSplit: ""}
+};
-var st = new ShardingTest({ shards : 1, mongos : 1, other : options });
+var st = new ShardingTest({shards: 1, mongos: 1, other: options});
st.stopBalancer();
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
-var shardAdmin = st.shard0.getDB( "admin" );
-var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var shardAdmin = st.shard0.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
-jsTest.log( "Insert a bunch of data into the low chunk of a collection," +
- " to prevent relying on stats." );
+jsTest.log("Insert a bunch of data into the low chunk of a collection," +
+ " to prevent relying on stats.");
var data128k = "x";
-for ( var i = 0; i < 7; i++ ) data128k += data128k;
+for (var i = 0; i < 7; i++)
+ data128k += data128k;
var bulk = coll.initializeUnorderedBulkOp();
-for ( var i = 0; i < 1024; i++ ) {
- bulk.insert({ _id : -(i + 1) });
+for (var i = 0; i < 1024; i++) {
+ bulk.insert({_id: -(i + 1)});
}
assert.writeOK(bulk.execute());
-jsTest.log( "Insert 32 docs into the high chunk of a collection" );
+jsTest.log("Insert 32 docs into the high chunk of a collection");
bulk = coll.initializeUnorderedBulkOp();
-for ( var i = 0; i < 32; i++ ) {
- bulk.insert({ _id : i });
+for (var i = 0; i < 32; i++) {
+ bulk.insert({_id: i});
}
assert.writeOK(bulk.execute());
-jsTest.log( "Split off MaxKey chunk..." );
+jsTest.log("Split off MaxKey chunk...");
-assert( admin.runCommand({ split : coll + "", middle : { _id : 32 } }).ok );
+assert(admin.runCommand({split: coll + "", middle: {_id: 32}}).ok);
-jsTest.log( "Keep splitting chunk multiple times..." );
+jsTest.log("Keep splitting chunk multiple times...");
st.printShardingStatus();
-for ( var i = 0; i < 5; i++ ) {
- assert( admin.runCommand({ split : coll + "", find : { _id : 0 } }).ok );
+for (var i = 0; i < 5; i++) {
+ assert(admin.runCommand({split: coll + "", find: {_id: 0}}).ok);
st.printShardingStatus();
}
// Make sure we can't split further than 5 (2^5) times
-assert( !admin.runCommand({ split : coll + "", find : { _id : 0 } }).ok );
+assert(!admin.runCommand({split: coll + "", find: {_id: 0}}).ok);
-var chunks = config.chunks.find({ 'min._id' : { $gte : 0, $lt : 32 } }).sort({ min : 1 }).toArray();
-printjson( chunks );
+var chunks = config.chunks.find({'min._id': {$gte: 0, $lt: 32}}).sort({min: 1}).toArray();
+printjson(chunks);
// Make sure the chunks grow by 2x (except the first)
var nextSize = 1;
-for ( var i = 0; i < chunks.size; i++ ) {
- assert.eq( coll.count({ _id : { $gte : chunks[i].min._id, $lt : chunks[i].max._id } }),
- nextSize );
- if ( i != 0 ) nextSize += nextSize;
+for (var i = 0; i < chunks.size; i++) {
+ assert.eq(coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}), nextSize);
+ if (i != 0)
+ nextSize += nextSize;
}
st.stop();
diff --git a/jstests/sharding/ssv_config_check.js b/jstests/sharding/ssv_config_check.js
index d1a1598b63f..edeb559d40d 100644
--- a/jstests/sharding/ssv_config_check.js
+++ b/jstests/sharding/ssv_config_check.js
@@ -3,70 +3,70 @@
* replica set name, but with a member list that is not strictly the same.
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards: 1 });
+ var st = new ShardingTest({shards: 1});
-var testDB = st.s.getDB('test');
-testDB.adminCommand({ enableSharding: 'test' });
-testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+ var testDB = st.s.getDB('test');
+ testDB.adminCommand({enableSharding: 'test'});
+ testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
-// Initialize version on shard.
-testDB.user.insert({ x: 1 });
+ // Initialize version on shard.
+ testDB.user.insert({x: 1});
-var directConn = new Mongo(st.d0.host);
-var adminDB = directConn.getDB('admin');
+ var directConn = new Mongo(st.d0.host);
+ var adminDB = directConn.getDB('admin');
-var configStr = adminDB.runCommand({ getShardVersion: 'test.user' }).configServer;
-var alternateConfigStr = configStr.substring(0, configStr.lastIndexOf(','));
+ var configStr = adminDB.runCommand({getShardVersion: 'test.user'}).configServer;
+ var alternateConfigStr = configStr.substring(0, configStr.lastIndexOf(','));
-var shardDoc = st.s.getDB('config').shards.findOne();
+ var shardDoc = st.s.getDB('config').shards.findOne();
-assert.commandWorked(adminDB.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: alternateConfigStr,
- shard: shardDoc._id,
- shardHost: shardDoc.host
-}));
+ assert.commandWorked(adminDB.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: alternateConfigStr,
+ shard: shardDoc._id,
+ shardHost: shardDoc.host
+ }));
-assert.commandFailed(adminDB.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: 'bad-rs/local:12,local:34',
- shard: shardDoc._id,
- shardHost: shardDoc.host
-}));
+ assert.commandFailed(adminDB.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: 'bad-rs/local:12,local:34',
+ shard: shardDoc._id,
+ shardHost: shardDoc.host
+ }));
-var configAdmin = st.c0.getDB('admin');
-// Initialize internal config string.
-assert.commandWorked(configAdmin.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: configStr,
- shard: 'config'
-}));
+ var configAdmin = st.c0.getDB('admin');
+ // Initialize internal config string.
+ assert.commandWorked(configAdmin.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: configStr,
+ shard: 'config'
+ }));
-// Passing configdb that does not match initialized value is not ok.
-assert.commandFailed(configAdmin.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: 'bad-rs/local:12,local:34',
- shard: 'config'
-}));
+ // Passing configdb that does not match initialized value is not ok.
+ assert.commandFailed(configAdmin.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: 'bad-rs/local:12,local:34',
+ shard: 'config'
+ }));
-// Passing configdb that matches initialized value is ok.
-assert.commandWorked(configAdmin.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: alternateConfigStr,
- shard: 'config'
-}));
+ // Passing configdb that matches initialized value is ok.
+ assert.commandWorked(configAdmin.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: alternateConfigStr,
+ shard: 'config'
+ }));
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/stale_mongos_updates_and_removes.js b/jstests/sharding/stale_mongos_updates_and_removes.js
index 193cfcb83bf..17f75b4c986 100644
--- a/jstests/sharding/stale_mongos_updates_and_removes.js
+++ b/jstests/sharding/stale_mongos_updates_and_removes.js
@@ -17,10 +17,10 @@
var resetCollection = function() {
assert(staleMongos.getCollection(collNS).drop());
st.ensurePrimaryShard(dbName, st._shardNames[0]);
- assert.commandWorked(staleMongos.adminCommand({ shardCollection: collNS, key: { x: 1 }}));
- for (var i=0; i<numShardKeys; i++) {
- assert.writeOK(staleMongos.getCollection(collNS).insert({ x: i, fieldToUpdate: 0 }));
- assert.writeOK(staleMongos.getCollection(collNS).insert({ x: i, fieldToUpdate: 0 }));
+ assert.commandWorked(staleMongos.adminCommand({shardCollection: collNS, key: {x: 1}}));
+ for (var i = 0; i < numShardKeys; i++) {
+ assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
+ assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
}
};
@@ -37,22 +37,18 @@ var makeStaleMongosTargetMultipleShards = function() {
resetCollection();
// Make sure staleMongos sees all data on first shard.
- var chunk = staleMongos.getCollection("config.chunks").findOne({ min: { x: MinKey },
- max: { x: MaxKey }});
+ var chunk =
+ staleMongos.getCollection("config.chunks").findOne({min: {x: MinKey}, max: {x: MaxKey}});
assert(chunk.shard === st._shardNames[0]);
// Make sure staleMongos sees two chunks on two different shards.
- assert.commandWorked(staleMongos.adminCommand({ split: collNS, middle: { x: splitPoint }}));
- assert.commandWorked(staleMongos.adminCommand({ moveChunk: collNS,
- find: { x: 0 },
- to: st._shardNames[1],
- _waitForDelete: true }));
+ assert.commandWorked(staleMongos.adminCommand({split: collNS, middle: {x: splitPoint}}));
+ assert.commandWorked(staleMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st._shardNames[1], _waitForDelete: true}));
// Use freshMongos to consolidate the chunks on one shard.
- assert.commandWorked(freshMongos.adminCommand({ moveChunk: collNS,
- find: { x: 0 },
- to: st._shardNames[0],
- _waitForDelete: true }));
+ assert.commandWorked(freshMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st._shardNames[0], _waitForDelete: true}));
};
// Create a new sharded collection and move a chunk from one shard to another. In the end,
@@ -65,20 +61,22 @@ var makeStaleMongosTargetMultipleShards = function() {
var makeStaleMongosTargetSingleShard = function() {
resetCollection();
// Make sure staleMongos sees all data on first shard.
- var chunk = staleMongos.getCollection("config.chunks").findOne({ min: { x: MinKey },
- max: { x: MaxKey }});
+ var chunk =
+ staleMongos.getCollection("config.chunks").findOne({min: {x: MinKey}, max: {x: MaxKey}});
assert(chunk.shard === st._shardNames[0]);
// Use freshMongos to move chunk to another shard.
- assert.commandWorked(freshMongos.adminCommand({ moveChunk: collNS,
- find: { x: 0 },
- to: st._shardNames[1],
- _waitForDelete: true }));
+ assert.commandWorked(freshMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st._shardNames[1], _waitForDelete: true}));
};
var checkAllRemoveQueries = function(makeMongosStaleFunc) {
- var multi = { justOne: false };
- var single = { justOne: true };
+ var multi = {
+ justOne: false
+ };
+ var single = {
+ justOne: true
+ };
var doRemove = function(query, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
@@ -94,7 +92,7 @@ var checkAllRemoveQueries = function(makeMongosStaleFunc) {
var checkRemoveIsInvalid = function(query, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
- var res = staleMongos.getCollection(collNS).remove(query, multiOption);
+ var res = staleMongos.getCollection(collNS).remove(query, multiOption);
assert.writeError(res);
};
@@ -116,12 +114,23 @@ var checkAllRemoveQueries = function(makeMongosStaleFunc) {
};
var checkAllUpdateQueries = function(makeMongosStaleFunc) {
- var oUpdate = { $inc: { fieldToUpdate: 1 }}; // op-style update (non-idempotent)
- var rUpdate = { x: 0, fieldToUpdate: 1 }; // replacement-style update (idempotent)
- var queryAfterUpdate = { fieldToUpdate: 1 };
+ var oUpdate = {
+ $inc: {fieldToUpdate: 1}
+ }; // op-style update (non-idempotent)
+ var rUpdate = {
+ x: 0,
+ fieldToUpdate: 1
+ }; // replacement-style update (idempotent)
+ var queryAfterUpdate = {
+ fieldToUpdate: 1
+ };
- var multi = { multi: true };
- var single = { multi: false };
+ var multi = {
+ multi: true
+ };
+ var single = {
+ multi: false
+ };
var doUpdate = function(query, update, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
@@ -143,7 +152,7 @@ var checkAllUpdateQueries = function(makeMongosStaleFunc) {
};
// This update has inconsistent behavior as explained in SERVER-22895.
- //doUpdate(emptyQuery, rUpdate, single, makeMongosStaleFunc);
+ // doUpdate(emptyQuery, rUpdate, single, makeMongosStaleFunc);
// Not possible because replacement-style requires equality match on shard key.
checkUpdateIsInvalid(emptyQuery, rUpdate, multi, makeMongosStaleFunc);
// Not possible because op-style requires equality match on shard key if single update.
@@ -172,28 +181,34 @@ var checkAllUpdateQueries = function(makeMongosStaleFunc) {
doUpdate(multiPointQuery, oUpdate, multi, makeMongosStaleFunc);
};
-var st = new ShardingTest({shards: 2, mongos: 2, other: { mongosOptions: { noAutoSplit: "" }} });
+var st = new ShardingTest({shards: 2, mongos: 2, other: {mongosOptions: {noAutoSplit: ""}}});
var dbName = 'test';
var collNS = dbName + '.foo';
var numShardKeys = 10;
var numDocs = numShardKeys * 2;
var splitPoint = numShardKeys / 2;
-assert.commandWorked(st.s.adminCommand({ enableSharding: dbName }));
-assert.commandWorked(st.s.adminCommand({ shardCollection: collNS, key: { x: 1 }}));
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: collNS, key: {x: 1}}));
var freshMongos = st.s0;
var staleMongos = st.s1;
var emptyQuery = {};
-var pointQuery = { x: 0 };
+var pointQuery = {
+ x: 0
+};
// Choose a range that would fall on only one shard.
// Use (splitPoint - 1) because of SERVER-20768.
-var rangeQuery = { x: { $gte: 0, $lt: splitPoint - 1 }};
+var rangeQuery = {
+ x: {$gte: 0, $lt: splitPoint - 1}
+};
// Choose points that would fall on two different shards.
-var multiPointQuery = { $or: [{ x: 0 }, { x: numShardKeys }]};
+var multiPointQuery = {
+ $or: [{x: 0}, {x: numShardKeys}]
+};
checkAllRemoveQueries(makeStaleMongosTargetSingleShard);
checkAllRemoveQueries(makeStaleMongosTargetMultipleShards);
diff --git a/jstests/sharding/stale_version_write.js b/jstests/sharding/stale_version_write.js
index 21680f1abee..e5885dcfa41 100644
--- a/jstests/sharding/stale_version_write.js
+++ b/jstests/sharding/stale_version_write.js
@@ -1,37 +1,37 @@
// Tests whether a reset sharding version triggers errors
-jsTest.log( "Starting sharded cluster..." );
+jsTest.log("Starting sharded cluster...");
-var st = new ShardingTest( { shards : 1, mongos : 2, verbose : 2 } );
+var st = new ShardingTest({shards: 1, mongos: 2, verbose: 2});
st.stopBalancer();
var mongosA = st.s0;
var mongosB = st.s1;
-jsTest.log( "Adding new collections...");
+jsTest.log("Adding new collections...");
-var collA = mongosA.getCollection( jsTestName() + ".coll" );
-assert.writeOK(collA.insert({ hello : "world" }));
+var collA = mongosA.getCollection(jsTestName() + ".coll");
+assert.writeOK(collA.insert({hello: "world"}));
-var collB = mongosB.getCollection( "" + collA );
-assert.writeOK(collB.insert({ hello : "world" }));
+var collB = mongosB.getCollection("" + collA);
+assert.writeOK(collB.insert({hello: "world"}));
-jsTest.log( "Enabling sharding..." );
+jsTest.log("Enabling sharding...");
-printjson( mongosA.getDB( "admin" ).runCommand({ enableSharding : "" + collA.getDB() }) );
-printjson( mongosA.getDB( "admin" ).runCommand({ shardCollection : "" + collA, key : { _id : 1 } }) );
+printjson(mongosA.getDB("admin").runCommand({enableSharding: "" + collA.getDB()}));
+printjson(mongosA.getDB("admin").runCommand({shardCollection: "" + collA, key: {_id: 1}}));
// MongoD doesn't know about the config shard version *until* MongoS tells it
collA.findOne();
-jsTest.log( "Trigger shard version mismatch..." );
+jsTest.log("Trigger shard version mismatch...");
-assert.writeOK(collB.insert({ goodbye : "world" }));
+assert.writeOK(collB.insert({goodbye: "world"}));
-print( "Inserted..." );
+print("Inserted...");
-assert.eq( 3, collA.find().itcount() );
-assert.eq( 3, collB.find().itcount() );
+assert.eq(3, collA.find().itcount());
+assert.eq(3, collB.find().itcount());
st.stop();
diff --git a/jstests/sharding/startup_with_all_configs_down.js b/jstests/sharding/startup_with_all_configs_down.js
index f88f128a75a..61bff427580 100644
--- a/jstests/sharding/startup_with_all_configs_down.js
+++ b/jstests/sharding/startup_with_all_configs_down.js
@@ -6,68 +6,71 @@
// A restarted standalone will lose all data when using an ephemeral storage engine.
// @tags: [requires_persistence]
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({shards: 2});
+ var st = new ShardingTest({shards: 2});
-jsTestLog("Setting up initial data");
+ jsTestLog("Setting up initial data");
-for (var i = 0; i < 100; i++) {
- assert.writeOK(st.s.getDB('test').foo.insert({_id:i}));
-}
+ for (var i = 0; i < 100; i++) {
+ assert.writeOK(st.s.getDB('test').foo.insert({_id: i}));
+ }
-st.ensurePrimaryShard('test', 'shard0000');
+ st.ensurePrimaryShard('test', 'shard0000');
-st.adminCommand({enableSharding: 'test'});
-st.adminCommand({shardCollection: 'test.foo', key: {_id: 1}});
-st.adminCommand({split: 'test.foo', find: {_id: 50}});
-st.adminCommand({moveChunk: 'test.foo', find: {_id: 75}, to: 'shard0001'});
+ st.adminCommand({enableSharding: 'test'});
+ st.adminCommand({shardCollection: 'test.foo', key: {_id: 1}});
+ st.adminCommand({split: 'test.foo', find: {_id: 50}});
+ st.adminCommand({moveChunk: 'test.foo', find: {_id: 75}, to: 'shard0001'});
-// Make sure the pre-existing mongos already has the routing information loaded into memory
-assert.eq(100, st.s.getDB('test').foo.find().itcount());
+ // Make sure the pre-existing mongos already has the routing information loaded into memory
+ assert.eq(100, st.s.getDB('test').foo.find().itcount());
-jsTestLog("Shutting down all config servers");
-for (var i = 0; i < st._configServers.length; i++) {
- st.stopConfigServer(i);
-}
+ jsTestLog("Shutting down all config servers");
+ for (var i = 0; i < st._configServers.length; i++) {
+ st.stopConfigServer(i);
+ }
-jsTestLog("Starting a new mongos when there are no config servers up");
-var newMongosInfo = MongoRunner.runMongos({configdb: st._configDB, waitForConnect: false});
-// The new mongos won't accept any new connections, but it should stay up and continue trying
-// to contact the config servers to finish startup.
-assert.throws(function() { new Mongo(newMongosInfo.host); });
+ jsTestLog("Starting a new mongos when there are no config servers up");
+ var newMongosInfo = MongoRunner.runMongos({configdb: st._configDB, waitForConnect: false});
+ // The new mongos won't accept any new connections, but it should stay up and continue trying
+ // to contact the config servers to finish startup.
+ assert.throws(function() {
+ new Mongo(newMongosInfo.host);
+ });
+ jsTestLog("Restarting a shard while there are no config servers up");
+ MongoRunner.stopMongod(st.shard1);
+ st.shard1.restart = true;
+ MongoRunner.runMongod(st.shard1);
-jsTestLog("Restarting a shard while there are no config servers up");
-MongoRunner.stopMongod(st.shard1);
-st.shard1.restart = true;
-MongoRunner.runMongod(st.shard1);
+ jsTestLog("Queries should fail because the shard can't initialize sharding state");
+ var error = assert.throws(function() {
+ st.s.getDB('test').foo.find().itcount();
+ });
+ assert.eq(ErrorCodes.ExceededTimeLimit, error.code);
-jsTestLog("Queries should fail because the shard can't initialize sharding state");
-var error = assert.throws(function() {st.s.getDB('test').foo.find().itcount();});
-assert.eq(ErrorCodes.ExceededTimeLimit, error.code);
+ jsTestLog("Restarting the config servers");
+ for (var i = 0; i < st._configServers.length; i++) {
+ st.restartConfigServer(i);
+ }
-jsTestLog("Restarting the config servers");
-for (var i = 0; i < st._configServers.length; i++) {
- st.restartConfigServer(i);
-}
+ jsTestLog("Queries against the original mongos should work again");
+ assert.eq(100, st.s.getDB('test').foo.find().itcount());
-jsTestLog("Queries against the original mongos should work again");
-assert.eq(100, st.s.getDB('test').foo.find().itcount());
+ jsTestLog("Should now be possible to connect to the mongos that was started while the config " +
+ "servers were down");
+ var mongos2 = null;
+ assert.soon(function() {
+ try {
+ mongos2 = new Mongo(newMongosInfo.host);
+ return true;
+ } catch (e) {
+ printjson(e);
+ return false;
+ }
+ });
+ assert.eq(100, mongos2.getDB('test').foo.find().itcount());
-jsTestLog("Should now be possible to connect to the mongos that was started while the config "
- + "servers were down");
-var mongos2 = null;
-assert.soon(function() {
- try {
- mongos2 = new Mongo(newMongosInfo.host);
- return true;
- } catch (e) {
- printjson(e);
- return false;
- }
- });
-assert.eq(100, mongos2.getDB('test').foo.find().itcount());
-
-st.stop();
+ st.stop();
}());
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index ba7221cbe47..da6d842fb99 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -1,196 +1,207 @@
-(function () {
-
-var s = new ShardingTest({ name: "stats", shards: 2, mongos: 1 });
-
-s.adminCommand( { enablesharding : "test" } );
-
-a = s._connections[0].getDB( "test" );
-b = s._connections[1].getDB( "test" );
-
-db = s.getDB( "test" );
-s.ensurePrimaryShard('test', 'shard0001');
-
-function numKeys(o){
- var num = 0;
- for (var x in o)
- num++;
- return num;
-}
-
-db.foo.drop();
-assert.commandFailed(db.foo.stats(),
- 'db.collection.stats() should fail on non-existent collection');
-
-// ---------- load some data -----
-
-// need collections sharded before and after main collection for proper test
-s.adminCommand( { shardcollection : "test.aaa" , key : { _id : 1 } } );
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); // this collection is actually used
-s.adminCommand( { shardcollection : "test.zzz" , key : { _id : 1 } } );
-
-
-N = 10000;
-s.adminCommand( { split : "test.foo" , middle : { _id : N/2 } } );
-s.adminCommand({ moveChunk: "test.foo", find: { _id: 3 },
- to: s.getNonPrimaries("test")[0], _waitForDelete: true });
-
-var bulk = db.foo.initializeUnorderedBulkOp();
-for ( i=0; i<N; i++ )
- bulk.insert( { _id : i } );
-assert.writeOK(bulk.execute());
-
-x = db.foo.stats();
-assert.eq( N , x.count , "coll total count expected" );
-assert.eq( db.foo.count() , x.count , "coll total count match" );
-assert.eq( 2 , x.nchunks , "coll chunk num" );
-assert.eq( 2 , numKeys(x.shards) , "coll shard num" );
-assert.eq( N / 2 , x.shards.shard0000.count , "coll count on shard0000 expected" );
-assert.eq( N / 2 , x.shards.shard0001.count , "coll count on shard0001 expected" );
-assert.eq( a.foo.count() , x.shards.shard0000.count , "coll count on shard0000 match" );
-assert.eq( b.foo.count() , x.shards.shard0001.count , "coll count on shard0001 match" );
-assert(!x.shards.shard0000.indexDetails,
- 'indexDetails should not be present in shard0000: ' + tojson(x.shards.shard0000));
-assert(!x.shards.shard0001.indexDetails,
- 'indexDetails should not be present in shard0001: ' + tojson(x.shards.shard0001));
-
-
-a_extras = a.stats().objects - a.foo.count(); // things like system.namespaces and system.indexes
-b_extras = b.stats().objects - b.foo.count(); // things like system.namespaces and system.indexes
-print("a_extras: " + a_extras);
-print("b_extras: " + b_extras);
-
-x = db.stats();
-
-//dbstats uses Future::CommandResult so raw output uses connection strings not shard names
-shards = Object.keySet(x.raw);
-
-assert.eq( N + (a_extras + b_extras) , x.objects , "db total count expected" );
-assert.eq( 2 , numKeys(x.raw) , "db shard num" );
-assert.eq( (N / 2) + a_extras, x.raw[shards[0]].objects , "db count on shard0000 expected" );
-assert.eq( (N / 2) + b_extras, x.raw[shards[1]].objects , "db count on shard0001 expected" );
-assert.eq( a.stats().objects , x.raw[shards[0]].objects , "db count on shard0000 match" );
-assert.eq( b.stats().objects , x.raw[shards[1]].objects , "db count on shard0001 match" );
-
-/* Test db.stat() and db.collection.stat() scaling */
-
-/* Helper functions */
-function statComp(stat, stat_scaled, scale) {
- /* Because of loss of floating point precision, do not check exact equality */
- if ( stat == stat_scaled )
- return true;
- assert(((stat_scaled - 2) <= (stat / scale)) &&
- ((stat / scale) <= (stat_scaled + 2)));
-}
-
-function dbStatComp(stat_obj, stat_obj_scaled, scale) {
- statComp(stat_obj.dataSize, stat_obj_scaled.dataSize, scale);
- statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
- statComp(stat_obj.indexSize, stat_obj_scaled.indexSize, scale);
- statComp(stat_obj.fileSize, stat_obj_scaled.fileSize, scale);
- /* avgObjSize not scaled. See SERVER-7347 */
- statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, 1);
-}
-
-function collStatComp(stat_obj, stat_obj_scaled, scale, mongos) {
- statComp(stat_obj.size, stat_obj_scaled.size, scale);
- statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
- statComp(stat_obj.totalIndexSize, stat_obj_scaled.totalIndexSize, scale);
- statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, scale);
- /* lastExtentSize doesn't exist in mongos level collection stats */
- if (!mongos) {
- statComp(stat_obj.lastExtentSize, stat_obj_scaled.lastExtentSize, scale);
- }
-}
+(function() {
-/* db.stats() tests */
-db_not_scaled = db.stats();
-db_scaled_512 = db.stats(512);
-db_scaled_1024 = db.stats(1024);
+ var s = new ShardingTest({name: "stats", shards: 2, mongos: 1});
-for (var shard in db_not_scaled.raw) {
- dbStatComp(db_not_scaled.raw[shard], db_scaled_512.raw[shard], 512);
- dbStatComp(db_not_scaled.raw[shard], db_scaled_1024.raw[shard], 1024);
-}
+ s.adminCommand({enablesharding: "test"});
-dbStatComp(db_not_scaled, db_scaled_512, 512);
-dbStatComp(db_not_scaled, db_scaled_1024, 1024);
+ a = s._connections[0].getDB("test");
+ b = s._connections[1].getDB("test");
-/* db.collection.stats() tests */
-coll_not_scaled = db.foo.stats();
-coll_scaled_512 = db.foo.stats(512);
-coll_scaled_1024 = db.foo.stats(1024);
+ db = s.getDB("test");
+ s.ensurePrimaryShard('test', 'shard0001');
-for (var shard in coll_not_scaled.shards) {
- collStatComp(coll_not_scaled.shards[shard], coll_scaled_512.shards[shard], 512, false);
- collStatComp(coll_not_scaled.shards[shard], coll_scaled_1024.shards[shard], 1024, false);
-}
+ function numKeys(o) {
+ var num = 0;
+ for (var x in o)
+ num++;
+ return num;
+ }
-collStatComp(coll_not_scaled, coll_scaled_512, 512, true);
-collStatComp(coll_not_scaled, coll_scaled_1024, 1024, true);
+ db.foo.drop();
+ assert.commandFailed(db.foo.stats(),
+ 'db.collection.stats() should fail on non-existent collection');
+
+ // ---------- load some data -----
+
+ // need collections sharded before and after main collection for proper test
+ s.adminCommand({shardcollection: "test.aaa", key: {_id: 1}});
+ s.adminCommand(
+ {shardcollection: "test.foo", key: {_id: 1}}); // this collection is actually used
+ s.adminCommand({shardcollection: "test.zzz", key: {_id: 1}});
+
+ N = 10000;
+ s.adminCommand({split: "test.foo", middle: {_id: N / 2}});
+ s.adminCommand({
+ moveChunk: "test.foo",
+ find: {_id: 3},
+ to: s.getNonPrimaries("test")[0],
+ _waitForDelete: true
+ });
+
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (i = 0; i < N; i++)
+ bulk.insert({_id: i});
+ assert.writeOK(bulk.execute());
+
+ x = db.foo.stats();
+ assert.eq(N, x.count, "coll total count expected");
+ assert.eq(db.foo.count(), x.count, "coll total count match");
+ assert.eq(2, x.nchunks, "coll chunk num");
+ assert.eq(2, numKeys(x.shards), "coll shard num");
+ assert.eq(N / 2, x.shards.shard0000.count, "coll count on shard0000 expected");
+ assert.eq(N / 2, x.shards.shard0001.count, "coll count on shard0001 expected");
+ assert.eq(a.foo.count(), x.shards.shard0000.count, "coll count on shard0000 match");
+ assert.eq(b.foo.count(), x.shards.shard0001.count, "coll count on shard0001 match");
+ assert(!x.shards.shard0000.indexDetails,
+ 'indexDetails should not be present in shard0000: ' + tojson(x.shards.shard0000));
+ assert(!x.shards.shard0001.indexDetails,
+ 'indexDetails should not be present in shard0001: ' + tojson(x.shards.shard0001));
+
+ a_extras =
+ a.stats().objects - a.foo.count(); // things like system.namespaces and system.indexes
+ b_extras =
+ b.stats().objects - b.foo.count(); // things like system.namespaces and system.indexes
+ print("a_extras: " + a_extras);
+ print("b_extras: " + b_extras);
+
+ x = db.stats();
+
+ // dbstats uses Future::CommandResult so raw output uses connection strings not shard names
+ shards = Object.keySet(x.raw);
+
+ assert.eq(N + (a_extras + b_extras), x.objects, "db total count expected");
+ assert.eq(2, numKeys(x.raw), "db shard num");
+ assert.eq((N / 2) + a_extras, x.raw[shards[0]].objects, "db count on shard0000 expected");
+ assert.eq((N / 2) + b_extras, x.raw[shards[1]].objects, "db count on shard0001 expected");
+ assert.eq(a.stats().objects, x.raw[shards[0]].objects, "db count on shard0000 match");
+ assert.eq(b.stats().objects, x.raw[shards[1]].objects, "db count on shard0001 match");
+
+ /* Test db.stat() and db.collection.stat() scaling */
+
+ /* Helper functions */
+ function statComp(stat, stat_scaled, scale) {
+ /* Because of loss of floating point precision, do not check exact equality */
+ if (stat == stat_scaled)
+ return true;
+ assert(((stat_scaled - 2) <= (stat / scale)) && ((stat / scale) <= (stat_scaled + 2)));
+ }
-/* db.collection.stats() - indexDetails tests */
-(function() {
- var t = db.foo;
-
- assert.commandWorked(t.ensureIndex({a: 1}));
- assert.eq(2, t.getIndexes().length);
-
- var isWiredTiger = (!jsTest.options().storageEngine
- || jsTest.options().storageEngine === "wiredTiger");
-
- var stats = assert.commandWorked(t.stats({indexDetails: true}));
- var shardName;
- var shardStats;
- for (shardName in stats.shards) {
- shardStats = stats.shards[shardName];
- assert(shardStats.indexDetails,
- 'indexDetails missing for ' + shardName + ': ' + tojson(shardStats));
- if (isWiredTiger) {
- assert.eq(t.getIndexes().length, Object.keys(shardStats.indexDetails).length,
- 'incorrect number of entries in WiredTiger indexDetails: ' +
- tojson(shardStats));
+ function dbStatComp(stat_obj, stat_obj_scaled, scale) {
+ statComp(stat_obj.dataSize, stat_obj_scaled.dataSize, scale);
+ statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
+ statComp(stat_obj.indexSize, stat_obj_scaled.indexSize, scale);
+ statComp(stat_obj.fileSize, stat_obj_scaled.fileSize, scale);
+ /* avgObjSize not scaled. See SERVER-7347 */
+ statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, 1);
+ }
+
+ function collStatComp(stat_obj, stat_obj_scaled, scale, mongos) {
+ statComp(stat_obj.size, stat_obj_scaled.size, scale);
+ statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
+ statComp(stat_obj.totalIndexSize, stat_obj_scaled.totalIndexSize, scale);
+ statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, scale);
+ /* lastExtentSize doesn't exist in mongos level collection stats */
+ if (!mongos) {
+ statComp(stat_obj.lastExtentSize, stat_obj_scaled.lastExtentSize, scale);
}
}
- function getIndexName(indexKey) {
- var indexes = t.getIndexes().filter(function(doc) {
- return friendlyEqual(doc.key, indexKey);
- });
- assert.eq(1, indexes.length, tojson(indexKey) + ' not found in getIndexes() result: ' +
- tojson(t.getIndexes()));
- return indexes[0].name;
+ /* db.stats() tests */
+ db_not_scaled = db.stats();
+ db_scaled_512 = db.stats(512);
+ db_scaled_1024 = db.stats(1024);
+
+ for (var shard in db_not_scaled.raw) {
+ dbStatComp(db_not_scaled.raw[shard], db_scaled_512.raw[shard], 512);
+ dbStatComp(db_not_scaled.raw[shard], db_scaled_1024.raw[shard], 1024);
+ }
+
+ dbStatComp(db_not_scaled, db_scaled_512, 512);
+ dbStatComp(db_not_scaled, db_scaled_1024, 1024);
+
+ /* db.collection.stats() tests */
+ coll_not_scaled = db.foo.stats();
+ coll_scaled_512 = db.foo.stats(512);
+ coll_scaled_1024 = db.foo.stats(1024);
+
+ for (var shard in coll_not_scaled.shards) {
+ collStatComp(coll_not_scaled.shards[shard], coll_scaled_512.shards[shard], 512, false);
+ collStatComp(coll_not_scaled.shards[shard], coll_scaled_1024.shards[shard], 1024, false);
}
- function checkIndexDetails(options, indexName) {
- var stats = assert.commandWorked(t.stats(options));
+ collStatComp(coll_not_scaled, coll_scaled_512, 512, true);
+ collStatComp(coll_not_scaled, coll_scaled_1024, 1024, true);
+
+ /* db.collection.stats() - indexDetails tests */
+ (function() {
+ var t = db.foo;
+
+ assert.commandWorked(t.ensureIndex({a: 1}));
+ assert.eq(2, t.getIndexes().length);
+
+ var isWiredTiger =
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger");
+
+ var stats = assert.commandWorked(t.stats({indexDetails: true}));
+ var shardName;
+ var shardStats;
for (shardName in stats.shards) {
shardStats = stats.shards[shardName];
assert(shardStats.indexDetails,
- 'indexDetails missing from db.collection.stats(' + tojson(options) +
- ').shards[' + shardName + '] result: ' + tojson(shardStats));
- // Currently, indexDetails is only supported with WiredTiger.
+ 'indexDetails missing for ' + shardName + ': ' + tojson(shardStats));
if (isWiredTiger) {
- assert.eq(1, Object.keys(shardStats.indexDetails).length,
- 'WiredTiger indexDetails must have exactly one entry');
- assert(shardStats.indexDetails[indexName],
- indexName + ' missing from WiredTiger indexDetails: ' +
- tojson(shardStats.indexDetails));
- assert.neq(0, Object.keys(shardStats.indexDetails[indexName]).length,
- indexName + ' exists in indexDetails but contains no information: ' +
- tojson(shardStats.indexDetails));
+ assert.eq(t.getIndexes().length,
+ Object.keys(shardStats.indexDetails).length,
+ 'incorrect number of entries in WiredTiger indexDetails: ' +
+ tojson(shardStats));
+ }
+ }
+
+ function getIndexName(indexKey) {
+ var indexes = t.getIndexes().filter(function(doc) {
+ return friendlyEqual(doc.key, indexKey);
+ });
+ assert.eq(
+ 1,
+ indexes.length,
+ tojson(indexKey) + ' not found in getIndexes() result: ' + tojson(t.getIndexes()));
+ return indexes[0].name;
+ }
+
+ function checkIndexDetails(options, indexName) {
+ var stats = assert.commandWorked(t.stats(options));
+ for (shardName in stats.shards) {
+ shardStats = stats.shards[shardName];
+ assert(shardStats.indexDetails,
+ 'indexDetails missing from db.collection.stats(' + tojson(options) +
+ ').shards[' + shardName + '] result: ' + tojson(shardStats));
+ // Currently, indexDetails is only supported with WiredTiger.
+ if (isWiredTiger) {
+ assert.eq(1,
+ Object.keys(shardStats.indexDetails).length,
+ 'WiredTiger indexDetails must have exactly one entry');
+ assert(shardStats.indexDetails[indexName],
+ indexName + ' missing from WiredTiger indexDetails: ' +
+ tojson(shardStats.indexDetails));
+ assert.neq(0,
+ Object.keys(shardStats.indexDetails[indexName]).length,
+ indexName + ' exists in indexDetails but contains no information: ' +
+ tojson(shardStats.indexDetails));
+ }
}
}
- }
- // indexDetailsKey - show indexDetails results for this index key only.
- var indexKey = {a: 1};
- var indexName = getIndexName(indexKey);
- checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
+ // indexDetailsKey - show indexDetails results for this index key only.
+ var indexKey = {
+ a: 1
+ };
+ var indexName = getIndexName(indexKey);
+ checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
- // indexDetailsName - show indexDetails results for this index name only.
- checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName);
-}());
+ // indexDetailsName - show indexDetails results for this index name only.
+ checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName);
+ }());
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/tag_auto_split.js b/jstests/sharding/tag_auto_split.js
index 835ec3b1546..df2011e5425 100644
--- a/jstests/sharding/tag_auto_split.js
+++ b/jstests/sharding/tag_auto_split.js
@@ -1,72 +1,66 @@
// Test to make sure that tag ranges get split
(function() {
-var s = new ShardingTest({ name: "tag_auto_split",
- shards: 2,
- mongos: 1,
- other: { enableBalancer : true } });
+ var s = new ShardingTest(
+ {name: "tag_auto_split", shards: 2, mongos: 1, other: {enableBalancer: true}});
-db = s.getDB( "test" );
+ db = s.getDB("test");
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-assert.eq( 1, s.config.chunks.count() );
+ assert.eq(1, s.config.chunks.count());
-sh.addShardTag( "shard0000" , "a" );
+ sh.addShardTag("shard0000", "a");
-sh.addTagRange( "test.foo" , { _id : 5 } , { _id : 10 } , "a" );
-sh.addTagRange( "test.foo" , { _id : 10 } , { _id : 15 } , "b" );
+ sh.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
+ sh.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
-assert.soon( function() {
- return s.config.chunks.count() == 3;
-}, "things didn't get split", 1000 * 60 * 10, 1000 );
+ assert.soon(function() {
+ return s.config.chunks.count() == 3;
+ }, "things didn't get split", 1000 * 60 * 10, 1000);
-s.printShardingStatus();
+ s.printShardingStatus();
-s.stop();
+ s.stop();
-//test without full shard key on tags
-s = new ShardingTest({ name: "tag_auto_split2",
- shards: 2,
- mongos: 1,
- other: { enableBalancer : true } });
+ // test without full shard key on tags
+ s = new ShardingTest(
+ {name: "tag_auto_split2", shards: 2, mongos: 1, other: {enableBalancer: true}});
-db = s.getDB( "test" );
+ db = s.getDB("test");
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1, a : 1 } } );
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1, a: 1}});
-assert.eq( 1, s.config.chunks.count() );
+ assert.eq(1, s.config.chunks.count());
-sh.addShardTag( "shard0000" , "a" );
+ sh.addShardTag("shard0000", "a");
-sh.addTagRange( "test.foo" , { _id : 5 } , { _id : 10 } , "a" );
-sh.addTagRange( "test.foo" , { _id : 10 } , { _id : 15 } , "b" );
+ sh.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
+ sh.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
-assert.soon( function() {
- return s.config.chunks.count() == 3;
-}, "things didn't get split", 1000 * 60 * 10, 1000 );
+ assert.soon(function() {
+ return s.config.chunks.count() == 3;
+ }, "things didn't get split", 1000 * 60 * 10, 1000);
-s.config.chunks.find().forEach(
- function(chunk){
+ s.config.chunks.find().forEach(function(chunk) {
var numFields = 0;
- for ( var x in chunk.min ) {
+ for (var x in chunk.min) {
numFields++;
- assert( x == "_id" || x == "a", tojson(chunk) );
+ assert(x == "_id" || x == "a", tojson(chunk));
}
- assert.eq( 2, numFields,tojson(chunk) );
- }
-);
+ assert.eq(2, numFields, tojson(chunk));
+ });
-// check chunk mins correspond exactly to tag range boundaries, extended to match shard key
-assert.eq( 1, s.config.chunks.find( {min : {_id : 5 , a : MinKey} } ).count(),
- "bad chunk range boundary" );
-assert.eq( 1, s.config.chunks.find( {min : {_id : 10 , a : MinKey} } ).count(),
- "bad chunk range boundary" );
+ // check chunk mins correspond exactly to tag range boundaries, extended to match shard key
+ assert.eq(
+ 1, s.config.chunks.find({min: {_id: 5, a: MinKey}}).count(), "bad chunk range boundary");
+ assert.eq(
+ 1, s.config.chunks.find({min: {_id: 10, a: MinKey}}).count(), "bad chunk range boundary");
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/tag_range.js b/jstests/sharding/tag_range.js
index 897433001e2..a07656422d3 100644
--- a/jstests/sharding/tag_range.js
+++ b/jstests/sharding/tag_range.js
@@ -1,82 +1,80 @@
// tests to make sure that tag ranges are added/removed/updated successfully
-function countTags( num, message ) {
- assert.eq( s.config.tags.count() , num , message );
+function countTags(num, message) {
+ assert.eq(s.config.tags.count(), num, message);
}
-var s = new ShardingTest({ name: "tag_range",
- shards: 2,
- mongos: 1 });
+var s = new ShardingTest({name: "tag_range", shards: 2, mongos: 1});
// this set up is not required but prevents warnings in the remove
-db = s.getDB( "tag_range" );
+db = s.getDB("tag_range");
-s.adminCommand( { enableSharding : "test" } );
+s.adminCommand({enableSharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardCollection : "test.tag_range" , key : { _id : 1 } } );
+s.adminCommand({shardCollection: "test.tag_range", key: {_id: 1}});
-assert.eq( 1 , s.config.chunks.count() );
+assert.eq(1, s.config.chunks.count());
-sh.addShardTag( "shard0000" , "a" );
+sh.addShardTag("shard0000", "a");
// add two ranges, verify the additions
-sh.addTagRange( "test.tag_range" , { _id : 5 } , { _id : 10 } , "a" );
-sh.addTagRange( "test.tag_range" , { _id : 10 } , { _id : 15 } , "b" );
+sh.addTagRange("test.tag_range", {_id: 5}, {_id: 10}, "a");
+sh.addTagRange("test.tag_range", {_id: 10}, {_id: 15}, "b");
-countTags( 2 , "tag ranges were not successfully added" );
+countTags(2, "tag ranges were not successfully added");
// remove the second range, should be left with one
-sh.removeTagRange( "test.tag_range" , { _id : 10 } , { _id : 15 } , "b" );
+sh.removeTagRange("test.tag_range", {_id: 10}, {_id: 15}, "b");
-countTags( 1 , "tag range not removed successfully" );
+countTags(1, "tag range not removed successfully");
// the additions are actually updates, so you can alter a range's max
-sh.addTagRange( "test.tag_range" , { _id : 5 } , { _id : 11 } , "a" );
+sh.addTagRange("test.tag_range", {_id: 5}, {_id: 11}, "a");
-assert.eq( 11 , s.config.tags.findOne().max._id , "tag range not updated successfully" );
+assert.eq(11, s.config.tags.findOne().max._id, "tag range not updated successfully");
// add range min=max, verify the additions
try {
- sh.addTagRange( "test.tag_range" , { _id : 20 } , { _id : 20 } , "a" );
+ sh.addTagRange("test.tag_range", {_id: 20}, {_id: 20}, "a");
} catch (e) {
- countTags( 1 , "tag range should not have been added" );
+ countTags(1, "tag range should not have been added");
}
// removeTagRange tests for tag ranges that do not exist
// Bad namespace
-sh.removeTagRange("badns", { _id : 5 }, { _id : 11 }, "a");
-countTags(1 , "Bad namespace: tag range does not exist");
+sh.removeTagRange("badns", {_id: 5}, {_id: 11}, "a");
+countTags(1, "Bad namespace: tag range does not exist");
// Bad tag
-sh.removeTagRange("test.tag_range", { _id : 5 }, { _id : 11 }, "badtag");
-countTags(1 , "Bad tag: tag range does not exist");
+sh.removeTagRange("test.tag_range", {_id: 5}, {_id: 11}, "badtag");
+countTags(1, "Bad tag: tag range does not exist");
// Bad min
-sh.removeTagRange("test.tag_range", { _id : 0 }, { _id : 11 }, "a");
-countTags(1 , "Bad min: tag range does not exist");
+sh.removeTagRange("test.tag_range", {_id: 0}, {_id: 11}, "a");
+countTags(1, "Bad min: tag range does not exist");
// Bad max
-sh.removeTagRange("test.tag_range", { _id : 5 }, { _id : 12 }, "a");
-countTags(1 , "Bad max: tag range does not exist");
+sh.removeTagRange("test.tag_range", {_id: 5}, {_id: 12}, "a");
+countTags(1, "Bad max: tag range does not exist");
// Invalid namesapce
-sh.removeTagRange(35, { _id : 5 }, { _id : 11 }, "a");
-countTags(1 , "Invalid namespace: tag range does not exist");
+sh.removeTagRange(35, {_id: 5}, {_id: 11}, "a");
+countTags(1, "Invalid namespace: tag range does not exist");
// Invalid tag
-sh.removeTagRange("test.tag_range", { _id : 5 }, { _id : 11 }, 35);
-countTags(1 , "Invalid tag: tag range does not exist");
+sh.removeTagRange("test.tag_range", {_id: 5}, {_id: 11}, 35);
+countTags(1, "Invalid tag: tag range does not exist");
// Invalid min
-sh.removeTagRange("test.tag_range", 35, { _id : 11 }, "a");
-countTags(1 , "Invalid min: tag range does not exist");
+sh.removeTagRange("test.tag_range", 35, {_id: 11}, "a");
+countTags(1, "Invalid min: tag range does not exist");
// Invalid max
-sh.removeTagRange("test.tag_range", { _id : 5 }, 35, "a");
-countTags(1 , "Invalid max: tag range does not exist");
+sh.removeTagRange("test.tag_range", {_id: 5}, 35, "a");
+countTags(1, "Invalid max: tag range does not exist");
s.stop();
diff --git a/jstests/sharding/test_stacked_migration_cleanup.js b/jstests/sharding/test_stacked_migration_cleanup.js
index 93f9862e756..523f5de1a0c 100644
--- a/jstests/sharding/test_stacked_migration_cleanup.js
+++ b/jstests/sharding/test_stacked_migration_cleanup.js
@@ -1,68 +1,69 @@
-// Tests "stacking" multiple migration cleanup threads and their behavior when the collection changes
+// Tests "stacking" multiple migration cleanup threads and their behavior when the collection
+// changes
(function() {
-'use strict';
+ 'use strict';
-// start up a new sharded cluster
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+ // start up a new sharded cluster
+ var st = new ShardingTest({shards: 2, mongos: 1});
-var mongos = st.s;
-var admin = mongos.getDB("admin");
-var shards = mongos.getDB("config").shards.find().toArray();
-var coll = mongos.getCollection("foo.bar");
+ var mongos = st.s;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getDB("config").shards.find().toArray();
+ var coll = mongos.getCollection("foo.bar");
-// Enable sharding of the collection
-assert.commandWorked(mongos.adminCommand({ enablesharding : coll.getDB() + "" }));
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
-assert.commandWorked(mongos.adminCommand({ shardcollection : coll + "", key: { _id : 1 } }));
+ // Enable sharding of the collection
+ assert.commandWorked(mongos.adminCommand({enablesharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(mongos.adminCommand({shardcollection: coll + "", key: {_id: 1}}));
-var numChunks = 30;
+ var numChunks = 30;
-// Create a bunch of chunks
-for (var i = 0; i < numChunks; i++) {
- assert.commandWorked(mongos.adminCommand({ split : coll + "", middle : { _id : i } }));
-}
+ // Create a bunch of chunks
+ for (var i = 0; i < numChunks; i++) {
+ assert.commandWorked(mongos.adminCommand({split: coll + "", middle: {_id: i}}));
+ }
-jsTest.log("Inserting a lot of small documents...");
+ jsTest.log("Inserting a lot of small documents...");
-// Insert a lot of small documents to make multiple cursor batches
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < 10 * 1000; i++) {
- bulk.insert({ _id : i });
-}
-assert.writeOK(bulk.execute());
+ // Insert a lot of small documents to make multiple cursor batches
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 10 * 1000; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
-jsTest.log("Opening a mongod cursor...");
+ jsTest.log("Opening a mongod cursor...");
-// Open a new cursor on the mongod
-var cursor = coll.find();
-var next = cursor.next();
+ // Open a new cursor on the mongod
+ var cursor = coll.find();
+ var next = cursor.next();
-jsTest.log("Moving a bunch of chunks to stack cleanup...");
+ jsTest.log("Moving a bunch of chunks to stack cleanup...");
-// Move a bunch of chunks, but don't close the cursor so they stack.
-for (var i = 0; i < numChunks; i++) {
- assert.commandWorked(
- mongos.adminCommand({ moveChunk : coll + "", find : { _id : i }, to : shards[1]._id }));
-}
+ // Move a bunch of chunks, but don't close the cursor so they stack.
+ for (var i = 0; i < numChunks; i++) {
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: coll + "", find: {_id: i}, to: shards[1]._id}));
+ }
-jsTest.log("Dropping and re-creating collection...");
+ jsTest.log("Dropping and re-creating collection...");
-coll.drop();
+ coll.drop();
-bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < numChunks; i++) {
- bulk.insert({ _id : i });
-}
-assert.writeOK(bulk.execute());
+ bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < numChunks; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
-sleep(10 * 1000);
+ sleep(10 * 1000);
-jsTest.log("Checking that documents were not cleaned up...");
+ jsTest.log("Checking that documents were not cleaned up...");
-for (var i = 0; i < numChunks; i++) {
- assert.neq(null, coll.findOne({ _id : i }));
-}
+ for (var i = 0; i < numChunks; i++) {
+ assert.neq(null, coll.findOne({_id: i}));
+ }
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index 696bc3478ae..74a3e942cae 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -6,7 +6,7 @@ function shardSetup(shardConfig, dbName, collName) {
// Disable the balancer to not interfere with the test, but keep the balancer settings on
// (with default empty document) so the auto split logic will be able to move chunks around.
- assert.writeOK(st.s.getDB('config').settings.remove({ _id: 'balancer' }));
+ assert.writeOK(st.s.getDB('config').settings.remove({_id: 'balancer'}));
db.adminCommand({configureFailPoint: 'skipBalanceRound', mode: 'alwaysOn'});
return st;
}
@@ -31,13 +31,13 @@ function runTest(test) {
for (var i = 0; i < test.shards.length; i++) {
var startRange = test.shards[i].range.min;
var endRange = test.shards[i].range.max;
- var chunkSize = Math.abs(endRange-startRange)/test.shards[i].chunks;
+ var chunkSize = Math.abs(endRange - startRange) / test.shards[i].chunks;
for (var j = startRange; j < endRange; j += chunkSize) {
// No split on highest chunk
if (j + chunkSize >= MAXVAL) {
continue;
}
- db.adminCommand({split: coll + "", middle: {x: j+chunkSize}});
+ db.adminCommand({split: coll + "", middle: {x: j + chunkSize}});
db.adminCommand({moveChunk: coll + "", find: {x: j}, to: test.shards[i].name});
}
// Make sure to move chunk when there's only 1 chunk in shard
@@ -72,7 +72,10 @@ function runTest(test) {
// Insert one doc at a time until first auto-split occurs on top chunk
var xval = test.inserts.value;
do {
- var doc = {x: xval, val: largeStr};
+ var doc = {
+ x: xval,
+ val: largeStr
+ };
coll.insert(doc);
xval += test.inserts.inc;
} while (getNumberOfChunks(configDB) <= numChunks);
@@ -105,17 +108,44 @@ var configDB = st.s.getDB('config');
// Define shard key ranges for each of the shard nodes
var MINVAL = -500;
var MAXVAL = 1500;
-var lowChunkRange = {min: MINVAL, max: 0};
-var midChunkRange1 = {min: 0, max: 500};
-var midChunkRange2 = {min: 500, max: 1000};
-var highChunkRange = {min: 1000, max: MAXVAL};
-
-var lowChunkTagRange = {min: MinKey, max: 0};
-var highChunkTagRange = {min: 1000, max: MaxKey};
-
-var lowChunkInserts = {value: 0, inc: -1};
-var midChunkInserts = {value: 1, inc: 1};
-var highChunkInserts = {value: 1000, inc: 1};
+var lowChunkRange = {
+ min: MINVAL,
+ max: 0
+};
+var midChunkRange1 = {
+ min: 0,
+ max: 500
+};
+var midChunkRange2 = {
+ min: 500,
+ max: 1000
+};
+var highChunkRange = {
+ min: 1000,
+ max: MAXVAL
+};
+
+var lowChunkTagRange = {
+ min: MinKey,
+ max: 0
+};
+var highChunkTagRange = {
+ min: 1000,
+ max: MaxKey
+};
+
+var lowChunkInserts = {
+ value: 0,
+ inc: -1
+};
+var midChunkInserts = {
+ value: 1,
+ inc: 1
+};
+var highChunkInserts = {
+ value: 1000,
+ inc: 1
+};
var lowChunk = 1;
var highChunk = -1;
@@ -137,99 +167,119 @@ var highChunk = -1;
// high - high shard key value
var tests = [
{
- // Test auto-split on the "low" top chunk to another tagged shard
- name: "low top chunk with tag move",
- lowOrHigh: lowChunk,
- movedToShard: "shard0002",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 20, tags: ["NYC"]},
- {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: "shard0002", range: highChunkRange, chunks: 5, tags: ["NYC"]},
- {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]},
- ],
- tagRanges: [{range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk to another tagged shard
+ name: "low top chunk with tag move",
+ lowOrHigh: lowChunk,
+ movedToShard: "shard0002",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: "shard0002", range: highChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]},
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "low" top chunk to same tagged shard
- name: "low top chunk with tag no move",
- lowOrHigh: lowChunk,
- movedToShard: "shard0000",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 5, tags: ["NYC"]},
- {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: "shard0002", range: highChunkRange, chunks: 20, tags: ["NYC"]},
- {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]},
- ],
- tagRanges: [{range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}],
- inserts: lowChunkInserts
- },
+ // Test auto-split on the "low" top chunk to same tagged shard
+ name: "low top chunk with tag no move",
+ lowOrHigh: lowChunk,
+ movedToShard: "shard0000",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: "shard0002", range: highChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]},
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: lowChunkInserts
+ },
{
- // Test auto-split on the "low" top chunk to another shard
- name: "low top chunk no tag move",
- lowOrHigh: lowChunk,
- movedToShard: "shard0003",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 20},
- {name: "shard0001", range: midChunkRange1, chunks: 20},
- {name: "shard0002", range: highChunkRange, chunks: 5},
- {name: "shard0003", range: midChunkRange2, chunks: 1}],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk to another shard
+ name: "low top chunk no tag move",
+ lowOrHigh: lowChunk,
+ movedToShard: "shard0003",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 20},
+ {name: "shard0001", range: midChunkRange1, chunks: 20},
+ {name: "shard0002", range: highChunkRange, chunks: 5},
+ {name: "shard0003", range: midChunkRange2, chunks: 1}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk to another tagged shard
- name: "high top chunk with tag move",
- lowOrHigh: highChunk,
- movedToShard: "shard0000",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 5, tags: ["NYC"]},
- {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: "shard0002", range: highChunkRange, chunks: 20, tags: ["NYC"]},
- {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]}],
- tagRanges: [{range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to another tagged shard
+ name: "high top chunk with tag move",
+ lowOrHigh: highChunk,
+ movedToShard: "shard0000",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: "shard0002", range: highChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]}
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: highChunkInserts
},
{
- // Test auto-split on the "high" top chunk to another shard
- name: "high top chunk no tag move",
- lowOrHigh: highChunk,
- movedToShard: "shard0003",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 5},
- {name: "shard0001", range: midChunkRange1, chunks: 20},
- {name: "shard0002", range: highChunkRange, chunks: 20},
- {name: "shard0003", range: midChunkRange2, chunks: 1}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to another shard
+ name: "high top chunk no tag move",
+ lowOrHigh: highChunk,
+ movedToShard: "shard0003",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 5},
+ {name: "shard0001", range: midChunkRange1, chunks: 20},
+ {name: "shard0002", range: highChunkRange, chunks: 20},
+ {name: "shard0003", range: midChunkRange2, chunks: 1}
+ ],
+ inserts: highChunkInserts
},
{
- // Test auto-split on the "high" top chunk to same tagged shard
- name: "high top chunk with tag no move",
- lowOrHigh: highChunk,
- movedToShard: "shard0002",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 20, tags: ["NYC"]},
- {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: "shard0002", range: highChunkRange, chunks: 5, tags: ["NYC"]},
- {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]}],
- tagRanges: [{range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to same tagged shard
+ name: "high top chunk with tag no move",
+ lowOrHigh: highChunk,
+ movedToShard: "shard0002",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: "shard0002", range: highChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]}
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: highChunkInserts
},
{
- // Test auto-split on the "high" top chunk to same shard
- name: "high top chunk no tag no move",
- lowOrHigh: highChunk,
- movedToShard: "shard0002",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 20},
- {name: "shard0001", range: midChunkRange1, chunks: 20},
- {name: "shard0002", range: highChunkRange, chunks: 1},
- {name: "shard0003", range: midChunkRange2, chunks: 5}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to same shard
+ name: "high top chunk no tag no move",
+ lowOrHigh: highChunk,
+ movedToShard: "shard0002",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 20},
+ {name: "shard0001", range: midChunkRange1, chunks: 20},
+ {name: "shard0002", range: highChunkRange, chunks: 1},
+ {name: "shard0003", range: midChunkRange2, chunks: 5}
+ ],
+ inserts: highChunkInserts
}
];
@@ -251,20 +301,20 @@ configDB = st.s.getDB('config');
var singleNodeTests = [
{
- // Test auto-split on the "low" top chunk on single node shard
- name: "single node shard - low top chunk",
- lowOrHigh: lowChunk,
- movedToShard: "shard0000",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 2}],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk on single node shard
+ name: "single node shard - low top chunk",
+ lowOrHigh: lowChunk,
+ movedToShard: "shard0000",
+ shards: [{name: "shard0000", range: lowChunkRange, chunks: 2}],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk on single node shard
- name: "single node shard - high top chunk",
- lowOrHigh: highChunk,
- movedToShard: "shard0000",
- shards: [{name: "shard0000", range: highChunkRange, chunks: 2}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk on single node shard
+ name: "single node shard - high top chunk",
+ lowOrHigh: highChunk,
+ movedToShard: "shard0000",
+ shards: [{name: "shard0000", range: highChunkRange, chunks: 2}],
+ inserts: highChunkInserts
}
];
@@ -280,9 +330,8 @@ st.stop();
// maxSize test
// To set maxSize, must manually add the shards
-st = shardSetup({name: "maxSize", shards: 2, chunkSize: 1, other: {manualAddShard: true}},
- dbName,
- collName);
+st = shardSetup(
+ {name: "maxSize", shards: 2, chunkSize: 1, other: {manualAddShard: true}}, dbName, collName);
db = st.getDB(dbName);
coll = db[collName];
configDB = st.s.getDB('config');
@@ -293,32 +342,35 @@ st.adminCommand({addshard: st.getConnNames()[1], maxSize: 1});
var maxSizeTests = [
{
- // Test auto-split on the "low" top chunk with maxSize on
- // destination shard
- name: "maxSize - low top chunk",
- lowOrHigh: lowChunk,
- movedToShard: "shard0000",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 10},
- {name: "shard0001", range: highChunkRange, chunks: 1}],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk with maxSize on
+ // destination shard
+ name: "maxSize - low top chunk",
+ lowOrHigh: lowChunk,
+ movedToShard: "shard0000",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 10},
+ {name: "shard0001", range: highChunkRange, chunks: 1}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk with maxSize on
- // destination shard
- name: "maxSize - high top chunk",
- lowOrHigh: highChunk,
- movedToShard: "shard0000",
- shards: [{name: "shard0000", range: highChunkRange, chunks: 10},
- {name: "shard0001", range: lowChunkRange, chunks: 1}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk with maxSize on
+ // destination shard
+ name: "maxSize - high top chunk",
+ lowOrHigh: highChunk,
+ movedToShard: "shard0000",
+ shards: [
+ {name: "shard0000", range: highChunkRange, chunks: 10},
+ {name: "shard0001", range: lowChunkRange, chunks: 1}
+ ],
+ inserts: highChunkInserts
}
];
// SERVER-17070 Auto split moves to shard node running WiredTiger, if exceeding maxSize
var unsupported = ["wiredTiger", "rocksdb", "inMemory", "ephemeralForTest"];
-if (unsupported.indexOf(st.d0.adminCommand({serverStatus : 1}).storageEngine.name) == -1 &&
- unsupported.indexOf(st.d1.adminCommand({serverStatus : 1}).storageEngine.name) == -1) {
-
+if (unsupported.indexOf(st.d0.adminCommand({serverStatus: 1}).storageEngine.name) == -1 &&
+ unsupported.indexOf(st.d1.adminCommand({serverStatus: 1}).storageEngine.name) == -1) {
assert.commandWorked(db.adminCommand({enableSharding: dbName}));
db.adminCommand({movePrimary: dbName, to: 'shard0000'});
diff --git a/jstests/sharding/trace_missing_docs_test.js b/jstests/sharding/trace_missing_docs_test.js
index d7ac493cc5a..f02ccc80434 100644
--- a/jstests/sharding/trace_missing_docs_test.js
+++ b/jstests/sharding/trace_missing_docs_test.js
@@ -2,48 +2,48 @@
load('jstests/libs/trace_missing_docs.js');
(function() {
-'use strict';
+ 'use strict';
-var testDocMissing = function(useReplicaSet) {
- var options = { rs: useReplicaSet,
- shardOptions: { master: "", oplogSize: 10 },
- rsOptions: { nodes: 1, oplogSize: 10 } };
+ var testDocMissing = function(useReplicaSet) {
+ var options = {
+ rs: useReplicaSet,
+ shardOptions: {master: "", oplogSize: 10},
+ rsOptions: {nodes: 1, oplogSize: 10}
+ };
- var st = new ShardingTest({ shards: 2, mongos: 1, other: options });
+ var st = new ShardingTest({shards: 2, mongos: 1, other: options});
- var mongos = st.s0;
- var coll = mongos.getCollection("foo.bar");
- var admin = mongos.getDB("admin");
- var shards = mongos.getCollection("config.shards").find().toArray();
+ var mongos = st.s0;
+ var coll = mongos.getCollection("foo.bar");
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
- assert.commandWorked(admin.runCommand({ enableSharding: coll.getDB() + "" }));
- st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
- coll.ensureIndex({ sk: 1 });
- assert.commandWorked(admin.runCommand({ shardCollection: coll + "", key: { sk: 1 } }));
+ coll.ensureIndex({sk: 1});
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {sk: 1}}));
- assert.writeOK(coll.insert({ _id: 12345, sk: 67890, hello: "world" }));
- assert.writeOK(coll.update({ _id: 12345 }, { $set: { baz: 'biz' } }));
- assert.writeOK(coll.update({ sk: 67890 }, { $set: { baz: 'boz' } }));
+ assert.writeOK(coll.insert({_id: 12345, sk: 67890, hello: "world"}));
+ assert.writeOK(coll.update({_id: 12345}, {$set: {baz: 'biz'}}));
+ assert.writeOK(coll.update({sk: 67890}, {$set: {baz: 'boz'}}));
- assert.commandWorked(admin.runCommand({ moveChunk: coll + "",
- find: { sk: 0 },
- to: shards[1]._id,
- _waitForDelete: true }));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: coll + "", find: {sk: 0}, to: shards[1]._id, _waitForDelete: true}));
- st.printShardingStatus();
+ st.printShardingStatus();
- var ops = traceMissingDoc(coll, { _id: 12345, sk: 67890 });
+ var ops = traceMissingDoc(coll, {_id: 12345, sk: 67890});
- assert.eq(ops[0].op, 'i');
- assert.eq(ops.length, 5);
+ assert.eq(ops[0].op, 'i');
+ assert.eq(ops.length, 5);
- jsTest.log("DONE! " + (useReplicaSet ? "(using rs)": "(using master/slave)"));
+ jsTest.log("DONE! " + (useReplicaSet ? "(using rs)" : "(using master/slave)"));
- st.stop();
-};
+ st.stop();
+ };
-testDocMissing(true);
-testDocMissing(false);
+ testDocMissing(true);
+ testDocMissing(false);
})();
diff --git a/jstests/sharding/unowned_doc_filtering.js b/jstests/sharding/unowned_doc_filtering.js
index e54cb21f3c7..2c12ef4b0cf 100644
--- a/jstests/sharding/unowned_doc_filtering.js
+++ b/jstests/sharding/unowned_doc_filtering.js
@@ -7,43 +7,44 @@
* @tags: [requires_persistence]
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards: 2 });
+ var st = new ShardingTest({shards: 2});
-var testDB = st.s.getDB('test');
+ var testDB = st.s.getDB('test');
-assert.commandWorked(testDB.adminCommand({ enableSharding: 'test' }));
-st.ensurePrimaryShard('test', 'shard0000');
-assert.commandWorked(testDB.adminCommand({ shardCollection: 'test.foo', key: { x: 1 }}));
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0000');
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
-var inserts = [];
-for (var i = 0; i < 100; i++) {
- inserts.push({x:i});
-}
-assert.writeOK(testDB.foo.insert(inserts));
-
-assert.commandWorked(testDB.adminCommand({split:'test.foo', find: {x:50}}));
-assert.commandWorked(testDB.adminCommand({moveChunk:'test.foo', find:{x:100}, to: 'shard0001'}));
-
-// Insert some documents directly into the shards into chunks not owned by that shard.
-st.d0.getDB('test').foo.insert({x:100});
-st.d1.getDB('test').foo.insert({x:0});
-
-st.restartMongod(0);
-st.restartMongod(1);
-
-var fooCount;
-for (var retries = 0; retries <= 2; retries++) {
- try {
- fooCount = testDB.foo.find().itcount();
- break;
- } catch (e) {
- // expected for reestablishing connections broken by the mongod restart.
- assert.eq(ErrorCodes.HostUnreachable, e.code, tojson(e));
+ var inserts = [];
+ for (var i = 0; i < 100; i++) {
+ inserts.push({x: i});
+ }
+ assert.writeOK(testDB.foo.insert(inserts));
+
+ assert.commandWorked(testDB.adminCommand({split: 'test.foo', find: {x: 50}}));
+ assert.commandWorked(
+ testDB.adminCommand({moveChunk: 'test.foo', find: {x: 100}, to: 'shard0001'}));
+
+ // Insert some documents directly into the shards into chunks not owned by that shard.
+ st.d0.getDB('test').foo.insert({x: 100});
+ st.d1.getDB('test').foo.insert({x: 0});
+
+ st.restartMongod(0);
+ st.restartMongod(1);
+
+ var fooCount;
+ for (var retries = 0; retries <= 2; retries++) {
+ try {
+ fooCount = testDB.foo.find().itcount();
+ break;
+ } catch (e) {
+ // expected for reestablishing connections broken by the mongod restart.
+ assert.eq(ErrorCodes.HostUnreachable, e.code, tojson(e));
+ }
}
-}
-assert.eq(100, fooCount);
+ assert.eq(100, fooCount);
-st.stop();
+ st.stop();
}());
diff --git a/jstests/sharding/update_immutable_fields.js b/jstests/sharding/update_immutable_fields.js
index f0383fcf2f8..ba936808fdd 100644
--- a/jstests/sharding/update_immutable_fields.js
+++ b/jstests/sharding/update_immutable_fields.js
@@ -1,77 +1,79 @@
// Tests that updates can't change immutable fields (used in sharded system)
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+ var st = new ShardingTest({shards: 2, mongos: 1});
-var mongos = st.s;
-var config = mongos.getDB("config");
-var coll = mongos.getCollection(jsTestName() + ".coll1");
-var shard0 = st.shard0;
+ var mongos = st.s;
+ var config = mongos.getDB("config");
+ var coll = mongos.getCollection(jsTestName() + ".coll1");
+ var shard0 = st.shard0;
-assert.commandWorked(config.adminCommand({enableSharding : coll.getDB() + ""}));
-st.ensurePrimaryShard(coll.getDB().getName(), 'shard0000');
-assert.commandWorked(config.adminCommand({shardCollection : "" + coll, key : {a : 1}}));
+ assert.commandWorked(config.adminCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB().getName(), 'shard0000');
+ assert.commandWorked(config.adminCommand({shardCollection: "" + coll, key: {a: 1}}));
-var getDirectShardedConn = function( st, collName ) {
+ var getDirectShardedConn = function(st, collName) {
- var shardConnWithVersion = new Mongo( st.shard0.host );
+ var shardConnWithVersion = new Mongo(st.shard0.host);
- var configConnStr = st._configDB;
+ var configConnStr = st._configDB;
- var maxChunk = st.s0.getCollection( "config.chunks" )
- .find({ ns : collName }).sort({ lastmod : -1 }).next();
+ var maxChunk =
+ st.s0.getCollection("config.chunks").find({ns: collName}).sort({lastmod: -1}).next();
- var ssvInitCmd = { setShardVersion : collName,
- authoritative : true,
- configdb : configConnStr,
- version : maxChunk.lastmod,
- shard: 'shard0000',
- versionEpoch : maxChunk.lastmodEpoch };
+ var ssvInitCmd = {
+ setShardVersion: collName,
+ authoritative: true,
+ configdb: configConnStr,
+ version: maxChunk.lastmod,
+ shard: 'shard0000',
+ versionEpoch: maxChunk.lastmodEpoch
+ };
- printjson(ssvInitCmd);
- assert.commandWorked( shardConnWithVersion.getDB( "admin" ).runCommand( ssvInitCmd ) );
+ printjson(ssvInitCmd);
+ assert.commandWorked(shardConnWithVersion.getDB("admin").runCommand(ssvInitCmd));
- return shardConnWithVersion;
-};
+ return shardConnWithVersion;
+ };
-var shard0Coll = getDirectShardedConn(st, coll.getFullName()).getCollection(coll.getFullName());
+ var shard0Coll = getDirectShardedConn(st, coll.getFullName()).getCollection(coll.getFullName());
-// No shard key
-shard0Coll.remove({});
-assert.writeError(shard0Coll.save({ _id: 3 }));
+ // No shard key
+ shard0Coll.remove({});
+ assert.writeError(shard0Coll.save({_id: 3}));
-// Full shard key in save
-assert.writeOK(shard0Coll.save({ _id: 1, a: 1 }));
+ // Full shard key in save
+ assert.writeOK(shard0Coll.save({_id: 1, a: 1}));
-// Full shard key on replacement (basically the same as above)
-shard0Coll.remove({});
-assert.writeOK(shard0Coll.update({ _id: 1 }, { a: 1 }, true));
+ // Full shard key on replacement (basically the same as above)
+ shard0Coll.remove({});
+ assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}, true));
-// Full shard key after $set
-shard0Coll.remove({});
-assert.writeOK(shard0Coll.update({ _id: 1 }, { $set: { a: 1 }}, true));
+ // Full shard key after $set
+ shard0Coll.remove({});
+ assert.writeOK(shard0Coll.update({_id: 1}, {$set: {a: 1}}, true));
-// Update existing doc (replacement), same shard key value
-assert.writeOK(shard0Coll.update({ _id: 1 }, { a: 1 }));
+ // Update existing doc (replacement), same shard key value
+ assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}));
-//Update existing doc ($set), same shard key value
-assert.writeOK(shard0Coll.update({ _id: 1 }, { $set: { a: 1 }}));
+ // Update existing doc ($set), same shard key value
+ assert.writeOK(shard0Coll.update({_id: 1}, {$set: {a: 1}}));
-// Error due to mutating the shard key (replacement)
-assert.writeError(shard0Coll.update({ _id: 1 }, { b: 1 }));
+ // Error due to mutating the shard key (replacement)
+ assert.writeError(shard0Coll.update({_id: 1}, {b: 1}));
-// Error due to mutating the shard key ($set)
-assert.writeError(shard0Coll.update({ _id: 1 }, { $unset: { a: 1 }}));
+ // Error due to mutating the shard key ($set)
+ assert.writeError(shard0Coll.update({_id: 1}, {$unset: {a: 1}}));
-// Error due to removing all the embedded fields.
-shard0Coll.remove({});
+ // Error due to removing all the embedded fields.
+ shard0Coll.remove({});
-assert.writeOK(shard0Coll.save({ _id: 2, a: { c: 1, b: 1 }}));
+ assert.writeOK(shard0Coll.save({_id: 2, a: {c: 1, b: 1}}));
-assert.writeError(shard0Coll.update({}, { $unset: { "a.c": 1 }}));
-assert.writeError(shard0Coll.update({}, { $unset: { "a.b": 1, "a.c": 1 }}));
+ assert.writeError(shard0Coll.update({}, {$unset: {"a.c": 1}}));
+ assert.writeError(shard0Coll.update({}, {$unset: {"a.b": 1, "a.c": 1}}));
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/update_sharded.js b/jstests/sharding/update_sharded.js
index e76521f2377..42a2954107e 100644
--- a/jstests/sharding/update_sharded.js
+++ b/jstests/sharding/update_sharded.js
@@ -2,103 +2,99 @@
// since shard key is immutable.
(function() {
-var s = new ShardingTest({ name: "auto1", shards: 2, mongos: 1 });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-
-// repeat same tests with hashed shard key, to ensure identical behavior
-s.adminCommand( { shardcollection : "test.update0" , key : { key : 1 } } );
-s.adminCommand( { shardcollection : "test.update1" , key : { key : "hashed" } } );
-
-db = s.getDB( "test" );
-
-for(i=0; i < 2; i++){
- coll = db.getCollection("update" + i);
-
- coll.insert({_id:1, key:1});
-
- // these are both upserts
- coll.save({_id:2, key:2});
- coll.update({_id:3, key:3}, {$set: {foo: 'bar'}}, {upsert: true});
-
- assert.eq(coll.count(), 3, "count A");
- assert.eq(coll.findOne({_id:3}).key, 3 , "findOne 3 key A");
- assert.eq(coll.findOne({_id:3}).foo, 'bar' , "findOne 3 foo A");
-
- // update existing using save()
- coll.save({_id:1, key:1, other:1});
-
- // update existing using update()
- coll.update({_id:2}, {key:2, other:2});
- coll.update({_id:3}, {key:3, other:3});
-
- // do a replacement-style update which queries the shard key and keeps it constant
- coll.save( {_id:4, key:4} );
- coll.update({key:4}, {key:4, other:4});
- assert.eq( coll.find({key:4, other:4}).count() , 1 , 'replacement update error');
- coll.remove( {_id:4} );
-
- assert.eq(coll.count(), 3, "count B");
- coll.find().forEach(function(x){
- assert.eq(x._id, x.key, "_id == key");
- assert.eq(x._id, x.other, "_id == other");
- });
-
- assert.writeError(coll.update({ _id: 1, key: 1 }, { $set: { key: 2 }}));
- assert.eq(coll.findOne({_id:1}).key, 1, 'key unchanged');
-
- assert.writeOK(coll.update({ _id: 1, key: 1 }, { $set: { foo: 2 }}));
-
- coll.update( { key : 17 } , { $inc : { x : 5 } } , true );
- assert.eq( 5 , coll.findOne( { key : 17 } ).x , "up1" );
-
- coll.update( { key : 18 } , { $inc : { x : 5 } } , true , true );
- assert.eq( 5 , coll.findOne( { key : 18 } ).x , "up2" );
-
- // Make sure we can extract exact _id from certain queries
- assert.writeOK(coll.update({_id : ObjectId()}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({_id : {$eq : ObjectId()}}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({_id : {$all : [ObjectId()]}}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({$or : [{_id : ObjectId()}]}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({$and : [{_id : ObjectId()}]}, {$set : {x : 1}}, {multi : false}));
-
- // Invalid extraction of exact _id from query
- assert.writeError(coll.update({}, {$set : {x : 1}}, {multi : false}));
- assert.writeError(coll.update({_id : {$gt : ObjectId()}}, {$set : {x : 1}}, {multi : false}));
- assert.writeError(coll.update({_id : {$in : [ObjectId()]}}, {$set : {x : 1}}, {multi : false}));
- assert.writeError(coll.update({$or : [{_id : ObjectId()}, {_id : ObjectId()}]},
- {$set : {x : 1}},
- {multi : false}));
- assert.writeError(coll.update({$and : [{_id : ObjectId()}, {_id : ObjectId()}]},
- {$set : {x : 1}},
- {multi : false}));
- assert.writeError(coll.update({'_id.x' : ObjectId()}, {$set : {x : 1}}, {multi : false}));
-
- // Make sure we can extract exact shard key from certain queries
- assert.writeOK(coll.update({key : ObjectId()}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({key : {$eq : ObjectId()}}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({key : {$all : [ObjectId()]}}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({$or : [{key : ObjectId()}]}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({$and : [{key : ObjectId()}]}, {$set : {x : 1}}, {multi : false}));
-
- // Invalid extraction of exact key from query
- assert.writeError(coll.update({}, {$set : {x : 1}}, {multi : false}));
- assert.writeError(coll.update({key : {$gt : ObjectId()}}, {$set : {x : 1}}, {multi : false}));
- assert.writeError(coll.update({key : {$in : [ObjectId()]}}, {$set : {x : 1}}, {multi : false}));
- assert.writeError(coll.update({$or : [{key : ObjectId()}, {key : ObjectId()}]},
- {$set : {x : 1}},
- {multi : false}));
- assert.writeError(coll.update({$and : [{key : ObjectId()}, {key : ObjectId()}]},
- {$set : {x : 1}},
- {multi : false}));
- assert.writeError(coll.update({'key.x' : ObjectId()}, {$set : {x : 1}}, {multi : false}));
-
- // Make sure failed shard key or _id extraction doesn't affect the other
- assert.writeOK(coll.update({'_id.x' : ObjectId(), key : 1}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({_id : ObjectId(), 'key.x' : 1}, {$set : {x : 1}}, {multi : false}));
-}
-
-s.stop();
+ var s = new ShardingTest({name: "auto1", shards: 2, mongos: 1});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+
+ // repeat same tests with hashed shard key, to ensure identical behavior
+ s.adminCommand({shardcollection: "test.update0", key: {key: 1}});
+ s.adminCommand({shardcollection: "test.update1", key: {key: "hashed"}});
+
+ db = s.getDB("test");
+
+ for (i = 0; i < 2; i++) {
+ coll = db.getCollection("update" + i);
+
+ coll.insert({_id: 1, key: 1});
+
+ // these are both upserts
+ coll.save({_id: 2, key: 2});
+ coll.update({_id: 3, key: 3}, {$set: {foo: 'bar'}}, {upsert: true});
+
+ assert.eq(coll.count(), 3, "count A");
+ assert.eq(coll.findOne({_id: 3}).key, 3, "findOne 3 key A");
+ assert.eq(coll.findOne({_id: 3}).foo, 'bar', "findOne 3 foo A");
+
+ // update existing using save()
+ coll.save({_id: 1, key: 1, other: 1});
+
+ // update existing using update()
+ coll.update({_id: 2}, {key: 2, other: 2});
+ coll.update({_id: 3}, {key: 3, other: 3});
+
+ // do a replacement-style update which queries the shard key and keeps it constant
+ coll.save({_id: 4, key: 4});
+ coll.update({key: 4}, {key: 4, other: 4});
+ assert.eq(coll.find({key: 4, other: 4}).count(), 1, 'replacement update error');
+ coll.remove({_id: 4});
+
+ assert.eq(coll.count(), 3, "count B");
+ coll.find().forEach(function(x) {
+ assert.eq(x._id, x.key, "_id == key");
+ assert.eq(x._id, x.other, "_id == other");
+ });
+
+ assert.writeError(coll.update({_id: 1, key: 1}, {$set: {key: 2}}));
+ assert.eq(coll.findOne({_id: 1}).key, 1, 'key unchanged');
+
+ assert.writeOK(coll.update({_id: 1, key: 1}, {$set: {foo: 2}}));
+
+ coll.update({key: 17}, {$inc: {x: 5}}, true);
+ assert.eq(5, coll.findOne({key: 17}).x, "up1");
+
+ coll.update({key: 18}, {$inc: {x: 5}}, true, true);
+ assert.eq(5, coll.findOne({key: 18}).x, "up2");
+
+ // Make sure we can extract exact _id from certain queries
+ assert.writeOK(coll.update({_id: ObjectId()}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$or: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$and: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+
+ // Invalid extraction of exact _id from query
+ assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({_id: {$gt: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({_id: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update(
+ {$or: [{_id: ObjectId()}, {_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update(
+ {$and: [{_id: ObjectId()}, {_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({'_id.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
+
+ // Make sure we can extract exact shard key from certain queries
+ assert.writeOK(coll.update({key: ObjectId()}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({key: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({key: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$or: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$and: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+
+ // Invalid extraction of exact key from query
+ assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({key: {$gt: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({key: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update(
+ {$or: [{key: ObjectId()}, {key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update(
+ {$and: [{key: ObjectId()}, {key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({'key.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
+
+ // Make sure failed shard key or _id extraction doesn't affect the other
+ assert.writeOK(coll.update({'_id.x': ObjectId(), key: 1}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: ObjectId(), 'key.x': 1}, {$set: {x: 1}}, {multi: false}));
+ }
+
+ s.stop();
})();
diff --git a/jstests/sharding/upsert_sharded.js b/jstests/sharding/upsert_sharded.js
index 7a31c350ef1..bf880df027f 100644
--- a/jstests/sharding/upsert_sharded.js
+++ b/jstests/sharding/upsert_sharded.js
@@ -3,105 +3,100 @@
// NOTE: Generic upsert behavior tests belong in the core suite
//
(function() {
-'use strict';
-
-var st = new ShardingTest({ shards : 2, mongos : 1 });
-
-var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
-
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-
-var upsertedResult = function(query, expr) {
- coll.remove({});
- return coll.update(query, expr, { upsert : true });
-};
-
-var upsertedField = function(query, expr, fieldName) {
- assert.writeOK(upsertedResult(query, expr));
- return coll.findOne()[fieldName];
-};
-
-var upsertedId = function(query, expr) {
- return upsertedField(query, expr, "_id");
-};
-
-var upsertedXVal = function(query, expr) {
- return upsertedField(query, expr, "x");
-};
-
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
-assert.commandWorked(admin.runCommand({ shardCollection : coll + "", key : { x : 1 } }));
-assert.commandWorked(admin.runCommand({ split : coll + "", middle : { x : 0 } }));
-assert.commandWorked(admin.runCommand({ moveChunk : coll + "",
- find : { x : 0 },
- to : shards[1]._id,
- _waitForDelete : true }));
-
-st.printShardingStatus();
-
-// upserted update replacement would result in no shard key
-assert.writeError(upsertedResult({ x : 1 }, {}));
-
-// updates with upsert must contain shard key in query when $op style
-assert.eq(1, upsertedXVal({ x : 1 }, { $set : { a : 1 } }));
-assert.eq(1, upsertedXVal({ x : { $eq : 1 } }, { $set : { a : 1 } }));
-assert.eq(1, upsertedXVal({ x : { $all : [1] } }, { $set : { a : 1 } }));
-assert.eq(1, upsertedXVal({ $and : [{ x : { $eq : 1 } }] }, { $set : { a : 1 } }));
-assert.eq(1, upsertedXVal({ $or : [{ x : { $eq : 1 } }] }, { $set : { a : 1 } }));
-
-// shard key not extracted
-assert.writeError(upsertedResult({}, { $set : { a : 1, x : 1 } }));
-assert.writeError(upsertedResult({ x : { $gt : 1 } }, { $set : { a : 1, x : 1 } }));
-assert.writeError(upsertedResult({ x : { $in : [1] } }, { $set : { a : 1, x : 1 } }));
-
-// Shard key type errors
-assert.writeError(upsertedResult({ x : undefined }, { $set : { a : 1 } }));
-assert.writeError(upsertedResult({ x : [1, 2] }, { $set : { a : 1 } }));
-assert.writeError(upsertedResult({ x : { $eq : { $gt : 5 } } }, { $set : { a : 1 } }));
-// Regex shard key is not extracted from queries, even exact matches
-assert.writeError(upsertedResult({ x : { $eq : /abc/ } }, { $set : { a : 1 } }));
-
-// nested field extraction always fails with non-nested key - like _id, we require setting the
-// elements directly
-assert.writeError(upsertedResult({ "x.x" : 1 }, { $set : { a : 1 } }));
-assert.writeError(upsertedResult({ "x.x" : { $eq : 1 } }, { $set : { a : 1 } }));
-
-coll.drop();
-
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
-assert.commandWorked(admin.runCommand({ shardCollection : coll + "", key : { 'x.x' : 1 } }));
-assert.commandWorked( admin.runCommand({ split : coll + "", middle : { 'x.x' : 0 } }));
-assert.commandWorked( admin.runCommand({ moveChunk : coll + "",
- find : { 'x.x' : 0 },
- to : shards[1]._id,
- _waitForDelete : true }));
-
-st.printShardingStatus();
-
-// nested field extraction with nested shard key
-assert.docEq({ x : 1 }, upsertedXVal({ "x.x" : 1 }, { $set : { a : 1 } }));
-assert.docEq({ x : 1 }, upsertedXVal({ "x.x" : { $eq : 1 } }, { $set : { a : 1 } }));
-assert.docEq({ x : 1 }, upsertedXVal({ "x.x" : { $all : [1] } }, { $set : { a : 1 } }));
-assert.docEq({ x : 1 }, upsertedXVal({ $and : [{ "x.x" : { $eq : 1 } }] }, { $set : { a : 1 } }));
-assert.docEq({ x : 1 }, upsertedXVal({ $or : [{ "x.x" : { $eq : 1 } }] }, { $set : { a : 1 } }));
-
-// Can specify siblings of nested shard keys
-assert.docEq({ x : 1, y : 1 }, upsertedXVal({ "x.x" : 1, "x.y" : 1 }, { $set : { a : 1 } }));
-assert.docEq({ x : 1, y : { z : 1 } },
- upsertedXVal({ "x.x" : 1, "x.y.z" : 1 }, { $set : { a : 1 } }));
-
-// No arrays at any level
-assert.writeError(upsertedResult({ "x.x" : [] }, { $set : { a : 1 } }));
-assert.writeError(upsertedResult({ x : { x : [] } }, { $set : { a : 1 } }));
-assert.writeError(upsertedResult({ x : [{ x : 1 }] }, { $set : { a : 1 } }));
-
-// Can't set sub-fields of nested key
-assert.writeError(upsertedResult({ "x.x.x" : { $eq : 1 } }, { $set : { a : 1 } }));
-
-st.stop();
+ 'use strict';
+
+ var st = new ShardingTest({shards: 2, mongos: 1});
+
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
+ var coll = mongos.getCollection("foo.bar");
+
+ assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+ st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+
+ var upsertedResult = function(query, expr) {
+ coll.remove({});
+ return coll.update(query, expr, {upsert: true});
+ };
+
+ var upsertedField = function(query, expr, fieldName) {
+ assert.writeOK(upsertedResult(query, expr));
+ return coll.findOne()[fieldName];
+ };
+
+ var upsertedId = function(query, expr) {
+ return upsertedField(query, expr, "_id");
+ };
+
+ var upsertedXVal = function(query, expr) {
+ return upsertedField(query, expr, "x");
+ };
+
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {x: 1}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 0}}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: coll + "", find: {x: 0}, to: shards[1]._id, _waitForDelete: true}));
+
+ st.printShardingStatus();
+
+ // upserted update replacement would result in no shard key
+ assert.writeError(upsertedResult({x: 1}, {}));
+
+ // updates with upsert must contain shard key in query when $op style
+ assert.eq(1, upsertedXVal({x: 1}, {$set: {a: 1}}));
+ assert.eq(1, upsertedXVal({x: {$eq: 1}}, {$set: {a: 1}}));
+ assert.eq(1, upsertedXVal({x: {$all: [1]}}, {$set: {a: 1}}));
+ assert.eq(1, upsertedXVal({$and: [{x: {$eq: 1}}]}, {$set: {a: 1}}));
+ assert.eq(1, upsertedXVal({$or: [{x: {$eq: 1}}]}, {$set: {a: 1}}));
+
+ // shard key not extracted
+ assert.writeError(upsertedResult({}, {$set: {a: 1, x: 1}}));
+ assert.writeError(upsertedResult({x: {$gt: 1}}, {$set: {a: 1, x: 1}}));
+ assert.writeError(upsertedResult({x: {$in: [1]}}, {$set: {a: 1, x: 1}}));
+
+ // Shard key type errors
+ assert.writeError(upsertedResult({x: undefined}, {$set: {a: 1}}));
+ assert.writeError(upsertedResult({x: [1, 2]}, {$set: {a: 1}}));
+ assert.writeError(upsertedResult({x: {$eq: {$gt: 5}}}, {$set: {a: 1}}));
+ // Regex shard key is not extracted from queries, even exact matches
+ assert.writeError(upsertedResult({x: {$eq: /abc/}}, {$set: {a: 1}}));
+
+ // nested field extraction always fails with non-nested key - like _id, we require setting the
+ // elements directly
+ assert.writeError(upsertedResult({"x.x": 1}, {$set: {a: 1}}));
+ assert.writeError(upsertedResult({"x.x": {$eq: 1}}, {$set: {a: 1}}));
+
+ coll.drop();
+
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {'x.x': 1}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {'x.x': 0}}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: coll + "", find: {'x.x': 0}, to: shards[1]._id, _waitForDelete: true}));
+
+ st.printShardingStatus();
+
+ // nested field extraction with nested shard key
+ assert.docEq({x: 1}, upsertedXVal({"x.x": 1}, {$set: {a: 1}}));
+ assert.docEq({x: 1}, upsertedXVal({"x.x": {$eq: 1}}, {$set: {a: 1}}));
+ assert.docEq({x: 1}, upsertedXVal({"x.x": {$all: [1]}}, {$set: {a: 1}}));
+ assert.docEq({x: 1}, upsertedXVal({$and: [{"x.x": {$eq: 1}}]}, {$set: {a: 1}}));
+ assert.docEq({x: 1}, upsertedXVal({$or: [{"x.x": {$eq: 1}}]}, {$set: {a: 1}}));
+
+ // Can specify siblings of nested shard keys
+ assert.docEq({x: 1, y: 1}, upsertedXVal({"x.x": 1, "x.y": 1}, {$set: {a: 1}}));
+ assert.docEq({x: 1, y: {z: 1}}, upsertedXVal({"x.x": 1, "x.y.z": 1}, {$set: {a: 1}}));
+
+ // No arrays at any level
+ assert.writeError(upsertedResult({"x.x": []}, {$set: {a: 1}}));
+ assert.writeError(upsertedResult({x: {x: []}}, {$set: {a: 1}}));
+ assert.writeError(upsertedResult({x: [{x: 1}]}, {$set: {a: 1}}));
+
+ // Can't set sub-fields of nested key
+ assert.writeError(upsertedResult({"x.x.x": {$eq: 1}}, {$set: {a: 1}}));
+
+ st.stop();
})();
diff --git a/jstests/sharding/user_flags_sharded.js b/jstests/sharding/user_flags_sharded.js
index 1ceadd1b2fd..f629c3b759f 100644
--- a/jstests/sharding/user_flags_sharded.js
+++ b/jstests/sharding/user_flags_sharded.js
@@ -2,64 +2,63 @@
// then collection is sharded, flags get carried over.
(function() {
-if (jsTest.options().storageEngine === "mmapv1") {
- // the dbname and collection we'll be working with
- var dbname = "testDB";
- var coll = "userFlagsColl";
- var ns = dbname + "." + coll;
+ if (jsTest.options().storageEngine === "mmapv1") {
+ // the dbname and collection we'll be working with
+ var dbname = "testDB";
+ var coll = "userFlagsColl";
+ var ns = dbname + "." + coll;
- // First create fresh collection on a new standalone mongod
- var newShardConn = MongoRunner.runMongod({});
- var db1 = newShardConn.getDB( dbname );
- var t = db1.getCollection( coll );
- print(t);
- db1.getCollection( coll ).drop(); //in case collection already existed
- db1.createCollection( coll );
+ // First create fresh collection on a new standalone mongod
+ var newShardConn = MongoRunner.runMongod({});
+ var db1 = newShardConn.getDB(dbname);
+ var t = db1.getCollection(coll);
+ print(t);
+ db1.getCollection(coll).drop(); // in case collection already existed
+ db1.createCollection(coll);
- // Then verify the new collection has userFlags set to 0
- var collstats = db1.getCollection( coll ).stats();
- print( "*************** Fresh Collection Stats ************" );
- printjson( collstats );
- assert.eq( collstats.userFlags , 1 , "fresh collection doesn't have userFlags = 1 ");
+        // Then verify the new collection has userFlags set to 1
+ var collstats = db1.getCollection(coll).stats();
+ print("*************** Fresh Collection Stats ************");
+ printjson(collstats);
+ assert.eq(collstats.userFlags, 1, "fresh collection doesn't have userFlags = 1 ");
- // Now we modify the collection with the usePowerOf2Sizes flag
- var res = db1.runCommand( { "collMod" : coll , "usePowerOf2Sizes" : false } );
- assert.eq( res.ok , 1 , "collMod failed" );
+ // Now we modify the collection with the usePowerOf2Sizes flag
+ var res = db1.runCommand({"collMod": coll, "usePowerOf2Sizes": false});
+ assert.eq(res.ok, 1, "collMod failed");
- // and insert some stuff, for the hell of it
- var numdocs = 20;
- for( i=0; i < numdocs; i++){
- assert.writeOK(db1.getCollection( coll ).insert({ _id : i }));
- }
+ // and insert some stuff, for the hell of it
+ var numdocs = 20;
+ for (i = 0; i < numdocs; i++) {
+ assert.writeOK(db1.getCollection(coll).insert({_id: i}));
+ }
- // Next verify that userFlags has changed to 0
- collstats = db1.getCollection( coll ).stats();
- print( "*************** Collection Stats After CollMod ************" );
- printjson( collstats );
- assert.eq( collstats.userFlags , 0 , "modified collection should have userFlags = 0 ");
+ // Next verify that userFlags has changed to 0
+ collstats = db1.getCollection(coll).stats();
+ print("*************** Collection Stats After CollMod ************");
+ printjson(collstats);
+ assert.eq(collstats.userFlags, 0, "modified collection should have userFlags = 0 ");
- // start up a new sharded cluster, and add previous mongod
- var s = new ShardingTest({ name: "user_flags", shards: 1 });
- assert( s.admin.runCommand( { addshard: newShardConn.host , name: "myShard" } ).ok,
- "did not accept new shard" );
+ // start up a new sharded cluster, and add previous mongod
+ var s = new ShardingTest({name: "user_flags", shards: 1});
+ assert(s.admin.runCommand({addshard: newShardConn.host, name: "myShard"}).ok,
+ "did not accept new shard");
- // enable sharding of the collection. Only 1 chunk initially, so move it to
- // other shard to create the collection on that shard
- s.adminCommand( { enablesharding : dbname } );
- s.adminCommand( { shardcollection : ns , key: { _id : 1 } } );
- s.adminCommand({ moveChunk: ns, find: { _id: 1 },
- to: "shard0000", _waitForDelete: true });
+ // enable sharding of the collection. Only 1 chunk initially, so move it to
+ // other shard to create the collection on that shard
+ s.adminCommand({enablesharding: dbname});
+ s.adminCommand({shardcollection: ns, key: {_id: 1}});
+ s.adminCommand({moveChunk: ns, find: {_id: 1}, to: "shard0000", _waitForDelete: true});
- print( "*************** Collection Stats On Other Shard ************" );
- var shard2 = s._connections[0].getDB( dbname );
- shard2stats = shard2.getCollection( coll ).stats();
- printjson( shard2stats );
+ print("*************** Collection Stats On Other Shard ************");
+ var shard2 = s._connections[0].getDB(dbname);
+ shard2stats = shard2.getCollection(coll).stats();
+ printjson(shard2stats);
- assert.eq( shard2stats.count , numdocs , "moveChunk didn't succeed" );
- assert.eq( shard2stats.userFlags , 0 , "new shard should also have userFlags = 0 ");
+ assert.eq(shard2stats.count, numdocs, "moveChunk didn't succeed");
+ assert.eq(shard2stats.userFlags, 0, "new shard should also have userFlags = 0 ");
- MongoRunner.stopMongod(newShardConn);
- s.stop();
-}
+ MongoRunner.stopMongod(newShardConn);
+ s.stop();
+ }
})();
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index c79d645b0fc..0e15e6180b1 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -1,77 +1,93 @@
(function() {
-var s = new ShardingTest({ name: "version1", shards: 1 });
-
-s.adminCommand( { enablesharding : "alleyinsider" } );
-s.adminCommand( { shardcollection : "alleyinsider.foo" , key : { num : 1 } } );
-
-// alleyinsider.foo is supposed to have one chunk, version 1|0, in shard000
-s.printShardingStatus();
-
-a = s._connections[0].getDB( "admin" );
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo", configdb: s._configDB }));
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: "a" }));
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- authoritative: true }));
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(2, 0)}),
- "should have failed b/c no auth" );
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(2, 0),
- authoritative: true }),
- "should have failed because first setShardVersion needs shard info");
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(2, 0),
- authoritative: true,
- shard: "shard0000",
- shardHost: s.s.host }),
- "should have failed because version is config is 1|0");
-
-var epoch = s.getDB('config').chunks.findOne().lastmodEpoch;
-assert.commandWorked( a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(1, 0),
- versionEpoch: epoch,
- authoritative: true,
- shard: "shard0000",
- shardHost: s.s.host }),
- "should have worked" );
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: "a",
- version: new Timestamp(0, 2),
- versionEpoch: epoch }));
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(0, 2),
- versionEpoch: epoch }));
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(0, 1),
- versionEpoch: epoch }));
-
-// the only way that setSharVersion passes is if the shard agrees with the version
-// the shard takes its version from config directly
-// TODO bump timestamps in config
-// assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).oldVersion.i , 2 , "oldVersion" );
-
-// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get version A" );
-// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get version B" );
-
-s.stop();
+ var s = new ShardingTest({name: "version1", shards: 1});
+
+ s.adminCommand({enablesharding: "alleyinsider"});
+ s.adminCommand({shardcollection: "alleyinsider.foo", key: {num: 1}});
+
+ // alleyinsider.foo is supposed to have one chunk, version 1|0, in shard000
+ s.printShardingStatus();
+
+ a = s._connections[0].getDB("admin");
+
+ assert.commandFailed(
+ a.runCommand({setShardVersion: "alleyinsider.foo", configdb: s._configDB}));
+
+ assert.commandFailed(
+ a.runCommand({setShardVersion: "alleyinsider.foo", configdb: s._configDB, version: "a"}));
+
+ assert.commandFailed(a.runCommand(
+ {setShardVersion: "alleyinsider.foo", configdb: s._configDB, authoritative: true}));
+
+ assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(2, 0)
+ }),
+ "should have failed b/c no auth");
+
+ assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(2, 0),
+ authoritative: true
+ }),
+ "should have failed because first setShardVersion needs shard info");
+
+ assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(2, 0),
+ authoritative: true,
+ shard: "shard0000",
+ shardHost: s.s.host
+ }),
+ "should have failed because version is config is 1|0");
+
+ var epoch = s.getDB('config').chunks.findOne().lastmodEpoch;
+ assert.commandWorked(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(1, 0),
+ versionEpoch: epoch,
+ authoritative: true,
+ shard: "shard0000",
+ shardHost: s.s.host
+ }),
+ "should have worked");
+
+ assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: "a",
+ version: new Timestamp(0, 2),
+ versionEpoch: epoch
+ }));
+
+ assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(0, 2),
+ versionEpoch: epoch
+ }));
+
+ assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(0, 1),
+ versionEpoch: epoch
+ }));
+
+    // the only way that setShardVersion passes is if the shard agrees with the version
+ // the shard takes its version from config directly
+ // TODO bump timestamps in config
+ // assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB ,
+ // version : 3 } ).oldVersion.i , 2 , "oldVersion" );
+
+ // assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get
+ // version A" );
+ // assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get
+ // version B" );
+
+ s.stop();
})();
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
index 0bf8c5892b5..6bdc4601206 100644
--- a/jstests/sharding/version2.js
+++ b/jstests/sharding/version2.js
@@ -1,21 +1,22 @@
(function() {
-'use strict';
+ 'use strict';
-var s = new ShardingTest({ name: "version2", shards: 1 });
+ var s = new ShardingTest({name: "version2", shards: 1});
-assert.commandWorked(s.s0.adminCommand({ enablesharding: "alleyinsider" }));
-assert.commandWorked(s.s0.adminCommand({ shardcollection: "alleyinsider.foo", key: { num: 1 } }));
-assert.commandWorked(s.s0.adminCommand({ shardcollection: "alleyinsider.bar", key: { num: 1 } }));
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "alleyinsider"}));
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "alleyinsider.foo", key: {num: 1}}));
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "alleyinsider.bar", key: {num: 1}}));
-var a = s._connections[0].getDB("admin");
+ var a = s._connections[0].getDB("admin");
-// Setup from one client
-assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).mine.i, 0);
-assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).global.i, 0);
+ // Setup from one client
+ assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i,
+ 0);
+ assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.i,
+ 0);
-var fooEpoch = s.getDB('config').chunks.findOne({ ns: 'alleyinsider.foo' }).lastmodEpoch;
-assert.commandWorked(
- a.runCommand({
+ var fooEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.foo'}).lastmodEpoch;
+ assert.commandWorked(a.runCommand({
setShardVersion: "alleyinsider.foo",
configdb: s._configDB,
authoritative: true,
@@ -25,44 +26,54 @@ assert.commandWorked(
shardHost: s.s.host,
}));
-printjson(s.config.chunks.findOne());
+ printjson(s.config.chunks.findOne());
-assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).mine.t, 1);
-assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).global.t, 1);
+ assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.t,
+ 1);
+ assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t,
+ 1);
-// From a different client
-var a2 = connect(s._connections[0].name + "/admin");
+ // From a different client
+ var a2 = connect(s._connections[0].name + "/admin");
-assert.eq(a2.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).global.t, 1, "a2 global 1");
-assert.eq(a2.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).mine.i, 0, "a2 mine 1");
+ assert.eq(
+ a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t,
+ 1,
+ "a2 global 1");
+ assert.eq(a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i,
+ 0,
+ "a2 mine 1");
-function simpleFindOne(){
- return a2.getMongo().getDB("alleyinsider").foo.findOne();
-}
+ function simpleFindOne() {
+ return a2.getMongo().getDB("alleyinsider").foo.findOne();
+ }
-var barEpoch = s.getDB('config').chunks.findOne({ ns: 'alleyinsider.bar' }).lastmodEpoch;
-assert.commandWorked(a2.runCommand({ setShardVersion: "alleyinsider.bar",
- configdb: s._configDB,
- version: new Timestamp(1, 0),
- versionEpoch: barEpoch,
- shard: 'shard0000',
- authoritative: true }),
- "setShardVersion bar temp");
-
-assert.throws(simpleFindOne, [], "should complain about not in sharded mode 1");
+ var barEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.bar'}).lastmodEpoch;
+ assert.commandWorked(a2.runCommand({
+ setShardVersion: "alleyinsider.bar",
+ configdb: s._configDB,
+ version: new Timestamp(1, 0),
+ versionEpoch: barEpoch,
+ shard: 'shard0000',
+ authoritative: true
+ }),
+ "setShardVersion bar temp");
+ assert.throws(simpleFindOne, [], "should complain about not in sharded mode 1");
-// the only way that setSharVersion passes is if the shard agrees with the version
-// the shard takes its version from config directly
-// TODO bump timestamps in config
-// assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version: 2 }).ok == 1, "setShardVersion a2-1");
+ // the only way that setSharVersion passes is if the shard agrees with the version
+ // the shard takes its version from config directly
+ // TODO bump timestamps in config
+ // assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version:
+ // 2 }).ok == 1, "setShardVersion a2-1");
-// simpleFindOne(); // now should run ok
+ // simpleFindOne(); // now should run ok
-// assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version: 3 }).ok == 1, "setShardVersion a2-2");
+ // assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version:
+ // 3 }).ok == 1, "setShardVersion a2-2");
-// simpleFindOne(); // newer version is ok
+ // simpleFindOne(); // newer version is ok
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index dc8abc71597..110fa7ddd9f 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -2,154 +2,157 @@
* Tests the auto split will be triggered when using write commands.
*/
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 1, other: { chunkSize: 1 }});
+ var st = new ShardingTest({shards: 1, other: {chunkSize: 1}});
-var configDB = st.s.getDB('config');
-assert.commandWorked(configDB.adminCommand({ enableSharding: 'test' }));
-assert.commandWorked(configDB.adminCommand({ shardCollection: 'test.insert', key: { x: 1 }}));
+ var configDB = st.s.getDB('config');
+ assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
-var doc1k = (new Array(1024)).join('x');
-var testDB = st.s.getDB('test');
+ var doc1k = (new Array(1024)).join('x');
+ var testDB = st.s.getDB('test');
-jsTest.log('Test single batch insert should auto-split');
+ jsTest.log('Test single batch insert should auto-split');
-assert.eq(1, configDB.chunks.find().itcount());
+ assert.eq(1, configDB.chunks.find().itcount());
-// Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
-// we are going to be conservative.
-for (var x = 0; x < 3100; x++) {
- var res = testDB.runCommand({ insert: 'insert',
- documents: [{ x: x, v: doc1k }],
- ordered: false,
- writeConcern: { w: 1 }});
+ // Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
+ // we are going to be conservative.
+ for (var x = 0; x < 3100; x++) {
+ var res = testDB.runCommand({
+ insert: 'insert',
+ documents: [{x: x, v: doc1k}],
+ ordered: false,
+ writeConcern: {w: 1}
+ });
- assert(res.ok, 'insert failed: ' + tojson(res));
-}
+ assert(res.ok, 'insert failed: ' + tojson(res));
+ }
-// Inserted batch is a multiple of the chunkSize, expect the chunks to split into
-// more than 2.
-assert.gt(configDB.chunks.find().itcount(), 2);
-testDB.dropDatabase();
+ // Inserted batch is a multiple of the chunkSize, expect the chunks to split into
+ // more than 2.
+ assert.gt(configDB.chunks.find().itcount(), 2);
+ testDB.dropDatabase();
-jsTest.log('Test single batch update should auto-split');
+ jsTest.log('Test single batch update should auto-split');
-configDB.adminCommand({ enableSharding: 'test' });
-configDB.adminCommand({ shardCollection: 'test.update', key: { x: 1 }});
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}});
-assert.eq(1, configDB.chunks.find().itcount());
+ assert.eq(1, configDB.chunks.find().itcount());
-for (var x = 0; x < 1100; x++) {
- var res = testDB.runCommand({ update: 'update',
- updates: [{ q: { x: x }, u: { x: x, v: doc1k }, upsert: true }],
- ordered: false,
- writeConcern: { w: 1 }});
+ for (var x = 0; x < 1100; x++) {
+ var res = testDB.runCommand({
+ update: 'update',
+ updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
+ ordered: false,
+ writeConcern: {w: 1}
+ });
- assert(res.ok, 'update failed: ' + tojson(res));
-}
+ assert(res.ok, 'update failed: ' + tojson(res));
+ }
-assert.gt(configDB.chunks.find().itcount(), 1);
-testDB.dropDatabase();
+ assert.gt(configDB.chunks.find().itcount(), 1);
+ testDB.dropDatabase();
-jsTest.log('Test single delete should not auto-split');
+ jsTest.log('Test single delete should not auto-split');
-configDB.adminCommand({ enableSharding: 'test' });
-configDB.adminCommand({ shardCollection: 'test.delete', key: { x: 1 }});
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}});
-assert.eq(1, configDB.chunks.find().itcount());
+ assert.eq(1, configDB.chunks.find().itcount());
-for (var x = 0; x < 1100; x++) {
- var res = testDB.runCommand({ delete: 'delete',
- deletes: [{ q: { x: x, v: doc1k }, limit : NumberInt(0) }],
- ordered: false,
- writeConcern: { w: 1 }});
+ for (var x = 0; x < 1100; x++) {
+ var res = testDB.runCommand({
+ delete: 'delete',
+ deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
+ ordered: false,
+ writeConcern: {w: 1}
+ });
- assert(res.ok, 'delete failed: ' + tojson(res));
-}
+ assert(res.ok, 'delete failed: ' + tojson(res));
+ }
-assert.eq(1, configDB.chunks.find().itcount());
-testDB.dropDatabase();
+ assert.eq(1, configDB.chunks.find().itcount());
+ testDB.dropDatabase();
-jsTest.log('Test batched insert should auto-split');
+ jsTest.log('Test batched insert should auto-split');
-configDB.adminCommand({ enableSharding: 'test' });
-configDB.adminCommand({ shardCollection: 'test.insert', key: { x: 1 }});
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}});
-assert.eq(1, configDB.chunks.find().itcount());
+ assert.eq(1, configDB.chunks.find().itcount());
-// Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
-// we are going to be conservative.
-for (var x = 0; x < 1100; x += 400) {
- var docs = [];
+ // Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
+ // we are going to be conservative.
+ for (var x = 0; x < 1100; x += 400) {
+ var docs = [];
- for (var y = 0; y < 400; y++) {
- docs.push({ x: (x + y), v: doc1k });
- }
+ for (var y = 0; y < 400; y++) {
+ docs.push({x: (x + y), v: doc1k});
+ }
- var res = testDB.runCommand({ insert: 'insert',
- documents: docs,
- ordered: false,
- writeConcern: { w: 1 }});
+ var res = testDB.runCommand(
+ {insert: 'insert', documents: docs, ordered: false, writeConcern: {w: 1}});
+ assert(res.ok, 'insert failed: ' + tojson(res));
+ }
+
+ assert.gt(configDB.chunks.find().itcount(), 1);
+ testDB.dropDatabase();
- assert(res.ok, 'insert failed: ' + tojson(res));
-}
+ jsTest.log('Test batched update should auto-split');
-assert.gt(configDB.chunks.find().itcount(), 1);
-testDB.dropDatabase();
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}});
-jsTest.log('Test batched update should auto-split');
+ assert.eq(1, configDB.chunks.find().itcount());
-configDB.adminCommand({ enableSharding: 'test' });
-configDB.adminCommand({ shardCollection: 'test.update', key: { x: 1 }});
+ for (var x = 0; x < 1100; x += 400) {
+ var docs = [];
-assert.eq(1, configDB.chunks.find().itcount());
+ for (var y = 0; y < 400; y++) {
+ var id = x + y;
+ docs.push({q: {x: id}, u: {x: id, v: doc1k}, upsert: true});
+ }
-for (var x = 0; x < 1100; x += 400) {
- var docs = [];
+ var res = testDB.runCommand(
+ {update: 'update', updates: docs, ordered: false, writeConcern: {w: 1}});
- for (var y = 0; y < 400; y++) {
- var id = x + y;
- docs.push({ q: { x: id }, u: { x: id, v: doc1k }, upsert: true });
+ assert(res.ok, 'update failed: ' + tojson(res));
}
- var res = testDB.runCommand({ update: 'update',
- updates: docs,
- ordered: false,
- writeConcern: { w: 1 }});
+ assert.gt(configDB.chunks.find().itcount(), 1);
+ testDB.dropDatabase();
- assert(res.ok, 'update failed: ' + tojson(res));
-}
+ jsTest.log('Test batched delete should not auto-split');
-assert.gt(configDB.chunks.find().itcount(), 1);
-testDB.dropDatabase();
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}});
-jsTest.log('Test batched delete should not auto-split');
+ assert.eq(1, configDB.chunks.find().itcount());
-configDB.adminCommand({ enableSharding: 'test' });
-configDB.adminCommand({ shardCollection: 'test.delete', key: { x: 1 }});
+ for (var x = 0; x < 1100; x += 400) {
+ var docs = [];
-assert.eq(1, configDB.chunks.find().itcount());
+ for (var y = 0; y < 400; y++) {
+ var id = x + y;
+ docs.push({q: {x: id, v: doc1k}, top: 0});
+ }
-for (var x = 0; x < 1100; x += 400) {
- var docs = [];
+ var res = testDB.runCommand({
+ delete: 'delete',
+ deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
+ ordered: false,
+ writeConcern: {w: 1}
+ });
- for (var y = 0; y < 400; y++) {
- var id = x + y;
- docs.push({ q: { x: id, v: doc1k }, top: 0 });
+ assert(res.ok, 'delete failed: ' + tojson(res));
}
- var res = testDB.runCommand({ delete: 'delete',
- deletes: [{ q: { x: x, v: doc1k }, limit : NumberInt(0) }],
- ordered: false,
- writeConcern: { w: 1 }});
-
- assert(res.ok, 'delete failed: ' + tojson(res));
-}
-
-assert.eq(1, configDB.chunks.find().itcount());
+ assert.eq(1, configDB.chunks.find().itcount());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/write_commands_sharding_state.js b/jstests/sharding/write_commands_sharding_state.js
index 7d0991870eb..ee4bf78958e 100644
--- a/jstests/sharding/write_commands_sharding_state.js
+++ b/jstests/sharding/write_commands_sharding_state.js
@@ -3,79 +3,80 @@
// @tags: [requires_persistence]
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2 });
+ var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2});
-var dbTestName = 'WriteCommandsTestDB';
+ var dbTestName = 'WriteCommandsTestDB';
-assert.commandWorked(st.s0.adminCommand({ enablesharding: dbTestName }));
-st.ensurePrimaryShard(dbTestName, 'shard0000');
+ assert.commandWorked(st.s0.adminCommand({enablesharding: dbTestName}));
+ st.ensurePrimaryShard(dbTestName, 'shard0000');
-assert.commandWorked(st.s0.adminCommand({ shardCollection: dbTestName + '.TestColl',
- key: { Key: 1 },
- unique: true }));
+ assert.commandWorked(st.s0.adminCommand(
+ {shardCollection: dbTestName + '.TestColl', key: {Key: 1}, unique: true}));
-// Split at keys 10 and 20
-assert.commandWorked(st.s0.adminCommand({ split: dbTestName + '.TestColl', middle: { Key: 10 } }));
-assert.commandWorked(st.s0.adminCommand({ split: dbTestName + '.TestColl', middle: { Key: 20 } }));
+ // Split at keys 10 and 20
+ assert.commandWorked(st.s0.adminCommand({split: dbTestName + '.TestColl', middle: {Key: 10}}));
+ assert.commandWorked(st.s0.adminCommand({split: dbTestName + '.TestColl', middle: {Key: 20}}));
-printjson(st.config.getSiblingDB('config').chunks.find().toArray());
+ printjson(st.config.getSiblingDB('config').chunks.find().toArray());
-// Move < 10 to shard0000, 10 and 20 to shard00001
-st.s0.adminCommand({ moveChunk: dbTestName + '.TestColl', find: { Key: 0 }, to: 'shard0000' });
-st.s0.adminCommand({ moveChunk: dbTestName + '.TestColl', find: { Key: 19 }, to: 'shard0001' });
-st.s0.adminCommand({ moveChunk: dbTestName + '.TestColl', find: { Key: 21 }, to: 'shard0001' });
+ // Move < 10 to shard0000, 10 and 20 to shard00001
+ st.s0.adminCommand({moveChunk: dbTestName + '.TestColl', find: {Key: 0}, to: 'shard0000'});
+ st.s0.adminCommand({moveChunk: dbTestName + '.TestColl', find: {Key: 19}, to: 'shard0001'});
+ st.s0.adminCommand({moveChunk: dbTestName + '.TestColl', find: {Key: 21}, to: 'shard0001'});
-printjson(st.config.getSiblingDB('config').chunks.find().toArray());
+ printjson(st.config.getSiblingDB('config').chunks.find().toArray());
-// Insert one document in each chunk, which we will use to change
-assert(st.s1.getDB(dbTestName).TestColl.insert({ Key: 1 }));
-assert(st.s1.getDB(dbTestName).TestColl.insert({ Key: 11 }));
-assert(st.s1.getDB(dbTestName).TestColl.insert({ Key: 21 }));
+ // Insert one document in each chunk, which we will use to change
+ assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 1}));
+ assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 11}));
+ assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 21}));
-// Make sure the documents are correctly placed
-printjson(st.d0.getDB(dbTestName).TestColl.find().toArray());
-printjson(st.d1.getDB(dbTestName).TestColl.find().toArray());
+ // Make sure the documents are correctly placed
+ printjson(st.d0.getDB(dbTestName).TestColl.find().toArray());
+ printjson(st.d1.getDB(dbTestName).TestColl.find().toArray());
-assert.eq(1, st.d0.getDB(dbTestName).TestColl.count());
-assert.eq(2, st.d1.getDB(dbTestName).TestColl.count());
+ assert.eq(1, st.d0.getDB(dbTestName).TestColl.count());
+ assert.eq(2, st.d1.getDB(dbTestName).TestColl.count());
-assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({ Key: 1 }).count());
-assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({ Key: 11 }).count());
-assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({ Key: 21 }).count());
+ assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({Key: 1}).count());
+ assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({Key: 11}).count());
+ assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({Key: 21}).count());
-// Move chunk [0, 19] to shard0000 and make sure the documents are correctly placed
-st.s0.adminCommand({ moveChunk: dbTestName + '.TestColl', find: { Key: 19 }, to: 'shard0000' });
+ // Move chunk [0, 19] to shard0000 and make sure the documents are correctly placed
+ st.s0.adminCommand({moveChunk: dbTestName + '.TestColl', find: {Key: 19}, to: 'shard0000'});
-printjson(st.config.getSiblingDB('config').chunks.find().toArray());
-printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
-printjson(st.d1.getDB(dbTestName).TestColl.find({}).toArray());
+ printjson(st.config.getSiblingDB('config').chunks.find().toArray());
+ printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
+ printjson(st.d1.getDB(dbTestName).TestColl.find({}).toArray());
-// Now restart all mongod instances, so they don't know yet that they are sharded
-st.restartMongod(0);
-st.restartMongod(1);
+ // Now restart all mongod instances, so they don't know yet that they are sharded
+ st.restartMongod(0);
+ st.restartMongod(1);
-// Now that both mongod shards are restarted, they don't know yet that they are part of a sharded
-// cluster until they get a setShardVerion command. Mongos instance s1 has stale metadata and
-// doesn't know that chunk with key 19 has moved to shard0000 so it will send it to shard0001 at
-// first.
-//
-// Shard0001 would only send back a stale config exception if it receives a setShardVersion
-// command. The bug that this test validates is that setShardVersion is indeed being sent (for more
-// information, see SERVER-19395).
-st.s1.getDB(dbTestName).TestColl.update({ Key: 11 }, { $inc: { Counter: 1 } }, { upsert: true });
+ // Now that both mongod shards are restarted, they don't know yet that they are part of a
+ // sharded
+ // cluster until they get a setShardVerion command. Mongos instance s1 has stale metadata and
+ // doesn't know that chunk with key 19 has moved to shard0000 so it will send it to shard0001 at
+ // first.
+ //
+ // Shard0001 would only send back a stale config exception if it receives a setShardVersion
+ // command. The bug that this test validates is that setShardVersion is indeed being sent (for
+ // more
+ // information, see SERVER-19395).
+ st.s1.getDB(dbTestName).TestColl.update({Key: 11}, {$inc: {Counter: 1}}, {upsert: true});
-printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
-printjson(st.d1.getDB(dbTestName).TestColl.find({}).toArray());
+ printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
+ printjson(st.d1.getDB(dbTestName).TestColl.find({}).toArray());
-assert.eq(2, st.d0.getDB(dbTestName).TestColl.count());
-assert.eq(1, st.d1.getDB(dbTestName).TestColl.count());
+ assert.eq(2, st.d0.getDB(dbTestName).TestColl.count());
+ assert.eq(1, st.d1.getDB(dbTestName).TestColl.count());
-assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({ Key: 1 }).count());
-assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({ Key: 11 }).count());
-assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({ Key: 21 }).count());
+ assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({Key: 1}).count());
+ assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({Key: 11}).count());
+ assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({Key: 21}).count());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index d9b771b3d51..fda81e12df8 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -1,21 +1,25 @@
// This test is skipped on 32-bit platforms
function setupTest() {
- var s = new ShardingTest({ shards: 2,
- mongos: 1,
- other: { rs: true,
- numReplicas: 2,
- chunkSize: 1,
- rsOptions: { oplogSize: 50 },
- enableBalancer: 1 } });
+ var s = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ other: {
+ rs: true,
+ numReplicas: 2,
+ chunkSize: 1,
+ rsOptions: {oplogSize: 50},
+ enableBalancer: 1
+ }
+ });
// Reduce chunk size to split
var config = s.getDB("config");
config.settings.save({_id: "chunksize", value: 1});
- assert.commandWorked(s.s0.adminCommand({ enablesharding: "test" }));
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'test-rs0');
- assert.commandWorked(s.s0.adminCommand({ shardcollection: "test.foo", key: { "_id": 1 } }));
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {"_id": 1}}));
return s;
}
@@ -29,17 +33,18 @@ function runTest(s) {
if (db.serverBuildInfo().bits == 32) {
// Make data ~0.5MB for 32 bit builds
- for (var i = 0; i < 512; i++) str += "a";
- }
- else {
+ for (var i = 0; i < 512; i++)
+ str += "a";
+ } else {
// Make data ~4MB
- for (var i = 0; i < 4*1024; i++) str += "a";
+ for (var i = 0; i < 4 * 1024; i++)
+ str += "a";
}
var bulk = db.foo.initializeUnorderedBulkOp();
- for (j=0; j<100; j++) {
- for (i=0; i<512; i++) {
- bulk.insert({ i: idInc++, val: valInc++, y:str });
+ for (j = 0; j < 100; j++) {
+ for (i = 0; i < 512; i++) {
+ bulk.insert({i: idInc++, val: valInc++, y: str});
}
}
assert.writeOK(bulk.execute());
@@ -56,11 +61,12 @@ function runTest(s) {
print("Shard 1: " + s.shard1.getCollection(db.foo + "").find().itcount());
for (var i = 0; i < 51200; i++) {
- if(!db.foo.findOne({ i: i }, { i: 1 })) {
+ if (!db.foo.findOne({i: i}, {i: 1})) {
print("Could not find: " + i);
}
- if(i % 100 == 0) print("Checked " + i);
+ if (i % 100 == 0)
+ print("Checked " + i);
}
print("PROBABLY WILL ASSERT NOW");
@@ -79,15 +85,19 @@ function runTest(s) {
s.printChunks();
s.printChangeLog();
- function map() { emit('count', 1); }
- function reduce(key, values) { return Array.sum(values); }
+ function map() {
+ emit('count', 1);
+ }
+ function reduce(key, values) {
+ return Array.sum(values);
+ }
jsTest.log("Test basic mapreduce...");
// Test basic mapReduce
for (var iter = 0; iter < 5; iter++) {
print("Test #" + iter);
- out = db.foo.mapReduce(map, reduce,"big_out");
+ out = db.foo.mapReduce(map, reduce, "big_out");
}
print("Testing output to different db...");
@@ -104,7 +114,7 @@ function runTest(s) {
print("Testing mr replace into DB " + iter);
- res = db.foo.mapReduce(map , reduce , { out: { replace: outCollStr, db: outDbStr } });
+ res = db.foo.mapReduce(map, reduce, {out: {replace: outCollStr, db: outDbStr}});
printjson(res);
outDb = s.getDB(outDbStr);
@@ -112,7 +122,7 @@ function runTest(s) {
obj = outColl.convertToSingleObject("value");
- assert.eq(51200 , obj.count , "Received wrong result " + obj.count);
+ assert.eq(51200, obj.count, "Received wrong result " + obj.count);
print("checking result field");
assert.eq(res.result.collection, outCollStr, "Wrong collection " + res.result.collection);
@@ -123,81 +133,85 @@ function runTest(s) {
// check nonAtomic output
assert.throws(function() {
- db.foo.mapReduce(map, reduce, { out: {replace: "big_out", nonAtomic: true } });
+ db.foo.mapReduce(map, reduce, {out: {replace: "big_out", nonAtomic: true}});
});
jsTest.log();
// Add docs with dup "i"
valInc = 0;
- for (j=0; j<100; j++) {
+ for (j = 0; j < 100; j++) {
print("Inserted document: " + (j * 100));
bulk = db.foo.initializeUnorderedBulkOp();
- for (i=0; i<512; i++) {
- bulk.insert({ i: idInc++, val: valInc++, y: str });
+ for (i = 0; i < 512; i++) {
+ bulk.insert({i: idInc++, val: valInc++, y: str});
}
// wait for replication to catch up
- assert.writeOK(bulk.execute({ w: 2 }));
+ assert.writeOK(bulk.execute({w: 2}));
}
jsTest.log("No errors...");
- map2 = function() { emit(this.val, 1); };
- reduce2 = function(key, values) { return Array.sum(values); };
+ map2 = function() {
+ emit(this.val, 1);
+ };
+ reduce2 = function(key, values) {
+ return Array.sum(values);
+ };
// Test merge
outcol = "big_out_merge";
// M/R quarter of the docs
jsTestLog("Test A");
- out = db.foo.mapReduce(map2, reduce2, { query: {i: {$lt: 25600} }, out: { merge: outcol } });
+ out = db.foo.mapReduce(map2, reduce2, {query: {i: {$lt: 25600}}, out: {merge: outcol}});
printjson(out);
- assert.eq(25600 , out.counts.emit , "Received wrong result");
- assert.eq(25600 , out.counts.output , "Received wrong result");
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(25600, out.counts.output, "Received wrong result");
// M/R further docs
jsTestLog("Test B");
out = db.foo.mapReduce(
- map2, reduce2, { query: {i: {$gte: 25600, $lt: 51200} }, out: { merge: outcol } });
+ map2, reduce2, {query: {i: {$gte: 25600, $lt: 51200}}, out: {merge: outcol}});
printjson(out);
- assert.eq(25600 , out.counts.emit , "Received wrong result");
- assert.eq(51200 , out.counts.output , "Received wrong result");
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
// M/R do 2nd half of docs
jsTestLog("Test C");
out = db.foo.mapReduce(
- map2, reduce2, { query: {i: {$gte: 51200} }, out: { merge: outcol, nonAtomic: true } });
+ map2, reduce2, {query: {i: {$gte: 51200}}, out: {merge: outcol, nonAtomic: true}});
printjson(out);
- assert.eq(51200 , out.counts.emit , "Received wrong result");
- assert.eq(51200 , out.counts.output , "Received wrong result");
- assert.eq(1 , db[outcol].findOne().value , "Received wrong result");
+ assert.eq(51200, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
+ assert.eq(1, db[outcol].findOne().value, "Received wrong result");
// Test reduce
jsTestLog("Test D");
outcol = "big_out_reduce";
// M/R quarter of the docs
- out = db.foo.mapReduce(map2, reduce2,{ query: { i: { $lt: 25600 } }, out: { reduce: outcol } });
+ out = db.foo.mapReduce(map2, reduce2, {query: {i: {$lt: 25600}}, out: {reduce: outcol}});
printjson(out);
- assert.eq(25600 , out.counts.emit , "Received wrong result");
- assert.eq(25600 , out.counts.output , "Received wrong result");
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(25600, out.counts.output, "Received wrong result");
// M/R further docs
jsTestLog("Test E");
out = db.foo.mapReduce(
- map2, reduce2, { query: { i: { $gte: 25600, $lt: 51200 } }, out: { reduce: outcol } });
+ map2, reduce2, {query: {i: {$gte: 25600, $lt: 51200}}, out: {reduce: outcol}});
printjson(out);
- assert.eq(25600 , out.counts.emit , "Received wrong result");
- assert.eq(51200 , out.counts.output , "Received wrong result");
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
// M/R do 2nd half of docs
jsTestLog("Test F");
out = db.foo.mapReduce(
- map2, reduce2, { query: { i: {$gte: 51200} }, out: { reduce: outcol, nonAtomic: true } });
+ map2, reduce2, {query: {i: {$gte: 51200}}, out: {reduce: outcol, nonAtomic: true}});
printjson(out);
- assert.eq(51200 , out.counts.emit , "Received wrong result");
- assert.eq(51200 , out.counts.output , "Received wrong result");
- assert.eq(2 , db[outcol].findOne().value , "Received wrong result");
+ assert.eq(51200, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
+ assert.eq(2, db[outcol].findOne().value, "Received wrong result");
// Verify that data is also on secondary
jsTestLog("Test G");
@@ -208,9 +222,9 @@ function runTest(s) {
// that replication can keep up even on slow machines.
s.stopBalancer();
s._rs[0].test.awaitReplication(300 * 1000);
- assert.eq(51200 , primary.getDB("test")[outcol].count() , "Wrong count");
+ assert.eq(51200, primary.getDB("test")[outcol].count(), "Wrong count");
for (var i = 0; i < secondaries.length; ++i) {
- assert.eq(51200 , secondaries[i].getDB("test")[outcol].count() , "Wrong count");
+ assert.eq(51200, secondaries[i].getDB("test")[outcol].count(), "Wrong count");
}
}
@@ -218,8 +232,7 @@ var s = setupTest();
if (s.getDB("admin").runCommand("buildInfo").bits < 64) {
print("Skipping test on 32-bit platforms");
-}
-else {
+} else {
runTest(s);
}
diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js
index 20fae7ac522..9f15e247e83 100644
--- a/jstests/sharding/zero_shard_version.js
+++ b/jstests/sharding/zero_shard_version.js
@@ -3,178 +3,175 @@
* against a major version of zero or incompatible epochs.
*/
(function() {
-'use strict';
-
-var st = new ShardingTest({ shards: 2, mongos: 4 });
-
-var testDB_s0 = st.s.getDB('test');
-assert.commandWorked(testDB_s0.adminCommand({ enableSharding: 'test' }));
-st.ensurePrimaryShard('test', 'shard0001');
-assert.commandWorked(testDB_s0.adminCommand({ shardCollection: 'test.user', key: { x: 1 }}));
-
-var checkShardMajorVersion = function(conn, expectedVersion) {
- var shardVersionInfo = conn.adminCommand({ getShardVersion: 'test.user' });
- assert.eq(expectedVersion, shardVersionInfo.global.getTime());
-};
+ 'use strict';
-///////////////////////////////////////////////////////
-// Test shard with empty chunk
+ var st = new ShardingTest({shards: 2, mongos: 4});
+
+ var testDB_s0 = st.s.getDB('test');
+ assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0001');
+ assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
-// shard0: 0|0|a
-// shard1: 1|0|a, [-inf, inf)
-// mongos0: 1|0|a
-
-var testDB_s1 = st.s1.getDB('test');
-assert.writeOK(testDB_s1.user.insert({ x: 1 }));
-assert.commandWorked(testDB_s1.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000' }));
-
-// Official config:
-// shard0: 2|0|a, [-inf, inf)
-// shard1: 0|0|a
-//
-// Shard metadata:
-// shard0: 0|0|a
-// shard1: 0|0|a
-// mongos0: 1|0|a
-
-checkShardMajorVersion(st.d0, 0);
-checkShardMajorVersion(st.d1, 0);
-
-// mongos0 still thinks that { x: 1 } belong to shard0001, but should be able to
-// refresh it's metadata correctly.
-assert.neq(null, testDB_s0.user.findOne({ x: 1 }));
+ var checkShardMajorVersion = function(conn, expectedVersion) {
+ var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
+ assert.eq(expectedVersion, shardVersionInfo.global.getTime());
+ };
-checkShardMajorVersion(st.d0, 2);
-checkShardMajorVersion(st.d1, 0);
-
-// Set mongos2 & mongos3 to version 2|0|a
-var testDB_s2 = st.s2.getDB('test');
-assert.neq(null, testDB_s2.user.findOne({ x: 1 }));
-
-var testDB_s3 = st.s3.getDB('test');
-assert.neq(null, testDB_s3.user.findOne({ x: 1 }));
-
-///////////////////////////////////////////////////////
-// Test unsharded collection
-// mongos versions: s0, s2, s3: 2|0|a
-
-testDB_s1.user.drop();
-assert.writeOK(testDB_s1.user.insert({ x: 10 }));
-
-// shard0: 0|0|0
-// shard1: 0|0|0
-// mongos0: 2|0|a
-
-checkShardMajorVersion(st.d0, 0);
-checkShardMajorVersion(st.d1, 0);
-
-// mongos0 still thinks { x: 10 } belong to shard0000, but since coll is dropped,
-// query should be routed to primary shard.
-assert.neq(null, testDB_s0.user.findOne({ x: 10 }));
-
-checkShardMajorVersion(st.d0, 0);
-checkShardMajorVersion(st.d1, 0);
-
-///////////////////////////////////////////////////////
-// Test 2 shards with 1 chunk
-// mongos versions: s0: 0|0|0, s2, s3: 2|0|a
-
-testDB_s1.user.drop();
-testDB_s1.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
-testDB_s1.adminCommand({ split: 'test.user', middle: { x: 0 }});
-
-// shard0: 0|0|b,
-// shard1: 1|1|b, [-inf, 0), [0, inf)
-
-testDB_s1.user.insert({ x: 1 });
-testDB_s1.user.insert({ x: -11 });
-assert.commandWorked(testDB_s1.adminCommand({ moveChunk: 'test.user',
- find: { x: -1 },
- to: 'shard0000' }));
-
-// Official config:
-// shard0: 2|0|b, [-inf, 0)
-// shard1: 2|1|b, [0, inf)
-//
-// Shard metadata:
-// shard0: 0|0|b
-// shard1: 2|1|b
-//
-// mongos2: 2|0|a
-
-checkShardMajorVersion(st.d0, 0);
-checkShardMajorVersion(st.d1, 2);
-
-// mongos2 still thinks that { x: 1 } belong to shard0000, but should be able to
-// refresh it's metadata correctly.
-assert.neq(null, testDB_s2.user.findOne({ x: 1 }));
-
-checkShardMajorVersion(st.d0, 2);
-checkShardMajorVersion(st.d1, 2);
-
-// Set shard metadata to 2|0|b
-assert.neq(null, testDB_s2.user.findOne({ x: -11 }));
-
-checkShardMajorVersion(st.d0, 2);
-checkShardMajorVersion(st.d1, 2);
-
-// Official config:
-// shard0: 2|0|b, [-inf, 0)
-// shard1: 2|1|b, [0, inf)
-//
-// Shard metadata:
-// shard0: 2|0|b
-// shard1: 2|1|b
-//
-// mongos3: 2|0|a
-
-// 4th mongos still thinks that { x: 1 } belong to shard0000, but should be able to
-// refresh it's metadata correctly.
-assert.neq(null, testDB_s3.user.findOne({ x: 1 }));
-
-///////////////////////////////////////////////////////
-// Test mongos thinks unsharded when it's actually sharded
-// mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
+ ///////////////////////////////////////////////////////
+ // Test shard with empty chunk
-// Set mongos0 to version 0|0|0
-testDB_s0.user.drop();
-
-checkShardMajorVersion(st.d0, 0);
-checkShardMajorVersion(st.d1, 0);
-
-assert.eq(null, testDB_s0.user.findOne({ x: 1 }));
+ // shard0: 0|0|a
+ // shard1: 1|0|a, [-inf, inf)
+ // mongos0: 1|0|a
+
+ var testDB_s1 = st.s1.getDB('test');
+ assert.writeOK(testDB_s1.user.insert({x: 1}));
+ assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'}));
+
+ // Official config:
+ // shard0: 2|0|a, [-inf, inf)
+ // shard1: 0|0|a
+ //
+ // Shard metadata:
+ // shard0: 0|0|a
+ // shard1: 0|0|a
+ // mongos0: 1|0|a
+
+ checkShardMajorVersion(st.d0, 0);
+ checkShardMajorVersion(st.d1, 0);
+
+    // mongos0 still thinks that { x: 1 } belongs to shard0001, but should be able to
+    // refresh its metadata correctly.
+ assert.neq(null, testDB_s0.user.findOne({x: 1}));
-// Needs to also set mongos1 to version 0|0|0, otherwise it'll complain that collection is
-// already sharded.
-assert.eq(null, testDB_s1.user.findOne({ x: 1 }));
-assert.commandWorked(testDB_s1.adminCommand({ shardCollection: 'test.user', key: { x: 1 }}));
-testDB_s1.user.insert({ x: 1 });
-
-assert.commandWorked(testDB_s1.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000' }));
-
-// Official config:
-// shard0: 2|0|c, [-inf, inf)
-// shard1: 0|0|c
-//
-// Shard metadata:
-// shard0: 0|0|c
-// shard1: 0|0|c
-//
-// mongos0: 0|0|0
-
-checkShardMajorVersion(st.d0, 0);
-checkShardMajorVersion(st.d1, 0);
-
-// 1st mongos thinks that collection is unshareded and will attempt to query primary shard.
-assert.neq(null, testDB_s0.user.findOne({ x: 1 }));
-
-checkShardMajorVersion(st.d0, 2);
-checkShardMajorVersion(st.d1, 0);
-
-st.stop();
+ checkShardMajorVersion(st.d0, 2);
+ checkShardMajorVersion(st.d1, 0);
+
+ // Set mongos2 & mongos3 to version 2|0|a
+ var testDB_s2 = st.s2.getDB('test');
+ assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+ var testDB_s3 = st.s3.getDB('test');
+ assert.neq(null, testDB_s3.user.findOne({x: 1}));
+
+ ///////////////////////////////////////////////////////
+ // Test unsharded collection
+ // mongos versions: s0, s2, s3: 2|0|a
+
+ testDB_s1.user.drop();
+ assert.writeOK(testDB_s1.user.insert({x: 10}));
+
+ // shard0: 0|0|0
+ // shard1: 0|0|0
+ // mongos0: 2|0|a
+
+ checkShardMajorVersion(st.d0, 0);
+ checkShardMajorVersion(st.d1, 0);
+
+    // mongos0 still thinks { x: 10 } belongs to shard0000, but since coll is dropped,
+ // query should be routed to primary shard.
+ assert.neq(null, testDB_s0.user.findOne({x: 10}));
+
+ checkShardMajorVersion(st.d0, 0);
+ checkShardMajorVersion(st.d1, 0);
+
+ ///////////////////////////////////////////////////////
+ // Test 2 shards with 1 chunk
+ // mongos versions: s0: 0|0|0, s2, s3: 2|0|a
+
+ testDB_s1.user.drop();
+ testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+ testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
+
+ // shard0: 0|0|b,
+ // shard1: 1|1|b, [-inf, 0), [0, inf)
+
+ testDB_s1.user.insert({x: 1});
+ testDB_s1.user.insert({x: -11});
+ assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: 'shard0000'}));
+
+ // Official config:
+ // shard0: 2|0|b, [-inf, 0)
+ // shard1: 2|1|b, [0, inf)
+ //
+ // Shard metadata:
+ // shard0: 0|0|b
+ // shard1: 2|1|b
+ //
+ // mongos2: 2|0|a
+
+ checkShardMajorVersion(st.d0, 0);
+ checkShardMajorVersion(st.d1, 2);
+
+    // mongos2 still thinks that { x: 1 } belongs to shard0000, but should be able to
+    // refresh its metadata correctly.
+ assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+ checkShardMajorVersion(st.d0, 2);
+ checkShardMajorVersion(st.d1, 2);
+
+ // Set shard metadata to 2|0|b
+ assert.neq(null, testDB_s2.user.findOne({x: -11}));
+
+ checkShardMajorVersion(st.d0, 2);
+ checkShardMajorVersion(st.d1, 2);
+
+ // Official config:
+ // shard0: 2|0|b, [-inf, 0)
+ // shard1: 2|1|b, [0, inf)
+ //
+ // Shard metadata:
+ // shard0: 2|0|b
+ // shard1: 2|1|b
+ //
+ // mongos3: 2|0|a
+
+    // 4th mongos still thinks that { x: 1 } belongs to shard0000, but should be able to
+    // refresh its metadata correctly.
+ assert.neq(null, testDB_s3.user.findOne({x: 1}));
+
+ ///////////////////////////////////////////////////////
+ // Test mongos thinks unsharded when it's actually sharded
+ // mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
+
+ // Set mongos0 to version 0|0|0
+ testDB_s0.user.drop();
+
+ checkShardMajorVersion(st.d0, 0);
+ checkShardMajorVersion(st.d1, 0);
+
+ assert.eq(null, testDB_s0.user.findOne({x: 1}));
+
+ // Needs to also set mongos1 to version 0|0|0, otherwise it'll complain that collection is
+ // already sharded.
+ assert.eq(null, testDB_s1.user.findOne({x: 1}));
+ assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ testDB_s1.user.insert({x: 1});
+
+ assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'}));
+
+ // Official config:
+ // shard0: 2|0|c, [-inf, inf)
+ // shard1: 0|0|c
+ //
+ // Shard metadata:
+ // shard0: 0|0|c
+ // shard1: 0|0|c
+ //
+ // mongos0: 0|0|0
+
+ checkShardMajorVersion(st.d0, 0);
+ checkShardMajorVersion(st.d1, 0);
+
+    // 1st mongos thinks that collection is unsharded and will attempt to query primary shard.
+ assert.neq(null, testDB_s0.user.findOne({x: 1}));
+
+ checkShardMajorVersion(st.d0, 2);
+ checkShardMajorVersion(st.d1, 0);
+
+ st.stop();
})();
diff --git a/jstests/slow1/election_timing.js b/jstests/slow1/election_timing.js
index e1919bb7b61..2fe83be02ed 100644
--- a/jstests/slow1/election_timing.js
+++ b/jstests/slow1/election_timing.js
@@ -7,99 +7,104 @@
var testStart = Date.now();
var testCases = [
- {
- name: "testV1Stop",
- description: "protocolVersion 1, primary is stopped",
- protocolVersion: 1,
- // testRuns is the number of times a new ReplSetTest will be used.
- testRuns: 1,
- // testCycles is the number of election cycles that will be run per ReplSetTest lifespan.
- testCycles: 5,
- // testSetup is run after the replSet is initiated.
- // Function.prototype is the default.
- testSetup: Function.prototype,
- // Trigger an election by stepping down, stopping, or partitioning the primary.
- // stopPrimary is the default.
- electionTrigger: ElectionTimingTest.prototype.stopPrimary,
- // After the election has completed, make the old primary available again.
- // stopPrimaryReset is the default.
- testReset: ElectionTimingTest.prototype.stopPrimaryReset
- },
-
- /*
- This test case is flakey since waiting for the old primary to shutdown can take longer than the
- allowed timeout, even if a new primary was elected during the shutdown time.
-
- {
- name: "testV1StopTimeout1500",
- description: "protocolVersion 1, primary is stopped, electionTimeoutMillis set to 1500",
- protocolVersion: 1,
- testRuns: 1,
- testCycles: 5,
- // The settings object is merged into the replset config settings object.
- settings: {electionTimeoutMillis: 1500}
- },
- */
-
- {
- name: "testV1StepDown",
- description: "protocolVersion 1, primary is stepped down",
- protocolVersion: 1,
- testRuns: 1,
- testCycles: 5,
- electionTrigger: ElectionTimingTest.prototype.stepDownPrimary,
- testReset: ElectionTimingTest.prototype.stepDownPrimaryReset,
- },
-
- {
- name: "testV1StepDown1500",
- description: "protocolVersion 1, primary is stepped down",
- protocolVersion: 1,
- testRuns: 1,
- testCycles: 5,
- electionTrigger: ElectionTimingTest.prototype.stepDownPrimary,
- testReset: ElectionTimingTest.prototype.stepDownPrimaryReset,
- // The settings object is merged into the replset config settings object.
- settings: {electionTimeoutMillis: 1500}
- },
-
- {
- name: "testV1StepDownLargeCluster",
- description: "protocolVersion 1, primary is stepped down, 7 electable nodes",
- protocolVersion: 1,
- nodes: 7,
- testRuns: 1,
- testCycles: 5,
- electionTrigger: ElectionTimingTest.prototype.stepDownPrimary,
- testReset: function() {},
- waitForNewPrimary : function(rst, secondary) { rst.getPrimary(); }
- },
-
- {
- name: "testV0Stop",
- description: "protocolVersion 0, primary is stopped",
- protocolVersion: 0,
- testRuns: 1,
- testCycles: 1
- },
-
- {
- name: "testV0StepDown",
- description: "protocolVersion 0, primary is stepped down",
- protocolVersion: 0,
- testRuns: 1,
- testCycles: 2,
- stepDownGuardTime: 30,
- // There is a guard time in pv0 that prevents an election right
- // after initiating.
- testSetup: function() {sleep(30 * 1000);},
- electionTrigger: ElectionTimingTest.prototype.stepDownPrimary,
- testReset: ElectionTimingTest.prototype.stepDownPrimaryReset
- },
+ {
+ name: "testV1Stop",
+ description: "protocolVersion 1, primary is stopped",
+ protocolVersion: 1,
+ // testRuns is the number of times a new ReplSetTest will be used.
+ testRuns: 1,
+ // testCycles is the number of election cycles that will be run per ReplSetTest lifespan.
+ testCycles: 5,
+ // testSetup is run after the replSet is initiated.
+ // Function.prototype is the default.
+ testSetup: Function.prototype,
+ // Trigger an election by stepping down, stopping, or partitioning the primary.
+ // stopPrimary is the default.
+ electionTrigger: ElectionTimingTest.prototype.stopPrimary,
+ // After the election has completed, make the old primary available again.
+ // stopPrimaryReset is the default.
+ testReset: ElectionTimingTest.prototype.stopPrimaryReset
+ },
+
+ /*
+    This test case is flaky since waiting for the old primary to shutdown can take longer than
+ the
+ allowed timeout, even if a new primary was elected during the shutdown time.
+
+ {
+ name: "testV1StopTimeout1500",
+ description: "protocolVersion 1, primary is stopped, electionTimeoutMillis set to 1500",
+ protocolVersion: 1,
+ testRuns: 1,
+ testCycles: 5,
+ // The settings object is merged into the replset config settings object.
+ settings: {electionTimeoutMillis: 1500}
+ },
+ */
+
+ {
+ name: "testV1StepDown",
+ description: "protocolVersion 1, primary is stepped down",
+ protocolVersion: 1,
+ testRuns: 1,
+ testCycles: 5,
+ electionTrigger: ElectionTimingTest.prototype.stepDownPrimary,
+ testReset: ElectionTimingTest.prototype.stepDownPrimaryReset,
+ },
+
+ {
+ name: "testV1StepDown1500",
+ description: "protocolVersion 1, primary is stepped down",
+ protocolVersion: 1,
+ testRuns: 1,
+ testCycles: 5,
+ electionTrigger: ElectionTimingTest.prototype.stepDownPrimary,
+ testReset: ElectionTimingTest.prototype.stepDownPrimaryReset,
+ // The settings object is merged into the replset config settings object.
+ settings: {electionTimeoutMillis: 1500}
+ },
+
+ {
+ name: "testV1StepDownLargeCluster",
+ description: "protocolVersion 1, primary is stepped down, 7 electable nodes",
+ protocolVersion: 1,
+ nodes: 7,
+ testRuns: 1,
+ testCycles: 5,
+ electionTrigger: ElectionTimingTest.prototype.stepDownPrimary,
+ testReset: function() {},
+ waitForNewPrimary: function(rst, secondary) {
+ rst.getPrimary();
+ }
+ },
+
+ {
+ name: "testV0Stop",
+ description: "protocolVersion 0, primary is stopped",
+ protocolVersion: 0,
+ testRuns: 1,
+ testCycles: 1
+ },
+
+ {
+ name: "testV0StepDown",
+ description: "protocolVersion 0, primary is stepped down",
+ protocolVersion: 0,
+ testRuns: 1,
+ testCycles: 2,
+ stepDownGuardTime: 30,
+ // There is a guard time in pv0 that prevents an election right
+ // after initiating.
+ testSetup: function() {
+ sleep(30 * 1000);
+ },
+ electionTrigger: ElectionTimingTest.prototype.stepDownPrimary,
+ testReset: ElectionTimingTest.prototype.stepDownPrimaryReset
+ },
];
- testCases.forEach(function (tc) {
+ testCases.forEach(function(tc) {
var testRun = new ElectionTimingTest(tc);
tc.testResults = testRun.testResults;
tc.electionTimeoutLimitMillis = testRun.electionTimeoutLimitMillis;
@@ -118,27 +123,27 @@
printjson(tc.testResults);
});
- testCases.forEach(function (tc) {
+ testCases.forEach(function(tc) {
var allResults = [];
- tc.testResults.forEach(function (tr) {
+ tc.testResults.forEach(function(tr) {
allResults = allResults.concat(tr.results);
});
var resAvg = Array.avg(allResults);
- var resMin = Math.min(...allResults);
- var resMax = Math.max(...allResults);
+ var resMin = Math.min(... allResults);
+ var resMax = Math.max(... allResults);
var resStdDev = Array.stdDev(allResults);
- jsTestLog("Results: " + tc.name +
- " Average over " + allResults.length + " runs: " + resAvg +
- " Min: " + resMin + " Max: " + resMax +
- " Limit: " + tc.electionTimeoutLimitMillis/1000 +
- " StdDev: " + resStdDev.toFixed(4));
+ jsTestLog("Results: " + tc.name + " Average over " + allResults.length + " runs: " +
+ resAvg + " Min: " + resMin + " Max: " + resMax + " Limit: " +
+ tc.electionTimeoutLimitMillis / 1000 + " StdDev: " + resStdDev.toFixed(4));
allResults.forEach(function(failoverElapsedMillis) {
- assert.lte(failoverElapsedMillis, tc.electionTimeoutLimitMillis/1000,
- tc.name + ': failover (' + failoverElapsedMillis + ' sec) took too long. limit: ' +
- tc.electionTimeoutLimitMillis/1000 + ' sec');
+ assert.lte(failoverElapsedMillis,
+ tc.electionTimeoutLimitMillis / 1000,
+ tc.name + ': failover (' + failoverElapsedMillis +
+ ' sec) took too long. limit: ' + tc.electionTimeoutLimitMillis / 1000 +
+ ' sec');
});
});
diff --git a/jstests/slow1/large_role_chain.js b/jstests/slow1/large_role_chain.js
index 581db988be5..107263782ef 100644
--- a/jstests/slow1/large_role_chain.js
+++ b/jstests/slow1/large_role_chain.js
@@ -2,12 +2,10 @@
// each role is a member of the next, creating a large chain.
function runTest(conn) {
-
var testdb = conn.getDB("rolechain");
- testdb.runCommand({dropAllRolesFromDatabase:1});
+ testdb.runCommand({dropAllRolesFromDatabase: 1});
var chainLen = 2000;
-
jsTestLog("Generating a chain of " + chainLen + " linked roles");
var roleNameBase = "chainRole";
@@ -15,15 +13,13 @@ function runTest(conn) {
var name = roleNameBase + i;
if (i == 0) {
testdb.runCommand({createRole: name, privileges: [], roles: []});
- }
- else {
+ } else {
jsTestLog("Creating role " + i);
var prevRole = roleNameBase + (i - 1);
- testdb.runCommand({createRole: name, privileges: [], roles: [ prevRole ]});
+ testdb.runCommand({createRole: name, privileges: [], roles: [prevRole]});
var roleInfo = testdb.getRole(name);
}
}
-
}
// run all tests standalone
diff --git a/jstests/slow1/memory.js b/jstests/slow1/memory.js
index a2f84fddc6c..9d67aa7aab6 100644
--- a/jstests/slow1/memory.js
+++ b/jstests/slow1/memory.js
@@ -3,31 +3,45 @@ var col = db.memoryTest;
// test creating many collections to make sure no internal cache goes OOM
for (var i = 0; i < 10000; ++i) {
name = "memoryTest" + i;
- if ((i % 1000) == 0) print("Processing " + name);
- db.eval(function(col) { for (var i = 0; i < 100; ++i) {db[col + "_" + i].find();} }, name);
+ if ((i % 1000) == 0)
+ print("Processing " + name);
+ db.eval(function(col) {
+ for (var i = 0; i < 100; ++i) {
+ db[col + "_" + i].find();
+ }
+ }, name);
}
// test recovery of JS engine after out of memory
-db.system.js.save( { "_id" : "f1", "value" : function(n) {
- a = [];
- b = [];
- c = [];
- for (i = 0; i < n; i++) {
- a.push(Math.random());
- b.push(Math.random());
- c.push(Math.random());
+db.system.js.save({
+ "_id": "f1",
+ "value": function(n) {
+ a = [];
+ b = [];
+ c = [];
+ for (i = 0; i < n; i++) {
+ a.push(Math.random());
+ b.push(Math.random());
+ c.push(Math.random());
+ }
}
-} });
+});
// do mix of calls to make sure OOM is handled with no permanent damage
db.eval("f1(10)");
-assert.throws(function() { db.eval("f1(100000000)"); } );
+assert.throws(function() {
+ db.eval("f1(100000000)");
+});
db.eval("f1(10)");
-assert.throws(function() { db.eval("f1(1000000000)"); } );
+assert.throws(function() {
+ db.eval("f1(1000000000)");
+});
db.eval("f1(1000000)");
db.eval("f1(1000000)");
db.eval("f1(1000000)");
-assert.throws(function() { db.eval("f1(100000000)"); } );
+assert.throws(function() {
+ db.eval("f1(100000000)");
+});
db.eval("f1(10)");
db.eval("f1(1000000)");
db.eval("f1(1000000)");
@@ -37,6 +51,7 @@ db.eval("f1(1000000)");
col.drop();
col.insert({a: 1});
col.findOne({$where: "var arr = []; for (var i = 0; i < 1000000; ++i) {arr.push(0);}"});
-assert.throws(function() { col.findOne({$where: "var arr = []; for (var i = 0; i < 1000000000; ++i) {arr.push(0);}"}); });
+assert.throws(function() {
+ col.findOne({$where: "var arr = []; for (var i = 0; i < 1000000000; ++i) {arr.push(0);}"});
+});
col.findOne({$where: "var arr = []; for (var i = 0; i < 1000000; ++i) {arr.push(0);}"});
-
diff --git a/jstests/slow1/replsets_priority1.js b/jstests/slow1/replsets_priority1.js
index 614c6b7cec7..098af758bdd 100644
--- a/jstests/slow1/replsets_priority1.js
+++ b/jstests/slow1/replsets_priority1.js
@@ -5,7 +5,7 @@ print("\n\n\nreplsets_priority1.js BEGIN\n");
load("jstests/replsets/rslib.js");
-var rs = new ReplSetTest( {name: 'testSet', nodes: 3, nodeOptions: {verbose: 2}} );
+var rs = new ReplSetTest({name: 'testSet', nodes: 3, nodeOptions: {verbose: 2}});
var nodes = rs.startSet();
rs.initiate();
@@ -14,53 +14,51 @@ var master = rs.getPrimary();
var everyoneOkSoon = function() {
var status;
assert.soon(function() {
- var ok = true;
- status = master.adminCommand({replSetGetStatus : 1});
+ var ok = true;
+ status = master.adminCommand({replSetGetStatus: 1});
- if (!status.members) {
- return false;
- }
+ if (!status.members) {
+ return false;
+ }
- for (var i in status.members) {
- if (status.members[i].health == 0) {
- continue;
- }
- ok &= status.members[i].state == 1 || status.members[i].state == 2;
+ for (var i in status.members) {
+ if (status.members[i].health == 0) {
+ continue;
}
- return ok;
- }, tojson(status));
+ ok &= status.members[i].state == 1 || status.members[i].state == 2;
+ }
+ return ok;
+ }, tojson(status));
};
-var checkPrimaryIs = function (node) {
+var checkPrimaryIs = function(node) {
print("nreplsets_priority1.js checkPrimaryIs(" + node.host + ")");
var status;
- assert.soon(function () {
+ assert.soon(function() {
var ok = true;
try {
- status = master.adminCommand({ replSetGetStatus: 1 });
- }
- catch (e) {
+ status = master.adminCommand({replSetGetStatus: 1});
+ } catch (e) {
print(e);
print("nreplsets_priority1.js checkPrimaryIs reconnecting");
reconnect(master);
- status = master.adminCommand({ replSetGetStatus: 1 });
+ status = master.adminCommand({replSetGetStatus: 1});
}
var str = "goal: " + node.host + "==1 states: ";
if (!status || !status.members) {
return false;
}
- status.members.forEach(function (m) {
+ status.members.forEach(function(m) {
str += m.name + ": " + m.state + " ";
if (m.name == node.host) {
ok &= m.state == 1;
- }
- else {
+ } else {
ok &= m.state != 1 || (m.state == 1 && m.health == 0);
}
});
@@ -68,7 +66,7 @@ var checkPrimaryIs = function (node) {
print(str);
print();
- occasionally(function () {
+ occasionally(function() {
print("\nstatus:");
printjson(status);
print();
@@ -85,14 +83,14 @@ everyoneOkSoon();
print("\n\nreplsets_priority1.js initial sync");
// intial sync
-master.getDB("foo").bar.insert({x:1});
+master.getDB("foo").bar.insert({x: 1});
rs.awaitReplication();
print("\n\nreplsets_priority1.js starting loop");
var n = 5;
-for (i=0; i<n; i++) {
- print("Round "+i+": FIGHT!");
+for (i = 0; i < n; i++) {
+ print("Round " + i + ": FIGHT!");
var max = null;
var second = null;
@@ -102,7 +100,7 @@ for (i=0; i<n; i++) {
var version = config.version;
config.version++;
- for (var j=0; j<config.members.length; j++) {
+ for (var j = 0; j < config.members.length; j++) {
var priority = Math.random() * 100;
print("random priority : " + priority);
config.members[j].priority = priority;
@@ -112,7 +110,7 @@ for (i=0; i<n; i++) {
}
}
- for (var j=0; j<config.members.length; j++) {
+ for (var j = 0; j < config.members.length; j++) {
if (config.members[j] == max) {
continue;
}
@@ -121,24 +119,24 @@ for (i=0; i<n; i++) {
}
}
- print("\n\nreplsets_priority1.js max is " + max.host + " with priority " + max.priority + ", reconfiguring...");
+ print("\n\nreplsets_priority1.js max is " + max.host + " with priority " + max.priority +
+ ", reconfiguring...");
var count = 0;
while (config.version != version && count < 100) {
reconnect(master);
occasionally(function() {
- print("version is "+version+", trying to update to "+config.version);
- });
+ print("version is " + version + ", trying to update to " + config.version);
+ });
try {
- master.adminCommand({replSetReconfig : config});
+ master.adminCommand({replSetReconfig: config});
master = rs.getPrimary();
reconnect(master);
version = master.getDB("local").system.replset.findOne().version;
- }
- catch (e) {
+ } catch (e) {
print("nreplsets_priority1.js Caught exception: " + e);
}
@@ -146,7 +144,7 @@ for (i=0; i<n; i++) {
}
print("\nreplsets_priority1.js wait for 2 slaves");
-
+
assert.soon(function() {
rs.getPrimary();
return rs.liveNodes.slaves.length == 2;
@@ -155,16 +153,16 @@ for (i=0; i<n; i++) {
print("\nreplsets_priority1.js wait for new config version " + config.version);
assert.soon(function() {
- versions = [0,0];
- rs.liveNodes.slaves[0].setSlaveOk();
- versions[0] = rs.liveNodes.slaves[0].getDB("local").system.replset.findOne().version;
- rs.liveNodes.slaves[1].setSlaveOk();
- versions[1] = rs.liveNodes.slaves[1].getDB("local").system.replset.findOne().version;
- return versions[0] == config.version && versions[1] == config.version;
- });
+ versions = [0, 0];
+ rs.liveNodes.slaves[0].setSlaveOk();
+ versions[0] = rs.liveNodes.slaves[0].getDB("local").system.replset.findOne().version;
+ rs.liveNodes.slaves[1].setSlaveOk();
+ versions[1] = rs.liveNodes.slaves[1].getDB("local").system.replset.findOne().version;
+ return versions[0] == config.version && versions[1] == config.version;
+ });
print("replsets_priority1.js awaitReplication");
-
+
// the reconfiguration needs to be replicated! the hb sends it out
// separately from the repl
rs.awaitReplication();
@@ -181,7 +179,7 @@ for (i=0; i<n; i++) {
print("\nkilled max primary. Checking statuses.");
- print("second is "+second.host+" with priority "+second.priority);
+ print("second is " + second.host + " with priority " + second.priority);
checkPrimaryIs(second);
print("restart max " + max._id);
diff --git a/jstests/slow1/sharding_multiple_collections.js b/jstests/slow1/sharding_multiple_collections.js
index b844fe6c9dd..46ade14a0ad 100644
--- a/jstests/slow1/sharding_multiple_collections.js
+++ b/jstests/slow1/sharding_multiple_collections.js
@@ -1,61 +1,64 @@
(function() {
-var s = new ShardingTest({ name: "multcollections",
- shards: 2,
- mongos: 1,
- verbose: 1,
- other: { chunkSize: 1, enableBalancer : true } });
-
-s.adminCommand( { enablesharding : "test" } );
-db = s.getDB( "test" );
-s.ensurePrimaryShard('test', 'shard0001');
-
-N = 100000;
-
-S = "";
-while ( S.length < 500 )
- S += "123123312312";
-
-var bulk = db.foo.initializeUnorderedBulkOp();
-var bulk2 = db.bar.initializeUnorderedBulkOp();
-for ( i=0; i<N; i++ ){
- bulk.insert({ _id: i, s: S });
- bulk2.insert({ _id: i, s: S, s2: S });
-}
-assert.writeOK(bulk.execute());
-assert.writeOK(bulk2.execute());
-
-s.printShardingStatus();
-
-function mytest( coll , i , loopNumber ){
- x = coll.find( { _id : i } ).explain();
- if ( x )
- return;
- throw Error( "can't find " + i + " in " + coll.getName() + " on loopNumber: " + loopNumber + " explain: " + tojson( x ) );
-}
-
-loopNumber = 0;
-while ( 1 ){
- for ( i=0; i<N; i++ ){
- mytest( db.foo , i , loopNumber );
- mytest( db.bar , i , loopNumber );
- if ( i % 1000 == 0 )
- print( i );
+ var s = new ShardingTest({
+ name: "multcollections",
+ shards: 2,
+ mongos: 1,
+ verbose: 1,
+ other: {chunkSize: 1, enableBalancer: true}
+ });
+
+ s.adminCommand({enablesharding: "test"});
+ db = s.getDB("test");
+ s.ensurePrimaryShard('test', 'shard0001');
+
+ N = 100000;
+
+ S = "";
+ while (S.length < 500)
+ S += "123123312312";
+
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ var bulk2 = db.bar.initializeUnorderedBulkOp();
+ for (i = 0; i < N; i++) {
+ bulk.insert({_id: i, s: S});
+ bulk2.insert({_id: i, s: S, s2: S});
}
+ assert.writeOK(bulk.execute());
+ assert.writeOK(bulk2.execute());
+
s.printShardingStatus();
- loopNumber++;
- if ( loopNumber == 1 ){
- s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
- s.adminCommand( { shardcollection : "test.bar" , key : { _id : 1 } } );
+ function mytest(coll, i, loopNumber) {
+ x = coll.find({_id: i}).explain();
+ if (x)
+ return;
+ throw Error("can't find " + i + " in " + coll.getName() + " on loopNumber: " + loopNumber +
+ " explain: " + tojson(x));
}
-
- assert( loopNumber < 1000 , "taking too long" );
- if ( s.chunkDiff( "foo" ) < 12 && s.chunkDiff( "bar" ) < 12 )
- break;
-}
+ loopNumber = 0;
+ while (1) {
+ for (i = 0; i < N; i++) {
+ mytest(db.foo, i, loopNumber);
+ mytest(db.bar, i, loopNumber);
+ if (i % 1000 == 0)
+ print(i);
+ }
+ s.printShardingStatus();
+ loopNumber++;
+
+ if (loopNumber == 1) {
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ s.adminCommand({shardcollection: "test.bar", key: {_id: 1}});
+ }
+
+ assert(loopNumber < 1000, "taking too long");
+
+ if (s.chunkDiff("foo") < 12 && s.chunkDiff("bar") < 12)
+ break;
+ }
-s.stop();
+ s.stop();
})();
diff --git a/jstests/slow2/32bit.js b/jstests/slow2/32bit.js
index f76b04e5c21..d8b2c5ff728 100644
--- a/jstests/slow2/32bit.js
+++ b/jstests/slow2/32bit.js
@@ -7,10 +7,9 @@ var forceSeedToBe = null;
if (forceSeedToBe) {
print("\n32bit.js WARNING FORCING A SPECIFIC SEED");
- print("seed="+ forceSeedToBe);
+ print("seed=" + forceSeedToBe);
Random.srand(forceSeedToBe);
-}
-else {
+} else {
Random.setRandomSeed();
}
@@ -18,7 +17,7 @@ function f() {
'use strict';
var pass = 1;
- var mydb = db.getSisterDB( "test_32bit" );
+ var mydb = db.getSisterDB("test_32bit");
var t = mydb.colltest_32bit;
mydb.dropDatabase();
@@ -30,16 +29,16 @@ function f() {
print("32bit.js PASS #" + pass);
pass++;
- t.insert({x:1});
- t.ensureIndex({a:1});
- t.ensureIndex({b:1}, true);
- t.ensureIndex({x:1});
+ t.insert({x: 1});
+ t.ensureIndex({a: 1});
+ t.ensureIndex({b: 1}, true);
+ t.ensureIndex({x: 1});
if (Random.rand() < 0.3) {
- t.ensureIndex({c:1});
+ t.ensureIndex({c: 1});
}
- t.ensureIndex({d:1});
- t.ensureIndex({e:1});
- t.ensureIndex({f:1});
+ t.ensureIndex({d: 1});
+ t.ensureIndex({e: 1});
+ t.ensureIndex({f: 1});
// create 448 byte string
var big = 'a b';
@@ -70,34 +69,43 @@ function f() {
cc = null;
}
- var res = t.insert({ a: a, b: b, c: cc, d: d, f: f });
+ var res = t.insert({a: a, b: b, c: cc, d: d, f: f});
if (res.hasWriteError()) {
// Presumably we have mmap error on 32 bit. try a few more manipulations
// attempting to break things.
- t.insert({a:33,b:44,c:55,d:66,f:66});
- t.insert({a:33,b:44000,c:55,d:66});
- t.insert({a:33,b:440000,c:55});
- t.insert({a:33,b:4400000});
- t.update({a:20},{'$set':{c:'abc'}});
- t.update({a:21},{'$set':{c:'aadsfbc'}});
- t.update({a:22},{'$set':{c:'c'}});
- t.update({a:23},{'$set':{b:cc}});
- t.remove({a:22});
+ t.insert({a: 33, b: 44, c: 55, d: 66, f: 66});
+ t.insert({a: 33, b: 44000, c: 55, d: 66});
+ t.insert({a: 33, b: 440000, c: 55});
+ t.insert({a: 33, b: 4400000});
+ t.update({a: 20}, {'$set': {c: 'abc'}});
+ t.update({a: 21}, {'$set': {c: 'aadsfbc'}});
+ t.update({a: 22}, {'$set': {c: 'c'}});
+ t.update({a: 23}, {'$set': {b: cc}});
+ t.remove({a: 22});
break;
}
if (Random.rand() < 0.01) {
- t.remove({a:a});
- t.remove({b:Random.rand()});
- t.insert({e:1});
- t.insert({f:'aaaaaaaaaa'});
-
- if (Random.rand() < 0.00001) { print("remove cc"); t.remove({c:cc}); }
- if (Random.rand() < 0.0001) { print("update cc"); t.update({c:cc},{'$set':{c:1}},false,true); }
- if (Random.rand() < 0.00001) { print("remove e"); t.remove({e:1}); }
+ t.remove({a: a});
+ t.remove({b: Random.rand()});
+ t.insert({e: 1});
+ t.insert({f: 'aaaaaaaaaa'});
+
+ if (Random.rand() < 0.00001) {
+ print("remove cc");
+ t.remove({c: cc});
+ }
+ if (Random.rand() < 0.0001) {
+ print("update cc");
+ t.update({c: cc}, {'$set': {c: 1}}, false, true);
+ }
+ if (Random.rand() < 0.00001) {
+ print("remove e");
+ t.remove({e: 1});
+ }
}
if (a == 20000) {
- var delta_ms = (new Date())-start;
+ var delta_ms = (new Date()) - start;
// 2MM / 20000 = 100. 1000ms/sec.
var eta_secs = delta_ms * (100 / 1000);
print("32bit.js eta_secs:" + eta_secs);
@@ -124,7 +132,7 @@ function f() {
print("32bit.js FAIL validating");
print(res.result);
printjson(res);
- //mydb.dropDatabase();
+ // mydb.dropDatabase();
throw Error("fail validating 32bit.js");
}
diff --git a/jstests/slow2/conc_update.js b/jstests/slow2/conc_update.js
index 5cde7489090..b7b8b836831 100644
--- a/jstests/slow2/conc_update.js
+++ b/jstests/slow2/conc_update.js
@@ -1,52 +1,57 @@
-load( "jstests/libs/slow_weekly_util.js" );
-test = new SlowWeeklyMongod( "conc_update" );
+load("jstests/libs/slow_weekly_util.js");
+test = new SlowWeeklyMongod("conc_update");
db = test.getDB("concurrency");
db.dropDatabase();
-NRECORDS=3*1024*1024;
+NRECORDS = 3 * 1024 * 1024;
-print("loading "+NRECORDS+" documents (progress msg every 1024*1024 documents)");
+print("loading " + NRECORDS + " documents (progress msg every 1024*1024 documents)");
var bulk = db.conc.initializeUnorderedBulkOp();
for (var i = 0; i < NRECORDS; i++) {
- bulk.insert({ x: i });
+ bulk.insert({x: i});
}
assert.writeOK(bulk.execute());
print("making an index (this will take a while)");
-db.conc.ensureIndex({x:1});
+db.conc.ensureIndex({x: 1});
-var c1=db.conc.count({x:{$lt:NRECORDS}});
+var c1 = db.conc.count({x: {$lt: NRECORDS}});
-updater = startParallelShell("db = db.getSisterDB('concurrency');\
+updater = startParallelShell(
+ "db = db.getSisterDB('concurrency');\
db.concflag.insert({ inprog: true });\
sleep(20);\
assert.writeOK(db.conc.update({}, \
- { $inc: { x: " + NRECORDS + "}}, false, true)); \
+ { $inc: { x: " +
+ NRECORDS +
+ "}}, false, true)); \
assert.writeOK(db.concflag.update({}, { inprog: false }));");
-assert.soon( function(){ var x = db.concflag.findOne(); return x && x.inprog; } ,
- "wait for fork" , 30000 , 1 );
+assert.soon(function() {
+ var x = db.concflag.findOne();
+ return x && x.inprog;
+}, "wait for fork", 30000, 1);
-querycount=0;
-decrements=0;
-misses=0;
+querycount = 0;
+decrements = 0;
+misses = 0;
-assert.soon(function(){
- c2=db.conc.count({x:{$lt:NRECORDS}});
+assert.soon(function() {
+ c2 = db.conc.count({x: {$lt: NRECORDS}});
print(c2);
querycount++;
- if (c2<c1)
+ if (c2 < c1)
decrements++;
else
misses++;
c1 = c2;
- return ! db.concflag.findOne().inprog;
-}, "update never finished" , 2 * 60 * 60 * 1000 , 10 );
+ return !db.concflag.findOne().inprog;
+}, "update never finished", 2 * 60 * 60 * 1000, 10);
print(querycount + " queries, " + decrements + " decrements, " + misses + " misses");
-assert.eq( NRECORDS , db.conc.count() , "AT END 1" );
+assert.eq(NRECORDS, db.conc.count(), "AT END 1");
-updater(); // wait()
+updater(); // wait()
test.stop();
diff --git a/jstests/slow2/cursor_timeout.js b/jstests/slow2/cursor_timeout.js
index ed70471d918..f74521b9bc9 100644
--- a/jstests/slow2/cursor_timeout.js
+++ b/jstests/slow2/cursor_timeout.js
@@ -9,50 +9,58 @@
// After a period of inactivity, the test asserts that cursors #1 and #2 are still alive, and that
// #3 and #4 have been killed.
-var st =
- new ShardingTest( { shards: 2,
- other: { chunkSize: 1,
- shardOptions: { setParameter: "cursorTimeoutMillis=1000" },
- mongosOptions: { setParameter: "cursorTimeoutMillis=1000" } } } );
+var st = new ShardingTest({
+ shards: 2,
+ other: {
+ chunkSize: 1,
+ shardOptions: {setParameter: "cursorTimeoutMillis=1000"},
+ mongosOptions: {setParameter: "cursorTimeoutMillis=1000"}
+ }
+});
st.stopBalancer();
var adminDB = st.admin;
var configDB = st.config;
-var coll = st.s.getDB( 'test' ).user;
+var coll = st.s.getDB('test').user;
-adminDB.runCommand({ enableSharding: coll.getDB().getName() });
+adminDB.runCommand({enableSharding: coll.getDB().getName()});
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-adminDB.runCommand({ shardCollection: coll.getFullName(), key: { x: 1 }});
+adminDB.runCommand({shardCollection: coll.getFullName(), key: {x: 1}});
var data = 'c';
-for( var x = 0; x < 18; x++ ){
+for (var x = 0; x < 18; x++) {
data += data;
}
-for( x = 0; x < 200; x++ ){
- coll.insert({ x: x, v: data });
+for (x = 0; x < 200; x++) {
+ coll.insert({x: x, v: data});
}
var chunkDoc = configDB.chunks.findOne();
var chunkOwner = chunkDoc.shard;
-var toShard = configDB.shards.findOne({ _id: { $ne: chunkOwner }})._id;
-var cmd = { moveChunk: coll.getFullName(), find: chunkDoc.min, to: toShard, _waitForDelete: true };
-var res = adminDB.runCommand( cmd );
+var toShard = configDB.shards.findOne({_id: {$ne: chunkOwner}})._id;
+var cmd = {
+ moveChunk: coll.getFullName(),
+ find: chunkDoc.min,
+ to: toShard,
+ _waitForDelete: true
+};
+var res = adminDB.runCommand(cmd);
-jsTest.log( 'move result: ' + tojson( res ));
+jsTest.log('move result: ' + tojson(res));
var shardedCursorWithTimeout = coll.find();
var shardedCursorWithNoTimeout = coll.find();
-shardedCursorWithNoTimeout.addOption( DBQuery.Option.noTimeout );
+shardedCursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
// Query directly to mongod
-var shardHost = configDB.shards.findOne({ _id: chunkOwner }).host;
-var mongod = new Mongo( shardHost );
-var shardColl = mongod.getCollection( coll.getFullName() );
+var shardHost = configDB.shards.findOne({_id: chunkOwner}).host;
+var mongod = new Mongo(shardHost);
+var shardColl = mongod.getCollection(coll.getFullName());
var cursorWithTimeout = shardColl.find();
var cursorWithNoTimeout = shardColl.find();
-cursorWithNoTimeout.addOption( DBQuery.Option.noTimeout );
+cursorWithNoTimeout.addOption(DBQuery.Option.noTimeout);
shardedCursorWithTimeout.next();
shardedCursorWithNoTimeout.next();
@@ -63,15 +71,18 @@ cursorWithNoTimeout.next();
// Wait until the idle cursor background job has killed the cursors that do not have the "no
// timeout" flag set. We use the "cursorTimeoutMillis" setParameter above to reduce the amount of
// time we need to wait here.
-sleep( 5000 );
+sleep(5000);
-assert.throws( function(){ shardedCursorWithTimeout.itcount(); } );
-assert.throws( function(){ cursorWithTimeout.itcount(); } );
+assert.throws(function() {
+ shardedCursorWithTimeout.itcount();
+});
+assert.throws(function() {
+ cursorWithTimeout.itcount();
+});
// +1 because we already advanced once
-assert.eq( coll.count(), shardedCursorWithNoTimeout.itcount() + 1 );
+assert.eq(coll.count(), shardedCursorWithNoTimeout.itcount() + 1);
-assert.eq( shardColl.count(), cursorWithNoTimeout.itcount() + 1 );
+assert.eq(shardColl.count(), cursorWithNoTimeout.itcount() + 1);
st.stop();
-
diff --git a/jstests/slow2/mr_during_migrate.js b/jstests/slow2/mr_during_migrate.js
index 66962ff0e0f..cb439aeb241 100644
--- a/jstests/slow2/mr_during_migrate.js
+++ b/jstests/slow2/mr_during_migrate.js
@@ -1,52 +1,55 @@
// Do parallel ops with migrates occurring
-var st = new ShardingTest({ shards : 10, mongos : 2, verbose : 2 });
+var st = new ShardingTest({shards: 10, mongos: 2, verbose: 2});
-jsTest.log( "Doing parallel operations..." );
+jsTest.log("Doing parallel operations...");
-//Stop balancer, since it'll just get in the way of these
+// Stop balancer, since it'll just get in the way of these
st.stopBalancer();
var mongos = st.s0;
var admin = mongos.getDB("admin");
-var coll = st.s.getCollection( jsTest.name() + ".coll" );
+var coll = st.s.getCollection(jsTest.name() + ".coll");
var numDocs = 1024 * 1024;
-var dataSize = 1024; // bytes, must be power of 2
+var dataSize = 1024; // bytes, must be power of 2
var data = "x";
-while( data.length < dataSize ) data += data;
+while (data.length < dataSize)
+ data += data;
var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < numDocs; i++ ){
- bulk.insert({ _id: i, data: data });
+for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, data: data});
}
assert.writeOK(bulk.execute());
// Make sure everything got inserted
-assert.eq( numDocs, coll.find().itcount() );
+assert.eq(numDocs, coll.find().itcount());
-
-jsTest.log( "Inserted " + sh._dataFormat( dataSize * numDocs ) + " of data." );
+jsTest.log("Inserted " + sh._dataFormat(dataSize * numDocs) + " of data.");
// Shard collection
-st.shardColl( coll, { _id : 1 }, false );
+st.shardColl(coll, {_id: 1}, false);
st.printShardingStatus();
-jsTest.log( "Sharded collection now initialized, starting migrations..." );
+jsTest.log("Sharded collection now initialized, starting migrations...");
-var checkMigrate = function(){ print( "Result of migrate : " ); printjson( this ); };
+var checkMigrate = function() {
+ print("Result of migrate : ");
+ printjson(this);
+};
// Creates a number of migrations of random chunks to diff shard servers
var ops = [];
-for(var i = 0; i < st._connections.length; i++) {
+for (var i = 0; i < st._connections.length; i++) {
ops.push({
op: "command",
ns: "admin",
command: {
moveChunk: "" + coll,
- find: { _id: { "#RAND_INT" : [ 0, numDocs ] }},
+ find: {_id: {"#RAND_INT": [0, numDocs]}},
to: st._connections[i].shardName,
_waitForDelete: true
},
@@ -56,55 +59,55 @@ for(var i = 0; i < st._connections.length; i++) {
// TODO: Also migrate output collection
-jsTest.log( "Starting migrations now..." );
+jsTest.log("Starting migrations now...");
-var bid = benchStart({ ops : ops,
- host : st.s.host,
- parallel : 1,
- handleErrors : false });
+var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
//#######################
// Tests during migration
var numTests = 5;
-for( var t = 0; t < numTests; t++ ){
-
- jsTest.log( "Test #" + t );
-
- var mongos = st.s1; // use other mongos so we get stale shard versions
- var coll = mongos.getCollection( coll + "" );
- var outputColl = mongos.getCollection( coll + "_output" );
-
+for (var t = 0; t < numTests; t++) {
+ jsTest.log("Test #" + t);
+
+ var mongos = st.s1; // use other mongos so we get stale shard versions
+ var coll = mongos.getCollection(coll + "");
+ var outputColl = mongos.getCollection(coll + "_output");
+
var numTypes = 32;
- var map = function(){ emit( this._id % 32 /* must be hardcoded */, { c : 1 } ); };
- var reduce = function( k, vals ){
+ var map = function() {
+ emit(this._id % 32 /* must be hardcoded */, {c: 1});
+ };
+ var reduce = function(k, vals) {
var total = 0;
- for( var i = 0; i < vals.length; i++ ) total += vals[i].c;
- return { c : total };
+ for (var i = 0; i < vals.length; i++)
+ total += vals[i].c;
+ return {
+ c: total
+ };
};
-
- printjson( coll.find({ _id : 0 }).itcount() );
-
- jsTest.log( "Starting new mapReduce run #" + t );
-
- //assert.eq( coll.find().itcount(), numDocs )
-
- coll.getMongo().getDB("admin").runCommand({ setParameter : 1, traceExceptions : true });
-
- printjson( coll.mapReduce( map, reduce, { out : { replace : outputColl.getName(), db : outputColl.getDB() + "" } }) );
-
- jsTest.log( "MapReduce run #" + t + " finished." );
-
- assert.eq( outputColl.find().itcount(), numTypes );
-
- outputColl.find().forEach( function( x ){
- assert.eq( x.value.c, numDocs / numTypes );
+
+ printjson(coll.find({_id: 0}).itcount());
+
+ jsTest.log("Starting new mapReduce run #" + t);
+
+ // assert.eq( coll.find().itcount(), numDocs )
+
+ coll.getMongo().getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
+
+ printjson(coll.mapReduce(
+ map, reduce, {out: {replace: outputColl.getName(), db: outputColl.getDB() + ""}}));
+
+ jsTest.log("MapReduce run #" + t + " finished.");
+
+ assert.eq(outputColl.find().itcount(), numTypes);
+
+ outputColl.find().forEach(function(x) {
+ assert.eq(x.value.c, numDocs / numTypes);
});
-
}
-
-printjson( benchFinish( bid ) );
+printjson(benchFinish(bid));
st.stop();
diff --git a/jstests/slow2/remove_during_mr.js b/jstests/slow2/remove_during_mr.js
index 16374adeb24..9b632a11a56 100644
--- a/jstests/slow2/remove_during_mr.js
+++ b/jstests/slow2/remove_during_mr.js
@@ -5,13 +5,12 @@ load('jstests/libs/parallelTester.js');
function client1() {
Random.setRandomSeed();
- for(var i = 0; i < 1000; i++) {
+ for (var i = 0; i < 1000; i++) {
db.remove_during_mr.remove({rand: {$gte: Random.rand()}}, {justOne: true});
}
}
function client2() {
-
function mapper() {
emit(this.key, 1);
}
@@ -20,7 +19,7 @@ function client2() {
return {};
}
- for(var i = 0; i < 1000; i++) {
+ for (var i = 0; i < 1000; i++) {
var options = {
out: {replace: 'bar'},
sort: {_id: -1}
diff --git a/jstests/slow2/replsets_killop.js b/jstests/slow2/replsets_killop.js
index 603e1f9c63e..ea1cd560b91 100644
--- a/jstests/slow2/replsets_killop.js
+++ b/jstests/slow2/replsets_killop.js
@@ -4,66 +4,67 @@
numDocs = 1e5;
// Set up a replica set.
-replTest = new ReplSetTest( { name:'test', nodes:3 } );
+replTest = new ReplSetTest({name: 'test', nodes: 3});
nodes = replTest.startSet();
replTest.initiate();
primary = replTest.getPrimary();
secondary = replTest.getSecondary();
-db = primary.getDB( 'test' );
-db.test.save( { a:0 } );
+db = primary.getDB('test');
+db.test.save({a: 0});
replTest.awaitReplication();
-assert.soon( function() { return secondary.getDB( 'test' ).test.count() == 1; } );
+assert.soon(function() {
+ return secondary.getDB('test').test.count() == 1;
+});
// Start a parallel shell to insert new documents on the primary.
-inserter = startParallelShell(
- 'var bulk = db.test.initializeUnorderedBulkOp(); \
- for( i = 1; i < ' + numDocs + '; ++i ) { \
+inserter = startParallelShell('var bulk = db.test.initializeUnorderedBulkOp(); \
+ for( i = 1; i < ' + numDocs +
+ '; ++i ) { \
bulk.insert({ a: i }); \
} \
- bulk.execute();'
-);
+ bulk.execute();');
// Periodically kill replication get mores.
-for( i = 0; i < 1e3; ++i ) {
+for (i = 0; i < 1e3; ++i) {
allOps = db.currentOp();
- for( j in allOps.inprog ) {
- op = allOps.inprog[ j ];
- if ( op.ns == 'local.oplog.rs' && op.op == 'getmore' ) {
- db.killOp( op.opid );
+ for (j in allOps.inprog) {
+ op = allOps.inprog[j];
+ if (op.ns == 'local.oplog.rs' && op.op == 'getmore') {
+ db.killOp(op.opid);
}
}
- sleep( 100 );
+ sleep(100);
}
// Wait for the inserter to finish.
inserter();
-assert.eq( numDocs, db.test.count() );
+assert.eq(numDocs, db.test.count());
// Return true when the correct number of documents are present on the secondary. Otherwise print
// which documents are missing and return false.
function allReplicated() {
- count = secondary.getDB( 'test' ).test.count();
- if ( count == numDocs ) {
+ count = secondary.getDB('test').test.count();
+ if (count == numDocs) {
// Return true if the count is as expected.
return true;
}
-
+
// Identify and print the missing a-values.
foundSet = {};
- c = secondary.getDB( 'test' ).test.find();
- while( c.hasNext() ) {
- foundSet[ '' + c.next().a ] = true;
+ c = secondary.getDB('test').test.find();
+ while (c.hasNext()) {
+ foundSet['' + c.next().a] = true;
}
missing = [];
- for( i = 0; i < numDocs; ++i ) {
- if ( !( ( '' + i ) in foundSet ) ) {
- missing.push( i );
+ for (i = 0; i < numDocs; ++i) {
+ if (!(('' + i) in foundSet)) {
+ missing.push(i);
}
}
- print( 'count: ' + count + ' missing: ' + missing );
+ print('count: ' + count + ' missing: ' + missing);
return false;
}
// Wait for the correct number of (replicated) documents to be present on the secondary.
-assert.soon( allReplicated, "didn't replicate all docs", 5 * 60 * 1000 );
+assert.soon(allReplicated, "didn't replicate all docs", 5 * 60 * 1000);
diff --git a/jstests/ssl/disable_x509.js b/jstests/ssl/disable_x509.js
index 7aaf6ff1317..5663f6a6196 100644
--- a/jstests/ssl/disable_x509.js
+++ b/jstests/ssl/disable_x509.js
@@ -2,37 +2,45 @@
var CLIENT_USER = "C=US,ST=New York,L=New York City,O=MongoDB,OU=KernelUser,CN=client";
-var conn = MongoRunner.runMongod({smallfiles: "",
- auth: "",
- sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem"});
+var conn = MongoRunner.runMongod({
+ smallfiles: "",
+ auth: "",
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem"
+});
// Find out if this build supports the authenticationMechanisms startup parameter.
// If it does, restart with and without the MONGODB-X509 mechanisms enabled.
var cmdOut = conn.getDB('admin').runCommand({getParameter: 1, authenticationMechanisms: 1});
if (cmdOut.ok) {
MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({ restart: conn,
- setParameter: "authenticationMechanisms=MONGODB-X509" });
+ conn = MongoRunner.runMongod(
+ {restart: conn, setParameter: "authenticationMechanisms=MONGODB-X509"});
external = conn.getDB("$external");
// Add user using localhost exception
- external.createUser({user: CLIENT_USER, roles:[
- {'role':'userAdminAnyDatabase', 'db':'admin'},
- {'role':'readWriteAnyDatabase', 'db':'admin'}]});
+ external.createUser({
+ user: CLIENT_USER,
+ roles: [
+ {'role': 'userAdminAnyDatabase', 'db': 'admin'},
+ {'role': 'readWriteAnyDatabase', 'db': 'admin'}
+ ]
+ });
// Localhost exception should not be in place anymore
- assert.throws( function() { test.foo.findOne();}, {}, "read without login" );
+ assert.throws(function() {
+ test.foo.findOne();
+ }, {}, "read without login");
- assert( external.auth({user: CLIENT_USER, mechanism: 'MONGODB-X509'}),
- "authentication with valid user failed" );
+ assert(external.auth({user: CLIENT_USER, mechanism: 'MONGODB-X509'}),
+ "authentication with valid user failed");
MongoRunner.stopMongod(conn);
- conn = MongoRunner.runMongod({ restart: conn,
- setParameter: "authenticationMechanisms=SCRAM-SHA-1" });
+ conn = MongoRunner.runMongod(
+ {restart: conn, setParameter: "authenticationMechanisms=SCRAM-SHA-1"});
external = conn.getDB("$external");
- assert( !external.auth({user: CLIENT_USER, mechanism: 'MONGODB-X509'}),
- "authentication with disabled auth mechanism succeeded" );
+ assert(!external.auth({user: CLIENT_USER, mechanism: 'MONGODB-X509'}),
+ "authentication with disabled auth mechanism succeeded");
}
diff --git a/jstests/ssl/initial_sync1_x509.js b/jstests/ssl/initial_sync1_x509.js
index 84f59d29556..85198604b57 100644
--- a/jstests/ssl/initial_sync1_x509.js
+++ b/jstests/ssl/initial_sync1_x509.js
@@ -1,17 +1,19 @@
// Basic tests for cluster authentication using x509.
-var common_options = {keyFile : "jstests/libs/key1",
- sslMode : "requireSSL",
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem",
- sslAllowInvalidHostnames: ""};
+var common_options = {
+ keyFile: "jstests/libs/key1",
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslAllowInvalidHostnames: ""
+};
function runInitialSyncTest() {
load("jstests/replsets/rslib.js");
print("1. Bring up set");
- var replTest = new ReplSetTest({name: "jstests_initsync1_x509",
- nodes : {node0 : x509_options1, node1 : x509_options2}});
+ var replTest = new ReplSetTest(
+ {name: "jstests_initsync1_x509", nodes: {node0: x509_options1, node1: x509_options2}});
var conns = replTest.startSet();
replTest.initiate();
@@ -24,17 +26,17 @@ function runInitialSyncTest() {
var admin_s1 = slave1.getDB("admin");
print("2. Create a root user.");
- admin.createUser({ user: "root", pwd: "pass", roles: ["root"]});
+ admin.createUser({user: "root", pwd: "pass", roles: ["root"]});
admin.auth("root", "pass");
admin_s1.auth("root", "pass");
print("3. Insert some data");
var bulk = foo.bar.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
- bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" });
+ bulk.insert({date: new Date(), x: i, str: "all the talk on the market"});
}
assert.writeOK(bulk.execute());
- print("total in foo: "+foo.bar.count());
+ print("total in foo: " + foo.bar.count());
print("4. Make sure synced");
replTest.awaitReplication();
@@ -43,7 +45,7 @@ function runInitialSyncTest() {
master = replTest.getPrimary();
bulk = foo.bar.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
- bulk.insert({ date: new Date(), x: i, str: "all the talk on the market" });
+ bulk.insert({date: new Date(), x: i, str: "all the talk on the market"});
}
assert.writeOK(bulk.execute());
@@ -54,21 +56,19 @@ function runInitialSyncTest() {
}
// Standard case, clusterAuthMode: x509
-var x509_options1 = Object.merge(common_options,
- {sslClusterFile: "jstests/libs/cluster_cert.pem",
- clusterAuthMode: "x509"});
+var x509_options1 = Object.merge(
+ common_options, {sslClusterFile: "jstests/libs/cluster_cert.pem", clusterAuthMode: "x509"});
var x509_options2 = x509_options1;
runInitialSyncTest();
-// Mixed clusterAuthMode: sendX509 and sendKeyFile and try adding --auth
-x509_options1 = Object.merge(common_options,
- {sslClusterFile: "jstests/libs/cluster_cert.pem",
- clusterAuthMode: "sendX509",
- auth: ""});
+// Mixed clusterAuthMode: sendX509 and sendKeyFile and try adding --auth
+x509_options1 = Object.merge(
+ common_options,
+ {sslClusterFile: "jstests/libs/cluster_cert.pem", clusterAuthMode: "sendX509", auth: ""});
x509_options2 = Object.merge(common_options, {clusterAuthMode: "sendKeyFile"});
runInitialSyncTest();
-// Mixed clusterAuthMode: x509 and sendX509, use the PEMKeyFile for outgoing connections
+// Mixed clusterAuthMode: x509 and sendX509, use the PEMKeyFile for outgoing connections
x509_options1 = Object.merge(common_options, {clusterAuthMode: "x509"});
x509_options2 = Object.merge(common_options, {clusterAuthMode: "sendX509"});
runInitialSyncTest();
@@ -76,8 +76,9 @@ runInitialSyncTest();
// verify that replset initiate fails if using a self-signed cert
x509_options1 = Object.merge(common_options, {clusterAuthMode: "x509"});
x509_options2 = Object.merge(common_options,
- {sslClusterFile: "jstests/libs/smoke.pem",
- clusterAuthMode: "x509"});
-var replTest = new ReplSetTest({nodes : {node0 : x509_options1, node1 : x509_options2}});
+ {sslClusterFile: "jstests/libs/smoke.pem", clusterAuthMode: "x509"});
+var replTest = new ReplSetTest({nodes: {node0: x509_options1, node1: x509_options2}});
var conns = replTest.startSet();
-assert.throws( function() { replTest.initiate(); } );
+assert.throws(function() {
+ replTest.initiate();
+});
diff --git a/jstests/ssl/libs/ssl_helpers.js b/jstests/ssl/libs/ssl_helpers.js
index 2b7d7afc5b6..5fab2f1f030 100644
--- a/jstests/ssl/libs/ssl_helpers.js
+++ b/jstests/ssl/libs/ssl_helpers.js
@@ -9,19 +9,27 @@ var CLIENT_CERT = "jstests/libs/client.pem";
// Note: "sslAllowInvalidCertificates" is enabled to avoid
// hostname conflicts with our testing certificates
-var disabled = {sslMode: "disabled"};
-var allowSSL = {sslMode : "allowSSL",
+var disabled = {
+ sslMode: "disabled"
+};
+var allowSSL = {
+ sslMode: "allowSSL",
sslAllowInvalidCertificates: "",
- sslPEMKeyFile : SERVER_CERT,
- sslCAFile: CA_CERT};
-var preferSSL = {sslMode : "preferSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT
+};
+var preferSSL = {
+ sslMode: "preferSSL",
sslAllowInvalidCertificates: "",
- sslPEMKeyFile : SERVER_CERT,
- sslCAFile: CA_CERT};
-var requireSSL = {sslMode : "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT
+};
+var requireSSL = {
+ sslMode: "requireSSL",
sslAllowInvalidCertificates: "",
- sslPEMKeyFile : SERVER_CERT,
- sslCAFile: CA_CERT};
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT
+};
// Test if ssl replset configs work
@@ -41,8 +49,7 @@ var replShouldFail = function(name, opt1, opt2) {
ssl_options2 = opt2;
ssl_name = name;
replTest = null;
- assert.throws(load,[replSetTestFile],
- "This setup should have failed");
+ assert.throws(load, [replSetTestFile], "This setup should have failed");
// clean up to continue running...
if (replTest) {
replTest.stopSet(15);
@@ -55,11 +62,8 @@ var replShouldFail = function(name, opt1, opt2) {
*/
function mixedShardTest(options1, options2, shouldSucceed) {
try {
- var st = new ShardingTest({
- mongos : [options1],
- config : [options1],
- shards : [options1, options2]
- });
+ var st = new ShardingTest(
+ {mongos: [options1], config: [options1], shards: [options1, options2]});
st.stopBalancer();
// Test mongos talking to config servers
@@ -67,39 +71,38 @@ function mixedShardTest(options1, options2, shouldSucceed) {
assert.eq(r, true, "error enabling sharding for this configuration");
st.ensurePrimaryShard("test", "shard0000");
- r = st.adminCommand({ movePrimary: 'test', to: 'shard0001' });
+ r = st.adminCommand({movePrimary: 'test', to: 'shard0001'});
var db1 = st.getDB("test");
- r = st.adminCommand({ shardCollection : "test.col" , key : { _id : 1 } });
+ r = st.adminCommand({shardCollection: "test.col", key: {_id: 1}});
assert.eq(r, true, "error sharding collection for this configuration");
// Test mongos talking to shards
- var bigstr = Array(1024*1024).join("#");
+ var bigstr = Array(1024 * 1024).join("#");
var bulk = db1.col.initializeUnorderedBulkOp();
- for(var i = 0; i < 128; i++){
- bulk.insert({ _id: i, string: bigstr });
+ for (var i = 0; i < 128; i++) {
+ bulk.insert({_id: i, string: bigstr});
}
assert.writeOK(bulk.execute());
assert.eq(128, db1.col.count(), "error retrieving documents from cluster");
// Test shards talking to each other
- r = st.getDB('test').adminCommand({ moveChunk: 'test.col',
- find: { _id: 0 }, to: 'shard0000' });
+ r = st.getDB('test').adminCommand({moveChunk: 'test.col', find: {_id: 0}, to: 'shard0000'});
assert(r.ok, "error moving chunks: " + tojson(r));
db1.col.remove({});
- } catch(e) {
- if (shouldSucceed) throw e;
- //silence error if we should fail...
+ } catch (e) {
+ if (shouldSucceed)
+ throw e;
+ // silence error if we should fail...
print("IMPORTANT! => Test failed when it should have failed...continuing...");
} finally {
// This has to be done in order for failure
// to not prevent future tests from running...
- if(st) {
+ if (st) {
st.stop();
}
}
}
-
diff --git a/jstests/ssl/mixed_mode_sharded.js b/jstests/ssl/mixed_mode_sharded.js
index 8abaf77dabf..cdebc285a9a 100644
--- a/jstests/ssl/mixed_mode_sharded.js
+++ b/jstests/ssl/mixed_mode_sharded.js
@@ -16,4 +16,4 @@ mixedShardTest(preferSSL, allowSSL, true);
mixedShardTest(allowSSL, preferSSL, true);
print("=== Testing allowSSL/requireSSL cluster - SHOULD FAIL ===");
-mixedShardTest(requireSSL, allowSSL, false);
+mixedShardTest(requireSSL, allowSSL, false);
diff --git a/jstests/ssl/set_parameter_ssl.js b/jstests/ssl/set_parameter_ssl.js
index 92486f663d9..407ed8b0834 100644
--- a/jstests/ssl/set_parameter_ssl.js
+++ b/jstests/ssl/set_parameter_ssl.js
@@ -1,20 +1,16 @@
// Test changing the --sslMode and --clusterAuthMode parameters using setParameter
var SERVER_CERT = "jstests/libs/server.pem";
-var CA_CERT = "jstests/libs/ca.pem";
+var CA_CERT = "jstests/libs/ca.pem";
function testSSLTransition(oldMode, newMode, shouldSucceed) {
- var conn = MongoRunner.runMongod({
- sslMode: oldMode,
- sslPEMKeyFile: SERVER_CERT,
- sslCAFile: CA_CERT
- });
+ var conn =
+ MongoRunner.runMongod({sslMode: oldMode, sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT});
var adminDB = conn.getDB("admin");
adminDB.createUser({user: "root", pwd: "pwd", roles: ['root']});
adminDB.auth("root", "pwd");
- var res = adminDB.runCommand({ "setParameter" : 1,
- "sslMode" : newMode });
+ var res = adminDB.runCommand({"setParameter": 1, "sslMode": newMode});
assert(res["ok"] == shouldSucceed, tojson(res));
MongoRunner.stopMongod(conn.port);
@@ -31,8 +27,7 @@ function testAuthModeTransition(oldMode, newMode, sslMode, shouldSucceed) {
var adminDB = conn.getDB("admin");
adminDB.createUser({user: "root", pwd: "pwd", roles: ['root']});
adminDB.auth("root", "pwd");
- var res = adminDB.runCommand({ "setParameter" : 1,
- "clusterAuthMode" : newMode });
+ var res = adminDB.runCommand({"setParameter": 1, "clusterAuthMode": newMode});
assert(res["ok"] == shouldSucceed, tojson(res));
MongoRunner.stopMongod(conn.port);
diff --git a/jstests/ssl/sharding_with_x509.js b/jstests/ssl/sharding_with_x509.js
index 8a7e747c6ad..6e497ab290e 100644
--- a/jstests/ssl/sharding_with_x509.js
+++ b/jstests/ssl/sharding_with_x509.js
@@ -1,69 +1,73 @@
-// Tests basic sharding with x509 cluster auth
+// Tests basic sharding with x509 cluster auth
// The purpose is to verify the connectivity between mongos and the shards
-var x509_options = {sslMode : "requireSSL",
- sslPEMKeyFile : "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem",
- sslClusterFile: "jstests/libs/cluster_cert.pem",
- sslAllowInvalidHostnames: "",
- clusterAuthMode: "x509"};
+var x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslClusterFile: "jstests/libs/cluster_cert.pem",
+ sslAllowInvalidHostnames: "",
+ clusterAuthMode: "x509"
+};
// Start ShardingTest with enableBalancer because ShardingTest attempts to turn
// off the balancer otherwise, which it will not be authorized to do. Once SERVER-14017
// is fixed the "enableBalancer" line could be removed.
-var st = new ShardingTest({ name : "sharding_with_x509" ,
- shards : 2,
- mongos : 1,
- other: {
- enableBalancer: true,
- configOptions : x509_options,
- mongosOptions : x509_options,
- rsOptions : x509_options,
- shardOptions : x509_options
- }});
+var st = new ShardingTest({
+ name: "sharding_with_x509",
+ shards: 2,
+ mongos: 1,
+ other: {
+ enableBalancer: true,
+ configOptions: x509_options,
+ mongosOptions: x509_options,
+ rsOptions: x509_options,
+ shardOptions: x509_options
+ }
+});
st.s.getDB('admin').createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
st.s.getDB('admin').auth('admin', 'pwd');
-var coll = st.s.getCollection( "test.foo" );
+var coll = st.s.getCollection("test.foo");
-st.shardColl( coll, { _id : 1 }, false );
+st.shardColl(coll, {_id: 1}, false);
// Create an index so we can find by num later
-coll.ensureIndex({ insert : 1 });
+coll.ensureIndex({insert: 1});
-print( "starting insertion phase" );
+print("starting insertion phase");
// Insert a bunch of data
var toInsert = 2000;
var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < toInsert; i++ ){
- bulk.insert({ my: "test", data: "to", insert: i });
+for (var i = 0; i < toInsert; i++) {
+ bulk.insert({my: "test", data: "to", insert: i});
}
assert.writeOK(bulk.execute());
-print( "starting updating phase" );
+print("starting updating phase");
// Update a bunch of data
var toUpdate = toInsert;
bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < toUpdate; i++ ){
- var id = coll.findOne({ insert : i })._id;
- bulk.find({ insert : i, _id : id }).update({ $inc : { counter : 1 } });
+for (var i = 0; i < toUpdate; i++) {
+ var id = coll.findOne({insert: i})._id;
+ bulk.find({insert: i, _id: id}).update({$inc: {counter: 1}});
}
assert.writeOK(bulk.execute());
-print( "starting deletion" );
+print("starting deletion");
// Remove a bunch of data
var toDelete = toInsert / 2;
bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < toDelete; i++ ){
- bulk.find({ insert : i }).remove();
+for (var i = 0; i < toDelete; i++) {
+ bulk.find({insert: i}).remove();
}
assert.writeOK(bulk.execute());
// Make sure the right amount of data is there
-assert.eq( coll.find().count(), toInsert / 2 );
+assert.eq(coll.find().count(), toInsert / 2);
// Finish
st.stop();
diff --git a/jstests/ssl/ssl_cert_password.js b/jstests/ssl/ssl_cert_password.js
index b50fec9101c..772038f9970 100644
--- a/jstests/ssl/ssl_cert_password.js
+++ b/jstests/ssl/ssl_cert_password.js
@@ -11,20 +11,26 @@ resetDbpath(dbpath);
mkdir(external_scratch_dir);
// Password is correct
-var md = MongoRunner.runMongod({nopreallocj: "",
- dbpath: dbpath,
- sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/password_protected.pem",
- sslPEMKeyPassword: "qwerty"});
+var md = MongoRunner.runMongod({
+ nopreallocj: "",
+ dbpath: dbpath,
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/password_protected.pem",
+ sslPEMKeyPassword: "qwerty"
+});
// MongoRunner.runMongod connects a Mongo shell, so if we get here, the test is successful.
-
// Password incorrect; error logged is:
// error:06065064:digital envelope routines:EVP_DecryptFinal_ex:bad decrypt
-var exit_code = runMongoProgram("mongo", "--port", md.port,
- "--ssl", "--sslAllowInvalidCertificates",
- "--sslPEMKeyFile", "jstests/libs/password_protected.pem",
- "--sslPEMKeyPassword", "barf");
+var exit_code = runMongoProgram("mongo",
+ "--port",
+ md.port,
+ "--ssl",
+ "--sslAllowInvalidCertificates",
+ "--sslPEMKeyFile",
+ "jstests/libs/password_protected.pem",
+ "--sslPEMKeyPassword",
+ "barf");
// 1 is the exit code for failure
assert(exit_code == 1);
@@ -32,69 +38,95 @@ assert(exit_code == 1);
// Test that mongodump and mongorestore support ssl
c = md.getDB("dumprestore_ssl").getCollection("foo");
assert.eq(0, c.count(), "dumprestore_ssl.foo collection is not initially empty");
-c.save({ a : 22 });
+c.save({a: 22});
assert.eq(1, c.count(), "failed to insert document into dumprestore_ssl.foo collection");
-exit_code = runMongoProgram("mongodump", "--out", external_scratch_dir,
- "--port", md.port,
+exit_code = runMongoProgram("mongodump",
+ "--out",
+ external_scratch_dir,
+ "--port",
+ md.port,
"--ssl",
- "--sslPEMKeyFile", "jstests/libs/password_protected.pem",
- "--sslPEMKeyPassword", "qwerty");
+ "--sslPEMKeyFile",
+ "jstests/libs/password_protected.pem",
+ "--sslPEMKeyPassword",
+ "qwerty");
assert.eq(exit_code, 0, "Failed to start mongodump with ssl");
c.drop();
assert.eq(0, c.count(), "dumprestore_ssl.foo collection is not empty after drop");
-exit_code = runMongoProgram("mongorestore", "--dir", external_scratch_dir,
- "--port", md.port,
+exit_code = runMongoProgram("mongorestore",
+ "--dir",
+ external_scratch_dir,
+ "--port",
+ md.port,
"--ssl",
- "--sslPEMKeyFile", "jstests/libs/password_protected.pem",
- "--sslPEMKeyPassword", "qwerty");
+ "--sslPEMKeyFile",
+ "jstests/libs/password_protected.pem",
+ "--sslPEMKeyPassword",
+ "qwerty");
assert.eq(exit_code, 0, "Failed to start mongorestore with ssl");
assert.soon("c.findOne()", "no data after sleep. Expected a document after calling mongorestore");
-assert.eq(1, c.count(),
+assert.eq(1,
+ c.count(),
"did not find expected document in dumprestore_ssl.foo collection after mongorestore");
-assert.eq(22, c.findOne().a,
- "did not find correct value in document after mongorestore");
+assert.eq(22, c.findOne().a, "did not find correct value in document after mongorestore");
// Test that mongoimport and mongoexport support ssl
var exportimport_ssl_dbname = "exportimport_ssl";
c = md.getDB(exportimport_ssl_dbname).getCollection("foo");
assert.eq(0, c.count(), "exportimport_ssl.foo collection is not initially empty");
-c.save({ a : 22 });
+c.save({a: 22});
assert.eq(1, c.count(), "failed to insert document into exportimport_ssl.foo collection");
var exportimport_file = "data.json";
-exit_code = runMongoProgram("mongoexport", "--out", external_scratch_dir + exportimport_file,
- "-d", exportimport_ssl_dbname, "-c", "foo",
- "--port", md.port,
+exit_code = runMongoProgram("mongoexport",
+ "--out",
+ external_scratch_dir + exportimport_file,
+ "-d",
+ exportimport_ssl_dbname,
+ "-c",
+ "foo",
+ "--port",
+ md.port,
"--ssl",
- "--sslPEMKeyFile", "jstests/libs/password_protected.pem",
- "--sslPEMKeyPassword", "qwerty");
+ "--sslPEMKeyFile",
+ "jstests/libs/password_protected.pem",
+ "--sslPEMKeyPassword",
+ "qwerty");
assert.eq(exit_code, 0, "Failed to start mongoexport with ssl");
c.drop();
assert.eq(0, c.count(), "afterdrop", "-d", exportimport_ssl_dbname, "-c", "foo");
-exit_code = runMongoProgram("mongoimport", "--file", external_scratch_dir + exportimport_file,
- "-d", exportimport_ssl_dbname, "-c", "foo",
- "--port", md.port,
+exit_code = runMongoProgram("mongoimport",
+ "--file",
+ external_scratch_dir + exportimport_file,
+ "-d",
+ exportimport_ssl_dbname,
+ "-c",
+ "foo",
+ "--port",
+ md.port,
"--ssl",
- "--sslPEMKeyFile", "jstests/libs/password_protected.pem",
- "--sslPEMKeyPassword", "qwerty");
+ "--sslPEMKeyFile",
+ "jstests/libs/password_protected.pem",
+ "--sslPEMKeyPassword",
+ "qwerty");
assert.eq(exit_code, 0, "Failed to start mongoimport with ssl");
assert.soon("c.findOne()", "no data after sleep. Expected a document after calling mongoimport");
-assert.eq(1, c.count(),
+assert.eq(1,
+ c.count(),
"did not find expected document in dumprestore_ssl.foo collection after mongoimport");
-assert.eq(22, c.findOne().a,
- "did not find correct value in document after mongoimport");
+assert.eq(22, c.findOne().a, "did not find correct value in document after mongoimport");
// Test that mongofiles supports ssl
var mongofiles_ssl_dbname = "mongofiles_ssl";
@@ -103,11 +135,18 @@ mongofiles_db = md.getDB(mongofiles_ssl_dbname);
source_filename = 'jstests/ssl/ssl_cert_password.js';
filename = 'ssl_cert_password.js';
-exit_code = runMongoProgram("mongofiles", "-d", mongofiles_ssl_dbname, "put", source_filename,
- "--port", md.port,
+exit_code = runMongoProgram("mongofiles",
+ "-d",
+ mongofiles_ssl_dbname,
+ "put",
+ source_filename,
+ "--port",
+ md.port,
"--ssl",
- "--sslPEMKeyFile", "jstests/libs/password_protected.pem",
- "--sslPEMKeyPassword", "qwerty");
+ "--sslPEMKeyFile",
+ "jstests/libs/password_protected.pem",
+ "--sslPEMKeyPassword",
+ "qwerty");
assert.eq(exit_code, 0, "Failed to start mongofiles with ssl");
@@ -120,12 +159,20 @@ md5_computed = mongofiles_db.runCommand({filemd5: file_obj._id}).md5;
assert.eq(md5, md5_stored, "md5 incorrect for file");
assert.eq(md5, md5_computed, "md5 computed incorrectly by server");
-exit_code = runMongoProgram("mongofiles", "-d", mongofiles_ssl_dbname, "get", source_filename,
- "-l", external_scratch_dir + filename,
- "--port", md.port,
+exit_code = runMongoProgram("mongofiles",
+ "-d",
+ mongofiles_ssl_dbname,
+ "get",
+ source_filename,
+ "-l",
+ external_scratch_dir + filename,
+ "--port",
+ md.port,
"--ssl",
- "--sslPEMKeyFile", "jstests/libs/password_protected.pem",
- "--sslPEMKeyPassword", "qwerty");
+ "--sslPEMKeyFile",
+ "jstests/libs/password_protected.pem",
+ "--sslPEMKeyPassword",
+ "qwerty");
assert.eq(exit_code, 0, "Failed to start mongofiles with ssl");
diff --git a/jstests/ssl/ssl_crl.js b/jstests/ssl/ssl_crl.js
index 96c12b91bb9..9f70ba91b2b 100644
--- a/jstests/ssl/ssl_crl.js
+++ b/jstests/ssl/ssl_crl.js
@@ -8,20 +8,12 @@ load("jstests/libs/ssl_test.js");
var testUnrevoked = new SSLTest(
// Server option overrides
- {
- sslMode: "requireSSL",
- sslCRLFile: "jstests/libs/crl.pem"
- }
-);
+ {sslMode: "requireSSL", sslCRLFile: "jstests/libs/crl.pem"});
assert(testUnrevoked.connectWorked());
var testRevoked = new SSLTest(
// Server option overrides
- {
- sslMode: "requireSSL",
- sslCRLFile: "jstests/libs/crl_expired.pem"
- }
-);
+ {sslMode: "requireSSL", sslCRLFile: "jstests/libs/crl_expired.pem"});
assert(!testRevoked.connectWorked());
diff --git a/jstests/ssl/ssl_crl_revoked.js b/jstests/ssl/ssl_crl_revoked.js
index 97ab29bf46e..cfe5d03d285 100644
--- a/jstests/ssl/ssl_crl_revoked.js
+++ b/jstests/ssl/ssl_crl_revoked.js
@@ -2,16 +2,23 @@
// Note: crl_client_revoked.pem is a CRL with the client.pem certificate listed as revoked.
// This test should test that the user cannot connect with client.pem certificate.
-var md = MongoRunner.runMongod({sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem",
- sslCRLFile: "jstests/libs/crl_client_revoked.pem"});
+var md = MongoRunner.runMongod({
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslCRLFile: "jstests/libs/crl_client_revoked.pem"
+});
-var mongo = runMongoProgram("mongo", "--port", md.port, "--ssl", "--sslAllowInvalidCertificates",
- "--sslPEMKeyFile", "jstests/libs/client_revoked.pem",
- "--eval", ";");
+var mongo = runMongoProgram("mongo",
+ "--port",
+ md.port,
+ "--ssl",
+ "--sslAllowInvalidCertificates",
+ "--sslPEMKeyFile",
+ "jstests/libs/client_revoked.pem",
+ "--eval",
+ ";");
// 1 is the exit code for the shell failing to connect, which is what we want
// for a successful test.
-assert(mongo==1);
-
+assert(mongo == 1);
diff --git a/jstests/ssl/ssl_fips.js b/jstests/ssl/ssl_fips.js
index 2630377fe36..9b3e4b94c96 100644
--- a/jstests/ssl/ssl_fips.js
+++ b/jstests/ssl/ssl_fips.js
@@ -1,27 +1,31 @@
// Test mongod start with FIPS mode enabled
var port = allocatePort();
-var md = MongoRunner.runMongod({port: port,
- sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem",
- sslFIPSMode: ""});
+var md = MongoRunner.runMongod({
+ port: port,
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslFIPSMode: ""
+});
var mongo = runMongoProgram("mongo",
- "--port", port,
+ "--port",
+ port,
"--ssl",
"--sslAllowInvalidCertificates",
- "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslPEMKeyFile",
+ "jstests/libs/client.pem",
"--sslFIPSMode",
- "--eval", ";");
+ "--eval",
+ ";");
// if mongo shell didn't start/connect properly
if (mongo != 0) {
print("mongod failed to start, checking for FIPS support");
mongoOutput = rawMongoProgramOutput();
assert(mongoOutput.match(/this version of mongodb was not compiled with FIPS support/) ||
- mongoOutput.match(/FIPS_mode_set:fips mode not supported/));
-}
-else {
+ mongoOutput.match(/FIPS_mode_set:fips mode not supported/));
+} else {
// verify that auth works, SERVER-18051
md.getDB("admin").createUser({user: "root", pwd: "root", roles: ["root"]});
assert(md.getDB("admin").auth("root", "root"), "auth failed");
diff --git a/jstests/ssl/ssl_hostname_validation.js b/jstests/ssl/ssl_hostname_validation.js
index 7c60226f0ec..ab727320744 100644
--- a/jstests/ssl/ssl_hostname_validation.js
+++ b/jstests/ssl/ssl_hostname_validation.js
@@ -1,53 +1,62 @@
// Test SSL server certificate hostname validation
-// for client-server and server-server connections
-var CA_CERT = "jstests/libs/ca.pem";
+// for client-server and server-server connections
+var CA_CERT = "jstests/libs/ca.pem";
var SERVER_CERT = "jstests/libs/server.pem";
-var CN_CERT = "jstests/libs/localhostnameCN.pem";
-var SAN_CERT = "jstests/libs/localhostnameSAN.pem";
+var CN_CERT = "jstests/libs/localhostnameCN.pem";
+var SAN_CERT = "jstests/libs/localhostnameSAN.pem";
var CLIENT_CERT = "jstests/libs/client.pem";
var BAD_SAN_CERT = "jstests/libs/badSAN.pem";
function testCombination(certPath, allowInvalidHost, allowInvalidCert, shouldSucceed) {
- var mongod = MongoRunner.runMongod({sslMode: "requireSSL",
- sslPEMKeyFile: certPath,
- sslCAFile: CA_CERT});
+ var mongod = MongoRunner.runMongod(
+ {sslMode: "requireSSL", sslPEMKeyFile: certPath, sslCAFile: CA_CERT});
var mongo;
if (allowInvalidCert) {
mongo = runMongoProgram("mongo",
- "--port", mongod.port,
+ "--port",
+ mongod.port,
"--ssl",
- "--sslCAFile", CA_CERT,
- "--sslPEMKeyFile", CLIENT_CERT,
+ "--sslCAFile",
+ CA_CERT,
+ "--sslPEMKeyFile",
+ CLIENT_CERT,
"--sslAllowInvalidCertificates",
- "--eval", ";");
- }
- else if (allowInvalidHost) {
+ "--eval",
+ ";");
+ } else if (allowInvalidHost) {
mongo = runMongoProgram("mongo",
- "--port", mongod.port,
+ "--port",
+ mongod.port,
"--ssl",
- "--sslCAFile", CA_CERT,
- "--sslPEMKeyFile", CLIENT_CERT,
+ "--sslCAFile",
+ CA_CERT,
+ "--sslPEMKeyFile",
+ CLIENT_CERT,
"--sslAllowInvalidHostnames",
- "--eval", ";");
+ "--eval",
+ ";");
} else {
mongo = runMongoProgram("mongo",
- "--port", mongod.port,
+ "--port",
+ mongod.port,
"--ssl",
- "--sslCAFile", CA_CERT,
- "--sslPEMKeyFile", CLIENT_CERT,
- "--eval", ";");
+ "--sslCAFile",
+ CA_CERT,
+ "--sslPEMKeyFile",
+ CLIENT_CERT,
+ "--eval",
+ ";");
}
if (shouldSucceed) {
// runMongoProgram returns 0 on success
- assert.eq(0, mongo, "Connection attempt failed when it should succeed certPath: " +
- certPath);
- }
- else {
+ assert.eq(
+ 0, mongo, "Connection attempt failed when it should succeed certPath: " + certPath);
+ } else {
// runMongoProgram returns 1 on failure
- assert.eq(1, mongo, "Connection attempt succeeded when it should fail certPath: " +
- certPath);
+ assert.eq(
+ 1, mongo, "Connection attempt succeeded when it should fail certPath: " + certPath);
}
MongoRunner.stopMongod(mongod.port);
}
@@ -67,35 +76,43 @@ testCombination(SERVER_CERT, true, true, true);
testCombination(BAD_SAN_CERT, false, false, false);
// 2. Initiate ReplSetTest with invalid certs
-ssl_options = {sslMode : "requireSSL",
- // SERVER_CERT has SAN=localhost. CLIENT_CERT is exact same except no SANS
- sslPEMKeyFile : CLIENT_CERT,
- sslCAFile: CA_CERT};
+ssl_options = {
+ sslMode: "requireSSL",
+ // SERVER_CERT has SAN=localhost. CLIENT_CERT is exact same except no SANS
+ sslPEMKeyFile: CLIENT_CERT,
+ sslCAFile: CA_CERT
+};
-replTest = new ReplSetTest({nodes : {node0 : ssl_options, node1 : ssl_options}});
+replTest = new ReplSetTest({nodes: {node0: ssl_options, node1: ssl_options}});
replTest.startSet();
-assert.throws( function() { replTest.initiate(); } );
+assert.throws(function() {
+ replTest.initiate();
+});
replTest.stopSet();
// 3. Initiate ReplSetTest with invalid certs but set allowInvalidHostnames
-ssl_options = {sslMode : "requireSSL",
- sslPEMKeyFile : SERVER_CERT,
- sslCAFile: CA_CERT,
- sslAllowInvalidHostnames: ""};
+ssl_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT,
+ sslAllowInvalidHostnames: ""
+};
-var replTest = new ReplSetTest({nodes : {node0 : ssl_options, node1 : ssl_options}});
+var replTest = new ReplSetTest({nodes: {node0: ssl_options, node1: ssl_options}});
replTest.startSet();
replTest.initiate();
replTest.stopSet();
// 4. Initiate ReplSetTest with invalid certs but set allowInvalidCertificates
-ssl_options = {sslMode : "requireSSL",
- // SERVER_CERT has SAN=localhost. CLIENT_CERT is exact same except no SANS
- sslPEMKeyFile : SERVER_CERT,
- sslCAFile: CA_CERT,
- sslAllowInvalidCertificates: ""};
+ssl_options = {
+ sslMode: "requireSSL",
+ // SERVER_CERT has SAN=localhost. CLIENT_CERT is exact same except no SANS
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT,
+ sslAllowInvalidCertificates: ""
+};
-var replTest = new ReplSetTest({nodes : {node0 : ssl_options, node1 : ssl_options}});
+var replTest = new ReplSetTest({nodes: {node0: ssl_options, node1: ssl_options}});
replTest.startSet();
replTest.initiate();
replTest.stopSet();
diff --git a/jstests/ssl/ssl_invalid_server_cert.js b/jstests/ssl/ssl_invalid_server_cert.js
index 6487d0f99b2..c6b9642ea54 100644
--- a/jstests/ssl/ssl_invalid_server_cert.js
+++ b/jstests/ssl/ssl_invalid_server_cert.js
@@ -4,15 +4,19 @@
// This test ensures that a mongod will not start with a certificate that is
// not yet valid. Tested certificate will become valid 06-17-2020.
-var md = MongoRunner.runMongod({sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/not_yet_valid.pem",
- sslCAFile: "jstests/libs/ca.pem"});
+var md = MongoRunner.runMongod({
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/not_yet_valid.pem",
+ sslCAFile: "jstests/libs/ca.pem"
+});
assert.eq(null, md, "Possible to start mongod with not yet valid certificate.");
// This test ensures that a mongod with SSL will not start with an expired certificate.
-md = MongoRunner.runMongod({sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/expired.pem",
- sslCAFile: "jstests/libs/ca.pem"});
+md = MongoRunner.runMongod({
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/expired.pem",
+ sslCAFile: "jstests/libs/ca.pem"
+});
assert.eq(null, md, "Possible to start mongod with expired certificate");
diff --git a/jstests/ssl/ssl_options.js b/jstests/ssl/ssl_options.js
index 1881114b482..571f854851b 100644
--- a/jstests/ssl/ssl_options.js
+++ b/jstests/ssl/ssl_options.js
@@ -2,11 +2,13 @@ var baseName = "jstests_ssl_ssl_options";
jsTest.log("Testing censorship of ssl options");
-var mongodConfig = { sslPEMKeyFile : "jstests/libs/password_protected.pem",
- sslMode : "requireSSL",
- sslPEMKeyPassword : "qwerty",
- sslClusterPassword : "qwerty",
- sslCAFile: "jstests/libs/ca.pem"};
+var mongodConfig = {
+ sslPEMKeyFile: "jstests/libs/password_protected.pem",
+ sslMode: "requireSSL",
+ sslPEMKeyPassword: "qwerty",
+ sslClusterPassword: "qwerty",
+ sslCAFile: "jstests/libs/ca.pem"
+};
var mongodSource = MongoRunner.runMongod(mongodConfig);
var getCmdLineOptsResult = mongodSource.adminCommand("getCmdLineOpts");
@@ -15,7 +17,8 @@ var i;
var isPassword = false;
for (i = 0; i < getCmdLineOptsResult.argv.length; i++) {
if (isPassword) {
- assert.eq(getCmdLineOptsResult.argv[i], "<password>",
+ assert.eq(getCmdLineOptsResult.argv[i],
+ "<password>",
"Password not properly censored: " + tojson(getCmdLineOptsResult));
isPassword = false;
continue;
@@ -26,9 +29,11 @@ for (i = 0; i < getCmdLineOptsResult.argv.length; i++) {
isPassword = true;
}
}
-assert.eq(getCmdLineOptsResult.parsed.net.ssl.PEMKeyPassword, "<password>",
+assert.eq(getCmdLineOptsResult.parsed.net.ssl.PEMKeyPassword,
+ "<password>",
"Password not properly censored: " + tojson(getCmdLineOptsResult));
-assert.eq(getCmdLineOptsResult.parsed.net.ssl.clusterPassword, "<password>",
+assert.eq(getCmdLineOptsResult.parsed.net.ssl.clusterPassword,
+ "<password>",
"Password not properly censored: " + tojson(getCmdLineOptsResult));
MongoRunner.stopMongod(mongodSource.port);
diff --git a/jstests/ssl/ssl_weak.js b/jstests/ssl/ssl_weak.js
index 5a239a6d13c..c5ea8a27a6e 100644
--- a/jstests/ssl/ssl_weak.js
+++ b/jstests/ssl/ssl_weak.js
@@ -4,34 +4,43 @@
// Test that connecting with no client certificate and --sslAllowConnectionsWithoutCertificates
// (an alias for sslWeakCertificateValidation) connects successfully.
-var md = MongoRunner.runMongod({sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem",
- sslAllowConnectionsWithoutCertificates: ""});
+var md = MongoRunner.runMongod({
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslAllowConnectionsWithoutCertificates: ""
+});
-var mongo = runMongoProgram("mongo", "--port", md.port, "--ssl", "--sslAllowInvalidCertificates",
- "--eval", ";");
+var mongo = runMongoProgram(
+ "mongo", "--port", md.port, "--ssl", "--sslAllowInvalidCertificates", "--eval", ";");
// 0 is the exit code for success
-assert(mongo==0);
+assert(mongo == 0);
// Test that connecting with a valid client certificate connects successfully.
-mongo = runMongoProgram("mongo", "--port", md.port, "--ssl", "--sslAllowInvalidCertificates",
- "--sslPEMKeyFile", "jstests/libs/client.pem",
- "--eval", ";");
+mongo = runMongoProgram("mongo",
+ "--port",
+ md.port,
+ "--ssl",
+ "--sslAllowInvalidCertificates",
+ "--sslPEMKeyFile",
+ "jstests/libs/client.pem",
+ "--eval",
+ ";");
// 0 is the exit code for success
-assert(mongo==0);
-
+assert(mongo == 0);
// Test that connecting with no client certificate and no --sslAllowConnectionsWithoutCertificates
// fails to connect.
-var md2 = MongoRunner.runMongod({sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem"});
+var md2 = MongoRunner.runMongod({
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem"
+});
-mongo = runMongoProgram("mongo", "--port", md2.port, "--ssl", "--sslAllowInvalidCertificates",
- "--eval", ";");
+mongo = runMongoProgram(
+ "mongo", "--port", md2.port, "--ssl", "--sslAllowInvalidCertificates", "--eval", ";");
// 1 is the exit code for failure
-assert(mongo==1);
+assert(mongo == 1);
diff --git a/jstests/ssl/ssl_without_ca.js b/jstests/ssl/ssl_without_ca.js
index 87e69eed7e4..0e865cc5fee 100644
--- a/jstests/ssl/ssl_without_ca.js
+++ b/jstests/ssl/ssl_without_ca.js
@@ -5,27 +5,30 @@ var CLIENT_USER = "C=US,ST=New York,L=New York City,O=MongoDB,OU=KernelUser,CN=c
jsTest.log("Assert x509 auth is not allowed when a standalone mongod is run without a CA file.");
// allowSSL instead of requireSSL so that the non-SSL connection succeeds.
-var conn = MongoRunner.runMongod({sslMode: 'allowSSL',
- sslPEMKeyFile: SERVER_CERT,
- auth: ''});
+var conn = MongoRunner.runMongod({sslMode: 'allowSSL', sslPEMKeyFile: SERVER_CERT, auth: ''});
var external = conn.getDB('$external');
external.createUser({
user: CLIENT_USER,
roles: [
- {'role':'userAdminAnyDatabase', 'db':'admin'},
- {'role':'readWriteAnyDatabase', 'db':'admin'}
- ]});
+ {'role': 'userAdminAnyDatabase', 'db': 'admin'},
+ {'role': 'readWriteAnyDatabase', 'db': 'admin'}
+ ]
+});
// Should not be able to authenticate with x509.
// Authenticate call will return 1 on success, 0 on error.
-var exitStatus = runMongoProgram('mongo', '--ssl', '--sslAllowInvalidCertificates',
- '--sslPEMKeyFile', CLIENT_CERT,
- '--port', conn.port,
- '--eval', ('quit(db.getSisterDB("$external").auth({' +
- 'user: "' + CLIENT_USER + '" ,' +
- 'mechanism: "MONGODB-X509"}));'
- ));
+var exitStatus = runMongoProgram('mongo',
+ '--ssl',
+ '--sslAllowInvalidCertificates',
+ '--sslPEMKeyFile',
+ CLIENT_CERT,
+ '--port',
+ conn.port,
+ '--eval',
+ ('quit(db.getSisterDB("$external").auth({' +
+ 'user: "' + CLIENT_USER + '" ,' +
+ 'mechanism: "MONGODB-X509"}));'));
assert.eq(exitStatus, 0, "authentication via MONGODB-X509 without CA succeeded");
@@ -33,17 +36,21 @@ MongoRunner.stopMongod(conn.port);
jsTest.log("Assert mongod doesn\'t start with CA file missing and clusterAuthMode=x509.");
-var sslParams = {clusterAuthMode: 'x509', sslMode: 'requireSSL', sslPEMKeyFile: SERVER_CERT};
+var sslParams = {
+ clusterAuthMode: 'x509',
+ sslMode: 'requireSSL',
+ sslPEMKeyFile: SERVER_CERT
+};
var conn = MongoRunner.runMongod(sslParams);
assert.isnull(conn, "server started with x509 clusterAuthMode but no CA file");
jsTest.log("Assert mongos doesn\'t start with CA file missing and clusterAuthMode=x509.");
assert.throws(function() {
- new ShardingTest({shards: 1, mongos: 1, verbose: 2,
- other: {configOptions: sslParams,
- mongosOptions: sslParams,
- shardOptions: sslParams}});
- },
- null,
- "mongos started with x509 clusterAuthMode but no CA file");
+ new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ verbose: 2,
+ other: {configOptions: sslParams, mongosOptions: sslParams, shardOptions: sslParams}
+ });
+}, null, "mongos started with x509 clusterAuthMode but no CA file");
diff --git a/jstests/ssl/upgrade_to_ssl.js b/jstests/ssl/upgrade_to_ssl.js
index 0c4a2caf222..f1f8409b2a5 100644
--- a/jstests/ssl/upgrade_to_ssl.js
+++ b/jstests/ssl/upgrade_to_ssl.js
@@ -10,24 +10,26 @@
load("jstests/ssl/libs/ssl_helpers.js");
// "sslAllowInvalidCertificates" is enabled to avoid hostname conflicts with our testing certs
-var opts = {sslMode:"allowSSL",
- sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates: "",
- sslAllowConnectionsWithoutCertificates: "",
- sslCAFile: "jstests/libs/ca.pem"};
-var rst = new ReplSetTest({ name: 'sslSet', nodes: 3, nodeOptions : opts });
+var opts = {
+ sslMode: "allowSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ sslAllowConnectionsWithoutCertificates: "",
+ sslCAFile: "jstests/libs/ca.pem"
+};
+var rst = new ReplSetTest({name: 'sslSet', nodes: 3, nodeOptions: opts});
rst.startSet();
rst.initiate();
var rstConn1 = rst.getPrimary();
-rstConn1.getDB("test").a.insert({a:1, str:"TESTTESTTEST"});
+rstConn1.getDB("test").a.insert({a: 1, str: "TESTTESTTEST"});
assert.eq(1, rstConn1.getDB("test").a.count(), "Error interacting with replSet");
print("===== UPGRADE allowSSL -> preferSSL =====");
opts.sslMode = "preferSSL";
rst.upgradeSet(opts);
var rstConn2 = rst.getPrimary();
-rstConn2.getDB("test").a.insert({a:2, str:"CHECKCHECK"});
+rstConn2.getDB("test").a.insert({a: 2, str: "CHECKCHECK"});
assert.eq(2, rstConn2.getDB("test").a.count(), "Error interacting with replSet");
// Check that non-ssl connections can still be made
@@ -38,10 +40,10 @@ print("===== UPGRADE preferSSL -> requireSSL =====");
opts.sslMode = "requireSSL";
rst.upgradeSet(opts);
var rstConn3 = rst.getPrimary();
-rstConn3.getDB("test").a.insert({a:3, str:"GREENEGGSANDHAM"});
+rstConn3.getDB("test").a.insert({a: 3, str: "GREENEGGSANDHAM"});
assert.eq(3, rstConn3.getDB("test").a.count(), "Error interacting with replSet");
// Check that ssl connections can be made
-var canConnectSSL = runMongoProgram("mongo", "--port", rst.ports[0],
- "--ssl", "--sslAllowInvalidCertificates", "--eval", ";");
+var canConnectSSL = runMongoProgram(
+ "mongo", "--port", rst.ports[0], "--ssl", "--sslAllowInvalidCertificates", "--eval", ";");
assert.eq(0, canConnectSSL, "SSL Connection attempt failed when it should succeed");
diff --git a/jstests/ssl/upgrade_to_x509_ssl.js b/jstests/ssl/upgrade_to_x509_ssl.js
index 2ff79bcd7bf..e9d79859077 100644
--- a/jstests/ssl/upgrade_to_x509_ssl.js
+++ b/jstests/ssl/upgrade_to_x509_ssl.js
@@ -19,35 +19,46 @@ function authAllNodes() {
load("jstests/ssl/libs/ssl_helpers.js");
-opts = {sslMode:"allowSSL", sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates: "",
- clusterAuthMode:"sendKeyFile", keyFile: KEYFILE,
- sslCAFile: CA_CERT};
+opts = {
+ sslMode: "allowSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ clusterAuthMode: "sendKeyFile",
+ keyFile: KEYFILE,
+ sslCAFile: CA_CERT
+};
var NUM_NODES = 3;
-var rst = new ReplSetTest({ name: 'sslSet', nodes: NUM_NODES, nodeOptions : opts });
+var rst = new ReplSetTest({name: 'sslSet', nodes: NUM_NODES, nodeOptions: opts});
rst.startSet();
rst.initiate();
// Connect to master and do some basic operations
var rstConn1 = rst.getPrimary();
print("Performing basic operations on master.");
-rstConn1.getDB("admin").createUser({user:"root", pwd:"pwd", roles:["root"]}, {w: NUM_NODES});
+rstConn1.getDB("admin").createUser({user: "root", pwd: "pwd", roles: ["root"]}, {w: NUM_NODES});
rstConn1.getDB("admin").auth("root", "pwd");
-rstConn1.getDB("test").a.insert({a:1, str:"TESTTESTTEST"});
-rstConn1.getDB("test").a.insert({a:1, str:"WOOPWOOPWOOPWOOPWOOP"});
+rstConn1.getDB("test").a.insert({a: 1, str: "TESTTESTTEST"});
+rstConn1.getDB("test").a.insert({a: 1, str: "WOOPWOOPWOOPWOOPWOOP"});
assert.eq(2, rstConn1.getDB("test").a.count(), "Error interacting with replSet");
print("===== UPGRADE allowSSL,sendKeyfile -> preferSSL,sendX509 =====");
authAllNodes();
rst.awaitReplication();
-rst.upgradeSet({sslMode:"preferSSL", sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates: "",
- clusterAuthMode:"sendX509", keyFile: KEYFILE,
- sslCAFile: CA_CERT}, "root", "pwd");
+rst.upgradeSet(
+ {
+ sslMode: "preferSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ clusterAuthMode: "sendX509",
+ keyFile: KEYFILE,
+ sslCAFile: CA_CERT
+ },
+ "root",
+ "pwd");
// The upgradeSet call restarts the nodes so we need to reauthenticate.
authAllNodes();
var rstConn3 = rst.getPrimary();
-rstConn3.getDB("test").a.insert({a:3, str:"TESTTESTTEST"});
+rstConn3.getDB("test").a.insert({a: 3, str: "TESTTESTTEST"});
assert.eq(3, rstConn3.getDB("test").a.count(), "Error interacting with replSet");
rst.awaitReplication();
// Test that a non-ssl connection can still be made
@@ -55,11 +66,18 @@ var canConnectNoSSL = runMongoProgram("mongo", "--port", rst.ports[0], "--eval",
assert.eq(0, canConnectNoSSL, "SSL Connection attempt failed when it should succeed");
print("===== UPGRADE preferSSL,sendX509 -> requireSSL,x509 =====");
-rst.upgradeSet({sslMode:"requireSSL", sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates: "",
- clusterAuthMode:"x509", keyFile: KEYFILE,
- sslCAFile: CA_CERT}, "root", "pwd");
+rst.upgradeSet(
+ {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ clusterAuthMode: "x509",
+ keyFile: KEYFILE,
+ sslCAFile: CA_CERT
+ },
+ "root",
+ "pwd");
authAllNodes();
var rstConn4 = rst.getPrimary();
-rstConn4.getDB("test").a.insert({a:4, str:"TESTTESTTEST"});
+rstConn4.getDB("test").a.insert({a: 4, str: "TESTTESTTEST"});
assert.eq(4, rstConn4.getDB("test").a.count(), "Error interacting with replSet");
diff --git a/jstests/ssl/x509_client.js b/jstests/ssl/x509_client.js
index 769909f14b4..82c726fc0fd 100644
--- a/jstests/ssl/x509_client.js
+++ b/jstests/ssl/x509_client.js
@@ -1,21 +1,23 @@
// Check if this build supports the authenticationMechanisms startup parameter.
-var conn = MongoRunner.runMongod({smallfiles: "",
- auth: "",
- sslMode: "requireSSL",
- sslPEMKeyFile: "jstests/libs/server.pem",
- sslCAFile: "jstests/libs/ca.pem"});
+var conn = MongoRunner.runMongod({
+ smallfiles: "",
+ auth: "",
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem"
+});
conn.getDB('admin').createUser({user: "root", pwd: "pass", roles: ["root"]});
conn.getDB('admin').auth("root", "pass");
var cmdOut = conn.getDB('admin').runCommand({getParameter: 1, authenticationMechanisms: 1});
if (cmdOut.ok) {
- TestData.authMechanism = "MONGODB-X509"; // SERVER-10353
+ TestData.authMechanism = "MONGODB-X509"; // SERVER-10353
}
conn.getDB('admin').dropAllUsers();
conn.getDB('admin').logout();
MongoRunner.stopMongod(conn);
var SERVER_CERT = "jstests/libs/server.pem";
-var CA_CERT = "jstests/libs/ca.pem";
+var CA_CERT = "jstests/libs/ca.pem";
var SERVER_USER = "C=US,ST=New York,L=New York City,O=MongoDB,OU=Kernel,CN=server";
var INTERNAL_USER = "C=US,ST=New York,L=New York City,O=MongoDB,OU=Kernel,CN=internal";
@@ -27,49 +29,59 @@ function authAndTest(mongo) {
test = mongo.getDB("test");
// It should be impossible to create users with the same name as the server's subject
- assert.throws( function() {
- external.createUser({user: SERVER_USER,
- roles: [{'role':'userAdminAnyDatabase', 'db':'admin'}]
- });
+ assert.throws(function() {
+ external.createUser(
+ {user: SERVER_USER, roles: [{'role': 'userAdminAnyDatabase', 'db': 'admin'}]});
}, {}, "Created user with same name as the server's x.509 subject");
// It should be impossible to create users with names recognized as cluster members
- assert.throws( function() {
- external.createUser({user: INTERNAL_USER,
- roles: [{'role':'userAdminAnyDatabase', 'db':'admin'}]
- });
+ assert.throws(function() {
+ external.createUser(
+ {user: INTERNAL_USER, roles: [{'role': 'userAdminAnyDatabase', 'db': 'admin'}]});
}, {}, "Created user which would be recognized as a cluster member");
// Add user using localhost exception
- external.createUser({user: CLIENT_USER, roles:[
- {'role':'userAdminAnyDatabase', 'db':'admin'},
- {'role':'readWriteAnyDatabase', 'db':'admin'}]});
+ external.createUser({
+ user: CLIENT_USER,
+ roles: [
+ {'role': 'userAdminAnyDatabase', 'db': 'admin'},
+ {'role': 'readWriteAnyDatabase', 'db': 'admin'}
+ ]
+ });
// It should be impossible to create users with an internal name
- assert.throws( function() {external.createUser({user: SERVER_USER, roles: [
- {'role':'userAdminAnyDatabase', 'db':'admin'}]});});
+ assert.throws(function() {
+ external.createUser(
+ {user: SERVER_USER, roles: [{'role': 'userAdminAnyDatabase', 'db': 'admin'}]});
+ });
// Localhost exception should not be in place anymore
- assert.throws( function() { test.foo.findOne();}, {}, "read without login" );
+ assert.throws(function() {
+ test.foo.findOne();
+ }, {}, "read without login");
- assert( !external.auth({user: INVALID_CLIENT_USER, mechanism: 'MONGODB-X509'}),
- "authentication with invalid user failed" );
- assert( external.auth({user: CLIENT_USER, mechanism: 'MONGODB-X509'}),
- "authentication with valid user failed" );
+ assert(!external.auth({user: INVALID_CLIENT_USER, mechanism: 'MONGODB-X509'}),
+ "authentication with invalid user failed");
+ assert(external.auth({user: CLIENT_USER, mechanism: 'MONGODB-X509'}),
+ "authentication with valid user failed");
// Check that we can add a user and read data
- test.createUser({user: "test", pwd: "test", roles:[
- {'role': 'readWriteAnyDatabase', 'db': 'admin'}]});
+ test.createUser(
+ {user: "test", pwd: "test", roles: [{'role': 'readWriteAnyDatabase', 'db': 'admin'}]});
test.foo.findOne();
external.logout();
- assert.throws( function() { test.foo.findOne();}, {}, "read after logout" );
+ assert.throws(function() {
+ test.foo.findOne();
+ }, {}, "read after logout");
}
print("1. Testing x.509 auth to mongod");
-var x509_options = {sslMode : "requireSSL",
- sslPEMKeyFile : SERVER_CERT,
- sslCAFile : CA_CERT};
+var x509_options = {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslCAFile: CA_CERT
+};
var mongo = MongoRunner.runMongod(Object.merge(x509_options, {auth: ""}));
@@ -78,14 +90,16 @@ MongoRunner.stopMongod(mongo.port);
print("2. Testing x.509 auth to mongos");
-var st = new ShardingTest({ shards : 1,
- mongos : 1,
- other: {
- extraOptions : {"keyFile" : "jstests/libs/key1"},
- configOptions : x509_options,
- mongosOptions : x509_options,
- shardOptions : x509_options,
- useHostname: false,
- }});
+var st = new ShardingTest({
+ shards: 1,
+ mongos: 1,
+ other: {
+ extraOptions: {"keyFile": "jstests/libs/key1"},
+ configOptions: x509_options,
+ mongosOptions: x509_options,
+ shardOptions: x509_options,
+ useHostname: false,
+ }
+});
authAndTest(new Mongo("localhost:" + st.s0.port));
diff --git a/jstests/sslSpecial/set_parameter_nossl.js b/jstests/sslSpecial/set_parameter_nossl.js
index 280d47b6130..74bab379aa0 100644
--- a/jstests/sslSpecial/set_parameter_nossl.js
+++ b/jstests/sslSpecial/set_parameter_nossl.js
@@ -7,12 +7,10 @@ function testTransition(newSSLMode, newClusterAuthMode) {
// If no parameters are given sslMode defaults to disabled
var conn = MongoRunner.runMongod({clusterAuthMode: "keyFile"});
var adminDB = conn.getDB("admin");
- var res = adminDB.runCommand({ "setParameter" : 1,
- "sslMode" : newSSLMode });
+ var res = adminDB.runCommand({"setParameter": 1, "sslMode": newSSLMode});
assert(!res["ok"]);
- var res = adminDB.runCommand({ "setParameter" : 1,
- "clusterAuthMode" : newClusterAuthMode });
+ var res = adminDB.runCommand({"setParameter": 1, "clusterAuthMode": newClusterAuthMode});
assert(!res["ok"]);
MongoRunner.stopMongod(conn.port);
}
diff --git a/jstests/sslSpecial/ssl_mixedmode.js b/jstests/sslSpecial/ssl_mixedmode.js
index 738aa26714f..085740f0a26 100644
--- a/jstests/sslSpecial/ssl_mixedmode.js
+++ b/jstests/sslSpecial/ssl_mixedmode.js
@@ -6,17 +6,16 @@
load("jstests/libs/ssl_test.js");
function testCombination(sslMode, sslShell, shouldSucceed) {
+ var serverOptionOverrides = {
+ sslMode: sslMode
+ };
- var serverOptionOverrides = {sslMode: sslMode};
-
- var clientOptions = sslShell ?
- SSLTest.prototype.defaultSSLClientOptions :
- SSLTest.prototype.noSSLClientOptions;
+ var clientOptions =
+ sslShell ? SSLTest.prototype.defaultSSLClientOptions : SSLTest.prototype.noSSLClientOptions;
var fixture = new SSLTest(serverOptionOverrides, clientOptions);
- print("Trying sslMode: '" + sslMode +
- "' with sslShell = " + sslShell +
+ print("Trying sslMode: '" + sslMode + "' with sslShell = " + sslShell +
"; expect connection to " + (shouldSucceed ? "SUCCEED" : "FAIL"));
assert.eq(shouldSucceed, fixture.connectWorked());
diff --git a/jstests/sslSpecial/upgrade_to_ssl_nossl.js b/jstests/sslSpecial/upgrade_to_ssl_nossl.js
index 53f7bd77fb9..11d43040d93 100644
--- a/jstests/sslSpecial/upgrade_to_ssl_nossl.js
+++ b/jstests/sslSpecial/upgrade_to_ssl_nossl.js
@@ -9,26 +9,26 @@
load("jstests/ssl/libs/ssl_helpers.js");
-var rst = new ReplSetTest({ name: 'sslSet', nodes: 3, nodeOptions : {sslMode:"disabled"} });
+var rst = new ReplSetTest({name: 'sslSet', nodes: 3, nodeOptions: {sslMode: "disabled"}});
rst.startSet();
rst.initiate();
var rstConn1 = rst.getPrimary();
-rstConn1.getDB("test").a.insert({a:1, str:"TESTTESTTEST"});
+rstConn1.getDB("test").a.insert({a: 1, str: "TESTTESTTEST"});
assert.eq(1, rstConn1.getDB("test").a.count(), "Error interacting with replSet");
print("===== UPGRADE disabled -> allowSSL =====");
-rst.upgradeSet({sslMode:"allowSSL", sslPEMKeyFile: SERVER_CERT, sslAllowInvalidCertificates:""});
+rst.upgradeSet({sslMode: "allowSSL", sslPEMKeyFile: SERVER_CERT, sslAllowInvalidCertificates: ""});
var rstConn2 = rst.getPrimary();
-rstConn2.getDB("test").a.insert({a:2, str:"TESTTESTTEST"});
+rstConn2.getDB("test").a.insert({a: 2, str: "TESTTESTTEST"});
assert.eq(2, rstConn2.getDB("test").a.count(), "Error interacting with replSet");
print("===== UPGRADE allowSSL -> preferSSL =====");
-rst.upgradeSet({sslMode:"preferSSL", sslPEMKeyFile: SERVER_CERT, sslAllowInvalidCertificates:""});
+rst.upgradeSet({sslMode: "preferSSL", sslPEMKeyFile: SERVER_CERT, sslAllowInvalidCertificates: ""});
var rstConn3 = rst.getPrimary();
-rstConn3.getDB("test").a.insert({a:3, str:"TESTTESTTEST"});
+rstConn3.getDB("test").a.insert({a: 3, str: "TESTTESTTEST"});
assert.eq(3, rstConn3.getDB("test").a.count(), "Error interacting with replSet");
-var canConnectSSL = runMongoProgram("mongo", "--port", rst.ports[0],
- "--ssl", "--sslAllowInvalidCertificates", "--eval", ";");
+var canConnectSSL = runMongoProgram(
+ "mongo", "--port", rst.ports[0], "--ssl", "--sslAllowInvalidCertificates", "--eval", ";");
assert.eq(0, canConnectSSL, "SSL Connection attempt failed when it should succeed");
diff --git a/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js b/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js
index 1d18dc2225d..ec29c991e9d 100644
--- a/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js
+++ b/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js
@@ -19,9 +19,13 @@ function authAllNodes() {
}
}
-opts = {sslMode:"disabled", clusterAuthMode:"keyFile", keyFile: KEYFILE};
+opts = {
+ sslMode: "disabled",
+ clusterAuthMode: "keyFile",
+ keyFile: KEYFILE
+};
var NUM_NODES = 3;
-var rst = new ReplSetTest({ name: 'sslSet', nodes: NUM_NODES, nodeOptions : opts });
+var rst = new ReplSetTest({name: 'sslSet', nodes: NUM_NODES, nodeOptions: opts});
rst.startSet();
rst.initiate();
@@ -29,54 +33,87 @@ rst.initiate();
var rstConn1 = rst.getPrimary();
rstConn1.getDB("admin").createUser({user: "root", pwd: "pwd", roles: ["root"]}, {w: NUM_NODES});
rstConn1.getDB("admin").auth("root", "pwd");
-rstConn1.getDB("test").a.insert({a:1, str:"TESTTESTTEST"});
+rstConn1.getDB("test").a.insert({a: 1, str: "TESTTESTTEST"});
assert.eq(1, rstConn1.getDB("test").a.count(), "Error interacting with replSet");
print("===== UPGRADE disabled,keyFile -> allowSSL,sendKeyfile =====");
authAllNodes();
-rst.upgradeSet({sslMode:"allowSSL", sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates:"",
- clusterAuthMode:"sendKeyFile", keyFile: KEYFILE,
- sslCAFile: CA_CERT}, "root", "pwd");
+rst.upgradeSet(
+ {
+ sslMode: "allowSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ clusterAuthMode: "sendKeyFile",
+ keyFile: KEYFILE,
+ sslCAFile: CA_CERT
+ },
+ "root",
+ "pwd");
authAllNodes();
rst.awaitReplication();
var rstConn2 = rst.getPrimary();
-rstConn2.getDB("test").a.insert({a:2, str:"CHECKCHECKCHECK"});
+rstConn2.getDB("test").a.insert({a: 2, str: "CHECKCHECKCHECK"});
assert.eq(2, rstConn2.getDB("test").a.count(), "Error interacting with replSet");
print("===== UPGRADE allowSSL,sendKeyfile -> preferSSL,sendX509 =====");
-rst.upgradeSet({sslMode:"preferSSL", sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates:"",
- clusterAuthMode:"sendX509", keyFile: KEYFILE,
- sslCAFile: CA_CERT}, "root", "pwd");
+rst.upgradeSet(
+ {
+ sslMode: "preferSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ clusterAuthMode: "sendX509",
+ keyFile: KEYFILE,
+ sslCAFile: CA_CERT
+ },
+ "root",
+ "pwd");
authAllNodes();
rst.awaitReplication();
var rstConn3 = rst.getPrimary();
-rstConn3.getDB("test").a.insert({a:3, str:"PEASandCARROTS"});
+rstConn3.getDB("test").a.insert({a: 3, str: "PEASandCARROTS"});
assert.eq(3, rstConn3.getDB("test").a.count(), "Error interacting with replSet");
-var canConnectSSL = runMongoProgram("mongo", "--port", rst.ports[0], "--ssl",
+var canConnectSSL = runMongoProgram("mongo",
+ "--port",
+ rst.ports[0],
+ "--ssl",
"--sslAllowInvalidCertificates",
- "--sslPEMKeyFile", CLIENT_CERT, "--eval", ";");
+ "--sslPEMKeyFile",
+ CLIENT_CERT,
+ "--eval",
+ ";");
assert.eq(0, canConnectSSL, "SSL Connection attempt failed when it should succeed");
print("===== UPGRADE preferSSL,sendX509 -> preferSSL,x509 =====");
-//we cannot upgrade past preferSSL here because it will break the test client
-rst.upgradeSet({sslMode:"preferSSL", sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates:"",
- clusterAuthMode:"x509", keyFile: KEYFILE,
- sslCAFile: CA_CERT}, "root", "pwd");
+// we cannot upgrade past preferSSL here because it will break the test client
+rst.upgradeSet(
+ {
+ sslMode: "preferSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ clusterAuthMode: "x509",
+ keyFile: KEYFILE,
+ sslCAFile: CA_CERT
+ },
+ "root",
+ "pwd");
authAllNodes();
rst.awaitReplication();
var rstConn4 = rst.getPrimary();
-rstConn4.getDB("test").a.insert({a:4, str:"BEEP BOOP"});
+rstConn4.getDB("test").a.insert({a: 4, str: "BEEP BOOP"});
rst.awaitReplication();
assert.eq(4, rstConn4.getDB("test").a.count(), "Error interacting with replSet");
// Test that an ssl connection can still be made
-var canConnectSSL = runMongoProgram("mongo", "--port", rst.ports[0], "--ssl",
+var canConnectSSL = runMongoProgram("mongo",
+ "--port",
+ rst.ports[0],
+ "--ssl",
"--sslAllowInvalidCertificates",
- "--sslPEMKeyFile", CLIENT_CERT, "--eval", ";");
+ "--sslPEMKeyFile",
+ CLIENT_CERT,
+ "--eval",
+ ";");
assert.eq(0, canConnectSSL, "SSL Connection attempt failed when it should succeed");
diff --git a/jstests/tool/command_line_quotes.js b/jstests/tool/command_line_quotes.js
index d7b618a3406..35f7305ff4a 100644
--- a/jstests/tool/command_line_quotes.js
+++ b/jstests/tool/command_line_quotes.js
@@ -8,14 +8,8 @@ coll.insert({a: 2});
var query = "{\"a\": {\"$gt\": 1} }";
assert(!MongoRunner.runMongoTool(
- "mongodump",
- {
- "host": "127.0.0.1:" + mongod.port,
- "db": "spaces",
- "collection": "coll",
- "query": query
- }
-));
+ "mongodump",
+ {"host": "127.0.0.1:" + mongod.port, "db": "spaces", "collection": "coll", "query": query}));
MongoRunner.stopMongod(mongod);
diff --git a/jstests/tool/csv1.js b/jstests/tool/csv1.js
index 3338d500fdf..7a5690062f8 100644
--- a/jstests/tool/csv1.js
+++ b/jstests/tool/csv1.js
@@ -1,42 +1,57 @@
// csv1.js
-t = new ToolTest( "csv1" );
+t = new ToolTest("csv1");
-c = t.startDB( "foo" );
+c = t.startDB("foo");
-base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-', f : "."};
+base = {
+ a: 1,
+ b: "foo,bar\"baz,qux",
+ c: 5, 'd d': -6,
+ e: '-',
+ f: "."
+};
-assert.eq( 0 , c.count() , "setup1" );
-c.insert( base );
+assert.eq(0, c.count(), "setup1");
+c.insert(base);
delete base._id;
-assert.eq( 1 , c.count() , "setup2" );
+assert.eq(1, c.count(), "setup2");
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e,f" );
+t.runTool(
+ "export", "--out", t.extFile, "-d", t.baseName, "-c", "foo", "--csv", "-f", "a,b,c,d d,e,f");
c.drop();
-assert.eq( 0 , c.count() , "after drop" );
-
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e,f" );
-assert.soon( "2 == c.count()" , "restore 2" );
-
-a = c.find().sort( { a : 1 } ).toArray();
+assert.eq(0, c.count(), "after drop");
+
+t.runTool("import",
+ "--file",
+ t.extFile,
+ "-d",
+ t.baseName,
+ "-c",
+ "foo",
+ "--type",
+ "csv",
+ "-f",
+ "a,b,c,d d,e,f");
+assert.soon("2 == c.count()", "restore 2");
+
+a = c.find().sort({a: 1}).toArray();
delete a[0]._id;
delete a[1]._id;
-assert.docEq( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e', f : "f"}, a[1], "csv parse 1" );
-assert.docEq( base, a[0], "csv parse 0" );
+assert.docEq({a: "a", b: "b", c: "c", 'd d': "d d", e: 'e', f: "f"}, a[1], "csv parse 1");
+assert.docEq(base, a[0], "csv parse 0");
c.drop();
-assert.eq( 0 , c.count() , "after drop 2" );
+assert.eq(0, c.count(), "after drop 2");
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" );
-assert.soon( "c.findOne()" , "no data after sleep" );
-assert.eq( 1 , c.count() , "after restore 2" );
+t.runTool(
+ "import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+assert.soon("c.findOne()", "no data after sleep");
+assert.eq(1, c.count(), "after restore 2");
x = c.findOne();
delete x._id;
-assert.docEq( base, x, "csv parse 2" );
-
-
-
+assert.docEq(base, x, "csv parse 2");
t.stop();
diff --git a/jstests/tool/csvexport1.js b/jstests/tool/csvexport1.js
index f01acbcd6fc..afea559b2b0 100644
--- a/jstests/tool/csvexport1.js
+++ b/jstests/tool/csvexport1.js
@@ -1,64 +1,81 @@
// csvexport1.js
-t = new ToolTest( "csvexport1" );
+t = new ToolTest("csvexport1");
-c = t.startDB( "foo" );
+c = t.startDB("foo");
-assert.eq( 0 , c.count() , "setup1" );
+assert.eq(0, c.count(), "setup1");
objId = ObjectId();
-c.insert({ a : new NumberInt(1) , b : objId , c: [1, 2, 3], d : {a : "hello", b : "world"} , e: '-'});
-c.insert({ a : -2.0, c : MinKey, d : "Then he said, \"Hello World!\"", e : new NumberLong(3)});
-c.insert({ a : new BinData(0, "1234"), b : ISODate("2009-08-27T12:34:56.789"),
- c : new Timestamp(1234, 9876), d : /foo*\"bar\"/i,
- e : function foo() { print("Hello World!"); }});
-
-assert.eq( 3 , c.count() , "setup2" );
+c.insert({a: new NumberInt(1), b: objId, c: [1, 2, 3], d: {a: "hello", b: "world"}, e: '-'});
+c.insert({a: -2.0, c: MinKey, d: "Then he said, \"Hello World!\"", e: new NumberLong(3)});
+c.insert({
+ a: new BinData(0, "1234"),
+ b: ISODate("2009-08-27T12:34:56.789"),
+ c: new Timestamp(1234, 9876),
+ d: /foo*\"bar\"/i,
+ e: function foo() {
+ print("Hello World!");
+ }
+});
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b,c,d,e");
+assert.eq(3, c.count(), "setup2");
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo", "--csv", "-f", "a,b,c,d,e");
c.drop();
-assert.eq( 0 , c.count() , "after drop" );
+assert.eq(0, c.count(), "after drop");
-t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+t.runTool(
+ "import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
-assert.soon ( 3 + " == c.count()", "after import");
+assert.soon(3 + " == c.count()", "after import");
// Note: Exporting and Importing to/from CSV is not designed to be round-trippable
expected = [];
-expected.push({ a : 1, b : "ObjectId(" + objId.valueOf() + ")", c : [ 1, 2, 3 ], d : { "a" : "hello", "b" : "world" }, e : "-"});
-expected.push({ a : -2.0, b : "", c : "$MinKey", d : "Then he said, \"Hello World!\"", e : 3});
+expected.push({
+ a: 1,
+ b: "ObjectId(" + objId.valueOf() + ")",
+ c: [1, 2, 3],
+ d: {"a": "hello", "b": "world"},
+ e: "-"
+});
+expected.push({a: -2.0, b: "", c: "$MinKey", d: "Then he said, \"Hello World!\"", e: 3});
// "t" should be 1234, but the shell interprets the first field of timestamps as milliseconds while
// they are stored as seconds. See SERVER-7718.
-expected.push({ a : "D76DF8", b : "2009-08-27T12:34:56.789Z",
- c : { "$timestamp" : { "t" : 1234, "i" : 9876 } },
- d : "/foo*\\\"bar\\\"/i", e : tojson(function foo() { print("Hello World!"); })});
+expected.push({
+ a: "D76DF8",
+ b: "2009-08-27T12:34:56.789Z",
+ c: {"$timestamp": {"t": 1234, "i": 9876}},
+ d: "/foo*\\\"bar\\\"/i",
+ e: tojson(function foo() {
+ print("Hello World!");
+ })
+});
actual = [];
-actual.push(c.find({a : 1}).toArray()[0]);
-actual.push(c.find({a : -2.0}).toArray()[0]);
-actual.push(c.find({a : "D76DF8"}).toArray()[0]);
+actual.push(c.find({a: 1}).toArray()[0]);
+actual.push(c.find({a: -2.0}).toArray()[0]);
+actual.push(c.find({a: "D76DF8"}).toArray()[0]);
for (i = 0; i < expected.length; i++) {
delete actual[i]._id;
assert.eq(Object.keys(expected[i]).length, Object.keys(actual[i]).length);
keys = Object.keys(expected[i]);
- for(var j=0;j<keys.length;j++){
+ for (var j = 0; j < keys.length; j++) {
expectedVal = expected[i][keys[j]];
- if((typeof expectedVal)== "object"){
+ if ((typeof expectedVal) == "object") {
// For fields which contain arrays or objects, they have been
// exported as JSON - parse the JSON in the output and verify
// that it matches the original document's value
assert.docEq(expectedVal, JSON.parse(actual[i][keys[j]]), "CSV export " + i);
- }else{
+ } else {
// Otherwise just compare the values directly
assert.eq(expectedVal, actual[i][keys[j]], "CSV export " + i);
}
}
}
-
t.stop();
diff --git a/jstests/tool/csvexport2.js b/jstests/tool/csvexport2.js
index dc12288e83b..7ced84a953c 100644
--- a/jstests/tool/csvexport2.js
+++ b/jstests/tool/csvexport2.js
@@ -1,31 +1,34 @@
// csvexport2.js
-t = new ToolTest( "csvexport2" );
+t = new ToolTest("csvexport2");
-c = t.startDB( "foo" );
+c = t.startDB("foo");
// This test is designed to test exporting of a CodeWithScope object.
-// However, due to SERVER-3391, it is not possible to create a CodeWithScope object in the mongo shell,
-// therefore this test does not work. Once SERVER-3391 is resolved, this test should be un-commented out
+// However, due to SERVER-3391, it is not possible to create a CodeWithScope object in the mongo
+// shell,
+// therefore this test does not work. Once SERVER-3391 is resolved, this test should be
+// un-commented out
-//assert.eq( 0 , c.count() , "setup1" );
+// assert.eq( 0 , c.count() , "setup1" );
-//c.insert({ a : 1 , b : Code("print(\"Hello \" + x);", {"x" : "World!"})})
-//assert.eq( 1 , c.count() , "setup2" );
-//t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b")
+// c.insert({ a : 1 , b : Code("print(\"Hello \" + x);", {"x" : "World!"})})
+// assert.eq( 1 , c.count() , "setup2" );
+// t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f",
+// "a,b")
+// c.drop()
-//c.drop()
+// assert.eq( 0 , c.count() , "after drop" )
+// t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv",
+// "--headerline");
+// assert.soon ( 1 + " == c.count()", "after import");
-//assert.eq( 0 , c.count() , "after drop" )
-//t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
-//assert.soon ( 1 + " == c.count()", "after import");
-
-//expected = { a : 1, b : "\"{ \"$code\" : print(\"Hello \" + x); , \"$scope\" : { \"x\" : \"World!\" } }"};
-//actual = c.findOne()
-
-//delete actual._id;
-//assert.eq( expected, actual );
+// expected = { a : 1, b : "\"{ \"$code\" : print(\"Hello \" + x); , \"$scope\" : { \"x\" :
+// \"World!\" } }"};
+// actual = c.findOne()
+// delete actual._id;
+// assert.eq( expected, actual );
t.stop(); \ No newline at end of file
diff --git a/jstests/tool/csvimport1.js b/jstests/tool/csvimport1.js
index a85470f2c11..28258bbe37f 100644
--- a/jstests/tool/csvimport1.js
+++ b/jstests/tool/csvimport1.js
@@ -1,40 +1,71 @@
// csvimport1.js
-t = new ToolTest( "csvimport1" );
+t = new ToolTest("csvimport1");
-c = t.startDB( "foo" );
+c = t.startDB("foo");
base = [];
-base.push({ a : 1, b : "this is some text.\nThis text spans multiple lines, and just for fun\ncontains a comma", "c" : "This has leading and trailing whitespace!" });
-base.push({a : 2, b : "When someone says something you \"put it in quotes\"", "c" : "I like embedded quotes/slashes\\backslashes" });
-base.push({a : 3, b : " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", "c" : "" });
-base.push({a : 4, b : "", "c" : "How are empty entries handled?" });
-base.push({a : 5, b : "\"\"", c : "\"This string is in quotes and contains empty quotes (\"\")\""});
-base.push({ a : "a" , b : "b" , c : "c"});
+base.push({
+ a: 1,
+ b: "this is some text.\nThis text spans multiple lines, and just for fun\ncontains a comma",
+ "c": "This has leading and trailing whitespace!"
+});
+base.push({
+ a: 2,
+ b: "When someone says something you \"put it in quotes\"",
+ "c": "I like embedded quotes/slashes\\backslashes"
+});
+base.push({
+ a: 3,
+ b:
+ " This line contains the empty string and has leading and trailing whitespace inside the quotes! ",
+ "c": ""
+});
+base.push({a: 4, b: "", "c": "How are empty entries handled?"});
+base.push({a: 5, b: "\"\"", c: "\"This string is in quotes and contains empty quotes (\"\")\""});
+base.push({a: "a", b: "b", c: "c"});
-assert.eq( 0 , c.count() , "setup" );
+assert.eq(0, c.count(), "setup");
-t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c" );
-assert.soon( base.length + " == c.count()" , "after import 1 " );
+t.runTool("import",
+ "--file",
+ "jstests/tool/data/csvimport1.csv",
+ "-d",
+ t.baseName,
+ "-c",
+ "foo",
+ "--type",
+ "csv",
+ "-f",
+ "a,b,c");
+assert.soon(base.length + " == c.count()", "after import 1 ");
-a = c.find().sort( { a : 1 } ).toArray();
-for (i = 0; i < base.length; i++ ) {
+a = c.find().sort({a: 1}).toArray();
+for (i = 0; i < base.length; i++) {
delete a[i]._id;
- assert.docEq( base[i], a[i], "csv parse " + i);
+ assert.docEq(base[i], a[i], "csv parse " + i);
}
c.drop();
-assert.eq( 0 , c.count() , "after drop" );
+assert.eq(0, c.count(), "after drop");
-t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" );
-assert.soon( "c.findOne()" , "no data after sleep" );
-assert.eq( base.length - 1 , c.count() , "after import 2" );
+t.runTool("import",
+ "--file",
+ "jstests/tool/data/csvimport1.csv",
+ "-d",
+ t.baseName,
+ "-c",
+ "foo",
+ "--type",
+ "csv",
+ "--headerline");
+assert.soon("c.findOne()", "no data after sleep");
+assert.eq(base.length - 1, c.count(), "after import 2");
-x = c.find().sort( { a : 1 } ).toArray();
-for (i = 0; i < base.length - 1; i++ ) {
+x = c.find().sort({a: 1}).toArray();
+for (i = 0; i < base.length - 1; i++) {
delete x[i]._id;
- assert.docEq( base[i], x[i], "csv parse with headerline " + i);
+ assert.docEq(base[i], x[i], "csv parse with headerline " + i);
}
-
t.stop();
diff --git a/jstests/tool/dumpauth.js b/jstests/tool/dumpauth.js
index 86caf260328..7be119a9f54 100644
--- a/jstests/tool/dumpauth.js
+++ b/jstests/tool/dumpauth.js
@@ -8,8 +8,8 @@ var profileName = "system.profile";
var dumpDir = MongoRunner.dataPath + "jstests_tool_dumprestore_dump_system_profile/";
db = m.getDB(dbName);
-db.createUser({user: "testuser" , pwd: "testuser", roles: jsTest.adminUserRoles});
-assert( db.auth( "testuser" , "testuser" ) , "auth failed" );
+db.createUser({user: "testuser", pwd: "testuser", roles: jsTest.adminUserRoles});
+assert(db.auth("testuser", "testuser"), "auth failed");
t = db[colName];
t.drop();
@@ -20,25 +20,30 @@ profile.drop();
db.setProfilingLevel(2);
// Populate the database
-for(var i = 0; i < 100; i++) {
- t.save({ "x": i });
+for (var i = 0; i < 100; i++) {
+ t.save({"x": i});
}
assert.gt(profile.count(), 0, "admin.system.profile should have documents");
assert.eq(t.count(), 100, "testcol should have documents");
// Create a user with backup permissions
-db.createUser({user: "backup" , pwd: "password", roles: ["backup"]});
+db.createUser({user: "backup", pwd: "password", roles: ["backup"]});
// Backup the database with the backup user
-x = runMongoProgram( "mongodump",
- "--db", dbName,
- "--out", dumpDir,
- "--authenticationDatabase=admin",
- "-u", "backup",
- "-p", "password",
- "-h", "127.0.0.1:"+m.port);
+x = runMongoProgram("mongodump",
+ "--db",
+ dbName,
+ "--out",
+ dumpDir,
+ "--authenticationDatabase=admin",
+ "-u",
+ "backup",
+ "-p",
+ "password",
+ "-h",
+ "127.0.0.1:" + m.port);
assert.eq(x, 0, "mongodump should succeed with authentication");
// Assert that a BSON document for admin.system.profile has been produced
-x = runMongoProgram( "bsondump", dumpDir + "/" + dbName + "/" + profileName + ".bson" );
+x = runMongoProgram("bsondump", dumpDir + "/" + dbName + "/" + profileName + ".bson");
assert.eq(x, 0, "bsondump should succeed parsing the profile data");
diff --git a/jstests/tool/dumpfilename1.js b/jstests/tool/dumpfilename1.js
index 4a79a11bdb1..3e826952c4c 100644
--- a/jstests/tool/dumpfilename1.js
+++ b/jstests/tool/dumpfilename1.js
@@ -1,13 +1,12 @@
-//dumpfilename1.js
+// dumpfilename1.js
-//Test designed to make sure error that dumping a collection with "/" fails
+// Test designed to make sure error that dumping a collection with "/" fails
-t = new ToolTest( "dumpfilename1" );
+t = new ToolTest("dumpfilename1");
-t.startDB( "foo" );
+t.startDB("foo");
c = t.db;
-assert.writeOK(c.getCollection("df/").insert({ a: 3 }));
-assert(t.runTool( "dump" , "--out" , t.ext ) != 0, "dump should fail with non-zero return code");
+assert.writeOK(c.getCollection("df/").insert({a: 3}));
+assert(t.runTool("dump", "--out", t.ext) != 0, "dump should fail with non-zero return code");
t.stop();
-
diff --git a/jstests/tool/dumprestore1.js b/jstests/tool/dumprestore1.js
index aabe441244f..dad1eb65a48 100644
--- a/jstests/tool/dumprestore1.js
+++ b/jstests/tool/dumprestore1.js
@@ -1,31 +1,31 @@
// dumprestore1.js
-t = new ToolTest( "dumprestore1" );
+t = new ToolTest("dumprestore1");
-c = t.startDB( "foo" );
-assert.eq( 0 , c.count() , "setup1" );
-c.save( { a : 22 } );
-assert.eq( 1 , c.count() , "setup2" );
+c = t.startDB("foo");
+assert.eq(0, c.count(), "setup1");
+c.save({a: 22});
+assert.eq(1, c.count(), "setup2");
-t.runTool( "dump" , "--out" , t.ext );
+t.runTool("dump", "--out", t.ext);
c.drop();
-assert.eq( 0 , c.count() , "after drop" );
+assert.eq(0, c.count(), "after drop");
-t.runTool( "restore" , "--dir" , t.ext );
-assert.soon( "c.findOne()" , "no data after sleep" );
-assert.eq( 1 , c.count() , "after restore 2" );
-assert.eq( 22 , c.findOne().a , "after restore 2" );
+t.runTool("restore", "--dir", t.ext);
+assert.soon("c.findOne()", "no data after sleep");
+assert.eq(1, c.count(), "after restore 2");
+assert.eq(22, c.findOne().a, "after restore 2");
// ensure that --collection is used with --db. See SERVER-7721
-var ret = t.runTool( "dump" , "--collection" , "col" );
-assert.neq( ret, 0, "mongodump should return failure code" );
+var ret = t.runTool("dump", "--collection", "col");
+assert.neq(ret, 0, "mongodump should return failure code");
t.stop();
// Ensure that --db and --collection are provided when filename is "-" (stdin).
-ret = t.runTool( "restore" , "--collection" , "coll", "--dir", "-" );
-assert.neq( ret, 0, "mongorestore should return failure code" );
+ret = t.runTool("restore", "--collection", "coll", "--dir", "-");
+assert.neq(ret, 0, "mongorestore should return failure code");
t.stop();
-ret = t.runTool( "restore" , "--db" , "db", "--dir", "-" );
-assert.neq( ret, 0, "mongorestore should return failure code" );
+ret = t.runTool("restore", "--db", "db", "--dir", "-");
+assert.neq(ret, 0, "mongorestore should return failure code");
t.stop();
diff --git a/jstests/tool/dumprestore10.js b/jstests/tool/dumprestore10.js
index 6cf3cbbbfa1..7c8cc0ada58 100644
--- a/jstests/tool/dumprestore10.js
+++ b/jstests/tool/dumprestore10.js
@@ -10,7 +10,7 @@ function step(msg) {
step();
-var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var replTest = new ReplSetTest({name: name, nodes: 2});
var nodes = replTest.startSet();
replTest.initiate();
var master = replTest.getPrimary();
@@ -20,7 +20,7 @@ var total = 1000;
step("store data");
var foo = master.getDB("foo");
for (i = 0; i < total; i++) {
- foo.bar.insert({ x: i, y: "abc" });
+ foo.bar.insert({x: i, y: "abc"});
}
}
@@ -33,8 +33,7 @@ step("mongodump from replset");
var data = MongoRunner.dataDir + "/dumprestore10-dump1/";
-runMongoProgram( "mongodump", "--host", "127.0.0.1:"+master.port, "--out", data );
-
+runMongoProgram("mongodump", "--host", "127.0.0.1:" + master.port, "--out", data);
{
step("remove data after dumping");
@@ -48,7 +47,8 @@ runMongoProgram( "mongodump", "--host", "127.0.0.1:"+master.port, "--out", data
step("try mongorestore with write concern");
-runMongoProgram( "mongorestore", "--writeConcern", "2", "--host", "127.0.0.1:"+master.port, "--dir", data );
+runMongoProgram(
+ "mongorestore", "--writeConcern", "2", "--host", "127.0.0.1:" + master.port, "--dir", data);
var x = 0;
diff --git a/jstests/tool/dumprestore3.js b/jstests/tool/dumprestore3.js
index f6a8735d5f6..6ac6ae76c3f 100644
--- a/jstests/tool/dumprestore3.js
+++ b/jstests/tool/dumprestore3.js
@@ -3,7 +3,7 @@
var name = "dumprestore3";
-var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var replTest = new ReplSetTest({name: name, nodes: 2});
var nodes = replTest.startSet();
replTest.initiate();
var primary = replTest.getPrimary();
@@ -12,7 +12,7 @@ var secondary = replTest.getSecondary();
jsTestLog("populate primary");
var foo = primary.getDB("foo");
for (i = 0; i < 20; i++) {
- foo.bar.insert({ x: i, y: "abc" });
+ foo.bar.insert({x: i, y: "abc"});
}
jsTestLog("wait for secondary");
@@ -21,21 +21,21 @@ replTest.awaitReplication();
jsTestLog("mongodump from primary");
var data = MongoRunner.dataDir + "/dumprestore3-other1/";
resetDbpath(data);
-var ret = runMongoProgram( "mongodump", "--host", primary.host, "--out", data );
+var ret = runMongoProgram("mongodump", "--host", primary.host, "--out", data);
assert.eq(ret, 0, "mongodump should exit w/ 0 on primary");
jsTestLog("try mongorestore to secondary");
-ret = runMongoProgram( "mongorestore", "--host", secondary.host, "--dir", data );
+ret = runMongoProgram("mongorestore", "--host", secondary.host, "--dir", data);
assert.neq(ret, 0, "mongorestore should exit w/ 1 on secondary");
jsTestLog("mongoexport from primary");
dataFile = MongoRunner.dataDir + "/dumprestore3-other2.json";
-ret = runMongoProgram( "mongoexport", "--host", primary.host, "--out",
- dataFile, "--db", "foo", "--collection", "bar" );
+ret = runMongoProgram(
+ "mongoexport", "--host", primary.host, "--out", dataFile, "--db", "foo", "--collection", "bar");
assert.eq(ret, 0, "mongoexport should exit w/ 0 on primary");
jsTestLog("mongoimport from secondary");
-ret = runMongoProgram( "mongoimport", "--host", secondary.host, "--file", dataFile );
+ret = runMongoProgram("mongoimport", "--host", secondary.host, "--file", dataFile);
assert.neq(ret, 0, "mongoreimport should exit w/ 1 on secondary");
jsTestLog("stopSet");
diff --git a/jstests/tool/dumprestore4.js b/jstests/tool/dumprestore4.js
index a4d33df7deb..58595f62383 100644
--- a/jstests/tool/dumprestore4.js
+++ b/jstests/tool/dumprestore4.js
@@ -1,6 +1,5 @@
// dumprestore4.js -- see SERVER-2186
-
// The point of this test is to ensure that mongorestore successfully
// constructs indexes when the database being restored into has a
// different name than the database dumped from. There are 2
@@ -9,35 +8,35 @@
// some reason you have another database called "A" at the time of the
// restore, mongorestore shouldn't touch it.
-t = new ToolTest( "dumprestore4" );
+t = new ToolTest("dumprestore4");
-c = t.startDB( "dumprestore4" );
+c = t.startDB("dumprestore4");
-db=t.db;
+db = t.db;
dbname = db.getName();
-dbname2 = "NOT_"+dbname;
+dbname2 = "NOT_" + dbname;
-db2=db.getSisterDB( dbname2 );
+db2 = db.getSisterDB(dbname2);
-db.dropDatabase(); // make sure it's empty
-db2.dropDatabase(); // make sure everybody's empty
+db.dropDatabase(); // make sure it's empty
+db2.dropDatabase(); // make sure everybody's empty
-assert.eq( 0 , c.getIndexes().length , "setup1" );
-c.ensureIndex({ x : 1} );
-assert.eq( 2 , c.getIndexes().length , "setup2" ); // _id and x_1
+assert.eq(0, c.getIndexes().length, "setup1");
+c.ensureIndex({x: 1});
+assert.eq(2, c.getIndexes().length, "setup2"); // _id and x_1
-assert.eq( 0, t.runTool( "dump" , "-d" , dbname, "--out", t.ext ), "dump");
+assert.eq(0, t.runTool("dump", "-d", dbname, "--out", t.ext), "dump");
// to ensure issue (2), we have to clear out the first db.
// By inspection, db.dropIndexes() doesn't get rid of the _id index on c,
// so we have to drop the collection.
c.drop();
-assert.eq( 0, t.runTool( "restore" , "--dir" , t.ext + "/" + dbname, "-d", dbname2 ), "restore" );
+assert.eq(0, t.runTool("restore", "--dir", t.ext + "/" + dbname, "-d", dbname2), "restore");
// issue (1)
-assert.eq( 2 , db2.dumprestore4.getIndexes().length , "after restore 1" );
+assert.eq(2, db2.dumprestore4.getIndexes().length, "after restore 1");
// issue (2)
-assert.eq( 0 , db.dumprestore4.getIndexes().length , "after restore 2" );
+assert.eq(0, db.dumprestore4.getIndexes().length, "after restore 2");
t.stop();
diff --git a/jstests/tool/dumprestore6.js b/jstests/tool/dumprestore6.js
index e342e71f3f1..653dd256895 100644
--- a/jstests/tool/dumprestore6.js
+++ b/jstests/tool/dumprestore6.js
@@ -1,26 +1,30 @@
// Test restoring from a dump with v:0 indexes.
-// mongodump strips the 'v' property from the index specification by default. When using
+// mongodump strips the 'v' property from the index specification by default. When using
// --keepIndexVersion, the 'v' property is not stripped, but index creation will fail.
-var toolTest = new ToolTest( "dumprestore6" );
-var col = toolTest.startDB( "foo" );
+var toolTest = new ToolTest("dumprestore6");
+var col = toolTest.startDB("foo");
var testDb = toolTest.db;
-assert.eq( 0 , col.count() , "setup1" );
+assert.eq(0, col.count(), "setup1");
// Normal restore should succeed and convert v:1 index.
-toolTest.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db",
- "jstests_tool_dumprestore6");
-assert.soon( "col.findOne()" , "no data after sleep" );
-assert.eq( 1 , col.count() , "after restore" );
+toolTest.runTool(
+ "restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6");
+assert.soon("col.findOne()", "no data after sleep");
+assert.eq(1, col.count(), "after restore");
var indexes = col.getIndexes();
-assert.eq( 2, indexes.length, "there aren't the correct number of indexes" );
+assert.eq(2, indexes.length, "there aren't the correct number of indexes");
// Try with --keepIndexVersion, should fail to restore v:0 index.
testDb.dropDatabase();
-assert.eq( 0 , col.count() , "after drop" );
-toolTest.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db",
- "jstests_tool_dumprestore6", "--keepIndexVersion");
+assert.eq(0, col.count(), "after drop");
+toolTest.runTool("restore",
+ "--dir",
+ "jstests/tool/data/dumprestore6",
+ "--db",
+ "jstests_tool_dumprestore6",
+ "--keepIndexVersion");
indexes = col.getIndexes();
-assert.eq( 1, indexes.length, "there aren't the correct number of indexes" );
+assert.eq(1, indexes.length, "there aren't the correct number of indexes");
toolTest.stop();
diff --git a/jstests/tool/dumprestore7.js b/jstests/tool/dumprestore7.js
index 9a7d09665ef..0598e73c0a8 100644
--- a/jstests/tool/dumprestore7.js
+++ b/jstests/tool/dumprestore7.js
@@ -8,7 +8,7 @@ function step(msg) {
step();
-var replTest = new ReplSetTest( {name: name, nodes: 1} );
+var replTest = new ReplSetTest({name: name, nodes: 1});
var nodes = replTest.startSet();
replTest.initiate();
var master = replTest.getPrimary();
@@ -17,14 +17,20 @@ var master = replTest.getPrimary();
step("first chunk of data");
var foo = master.getDB("foo");
for (i = 0; i < 20; i++) {
- foo.bar.insert({ x: i, y: "abc" });
+ foo.bar.insert({x: i, y: "abc"});
}
}
{
step("wait");
replTest.awaitReplication();
- var time = replTest.getPrimary().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next();
+ var time = replTest.getPrimary()
+ .getDB("local")
+ .getCollection("oplog.rs")
+ .find()
+ .limit(1)
+ .sort({$natural: -1})
+ .next();
step(time.ts.t);
}
@@ -32,26 +38,29 @@ var master = replTest.getPrimary();
step("second chunk of data");
var foo = master.getDB("foo");
for (i = 30; i < 50; i++) {
- foo.bar.insert({ x: i, y: "abc" });
+ foo.bar.insert({x: i, y: "abc"});
}
}
-{
- var conn = MongoRunner.runMongod({});
-}
+{ var conn = MongoRunner.runMongod({}); }
step("try mongodump with $timestamp");
var data = MongoRunner.dataDir + "/dumprestore7-dump1/";
-var query = "{\"ts\":{\"$gt\":{\"$timestamp\":{\"t\":"+ time.ts.t + ",\"i\":" + time.ts.i +"}}}}";
+var query = "{\"ts\":{\"$gt\":{\"$timestamp\":{\"t\":" + time.ts.t + ",\"i\":" + time.ts.i + "}}}}";
-MongoRunner.runMongoTool( "mongodump",
- { "host": "127.0.0.1:"+replTest.ports[0],
- "db": "local", "collection": "oplog.rs",
- "query": query, "out": data });
+MongoRunner.runMongoTool("mongodump",
+ {
+ "host": "127.0.0.1:" + replTest.ports[0],
+ "db": "local",
+ "collection": "oplog.rs",
+ "query": query,
+ "out": data
+ });
step("try mongorestore from $timestamp");
-runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+conn.port, "--dir", data, "--writeConcern", 1);
+runMongoProgram(
+ "mongorestore", "--host", "127.0.0.1:" + conn.port, "--dir", data, "--writeConcern", 1);
var x = 9;
x = conn.getDB("local").getCollection("oplog.rs").count();
@@ -61,4 +70,3 @@ step("stopSet");
replTest.stopSet();
step("SUCCESS");
-
diff --git a/jstests/tool/dumprestore8.js b/jstests/tool/dumprestore8.js
index edc1a874343..9cdae87df80 100644
--- a/jstests/tool/dumprestore8.js
+++ b/jstests/tool/dumprestore8.js
@@ -1,107 +1,110 @@
// dumprestore8.js
-
// This file tests that indexes and capped collection options get properly dumped and restored.
-// It checks that this works both when doing a full database dump/restore and when doing it just for a single db or collection
+// It checks that this works both when doing a full database dump/restore and when doing it just for
+// a single db or collection
-t = new ToolTest( "dumprestore8" );
+t = new ToolTest("dumprestore8");
-t.startDB( "foo" );
+t.startDB("foo");
db = t.db;
dbname = db.getName();
-dbname2 = "NOT_"+dbname;
+dbname2 = "NOT_" + dbname;
db.dropDatabase();
-assert.eq( 0 , db.foo.count() , "setup1" );
-db.foo.save( { a : 1, b : 1 } );
-db.foo.ensureIndex({a:1});
-db.foo.ensureIndex({b:1, _id:-1});
-assert.eq( 1 , db.foo.count() , "setup2" );
-
+assert.eq(0, db.foo.count(), "setup1");
+db.foo.save({a: 1, b: 1});
+db.foo.ensureIndex({a: 1});
+db.foo.ensureIndex({b: 1, _id: -1});
+assert.eq(1, db.foo.count(), "setup2");
-assert.eq( 0 , db.bar.count() , "setup3" );
-db.createCollection("bar", {capped:true, size:1000, max:10});
+assert.eq(0, db.bar.count(), "setup3");
+db.createCollection("bar", {capped: true, size: 1000, max: 10});
for (var i = 0; i < 1000; i++) {
- db.bar.save( { x : i } );
+ db.bar.save({x: i});
}
-db.bar.ensureIndex({x:1});
+db.bar.ensureIndex({x: 1});
barDocCount = db.bar.count();
-assert.gt( barDocCount, 0 , "No documents inserted" );
-assert.lt( db.bar.count(), 1000 , "Capped collection didn't evict documents" );
-assert.eq( 5 , db.foo.getIndexes().length + db.bar.getIndexes().length, "Indexes weren't created right" );
-
+assert.gt(barDocCount, 0, "No documents inserted");
+assert.lt(db.bar.count(), 1000, "Capped collection didn't evict documents");
+assert.eq(5,
+ db.foo.getIndexes().length + db.bar.getIndexes().length,
+ "Indexes weren't created right");
// Full dump/restore
-t.runTool( "dump" , "--out" , t.ext );
+t.runTool("dump", "--out", t.ext);
db.dropDatabase();
-assert.eq( 0 , db.foo.count() , "foo not dropped" );
-assert.eq( 0 , db.bar.count() , "bar not dropped" );
-assert.eq( 0 , db.bar.getIndexes().length , "indexes on bar not dropped" );
-assert.eq( 0 , db.foo.getIndexes().length , "indexes on foo not dropped" );
+assert.eq(0, db.foo.count(), "foo not dropped");
+assert.eq(0, db.bar.count(), "bar not dropped");
+assert.eq(0, db.bar.getIndexes().length, "indexes on bar not dropped");
+assert.eq(0, db.foo.getIndexes().length, "indexes on foo not dropped");
-t.runTool( "restore" , "--dir" , t.ext );
+t.runTool("restore", "--dir", t.ext);
-assert.soon( "db.foo.findOne()" , "no data after sleep" );
-assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo" );
-assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar" );
+assert.soon("db.foo.findOne()", "no data after sleep");
+assert.eq(1, db.foo.count(), "wrong number of docs restored to foo");
+assert.eq(barDocCount, db.bar.count(), "wrong number of docs restored to bar");
for (var i = 0; i < 10; i++) {
- db.bar.save({x:i});
+ db.bar.save({x: i});
}
-assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore." );
-assert.eq( 5 , db.foo.getIndexes().length + db.bar.getIndexes().length, "Indexes weren't created correctly by restore");
+assert.eq(barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore.");
+assert.eq(5,
+ db.foo.getIndexes().length + db.bar.getIndexes().length,
+ "Indexes weren't created correctly by restore");
// Dump/restore single DB
dumppath = t.ext + "singledbdump/";
mkdir(dumppath);
-t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+t.runTool("dump", "-d", dbname, "--out", dumppath);
db.dropDatabase();
-assert.eq( 0 , db.foo.count() , "foo not dropped2" );
-assert.eq( 0 , db.bar.count() , "bar not dropped2" );
-assert.eq( 0 , db.foo.getIndexes().length , "indexes on foo not dropped2" );
-assert.eq( 0 , db.bar.getIndexes().length , "indexes on bar not dropped2" );
+assert.eq(0, db.foo.count(), "foo not dropped2");
+assert.eq(0, db.bar.count(), "bar not dropped2");
+assert.eq(0, db.foo.getIndexes().length, "indexes on foo not dropped2");
+assert.eq(0, db.bar.getIndexes().length, "indexes on bar not dropped2");
-t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname );
+t.runTool("restore", "-d", dbname2, "--dir", dumppath + dbname);
db = db.getSiblingDB(dbname2);
-assert.soon( "db.foo.findOne()" , "no data after sleep 2" );
-assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo 2" );
-assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar 2" );
+assert.soon("db.foo.findOne()", "no data after sleep 2");
+assert.eq(1, db.foo.count(), "wrong number of docs restored to foo 2");
+assert.eq(barDocCount, db.bar.count(), "wrong number of docs restored to bar 2");
for (var i = 0; i < 10; i++) {
- db.bar.save({x:i});
+ db.bar.save({x: i});
}
-assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore 2." );
-assert.eq( 5 , db.foo.getIndexes().length + db.bar.getIndexes().length, "Indexes weren't created correctly by restore 2");
-
+assert.eq(barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore 2.");
+assert.eq(5,
+ db.foo.getIndexes().length + db.bar.getIndexes().length,
+ "Indexes weren't created correctly by restore 2");
// Dump/restore single collection
dumppath = t.ext + "singlecolldump/";
mkdir(dumppath);
-t.runTool( "dump" , "-d", dbname2, "-c", "bar", "--out" , dumppath );
+t.runTool("dump", "-d", dbname2, "-c", "bar", "--out", dumppath);
db.dropDatabase();
-assert.eq( 0 , db.bar.count() , "bar not dropped3" );
-assert.eq( 0 , db.bar.getIndexes().length , "indexes not dropped3" );
+assert.eq(0, db.bar.count(), "bar not dropped3");
+assert.eq(0, db.bar.getIndexes().length, "indexes not dropped3");
-t.runTool( "restore" , "-d", dbname, "-c", "baz", "--dir" , dumppath + dbname2 + "/bar.bson" );
+t.runTool("restore", "-d", dbname, "-c", "baz", "--dir", dumppath + dbname2 + "/bar.bson");
db = db.getSiblingDB(dbname);
-assert.soon( "db.baz.findOne()" , "no data after sleep 2" );
-assert.eq( barDocCount, db.baz.count(), "wrong number of docs restored to bar 2" );
+assert.soon("db.baz.findOne()", "no data after sleep 2");
+assert.eq(barDocCount, db.baz.count(), "wrong number of docs restored to bar 2");
for (var i = 0; i < 10; i++) {
- db.baz.save({x:i});
+ db.baz.save({x: i});
}
-assert.eq( barDocCount, db.baz.count(), "Capped collection didn't evict documents after restore 3." );
-assert.eq( 2 , db.baz.getIndexes().length , "Indexes weren't created correctly by restore 3" );
+assert.eq(barDocCount, db.baz.count(), "Capped collection didn't evict documents after restore 3.");
+assert.eq(2, db.baz.getIndexes().length, "Indexes weren't created correctly by restore 3");
t.stop();
diff --git a/jstests/tool/dumprestore9.js b/jstests/tool/dumprestore9.js
index 5a36c54efd5..7db1f817b24 100644
--- a/jstests/tool/dumprestore9.js
+++ b/jstests/tool/dumprestore9.js
@@ -1,93 +1,103 @@
// Test disabled until SERVER-3853 is finished
-if(0) {
+if (0) {
+ (function() {
-(function() {
+ var name = "dumprestore9";
+ function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+ }
-var name = "dumprestore9";
-function step(msg) {
- msg = msg || "";
- this.x = (this.x || 0) + 1;
- print('\n' + name + ".js step " + this.x + ' ' + msg);
-}
-
-var s = new ShardingTest({ name: "dumprestore9a",
- shards: 2,
- mongos: 3,
- other: { chunkSize: 1, enableBalancer : 1 } });
+ var s = new ShardingTest({
+ name: "dumprestore9a",
+ shards: 2,
+ mongos: 3,
+ other: {chunkSize: 1, enableBalancer: 1}
+ });
-step("Shard collection");
+ step("Shard collection");
-s.adminCommand( { enablesharding : "aaa" } ); // Make this db alphabetically before 'config' so it gets restored first
-s.ensurePrimaryShard('aaa', 'shard0001');
-s.adminCommand( { shardcollection : "aaa.foo" , key : { x : 1 } } );
+ s.adminCommand({
+ enablesharding: "aaa"
+ }); // Make this db alphabetically before 'config' so it gets restored first
+ s.ensurePrimaryShard('aaa', 'shard0001');
+ s.adminCommand({shardcollection: "aaa.foo", key: {x: 1}});
-db = s.getDB( "aaa" );
-coll = db.foo;
+ db = s.getDB("aaa");
+ coll = db.foo;
-step("insert data");
+ step("insert data");
-str = 'a';
-while (str.length < 1024*512) {
- str += str;
-}
+ str = 'a';
+ while (str.length < 1024 * 512) {
+ str += str;
+ }
-numDocs = 20;
-for (var i = 0; i < numDocs; i++) {
- coll.insert({x:i, str:str});
-}
+ numDocs = 20;
+ for (var i = 0; i < numDocs; i++) {
+ coll.insert({x: i, str: str});
+ }
-step("Wait for balancing");
+ step("Wait for balancing");
-assert.soon( function(){ var x = s.chunkDiff( "foo" , "aaa" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 );
+ assert.soon(function() {
+ var x = s.chunkDiff("foo", "aaa");
+ print("chunk diff: " + x);
+ return x < 2;
+ }, "no balance happened", 8 * 60 * 1000, 2000);
-assert.eq(numDocs, coll.count(), "Documents weren't inserted correctly");
+ assert.eq(numDocs, coll.count(), "Documents weren't inserted correctly");
-step("dump cluster");
+ step("dump cluster");
-dumpdir = MongoRunner.dataDir + "/dumprestore9-dump1/";
-resetDbpath(dumpdir);
-runMongoProgram( "mongodump", "--host", s._mongos[0].host, "--out", dumpdir );
+ dumpdir = MongoRunner.dataDir + "/dumprestore9-dump1/";
+ resetDbpath(dumpdir);
+ runMongoProgram("mongodump", "--host", s._mongos[0].host, "--out", dumpdir);
-step("Shutting down cluster");
+ step("Shutting down cluster");
-s.stop();
+ s.stop();
-step("Starting up clean cluster");
-s = new ShardingTest({ name: "dumprestore9b",
- shards: 2,
- mongos: 3,
- other: {chunkSize:1} });
+ step("Starting up clean cluster");
+ s = new ShardingTest({name: "dumprestore9b", shards: 2, mongos: 3, other: {chunkSize: 1}});
-db = s.getDB( "aaa" );
-coll = db.foo;
+ db = s.getDB("aaa");
+ coll = db.foo;
-assert.eq(0, coll.count(), "Data wasn't cleaned up by restarting sharding test");
+ assert.eq(0, coll.count(), "Data wasn't cleaned up by restarting sharding test");
-step("Restore data and config");
+ step("Restore data and config");
-runMongoProgram( "mongorestore", dumpdir, "--host", s._mongos[1].host, "--restoreShardingConfig", "--forceConfigRestore");
+ runMongoProgram("mongorestore",
+ dumpdir,
+ "--host",
+ s._mongos[1].host,
+ "--restoreShardingConfig",
+ "--forceConfigRestore");
-config = s.getDB("config");
-assert(config.databases.findOne({_id:'aaa'}).partitioned, "Config data wasn't restored properly");
+ config = s.getDB("config");
+ assert(config.databases.findOne({_id: 'aaa'}).partitioned,
+ "Config data wasn't restored properly");
-assert( s.chunkDiff( "foo" , "aaa" ) < 2, "Chunk data wasn't restored properly");
+ assert(s.chunkDiff("foo", "aaa") < 2, "Chunk data wasn't restored properly");
-assert.eq(numDocs, coll.count(), "Didn't restore all documents properly2");
-assert.eq(numDocs, coll.find().itcount(), "Didn't restore all documents properly");
+ assert.eq(numDocs, coll.count(), "Didn't restore all documents properly2");
+ assert.eq(numDocs, coll.find().itcount(), "Didn't restore all documents properly");
-for (var i = 0; i < numDocs; i++) {
- doc = coll.findOne({x:i});
- assert.eq(i, doc.x, "Doc missing from the shard it should be on");
-}
-
-for (var i = 0; i < s._connections.length; i++) {
- assert(s._connections[i].getDB("aaa").foo.count() > 0, "No data on shard: " + s._connections[i].host);
-}
+ for (var i = 0; i < numDocs; i++) {
+ doc = coll.findOne({x: i});
+ assert.eq(i, doc.x, "Doc missing from the shard it should be on");
+ }
-step("Stop cluster");
-s.stop();
-step("SUCCESS");
+ for (var i = 0; i < s._connections.length; i++) {
+ assert(s._connections[i].getDB("aaa").foo.count() > 0,
+ "No data on shard: " + s._connections[i].host);
+ }
-})();
+ step("Stop cluster");
+ s.stop();
+ step("SUCCESS");
+ })();
}
diff --git a/jstests/tool/dumprestoreWithNoOptions.js b/jstests/tool/dumprestoreWithNoOptions.js
index b822deb93e8..1062abd1e94 100644
--- a/jstests/tool/dumprestoreWithNoOptions.js
+++ b/jstests/tool/dumprestoreWithNoOptions.js
@@ -8,107 +8,122 @@
// database dump/restore and when doing it just for a
// single db or collection.
+t = new ToolTest("dumprestoreWithNoOptions");
-t = new ToolTest( "dumprestoreWithNoOptions" );
-
-t.startDB( "foo" );
+t.startDB("foo");
db = t.db;
// We turn this off to prevent the server from touching the 'options' field in system.namespaces.
// This is important because we check exact values of the 'options' field in this test.
-db.adminCommand({setParameter:1, newCollectionsUsePowerOf2Sizes: false});
+db.adminCommand({setParameter: 1, newCollectionsUsePowerOf2Sizes: false});
dbname = db.getName();
-dbname2 = "NOT_"+dbname;
+dbname2 = "NOT_" + dbname;
db.dropDatabase();
var defaultFlags = {};
-var options = { capped: true, size: 4096, autoIndexId: true };
+var options = {
+ capped: true,
+ size: 4096,
+ autoIndexId: true
+};
db.createCollection('capped', options);
-assert.eq( 1, db.capped.getIndexes().length, "auto index not created" );
+assert.eq(1, db.capped.getIndexes().length, "auto index not created");
var cappedOptions = db.capped.exists().options;
-for ( var opt in options ) {
- assert.eq(options[opt], cappedOptions[opt],
+for (var opt in options) {
+ assert.eq(options[opt],
+ cappedOptions[opt],
'invalid option:' + tojson(options) + " " + tojson(cappedOptions));
}
-assert.writeOK(db.capped.insert({ x: 1 }));
+assert.writeOK(db.capped.insert({x: 1}));
// Full dump/restore
-t.runTool( "dump" , "--out" , t.ext );
+t.runTool("dump", "--out", t.ext);
db.dropDatabase();
-assert.eq( 0, db.capped.count(), "capped not dropped");
-assert.eq( 0, db.capped.getIndexes().length, "indexes not dropped" );
+assert.eq(0, db.capped.count(), "capped not dropped");
+assert.eq(0, db.capped.getIndexes().length, "indexes not dropped");
-t.runTool( "restore" , "--dir" , t.ext , "--noOptionsRestore");
+t.runTool("restore", "--dir", t.ext, "--noOptionsRestore");
-assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert.eq(1, db.capped.count(), "wrong number of docs restored to capped");
assert(true !== db.capped.stats().capped, "restore options were not ignored");
-assert.eq( defaultFlags, db.capped.exists().options,
- "restore options not ignored: " + tojson( db.capped.exists() ) );
+assert.eq(defaultFlags,
+ db.capped.exists().options,
+ "restore options not ignored: " + tojson(db.capped.exists()));
// Dump/restore single DB
db.dropDatabase();
-var options = { capped: true, size: 4096, autoIndexId: true };
+var options = {
+ capped: true,
+ size: 4096,
+ autoIndexId: true
+};
db.createCollection('capped', options);
-assert.eq( 1, db.capped.getIndexes().length, "auto index not created" );
+assert.eq(1, db.capped.getIndexes().length, "auto index not created");
var cappedOptions = db.capped.exists().options;
-for ( var opt in options ) {
- assert.eq(options[opt], cappedOptions[opt], 'invalid option');
+for (var opt in options) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option');
}
-assert.writeOK(db.capped.insert({ x: 1 }));
+assert.writeOK(db.capped.insert({x: 1}));
dumppath = t.ext + "noOptionsSingleDump/";
mkdir(dumppath);
-t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+t.runTool("dump", "-d", dbname, "--out", dumppath);
db.dropDatabase();
-assert.eq( 0, db.capped.count(), "capped not dropped");
-assert.eq( 0, db.capped.getIndexes().length, "indexes not dropped" );
+assert.eq(0, db.capped.count(), "capped not dropped");
+assert.eq(0, db.capped.getIndexes().length, "indexes not dropped");
-t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname, "--noOptionsRestore");
+t.runTool("restore", "-d", dbname2, "--dir", dumppath + dbname, "--noOptionsRestore");
db = db.getSiblingDB(dbname2);
-assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert.eq(1, db.capped.count(), "wrong number of docs restored to capped");
assert(true !== db.capped.stats().capped, "restore options were not ignored");
-assert.eq( defaultFlags, db.capped.exists().options,
- "restore options not ignored: " + tojson( db.capped.exists() ) );
+assert.eq(defaultFlags,
+ db.capped.exists().options,
+ "restore options not ignored: " + tojson(db.capped.exists()));
// Dump/restore single collection
db.dropDatabase();
-var options = { capped: true, size: 4096, autoIndexId: true };
+var options = {
+ capped: true,
+ size: 4096,
+ autoIndexId: true
+};
db.createCollection('capped', options);
-assert.eq( 1, db.capped.getIndexes().length, "auto index not created" );
+assert.eq(1, db.capped.getIndexes().length, "auto index not created");
var cappedOptions = db.capped.exists().options;
-for ( var opt in options ) {
- assert.eq(options[opt], cappedOptions[opt], 'invalid option');
+for (var opt in options) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option');
}
-assert.writeOK(db.capped.insert({ x: 1 }));
+assert.writeOK(db.capped.insert({x: 1}));
dumppath = t.ext + "noOptionsSingleColDump/";
mkdir(dumppath);
dbname = db.getName();
-t.runTool( "dump" , "-d", dbname, "-c", "capped", "--out" , dumppath );
+t.runTool("dump", "-d", dbname, "-c", "capped", "--out", dumppath);
db.dropDatabase();
-assert.eq( 0, db.capped.count(), "capped not dropped");
-assert.eq( 0, db.capped.getIndexes().length, "indexes not dropped" );
+assert.eq(0, db.capped.count(), "capped not dropped");
+assert.eq(0, db.capped.getIndexes().length, "indexes not dropped");
-t.runTool( "restore", "-d", dbname, "--drop", "--noOptionsRestore", dumppath + dbname );
+t.runTool("restore", "-d", dbname, "--drop", "--noOptionsRestore", dumppath + dbname);
db = db.getSiblingDB(dbname);
-assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
-assert( true !== db.capped.stats().capped, "restore options were not ignored" );
-assert.eq( defaultFlags, db.capped.exists().options,
- "restore options not ignored: " + tojson( db.capped.exists() ) );
+assert.eq(1, db.capped.count(), "wrong number of docs restored to capped");
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert.eq(defaultFlags,
+ db.capped.exists().options,
+ "restore options not ignored: " + tojson(db.capped.exists()));
t.stop();
diff --git a/jstests/tool/dumprestore_auth.js b/jstests/tool/dumprestore_auth.js
index 4bda54a5bdc..a4a19650e77 100644
--- a/jstests/tool/dumprestore_auth.js
+++ b/jstests/tool/dumprestore_auth.js
@@ -1,48 +1,64 @@
// dumprestore_auth.js
-
-t = new ToolTest("dumprestore_auth", { auth : "" });
+t = new ToolTest("dumprestore_auth", {auth: ""});
c = t.startDB("foo");
var dbName = c.getDB().toString();
-print("DB is ",dbName);
+print("DB is ", dbName);
adminDB = c.getDB().getSiblingDB('admin');
adminDB.createUser({user: 'admin', pwd: 'password', roles: ['root']});
-adminDB.auth('admin','password');
+adminDB.auth('admin', 'password');
adminDB.createUser({user: 'backup', pwd: 'password', roles: ['backup']});
adminDB.createUser({user: 'restore', pwd: 'password', roles: ['restore']});
// Add user defined roles & users with those roles
var testUserAdmin = c.getDB().getSiblingDB(dbName);
-var backupActions = ["find","listCollections", "listIndexes"];
-testUserAdmin.createRole({role: "backupFoo",
- privileges: [{resource: {db: dbName, collection: "foo"}, actions:backupActions},
- {resource: {db: dbName, collection: "" },
- actions: backupActions}],
- roles: []});
+var backupActions = ["find", "listCollections", "listIndexes"];
+testUserAdmin.createRole({
+ role: "backupFoo",
+ privileges: [
+ {resource: {db: dbName, collection: "foo"}, actions: backupActions},
+ {resource: {db: dbName, collection: ""}, actions: backupActions}
+ ],
+ roles: []
+});
testUserAdmin.createUser({user: 'backupFoo', pwd: 'password', roles: ['backupFoo']});
-var restoreActions = ["collMod", "createCollection","createIndex","dropCollection","insert","listCollections","listIndexes"];
+var restoreActions = [
+ "collMod",
+ "createCollection",
+ "createIndex",
+ "dropCollection",
+ "insert",
+ "listCollections",
+ "listIndexes"
+];
var restoreActionsFind = restoreActions;
restoreActionsFind.push("find");
-testUserAdmin.createRole({role: "restoreChester",
- privileges: [{resource: {db: dbName, collection: "chester"}, actions: restoreActions},
- {resource: {db: dbName, collection: ""}, actions:["listCollections","listIndexes"]},
- ],
- roles: []});
-testUserAdmin.createRole({role: "restoreFoo",
- privileges: [{resource: {db: dbName, collection: "foo"}, actions:restoreActions},
- {resource: {db: dbName, collection: ""}, actions:["listCollections","listIndexes"]},
- ],
- roles: []});
+testUserAdmin.createRole({
+ role: "restoreChester",
+ privileges: [
+ {resource: {db: dbName, collection: "chester"}, actions: restoreActions},
+ {resource: {db: dbName, collection: ""}, actions: ["listCollections", "listIndexes"]},
+ ],
+ roles: []
+});
+testUserAdmin.createRole({
+ role: "restoreFoo",
+ privileges: [
+ {resource: {db: dbName, collection: "foo"}, actions: restoreActions},
+ {resource: {db: dbName, collection: ""}, actions: ["listCollections", "listIndexes"]},
+ ],
+ roles: []
+});
testUserAdmin.createUser({user: 'restoreChester', pwd: 'password', roles: ['restoreChester']});
testUserAdmin.createUser({user: 'restoreFoo', pwd: 'password', roles: ['restoreFoo']});
var sysUsers = adminDB.system.users.count();
-assert.eq(0 , c.count() , "setup1");
-c.save({ a : 22 });
-assert.eq(1 , c.count() , "setup2");
+assert.eq(0, c.count(), "setup1");
+c.save({a: 22});
+assert.eq(1, c.count(), "setup2");
assert.commandWorked(c.runCommand("collMod", {usePowerOf2Sizes: false}));
@@ -56,20 +72,28 @@ collections.forEach(function(coll) {
assert.neq(null, fooColl, "foo collection doesn't exist");
assert(!fooColl.options.flags, "find namespaces 1");
-t.runTool("dump" , "--out" , t.ext, "--username", "backup", "--password", "password");
+t.runTool("dump", "--out", t.ext, "--username", "backup", "--password", "password");
c.drop();
-assert.eq(0 , c.count() , "after drop");
+assert.eq(0, c.count(), "after drop");
// Restore should fail without user & pass
-t.runTool("restore" , "--dir" , t.ext, "--writeConcern" ,"0");
-assert.eq(0 , c.count() , "after restore without auth");
+t.runTool("restore", "--dir", t.ext, "--writeConcern", "0");
+assert.eq(0, c.count(), "after restore without auth");
// Restore should pass with authorized user
-t.runTool("restore" , "--dir" , t.ext, "--username", "restore", "--password", "password", "--writeConcern", "0");
-assert.soon("c.findOne()" , "no data after sleep");
-assert.eq(1 , c.count() , "after restore 2");
-assert.eq(22 , c.findOne().a , "after restore 2");
+t.runTool("restore",
+ "--dir",
+ t.ext,
+ "--username",
+ "restore",
+ "--password",
+ "password",
+ "--writeConcern",
+ "0");
+assert.soon("c.findOne()", "no data after sleep");
+assert.eq(1, c.count(), "after restore 2");
+assert.eq(22, c.findOne().a, "after restore 2");
collections = c.getDB().getCollectionInfos();
fooColl = null;
@@ -84,23 +108,52 @@ assert(!fooColl.options.flags, "find namespaces 2");
assert.eq(sysUsers, adminDB.system.users.count());
// Dump & restore DB/colection with user defined roles
-t.runTool("dump" , "--out" , t.ext, "--username", "backupFoo", "--password", "password",
- "--db", dbName, "--collection", "foo");
+t.runTool("dump",
+ "--out",
+ t.ext,
+ "--username",
+ "backupFoo",
+ "--password",
+ "password",
+ "--db",
+ dbName,
+ "--collection",
+ "foo");
c.drop();
-assert.eq(0 , c.count() , "after drop");
+assert.eq(0, c.count(), "after drop");
// Restore with wrong user
-t.runTool("restore" , "--username", "restoreChester", "--password", "password",
- "--db", dbName, "--collection", "foo", t.ext+dbName+"/foo.bson", "--writeConcern", "0");
-assert.eq(0 , c.count() , "after restore with wrong user");
+t.runTool("restore",
+ "--username",
+ "restoreChester",
+ "--password",
+ "password",
+ "--db",
+ dbName,
+ "--collection",
+ "foo",
+ t.ext + dbName + "/foo.bson",
+ "--writeConcern",
+ "0");
+assert.eq(0, c.count(), "after restore with wrong user");
// Restore with proper user
-t.runTool("restore" , "--username", "restoreFoo", "--password", "password",
- "--db", dbName, "--collection", "foo", t.ext+dbName+"/foo.bson", "--writeConcern", "0");
-assert.soon("c.findOne()" , "no data after sleep");
-assert.eq(1 , c.count() , "after restore 3");
-assert.eq(22 , c.findOne().a , "after restore 3");
+t.runTool("restore",
+ "--username",
+ "restoreFoo",
+ "--password",
+ "password",
+ "--db",
+ dbName,
+ "--collection",
+ "foo",
+ t.ext + dbName + "/foo.bson",
+ "--writeConcern",
+ "0");
+assert.soon("c.findOne()", "no data after sleep");
+assert.eq(1, c.count(), "after restore 3");
+assert.eq(22, c.findOne().a, "after restore 3");
collections = c.getDB().getCollectionInfos();
fooColl = null;
diff --git a/jstests/tool/dumprestore_auth2.js b/jstests/tool/dumprestore_auth2.js
index 4d410d34ca9..275b47ceac6 100644
--- a/jstests/tool/dumprestore_auth2.js
+++ b/jstests/tool/dumprestore_auth2.js
@@ -4,110 +4,118 @@
var dumpRestoreAuth2 = function(backup_role, restore_role) {
- t = new ToolTest("dumprestore_auth2", {auth: ""});
-
- coll = t.startDB("foo");
- admindb = coll.getDB().getSiblingDB("admin");
-
- // Create the relevant users and roles.
- admindb.createUser({user: "root", pwd: "pass", roles: ["root"]});
- admindb.auth("root", "pass");
-
- admindb.createUser({user: "backup", pwd: "pass", roles: [backup_role]});
- admindb.createUser({user: "restore", pwd: "pass", roles: [restore_role]});
-
- admindb.createRole({role: "customRole",
- privileges:[{resource: {db: "jstests_tool_dumprestore_auth2",
- collection: "foo"},
- actions: ["find"]}],
- roles:[]});
- admindb.createUser({user: "test", pwd: "pass", roles: ["customRole"]});
-
- coll.insert({word: "tomato"});
- assert.eq(1, coll.count());
-
- assert.eq(4, admindb.system.users.count(), "setup users");
- assert.eq(2, admindb.system.users.getIndexes().length,
- "setup2: " + tojson( admindb.system.users.getIndexes() ) );
- assert.eq(1, admindb.system.roles.count(), "setup3");
- assert.eq(2, admindb.system.roles.getIndexes().length, "setup4");
- assert.eq(1, admindb.system.version.count());
- var versionDoc = admindb.system.version.findOne();
-
- // Logout root user.
- admindb.logout();
-
- // Verify that the custom role works as expected.
- admindb.auth("test", "pass");
- assert.eq("tomato", coll.findOne().word);
- admindb.logout();
-
- // Dump the database.
- t.runTool("dump", "--out", t.ext, "--username", "backup", "--password", "pass");
-
- // Drop the relevant data in the database.
- admindb.auth("root", "pass");
- coll.getDB().dropDatabase();
- admindb.dropUser("backup");
- admindb.dropUser("test");
- admindb.dropRole("customRole");
-
- assert.eq(2, admindb.system.users.count(), "didn't drop backup and test users");
- assert.eq(0, admindb.system.roles.count(), "didn't drop roles");
- assert.eq(0, coll.count(), "didn't drop foo coll");
-
- // This test depends on W=0 to mask unique index violations.
- // This should be fixed once we implement TOOLS-341
- t.runTool("restore",
- "--dir", t.ext,
- "--username", "restore",
- "--password", "pass",
- "--writeConcern", "0");
-
- assert.soon("admindb.system.users.findOne()", "no data after restore");
- assert.eq(4, admindb.system.users.count(), "didn't restore users");
- assert.eq(2, admindb.system.users.getIndexes().length,
- "didn't restore user indexes");
- assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
- assert.eq(2, admindb.system.roles.getIndexes().length,
- "didn't restore role indexes");
-
- admindb.logout();
-
- // Login as user with customRole to verify privileges are restored.
- admindb.auth("test", "pass");
- assert.eq("tomato", coll.findOne().word);
- admindb.logout();
-
- admindb.auth("root", "pass");
- admindb.createUser({user: "root2", pwd: "pass", roles: ["root"]});
- admindb.dropRole("customRole");
- admindb.createRole({role: "customRole2", roles: [], privileges:[]});
- admindb.dropUser("root");
- admindb.logout();
-
- t.runTool("restore",
- "--dir", t.ext,
- "--username", "restore",
- "--password", "pass",
- "--drop",
- "--writeConcern", "0");
-
- admindb.auth("root", "pass");
- assert.soon("1 == admindb.system.users.find({user:'root'}).count()", "didn't restore users 2");
- assert.eq(0, admindb.system.users.find({user:'root2'}).count(), "didn't drop users");
- assert.eq(0, admindb.system.roles.find({role:'customRole2'}).count(), "didn't drop roles");
- assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
- assert.eq(2, admindb.system.users.getIndexes().length,
- "didn't maintain user indexes");
- assert.eq(2, admindb.system.roles.getIndexes().length,
- "didn't maintain role indexes");
- assert.eq(1, admindb.system.version.count(), "didn't restore version");
- assert.docEq(versionDoc, admindb.system.version.findOne(),
- "version doc wasn't restored properly");
- admindb.logout();
-
- t.stop();
+ t = new ToolTest("dumprestore_auth2", {auth: ""});
+
+ coll = t.startDB("foo");
+ admindb = coll.getDB().getSiblingDB("admin");
+
+ // Create the relevant users and roles.
+ admindb.createUser({user: "root", pwd: "pass", roles: ["root"]});
+ admindb.auth("root", "pass");
+
+ admindb.createUser({user: "backup", pwd: "pass", roles: [backup_role]});
+ admindb.createUser({user: "restore", pwd: "pass", roles: [restore_role]});
+
+ admindb.createRole({
+ role: "customRole",
+ privileges: [{
+ resource: {db: "jstests_tool_dumprestore_auth2", collection: "foo"},
+ actions: ["find"]
+ }],
+ roles: []
+ });
+ admindb.createUser({user: "test", pwd: "pass", roles: ["customRole"]});
+
+ coll.insert({word: "tomato"});
+ assert.eq(1, coll.count());
+
+ assert.eq(4, admindb.system.users.count(), "setup users");
+ assert.eq(2,
+ admindb.system.users.getIndexes().length,
+ "setup2: " + tojson(admindb.system.users.getIndexes()));
+ assert.eq(1, admindb.system.roles.count(), "setup3");
+ assert.eq(2, admindb.system.roles.getIndexes().length, "setup4");
+ assert.eq(1, admindb.system.version.count());
+ var versionDoc = admindb.system.version.findOne();
+
+ // Logout root user.
+ admindb.logout();
+
+ // Verify that the custom role works as expected.
+ admindb.auth("test", "pass");
+ assert.eq("tomato", coll.findOne().word);
+ admindb.logout();
+
+ // Dump the database.
+ t.runTool("dump", "--out", t.ext, "--username", "backup", "--password", "pass");
+
+ // Drop the relevant data in the database.
+ admindb.auth("root", "pass");
+ coll.getDB().dropDatabase();
+ admindb.dropUser("backup");
+ admindb.dropUser("test");
+ admindb.dropRole("customRole");
+
+ assert.eq(2, admindb.system.users.count(), "didn't drop backup and test users");
+ assert.eq(0, admindb.system.roles.count(), "didn't drop roles");
+ assert.eq(0, coll.count(), "didn't drop foo coll");
+
+ // This test depends on W=0 to mask unique index violations.
+ // This should be fixed once we implement TOOLS-341
+ t.runTool("restore",
+ "--dir",
+ t.ext,
+ "--username",
+ "restore",
+ "--password",
+ "pass",
+ "--writeConcern",
+ "0");
+
+ assert.soon("admindb.system.users.findOne()", "no data after restore");
+ assert.eq(4, admindb.system.users.count(), "didn't restore users");
+ assert.eq(2, admindb.system.users.getIndexes().length, "didn't restore user indexes");
+ assert.eq(1, admindb.system.roles.find({role: 'customRole'}).count(), "didn't restore roles");
+ assert.eq(2, admindb.system.roles.getIndexes().length, "didn't restore role indexes");
+
+ admindb.logout();
+
+ // Login as user with customRole to verify privileges are restored.
+ admindb.auth("test", "pass");
+ assert.eq("tomato", coll.findOne().word);
+ admindb.logout();
+
+ admindb.auth("root", "pass");
+ admindb.createUser({user: "root2", pwd: "pass", roles: ["root"]});
+ admindb.dropRole("customRole");
+ admindb.createRole({role: "customRole2", roles: [], privileges: []});
+ admindb.dropUser("root");
+ admindb.logout();
+
+ t.runTool("restore",
+ "--dir",
+ t.ext,
+ "--username",
+ "restore",
+ "--password",
+ "pass",
+ "--drop",
+ "--writeConcern",
+ "0");
+
+ admindb.auth("root", "pass");
+ assert.soon("1 == admindb.system.users.find({user:'root'}).count()", "didn't restore users 2");
+ assert.eq(0, admindb.system.users.find({user: 'root2'}).count(), "didn't drop users");
+ assert.eq(0, admindb.system.roles.find({role: 'customRole2'}).count(), "didn't drop roles");
+ assert.eq(1, admindb.system.roles.find({role: 'customRole'}).count(), "didn't restore roles");
+ assert.eq(2, admindb.system.users.getIndexes().length, "didn't maintain user indexes");
+ assert.eq(2, admindb.system.roles.getIndexes().length, "didn't maintain role indexes");
+ assert.eq(1, admindb.system.version.count(), "didn't restore version");
+ assert.docEq(
+ versionDoc, admindb.system.version.findOne(), "version doc wasn't restored properly");
+ admindb.logout();
+
+ t.stop();
};
diff --git a/jstests/tool/dumprestore_auth3.js b/jstests/tool/dumprestore_auth3.js
index 62eed2e7d84..6157020c2dd 100644
--- a/jstests/tool/dumprestore_auth3.js
+++ b/jstests/tool/dumprestore_auth3.js
@@ -4,7 +4,9 @@
// Runs the tool with the given name against the given mongod.
function runTool(toolName, mongod, options) {
- var opts = {host: mongod.host};
+ var opts = {
+ host: mongod.host
+ };
Object.extend(opts, options);
MongoRunner.runMongoTool(toolName, opts);
}
@@ -19,13 +21,15 @@ var dumpRestoreAuth3 = function(backup_role, restore_role) {
admindb.createUser({user: 'root', pwd: 'pass', roles: ['root']});
admindb.createUser({user: 'backup', pwd: 'pass', roles: ['backup']});
admindb.createUser({user: 'restore', pwd: 'pass', roles: ['restore']});
- admindb.createRole({role: "dummyRole", roles: [], privileges:[]});
+ admindb.createRole({role: "dummyRole", roles: [], privileges: []});
db.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
- db.createRole({role: 'role', roles: [], privileges:[]});
+ db.createRole({role: 'role', roles: [], privileges: []});
var backupActions = ['find'];
- db.createRole({role: 'backupFooChester',
- privileges: [{resource: {db: 'foo', collection: 'chester'}, actions: backupActions}],
- roles: []});
+ db.createRole({
+ role: 'backupFooChester',
+ privileges: [{resource: {db: 'foo', collection: 'chester'}, actions: backupActions}],
+ roles: []
+ });
db.createUser({user: 'backupFooChester', pwd: 'pass', roles: ['backupFooChester']});
var userCount = db.getUsers().length;
@@ -35,7 +39,7 @@ var dumpRestoreAuth3 = function(backup_role, restore_role) {
var systemUsersCount = admindb.system.users.count();
var systemVersionCount = admindb.system.version.count();
- db.bar.insert({a:1});
+ db.bar.insert({a: 1});
assert.eq(1, db.bar.findOne().a);
assert.eq(userCount, db.getUsers().length, "setup");
@@ -43,7 +47,7 @@ var dumpRestoreAuth3 = function(backup_role, restore_role) {
assert.eq(adminUsersCount, admindb.getUsers().length, "setup3");
assert.eq(adminRolesCount, admindb.getRoles().length, "setup4");
assert.eq(systemUsersCount, admindb.system.users.count(), "setup5");
- assert.eq(systemVersionCount, admindb.system.version.count(),"system version");
+ assert.eq(systemVersionCount, admindb.system.version.count(), "system version");
assert.eq(1, admindb.system.users.count({user: "restore"}), "Restore user is missing");
assert.eq(1, admindb.system.users.count({user: "backup"}), "Backup user is missing");
var versionDoc = admindb.system.version.findOne();
@@ -60,21 +64,22 @@ var dumpRestoreAuth3 = function(backup_role, restore_role) {
jsTestLog("Restore foo database from dump that doesn't contain user data ");
// This test depends on W=0 to mask unique index violations.
// This should be fixed once we implement TOOLS-341
- runTool("mongorestore",
- mongod,
- {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"}
- );
+ runTool("mongorestore",
+ mongod,
+ {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
db = mongod.getDB('foo');
- assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+ assert.soon(function() {
+ return db.bar.findOne();
+ }, "no data after restore");
assert.eq(1, db.bar.findOne().a);
assert.eq(0, db.getUsers().length, "Restore created users somehow");
assert.eq(0, db.getRoles().length, "Restore created roles somehow");
// Re-create user data
db.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
- db.createRole({role: 'role', roles: [], privileges:[]});
+ db.createRole({role: 'role', roles: [], privileges: []});
userCount = 1;
rolesCount = 1;
@@ -98,25 +103,28 @@ var dumpRestoreAuth3 = function(backup_role, restore_role) {
runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', writeConcern: "0"});
db = mongod.getDB('foo');
- assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+ assert.soon(function() {
+ return db.bar.findOne();
+ }, "no data after restore");
assert.eq(1, db.bar.findOne().a);
assert.eq(0, db.getUsers().length, "Restored users even though it shouldn't have");
assert.eq(0, db.getRoles().length, "Restored roles even though it shouldn't have");
jsTestLog("Restore foo database *with* user data");
- runTool("mongorestore",
- mongod,
- {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"}
- );
+ runTool("mongorestore",
+ mongod,
+ {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
db = mongod.getDB('foo');
admindb = mongod.getDB('admin');
- assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+ assert.soon(function() {
+ return db.bar.findOne();
+ }, "no data after restore");
assert.eq(1, db.bar.findOne().a);
assert.eq(userCount, db.getUsers().length, "didn't restore users");
assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
- assert.eq(1, admindb.system.users.count({user: "restore", db: "admin"}),
- "Restore user is missing");
+ assert.eq(
+ 1, admindb.system.users.count({user: "restore", db: "admin"}), "Restore user is missing");
assert.docEq(versionDoc,
db.getSiblingDB('admin').system.version.findOne(),
"version doc was changed by restore");
@@ -125,18 +133,25 @@ var dumpRestoreAuth3 = function(backup_role, restore_role) {
db.dropUser('user');
db.createUser({user: 'user2', pwd: 'password2', roles: jsTest.basicUserRoles});
db.dropRole('role');
- db.createRole({role: 'role2', roles: [], privileges:[]});
+ db.createRole({role: 'role2', roles: [], privileges: []});
jsTestLog("Restore foo database (and user data) with --drop so it overrides the changes made");
// Restore with --drop to override the changes to user data
- runTool("mongorestore",
- mongod,
- {dir: dumpDir + "foo/", db: 'foo', drop: "", restoreDbUsersAndRoles: "", writeConcern: "0"}
- );
+ runTool("mongorestore",
+ mongod,
+ {
+ dir: dumpDir + "foo/",
+ db: 'foo',
+ drop: "",
+ restoreDbUsersAndRoles: "",
+ writeConcern: "0"
+ });
db = mongod.getDB('foo');
admindb = mongod.getDB('admin');
- assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+ assert.soon(function() {
+ return db.bar.findOne();
+ }, "no data after restore");
assert.eq(adminUsersCount, admindb.getUsers().length, "Admin users were dropped");
assert.eq(adminRolesCount, admindb.getRoles().length, "Admin roles were dropped");
assert.eq(1, db.bar.findOne().a);
@@ -148,7 +163,6 @@ var dumpRestoreAuth3 = function(backup_role, restore_role) {
db.getSiblingDB('admin').system.version.findOne(),
"version doc was changed by restore");
-
jsTestLog("Dump just the admin database. User data should be dumped by default");
// Make a user in another database to make sure it is properly captured
db.getSiblingDB('bar').createUser({user: "user", pwd: 'pwd', roles: []});
@@ -163,15 +177,16 @@ var dumpRestoreAuth3 = function(backup_role, restore_role) {
db.getSiblingDB('admin').dropAllUsers();
jsTestLog("Restore just the admin database. User data should be restored by default");
- runTool("mongorestore",
- mongod,
- {dir: dumpDir + "admin/", db: 'admin', drop: "", writeConcern: "0"}
- );
+ runTool("mongorestore",
+ mongod,
+ {dir: dumpDir + "admin/", db: 'admin', drop: "", writeConcern: "0"});
db = mongod.getDB('foo');
var otherdb = db.getSiblingDB('bar');
var admindb = db.getSiblingDB('admin');
- assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+ assert.soon(function() {
+ return db.bar.findOne();
+ }, "no data after restore");
assert.eq(1, db.bar.findOne().a);
assert.eq(userCount, db.getUsers().length, "didn't restore users");
assert.eq("user", db.getUser('user').user, "didn't restore user");
@@ -179,8 +194,8 @@ var dumpRestoreAuth3 = function(backup_role, restore_role) {
assert.eq("role", db.getRole('role').role, "didn't restore role");
assert.eq(1, otherdb.getUsers().length, "didn't restore users for bar database");
assert.eq("user", otherdb.getUsers()[0].user, "didn't restore user for bar database");
- assert.eq(adminUsersCount, admindb.getUsers().length,
- "didn't restore users for admin database");
+ assert.eq(
+ adminUsersCount, admindb.getUsers().length, "didn't restore users for admin database");
assert.eq("user", admindb.getUser("user").user, "didn't restore user for admin database");
assert.eq(6, admindb.system.users.count(), "has the wrong # of users for the whole server");
assert.eq(2, admindb.system.roles.count(), "has the wrong # of roles for the whole server");
@@ -204,7 +219,9 @@ var dumpRestoreAuth3 = function(backup_role, restore_role) {
runTool("mongorestore", mongod, {dir: dumpDir, writeConcern: "0"});
db = mongod.getDB('foo');
- assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+ assert.soon(function() {
+ return db.bar.findOne();
+ }, "no data after restore");
assert.eq(1, db.bar.findOne().a);
assert.eq(1, db.getUsers().length, "didn't restore users");
assert.eq(1, db.getRoles().length, "didn't restore roles");
diff --git a/jstests/tool/dumprestore_excludecollections.js b/jstests/tool/dumprestore_excludecollections.js
index ac2059838a8..4563b8ffc03 100644
--- a/jstests/tool/dumprestore_excludecollections.js
+++ b/jstests/tool/dumprestore_excludecollections.js
@@ -1,7 +1,5 @@
// Tests for mongodump options for excluding collections
-
-
var testBaseName = "jstests_tool_dumprestore_excludecollections";
var dumpDir = MongoRunner.dataPath + testBaseName + "_dump_external/";
@@ -12,51 +10,58 @@ var mongodDest = MongoRunner.runMongod();
var destDB = mongodDest.getDB(testBaseName);
jsTest.log("Inserting documents into source mongod");
-sourceDB.test.insert({x:1});
-sourceDB.test2.insert({x:2});
-sourceDB.test3.insert({x:3});
-sourceDB.foo.insert({f:1});
-sourceDB.foo2.insert({f:2});
+sourceDB.test.insert({x: 1});
+sourceDB.test2.insert({x: 2});
+sourceDB.test3.insert({x: 3});
+sourceDB.foo.insert({f: 1});
+sourceDB.foo2.insert({f: 2});
jsTest.log("Testing incompabible option combinations");
resetDbpath(dumpDir);
-ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
- excludeCollection : "test",
- host : mongodSource.host });
+ret = MongoRunner.runMongoTool("mongodump",
+ {out: dumpDir, excludeCollection: "test", host: mongodSource.host});
assert.neq(ret, 0, "mongodump started successfully with --excludeCollection but no --db option");
resetDbpath(dumpDir);
-ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
- db : testBaseName,
- collection : "foo",
- excludeCollection : "test",
- host : mongodSource.host });
+ret = MongoRunner.runMongoTool("mongodump",
+ {
+ out: dumpDir,
+ db: testBaseName,
+ collection: "foo",
+ excludeCollection: "test",
+ host: mongodSource.host
+ });
assert.neq(ret, 0, "mongodump started successfully with --excludeCollection and --collection");
resetDbpath(dumpDir);
-ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
- excludeCollectionsWithPrefix : "test",
- host : mongodSource.host });
-assert.neq(ret, 0, "mongodump started successfully with --excludeCollectionsWithPrefix but " +
- "no --db option");
+ret = MongoRunner.runMongoTool(
+ "mongodump", {out: dumpDir, excludeCollectionsWithPrefix: "test", host: mongodSource.host});
+assert.neq(ret,
+ 0,
+ "mongodump started successfully with --excludeCollectionsWithPrefix but " +
+ "no --db option");
resetDbpath(dumpDir);
-ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
- db : testBaseName,
- collection : "foo",
- excludeCollectionsWithPrefix : "test",
- host : mongodSource.host });
-assert.neq(ret, 0, "mongodump started successfully with --excludeCollectionsWithPrefix and " +
- "--collection");
+ret = MongoRunner.runMongoTool("mongodump",
+ {
+ out: dumpDir,
+ db: testBaseName,
+ collection: "foo",
+ excludeCollectionsWithPrefix: "test",
+ host: mongodSource.host
+ });
+assert.neq(ret,
+ 0,
+ "mongodump started successfully with --excludeCollectionsWithPrefix and " +
+ "--collection");
jsTest.log("Testing proper behavior of collection exclusion");
resetDbpath(dumpDir);
-ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
- db : testBaseName,
- excludeCollection : "test",
- host : mongodSource.host });
+ret = MongoRunner.runMongoTool(
+ "mongodump",
+ {out: dumpDir, db: testBaseName, excludeCollection: "test", host: mongodSource.host});
-ret = MongoRunner.runMongoTool("mongorestore", { dir : dumpDir, host : mongodDest.host });
+ret = MongoRunner.runMongoTool("mongorestore", {dir: dumpDir, host: mongodDest.host});
assert.eq(ret, 0, "failed to run mongodump on expected successful call");
assert.eq(destDB.test.count(), 0, "Found documents in collection that we excluded");
assert.eq(destDB.test2.count(), 1, "Did not find document in collection that we did not exclude");
@@ -70,12 +75,15 @@ assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
destDB.dropDatabase();
resetDbpath(dumpDir);
-ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
- db : testBaseName,
- excludeCollectionsWithPrefix : "test",
- host : mongodSource.host });
-
-ret = MongoRunner.runMongoTool("mongorestore", { dir : dumpDir, host : mongodDest.host });
+ret = MongoRunner.runMongoTool("mongodump",
+ {
+ out: dumpDir,
+ db: testBaseName,
+ excludeCollectionsWithPrefix: "test",
+ host: mongodSource.host
+ });
+
+ret = MongoRunner.runMongoTool("mongorestore", {dir: dumpDir, host: mongodDest.host});
assert.eq(ret, 0, "failed to run mongodump on expected successful call");
assert.eq(destDB.test.count(), 0, "Found documents in collection that we excluded");
assert.eq(destDB.test2.count(), 0, "Found documents in collection that we excluded");
@@ -87,13 +95,16 @@ assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
destDB.dropDatabase();
resetDbpath(dumpDir);
-ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
- db : testBaseName,
- excludeCollection : "foo",
- excludeCollectionsWithPrefix : "test",
- host : mongodSource.host });
-
-ret = MongoRunner.runMongoTool("mongorestore", { dir : dumpDir, host : mongodDest.host });
+ret = MongoRunner.runMongoTool("mongodump",
+ {
+ out: dumpDir,
+ db: testBaseName,
+ excludeCollection: "foo",
+ excludeCollectionsWithPrefix: "test",
+ host: mongodSource.host
+ });
+
+ret = MongoRunner.runMongoTool("mongorestore", {dir: dumpDir, host: mongodDest.host});
assert.eq(ret, 0, "failed to run mongodump on expected successful call");
assert.eq(destDB.test.count(), 0, "Found documents in collection that we excluded");
assert.eq(destDB.test2.count(), 0, "Found documents in collection that we excluded");
diff --git a/jstests/tool/dumpsecondary.js b/jstests/tool/dumpsecondary.js
index 31feacba674..9abe8d7476e 100644
--- a/jstests/tool/dumpsecondary.js
+++ b/jstests/tool/dumpsecondary.js
@@ -1,4 +1,4 @@
-var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} );
+var replTest = new ReplSetTest({name: 'testSet', nodes: 2});
var nodes = replTest.startSet();
replTest.initiate();
@@ -9,29 +9,41 @@ db.foo.save({a: 1000});
replTest.awaitReplication();
replTest.awaitSecondaryNodes();
-assert.eq( 1 , db.foo.count() , "setup" );
+assert.eq(1, db.foo.count(), "setup");
var slaves = replTest.liveNodes.slaves;
-assert( slaves.length == 1, "Expected 1 slave but length was " + slaves.length );
+assert(slaves.length == 1, "Expected 1 slave but length was " + slaves.length);
slave = slaves[0];
-var args = ['mongodump', '-h', slave.host, '--out', MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
-var authargs = ['--username', jsTest.options().authUser, '--password', jsTest.options().authPassword];
+var args = [
+ 'mongodump',
+ '-h',
+ slave.host,
+ '--out',
+ MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'
+];
+var authargs =
+ ['--username', jsTest.options().authUser, '--password', jsTest.options().authPassword];
if (jsTest.options().keyFile) {
args = args.concat(authargs);
}
runMongoProgram.apply(null, args);
db.foo.drop();
-assert.eq( 0 , db.foo.count() , "after drop" );
-args = ['mongorestore', '-h', master.host, MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+assert.eq(0, db.foo.count(), "after drop");
+args = [
+ 'mongorestore',
+ '-h',
+ master.host,
+ MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'
+];
if (jsTest.options().keyFile) {
args = args.concat(authargs);
}
runMongoProgram.apply(null, args);
-assert.soon( "db.foo.findOne()" , "no data after sleep" );
-assert.eq( 1 , db.foo.count() , "after restore" );
-assert.eq( 1000 , db.foo.findOne().a , "after restore 2" );
+assert.soon("db.foo.findOne()", "no data after sleep");
+assert.eq(1, db.foo.count(), "after restore");
+assert.eq(1000, db.foo.findOne().a, "after restore 2");
resetDbpath(MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external');
diff --git a/jstests/tool/exportimport1.js b/jstests/tool/exportimport1.js
index 61379379fa4..69124b1f6b2 100644
--- a/jstests/tool/exportimport1.js
+++ b/jstests/tool/exportimport1.js
@@ -1,56 +1,55 @@
// exportimport1.js
-t = new ToolTest( "exportimport1" );
+t = new ToolTest("exportimport1");
-c = t.startDB( "foo" );
-assert.eq( 0 , c.count() , "setup1" );
+c = t.startDB("foo");
+assert.eq(0, c.count(), "setup1");
var arr = ["x", undefined, "y", undefined];
-c.save( { a : 22 , b : arr} );
-assert.eq( 1 , c.count() , "setup2" );
+c.save({a: 22, b: arr});
+assert.eq(1, c.count(), "setup2");
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
-assert.soon( "c.findOne()" , "no data after sleep" );
-assert.eq( 1 , c.count() , "after restore 2" );
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo");
+assert.soon("c.findOne()", "no data after sleep");
+assert.eq(1, c.count(), "after restore 2");
var doc = c.findOne();
-assert.eq( 22 , doc.a , "after restore 2" );
-for (var i=0; i<arr.length; i++) {
- assert.eq( arr[i], doc.b[i] , "after restore array: "+i );
+assert.eq(22, doc.a, "after restore 2");
+for (var i = 0; i < arr.length; i++) {
+ assert.eq(arr[i], doc.b[i], "after restore array: " + i);
}
// now with --jsonArray
-t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+t.runTool("export", "--jsonArray", "--out", t.extFile, "-d", t.baseName, "-c", "foo");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
-t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
-assert.soon( "c.findOne()" , "no data after sleep" );
-assert.eq( 1 , c.count() , "after restore 2" );
-assert.eq( 22 , c.findOne().a , "after restore 2" );
+t.runTool("import", "--jsonArray", "--file", t.extFile, "-d", t.baseName, "-c", "foo");
+assert.soon("c.findOne()", "no data after sleep");
+assert.eq(1, c.count(), "after restore 2");
+assert.eq(22, c.findOne().a, "after restore 2");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
arr = ["a", undefined, "c"];
-c.save({a : arr});
-assert.eq( 1 , c.count() , "setup2" );
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+c.save({a: arr});
+assert.eq(1, c.count(), "setup2");
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
-assert.soon( "c.findOne()" , "no data after sleep" );
-assert.eq( 1 , c.count() , "after restore 2" );
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo");
+assert.soon("c.findOne()", "no data after sleep");
+assert.eq(1, c.count(), "after restore 2");
var doc = c.findOne();
-for (var i=0; i<arr.length; i++) {
- assert.eq( arr[i], doc.a[i] , "after restore array: "+i );
+for (var i = 0; i < arr.length; i++) {
+ assert.eq(arr[i], doc.a[i], "after restore array: " + i);
}
-
t.stop();
diff --git a/jstests/tool/exportimport3.js b/jstests/tool/exportimport3.js
index 686ff467a6f..481db797964 100644
--- a/jstests/tool/exportimport3.js
+++ b/jstests/tool/exportimport3.js
@@ -1,27 +1,25 @@
// exportimport3.js
-t = new ToolTest( "exportimport3" );
+t = new ToolTest("exportimport3");
-c = t.startDB( "foo" );
-assert.eq( 0 , c.count() , "setup1" );
-c.save({a:1});
-c.save({a:2});
-c.save({a:3});
-c.save({a:4});
-c.save({a:5});
+c = t.startDB("foo");
+assert.eq(0, c.count(), "setup1");
+c.save({a: 1});
+c.save({a: 2});
+c.save({a: 3});
+c.save({a: 4});
+c.save({a: 5});
-assert.eq( 5 , c.count() , "setup2" );
+assert.eq(5, c.count(), "setup2");
-
-t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+t.runTool("export", "--jsonArray", "--out", t.extFile, "-d", t.baseName, "-c", "foo");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
-
-t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
-assert.soon( "c.findOne()" , "no data after sleep" );
-assert.eq( 5 , c.count() , "after restore 2" );
+t.runTool("import", "--jsonArray", "--file", t.extFile, "-d", t.baseName, "-c", "foo");
+assert.soon("c.findOne()", "no data after sleep");
+assert.eq(5, c.count(), "after restore 2");
t.stop();
diff --git a/jstests/tool/exportimport4.js b/jstests/tool/exportimport4.js
index 605e21b7337..9c6f6d70b0a 100644
--- a/jstests/tool/exportimport4.js
+++ b/jstests/tool/exportimport4.js
@@ -1,56 +1,57 @@
// exportimport4.js
-t = new ToolTest( "exportimport4" );
-c = t.startDB( "foo" );
+t = new ToolTest("exportimport4");
+c = t.startDB("foo");
install_test_data = function() {
c.drop();
- assert.eq( 0 , c.count() , "setup1" );
+ assert.eq(0, c.count(), "setup1");
- c.save( { a : [1, 2, 3, NaN, 4, null, 5] } );
- c.save( { a : [1, 2, 3, 4, 5] } );
- c.save( { a : [ NaN ] } );
- c.save( { a : [1, 2, 3, 4, NaN, NaN, 5, NaN] } );
- c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+ c.save({a: [1, 2, 3, NaN, 4, null, 5]});
+ c.save({a: [1, 2, 3, 4, 5]});
+ c.save({a: [NaN]});
+ c.save({a: [1, 2, 3, 4, NaN, NaN, 5, NaN]});
+ c.save({a: [1, 2, 3, 4, null, null, 5, null]});
- assert.eq( 5 , c.count() , "setup2" );
+ assert.eq(5, c.count(), "setup2");
};
// attempt to export fields without NaN
install_test_data();
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[NaN]}}" );
+t.runTool(
+ "export", "--out", t.extFile, "-d", t.baseName, "-c", "foo", "-q", "{a:{\"$nin\":[NaN]}}");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--drop");
-assert.eq( 2 , c.count() , "after restore 1" );
+assert.eq(2, c.count(), "after restore 1");
// attempt to export fields with NaN
install_test_data();
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:NaN}" );
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo", "-q", "{a:NaN}");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--drop");
-assert.eq( 3 , c.count() , "after restore 2" );
+assert.eq(3, c.count(), "after restore 2");
// attempt to export everything
install_test_data();
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--drop");
-assert.eq( 5 , c.count() , "after restore 3" );
+assert.eq(5, c.count(), "after restore 3");
t.stop();
diff --git a/jstests/tool/exportimport5.js b/jstests/tool/exportimport5.js
index 427b03f0232..380e9391118 100644
--- a/jstests/tool/exportimport5.js
+++ b/jstests/tool/exportimport5.js
@@ -1,81 +1,90 @@
// exportimport4.js
-t = new ToolTest( "exportimport5" );
-c = t.startDB( "foo" );
+t = new ToolTest("exportimport5");
+c = t.startDB("foo");
install_test_data = function() {
c.drop();
- assert.eq( 0 , c.count() , "setup1" );
+ assert.eq(0, c.count(), "setup1");
- c.save( { a : [1, 2, 3, Infinity, 4, null, 5] } );
- c.save( { a : [1, 2, 3, 4, 5] } );
- c.save( { a : [ Infinity ] } );
- c.save( { a : [1, 2, 3, 4, Infinity, Infinity, 5, -Infinity] } );
- c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
- c.save( { a : [ -Infinity ] } );
+ c.save({a: [1, 2, 3, Infinity, 4, null, 5]});
+ c.save({a: [1, 2, 3, 4, 5]});
+ c.save({a: [Infinity]});
+ c.save({a: [1, 2, 3, 4, Infinity, Infinity, 5, -Infinity]});
+ c.save({a: [1, 2, 3, 4, null, null, 5, null]});
+ c.save({a: [-Infinity]});
- assert.eq( 6 , c.count() , "setup2" );
+ assert.eq(6, c.count(), "setup2");
};
// attempt to export fields without Infinity
install_test_data();
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[Infinity]}}" );
+t.runTool(
+ "export", "--out", t.extFile, "-d", t.baseName, "-c", "foo", "-q", "{a:{\"$nin\":[Infinity]}}");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--drop");
-assert.eq( 3 , c.count() , "after restore 1" );
+assert.eq(3, c.count(), "after restore 1");
// attempt to export fields with Infinity
install_test_data();
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:Infinity}" );
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo", "-q", "{a:Infinity}");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--drop");
-assert.eq( 3 , c.count() , "after restore 2" );
+assert.eq(3, c.count(), "after restore 2");
// attempt to export fields without -Infinity
install_test_data();
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[-Infinity]}}" );
+t.runTool("export",
+ "--out",
+ t.extFile,
+ "-d",
+ t.baseName,
+ "-c",
+ "foo",
+ "-q",
+ "{a:{\"$nin\":[-Infinity]}}");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--drop");
-assert.eq( 4 , c.count() , "after restore 3" );
+assert.eq(4, c.count(), "after restore 3");
// attempt to export fields with -Infinity
install_test_data();
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:-Infinity}" );
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo", "-q", "{a:-Infinity}");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--drop");
-assert.eq( 2 , c.count() , "after restore 4" );
+assert.eq(2, c.count(), "after restore 4");
// attempt to export everything
install_test_data();
-t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo");
c.drop();
-assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
-t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--drop");
-assert.eq( 6 , c.count() , "after restore 5" );
+assert.eq(6, c.count(), "after restore 5");
t.stop();
diff --git a/jstests/tool/exportimport6.js b/jstests/tool/exportimport6.js
index 0924638e628..71d89baf01c 100644
--- a/jstests/tool/exportimport6.js
+++ b/jstests/tool/exportimport6.js
@@ -5,17 +5,28 @@ t = new ToolTest("exportimport6");
c = t.startDB("foo");
assert.eq(0, c.count(), "setup1");
-c.save({a:1, b:1});
-c.save({a:1, b:2});
-c.save({a:2, b:3});
-c.save({a:2, b:3});
-c.save({a:3, b:4});
-c.save({a:3, b:5});
+c.save({a: 1, b: 1});
+c.save({a: 1, b: 2});
+c.save({a: 2, b: 3});
+c.save({a: 2, b: 3});
+c.save({a: 3, b: 4});
+c.save({a: 3, b: 5});
assert.eq(6, c.count(), "setup2");
-t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo",
- "--sort", "{a:1, b:-1}", "--skip", "4", "--limit", "1");
+t.runTool("export",
+ "--out",
+ t.extFile,
+ "-d",
+ t.baseName,
+ "-c",
+ "foo",
+ "--sort",
+ "{a:1, b:-1}",
+ "--skip",
+ "4",
+ "--limit",
+ "1");
c.drop();
assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
diff --git a/jstests/tool/exportimport_bigarray.js b/jstests/tool/exportimport_bigarray.js
index 75d508b1ff4..0b801699d1b 100644
--- a/jstests/tool/exportimport_bigarray.js
+++ b/jstests/tool/exportimport_bigarray.js
@@ -11,19 +11,22 @@ dst.drop();
// Calculate the number of documents it takes to get above 16MB (here using 20MB just to be safe)
var bigString = new Array(1025).toString();
-var doc = {_id: new ObjectId(), x:bigString};
+var doc = {
+ _id: new ObjectId(),
+ x: bigString
+};
var docSize = Object.bsonsize(doc);
-var numDocs = Math.floor(20*1024*1024 / docSize);
+var numDocs = Math.floor(20 * 1024 * 1024 / docSize);
print('Size of one document: ' + docSize);
print('Number of documents to exceed maximum BSON size: ' + numDocs);
-print('About to insert ' + numDocs + ' documents into ' +
- exportimport_db.getName() + '.' + src.getName());
+print('About to insert ' + numDocs + ' documents into ' + exportimport_db.getName() + '.' +
+ src.getName());
var i;
var bulk = src.initializeUnorderedBulkOp();
for (i = 0; i < numDocs; ++i) {
- bulk.insert({ x: bigString });
+ bulk.insert({x: bigString});
}
assert.writeOK(bulk.execute());
@@ -31,27 +34,29 @@ data = 'data/exportimport_array_test.json';
print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
' with file: ' + data);
-tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName(),
- '--jsonArray');
+tt.runTool(
+ 'export', '--out', data, '-d', exportimport_db.getName(), '-c', src.getName(), '--jsonArray');
print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
' with file: ' + data);
-tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName(),
- '--jsonArray');
+tt.runTool(
+ 'import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName(), '--jsonArray');
print('About to verify that source and destination collections match');
-src_cursor = src.find().sort({ _id : 1 });
-dst_cursor = dst.find().sort({ _id : 1 });
+src_cursor = src.find().sort({_id: 1});
+dst_cursor = dst.find().sort({_id: 1});
var documentCount = 0;
while (src_cursor.hasNext()) {
- assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
- 'Destination has ' + documentCount + ' documents.');
+ assert(dst_cursor.hasNext(),
+ 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
++documentCount;
}
-assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
- 'Source has ' + documentCount + ' documents.');
+assert(!dst_cursor.hasNext(),
+ 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
print('Verified that source and destination collections match');
diff --git a/jstests/tool/exportimport_date.js b/jstests/tool/exportimport_date.js
index 57a860ca1a8..ab51e0a2458 100644
--- a/jstests/tool/exportimport_date.js
+++ b/jstests/tool/exportimport_date.js
@@ -11,12 +11,12 @@ dst.drop();
// Insert a date that we can format
var formatable = ISODate("1970-01-02T05:00:00Z");
assert.eq(formatable.valueOf(), 104400000);
-src.insert({ "_id" : formatable });
+src.insert({"_id": formatable});
// Insert a date that we cannot format as an ISODate string
var nonformatable = ISODate("3001-01-01T00:00:00Z");
assert.eq(nonformatable.valueOf(), 32535216000000);
-src.insert({ "_id" : nonformatable });
+src.insert({"_id": nonformatable});
// Verify number of documents inserted
assert.eq(2, src.find().itcount());
@@ -25,7 +25,7 @@ data = 'data/exportimport_date_test.json';
print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
' with file: ' + data);
-tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName());
+tt.runTool('export', '--out', data, '-d', exportimport_db.getName(), '-c', src.getName());
print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
' with file: ' + data);
@@ -33,17 +33,19 @@ tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.
print('About to verify that source and destination collections match');
-src_cursor = src.find().sort({ _id : 1 });
-dst_cursor = dst.find().sort({ _id : 1 });
+src_cursor = src.find().sort({_id: 1});
+dst_cursor = dst.find().sort({_id: 1});
var documentCount = 0;
while (src_cursor.hasNext()) {
- assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
- 'Destination has ' + documentCount + ' documents.');
+ assert(dst_cursor.hasNext(),
+ 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
++documentCount;
}
-assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
- 'Source has ' + documentCount + ' documents.');
+assert(!dst_cursor.hasNext(),
+ 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
print('Verified that source and destination collections match');
diff --git a/jstests/tool/exportimport_minkey_maxkey.js b/jstests/tool/exportimport_minkey_maxkey.js
index 3e91b04e0c1..c6e1d5b7ea2 100644
--- a/jstests/tool/exportimport_minkey_maxkey.js
+++ b/jstests/tool/exportimport_minkey_maxkey.js
@@ -8,12 +8,12 @@ var dst = exportimport_db.dst;
src.drop();
dst.drop();
-src.insert({ "_id" : MaxKey });
-src.insert({ "_id" : MinKey });
+src.insert({"_id": MaxKey});
+src.insert({"_id": MinKey});
print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
' with file: ' + tt.extFile);
-tt.runTool('export', '--out' , tt.extFile, '-d', exportimport_db.getName(), '-c', src.getName());
+tt.runTool('export', '--out', tt.extFile, '-d', exportimport_db.getName(), '-c', src.getName());
print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
' with file: ' + tt.extFile);
@@ -21,17 +21,19 @@ tt.runTool('import', '--file', tt.extFile, '-d', exportimport_db.getName(), '-c'
print('About to verify that source and destination collections match');
-src_cursor = src.find().sort({ _id : 1 });
-dst_cursor = dst.find().sort({ _id : 1 });
+src_cursor = src.find().sort({_id: 1});
+dst_cursor = dst.find().sort({_id: 1});
var documentCount = 0;
while (src_cursor.hasNext()) {
- assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
- 'Destination has ' + documentCount + ' documents.');
+ assert(dst_cursor.hasNext(),
+ 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
++documentCount;
}
-assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
- 'Source has ' + documentCount + ' documents.');
+assert(!dst_cursor.hasNext(),
+ 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
print('Verified that source and destination collections match');
diff --git a/jstests/tool/files1.js b/jstests/tool/files1.js
index bd8ec971ad6..190ac983dae 100644
--- a/jstests/tool/files1.js
+++ b/jstests/tool/files1.js
@@ -1,27 +1,27 @@
// files1.js
-t = new ToolTest( "files1" );
+t = new ToolTest("files1");
db = t.startDB();
filename = 'mongod';
-if ( _isWindows() )
+if (_isWindows())
filename += '.exe';
-t.runTool( "files" , "-d" , t.baseName , "put" , filename );
+t.runTool("files", "-d", t.baseName, "put", filename);
md5 = md5sumFile(filename);
file_obj = db.fs.files.findOne();
-assert( file_obj , "A 0" );
+assert(file_obj, "A 0");
md5_stored = file_obj.md5;
md5_computed = db.runCommand({filemd5: file_obj._id}).md5;
-assert.eq( md5 , md5_stored , "A 1" );
-assert.eq( md5 , md5_computed, "A 2" );
+assert.eq(md5, md5_stored, "A 1");
+assert.eq(md5, md5_computed, "A 2");
mkdir(t.ext);
-t.runTool( "files" , "-d" , t.baseName , "get" , filename , '-l' , t.extFile );
+t.runTool("files", "-d", t.baseName, "get", filename, '-l', t.extFile);
md5 = md5sumFile(t.extFile);
-assert.eq( md5 , md5_stored , "B" );
+assert.eq(md5, md5_stored, "B");
t.stop();
diff --git a/jstests/tool/gridfs.js b/jstests/tool/gridfs.js
index c144563c45b..5fbf6e6036b 100644
--- a/jstests/tool/gridfs.js
+++ b/jstests/tool/gridfs.js
@@ -1,15 +1,10 @@
// tests gridfs with a sharded fs.chunks collection.
-var test = new ShardingTest({shards: 3,
- mongos: 1,
- config: 1,
- verbose: 2,
- other: {chunkSize:1}});
+var test = new ShardingTest({shards: 3, mongos: 1, config: 1, verbose: 2, other: {chunkSize: 1}});
var mongos = test.s0;
-
-var filename = "mongod"; // A large file we are guaranteed to have
+var filename = "mongod"; // A large file we are guaranteed to have
if (_isWindows())
filename += ".exe";
@@ -28,7 +23,7 @@ function testGridFS(name) {
assert.eq(d.fs.files.count(), 1);
var fileObj = d.fs.files.findOne();
print("fileObj: " + tojson(fileObj));
- assert.eq(rawmd5, fileObj.md5); //check that mongofiles inserted the correct md5
+ assert.eq(rawmd5, fileObj.md5); // check that mongofiles inserted the correct md5
// Call filemd5 ourself and check results.
var res = d.runCommand({filemd5: fileObj._id});
@@ -37,7 +32,7 @@ function testGridFS(name) {
assert.eq(rawmd5, res.md5);
var numChunks = d.fs.chunks.find({files_id: fileObj._id}).itcount();
- //var numChunks = d.fs.chunks.count({files_id: fileObj._id}) // this is broken for now
+ // var numChunks = d.fs.chunks.count({files_id: fileObj._id}) // this is broken for now
assert.eq(numChunks, res.numChunks);
}
@@ -53,13 +48,13 @@ testGridFS(name);
print('\n\n\t**** sharded collection on files_id ****\n\n');
name = 'sharded_files_id';
test.adminCommand({enablesharding: name});
-test.adminCommand({shardcollection: name+'.fs.chunks', key: {files_id:1}});
+test.adminCommand({shardcollection: name + '.fs.chunks', key: {files_id: 1}});
testGridFS(name);
print('\n\n\t**** sharded collection on files_id,n ****\n\n');
name = 'sharded_files_id_n';
test.adminCommand({enablesharding: name});
-test.adminCommand({shardcollection: name+'.fs.chunks', key: {files_id:1, n:1}});
+test.adminCommand({shardcollection: name + '.fs.chunks', key: {files_id: 1, n: 1}});
testGridFS(name);
test.stop();
diff --git a/jstests/tool/oplog1.js b/jstests/tool/oplog1.js
index bbee73d7f80..ad8146c080e 100644
--- a/jstests/tool/oplog1.js
+++ b/jstests/tool/oplog1.js
@@ -3,26 +3,27 @@
// very basic test for mongooplog
// need a lot more, but test that it functions at all
-t = new ToolTest( "oplog1" );
+t = new ToolTest("oplog1");
db = t.startDB();
output = db.output;
-doc = { _id : 5 , x : 17 };
+doc = {
+ _id: 5,
+ x: 17
+};
assert.commandWorked(db.createCollection(output.getName()));
-db.oplog.insert( { ts : new Timestamp() , "op" : "i" , "ns" : output.getFullName() , "o" : doc } );
+db.oplog.insert({ts: new Timestamp(), "op": "i", "ns": output.getFullName(), "o": doc});
-assert.eq( 0 , output.count() , "before" );
+assert.eq(0, output.count(), "before");
-t.runTool( "oplog" , "--oplogns" , db.getName() + ".oplog" , "--from" , "127.0.0.1:" + t.port , "-vv" );
+t.runTool("oplog", "--oplogns", db.getName() + ".oplog", "--from", "127.0.0.1:" + t.port, "-vv");
-assert.eq( 1 , output.count() , "after" );
+assert.eq(1, output.count(), "after");
-assert.docEq( doc , output.findOne() , "after check" );
+assert.docEq(doc, output.findOne(), "after check");
t.stop();
-
-
diff --git a/jstests/tool/oplog_all_ops.js b/jstests/tool/oplog_all_ops.js
index fb51f2b0b53..fb988174d24 100644
--- a/jstests/tool/oplog_all_ops.js
+++ b/jstests/tool/oplog_all_ops.js
@@ -4,10 +4,10 @@
* Correctness is verified using the dbhash command.
*/
-var repl1 = new ReplSetTest({ name: 'rs1', nodes: [{ nopreallocj: '' },
- { arbiter: true }, { arbiter: true }]});
+var repl1 =
+ new ReplSetTest({name: 'rs1', nodes: [{nopreallocj: ''}, {arbiter: true}, {arbiter: true}]});
-repl1.startSet({ oplogSize: 10 });
+repl1.startSet({oplogSize: 10});
repl1.initiate();
repl1.awaitSecondaryNodes();
@@ -16,52 +16,44 @@ var testDB = repl1Conn.getDB('test');
var testColl = testDB.user;
// op i
-testColl.insert({ x: 1 });
-testColl.insert({ x: 2 });
+testColl.insert({x: 1});
+testColl.insert({x: 2});
// op c
testDB.dropDatabase();
-testColl.insert({ y: 1 });
-testColl.insert({ y: 2 });
-testColl.insert({ y: 3 });
+testColl.insert({y: 1});
+testColl.insert({y: 2});
+testColl.insert({y: 3});
// op u
-testColl.update({}, { $inc: { z: 1 }}, true, true);
+testColl.update({}, {$inc: {z: 1}}, true, true);
// op d
-testColl.remove({ y: 2 });
+testColl.remove({y: 2});
// op n
var oplogColl = repl1Conn.getCollection('local.oplog.rs');
-oplogColl.insert({ ts: new Timestamp(), op: 'n', ns: testColl.getFullName(), 'o': { x: 'noop' }});
-
-var repl2 = new ReplSetTest({
- name: 'rs2',
- nodes: [
- {nopreallocj: ''},
- {arbiter: true},
- {arbiter: true}
- ]
-});
-
-repl2.startSet({ oplogSize: 10 });
+oplogColl.insert({ts: new Timestamp(), op: 'n', ns: testColl.getFullName(), 'o': {x: 'noop'}});
+
+var repl2 =
+ new ReplSetTest({name: 'rs2', nodes: [{nopreallocj: ''}, {arbiter: true}, {arbiter: true}]});
+
+repl2.startSet({oplogSize: 10});
repl2.initiate();
repl2.awaitSecondaryNodes();
var srcConn = repl1.getPrimary();
-runMongoProgram('mongooplog', '--from', repl1.getPrimary().host,
- '--host', repl2.getPrimary().host);
+runMongoProgram('mongooplog', '--from', repl1.getPrimary().host, '--host', repl2.getPrimary().host);
-var repl1Hash = testDB.runCommand({ dbhash: 1 });
+var repl1Hash = testDB.runCommand({dbhash: 1});
var repl2Conn = new Mongo(repl2.getURL());
var testDB2 = repl2Conn.getDB(testDB.getName());
-var repl2Hash = testDB2.runCommand({ dbhash: 1 });
+var repl2Hash = testDB2.runCommand({dbhash: 1});
assert(repl1Hash.md5);
assert.eq(repl1Hash.md5, repl2Hash.md5);
repl1.stopSet();
repl2.stopSet();
-
diff --git a/jstests/tool/restorewithauth.js b/jstests/tool/restorewithauth.js
index 6db4b0bf359..0fd29706ee1 100644
--- a/jstests/tool/restorewithauth.js
+++ b/jstests/tool/restorewithauth.js
@@ -1,5 +1,5 @@
/* SERVER-4972
- * Test for mongorestore on server with --auth allows restore without credentials of colls
+ * Test for mongorestore on server with --auth allows restore without credentials of colls
* with no index
*/
/*
@@ -14,14 +14,13 @@
* 9) Try restore with correct auth credentials. The restore should succeed this time.
*/
-
baseName = "jstests_restorewithauth";
var conn = MongoRunner.runMongod({nojournal: "", bind_ip: "127.0.0.1"});
// write to ns foo.bar
-var foo = conn.getDB( "foo" );
-for( var i = 0; i < 4; i++ ) {
- foo["bar"].save( { "x": i } );
+var foo = conn.getDB("foo");
+for (var i = 0; i < 4; i++) {
+ foo["bar"].save({"x": i});
foo["baz"].save({"x": i});
}
@@ -29,18 +28,18 @@ for( var i = 0; i < 4; i++ ) {
var collNames = foo.getCollectionNames();
assert.neq(-1, collNames.indexOf("bar"), "bar collection doesn't exist");
-//make sure it has no index except _id
+// make sure it has no index except _id
assert.eq(foo.bar.getIndexes().length, 1);
assert.eq(foo.baz.getIndexes().length, 1);
-foo.bar.createIndex({x:1});
+foo.bar.createIndex({x: 1});
assert.eq(foo.bar.getIndexes().length, 2);
assert.eq(foo.baz.getIndexes().length, 1);
// get data dump
var dumpdir = MongoRunner.dataDir + "/restorewithauth-dump1/";
-resetDbpath( dumpdir );
-x = runMongoProgram("mongodump", "--db", "foo", "-h", "127.0.0.1:"+ conn.port, "--out", dumpdir);
+resetDbpath(dumpdir);
+x = runMongoProgram("mongodump", "--db", "foo", "-h", "127.0.0.1:" + conn.port, "--out", dumpdir);
// now drop the db
foo.dropDatabase();
@@ -52,11 +51,11 @@ MongoRunner.stopMongod(conn);
conn = MongoRunner.runMongod({auth: "", nojournal: "", bind_ip: "127.0.0.1"});
// admin user
-var admin = conn.getDB( "admin" );
-admin.createUser({user: "admin" , pwd: "admin", roles: jsTest.adminUserRoles});
-admin.auth( "admin" , "admin" );
+var admin = conn.getDB("admin");
+admin.createUser({user: "admin", pwd: "admin", roles: jsTest.adminUserRoles});
+admin.auth("admin", "admin");
-var foo = conn.getDB( "foo" );
+var foo = conn.getDB("foo");
// make sure no collection with the same name exists
collNames = foo.getCollectionNames();
@@ -64,7 +63,7 @@ assert.eq(-1, collNames.indexOf("bar"), "bar collection already exists");
assert.eq(-1, collNames.indexOf("baz"), "baz collection already exists");
// now try to restore dump
-x = runMongoProgram( "mongorestore", "-h", "127.0.0.1:" + conn.port, "--dir" , dumpdir, "-vvvvv" );
+x = runMongoProgram("mongorestore", "-h", "127.0.0.1:" + conn.port, "--dir", dumpdir, "-vvvvv");
// make sure that the collection isn't restored
collNames = foo.getCollectionNames();
@@ -72,14 +71,19 @@ assert.eq(-1, collNames.indexOf("bar"), "bar collection was restored");
assert.eq(-1, collNames.indexOf("baz"), "baz collection was restored");
// now try to restore dump with correct credentials
-x = runMongoProgram( "mongorestore",
- "-h", "127.0.0.1:" + conn.port,
- "-d", "foo",
- "--authenticationDatabase=admin",
- "-u", "admin",
- "-p", "admin",
- "--dir", dumpdir + "foo/",
- "-vvvvv");
+x = runMongoProgram("mongorestore",
+ "-h",
+ "127.0.0.1:" + conn.port,
+ "-d",
+ "foo",
+ "--authenticationDatabase=admin",
+ "-u",
+ "admin",
+ "-p",
+ "admin",
+ "--dir",
+ dumpdir + "foo/",
+ "-vvvvv");
// make sure that the collection was restored
collNames = foo.getCollectionNames();
@@ -96,11 +100,16 @@ foo.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
// now try to restore dump with foo database credentials
x = runMongoProgram("mongorestore",
- "-h", "127.0.0.1:" + conn.port,
- "-d", "foo",
- "-u", "user",
- "-p", "password",
- "--dir", dumpdir + "foo/",
+ "-h",
+ "127.0.0.1:" + conn.port,
+ "-d",
+ "foo",
+ "-u",
+ "user",
+ "-p",
+ "password",
+ "--dir",
+ dumpdir + "foo/",
"-vvvvv");
// make sure that the collection was restored
@@ -109,6 +118,7 @@ assert.neq(-1, collNames.indexOf("bar"), "bar collection was not restored");
assert.neq(-1, collNames.indexOf("baz"), "baz collection was not restored");
assert.eq(foo.bar.count(), 4);
assert.eq(foo.baz.count(), 4);
-assert.eq(foo.bar.getIndexes().length + foo.baz.getIndexes().length, 3); // _id on foo, _id on bar, x on foo
+assert.eq(foo.bar.getIndexes().length + foo.baz.getIndexes().length,
+ 3); // _id on foo, _id on bar, x on foo
MongoRunner.stopMongod(conn);
diff --git a/jstests/tool/stat1.js b/jstests/tool/stat1.js
index 0b5bf7f02b0..efdbcb0f376 100644
--- a/jstests/tool/stat1.js
+++ b/jstests/tool/stat1.js
@@ -3,13 +3,33 @@
baseName = "tool_stat1";
var m = MongoRunner.runMongod({auth: "", bind_ip: "127.0.0.1"});
-db = m.getDB( "admin" );
+db = m.getDB("admin");
-db.createUser({user: "eliot" , pwd: "eliot", roles: jsTest.adminUserRoles});
-assert( db.auth( "eliot" , "eliot" ) , "auth failed" );
+db.createUser({user: "eliot", pwd: "eliot", roles: jsTest.adminUserRoles});
+assert(db.auth("eliot", "eliot"), "auth failed");
-x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+m.port, "--username", "eliot", "--password", "eliot", "--rowcount", "1", "--authenticationDatabase", "admin" );
+x = runMongoProgram("mongostat",
+ "--host",
+ "127.0.0.1:" + m.port,
+ "--username",
+ "eliot",
+ "--password",
+ "eliot",
+ "--rowcount",
+ "1",
+ "--authenticationDatabase",
+ "admin");
assert.eq(x, 0, "mongostat should exit successfully with eliot:eliot");
-x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+m.port, "--username", "eliot", "--password", "wrong", "--rowcount", "1", "--authenticationDatabase", "admin" );
+x = runMongoProgram("mongostat",
+ "--host",
+ "127.0.0.1:" + m.port,
+ "--username",
+ "eliot",
+ "--password",
+ "wrong",
+ "--rowcount",
+ "1",
+ "--authenticationDatabase",
+ "admin");
assert.neq(x, 0, "mongostat should exit with -1 with eliot:wrong");
diff --git a/jstests/tool/tool1.js b/jstests/tool/tool1.js
index 6fb0a1f0f02..ce5e880b4ba 100644
--- a/jstests/tool/tool1.js
+++ b/jstests/tool/tool1.js
@@ -6,37 +6,52 @@ externalPath = MongoRunner.dataPath + baseName + "_external/";
externalBaseName = "export.json";
externalFile = externalPath + externalBaseName;
-function fileSize(){
- var l = listFiles( externalPath );
- for ( var i=0; i<l.length; i++ ){
- if ( l[i].baseName == externalBaseName )
+function fileSize() {
+ var l = listFiles(externalPath);
+ for (var i = 0; i < l.length; i++) {
+ if (l[i].baseName == externalBaseName)
return l[i].size;
}
return -1;
}
-
-resetDbpath( externalPath );
+resetDbpath(externalPath);
var m = MongoRunner.runMongod({dbpath: dbPath, noprealloc: "", bind_ip: "127.0.0.1"});
-c = m.getDB( baseName ).getCollection( baseName );
-c.save( { a: 1 } );
-assert( c.findOne() );
+c = m.getDB(baseName).getCollection(baseName);
+c.save({a: 1});
+assert(c.findOne());
-runMongoProgram( "mongodump", "--host", "127.0.0.1:" + m.port, "--out", externalPath );
+runMongoProgram("mongodump", "--host", "127.0.0.1:" + m.port, "--out", externalPath);
c.drop();
-runMongoProgram( "mongorestore", "--host", "127.0.0.1:" + m.port, "--dir", externalPath );
-assert.soon( "c.findOne()" , "mongodump then restore has no data w/sleep" );
-assert( c.findOne() , "mongodump then restore has no data" );
-assert.eq( 1 , c.findOne().a , "mongodump then restore has no broken data" );
-
-resetDbpath( externalPath );
-
-assert.eq( -1 , fileSize() , "mongoexport prep invalid" );
-runMongoProgram( "mongoexport", "--host", "127.0.0.1:" + m.port, "-d", baseName, "-c", baseName, "--out", externalFile );
-assert.lt( 10 , fileSize() , "file size changed" );
+runMongoProgram("mongorestore", "--host", "127.0.0.1:" + m.port, "--dir", externalPath);
+assert.soon("c.findOne()", "mongodump then restore has no data w/sleep");
+assert(c.findOne(), "mongodump then restore has no data");
+assert.eq(1, c.findOne().a, "mongodump then restore has no broken data");
+
+resetDbpath(externalPath);
+
+assert.eq(-1, fileSize(), "mongoexport prep invalid");
+runMongoProgram("mongoexport",
+ "--host",
+ "127.0.0.1:" + m.port,
+ "-d",
+ baseName,
+ "-c",
+ baseName,
+ "--out",
+ externalFile);
+assert.lt(10, fileSize(), "file size changed");
c.drop();
-runMongoProgram( "mongoimport", "--host", "127.0.0.1:" + m.port, "-d", baseName, "-c", baseName, "--file", externalFile );
-assert.soon( "c.findOne()" , "mongo import json A" );
-assert( c.findOne() && 1 == c.findOne().a , "mongo import json B" );
+runMongoProgram("mongoimport",
+ "--host",
+ "127.0.0.1:" + m.port,
+ "-d",
+ baseName,
+ "-c",
+ baseName,
+ "--file",
+ externalFile);
+assert.soon("c.findOne()", "mongo import json A");
+assert(c.findOne() && 1 == c.findOne().a, "mongo import json B");
diff --git a/jstests/tool/tool_replset.js b/jstests/tool/tool_replset.js
index 90560c9ee2c..efe55b46605 100644
--- a/jstests/tool/tool_replset.js
+++ b/jstests/tool/tool_replset.js
@@ -17,7 +17,7 @@
(function() {
"use strict";
- var replTest = new ReplSetTest({ name: 'tool_replset', nodes: 2, oplogSize: 5 });
+ var replTest = new ReplSetTest({name: 'tool_replset', nodes: 2, oplogSize: 5});
var nodes = replTest.startSet();
var config = replTest.getReplSetConfig();
config.members[0].priority = 3;
@@ -26,12 +26,12 @@
var master = replTest.getPrimary();
assert.eq(nodes[0], master, "incorrect master elected");
for (var i = 0; i < 100; i++) {
- assert.writeOK(master.getDB("foo").bar.insert({ a: i }));
+ assert.writeOK(master.getDB("foo").bar.insert({a: i}));
}
replTest.awaitReplication();
- var replSetConnString = "tool_replset/127.0.0.1:" + replTest.ports[0] +
- ",127.0.0.1:" + replTest.ports[1];
+ var replSetConnString =
+ "tool_replset/127.0.0.1:" + replTest.ports[0] + ",127.0.0.1:" + replTest.ports[1];
// Test with mongodump/mongorestore
print("dump the db");
@@ -54,33 +54,44 @@
// Test with mongoexport/mongoimport
print("export the collection");
var extFile = MongoRunner.dataDir + "/tool_replset/export";
- runMongoProgram("mongoexport", "--host", replSetConnString, "--out", extFile,
- "-d", "foo", "-c", "bar");
+ runMongoProgram(
+ "mongoexport", "--host", replSetConnString, "--out", extFile, "-d", "foo", "-c", "bar");
print("collection successfully exported, dropping now");
master.getDB("foo").getCollection("bar").drop();
replTest.awaitReplication();
print("import the collection");
- runMongoProgram("mongoimport", "--host", replSetConnString, "--file", extFile,
- "-d", "foo", "-c", "bar");
+ runMongoProgram(
+ "mongoimport", "--host", replSetConnString, "--file", extFile, "-d", "foo", "-c", "bar");
var x = master.getDB("foo").getCollection("bar").count();
assert.eq(x, 100, "mongoimport should have successfully imported the collection");
- var doc = {_id: 5, x: 17};
- var oplogEntry = {ts: new Timestamp(), "op": "i", "ns": "foo.bar", "o": doc, "v": NumberInt(2)};
+ var doc = {
+ _id: 5,
+ x: 17
+ };
+ var oplogEntry = {
+ ts: new Timestamp(),
+ "op": "i",
+ "ns": "foo.bar",
+ "o": doc,
+ "v": NumberInt(2)
+ };
assert.writeOK(master.getDB("local").oplog.rs.insert(oplogEntry));
- assert.eq(100, master.getDB("foo").getCollection("bar").count(), "count before running " +
- "mongooplog was not 100 as expected");
+ assert.eq(100,
+ master.getDB("foo").getCollection("bar").count(),
+ "count before running " + "mongooplog was not 100 as expected");
- runMongoProgram("mongooplog" , "--from", "127.0.0.1:" + replTest.ports[0],
- "--host", replSetConnString);
+ runMongoProgram(
+ "mongooplog", "--from", "127.0.0.1:" + replTest.ports[0], "--host", replSetConnString);
print("finished running mongooplog to replay the oplog");
- assert.eq(101, master.getDB("foo").getCollection("bar").count(), "count after running " +
- "mongooplog was not 101 as expected");
+ assert.eq(101,
+ master.getDB("foo").getCollection("bar").count(),
+ "count after running " + "mongooplog was not 101 as expected");
print("all tests successful, stopping replica set");
diff --git a/jstests/tool/tsv1.js b/jstests/tool/tsv1.js
index 8395a77c711..62316401521 100644
--- a/jstests/tool/tsv1.js
+++ b/jstests/tool/tsv1.js
@@ -1,32 +1,55 @@
// tsv1.js
-t = new ToolTest( "tsv1" );
-
-c = t.startDB( "foo" );
-
-base = { a : "", b : 1 , c : "foobar" , d: 5, e: -6 };
-
-t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "-f" , "a,b,c,d,e" );
-assert.soon( "2 == c.count()" , "restore 2" );
-
-a = c.find().sort( { a : 1 } ).toArray();
+t = new ToolTest("tsv1");
+
+c = t.startDB("foo");
+
+base = {
+ a: "",
+ b: 1,
+ c: "foobar",
+ d: 5,
+ e: -6
+};
+
+t.runTool("import",
+ "--file",
+ "jstests/tool/data/a.tsv",
+ "-d",
+ t.baseName,
+ "-c",
+ "foo",
+ "--type",
+ "tsv",
+ "-f",
+ "a,b,c,d,e");
+assert.soon("2 == c.count()", "restore 2");
+
+a = c.find().sort({a: 1}).toArray();
delete a[0]._id;
delete a[1]._id;
-assert.docEq( { a : "a" , b : "b" , c : "c" , d: "d", e: "e"} , a[1] , "tsv parse 1" );
-assert.docEq( base , a[0] , "tsv parse 0" );
+assert.docEq({a: "a", b: "b", c: "c", d: "d", e: "e"}, a[1], "tsv parse 1");
+assert.docEq(base, a[0], "tsv parse 0");
c.drop();
-assert.eq( 0 , c.count() , "after drop 2" );
-
-t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "--headerline" );
-assert.soon( "c.findOne()" , "no data after sleep" );
-assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq(0, c.count(), "after drop 2");
+
+t.runTool("import",
+ "--file",
+ "jstests/tool/data/a.tsv",
+ "-d",
+ t.baseName,
+ "-c",
+ "foo",
+ "--type",
+ "tsv",
+ "--headerline");
+assert.soon("c.findOne()", "no data after sleep");
+assert.eq(1, c.count(), "after restore 2");
x = c.findOne();
delete x._id;
-assert.docEq( base , x , "tsv parse 2" );
-
-
+assert.docEq(base, x, "tsv parse 2");
t.stop();