author     Mark Benvenuto <mark.benvenuto@mongodb.com>  2016-05-28 17:55:12 -0400
committer  Mark Benvenuto <mark.benvenuto@mongodb.com>  2016-05-28 17:55:12 -0400
commit     6dcdd23dd37ef12c87e71cf59ef01cd82432efe0 (patch)
tree       c8cfb5acb62c80f375bc37e7d4350382deea6a37
parent     d4ac5673ea3f6cef4ce9dbcec90e31813997a528 (diff)
download   mongo-6dcdd23dd37ef12c87e71cf59ef01cd82432efe0.tar.gz
SERVER-23971 Clang-Format code
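This commit is a tree-wide clang-format pass rather than a functional change, which is why the diffstat below touches so many files with mostly small line counts. The exact invocation and .clang-format configuration used for SERVER-23971 are not shown on this page; what follows is a minimal sketch, assuming only that clang-format is on PATH and that a .clang-format file sits at the repository root, of how such a pass over the C++ sources could be reproduced. The script and its constants are hypothetical illustrations, not MongoDB's actual tooling.

    #!/usr/bin/env python
    # Hypothetical helper (not MongoDB's actual tooling): rewrite every C++
    # source under src/mongo in place using the repo's .clang-format rules.
    import os
    import subprocess

    EXTENSIONS = (".cpp", ".h")   # the C++ files seen in the diffstat below
    ROOT = "src/mongo"            # assumed layout; jstests/*.js handled separately

    def format_tree(root=ROOT):
        for dirpath, _, filenames in os.walk(root):
            for name in filenames:
                if name.endswith(EXTENSIONS):
                    path = os.path.join(dirpath, name)
                    # clang-format -i edits the file in place, picking up the
                    # nearest .clang-format file above it in the tree.
                    subprocess.check_call(["clang-format", "-i", path])

    if __name__ == "__main__":
        format_tree()

clang-format can also format JavaScript, so the jstests/*.js files listed below could be handled the same way; whether this commit used that route or separate tooling for the JS tests is not stated on this page.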
-rw-r--r-- jstests/aggregation/bugs/cond.js | 25
-rw-r--r-- jstests/aggregation/bugs/firstlast.js | 16
-rw-r--r-- jstests/aggregation/bugs/match.js | 14
-rw-r--r-- jstests/aggregation/bugs/server10176.js | 43
-rw-r--r-- jstests/aggregation/bugs/server11118.js | 58
-rw-r--r-- jstests/aggregation/bugs/server11675.js | 78
-rw-r--r-- jstests/aggregation/bugs/server12015.js | 5
-rw-r--r-- jstests/aggregation/bugs/server14670.js | 4
-rw-r--r-- jstests/aggregation/bugs/server17943.js | 46
-rw-r--r-- jstests/aggregation/bugs/server18198.js | 8
-rw-r--r-- jstests/aggregation/bugs/server18222.js | 7
-rw-r--r-- jstests/aggregation/bugs/server19095.js | 174
-rw-r--r-- jstests/aggregation/bugs/server20163.js | 109
-rw-r--r-- jstests/aggregation/bugs/server20169.js | 16
-rw-r--r-- jstests/aggregation/bugs/server21632.js | 5
-rw-r--r-- jstests/aggregation/bugs/server22093.js | 5
-rw-r--r-- jstests/aggregation/bugs/server22580.js | 12
-rw-r--r-- jstests/aggregation/bugs/server3253.js | 16
-rw-r--r-- jstests/aggregation/bugs/server4588.js | 16
-rw-r--r-- jstests/aggregation/bugs/server5044.js | 12
-rw-r--r-- jstests/aggregation/bugs/server6121.js | 30
-rw-r--r-- jstests/aggregation/bugs/server6125.js | 4
-rw-r--r-- jstests/aggregation/bugs/server6131.js | 36
-rw-r--r-- jstests/aggregation/bugs/server6179.js | 8
-rw-r--r-- jstests/aggregation/bugs/server6189.js | 86
-rw-r--r-- jstests/aggregation/bugs/server6190.js | 5
-rw-r--r-- jstests/aggregation/bugs/server6195.js | 2
-rw-r--r-- jstests/aggregation/bugs/server6529.js | 14
-rw-r--r-- jstests/aggregation/bugs/server6531.js | 8
-rw-r--r-- jstests/aggregation/bugs/server6556.js | 12
-rw-r--r-- jstests/aggregation/bugs/server7781.js | 12
-rw-r--r-- jstests/aggregation/bugs/server9840.js | 109
-rw-r--r-- jstests/aggregation/bugs/server9841.js | 25
-rw-r--r-- jstests/aggregation/bugs/substr.js | 26
-rw-r--r-- jstests/aggregation/bugs/upperlower.js | 5
-rw-r--r-- jstests/aggregation/expressions/expression_mod.js | 79
-rw-r--r-- jstests/aggregation/expressions/in.js | 46
-rw-r--r-- jstests/aggregation/expressions/indexof_array.js | 16
-rw-r--r-- jstests/aggregation/expressions/indexof_bytes.js | 20
-rw-r--r-- jstests/aggregation/expressions/indexof_codepoints.js | 20
-rw-r--r-- jstests/aggregation/expressions/reduce.js | 90
-rw-r--r-- jstests/aggregation/expressions/split.js | 24
-rw-r--r-- jstests/aggregation/expressions/switch.js | 6
-rw-r--r-- jstests/aggregation/expressions/switch_errors.js | 28
-rw-r--r-- jstests/aggregation/extras/mrabench.js | 11
-rw-r--r-- jstests/aggregation/extras/testutils.js | 8
-rw-r--r-- jstests/aggregation/extras/utils.js | 8
-rw-r--r-- jstests/aggregation/mongos_slaveok.js | 5
-rw-r--r-- jstests/aggregation/sources/graphLookup/airports.js | 78
-rw-r--r-- jstests/aggregation/sources/graphLookup/basic.js | 148
-rw-r--r-- jstests/aggregation/sources/graphLookup/error.js | 40
-rw-r--r-- jstests/aggregation/sources/graphLookup/nested_objects.js | 60
-rw-r--r-- jstests/aggregation/sources/graphLookup/sharded.js | 3
-rw-r--r-- jstests/aggregation/sources/graphLookup/socialite.js | 26
-rw-r--r-- jstests/aggregation/testSlave.js | 4
-rw-r--r-- jstests/aggregation/testall.js | 31
-rw-r--r-- jstests/aggregation/testshard1.js | 83
-rw-r--r-- jstests/aggregation/unwind.js | 7
-rw-r--r-- jstests/auth/access_control_with_unreachable_configs.js | 5
-rw-r--r-- jstests/auth/arbiter.js | 13
-rw-r--r-- jstests/auth/auth_options.js | 4
-rw-r--r-- jstests/auth/auth_schema_upgrade.js | 5
-rw-r--r-- jstests/auth/authz_modifications_access_control.js | 10
-rw-r--r-- jstests/auth/basic_role_auth.js | 236
-rw-r--r-- jstests/auth/clac_system_colls.js | 5
-rw-r--r-- jstests/auth/commands_builtin_roles.js | 10
-rw-r--r-- jstests/auth/commands_user_defined_roles.js | 15
-rw-r--r-- jstests/auth/copyauth.js | 17
-rw-r--r-- jstests/auth/copyauth_between_shards.js | 6
-rw-r--r-- jstests/auth/indexSystemUsers.js | 20
-rw-r--r-- jstests/auth/js_scope_leak.js | 3
-rw-r--r-- jstests/auth/lib/commands_lib.js | 114
-rw-r--r-- jstests/auth/localhostAuthBypass.js | 12
-rw-r--r-- jstests/auth/log_user_basic.js | 11
-rw-r--r-- jstests/auth/pseudo_commands.js | 15
-rw-r--r-- jstests/auth/renameSystemCollections.js | 7
-rw-r--r-- jstests/auth/repl.js | 21
-rw-r--r-- jstests/auth/repl_auth.js | 8
-rw-r--r-- jstests/auth/resource_pattern_matching.js | 57
-rw-r--r-- jstests/auth/role_management_commands.js | 22
-rw-r--r-- jstests/auth/role_management_commands_edge_cases.js | 10
-rw-r--r-- jstests/auth/server-4892.js | 68
-rw-r--r-- jstests/auth/show_log_auth.js | 16
-rw-r--r-- jstests/auth/upgrade_noauth_to_keyfile.js | 12
-rw-r--r-- jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js | 9
-rw-r--r-- jstests/auth/user_defined_roles.js | 27
-rw-r--r-- jstests/auth/user_defined_roles_on_secondaries.js | 13
-rw-r--r-- jstests/auth/user_management_commands.js | 27
-rw-r--r-- jstests/concurrency/fsm_background_workloads/background_base.js | 5
-rw-r--r-- jstests/concurrency/fsm_example_inheritance.js | 29
-rw-r--r-- jstests/concurrency/fsm_libs/cluster.js | 11
-rw-r--r-- jstests/concurrency/fsm_libs/composer.js | 15
-rw-r--r-- jstests/concurrency/fsm_libs/fsm.js | 11
-rw-r--r-- jstests/concurrency/fsm_libs/parse_config.js | 60
-rw-r--r-- jstests/concurrency/fsm_libs/runner.js | 28
-rw-r--r-- jstests/concurrency/fsm_libs/thread_mgr.js | 20
-rw-r--r-- jstests/concurrency/fsm_libs/worker_thread.js | 37
-rw-r--r-- jstests/concurrency/fsm_selftests.js | 6
-rw-r--r-- jstests/concurrency/fsm_workload_modifiers/make_capped.js | 9
-rw-r--r-- jstests/concurrency/fsm_workloads/agg_base.js | 17
-rw-r--r-- jstests/concurrency/fsm_workloads/agg_graph_lookup.js | 39
-rw-r--r-- jstests/concurrency/fsm_workloads/agg_group_external.js | 78
-rw-r--r-- jstests/concurrency/fsm_workloads/agg_match.js | 44
-rw-r--r-- jstests/concurrency/fsm_workloads/agg_sort.js | 41
-rw-r--r-- jstests/concurrency/fsm_workloads/agg_sort_external.js | 70
-rw-r--r-- jstests/concurrency/fsm_workloads/auth_create_role.js | 10
-rw-r--r-- jstests/concurrency/fsm_workloads/auth_create_user.js | 10
-rw-r--r-- jstests/concurrency/fsm_workloads/auth_drop_role.js | 18
-rw-r--r-- jstests/concurrency/fsm_workloads/auth_drop_user.js | 18
-rw-r--r-- jstests/concurrency/fsm_workloads/collmod.js | 8
-rw-r--r-- jstests/concurrency/fsm_workloads/collmod_separate_collections.js | 44
-rw-r--r-- jstests/concurrency/fsm_workloads/compact.js | 7
-rw-r--r-- jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js | 38
-rw-r--r-- jstests/concurrency/fsm_workloads/convert_to_capped_collection.js | 14
-rw-r--r-- jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js | 18
-rw-r--r-- jstests/concurrency/fsm_workloads/count.js | 18
-rw-r--r-- jstests/concurrency/fsm_workloads/count_indexed.js | 43
-rw-r--r-- jstests/concurrency/fsm_workloads/count_limit_skip.js | 72
-rw-r--r-- jstests/concurrency/fsm_workloads/create_capped_collection.js | 23
-rw-r--r-- jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js | 80
-rw-r--r-- jstests/concurrency/fsm_workloads/create_collection.js | 10
-rw-r--r-- jstests/concurrency/fsm_workloads/create_index_background.js | 10
-rw-r--r-- jstests/concurrency/fsm_workloads/distinct.js | 16
-rw-r--r-- jstests/concurrency/fsm_workloads/distinct_noindex.js | 18
-rw-r--r-- jstests/concurrency/fsm_workloads/distinct_projection.js | 21
-rw-r--r-- jstests/concurrency/fsm_workloads/drop_collection.js | 18
-rw-r--r-- jstests/concurrency/fsm_workloads/drop_database.js | 5
-rw-r--r-- jstests/concurrency/fsm_workloads/explain.js | 6
-rw-r--r-- jstests/concurrency/fsm_workloads/explain_aggregate.js | 70
-rw-r--r-- jstests/concurrency/fsm_workloads/explain_count.js | 97
-rw-r--r-- jstests/concurrency/fsm_workloads/explain_distinct.js | 42
-rw-r--r-- jstests/concurrency/fsm_workloads/explain_find.js | 105
-rw-r--r-- jstests/concurrency/fsm_workloads/explain_group.js | 30
-rw-r--r-- jstests/concurrency/fsm_workloads/explain_remove.js | 65
-rw-r--r-- jstests/concurrency/fsm_workloads/explain_update.js | 113
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_inc.js | 10
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js | 122
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_remove.js | 22
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js | 30
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js | 19
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_update.js | 12
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js | 19
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_update_grow.js | 19
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_update_queue.js | 95
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js | 19
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_upsert.js | 41
-rw-r--r-- jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js | 11
-rw-r--r-- jstests/concurrency/fsm_workloads/group.js | 12
-rw-r--r-- jstests/concurrency/fsm_workloads/group_cond.js | 42
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_1char.js | 21
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_2d.js | 74
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js | 19
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_base.js | 6
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_compound.js | 44
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_eval.js | 38
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js | 9
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js | 89
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_large.js | 47
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js | 20
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_multikey.js | 26
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js | 40
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_text.js | 5
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js | 41
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_ttl.js | 5
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js | 40
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_upsert.js | 46
-rw-r--r-- jstests/concurrency/fsm_workloads/indexed_insert_where.js | 9
-rw-r--r-- jstests/concurrency/fsm_workloads/list_indexes.js | 5
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_drop.js | 6
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_inline.js | 22
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_merge.js | 67
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js | 82
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_reduce.js | 75
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js | 57
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_replace.js | 82
-rw-r--r-- jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js | 64
-rw-r--r-- jstests/concurrency/fsm_workloads/plan_cache_drop_database.js | 15
-rw-r--r-- jstests/concurrency/fsm_workloads/reindex.js | 11
-rw-r--r-- jstests/concurrency/fsm_workloads/reindex_background.js | 30
-rw-r--r-- jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js | 5
-rw-r--r-- jstests/concurrency/fsm_workloads/remove_multiple_documents.js | 13
-rw-r--r-- jstests/concurrency/fsm_workloads/remove_single_document.js | 4
-rw-r--r-- jstests/concurrency/fsm_workloads/remove_single_document_eval.js | 46
-rw-r--r-- jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js | 9
-rw-r--r-- jstests/concurrency/fsm_workloads/remove_where.js | 55
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js | 15
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js | 15
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js | 15
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js | 15
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_collection_chain.js | 10
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js | 10
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js | 10
-rw-r--r-- jstests/concurrency/fsm_workloads/rename_collection_droptarget.js | 10
-rw-r--r-- jstests/concurrency/fsm_workloads/server_status.js | 14
-rw-r--r-- jstests/concurrency/fsm_workloads/touch_base.js | 88
-rw-r--r-- jstests/concurrency/fsm_workloads/touch_data.js | 18
-rw-r--r-- jstests/concurrency/fsm_workloads/touch_index.js | 18
-rw-r--r-- jstests/concurrency/fsm_workloads/touch_no_data_no_index.js | 28
-rw-r--r-- jstests/concurrency/fsm_workloads/update_and_bulk_insert.js | 5
-rw-r--r-- jstests/concurrency/fsm_workloads/update_array.js | 5
-rw-r--r-- jstests/concurrency/fsm_workloads/update_check_index.js | 8
-rw-r--r-- jstests/concurrency/fsm_workloads/update_inc.js | 14
-rw-r--r-- jstests/concurrency/fsm_workloads/update_multifield.js | 16
-rw-r--r-- jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js | 48
-rw-r--r-- jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js | 72
-rw-r--r-- jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js | 10
-rw-r--r-- jstests/concurrency/fsm_workloads/update_rename.js | 12
-rw-r--r-- jstests/concurrency/fsm_workloads/update_replace.js | 4
-rw-r--r-- jstests/concurrency/fsm_workloads/update_simple.js | 13
-rw-r--r-- jstests/concurrency/fsm_workloads/update_simple_eval.js | 34
-rw-r--r-- jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js | 9
-rw-r--r-- jstests/concurrency/fsm_workloads/update_upsert_multi.js | 11
-rw-r--r-- jstests/concurrency/fsm_workloads/update_where.js | 75
-rw-r--r-- jstests/concurrency/fsm_workloads/upsert_where.js | 59
-rw-r--r-- jstests/concurrency/fsm_workloads/yield.js | 4
-rw-r--r-- jstests/concurrency/fsm_workloads/yield_and_hashed.js | 106
-rw-r--r-- jstests/concurrency/fsm_workloads/yield_and_sorted.js | 102
-rw-r--r-- jstests/concurrency/fsm_workloads/yield_fetch.js | 30
-rw-r--r-- jstests/concurrency/fsm_workloads/yield_geo_near.js | 123
-rw-r--r-- jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js | 146
-rw-r--r-- jstests/concurrency/fsm_workloads/yield_id_hack.js | 41
-rw-r--r-- jstests/concurrency/fsm_workloads/yield_rooted_or.js | 72
-rw-r--r-- jstests/concurrency/fsm_workloads/yield_sort.js | 54
-rw-r--r-- jstests/concurrency/fsm_workloads/yield_sort_merge.js | 75
-rw-r--r-- jstests/concurrency/fsm_workloads/yield_text.js | 73
-rw-r--r-- jstests/core/apitest_db.js | 10
-rw-r--r-- jstests/core/apitest_dbcollection.js | 4
-rw-r--r-- jstests/core/apply_ops1.js | 15
-rw-r--r-- jstests/core/array4.js | 4
-rw-r--r-- jstests/core/array_match4.js | 4
-rw-r--r-- jstests/core/arrayfind7.js | 7
-rw-r--r-- jstests/core/arrayfind8.js | 12
-rw-r--r-- jstests/core/basic3.js | 5
-rw-r--r-- jstests/core/basic9.js | 5
-rw-r--r-- jstests/core/batch_size.js | 7
-rw-r--r-- jstests/core/big_object1.js | 5
-rw-r--r-- jstests/core/bittest.js | 15
-rw-r--r-- jstests/core/capped6.js | 4
-rw-r--r-- jstests/core/collation_shell_helpers.js | 387
-rw-r--r-- jstests/core/commands_that_do_not_write_do_not_accept_wc.js | 4
-rw-r--r-- jstests/core/connection_status.js | 10
-rw-r--r-- jstests/core/constructors.js | 41
-rw-r--r-- jstests/core/convert_to_capped_nonexistant.js | 10
-rw-r--r-- jstests/core/count11.js | 8
-rw-r--r-- jstests/core/create_indexes.js | 8
-rw-r--r-- jstests/core/crud_api.js | 4
-rw-r--r-- jstests/core/currentop_predicate.js | 22
-rw-r--r-- jstests/core/cursor5.js | 12
-rw-r--r-- jstests/core/cursor6.js | 9
-rw-r--r-- jstests/core/cursor7.js | 60
-rw-r--r-- jstests/core/cursora.js | 10
-rw-r--r-- jstests/core/datasize2.js | 8
-rw-r--r-- jstests/core/date2.js | 4
-rw-r--r-- jstests/core/depth_limit.js | 4
-rw-r--r-- jstests/core/distinct_index1.js | 5
-rw-r--r-- jstests/core/doc_validation_invalid_validators.js | 4
-rw-r--r-- jstests/core/dropdb_race.js | 2
-rw-r--r-- jstests/core/elemMatchProjection.js | 23
-rw-r--r-- jstests/core/eval0.js | 10
-rw-r--r-- jstests/core/eval_mr.js | 19
-rw-r--r-- jstests/core/evalg.js | 13
-rw-r--r-- jstests/core/exists4.js | 16
-rw-r--r-- jstests/core/existsa.js | 8
-rw-r--r-- jstests/core/explain_distinct.js | 5
-rw-r--r-- jstests/core/explain_find.js | 4
-rw-r--r-- jstests/core/explain_find_and_modify.js | 65
-rw-r--r-- jstests/core/explain_multi_plan.js | 13
-rw-r--r-- jstests/core/find4.js | 8
-rw-r--r-- jstests/core/find_and_modify.js | 12
-rw-r--r-- jstests/core/find_and_modify_server6865.js | 286
-rw-r--r-- jstests/core/find_dedup.js | 17
-rw-r--r-- jstests/core/find_getmore_bsonsize.js | 5
-rw-r--r-- jstests/core/fts_blog.js | 5
-rw-r--r-- jstests/core/fts_blogwild.js | 15
-rw-r--r-- jstests/core/fts_diacritic_and_casesensitive.js | 7
-rw-r--r-- jstests/core/fts_diacriticsensitive.js | 6
-rw-r--r-- jstests/core/fts_index.js | 52
-rw-r--r-- jstests/core/fts_phrase.js | 10
-rw-r--r-- jstests/core/fts_projection.js | 23
-rw-r--r-- jstests/core/geo10.js | 14
-rw-r--r-- jstests/core/geo3.js | 4
-rw-r--r-- jstests/core/geo9.js | 4
-rw-r--r-- jstests/core/geo_2d_with_geojson_point.js | 5
-rw-r--r-- jstests/core/geo_array2.js | 12
-rw-r--r-- jstests/core/geo_big_polygon.js | 32
-rw-r--r-- jstests/core/geo_big_polygon2.js | 31
-rw-r--r-- jstests/core/geo_big_polygon3.js | 34
-rw-r--r-- jstests/core/geo_borders.js | 134
-rw-r--r-- jstests/core/geo_box1.js | 16
-rw-r--r-- jstests/core/geo_box1_noindex.js | 16
-rw-r--r-- jstests/core/geo_center_sphere1.js | 9
-rw-r--r-- jstests/core/geo_center_sphere2.js | 12
-rw-r--r-- jstests/core/geo_circle1.js | 16
-rw-r--r-- jstests/core/geo_circle1_noindex.js | 16
-rw-r--r-- jstests/core/geo_distinct.js | 5
-rw-r--r-- jstests/core/geo_fiddly_box.js | 5
-rw-r--r-- jstests/core/geo_group.js | 12
-rw-r--r-- jstests/core/geo_haystack1.js | 9
-rw-r--r-- jstests/core/geo_haystack2.js | 9
-rw-r--r-- jstests/core/geo_invalid_polygon.js | 5
-rw-r--r-- jstests/core/geo_mapreduce.js | 4
-rw-r--r-- jstests/core/geo_mapreduce2.js | 16
-rw-r--r-- jstests/core/geo_mindistance.js | 31
-rw-r--r-- jstests/core/geo_mindistance_boundaries.js | 44
-rw-r--r-- jstests/core/geo_operator_crs.js | 10
-rw-r--r-- jstests/core/geo_or.js | 92
-rw-r--r-- jstests/core/geo_poly_edge.js | 6
-rw-r--r-- jstests/core/geo_polygon1.js | 16
-rw-r--r-- jstests/core/geo_polygon1_noindex.js | 16
-rw-r--r-- jstests/core/geo_polygon3.js | 5
-rw-r--r-- jstests/core/geo_s2cursorlimitskip.js | 23
-rw-r--r-- jstests/core/geo_s2descindex.js | 16
-rw-r--r-- jstests/core/geo_s2disjoint_holes.js | 20
-rw-r--r-- jstests/core/geo_s2dupe_points.js | 5
-rw-r--r-- jstests/core/geo_s2explain.js | 8
-rw-r--r-- jstests/core/geo_s2holesameasshell.js | 15
-rw-r--r-- jstests/core/geo_s2index.js | 10
-rw-r--r-- jstests/core/geo_s2indexversion1.js | 19
-rw-r--r-- jstests/core/geo_s2intersection.js | 23
-rw-r--r-- jstests/core/geo_s2multi.js | 40
-rw-r--r-- jstests/core/geo_s2near.js | 4
-rw-r--r-- jstests/core/geo_s2nearComplex.js | 14
-rw-r--r-- jstests/core/geo_s2near_equator_opposite.js | 14
-rw-r--r-- jstests/core/geo_s2nongeoarray.js | 6
-rw-r--r-- jstests/core/geo_s2nonstring.js | 18
-rw-r--r-- jstests/core/geo_s2oddshapes.js | 10
-rw-r--r-- jstests/core/geo_s2polywithholes.js | 15
-rw-r--r-- jstests/core/geo_s2sparse.js | 12
-rw-r--r-- jstests/core/geo_s2twofields.js | 46
-rw-r--r-- jstests/core/geo_uniqueDocs.js | 12
-rw-r--r-- jstests/core/geo_uniqueDocs2.js | 23
-rw-r--r-- jstests/core/geo_update_btree.js | 17
-rw-r--r-- jstests/core/geo_update_dedup.js | 4
-rw-r--r-- jstests/core/geo_withinquery.js | 13
-rw-r--r-- jstests/core/geob.js | 16
-rw-r--r-- jstests/core/geof.js | 10
-rw-r--r-- jstests/core/geonear_cmd_input_validation.js | 20
-rw-r--r-- jstests/core/getlog2.js | 45
-rw-r--r-- jstests/core/group1.js | 25
-rw-r--r-- jstests/core/group2.js | 4
-rw-r--r-- jstests/core/grow_hash_table.js | 8
-rw-r--r-- jstests/core/hashindex1.js | 17
-rw-r--r-- jstests/core/hint1.js | 17
-rw-r--r-- jstests/core/idhack.js | 13
-rw-r--r-- jstests/core/in5.js | 18
-rw-r--r-- jstests/core/index_arr2.js | 9
-rw-r--r-- jstests/core/index_check3.js | 4
-rw-r--r-- jstests/core/index_check6.js | 14
-rw-r--r-- jstests/core/index_create_too_many.js | 5
-rw-r--r-- jstests/core/index_create_with_nul_in_name.js | 6
-rw-r--r-- jstests/core/index_diag.js | 5
-rw-r--r-- jstests/core/index_filter_commands.js | 54
-rw-r--r-- jstests/core/index_many.js | 8
-rw-r--r-- jstests/core/index_partial_create_drop.js | 10
-rw-r--r-- jstests/core/index_stats.js | 4
-rw-r--r-- jstests/core/indexu.js | 12
-rw-r--r-- jstests/core/insert1.js | 4
-rw-r--r-- jstests/core/js1.js | 22
-rw-r--r-- jstests/core/js2.js | 10
-rw-r--r-- jstests/core/js3.js | 32
-rw-r--r-- jstests/core/js7.js | 7
-rw-r--r-- jstests/core/js8.js | 24
-rw-r--r-- jstests/core/js9.js | 15
-rw-r--r-- jstests/core/list_collections1.js | 120
-rw-r--r-- jstests/core/list_collections_filter.js | 22
-rw-r--r-- jstests/core/long_index_rename.js | 10
-rw-r--r-- jstests/core/max_doc_size.js | 7
-rw-r--r-- jstests/core/mr1.js | 32
-rw-r--r-- jstests/core/mr2.js | 5
-rw-r--r-- jstests/core/mr3.js | 4
-rw-r--r-- jstests/core/mr4.js | 4
-rw-r--r-- jstests/core/mr5.js | 11
-rw-r--r-- jstests/core/mr_bigobject.js | 5
-rw-r--r-- jstests/core/mr_bigobject_replace.js | 18
-rw-r--r-- jstests/core/mr_index3.js | 44
-rw-r--r-- jstests/core/mr_killop.js | 7
-rw-r--r-- jstests/core/mr_mutable_properties.js | 28
-rw-r--r-- jstests/core/mr_stored.js | 5
-rw-r--r-- jstests/core/nestedarr1.js | 8
-rw-r--r-- jstests/core/nestedobj1.js | 8
-rw-r--r-- jstests/core/nin.js | 8
-rw-r--r-- jstests/core/not3.js | 8
-rw-r--r-- jstests/core/or1.js | 17
-rw-r--r-- jstests/core/or_inexact.js | 4
-rw-r--r-- jstests/core/orc.js | 26
-rw-r--r-- jstests/core/ork.js | 26
-rw-r--r-- jstests/core/plan_cache_list_plans.js | 11
-rw-r--r-- jstests/core/plan_cache_list_shapes.js | 5
-rw-r--r-- jstests/core/plan_cache_shell_helpers.js | 39
-rw-r--r-- jstests/core/pop_server_13516.js | 5
-rw-r--r-- jstests/core/profile1.js | 8
-rw-r--r-- jstests/core/profile_count.js | 8
-rw-r--r-- jstests/core/profile_insert.js | 8
-rw-r--r-- jstests/core/push_sort.js | 5
-rw-r--r-- jstests/core/ref.js | 5
-rw-r--r-- jstests/core/ref3.js | 5
-rw-r--r-- jstests/core/ref4.js | 5
-rw-r--r-- jstests/core/regex2.js | 5
-rw-r--r-- jstests/core/regex3.js | 5
-rw-r--r-- jstests/core/regex4.js | 5
-rw-r--r-- jstests/core/regex6.js | 48
-rw-r--r-- jstests/core/remove7.js | 4
-rw-r--r-- jstests/core/rename4.js | 5
-rw-r--r-- jstests/core/repair_database.js | 5
-rw-r--r-- jstests/core/return_key.js | 11
-rw-r--r-- jstests/core/role_management_helpers.js | 20
-rw-r--r-- jstests/core/set_param1.js | 10
-rw-r--r-- jstests/core/sort3.js | 21
-rw-r--r-- jstests/core/sort5.js | 32
-rw-r--r-- jstests/core/sortk.js | 17
-rw-r--r-- jstests/core/splitvector.js | 14
-rw-r--r-- jstests/core/stages_delete.js | 4
-rw-r--r-- jstests/core/stages_sort.js | 4
-rw-r--r-- jstests/core/system_profile.js | 27
-rw-r--r-- jstests/core/update_find_and_modify_id.js | 5
-rw-r--r-- jstests/core/update_min_max_examples.js | 5
-rw-r--r-- jstests/core/update_server-12848.js | 10
-rw-r--r-- jstests/core/upsert_fields.js | 20
-rw-r--r-- jstests/core/validate_user_documents.js | 3
-rw-r--r-- jstests/core/where1.js | 4
-rw-r--r-- jstests/core/where3.js | 4
-rw-r--r-- jstests/core/where4.js | 17
-rw-r--r-- jstests/decimal/decimal_find_query.js | 9
-rw-r--r-- jstests/disk/datafile_options.js | 4
-rw-r--r-- jstests/disk/dbNoCreate.js | 5
-rw-r--r-- jstests/disk/index_options.js | 4
-rw-r--r-- jstests/dur/journaling_options.js | 16
-rw-r--r-- jstests/gle/gle_explicit_optime.js | 13
-rw-r--r-- jstests/gle/opcounters_legacy.js | 5
-rw-r--r-- jstests/httpinterface/network_options.js | 16
-rw-r--r-- jstests/libs/chunk_manipulation_util.js | 4
-rw-r--r-- jstests/libs/cleanup_orphaned_util.js | 8
-rw-r--r-- jstests/libs/csrs_upgrade_util.js | 8
-rw-r--r-- jstests/libs/election_timing_test.js | 5
-rw-r--r-- jstests/libs/fts.js | 12
-rw-r--r-- jstests/libs/geo_near_random.js | 11
-rw-r--r-- jstests/libs/override_methods/implicitly_shard_accessed_collections.js | 6
-rw-r--r-- jstests/libs/override_methods/set_majority_read_and_write_concerns.js | 18
-rw-r--r-- jstests/libs/override_methods/sharding_continuous_config_stepdown.js | 22
-rw-r--r-- jstests/libs/test_background_ops.js | 4
-rw-r--r-- jstests/libs/trace_missing_docs.js | 12
-rw-r--r-- jstests/mmap_v1/capped2.js | 4
-rw-r--r-- jstests/mmap_v1/capped8.js | 5
-rw-r--r-- jstests/mmap_v1/collmod.js | 5
-rw-r--r-- jstests/mmap_v1/datasize.js | 26
-rw-r--r-- jstests/mmap_v1/datasize3.js | 4
-rw-r--r-- jstests/mmap_v1/update.js | 6
-rw-r--r-- jstests/mmap_v1/use_power_of_2.js | 10
-rw-r--r-- jstests/multiVersion/geo_2dsphere_v2_to_v3.js | 8
-rw-r--r-- jstests/multiVersion/initialsync.js | 5
-rw-r--r-- jstests/multiVersion/invalid_key_pattern_upgrade.js | 11
-rw-r--r-- jstests/multiVersion/libs/data_generators.js | 8
-rw-r--r-- jstests/multiVersion/libs/dumprestore_helpers.js | 52
-rw-r--r-- jstests/multiVersion/minor_version_tags_new_old_new.js | 48
-rw-r--r-- jstests/multiVersion/minor_version_tags_old_new_old.js | 48
-rw-r--r-- jstests/multiVersion/mixed_storage_version_replication.js | 17
-rw-r--r-- jstests/multiVersion/partial_index_upgrade.js | 5
-rw-r--r-- jstests/multiVersion/transitioning_to_and_from_WT.js | 9
-rw-r--r-- jstests/multiVersion/upgrade_cluster.js | 4
-rw-r--r-- jstests/multiVersion/wt_index_option_defaults_replset.js | 8
-rw-r--r-- jstests/noPassthrough/backup_restore.js | 15
-rw-r--r-- jstests/noPassthrough/commands_handle_kill.js | 8
-rw-r--r-- jstests/noPassthrough/count_helper_read_preference.js | 5
-rw-r--r-- jstests/noPassthrough/currentop_query.js | 6
-rw-r--r-- jstests/noPassthrough/cursor_timeout.js | 7
-rw-r--r-- jstests/noPassthrough/exit_logging.js | 51
-rw-r--r-- jstests/noPassthrough/ftdc_setparam.js | 4
-rw-r--r-- jstests/noPassthrough/geo_full.js | 139
-rw-r--r-- jstests/noPassthrough/geo_mnypts_plus_fields.js | 8
-rw-r--r-- jstests/noPassthrough/initial_sync_cloner_dups.js | 7
-rw-r--r-- jstests/noPassthrough/javascript_options.js | 4
-rw-r--r-- jstests/noPassthrough/js_protection.js | 17
-rw-r--r-- jstests/noPassthrough/lock_stats.js | 8
-rw-r--r-- jstests/noPassthrough/logging_options.js | 20
-rw-r--r-- jstests/noPassthrough/minvalid2.js | 5
-rw-r--r-- jstests/noPassthrough/parameters.js | 12
-rw-r--r-- jstests/noPassthrough/profile_options.js | 12
-rw-r--r-- jstests/noPassthrough/read_committed_lookup.js | 8
-rw-r--r-- jstests/noPassthrough/read_majority.js | 4
-rw-r--r-- jstests/noPassthrough/read_majority_reads.js | 24
-rw-r--r-- jstests/noPassthrough/sync_write.js | 6
-rw-r--r-- jstests/noPassthrough/update_server-5552.js | 19
-rw-r--r-- jstests/noPassthrough/write_local.js | 4
-rw-r--r-- jstests/noPassthrough/wt_index_option_defaults.js | 17
-rw-r--r-- jstests/noPassthrough/wt_nojournal_repl.js | 7
-rw-r--r-- jstests/noPassthroughWithMongod/apply_ops_errors.js | 7
-rw-r--r-- jstests/noPassthroughWithMongod/bench_test_crud_commands.js | 15
-rw-r--r-- jstests/noPassthroughWithMongod/clonecollection.js | 10
-rw-r--r-- jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js | 9
-rw-r--r-- jstests/noPassthroughWithMongod/create_indexes_shell_helper.js | 8
-rw-r--r-- jstests/noPassthroughWithMongod/external_sort_text_agg.js | 13
-rw-r--r-- jstests/noPassthroughWithMongod/ftdc_params.js | 4
-rw-r--r-- jstests/noPassthroughWithMongod/geo_axis_aligned.js | 23
-rw-r--r-- jstests/noPassthroughWithMongod/geo_polygon.js | 19
-rw-r--r-- jstests/noPassthroughWithMongod/index_check10.js | 4
-rw-r--r-- jstests/noPassthroughWithMongod/index_check9.js | 4
-rw-r--r-- jstests/noPassthroughWithMongod/index_multi.js | 8
-rw-r--r-- jstests/noPassthroughWithMongod/indexbg_drop.js | 5
-rw-r--r-- jstests/noPassthroughWithMongod/indexbg_updates.js | 9
-rw-r--r-- jstests/noPassthroughWithMongod/insertMulti.js | 4
-rw-r--r-- jstests/noPassthroughWithMongod/mr_writeconflict.js | 6
-rw-r--r-- jstests/noPassthroughWithMongod/replReads.js | 5
-rw-r--r-- jstests/noPassthroughWithMongod/rpc_protocols.js | 52
-rw-r--r-- jstests/noPassthroughWithMongod/temp_namespace.js | 8
-rw-r--r-- jstests/parallel/checkMultiThread.js | 10
-rw-r--r-- jstests/perf/v8_mapreduce.js | 5
-rw-r--r-- jstests/readonly/geo.js | 5
-rw-r--r-- jstests/readonly/lib/read_only_test.js | 19
-rw-r--r-- jstests/repl/basic1.js | 15
-rw-r--r-- jstests/repl/batch_write_command_wc_repl.js | 24
-rw-r--r-- jstests/repl/repl13.js | 5
-rw-r--r-- jstests/repl/repl14.js | 12
-rw-r--r-- jstests/replsets/apply_batch_only_goes_forward.js | 13
-rw-r--r-- jstests/replsets/apply_ops_lastop.js | 9
-rw-r--r-- jstests/replsets/apply_ops_wc.js | 5
-rw-r--r-- jstests/replsets/auth1.js | 16
-rw-r--r-- jstests/replsets/auth2.js | 4
-rw-r--r-- jstests/replsets/auth3.js | 16
-rw-r--r-- jstests/replsets/batch_write_command_wc.js | 36
-rw-r--r-- jstests/replsets/capped_id.js | 9
-rw-r--r-- jstests/replsets/chaining_removal.js | 4
-rw-r--r-- jstests/replsets/commands_that_write_accept_wc.js | 13
-rw-r--r-- jstests/replsets/config_server_checks.js | 7
-rw-r--r-- jstests/replsets/disallow_adding_initialized_node1.js | 14
-rw-r--r-- jstests/replsets/disallow_adding_initialized_node2.js | 16
-rw-r--r-- jstests/replsets/initial_sync1.js | 5
-rw-r--r-- jstests/replsets/initial_sync_update_missing_doc2.js | 5
-rw-r--r-- jstests/replsets/localhostAuthBypass.js | 12
-rw-r--r-- jstests/replsets/oplog_format.js | 4
-rw-r--r-- jstests/replsets/optime.js | 4
-rw-r--r-- jstests/replsets/pipelineout.js | 8
-rw-r--r-- jstests/replsets/priority_takeover_one_node_higher_priority.js | 10
-rw-r--r-- jstests/replsets/priority_takeover_two_nodes_equal_priority.js | 10
-rw-r--r-- jstests/replsets/read_committed.js | 17
-rw-r--r-- jstests/replsets/read_committed_with_catalog_changes.js | 4
-rw-r--r-- jstests/replsets/reconfig.js | 6
-rw-r--r-- jstests/replsets/reconfig_tags.js | 10
-rw-r--r-- jstests/replsets/remove1.js | 5
-rw-r--r-- jstests/replsets/repl_options.js | 4
-rw-r--r-- jstests/replsets/replset2.js | 4
-rw-r--r-- jstests/replsets/replset5.js | 5
-rw-r--r-- jstests/replsets/replset7.js | 11
-rw-r--r-- jstests/replsets/rollback5.js | 5
-rw-r--r-- jstests/replsets/rollback_auth.js | 36
-rw-r--r-- jstests/replsets/rollback_cmd_unrollbackable.js | 5
-rw-r--r-- jstests/replsets/rollback_collMod_PowerOf2Sizes.js | 15
-rw-r--r-- jstests/replsets/rollback_collMod_fatal.js | 5
-rw-r--r-- jstests/replsets/rollback_different_h.js | 5
-rw-r--r-- jstests/replsets/rollback_dropdb.js | 5
-rw-r--r-- jstests/replsets/rollback_empty_ns.js | 5
-rw-r--r-- jstests/replsets/rollback_empty_o.js | 5
-rw-r--r-- jstests/replsets/rollback_empty_o2.js | 5
-rw-r--r-- jstests/replsets/rollback_fake_cmd.js | 5
-rw-r--r-- jstests/replsets/rollback_index.js | 13
-rw-r--r-- jstests/replsets/rollback_too_new.js | 8
-rw-r--r-- jstests/replsets/server_status_metrics.js | 6
-rw-r--r-- jstests/replsets/stepdown3.js | 4
-rw-r--r-- jstests/replsets/stepdown_catch_up_opt.js | 15
-rw-r--r-- jstests/replsets/stepdown_killop.js | 8
-rw-r--r-- jstests/replsets/stepdown_long_wait_time.js | 4
-rw-r--r-- jstests/replsets/sync2.js | 4
-rw-r--r-- jstests/replsets/tags.js | 48
-rw-r--r-- jstests/replsets/two_initsync.js | 6
-rw-r--r-- jstests/replsets/user_management_wc.js | 9
-rw-r--r-- jstests/serial_run/election_timing.js | 4
-rw-r--r-- jstests/sharding/SERVER-7379.js | 6
-rw-r--r-- jstests/sharding/addshard2.js | 31
-rw-r--r-- jstests/sharding/auth.js | 81
-rw-r--r-- jstests/sharding/authCommands.js | 30
-rw-r--r-- jstests/sharding/auth_add_shard.js | 14
-rw-r--r-- jstests/sharding/auth_no_config_primary.js | 4
-rw-r--r-- jstests/sharding/auth_slaveok_routing.js | 4
-rw-r--r-- jstests/sharding/authmr.js | 8
-rw-r--r-- jstests/sharding/authwhere.js | 8
-rw-r--r-- jstests/sharding/auto2.js | 21
-rw-r--r-- jstests/sharding/balance_repl.js | 10
-rw-r--r-- jstests/sharding/batch_write_command_sharded.js | 5
-rw-r--r-- jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js | 6
-rw-r--r-- jstests/sharding/cleanup_orphaned_cmd_prereload.js | 21
-rw-r--r-- jstests/sharding/coll_epoch_test0.js | 25
-rw-r--r-- jstests/sharding/commands_that_write_accept_wc_configRS.js | 9
-rw-r--r-- jstests/sharding/commands_that_write_accept_wc_shards.js | 9
-rw-r--r-- jstests/sharding/conf_server_write_concern.js | 8
-rw-r--r-- jstests/sharding/covered_shard_key_indexes.js | 21
-rw-r--r-- jstests/sharding/delete_during_migrate.js | 4
-rw-r--r-- jstests/sharding/drop_sharded_db.js | 4
-rw-r--r-- jstests/sharding/empty_doc_results.js | 5
-rw-r--r-- jstests/sharding/explain_cmd.js | 6
-rw-r--r-- jstests/sharding/explain_find_and_modify_sharded.js | 4
-rw-r--r-- jstests/sharding/explain_read_pref.js | 46
-rw-r--r-- jstests/sharding/fair_balancer_round.js | 8
-rw-r--r-- jstests/sharding/features1.js | 23
-rw-r--r-- jstests/sharding/features2.js | 11
-rw-r--r-- jstests/sharding/fts_score_sort_sharded.js | 5
-rw-r--r-- jstests/sharding/geo_near_random1.js | 4
-rw-r--r-- jstests/sharding/geo_near_random2.js | 6
-rw-r--r-- jstests/sharding/geo_shardedgeonear.js | 7
-rw-r--r-- jstests/sharding/group_slaveok.js | 13
-rw-r--r-- jstests/sharding/hash_shard1.js | 4
-rw-r--r-- jstests/sharding/index1.js | 16
-rw-r--r-- jstests/sharding/key_many.js | 44
-rw-r--r-- jstests/sharding/key_string.js | 9
-rw-r--r-- jstests/sharding/lagged_config_secondary.js | 4
-rw-r--r-- jstests/sharding/limit_push.js | 4
-rw-r--r-- jstests/sharding/localhostAuthBypass.js | 8
-rw-r--r-- jstests/sharding/max_time_ms_sharded.js | 42
-rw-r--r-- jstests/sharding/migrateBig.js | 7
-rw-r--r-- jstests/sharding/migration_failure.js | 4
-rw-r--r-- jstests/sharding/min_optime_recovery.js | 10
-rw-r--r-- jstests/sharding/mongos_no_replica_set_refresh.js | 6
-rw-r--r-- jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js | 10
-rw-r--r-- jstests/sharding/mongos_rs_shard_failure_tolerance.js | 4
-rw-r--r-- jstests/sharding/mongos_shard_failure_tolerance.js | 4
-rw-r--r-- jstests/sharding/move_chunk_basic.js | 8
-rw-r--r-- jstests/sharding/move_chunk_wc.js | 25
-rw-r--r-- jstests/sharding/move_stale_mongos.js | 8
-rw-r--r-- jstests/sharding/movechunk_with_default_paranoia.js | 8
-rw-r--r-- jstests/sharding/movechunk_with_moveParanoia.js | 8
-rw-r--r-- jstests/sharding/movechunk_with_noMoveParanoia.js | 8
-rw-r--r-- jstests/sharding/mrShardedOutput.js | 22
-rw-r--r-- jstests/sharding/mr_shard_version.js | 6
-rw-r--r-- jstests/sharding/multi_mongos2.js | 6
-rw-r--r-- jstests/sharding/no_empty_reset.js | 8
-rw-r--r-- jstests/sharding/pending_chunk.js | 12
-rw-r--r-- jstests/sharding/prefix_shard_key.js | 34
-rw-r--r-- jstests/sharding/printShardingStatus.js | 5
-rw-r--r-- jstests/sharding/query_config.js | 52
-rw-r--r-- jstests/sharding/read_pref.js | 8
-rw-r--r-- jstests/sharding/read_pref_cmd.js | 68
-rw-r--r-- jstests/sharding/regex_targeting.js | 15
-rw-r--r-- jstests/sharding/replmonitor_bad_seed.js | 4
-rw-r--r-- jstests/sharding/secondary_query_routing.js | 4
-rw-r--r-- jstests/sharding/shard2.js | 18
-rw-r--r-- jstests/sharding/shard_aware_init.js | 11
-rw-r--r-- jstests/sharding/shard_aware_primary_failover.js | 4
-rw-r--r-- jstests/sharding/shard_identity_config_update.js | 4
-rw-r--r-- jstests/sharding/sharding_balance1.js | 16
-rw-r--r-- jstests/sharding/sharding_balance4.js | 8
-rw-r--r-- jstests/sharding/sharding_options.js | 8
-rw-r--r-- jstests/sharding/sharding_rs2.js | 10
-rw-r--r-- jstests/sharding/sharding_state_after_stepdown.js | 70
-rw-r--r-- jstests/sharding/sort1.js | 7
-rw-r--r-- jstests/sharding/split_large_key.js | 5
-rw-r--r-- jstests/sharding/split_with_force.js | 16
-rw-r--r-- jstests/sharding/stale_mongos_updates_and_removes.js | 41
-rw-r--r-- jstests/sharding/stats.js | 4
-rw-r--r-- jstests/sharding/top_chunk_autosplit.js | 54
-rw-r--r-- jstests/slow1/mr_during_migrate.js | 4
-rw-r--r-- jstests/slow1/remove_during_mr.js | 5
-rw-r--r-- jstests/ssl/disable_x509.js | 9
-rw-r--r-- jstests/ssl/libs/ssl_helpers.js | 15
-rw-r--r-- jstests/ssl/ssl_hostname_validation.js | 4
-rw-r--r-- jstests/ssl/ssl_without_ca.js | 6
-rw-r--r-- jstests/ssl/upgrade_to_x509_ssl.js | 42
-rw-r--r-- jstests/ssl/x509_client.js | 46
-rw-r--r-- jstests/sslSpecial/mixed_mode_sharded_transition_nossl.js | 4
-rw-r--r-- jstests/sslSpecial/ssl_mixedmode.js | 4
-rw-r--r-- jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js | 4
-rw-r--r-- jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js | 63
-rw-r--r-- jstests/tool/csvimport1.js | 3
-rw-r--r-- jstests/tool/dumprestore7.js | 15
-rw-r--r-- jstests/tool/dumprestore8.js | 5
-rw-r--r-- jstests/tool/dumprestoreWithNoOptions.js | 18
-rw-r--r-- jstests/tool/dumprestore_auth3.js | 20
-rw-r--r-- jstests/tool/dumprestore_excludecollections.js | 74
-rw-r--r-- jstests/tool/exportimport_bigarray.js | 5
-rw-r--r-- jstests/tool/tool_replset.js | 13
-rw-r--r-- src/mongo/base/data_range_cursor.h | 2
-rw-r--r-- src/mongo/base/data_type_string_data_test.cpp | 2
-rw-r--r-- src/mongo/base/init.h | 4
-rw-r--r-- src/mongo/base/initializer.cpp | 4
-rw-r--r-- src/mongo/base/initializer_dependency_graph.h | 2
-rw-r--r-- src/mongo/base/parse_number_test.cpp | 2
-rw-r--r-- src/mongo/base/secure_allocator.cpp | 5
-rw-r--r-- src/mongo/base/string_data.h | 3
-rw-r--r-- src/mongo/bson/bson_obj_data_type_test.cpp | 2
-rw-r--r-- src/mongo/bson/bson_obj_test.cpp | 5
-rw-r--r-- src/mongo/bson/bson_validate.cpp | 2
-rw-r--r-- src/mongo/bson/bson_validate_test.cpp | 30
-rw-r--r-- src/mongo/bson/bsonelement.h | 2
-rw-r--r-- src/mongo/bson/bsonobj.cpp | 2
-rw-r--r-- src/mongo/bson/bsonobjbuilder.h | 2
-rw-r--r-- src/mongo/bson/bsonobjbuilder_test.cpp | 6
-rw-r--r-- src/mongo/bson/bsontypes.h | 2
-rw-r--r-- src/mongo/bson/json.cpp | 6
-rw-r--r-- src/mongo/bson/json.h | 2
-rw-r--r-- src/mongo/bson/mutable/algorithm.h | 2
-rw-r--r-- src/mongo/bson/mutable/document.cpp | 5
-rw-r--r-- src/mongo/bson/oid.h | 2
-rw-r--r-- src/mongo/bson/timestamp.cpp | 2
-rw-r--r-- src/mongo/bson/util/bson_check.h | 5
-rw-r--r-- src/mongo/bson/util/bson_check_test.cpp | 13
-rw-r--r-- src/mongo/bson/util/bson_extract.cpp | 33
-rw-r--r-- src/mongo/bson/util/bson_extract_test.cpp | 6
-rw-r--r-- src/mongo/bson/util/builder.h | 2
-rw-r--r-- src/mongo/client/authenticate.cpp | 78
-rw-r--r-- src/mongo/client/authenticate_test.cpp | 38
-rw-r--r-- src/mongo/client/cyrus_sasl_client_session.cpp | 3
-rw-r--r-- src/mongo/client/dbclient.cpp | 26
-rw-r--r-- src/mongo/client/dbclient_rs.cpp | 41
-rw-r--r-- src/mongo/client/dbclientinterface.h | 15
-rw-r--r-- src/mongo/client/fetcher.cpp | 46
-rw-r--r-- src/mongo/client/fetcher.h | 2
-rw-r--r-- src/mongo/client/fetcher_test.cpp | 278
-rw-r--r-- src/mongo/client/parallel.cpp | 28
-rw-r--r-- src/mongo/client/read_preference.cpp | 13
-rw-r--r-- src/mongo/client/read_preference_test.cpp | 13
-rw-r--r-- src/mongo/client/remote_command_retry_scheduler.h | 2
-rw-r--r-- src/mongo/client/remote_command_retry_scheduler_test.cpp | 10
-rw-r--r-- src/mongo/client/remote_command_runner_impl.cpp | 18
-rw-r--r-- src/mongo/client/replica_set_monitor.cpp | 6
-rw-r--r-- src/mongo/client/replica_set_monitor.h | 2
-rw-r--r-- src/mongo/client/replica_set_monitor_internal.h | 2
-rw-r--r-- src/mongo/client/replica_set_monitor_manager.cpp | 3
-rw-r--r-- src/mongo/client/replica_set_monitor_test.cpp | 446
-rw-r--r-- src/mongo/client/sasl_scramsha1_client_conversation.cpp | 40
-rw-r--r-- src/mongo/client/sasl_sspi.cpp | 9
-rw-r--r-- src/mongo/client/sasl_sspi_options.cpp | 23
-rw-r--r-- src/mongo/client/scoped_db_conn_test.cpp | 6
-rw-r--r-- src/mongo/crypto/crypto_openssl.cpp | 2
-rw-r--r-- src/mongo/crypto/crypto_test.cpp | 456
-rw-r--r-- src/mongo/crypto/mechanism_scram.cpp | 6
-rw-r--r-- src/mongo/crypto/tom/tomcrypt.h | 6
-rw-r--r-- src/mongo/crypto/tom/tomcrypt_cfg.h | 6
-rw-r--r-- src/mongo/db/auth/action_set.cpp | 8
-rw-r--r-- src/mongo/db/auth/auth_decorations.cpp | 2
-rw-r--r-- src/mongo/db/auth/auth_index_d.cpp | 24
-rw-r--r-- src/mongo/db/auth/authorization_manager.cpp | 16
-rw-r--r-- src/mongo/db/auth/authorization_manager_global.cpp | 3
-rw-r--r-- src/mongo/db/auth/authorization_manager_test.cpp | 128
-rw-r--r-- src/mongo/db/auth/authorization_session.cpp | 8
-rw-r--r-- src/mongo/db/auth/authorization_session_test.cpp | 70
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_d.cpp | 3
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_local.cpp | 30
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_mock.cpp | 3
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_mock.h | 2
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_s.cpp | 50
-rw-r--r-- src/mongo/db/auth/authz_session_external_state_server_common.cpp | 3
-rw-r--r-- src/mongo/db/auth/native_sasl_authentication_session.cpp | 2
-rw-r--r-- src/mongo/db/auth/privilege_parser_test.cpp | 60
-rw-r--r-- src/mongo/db/auth/role_graph.cpp | 47
-rw-r--r-- src/mongo/db/auth/role_graph_update.cpp | 4
-rw-r--r-- src/mongo/db/auth/sasl_authentication_session.cpp | 2
-rw-r--r-- src/mongo/db/auth/sasl_options.cpp | 44
-rw-r--r-- src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp | 50
-rw-r--r-- src/mongo/db/auth/security_file.cpp | 8
-rw-r--r-- src/mongo/db/auth/security_key.cpp | 15
-rw-r--r-- src/mongo/db/auth/user_cache_invalidator_job.cpp | 6
-rw-r--r-- src/mongo/db/auth/user_document_parser_test.cpp | 233
-rw-r--r-- src/mongo/db/auth/user_management_commands_parser.cpp | 7
-rw-r--r-- src/mongo/db/auth/user_management_commands_parser.h | 2
-rw-r--r-- src/mongo/db/background.cpp | 6
-rw-r--r-- src/mongo/db/background.h | 2
-rw-r--r-- src/mongo/db/catalog/capped_utils.cpp | 2
-rw-r--r-- src/mongo/db/catalog/coll_mod.cpp | 6
-rw-r--r-- src/mongo/db/catalog/collection.cpp | 11
-rw-r--r-- src/mongo/db/catalog/collection_compact.cpp | 10
-rw-r--r-- src/mongo/db/catalog/collection_options_test.cpp | 6
-rw-r--r-- src/mongo/db/catalog/cursor_manager.cpp | 2
-rw-r--r-- src/mongo/db/catalog/database.cpp | 4
-rw-r--r-- src/mongo/db/catalog/database_holder.cpp | 6
-rw-r--r-- src/mongo/db/catalog/drop_indexes.cpp | 4
-rw-r--r-- src/mongo/db/catalog/index_catalog.cpp | 34
-rw-r--r-- src/mongo/db/catalog/index_create.cpp | 2
-rw-r--r-- src/mongo/db/catalog/index_key_validate_test.cpp | 3
-rw-r--r-- src/mongo/db/catalog/rename_collection.cpp | 3
-rw-r--r-- src/mongo/db/clientlistplugin.cpp | 3
-rw-r--r-- src/mongo/db/cloner.cpp | 12
-rw-r--r-- src/mongo/db/cloner.h | 2
-rw-r--r-- src/mongo/db/commands.cpp | 10
-rw-r--r-- src/mongo/db/commands/apply_ops_cmd.cpp | 16
-rw-r--r-- src/mongo/db/commands/clone.cpp | 2
-rw-r--r-- src/mongo/db/commands/clone_collection.cpp | 4
-rw-r--r-- src/mongo/db/commands/collection_to_capped.cpp | 13
-rw-r--r-- src/mongo/db/commands/copydb.cpp | 21
-rw-r--r-- src/mongo/db/commands/copydb_common.cpp | 4
-rw-r--r-- src/mongo/db/commands/copydb_start_commands.cpp | 2
-rw-r--r-- src/mongo/db/commands/create_indexes.cpp | 9
-rw-r--r-- src/mongo/db/commands/distinct.cpp | 13
-rw-r--r-- src/mongo/db/commands/drop_indexes.cpp | 2
-rw-r--r-- src/mongo/db/commands/find_and_modify.cpp | 13
-rw-r--r-- src/mongo/db/commands/find_cmd.cpp | 4
-rw-r--r-- src/mongo/db/commands/generic.cpp | 9
-rw-r--r-- src/mongo/db/commands/getmore_cmd.cpp | 6
-rw-r--r-- src/mongo/db/commands/group_cmd.cpp | 4
-rw-r--r-- src/mongo/db/commands/index_filter_commands.cpp | 6
-rw-r--r-- src/mongo/db/commands/index_filter_commands.h | 2
-rw-r--r-- src/mongo/db/commands/index_filter_commands_test.cpp | 28
-rw-r--r-- src/mongo/db/commands/list_indexes.cpp | 4
-rw-r--r-- src/mongo/db/commands/mr.cpp | 17
-rw-r--r-- src/mongo/db/commands/mr_test.cpp | 6
-rw-r--r-- src/mongo/db/commands/oplog_note.cpp | 4
-rw-r--r-- src/mongo/db/commands/parallel_collection_scan.cpp | 5
-rw-r--r-- src/mongo/db/commands/parameters.cpp | 19
-rw-r--r-- src/mongo/db/commands/pipeline_command.cpp | 6
-rw-r--r-- src/mongo/db/commands/plan_cache_commands.cpp | 6
-rw-r--r-- src/mongo/db/commands/plan_cache_commands_test.cpp | 15
-rw-r--r-- src/mongo/db/commands/rename_collection_cmd.cpp | 4
-rw-r--r-- src/mongo/db/commands/server_status.cpp | 2
-rw-r--r-- src/mongo/db/commands/server_status.h | 2
-rw-r--r-- src/mongo/db/commands/test_commands.cpp | 12
-rw-r--r-- src/mongo/db/commands/top_command.cpp | 2
-rw-r--r-- src/mongo/db/commands/user_management_commands.cpp | 109
-rw-r--r-- src/mongo/db/commands/user_management_commands_common.cpp | 19
-rw-r--r-- src/mongo/db/commands/write_commands/write_commands_common.cpp | 2
-rw-r--r-- src/mongo/db/commands/write_commands/write_commands_common.h | 2
-rw-r--r-- src/mongo/db/concurrency/d_concurrency.cpp | 2
-rw-r--r-- src/mongo/db/concurrency/lock_manager_defs.h | 2
-rw-r--r-- src/mongo/db/concurrency/lock_state.cpp | 2
-rw-r--r-- src/mongo/db/curop.cpp | 3
-rw-r--r-- src/mongo/db/curop.h | 2
-rw-r--r-- src/mongo/db/curop_metrics.cpp | 2
-rw-r--r-- src/mongo/db/db.cpp | 13
-rw-r--r-- src/mongo/db/db.h | 2
-rw-r--r-- src/mongo/db/db_raii.cpp | 2
-rw-r--r-- src/mongo/db/dbcommands.cpp | 19
-rw-r--r-- src/mongo/db/dbhelpers.cpp | 8
-rw-r--r-- src/mongo/db/dbhelpers.h | 2
-rw-r--r-- src/mongo/db/dbwebserver.cpp | 7
-rw-r--r-- src/mongo/db/exec/and_hash.cpp | 2
-rw-r--r-- src/mongo/db/exec/and_hash.h | 2
-rw-r--r-- src/mongo/db/exec/cached_plan.cpp | 3
-rw-r--r-- src/mongo/db/exec/cached_plan.h | 2
-rw-r--r-- src/mongo/db/exec/collection_scan.cpp | 10
-rw-r--r-- src/mongo/db/exec/delete.cpp | 2
-rw-r--r-- src/mongo/db/exec/geo_near.cpp | 2
-rw-r--r-- src/mongo/db/exec/geo_near.h | 2
-rw-r--r-- src/mongo/db/exec/keep_mutations.h | 2
-rw-r--r-- src/mongo/db/exec/limit.h | 2
-rw-r--r-- src/mongo/db/exec/multi_plan.cpp | 4
-rw-r--r-- src/mongo/db/exec/multi_plan.h | 4
-rw-r--r-- src/mongo/db/exec/near.h | 2
-rw-r--r-- src/mongo/db/exec/pipeline_proxy.h | 2
-rw-r--r-- src/mongo/db/exec/projection_exec_test.cpp | 4
-rw-r--r-- src/mongo/db/exec/sort.cpp | 2
-rw-r--r-- src/mongo/db/exec/sort.h | 2
-rw-r--r-- src/mongo/db/exec/sort_key_generator.h | 2
-rw-r--r-- src/mongo/db/exec/stagedebug_cmd.cpp | 12
-rw-r--r-- src/mongo/db/exec/subplan.cpp | 2
-rw-r--r-- src/mongo/db/exec/text.cpp | 4
-rw-r--r-- src/mongo/db/exec/text_match.cpp | 2
-rw-r--r-- src/mongo/db/exec/update.cpp | 24
-rw-r--r-- src/mongo/db/exec/working_set.h | 2
-rw-r--r-- src/mongo/db/exec/working_set_common.cpp | 4
-rw-r--r-- src/mongo/db/exec/working_set_test.cpp | 2
-rw-r--r-- src/mongo/db/field_parser_test.cpp | 24
-rw-r--r-- src/mongo/db/ftdc/compressor_test.cpp | 201
-rw-r--r-- src/mongo/db/ftdc/file_manager.cpp | 10
-rw-r--r-- src/mongo/db/ftdc/file_manager_test.cpp | 94
-rw-r--r-- src/mongo/db/ftdc/file_reader.cpp | 3
-rw-r--r-- src/mongo/db/ftdc/file_writer_test.cpp | 86
-rw-r--r-- src/mongo/db/ftdc/ftdc_test.cpp | 4
-rw-r--r-- src/mongo/db/ftdc/util.cpp | 7
-rw-r--r-- src/mongo/db/ftdc/varint.h | 2
-rw-r--r-- src/mongo/db/fts/fts_element_iterator.cpp | 3
-rw-r--r-- src/mongo/db/fts/fts_index_format.cpp | 7
-rw-r--r-- src/mongo/db/fts/fts_index_format_test.cpp | 32
-rw-r--r-- src/mongo/db/fts/fts_language.cpp | 9
-rw-r--r-- src/mongo/db/fts/fts_language.h | 2
-rw-r--r-- src/mongo/db/fts/fts_language_test.cpp | 2
-rw-r--r-- src/mongo/db/fts/fts_matcher.cpp | 2
-rw-r--r-- src/mongo/db/fts/fts_query_impl.cpp | 4
-rw-r--r-- src/mongo/db/fts/fts_query_impl_test.cpp | 25
-rw-r--r-- src/mongo/db/fts/fts_spec.cpp | 14
-rw-r--r-- src/mongo/db/fts/fts_spec.h | 2
-rw-r--r-- src/mongo/db/fts/fts_spec_legacy.cpp | 4
-rw-r--r-- src/mongo/db/fts/fts_spec_test.cpp | 22
-rw-r--r-- src/mongo/db/geo/big_polygon_test.cpp | 155
-rw-r--r-- src/mongo/db/geo/geoparser.cpp | 5
-rw-r--r-- src/mongo/db/geo/geoparser_test.cpp | 76
-rw-r--r-- src/mongo/db/geo/hash.cpp | 17
-rw-r--r-- src/mongo/db/geo/hash.h | 2
-rw-r--r-- src/mongo/db/geo/hash_test.cpp | 8
-rw-r--r-- src/mongo/db/geo/r2_region_coverer.cpp | 2
-rw-r--r-- src/mongo/db/geo/r2_region_coverer_test.cpp | 7
-rw-r--r-- src/mongo/db/geo/shapes.cpp | 2
-rw-r--r-- src/mongo/db/geo/shapes.h | 2
-rw-r--r-- src/mongo/db/hasher_test.cpp | 5
-rw-r--r-- src/mongo/db/index/2d_access_method.cpp | 2
-rw-r--r-- src/mongo/db/index/btree_access_method.h | 2
-rw-r--r-- src/mongo/db/index/btree_key_generator.cpp | 6
-rw-r--r-- src/mongo/db/index/expression_keys_private.cpp | 2
-rw-r--r-- src/mongo/db/index/expression_keys_private.h | 2
-rw-r--r-- src/mongo/db/index/expression_params.cpp | 12
-rw-r--r-- src/mongo/db/index/expression_params.h | 2
-rw-r--r-- src/mongo/db/index/external_key_generator.cpp | 2
-rw-r--r-- src/mongo/db/index/hash_access_method.cpp | 2
-rw-r--r-- src/mongo/db/index/hash_access_method.h | 2
-rw-r--r-- src/mongo/db/index/index_access_method.cpp | 6
-rw-r--r-- src/mongo/db/index/index_descriptor.h | 2
-rw-r--r-- src/mongo/db/index/s2_access_method.cpp | 40
-rw-r--r-- src/mongo/db/index/s2_key_generator_test.cpp | 12
-rw-r--r-- src/mongo/db/index_rebuilder.cpp | 2
-rw-r--r-- src/mongo/db/initialize_server_global_state.cpp | 33
-rw-r--r-- src/mongo/db/instance.cpp | 15
-rw-r--r-- src/mongo/db/jsobj.h | 12
-rw-r--r-- src/mongo/db/keypattern.cpp | 3
-rw-r--r-- src/mongo/db/keypattern_test.cpp | 6
-rw-r--r-- src/mongo/db/matcher/expression.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression.h | 2
-rw-r--r-- src/mongo/db/matcher/expression_algo_test.cpp | 23
-rw-r--r-- src/mongo/db/matcher/expression_array.h | 2
-rw-r--r-- src/mongo/db/matcher/expression_geo.cpp | 19
-rw-r--r-- src/mongo/db/matcher/expression_geo_test.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_leaf.cpp | 4
-rw-r--r-- src/mongo/db/matcher/expression_leaf.h | 2
-rw-r--r-- src/mongo/db/matcher/expression_leaf_test.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_parser.cpp | 4
-rw-r--r-- src/mongo/db/matcher/expression_parser.h | 3
-rw-r--r-- src/mongo/db/matcher/expression_parser_array_test.cpp | 152
-rw-r--r-- src/mongo/db/matcher/expression_parser_leaf_test.cpp | 583
-rw-r--r-- src/mongo/db/matcher/expression_serialization_test.cpp | 61
-rw-r--r-- src/mongo/db/matcher/expression_text.cpp | 6
-rw-r--r-- src/mongo/db/matcher/expression_text_base.cpp | 6
-rw-r--r-- src/mongo/db/matcher/expression_tree.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_tree_test.cpp | 2
-rw-r--r-- src/mongo/db/matcher/expression_where.cpp | 2
-rw-r--r-- src/mongo/db/matcher/matchable.cpp | 4
-rw-r--r-- src/mongo/db/matcher/matcher.cpp | 2
-rw-r--r-- src/mongo/db/matcher/path.cpp | 4
-rw-r--r-- src/mongo/db/mongod_options.cpp | 342
-rw-r--r-- src/mongo/db/op_observer.cpp | 4
-rw-r--r-- src/mongo/db/operation_context_impl.cpp | 2
-rw-r--r-- src/mongo/db/operation_context_noop.h | 2
-rw-r--r-- src/mongo/db/ops/field_checker.cpp | 3
-rw-r--r-- src/mongo/db/ops/insert.cpp | 14
-rw-r--r-- src/mongo/db/ops/log_builder.cpp | 12
-rw-r--r-- src/mongo/db/ops/log_builder_test.cpp | 27
-rw-r--r-- src/mongo/db/ops/modifier_add_to_set.cpp | 17
-rw-r--r-- src/mongo/db/ops/modifier_bit.cpp | 18
-rw-r--r-- src/mongo/db/ops/modifier_compare.cpp | 3
-rw-r--r-- src/mongo/db/ops/modifier_current_date.cpp | 6
-rw-r--r-- src/mongo/db/ops/modifier_inc.cpp | 23
-rw-r--r-- src/mongo/db/ops/modifier_object_replace.cpp | 9
-rw-r--r-- src/mongo/db/ops/modifier_pop.cpp | 13
-rw-r--r-- src/mongo/db/ops/modifier_pop_test.cpp | 2
-rw-r--r-- src/mongo/db/ops/modifier_pull.cpp | 3
-rw-r--r-- src/mongo/db/ops/modifier_pull_all.cpp | 13
-rw-r--r-- src/mongo/db/ops/modifier_push.cpp | 29
-rw-r--r-- src/mongo/db/ops/modifier_push_sorter.h | 2
-rw-r--r-- src/mongo/db/ops/modifier_push_test.cpp | 34
-rw-r--r-- src/mongo/db/ops/modifier_rename.cpp | 21
-rw-r--r-- src/mongo/db/ops/modifier_set.cpp | 3
-rw-r--r-- src/mongo/db/ops/modifier_unset.cpp | 3
-rw-r--r-- src/mongo/db/ops/parsed_delete.cpp | 2
-rw-r--r-- src/mongo/db/ops/parsed_update.h | 2
-rw-r--r-- src/mongo/db/ops/path_support.cpp | 9
-rw-r--r-- src/mongo/db/ops/path_support_test.cpp | 16
-rw-r--r-- src/mongo/db/ops/update.cpp | 3
-rw-r--r-- src/mongo/db/ops/update.h | 2
-rw-r--r-- src/mongo/db/ops/update_driver.cpp | 18
-rw-r--r-- src/mongo/db/ops/update_lifecycle_impl.cpp | 4
-rw-r--r-- src/mongo/db/ops/update_request.h | 2
-rw-r--r-- src/mongo/db/ops/update_result.h | 2
-rw-r--r-- src/mongo/db/ops/write_ops_exec.cpp | 7
-rw-r--r-- src/mongo/db/ops/write_ops_parsers.cpp | 23
-rw-r--r-- src/mongo/db/ops/write_ops_parsers.h | 2
-rw-r--r-- src/mongo/db/ops/write_ops_parsers_test.cpp | 68
-rw-r--r-- src/mongo/db/pipeline/accumulator.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/accumulator.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_internal.h | 4
-rw-r--r-- src/mongo/db/pipeline/document_source.h | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_cursor.cpp | 8
-rw-r--r-- src/mongo/db/pipeline/document_source_geo_near.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/document_source_graph_lookup.cpp | 36
-rw-r--r-- src/mongo/db/pipeline/document_source_lookup.cpp | 59
-rw-r--r-- src/mongo/db/pipeline/document_source_match.cpp | 64
-rw-r--r-- src/mongo/db/pipeline/document_source_merge_cursors.cpp | 11
-rw-r--r-- src/mongo/db/pipeline/document_source_out.cpp | 8
-rw-r--r-- src/mongo/db/pipeline/document_source_redact.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp | 12
-rw-r--r-- src/mongo/db/pipeline/document_source_sort.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/document_source_test.cpp | 70
-rw-r--r-- src/mongo/db/pipeline/document_source_unwind.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/expression.cpp | 172
-rw-r--r-- src/mongo/db/pipeline/expression.h | 16
-rw-r--r-- src/mongo/db/pipeline/expression_test.cpp | 216
-rw-r--r-- src/mongo/db/pipeline/field_path_test.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/lookup_set_cache.h | 8
-rw-r--r-- src/mongo/db/pipeline/lookup_set_cache_test.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/pipeline.cpp | 3
-rw-r--r-- src/mongo/db/pipeline/pipeline_d.cpp | 13
-rw-r--r-- src/mongo/db/pipeline/value.cpp | 2
-rw-r--r-- src/mongo/db/pipeline/value.h | 3
-rw-r--r-- src/mongo/db/pipeline/value_internal.h | 4
-rw-r--r-- src/mongo/db/query/canonical_query_test.cpp | 170
-rw-r--r-- src/mongo/db/query/collation/collation_serializer_test.cpp | 135
-rw-r--r-- src/mongo/db/query/collation/collator_factory_icu.cpp | 136
-rw-r--r-- src/mongo/db/query/collation/collator_factory_icu_decoration.cpp | 5
-rw-r--r-- src/mongo/db/query/collation/collator_factory_icu_test.cpp | 468
-rw-r--r-- src/mongo/db/query/collation/collator_factory_mock.cpp | 2
-rw-r--r-- src/mongo/db/query/count_request_test.cpp | 56
-rw-r--r-- src/mongo/db/query/cursor_response.cpp | 24
-rw-r--r-- src/mongo/db/query/cursor_response_test.cpp | 158
-rw-r--r-- src/mongo/db/query/explain.cpp | 2
-rw-r--r-- src/mongo/db/query/expression_index.cpp | 2
-rw-r--r-- src/mongo/db/query/find.cpp | 12
-rw-r--r-- src/mongo/db/query/get_executor.cpp | 13
-rw-r--r-- src/mongo/db/query/get_executor.h | 10
-rw-r--r-- src/mongo/db/query/getmore_request.cpp | 11
-rw-r--r-- src/mongo/db/query/getmore_request_test.cpp | 49
-rw-r--r-- src/mongo/db/query/index_bounds_builder.h | 2
-rw-r--r-- src/mongo/db/query/index_bounds_builder_test.cpp | 20
-rw-r--r-- src/mongo/db/query/index_bounds_test.cpp | 6
-rw-r--r-- src/mongo/db/query/killcursors_request.cpp | 4
-rw-r--r-- src/mongo/db/query/killcursors_request_test.cpp | 9
-rw-r--r-- src/mongo/db/query/killcursors_response.cpp | 4
-rw-r--r-- src/mongo/db/query/killcursors_response_test.cpp | 42
-rw-r--r-- src/mongo/db/query/lite_parsed_query.cpp | 31
-rw-r--r-- src/mongo/db/query/lite_parsed_query_test.cpp | 6
-rw-r--r-- src/mongo/db/query/parsed_projection_test.cpp | 8
-rw-r--r-- src/mongo/db/query/plan_cache.cpp | 8
-rw-r--r-- src/mongo/db/query/plan_cache.h | 2
-rw-r--r-- src/mongo/db/query/plan_cache_indexability.cpp | 4
-rw-r--r-- src/mongo/db/query/plan_cache_indexability_test.cpp | 4
-rw-r--r-- src/mongo/db/query/plan_cache_test.cpp | 54
-rw-r--r-- src/mongo/db/query/plan_enumerator.cpp | 5
-rw-r--r-- src/mongo/db/query/plan_executor.cpp | 5
-rw-r--r-- src/mongo/db/query/plan_ranker.cpp | 2
-rw-r--r-- src/mongo/db/query/planner_access.cpp | 2
-rw-r--r-- src/mongo/db/query/planner_analysis.cpp | 4
-rw-r--r-- src/mongo/db/query/planner_analysis_test.cpp | 10
-rw-r--r-- src/mongo/db/query/planner_ixselect.cpp | 2
-rw-r--r-- src/mongo/db/query/planner_ixselect_test.cpp | 2
-rw-r--r-- src/mongo/db/query/query_planner.cpp | 21
-rw-r--r-- src/mongo/db/query/query_planner_array_test.cpp | 78
-rw-r--r-- src/mongo/db/query/query_planner_collation_test.cpp | 18
-rw-r--r-- src/mongo/db/query/query_planner_geo_test.cpp | 299
-rw-r--r-- src/mongo/db/query/query_planner_test.cpp | 122
-rw-r--r-- src/mongo/db/query/query_planner_test_fixture.cpp | 8
-rw-r--r-- src/mongo/db/query/query_planner_test_lib.cpp | 2
-rw-r--r-- src/mongo/db/query/query_planner_test_lib.h | 2
-rw-r--r-- src/mongo/db/query/query_planner_text_test.cpp | 123
-rw-r--r-- src/mongo/db/query/query_solution.h | 2
-rw-r--r-- src/mongo/db/query/query_solution_test.cpp | 6
-rw-r--r-- src/mongo/db/query/stage_builder.cpp | 6
-rw-r--r-- src/mongo/db/range_arithmetic.h | 2
-rw-r--r-- src/mongo/db/range_deleter.cpp | 2
-rw-r--r-- src/mongo/db/repair_database.cpp | 13
-rw-r--r-- src/mongo/db/repl/applier_test.cpp | 64
-rw-r--r-- src/mongo/db/repl/base_cloner_test_fixture.cpp | 5
-rw-r--r-- src/mongo/db/repl/base_cloner_test_fixture.h | 2
-rw-r--r-- src/mongo/db/repl/bgsync.cpp | 13
-rw-r--r-- src/mongo/db/repl/check_quorum_for_config_change_test.cpp | 247
-rw-r--r-- src/mongo/db/repl/collection_cloner.cpp | 4
-rw-r--r-- src/mongo/db/repl/collection_cloner.h | 2
-rw-r--r-- src/mongo/db/repl/collection_cloner_test.cpp | 12
-rw-r--r-- src/mongo/db/repl/data_replicator.cpp | 4
-rw-r--r-- src/mongo/db/repl/data_replicator.h | 4
-rw-r--r-- src/mongo/db/repl/data_replicator_test.cpp | 387
-rw-r--r-- src/mongo/db/repl/database_cloner.cpp | 32
-rw-r--r-- src/mongo/db/repl/database_cloner.h | 2
-rw-r--r-- src/mongo/db/repl/database_cloner_test.cpp | 115
-rw-r--r-- src/mongo/db/repl/database_task.h | 2
-rw-r--r-- src/mongo/db/repl/elect_cmd_runner_test.cpp | 41
-rw-r--r-- src/mongo/db/repl/freshness_checker_test.cpp | 284
-rw-r--r-- src/mongo/db/repl/freshness_scanner.cpp | 5
-rw-r--r-- src/mongo/db/repl/freshness_scanner_test.cpp | 39
-rw-r--r-- src/mongo/db/repl/is_master_response.cpp | 27
-rw-r--r-- src/mongo/db/repl/isself.cpp | 6
-rw-r--r-- src/mongo/db/repl/master_slave.cpp | 4
-rw-r--r-- src/mongo/db/repl/member_config_test.cpp | 181
-rw-r--r-- src/mongo/db/repl/old_update_position_args.cpp | 6
-rw-r--r-- src/mongo/db/repl/oplog.cpp | 87
-rw-r--r-- src/mongo/db/repl/oplog.h | 2
-rw-r--r-- src/mongo/db/repl/oplog_fetcher.cpp | 41
-rw-r--r-- src/mongo/db/repl/oplog_fetcher_test.cpp | 21
-rw-r--r-- src/mongo/db/repl/oplog_interface_local.cpp | 4
-rw-r--r-- src/mongo/db/repl/oplog_interface_mock.h | 2
-rw-r--r-- src/mongo/db/repl/optime_extract_test.cpp | 3
-rw-r--r-- src/mongo/db/repl/read_concern_args.cpp | 3
-rw-r--r-- src/mongo/db/repl/read_concern_args_test.cpp | 116
-rw-r--r-- src/mongo/db/repl/repl_set_heartbeat_response.cpp | 18
-rw-r--r-- src/mongo/db/repl/repl_set_heartbeat_response_test.cpp | 12
-rw-r--r-- src/mongo/db/repl/repl_set_html_summary.cpp | 2
-rw-r--r-- src/mongo/db/repl/repl_settings.h | 2
-rw-r--r-- src/mongo/db/repl/replica_set_config.cpp | 86
-rw-r--r-- src/mongo/db/repl/replica_set_config_checks.cpp | 46
-rw-r--r-- src/mongo/db/repl/replica_set_config_checks_test.cpp | 414
-rw-r--r-- src/mongo/db/repl/replica_set_config_test.cpp | 1236
-rw-r--r-- src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 19
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl.cpp | 74
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl.h | 2
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_elect.cpp | 4
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp | 176
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp | 286
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp | 16
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp | 24
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp | 133
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp | 114
-rw-r--r-- src/mongo/db/repl/replication_coordinator_impl_test.cpp | 1711
-rw-r--r-- src/mongo/db/repl/replication_coordinator_mock.cpp | 2
-rw-r--r-- src/mongo/db/repl/replication_coordinator_test_fixture.cpp | 36
-rw-r--r-- src/mongo/db/repl/replication_executor.cpp | 4
-rw-r--r-- src/mongo/db/repl/replication_executor_test.cpp | 111
-rw-r--r-- src/mongo/db/repl/replication_executor_test_fixture.h | 2
-rw-r--r-- src/mongo/db/repl/replset_commands.cpp | 11
-rw-r--r-- src/mongo/db/repl/replset_web_handler.cpp | 2
-rw-r--r-- src/mongo/db/repl/reporter_test.cpp | 20
-rw-r--r-- src/mongo/db/repl/resync.cpp | 2
-rw-r--r-- src/mongo/db/repl/roll_back_local_operations_test.cpp | 10
-rw-r--r-- src/mongo/db/repl/rollback_checker.cpp | 94
-rw-r--r-- src/mongo/db/repl/rollback_source_impl.cpp | 2
-rw-r--r-- src/mongo/db/repl/rs_initialsync.cpp | 7
-rw-r--r-- src/mongo/db/repl/rs_rollback.cpp | 38
-rw-r--r-- src/mongo/db/repl/rs_rollback_test.cpp | 95
-rw-r--r-- src/mongo/db/repl/rs_sync.cpp | 2
-rw-r--r-- src/mongo/db/repl/rs_sync.h | 2
-rw-r--r-- src/mongo/db/repl/storage_interface_impl.cpp | 6
-rw-r--r-- src/mongo/db/repl/sync_source_feedback.cpp | 38
-rw-r--r-- src/mongo/db/repl/sync_tail.cpp | 9
-rw-r--r-- src/mongo/db/repl/sync_tail_test.cpp | 76
-rw-r--r-- src/mongo/db/repl/task_runner.cpp | 2
-rw-r--r-- src/mongo/db/repl/topology_coordinator.h | 2
-rw-r--r-- src/mongo/db/repl/topology_coordinator_impl.cpp | 32
-rw-r--r-- src/mongo/db/repl/topology_coordinator_impl_test.cpp | 1584
-rw-r--r-- src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp | 1456
-rw-r--r-- src/mongo/db/repl/update_position_args.cpp | 2
-rw-r--r-- src/mongo/db/repl/vote_requester_test.cpp | 80
-rw-r--r-- src/mongo/db/s/check_sharding_index_command.cpp | 2
-rw-r--r-- src/mongo/db/s/cleanup_orphaned_cmd.cpp | 4
-rw-r--r-- src/mongo/db/s/collection_metadata.cpp | 3
-rw-r--r-- src/mongo/db/s/collection_metadata_test.cpp | 45
-rw-r--r-- src/mongo/db/s/collection_sharding_state.cpp | 14
-rw-r--r-- src/mongo/db/s/collection_sharding_state_test.cpp | 2
-rw-r--r-- src/mongo/db/s/metadata_loader_test.cpp | 3
-rw-r--r-- src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp | 22
-rw-r--r-- src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp | 2
-rw-r--r-- src/mongo/db/s/migration_destination_manager.cpp | 43
-rw-r--r-- src/mongo/db/s/migration_session_id_test.cpp | 5
-rw-r--r-- src/mongo/db/s/migration_source_manager.cpp | 127
-rw-r--r-- src/mongo/db/s/move_chunk_command.cpp | 7
-rw-r--r-- src/mongo/db/s/move_timing_helper.cpp | 4
-rw-r--r-- src/mongo/db/s/sharding_initialization_mongod.cpp | 19
-rw-r--r-- src/mongo/db/s/sharding_state.cpp | 17
-rw-r--r-- src/mongo/db/s/sharding_state_recovery.cpp | 2
-rw-r--r-- src/mongo/db/s/sharding_state_test.cpp | 10
-rw-r--r-- src/mongo/db/s/split_chunk_command.cpp | 7
-rw-r--r-- src/mongo/db/s/start_chunk_clone_request_test.cpp | 7
-rw-r--r-- src/mongo/db/s/type_shard_identity_test.cpp | 18
-rw-r--r-- src/mongo/db/server_options_helpers.cpp | 220
-rw-r--r-- src/mongo/db/server_parameters.h | 2
-rw-r--r-- src/mongo/db/service_context_d.cpp | 21
-rw-r--r-- src/mongo/db/service_context_noop.cpp | 2
-rw-r--r-- src/mongo/db/sorter/sorter.cpp | 34
-rw-r--r-- src/mongo/db/sorter/sorter.h | 6
-rw-r--r-- src/mongo/db/sorter/sorter_test.cpp | 6
-rw-r--r-- src/mongo/db/startup_warnings_common.cpp | 3
-rw-r--r-- src/mongo/db/startup_warnings_mongod.cpp | 16
-rw-r--r-- src/mongo/db/stats/counters.h | 4
-rw-r--r-- src/mongo/db/stats/timer_stats_test.cpp | 2
-rw-r--r-- src/mongo/db/storage/devnull/devnull_init.cpp | 2
-rw-r--r-- src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp | 2
-rw-r--r-- src/mongo/db/storage/key_string.cpp | 21
-rw-r--r-- src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp | 5
-rw-r--r-- src/mongo/db/storage/kv/kv_engine_test_harness.cpp | 2
-rw-r--r-- src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp | 5
-rw-r--r-- src/mongo/db/storage/mmap_v1/catalog/hashtab.h | 2
-rw-r--r-- src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp | 2
-rw-r--r-- src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp | 3
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file.cpp8
-rw-r--r--src/mongo/db/storage/mmap_v1/data_file_sync.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/dur.cpp13
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_commitjob.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_journal.cpp10
-rw-r--r--src/mongo/db/storage/mmap_v1/dur_recover.cpp13
-rw-r--r--src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/durop.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/durop.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/extent.cpp10
-rw-r--r--src/mongo/db/storage/mmap_v1/file_allocator.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/logfile.cpp8
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap.cpp4
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_posix.cpp11
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp21
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_engine.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp6
-rw-r--r--src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp12
-rw-r--r--src/mongo/db/storage/mmap_v1/record_access_tracker.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp25
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_base.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp17
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h2
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp9
-rw-r--r--src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp2
-rw-r--r--src/mongo/db/storage/mmap_v1/repair_database.cpp13
-rw-r--r--src/mongo/db/storage/paths.cpp8
-rw-r--r--src/mongo/db/storage/paths.h4
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp58
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp33
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp19
-rw-r--r--src/mongo/db/storage/sorted_data_interface_test_harness.cpp8
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_posix.cpp21
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_test.cpp4
-rw-r--r--src/mongo/db/storage/storage_engine_lock_file_windows.cpp11
-rw-r--r--src/mongo/db/storage/storage_engine_metadata.cpp29
-rw-r--r--src/mongo/db/storage/storage_engine_metadata_test.cpp2
-rw-r--r--src/mongo/db/storage/storage_init.cpp8
-rw-r--r--src/mongo/db/storage/storage_options.cpp3
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp72
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp11
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp3
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp8
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp16
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp6
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp7
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp6
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp4
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp33
-rw-r--r--src/mongo/db/ttl.cpp2
-rw-r--r--src/mongo/db/update_index_data.cpp2
-rw-r--r--src/mongo/db/write_concern.cpp8
-rw-r--r--src/mongo/dbtests/basictests.cpp2
-rw-r--r--src/mongo/dbtests/chunktests.cpp5
-rw-r--r--src/mongo/dbtests/commandtests.cpp7
-rw-r--r--src/mongo/dbtests/dbtests.cpp6
-rw-r--r--src/mongo/dbtests/directclienttests.cpp5
-rw-r--r--src/mongo/dbtests/framework.cpp2
-rw-r--r--src/mongo/dbtests/framework_options.cpp28
-rw-r--r--src/mongo/dbtests/framework_options_init.cpp2
-rw-r--r--src/mongo/dbtests/index_access_method_test.cpp4
-rw-r--r--src/mongo/dbtests/indexcatalogtests.cpp3
-rw-r--r--src/mongo/dbtests/indexupdatetests.cpp138
-rw-r--r--src/mongo/dbtests/jsobjtests.cpp326
-rw-r--r--src/mongo/dbtests/jsontests.cpp12
-rw-r--r--src/mongo/dbtests/jstests.cpp3
-rw-r--r--src/mongo/dbtests/mock/mock_remote_db_server.cpp2
-rw-r--r--src/mongo/dbtests/mock/mock_replica_set.h4
-rw-r--r--src/mongo/dbtests/mock_dbclient_conn_test.cpp12
-rw-r--r--src/mongo/dbtests/multikey_paths_test.cpp40
-rw-r--r--src/mongo/dbtests/namespacetests.cpp4
-rw-r--r--src/mongo/dbtests/oplogstarttests.cpp6
-rw-r--r--src/mongo/dbtests/plan_ranking.cpp28
-rw-r--r--src/mongo/dbtests/query_stage_count.cpp11
-rw-r--r--src/mongo/dbtests/query_stage_ensure_sorted.cpp2
-rw-r--r--src/mongo/dbtests/query_stage_subplan.cpp3
-rw-r--r--src/mongo/dbtests/querytests.cpp57
-rw-r--r--src/mongo/dbtests/replica_set_monitor_test.cpp2
-rw-r--r--src/mongo/dbtests/repltests.cpp21
-rw-r--r--src/mongo/dbtests/sort_key_generator_test.cpp2
-rw-r--r--src/mongo/dbtests/threadedtests.cpp2
-rw-r--r--src/mongo/dbtests/updatetests.cpp59
-rw-r--r--src/mongo/dbtests/validate_tests.cpp110
-rw-r--r--src/mongo/executor/async_mock_stream_factory.cpp109
-rw-r--r--src/mongo/executor/async_mock_stream_factory.h2
-rw-r--r--src/mongo/executor/async_stream_test.cpp9
-rw-r--r--src/mongo/executor/connection_pool.cpp79
-rw-r--r--src/mongo/executor/connection_pool.h2
-rw-r--r--src/mongo/executor/connection_pool_asio.cpp16
-rw-r--r--src/mongo/executor/connection_pool_asio.h4
-rw-r--r--src/mongo/executor/connection_pool_test.cpp6
-rw-r--r--src/mongo/executor/connection_pool_test_fixture.cpp12
-rw-r--r--src/mongo/executor/downconvert_find_and_getmore_commands.cpp12
-rw-r--r--src/mongo/executor/network_interface_asio.cpp3
-rw-r--r--src/mongo/executor/network_interface_asio.h2
-rw-r--r--src/mongo/executor/network_interface_asio_auth.cpp7
-rw-r--r--src/mongo/executor/network_interface_asio_command.cpp71
-rw-r--r--src/mongo/executor/network_interface_asio_connect.cpp7
-rw-r--r--src/mongo/executor/network_interface_asio_integration_test.cpp112
-rw-r--r--src/mongo/executor/network_interface_asio_operation.cpp12
-rw-r--r--src/mongo/executor/network_interface_asio_test.cpp169
-rw-r--r--src/mongo/executor/network_interface_asio_test_utils.h33
-rw-r--r--src/mongo/executor/network_interface_mock.cpp44
-rw-r--r--src/mongo/executor/network_interface_mock_test.cpp58
-rw-r--r--src/mongo/executor/network_interface_thread_pool.cpp11
-rw-r--r--src/mongo/executor/network_interface_thread_pool_test.cpp2
-rw-r--r--src/mongo/executor/remote_command_response.h2
-rw-r--r--src/mongo/executor/task_executor.h4
-rw-r--r--src/mongo/executor/task_executor_test_common.cpp50
-rw-r--r--src/mongo/executor/thread_pool_task_executor.cpp32
-rw-r--r--src/mongo/executor/thread_pool_task_executor.h2
-rw-r--r--src/mongo/executor/thread_pool_task_executor_test.cpp25
-rw-r--r--src/mongo/executor/thread_pool_task_executor_test_fixture.h2
-rw-r--r--src/mongo/logger/console_test.cpp2
-rw-r--r--src/mongo/logger/log_component.cpp6
-rw-r--r--src/mongo/logger/log_domain-impl.h3
-rw-r--r--src/mongo/logger/log_test.cpp3
-rw-r--r--src/mongo/logger/logger.h2
-rw-r--r--src/mongo/logger/parse_log_component_settings.cpp18
-rw-r--r--src/mongo/logger/parse_log_component_settings_test.cpp5
-rw-r--r--src/mongo/logger/ramlog.cpp2
-rw-r--r--src/mongo/logger/ramlog.h2
-rw-r--r--src/mongo/logger/rotatable_file_writer.cpp35
-rw-r--r--src/mongo/platform/decimal128.cpp2
-rw-r--r--src/mongo/platform/posix_fadvise.cpp5
-rw-r--r--src/mongo/platform/process_id.cpp2
-rw-r--r--src/mongo/platform/random.cpp10
-rw-r--r--src/mongo/platform/shared_library_posix.cpp3
-rw-r--r--src/mongo/platform/shared_library_windows.cpp12
-rw-r--r--src/mongo/platform/strcasestr.cpp5
-rw-r--r--src/mongo/platform/strnlen.cpp2
-rw-r--r--src/mongo/platform/windows_basic.h2
-rw-r--r--src/mongo/rpc/command_request.cpp17
-rw-r--r--src/mongo/rpc/command_request_test.cpp2
-rw-r--r--src/mongo/rpc/legacy_reply.cpp11
-rw-r--r--src/mongo/rpc/legacy_reply.h2
-rw-r--r--src/mongo/rpc/legacy_request_builder.cpp2
-rw-r--r--src/mongo/rpc/metadata.cpp2
-rw-r--r--src/mongo/rpc/metadata/audit_metadata.h4
-rw-r--r--src/mongo/rpc/metadata/config_server_metadata.cpp4
-rw-r--r--src/mongo/rpc/metadata/repl_set_metadata_test.cpp10
-rw-r--r--src/mongo/rpc/metadata/server_selection_metadata.cpp10
-rw-r--r--src/mongo/rpc/metadata/server_selection_metadata_test.cpp41
-rw-r--r--src/mongo/rpc/metadata/sharding_metadata_test.cpp49
-rw-r--r--src/mongo/rpc/object_check.h2
-rw-r--r--src/mongo/rpc/object_check_test.cpp5
-rw-r--r--src/mongo/rpc/protocol.cpp6
-rw-r--r--src/mongo/rpc/protocol_test.cpp7
-rw-r--r--src/mongo/rpc/reply_builder_test.cpp6
-rw-r--r--src/mongo/s/balancer/balancer.cpp21
-rw-r--r--src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp3
-rw-r--r--src/mongo/s/balancer/balancer_configuration.h2
-rw-r--r--src/mongo/s/balancer/balancer_configuration_test.cpp52
-rw-r--r--src/mongo/s/catalog/dist_lock_catalog_mock.cpp37
-rw-r--r--src/mongo/s/catalog/dist_lock_manager_mock.cpp24
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp82
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp42
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp20
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp116
-rw-r--r--src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp7
-rw-r--r--src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp30
-rw-r--r--src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp79
-rw-r--r--src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp4
-rw-r--r--src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp115
-rw-r--r--src/mongo/s/catalog/type_changelog_test.cpp82
-rw-r--r--src/mongo/s/catalog/type_chunk_test.cpp100
-rw-r--r--src/mongo/s/catalog/type_collection_test.cpp37
-rw-r--r--src/mongo/s/catalog/type_config_version_test.cpp8
-rw-r--r--src/mongo/s/catalog/type_locks_test.cpp94
-rw-r--r--src/mongo/s/catalog/type_mongos_test.cpp57
-rw-r--r--src/mongo/s/catalog/type_shard_test.cpp3
-rw-r--r--src/mongo/s/chunk.cpp9
-rw-r--r--src/mongo/s/chunk_diff.h4
-rw-r--r--src/mongo/s/chunk_diff_test.cpp2
-rw-r--r--src/mongo/s/chunk_manager.cpp27
-rw-r--r--src/mongo/s/chunk_manager_targeter.cpp5
-rw-r--r--src/mongo/s/chunk_manager_targeter_test.cpp3
-rw-r--r--src/mongo/s/chunk_version.cpp2
-rw-r--r--src/mongo/s/client/shard_factory.cpp2
-rw-r--r--src/mongo/s/client/shard_factory.h4
-rw-r--r--src/mongo/s/client/shard_local_test.cpp2
-rw-r--r--src/mongo/s/client/shard_registry.cpp2
-rw-r--r--src/mongo/s/client/shard_registry.h2
-rw-r--r--src/mongo/s/client/shard_remote.cpp2
-rw-r--r--src/mongo/s/client/sharding_network_connection_hook.cpp2
-rw-r--r--src/mongo/s/client/version_manager.cpp30
-rw-r--r--src/mongo/s/cluster_write.cpp3
-rw-r--r--src/mongo/s/commands/cluster_count_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_current_op.cpp8
-rw-r--r--src/mongo/s/commands/cluster_explain.cpp6
-rw-r--r--src/mongo/s/commands/cluster_find_and_modify_cmd.cpp8
-rw-r--r--src/mongo/s/commands/cluster_find_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_get_last_error_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_getmore_cmd.cpp4
-rw-r--r--src/mongo/s/commands/cluster_kill_op.cpp4
-rw-r--r--src/mongo/s/commands/cluster_map_reduce_cmd.cpp20
-rw-r--r--src/mongo/s/commands/cluster_move_chunk_cmd.cpp3
-rw-r--r--src/mongo/s/commands/cluster_move_primary_cmd.cpp17
-rw-r--r--src/mongo/s/commands/cluster_pipeline_cmd.cpp18
-rw-r--r--src/mongo/s/commands/cluster_plan_cache_cmd.cpp4
-rw-r--r--src/mongo/s/commands/cluster_remove_shard_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_split_cmd.cpp2
-rw-r--r--src/mongo/s/commands/cluster_user_management_commands.cpp20
-rw-r--r--src/mongo/s/commands/commands_public.cpp23
-rw-r--r--src/mongo/s/commands/request.cpp2
-rw-r--r--src/mongo/s/commands/strategy.cpp18
-rw-r--r--src/mongo/s/commands/strategy.h2
-rw-r--r--src/mongo/s/config.cpp6
-rw-r--r--src/mongo/s/grid.cpp2
-rw-r--r--src/mongo/s/mongos_options.cpp24
-rw-r--r--src/mongo/s/mongos_options_init.cpp2
-rw-r--r--src/mongo/s/ns_targeter.h4
-rw-r--r--src/mongo/s/query/async_results_merger.cpp17
-rw-r--r--src/mongo/s/query/async_results_merger_test.cpp9
-rw-r--r--src/mongo/s/query/cluster_cursor_manager.cpp5
-rw-r--r--src/mongo/s/query/cluster_cursor_manager_test.cpp2
-rw-r--r--src/mongo/s/query/cluster_find.cpp19
-rw-r--r--src/mongo/s/query/router_stage_merge.h4
-rw-r--r--src/mongo/s/request_types/add_shard_request_test.cpp51
-rw-r--r--src/mongo/s/server.cpp18
-rw-r--r--src/mongo/s/set_shard_version_request_test.cpp255
-rw-r--r--src/mongo/s/shard_key_pattern.cpp5
-rw-r--r--src/mongo/s/shard_key_pattern_test.cpp24
-rw-r--r--src/mongo/s/shard_util.cpp2
-rw-r--r--src/mongo/s/sharding_initialization.cpp8
-rw-r--r--src/mongo/s/sharding_test_fixture.cpp5
-rw-r--r--src/mongo/s/sharding_test_fixture.h2
-rw-r--r--src/mongo/s/stale_exception.h29
-rw-r--r--src/mongo/s/write_ops/batch_downconvert_test.cpp9
-rw-r--r--src/mongo/s/write_ops/batch_write_exec_test.cpp2
-rw-r--r--src/mongo/s/write_ops/batch_write_op.cpp2
-rw-r--r--src/mongo/s/write_ops/batch_write_op_test.cpp5
-rw-r--r--src/mongo/s/write_ops/batched_command_request.h2
-rw-r--r--src/mongo/s/write_ops/batched_command_request_test.cpp21
-rw-r--r--src/mongo/s/write_ops/batched_command_response.h2
-rw-r--r--src/mongo/s/write_ops/batched_command_response_test.cpp17
-rw-r--r--src/mongo/s/write_ops/batched_delete_request_test.cpp8
-rw-r--r--src/mongo/s/write_ops/batched_insert_request.cpp4
-rw-r--r--src/mongo/s/write_ops/batched_insert_request_test.cpp8
-rw-r--r--src/mongo/s/write_ops/batched_update_request_test.cpp24
-rw-r--r--src/mongo/s/write_ops/write_op.h2
-rw-r--r--src/mongo/scripting/bson_template_evaluator.h5
-rw-r--r--src/mongo/scripting/bson_template_evaluator_test.cpp50
-rw-r--r--src/mongo/scripting/engine.cpp4
-rw-r--r--src/mongo/scripting/engine.h2
-rw-r--r--src/mongo/scripting/mozjs/bindata.cpp2
-rw-r--r--src/mongo/scripting/mozjs/db.cpp2
-rw-r--r--src/mongo/scripting/mozjs/dbcollection.cpp2
-rw-r--r--src/mongo/scripting/mozjs/implscope.cpp2
-rw-r--r--src/mongo/scripting/mozjs/implscope.h2
-rw-r--r--src/mongo/scripting/mozjs/jscustomallocator.cpp15
-rw-r--r--src/mongo/scripting/mozjs/jsthread.cpp2
-rw-r--r--src/mongo/scripting/mozjs/maxkey.cpp2
-rw-r--r--src/mongo/scripting/mozjs/minkey.cpp2
-rw-r--r--src/mongo/scripting/mozjs/mongo.cpp15
-rw-r--r--src/mongo/scripting/mozjs/objectwrapper.cpp7
-rw-r--r--src/mongo/scripting/mozjs/proxyscope.cpp8
-rw-r--r--src/mongo/scripting/mozjs/timestamp.cpp4
-rw-r--r--src/mongo/scripting/mozjs/wrapconstrainedmethod.h9
-rw-r--r--src/mongo/shell/bench.cpp33
-rw-r--r--src/mongo/shell/bridge.js95
-rw-r--r--src/mongo/shell/bulk_api.js160
-rw-r--r--src/mongo/shell/collection.js108
-rw-r--r--src/mongo/shell/crud_api.js51
-rw-r--r--src/mongo/shell/db.js75
-rw-r--r--src/mongo/shell/dbshell.cpp3
-rw-r--r--src/mongo/shell/explain_query.js4
-rw-r--r--src/mongo/shell/explainable.js19
-rw-r--r--src/mongo/shell/linenoise.cpp16
-rw-r--r--src/mongo/shell/linenoise_utf8.cpp2
-rw-r--r--src/mongo/shell/mk_wcwidth.cpp375
-rw-r--r--src/mongo/shell/mongo.js4
-rw-r--r--src/mongo/shell/mr.js6
-rw-r--r--src/mongo/shell/query.js21
-rw-r--r--src/mongo/shell/replsettest.js61
-rw-r--r--src/mongo/shell/servers.js36
-rw-r--r--src/mongo/shell/shardingtest.js139
-rw-r--r--src/mongo/shell/shell_options.cpp93
-rw-r--r--src/mongo/shell/shell_utils.cpp4
-rw-r--r--src/mongo/shell/shell_utils_launcher.cpp21
-rw-r--r--src/mongo/shell/shell_utils_launcher.h2
-rw-r--r--src/mongo/shell/types.js5
-rw-r--r--src/mongo/shell/utils.js102
-rw-r--r--src/mongo/shell/utils_auth.js12
-rw-r--r--src/mongo/shell/utils_sh.js150
-rw-r--r--src/mongo/tools/bridge.cpp6
-rw-r--r--src/mongo/tools/bridge_commands.cpp2
-rw-r--r--src/mongo/tools/mongobridge_options.cpp3
-rw-r--r--src/mongo/tools/mongobridge_options_init.cpp2
-rw-r--r--src/mongo/tools/sniffer.cpp5
-rw-r--r--src/mongo/unittest/death_test.h61
-rw-r--r--src/mongo/unittest/temp_dir.cpp4
-rw-r--r--src/mongo/unittest/temp_dir_test.cpp2
-rw-r--r--src/mongo/unittest/unittest.cpp4
-rw-r--r--src/mongo/unittest/unittest.h38
-rw-r--r--src/mongo/unittest/unittest_test.cpp2
-rw-r--r--src/mongo/util/admin_access.h2
-rw-r--r--src/mongo/util/assert_util.cpp11
-rw-r--r--src/mongo/util/assert_util.h2
-rw-r--r--src/mongo/util/background_thread_clock_source.h4
-rw-r--r--src/mongo/util/base64.h2
-rw-r--r--src/mongo/util/cmdline_utils/censor_cmdline_test.cpp93
-rw-r--r--src/mongo/util/concurrency/rwlockimpl.cpp2
-rw-r--r--src/mongo/util/concurrency/thread_pool.cpp32
-rw-r--r--src/mongo/util/concurrency/thread_pool_test_common.cpp63
-rw-r--r--src/mongo/util/concurrency/ticketholder.cpp3
-rw-r--r--src/mongo/util/debugger.cpp2
-rw-r--r--src/mongo/util/descriptive_stats-inl.h3
-rw-r--r--src/mongo/util/duration.h3
-rw-r--r--src/mongo/util/duration_test.cpp2
-rw-r--r--src/mongo/util/exception_filter_win32.cpp2
-rw-r--r--src/mongo/util/fail_point_service.cpp5
-rw-r--r--src/mongo/util/file.cpp24
-rw-r--r--src/mongo/util/intrusive_counter.cpp3
-rw-r--r--src/mongo/util/intrusive_counter.h2
-rw-r--r--src/mongo/util/itoa_test.cpp2
-rw-r--r--src/mongo/util/log.cpp2
-rw-r--r--src/mongo/util/mongoutils/str.h2
-rw-r--r--src/mongo/util/net/asio_message_port.cpp4
-rw-r--r--src/mongo/util/net/hostandport.cpp11
-rw-r--r--src/mongo/util/net/hostname_canonicalization.cpp4
-rw-r--r--src/mongo/util/net/listen.cpp16
-rw-r--r--src/mongo/util/net/message.cpp2
-rw-r--r--src/mongo/util/net/message_port.cpp4
-rw-r--r--src/mongo/util/net/sock.cpp12
-rw-r--r--src/mongo/util/net/sock.h2
-rw-r--r--src/mongo/util/net/sockaddr.cpp10
-rw-r--r--src/mongo/util/net/sockaddr.h2
-rw-r--r--src/mongo/util/net/ssl_manager.cpp25
-rw-r--r--src/mongo/util/net/ssl_options.cpp80
-rw-r--r--src/mongo/util/ntservice.cpp12
-rw-r--r--src/mongo/util/options_parser/environment_test.cpp13
-rw-r--r--src/mongo/util/options_parser/options_parser.cpp4
-rw-r--r--src/mongo/util/options_parser/options_parser_test.cpp24
-rw-r--r--src/mongo/util/platform_init.cpp4
-rw-r--r--src/mongo/util/processinfo.cpp7
-rw-r--r--src/mongo/util/processinfo_freebsd.cpp2
-rw-r--r--src/mongo/util/processinfo_linux.cpp8
-rw-r--r--src/mongo/util/processinfo_openbsd.cpp2
-rw-r--r--src/mongo/util/processinfo_osx.cpp12
-rw-r--r--src/mongo/util/processinfo_solaris.cpp16
-rw-r--r--src/mongo/util/processinfo_test.cpp2
-rw-r--r--src/mongo/util/processinfo_windows.cpp2
-rw-r--r--src/mongo/util/progress_meter_test.cpp2
-rw-r--r--src/mongo/util/safe_num-inl.h10
-rw-r--r--src/mongo/util/safe_num_test.cpp4
-rw-r--r--src/mongo/util/signal_handlers_synchronous.cpp2
-rw-r--r--src/mongo/util/signal_handlers_synchronous_test.cpp4
-rw-r--r--src/mongo/util/signal_win32.cpp2
-rw-r--r--src/mongo/util/string_map_test.cpp2
-rw-r--r--src/mongo/util/stringutils_test.cpp2
-rw-r--r--src/mongo/util/tcmalloc_set_parameter.cpp15
-rw-r--r--src/mongo/util/text.cpp3
-rw-r--r--src/mongo/util/text.h2
-rw-r--r--src/mongo/util/time_support.cpp10
-rw-r--r--src/mongo/util/time_support.h2
-rw-r--r--src/mongo/util/unowned_ptr.h12
-rw-r--r--src/mongo/util/winutil.h2
1523 files changed, 21580 insertions, 19327 deletions
diff --git a/jstests/aggregation/bugs/cond.js b/jstests/aggregation/bugs/cond.js
index 2b4fa8ff16e..c48c6b724f0 100644
--- a/jstests/aggregation/bugs/cond.js
+++ b/jstests/aggregation/bugs/cond.js
@@ -68,16 +68,15 @@ t.save({noonSense: 'pm', mealCombined: 'yes'});
t.save({noonSense: 'pm', mealCombined: 'no'});
assert.eq(['breakfast', 'brunch', 'linner', 'dinner'],
t.aggregate({
- $project: {
- a: {
- $cond: [
- {$eq: ['$noonSense', 'am']},
- {$cond: [{$eq: ['$mealCombined', 'yes']}, 'brunch', 'breakfast']},
- {$cond: [{$eq: ['$mealCombined', 'yes']}, 'linner', 'dinner']}
- ]
- }
- }
- })
- .map(function(x) {
- return x.a;
- }));
+ $project: {
+ a: {
+ $cond: [
+ {$eq: ['$noonSense', 'am']},
+ {$cond: [{$eq: ['$mealCombined', 'yes']}, 'brunch', 'breakfast']},
+ {$cond: [{$eq: ['$mealCombined', 'yes']}, 'linner', 'dinner']}
+ ]
+ }
+ }
+ }).map(function(x) {
+ return x.a;
+ }));
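For context, the hunk above only reflows a $cond expression: $cond takes [<if>, <then>, <else>], evaluates the boolean expression, and returns one branch, so nesting it yields multi-way logic. A minimal shell sketch (collection and field names are illustrative, not part of this commit):

// Pick one of two labels per document; nest another $cond for four-way logic.
db.meals.aggregate([{
    $project: {
        label: {$cond: [{$eq: ['$noonSense', 'am']}, 'morning', 'evening']}
    }
}]);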
diff --git a/jstests/aggregation/bugs/firstlast.js b/jstests/aggregation/bugs/firstlast.js
index ca9e963f6ca..54f0f8be0e9 100644
--- a/jstests/aggregation/bugs/firstlast.js
+++ b/jstests/aggregation/bugs/firstlast.js
@@ -104,17 +104,5 @@ assertFirstLast([1, 2], [6], [], '$b.c');
t.drop();
t.save({a: 1, b: 1});
t.save({a: 1, b: 2});
-assertFirstLast(1,
- 0,
- [],
- {
-$mod:
- ['$b', 2]
- });
-assertFirstLast(0,
- 1,
- [],
- {
-$mod:
- [{$add: ['$b', 1]}, 2]
- });
+assertFirstLast(1, 0, [], {$mod: ['$b', 2]});
+assertFirstLast(0, 1, [], {$mod: [{$add: ['$b', 1]}, 2]});
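The $first and $last accumulators return the first and last value seen per group, in the order documents reach $group, and the group key may be any expression (here {$mod: ['$b', 2]}). A minimal sketch with illustrative names:

// Group by parity of b; the $sort fixes the order $first/$last observe.
db.nums.aggregate([
    {$sort: {b: 1}},
    {$group: {_id: {$mod: ['$b', 2]}, first: {$first: '$b'}, last: {$last: '$b'}}}
]);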
diff --git a/jstests/aggregation/bugs/match.js b/jstests/aggregation/bugs/match.js
index fbc467812d7..2ee646dac7e 100644
--- a/jstests/aggregation/bugs/match.js
+++ b/jstests/aggregation/bugs/match.js
@@ -13,9 +13,7 @@ identityProjection = {
/** Assert that an aggregation generated the expected error. */
function assertError(expectedCode, matchSpec) {
- matchStage = {
- $match: matchSpec
- };
+ matchStage = {$match: matchSpec};
// Check where matching is folded in to DocumentSourceCursor.
assertErrorCode(t, [matchStage], expectedCode);
// Check where matching is not folded in to DocumentSourceCursor.
@@ -41,9 +39,7 @@ function assertResults(expectedResults, matchSpec) {
if (expectedResults) {
assertEqualResultsUnordered(expectedResults, findResults);
}
- matchStage = {
- $match: matchSpec
- };
+ matchStage = {$match: matchSpec};
// Check where matching is folded in to DocumentSourceCursor.
assertEqualResultsUnordered(findResults, t.aggregate(matchStage).toArray());
// Check where matching is not folded in to DocumentSourceCursor.
@@ -176,11 +172,7 @@ function checkMatchResults(indexed) {
// $and
assertResults([{_id: 1, a: 2}], {$and: [{a: 2}, {_id: 1}]});
- assertResults([],
- {
- $and:
- [{a: 1}, {_id: 1}]
- });
+ assertResults([], {$and: [{a: 1}, {_id: 1}]});
assertResults([{_id: 1, a: 2}, {_id: 2, a: 3}],
{$and: [{$or: [{_id: 1}, {a: 3}]}, {$or: [{_id: 2}, {a: 2}]}]});
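$match accepts the same predicate language as find(), which is why this test compares aggregation output against find results both with and without the match folded into the initial cursor. A sketch of the equivalence being asserted (illustrative collection name):

// Both should return the same documents.
db.c.find({$and: [{a: 2}, {_id: 1}]}).toArray();
db.c.aggregate([{$match: {$and: [{a: 2}, {_id: 1}]}}]).toArray();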
diff --git a/jstests/aggregation/bugs/server10176.js b/jstests/aggregation/bugs/server10176.js
index 5a56585265d..b04db0c4319 100644
--- a/jstests/aggregation/bugs/server10176.js
+++ b/jstests/aggregation/bugs/server10176.js
@@ -33,28 +33,27 @@ load('jstests/aggregation/extras/utils.js');
// valid use of $abs: numbers become positive, null/undefined/nonexistent become null
var results = coll.aggregate([{$project: {a: {$abs: "$a"}}}]).toArray();
- assert.eq(results,
- [
- {_id: 0, a: 5},
- {_id: 1, a: 5},
- {_id: 2, a: 5.5},
- {_id: 3, a: 5.5},
- {_id: 4, a: 5},
- {_id: 5, a: 5},
- {_id: 6, a: NumberLong("5")},
- {_id: 7, a: NumberLong("5")},
- {_id: 8, a: 0},
- {_id: 9, a: 0},
- {_id: 10, a: 0},
- {_id: 11, a: NumberLong(Math.pow(2, 31))},
- {_id: 12, a: Math.pow(2, 31)},
- {_id: 13, a: NumberLong("1152921504606846977")},
- {_id: 14, a: NumberLong("1152921504606846977")},
- {_id: 15, a: null},
- {_id: 16, a: null},
- {_id: 17, a: NaN},
- {_id: 18, a: null},
- ]);
+ assert.eq(results, [
+ {_id: 0, a: 5},
+ {_id: 1, a: 5},
+ {_id: 2, a: 5.5},
+ {_id: 3, a: 5.5},
+ {_id: 4, a: 5},
+ {_id: 5, a: 5},
+ {_id: 6, a: NumberLong("5")},
+ {_id: 7, a: NumberLong("5")},
+ {_id: 8, a: 0},
+ {_id: 9, a: 0},
+ {_id: 10, a: 0},
+ {_id: 11, a: NumberLong(Math.pow(2, 31))},
+ {_id: 12, a: Math.pow(2, 31)},
+ {_id: 13, a: NumberLong("1152921504606846977")},
+ {_id: 14, a: NumberLong("1152921504606846977")},
+ {_id: 15, a: null},
+ {_id: 16, a: null},
+ {_id: 17, a: NaN},
+ {_id: 18, a: null},
+ ]);
// Invalid
// using $abs on string
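As the reformatted expectations above show, $abs negates negative numbers, keeps the numeric type where it fits (NumberLong in, NumberLong out), maps null, undefined, and missing fields to null, and passes NaN through; a string argument is an error. A minimal sketch with illustrative values:

db.t.aggregate([{$project: {a: {$abs: -5}}}]);              // a: 5
db.t.aggregate([{$project: {a: {$abs: NumberLong(-5)}}}]);  // a: NumberLong(5)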
diff --git a/jstests/aggregation/bugs/server11118.js b/jstests/aggregation/bugs/server11118.js
index da4e9862bad..3d2813ed8ae 100644
--- a/jstests/aggregation/bugs/server11118.js
+++ b/jstests/aggregation/bugs/server11118.js
@@ -9,9 +9,13 @@ function testFormat(date, formatStr, expectedStr) {
db.dates.drop();
db.dates.insert({date: date});
- var res = db.dates.aggregate([{
- $project: {_id: 0, formatted: {$dateToString: {format: formatStr, date: "$date"}}}
- }]).toArray();
+ var res =
+ db.dates
+ .aggregate([{
+ $project:
+ {_id: 0, formatted: {$dateToString: {format: formatStr, date: "$date"}}}
+ }])
+ .toArray();
assert.eq(res[0].formatted, expectedStr);
}
@@ -36,18 +40,16 @@ function testDateValueError(dateVal, errCode) {
var now = ISODate();
// Use all modifiers we can test with js provided function
-testFormat(now,
- "%%-%Y-%m-%d-%H-%M-%S-%L",
- [
- "%",
- now.getUTCFullYear().zeroPad(4),
- (now.getUTCMonth() + 1).zeroPad(2),
- now.getUTCDate().zeroPad(2),
- now.getUTCHours().zeroPad(2),
- now.getUTCMinutes().zeroPad(2),
- now.getUTCSeconds().zeroPad(2),
- now.getUTCMilliseconds().zeroPad(3)
- ].join("-"));
+testFormat(now, "%%-%Y-%m-%d-%H-%M-%S-%L", [
+ "%",
+ now.getUTCFullYear().zeroPad(4),
+ (now.getUTCMonth() + 1).zeroPad(2),
+ now.getUTCDate().zeroPad(2),
+ now.getUTCHours().zeroPad(2),
+ now.getUTCMinutes().zeroPad(2),
+ now.getUTCSeconds().zeroPad(2),
+ now.getUTCMilliseconds().zeroPad(3)
+].join("-"));
// Padding tests
var padme = ISODate("2001-02-03T04:05:06.007Z");
@@ -62,20 +64,18 @@ testFormat(padme, "%S", padme.getUTCSeconds().zeroPad(2));
testFormat(padme, "%L", padme.getUTCMilliseconds().zeroPad(3));
// no space and multiple characters between modifiers
-testFormat(now,
- "%d%d***%d***%d**%d*%d",
- [
- now.getUTCDate().zeroPad(2),
- now.getUTCDate().zeroPad(2),
- "***",
- now.getUTCDate().zeroPad(2),
- "***",
- now.getUTCDate().zeroPad(2),
- "**",
- now.getUTCDate().zeroPad(2),
- "*",
- now.getUTCDate().zeroPad(2)
- ].join(""));
+testFormat(now, "%d%d***%d***%d**%d*%d", [
+ now.getUTCDate().zeroPad(2),
+ now.getUTCDate().zeroPad(2),
+ "***",
+ now.getUTCDate().zeroPad(2),
+ "***",
+ now.getUTCDate().zeroPad(2),
+ "**",
+ now.getUTCDate().zeroPad(2),
+ "*",
+ now.getUTCDate().zeroPad(2)
+].join(""));
// JS doesn't have equivalents of these format specifiers
testFormat(ISODate('1999-01-02 03:04:05.006Z'), "%U-%w-%j", "00-7-002");
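$dateToString renders a BSON date with printf-style specifiers: %Y year, %m month, %d day, %H/%M/%S time fields, %L milliseconds, %% a literal percent, plus %U (week), %w (day of week), and %j (day of year), which the js Date API cannot easily mirror. A minimal sketch (collection name illustrative):

db.dates.aggregate([{
    $project: {formatted: {$dateToString: {format: '%Y-%m-%dT%H:%M:%S.%LZ', date: '$date'}}}
}]);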
diff --git a/jstests/aggregation/bugs/server11675.js b/jstests/aggregation/bugs/server11675.js
index 709120c27ca..a38570dc8cd 100644
--- a/jstests/aggregation/bugs/server11675.js
+++ b/jstests/aggregation/bugs/server11675.js
@@ -88,57 +88,57 @@ var server11675 = function() {
return obj;
});
var res = t.aggregate([
- {$match: {$text: {$search: 'apple banana'}}},
- {$sort: {textScore: {$meta: 'textScore'}}}
- ]).toArray();
+ {$match: {$text: {$search: 'apple banana'}}},
+ {$sort: {textScore: {$meta: 'textScore'}}}
+ ]).toArray();
assert.eq(res, findRes);
// Make sure {$meta: 'textScore'} can be used as a sub-expression
var res = t.aggregate([
- {$match: {_id: 1, $text: {$search: 'apple'}}},
- {
- $project: {
- words: 1,
- score: {$meta: 'textScore'},
- wordsTimesScore: {$multiply: ['$words', {$meta: 'textScore'}]}
- }
- }
- ]).toArray();
+ {$match: {_id: 1, $text: {$search: 'apple'}}},
+ {
+ $project: {
+ words: 1,
+ score: {$meta: 'textScore'},
+ wordsTimesScore: {$multiply: ['$words', {$meta: 'textScore'}]}
+ }
+ }
+ ]).toArray();
assert.eq(res[0].wordsTimesScore, res[0].words * res[0].score, tojson(res));
// And can be used in $group
var res = t.aggregate([
- {$match: {_id: 1, $text: {$search: 'apple banana'}}},
- {$group: {_id: {$meta: 'textScore'}, score: {$first: {$meta: 'textScore'}}}}
- ]).toArray();
+ {$match: {_id: 1, $text: {$search: 'apple banana'}}},
+ {$group: {_id: {$meta: 'textScore'}, score: {$first: {$meta: 'textScore'}}}}
+ ]).toArray();
assert.eq(res[0]._id, res[0].score, tojson(res));
// Make sure metadata crosses shard -> merger boundary
var res = t.aggregate([
- {$match: {_id: 1, $text: {$search: 'apple'}}},
- {$project: {scoreOnShard: {$meta: 'textScore'}}},
- {$limit: 1} // force a split. later stages run on merger
- ,
- {$project: {scoreOnShard: 1, scoreOnMerger: {$meta: 'textScore'}}}
- ]).toArray();
+ {$match: {_id: 1, $text: {$search: 'apple'}}},
+ {$project: {scoreOnShard: {$meta: 'textScore'}}},
+ {$limit: 1} // force a split. later stages run on merger
+ ,
+ {$project: {scoreOnShard: 1, scoreOnMerger: {$meta: 'textScore'}}}
+ ]).toArray();
assert.eq(res[0].scoreOnMerger, res[0].scoreOnShard);
var score = res[0].scoreOnMerger; // save for later tests
// Make sure metadata crosses shard -> merger boundary even if not used on shard
var res = t.aggregate([
- {$match: {_id: 1, $text: {$search: 'apple'}}},
- {$limit: 1} // force a split. later stages run on merger
- ,
- {$project: {scoreOnShard: 1, scoreOnMerger: {$meta: 'textScore'}}}
- ]).toArray();
+ {$match: {_id: 1, $text: {$search: 'apple'}}},
+ {$limit: 1} // force a split. later stages run on merger
+ ,
+ {$project: {scoreOnShard: 1, scoreOnMerger: {$meta: 'textScore'}}}
+ ]).toArray();
assert.eq(res[0].scoreOnMerger, score);
// Make sure metadata works if first $project doesn't use it.
var res = t.aggregate([
- {$match: {_id: 1, $text: {$search: 'apple'}}},
- {$project: {_id: 1}},
- {$project: {_id: 1, score: {$meta: 'textScore'}}}
- ]).toArray();
+ {$match: {_id: 1, $text: {$search: 'apple'}}},
+ {$project: {_id: 1}},
+ {$project: {_id: 1, score: {$meta: 'textScore'}}}
+ ]).toArray();
assert.eq(res[0].score, score);
// Make sure the metadata is 'missing()' when it doesn't exist because it was never created
@@ -147,20 +147,20 @@ var server11675 = function() {
// Make sure the metadata is 'missing()' when it doesn't exist because the document changed
var res = t.aggregate([
- {$match: {_id: 1, $text: {$search: 'apple banana'}}},
- {$group: {_id: 1, score: {$first: {$meta: 'textScore'}}}},
- {$project: {_id: 1, scoreAgain: {$meta: 'textScore'}}},
- ]).toArray();
+ {$match: {_id: 1, $text: {$search: 'apple banana'}}},
+ {$group: {_id: 1, score: {$first: {$meta: 'textScore'}}}},
+ {$project: {_id: 1, scoreAgain: {$meta: 'textScore'}}},
+ ]).toArray();
assert(!("scoreAgain" in res[0]));
// Make sure metadata works after a $unwind
t.insert({_id: 5, text: 'mango', words: [1, 2, 3]});
var res = t.aggregate([
- {$match: {$text: {$search: 'mango'}}},
- {$project: {score: {$meta: "textScore"}, _id: 1, words: 1}},
- {$unwind: '$words'},
- {$project: {scoreAgain: {$meta: "textScore"}, score: 1}}
- ]).toArray();
+ {$match: {$text: {$search: 'mango'}}},
+ {$project: {score: {$meta: "textScore"}, _id: 1, words: 1}},
+ {$unwind: '$words'},
+ {$project: {scoreAgain: {$meta: "textScore"}, score: 1}}
+ ]).toArray();
assert.eq(res[0].scoreAgain, res[0].score);
// Error checking
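{$meta: 'textScore'} reads the relevance score computed by a $text predicate; the hunks above show it used in $sort, inside larger $project expressions, in $group, and across the shard-to-merger boundary. A minimal sketch, assuming a text index already exists on the collection:

db.articles.aggregate([
    {$match: {$text: {$search: 'apple banana'}}},
    {$project: {score: {$meta: 'textScore'}}},
    {$sort: {score: {$meta: 'textScore'}}}
]);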
diff --git a/jstests/aggregation/bugs/server12015.js b/jstests/aggregation/bugs/server12015.js
index af4ee75f92d..c237e4f6f90 100644
--- a/jstests/aggregation/bugs/server12015.js
+++ b/jstests/aggregation/bugs/server12015.js
@@ -12,10 +12,7 @@ load("jstests/aggregation/extras/utils.js"); // For orderedArrayEq.
"use strict";
var coll = db.server12015;
coll.drop();
- var indexSpec = {
- a: 1,
- b: 1
- };
+ var indexSpec = {a: 1, b: 1};
assert.writeOK(coll.insert({_id: 0, a: 0, b: 0}));
assert.writeOK(coll.insert({_id: 1, a: 0, b: 1}));
diff --git a/jstests/aggregation/bugs/server14670.js b/jstests/aggregation/bugs/server14670.js
index 92c6e98e8e1..dc8a750e9db 100644
--- a/jstests/aggregation/bugs/server14670.js
+++ b/jstests/aggregation/bugs/server14670.js
@@ -12,12 +12,12 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
coll.insert({});
assertErrorCode(coll,
- [{$project: {strLen: {$strLenBytes: 1}}}],
+ [{$project: {strLen: {$strLenBytes: 1}}}],
34473,
"$strLenBytes requires a string argument.");
assertErrorCode(coll,
- [{$project: {strLen: {$strLenCP: 1}}}],
+ [{$project: {strLen: {$strLenCP: 1}}}],
34471,
"$strLenCP requires a string argument.");
}());
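$strLenBytes counts UTF-8 bytes and $strLenCP counts code points; the two differ on any non-ASCII string, and both reject non-string input with the codes asserted above. An illustrative sketch:

// 'é' is one code point encoded as two UTF-8 bytes.
db.s.aggregate([{$project: {bytes: {$strLenBytes: 'é'}, cps: {$strLenCP: 'é'}}}]);
// -> bytes: 2, cps: 1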
diff --git a/jstests/aggregation/bugs/server17943.js b/jstests/aggregation/bugs/server17943.js
index 10dbac2c37a..7036a16b940 100644
--- a/jstests/aggregation/bugs/server17943.js
+++ b/jstests/aggregation/bugs/server17943.js
@@ -18,11 +18,7 @@ load('jstests/aggregation/extras/utils.js');
assert.writeOK(coll.insert({_id: 6}));
// Create filter to only accept odd numbers.
- filterDoc = {
- input: '$a',
- as: 'x',
- cond: {$eq: [1, {$mod: ['$$x', 2]}]}
- };
+ filterDoc = {input: '$a', as: 'x', cond: {$eq: [1, {$mod: ['$$x', 2]}]}};
var expectedResults = [
{_id: 0, b: [1, 3, 5]},
{_id: 1, b: []},
@@ -45,57 +41,31 @@ load('jstests/aggregation/extras/utils.js');
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28646);
// Extra field(s).
- filterDoc = {
- input: '$a',
- as: 'x',
- cond: true,
- extra: 1
- };
+ filterDoc = {input: '$a', as: 'x', cond: true, extra: 1};
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28647);
// Missing 'input'.
- filterDoc = {
- as: 'x',
- cond: true
- };
+ filterDoc = {as: 'x', cond: true};
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28648);
// Missing 'as'.
- filterDoc = {
- input: '$a',
- cond: true
- };
+ filterDoc = {input: '$a', cond: true};
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28649);
// Missing 'cond'.
- filterDoc = {
- input: '$a',
- as: 'x'
- };
+ filterDoc = {input: '$a', as: 'x'};
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28650);
// 'as' is not a valid variable name.
- filterDoc = {
- input: '$a',
- as: '$x',
- cond: true
- };
+ filterDoc = {input: '$a', as: '$x', cond: true};
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 16867);
// 'input' is not an array.
- filterDoc = {
- input: 'string',
- as: 'x',
- cond: true
- };
+ filterDoc = {input: 'string', as: 'x', cond: true};
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28651);
coll.drop();
assert.writeOK(coll.insert({a: 'string'}));
- filterDoc = {
- input: '$a',
- as: 'x',
- cond: true
- };
+ filterDoc = {input: '$a', as: 'x', cond: true};
assertErrorCode(coll, [{$project: {b: {$filter: filterDoc}}}], 28651);
}());
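$filter keeps the elements of input for which cond evaluates to true, binding each element to the variable named by as (referenced as $$x above); all three fields are required, extra fields are rejected, and a non-array input is an error, matching the codes asserted here. A minimal sketch with illustrative names:

// Keep only the odd elements of the array field a.
db.t.aggregate([{
    $project: {odds: {$filter: {input: '$a', as: 'x', cond: {$eq: [1, {$mod: ['$$x', 2]}]}}}}
}]);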
diff --git a/jstests/aggregation/bugs/server18198.js b/jstests/aggregation/bugs/server18198.js
index 39cb37074e5..19b355f4953 100644
--- a/jstests/aggregation/bugs/server18198.js
+++ b/jstests/aggregation/bugs/server18198.js
@@ -16,14 +16,10 @@
},
runCommand: function(db, cmd, opts) {
commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {
- ok: 1.0
- };
+ return {ok: 1.0};
},
getReadPref: function() {
- return {
- mode: "secondaryPreferred"
- };
+ return {mode: "secondaryPreferred"};
},
getReadPrefMode: function() {
return "secondaryPreferred";
diff --git a/jstests/aggregation/bugs/server18222.js b/jstests/aggregation/bugs/server18222.js
index 1a46ff349c8..cea52b3970d 100644
--- a/jstests/aggregation/bugs/server18222.js
+++ b/jstests/aggregation/bugs/server18222.js
@@ -20,8 +20,11 @@
assert.writeOK(coll.insert({_id: 10, x: ['0']}));
// Project field is_array to represent whether the field x was an array.
- var results =
- coll.aggregate([{$sort: {_id: 1}}, {$project: {isArray: {$isArray: '$x'}}}, ]).toArray();
+ var results = coll.aggregate([
+ {$sort: {_id: 1}},
+ {$project: {isArray: {$isArray: '$x'}}},
+ ])
+ .toArray();
var expectedResults = [
{_id: 0, isArray: false},
{_id: 1, isArray: false},
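$isArray is a simple type predicate: true for any array value (including an empty one), false for everything else, missing fields included. Sketch with illustrative names:

db.t.aggregate([{$sort: {_id: 1}}, {$project: {isArray: {$isArray: '$x'}}}]);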
diff --git a/jstests/aggregation/bugs/server19095.js b/jstests/aggregation/bugs/server19095.js
index 30d2610aad9..875a7d16a9d 100644
--- a/jstests/aggregation/bugs/server19095.js
+++ b/jstests/aggregation/bugs/server19095.js
@@ -55,11 +55,10 @@ load("jstests/aggregation/extras/utils.js");
{_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]},
{_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
];
- testPipeline([{
- $lookup: {localField: "nonexistent", foreignField: "b", from: "from", as: "same"}
- }],
- expectedResults,
- coll);
+ testPipeline(
+ [{$lookup: {localField: "nonexistent", foreignField: "b", from: "from", as: "same"}}],
+ expectedResults,
+ coll);
// If foreignField is nonexistent, it is treated as if it is null.
expectedResults = [
@@ -67,25 +66,22 @@ load("jstests/aggregation/extras/utils.js");
{_id: 1, a: null, "same": [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]},
{_id: 2, "same": [{_id: 0, b: 1}, {_id: 1, b: null}, {_id: 2}]}
];
- testPipeline([{
- $lookup: {localField: "a", foreignField: "nonexistent", from: "from", as: "same"}
- }],
- expectedResults,
- coll);
+ testPipeline(
+ [{$lookup: {localField: "a", foreignField: "nonexistent", from: "from", as: "same"}}],
+ expectedResults,
+ coll);
// If there are no matches or the from coll doesn't exist, the result is an empty array.
expectedResults =
[{_id: 0, a: 1, "same": []}, {_id: 1, a: null, "same": []}, {_id: 2, "same": []}];
- testPipeline([{
- $lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}
- }],
- expectedResults,
- coll);
- testPipeline([{
- $lookup: {localField: "a", foreignField: "b", from: "nonexistent", as: "same"}
- }],
- expectedResults,
- coll);
+ testPipeline(
+ [{$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}}],
+ expectedResults,
+ coll);
+ testPipeline(
+ [{$lookup: {localField: "a", foreignField: "b", from: "nonexistent", as: "same"}}],
+ expectedResults,
+ coll);
// If field name specified by "as" already exists, it is overwritten.
expectedResults = [
@@ -106,13 +102,14 @@ load("jstests/aggregation/extras/utils.js");
},
{_id: 2, "c": [{_id: 1, b: null}, {_id: 2}], "d": [{_id: 1, b: null}, {_id: 2}]}
];
- testPipeline([
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "c"}},
- {$project: {"a": 1, "c": 1}},
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "d"}}
- ],
- expectedResults,
- coll);
+ testPipeline(
+ [
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "c"}},
+ {$project: {"a": 1, "c": 1}},
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "d"}}
+ ],
+ expectedResults,
+ coll);
//
// Coalescing with $unwind.
@@ -126,12 +123,13 @@ load("jstests/aggregation/extras/utils.js");
{_id: 2, same: {_id: 1, b: null}},
{_id: 2, same: {_id: 2}}
];
- testPipeline([
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
- {$unwind: {path: "$same"}}
- ],
- expectedResults,
- coll);
+ testPipeline(
+ [
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same"}}
+ ],
+ expectedResults,
+ coll);
// An $unwind on the "as" field, with includeArrayIndex.
expectedResults = [
@@ -141,39 +139,51 @@ load("jstests/aggregation/extras/utils.js");
{_id: 2, same: {_id: 1, b: null}, index: NumberLong(0)},
{_id: 2, same: {_id: 2}, index: NumberLong(1)},
];
- testPipeline([
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
- {$unwind: {path: "$same", includeArrayIndex: "index"}}
- ],
- expectedResults,
- coll);
+ testPipeline(
+ [
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same", includeArrayIndex: "index"}}
+ ],
+ expectedResults,
+ coll);
// Normal $unwind with no matching documents.
expectedResults = [];
- testPipeline([
- {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
- {$unwind: {path: "$same"}}
- ],
- expectedResults,
- coll);
+ testPipeline(
+ [
+ {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
+ {$unwind: {path: "$same"}}
+ ],
+ expectedResults,
+ coll);
// $unwind with preserveNullAndEmptyArray with no matching documents.
- expectedResults = [{_id: 0, a: 1}, {_id: 1, a: null}, {_id: 2}, ];
- testPipeline([
- {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
- {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
- ],
- expectedResults,
- coll);
+ expectedResults = [
+ {_id: 0, a: 1},
+ {_id: 1, a: null},
+ {_id: 2},
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "_id", foreignField: "nonexistent", from: "from", as: "same"}},
+ {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
+ ],
+ expectedResults,
+ coll);
// $unwind with preserveNullAndEmptyArray, some with matching documents, some without.
- expectedResults = [{_id: 0, a: 1}, {_id: 1, a: null, same: {_id: 0, b: 1}}, {_id: 2}, ];
- testPipeline([
- {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
- {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
- ],
- expectedResults,
- coll);
+ expectedResults = [
+ {_id: 0, a: 1},
+ {_id: 1, a: null, same: {_id: 0, b: 1}},
+ {_id: 2},
+ ];
+ testPipeline(
+ [
+ {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
+ {$unwind: {path: "$same", preserveNullAndEmptyArrays: true}}
+ ],
+ expectedResults,
+ coll);
// $unwind with preserveNullAndEmptyArray and includeArrayIndex, some with matching
// documents, some without.
@@ -182,15 +192,16 @@ load("jstests/aggregation/extras/utils.js");
{_id: 1, a: null, same: {_id: 0, b: 1}, index: NumberLong(0)},
{_id: 2, index: null},
];
- testPipeline([
- {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
- {
- $unwind:
- {path: "$same", preserveNullAndEmptyArrays: true, includeArrayIndex: "index"}
- }
- ],
- expectedResults,
- coll);
+ testPipeline(
+ [
+ {$lookup: {localField: "_id", foreignField: "b", from: "from", as: "same"}},
+ {
+ $unwind:
+ {path: "$same", preserveNullAndEmptyArrays: true, includeArrayIndex: "index"}
+ }
+ ],
+ expectedResults,
+ coll);
//
// Dependencies.
@@ -203,12 +214,13 @@ load("jstests/aggregation/extras/utils.js");
{_id: 1, "same": [{_id: 1, b: null}, {_id: 2}]},
{_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}
];
- testPipeline([
- {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
- {$project: {"same": 1}}
- ],
- expectedResults,
- coll);
+ testPipeline(
+ [
+ {$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}},
+ {$project: {"same": 1}}
+ ],
+ expectedResults,
+ coll);
//
// Dotted field paths.
@@ -277,12 +289,7 @@ load("jstests/aggregation/extras/utils.js");
}
];
expectedResults = [
- {
- _id: 0,
- a: {b: 1},
- same: {documents: {_id: 0, target: 1}},
- c: {d: {e: NumberLong(0)}}
- },
+ {_id: 0, a: {b: 1}, same: {documents: {_id: 0, target: 1}}, c: {d: {e: NumberLong(0)}}},
{_id: 1, same: {}, c: {d: {e: null}}},
];
testPipeline(pipeline, expectedResults, coll);
@@ -401,9 +408,8 @@ load("jstests/aggregation/extras/utils.js");
// An error is thrown if the from collection is sharded.
assert(sharded.adminCommand({shardCollection: "test.from", key: {_id: 1}}));
- assertErrorCode(
- sharded.getDB('test').lookUp,
- [{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}],
- 28769);
+ assertErrorCode(sharded.getDB('test').lookUp,
+ [{$lookup: {localField: "a", foreignField: "b", from: "from", as: "same"}}],
+ 28769);
sharded.stop();
}());
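$lookup performs a left outer equality join: for each input document it gathers the documents in from whose foreignField equals its localField (a missing value on either side matches null) into the array named by as, and an immediately following $unwind of that array is coalesced into the stage, which is what these cases exercise. A minimal sketch with hypothetical collections:

db.orders.aggregate([
    {$lookup: {from: 'items', localField: 'itemId', foreignField: '_id', as: 'item'}},
    {$unwind: {path: '$item', preserveNullAndEmptyArrays: true}}
]);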
diff --git a/jstests/aggregation/bugs/server20163.js b/jstests/aggregation/bugs/server20163.js
index 501a6cc546a..e61ba606c24 100644
--- a/jstests/aggregation/bugs/server20163.js
+++ b/jstests/aggregation/bugs/server20163.js
@@ -12,182 +12,127 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
var zipObj = 3;
assertErrorCode(coll,
- [{$project: {zipped: {$zip: zipObj}}}],
+ [{$project: {zipped: {$zip: zipObj}}}],
34460,
"$zip requires an object" + " as an argument.");
- zipObj = {
- inputs: []
- };
+ zipObj = {inputs: []};
assertErrorCode(coll,
- [{$project: {zipped: {$zip: zipObj}}}],
+ [{$project: {zipped: {$zip: zipObj}}}],
34465,
"$zip requires at least" + " one input array");
- zipObj = {
- inputs: {"a": "b"}
- };
+ zipObj = {inputs: {"a": "b"}};
assertErrorCode(coll, [{$project: {zipped: {$zip: zipObj}}}], 34461, "inputs is not an array");
- zipObj = {
- inputs: ["$a"],
- defaults: ["A"]
- };
+ zipObj = {inputs: ["$a"], defaults: ["A"]};
assertErrorCode(coll,
- [{$project: {zipped: {$zip: zipObj}}}],
+ [{$project: {zipped: {$zip: zipObj}}}],
34466,
"cannot specify defaults" + " unless useLongestLength is true.");
- zipObj = {
- inputs: ["$a"],
- defaults: ["A", "B"],
- useLongestLength: true
- };
+ zipObj = {inputs: ["$a"], defaults: ["A", "B"], useLongestLength: true};
assertErrorCode(coll,
- [{$project: {zipped: {$zip: zipObj}}}],
+ [{$project: {zipped: {$zip: zipObj}}}],
34467,
"inputs and defaults" + " must be the same length.");
- zipObj = {
- inputs: ["$a"],
- defaults: {"a": "b"}
- };
+ zipObj = {inputs: ["$a"], defaults: {"a": "b"}};
assertErrorCode(
coll, [{$project: {zipped: {$zip: zipObj}}}], 34462, "defaults is not an" + " array");
- zipObj = {
- inputs: ["$a"],
- defaults: ["A"],
- useLongestLength: 1
- };
+ zipObj = {inputs: ["$a"], defaults: ["A"], useLongestLength: 1};
assertErrorCode(
coll, [{$project: {zipped: {$zip: zipObj}}}], 34463, "useLongestLength is not" + " a bool");
- zipObj = {
- inputs: ["$a", "$b"],
- defaults: ["A"],
- notAField: 1
- };
+ zipObj = {inputs: ["$a", "$b"], defaults: ["A"], notAField: 1};
assertErrorCode(coll, [{$project: {zipped: {$zip: zipObj}}}], 34464, "unknown argument");
- zipObj = {
- inputs: ["A", "B"]
- };
+ zipObj = {inputs: ["A", "B"]};
assertErrorCode(coll,
- [{$project: {zipped: {$zip: zipObj}}}],
+ [{$project: {zipped: {$zip: zipObj}}}],
34468,
"an element of inputs" + " was not an array.");
- zipObj = {
- inputs: [[1, 2, 3], ["A", "B", "C"]]
- };
+ zipObj = {inputs: [[1, 2, 3], ["A", "B", "C"]]};
var res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
var output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, [[1, "A"], [2, "B"], [3, "C"]]);
- zipObj = {
- inputs: [[1, 2, 3], null]
- };
+ zipObj = {inputs: [[1, 2, 3], null]};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, null);
- zipObj = {
- inputs: [null, [1, 2, 3]]
- };
+ zipObj = {inputs: [null, [1, 2, 3]]};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, null);
- zipObj = {
- inputs: ["$missing", [1, 2, 3]]
- };
+ zipObj = {inputs: ["$missing", [1, 2, 3]]};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, null);
- zipObj = {
- inputs: [undefined, [1, 2, 3]]
- };
+ zipObj = {inputs: [undefined, [1, 2, 3]]};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, null);
- zipObj = {
- inputs: [[1, 2, 3], ["A", "B"]]
- };
+ zipObj = {inputs: [[1, 2, 3], ["A", "B"]]};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, [[1, "A"], [2, "B"]]);
- zipObj = {
- inputs: [["A", "B"], [1, 2, 3]]
- };
+ zipObj = {inputs: [["A", "B"], [1, 2, 3]]};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, [["A", 1], ["B", 2]]);
- zipObj = {
- inputs: [[], []]
- };
+ zipObj = {inputs: [[], []]};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, []);
- zipObj = {
- inputs: [["$short"], ["$long"]]
- };
+ zipObj = {inputs: [["$short"], ["$long"]]};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, [[['x', 'y'], [1, 2, 3]]]);
- zipObj = {
- inputs: ["$short", "$long"]
- };
+ zipObj = {inputs: ["$short", "$long"]};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, [['x', 1], ['y', 2]]);
- zipObj = {
- inputs: [["$long"]]
- };
+ zipObj = {inputs: [["$long"]]};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, [[[1, 2, 3]]]);
- zipObj = {
- inputs: [[1, 2, 3], ['a', 'b', 'c'], ['c', 'b', 'a']]
- };
+ zipObj = {inputs: [[1, 2, 3], ['a', 'b', 'c'], ['c', 'b', 'a']]};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, [[1, 'a', 'c'], [2, 'b', 'b'], [3, 'c', 'a']]);
- zipObj = {
- inputs: [[1, 2, 3], ["A", "B"]],
- defaults: ["C", "D"],
- useLongestLength: true
- };
+ zipObj = {inputs: [[1, 2, 3], ["A", "B"]], defaults: ["C", "D"], useLongestLength: true};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
assert.eq(output[0].zipped, [[1, "A"], [2, "B"], [3, "D"]]);
- zipObj = {
- inputs: [[1, 2, 3], ["A", "B"]],
- useLongestLength: true
- };
+ zipObj = {inputs: [[1, 2, 3], ["A", "B"]], useLongestLength: true};
res = coll.aggregate([{$project: {zipped: {$zip: zipObj}}}]);
output = res.toArray();
assert.eq(1, output.length);
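$zip transposes its inputs arrays element-wise; by default the result is as long as the shortest input, useLongestLength: true pads shorter inputs from defaults (or with null), and a null or missing input makes the whole result null, exactly the matrix of cases asserted above. Illustrative sketch:

db.t.aggregate([{
    $project: {z: {$zip: {inputs: [[1, 2, 3], ['a', 'b']], useLongestLength: true, defaults: [0, 'x']}}}
}]);
// -> z: [[1, 'a'], [2, 'b'], [3, 'x']]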
diff --git a/jstests/aggregation/bugs/server20169.js b/jstests/aggregation/bugs/server20169.js
index f4d8735701e..27995b8030c 100644
--- a/jstests/aggregation/bugs/server20169.js
+++ b/jstests/aggregation/bugs/server20169.js
@@ -13,49 +13,49 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
var rangeObj = [1];
assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
+ [{$project: {range: {$range: rangeObj}}}],
28667,
"range requires two" + " or three arguments");
rangeObj = ["a", 1];
assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
+ [{$project: {range: {$range: rangeObj}}}],
34443,
"range requires a" + " numeric starting value");
rangeObj = [1.1, 1];
assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
+ [{$project: {range: {$range: rangeObj}}}],
34444,
"range requires an" + " integral starting value");
rangeObj = [1, "a"];
assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
+ [{$project: {range: {$range: rangeObj}}}],
34445,
"range requires a" + " numeric ending value");
rangeObj = [1, 1.1];
assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
+ [{$project: {range: {$range: rangeObj}}}],
34446,
"range requires an" + " integral ending value");
rangeObj = [1, 3, "a"];
assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
+ [{$project: {range: {$range: rangeObj}}}],
34447,
"range requires a" + " numeric step value");
rangeObj = [1, 3, 1.1];
assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
+ [{$project: {range: {$range: rangeObj}}}],
34448,
"range requires an" + " integral step value");
rangeObj = [1, 3, 0];
assertErrorCode(coll,
- [{$project: {range: {$range: rangeObj}}}],
+ [{$project: {range: {$range: rangeObj}}}],
34449,
"range requires a" + " non-zero step value");
}());
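$range: [start, end, step] generates the integers from start (inclusive) up to end (exclusive); all three arguments must be integral and step (default 1) must be non-zero, with a distinct error code per violation as asserted above. Sketch:

db.t.aggregate([{$project: {r: {$range: [0, 10, 3]}}}]);  // r: [0, 3, 6, 9]
db.t.aggregate([{$project: {r: {$range: [5, 0, -2]}}}]);  // r: [5, 3, 1]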
diff --git a/jstests/aggregation/bugs/server21632.js b/jstests/aggregation/bugs/server21632.js
index 5aa9a315b5f..c99834c6f73 100644
--- a/jstests/aggregation/bugs/server21632.js
+++ b/jstests/aggregation/bugs/server21632.js
@@ -35,10 +35,7 @@
// If there is only one document, we should get that document.
var paddingStr = "abcdefghijklmnopqrstuvwxyz";
- var firstDoc = {
- _id: 0,
- paddingStr: paddingStr
- };
+ var firstDoc = {_id: 0, paddingStr: paddingStr};
assert.writeOK(coll.insert(firstDoc));
assert.eq([firstDoc], coll.aggregate([{$sample: {size: 1}}]).toArray());
assert.eq([firstDoc], coll.aggregate([{$sample: {size: 10}}]).toArray());
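$sample returns size pseudo-randomly chosen documents, each at most once, so asking for more documents than the collection holds simply returns them all, which is what the second assertion checks. Sketch:

db.t.aggregate([{$sample: {size: 3}}]);  // up to 3 distinct random documents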
diff --git a/jstests/aggregation/bugs/server22093.js b/jstests/aggregation/bugs/server22093.js
index aca39a4e789..61302bbf4cd 100644
--- a/jstests/aggregation/bugs/server22093.js
+++ b/jstests/aggregation/bugs/server22093.js
@@ -22,9 +22,8 @@ load('jstests/libs/analyze_plan.js');
assert.eq(simpleGroup.length, 1);
assert.eq(simpleGroup[0]["count"], 15);
- var explained =
- coll.explain()
- .aggregate([{$match: {foo: {$gt: 0}}}, {$group: {_id: null, count: {$sum: 1}}}]);
+ var explained = coll.explain().aggregate(
+ [{$match: {foo: {$gt: 0}}}, {$group: {_id: null, count: {$sum: 1}}}]);
assert(planHasStage(explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN"));
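The point of this test: a $match on an indexed field followed by a $group that only counts can be answered entirely from the index, so the winning plan should contain a COUNT_SCAN stage rather than a document fetch. An illustrative sketch:

db.c.createIndex({foo: 1});
db.c.explain().aggregate([{$match: {foo: {$gt: 0}}}, {$group: {_id: null, count: {$sum: 1}}}]);
// Inspect stages[0].$cursor.queryPlanner.winningPlan for COUNT_SCAN.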
diff --git a/jstests/aggregation/bugs/server22580.js b/jstests/aggregation/bugs/server22580.js
index afbfdd00dcd..3a448173875 100644
--- a/jstests/aggregation/bugs/server22580.js
+++ b/jstests/aggregation/bugs/server22580.js
@@ -12,32 +12,32 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
coll.insert({});
assertErrorCode(coll,
- [{$project: {substr: {$substrCP: ["abc", 0, "a"]}}}],
+ [{$project: {substr: {$substrCP: ["abc", 0, "a"]}}}],
34452,
"$substrCP" + " does not accept non-numeric types as a length.");
assertErrorCode(coll,
- [{$project: {substr: {$substrCP: ["abc", 0, NaN]}}}],
+ [{$project: {substr: {$substrCP: ["abc", 0, NaN]}}}],
34453,
"$substrCP" + " does not accept non-integers as a length.");
assertErrorCode(coll,
- [{$project: {substr: {$substrCP: ["abc", "abc", 3]}}}],
+ [{$project: {substr: {$substrCP: ["abc", "abc", 3]}}}],
34450,
"$substrCP does not accept non-numeric types as a starting index.");
assertErrorCode(coll,
- [{$project: {substr: {$substrCP: ["abc", 2.2, 3]}}}],
+ [{$project: {substr: {$substrCP: ["abc", 2.2, 3]}}}],
34451,
"$substrCP" + " does not accept non-integers as a starting index.");
assertErrorCode(coll,
- [{$project: {substr: {$substrCP: ["abc", -1, 3]}}}],
+ [{$project: {substr: {$substrCP: ["abc", -1, 3]}}}],
34455,
"$substrCP " + "does not accept negative integers as inputs.");
assertErrorCode(coll,
- [{$project: {substr: {$substrCP: ["abc", 1, -3]}}}],
+ [{$project: {substr: {$substrCP: ["abc", 1, -3]}}}],
34454,
"$substrCP " + "does not accept negative integers as inputs.");
}());
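For contrast with the failing calls above, a minimal valid $substrCP (the string is illustrative); the starting index and length are counted in Unicode code points rather than bytes:

db.substrDemo.drop();
db.substrDemo.insert({});
db.substrDemo.aggregate([{$project: {_id: 0, s: {$substrCP: ["cafétéria", 0, 4]}}}]);
// expected: [{s: "café"}] -- four code points, though five UTF-8 bytes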
diff --git a/jstests/aggregation/bugs/server3253.js b/jstests/aggregation/bugs/server3253.js
index 1adab9ca977..774712ecb2f 100644
--- a/jstests/aggregation/bugs/server3253.js
+++ b/jstests/aggregation/bugs/server3253.js
@@ -58,7 +58,7 @@ assert.eq([], listCollections(/tmp\.agg_out/));
// basic test
test(input,
- [{$project: {a: {$add: ['$_id', '$_id']}}}],
+ [{$project: {a: {$add: ['$_id', '$_id']}}}],
[{_id: 1, a: 2}, {_id: 2, a: 4}, {_id: 3, a: 6}]);
// test with indexes
@@ -66,7 +66,7 @@ assert.eq(output.getIndexes().length, 1);
output.ensureIndex({a: 1});
assert.eq(output.getIndexes().length, 2);
test(input,
- [{$project: {a: {$multiply: ['$_id', '$_id']}}}],
+ [{$project: {a: {$multiply: ['$_id', '$_id']}}}],
[{_id: 1, a: 1}, {_id: 2, a: 4}, {_id: 3, a: 9}]);
// test with empty result set and make sure old result is gone, but indexes remain
@@ -81,13 +81,11 @@ test(input, [{$project: {b: "$_id"}}], [{_id: 1, b: 1}, {_id: 2, b: 2}, {_id: 3,
// test with full text index
output.ensureIndex({c: "text"});
assert.eq(output.getIndexes().length, 4);
-test(input,
- [{$project: {c: {$concat: ["hello there ", "_id"]}}}],
- [
- {_id: 1, c: "hello there _id"},
- {_id: 2, c: "hello there _id"},
- {_id: 3, c: "hello there _id"}
- ]);
+test(input, [{$project: {c: {$concat: ["hello there ", "_id"]}}}], [
+ {_id: 1, c: "hello there _id"},
+ {_id: 2, c: "hello there _id"},
+ {_id: 3, c: "hello there _id"}
+]);
// test with capped collection
cappedOutput.drop();
diff --git a/jstests/aggregation/bugs/server4588.js b/jstests/aggregation/bugs/server4588.js
index ba49c78e9ea..49f41283f6d 100644
--- a/jstests/aggregation/bugs/server4588.js
+++ b/jstests/aggregation/bugs/server4588.js
@@ -13,7 +13,12 @@
// Without includeArrayIndex.
var actualResults = coll.aggregate([{$unwind: {path: "$x"}}]).toArray();
- var expectedResults = [{_id: 3, x: 1}, {_id: 3, x: 2}, {_id: 3, x: 3}, {_id: 4, x: 5}, ];
+ var expectedResults = [
+ {_id: 3, x: 1},
+ {_id: 3, x: 2},
+ {_id: 3, x: 3},
+ {_id: 4, x: 5},
+ ];
assert.eq(expectedResults, actualResults, "Incorrect results for normal $unwind");
// With includeArrayIndex, index inserted into a new field.
@@ -28,9 +33,12 @@
// With both includeArrayIndex and preserveNullAndEmptyArrays.
// TODO: update this test when SERVER-20168 is resolved.
- actualResults = coll.aggregate([{
- $unwind: {path: "$x", includeArrayIndex: "index", preserveNullAndEmptyArrays: true}
- }]).toArray();
+ actualResults =
+ coll.aggregate([{
+ $unwind:
+ {path: "$x", includeArrayIndex: "index", preserveNullAndEmptyArrays: true}
+ }])
+ .toArray();
expectedResults = [
{_id: 0, index: null},
{_id: 1, x: null, index: null},
diff --git a/jstests/aggregation/bugs/server5044.js b/jstests/aggregation/bugs/server5044.js
index 945f31c302c..f1f77a1991d 100644
--- a/jstests/aggregation/bugs/server5044.js
+++ b/jstests/aggregation/bugs/server5044.js
@@ -10,12 +10,12 @@ function test(data, popExpected, sampExpected) {
assert.writeOK(t.insert({num: data[i]}));
var res = t.aggregate({
- $group: {
- _id: 1,
- pop: {$stdDevPop: '$num'},
- samp: {$stdDevSamp: '$num'},
- }
- }).next();
+ $group: {
+ _id: 1,
+ pop: {$stdDevPop: '$num'},
+ samp: {$stdDevSamp: '$num'},
+ }
+ }).next();
if (popExpected === null) {
assert.isnull(res.pop);
diff --git a/jstests/aggregation/bugs/server6121.js b/jstests/aggregation/bugs/server6121.js
index 97d5a4d72c9..b7ea33abdaa 100644
--- a/jstests/aggregation/bugs/server6121.js
+++ b/jstests/aggregation/bugs/server6121.js
@@ -22,20 +22,22 @@ db.s6121.drop();
db.s6121.save({date: new Timestamp(1341337661, 1)});
db.s6121.save({date: new Date(1341337661000)});
// Aggregate checking various combinations of the constant and the field
-var s6121 = db.s6121.aggregate({
- $project: {
- _id: 0,
- dayOfMonth: {$dayOfMonth: '$date'},
- dayOfWeek: {$dayOfWeek: '$date'},
- dayOfYear: {$dayOfYear: '$date'},
- hour: {$hour: '$date'},
- minute: {$minute: '$date'},
- month: {$month: '$date'},
- second: {$second: '$date'},
- week: {$week: '$date'},
- year: {$year: '$date'}
- }
-}).toArray();
+var s6121 = db.s6121
+ .aggregate({
+ $project: {
+ _id: 0,
+ dayOfMonth: {$dayOfMonth: '$date'},
+ dayOfWeek: {$dayOfWeek: '$date'},
+ dayOfYear: {$dayOfYear: '$date'},
+ hour: {$hour: '$date'},
+ minute: {$minute: '$date'},
+ month: {$month: '$date'},
+ second: {$second: '$date'},
+ week: {$week: '$date'},
+ year: {$year: '$date'}
+ }
+ })
+ .toArray();
// Assert the two entries are equal
assert.eq(s6121[0], s6121[1], 's6121 failed');
diff --git a/jstests/aggregation/bugs/server6125.js b/jstests/aggregation/bugs/server6125.js
index 746c191d8fe..bfc4f471318 100644
--- a/jstests/aggregation/bugs/server6125.js
+++ b/jstests/aggregation/bugs/server6125.js
@@ -10,9 +10,7 @@
// to make results array nested (problem 2)
function nestArray(nstArray) {
for (x = 0; x < nstArray.length; x++) {
- nstArray[x].a = {
- b: nstArray[x].a
- };
+ nstArray[x].a = {b: nstArray[x].a};
}
}
diff --git a/jstests/aggregation/bugs/server6131.js b/jstests/aggregation/bugs/server6131.js
index 602894ab721..640eea2723e 100644
--- a/jstests/aggregation/bugs/server6131.js
+++ b/jstests/aggregation/bugs/server6131.js
@@ -11,11 +11,7 @@ t.drop();
// An empty array document is dropped.
t.save({_id: 0, a: 1, b: [], c: 2});
-assertAggregationResults([],
- {
-$unwind:
- '$b'
- });
+assertAggregationResults([], {$unwind: '$b'});
// Values from a nonempty array in another document are unwound.
t.save({_id: 1, b: [4, 5]});
@@ -29,48 +25,28 @@ t.drop();
// A nested empty array document is dropped.
t.save({_id: 0, a: 1, b: {x: 10, y: [], z: 20}, c: 2});
-assertAggregationResults([],
- {
-$unwind:
- '$b.y'
- });
+assertAggregationResults([], {$unwind: '$b.y'});
t.drop();
// A null value document is dropped.
t.save({_id: 0, a: 1, b: null, c: 2});
-assertAggregationResults([],
- {
-$unwind:
- '$b'
- });
+assertAggregationResults([], {$unwind: '$b'});
t.drop();
// A missing value causes the document to be dropped.
t.save({_id: 0, a: 1, c: 2});
-assertAggregationResults([],
- {
-$unwind:
- '$b'
- });
+assertAggregationResults([], {$unwind: '$b'});
t.drop();
// A missing value in an existing nested object causes the document to be dropped.
t.save({_id: 0, a: 1, b: {d: 4}, c: 2});
-assertAggregationResults([],
- {
-$unwind:
- '$b.y'
- });
+assertAggregationResults([], {$unwind: '$b.y'});
t.drop();
// A missing value in a missing nested object causes the document to be dropped.
t.save({_id: 0, a: 1, b: 10, c: 2});
-assertAggregationResults([],
- {
-$unwind:
- '$b.y'
- });
+assertAggregationResults([], {$unwind: '$b.y'});
diff --git a/jstests/aggregation/bugs/server6179.js b/jstests/aggregation/bugs/server6179.js
index 20158af7fb7..c05103a13b8 100644
--- a/jstests/aggregation/bugs/server6179.js
+++ b/jstests/aggregation/bugs/server6179.js
@@ -28,9 +28,11 @@
{movechunk: "test.data", find: {_id: 50}, to: s.getOther(s.getPrimaryShard("test")).name});
// Check that we get results rather than an error
- var result = d.data.aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
- {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
- {$sort: {_id: 1}}).toArray();
+ var result = d.data
+ .aggregate({$group: {_id: '$_id', i: {$first: '$i'}}},
+ {$group: {_id: '$i', avg_id: {$avg: '$_id'}}},
+ {$sort: {_id: 1}})
+ .toArray();
expected = [
{"_id": 0, "avg_id": 45},
{"_id": 1, "avg_id": 46},
diff --git a/jstests/aggregation/bugs/server6189.js b/jstests/aggregation/bugs/server6189.js
index 4cb615f0659..f8cb88194e2 100644
--- a/jstests/aggregation/bugs/server6189.js
+++ b/jstests/aggregation/bugs/server6189.js
@@ -11,36 +11,35 @@ function test(date, testSynthetics) {
: 'ISODate("%Y-%m-%dT%H:%M:%S.%LZ")';
// Can't use aggregate helper or assertErrorCode because we need to handle multiple error types
- var res = c.runCommand('aggregate',
- {
- pipeline: [{
- $project: {
- _id: 0,
- year: {$year: '$date'},
- month: {$month: '$date'},
- dayOfMonth: {$dayOfMonth: '$date'},
- hour: {$hour: '$date'},
- minute: {$minute: '$date'},
- second: {$second: '$date'}
-
- // server-6666
- ,
- millisecond: {$millisecond: '$date'}
-
- // server-9289
- ,
- millisecondPlusTen: {$millisecond: {$add: ['$date', 10]}}
-
- // $substr will call coerceToString
- ,
- string: {$substr: ['$date', 0, 1000]}
-
- // server-11118
- ,
- format: {$dateToString: {format: ISOfmt, date: '$date'}}
- }
- }]
- });
+ var res = c.runCommand('aggregate', {
+ pipeline: [{
+ $project: {
+ _id: 0,
+ year: {$year: '$date'},
+ month: {$month: '$date'},
+ dayOfMonth: {$dayOfMonth: '$date'},
+ hour: {$hour: '$date'},
+ minute: {$minute: '$date'},
+ second: {$second: '$date'}
+
+ // server-6666
+ ,
+ millisecond: {$millisecond: '$date'}
+
+ // server-9289
+ ,
+ millisecondPlusTen: {$millisecond: {$add: ['$date', 10]}}
+
+ // $substr will call coerceToString
+ ,
+ string: {$substr: ['$date', 0, 1000]}
+
+ // server-11118
+ ,
+ format: {$dateToString: {format: ISOfmt, date: '$date'}}
+ }
+ }]
+ });
if (date.valueOf() < 0 && _isWindows() && res.code == 16422) {
        // some versions of Windows (but not all) fail with dates before 1970
@@ -55,20 +54,19 @@ function test(date, testSynthetics) {
}
assert.commandWorked(res);
- assert.eq(res.result[0],
- {
- year: date.getUTCFullYear(),
- month: date.getUTCMonth() + 1 // jan == 1
- ,
- dayOfMonth: date.getUTCDate(),
- hour: date.getUTCHours(),
- minute: date.getUTCMinutes(),
- second: date.getUTCSeconds(),
- millisecond: date.getUTCMilliseconds(),
- millisecondPlusTen: ((date.getUTCMilliseconds() + 10) % 1000),
- string: date.tojson().slice(9, 28),
- format: date.tojson()
- });
+ assert.eq(res.result[0], {
+ year: date.getUTCFullYear(),
+ month: date.getUTCMonth() + 1 // jan == 1
+ ,
+ dayOfMonth: date.getUTCDate(),
+ hour: date.getUTCHours(),
+ minute: date.getUTCMinutes(),
+ second: date.getUTCSeconds(),
+ millisecond: date.getUTCMilliseconds(),
+ millisecondPlusTen: ((date.getUTCMilliseconds() + 10) % 1000),
+ string: date.tojson().slice(9, 28),
+ format: date.tojson()
+ });
if (testSynthetics) {
// Tests with this set all have the same value for these fields
diff --git a/jstests/aggregation/bugs/server6190.js b/jstests/aggregation/bugs/server6190.js
index d32a652e74b..ea7bfe1601b 100644
--- a/jstests/aggregation/bugs/server6190.js
+++ b/jstests/aggregation/bugs/server6190.js
@@ -8,8 +8,9 @@ t.drop();
t.save({});
function week(date) {
- return t.aggregate({$project: {a: {$week: date}}},
- {$match: {a: {$type: 16 /* Int type expected */}}})
+ return t
+ .aggregate({$project: {a: {$week: date}}},
+ {$match: {a: {$type: 16 /* Int type expected */}}})
.toArray()[0]
.a;
}
diff --git a/jstests/aggregation/bugs/server6195.js b/jstests/aggregation/bugs/server6195.js
index cca80a14ad5..13dccd10877 100644
--- a/jstests/aggregation/bugs/server6195.js
+++ b/jstests/aggregation/bugs/server6195.js
@@ -27,7 +27,7 @@ assertErrorCode(c, {$project: {str: {$concat: [1]}}}, 16702);
assertErrorCode(c, {$project: {str: {$concat: [NumberInt(1)]}}}, 16702);
assertErrorCode(c, {$project: {str: {$concat: [NumberLong(1)]}}}, 16702);
assertErrorCode(c, {$project: {str: {$concat: [true]}}}, 16702);
-assertErrorCode(c, {$project: {str: {$concat: [function(){}]}}}, 16702);
+assertErrorCode(c, {$project: {str: {$concat: [function() {}]}}}, 16702);
assertErrorCode(c, {$project: {str: {$concat: [{}]}}}, 16702);
assertErrorCode(c, {$project: {str: {$concat: [[]]}}}, 16702);
assertErrorCode(c, {$project: {str: {$concat: [new Timestamp(0, 0)]}}}, 16702);
diff --git a/jstests/aggregation/bugs/server6529.js b/jstests/aggregation/bugs/server6529.js
index 1bc4119c547..94af2f1fc72 100644
--- a/jstests/aggregation/bugs/server6529.js
+++ b/jstests/aggregation/bugs/server6529.js
@@ -13,16 +13,16 @@ assertErrorCode(c, {$group: {_id: {a: 1}}}, 17390);
// but any amount of nesting in a project should work
assert.eq(c.aggregate({$project: {_id: 0, a: {b: {c: {d: {e: {f: {g: 1}}}}}}}}).toArray(),
- [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
assert.eq(c.aggregate({$project: {_id: 0, a: {b: {c: {d: {e: {f: 1}}}}}}}).toArray(),
- [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
assert.eq(c.aggregate({$project: {_id: 0, a: {b: {c: {d: {e: 1}}}}}}).toArray(),
- [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
assert.eq(c.aggregate({$project: {_id: 0, a: {b: {c: {d: 1}}}}}).toArray(),
- [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
assert.eq(c.aggregate({$project: {_id: 0, a: {b: {c: 1}}}}).toArray(),
- [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
assert.eq(c.aggregate({$project: {_id: 0, a: {b: 1}}}).toArray(),
- [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
assert.eq(c.aggregate({$project: {_id: 0, a: 1}}).toArray(),
- [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
+ [{a: {b: {c: {d: {e: {f: {g: 19}}}}}}}]);
diff --git a/jstests/aggregation/bugs/server6531.js b/jstests/aggregation/bugs/server6531.js
index 7d117ce6905..7d020d79219 100644
--- a/jstests/aggregation/bugs/server6531.js
+++ b/jstests/aggregation/bugs/server6531.js
@@ -10,12 +10,8 @@ for (var x = 0; x < 10; x++) {
}
function test(variant) {
- query = {
- loc: {$within: {$center: [[5, 5], 3]}}
- };
- sort = {
- _id: 1
- };
+ query = {loc: {$within: {$center: [[5, 5], 3]}}};
+ sort = {_id: 1};
aggOut = c.aggregate({$match: query}, {$sort: sort});
cursor = c.find(query).sort(sort);
diff --git a/jstests/aggregation/bugs/server6556.js b/jstests/aggregation/bugs/server6556.js
index 636bef6b02c..261dc5a35b5 100644
--- a/jstests/aggregation/bugs/server6556.js
+++ b/jstests/aggregation/bugs/server6556.js
@@ -7,17 +7,17 @@ c.save({foo: "as\0df"});
// compare the whole string; they should match
assert.eq(c.aggregate({$project: {_id: 0, matches: {$eq: ["as\0df", "$foo"]}}}).toArray(),
- [{matches: true}]);
+ [{matches: true}]);
// compare with the substring containing only the characters up to the null; they should not match
assert.eq(c.aggregate({
- $project: {_id: 0, matches: {$eq: ["as\0df", {$substrBytes: ["$foo", 0, 3]}]}}
-}).toArray(),
- [{matches: false}]);
+ $project: {_id: 0, matches: {$eq: ["as\0df", {$substrBytes: ["$foo", 0, 3]}]}}
+ }).toArray(),
+ [{matches: false}]);
// a partial match the other way shouldn't work either
assert.eq(c.aggregate({$project: {_id: 0, matches: {$eq: ["as", "$foo"]}}}).toArray(),
- [{matches: false}]);
+ [{matches: false}]);
// neither should one that differs after the null
assert.eq(c.aggregate({$project: {_id: 0, matches: {$eq: ["as\0de", "$foo"]}}}).toArray(),
- [{matches: false}]);
+ [{matches: false}]);
// should assert on fieldpaths with a null
assert.throws(c.aggregate, {$project: {_id: 0, matches: {$eq: ["as\0df", "$f\0oo"]}}});
diff --git a/jstests/aggregation/bugs/server7781.js b/jstests/aggregation/bugs/server7781.js
index 230a8a64c9f..0a4831fb800 100644
--- a/jstests/aggregation/bugs/server7781.js
+++ b/jstests/aggregation/bugs/server7781.js
@@ -30,10 +30,7 @@
for (var i = 0; i < cmdOut.length; i++) {
massaged = {};
Object.extend(massaged, cmdOut[i].obj, /*deep=*/true);
- massaged.stats = {
- 'dis': cmdOut[i].dis,
- 'loc': cmdOut[i].loc
- };
+ massaged.stats = {'dis': cmdOut[i].dis, 'loc': cmdOut[i].loc};
if (!friendlyEqual(massaged, aggOut[i])) {
allSame = false; // don't bail yet since we want to print all differences
@@ -87,12 +84,7 @@
// test with defaults
var queryPoint = pointMaker.mkPt(0.25); // stick to center of map
- geoCmd = {
- geoNear: coll,
- near: queryPoint,
- includeLocs: true,
- spherical: true
- };
+ geoCmd = {geoNear: coll, near: queryPoint, includeLocs: true, spherical: true};
aggCmd = {
$geoNear: {
near: queryPoint,
diff --git a/jstests/aggregation/bugs/server9840.js b/jstests/aggregation/bugs/server9840.js
index b3a73cfc3d7..4316730370a 100644
--- a/jstests/aggregation/bugs/server9840.js
+++ b/jstests/aggregation/bugs/server9840.js
@@ -27,62 +27,57 @@ test({$add: ['$two', '$$CURRENT.three']}, 5);
test({$add: ['$$CURRENT.two', '$$ROOT.nested.four']}, 6);
// $let simple
-test({$let: {vars: {a: 10}, in: '$$a'}}, 10);
-test({$let: {vars: {a: '$zero'}, in: '$$a'}}, 0);
-test({$let: {vars: {a: {$add: ['$one', '$two']}, b: 10}, in: {$multiply: ['$$a', '$$b']}}}, 30);
+test({$let: {vars: {a: 10}, in : '$$a'}}, 10);
+test({$let: {vars: {a: '$zero'}, in : '$$a'}}, 0);
+test({$let: {vars: {a: {$add: ['$one', '$two']}, b: 10}, in : {$multiply: ['$$a', '$$b']}}}, 30);
// $let changing CURRENT
-test({$let: {vars: {CURRENT: '$$ROOT.nested'}, in: {$multiply: ['$four', '$$ROOT.two']}}}, 8);
-test(
- {
- $let: {
- vars: {CURRENT: '$$CURRENT.nested'}, // using original value of CURRENT
- in: {$multiply: ['$four', '$$ROOT.two']}
- }
- },
- 8);
-test(
- {
- $let: {
- vars: {CURRENT: '$nested'}, // same as last
- in: {$multiply: ['$four', '$$ROOT.two']}
- }
- },
- 8);
-test(
- {
- $let: {
- vars: {CURRENT: {$const: {ten: 10}}}, // "artificial" object
- in: {$multiply: ['$ten', '$$ROOT.two']}
- }
- },
- 20);
-test(
- {
- $let: {
- vars: {CURRENT: '$three'}, // sets current to the number 3 (not an object)
- in: {$multiply: ['$$CURRENT', '$$ROOT.two']}
- }
- },
- 6);
+test({$let: {vars: {CURRENT: '$$ROOT.nested'}, in : {$multiply: ['$four', '$$ROOT.two']}}}, 8);
+test({
+ $let: {
+ vars: {CURRENT: '$$CURRENT.nested'}, // using original value of CURRENT
+ in : {$multiply: ['$four', '$$ROOT.two']}
+ }
+},
+ 8);
+test({
+ $let: {
+ vars: {CURRENT: '$nested'}, // same as last
+ in : {$multiply: ['$four', '$$ROOT.two']}
+ }
+},
+ 8);
+test({
+ $let: {
+ vars: {CURRENT: {$const: {ten: 10}}}, // "artificial" object
+ in : {$multiply: ['$ten', '$$ROOT.two']}
+ }
+},
+ 20);
+test({
+ $let: {
+ vars: {CURRENT: '$three'}, // sets current to the number 3 (not an object)
+ in : {$multiply: ['$$CURRENT', '$$ROOT.two']}
+ }
+},
+ 6);
// swapping with $let (ensures there is no ordering dependency in vars)
-test(
- {
- $let: {
- vars: {x: 6, y: 10},
- in: {
- $let: {
- vars: {x: '$$y', y: '$$x'}, // now {x:10, y:6}
- in: {$subtract: ['$$x', '$$y']}
- }
- }
- }
- }, // not commutative!
- 4); // 10-6 not 6-10 or 6-6
+test({
+ $let: {
+ vars: {x: 6, y: 10},
+ in : {
+ $let: {
+ vars: {x: '$$y', y: '$$x'}, // now {x:10, y:6}
+ in : {$subtract: ['$$x', '$$y']}
+ }
+ }
+ }
+}, // not commutative!
+ 4); // 10-6 not 6-10 or 6-6
// unicode is allowed
-test({$let: {vars: {'日本語': 10}, in: '$$日本語'}}, 10); // Japanese for "Japanese language"
+test({$let: {vars: {'日本語': 10}, in : '$$日本語'}}, 10); // Japanese for "Japanese language"
// Can use ROOT and CURRENT directly with no subfield (SERVER-5916)
t.drop();
@@ -90,15 +85,15 @@ t.insert({_id: 'obj'});
assert.eq(t.aggregate({$project: {_id: 0, obj: '$$ROOT'}}).toArray(), [{obj: {_id: 'obj'}}]);
assert.eq(t.aggregate({$project: {_id: 0, obj: '$$CURRENT'}}).toArray(), [{obj: {_id: 'obj'}}]);
assert.eq(t.aggregate({$group: {_id: 0, objs: {$push: '$$ROOT'}}}).toArray(),
- [{_id: 0, objs: [{_id: 'obj'}]}]);
+ [{_id: 0, objs: [{_id: 'obj'}]}]);
assert.eq(t.aggregate({$group: {_id: 0, objs: {$push: '$$CURRENT'}}}).toArray(),
- [{_id: 0, objs: [{_id: 'obj'}]}]);
+ [{_id: 0, objs: [{_id: 'obj'}]}]);
// check variable name validity
-assertErrorCode(t, {$project: {a: {$let: {vars: {ROOT: 1}, in: '$$ROOT'}}}}, 16867);
-assertErrorCode(t, {$project: {a: {$let: {vars: {FOO: 1}, in: '$$FOO'}}}}, 16867);
-assertErrorCode(t, {$project: {a: {$let: {vars: {_underbar: 1}, in: '$$FOO'}}}}, 16867);
-assertErrorCode(t, {$project: {a: {$let: {vars: {'a.b': 1}, in: '$$FOO'}}}}, 16868);
-assertErrorCode(t, {$project: {a: {$let: {vars: {'a b': 1}, in: '$$FOO'}}}}, 16868);
+assertErrorCode(t, {$project: {a: {$let: {vars: {ROOT: 1}, in : '$$ROOT'}}}}, 16867);
+assertErrorCode(t, {$project: {a: {$let: {vars: {FOO: 1}, in : '$$FOO'}}}}, 16867);
+assertErrorCode(t, {$project: {a: {$let: {vars: {_underbar: 1}, in : '$$FOO'}}}}, 16867);
+assertErrorCode(t, {$project: {a: {$let: {vars: {'a.b': 1}, in : '$$FOO'}}}}, 16868);
+assertErrorCode(t, {$project: {a: {$let: {vars: {'a b': 1}, in : '$$FOO'}}}}, 16868);
assertErrorCode(t, {$project: {a: '$$_underbar'}}, 16870);
assertErrorCode(t, {$project: {a: '$$with spaces'}}, 16871);
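A condensed sketch of the $let shape the tests above exercise (collection name and values are illustrative); vars binds names that the in-expression reads back as $$name:

db.letDemo.drop();
db.letDemo.insert({});
db.letDemo.aggregate(
    [{$project: {_id: 0, p: {$let: {vars: {a: 2, b: 3}, in: {$multiply: ["$$a", "$$b"]}}}}}]);
// expected: [{p: 6}]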
diff --git a/jstests/aggregation/bugs/server9841.js b/jstests/aggregation/bugs/server9841.js
index 5bf9b32db93..28e6037caf1 100644
--- a/jstests/aggregation/bugs/server9841.js
+++ b/jstests/aggregation/bugs/server9841.js
@@ -16,27 +16,26 @@ function test(expression, expected) {
assert.eq(result, [{res: expected}]);
}
-test({$map: {input: "$simple", as: "var", in: '$$var'}}, [1, 2, 3, 4]);
-test({$map: {input: "$simple", as: "var", in: {$add: [10, '$$var']}}}, [11, 12, 13, 14]);
+test({$map: {input: "$simple", as: "var", in : '$$var'}}, [1, 2, 3, 4]);
+test({$map: {input: "$simple", as: "var", in : {$add: [10, '$$var']}}}, [11, 12, 13, 14]);
-test({$map: {input: "$nested", as: "var", in: '$$var.a'}}, [1, 2]);
-test({$map: {input: "$nested", as: "CURRENT", in: '$a'}}, [1, 2]);
+test({$map: {input: "$nested", as: "var", in : '$$var.a'}}, [1, 2]);
+test({$map: {input: "$nested", as: "CURRENT", in : '$a'}}, [1, 2]);
-test({$map: {input: "$mixed", as: "var", in: '$$var.a'}},
+test({$map: {input: "$mixed", as: "var", in : '$$var.a'}},
[1, null, 2, null]); // missing becomes null
-test({$map: {input: "$null", as: "var", in: '$$var'}}, null);
+test({$map: {input: "$null", as: "var", in : '$$var'}}, null);
// can't set ROOT
-assertErrorCode(t, {$project: {a: {$map: {input: "$simple", as: "ROOT", in: '$$ROOT'}}}}, 16867);
+assertErrorCode(t, {$project: {a: {$map: {input: "$simple", as: "ROOT", in : '$$ROOT'}}}}, 16867);
// error on non-array
-assertErrorCode(t, {$project: {a: {$map: {input: "$notArray", as: "var", in: '$$var'}}}}, 16883);
+assertErrorCode(t, {$project: {a: {$map: {input: "$notArray", as: "var", in : '$$var'}}}}, 16883);
// parse errors (missing or extra fields)
-assertErrorCode(t,
- {$project: {a: {$map: {x: 1, input: "$simple", as: "var", in: '$$var'}}}},
- 16879);
-assertErrorCode(t, {$project: {a: {$map: {as: "var", in: '$$var'}}}}, 16880);
-assertErrorCode(t, {$project: {a: {$map: {input: "$simple", in: '$$var'}}}}, 16881);
+assertErrorCode(
+ t, {$project: {a: {$map: {x: 1, input: "$simple", as: "var", in : '$$var'}}}}, 16879);
+assertErrorCode(t, {$project: {a: {$map: {as: "var", in : '$$var'}}}}, 16880);
+assertErrorCode(t, {$project: {a: {$map: {input: "$simple", in : '$$var'}}}}, 16881);
assertErrorCode(t, {$project: {a: {$map: {input: "$simple", as: "var"}}}}, 16882);
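A condensed sketch of the $map behavior exercised above (collection name is illustrative); as names the per-element variable and in is evaluated once per element:

db.mapDemo.drop();
db.mapDemo.insert({simple: [1, 2, 3, 4]});
db.mapDemo.aggregate(
    [{$project: {_id: 0, res: {$map: {input: "$simple", as: "n", in: {$add: [10, "$$n"]}}}}}]);
// expected: [{res: [11, 12, 13, 14]}]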
diff --git a/jstests/aggregation/bugs/substr.js b/jstests/aggregation/bugs/substr.js
index 9b514eb4679..8581aaf5601 100644
--- a/jstests/aggregation/bugs/substr.js
+++ b/jstests/aggregation/bugs/substr.js
@@ -108,18 +108,18 @@ assertSubstring('cde', '$z', {$add: ['$b', 1]}, {$add: [2, '$d']});
assert.eq(
'e',
t.aggregate({
- $project: {
- a: {
- $substrBytes: [
- {
- $substrBytes:
- [{$substrBytes: [{$substrBytes: ['abcdefghij', 1, 6]}, 2, 5]}, 0, 3]
- },
- 1,
- 1
- ]
- }
- }
- })
+ $project: {
+ a: {
+ $substrBytes: [
+ {
+ $substrBytes:
+ [{$substrBytes: [{$substrBytes: ['abcdefghij', 1, 6]}, 2, 5]}, 0, 3]
+ },
+ 1,
+ 1
+ ]
+ }
+ }
+ })
.toArray()[0]
.a);
diff --git a/jstests/aggregation/bugs/upperlower.js b/jstests/aggregation/bugs/upperlower.js
index 60bcba8db20..a393bf3bd72 100644
--- a/jstests/aggregation/bugs/upperlower.js
+++ b/jstests/aggregation/bugs/upperlower.js
@@ -6,8 +6,9 @@ t.drop();
t.save({});
function assertResult(expectedUpper, expectedLower, string) {
- result = t.aggregate({$project: {upper: {$toUpper: string}, lower: {$toLower: string}}})
- .toArray()[0];
+ result = t.aggregate({
+ $project: {upper: {$toUpper: string}, lower: {$toLower: string}}
+ }).toArray()[0];
assert.eq(expectedUpper, result.upper);
assert.eq(expectedLower, result.lower);
}
diff --git a/jstests/aggregation/expressions/expression_mod.js b/jstests/aggregation/expressions/expression_mod.js
index 923324797ed..63469ca8177 100644
--- a/jstests/aggregation/expressions/expression_mod.js
+++ b/jstests/aggregation/expressions/expression_mod.js
@@ -17,47 +17,44 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
    // The $match portion ensures they are of the correct type, as the shell turns the ints back
    // into doubles at the end, so we cannot check types with assert.
coll.save({});
- var result =
- coll.aggregate(
- {
- $project: {
- _id: 0,
- dub_dub: {$mod: [138.5, 3.0]},
- dub_int: {$mod: [138.5, NumberLong(3)]},
- dub_long: {$mod: [138.5, NumberInt(3)]},
- int_dub: {$mod: [NumberInt(8), 3.25]},
- int_dubint: {$mod: [NumberInt(8), 3.0]},
- int_int: {$mod: [NumberInt(8), NumberInt(3)]},
- int_long: {$mod: [NumberInt(8), NumberLong(3)]},
- long_dub: {$mod: [NumberLong(8), 3.25]},
- long_dubint: {$mod: [NumberLong(8), 3.0]},
- long_dublong: {$mod: [NumberLong(500000000000), 450000000000.0]},
- long_int: {$mod: [NumberLong(8), NumberInt(3)]},
- long_long: {$mod: [NumberLong(8), NumberLong(3)]},
- verylong_verylong:
- {$mod: [NumberLong(800000000000), NumberLong(300000000000)]}
- }
- },
- {
- $match: {
- // 1 is NumberDouble
- dub_dub: {$type: 1},
- dub_int: {$type: 1},
- dub_long: {$type: 1},
- int_dub: {$type: 1},
- // 16 is NumberInt
- int_dubint: {$type: 16},
- int_int: {$type: 16},
- // 18 is NumberLong
- int_long: {$type: 18},
- long_dub: {$type: 1},
- long_dubint: {$type: 18},
- long_dublong: {$type: 1},
- long_int: {$type: 18},
- long_long: {$type: 18},
- verylong_verylong: {$type: 18}
- }
- });
+ var result = coll.aggregate({
+ $project: {
+ _id: 0,
+ dub_dub: {$mod: [138.5, 3.0]},
+ dub_int: {$mod: [138.5, NumberLong(3)]},
+ dub_long: {$mod: [138.5, NumberInt(3)]},
+ int_dub: {$mod: [NumberInt(8), 3.25]},
+ int_dubint: {$mod: [NumberInt(8), 3.0]},
+ int_int: {$mod: [NumberInt(8), NumberInt(3)]},
+ int_long: {$mod: [NumberInt(8), NumberLong(3)]},
+ long_dub: {$mod: [NumberLong(8), 3.25]},
+ long_dubint: {$mod: [NumberLong(8), 3.0]},
+ long_dublong: {$mod: [NumberLong(500000000000), 450000000000.0]},
+ long_int: {$mod: [NumberLong(8), NumberInt(3)]},
+ long_long: {$mod: [NumberLong(8), NumberLong(3)]},
+ verylong_verylong: {$mod: [NumberLong(800000000000), NumberLong(300000000000)]}
+ }
+ },
+ {
+ $match: {
+ // 1 is NumberDouble
+ dub_dub: {$type: 1},
+ dub_int: {$type: 1},
+ dub_long: {$type: 1},
+ int_dub: {$type: 1},
+ // 16 is NumberInt
+ int_dubint: {$type: 16},
+ int_int: {$type: 16},
+ // 18 is NumberLong
+ int_long: {$type: 18},
+ long_dub: {$type: 1},
+ long_dubint: {$type: 18},
+ long_dublong: {$type: 1},
+ long_int: {$type: 18},
+ long_long: {$type: 18},
+ verylong_verylong: {$type: 18}
+ }
+ });
// Correct answers (it is mainly the types that are important here).
var expectedResult = [{
diff --git a/jstests/aggregation/expressions/in.js b/jstests/aggregation/expressions/in.js
index 7839c95d32e..ba09b0f8fac 100644
--- a/jstests/aggregation/expressions/in.js
+++ b/jstests/aggregation/expressions/in.js
@@ -5,13 +5,11 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
(function() {
"use strict";
- var coll = db.in;
+ var coll = db.in ;
coll.drop();
function testExpression(options) {
- var pipeline = {
- $project: {included: {$in: ["$elementField", {$literal: options.array}]}}
- };
+ var pipeline = {$project: {included: {$in: ["$elementField", {$literal: options.array}]}}};
coll.drop();
assert.writeOK(coll.insert({elementField: options.element}));
@@ -20,9 +18,7 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
assert.eq(res[0].included, options.elementIsIncluded);
if (options.queryFormShouldBeEquivalent) {
- var query = {
- elementField: {$in: options.array}
- };
+ var query = {elementField: {$in: options.array}};
res = coll.find(query).toArray();
if (options.elementIsIncluded) {
@@ -33,12 +29,8 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
}
}
- testExpression({
- element: 1,
- array: [1, 2, 3],
- elementIsIncluded: true,
- queryFormShouldBeEquivalent: true
- });
+ testExpression(
+ {element: 1, array: [1, 2, 3], elementIsIncluded: true, queryFormShouldBeEquivalent: true});
testExpression({
element: "A",
@@ -119,38 +111,24 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
coll.drop();
coll.insert({});
- var pipeline = {
- $project: {included: {$in: [[1, 2], 1]}}
- };
+ var pipeline = {$project: {included: {$in: [[1, 2], 1]}}};
assertErrorCode(coll, pipeline, 40081, "$in requires an array as a second argument");
- pipeline = {
- $project: {included: {$in: [1, null]}}
- };
+ pipeline = {$project: {included: {$in: [1, null]}}};
assertErrorCode(coll, pipeline, 40081, "$in requires an array as a second argument");
- pipeline = {
- $project: {included: {$in: [1, "$notAField"]}}
- };
+ pipeline = {$project: {included: {$in: [1, "$notAField"]}}};
assertErrorCode(coll, pipeline, 40081, "$in requires an array as a second argument");
- pipeline = {
- $project: {included: {$in: null}}
- };
+ pipeline = {$project: {included: {$in: null}}};
assertErrorCode(coll, pipeline, 16020, "$in requires two arguments");
- pipeline = {
- $project: {included: {$in: [1]}}
- };
+ pipeline = {$project: {included: {$in: [1]}}};
assertErrorCode(coll, pipeline, 16020, "$in requires two arguments");
- pipeline = {
- $project: {included: {$in: []}}
- };
+ pipeline = {$project: {included: {$in: []}}};
assertErrorCode(coll, pipeline, 16020, "$in requires two arguments");
- pipeline = {
- $project: {included: {$in: [1, 2, 3]}}
- };
+ pipeline = {$project: {included: {$in: [1, 2, 3]}}};
assertErrorCode(coll, pipeline, 16020, "$in requires two arguments");
}());
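A minimal sketch of the two forms the test above treats as equivalent (collection name is illustrative): the $in aggregation expression returns a boolean, while the $in query operator filters documents:

db.inDemo.drop();
db.inDemo.insert({elementField: 1});
db.inDemo.aggregate(
    [{$project: {_id: 0, included: {$in: ["$elementField", {$literal: [1, 2, 3]}]}}}]);
// expected: [{included: true}]
db.inDemo.find({elementField: {$in: [1, 2, 3]}}).count();
// expected: 1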
diff --git a/jstests/aggregation/expressions/indexof_array.js b/jstests/aggregation/expressions/indexof_array.js
index bfc9ef71a15..3fb445e5066 100644
--- a/jstests/aggregation/expressions/indexof_array.js
+++ b/jstests/aggregation/expressions/indexof_array.js
@@ -46,23 +46,15 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
};
assertErrorCode(coll, pipeline, 40090);
- pipeline = {
- $project: {output: {$indexOfArray: [[1, 2, 3], 2, "bad"]}}
- };
+ pipeline = {$project: {output: {$indexOfArray: [[1, 2, 3], 2, "bad"]}}};
assertErrorCode(coll, pipeline, 40096);
- pipeline = {
- $project: {output: {$indexOfArray: [[1, 2, 3], 2, 0, "bad"]}}
- };
+ pipeline = {$project: {output: {$indexOfArray: [[1, 2, 3], 2, 0, "bad"]}}};
assertErrorCode(coll, pipeline, 40096);
- pipeline = {
- $project: {output: {$indexOfArray: [[1, 2, 3], 2, -1]}}
- };
+ pipeline = {$project: {output: {$indexOfArray: [[1, 2, 3], 2, -1]}}};
assertErrorCode(coll, pipeline, 40097);
- pipeline = {
- $project: {output: {$indexOfArray: [[1, 2, 3], 2, 1, -1]}}
- };
+ pipeline = {$project: {output: {$indexOfArray: [[1, 2, 3], 2, 1, -1]}}};
assertErrorCode(coll, pipeline, 40097);
}());
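For contrast, a minimal successful $indexOfArray call (values are illustrative); it returns the zero-based position of the first match, or -1 when the value is absent, and the optional start/end bounds must be non-negative integers:

db.indexDemo.drop();
db.indexDemo.insert({});
db.indexDemo.aggregate([{$project: {_id: 0, i: {$indexOfArray: [[1, 2, 3], 2]}}}]);
// expected: [{i: 1}]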
diff --git a/jstests/aggregation/expressions/indexof_bytes.js b/jstests/aggregation/expressions/indexof_bytes.js
index ac3cefda790..d484ad50948 100644
--- a/jstests/aggregation/expressions/indexof_bytes.js
+++ b/jstests/aggregation/expressions/indexof_bytes.js
@@ -17,9 +17,7 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
var start = indexOfSpec.length > 2 ? indexOfSpec[2] : 0;
        // Use $strLenBytes because JavaScript's length property is based on UTF-16, not the
        // actual number of bytes.
- var end = indexOfSpec.length > 3 ? indexOfSpec[3] : {
- $strLenBytes: input
- };
+ var end = indexOfSpec.length > 3 ? indexOfSpec[3] : {$strLenBytes: input};
var substrExpr = {
$indexOfBytes: [{$substrBytes: [input, start, {$subtract: [end, start]}]}, token]
@@ -127,23 +125,15 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
};
assertErrorCode(coll, pipeline, 40092);
- pipeline = {
- $project: {output: {$indexOfBytes: ["abc", "b", "bad"]}}
- };
+ pipeline = {$project: {output: {$indexOfBytes: ["abc", "b", "bad"]}}};
assertErrorCode(coll, pipeline, 40096);
- pipeline = {
- $project: {output: {$indexOfBytes: ["abc", "b", 0, "bad"]}}
- };
+ pipeline = {$project: {output: {$indexOfBytes: ["abc", "b", 0, "bad"]}}};
assertErrorCode(coll, pipeline, 40096);
- pipeline = {
- $project: {output: {$indexOfBytes: ["abc", "b", -1]}}
- };
+ pipeline = {$project: {output: {$indexOfBytes: ["abc", "b", -1]}}};
assertErrorCode(coll, pipeline, 40097);
- pipeline = {
- $project: {output: {$indexOfBytes: ["abc", "b", 1, -1]}}
- };
+ pipeline = {$project: {output: {$indexOfBytes: ["abc", "b", 1, -1]}}};
assertErrorCode(coll, pipeline, 40097);
}());
diff --git a/jstests/aggregation/expressions/indexof_codepoints.js b/jstests/aggregation/expressions/indexof_codepoints.js
index 20b9534b050..506b1a13cfa 100644
--- a/jstests/aggregation/expressions/indexof_codepoints.js
+++ b/jstests/aggregation/expressions/indexof_codepoints.js
@@ -15,9 +15,7 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
var input = indexOfSpec[0];
var token = indexOfSpec[1];
var start = indexOfSpec.length > 2 ? indexOfSpec[2] : 0;
- var end = indexOfSpec.length > 3 ? indexOfSpec[3] : {
- $strLenCP: input
- };
+ var end = indexOfSpec.length > 3 ? indexOfSpec[3] : {$strLenCP: input};
var substrExpr = {
$indexOfCP: [{$substrCP: [input, start, {$subtract: [end, start]}]}, token]
@@ -107,23 +105,15 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
};
assertErrorCode(coll, pipeline, 40094);
- pipeline = {
- $project: {output: {$indexOfCP: ["abc", "b", "bad"]}}
- };
+ pipeline = {$project: {output: {$indexOfCP: ["abc", "b", "bad"]}}};
assertErrorCode(coll, pipeline, 40096);
- pipeline = {
- $project: {output: {$indexOfCP: ["abc", "b", 0, "bad"]}}
- };
+ pipeline = {$project: {output: {$indexOfCP: ["abc", "b", 0, "bad"]}}};
assertErrorCode(coll, pipeline, 40096);
- pipeline = {
- $project: {output: {$indexOfCP: ["abc", "b", -1]}}
- };
+ pipeline = {$project: {output: {$indexOfCP: ["abc", "b", -1]}}};
assertErrorCode(coll, pipeline, 40097);
- pipeline = {
- $project: {output: {$indexOfCP: ["abc", "b", 1, -1]}}
- };
+ pipeline = {$project: {output: {$indexOfCP: ["abc", "b", 1, -1]}}};
assertErrorCode(coll, pipeline, 40097);
}());
diff --git a/jstests/aggregation/expressions/reduce.js b/jstests/aggregation/expressions/reduce.js
index 73bb0cfa16f..54a66fc8b56 100644
--- a/jstests/aggregation/expressions/reduce.js
+++ b/jstests/aggregation/expressions/reduce.js
@@ -11,15 +11,15 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
coll,
{
$reduce:
- {input: [1, 2, 3], initialValue: {$literal: 0}, in: {$sum: ["$$this", "$$value"]}}
+ {input: [1, 2, 3], initialValue: {$literal: 0}, in : {$sum: ["$$this", "$$value"]}}
},
6);
- testExpression(coll, {$reduce: {input: [], initialValue: {$literal: 0}, in: 10}}, 0);
+ testExpression(coll, {$reduce: {input: [], initialValue: {$literal: 0}, in : 10}}, 0);
testExpression(
coll,
{
$reduce:
- {input: [1, 2, 3], initialValue: [], in: {$concatArrays: ["$$value", ["$$this"]]}}
+ {input: [1, 2, 3], initialValue: [], in : {$concatArrays: ["$$value", ["$$this"]]}}
},
[1, 2, 3]);
testExpression(coll,
@@ -27,7 +27,7 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
$reduce: {
input: [1, 2],
initialValue: [],
- in: {$concatArrays: ["$$value", ["$$value.notAField"]]}
+ in : {$concatArrays: ["$$value", ["$$value.notAField"]]}
}
},
[[], []]);
@@ -38,14 +38,14 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
$reduce: {
input: [[1, 2, 3], [4, 5]],
initialValue: 1,
- in: {
+ in : {
$multiply: [
"$$value",
{
$reduce: {
input: "$$this",
initialValue: 0,
- in: {$sum: ["$$value", "$$this"]}
+ in : {$sum: ["$$value", "$$this"]}
}
}
]
@@ -56,43 +56,43 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
// A nested $reduce using a $let to allow the inner $reduce to access the variables of the
// outer.
- testExpression(
- coll,
- {
- $reduce: {
- input: [[0, 1], [2, 3]],
- initialValue: {allElements: [], sumOfInner: {$literal: 0}},
- in: {
- $let: {
- vars: {outerValue: "$$value", innerArray: "$$this"},
- in: {
- $reduce: {
- input: "$$innerArray",
- initialValue: "$$outerValue",
- in: {
- allElements:
- {$concatArrays: ["$$value.allElements", ["$$this"]]},
- sumOfInner: {$sum: ["$$value.sumOfInner", "$$this"]}
- }
- }
- }
- }
- }
- }
- },
- {allElements: [0, 1, 2, 3], sumOfInner: 6});
+ testExpression(coll,
+ {
+ $reduce: {
+ input: [[0, 1], [2, 3]],
+ initialValue: {allElements: [], sumOfInner: {$literal: 0}},
+ in : {
+ $let: {
+ vars: {outerValue: "$$value", innerArray: "$$this"},
+ in : {
+ $reduce: {
+ input: "$$innerArray",
+ initialValue: "$$outerValue",
+ in : {
+ allElements: {
+ $concatArrays:
+ ["$$value.allElements", ["$$this"]]
+ },
+ sumOfInner:
+ {$sum: ["$$value.sumOfInner", "$$this"]}
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ {allElements: [0, 1, 2, 3], sumOfInner: 6});
// Nullish input produces null as an output.
- testExpression(coll, {$reduce: {input: null, initialValue: {$literal: 0}, in: 5}}, null);
+ testExpression(coll, {$reduce: {input: null, initialValue: {$literal: 0}, in : 5}}, null);
testExpression(
- coll, {$reduce: {input: "$nonexistent", initialValue: {$literal: 0}, in: 5}}, null);
+ coll, {$reduce: {input: "$nonexistent", initialValue: {$literal: 0}, in : 5}}, null);
// Error cases for $reduce.
// $reduce requires an object.
- var pipeline = {
- $project: {reduced: {$reduce: 0}}
- };
+ var pipeline = {$project: {reduced: {$reduce: 0}}};
assertErrorCode(coll, pipeline, 40075);
// Unknown field specified.
@@ -102,7 +102,7 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
$reduce: {
input: {$literal: 0},
initialValue: {$literal: 0},
- in: {$literal: 0},
+ in : {$literal: 0},
notAField: {$literal: 0}
}
}
@@ -111,15 +111,11 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
assertErrorCode(coll, pipeline, 40076);
// $reduce requires input to be specified.
- pipeline = {
- $project: {reduced: {$reduce: {initialValue: {$literal: 0}, in: {$literal: 0}}}}
- };
+ pipeline = {$project: {reduced: {$reduce: {initialValue: {$literal: 0}, in : {$literal: 0}}}}};
assertErrorCode(coll, pipeline, 40077);
// $reduce requires initialValue to be specified.
- pipeline = {
- $project: {reduced: {$reduce: {input: {$literal: 0}, in: {$literal: 0}}}}
- };
+ pipeline = {$project: {reduced: {$reduce: {input: {$literal: 0}, in : {$literal: 0}}}}};
assertErrorCode(coll, pipeline, 40078);
// $reduce requires in to be specified.
@@ -129,14 +125,10 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
assertErrorCode(coll, pipeline, 40079);
// $$value is undefined in the non-'in' arguments of $reduce.
- pipeline = {
- $project: {reduced: {$reduce: {input: "$$value", initialValue: [], in: []}}}
- };
+ pipeline = {$project: {reduced: {$reduce: {input: "$$value", initialValue: [], in : []}}}};
assertErrorCode(coll, pipeline, 17276);
// $$this is undefined in the non-'in' arguments of $reduce.
- pipeline = {
- $project: {reduced: {$reduce: {input: "$$this", initialValue: [], in: []}}}
- };
+ pipeline = {$project: {reduced: {$reduce: {input: "$$this", initialValue: [], in : []}}}};
assertErrorCode(coll, pipeline, 17276);
}());
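A condensed sketch of the basic $reduce contract the tests above rely on (collection name is illustrative); $$value carries the accumulator and $$this the current element:

db.reduceDemo.drop();
db.reduceDemo.insert({});
db.reduceDemo.aggregate([{
    $project: {
        _id: 0,
        total: {$reduce: {input: [1, 2, 3], initialValue: 0, in: {$sum: ["$$value", "$$this"]}}}
    }
}]);
// expected: [{total: 6}]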
diff --git a/jstests/aggregation/expressions/split.js b/jstests/aggregation/expressions/split.js
index bfed38314b4..7d3402bde4e 100644
--- a/jstests/aggregation/expressions/split.js
+++ b/jstests/aggregation/expressions/split.js
@@ -38,35 +38,23 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and testExp
testExpression(coll, {$split: ["a", "$a"]}, null);
// Ensure that $split errors when given more or less than two arguments.
- var pipeline = {
- $project: {split: {$split: []}}
- };
+ var pipeline = {$project: {split: {$split: []}}};
assertErrorCode(coll, pipeline, 16020);
- pipeline = {
- $project: {split: {$split: ["a"]}}
- };
+ pipeline = {$project: {split: {$split: ["a"]}}};
assertErrorCode(coll, pipeline, 16020);
- pipeline = {
- $project: {split: {$split: ["a", "b", "c"]}}
- };
+ pipeline = {$project: {split: {$split: ["a", "b", "c"]}}};
assertErrorCode(coll, pipeline, 16020);
// Ensure that $split errors when given non-string input.
- pipeline = {
- $project: {split: {$split: [1, "abc"]}}
- };
+ pipeline = {$project: {split: {$split: [1, "abc"]}}};
assertErrorCode(coll, pipeline, 40085);
- pipeline = {
- $project: {split: {$split: ["abc", 1]}}
- };
+ pipeline = {$project: {split: {$split: ["abc", 1]}}};
assertErrorCode(coll, pipeline, 40086);
// Ensure that $split errors when given an empty separator.
- pipeline = {
- $project: {split: {$split: ["abc", ""]}}
- };
+ pipeline = {$project: {split: {$split: ["abc", ""]}}};
assertErrorCode(coll, pipeline, 40087);
}());
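For contrast with the error cases, a minimal well-formed $split (the strings are illustrative); it takes exactly two string arguments and splits on a non-empty separator:

db.splitDemo.drop();
db.splitDemo.insert({});
db.splitDemo.aggregate([{$project: {_id: 0, parts: {$split: ["a,b,c", ","]}}}]);
// expected: [{parts: ["a", "b", "c"]}]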
diff --git a/jstests/aggregation/expressions/switch.js b/jstests/aggregation/expressions/switch.js
index 9a6dbbb529d..64cd9e1db2f 100644
--- a/jstests/aggregation/expressions/switch.js
+++ b/jstests/aggregation/expressions/switch.js
@@ -99,8 +99,7 @@
pipeline = {
"$project": {
"_id": 0,
- "output":
- {"$switch": {"branches": [{"case": true, "then": null}], "default": false}}
+ "output": {"$switch": {"branches": [{"case": true, "then": null}], "default": false}}
}
};
res = coll.aggregate(pipeline).toArray();
@@ -125,8 +124,7 @@
pipeline = {
"$project": {
"_id": 0,
- "output":
- {"$switch": {"branches": [{"case": null, "then": false}], "default": null}}
+ "output": {"$switch": {"branches": [{"case": null, "then": false}], "default": null}}
}
};
res = coll.aggregate(pipeline).toArray();
diff --git a/jstests/aggregation/expressions/switch_errors.js b/jstests/aggregation/expressions/switch_errors.js
index cf6dc0f4f93..0d9023fb250 100644
--- a/jstests/aggregation/expressions/switch_errors.js
+++ b/jstests/aggregation/expressions/switch_errors.js
@@ -8,24 +8,16 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
var coll = db.switch;
coll.drop();
- var pipeline = {
- "$project": {"output": {"$switch": "not an object"}}
- };
+ var pipeline = {"$project": {"output": {"$switch": "not an object"}}};
assertErrorCode(coll, pipeline, 40060, "$switch requires an object as an argument.");
- pipeline = {
- "$project": {"output": {"$switch": {"branches": "not an array"}}}
- };
+ pipeline = {"$project": {"output": {"$switch": {"branches": "not an array"}}}};
assertErrorCode(coll, pipeline, 40061, "$switch requires 'branches' to be an array.");
- pipeline = {
- "$project": {"output": {"$switch": {"branches": ["not an object"]}}}
- };
+ pipeline = {"$project": {"output": {"$switch": {"branches": ["not an object"]}}}};
assertErrorCode(coll, pipeline, 40062, "$switch requires each branch to be an object.");
- pipeline = {
- "$project": {"output": {"$switch": {"branches": [{}]}}}
- };
+ pipeline = {"$project": {"output": {"$switch": {"branches": [{}]}}}};
assertErrorCode(coll, pipeline, 40064, "$switch requires each branch have a 'case'.");
pipeline = {
@@ -47,19 +39,13 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
};
assertErrorCode(coll, pipeline, 40063, "$switch found a branch with an unknown argument");
- pipeline = {
- "$project": {"output": {"$switch": {"notAnArgument": 1}}}
- };
+ pipeline = {"$project": {"output": {"$switch": {"notAnArgument": 1}}}};
assertErrorCode(coll, pipeline, 40067, "$switch found an unknown argument");
- pipeline = {
- "$project": {"output": {"$switch": {"branches": []}}}
- };
+ pipeline = {"$project": {"output": {"$switch": {"branches": []}}}};
assertErrorCode(coll, pipeline, 40068, "$switch requires at least one branch");
- pipeline = {
- "$project": {"output": {"$switch": {}}}
- };
+ pipeline = {"$project": {"output": {"$switch": {}}}};
assertErrorCode(coll, pipeline, 40068, "$switch requires at least one branch");
coll.insert({x: 1});
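For contrast, a minimal well-formed $switch (field name and strings are illustrative); every branch needs both a case and a then, and default covers unmatched input:

db.switchDemo.drop();
db.switchDemo.insert({x: 1});
db.switchDemo.aggregate([{
    $project: {
        _id: 0,
        sign: {
            $switch:
                {branches: [{case: {$gt: ["$x", 0]}, then: "positive"}], default: "non-positive"}
        }
    }
}]);
// expected: [{sign: "positive"}]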
diff --git a/jstests/aggregation/extras/mrabench.js b/jstests/aggregation/extras/mrabench.js
index fe731ecd28b..78be64a2064 100644
--- a/jstests/aggregation/extras/mrabench.js
+++ b/jstests/aggregation/extras/mrabench.js
@@ -11,10 +11,7 @@ function rollupMap() {
}
function rollupReduce(key, values) {
- var res = {
- total: 0,
- unique: 0
- };
+ var res = {total: 0, unique: 0};
for (var i = 0; i < values.length; i++) {
res.total += values[i].total;
res.unique += values[i].unique;
@@ -41,15 +38,13 @@ function rollupWeeklyMR() {
function rollupMonthlyA() {
resMonthlyA = db.runCommand({
aggregate: "gen.monthly.ip",
- pipeline:
- [{$group: {_id: {month: "_id.t"}, total: {$sum: "$value"}, unique: {$sum: 1}}}]
+ pipeline: [{$group: {_id: {month: "_id.t"}, total: {$sum: "$value"}, unique: {$sum: 1}}}]
});
}
function rollupWeeklyA() {
resWeeklyA = db.runCommand({
aggregate: "gen.weekly.ip",
- pipeline:
- [{$group: {_id: {month: "_id.t"}, total: {$sum: "$value"}, unique: {$sum: 1}}}]
+ pipeline: [{$group: {_id: {month: "_id.t"}, total: {$sum: "$value"}, unique: {$sum: 1}}}]
});
}
diff --git a/jstests/aggregation/extras/testutils.js b/jstests/aggregation/extras/testutils.js
index bb753921906..bd05ea835f0 100644
--- a/jstests/aggregation/extras/testutils.js
+++ b/jstests/aggregation/extras/testutils.js
@@ -39,7 +39,13 @@ assert(!resultsEq(t1result, t1resultf2, verbose), 't2a failed');
assert(!resultsEq(t1resultf2, t1result, verbose), 't2b failed');
var t1resultf3 = [
- {"_id": ObjectId("4dc07fedd8420ab8d0d4066d"), "pageViews": 5, "tags": ["fun", ]},
+ {
+ "_id": ObjectId("4dc07fedd8420ab8d0d4066d"),
+ "pageViews": 5,
+ "tags": [
+ "fun",
+ ]
+ },
{"_id": ObjectId("4dc07fedd8420ab8d0d4066e"), "pageViews": 7, "tags": ["fun", "nasty"]},
{"_id": ObjectId("4dc07fedd8420ab8d0d4066f"), "pageViews": 6, "tags": ["filthy"]}
];
diff --git a/jstests/aggregation/extras/utils.js b/jstests/aggregation/extras/utils.js
index 33cfe9b2b2b..7fca7aa0ca3 100644
--- a/jstests/aggregation/extras/utils.js
+++ b/jstests/aggregation/extras/utils.js
@@ -259,13 +259,9 @@ function assertErrorCode(coll, pipe, code, errmsg) {
assert.eq(res.code, code);
// Test with cursors
- var cmd = {
- pipeline: pipe
- };
+ var cmd = {pipeline: pipe};
// cmd.cursor = {};
- cmd.cursor = {
- batchSize: 0
- };
+ cmd.cursor = {batchSize: 0};
var cursorRes = coll.runCommand("aggregate", cmd);
if (cursorRes.ok) {
diff --git a/jstests/aggregation/mongos_slaveok.js b/jstests/aggregation/mongos_slaveok.js
index 91a0533d59e..9ccf383335e 100644
--- a/jstests/aggregation/mongos_slaveok.js
+++ b/jstests/aggregation/mongos_slaveok.js
@@ -26,10 +26,7 @@
var res = testDB.runCommand({aggregate: 'user', pipeline: [{$project: {x: 1}}]});
assert(res.ok, 'aggregate command failed: ' + tojson(res));
- var profileQuery = {
- op: 'command',
- ns: 'test.user', 'command.aggregate': 'user'
- };
+ var profileQuery = {op: 'command', ns: 'test.user', 'command.aggregate': 'user'};
var profileDoc = secNode.getDB('test').system.profile.findOne(profileQuery);
assert(profileDoc != null);
diff --git a/jstests/aggregation/sources/graphLookup/airports.js b/jstests/aggregation/sources/graphLookup/airports.js
index 7cf0142c631..71a38e268b8 100644
--- a/jstests/aggregation/sources/graphLookup/airports.js
+++ b/jstests/aggregation/sources/graphLookup/airports.js
@@ -30,45 +30,39 @@
local.insert({});
// Perform a simple $graphLookup and ensure it retrieves every result.
- var res = local.aggregate({
- $graphLookup: {
- from: "foreign",
- startWith: "PWM",
- connectFromField: "connects",
- connectToField: "_id",
- as: "connections"
- }
- }).toArray()[0];
-
- // "foreign" represents a connected graph.
- assert.eq(res.connections.length, airports.length);
-
- // Perform a $graphLookup and ensure it correctly computes the shortest path to a node when more
- // than one path exists.
- res = local.aggregate(
- {
+ var res = local
+ .aggregate({
$graphLookup: {
from: "foreign",
- startWith: "BOS",
+ startWith: "PWM",
connectFromField: "connects",
connectToField: "_id",
- depthField: "hops",
as: "connections"
}
- },
- {$unwind: "$connections"},
- {$project: {_id: "$connections._id", hops: "$connections.hops"}}).toArray();
+ })
+ .toArray()[0];
+
+ // "foreign" represents a connected graph.
+ assert.eq(res.connections.length, airports.length);
+
+ // Perform a $graphLookup and ensure it correctly computes the shortest path to a node when more
+ // than one path exists.
+ res = local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: "BOS",
+ connectFromField: "connects",
+ connectToField: "_id",
+ depthField: "hops",
+ as: "connections"
+ }
+ },
+ {$unwind: "$connections"},
+ {$project: {_id: "$connections._id", hops: "$connections.hops"}})
+ .toArray();
- var expectedDistances = {
- BOS: 0,
- PWM: 1,
- JFK: 1,
- LGA: 1,
- ORD: 2,
- SFO: 2,
- MIA: 3,
- ATL: 4
- };
+ var expectedDistances = {BOS: 0, PWM: 1, JFK: 1, LGA: 1, ORD: 2, SFO: 2, MIA: 3, ATL: 4};
assert.eq(res.length, airports.length);
res.forEach(function(c) {
@@ -78,15 +72,17 @@
// Disconnect the graph, and ensure we don't find the other side.
foreign.remove({_id: "JFK"});
- res = db.local.aggregate({
- $graphLookup: {
- from: "foreign",
- startWith: "ATL",
- connectFromField: "connects",
- connectToField: "_id",
- as: "connections"
- }
- }).toArray()[0];
+ res = db.local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: "ATL",
+ connectFromField: "connects",
+ connectToField: "_id",
+ as: "connections"
+ }
+ })
+ .toArray()[0];
// ATL should now connect to itself, MIA, and SFO.
assert.eq(res.connections.length, 3);
diff --git a/jstests/aggregation/sources/graphLookup/basic.js b/jstests/aggregation/sources/graphLookup/basic.js
index 8ee26a64f9f..15c8ef85e22 100644
--- a/jstests/aggregation/sources/graphLookup/basic.js
+++ b/jstests/aggregation/sources/graphLookup/basic.js
@@ -19,59 +19,67 @@
assert.writeOK(local.insert({starting: 50}));
// Perform a simple $graphLookup and ensure it retrieves every result.
- var res = local.aggregate({
- $graphLookup: {
- from: "foreign",
- startWith: "$starting",
- connectFromField: "neighbors",
- connectToField: "_id",
- as: "integers"
- }
- }).toArray()[0];
+ var res = local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: "$starting",
+ connectFromField: "neighbors",
+ connectToField: "_id",
+ as: "integers"
+ }
+ })
+ .toArray()[0];
assert.eq(res.integers.length, 100);
// Perform a $graphLookup and ensure it respects "maxDepth".
- res = local.aggregate({
- $graphLookup: {
- from: "foreign",
- startWith: "$starting",
- connectFromField: "neighbors",
- connectToField: "_id",
- maxDepth: 5,
- as: "integers"
- }
- }).toArray()[0];
+ res = local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: "$starting",
+ connectFromField: "neighbors",
+ connectToField: "_id",
+ maxDepth: 5,
+ as: "integers"
+ }
+ })
+ .toArray()[0];
// At depth zero, we retrieve one integer, and two for every depth thereafter.
assert.eq(res.integers.length, 11);
// Perform a $graphLookup and ensure it properly evaluates "startWith".
- res = local.aggregate({
- $graphLookup: {
- from: "foreign",
- startWith: {$add: ["$starting", 3]},
- connectFromField: "neighbors",
- connectToField: "_id",
- maxDepth: 0,
- as: "integers"
- }
- }).toArray()[0];
+ res = local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: {$add: ["$starting", 3]},
+ connectFromField: "neighbors",
+ connectToField: "_id",
+ maxDepth: 0,
+ as: "integers"
+ }
+ })
+ .toArray()[0];
assert.eq(res.integers.length, 1);
assert.eq(res.integers[0]._id, 53);
// Perform a $graphLookup and ensure it properly expands "startWith".
- res = local.aggregate({
- $graphLookup: {
- from: "foreign",
- startWith: {$literal: [1, 2, 3]},
- connectFromField: "neighbors",
- connectToField: "_id",
- maxDepth: 0,
- as: "integers"
- }
- }).toArray()[0];
+ res = local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: {$literal: [1, 2, 3]},
+ connectFromField: "neighbors",
+ connectToField: "_id",
+ maxDepth: 0,
+ as: "integers"
+ }
+ })
+ .toArray()[0];
assert.eq(res.integers.length, 3);
@@ -83,15 +91,17 @@
assert.writeOK(foreign.insert({_id: 51}));
assert.writeOK(foreign.insert({_id: null, neighbors: [50, 52]}));
- res = local.aggregate({
- $graphLookup: {
- from: "foreign",
- startWith: "$starting",
- connectFromField: "neighbors",
- connectToField: "_id",
- as: "integers"
- }
- }).toArray()[0];
+ res = local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: "$starting",
+ connectFromField: "neighbors",
+ connectToField: "_id",
+ as: "integers"
+ }
+ })
+ .toArray()[0];
// Our result should be missing the values with _id from 52 to 99.
assert.eq(res.integers.length, 52);
@@ -103,29 +113,33 @@
assert.writeOK(foreign.update({_id: 99}, {$set: {neighbors: [98, 0]}}));
assert.writeOK(foreign.update({_id: 0}, {$set: {neighbors: [99, 1]}}));
- res = local.aggregate({
- $graphLookup: {
- from: "foreign",
- startWith: "$starting",
- connectFromField: "neighbors",
- connectToField: "_id",
- as: "integers"
- }
- }).toArray()[0];
+ res = local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: "$starting",
+ connectFromField: "neighbors",
+ connectToField: "_id",
+ as: "integers"
+ }
+ })
+ .toArray()[0];
assert.eq(res.integers.length, 100);
// Perform a $graphLookup and ensure that "depthField" is properly populated.
- res = local.aggregate({
- $graphLookup: {
- from: "foreign",
- startWith: "$starting",
- connectFromField: "neighbors",
- connectToField: "_id",
- depthField: "distance",
- as: "integers"
- }
- }).toArray()[0];
+ res = local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: "$starting",
+ connectFromField: "neighbors",
+ connectToField: "_id",
+ depthField: "distance",
+ as: "integers"
+ }
+ })
+ .toArray()[0];
assert.eq(res.integers.length, 100);
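A condensed sketch of the $graphLookup shape these tests exercise (collection and field names follow the tests above): starting from the value of startWith, the stage repeatedly matches connectFromField values against connectToField in the from collection, optionally bounded by maxDepth and annotated per result via depthField:

db.local.aggregate([{
    $graphLookup: {
        from: "foreign",
        startWith: "$starting",
        connectFromField: "neighbors",
        connectToField: "_id",
        maxDepth: 5,             // optional: bound the traversal depth
        depthField: "distance",  // optional: record the hop count per match
        as: "integers"
    }
}]);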
diff --git a/jstests/aggregation/sources/graphLookup/error.js b/jstests/aggregation/sources/graphLookup/error.js
index 70e9d2a8079..721bce5eb3e 100644
--- a/jstests/aggregation/sources/graphLookup/error.js
+++ b/jstests/aggregation/sources/graphLookup/error.js
@@ -165,8 +165,7 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
assertErrorCode(local, pipeline, 40105, "connectToField was not specified");
pipeline = {
- $graphLookup:
- {from: "foreign", startWith: {$const: 0}, connectToField: "a", as: "output"}
+ $graphLookup: {from: "foreign", startWith: {$const: 0}, connectToField: "a", as: "output"}
};
assertErrorCode(local, pipeline, 40105, "connectFromField was not specified");
@@ -211,9 +210,7 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
var initial = [];
for (var i = 0; i < 8; i++) {
- var obj = {
- _id: i
- };
+ var obj = {_id: i};
obj['longString'] = new Array(14 * 1024 * 1024).join('x');
initial.push(i);
@@ -238,10 +235,7 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
var bulk = foreign.initializeUnorderedBulkOp();
for (var i = 0; i < 14; i++) {
- var obj = {
- from: 0,
- to: 1
- };
+ var obj = {from: 0, to: 1};
obj['s'] = new Array(7 * 1024 * 1024).join(' ');
bulk.insert(obj);
}
@@ -264,26 +258,24 @@ load("jstests/aggregation/extras/utils.js"); // For "assertErrorCode".
var bulk = foreign.initializeUnorderedBulkOp();
for (var i = 0; i < 13; i++) {
- var obj = {
- from: 0,
- to: 1
- };
+ var obj = {from: 0, to: 1};
obj['s'] = new Array(7 * 1024 * 1024).join(' ');
bulk.insert(obj);
}
assert.writeOK(bulk.execute());
- var res = local.aggregate(
- {
- $graphLookup: {
- from: "foreign",
- startWith: {$literal: 0},
- connectToField: "from",
- connectFromField: "to",
- as: "out"
- }
- },
- {$unwind: {path: "$out"}}).toArray();
+ var res = local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: {$literal: 0},
+ connectToField: "from",
+ connectFromField: "to",
+ as: "out"
+ }
+ },
+ {$unwind: {path: "$out"}})
+ .toArray();
assert.eq(res.length, 13);
}());
diff --git a/jstests/aggregation/sources/graphLookup/nested_objects.js b/jstests/aggregation/sources/graphLookup/nested_objects.js
index e95d99f293f..b2e3f5ac59a 100644
--- a/jstests/aggregation/sources/graphLookup/nested_objects.js
+++ b/jstests/aggregation/sources/graphLookup/nested_objects.js
@@ -19,15 +19,17 @@
assert.writeOK(local.insert({starting: 0}));
- var res = local.aggregate({
- $graphLookup: {
- from: "foreign",
- startWith: "$starting",
- connectFromField: "neighbors.id",
- connectToField: "_id",
- as: "integers"
- }
- }).toArray()[0];
+ var res = local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: "$starting",
+ connectFromField: "neighbors.id",
+ connectToField: "_id",
+ as: "integers"
+ }
+ })
+ .toArray()[0];
assert.eq(res.integers.length, 100);
foreign.drop();
@@ -39,15 +41,17 @@
}
assert.writeOK(bulk.execute());
- var res = local.aggregate({
- $graphLookup: {
- from: "foreign",
- startWith: "$starting",
- connectFromField: "value",
- connectToField: "previous.neighbor",
- as: "integers"
- }
- }).toArray()[0];
+ var res = local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: "$starting",
+ connectFromField: "value",
+ connectToField: "previous.neighbor",
+ as: "integers"
+ }
+ })
+ .toArray()[0];
assert.eq(res.integers.length, 100);
foreign.drop();
@@ -62,14 +66,16 @@
}
assert.writeOK(bulk.execute());
- var res = local.aggregate({
- $graphLookup: {
- from: "foreign",
- startWith: "$starting",
- connectFromField: "values.neighbor",
- connectToField: "previous.neighbor",
- as: "integers"
- }
- }).toArray()[0];
+ var res = local
+ .aggregate({
+ $graphLookup: {
+ from: "foreign",
+ startWith: "$starting",
+ connectFromField: "values.neighbor",
+ connectToField: "previous.neighbor",
+ as: "integers"
+ }
+ })
+ .toArray()[0];
assert.eq(res.integers.length, 100);
}());
diff --git a/jstests/aggregation/sources/graphLookup/sharded.js b/jstests/aggregation/sources/graphLookup/sharded.js
index 26fbbc2e9f0..d54e2be01c8 100644
--- a/jstests/aggregation/sources/graphLookup/sharded.js
+++ b/jstests/aggregation/sources/graphLookup/sharded.js
@@ -24,7 +24,8 @@ load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
assert.writeOK(local.insert({}));
var res = st.s.getDB("graphLookup")
- .local.aggregate({
+ .local
+ .aggregate({
$graphLookup: {
from: "foreign",
startWith: {$literal: 0},
diff --git a/jstests/aggregation/sources/graphLookup/socialite.js b/jstests/aggregation/sources/graphLookup/socialite.js
index 21027f21b0b..6efe2672540 100644
--- a/jstests/aggregation/sources/graphLookup/socialite.js
+++ b/jstests/aggregation/sources/graphLookup/socialite.js
@@ -30,18 +30,20 @@
// Find the social network of "Darren", that is, people Darren follows, and people who are
// followed by someone Darren follows, etc.
- var res = users.aggregate({$match: {fullname: "Darren"}},
- {
- $graphLookup: {
- from: "followers",
- startWith: "$_id",
- connectFromField: "_t",
- connectToField: "_f",
- as: "network"
- }
- },
- {$unwind: "$network"},
- {$project: {_id: "$network._t"}}).toArray();
+ var res = users
+ .aggregate({$match: {fullname: "Darren"}},
+ {
+ $graphLookup: {
+ from: "followers",
+ startWith: "$_id",
+ connectFromField: "_t",
+ connectToField: "_f",
+ as: "network"
+ }
+ },
+ {$unwind: "$network"},
+ {$project: {_id: "$network._t"}})
+ .toArray();
// "djw" is followed, directly or indirectly, by "jsr" and "bmw".
assert.eq(res.length, 2);
diff --git a/jstests/aggregation/testSlave.js b/jstests/aggregation/testSlave.js
index 21a798a1ad2..0a03d68a49b 100644
--- a/jstests/aggregation/testSlave.js
+++ b/jstests/aggregation/testSlave.js
@@ -7,9 +7,7 @@ replTest.awaitReplication();
var primary = replTest.getPrimary().getDB('test');
var secondary = replTest.getSecondary().getDB('test');
-var options = {
- writeConcern: {w: 2}
-};
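+// writeConcern w: 2 makes each insert wait until two replica set members have
+// acknowledged the write.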
+var options = {writeConcern: {w: 2}};
primary.agg.insert({}, options);
primary.agg.insert({}, options);
primary.agg.insert({}, options);
diff --git a/jstests/aggregation/testall.js b/jstests/aggregation/testall.js
index 5771cbefaef..c58d3e1e023 100644
--- a/jstests/aggregation/testall.js
+++ b/jstests/aggregation/testall.js
@@ -370,7 +370,10 @@ db.p11.save({
var p11 = db.runCommand({
aggregate: "p11",
- pipeline: [{$unwind: "$items.authors"}, {$project: {name: 1, author: "$items.authors"}}, ]
+ pipeline: [
+ {$unwind: "$items.authors"},
+ {$project: {name: 1, author: "$items.authors"}},
+ ]
});
p11result = [
@@ -418,8 +421,14 @@ assert.docEq(p13.result, p13result, 'p13 failed');
var p14 = db.runCommand({
aggregate: "article",
pipeline: [{
- $project:
- {theRemainder: {$mod: [{$ifNull: ["$other.foo", "$other.bar"]}, "$pageViews", ]}}
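+ // Computes theRemainder = (other.foo, falling back to other.bar) % pageViews.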
+ $project: {
+ theRemainder: {
+ $mod: [
+ {$ifNull: ["$other.foo", "$other.bar"]},
+ "$pageViews",
+ ]
+ }
+ }
}]
});
@@ -788,9 +797,21 @@ g5.result.forEach(function(obj) {
var g5result = [
{"_id": {"tags": "filthy"}, "authors": ["jane"]},
- {"_id": {"tags": "fun"}, "authors": ["bob", "dave", ]},
+ {
+ "_id": {"tags": "fun"},
+ "authors": [
+ "bob",
+ "dave",
+ ]
+ },
{"_id": {"tags": "good"}, "authors": ["bob"]},
- {"_id": {"tags": "nasty"}, "authors": ["dave", "jane", ]}
+ {
+ "_id": {"tags": "nasty"},
+ "authors": [
+ "dave",
+ "jane",
+ ]
+ }
];
assert.docEq(g5.result, g5result, 'g5 failed');
diff --git a/jstests/aggregation/testshard1.js b/jstests/aggregation/testshard1.js
index 0d773351f1d..516325cbe5c 100644
--- a/jstests/aggregation/testshard1.js
+++ b/jstests/aggregation/testshard1.js
@@ -46,26 +46,9 @@ being added as arrays within arrays.
var count = 0;
var strings = [
- "one",
- "two",
- "three",
- "four",
- "five",
- "six",
- "seven",
- "eight",
- "nine",
- "ten",
- "eleven",
- "twelve",
- "thirteen",
- "fourteen",
- "fifteen",
- "sixteen",
- "seventeen",
- "eighteen",
- "nineteen",
- "twenty"
+ "one", "two", "three", "four", "five", "six", "seven",
+ "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen",
+ "fifteen", "sixteen", "seventeen", "eighteen", "nineteen", "twenty"
];
jsTestLog("Bulk inserting data");
@@ -94,13 +77,11 @@ for (var i = 0; i < shards.length; i++) {
}
jsTestLog('a project and group in shards, result combined in mongos');
-var a1 = aggregateNoOrder(
- db.ts1,
- [
- {$project: {cMod10: {$mod: ["$counter", 10]}, number: 1, counter: 1}},
- {$group: {_id: "$cMod10", numberSet: {$addToSet: "$number"}, avgCounter: {$avg: "$cMod10"}}},
- {$sort: {_id: 1}}
- ]);
+var a1 = aggregateNoOrder(db.ts1, [
+ {$project: {cMod10: {$mod: ["$counter", 10]}, number: 1, counter: 1}},
+ {$group: {_id: "$cMod10", numberSet: {$addToSet: "$number"}, avgCounter: {$avg: "$cMod10"}}},
+ {$sort: {_id: 1}}
+]);
for (i = 0; i < 10; ++i) {
assert.eq(a1[i].avgCounter, a1[i]._id, 'agg sharded test avgCounter failed');
@@ -115,7 +96,7 @@ assert.eq(a2[0].total, (nItems / 2) * (1 + nItems), 'agg sharded test counter su
jsTestLog('A group combining all documents into one, averaging a null field.');
assert.eq(aggregateOrdered(db.ts1, [{$group: {_id: null, avg: {$avg: "$missing"}}}]),
- [{_id: null, avg: null}]);
+ [{_id: null, avg: null}]);
jsTestLog('an initial group starts the group in the shards, and combines them in mongos');
var a3 =
@@ -126,19 +107,18 @@ for (i = 0; i < strings.length; ++i) {
}
jsTestLog('a match takes place in the shards; just returning the results from mongos');
-var a4 = aggregateNoOrder(db.ts1,
- [{
- $match: {
- $or: [
- {counter: 55},
- {counter: 1111},
- {counter: 2222},
- {counter: 33333},
- {counter: 99999},
- {counter: 55555}
- ]
- }
- }]);
+var a4 = aggregateNoOrder(db.ts1, [{
+ $match: {
+ $or: [
+ {counter: 55},
+ {counter: 1111},
+ {counter: 2222},
+ {counter: 33333},
+ {counter: 99999},
+ {counter: 55555}
+ ]
+ }
+ }]);
assert.eq(a4.length, 6, tojson(a4));
for (i = 0; i < 6; ++i) {
@@ -192,13 +172,15 @@ function testAvgStdDev() {
jsTestLog('testing $avg and $stdDevPop in sharded $group');
// Note: not using aggregateOrdered since it requires exact results. $stdDevPop can vary
// slightly between runs if a migration occurs. This is why we use assert.close below.
- var res = db.ts1.aggregate([{
- $group: {
- _id: null,
- avg: {$avg: '$counter'},
- stdDevPop: {$stdDevPop: '$counter'},
- }
- }]).toArray();
+ var res = db.ts1
+ .aggregate([{
+ $group: {
+ _id: null,
+ avg: {$avg: '$counter'},
+ stdDevPop: {$stdDevPop: '$counter'},
+ }
+ }])
+ .toArray();
// http://en.wikipedia.org/wiki/Arithmetic_progression#Sum
var avg = (1 + nItems) / 2;
assert.close(res[0].avg, avg, '', 10 /*decimal places*/);
@@ -233,10 +215,7 @@ db.literal.save({dollar: false});
result = aggregateOrdered(
db.literal,
- [{
- $project:
- {_id: 0, cost: {$cond: ['$dollar', {$literal: '$1.00'}, {$literal: '$.99'}]}}
- }]);
+ [{$project: {_id: 0, cost: {$cond: ['$dollar', {$literal: '$1.00'}, {$literal: '$.99'}]}}}]);
assert.eq([{cost: '$.99'}], result);
diff --git a/jstests/aggregation/unwind.js b/jstests/aggregation/unwind.js
index ba6ffa44a86..ffd2a3da9c6 100644
--- a/jstests/aggregation/unwind.js
+++ b/jstests/aggregation/unwind.js
@@ -12,7 +12,6 @@ t.insert({_id: 6, x: 4});
var res = t.aggregate([{$unwind: "$x"}, {$sort: {_id: 1}}]).toArray();
assert.eq(4, res.length);
-assert.eq([1, 2, 3, 4],
- res.map(function(z) {
- return z.x;
- }));
+assert.eq([1, 2, 3, 4], res.map(function(z) {
+ return z.x;
+}));
diff --git a/jstests/auth/access_control_with_unreachable_configs.js b/jstests/auth/access_control_with_unreachable_configs.js
index 6c833d5c844..7e173475f7a 100644
--- a/jstests/auth/access_control_with_unreachable_configs.js
+++ b/jstests/auth/access_control_with_unreachable_configs.js
@@ -3,10 +3,7 @@
// are user documents stored in the configuration information, it must assume that
// there are.
-var dopts = {
- smallfiles: "",
- nopreallocj: ""
-};
+var dopts = {smallfiles: "", nopreallocj: ""};
var st = new ShardingTest({
shards: 1,
mongos: 1,
diff --git a/jstests/auth/arbiter.js b/jstests/auth/arbiter.js
index 75b7b67a9ad..df9f85a8015 100644
--- a/jstests/auth/arbiter.js
+++ b/jstests/auth/arbiter.js
@@ -19,16 +19,9 @@ replTest.initiate({
var primaryAdmin = replTest.nodes[0].getDB("admin");
var arbiterAdmin = replTest.nodes[2].getDB("admin");
-var cmd0 = {
- getCmdLineOpts: 1
-};
-var cmd1 = {
- getParameter: 1,
- logLevel: 1
-};
-var cmd2 = {
- serverStatus: 1
-};
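+// All three commands require authentication, so each fails with Unauthorized
+// (code 13) when run against the primary.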
+var cmd0 = {getCmdLineOpts: 1};
+var cmd1 = {getParameter: 1, logLevel: 1};
+var cmd2 = {serverStatus: 1};
assert.commandFailedWithCode(primaryAdmin.runCommand(cmd0), 13);
assert.commandFailedWithCode(primaryAdmin.runCommand(cmd1), 13);
diff --git a/jstests/auth/auth_options.js b/jstests/auth/auth_options.js
index d2f89d12a0f..aa8bd7ee9eb 100644
--- a/jstests/auth/auth_options.js
+++ b/jstests/auth/auth_options.js
@@ -3,9 +3,7 @@ var baseName = "jstests_auth_auth_options";
load('jstests/libs/command_line/test_parsed_options.js');
jsTest.log("Testing \"auth\" command line option");
-var expectedResult = {
- "parsed": {"security": {"authorization": "enabled"}}
-};
+var expectedResult = {"parsed": {"security": {"authorization": "enabled"}}};
testGetCmdLineOptsMongod({auth: ""}, expectedResult);
diff --git a/jstests/auth/auth_schema_upgrade.js b/jstests/auth/auth_schema_upgrade.js
index d80fbe6bbad..b63eea34788 100644
--- a/jstests/auth/auth_schema_upgrade.js
+++ b/jstests/auth/auth_schema_upgrade.js
@@ -69,10 +69,7 @@ testAuthSchemaUpgrade(conn);
MongoRunner.stopMongod(conn);
jsTest.log('Test authSchemaUpgrade sharded');
-var dopts = {
- smallfiles: "",
- nopreallocj: ""
-};
+var dopts = {smallfiles: "", nopreallocj: ""};
var st = new ShardingTest({
shards: 1,
mongos: 1,
diff --git a/jstests/auth/authz_modifications_access_control.js b/jstests/auth/authz_modifications_access_control.js
index bb294796a7f..7f7b01e12ba 100644
--- a/jstests/auth/authz_modifications_access_control.js
+++ b/jstests/auth/authz_modifications_access_control.js
@@ -5,8 +5,8 @@
function runTest(conn) {
var authzErrorCode = 13;
- conn.getDB('admin')
- .createUser({user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
+ conn.getDB('admin').createUser(
+ {user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
var userAdminConn = new Mongo(conn.host);
userAdminConn.getDB('admin').auth('userAdmin', 'pwd');
@@ -62,11 +62,7 @@ function runTest(conn) {
(function() {
jsTestLog("Testing role creation, of user-defined roles with same name as built-in roles");
- var cmdObj = {
- createRole: "readWrite",
- roles: [],
- privileges: []
- };
+ var cmdObj = {createRole: "readWrite", roles: [], privileges: []};
var res = adminUserAdmin.runCommand(cmdObj);
assert.commandFailed(res, tojson(cmdObj));
diff --git a/jstests/auth/basic_role_auth.js b/jstests/auth/basic_role_auth.js
index f44a331fa95..7188aa7e4f6 100644
--- a/jstests/auth/basic_role_auth.js
+++ b/jstests/auth/basic_role_auth.js
@@ -36,36 +36,12 @@ var AUTH_INFO = {
};
// Constants that lists the privileges of a given role.
-var READ_PERM = {
- query: 1,
- index_r: 1,
- killCursor: 1
-};
-var READ_WRITE_PERM = {
- insert: 1,
- update: 1,
- remove: 1,
- query: 1,
- index_r: 1,
- index_w: 1,
- killCursor: 1
-};
-var ADMIN_PERM = {
- index_r: 1,
- index_w: 1,
- profile_r: 1
-};
-var UADMIN_PERM = {
- user_r: 1,
- user_w: 1
-};
-var CLUSTER_PERM = {
- killOp: 1,
- currentOp: 1,
- fsync_unlock: 1,
- killCursor: 1,
- profile_r: 1
-};
+var READ_PERM = {query: 1, index_r: 1, killCursor: 1};
+var READ_WRITE_PERM =
+ {insert: 1, update: 1, remove: 1, query: 1, index_r: 1, index_w: 1, killCursor: 1};
+var ADMIN_PERM = {index_r: 1, index_w: 1, profile_r: 1};
+var UADMIN_PERM = {user_r: 1, user_w: 1};
+var CLUSTER_PERM = {killOp: 1, currentOp: 1, fsync_unlock: 1, killCursor: 1, profile_r: 1};
/**
* Checks whether an error occurs after running an operation.
@@ -101,91 +77,79 @@ var checkErr = function(shouldPass, opFunc) {
* fsync_unlock.
*/
var testOps = function(db, allowedActions) {
- checkErr(allowedActions.hasOwnProperty('insert'),
- function() {
- var res = db.user.insert({y: 1});
- if (res.hasWriteError())
- throw Error("insert failed: " + tojson(res.getRawResponse()));
- });
-
- checkErr(allowedActions.hasOwnProperty('update'),
- function() {
- var res = db.user.update({y: 1}, {z: 3});
- if (res.hasWriteError())
- throw Error("update failed: " + tojson(res.getRawResponse()));
- });
-
- checkErr(allowedActions.hasOwnProperty('remove'),
- function() {
- var res = db.user.remove({y: 1});
- if (res.hasWriteError())
- throw Error("remove failed: " + tojson(res.getRawResponse()));
- });
-
- checkErr(allowedActions.hasOwnProperty('query'),
- function() {
- db.user.findOne({y: 1});
- });
-
- checkErr(allowedActions.hasOwnProperty('killOp'),
- function() {
- var errorCodeUnauthorized = 13;
- var res = db.killOp(1);
-
- if (res.code == errorCodeUnauthorized) {
- throw Error("unauthorized killOp");
- }
- });
-
- checkErr(allowedActions.hasOwnProperty('currentOp'),
- function() {
- var errorCodeUnauthorized = 13;
- var res = db.currentOp();
-
- if (res.code == errorCodeUnauthorized) {
- throw Error("unauthorized currentOp");
- }
- });
-
- checkErr(allowedActions.hasOwnProperty('index_r'),
- function() {
- db.system.indexes.findOne();
- });
-
- checkErr(allowedActions.hasOwnProperty('index_w'),
- function() {
- var res = db.user.ensureIndex({x: 1});
- if (res.code == 13) { // Unauthorized
- throw Error("unauthorized currentOp");
- }
- });
-
- checkErr(allowedActions.hasOwnProperty('profile_r'),
- function() {
- db.system.profile.findOne();
- });
-
- checkErr(allowedActions.hasOwnProperty('profile_w'),
- function() {
- var res = db.system.profile.insert({x: 1});
- if (res.hasWriteError()) {
- throw Error("profile insert failed: " + tojson(res.getRawResponse()));
- }
- });
-
- checkErr(allowedActions.hasOwnProperty('user_r'),
- function() {
- var result = db.runCommand({usersInfo: 1});
- if (!result.ok) {
- throw new Error(tojson(result));
- }
- });
-
- checkErr(allowedActions.hasOwnProperty('user_w'),
- function() {
- db.createUser({user: 'a', pwd: 'a', roles: jsTest.basicUserRoles});
- assert(db.dropUser('a'));
- });
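+ // Each checkErr call below asserts that the operation succeeds exactly when
+ // the matching permission is present in allowedActions.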
+ checkErr(allowedActions.hasOwnProperty('insert'), function() {
+ var res = db.user.insert({y: 1});
+ if (res.hasWriteError())
+ throw Error("insert failed: " + tojson(res.getRawResponse()));
+ });
+
+ checkErr(allowedActions.hasOwnProperty('update'), function() {
+ var res = db.user.update({y: 1}, {z: 3});
+ if (res.hasWriteError())
+ throw Error("update failed: " + tojson(res.getRawResponse()));
+ });
+
+ checkErr(allowedActions.hasOwnProperty('remove'), function() {
+ var res = db.user.remove({y: 1});
+ if (res.hasWriteError())
+ throw Error("remove failed: " + tojson(res.getRawResponse()));
+ });
+
+ checkErr(allowedActions.hasOwnProperty('query'), function() {
+ db.user.findOne({y: 1});
+ });
+
+ checkErr(allowedActions.hasOwnProperty('killOp'), function() {
+ var errorCodeUnauthorized = 13;
+ var res = db.killOp(1);
+
+ if (res.code == errorCodeUnauthorized) {
+ throw Error("unauthorized killOp");
+ }
+ });
+
+ checkErr(allowedActions.hasOwnProperty('currentOp'), function() {
+ var errorCodeUnauthorized = 13;
+ var res = db.currentOp();
+
+ if (res.code == errorCodeUnauthorized) {
+ throw Error("unauthorized currentOp");
+ }
+ });
+
+ checkErr(allowedActions.hasOwnProperty('index_r'), function() {
+ db.system.indexes.findOne();
+ });
+
+ checkErr(allowedActions.hasOwnProperty('index_w'), function() {
+ var res = db.user.ensureIndex({x: 1});
+ if (res.code == 13) { // Unauthorized
+ throw Error("unauthorized currentOp");
+ }
+ });
+
+ checkErr(allowedActions.hasOwnProperty('profile_r'), function() {
+ db.system.profile.findOne();
+ });
+
+ checkErr(allowedActions.hasOwnProperty('profile_w'), function() {
+ var res = db.system.profile.insert({x: 1});
+ if (res.hasWriteError()) {
+ throw Error("profile insert failed: " + tojson(res.getRawResponse()));
+ }
+ });
+
+ checkErr(allowedActions.hasOwnProperty('user_r'), function() {
+ var result = db.runCommand({usersInfo: 1});
+ if (!result.ok) {
+ throw new Error(tojson(result));
+ }
+ });
+
+ checkErr(allowedActions.hasOwnProperty('user_w'), function() {
+ db.createUser({user: 'a', pwd: 'a', roles: jsTest.basicUserRoles});
+ assert(db.dropUser('a'));
+ });
// Test for kill cursor
(function() {
@@ -206,33 +170,31 @@ var testOps = function(db, allowedActions) {
// before proceeding.
db.runCommand({whatsmyuri: 1});
- checkErr(!allowedActions.hasOwnProperty('killCursor'),
- function() {
- while (cursor.hasNext()) {
- var next = cursor.next();
-
- // This is a failure in mongos case. Standalone case will fail
- // when next() was called.
- if (next.code == 16336) {
- // could not find cursor in cache for id
- throw next.$err;
- }
- }
- });
+ checkErr(!allowedActions.hasOwnProperty('killCursor'), function() {
+ while (cursor.hasNext()) {
+ var next = cursor.next();
+
+ // This is a failure in the mongos case. The standalone case will fail
+ // when next() is called.
+ if (next.code == 16336) {
+ // could not find cursor in cache for id
+ throw next.$err;
+ }
+ }
+ });
}); // TODO: enable test after SERVER-5813 is fixed.
var isMongos = db.runCommand({isdbgrid: 1}).isdbgrid;
// Note: fsyncUnlock is not supported in mongos.
if (!isMongos) {
- checkErr(allowedActions.hasOwnProperty('fsync_unlock'),
- function() {
- var res = db.fsyncUnlock();
- var errorCodeUnauthorized = 13;
-
- if (res.code == errorCodeUnauthorized) {
- throw Error("unauthorized unauthorized fsyncUnlock");
- }
- });
+ checkErr(allowedActions.hasOwnProperty('fsync_unlock'), function() {
+ var res = db.fsyncUnlock();
+ var errorCodeUnauthorized = 13;
+
+ if (res.code == errorCodeUnauthorized) {
+ throw Error("unauthorized unauthorized fsyncUnlock");
+ }
+ });
}
};
diff --git a/jstests/auth/clac_system_colls.js b/jstests/auth/clac_system_colls.js
index 527f6c29e5d..26a315db252 100644
--- a/jstests/auth/clac_system_colls.js
+++ b/jstests/auth/clac_system_colls.js
@@ -23,10 +23,7 @@ function runTest(admindb) {
{resource: {db: admindb.getName(), collection: sysCollections[i]}, actions: ['find']});
}
- var findPriv = {
- resource: {db: admindb.getName(), collection: ""},
- actions: ['find']
- };
+ var findPriv = {resource: {db: admindb.getName(), collection: ""}, actions: ['find']};
admindb.createRole({role: "FindInDB", roles: [], privileges: [findPriv]});
admindb.createRole({role: "FindOnSysRes", roles: [], privileges: sysPrivs});
diff --git a/jstests/auth/commands_builtin_roles.js b/jstests/auth/commands_builtin_roles.js
index 1d5f5f59756..2fb65de8663 100644
--- a/jstests/auth/commands_builtin_roles.js
+++ b/jstests/auth/commands_builtin_roles.js
@@ -135,14 +135,8 @@ function checkForNonExistentRoles() {
}
}
-var opts = {
- auth: "",
- enableExperimentalStorageDetailsCmd: ""
-};
-var impls = {
- createUsers: createUsers,
- runOneTest: runOneTest
-};
+var opts = {auth: "", enableExperimentalStorageDetailsCmd: ""};
+var impls = {createUsers: createUsers, runOneTest: runOneTest};
checkForNonExistentRoles();
diff --git a/jstests/auth/commands_user_defined_roles.js b/jstests/auth/commands_user_defined_roles.js
index 8a7402f033d..8dff4e050be 100644
--- a/jstests/auth/commands_user_defined_roles.js
+++ b/jstests/auth/commands_user_defined_roles.js
@@ -102,10 +102,7 @@ function runOneTest(conn, t) {
var actions = p.actions;
for (var k = 0; k < actions.length; k++) {
- var privDoc = {
- resource: resource,
- actions: [actions[k]]
- };
+ var privDoc = {resource: resource, actions: [actions[k]]};
msg = testInsufficientPrivileges(conn, t, testcase, [privDoc]);
if (msg) {
failures.push(t.testname + ": " + msg);
@@ -157,14 +154,8 @@ function createUsers(conn) {
adminDb.logout();
}
-var opts = {
- auth: "",
- enableExperimentalStorageDetailsCmd: ""
-};
-var impls = {
- createUsers: createUsers,
- runOneTest: runOneTest
-};
+var opts = {auth: "", enableExperimentalStorageDetailsCmd: ""};
+var impls = {createUsers: createUsers, runOneTest: runOneTest};
// run all tests standalone
var conn = MongoRunner.runMongod(opts);
diff --git a/jstests/auth/copyauth.js b/jstests/auth/copyauth.js
index f9baf5dee79..91e17edc669 100644
--- a/jstests/auth/copyauth.js
+++ b/jstests/auth/copyauth.js
@@ -57,11 +57,10 @@ function ClusterSpawnHelper(clusterType, startWithAuth, startWithTransitionToAut
replSetTest.startSet();
replSetTest.initiate();
if (startWithAuth) {
- authutil.asCluster(replSetTest.nodes,
- replSetTestConfig.nodeOptions.keyFile,
- function() {
- replSetTest.awaitReplication();
- });
+ authutil.asCluster(
+ replSetTest.nodes, replSetTestConfig.nodeOptions.keyFile, function() {
+ replSetTest.awaitReplication();
+ });
} else {
replSetTest.awaitReplication();
}
@@ -122,8 +121,8 @@ function copydbBetweenClustersTest(configObj) {
if (configObj.isSourceUsingAuth) {
// Create a super user so we can create a regular user and not be locked out afterwards
- source.conn.getDB("admin")
- .createUser({user: "sourceSuperUser", pwd: "sourceSuperUser", roles: ["root"]});
+ source.conn.getDB("admin").createUser(
+ {user: "sourceSuperUser", pwd: "sourceSuperUser", roles: ["root"]});
source.conn.getDB("admin").auth("sourceSuperUser", "sourceSuperUser");
source.conn.getDB(baseName)[baseName].save({i: 1});
@@ -157,8 +156,8 @@ function copydbBetweenClustersTest(configObj) {
configObj.isTargetUsingTransitionToAuth);
if (configObj.isTargetUsingAuth) {
- target.conn.getDB("admin")
- .createUser({user: "targetSuperUser", pwd: "targetSuperUser", roles: ["root"]});
+ target.conn.getDB("admin").createUser(
+ {user: "targetSuperUser", pwd: "targetSuperUser", roles: ["root"]});
var readWhenLoggedOut = function() {
target.conn.getDB(baseName)[baseName].findOne();
diff --git a/jstests/auth/copyauth_between_shards.js b/jstests/auth/copyauth_between_shards.js
index 219d38c7f0f..419d548f969 100644
--- a/jstests/auth/copyauth_between_shards.js
+++ b/jstests/auth/copyauth_between_shards.js
@@ -4,11 +4,7 @@
var baseName = "jstests_clone_copyauth_between_shards";
function copydbWithinShardedCluster(useReplSets, passCredentials, useAuth) {
- var clusterConfig = {
- shards: 1,
- mongos: 1,
- config: 1
- };
+ var clusterConfig = {shards: 1, mongos: 1, config: 1};
if (useAuth) {
clusterConfig.auth = "";
diff --git a/jstests/auth/indexSystemUsers.js b/jstests/auth/indexSystemUsers.js
index ef7187dfe46..fd33e906974 100644
--- a/jstests/auth/indexSystemUsers.js
+++ b/jstests/auth/indexSystemUsers.js
@@ -18,17 +18,15 @@ assert.eq(13, res.code); // unauthorized
assert.writeError(adminDB.exploit.system.indexes.insert(
{ns: "admin.system.users", key: {haxx: 1.0}, name: "haxx_1", unique: true, dropDups: true}));
// Make sure that no indexes were built.
-var collectionInfosCursor =
- adminDB.runCommand("listCollections",
- {
- filter: {
- $and: [
- {name: /^admin\.system\.users\.\$/},
- {name: {$ne: "admin.system.users.$_id_"}},
- {name: {$ne: "admin.system.users.$user_1_db_1"}}
- ]
- }
- });
+var collectionInfosCursor = adminDB.runCommand("listCollections", {
+ filter: {
+ $and: [
+ {name: /^admin\.system\.users\.\$/},
+ {name: {$ne: "admin.system.users.$_id_"}},
+ {name: {$ne: "admin.system.users.$user_1_db_1"}}
+ ]
+ }
+});
assert.eq([], new DBCommandCursor(adminDB.getMongo(), collectionInfosCursor).toArray());
adminDB.logout();
diff --git a/jstests/auth/js_scope_leak.js b/jstests/auth/js_scope_leak.js
index f1d5d192876..847919debf1 100644
--- a/jstests/auth/js_scope_leak.js
+++ b/jstests/auth/js_scope_leak.js
@@ -21,8 +21,7 @@ function missingOrEquals(string) {
'var global = function(){return this;}.call();'
// Uncomment the next line when debugging.
// + 'print(global.hasOwnProperty("someGlobal") ? someGlobal : "MISSING" );'
- +
- 'return !global.hasOwnProperty("someGlobal")' +
+ + 'return !global.hasOwnProperty("someGlobal")' +
' || someGlobal == unescape("' + escape(string) + '");' +
'}()';
}
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index 2cc54f30eff..7725f9987de 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -86,14 +86,8 @@ var commandNotSupportedCode = 115;
var shard0name = "shard0000";
// useful shorthand when defining the tests below
-var roles_write = {
- readWrite: 1,
- readWriteAnyDatabase: 1,
- dbOwner: 1,
- restore: 1,
- root: 1,
- __system: 1
-};
+var roles_write =
+ {readWrite: 1, readWriteAnyDatabase: 1, dbOwner: 1, restore: 1, root: 1, __system: 1};
var roles_read = {
read: 1,
readAnyDatabase: 1,
@@ -104,25 +98,9 @@ var roles_read = {
root: 1,
__system: 1
};
-var roles_readAny = {
- readAnyDatabase: 1,
- readWriteAnyDatabase: 1,
- backup: 1,
- root: 1,
- __system: 1
-};
-var roles_dbAdmin = {
- dbAdmin: 1,
- dbAdminAnyDatabase: 1,
- dbOwner: 1,
- root: 1,
- __system: 1
-};
-var roles_dbAdminAny = {
- dbAdminAnyDatabase: 1,
- root: 1,
- __system: 1
-};
+var roles_readAny = {readAnyDatabase: 1, readWriteAnyDatabase: 1, backup: 1, root: 1, __system: 1};
+var roles_dbAdmin = {dbAdmin: 1, dbAdminAnyDatabase: 1, dbOwner: 1, root: 1, __system: 1};
+var roles_dbAdminAny = {dbAdminAnyDatabase: 1, root: 1, __system: 1};
var roles_writeDbAdmin = {
readWrite: 1,
readWriteAnyDatabase: 1,
@@ -132,12 +110,7 @@ var roles_writeDbAdmin = {
root: 1,
__system: 1
};
-var roles_writeDbAdminAny = {
- readWriteAnyDatabase: 1,
- dbAdminAnyDatabase: 1,
- root: 1,
- __system: 1
-};
+var roles_writeDbAdminAny = {readWriteAnyDatabase: 1, dbAdminAnyDatabase: 1, root: 1, __system: 1};
var roles_readDbAdmin = {
read: 1,
readAnyDatabase: 1,
@@ -149,31 +122,11 @@ var roles_readDbAdmin = {
root: 1,
__system: 1
};
-var roles_readDbAdminAny = {
- readAnyDatabase: 1,
- readWriteAnyDatabase: 1,
- dbAdminAnyDatabase: 1,
- root: 1,
- __system: 1
-};
-var roles_monitoring = {
- clusterMonitor: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
-};
-var roles_hostManager = {
- hostManager: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
-};
-var roles_clusterManager = {
- clusterManager: 1,
- clusterAdmin: 1,
- root: 1,
- __system: 1
-};
+var roles_readDbAdminAny =
+ {readAnyDatabase: 1, readWriteAnyDatabase: 1, dbAdminAnyDatabase: 1, root: 1, __system: 1};
+var roles_monitoring = {clusterMonitor: 1, clusterAdmin: 1, root: 1, __system: 1};
+var roles_hostManager = {hostManager: 1, clusterAdmin: 1, root: 1, __system: 1};
+var roles_clusterManager = {clusterManager: 1, clusterAdmin: 1, root: 1, __system: 1};
var roles_all = {
read: 1,
readAnyDatabase: 1,
@@ -653,8 +606,7 @@ var authCommandsLib = {
{
runOnDb: secondDbName,
roles: roles_readAny,
- privileges:
- [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
+ privileges: [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
}
]
},
@@ -668,10 +620,9 @@ var authCommandsLib = {
{
runOnDb: firstDbName,
roles: Object.extend({restore: 1}, roles_writeDbAdmin),
- privileges: [{
- resource: {db: firstDbName, collection: "x"},
- actions: ["createCollection"]
- }]
+ privileges: [
+ {resource: {db: firstDbName, collection: "x"}, actions: ["createCollection"]}
+ ]
},
{
runOnDb: firstDbName,
@@ -890,8 +841,7 @@ var authCommandsLib = {
{
runOnDb: firstDbName,
roles: roles_read,
- privileges:
- [{resource: {db: firstDbName, collection: "coll"}, actions: ["find"]}]
+ privileges: [{resource: {db: firstDbName, collection: "coll"}, actions: ["find"]}]
},
{
runOnDb: secondDbName,
@@ -1161,8 +1111,7 @@ var authCommandsLib = {
{
runOnDb: secondDbName,
roles: roles_readAny,
- privileges:
- [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
+ privileges: [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
}
]
},
@@ -1187,8 +1136,7 @@ var authCommandsLib = {
{
runOnDb: secondDbName,
roles: roles_readAny,
- privileges:
- [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
+ privileges: [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
}
]
},
@@ -1350,8 +1298,7 @@ var authCommandsLib = {
{
runOnDb: secondDbName,
roles: roles_readAny,
- privileges:
- [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
+ privileges: [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
}
]
},
@@ -1619,8 +1566,7 @@ var authCommandsLib = {
{
runOnDb: secondDbName,
roles: roles_readAny,
- privileges:
- [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
+ privileges: [{resource: {db: secondDbName, collection: "x"}, actions: ["find"]}]
}
]
},
@@ -1860,10 +1806,9 @@ var authCommandsLib = {
{
runOnDb: secondDbName,
roles: roles_dbAdminAny,
- privileges: [{
- resource: {db: secondDbName, collection: ""},
- actions: ["enableProfiler"]
- }]
+ privileges: [
+ {resource: {db: secondDbName, collection: ""}, actions: ["enableProfiler"]}
+ ]
}
]
},
@@ -1908,8 +1853,7 @@ var authCommandsLib = {
},
{
testname: "renameCollection_sameDb",
- command:
- {renameCollection: firstDbName + ".x", to: firstDbName + ".y", dropTarget: true},
+ command: {renameCollection: firstDbName + ".x", to: firstDbName + ".y", dropTarget: true},
setup: function(db) {
db.getSisterDB(firstDbName).x.save({});
},
@@ -2003,8 +1947,7 @@ var authCommandsLib = {
{
runOnDb: firstDbName,
roles: roles_dbAdmin,
- privileges:
- [{resource: {db: firstDbName, collection: "x"}, actions: ["reIndex"]}]
+ privileges: [{resource: {db: firstDbName, collection: "x"}, actions: ["reIndex"]}]
},
{
runOnDb: secondDbName,
@@ -2058,10 +2001,9 @@ var authCommandsLib = {
runOnDb: secondDbName,
roles:
{dbAdminAnyDatabase: 1, hostManager: 1, clusterAdmin: 1, root: 1, __system: 1},
- privileges: [{
- resource: {db: secondDbName, collection: ""},
- actions: ["repairDatabase"]
- }]
+ privileges: [
+ {resource: {db: secondDbName, collection: ""}, actions: ["repairDatabase"]}
+ ]
}
]
},
diff --git a/jstests/auth/localhostAuthBypass.js b/jstests/auth/localhostAuthBypass.js
index 6cb315650ef..cd56e3bc160 100644
--- a/jstests/auth/localhostAuthBypass.js
+++ b/jstests/auth/localhostAuthBypass.js
@@ -79,17 +79,13 @@ var assertCannotRunCommands = function(mongo) {
{param: "userCacheInvalidationIntervalSecs", val: 300}
];
params.forEach(function(p) {
- var cmd = {
- setParameter: 1
- };
+ var cmd = {setParameter: 1};
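+ // e.g. cmd becomes {setParameter: 1, userCacheInvalidationIntervalSecs: 300}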
cmd[p.param] = p.val;
assert.commandFailedWithCode(
mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "setParameter: " + p.param);
});
params.forEach(function(p) {
- var cmd = {
- getParameter: 1
- };
+ var cmd = {getParameter: 1};
cmd[p.param] = 1;
assert.commandFailedWithCode(
mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "getParameter: " + p.param);
@@ -167,8 +163,8 @@ var runNonlocalTest = function(host) {
assertCannotRunCommands(mongo);
assert.throws(function() {
- mongo.getDB("admin")
- .createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
+ mongo.getDB("admin").createUser(
+ {user: username, pwd: password, roles: jsTest.adminUserRoles});
});
assert.throws(function() {
mongo.getDB("$external")
diff --git a/jstests/auth/log_user_basic.js b/jstests/auth/log_user_basic.js
index 817d83ac519..9e782fed9f5 100644
--- a/jstests/auth/log_user_basic.js
+++ b/jstests/auth/log_user_basic.js
@@ -26,10 +26,7 @@ if (0) {
return null;
}
- var logInfo = {
- id: res[1],
- users: {}
- };
+ var logInfo = {id: res[1], users: {}};
var userLog = null;
res = ID_USER_PATTERN.exec(line);
@@ -64,11 +61,7 @@ if (0) {
users: {} // contains authenticated users represented as a map of db to user names.
};
- var connInfo2 = {
- id: null,
- mongo: conn2,
- users: {}
- };
+ var connInfo2 = {id: null, mongo: conn2, users: {}};
var conn1Auth =
[{user: 'foo', pwd: 'bar', db: 'test'}, {user: 'chun', pwd: 'li', db: 'sf'}];
diff --git a/jstests/auth/pseudo_commands.js b/jstests/auth/pseudo_commands.js
index 510f8c71e53..2a882f219c8 100644
--- a/jstests/auth/pseudo_commands.js
+++ b/jstests/auth/pseudo_commands.js
@@ -68,10 +68,7 @@ function runTest(conn) {
__system: true
};
- var privilege = {
- resource: {cluster: true},
- actions: ['inprog']
- };
+ var privilege = {resource: {cluster: true}, actions: ['inprog']};
var testFunc = function(shouldPass) {
var passed = true;
@@ -110,10 +107,7 @@ function runTest(conn) {
__system: true
};
- var privilege = {
- resource: {cluster: true},
- actions: ['killop']
- };
+ var privilege = {resource: {cluster: true}, actions: ['killop']};
var testFunc = function(shouldPass) {
var passed = true;
@@ -159,10 +153,7 @@ function runTest(conn) {
__system: true
};
- var privilege = {
- resource: {cluster: true},
- actions: ['unlock']
- };
+ var privilege = {resource: {cluster: true}, actions: ['unlock']};
var testFunc = function(shouldPass) {
var passed = true;
diff --git a/jstests/auth/renameSystemCollections.js b/jstests/auth/renameSystemCollections.js
index dffee963499..32b2478095d 100644
--- a/jstests/auth/renameSystemCollections.js
+++ b/jstests/auth/renameSystemCollections.js
@@ -7,12 +7,7 @@ var testDB2 = conn.getDB("testdb2");
var CodeUnauthorized = 13;
-var backdoorUserDoc = {
- user: 'backdoor',
- db: 'admin',
- pwd: 'hashed',
- roles: ['root']
-};
+var backdoorUserDoc = {user: 'backdoor', db: 'admin', pwd: 'hashed', roles: ['root']};
adminDB.createUser({user: 'userAdmin', pwd: 'password', roles: ['userAdminAnyDatabase']});
diff --git a/jstests/auth/repl.js b/jstests/auth/repl.js
index 17ab3c2db20..d6a1a9ce6ec 100644
--- a/jstests/auth/repl.js
+++ b/jstests/auth/repl.js
@@ -3,10 +3,7 @@
var baseName = "jstests_auth_repl";
var rsName = baseName + "_rs";
var rtName = baseName + "_rt";
-var mongoOptions = {
- auth: null,
- keyFile: "jstests/libs/key1"
-};
+var mongoOptions = {auth: null, keyFile: "jstests/libs/key1"};
var authErrCode = 13;
var AuthReplTest = function(spec) {
@@ -178,11 +175,9 @@ jsTest.log("1 test replica sets");
var rs = new ReplSetTest({name: rsName, nodes: 2});
var nodes = rs.startSet(mongoOptions);
rs.initiate();
-authutil.asCluster(nodes,
- "jstests/libs/key1",
- function() {
- rs.awaitReplication();
- });
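+// asCluster authenticates with the keyfile as the internal cluster user, runs
+// the callback, and logs out afterwards.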
+authutil.asCluster(nodes, "jstests/libs/key1", function() {
+ rs.awaitReplication();
+});
var primary = rs.getPrimary();
var secondary = rs.getSecondary();
@@ -196,11 +191,9 @@ jsTest.log("2 test initial sync");
rs = new ReplSetTest({name: rsName, nodes: 1, nodeOptions: mongoOptions});
nodes = rs.startSet();
rs.initiate();
-authutil.asCluster(nodes,
- "jstests/libs/key1",
- function() {
- rs.awaitReplication();
- });
+authutil.asCluster(nodes, "jstests/libs/key1", function() {
+ rs.awaitReplication();
+});
primary = rs.getPrimary();
diff --git a/jstests/auth/repl_auth.js b/jstests/auth/repl_auth.js
index bafaafd8f7b..aa851840bc5 100644
--- a/jstests/auth/repl_auth.js
+++ b/jstests/auth/repl_auth.js
@@ -16,11 +16,11 @@ var admin = setupConn.getDB('admin');
admin.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
admin.auth('admin', 'password');
-setupConn.getDB('foo')
- .createUser({user: 'foo', pwd: 'foopwd', roles: jsTest.basicUserRoles}, {w: NUM_NODES});
+setupConn.getDB('foo').createUser({user: 'foo', pwd: 'foopwd', roles: jsTest.basicUserRoles},
+ {w: NUM_NODES});
setupConn.getDB('foo').logout();
-setupConn.getDB('bar')
- .createUser({user: 'bar', pwd: 'barpwd', roles: jsTest.basicUserRoles}, {w: NUM_NODES});
+setupConn.getDB('bar').createUser({user: 'bar', pwd: 'barpwd', roles: jsTest.basicUserRoles},
+ {w: NUM_NODES});
setupConn.getDB('bar').logout();
var replConn0 = new Mongo(rsTest.getURL());
diff --git a/jstests/auth/resource_pattern_matching.js b/jstests/auth/resource_pattern_matching.js
index 5a4da66871c..36d6711ea52 100644
--- a/jstests/auth/resource_pattern_matching.js
+++ b/jstests/auth/resource_pattern_matching.js
@@ -135,7 +135,7 @@ function run_tests(granter, verifier) {
run_test("specific",
granter,
verifier,
- [{resource: {db: "a", collection: "a"}, actions: ["find"]}],
+ [{resource: {db: "a", collection: "a"}, actions: ["find"]}],
{
"a.a": should_find,
"a.b": should_fail_find,
@@ -143,50 +143,39 @@ function run_tests(granter, verifier) {
"b.b": should_fail_find
});
- run_test("glob_collection",
- granter,
- verifier,
- [{resource: {db: "a", collection: ""}, actions: ["find"]}],
- {
- "a.a": should_find,
- "a.b": should_find,
- "b.a": should_fail_find,
- "b.b": should_fail_find
- });
+ run_test(
+ "glob_collection",
+ granter,
+ verifier,
+ [{resource: {db: "a", collection: ""}, actions: ["find"]}],
+ {"a.a": should_find, "a.b": should_find, "b.a": should_fail_find, "b.b": should_fail_find});
- run_test("glob_database",
- granter,
- verifier,
- [{resource: {db: "", collection: "a"}, actions: ["find"]}],
- {
- "a.a": should_find,
- "a.b": should_fail_find,
- "b.a": should_find,
- "b.b": should_fail_find
- });
+ run_test(
+ "glob_database",
+ granter,
+ verifier,
+ [{resource: {db: "", collection: "a"}, actions: ["find"]}],
+ {"a.a": should_find, "a.b": should_fail_find, "b.a": should_find, "b.b": should_fail_find});
run_test("glob_all",
granter,
verifier,
- [{resource: {db: "", collection: ""}, actions: ["find"]}],
+ [{resource: {db: "", collection: ""}, actions: ["find"]}],
{"a.a": should_find, "a.b": should_find, "b.a": should_find, "b.b": should_find});
- run_test("any_resource",
- granter,
- verifier,
- [{resource: {anyResource: true}, actions: ["find"]}],
- {
- "a.a": should_find,
- "a.b": should_find,
- "b.a": should_find,
- "b.b": should_find,
- "c.a": should_find
- });
+ run_test(
+ "any_resource", granter, verifier, [{resource: {anyResource: true}, actions: ["find"]}], {
+ "a.a": should_find,
+ "a.b": should_find,
+ "b.a": should_find,
+ "b.b": should_find,
+ "c.a": should_find
+ });
run_test("no_global_access",
granter,
verifier,
- [{resource: {db: "$", collection: "cmd"}, actions: ["find"]}],
+ [{resource: {db: "$", collection: "cmd"}, actions: ["find"]}],
{
"a.a": function(testdb, testcol) {
var r = testdb.stats();
diff --git a/jstests/auth/role_management_commands.js b/jstests/auth/role_management_commands.js
index 9847818e3b1..af2f7709bf1 100644
--- a/jstests/auth/role_management_commands.js
+++ b/jstests/auth/role_management_commands.js
@@ -180,12 +180,10 @@ function runTest(conn) {
(function testGrantPrivilegesToRole() {
jsTestLog("Testing grantPrivilegesToRole");
- adminUserAdmin.grantPrivilegesToRole(
- 'adminRole',
- [
- {resource: {cluster: true}, actions: ['serverStatus']},
- {resource: {db: "", collection: ""}, actions: ['find']}
- ]);
+ adminUserAdmin.grantPrivilegesToRole('adminRole', [
+ {resource: {cluster: true}, actions: ['serverStatus']},
+ {resource: {db: "", collection: ""}, actions: ['find']}
+ ]);
assert.doesNotThrow(function() {
db.foo.findOne();
});
@@ -197,12 +195,10 @@ function runTest(conn) {
assert.commandWorked(db.adminCommand('serverStatus'));
testUserAdmin.updateUser('testUser', {roles: ['testRole2']});
- testUserAdmin.grantPrivilegesToRole(
- 'testRole2',
- [
- {resource: {db: 'test', collection: ''}, actions: ['insert', 'update']},
- {resource: {db: 'test', collection: 'foo'}, actions: ['find']}
- ]);
+ testUserAdmin.grantPrivilegesToRole('testRole2', [
+ {resource: {db: 'test', collection: ''}, actions: ['insert', 'update']},
+ {resource: {db: 'test', collection: 'foo'}, actions: ['find']}
+ ]);
assert.doesNotThrow(function() {
db.foo.findOne();
});
@@ -219,7 +215,7 @@ function runTest(conn) {
testUserAdmin.revokePrivilegesFromRole(
'testRole2',
- [{resource: {db: 'test', collection: ''}, actions: ['insert', 'update', 'find']}]);
+ [{resource: {db: 'test', collection: ''}, actions: ['insert', 'update', 'find']}]);
assert.doesNotThrow(function() {
db.foo.findOne();
});
diff --git a/jstests/auth/role_management_commands_edge_cases.js b/jstests/auth/role_management_commands_edge_cases.js
index 122221ce234..5af309a1a7a 100644
--- a/jstests/auth/role_management_commands_edge_cases.js
+++ b/jstests/auth/role_management_commands_edge_cases.js
@@ -243,10 +243,7 @@ function runTest(conn) {
db.grantPrivilegesToRole("role1", []);
});
- var basicPriv = {
- resource: {db: 'test', collection: ""},
- actions: ['find']
- };
+ var basicPriv = {resource: {db: 'test', collection: ""}, actions: ['find']};
// Invalid first argument
assert.throws(function() {
@@ -302,10 +299,7 @@ function runTest(conn) {
"readWrite", [{resource: {db: 'test', collection: ''}, actions: ['find']}]);
});
- var basicPriv = {
- resource: {db: 'test', collection: ""},
- actions: ['find']
- };
+ var basicPriv = {resource: {db: 'test', collection: ""}, actions: ['find']};
// Invalid first argument
assert.throws(function() {
diff --git a/jstests/auth/server-4892.js b/jstests/auth/server-4892.js
index ef0c95c868e..4926ea7e131 100644
--- a/jstests/auth/server-4892.js
+++ b/jstests/auth/server-4892.js
@@ -46,44 +46,42 @@ function expectNumLiveCursors(mongod, expectedNumLiveCursors) {
expectedNumLiveCursors + ")");
}
-withMongod({noauth: ""},
- function setupTest(mongod) {
- var admin, somedb, conn;
- conn = new Mongo(mongod.host);
- admin = conn.getDB('admin');
- somedb = conn.getDB('somedb');
- admin.createUser({user: 'admin', pwd: 'admin', roles: jsTest.adminUserRoles});
- admin.auth('admin', 'admin');
- somedb.createUser({user: 'frim', pwd: 'fram', roles: jsTest.basicUserRoles});
- somedb.data.drop();
- for (var i = 0; i < 10; ++i) {
- assert.writeOK(somedb.data.insert({val: i}));
- }
- admin.logout();
- });
+withMongod({noauth: ""}, function setupTest(mongod) {
+ var admin, somedb, conn;
+ conn = new Mongo(mongod.host);
+ admin = conn.getDB('admin');
+ somedb = conn.getDB('somedb');
+ admin.createUser({user: 'admin', pwd: 'admin', roles: jsTest.adminUserRoles});
+ admin.auth('admin', 'admin');
+ somedb.createUser({user: 'frim', pwd: 'fram', roles: jsTest.basicUserRoles});
+ somedb.data.drop();
+ for (var i = 0; i < 10; ++i) {
+ assert.writeOK(somedb.data.insert({val: i}));
+ }
+ admin.logout();
+});
-withMongod({auth: ""},
- function runTest(mongod) {
- var conn = new Mongo(mongod.host);
- var somedb = conn.getDB('somedb');
- somedb.auth('frim', 'fram');
+withMongod({auth: ""}, function runTest(mongod) {
+ var conn = new Mongo(mongod.host);
+ var somedb = conn.getDB('somedb');
+ somedb.auth('frim', 'fram');
- expectNumLiveCursors(mongod, 0);
+ expectNumLiveCursors(mongod, 0);
- var cursor = somedb.data.find({}, {'_id': 1}).batchSize(1);
- cursor.next();
- expectNumLiveCursors(mongod, 1);
+ var cursor = somedb.data.find({}, {'_id': 1}).batchSize(1);
+ cursor.next();
+ expectNumLiveCursors(mongod, 1);
- cursor = null;
- // NOTE(schwerin): We assume that after setting cursor = null, there are no remaining
- // references
- // to the cursor, and that gc() will deterministically garbage collect it.
- gc();
+ cursor = null;
+ // NOTE(schwerin): We assume that after setting cursor = null, there are no
+ // remaining references to the cursor, and that gc() will deterministically
+ // garbage collect it.
+ gc();
- // NOTE(schwerin): dbKillCursors gets piggybacked on subsequent messages on the
- // connection, so we
- // have to force a message to the server.
- somedb.data.findOne();
+ // NOTE(schwerin): dbKillCursors gets piggybacked on subsequent messages on
+ // the connection, so we have to force a message to the server.
+ somedb.data.findOne();
- expectNumLiveCursors(mongod, 0);
- });
+ expectNumLiveCursors(mongod, 0);
+});
diff --git a/jstests/auth/show_log_auth.js b/jstests/auth/show_log_auth.js
index b318e0536ad..05df5f20610 100644
--- a/jstests/auth/show_log_auth.js
+++ b/jstests/auth/show_log_auth.js
@@ -11,14 +11,18 @@ function assertStartsWith(s, prefix) {
assert.eq(s.substr(0, prefix.length), prefix);
}
-assertStartsWith(print.captureAllOutput(function() {
- shellHelper.show('logs');
-}).output[0],
+assertStartsWith(print
+ .captureAllOutput(function() {
+ shellHelper.show('logs');
+ })
+ .output[0],
'Error while trying to show logs');
-assertStartsWith(print.captureAllOutput(function() {
- shellHelper.show('log ' + baseName);
-}).output[0],
+assertStartsWith(print
+ .captureAllOutput(function() {
+ shellHelper.show('log ' + baseName);
+ })
+ .output[0],
'Error while trying to show ' + baseName + ' log');
db.auth("admin", "pass");
diff --git a/jstests/auth/upgrade_noauth_to_keyfile.js b/jstests/auth/upgrade_noauth_to_keyfile.js
index 372ae61af2b..da517fef031 100644
--- a/jstests/auth/upgrade_noauth_to_keyfile.js
+++ b/jstests/auth/upgrade_noauth_to_keyfile.js
@@ -12,17 +12,11 @@ load('jstests/multiVersion/libs/multi_rs.js');
var keyFilePath = 'jstests/libs/key1';
// Disable auth explicitly
- var noAuthOptions = {
- noauth: ''
- };
+ var noAuthOptions = {noauth: ''};
// Undefine the flags we're replacing, otherwise upgradeSet will keep old values.
- var transitionToAuthOptions = {
- noauth: undefined,
- clusterAuthMode: 'keyFile',
- keyFile: keyFilePath,
- transitionToAuth: ''
- };
+ var transitionToAuthOptions =
+ {noauth: undefined, clusterAuthMode: 'keyFile', keyFile: keyFilePath, transitionToAuth: ''};
var keyFileOptions = {
clusterAuthMode: 'keyFile',
keyFile: keyFilePath,
diff --git a/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js b/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js
index f6ecfec9abb..8cfca96933a 100644
--- a/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js
+++ b/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js
@@ -7,18 +7,13 @@ load('jstests/ssl/libs/ssl_helpers.js');
'use strict';
// Disable auth explicitly
- var noAuthOptions = {
- noauth: ''
- };
+ var noAuthOptions = {noauth: ''};
var transitionToAuthOptions = {
clusterAuthMode: 'keyFile',
keyFile: KEYFILE,
transitionToAuth: ''
};
- var keyFileOptions = {
- clusterAuthMode: 'keyFile',
- keyFile: KEYFILE
- };
+ var keyFileOptions = {clusterAuthMode: 'keyFile', keyFile: KEYFILE};
print('=== Testing no-auth/transitionToAuth cluster ===');
mixedShardTest(noAuthOptions, transitionToAuthOptions, true);
diff --git a/jstests/auth/user_defined_roles.js b/jstests/auth/user_defined_roles.js
index 0190ad9385e..c786018dbb5 100644
--- a/jstests/auth/user_defined_roles.js
+++ b/jstests/auth/user_defined_roles.js
@@ -12,8 +12,8 @@ function runTest(conn) {
conn.getDB('admin').createUser({user: 'admin', pwd: 'pwd', roles: ['root']});
conn.getDB('admin').auth('admin', 'pwd');
- conn.getDB('admin')
- .createUser({user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
+ conn.getDB('admin').createUser(
+ {user: 'userAdmin', pwd: 'pwd', roles: ['userAdminAnyDatabase']});
conn.getDB('admin').logout();
var userAdminConn = new Mongo(conn.host);
@@ -99,12 +99,11 @@ function runTest(conn) {
testDB.updateUser('testUser', {customData: {zipCode: 10036}});
});
assert.eq(null, testDB.getUser('testUser').customData);
- testUserAdmin.grantPrivilegesToRole(
- 'testRole1',
- [{
- resource: {db: 'test', collection: ''},
- actions: ['changeOwnPassword', 'changeOwnCustomData']
- }]);
+ testUserAdmin.grantPrivilegesToRole('testRole1',
+ [{
+ resource: {db: 'test', collection: ''},
+ actions: ['changeOwnPassword', 'changeOwnCustomData']
+ }]);
testDB.changeUserPassword('testUser', 'password');
assert(!testDB.auth('testUser', 'pwd'));
assert(testDB.auth('testUser', 'password'));
@@ -124,11 +123,9 @@ function runTest(conn) {
assert.eq(10036, testDB.getUser('testUser').customData.zipCode);
// Test changeAnyPassword/changeAnyCustomData
- testUserAdmin.grantPrivilegesToRole('testRole2',
- [{
- resource: {db: 'test', collection: ''},
- actions: ['changePassword', 'changeCustomData']
- }]);
+ testUserAdmin.grantPrivilegesToRole('testRole2', [
+ {resource: {db: 'test', collection: ''}, actions: ['changePassword', 'changeCustomData']}
+ ]);
testDB.changeUserPassword('testUser', 'pwd');
assert(!testDB.auth('testUser', 'password'));
assert(testDB.auth('testUser', 'pwd'));
@@ -137,8 +134,8 @@ function runTest(conn) {
// Test privileges on the cluster resource
assert.commandFailed(testDB.runCommand({serverStatus: 1}));
- adminUserAdmin.grantPrivilegesToRole(
- 'adminRole', [{resource: {cluster: true}, actions: ['serverStatus']}]);
+ adminUserAdmin.grantPrivilegesToRole('adminRole',
+ [{resource: {cluster: true}, actions: ['serverStatus']}]);
assert.commandWorked(testDB.serverStatus());
}
diff --git a/jstests/auth/user_defined_roles_on_secondaries.js b/jstests/auth/user_defined_roles_on_secondaries.js
index 959b76a3cae..0bf3894d0ab 100644
--- a/jstests/auth/user_defined_roles_on_secondaries.js
+++ b/jstests/auth/user_defined_roles_on_secondaries.js
@@ -87,13 +87,12 @@
rstest.add();
rstest.reInitiate();
- rstest.getPrimary().getDB("db1").createRole(
- {
- role: "r3",
- roles: ["r1", "r2"],
- privileges: [{resource: {db: "db1", collection: "log"}, actions: ["update"]}]
- },
- {w: 2});
+ rstest.getPrimary().getDB("db1").createRole({
+ role: "r3",
+ roles: ["r1", "r2"],
+ privileges: [{resource: {db: "db1", collection: "log"}, actions: ["update"]}]
+ },
+ {w: 2});
// Verify that both members of the set see the same role graph.
rstest.nodes.forEach(function(node) {
diff --git a/jstests/auth/user_management_commands.js b/jstests/auth/user_management_commands.js
index e835aa4b348..a5cca448576 100644
--- a/jstests/auth/user_management_commands.js
+++ b/jstests/auth/user_management_commands.js
@@ -113,14 +113,13 @@ function runTest(conn) {
assert.commandFailedWithCode(db.runCommand({collMod: 'foo', usePowerOf2Sizes: true}),
authzErrorCode);
- testUserAdmin.grantRolesToUser('spencer',
- [
- 'readWrite',
- 'dbAdmin',
- {role: 'readWrite', db: 'test'},
- {role: 'testRole', db: 'test'},
- 'readWrite'
- ]);
+ testUserAdmin.grantRolesToUser('spencer', [
+ 'readWrite',
+ 'dbAdmin',
+ {role: 'readWrite', db: 'test'},
+ {role: 'testRole', db: 'test'},
+ 'readWrite'
+ ]);
assert.commandWorked(db.runCommand({collMod: 'foo', usePowerOf2Sizes: true}));
assert.writeOK(db.foo.update({}, {$inc: {a: 1}}));
@@ -135,13 +134,11 @@ function runTest(conn) {
(function testRevokeRolesFromUser() {
jsTestLog("Testing revokeRolesFromUser");
- testUserAdmin.revokeRolesFromUser(
- 'spencer',
- [
- 'readWrite',
- {role: 'dbAdmin', db: 'test2'}, // role user doesnt have
- "testRole"
- ]);
+ testUserAdmin.revokeRolesFromUser('spencer', [
+ 'readWrite',
+ {role: 'dbAdmin', db: 'test2'}, // role the user doesn't have
+ "testRole"
+ ]);
assert.commandWorked(db.runCommand({collMod: 'foo', usePowerOf2Sizes: true}));
hasAuthzError(db.foo.update({}, {$inc: {a: 1}}));
diff --git a/jstests/concurrency/fsm_background_workloads/background_base.js b/jstests/concurrency/fsm_background_workloads/background_base.js
index febc3a5d0dd..abcf751e029 100644
--- a/jstests/concurrency/fsm_background_workloads/background_base.js
+++ b/jstests/concurrency/fsm_background_workloads/background_base.js
@@ -31,10 +31,7 @@ var $config = (function() {
}
};
- var transitions = {
- wait: {checkForTermination: 1},
- checkForTermination: {wait: 1}
- };
+ var transitions = {wait: {checkForTermination: 1}, checkForTermination: {wait: 1}};
var teardown = function teardown(db, collName, cluster) {
db.getSiblingDB('config').fsm_background.drop();
diff --git a/jstests/concurrency/fsm_example_inheritance.js b/jstests/concurrency/fsm_example_inheritance.js
index 01d95a0d9d6..8c45525d7e3 100644
--- a/jstests/concurrency/fsm_example_inheritance.js
+++ b/jstests/concurrency/fsm_example_inheritance.js
@@ -4,21 +4,20 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_example.js'); // for $config
// extendWorkload takes a $config object and a callback, and returns an extended $config object.
-var $config = extendWorkload($config,
- function($config, $super) {
- // In the callback, $super is the base workload definition we're
- // extending,
- // and $config is the extended workload definition we're creating.
+var $config = extendWorkload($config, function($config, $super) {
+ // In the callback, $super is the base workload definition we're extending,
+ // and $config is the extended workload definition we're creating.
- // You can replace any properties on $config, including methods you
- // want to override.
- $config.setup = function(db, collName, cluster) {
- // Overridden methods should usually call the corresponding
- // method on $super.
- $super.setup.apply(this, arguments);
+ // You can replace any properties on $config, including methods you
+ // want to override.
+ $config.setup = function(db, collName, cluster) {
+ // Overridden methods should usually call the corresponding
+ // method on $super.
+ $super.setup.apply(this, arguments);
- db[collName].ensureIndex({exampleIndexedField: 1});
- };
+ db[collName].ensureIndex({exampleIndexedField: 1});
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 5a350c9836d..a587d8f323a 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -97,10 +97,7 @@ var Cluster = function(options) {
var initialized = false;
var clusterStartTime;
- var _conns = {
- mongos: [],
- mongod: []
- };
+ var _conns = {mongos: [], mongod: []};
var nextConn = 0;
var replSets = [];
@@ -342,11 +339,7 @@ var Cluster = function(options) {
return '';
}
- var cluster = {
- mongos: [],
- config: [],
- shards: {}
- };
+ var cluster = {mongos: [], config: [], shards: {}};
var i = 0;
var mongos = st.s0;
diff --git a/jstests/concurrency/fsm_libs/composer.js b/jstests/concurrency/fsm_libs/composer.js
index 495648fb01a..99cfb64f34d 100644
--- a/jstests/concurrency/fsm_libs/composer.js
+++ b/jstests/concurrency/fsm_libs/composer.js
@@ -51,12 +51,11 @@ var composer = (function() {
}
var args = configs[workload];
- Object.keys(args.states)
- .forEach(function(state) {
- if (state !== args.startState) {
- otherStates.push({workload: workload, state: state});
- }
- });
+ Object.keys(args.states).forEach(function(state) {
+ if (state !== args.startState) {
+ otherStates.push({workload: workload, state: state});
+ }
+ });
});
var next = getRandomElem(otherStates, Random.rand());
@@ -70,8 +69,6 @@ var composer = (function() {
return items[Math.floor(randVal * items.length)];
}
- return {
- run: runCombinedFSM
- };
+ return {run: runCombinedFSM};
})();
diff --git a/jstests/concurrency/fsm_libs/fsm.js b/jstests/concurrency/fsm_libs/fsm.js
index 0a3e4a45bf4..e7a3eafb946 100644
--- a/jstests/concurrency/fsm_libs/fsm.js
+++ b/jstests/concurrency/fsm_libs/fsm.js
@@ -21,11 +21,7 @@ var fsm = (function() {
// See fsm_libs/cluster.js for the format of args.cluster.
var connCache;
if (args.passConnectionCache) {
- connCache = {
- mongos: [],
- config: [],
- shards: {}
- };
+ connCache = {mongos: [], config: [], shards: {}};
connCache.mongos = args.cluster.mongos.map(connStr => new Mongo(connStr));
connCache.config = args.cluster.config.map(connStr => new Mongo(connStr));
@@ -87,8 +83,5 @@ var fsm = (function() {
assert(false, 'not reached');
}
- return {
- run: runFSM,
- _getWeightedRandomChoice: getWeightedRandomChoice
- };
+ return {run: runFSM, _getWeightedRandomChoice: getWeightedRandomChoice};
})();
diff --git a/jstests/concurrency/fsm_libs/parse_config.js b/jstests/concurrency/fsm_libs/parse_config.js
index b569f660c8a..3c365dc5f4c 100644
--- a/jstests/concurrency/fsm_libs/parse_config.js
+++ b/jstests/concurrency/fsm_libs/parse_config.js
@@ -36,44 +36,40 @@ function parseConfig(config) {
assert.eq('object', typeof config.states);
assert.gt(Object.keys(config.states).length, 0);
- Object.keys(config.states)
- .forEach(function(k) {
- assert.eq(
- 'function', typeof config.states[k], 'config.states.' + k + ' is not a function');
- if (config.passConnectionCache) {
- assert.eq(3,
- config.states[k].length,
- 'if passConnectionCache is true, state functions should ' +
- 'accept 3 parameters: db, collName, and connCache');
- } else {
- assert.eq(2,
- config.states[k].length,
- 'if passConnectionCache is false, state functions should ' +
- 'accept 2 parameters: db and collName');
- }
- });
+ Object.keys(config.states).forEach(function(k) {
+ assert.eq('function', typeof config.states[k], 'config.states.' + k + ' is not a function');
+ if (config.passConnectionCache) {
+ assert.eq(3,
+ config.states[k].length,
+ 'if passConnectionCache is true, state functions should ' +
+ 'accept 3 parameters: db, collName, and connCache');
+ } else {
+ assert.eq(2,
+ config.states[k].length,
+ 'if passConnectionCache is false, state functions should ' +
+ 'accept 2 parameters: db and collName');
+ }
+ });
// assert all states mentioned in config.transitions are present in config.states
assert.eq('object', typeof config.transitions);
assert.gt(Object.keys(config.transitions).length, 0);
- Object.keys(config.transitions)
- .forEach(function(fromState) {
- assert(config.states.hasOwnProperty(fromState),
- 'config.transitions contains a state not in config.states: ' + fromState);
+ Object.keys(config.transitions).forEach(function(fromState) {
+ assert(config.states.hasOwnProperty(fromState),
+ 'config.transitions contains a state not in config.states: ' + fromState);
- assert.gt(Object.keys(config.transitions[fromState]).length, 0);
- Object.keys(config.transitions[fromState])
- .forEach(function(toState) {
- assert(config.states.hasOwnProperty(toState),
- 'config.transitions.' + fromState +
- ' contains a state not in config.states: ' + toState);
- assert.eq('number',
- typeof config.transitions[fromState][toState],
- 'transitions.' + fromState + '.' + toState + ' should be a number');
- assert(!isNaN(config.transitions[fromState][toState]),
- 'transitions.' + fromState + '.' + toState + ' cannot be NaN');
- });
+ assert.gt(Object.keys(config.transitions[fromState]).length, 0);
+ Object.keys(config.transitions[fromState]).forEach(function(toState) {
+ assert(config.states.hasOwnProperty(toState),
+ 'config.transitions.' + fromState + ' contains a state not in config.states: ' +
+ toState);
+ assert.eq('number',
+ typeof config.transitions[fromState][toState],
+ 'transitions.' + fromState + '.' + toState + ' should be a number');
+ assert(!isNaN(config.transitions[fromState][toState]),
+ 'transitions.' + fromState + '.' + toState + ' cannot be NaN');
});
+ });
config.setup = config.setup || function() {};
assert.eq('function', typeof config.setup);
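Taken together, the checks above pin down the shape parseConfig accepts: state functions take exactly (db, collName) unless passConnectionCache is set, and every transition references a known state with a numeric weight. A minimal config that passes them (the state names and weights here are invented for illustration):

    var $config = {
        threadCount: 2,
        iterations: 5,
        data: {},
        states: {
            // exactly two parameters, since passConnectionCache is not set
            insertDoc: function insertDoc(db, collName) {
                db[collName].insert({x: 1});
            },
            readDoc: function readDoc(db, collName) {
                db[collName].findOne();
            }
        },
        // every from/to state exists in 'states', and every weight is a number
        transitions: {insertDoc: {readDoc: 1}, readDoc: {insertDoc: 0.5, readDoc: 0.5}},
        startState: 'insertDoc'
    };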
diff --git a/jstests/concurrency/fsm_libs/runner.js b/jstests/concurrency/fsm_libs/runner.js
index 4881676636d..cba8c4efe8a 100644
--- a/jstests/concurrency/fsm_libs/runner.js
+++ b/jstests/concurrency/fsm_libs/runner.js
@@ -214,9 +214,7 @@ var runner = (function() {
myDB[collName].drop();
if (cluster.isSharded()) {
- var shardKey = context[workload].config.data.shardKey || {
- _id: 'hashed'
- };
+ var shardKey = context[workload].config.data.shardKey || {_id: 'hashed'};
// TODO: allow workload config data to specify split
cluster.shardCollection(myDB[collName], shardKey, false);
}
@@ -328,11 +326,13 @@ var runner = (function() {
numUniqueTraces + ' of which were unique:\n\n';
return summary +
- uniqueTraces.map(function(obj) {
- var line = pluralize('thread', obj.freq) + ' with tids ' +
- JSON.stringify(obj.tids) + ' threw\n';
- return indent(line + obj.value, 8);
- }).join('\n\n');
+ uniqueTraces
+ .map(function(obj) {
+ var line = pluralize('thread', obj.freq) + ' with tids ' +
+ JSON.stringify(obj.tids) + ' threw\n';
+ return indent(line + obj.value, 8);
+ })
+ .join('\n\n');
}
if (workerErrs.length > 0) {
@@ -401,9 +401,7 @@ var runner = (function() {
workloads.forEach(function(workload) {
load(workload); // for $config
assert.neq('undefined', typeof $config, '$config was not defined by ' + workload);
- context[workload] = {
- config: parseConfig($config)
- };
+ context[workload] = {config: parseConfig($config)};
if (applyMultipliers) {
context[workload].config.iterations *= executionOptions.iterationMultiplier;
context[workload].config.threadCount *= executionOptions.threadMultiplier;
@@ -525,7 +523,7 @@ var runner = (function() {
} finally {
// Threads must be joined before destruction, so do this
// even in the presence of exceptions.
- errors.push(... threadMgr.joinAll().map(
+ errors.push(...threadMgr.joinAll().map(
e => new WorkloadFailure(
e.err, e.stack, e.tid, 'Foreground ' + e.workloads.join(' '))));
}
@@ -630,8 +628,8 @@ var runner = (function() {
var dbHashBlacklist = ['local'];
if (cleanupOptions.dropDatabaseBlacklist) {
- dbBlacklist.push(... cleanupOptions.dropDatabaseBlacklist);
- dbHashBlacklist.push(... cleanupOptions.dropDatabaseBlacklist);
+ dbBlacklist.push(...cleanupOptions.dropDatabaseBlacklist);
+ dbHashBlacklist.push(...cleanupOptions.dropDatabaseBlacklist);
}
if (!cleanupOptions.keepExistingDatabases) {
dropAllDatabases(cluster.getDB('test'), dbBlacklist);
@@ -703,7 +701,7 @@ var runner = (function() {
} finally {
// Set a flag so background threads know to terminate.
bgThreadMgr.markAllForTermination();
- errors.push(... bgThreadMgr.joinAll().map(
+ errors.push(...bgThreadMgr.joinAll().map(
e => new WorkloadFailure(
e.err, e.stack, e.tid, 'Background ' + e.workloads.join(' '))));
}
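The spread-operator hunks above are whitespace-only: clang-format removes the space between the ES6 spread and its operand. The spread itself expands an array into individual push() arguments, as a quick illustration (the arrays are made up):

    var errors = ['e1'];
    var joined = ['e2', 'e3'];
    errors.push(...joined);  // same as errors.push('e2', 'e3')
    assert.eq(3, errors.length);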
diff --git a/jstests/concurrency/fsm_libs/thread_mgr.js b/jstests/concurrency/fsm_libs/thread_mgr.js
index 145fb57146e..9680e22435b 100644
--- a/jstests/concurrency/fsm_libs/thread_mgr.js
+++ b/jstests/concurrency/fsm_libs/thread_mgr.js
@@ -211,22 +211,18 @@ workerThread.fsm = function(workloads, args, options) {
load('jstests/concurrency/fsm_libs/worker_thread.js'); // for workerThread.main
load('jstests/concurrency/fsm_libs/fsm.js'); // for fsm.run
- return workerThread.main(workloads,
- args,
- function(configs) {
- var workloads = Object.keys(configs);
- assert.eq(1, workloads.length);
- fsm.run(configs[workloads[0]]);
- });
+ return workerThread.main(workloads, args, function(configs) {
+ var workloads = Object.keys(configs);
+ assert.eq(1, workloads.length);
+ fsm.run(configs[workloads[0]]);
+ });
};
workerThread.composed = function(workloads, args, options) {
load('jstests/concurrency/fsm_libs/worker_thread.js'); // for workerThread.main
load('jstests/concurrency/fsm_libs/composer.js'); // for composer.run
- return workerThread.main(workloads,
- args,
- function(configs) {
- composer.run(workloads, configs, options);
- });
+ return workerThread.main(workloads, args, function(configs) {
+ composer.run(workloads, configs, options);
+ });
};
diff --git a/jstests/concurrency/fsm_libs/worker_thread.js b/jstests/concurrency/fsm_libs/worker_thread.js
index 5c6dd771509..7a9bf6b3304 100644
--- a/jstests/concurrency/fsm_libs/worker_thread.js
+++ b/jstests/concurrency/fsm_libs/worker_thread.js
@@ -62,21 +62,20 @@ var workerThread = (function() {
// configurable, enumerable, and writable. To prevent workloads from changing
// the iterations and threadCount properties in their state functions, we redefine
// them here as non-configurable, non-enumerable, and non-writable.
- Object.defineProperties(data,
- {
- 'iterations': {
- configurable: false,
- enumerable: false,
- writable: false,
- value: data.iterations
- },
- 'threadCount': {
- configurable: false,
- enumerable: false,
- writable: false,
- value: data.threadCount
- }
- });
+ Object.defineProperties(data, {
+ 'iterations': {
+ configurable: false,
+ enumerable: false,
+ writable: false,
+ value: data.iterations
+ },
+ 'threadCount': {
+ configurable: false,
+ enumerable: false,
+ writable: false,
+ value: data.threadCount
+ }
+ });
data.tid = args.tid;
configs[workload] = {
@@ -103,9 +102,7 @@ var workerThread = (function() {
Random.setRandomSeed(args.seed);
run(configs);
- return {
- ok: 1
- };
+ return {ok: 1};
} catch (e) {
args.errorLatch.countDown();
return {
@@ -124,8 +121,6 @@ var workerThread = (function() {
}
}
- return {
- main: main
- };
+ return {main: main};
})();
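The Object.defineProperties call reflowed above is what keeps workloads from reassigning iterations and threadCount inside their state functions. Its effect, demonstrated standalone (the sample values are invented):

    var data = {iterations: 10, threadCount: 4};
    Object.defineProperties(data, {
        'iterations':
            {configurable: false, enumerable: false, writable: false, value: data.iterations}
    });
    data.iterations = 999;                                   // ignored outside strict mode
    assert.eq(10, data.iterations);                          // value unchanged
    assert.eq(-1, Object.keys(data).indexOf('iterations'));  // and no longer enumerable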
diff --git a/jstests/concurrency/fsm_selftests.js b/jstests/concurrency/fsm_selftests.js
index 686a6d286e7..30c614e9148 100644
--- a/jstests/concurrency/fsm_selftests.js
+++ b/jstests/concurrency/fsm_selftests.js
@@ -9,11 +9,7 @@ load('jstests/concurrency/fsm_libs/fsm.js');
(function() {
var getWeightedRandomChoice = fsm._getWeightedRandomChoice;
- var doc = {
- a: 0.25,
- b: 0.5,
- c: 0.25
- };
+ var doc = {a: 0.25, b: 0.5, c: 0.25};
// NOTE: getWeightedRandomChoice calls assert internally, so it will print stack traces
// when assert.throws executes
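The selftest above drives fsm._getWeightedRandomChoice with the weights {a: 0.25, b: 0.5, c: 0.25}. The implementation is not part of this diff, but a cumulative-weight selection consistent with that usage can be sketched as follows; this is an assumed reconstruction, not the code in fsm.js:

    // sketch only: pick a key with probability proportional to its weight,
    // given randVal in [0, 1); the real helper lives in fsm_libs/fsm.js
    function weightedRandomChoiceSketch(doc, randVal) {
        var keys = Object.keys(doc);
        var total = 0;
        keys.forEach(function(k) {
            total += doc[k];
        });
        var threshold = randVal * total;
        var running = 0;
        for (var i = 0; i < keys.length; ++i) {
            running += doc[keys[i]];
            if (threshold < running) {
                return keys[i];
            }
        }
        assert(false, 'randVal must be in [0, 1)');
    }

    // 0.6 falls inside b's [0.25, 0.75) slice of the cumulative weights
    assert.eq('b', weightedRandomChoiceSketch({a: 0.25, b: 0.5, c: 0.25}, 0.6));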
diff --git a/jstests/concurrency/fsm_workload_modifiers/make_capped.js b/jstests/concurrency/fsm_workload_modifiers/make_capped.js
index 00f68964531..8bae4b4a9ff 100644
--- a/jstests/concurrency/fsm_workload_modifiers/make_capped.js
+++ b/jstests/concurrency/fsm_workload_modifiers/make_capped.js
@@ -17,11 +17,10 @@ function makeCapped($config, $super) {
$config.setup = function setup(db, collName, cluster) {
assertWhenOwnColl(function() {
db[collName].drop();
- assertAlways.commandWorked(db.createCollection(collName,
- {
- capped: true,
- size: 16384 // bytes
- }));
+ assertAlways.commandWorked(db.createCollection(collName, {
+ capped: true,
+ size: 16384 // bytes
+ }));
});
$super.setup.apply(this, arguments);
diff --git a/jstests/concurrency/fsm_workloads/agg_base.js b/jstests/concurrency/fsm_workloads/agg_base.js
index 846e6900215..9bc1be04785 100644
--- a/jstests/concurrency/fsm_workloads/agg_base.js
+++ b/jstests/concurrency/fsm_workloads/agg_base.js
@@ -45,9 +45,7 @@ var $config = (function() {
}
};
- var transitions = {
- query: {query: 1}
- };
+ var transitions = {query: {query: 1}};
function setup(db, collName, cluster) {
// load example data
@@ -55,13 +53,12 @@ var $config = (function() {
for (var i = 0; i < this.numDocs; ++i) {
// note: padDoc caches the large string after allocating it once, so it's ok to call it
// in this loop
- bulk.insert(padDoc(
- {
- flag: i % 2 ? true : false,
- rand: Random.rand(),
- randInt: Random.randInt(this.numDocs)
- },
- this.docSize));
+ bulk.insert(padDoc({
+ flag: i % 2 ? true : false,
+ rand: Random.rand(),
+ randInt: Random.randInt(this.numDocs)
+ },
+ this.docSize));
}
var res = bulk.execute();
assertWhenOwnColl.writeOK(res);
diff --git a/jstests/concurrency/fsm_workloads/agg_graph_lookup.js b/jstests/concurrency/fsm_workloads/agg_graph_lookup.js
index 7b675dc1e9c..aacbe632fc2 100644
--- a/jstests/concurrency/fsm_workloads/agg_graph_lookup.js
+++ b/jstests/concurrency/fsm_workloads/agg_graph_lookup.js
@@ -7,28 +7,28 @@
*/
var $config = (function() {
- var data = {
- numDocs: 1000
- };
+ var data = {numDocs: 1000};
var states = {
query: function query(db, collName) {
var limitAmount = 20;
var startingId = Random.randInt(this.numDocs - limitAmount);
- var res = db[collName].aggregate([
- {$match: {_id: {$gt: startingId}}},
- {
- $graphLookup: {
- from: collName,
- startWith: "$to",
- connectToField: "_id",
- connectFromField: "to",
- maxDepth: 10,
- as: "out",
- }
- },
- {$limit: limitAmount}
- ]).toArray();
+ var res = db[collName]
+ .aggregate([
+ {$match: {_id: {$gt: startingId}}},
+ {
+ $graphLookup: {
+ from: collName,
+ startWith: "$to",
+ connectToField: "_id",
+ connectFromField: "to",
+ maxDepth: 10,
+ as: "out",
+ }
+ },
+ {$limit: limitAmount}
+ ])
+ .toArray();
assertWhenOwnColl.eq(res.length, limitAmount);
},
@@ -40,10 +40,7 @@ var $config = (function() {
}
};
- var transitions = {
- query: {query: 0.5, update: 0.5},
- update: {query: 0.5, update: 0.5}
- };
+ var transitions = {query: {query: 0.5, update: 0.5}, update: {query: 0.5, update: 0.5}};
function setup(db, collName, cluster) {
// Load example data.
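The reformatted pipeline above chases to -> _id edges through the collection. For context (not part of this diff), the same $graphLookup shape run on a tiny hand-built chain; the graph_demo collection is invented for illustration:

    db.graph_demo.drop();
    // a four-node chain: 0 -> 1 -> 2 -> 3
    db.graph_demo.insert({_id: 0, to: 1});
    db.graph_demo.insert({_id: 1, to: 2});
    db.graph_demo.insert({_id: 2, to: 3});
    db.graph_demo.insert({_id: 3});

    var res = db.graph_demo.aggregate([
        {$match: {_id: 0}},
        {
          $graphLookup: {
              from: 'graph_demo',
              startWith: '$to',
              connectFromField: 'to',
              connectToField: '_id',
              maxDepth: 10,
              as: 'out'
          }
        }
    ]).toArray();
    assert.eq(3, res[0].out.length);  // the walk reaches _id 1, 2, and 3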
diff --git a/jstests/concurrency/fsm_workloads/agg_group_external.js b/jstests/concurrency/fsm_workloads/agg_group_external.js
index 22d71d4564d..e4ca604a99c 100644
--- a/jstests/concurrency/fsm_workloads/agg_group_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_group_external.js
@@ -12,43 +12,41 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload(
- $config,
- function($config, $super) {
-
- // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
- $config.data.numDocs = 24 * 1000;
- var MB = 1024 * 1024; // bytes
- assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize);
-
- // assume no other workload will manipulate collections with this prefix
- $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
- return collName + '_out_agg_group_external_';
- };
-
- $config.states.query = function query(db, collName) {
- var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
- var cursor = db[collName].aggregate(
- [{$group: {_id: '$randInt', count: {$sum: 1}}}, {$out: otherCollName}],
- {allowDiskUse: true});
- assertAlways.eq(0, cursor.itcount());
- assertWhenOwnColl(function() {
- // sum the .count fields in the output coll
- var sum = db[otherCollName]
- .aggregate([{$group: {_id: null, totalCount: {$sum: '$count'}}}])
- .toArray()[0]
- .totalCount;
- assertWhenOwnColl.eq(this.numDocs, sum);
- }.bind(this));
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- $super.teardown.apply(this, arguments);
-
- // drop all collections with this workload's assumed-to-be-unique prefix
- // NOTE: assumes the prefix contains no special regex chars
- dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
- };
-
- return $config;
- });
+var $config = extendWorkload($config, function($config, $super) {
+
+ // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
+ $config.data.numDocs = 24 * 1000;
+ var MB = 1024 * 1024; // bytes
+ assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize);
+
+ // assume no other workload will manipulate collections with this prefix
+ $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
+ return collName + '_out_agg_group_external_';
+ };
+
+ $config.states.query = function query(db, collName) {
+ var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
+ var cursor = db[collName].aggregate(
+ [{$group: {_id: '$randInt', count: {$sum: 1}}}, {$out: otherCollName}],
+ {allowDiskUse: true});
+ assertAlways.eq(0, cursor.itcount());
+ assertWhenOwnColl(function() {
+ // sum the .count fields in the output coll
+ var sum = db[otherCollName]
+ .aggregate([{$group: {_id: null, totalCount: {$sum: '$count'}}}])
+ .toArray()[0]
+ .totalCount;
+ assertWhenOwnColl.eq(this.numDocs, sum);
+ }.bind(this));
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ $super.teardown.apply(this, arguments);
+
+ // drop all collections with this workload's assumed-to-be-unique prefix
+ // NOTE: assumes the prefix contains no special regex chars
+ dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
+ };
+
+ return $config;
+});
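extendWorkload, used by this and the following hunks, lives in fsm_libs/extend_workload.js and is not shown in this diff. Its general shape (hand the callback a copy of the parent config plus the parsed parent for $super calls) can be sketched roughly like this, as an assumption about the helper rather than its actual source:

    // assumed sketch of the extension pattern; parseConfig is from
    // fsm_libs/parse_config.js, Object.extend is the shell's copy helper
    function extendWorkloadSketch($config, callback) {
        var parsedSuperConfig = parseConfig($config);
        // deep-copy so the child may overwrite states freely
        var childConfig = Object.extend({}, parsedSuperConfig, true /* deep */);
        return callback(childConfig, parsedSuperConfig);
    }

This matches how the callbacks in these files are written: they mutate and return their first argument and reach back into the second via $super.states and $super.teardown.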
diff --git a/jstests/concurrency/fsm_workloads/agg_match.js b/jstests/concurrency/fsm_workloads/agg_match.js
index a685096155b..ca9682e1a58 100644
--- a/jstests/concurrency/fsm_workloads/agg_match.js
+++ b/jstests/concurrency/fsm_workloads/agg_match.js
@@ -8,31 +8,29 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.getOutCollName = function getOutCollName(collName) {
- return collName + '_out_agg_match';
- };
+ $config.data.getOutCollName = function getOutCollName(collName) {
+ return collName + '_out_agg_match';
+ };
- $config.states.query = function query(db, collName) {
- // note that all threads output to the same collection
- var otherCollName = this.getOutCollName(collName);
- var cursor = db[collName].aggregate([{$match: {flag: true}}, {$out: otherCollName}]);
- assertAlways.eq(0, cursor.itcount(), 'cursor returned by $out should always be empty');
- // NOTE: This relies on the fast-path for .count() with no query being isolated.
- // NOTE: There's a bug, SERVER-3645, where .count() is wrong on sharded collections, so
- // we
- // blacklisted this test for sharded clusters.
- assertWhenOwnColl.eq(db[collName].count() / 2, db[otherCollName].count());
- };
+ $config.states.query = function query(db, collName) {
+ // note that all threads output to the same collection
+ var otherCollName = this.getOutCollName(collName);
+ var cursor = db[collName].aggregate([{$match: {flag: true}}, {$out: otherCollName}]);
+ assertAlways.eq(0, cursor.itcount(), 'cursor returned by $out should always be empty');
+ // NOTE: This relies on the fast-path for .count() with no query being isolated.
+ // NOTE: There's a bug, SERVER-3645, where .count() is wrong on sharded collections, so
+ // we
+ // blacklisted this test for sharded clusters.
+ assertWhenOwnColl.eq(db[collName].count() / 2, db[otherCollName].count());
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- $super.teardown.apply(this, arguments);
+ $config.teardown = function teardown(db, collName, cluster) {
+ $super.teardown.apply(this, arguments);
- assertWhenOwnColl(db[this.getOutCollName(collName)].drop());
- };
+ assertWhenOwnColl(db[this.getOutCollName(collName)].drop());
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/agg_sort.js b/jstests/concurrency/fsm_workloads/agg_sort.js
index 936ae2cf71b..8ab372d87f0 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort.js
@@ -10,30 +10,27 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
- return collName + '_out_agg_sort_';
- };
+ $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
+ return collName + '_out_agg_sort_';
+ };
- $config.states.query = function query(db, collName) {
- var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
- var cursor = db[collName].aggregate(
- [{$match: {flag: true}}, {$sort: {rand: 1}}, {$out: otherCollName}]);
- assertAlways.eq(0, cursor.itcount());
- assertWhenOwnColl.eq(db[collName].find().itcount() / 2,
- db[otherCollName].find().itcount());
- };
+ $config.states.query = function query(db, collName) {
+ var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
+ var cursor = db[collName].aggregate(
+ [{$match: {flag: true}}, {$sort: {rand: 1}}, {$out: otherCollName}]);
+ assertAlways.eq(0, cursor.itcount());
+ assertWhenOwnColl.eq(db[collName].find().itcount() / 2, db[otherCollName].find().itcount());
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- $super.teardown.apply(this, arguments);
+ $config.teardown = function teardown(db, collName, cluster) {
+ $super.teardown.apply(this, arguments);
- // drop all collections with this workload's assumed-to-be-unique prefix
- // NOTE: assumes the prefix contains no special regex chars
- dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
- };
+ // drop all collections with this workload's assumed-to-be-unique prefix
+ // NOTE: assumes the prefix contains no special regex chars
+ dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/agg_sort_external.js b/jstests/concurrency/fsm_workloads/agg_sort_external.js
index 8a7b6b22495..785d1a4d150 100644
--- a/jstests/concurrency/fsm_workloads/agg_sort_external.js
+++ b/jstests/concurrency/fsm_workloads/agg_sort_external.js
@@ -12,40 +12,36 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload(
- $config,
- function($config, $super) {
-
- // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
- $config.data.numDocs = 24 * 1000;
- var MB = 1024 * 1024; // bytes
- // assert that *half* the docs exceed the in-memory limit, because the $match stage will
- // only
- // pass half the docs in the collection on to the $sort stage.
- assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize / 2);
-
- $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
- return collName + '_out_agg_sort_external_';
- };
-
- $config.states.query = function query(db, collName) {
- var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
- var cursor =
- db[collName]
- .aggregate([{$match: {flag: true}}, {$sort: {rand: 1}}, {$out: otherCollName}],
- {allowDiskUse: true});
- assertAlways.eq(0, cursor.itcount());
- assertWhenOwnColl.eq(db[collName].find().itcount() / 2,
- db[otherCollName].find().itcount());
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- $super.teardown.apply(this, arguments);
-
- // drop all collections with this workload's assumed-to-be-unique prefix
- // NOTE: assumes the prefix contains no special regex chars
- dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
- };
-
- return $config;
- });
+var $config = extendWorkload($config, function($config, $super) {
+
+ // use enough docs to exceed 100MB, the in-memory limit for $sort and $group
+ $config.data.numDocs = 24 * 1000;
+ var MB = 1024 * 1024; // bytes
+ // assert that *half* the docs exceed the in-memory limit, because the $match stage will
+ // only
+ // pass half the docs in the collection on to the $sort stage.
+ assertAlways.lte(100 * MB, $config.data.numDocs * $config.data.docSize / 2);
+
+ $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) {
+ return collName + '_out_agg_sort_external_';
+ };
+
+ $config.states.query = function query(db, collName) {
+ var otherCollName = this.getOutputCollPrefix(collName) + this.tid;
+ var cursor = db[collName].aggregate(
+ [{$match: {flag: true}}, {$sort: {rand: 1}}, {$out: otherCollName}],
+ {allowDiskUse: true});
+ assertAlways.eq(0, cursor.itcount());
+ assertWhenOwnColl.eq(db[collName].find().itcount() / 2, db[otherCollName].find().itcount());
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ $super.teardown.apply(this, arguments);
+
+ // drop all collections with this workload's assumed-to-be-unique prefix
+ // NOTE: assumes the prefix contains no special regex chars
+ dropCollections(db, new RegExp('^' + this.getOutputCollPrefix(collName)));
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/auth_create_role.js b/jstests/concurrency/fsm_workloads/auth_create_role.js
index 8b8d3933c2d..6ad5de17d9a 100644
--- a/jstests/concurrency/fsm_workloads/auth_create_role.js
+++ b/jstests/concurrency/fsm_workloads/auth_create_role.js
@@ -44,17 +44,11 @@ var $config = (function() {
}
}
- return {
- init: init,
- createRole: createRole
- };
+ return {init: init, createRole: createRole};
})();
- var transitions = {
- init: {createRole: 1},
- createRole: {createRole: 1}
- };
+ var transitions = {init: {createRole: 1}, createRole: {createRole: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/auth_create_user.js b/jstests/concurrency/fsm_workloads/auth_create_user.js
index e49c63bc68e..07fd1135032 100644
--- a/jstests/concurrency/fsm_workloads/auth_create_user.js
+++ b/jstests/concurrency/fsm_workloads/auth_create_user.js
@@ -39,17 +39,11 @@ var $config = (function() {
}
}
- return {
- init: init,
- createUser: createUser
- };
+ return {init: init, createUser: createUser};
})();
- var transitions = {
- init: {createUser: 1},
- createUser: {createUser: 1}
- };
+ var transitions = {init: {createUser: 1}, createUser: {createUser: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/auth_drop_role.js b/jstests/concurrency/fsm_workloads/auth_drop_role.js
index d41066dbc63..eba694ccd49 100644
--- a/jstests/concurrency/fsm_workloads/auth_drop_role.js
+++ b/jstests/concurrency/fsm_workloads/auth_drop_role.js
@@ -44,24 +44,12 @@ var $config = (function() {
assertAlways.isnull(db.getRole(roleName), "role '" + roleName + "' should not exist");
}
- return {
- init: init,
- createAndDropRole: createAndDropRole
- };
+ return {init: init, createAndDropRole: createAndDropRole};
})();
- var transitions = {
- init: {createAndDropRole: 1},
- createAndDropRole: {createAndDropRole: 1}
- };
+ var transitions = {init: {createAndDropRole: 1}, createAndDropRole: {createAndDropRole: 1}};
- return {
- threadCount: 10,
- iterations: 20,
- data: data,
- states: states,
- transitions: transitions
- };
+ return {threadCount: 10, iterations: 20, data: data, states: states, transitions: transitions};
})();
diff --git a/jstests/concurrency/fsm_workloads/auth_drop_user.js b/jstests/concurrency/fsm_workloads/auth_drop_user.js
index 65cb8e41da2..a47d5566874 100644
--- a/jstests/concurrency/fsm_workloads/auth_drop_user.js
+++ b/jstests/concurrency/fsm_workloads/auth_drop_user.js
@@ -37,24 +37,12 @@ var $config = (function() {
assertAlways.isnull(db.getUser(username), "user '" + username + "' should not exist");
}
- return {
- init: init,
- createAndDropUser: createAndDropUser
- };
+ return {init: init, createAndDropUser: createAndDropUser};
})();
- var transitions = {
- init: {createAndDropUser: 1},
- createAndDropUser: {createAndDropUser: 1}
- };
+ var transitions = {init: {createAndDropUser: 1}, createAndDropUser: {createAndDropUser: 1}};
- return {
- threadCount: 10,
- iterations: 20,
- data: data,
- states: states,
- transitions: transitions
- };
+ return {threadCount: 10, iterations: 20, data: data, states: states, transitions: transitions};
})();
diff --git a/jstests/concurrency/fsm_workloads/collmod.js b/jstests/concurrency/fsm_workloads/collmod.js
index 7b803cd3284..adfe98c2ff1 100644
--- a/jstests/concurrency/fsm_workloads/collmod.js
+++ b/jstests/concurrency/fsm_workloads/collmod.js
@@ -33,15 +33,11 @@ var $config = (function() {
}
}
- return {
- collMod: collMod
- };
+ return {collMod: collMod};
})();
- var transitions = {
- collMod: {collMod: 1}
- };
+ var transitions = {collMod: {collMod: 1}};
function setup(db, collName, cluster) {
// other workloads that extend this one might have set 'this.threadCollName'
diff --git a/jstests/concurrency/fsm_workloads/collmod_separate_collections.js b/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
index 5233733eb2d..a033eb200ee 100644
--- a/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
+++ b/jstests/concurrency/fsm_workloads/collmod_separate_collections.js
@@ -13,32 +13,28 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/collmod.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload($config,
- function($config, $super) {
- $config.data.prefix = 'collmod_separate_collections';
- $config.data.shardKey = {
- createdAt: 1
- };
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.prefix = 'collmod_separate_collections';
+ $config.data.shardKey = {createdAt: 1};
- $config.states.init = function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
- $super.setup.call(this, db, this.threadCollName);
- };
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+ $super.setup.call(this, db, this.threadCollName);
+ };
- $config.transitions =
- Object.extend({init: {collMod: 1}}, $super.transitions);
+ $config.transitions = Object.extend({init: {collMod: 1}}, $super.transitions);
- $config.setup = function setup(db, collName, cluster) {
- // no-op: since the init state is used to setup
- // the separate collections on a per-thread basis.
- };
+ $config.setup = function setup(db, collName, cluster) {
+ // no-op: since the init state is used to setup
+ // the separate collections on a per-thread basis.
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + this.prefix + '_\\d+$');
- dropCollections(db, pattern);
- $super.teardown.apply(this, arguments);
- };
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ $super.teardown.apply(this, arguments);
+ };
- $config.startState = 'init';
- return $config;
- });
+ $config.startState = 'init';
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/compact.js b/jstests/concurrency/fsm_workloads/compact.js
index afea3f8a28f..8f91f52bf5e 100644
--- a/jstests/concurrency/fsm_workloads/compact.js
+++ b/jstests/concurrency/fsm_workloads/compact.js
@@ -72,12 +72,7 @@ var $config = (function() {
assertWhenOwnColl.eq(indexesCount, this.nIndexes);
}
- return {
- init: init,
- collectionSetup: collectionSetup,
- compact: compact,
- query: query
- };
+ return {init: init, collectionSetup: collectionSetup, compact: compact, query: query};
})();
var transitions = {
diff --git a/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js b/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
index 47ad30cedab..b1991cd3b8c 100644
--- a/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
+++ b/jstests/concurrency/fsm_workloads/compact_simultaneous_padding_bytes.js
@@ -12,26 +12,24 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/compact.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral
-var $config = extendWorkload(
- $config,
- function($config, $super) {
- $config.states.init = function init(db, collName) {
- this.threadCollName = collName;
- };
+var $config = extendWorkload($config, function($config, $super) {
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = collName;
+ };
- $config.states.compact = function compact(db, collName) {
- var res =
- db.runCommand({compact: this.threadCollName, paddingBytes: 1024 * 5, force: true});
- if (!isEphemeral(db)) {
- assertAlways.commandWorked(res);
- } else {
- assertAlways.commandFailedWithCode(res, ErrorCodes.CommandNotSupported);
- }
- };
+ $config.states.compact = function compact(db, collName) {
+ var res =
+ db.runCommand({compact: this.threadCollName, paddingBytes: 1024 * 5, force: true});
+ if (!isEphemeral(db)) {
+ assertAlways.commandWorked(res);
+ } else {
+ assertAlways.commandFailedWithCode(res, ErrorCodes.CommandNotSupported);
+ }
+ };
- // no-op the query state because querying while compacting can result in closed cursors
- // as per SERVER-3964, as well as inaccurate counts, leaving nothing to assert.
- $config.states.query = function query(db, collName) {};
+ // no-op the query state because querying while compacting can result in closed cursors
+ // as per SERVER-3964, as well as inaccurate counts, leaving nothing to assert.
+ $config.states.query = function query(db, collName) {};
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
index 79b9934077b..14cd4e98646 100644
--- a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
@@ -13,9 +13,7 @@ load('jstests/concurrency/fsm_workload_helpers/drop_utils.js');
var $config = (function() {
// TODO: This workload may fail if an iteration multiplier is specified.
- var data = {
- prefix: 'convert_to_capped_collection'
- };
+ var data = {prefix: 'convert_to_capped_collection'};
var states = (function() {
@@ -62,16 +60,10 @@ var $config = (function() {
});
}
- return {
- init: init,
- convertToCapped: convertToCapped
- };
+ return {init: init, convertToCapped: convertToCapped};
})();
- var transitions = {
- init: {convertToCapped: 1},
- convertToCapped: {convertToCapped: 1}
- };
+ var transitions = {init: {convertToCapped: 1}, convertToCapped: {convertToCapped: 1}};
function setup(db, collName, cluster) {
// Initial size should not be a power of 256.
diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
index 01342b9b603..4b410ea7305 100644
--- a/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
+++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
@@ -16,14 +16,12 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/convert_to_capped_collection.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
- $config.states.convertToCapped = function convertToCapped(db, collName) {
- assertWhenOwnDB.commandWorked(db[this.threadCollName].ensureIndex({i: 1, rand: 1}));
- assertWhenOwnDB.eq(2, db[this.threadCollName].getIndexes().length);
- $super.states.convertToCapped.apply(this, arguments);
- };
+var $config = extendWorkload($config, function($config, $super) {
+ $config.states.convertToCapped = function convertToCapped(db, collName) {
+ assertWhenOwnDB.commandWorked(db[this.threadCollName].ensureIndex({i: 1, rand: 1}));
+ assertWhenOwnDB.eq(2, db[this.threadCollName].getIndexes().length);
+ $super.states.convertToCapped.apply(this, arguments);
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/count.js b/jstests/concurrency/fsm_workloads/count.js
index 61a4c93d3ab..16221b7ad9d 100644
--- a/jstests/concurrency/fsm_workloads/count.js
+++ b/jstests/concurrency/fsm_workloads/count.js
@@ -53,24 +53,12 @@ var $config = (function() {
assertWhenOwnColl.eq(this.getCount(db, {i: num}), this.countPerNum);
}
- return {
- init: init,
- count: count
- };
+ return {init: init, count: count};
})();
- var transitions = {
- init: {count: 1},
- count: {count: 1}
- };
+ var transitions = {init: {count: 1}, count: {count: 1}};
- return {
- data: data,
- threadCount: 10,
- iterations: 20,
- states: states,
- transitions: transitions
- };
+ return {data: data, threadCount: 10, iterations: 20, states: states, transitions: transitions};
})();
diff --git a/jstests/concurrency/fsm_workloads/count_indexed.js b/jstests/concurrency/fsm_workloads/count_indexed.js
index b9c09020042..77edf8da6ef 100644
--- a/jstests/concurrency/fsm_workloads/count_indexed.js
+++ b/jstests/concurrency/fsm_workloads/count_indexed.js
@@ -14,31 +14,26 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/count.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload(
- $config,
- function($config, $super) {
- $config.data.prefix = 'count_fsm';
- $config.data.shardKey = {
- tid: 1,
- i: 1
- };
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.prefix = 'count_fsm';
+ $config.data.shardKey = {tid: 1, i: 1};
- $config.data.getCount = function getCount(db, predicate) {
- var query = Object.extend({tid: this.tid}, predicate);
- return db[this.threadCollName].find(query).hint({tid: 1, i: 1}).count();
- };
+ $config.data.getCount = function getCount(db, predicate) {
+ var query = Object.extend({tid: this.tid}, predicate);
+ return db[this.threadCollName].find(query).hint({tid: 1, i: 1}).count();
+ };
- $config.states.init = function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
- $super.states.init.apply(this, arguments);
- assertAlways.commandWorked(db[this.threadCollName].ensureIndex({tid: 1, i: 1}));
- };
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+ $super.states.init.apply(this, arguments);
+ assertAlways.commandWorked(db[this.threadCollName].ensureIndex({tid: 1, i: 1}));
+ };
- $config.teardown = function teardown(db, collName) {
- var pattern = new RegExp('^' + this.prefix + '_\\d+$');
- dropCollections(db, pattern);
- $super.teardown.apply(this, arguments);
- };
+ $config.teardown = function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ $super.teardown.apply(this, arguments);
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/count_limit_skip.js b/jstests/concurrency/fsm_workloads/count_limit_skip.js
index 59cc5db835f..169094a1c82 100644
--- a/jstests/concurrency/fsm_workloads/count_limit_skip.js
+++ b/jstests/concurrency/fsm_workloads/count_limit_skip.js
@@ -14,42 +14,36 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/count.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload(
- $config,
- function($config, $super) {
- $config.data.prefix = 'count_fsm_q_l_s';
-
- $config.data.getCount = function getCount(db, predicate) {
- var query = Object.extend({tid: this.tid}, predicate);
- return db[this.threadCollName]
- .find(query)
- .skip(this.countPerNum - 1)
- .limit(10)
- .count(true);
- };
-
- $config.states.init = function init(db, collName) {
- this.threadCollName = this.prefix + '_' + this.tid;
-
- $super.states.init.apply(this, arguments);
- };
-
- $config.states.count = function count(db, collName) {
- assertWhenOwnColl.eq(this.getCount(db),
- // having done 'skip(this.countPerNum - 1).limit(10)'
- 10);
-
- var num = Random.randInt(this.modulus);
- assertWhenOwnColl.eq(this.getCount(db, {i: num}),
- // having done 'skip(this.countPerNum - 1).limit(10)'
- 1);
- };
-
- $config.teardown = function teardown(db, collName) {
- var pattern = new RegExp('^' + this.prefix + '_\\d+$');
- dropCollections(db, pattern);
- $super.teardown.apply(this, arguments);
- };
-
- return $config;
- });
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.prefix = 'count_fsm_q_l_s';
+
+ $config.data.getCount = function getCount(db, predicate) {
+ var query = Object.extend({tid: this.tid}, predicate);
+ return db[this.threadCollName].find(query).skip(this.countPerNum - 1).limit(10).count(true);
+ };
+
+ $config.states.init = function init(db, collName) {
+ this.threadCollName = this.prefix + '_' + this.tid;
+
+ $super.states.init.apply(this, arguments);
+ };
+
+ $config.states.count = function count(db, collName) {
+ assertWhenOwnColl.eq(this.getCount(db),
+ // having done 'skip(this.countPerNum - 1).limit(10)'
+ 10);
+
+ var num = Random.randInt(this.modulus);
+ assertWhenOwnColl.eq(this.getCount(db, {i: num}),
+ // having done 'skip(this.countPerNum - 1).limit(10)'
+ 1);
+ };
+
+ $config.teardown = function teardown(db, collName) {
+ var pattern = new RegExp('^' + this.prefix + '_\\d+$');
+ dropCollections(db, pattern);
+ $super.teardown.apply(this, arguments);
+ };
+
+ return $config;
+});
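The skip/limit counting above relies on the shell's applySkipLimit flag: count(true) makes the count honor the cursor's .skip() and .limit(), while the default ignores them. A standalone illustration with a made-up collection:

    db.count_demo.drop();
    for (var i = 0; i < 5; ++i) {
        db.count_demo.insert({i: i});
    }
    assert.eq(5, db.count_demo.find().skip(2).count());      // skip ignored by default
    assert.eq(3, db.count_demo.find().skip(2).count(true));  // skip applied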
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection.js b/jstests/concurrency/fsm_workloads/create_capped_collection.js
index 96fdda8ebda..c0ec6c0e071 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection.js
@@ -14,10 +14,7 @@ var $config = (function() {
// Returns a document of the form { _id: ObjectId(...), field: '...' }
// with specified BSON size.
function makeDocWithSize(targetSize) {
- var doc = {
- _id: new ObjectId(),
- field: ''
- };
+ var doc = {_id: new ObjectId(), field: ''};
var size = Object.bsonsize(doc);
assertAlways.gte(targetSize, size);
@@ -45,11 +42,9 @@ var $config = (function() {
// Returns an array containing the _id fields of all the documents
// in the collection, sorted according to their insertion order.
function getObjectIds(db, collName) {
- return db[collName]
- .find({}, {_id: 1})
- .map(function(doc) {
- return doc._id;
- });
+ return db[collName].find({}, {_id: 1}).map(function(doc) {
+ return doc._id;
+ });
}
var data = {
@@ -148,17 +143,11 @@ var $config = (function() {
this.verifySizeTruncation(db, myCollName, options);
}
- return {
- init: init,
- create: create
- };
+ return {init: init, create: create};
})();
- var transitions = {
- init: {create: 1},
- create: {create: 1}
- };
+ var transitions = {init: {create: 1}, create: {create: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
index b3a836b8b0b..e29ed65a274 100644
--- a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
+++ b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js
@@ -10,57 +10,55 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/create_capped_collection.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- // Use the workload name as a prefix for the collection name,
- // since the workload name is assumed to be unique.
- $config.data.prefix = 'create_capped_collection_maxdocs';
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ $config.data.prefix = 'create_capped_collection_maxdocs';
- var options = {
- capped: true,
- size: 8192, // multiple of 256; larger than 4096 default
- max: 3
- };
+ var options = {
+ capped: true,
+ size: 8192, // multiple of 256; larger than 4096 default
+ max: 3
+ };
- function uniqueCollectionName(prefix, tid, num) {
- return prefix + tid + '_' + num;
- }
+ function uniqueCollectionName(prefix, tid, num) {
+ return prefix + tid + '_' + num;
+ }
- // TODO: how to avoid having too many files open?
- function create(db, collName) {
- var myCollName = uniqueCollectionName(this.prefix, this.tid, this.num++);
- assertAlways.commandWorked(db.createCollection(myCollName, options));
+ // TODO: how to avoid having too many files open?
+ function create(db, collName) {
+ var myCollName = uniqueCollectionName(this.prefix, this.tid, this.num++);
+ assertAlways.commandWorked(db.createCollection(myCollName, options));
- // Define a small document to be an eighth the size of the capped collection.
- var smallDocSize = Math.floor(options.size / 8) - 1;
+ // Define a small document to be an eighth the size of the capped collection.
+ var smallDocSize = Math.floor(options.size / 8) - 1;
- // Verify size functionality still works as we expect
- this.verifySizeTruncation(db, myCollName, options);
+ // Verify size functionality still works as we expect
+ this.verifySizeTruncation(db, myCollName, options);
- // Insert multiple small documents and verify that at least one truncation has occurred.
- // There should never be more than 3 documents in the collection, regardless of the
- // storage
- // engine. They should always be the most recently inserted documents.
+ // Insert multiple small documents and verify that at least one truncation has occurred.
+ // There should never be more than 3 documents in the collection, regardless of the
+ // storage
+ // engine. They should always be the most recently inserted documents.
- var ids = [];
- var count;
+ var ids = [];
+ var count;
- ids.push(this.insert(db, myCollName, smallDocSize));
- ids.push(this.insert(db, myCollName, smallDocSize));
+ ids.push(this.insert(db, myCollName, smallDocSize));
+ ids.push(this.insert(db, myCollName, smallDocSize));
- for (var i = 0; i < 50; i++) {
- ids.push(this.insert(db, myCollName, smallDocSize));
- count = db[myCollName].find().itcount();
- assertWhenOwnDB.eq(3, count, 'expected truncation to occur due to number of docs');
- assertWhenOwnDB.eq(ids.slice(ids.length - count),
- this.getObjectIds(db, myCollName),
- 'expected truncation to remove the oldest documents');
- }
+ for (var i = 0; i < 50; i++) {
+ ids.push(this.insert(db, myCollName, smallDocSize));
+ count = db[myCollName].find().itcount();
+ assertWhenOwnDB.eq(3, count, 'expected truncation to occur due to number of docs');
+ assertWhenOwnDB.eq(ids.slice(ids.length - count),
+ this.getObjectIds(db, myCollName),
+ 'expected truncation to remove the oldest documents');
}
+ }
- $config.states.create = create;
+ $config.states.create = create;
- return $config;
- });
+ return $config;
+});
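As background for the truncation assertions above: once a capped collection hits its size or max cap, the oldest documents are removed in insertion order. A quick standalone check with a made-up collection and illustrative sizes:

    db.capped_demo.drop();
    assert.commandWorked(
        db.createCollection('capped_demo', {capped: true, size: 8192, max: 3}));
    for (var i = 0; i < 10; ++i) {
        db.capped_demo.insert({i: i});
    }
    // only the three most recent inserts survive; natural order is insertion order
    assert.eq(3, db.capped_demo.find().itcount());
    assert.eq(7, db.capped_demo.findOne().i);  // the oldest remaining document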
diff --git a/jstests/concurrency/fsm_workloads/create_collection.js b/jstests/concurrency/fsm_workloads/create_collection.js
index fdc6d8af9fd..023c6811f1f 100644
--- a/jstests/concurrency/fsm_workloads/create_collection.js
+++ b/jstests/concurrency/fsm_workloads/create_collection.js
@@ -32,17 +32,11 @@ var $config = (function() {
assertAlways.commandWorked(db.createCollection(myCollName));
}
- return {
- init: init,
- create: create
- };
+ return {init: init, create: create};
})();
- var transitions = {
- init: {create: 1},
- create: {create: 1}
- };
+ var transitions = {init: {create: 1}, create: {create: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/create_index_background.js b/jstests/concurrency/fsm_workloads/create_index_background.js
index 046709ebdd3..cfc47156407 100644
--- a/jstests/concurrency/fsm_workloads/create_index_background.js
+++ b/jstests/concurrency/fsm_workloads/create_index_background.js
@@ -105,8 +105,8 @@ var $config = (function() {
for (var i = 0; i < this.nDocumentsToUpdate; ++i) {
// Do randomized updates on index x. A document is not guaranteed
// to match the randomized 'x' predicate.
- res = coll.update({x: Random.randInt(highest), tid: this.tid},
- {$inc: {crud: 1}});
+ res =
+ coll.update({x: Random.randInt(highest), tid: this.tid}, {$inc: {crud: 1}});
assertAlways.writeOK(res);
if (db.getMongo().writeMode() === 'commands') {
assertWhenOwnColl.contains(res.nModified, [0, 1], tojson(res));
@@ -207,10 +207,8 @@ var $config = (function() {
setParameter: 1,
internalQueryExecYieldIterations: internalQueryExecYieldIterations
}));
- assertAlways.commandWorked(db.adminCommand({
- setParameter: 1,
- internalQueryExecYieldPeriodMS: internalQueryExecYieldPeriodMS
- }));
+ assertAlways.commandWorked(db.adminCommand(
+ {setParameter: 1, internalQueryExecYieldPeriodMS: internalQueryExecYieldPeriodMS}));
});
}
diff --git a/jstests/concurrency/fsm_workloads/distinct.js b/jstests/concurrency/fsm_workloads/distinct.js
index c76b5e972f5..c2f519551ce 100644
--- a/jstests/concurrency/fsm_workloads/distinct.js
+++ b/jstests/concurrency/fsm_workloads/distinct.js
@@ -11,11 +11,7 @@ load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropColl
var $config = (function() {
- var data = {
- numDocs: 1000,
- prefix: 'distinct_fsm',
- shardKey: {i: 1}
- };
+ var data = {numDocs: 1000, prefix: 'distinct_fsm', shardKey: {i: 1}};
var states = (function() {
@@ -35,17 +31,11 @@ var $config = (function() {
assertWhenOwnColl.eq(this.numDocs, db[this.threadCollName].distinct('i').length);
}
- return {
- init: init,
- distinct: distinct
- };
+ return {init: init, distinct: distinct};
})();
- var transitions = {
- init: {distinct: 1},
- distinct: {distinct: 1}
- };
+ var transitions = {init: {distinct: 1}, distinct: {distinct: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/distinct_noindex.js b/jstests/concurrency/fsm_workloads/distinct_noindex.js
index b55d1e58d3b..3727c968a14 100644
--- a/jstests/concurrency/fsm_workloads/distinct_noindex.js
+++ b/jstests/concurrency/fsm_workloads/distinct_noindex.js
@@ -35,24 +35,12 @@ var $config = (function() {
assertWhenOwnColl.eq(this.modulus, db[collName].distinct('i', {tid: this.tid}).length);
}
- return {
- init: init,
- distinct: distinct
- };
+ return {init: init, distinct: distinct};
})();
- var transitions = {
- init: {distinct: 1},
- distinct: {distinct: 1}
- };
+ var transitions = {init: {distinct: 1}, distinct: {distinct: 1}};
- return {
- data: data,
- threadCount: 10,
- iterations: 20,
- states: states,
- transitions: transitions
- };
+ return {data: data, threadCount: 10, iterations: 20, states: states, transitions: transitions};
})();
diff --git a/jstests/concurrency/fsm_workloads/distinct_projection.js b/jstests/concurrency/fsm_workloads/distinct_projection.js
index 3f9c4c3192d..cf287cdb210 100644
--- a/jstests/concurrency/fsm_workloads/distinct_projection.js
+++ b/jstests/concurrency/fsm_workloads/distinct_projection.js
@@ -10,18 +10,13 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/distinct.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
- $config.data.prefix = 'distinct_projection_fsm';
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.prefix = 'distinct_projection_fsm';
- $config.states.distinct = function distinct(db, collName) {
- var query = {
- i: {$lt: this.numDocs / 2}
- };
- assertWhenOwnColl.eq(
- this.numDocs / 2,
- db[this.threadCollName].distinct('i', query).length);
- };
+ $config.states.distinct = function distinct(db, collName) {
+ var query = {i: {$lt: this.numDocs / 2}};
+ assertWhenOwnColl.eq(this.numDocs / 2, db[this.threadCollName].distinct('i', query).length);
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/drop_collection.js b/jstests/concurrency/fsm_workloads/drop_collection.js
index 64a60ef8e79..89952c0a48e 100644
--- a/jstests/concurrency/fsm_workloads/drop_collection.js
+++ b/jstests/concurrency/fsm_workloads/drop_collection.js
@@ -30,24 +30,12 @@ var $config = (function() {
assertAlways(db[myCollName].drop());
}
- return {
- init: init,
- createAndDrop: createAndDrop
- };
+ return {init: init, createAndDrop: createAndDrop};
})();
- var transitions = {
- init: {createAndDrop: 1},
- createAndDrop: {createAndDrop: 1}
- };
+ var transitions = {init: {createAndDrop: 1}, createAndDrop: {createAndDrop: 1}};
- return {
- threadCount: 10,
- iterations: 10,
- data: data,
- states: states,
- transitions: transitions
- };
+ return {threadCount: 10, iterations: 10, data: data, states: states, transitions: transitions};
})();
diff --git a/jstests/concurrency/fsm_workloads/drop_database.js b/jstests/concurrency/fsm_workloads/drop_database.js
index 9a6b9e0fb80..9d372d354f8 100644
--- a/jstests/concurrency/fsm_workloads/drop_database.js
+++ b/jstests/concurrency/fsm_workloads/drop_database.js
@@ -24,10 +24,7 @@ var $config = (function() {
}
};
- var transitions = {
- init: {createAndDrop: 1},
- createAndDrop: {createAndDrop: 1}
- };
+ var transitions = {init: {createAndDrop: 1}, createAndDrop: {createAndDrop: 1}};
return {
threadCount: 10,
diff --git a/jstests/concurrency/fsm_workloads/explain.js b/jstests/concurrency/fsm_workloads/explain.js
index 983218a7fbf..89dfa530d76 100644
--- a/jstests/concurrency/fsm_workloads/explain.js
+++ b/jstests/concurrency/fsm_workloads/explain.js
@@ -61,11 +61,7 @@ var $config = (function() {
}
}
- return {
- insert: insert,
- explain: explain,
- explainNonExistentNS: explainNonExistentNS
- };
+ return {insert: insert, explain: explain, explainNonExistentNS: explainNonExistentNS};
})();
diff --git a/jstests/concurrency/fsm_workloads/explain_aggregate.js b/jstests/concurrency/fsm_workloads/explain_aggregate.js
index e0bbccb7683..883ab11c06e 100644
--- a/jstests/concurrency/fsm_workloads/explain_aggregate.js
+++ b/jstests/concurrency/fsm_workloads/explain_aggregate.js
@@ -9,43 +9,37 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
-
- function assertCursorStages(num, obj) {
- assertAlways(obj.stages, tojson(obj));
- assertAlways.eq(num, obj.stages.length, tojson(obj.stages));
- assertAlways(obj.stages[0].$cursor, tojson(obj.stages[0]));
- assertAlways(obj.stages[0].$cursor.hasOwnProperty('queryPlanner'),
- tojson(obj.stages[0].$cursor));
+var $config = extendWorkload($config, function($config, $super) {
+
+ function assertCursorStages(num, obj) {
+ assertAlways(obj.stages, tojson(obj));
+ assertAlways.eq(num, obj.stages.length, tojson(obj.stages));
+ assertAlways(obj.stages[0].$cursor, tojson(obj.stages[0]));
+ assertAlways(obj.stages[0].$cursor.hasOwnProperty('queryPlanner'),
+ tojson(obj.stages[0].$cursor));
+ }
+
+ $config.states = Object.extend({
+ explainMatch: function explainMatch(db, collName) {
+ var res = db[collName].explain().aggregate([{$match: {i: this.nInserted / 2}}]);
+ assertAlways.commandWorked(res);
+
+ // stages reported: $cursor
+ assertCursorStages(1, res);
+ },
+ explainMatchProject: function explainMatchProject(db, collName) {
+ var res = db[collName].explain().aggregate(
+ [{$match: {i: this.nInserted / 3}}, {$project: {i: 1}}]);
+ assertAlways.commandWorked(res);
+
+ // stages reported: $cursor, $project
+ assertCursorStages(2, res);
}
+ },
+ $super.states);
- $config.states = Object.extend(
- {
- explainMatch: function explainMatch(db, collName) {
- var res = db[collName].explain().aggregate([{$match: {i: this.nInserted / 2}}]);
- assertAlways.commandWorked(res);
-
- // stages reported: $cursor
- assertCursorStages(1, res);
- },
- explainMatchProject: function explainMatchProject(db, collName) {
- var res =
- db[collName]
- .explain()
- .aggregate([{$match: {i: this.nInserted / 3}}, {$project: {i: 1}}]);
- assertAlways.commandWorked(res);
-
- // stages reported: $cursor, $project
- assertCursorStages(2, res);
- }
- },
- $super.states);
-
- $config.transitions =
- Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
- $super.transitions);
-
- return $config;
- });
+ $config.transitions = Object.extend(
+ {explain: $config.data.assignEqualProbsToTransitions($config.states)}, $super.transitions);
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/explain_count.js b/jstests/concurrency/fsm_workloads/explain_count.js
index b6a9f0fa8b8..948a108ca10 100644
--- a/jstests/concurrency/fsm_workloads/explain_count.js
+++ b/jstests/concurrency/fsm_workloads/explain_count.js
@@ -10,59 +10,54 @@ load('jstests/concurrency/fsm_workloads/explain.js'); // for $confi
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
load('jstests/libs/analyze_plan.js'); // for planHasStage
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- function assertNCounted(num, obj, db) {
- var stage = obj.executionStats.executionStages;
- // get sharded stage(s) if counting on mongos
- if (isMongos(db)) {
- stage = stage.shards[0].executionStages;
- }
- assertWhenOwnColl.eq(num, stage.nCounted);
+ function assertNCounted(num, obj, db) {
+ var stage = obj.executionStats.executionStages;
+ // get sharded stage(s) if counting on mongos
+ if (isMongos(db)) {
+ stage = stage.shards[0].executionStages;
}
+ assertWhenOwnColl.eq(num, stage.nCounted);
+ }
- $config.states = Object.extend(
- {
- explainBasicCount: function explainBasicCount(db, collName) {
- var res = db[collName].explain().count();
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
- },
- explainCountHint: function explainCountHint(db, collName) {
- assertWhenOwnColl(function() {
- var res =
- db[collName].explain().find({i: this.nInserted / 2}).hint({i: 1}).count();
- assertWhenOwnColl.commandWorked(res);
- assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
- assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT_SCAN'));
- });
- },
- explainCountNoSkipLimit: function explainCountNoSkipLimit(db, collName) {
- var res = db[collName]
- .explain('executionStats')
- .find({i: this.nInserted})
- .skip(1)
- .count(false);
- assertAlways.commandWorked(res);
- assertNCounted(1, res, db);
- },
- explainCountSkipLimit: function explainCountSkipLimit(db, collName) {
- var res = db[collName]
- .explain('executionStats')
- .find({i: this.nInserted})
- .skip(1)
- .count(true);
- assertAlways.commandWorked(res);
- assertNCounted(0, res, db);
- }
- },
- $super.states);
+ $config.states = Object.extend({
+ explainBasicCount: function explainBasicCount(db, collName) {
+ var res = db[collName].explain().count();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
+ },
+ explainCountHint: function explainCountHint(db, collName) {
+ assertWhenOwnColl(function() {
+ var res = db[collName].explain().find({i: this.nInserted / 2}).hint({i: 1}).count();
+ assertWhenOwnColl.commandWorked(res);
+ assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT'));
+ assertWhenOwnColl(planHasStage(res.queryPlanner.winningPlan, 'COUNT_SCAN'));
+ });
+ },
+ explainCountNoSkipLimit: function explainCountNoSkipLimit(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .find({i: this.nInserted})
+ .skip(1)
+ .count(false);
+ assertAlways.commandWorked(res);
+ assertNCounted(1, res, db);
+ },
+ explainCountSkipLimit: function explainCountSkipLimit(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .find({i: this.nInserted})
+ .skip(1)
+ .count(true);
+ assertAlways.commandWorked(res);
+ assertNCounted(0, res, db);
+ }
+ },
+ $super.states);
- $config.transitions =
- Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
- $super.transitions);
+ $config.transitions = Object.extend(
+ {explain: $config.data.assignEqualProbsToTransitions($config.states)}, $super.transitions);
- return $config;
- });
+ return $config;
+});
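
All of these explain workloads splice in new states and then spread the outgoing probabilities of the `explain` transition evenly across them via `$config.data.assignEqualProbsToTransitions`. That helper is defined in jstests/concurrency/fsm_workloads/explain.js and is not part of this diff; the sketch below shows the behavior it is assumed to have.

// Assumed behavior of assignEqualProbsToTransitions (defined in explain.js,
// not shown in this hunk): map each state name to an equal probability.
function assignEqualProbsToTransitions(states) {
    var stateNames = Object.keys(states);
    assert.gt(stateNames.length, 0, 'expected at least one state');
    var probs = {};
    stateNames.forEach(function(name) {
        probs[name] = 1.0 / stateNames.length;
    });
    return probs;
}
// e.g. two states yield {explainBasicCount: 0.5, explainCountHint: 0.5}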
diff --git a/jstests/concurrency/fsm_workloads/explain_distinct.js b/jstests/concurrency/fsm_workloads/explain_distinct.js
index 86c30f3aca9..1c14254daed 100644
--- a/jstests/concurrency/fsm_workloads/explain_distinct.js
+++ b/jstests/concurrency/fsm_workloads/explain_distinct.js
@@ -9,28 +9,24 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
load('jstests/libs/analyze_plan.js'); // for planHasStage
-var $config = extendWorkload(
- $config,
- function($config, $super) {
- $config.states = Object.extend(
- {
- explainBasicDistinct: function(db, collName) {
- var res = db[collName].explain().distinct('i');
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COLLSCAN'));
- },
- explainDistinctIndex: function(db, collName) {
- var res = db[collName].explain().distinct('_id');
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'PROJECTION'));
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'DISTINCT_SCAN'));
- }
- },
- $super.states);
+var $config = extendWorkload($config, function($config, $super) {
+ $config.states = Object.extend({
+ explainBasicDistinct: function(db, collName) {
+ var res = db[collName].explain().distinct('i');
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'COLLSCAN'));
+ },
+ explainDistinctIndex: function(db, collName) {
+ var res = db[collName].explain().distinct('_id');
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'PROJECTION'));
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'DISTINCT_SCAN'));
+ }
+ },
+ $super.states);
- $config.transitions =
- Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
- $super.transitions);
+ $config.transitions = Object.extend(
+ {explain: $config.data.assignEqualProbsToTransitions($config.states)}, $super.transitions);
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/explain_find.js b/jstests/concurrency/fsm_workloads/explain_find.js
index f0b6f099c63..e338b00e516 100644
--- a/jstests/concurrency/fsm_workloads/explain_find.js
+++ b/jstests/concurrency/fsm_workloads/explain_find.js
@@ -10,62 +10,57 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
load('jstests/libs/analyze_plan.js'); // for planHasStage and isIxscan
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.states = Object.extend(
- {
- explainLimit: function explainLimit(db, collName) {
- var res = db[collName].find().limit(3).explain();
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'LIMIT'));
- },
- explainBatchSize: function explainBatchSize(db, collName) {
- var res = db[collName].find().batchSize(3).explain();
- assertAlways.commandWorked(res);
- },
- explainAddOption: function explainAddOption(db, collName) {
- var res =
- db[collName].explain().find().addOption(DBQuery.Option.exhaust).finish();
- assertAlways.commandWorked(res);
- },
- explainSkip: function explainSkip(db, collName) {
- var res = db[collName].explain().find().skip(3).finish();
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SKIP'));
- },
- explainSort: function explainSort(db, collName) {
- var res = db[collName].find().sort({i: -1}).explain();
- assertAlways.commandWorked(res);
- assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SORT'));
- },
- explainHint: function explainHint(db, collName) {
- assertWhenOwnColl(function() {
- var res = db[collName].find().hint({j: 1}).explain();
- assertWhenOwnColl.commandWorked(res);
- assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
- });
- },
- explainMaxTimeMS: function explainMaxTimeMS(db, collName) {
- var res = db[collName].find().maxTimeMS(2000).explain();
- assertAlways.commandWorked(res);
- },
- explainSnapshot: function explainSnapshot(db, collName) {
- var res = db[collName].find().snapshot().explain();
- assertAlways.commandWorked(res);
- assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
- }
- },
- $super.states);
+ $config.states = Object.extend({
+ explainLimit: function explainLimit(db, collName) {
+ var res = db[collName].find().limit(3).explain();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'LIMIT'));
+ },
+ explainBatchSize: function explainBatchSize(db, collName) {
+ var res = db[collName].find().batchSize(3).explain();
+ assertAlways.commandWorked(res);
+ },
+ explainAddOption: function explainAddOption(db, collName) {
+ var res = db[collName].explain().find().addOption(DBQuery.Option.exhaust).finish();
+ assertAlways.commandWorked(res);
+ },
+ explainSkip: function explainSkip(db, collName) {
+ var res = db[collName].explain().find().skip(3).finish();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SKIP'));
+ },
+ explainSort: function explainSort(db, collName) {
+ var res = db[collName].find().sort({i: -1}).explain();
+ assertAlways.commandWorked(res);
+ assertAlways(planHasStage(res.queryPlanner.winningPlan, 'SORT'));
+ },
+ explainHint: function explainHint(db, collName) {
+ assertWhenOwnColl(function() {
+ var res = db[collName].find().hint({j: 1}).explain();
+ assertWhenOwnColl.commandWorked(res);
+ assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
+ });
+ },
+ explainMaxTimeMS: function explainMaxTimeMS(db, collName) {
+ var res = db[collName].find().maxTimeMS(2000).explain();
+ assertAlways.commandWorked(res);
+ },
+ explainSnapshot: function explainSnapshot(db, collName) {
+ var res = db[collName].find().snapshot().explain();
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(isIxscan(res.queryPlanner.winningPlan));
+ }
+ },
+ $super.states);
- $config.transitions =
- Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
- $super.transitions);
+ $config.transitions = Object.extend(
+ {explain: $config.data.assignEqualProbsToTransitions($config.states)}, $super.transitions);
- // doubling number of iterations so there is a higher chance we will
- // transition to each of the 8 new states at least once
- $config.iterations = $super.iterations * 2;
+    // doubling the number of iterations so there is a higher chance we will
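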
+ // transition to each of the 8 new states at least once
+ $config.iterations = $super.iterations * 2;
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/explain_group.js b/jstests/concurrency/fsm_workloads/explain_group.js
index f379bf4e608..99e0a0c1266 100644
--- a/jstests/concurrency/fsm_workloads/explain_group.js
+++ b/jstests/concurrency/fsm_workloads/explain_group.js
@@ -10,23 +10,19 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
load('jstests/libs/analyze_plan.js'); // for planHasStage
-var $config =
- extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.states = Object.extend(
- {
- explainBasicGroup: function explainBasicGroup(db, collName) {
- var res = db[collName].explain().group(
- {key: {i: 1}, initial: {}, reduce: function() {}});
- assertAlways.commandWorked(res);
- }
- },
- $super.states);
+ $config.states = Object.extend({
+ explainBasicGroup: function explainBasicGroup(db, collName) {
+ var res =
+ db[collName].explain().group({key: {i: 1}, initial: {}, reduce: function() {}});
+ assertAlways.commandWorked(res);
+ }
+ },
+ $super.states);
- $config.transitions = Object.extend(
- {explain: $config.data.assignEqualProbsToTransitions($config.states)},
- $super.transitions);
+ $config.transitions = Object.extend(
+ {explain: $config.data.assignEqualProbsToTransitions($config.states)}, $super.transitions);
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/explain_remove.js b/jstests/concurrency/fsm_workloads/explain_remove.js
index c5c05a9af69..173f9b44623 100644
--- a/jstests/concurrency/fsm_workloads/explain_remove.js
+++ b/jstests/concurrency/fsm_workloads/explain_remove.js
@@ -8,42 +8,37 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.states = Object.extend(
- {
- explainSingleRemove: function explainSingleRemove(db, collName) {
- var res = db[collName]
- .explain('executionStats')
- .remove({i: this.nInserted}, /* justOne */ true);
- assertAlways.commandWorked(res);
- assertWhenOwnColl(function() {
- assertWhenOwnColl.eq(1, res.executionStats.totalDocsExamined);
+ $config.states = Object.extend({
+ explainSingleRemove: function explainSingleRemove(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .remove({i: this.nInserted}, /* justOne */ true);
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.eq(1, res.executionStats.totalDocsExamined);
- // the document should not have been deleted.
- assertWhenOwnColl.eq(1, db[collName].find({i: this.nInserted}).itcount());
- }.bind(this));
- },
- explainMultiRemove: function explainMultiRemove(db, collName) {
- var res = db[collName]
- .explain('executionStats')
- .remove({i: {$lte: this.nInserted / 2}});
- assertAlways.commandWorked(res);
- assertWhenOwnColl(function() {
- assertWhenOwnColl.eq(this.nInserted / 2 + 1,
- explain.executionStats.totalDocsExamined);
- // no documents should have been deleted
- assertWhenOwnColl.eq(this.nInserted, db[collName].itcount());
- }.bind(this));
- }
- },
- $super.states);
+ // the document should not have been deleted.
+ assertWhenOwnColl.eq(1, db[collName].find({i: this.nInserted}).itcount());
+ }.bind(this));
+ },
+ explainMultiRemove: function explainMultiRemove(db, collName) {
+ var res =
+ db[collName].explain('executionStats').remove({i: {$lte: this.nInserted / 2}});
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(function() {
+                assertWhenOwnColl.eq(this.nInserted / 2 + 1,
+                                     res.executionStats.totalDocsExamined);
+ // no documents should have been deleted
+ assertWhenOwnColl.eq(this.nInserted, db[collName].itcount());
+ }.bind(this));
+ }
+ },
+ $super.states);
- $config.transitions =
- Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
- $super.transitions);
+ $config.transitions = Object.extend(
+ {explain: $config.data.assignEqualProbsToTransitions($config.states)}, $super.transitions);
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/explain_update.js b/jstests/concurrency/fsm_workloads/explain_update.js
index e63f5948fef..787ccdf7e4b 100644
--- a/jstests/concurrency/fsm_workloads/explain_update.js
+++ b/jstests/concurrency/fsm_workloads/explain_update.js
@@ -9,69 +9,64 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extend
load('jstests/concurrency/fsm_workloads/explain.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.states = Object.extend(
- {
- explainBasicUpdate: function explainBasicUpdate(db, collName) {
- var res = db[collName]
- .explain('executionStats')
- .update({i: this.nInserted}, {$set: {j: 49}});
- assertAlways.commandWorked(res);
- assertWhenOwnColl(function() {
- assertWhenOwnColl.eq(1, explain.executionStats.totalDocsExamined);
+ $config.states = Object.extend({
+ explainBasicUpdate: function explainBasicUpdate(db, collName) {
+ var res =
+ db[collName].explain('executionStats').update({i: this.nInserted}, {$set: {j: 49}});
+ assertAlways.commandWorked(res);
+ assertWhenOwnColl(function() {
+            assertWhenOwnColl.eq(1, res.executionStats.totalDocsExamined);
- // document should not have been updated.
- var doc = db[collName].findOne({i: this.nInserted});
- assertWhenOwnColl.eq(2 * this.nInserted, doc.j);
- }.bind(this));
- },
- explainUpdateUpsert: function explainUpdateUpsert(db, collName) {
- var res = db[collName]
- .explain('executionStats')
- .update({i: 2 * this.nInserted + 1},
- {$set: {j: 81}},
- /* upsert */ true);
- assertAlways.commandWorked(res);
- var stage = res.executionStats.executionStages;
+ // document should not have been updated.
+ var doc = db[collName].findOne({i: this.nInserted});
+ assertWhenOwnColl.eq(2 * this.nInserted, doc.j);
+ }.bind(this));
+ },
+ explainUpdateUpsert: function explainUpdateUpsert(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .update({i: 2 * this.nInserted + 1},
+ {$set: {j: 81}},
+ /* upsert */ true);
+ assertAlways.commandWorked(res);
+ var stage = res.executionStats.executionStages;
- // if explaining a write command through mongos
- if (isMongos(db)) {
- stage = stage.shards[0].executionStages;
- }
- assertAlways.eq(stage.stage, 'UPDATE');
- assertWhenOwnColl(stage.wouldInsert);
+ // if explaining a write command through mongos
+ if (isMongos(db)) {
+ stage = stage.shards[0].executionStages;
+ }
+ assertAlways.eq(stage.stage, 'UPDATE');
+ assertWhenOwnColl(stage.wouldInsert);
- // make sure that the insert didn't actually happen.
- assertWhenOwnColl.eq(this.nInserted, db[collName].find().itcount());
- },
- explainUpdateMulti: function explainUpdateMulti(db, collName) {
- var res = db[collName]
- .explain('executionStats')
- .update({i: {$lte: 2}},
- {$set: {b: 3}},
- /* upsert */ false,
- /* multi */ true);
- assertAlways.commandWorked(res);
- var stage = res.executionStats.executionStages;
+ // make sure that the insert didn't actually happen.
+ assertWhenOwnColl.eq(this.nInserted, db[collName].find().itcount());
+ },
+ explainUpdateMulti: function explainUpdateMulti(db, collName) {
+ var res = db[collName]
+ .explain('executionStats')
+ .update({i: {$lte: 2}},
+ {$set: {b: 3}},
+ /* upsert */ false,
+ /* multi */ true);
+ assertAlways.commandWorked(res);
+ var stage = res.executionStats.executionStages;
- // if explaining a write command through mongos
- if (isMongos(db)) {
- stage = stage.shards[0].executionStages;
- }
- assertAlways.eq(stage.stage, 'UPDATE');
- assertWhenOwnColl(!stage.wouldInsert);
- assertWhenOwnColl.eq(3, stage.nMatched);
- assertWhenOwnColl.eq(3, stage.nWouldModify);
- }
- },
- $super.states);
+ // if explaining a write command through mongos
+ if (isMongos(db)) {
+ stage = stage.shards[0].executionStages;
+ }
+ assertAlways.eq(stage.stage, 'UPDATE');
+ assertWhenOwnColl(!stage.wouldInsert);
+ assertWhenOwnColl.eq(3, stage.nMatched);
+ assertWhenOwnColl.eq(3, stage.nWouldModify);
+ }
+ },
+ $super.states);
- $config.transitions =
- Object.extend({explain: $config.data.assignEqualProbsToTransitions($config.states)},
- $super.transitions);
+ $config.transitions = Object.extend(
+ {explain: $config.data.assignEqualProbsToTransitions($config.states)}, $super.transitions);
- return $config;
- });
+ return $config;
+});
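
Both the count and update workloads unwrap sharded explain output the same way before inspecting the stage. A minimal sketch of that unwrapping, assuming a shell connected to either a mongos or a mongod; the collection name is illustrative, and here the mongos case is detected by the presence of the 'shards' array rather than by the isMongos helper.

// Explain output for writes through mongos nests per-shard stages under a
// 'shards' array; a standalone mongod reports the write stage directly.
var res = db.explain_sketch.explain('executionStats').update({i: 1}, {$set: {j: 2}});
assert.commandWorked(res);
var stage = res.executionStats.executionStages;
if (stage.shards !== undefined) {  // ran through mongos
    stage = stage.shards[0].executionStages;
}
assert.eq('UPDATE', stage.stage);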
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_inc.js b/jstests/concurrency/fsm_workloads/findAndModify_inc.js
index 5c5d55b55b8..5a8b63392a8 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_inc.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_inc.js
@@ -24,9 +24,7 @@ var $config = (function() {
},
update: function update(db, collName) {
- var updateDoc = {
- $inc: {}
- };
+ var updateDoc = {$inc: {}};
updateDoc.$inc[this.fieldName] = 1;
var res = db.runCommand(
@@ -64,11 +62,7 @@ var $config = (function() {
};
- var transitions = {
- init: {update: 1},
- update: {find: 1},
- find: {update: 1}
- };
+ var transitions = {init: {update: 1}, update: {find: 1}, find: {update: 1}};
function setup(db, collName, cluster) {
db[collName].insert({_id: 'findAndModify_inc'});
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
index 11ac81d63fd..19cd191b1aa 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
@@ -21,79 +21,73 @@ load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for
// For isMongod and supportsDocumentLevelConcurrency.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- // Use the workload name as the database name, since the workload name is assumed to be
- // unique.
- $config.data.uniqueDBName = 'findAndModify_mixed_queue_unindexed';
+ // Use the workload name as the database name, since the workload name is assumed to be
+ // unique.
+ $config.data.uniqueDBName = 'findAndModify_mixed_queue_unindexed';
- $config.data.newDocForInsert = function newDocForInsert(i) {
- return {
- _id: i,
- rand: Random.rand(),
- counter: 0
- };
- };
+ $config.data.newDocForInsert = function newDocForInsert(i) {
+ return {_id: i, rand: Random.rand(), counter: 0};
+ };
- $config.data.getIndexSpecs = function getIndexSpecs() {
- return [];
- };
+ $config.data.getIndexSpecs = function getIndexSpecs() {
+ return [];
+ };
- $config.data.opName = 'modified';
+ $config.data.opName = 'modified';
- $config.data.validateResult = function validateResult(db, collName, res) {
- assertAlways.commandWorked(res);
+ $config.data.validateResult = function validateResult(db, collName, res) {
+ assertAlways.commandWorked(res);
- var doc = res.value;
- if (isMongod(db) && supportsDocumentLevelConcurrency(db)) {
- // Storage engines which do not support document-level concurrency will not
- // automatically retry if there was a conflict, so it is expected that it may return
- // null in the case of a conflict. All other storage engines should automatically
- // retry the operation, and thus should never return null.
- assertWhenOwnColl.neq(
- doc, null, 'findAndModify should have found a matching document');
- }
- if (doc !== null) {
- this.saveDocId(db, collName, doc._id);
- }
- };
+ var doc = res.value;
+ if (isMongod(db) && supportsDocumentLevelConcurrency(db)) {
+ // Storage engines which do not support document-level concurrency will not
+ // automatically retry if there was a conflict, so it is expected that it may return
+ // null in the case of a conflict. All other storage engines should automatically
+ // retry the operation, and thus should never return null.
+ assertWhenOwnColl.neq(doc, null, 'findAndModify should have found a matching document');
+ }
+ if (doc !== null) {
+ this.saveDocId(db, collName, doc._id);
+ }
+ };
- $config.states = (function() {
- // Avoid removing documents that were already updated.
- function remove(db, collName) {
- var res = db.runCommand({
- findAndModify: db[collName].getName(),
- query: {counter: 0},
- sort: {rand: -1},
- remove: true
- });
- this.validateResult(db, collName, res);
- }
+ $config.states = (function() {
+ // Avoid removing documents that were already updated.
+ function remove(db, collName) {
+ var res = db.runCommand({
+ findAndModify: db[collName].getName(),
+ query: {counter: 0},
+ sort: {rand: -1},
+ remove: true
+ });
+ this.validateResult(db, collName, res);
+ }
- function update(db, collName) {
- // Update the counter field to avoid matching the same document again.
- var res = db.runCommand({
- findAndModify: db[collName].getName(),
- query: {counter: 0},
- sort: {rand: -1},
- update: {$inc: {counter: 1}}, new: false
- });
- this.validateResult(db, collName, res);
- }
+ function update(db, collName) {
+ // Update the counter field to avoid matching the same document again.
+ var res = db.runCommand({
+ findAndModify: db[collName].getName(),
+ query: {counter: 0},
+ sort: {rand: -1},
+ update: {$inc: {counter: 1}},
+ new: false
+ });
+ this.validateResult(db, collName, res);
+ }
- return {
- remove: remove,
- update: update,
- };
+ return {
+ remove: remove,
+ update: update,
+ };
- })();
+ })();
- $config.transitions = {
- remove: {remove: 0.5, update: 0.5},
- update: {remove: 0.5, update: 0.5},
- };
+ $config.transitions = {
+ remove: {remove: 0.5, update: 0.5},
+ update: {remove: 0.5, update: 0.5},
+ };
- return $config;
- });
+ return $config;
+});
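
The queue workloads above all rely on the same findAndModify contract: the claimed document comes back in res.value, except that storage engines without document-level concurrency may legitimately return null after a write conflict. A minimal single-threaded sketch of the claim step, with an illustrative collection name:

// Single-threaded sketch of the queue claim step; Random must be seeded in
// the mongo shell before Random.rand() can be used.
Random.setRandomSeed();
var queue = db.fam_queue_sketch;
queue.drop();
for (var i = 0; i < 5; ++i) {
    assert.writeOK(queue.insert({_id: i, rand: Random.rand(), counter: 0}));
}

var res = db.runCommand({
    findAndModify: queue.getName(),
    query: {counter: 0},           // only documents no thread has claimed yet
    sort: {rand: -1},              // highest rand first
    update: {$inc: {counter: 1}},  // mark as claimed so it cannot match again
    new: false
});
assert.commandWorked(res);
// With a single thread there can be no conflict, so a document must come back.
assert.neq(null, res.value);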
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove.js b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
index b33e67b2e01..b4a32a3cc74 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove.js
@@ -8,9 +8,7 @@
*/
var $config = (function() {
- var data = {
- shardKey: {tid: 1}
- };
+ var data = {shardKey: {tid: 1}};
var states = (function() {
@@ -42,24 +40,12 @@ var $config = (function() {
this.iter++;
}
- return {
- init: init,
- insertAndRemove: insertAndRemove
- };
+ return {init: init, insertAndRemove: insertAndRemove};
})();
- var transitions = {
- init: {insertAndRemove: 1},
- insertAndRemove: {insertAndRemove: 1}
- };
+ var transitions = {init: {insertAndRemove: 1}, insertAndRemove: {insertAndRemove: 1}};
- return {
- threadCount: 20,
- iterations: 20,
- data: data,
- states: states,
- transitions: transitions
- };
+ return {threadCount: 20, iterations: 20, data: data, states: states, transitions: transitions};
})();
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
index 3a330529e0c..9b945468cf5 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
@@ -22,10 +22,7 @@ var $config = (function() {
uniqueDBName: 'findAndModify_remove_queue',
newDocForInsert: function newDocForInsert(i) {
- return {
- _id: i,
- rand: Random.rand()
- };
+ return {_id: i, rand: Random.rand()};
},
getIndexSpecs: function getIndexSpecs() {
@@ -38,9 +35,7 @@ var $config = (function() {
// Use a separate database to avoid conflicts with other FSM workloads.
var ownedDB = db.getSiblingDB(db.getName() + this.uniqueDBName);
- var updateDoc = {
- $push: {}
- };
+ var updateDoc = {$push: {}};
updateDoc.$push[this.opName] = id;
var res = ownedDB[collName].update({_id: this.tid}, updateDoc, {upsert: true});
@@ -64,12 +59,8 @@ var $config = (function() {
var states = (function() {
function remove(db, collName) {
- var res = db.runCommand({
- findAndModify: db[collName].getName(),
- query: {},
- sort: {rand: -1},
- remove: true
- });
+ var res = db.runCommand(
+ {findAndModify: db[collName].getName(), query: {}, sort: {rand: -1}, remove: true});
assertAlways.commandWorked(res);
var doc = res.value;
@@ -86,15 +77,11 @@ var $config = (function() {
}
}
- return {
- remove: remove
- };
+ return {remove: remove};
})();
- var transitions = {
- remove: {remove: 1}
- };
+ var transitions = {remove: {remove: 1}};
function setup(db, collName, cluster) {
// Each thread should remove exactly one document per iteration.
@@ -193,10 +180,7 @@ var $config = (function() {
if (!smallestValueIsSet) {
return null;
}
- return {
- value: smallestValue,
- indices: smallestIndices
- };
+ return {value: smallestValue, indices: smallestIndices};
}
}
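
saveDocId, defined earlier in findAndModify_remove_queue.js, records per thread which _ids were claimed, using a sibling database keyed by the workload name. A sketch of that bookkeeping under the same assumptions; the tid, the claimed id, and the owned collection name are illustrative values.

// Push each claimed _id onto an array field named after the operation,
// inside a per-thread document in the workload's own database.
var ownedDB = db.getSiblingDB(db.getName() + 'findAndModify_remove_queue');
var opName = 'removed';  // $config.data.opName
var tid = 0;             // thread id, supplied by the FSM framework
var claimedId = 42;      // _id returned by a findAndModify remove

var updateDoc = {$push: {}};
updateDoc.$push[opName] = claimedId;
assert.writeOK(ownedDB.coll.update({_id: tid}, updateDoc, {upsert: true}));
// Teardown can then verify every _id appears in exactly one thread's array.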
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
index 80ce7567a7d..387c5467f04 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
@@ -16,16 +16,15 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- // Use the workload name as the database name, since the workload
- // name is assumed to be unique.
- $config.data.uniqueDBName = 'findAndModify_remove_queue_unindexed';
+ // Use the workload name as the database name, since the workload
+ // name is assumed to be unique.
+ $config.data.uniqueDBName = 'findAndModify_remove_queue_unindexed';
- $config.data.getIndexSpecs = function getIndexSpecs() {
- return [];
- };
+ $config.data.getIndexSpecs = function getIndexSpecs() {
+ return [];
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update.js b/jstests/concurrency/fsm_workloads/findAndModify_update.js
index c794c755ed9..16aa80b8a33 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update.js
@@ -18,11 +18,7 @@ var $config = (function() {
var states = (function() {
function makeDoc(tid) {
- return {
- _id: new ObjectId(),
- tid: tid,
- value: 0
- };
+ return {_id: new ObjectId(), tid: tid, value: 0};
}
function init(db, collName) {
@@ -40,7 +36,8 @@ var $config = (function() {
findandmodify: db[collName].getName(),
query: {tid: this.tid},
sort: {value: 1},
- update: {$max: {value: updatedValue}}, new: true
+ update: {$max: {value: updatedValue}},
+ new: true
});
assertAlways.commandWorked(res);
@@ -60,7 +57,8 @@ var $config = (function() {
findandmodify: db[collName].getName(),
query: {tid: this.tid},
sort: {value: -1},
- update: {$min: {value: updatedValue}}, new: true
+ update: {$min: {value: updatedValue}},
+ new: true
});
assertAlways.commandWorked(res);
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
index f9e40b6b467..d1c8134bd39 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
@@ -13,16 +13,15 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/findAndModify_update.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- // Do not create the { tid: 1, value: 1 } index so that a
- // collection
- // scan is performed for the query and sort operations.
- $config.setup = function setup(db, collName, cluster) {};
+    // Do not create the { tid: 1, value: 1 } index so that a collection
+    // scan is performed for the query and sort operations.
+ $config.setup = function setup(db, collName, cluster) {};
- // Remove the shardKey so that a collection scan is performed
- delete $config.data.shardKey;
+ // Remove the shardKey so that a collection scan is performed
+ delete $config.data.shardKey;
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
index 277b2882700..7c32f6aefec 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
@@ -30,11 +30,7 @@ var $config = (function() {
function makeDoc(tid) {
// Use 32-bit integer for representing 'length' property
// to ensure $mul does integer multiplication
- var doc = {
- _id: new ObjectId(),
- tid: tid,
- length: new NumberInt(1)
- };
+ var doc = {_id: new ObjectId(), tid: tid, length: new NumberInt(1)};
doc[uniqueFieldName] = makeStringOfLength(doc.length);
return doc;
}
@@ -70,17 +66,15 @@ var $config = (function() {
var updatedLength = factor * this.length;
var updatedValue = makeStringOfLength(updatedLength);
- var update = {
- $set: {},
- $mul: {length: factor}
- };
+ var update = {$set: {}, $mul: {length: factor}};
update.$set[uniqueFieldName] = updatedValue;
var res = db.runCommand({
findandmodify: db[collName].getName(),
query: {tid: this.tid},
sort: {length: 1}, // fetch document of smallest size
- update: update, new: true
+ update: update,
+ new: true
});
assertAlways.commandWorked(res);
@@ -117,10 +111,7 @@ var $config = (function() {
})();
- var transitions = {
- insert: {findAndModify: 1},
- findAndModify: {findAndModify: 1}
- };
+ var transitions = {insert: {findAndModify: 1}, findAndModify: {findAndModify: 1}};
return {
threadCount: 20,
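
findAndModify_update_grow stores 'length' as a NumberInt precisely so that $mul keeps performing integer multiplication instead of promoting the field to a double. A sketch of one grow step under those assumptions; the collection and string field names are illustrative.

// One grow step: double 'length' with $mul and swap in a string twice as long.
var grow = db.fam_grow_sketch;
grow.drop();
assert.writeOK(grow.insert({_id: new ObjectId(), tid: 0, length: new NumberInt(1), str: 'x'}));

var factor = 2;
var update = {$set: {str: new Array(2 + 1).join('x')}, $mul: {length: factor}};
var res = db.runCommand({
    findandmodify: grow.getName(),
    query: {tid: 0},
    sort: {length: 1},  // grow the smallest matching document first
    update: update,
    new: true
});
assert.commandWorked(res);
assert.eq(2, res.value.length);  // still an integer after $mul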
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
index 104b299b317..1d82f4b7eb2 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
@@ -17,69 +17,58 @@ load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for
// For isMongod and supportsDocumentLevelConcurrency.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- // Use the workload name as the database name, since the workload name is assumed to be
- // unique.
- $config.data.uniqueDBName = 'findAndModify_update_queue';
+ // Use the workload name as the database name, since the workload name is assumed to be
+ // unique.
+ $config.data.uniqueDBName = 'findAndModify_update_queue';
- $config.data.newDocForInsert = function newDocForInsert(i) {
- return {
- _id: i,
- rand: Random.rand(),
- counter: 0
- };
- };
+ $config.data.newDocForInsert = function newDocForInsert(i) {
+ return {_id: i, rand: Random.rand(), counter: 0};
+ };
- $config.data.getIndexSpecs = function getIndexSpecs() {
- return [{counter: 1, rand: -1}];
- };
+ $config.data.getIndexSpecs = function getIndexSpecs() {
+ return [{counter: 1, rand: -1}];
+ };
- $config.data.opName = 'updated';
+ $config.data.opName = 'updated';
- var states = (function() {
+ var states = (function() {
- function update(db, collName) {
- // Update the counter field to avoid matching the same document again.
- var res = db.runCommand({
- findAndModify: db[collName].getName(),
- query: {counter: 0},
- sort: {rand: -1},
- update: {$inc: {counter: 1}}, new: false
- });
- assertAlways.commandWorked(res);
+ function update(db, collName) {
+ // Update the counter field to avoid matching the same document again.
+ var res = db.runCommand({
+ findAndModify: db[collName].getName(),
+ query: {counter: 0},
+ sort: {rand: -1},
+ update: {$inc: {counter: 1}},
+ new: false
+ });
+ assertAlways.commandWorked(res);
- var doc = res.value;
- if (isMongod(db) && supportsDocumentLevelConcurrency(db)) {
- // Storage engines which do not support document-level concurrency will not
- // automatically retry if there was a conflict, so it is expected that it may
- // return null in the case of a conflict. All other storage engines should
- // automatically retry the operation, and thus should never return null.
- assertWhenOwnColl.neq(
- doc,
- null,
- 'findAndModify should have found and updated a matching document');
- }
- if (doc !== null) {
- this.saveDocId(db, collName, doc._id);
- }
+ var doc = res.value;
+ if (isMongod(db) && supportsDocumentLevelConcurrency(db)) {
+ // Storage engines which do not support document-level concurrency will not
+ // automatically retry if there was a conflict, so it is expected that it may
+ // return null in the case of a conflict. All other storage engines should
+ // automatically retry the operation, and thus should never return null.
+ assertWhenOwnColl.neq(
+ doc, null, 'findAndModify should have found and updated a matching document');
}
+ if (doc !== null) {
+ this.saveDocId(db, collName, doc._id);
+ }
+ }
- return {
- update: update
- };
+ return {update: update};
- })();
+ })();
- var transitions = {
- update: {update: 1}
- };
+ var transitions = {update: {update: 1}};
- $config.startState = 'update';
- $config.states = states;
- $config.transitions = transitions;
+ $config.startState = 'update';
+ $config.states = states;
+ $config.transitions = transitions;
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
index c6561829b26..cda9a494a61 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
@@ -16,16 +16,15 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/findAndModify_update_queue.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- // Use the workload name as the database name, since the workload
- // name is assumed to be unique.
- $config.data.uniqueDBName = 'findAndModify_update_queue_unindexed';
+ // Use the workload name as the database name, since the workload
+ // name is assumed to be unique.
+ $config.data.uniqueDBName = 'findAndModify_update_queue_unindexed';
- $config.data.getIndexSpecs = function getIndexSpecs() {
- return [];
- };
+ $config.data.getIndexSpecs = function getIndexSpecs() {
+ return [];
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
index 499e8324cae..e79b5322bc4 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js
@@ -10,10 +10,7 @@
*/
var $config = (function() {
- var data = {
- sort: false,
- shardKey: {tid: 1}
- };
+ var data = {sort: false, shardKey: {tid: 1}};
var states = (function() {
@@ -41,15 +38,13 @@ var $config = (function() {
var updatedValue = this.iter++;
// Use a query specification that does not match any existing documents
- var query = {
- _id: new ObjectId(),
- tid: this.tid
- };
+ var query = {_id: new ObjectId(), tid: this.tid};
var cmdObj = {
findandmodify: db[collName].getName(),
query: query,
- update: {$setOnInsert: {values: [updatedValue]}}, new: true,
+ update: {$setOnInsert: {values: [updatedValue]}},
+ new: true,
upsert: true
};
@@ -64,11 +59,12 @@ var $config = (function() {
assertAlways(doc !== null, 'a document should have been inserted');
assertAlways((function() {
- assertAlways.eq(this.tid, doc.tid);
- assertAlways(Array.isArray(doc.values), 'expected values to be an array');
- assertAlways.eq(1, doc.values.length);
- assertAlways.eq(updatedValue, doc.values[0]);
- }).bind(this));
+ assertAlways.eq(this.tid, doc.tid);
+ assertAlways(Array.isArray(doc.values),
+ 'expected values to be an array');
+ assertAlways.eq(1, doc.values.length);
+ assertAlways.eq(updatedValue, doc.values[0]);
+ }).bind(this));
}
function update(db, collName) {
@@ -77,7 +73,8 @@ var $config = (function() {
var cmdObj = {
findandmodify: db[collName].getName(),
query: {tid: this.tid},
- update: {$push: {values: updatedValue}}, new: true,
+ update: {$push: {values: updatedValue}},
+ new: true,
upsert: false
};
@@ -103,11 +100,7 @@ var $config = (function() {
}
}
- return {
- init: init,
- upsert: upsert,
- update: update
- };
+ return {init: init, upsert: upsert, update: update};
})();
@@ -117,12 +110,6 @@ var $config = (function() {
update: {upsert: 0.1, update: 0.9}
};
- return {
- threadCount: 20,
- iterations: 20,
- data: data,
- states: states,
- transitions: transitions
- };
+ return {threadCount: 20, iterations: 20, data: data, states: states, transitions: transitions};
})();
diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
index aad1fbc644c..8751e99fd21 100644
--- a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
+++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
@@ -13,12 +13,9 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/findAndModify_upsert.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.sort = {
- $natural: 1
- };
+ $config.data.sort = {$natural: 1};
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/group.js b/jstests/concurrency/fsm_workloads/group.js
index 3ccc909e0c9..a02d175f202 100644
--- a/jstests/concurrency/fsm_workloads/group.js
+++ b/jstests/concurrency/fsm_workloads/group.js
@@ -21,9 +21,7 @@ var $config = (function() {
initial: {bucketCount: 0, bucketSum: 0},
$keyf: function $keyf(doc) {
// place doc.rand into appropriate bucket
- return {
- bucket: Math.floor(doc.rand * 10) + 1
- };
+ return {bucket: Math.floor(doc.rand * 10) + 1};
},
$reduce: function $reduce(curr, result) {
result.bucketCount++;
@@ -63,15 +61,11 @@ var $config = (function() {
}.bind(this));
}
- return {
- group: group
- };
+ return {group: group};
})();
- var transitions = {
- group: {group: 1}
- };
+ var transitions = {group: {group: 1}};
function setup(db, collName, cluster) {
var bulk = db[collName].initializeUnorderedBulkOp();
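
The group workload buckets each document's rand value via $keyf and accumulates per-bucket counts and sums in $reduce. A sketch of the command object it builds (generateGroupCmdObj lives in group.js; the ns below is illustrative, and the group command was current for this era of the server):

// rand is uniform in [0, 1), so Math.floor(rand * 10) + 1 lands in buckets 1..10.
var cmdObj = {
    group: {
        ns: 'group_sketch',
        initial: {bucketCount: 0, bucketSum: 0},
        $keyf: function $keyf(doc) {
            return {bucket: Math.floor(doc.rand * 10) + 1};
        },
        $reduce: function $reduce(curr, result) {
            result.bucketCount++;
            result.bucketSum += curr.rand;
        }
    }
};
var res = db.runCommand(cmdObj);
assert.commandWorked(res);
// res.retval holds one accumulator document per distinct bucket.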
diff --git a/jstests/concurrency/fsm_workloads/group_cond.js b/jstests/concurrency/fsm_workloads/group_cond.js
index 1ab6aa827c6..03a77993578 100644
--- a/jstests/concurrency/fsm_workloads/group_cond.js
+++ b/jstests/concurrency/fsm_workloads/group_cond.js
@@ -16,29 +16,25 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/group.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
- assertAlways.commandWorked(db[collName].ensureIndex({rand: 1}));
- };
+var $config = extendWorkload($config, function($config, $super) {
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
+ assertAlways.commandWorked(db[collName].ensureIndex({rand: 1}));
+ };
- $config.states.group = function group(db, collName) {
- var cmdObj = this.generateGroupCmdObj(collName);
- cmdObj.group.cond = {
- rand: {$gte: 0.5}
- };
- var res = db.runCommand(cmdObj);
- assertWhenOwnColl.commandWorked(res);
+ $config.states.group = function group(db, collName) {
+ var cmdObj = this.generateGroupCmdObj(collName);
+ cmdObj.group.cond = {rand: {$gte: 0.5}};
+ var res = db.runCommand(cmdObj);
+ assertWhenOwnColl.commandWorked(res);
- assertWhenOwnColl.lte(res.count, this.numDocs);
- assertWhenOwnColl.lte(res.keys, 5);
- assertWhenOwnColl(function() {
- assertWhenOwnColl.lte(res.retval.length, 5);
- assertWhenOwnColl.eq(this.sumBucketCount(res.retval), res.count);
- }.bind(this));
- };
+ assertWhenOwnColl.lte(res.count, this.numDocs);
+ assertWhenOwnColl.lte(res.keys, 5);
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.lte(res.retval.length, 5);
+ assertWhenOwnColl.eq(this.sumBucketCount(res.retval), res.count);
+ }.bind(this));
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_1char.js b/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
index 5330bd9191e..54fe0662cb4 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
@@ -10,18 +10,17 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.indexedField = 'indexed_insert_1char';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ $config.data.indexedField = 'indexed_insert_1char';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.indexedValue = String.fromCharCode(33 + this.tid);
- };
+ this.indexedValue = String.fromCharCode(33 + this.tid);
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_2d.js b/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
index 674f229f53b..a461f9cb310 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_2d.js
@@ -10,48 +10,46 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.indexedField = 'indexed_insert_2d';
- // Remove the shard key for 2d indexes, as they are not supported
- delete $config.data.shardKey;
+ $config.data.indexedField = 'indexed_insert_2d';
+ // Remove the shard key for 2d indexes, as they are not supported
+ delete $config.data.shardKey;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- assertAlways.lt(this.tid, 1 << 16); // assume tid is a 16 bit nonnegative int
- // split the tid into the odd bits and the even bits
- // for example:
- // tid: 57 = 00111001
- // even: 0 1 0 1 = 5
- // odd: 0 1 1 0 = 6
- // This lets us turn every tid into a unique pair of numbers within the range [0, 255].
- // The pairs are then normalized to have valid longitude and latitude values.
- var oddBits = 0;
- var evenBits = 0;
- for (var i = 0; i < 16; ++i) {
- if (this.tid & 1 << i) {
- if (i % 2 === 0) {
- // i is even
- evenBits |= 1 << (i / 2);
- } else {
- // i is odd
- oddBits |= 1 << (i / 2);
- }
+ assertAlways.lt(this.tid, 1 << 16); // assume tid is a 16 bit nonnegative int
+ // split the tid into the odd bits and the even bits
+ // for example:
+ // tid: 57 = 00111001
+ // even: 0 1 0 1 = 5
+ // odd: 0 1 1 0 = 6
+ // This lets us turn every tid into a unique pair of numbers within the range [0, 255].
+ // The pairs are then normalized to have valid longitude and latitude values.
+ var oddBits = 0;
+ var evenBits = 0;
+ for (var i = 0; i < 16; ++i) {
+ if (this.tid & 1 << i) {
+ if (i % 2 === 0) {
+ // i is even
+ evenBits |= 1 << (i / 2);
+ } else {
+ // i is odd
+ oddBits |= 1 << (i / 2);
}
}
- assertAlways.lt(oddBits, 256);
- assertAlways.lt(evenBits, 256);
- this.indexedValue = [(evenBits - 128) / 2, (oddBits - 128) / 2];
- };
+ }
+ assertAlways.lt(oddBits, 256);
+ assertAlways.lt(evenBits, 256);
+ this.indexedValue = [(evenBits - 128) / 2, (oddBits - 128) / 2];
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- var ixSpec = {};
- ixSpec[this.indexedField] = '2d';
- return ixSpec;
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ var ixSpec = {};
+ ixSpec[this.indexedField] = '2d';
+ return ixSpec;
+ };
- return $config;
- });
+ return $config;
+});
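
The odd/even bit split above maps every 16-bit tid to a unique coordinate pair. A standalone illustration of the same computation, runnable in the mongo shell:

// Split a 16-bit tid into its even-position and odd-position bits, as the
// init state above does; for tid 57 = 00111001 this gives even 5, odd 6.
function splitTid(tid) {
    var oddBits = 0;
    var evenBits = 0;
    for (var i = 0; i < 16; ++i) {
        if (tid & (1 << i)) {
            if (i % 2 === 0) {
                evenBits |= 1 << (i / 2);
            } else {
                oddBits |= 1 << ((i - 1) / 2);
            }
        }
    }
    return {even: evenBits, odd: oddBits};
}

var bits = splitTid(57);
assert.eq(5, bits.even);
assert.eq(6, bits.odd);
// Normalized to a valid point as above: [(5 - 128) / 2, (6 - 128) / 2]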
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js b/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
index a0fb5613ef6..40134e97840 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
@@ -10,16 +10,15 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_2d.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.indexedField = 'indexed_insert_2dsphere';
+ $config.data.indexedField = 'indexed_insert_2dsphere';
- $config.data.getIndexSpec = function getIndexSpec() {
- var ixSpec = {};
- ixSpec[this.indexedField] = '2dsphere';
- return ixSpec;
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ var ixSpec = {};
+ ixSpec[this.indexedField] = '2dsphere';
+ return ixSpec;
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base.js b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
index 59dcab4f0a0..b7a52aae08a 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_base.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_base.js
@@ -69,11 +69,7 @@ var $config = (function() {
}
};
- var transitions = {
- init: {insert: 1},
- insert: {find: 1},
- find: {insert: 1}
- };
+ var transitions = {init: {insert: 1}, insert: {find: 1}, find: {insert: 1}};
function setup(db, collName, cluster) {
var res = db[collName].ensureIndex(this.getIndexSpec());
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_compound.js b/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
index a32fc084215..c704b6dd0bc 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_compound.js
@@ -10,31 +10,29 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
- };
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+ };
- $config.data.getDoc = function getDoc() {
- return {
- indexed_insert_compound_x: this.tid & 0x0f, // lowest 4 bits
- indexed_insert_compound_y: this.tid >> 4, // high bits
- indexed_insert_compound_z: String.fromCharCode(33 + this.tid)
- };
- };
+ $config.data.getDoc = function getDoc() {
+ return {
+ indexed_insert_compound_x: this.tid & 0x0f, // lowest 4 bits
+ indexed_insert_compound_y: this.tid >> 4, // high bits
+ indexed_insert_compound_z: String.fromCharCode(33 + this.tid)
+ };
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- return {
- indexed_insert_compound_x: 1,
- indexed_insert_compound_y: 1,
- indexed_insert_compound_z: 1
- };
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ return {
+ indexed_insert_compound_x: 1,
+ indexed_insert_compound_y: 1,
+ indexed_insert_compound_z: 1
+ };
+ };
- $config.data.shardKey = $config.data.getIndexSpec();
+ $config.data.shardKey = $config.data.getIndexSpec();
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_eval.js b/jstests/concurrency/fsm_workloads/indexed_insert_eval.js
index ccb3696ffeb..f35d4565e3b 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_eval.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_eval.js
@@ -10,26 +10,24 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.nolock = false;
+ $config.data.nolock = false;
- $config.states.insert = function insert(db, collName) {
- var evalResult = db.runCommand({
- eval: function(collName, doc) {
- var insertResult = db[collName].insert(doc);
- return tojson(insertResult);
- },
- args: [collName, this.getDoc()],
- nolock: this.nolock
- });
- assertAlways.commandWorked(evalResult);
- var insertResult = JSON.parse(evalResult.retval);
- assertAlways.eq(1, insertResult.nInserted, tojson(insertResult));
- this.nInserted += this.docsPerInsert;
- };
+ $config.states.insert = function insert(db, collName) {
+ var evalResult = db.runCommand({
+ eval: function(collName, doc) {
+ var insertResult = db[collName].insert(doc);
+ return tojson(insertResult);
+ },
+ args: [collName, this.getDoc()],
+ nolock: this.nolock
+ });
+ assertAlways.commandWorked(evalResult);
+ var insertResult = JSON.parse(evalResult.retval);
+ assertAlways.eq(1, insertResult.nInserted, tojson(insertResult));
+ this.nInserted += this.docsPerInsert;
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js b/jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
index 33e8ef41d56..f87ba5da790 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_eval_nolock.js
@@ -10,10 +10,9 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_eval.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.nolock = true;
+ $config.data.nolock = true;
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
index ddf2a0c0ead..b486120185d 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js
@@ -10,48 +10,47 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
-
- $config.data.indexedField = 'indexed_insert_heterogeneous';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
-
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
-
- // prefix str with zeroes to make it have length len
- function pad(len, str) {
- var padding = new Array(len + 1).join('0');
- return (padding + str).slice(-len);
- }
-
- function makeOID(tid) {
- var str = pad(24, tid.toString(16));
- return new ObjectId(str);
- }
-
- function makeDate(tid) {
- var d = new ISODate('2000-01-01T00:00:00.000Z');
- // setSeconds(n) where n >= 60 will just cause the minutes,
- // hours, etc to increase,
- // so this produces a unique date for each tid
- d.setSeconds(tid);
- return d;
- }
-
- var choices = [
- this.tid, // int
- this.tid.toString(), // string
- this.tid * 0.0001, // float
- {tid: this.tid}, // subdocument
- makeOID(this.tid), // objectid
- makeDate(this.tid), // date
- new Function('', 'return ' + this.tid + ';') // function
- ];
-
- this.indexedValue = choices[this.tid % choices.length];
- };
-
- return $config;
- });
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.data.indexedField = 'indexed_insert_heterogeneous';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ // prefix str with zeroes to make it have length len
+ function pad(len, str) {
+ var padding = new Array(len + 1).join('0');
+ return (padding + str).slice(-len);
+ }
+
+ function makeOID(tid) {
+ var str = pad(24, tid.toString(16));
+ return new ObjectId(str);
+ }
+
+ function makeDate(tid) {
+ var d = new ISODate('2000-01-01T00:00:00.000Z');
+        // setSeconds(n) where n >= 60 will just cause the minutes, hours,
+        // etc. to increase, so this produces a unique date for each tid
+ d.setSeconds(tid);
+ return d;
+ }
+
+ var choices = [
+ this.tid, // int
+ this.tid.toString(), // string
+ this.tid * 0.0001, // float
+ {tid: this.tid}, // subdocument
+ makeOID(this.tid), // objectid
+ makeDate(this.tid), // date
+ new Function('', 'return ' + this.tid + ';') // function
+ ];
+
+ this.indexedValue = choices[this.tid % choices.length];
+ };
+
+ return $config;
+});
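
The pad and makeDate helpers above guarantee per-tid uniqueness: pad left-fills with zeroes to build a valid 24-character ObjectId string, and setSeconds rolls overflow into minutes and hours. A quick check of both in the mongo shell:

// pad() left-fills with zeroes; makeOID needs exactly 24 hex characters.
function pad(len, str) {
    var padding = new Array(len + 1).join('0');
    return (padding + str).slice(-len);
}
assert.eq('00000000000000000000002a', pad(24, (42).toString(16)));

// setSeconds(4242) rolls over: 4242 seconds = 1h 10m 42s past midnight.
var d = new ISODate('2000-01-01T00:00:00.000Z');
d.setSeconds(4242);
assert.eq('2000-01-01T01:10:42.000Z', d.toISOString());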
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_large.js b/jstests/concurrency/fsm_workloads/indexed_insert_large.js
index d7bedb22ade..cb2dbf58b21 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_large.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_large.js
@@ -11,38 +11,35 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.indexedField = 'indexed_insert_large';
+ $config.data.indexedField = 'indexed_insert_large';
- // Remove the shard key, since it cannot be greater than 512 bytes
- delete $config.data.shardKey;
+ // Remove the shard key, since it cannot be greater than 512 bytes
+ delete $config.data.shardKey;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- // "The total size of an index entry, which can include structural overhead depending on
- // the
- // BSON type, must be less than 1024 bytes."
- // http://docs.mongodb.org/manual/reference/limits/
- var maxIndexedSize = 1023;
+        // "The total size of an index entry, which can include structural overhead
+        // depending on the BSON type, must be less than 1024 bytes."
+ // http://docs.mongodb.org/manual/reference/limits/
+ var maxIndexedSize = 1023;
- var bsonOverhead = Object.bsonsize({'': ''});
+ var bsonOverhead = Object.bsonsize({'': ''});
- var bigstr = new Array(maxIndexedSize + 1).join('x');
+ var bigstr = new Array(maxIndexedSize + 1).join('x');
- // prefix the big string with tid to make it unique,
- // then trim it down so that it plus bson overhead is maxIndexedSize
+ // prefix the big string with tid to make it unique,
+ // then trim it down so that it plus bson overhead is maxIndexedSize
- this.indexedValue = (this.tid + bigstr).slice(0, maxIndexedSize - bsonOverhead);
+ this.indexedValue = (this.tid + bigstr).slice(0, maxIndexedSize - bsonOverhead);
- assertAlways.eq(
- maxIndexedSize,
- Object.bsonsize({'': this.indexedValue}),
- 'buggy test: the inserted docs will not have the expected index-key size');
- };
+ assertAlways.eq(maxIndexedSize,
+ Object.bsonsize({'': this.indexedValue}),
+ 'buggy test: the inserted docs will not have the expected index-key size');
+ };
- return $config;
- });
+ return $config;
+});
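
The 1023-byte target above is the documented 1024-byte index-entry limit minus one; subtracting Object.bsonsize({'': ''}) then accounts for the BSON wrapping of the probe document. A worked version of the arithmetic, assuming the mongo shell:

// BSON overhead of {'': ''}: 4-byte doc length + 1 type byte + 1-byte empty
// field name + 4-byte string length + 2 NUL terminators = 12 bytes.
var bsonOverhead = Object.bsonsize({'': ''});
assert.eq(12, bsonOverhead);

// So the indexed string itself may be 1023 - 12 = 1011 characters long.
var maxIndexedSize = 1023;
var indexedValue = new Array(maxIndexedSize - bsonOverhead + 1).join('x');
assert.eq(1011, indexedValue.length);
assert.eq(maxIndexedSize, Object.bsonsize({'': indexedValue}));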
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
index 3c8c2f70223..143f548619a 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js
@@ -10,16 +10,14 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- // TODO: make this field name even longer?
- var length = 100;
- var prefix = 'indexed_insert_long_fieldname_';
- $config.data.indexedField =
- prefix + new Array(length - prefix.length + 1).join('x');
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ // TODO: make this field name even longer?
+ var length = 100;
+ var prefix = 'indexed_insert_long_fieldname_';
+ $config.data.indexedField = prefix + new Array(length - prefix.length + 1).join('x');
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js b/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
index e49b5356760..34c28db22eb 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js
@@ -10,21 +10,19 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.indexedField = 'indexed_insert_multikey';
- // Remove the shard key, since it cannot be a multikey index
- delete $config.data.shardKey;
+ $config.data.indexedField = 'indexed_insert_multikey';
+ // Remove the shard key, since it cannot be a multikey index
+ delete $config.data.shardKey;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.indexedValue = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].map(function(n) {
- return this.tid * 10 + n;
- }.bind(this));
- };
+ this.indexedValue = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].map(function(n) {
+ return this.tid * 10 + n;
+ }.bind(this));
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
index eb70c850488..38998cd9f59 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
@@ -11,30 +11,28 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.indexedField = 'indexed_insert_ordered_bulk';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ $config.data.indexedField = 'indexed_insert_ordered_bulk';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- $config.states.insert = function insert(db, collName) {
- var doc = {};
- doc[this.indexedField] = this.indexedValue;
+ $config.states.insert = function insert(db, collName) {
+ var doc = {};
+ doc[this.indexedField] = this.indexedValue;
- var bulk = db[collName].initializeOrderedBulkOp();
- for (var i = 0; i < this.docsPerInsert; ++i) {
- bulk.insert(doc);
- }
- var res = bulk.execute();
- assertAlways.writeOK(res);
- assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
+ var bulk = db[collName].initializeOrderedBulkOp();
+ for (var i = 0; i < this.docsPerInsert; ++i) {
+ bulk.insert(doc);
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
- this.nInserted += this.docsPerInsert;
- };
+ this.nInserted += this.docsPerInsert;
+ };
- $config.data.docsPerInsert = 15;
+ $config.data.docsPerInsert = 15;
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text.js b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
index ab38d07098f..82e0feb09a8 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_text.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_text.js
@@ -32,10 +32,7 @@ var $config = (function() {
}
};
- var transitions = {
- init: {insert: 1},
- insert: {insert: 1}
- };
+ var transitions = {init: {insert: 1}, insert: {insert: 1}};
function setup(db, collName, cluster) {
var ixSpec = {};
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js b/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
index 0cc7b590684..b527ef016f8 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js
@@ -8,30 +8,27 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_text.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
- };
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+ };
- $config.data.getRandomTextSnippet = function getRandomTextSnippet() {
- var len = Random.randInt(5) +
- 1; // ensure we always add some text, not just empty array
- var textArr = [];
- for (var i = 0; i < len; ++i) {
- textArr.push($super.data.getRandomTextSnippet.call(this, arguments));
- }
- return textArr;
- };
+ $config.data.getRandomTextSnippet = function getRandomTextSnippet() {
+ var len = Random.randInt(5) + 1; // ensure we always add some text, not just empty array
+ var textArr = [];
+ for (var i = 0; i < len; ++i) {
+ textArr.push($super.data.getRandomTextSnippet.call(this, arguments));
+ }
+ return textArr;
+ };
- // SERVER-21291: Reduce the thread count to alleviate PV1 failovers on
- // Windows DEBUG hosts.
- $config.threadCount = 5;
+ // SERVER-21291: Reduce the thread count to alleviate PV1 failovers on
+ // Windows DEBUG hosts.
+ $config.threadCount = 5;
- // Remove the shard key, since it cannot be a multikey index
- delete $config.data.shardKey;
+ // Remove the shard key, since it cannot be a multikey index
+ delete $config.data.shardKey;
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
index 90aa6d3baf7..ba95fa6e778 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
@@ -24,10 +24,7 @@ var $config = (function() {
}
};
- var transitions = {
- init: {insert: 1},
- insert: {insert: 1}
- };
+ var transitions = {init: {insert: 1}, insert: {insert: 1}};
function setup(db, collName, cluster) {
var res = db[collName].ensureIndex({indexed_insert_ttl: 1},
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
index aa64e8d21e5..3c1ea8f0ea0 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
@@ -11,30 +11,28 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.indexedField = 'indexed_insert_unordered_bulk';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ $config.data.indexedField = 'indexed_insert_unordered_bulk';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- $config.states.insert = function insert(db, collName) {
- var doc = {};
- doc[this.indexedField] = this.indexedValue;
+ $config.states.insert = function insert(db, collName) {
+ var doc = {};
+ doc[this.indexedField] = this.indexedValue;
- var bulk = db[collName].initializeUnorderedBulkOp();
- for (var i = 0; i < this.docsPerInsert; ++i) {
- bulk.insert(doc);
- }
- var res = bulk.execute();
- assertAlways.writeOK(res);
- assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var i = 0; i < this.docsPerInsert; ++i) {
+ bulk.insert(doc);
+ }
+ var res = bulk.execute();
+ assertAlways.writeOK(res);
+ assertAlways.eq(this.docsPerInsert, res.nInserted, tojson(res));
- this.nInserted += this.docsPerInsert;
- };
+ this.nInserted += this.docsPerInsert;
+ };
- $config.data.docsPerInsert = 15;
+ $config.data.docsPerInsert = 15;
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js b/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
index cc26d364ace..bc1b65e9597 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
@@ -13,34 +13,32 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.indexedField = 'indexed_insert_upsert';
- $config.data.shardKey = {};
- $config.data.shardKey[$config.data.indexedField] = 1;
+ $config.data.indexedField = 'indexed_insert_upsert';
+ $config.data.shardKey = {};
+ $config.data.shardKey[$config.data.indexedField] = 1;
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.counter = 0;
- };
+ this.counter = 0;
+ };
- $config.states.insert = function insert(db, collName) {
- var doc = this.getDoc();
- doc.counter = this.counter++; // ensure doc is unique to guarantee an upsert occurs
- doc._id = new ObjectId(); // _id is required for shard targeting
+ $config.states.insert = function insert(db, collName) {
+ var doc = this.getDoc();
+ doc.counter = this.counter++; // ensure doc is unique to guarantee an upsert occurs
+ doc._id = new ObjectId(); // _id is required for shard targeting
- var res = db[collName].update(doc, {$inc: {unused: 0}}, {upsert: true});
- assertAlways.eq(0, res.nMatched, tojson(res));
- assertAlways.eq(1, res.nUpserted, tojson(res));
- if (db.getMongo().writeMode() === 'commands') {
- assertAlways.eq(0, res.nModified, tojson(res));
- }
+ var res = db[collName].update(doc, {$inc: {unused: 0}}, {upsert: true});
+ assertAlways.eq(0, res.nMatched, tojson(res));
+ assertAlways.eq(1, res.nUpserted, tojson(res));
+ if (db.getMongo().writeMode() === 'commands') {
+ assertAlways.eq(0, res.nModified, tojson(res));
+ }
- this.nInserted += this.docsPerInsert;
- };
+ this.nInserted += this.docsPerInsert;
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_where.js b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
index 14408c26f69..e5d2a98b8c5 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_where.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_where.js
@@ -15,9 +15,7 @@ var $config = (function() {
documentsToInsert: 100,
insertedDocuments: 0,
generateDocumentToInsert: function generateDocumentToInsert() {
- return {
- tid: this.tid
- };
+ return {tid: this.tid};
},
shardKey: {tid: 1}
};
@@ -43,10 +41,7 @@ var $config = (function() {
}
};
- var transitions = {
- insert: {insert: 0.2, query: 0.8},
- query: {insert: 0.8, query: 0.2}
- };
+ var transitions = {insert: {insert: 0.2, query: 0.8}, query: {insert: 0.8, query: 0.2}};
var setup = function setup(db, collName, cluster) {
assertAlways.commandWorked(db[collName].ensureIndex({tid: 1}));
diff --git a/jstests/concurrency/fsm_workloads/list_indexes.js b/jstests/concurrency/fsm_workloads/list_indexes.js
index 6bcdb8ba96c..e62225eaa51 100644
--- a/jstests/concurrency/fsm_workloads/list_indexes.js
+++ b/jstests/concurrency/fsm_workloads/list_indexes.js
@@ -26,10 +26,7 @@ var $config = (function() {
assertWhenOwnColl.gte(cursor.itcount(), 0);
}
- return {
- modifyIndices: modifyIndices,
- listIndices: listIndices
- };
+ return {modifyIndices: modifyIndices, listIndices: listIndices};
})();
var transitions = {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_drop.js b/jstests/concurrency/fsm_workloads/map_reduce_drop.js
index ef03805dffd..48398b4aae2 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_drop.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_drop.js
@@ -80,11 +80,7 @@ var $config = (function() {
}
}
- return {
- dropColl: dropColl,
- dropDB: dropDB,
- mapReduce: mapReduce
- };
+ return {dropColl: dropColl, dropDB: dropDB, mapReduce: mapReduce};
})();
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_inline.js b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
index 1633ce0cc19..ade5a8aa369 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_inline.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_inline.js
@@ -38,12 +38,7 @@ var $config = (function() {
return reducedValue;
}
- var data = {
- numDocs: 2000,
- mapper: mapper,
- reducer: reducer,
- finalizer: finalizer
- };
+ var data = {numDocs: 2000, mapper: mapper, reducer: reducer, finalizer: finalizer};
var states = (function() {
@@ -53,26 +48,17 @@ var $config = (function() {
}
function mapReduce(db, collName) {
- var options = {
- finalize: this.finalizer,
- out: {inline: 1}
- };
+ var options = {finalize: this.finalizer, out: {inline: 1}};
var res = db[collName].mapReduce(this.mapper, this.reducer, options);
assertAlways.commandWorked(res);
}
- return {
- init: init,
- mapReduce: mapReduce
- };
+ return {init: init, mapReduce: mapReduce};
})();
- var transitions = {
- init: {mapReduce: 1},
- mapReduce: {mapReduce: 1}
- };
+ var transitions = {init: {mapReduce: 1}, mapReduce: {mapReduce: 1}};
function makeDoc(keyLimit, valueLimit) {
return {
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge.js b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
index 4f96c229eac..f12e7f24144 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_merge.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge.js
@@ -16,49 +16,44 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- // Use the workload name as the database name,
- // since the workload name is assumed to be unique.
- var uniqueDBName = 'map_reduce_merge';
+ // Use the workload name as the database name,
+ // since the workload name is assumed to be unique.
+ var uniqueDBName = 'map_reduce_merge';
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.outDBName = db.getName() + uniqueDBName;
- };
+ this.outDBName = db.getName() + uniqueDBName;
+ };
- $config.states.mapReduce = function mapReduce(db, collName) {
- var outDB = db.getSiblingDB(this.outDBName);
- var fullName = outDB[collName].getFullName();
- assertAlways(outDB[collName].exists() !== null,
- "output collection '" + fullName + "' should exist");
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var outDB = db.getSiblingDB(this.outDBName);
+ var fullName = outDB[collName].getFullName();
+ assertAlways(outDB[collName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
- // Have all threads combine their results into the same collection
- var options = {
- finalize: this.finalizer,
- out: {merge: collName, db: this.outDBName}
- };
+ // Have all threads combine their results into the same collection
+ var options = {finalize: this.finalizer, out: {merge: collName, db: this.outDBName}};
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
- var outDB = db.getSiblingDB(db.getName() + uniqueDBName);
- assertAlways.commandWorked(outDB.createCollection(collName));
- };
+ var outDB = db.getSiblingDB(db.getName() + uniqueDBName);
+ assertAlways.commandWorked(outDB.createCollection(collName));
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- var outDB = db.getSiblingDB(db.getName() + uniqueDBName);
- var res = outDB.dropDatabase();
- assertAlways.commandWorked(res);
- assertAlways.eq(db.getName() + uniqueDBName, res.dropped);
- };
+ $config.teardown = function teardown(db, collName, cluster) {
+ var outDB = db.getSiblingDB(db.getName() + uniqueDBName);
+ var res = outDB.dropDatabase();
+ assertAlways.commandWorked(res);
+ assertAlways.eq(db.getName() + uniqueDBName, res.dropped);
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js b/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
index 8f0804b365d..9522854d566 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
@@ -17,45 +17,43 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropDatabases
-var $config =
- extendWorkload($config,
- function($config, $super) {
-
- // Use the workload name as a prefix for the database name,
- // since the workload name is assumed to be unique.
- var prefix = 'map_reduce_merge_nonatomic';
-
- function uniqueDBName(prefix, tid) {
- return prefix + tid;
- }
-
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
-
- this.outDBName = db.getName() + uniqueDBName(prefix, this.tid);
- var outDB = db.getSiblingDB(this.outDBName);
- assertAlways.commandWorked(outDB.createCollection(collName));
- };
-
- $config.states.mapReduce = function mapReduce(db, collName) {
- var outDB = db.getSiblingDB(this.outDBName);
- var fullName = outDB[collName].getFullName();
- assertAlways(outDB[collName].exists() !== null,
- "output collection '" + fullName + "' should exist");
-
- var options = {
- finalize: this.finalizer,
- out: {merge: collName, db: this.outDBName, nonAtomic: true}
- };
-
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + db.getName() + prefix + '\\d+$');
- dropDatabases(db, pattern);
- };
-
- return $config;
- });
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Use the workload name as a prefix for the database name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_merge_nonatomic';
+
+ function uniqueDBName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outDBName = db.getName() + uniqueDBName(prefix, this.tid);
+ var outDB = db.getSiblingDB(this.outDBName);
+ assertAlways.commandWorked(outDB.createCollection(collName));
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var outDB = db.getSiblingDB(this.outDBName);
+ var fullName = outDB[collName].getFullName();
+ assertAlways(outDB[collName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: {merge: collName, db: this.outDBName, nonAtomic: true}
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + db.getName() + prefix + '\\d+$');
+ dropDatabases(db, pattern);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
index b11ccf3614f..3acd8d3b64c 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js
@@ -15,43 +15,38 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config =
- extendWorkload($config,
- function($config, $super) {
-
- // Use the workload name as a prefix for the collection name,
- // since the workload name is assumed to be unique.
- var prefix = 'map_reduce_reduce';
-
- function uniqueCollectionName(prefix, tid) {
- return prefix + tid;
- }
-
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
-
- this.outCollName = uniqueCollectionName(prefix, this.tid);
- assertAlways.commandWorked(db.createCollection(this.outCollName));
- };
-
- $config.states.mapReduce = function mapReduce(db, collName) {
- var fullName = db[this.outCollName].getFullName();
- assertAlways(db[this.outCollName].exists() !== null,
- "output collection '" + fullName + "' should exist");
-
- var options = {
- finalize: this.finalizer,
- out: {reduce: this.outCollName}
- };
-
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + prefix + '\\d+$');
- dropCollections(db, pattern);
- };
-
- return $config;
- });
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_reduce';
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outCollName = uniqueCollectionName(prefix, this.tid);
+ assertAlways.commandWorked(db.createCollection(this.outCollName));
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var fullName = db[this.outCollName].getFullName();
+ assertAlways(db[this.outCollName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ var options = {finalize: this.finalizer, out: {reduce: this.outCollName}};
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropCollections(db, pattern);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
index 5953c7c2a07..6ffdf20f85d 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js
@@ -17,44 +17,39 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- // Use the workload name as the collection name,
- // since the workload name is assumed to be unique.
- var uniqueCollectionName = 'map_reduce_reduce_nonatomic';
+ // Use the workload name as the collection name,
+ // since the workload name is assumed to be unique.
+ var uniqueCollectionName = 'map_reduce_reduce_nonatomic';
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
- this.outCollName = uniqueCollectionName;
- };
+ this.outCollName = uniqueCollectionName;
+ };
- $config.states.mapReduce = function mapReduce(db, collName) {
- var fullName = db[this.outCollName].getFullName();
- assertAlways(db[this.outCollName].exists() !== null,
- "output collection '" + fullName + "' should exist");
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var fullName = db[this.outCollName].getFullName();
+ assertAlways(db[this.outCollName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
- // Have all threads combine their results into the same collection
- var options = {
- finalize: this.finalizer,
- out: {reduce: this.outCollName, nonAtomic: true}
- };
+ // Have all threads combine their results into the same collection
+ var options = {finalize: this.finalizer, out: {reduce: this.outCollName, nonAtomic: true}};
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
- assertAlways.commandWorked(db.createCollection(uniqueCollectionName));
- };
+ assertAlways.commandWorked(db.createCollection(uniqueCollectionName));
+ };
- $config.teardown = function teardown(db, collName, cluster) {
- assertAlways(db[uniqueCollectionName].drop());
- };
+ $config.teardown = function teardown(db, collName, cluster) {
+ assertAlways(db[uniqueCollectionName].drop());
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace.js b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
index ce268bf5e20..3db3685a7ad 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace.js
@@ -15,45 +15,43 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config =
- extendWorkload($config,
- function($config, $super) {
-
- // Use the workload name as a prefix for the collection name,
- // since the workload name is assumed to be unique.
- var prefix = 'map_reduce_replace';
-
- function uniqueCollectionName(prefix, tid) {
- return prefix + tid;
- }
-
- $config.states.init = function init(db, collName) {
- $super.states.init.apply(this, arguments);
-
- this.outCollName = uniqueCollectionName(prefix, this.tid);
- assertAlways.commandWorked(db.createCollection(this.outCollName));
- };
-
- $config.states.mapReduce = function mapReduce(db, collName) {
- var fullName = db[this.outCollName].getFullName();
- assertAlways(db[this.outCollName].exists() !== null,
- "output collection '" + fullName + "' should exist");
-
- var options = {
- finalize: this.finalizer,
- out: {replace: this.outCollName},
- query: {key: {$exists: true}, value: {$exists: true}},
- sort: {_id: -1} // sort key must be an existing index
- };
-
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- };
-
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + prefix + '\\d+$');
- dropCollections(db, pattern);
- };
-
- return $config;
- });
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_replace';
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.init = function init(db, collName) {
+ $super.states.init.apply(this, arguments);
+
+ this.outCollName = uniqueCollectionName(prefix, this.tid);
+ assertAlways.commandWorked(db.createCollection(this.outCollName));
+ };
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var fullName = db[this.outCollName].getFullName();
+ assertAlways(db[this.outCollName].exists() !== null,
+ "output collection '" + fullName + "' should exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: {replace: this.outCollName},
+ query: {key: {$exists: true}, value: {$exists: true}},
+ sort: {_id: -1} // sort key must be an existing index
+ };
+
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropCollections(db, pattern);
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
index 3bfdb6086de..cb802da7a90 100644
--- a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
+++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
@@ -14,39 +14,37 @@ load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWo
load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config
load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropCollections
-var $config = extendWorkload(
- $config,
- function($config, $super) {
-
- // Use the workload name as a prefix for the collection name,
- // since the workload name is assumed to be unique.
- var prefix = 'map_reduce_replace_nonexistent';
-
- function uniqueCollectionName(prefix, tid) {
- return prefix + tid;
- }
-
- $config.states.mapReduce = function mapReduce(db, collName) {
- var outCollName = uniqueCollectionName(prefix, this.tid);
- var fullName = db[outCollName].getFullName();
- assertAlways.isnull(db[outCollName].exists(),
- "output collection '" + fullName + "' should not exist");
-
- var options = {
- finalize: this.finalizer,
- out: {replace: outCollName},
- query: {key: {$exists: true}, value: {$exists: true}}
- };
-
- var res = db[collName].mapReduce(this.mapper, this.reducer, options);
- assertAlways.commandWorked(res);
- assertAlways(db[outCollName].drop());
+var $config = extendWorkload($config, function($config, $super) {
+
+ // Use the workload name as a prefix for the collection name,
+ // since the workload name is assumed to be unique.
+ var prefix = 'map_reduce_replace_nonexistent';
+
+ function uniqueCollectionName(prefix, tid) {
+ return prefix + tid;
+ }
+
+ $config.states.mapReduce = function mapReduce(db, collName) {
+ var outCollName = uniqueCollectionName(prefix, this.tid);
+ var fullName = db[outCollName].getFullName();
+ assertAlways.isnull(db[outCollName].exists(),
+ "output collection '" + fullName + "' should not exist");
+
+ var options = {
+ finalize: this.finalizer,
+ out: {replace: outCollName},
+ query: {key: {$exists: true}, value: {$exists: true}}
};
- $config.teardown = function teardown(db, collName, cluster) {
- var pattern = new RegExp('^' + prefix + '\\d+$');
- dropCollections(db, pattern);
- };
+ var res = db[collName].mapReduce(this.mapper, this.reducer, options);
+ assertAlways.commandWorked(res);
+ assertAlways(db[outCollName].drop());
+ };
+
+ $config.teardown = function teardown(db, collName, cluster) {
+ var pattern = new RegExp('^' + prefix + '\\d+$');
+ dropCollections(db, pattern);
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
index abd1312b7c3..51ec0c3780b 100644
--- a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
+++ b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
@@ -40,10 +40,7 @@ var $config = (function() {
function count(db, collName) {
var coll = db.getSiblingDB(this.dbName)[collName];
- var cmdObj = {
- query: {a: 1, b: {$gt: Random.rand()}},
- limit: Random.randInt(10)
- };
+ var cmdObj = {query: {a: 1, b: {$gt: Random.rand()}}, limit: Random.randInt(10)};
// We can't use assertAlways.commandWorked here because the plan
// executor can be killed during the count.
@@ -61,17 +58,11 @@ var $config = (function() {
populateData(myDB, collName);
}
- return {
- count: count,
- dropDB: dropDB
- };
+ return {count: count, dropDB: dropDB};
})();
- var transitions = {
- count: {count: 0.95, dropDB: 0.05},
- dropDB: {count: 0.95, dropDB: 0.05}
- };
+ var transitions = {count: {count: 0.95, dropDB: 0.05}, dropDB: {count: 0.95, dropDB: 0.05}};
function setup(db, collName, cluster) {
var myDB = db.getSiblingDB(this.dbName);
diff --git a/jstests/concurrency/fsm_workloads/reindex.js b/jstests/concurrency/fsm_workloads/reindex.js
index cec33eddc5f..523e29789e0 100644
--- a/jstests/concurrency/fsm_workloads/reindex.js
+++ b/jstests/concurrency/fsm_workloads/reindex.js
@@ -62,9 +62,7 @@ var $config = (function() {
'inserted');
var coords = [[[-26, -26], [-26, 26], [26, 26], [26, -26], [-26, -26]]];
- var geoQuery = {
- geo: {$geoWithin: {$geometry: {type: 'Polygon', coordinates: coords}}}
- };
+ var geoQuery = {geo: {$geoWithin: {$geometry: {type: 'Polygon', coordinates: coords}}}};
// We can only perform a geo query when we own the collection and are sure a geo index
// is present. The same is true of text queries.
@@ -91,12 +89,7 @@ var $config = (function() {
assertAlways.commandWorked(res);
}
- return {
- init: init,
- createIndexes: createIndexes,
- reIndex: reIndex,
- query: query
- };
+ return {init: init, createIndexes: createIndexes, reIndex: reIndex, query: query};
})();
var transitions = {
diff --git a/jstests/concurrency/fsm_workloads/reindex_background.js b/jstests/concurrency/fsm_workloads/reindex_background.js
index 22db164ae6a..ff5dbc8d4d2 100644
--- a/jstests/concurrency/fsm_workloads/reindex_background.js
+++ b/jstests/concurrency/fsm_workloads/reindex_background.js
@@ -12,24 +12,22 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/reindex.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
- $config.data.prefix = 'reindex_background';
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.prefix = 'reindex_background';
- $config.states.createIndexes = function createIndexes(db, collName) {
- var coll = db[this.threadCollName];
+ $config.states.createIndexes = function createIndexes(db, collName) {
+ var coll = db[this.threadCollName];
- // The number of indexes created here is also stored in data.nIndexes
- var textResult = coll.ensureIndex({text: 'text'}, {background: true});
- assertAlways.commandWorked(textResult);
+ // The number of indexes created here is also stored in data.nIndexes
+ var textResult = coll.ensureIndex({text: 'text'}, {background: true});
+ assertAlways.commandWorked(textResult);
- var geoResult = coll.ensureIndex({geo: '2dsphere'}, {background: true});
- assertAlways.commandWorked(geoResult);
+ var geoResult = coll.ensureIndex({geo: '2dsphere'}, {background: true});
+ assertAlways.commandWorked(geoResult);
- var integerResult = coll.ensureIndex({integer: 1}, {background: true});
- assertAlways.commandWorked(integerResult);
- };
+ var integerResult = coll.ensureIndex({integer: 1}, {background: true});
+ assertAlways.commandWorked(integerResult);
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
index a57e61a44b0..63e179a0d2b 100644
--- a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
+++ b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
@@ -26,10 +26,7 @@ var $config = (function() {
}
};
- var transitions = {
- insert: {insert: 0.5, remove: 0.5},
- remove: {insert: 0.5, remove: 0.5}
- };
+ var transitions = {insert: {insert: 0.5, remove: 0.5}, remove: {insert: 0.5, remove: 0.5}};
return {
threadCount: 5,
diff --git a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
index d809b0be8e3..bfd64cd6790 100644
--- a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
+++ b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js
@@ -34,17 +34,8 @@ var $config = (function() {
}
};
- var transitions = {
- init: {count: 1},
- count: {remove: 1},
- remove: {remove: 0.825, count: 0.125}
- };
+ var transitions = {init: {count: 1}, count: {remove: 1}, remove: {remove: 0.825, count: 0.125}};
- return {
- threadCount: 10,
- iterations: 20,
- states: states,
- transitions: transitions
- };
+ return {threadCount: 10, iterations: 20, states: states, transitions: transitions};
})();
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document.js b/jstests/concurrency/fsm_workloads/remove_single_document.js
index 5f83e0f57f8..1bfdb2b6897 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document.js
@@ -23,9 +23,7 @@ var $config = (function() {
}
};
- var transitions = {
- remove: {remove: 1}
- };
+ var transitions = {remove: {remove: 1}};
function setup(db, collName, cluster) {
// insert enough documents so that each thread can remove exactly one per iteration
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document_eval.js b/jstests/concurrency/fsm_workloads/remove_single_document_eval.js
index e90eaa63114..3109c743e29 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document_eval.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document_eval.js
@@ -8,32 +8,30 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/remove_single_document.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.doRemove = function doRemove(db, collName, query, options) {
- var evalResult = db.runCommand({
- eval: function(f, collName, query, options) {
- return tojson(f(db, collName, query, options));
- },
- args: [$super.data.doRemove, collName, query, options],
- nolock: this.nolock
- });
- assertAlways.commandWorked(evalResult);
- var res = JSON.parse(evalResult.retval);
- return res;
- };
+ $config.data.doRemove = function doRemove(db, collName, query, options) {
+ var evalResult = db.runCommand({
+ eval: function(f, collName, query, options) {
+ return tojson(f(db, collName, query, options));
+ },
+ args: [$super.data.doRemove, collName, query, options],
+ nolock: this.nolock
+ });
+ assertAlways.commandWorked(evalResult);
+ var res = JSON.parse(evalResult.retval);
+ return res;
+ };
- $config.data.assertResult = function assertResult(res) {
- assertWhenOwnColl.eq(1, res.nRemoved, tojson(res));
- };
+ $config.data.assertResult = function assertResult(res) {
+ assertWhenOwnColl.eq(1, res.nRemoved, tojson(res));
+ };
- $config.data.nolock = false;
+ $config.data.nolock = false;
- // scale down threadCount and iterations because eval takes a global lock
- $config.threadCount = 5;
- $config.iterations = 10;
+ // scale down threadCount and iterations because eval takes a global lock
+ $config.threadCount = 5;
+ $config.iterations = 10;
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js b/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
index e88868c3345..a3f67c26892 100644
--- a/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
+++ b/jstests/concurrency/fsm_workloads/remove_single_document_eval_nolock.js
@@ -8,10 +8,9 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/remove_single_document_eval.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.nolock = true;
+ $config.data.nolock = true;
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/remove_where.js b/jstests/concurrency/fsm_workloads/remove_where.js
index 36e228ebd9a..f9c0e6a2c03 100644
--- a/jstests/concurrency/fsm_workloads/remove_where.js
+++ b/jstests/concurrency/fsm_workloads/remove_where.js
@@ -11,37 +11,32 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
- $config.data.randomBound = 10;
- $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
- return {
- tid: this.tid,
- x: Random.randInt(this.randomBound)
- };
- };
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.randomBound = 10;
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return {tid: this.tid, x: Random.randInt(this.randomBound)};
+ };
- $config.states.remove = function remove(db, collName) {
- var res = db[collName].remove({
- // Server-side JS does not support Random.randInt, so use Math.floor/random instead
- $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
- '&& this.tid === ' + this.tid
- });
- assertWhenOwnColl.gte(res.nRemoved, 0);
- assertWhenOwnColl.lte(res.nRemoved, this.insertedDocuments);
- this.insertedDocuments -= res.nRemoved;
- };
+ $config.states.remove = function remove(db, collName) {
+ var res = db[collName].remove({
+ // Server-side JS does not support Random.randInt, so use Math.floor/random instead
+ $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
+ '&& this.tid === ' + this.tid
+ });
+ assertWhenOwnColl.gte(res.nRemoved, 0);
+ assertWhenOwnColl.lte(res.nRemoved, this.insertedDocuments);
+ this.insertedDocuments -= res.nRemoved;
+ };
- $config.transitions = {
- insert: {insert: 0.2, remove: 0.4, query: 0.4},
- remove: {insert: 0.4, remove: 0.2, query: 0.4},
- query: {insert: 0.4, remove: 0.4, query: 0.2}
- };
+ $config.transitions = {
+ insert: {insert: 0.2, remove: 0.4, query: 0.4},
+ remove: {insert: 0.4, remove: 0.2, query: 0.4},
+ query: {insert: 0.4, remove: 0.4, query: 0.2}
+ };
- $config.setup = function setup(db, collName, cluster) {
- /* no-op to prevent index from being created */
- };
+ $config.setup = function setup(db, collName, cluster) {
+ /* no-op to prevent index from being created */
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
index b02642cb4c2..cc3de60d3c0 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js
@@ -27,10 +27,7 @@ var $config = (function() {
this.fromCollName = uniqueCollectionName(this.prefix, this.tid, 0);
this.num = 1;
- var options = {
- capped: true,
- size: 4096
- };
+ var options = {capped: true, size: 4096};
assertAlways.commandWorked(db.createCollection(this.fromCollName, options));
assertWhenOwnDB(db[this.fromCollName].isCapped());
@@ -44,17 +41,11 @@ var $config = (function() {
this.fromCollName = toCollName;
}
- return {
- init: init,
- rename: rename
- };
+ return {init: init, rename: rename};
})();
- var transitions = {
- init: {rename: 1},
- rename: {rename: 1}
- };
+ var transitions = {init: {rename: 1}, rename: {rename: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
index d69bb975d62..93d52f8c251 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js
@@ -28,10 +28,7 @@ var $config = (function() {
this.num = 1;
var fromDB = db.getSiblingDB(this.fromDBName);
- var options = {
- capped: true,
- size: 4096
- };
+ var options = {capped: true, size: 4096};
assertAlways.commandWorked(fromDB.createCollection(collName, options));
assertAlways(fromDB[collName].isCapped());
@@ -57,17 +54,11 @@ var $config = (function() {
this.fromDBName = toDBName;
}
- return {
- init: init,
- rename: rename
- };
+ return {init: init, rename: rename};
})();
- var transitions = {
- init: {rename: 1},
- rename: {rename: 1}
- };
+ var transitions = {init: {rename: 1}, rename: {rename: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + db.getName() + this.prefix + '\\d+_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
index 06930a0457c..b1c4c156c3f 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js
@@ -19,10 +19,7 @@ var $config = (function() {
var states = (function() {
- var options = {
- capped: true,
- size: 4096
- };
+ var options = {capped: true, size: 4096};
function uniqueDBName(prefix, tid, num) {
return prefix + tid + '_' + num;
@@ -80,17 +77,11 @@ var $config = (function() {
this.toDBName = temp;
}
- return {
- init: init,
- rename: rename
- };
+ return {init: init, rename: rename};
})();
- var transitions = {
- init: {rename: 1},
- rename: {rename: 1}
- };
+ var transitions = {init: {rename: 1}, rename: {rename: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + db.getName() + this.prefix + '\\d+_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
index 11621a0318b..1d6dfd6faf4 100644
--- a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js
@@ -19,10 +19,7 @@ var $config = (function() {
var states = (function() {
- var options = {
- capped: true,
- size: 4096
- };
+ var options = {capped: true, size: 4096};
function uniqueCollectionName(prefix, tid, num) {
return prefix + tid + '_' + num;
@@ -72,17 +69,11 @@ var $config = (function() {
this.toCollName = temp;
}
- return {
- init: init,
- rename: rename
- };
+ return {init: init, rename: rename};
})();
- var transitions = {
- init: {rename: 1},
- rename: {rename: 1}
- };
+ var transitions = {init: {rename: 1}, rename: {rename: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_chain.js
index 81c0313e217..eb6792f6df8 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_chain.js
@@ -36,17 +36,11 @@ var $config = (function() {
this.fromCollName = toCollName;
}
- return {
- init: init,
- rename: rename
- };
+ return {init: init, rename: rename};
})();
- var transitions = {
- init: {rename: 1},
- rename: {rename: 1}
- };
+ var transitions = {init: {rename: 1}, rename: {rename: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
index d11dfd19d22..bf76caa5014 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js
@@ -49,17 +49,11 @@ var $config = (function() {
this.fromDBName = toDBName;
}
- return {
- init: init,
- rename: rename
- };
+ return {init: init, rename: rename};
})();
- var transitions = {
- init: {rename: 1},
- rename: {rename: 1}
- };
+ var transitions = {init: {rename: 1}, rename: {rename: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + db.getName() + this.prefix + '\\d+_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
index 453d5a27379..7f23ecbc2d7 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js
@@ -72,17 +72,11 @@ var $config = (function() {
this.toDBName = temp;
}
- return {
- init: init,
- rename: rename
- };
+ return {init: init, rename: rename};
})();
- var transitions = {
- init: {rename: 1},
- rename: {rename: 1}
- };
+ var transitions = {init: {rename: 1}, rename: {rename: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + db.getName() + this.prefix + '\\d+_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
index bb2651258a5..592654ed3b1 100644
--- a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
+++ b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js
@@ -64,17 +64,11 @@ var $config = (function() {
this.toCollName = temp;
}
- return {
- init: init,
- rename: rename
- };
+ return {init: init, rename: rename};
})();
- var transitions = {
- init: {rename: 1},
- rename: {rename: 1}
- };
+ var transitions = {init: {rename: 1}, rename: {rename: 1}};
function teardown(db, collName, cluster) {
var pattern = new RegExp('^' + this.prefix + '\\d+_\\d+$');
diff --git a/jstests/concurrency/fsm_workloads/server_status.js b/jstests/concurrency/fsm_workloads/server_status.js
index dbde1420b99..fa3c8cbbeef 100644
--- a/jstests/concurrency/fsm_workloads/server_status.js
+++ b/jstests/concurrency/fsm_workloads/server_status.js
@@ -9,23 +9,15 @@ var $config = (function() {
var states = {
status: function status(db, collName) {
- var opts = {
- opcounterRepl: 1,
- oplog: 1,
- rangeDeleter: 1,
- repl: 1,
- security: 1,
- tcmalloc: 1
- };
+ var opts =
+ {opcounterRepl: 1, oplog: 1, rangeDeleter: 1, repl: 1, security: 1, tcmalloc: 1};
var res = db.serverStatus();
assertAlways.commandWorked(res);
assertAlways(res.hasOwnProperty('version'));
}
};
- var transitions = {
- status: {status: 1}
- };
+ var transitions = {status: {status: 1}};
return {
threadCount: 10,
diff --git a/jstests/concurrency/fsm_workloads/touch_base.js b/jstests/concurrency/fsm_workloads/touch_base.js
index df419e17db7..d69b7c94b2e 100644
--- a/jstests/concurrency/fsm_workloads/touch_base.js
+++ b/jstests/concurrency/fsm_workloads/touch_base.js
@@ -12,52 +12,42 @@ load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $conf
// For isMongod, isMMAPv1, and isEphemeral.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config =
- extendWorkload($config,
- function($config, $super) {
- $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
- return {
- tid: this.tid,
- x: Random.randInt(10)
- };
- };
-
- $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
- return {
- touch: collName,
- data: true,
- index: true
- };
- };
-
- $config.states.touch = function touch(db, collName) {
- var res = db.runCommand(this.generateTouchCmdObj(collName));
- if (isMongod(db) && (isMMAPv1(db) || isEphemeral(db))) {
- assertAlways.commandWorked(res);
- } else {
- // SERVER-16850 and SERVER-16797
- assertAlways.commandFailed(res);
- }
- };
-
- $config.states.query = function query(db, collName) {
- var count = db[collName].find({tid: this.tid}).itcount();
- assertWhenOwnColl.eq(
- count,
- this.insertedDocuments,
- 'collection scan should return the number of documents this thread' +
- ' inserted');
- };
-
- $config.transitions = {
- insert: {insert: 0.2, touch: 0.4, query: 0.4},
- touch: {insert: 0.4, touch: 0.2, query: 0.4},
- query: {insert: 0.4, touch: 0.4, query: 0.2}
- };
-
- $config.setup = function setup(db, collName, cluster) {
- assertAlways.commandWorked(db[collName].ensureIndex({x: 1}));
- };
-
- return $config;
- });
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return {tid: this.tid, x: Random.randInt(10)};
+ };
+
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return {touch: collName, data: true, index: true};
+ };
+
+ $config.states.touch = function touch(db, collName) {
+ var res = db.runCommand(this.generateTouchCmdObj(collName));
+ if (isMongod(db) && (isMMAPv1(db) || isEphemeral(db))) {
+ assertAlways.commandWorked(res);
+ } else {
+ // SERVER-16850 and SERVER-16797
+ assertAlways.commandFailed(res);
+ }
+ };
+
+ $config.states.query = function query(db, collName) {
+ var count = db[collName].find({tid: this.tid}).itcount();
+ assertWhenOwnColl.eq(count,
+ this.insertedDocuments,
+ 'collection scan should return the number of documents this thread' +
+ ' inserted');
+ };
+
+ $config.transitions = {
+ insert: {insert: 0.2, touch: 0.4, query: 0.4},
+ touch: {insert: 0.4, touch: 0.2, query: 0.4},
+ query: {insert: 0.4, touch: 0.4, query: 0.2}
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ assertAlways.commandWorked(db[collName].ensureIndex({x: 1}));
+ };
+
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/touch_data.js b/jstests/concurrency/fsm_workloads/touch_data.js
index dc3b7cecef0..11d2e53cdb6 100644
--- a/jstests/concurrency/fsm_workloads/touch_data.js
+++ b/jstests/concurrency/fsm_workloads/touch_data.js
@@ -10,16 +10,10 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
- $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
- return {
- touch: collName,
- data: true,
- index: false
- };
- };
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return {touch: collName, data: true, index: false};
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/touch_index.js b/jstests/concurrency/fsm_workloads/touch_index.js
index cc0b6fcf48d..b4e47835073 100644
--- a/jstests/concurrency/fsm_workloads/touch_index.js
+++ b/jstests/concurrency/fsm_workloads/touch_index.js
@@ -10,16 +10,10 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
- $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
- return {
- touch: collName,
- data: false,
- index: true
- };
- };
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return {touch: collName, data: false, index: true};
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js b/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js
index 25ce50fc5ac..14c3e81993d 100644
--- a/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js
+++ b/jstests/concurrency/fsm_workloads/touch_no_data_no_index.js
@@ -10,22 +10,16 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/touch_base.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
- $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
- return {
- touch: collName,
- data: false,
- index: false
- };
- };
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.generateTouchCmdObj = function generateTouchCmdObj(collName) {
+ return {touch: collName, data: false, index: false};
+ };
- $config.states.touch = function touch(db, collName) {
- var res = db.runCommand(this.generateTouchCmdObj(collName));
- // The command always fails because "index" and "data" are both false
- assertAlways.commandFailed(res);
- };
+ $config.states.touch = function touch(db, collName) {
+ var res = db.runCommand(this.generateTouchCmdObj(collName));
+ // The command always fails because "index" and "data" are both false
+ assertAlways.commandFailed(res);
+ };
- return $config;
- });
+ return $config;
+});
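
As the comment above notes, touch with both flags false is rejected on every storage engine, since nothing was asked to be loaded. A one-line sketch (hypothetical collection name):

    var res = db.runCommand({touch: 'touch_demo', data: false, index: false});
    assert.commandFailed(res);  // nothing was requested, so the command fails everywhere
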
diff --git a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
index 5d59ff2b0ef..dde8b4b7093 100644
--- a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
+++ b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
@@ -31,10 +31,7 @@ var $config = (function() {
}
};
- var transitions = {
- insert: {insert: 0.2, update: 0.8},
- update: {insert: 0.2, update: 0.8}
- };
+ var transitions = {insert: {insert: 0.2, update: 0.8}, update: {insert: 0.2, update: 0.8}};
return {
threadCount: 5,
diff --git a/jstests/concurrency/fsm_workloads/update_array.js b/jstests/concurrency/fsm_workloads/update_array.js
index 192626c2430..2020ee3c60a 100644
--- a/jstests/concurrency/fsm_workloads/update_array.js
+++ b/jstests/concurrency/fsm_workloads/update_array.js
@@ -110,10 +110,7 @@ var $config = (function() {
})();
- var transitions = {
- push: {push: 0.8, pull: 0.2},
- pull: {push: 0.8, pull: 0.2}
- };
+ var transitions = {push: {push: 0.8, pull: 0.2}, pull: {push: 0.8, pull: 0.2}};
function setup(db, collName, cluster) {
// index on 'arr', the field being updated
diff --git a/jstests/concurrency/fsm_workloads/update_check_index.js b/jstests/concurrency/fsm_workloads/update_check_index.js
index 3e099d6b2a5..bd82e39c471 100644
--- a/jstests/concurrency/fsm_workloads/update_check_index.js
+++ b/jstests/concurrency/fsm_workloads/update_check_index.js
@@ -15,14 +15,10 @@ var $config = (function() {
db[collName].update({a: 1, b: 1}, {$set: {c: newC}}, {multi: true});
}
- return {
- multiUpdate: multiUpdate
- };
+ return {multiUpdate: multiUpdate};
})();
- var transitions = {
- multiUpdate: {multiUpdate: 1.0}
- };
+ var transitions = {multiUpdate: {multiUpdate: 1.0}};
function setup(db, collName, cluster) {
assertAlways.commandWorked(db[collName].ensureIndex({a: 1}));
diff --git a/jstests/concurrency/fsm_workloads/update_inc.js b/jstests/concurrency/fsm_workloads/update_inc.js
index f16d841d3bb..a8328c26703 100644
--- a/jstests/concurrency/fsm_workloads/update_inc.js
+++ b/jstests/concurrency/fsm_workloads/update_inc.js
@@ -27,9 +27,7 @@ var $config = (function() {
},
update: function update(db, collName) {
- var updateDoc = {
- $inc: {}
- };
+ var updateDoc = {$inc: {}};
updateDoc.$inc[this.fieldName] = 1;
var res = db[collName].update({_id: this.id}, updateDoc);
@@ -74,16 +72,10 @@ var $config = (function() {
}
};
- var transitions = {
- init: {update: 1},
- update: {find: 1},
- find: {update: 1}
- };
+ var transitions = {init: {update: 1}, update: {find: 1}, find: {update: 1}};
function setup(db, collName, cluster) {
- var doc = {
- _id: this.id
- };
+ var doc = {_id: this.id};
// Pre-populate the fields we need to avoid size change for capped collections.
for (var i = 0; i < this.threadCount; ++i) {
diff --git a/jstests/concurrency/fsm_workloads/update_multifield.js b/jstests/concurrency/fsm_workloads/update_multifield.js
index 1f62472f754..7428c289c98 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield.js
@@ -35,15 +35,9 @@ var $config = (function() {
var push = Random.rand() > 0.2;
var updateDoc = {};
- updateDoc[set ? '$set' : '$unset'] = {
- x: x
- };
- updateDoc[push ? '$push' : '$pull'] = {
- y: y
- };
- updateDoc.$inc = {
- z: z
- };
+ updateDoc[set ? '$set' : '$unset'] = {x: x};
+ updateDoc[push ? '$push' : '$pull'] = {y: y};
+ updateDoc.$inc = {z: z};
return updateDoc;
}
@@ -61,9 +55,7 @@ var $config = (function() {
}
};
- var transitions = {
- update: {update: 1}
- };
+ var transitions = {update: {update: 1}};
function setup(db, collName, cluster) {
assertAlways.commandWorked(db[collName].ensureIndex({x: 1}));
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js
index 3dd8b584d98..8d95deac710 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js
@@ -9,32 +9,30 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.multi = true;
- $config.data.isolated = true;
+ $config.data.multi = true;
+ $config.data.isolated = true;
- $config.data.assertResult = function assertResult(res, db, collName, query) {
- assertAlways.eq(0, res.nUpserted, tojson(res));
- // documents can't move during an update, because we use $isolated
- assertWhenOwnColl.eq(this.numDocs, res.nMatched, tojson(res));
- if (db.getMongo().writeMode() === 'commands') {
- assertWhenOwnColl.eq(this.numDocs, res.nModified, tojson(res));
- }
+ $config.data.assertResult = function assertResult(res, db, collName, query) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+ // documents can't move during an update, because we use $isolated
+ assertWhenOwnColl.eq(this.numDocs, res.nMatched, tojson(res));
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(this.numDocs, res.nModified, tojson(res));
+ }
- // every thread only increments z, and z starts at 0,
- // so z should always be strictly greater than 0 after an update,
- // even if other threads modify the doc.
- var docs = db[collName].find().toArray();
- assertWhenOwnColl(function() {
- docs.forEach(function(doc) {
- assertWhenOwnColl.eq('number', typeof doc.z);
- assertWhenOwnColl.gt(doc.z, 0);
- });
- });
- };
+ // every thread only increments z, and z starts at 0,
+ // so z should always be strictly greater than 0 after an update,
+ // even if other threads modify the doc.
+ var docs = db[collName].find().toArray();
+ assertWhenOwnColl(function() {
+ docs.forEach(function(doc) {
+ assertWhenOwnColl.eq('number', typeof doc.z);
+ assertWhenOwnColl.gt(doc.z, 0);
+ });
+ });
+ };
- return $config;
- });
+ return $config;
+});
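
The assertions above lean on $isolated semantics: no other writer can interleave with the multi-update, so documents cannot move or be double-counted mid-scan. A hedged sketch of the operator (collection name is illustrative; $isolated applies per node and is not honored on sharded collections):

    db.iso_demo.drop();
    for (var i = 0; i < 10; i++) {
        assert.writeOK(db.iso_demo.insert({z: 0}));
    }
    var res = db.iso_demo.update({$isolated: 1, z: {$gte: 0}}, {$inc: {z: 1}}, {multi: true});
    assert.writeOK(res);
    assert.eq(10, res.nMatched);  // every document is matched exactly once
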
diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
index 2cc975085ca..2ca02e2f38e 100644
--- a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
+++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
@@ -12,40 +12,38 @@ load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config
// For isMongod and recordIdCanChangeOnUpdate.
load('jstests/concurrency/fsm_workload_helpers/server_types.js');
-var $config =
- extendWorkload($config,
- function($config, $super) {
-
- $config.data.multi = true;
-
- $config.data.assertResult = function(res, db, collName, query) {
- assertAlways.eq(0, res.nUpserted, tojson(res));
-
- if (isMongod(db)) {
- if (!recordIdCanChangeOnUpdate(db)) {
- // If a document's RecordId cannot change, then we should not
- // have updated any document more than once, since the update
- // stage internally de-duplicates based on RecordId.
- assertWhenOwnColl.lte(this.numDocs, res.nMatched, tojson(res));
- } else {
- // If RecordIds can change, then there are no guarantees on how
- // many documents were updated.
- assertAlways.gte(res.nMatched, 0, tojson(res));
- }
- } else { // mongos
- assertAlways.gte(res.nMatched, 0, tojson(res));
- }
-
- if (db.getMongo().writeMode() === 'commands') {
- assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res));
- }
-
- var docs = db[collName].find().toArray();
- docs.forEach(function(doc) {
- assertWhenOwnColl.eq('number', typeof doc.z);
- assertWhenOwnColl.gt(doc.z, 0);
- });
- };
-
- return $config;
- });
+var $config = extendWorkload($config, function($config, $super) {
+
+ $config.data.multi = true;
+
+ $config.data.assertResult = function(res, db, collName, query) {
+ assertAlways.eq(0, res.nUpserted, tojson(res));
+
+ if (isMongod(db)) {
+ if (!recordIdCanChangeOnUpdate(db)) {
+ // If a document's RecordId cannot change, then we should not
+ // have updated any document more than once, since the update
+ // stage internally de-duplicates based on RecordId.
+ assertWhenOwnColl.lte(this.numDocs, res.nMatched, tojson(res));
+ } else {
+ // If RecordIds can change, then there are no guarantees on how
+ // many documents were updated.
+ assertAlways.gte(res.nMatched, 0, tojson(res));
+ }
+ } else { // mongos
+ assertAlways.gte(res.nMatched, 0, tojson(res));
+ }
+
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res));
+ }
+
+ var docs = db[collName].find().toArray();
+ docs.forEach(function(doc) {
+ assertWhenOwnColl.eq('number', typeof doc.z);
+ assertWhenOwnColl.gt(doc.z, 0);
+ });
+ };
+
+ return $config;
+});
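
Several workloads in this file branch on db.getMongo().writeMode(): nModified is only reported when the shell talks to the server via write commands, while the legacy wire-protocol path reports nMatched alone. A small sketch of that check (collection name is illustrative):

    db.mf_demo.drop();
    assert.writeOK(db.mf_demo.insert({z: 0}));
    var res = db.mf_demo.update({}, {$inc: {z: 1}}, {multi: true});
    assert.writeOK(res);
    if (db.getMongo().writeMode() === 'commands') {
        // Only the write-commands protocol distinguishes matched from modified.
        assert.eq(res.nMatched, res.nModified);
    }
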
diff --git a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
index 0d6b4651a5d..45a079091e0 100644
--- a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
+++ b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
@@ -22,9 +22,7 @@ var $config = (function() {
},
update: function update(db, collName) {
- var updateDoc = {
- $inc: {}
- };
+ var updateDoc = {$inc: {}};
updateDoc.$inc[this.fieldName] = 1;
var bulk = db[collName].initializeOrderedBulkOp();
@@ -74,11 +72,7 @@ var $config = (function() {
}
};
- var transitions = {
- init: {update: 1},
- update: {find: 1},
- find: {update: 1}
- };
+ var transitions = {init: {update: 1}, update: {find: 1}, find: {update: 1}};
function setup(db, collName, cluster) {
this.count = 0;
diff --git a/jstests/concurrency/fsm_workloads/update_rename.js b/jstests/concurrency/fsm_workloads/update_rename.js
index b163b44f690..ca74ac5287d 100644
--- a/jstests/concurrency/fsm_workloads/update_rename.js
+++ b/jstests/concurrency/fsm_workloads/update_rename.js
@@ -20,15 +20,11 @@ var $config = (function() {
var to = choose(fieldNames.filter(function(n) {
return n !== from;
}));
- var updater = {
- $rename: {}
- };
+ var updater = {$rename: {}};
updater.$rename[from] = to;
var query = {};
- query[from] = {
- $exists: 1
- };
+ query[from] = {$exists: 1};
var res = db[collName].update(query, updater);
@@ -40,9 +36,7 @@ var $config = (function() {
}
};
- var transitions = {
- update: {update: 1}
- };
+ var transitions = {update: {update: 1}};
function setup(db, collName, cluster) {
// Create an index on all but one fieldName key to make it possible to test renames
diff --git a/jstests/concurrency/fsm_workloads/update_replace.js b/jstests/concurrency/fsm_workloads/update_replace.js
index f7edb05126d..2d04f38294c 100644
--- a/jstests/concurrency/fsm_workloads/update_replace.js
+++ b/jstests/concurrency/fsm_workloads/update_replace.js
@@ -56,9 +56,7 @@ var $config = (function() {
}
};
- var transitions = {
- update: {update: 1}
- };
+ var transitions = {update: {update: 1}};
function setup(db, collName, cluster) {
assertAlways.commandWorked(db[collName].ensureIndex({a: 1}));
diff --git a/jstests/concurrency/fsm_workloads/update_simple.js b/jstests/concurrency/fsm_workloads/update_simple.js
index 85f877e6090..cb4adf926c9 100644
--- a/jstests/concurrency/fsm_workloads/update_simple.js
+++ b/jstests/concurrency/fsm_workloads/update_simple.js
@@ -24,10 +24,7 @@ var $config = (function() {
}
};
- var transitions = {
- set: {set: 0.5, unset: 0.5},
- unset: {set: 0.5, unset: 0.5}
- };
+ var transitions = {set: {set: 0.5, unset: 0.5}, unset: {set: 0.5, unset: 0.5}};
function setup(db, collName, cluster) {
// index on 'value', the field being updated
@@ -83,13 +80,9 @@ var $config = (function() {
var value = Random.randInt(5);
var updater = {};
- updater[set ? '$set' : '$unset'] = {
- value: value
- };
+ updater[set ? '$set' : '$unset'] = {value: value};
- var query = {
- _id: docIndex
- };
+ var query = {_id: docIndex};
var res = this.doUpdate(db, collName, query, updater);
this.assertResult(db, res);
},
diff --git a/jstests/concurrency/fsm_workloads/update_simple_eval.js b/jstests/concurrency/fsm_workloads/update_simple_eval.js
index 988c2e44ab3..f8b9115e455 100644
--- a/jstests/concurrency/fsm_workloads/update_simple_eval.js
+++ b/jstests/concurrency/fsm_workloads/update_simple_eval.js
@@ -12,24 +12,22 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.doUpdate = function doUpdate(db, collName, query, updater) {
- var evalResult = db.runCommand({
- eval: function(f, collName, query, updater) {
- return tojson(f(db, collName, query, updater));
- },
- args: [$super.data.doUpdate, collName, query, updater],
- nolock: this.nolock
- });
- assertAlways.commandWorked(evalResult);
- var res = JSON.parse(evalResult.retval);
- return res;
- };
+ $config.data.doUpdate = function doUpdate(db, collName, query, updater) {
+ var evalResult = db.runCommand({
+ eval: function(f, collName, query, updater) {
+ return tojson(f(db, collName, query, updater));
+ },
+ args: [$super.data.doUpdate, collName, query, updater],
+ nolock: this.nolock
+ });
+ assertAlways.commandWorked(evalResult);
+ var res = JSON.parse(evalResult.retval);
+ return res;
+ };
- $config.data.nolock = false;
+ $config.data.nolock = false;
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js b/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
index 282e98a461d..c5f081088cd 100644
--- a/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
+++ b/jstests/concurrency/fsm_workloads/update_simple_eval_nolock.js
@@ -8,10 +8,9 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/update_simple_eval.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.nolock = true;
+ $config.data.nolock = true;
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi.js b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
index 96d83cb5115..61e7f84f2e9 100644
--- a/jstests/concurrency/fsm_workloads/update_upsert_multi.js
+++ b/jstests/concurrency/fsm_workloads/update_upsert_multi.js
@@ -51,13 +51,10 @@ var $config = (function() {
// because docs with lower i are newer, so they have had fewer
// opportunities to have n incremented.)
var prevN = Infinity;
- db[collName]
- .find({tid: this.tid})
- .sort({i: 1})
- .forEach(function(doc) {
- assertWhenOwnColl.gte(prevN, doc.n);
- prevN = doc.n;
- });
+ db[collName].find({tid: this.tid}).sort({i: 1}).forEach(function(doc) {
+ assertWhenOwnColl.gte(prevN, doc.n);
+ prevN = doc.n;
+ });
}
};
diff --git a/jstests/concurrency/fsm_workloads/update_where.js b/jstests/concurrency/fsm_workloads/update_where.js
index d7ef045131f..614cbc86093 100644
--- a/jstests/concurrency/fsm_workloads/update_where.js
+++ b/jstests/concurrency/fsm_workloads/update_where.js
@@ -10,43 +10,38 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
- $config.data.randomBound = 10;
- $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
- return {
- tid: this.tid,
- x: Random.randInt(this.randomBound)
- };
- };
-
- $config.states.update = function update(db, collName) {
- var res = db[collName].update(
- // Server-side JS does not support Random.randInt, so use Math.floor/random instead
- {
- $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
- '&& this.tid === ' + this.tid
- },
- {$set: {x: Random.randInt(this.randomBound)}},
- {multi: true});
- assertAlways.writeOK(res);
-
- if (db.getMongo().writeMode() === 'commands') {
- assertWhenOwnColl.gte(res.nModified, 0);
- assertWhenOwnColl.lte(res.nModified, this.insertedDocuments);
- }
- };
-
- $config.transitions = {
- insert: {insert: 0.2, update: 0.4, query: 0.4},
- update: {insert: 0.4, update: 0.2, query: 0.4},
- query: {insert: 0.4, update: 0.4, query: 0.2}
- };
-
- $config.setup = function setup(db, collName, cluster) {
- /* no-op to prevent index from being created */
- };
-
- return $config;
- });
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.randomBound = 10;
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return {tid: this.tid, x: Random.randInt(this.randomBound)};
+ };
+
+ $config.states.update = function update(db, collName) {
+ var res = db[collName].update(
+ // Server-side JS does not support Random.randInt, so use Math.floor/random instead
+ {
+ $where: 'this.x === Math.floor(Math.random() * ' + this.randomBound + ') ' +
+ '&& this.tid === ' + this.tid
+ },
+ {$set: {x: Random.randInt(this.randomBound)}},
+ {multi: true});
+ assertAlways.writeOK(res);
+
+ if (db.getMongo().writeMode() === 'commands') {
+ assertWhenOwnColl.gte(res.nModified, 0);
+ assertWhenOwnColl.lte(res.nModified, this.insertedDocuments);
+ }
+ };
+
+ $config.transitions = {
+ insert: {insert: 0.2, update: 0.4, query: 0.4},
+ update: {insert: 0.4, update: 0.2, query: 0.4},
+ query: {insert: 0.4, update: 0.4, query: 0.2}
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ /* no-op to prevent index from being created */
+ };
+
+ return $config;
+});
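
The update above embeds plain Math calls in its $where string because the predicate runs in the server's JS engine, where shell helpers like Random.randInt don't exist. A minimal sketch of a $where-driven update (collection name is illustrative; server-side JS must be enabled):

    db.where_demo.drop();
    assert.writeOK(db.where_demo.insert({tid: 0, x: 3}));
    var res = db.where_demo.update(
        {$where: 'this.x === 3 && this.tid === 0'}, {$set: {x: 7}}, {multi: true});
    assert.writeOK(res);
    assert.eq(7, db.where_demo.findOne().x);
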
diff --git a/jstests/concurrency/fsm_workloads/upsert_where.js b/jstests/concurrency/fsm_workloads/upsert_where.js
index 72ff542c572..7fa00727725 100644
--- a/jstests/concurrency/fsm_workloads/upsert_where.js
+++ b/jstests/concurrency/fsm_workloads/upsert_where.js
@@ -9,39 +9,34 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
- $config.data.randomBound = 10;
- $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
- return {
- tid: this.tid,
- x: Random.randInt(this.randomBound)
- };
- };
+var $config = extendWorkload($config, function($config, $super) {
+ $config.data.randomBound = 10;
+ $config.data.generateDocumentToInsert = function generateDocumentToInsert() {
+ return {tid: this.tid, x: Random.randInt(this.randomBound)};
+ };
- $config.states.upsert = function upsert(db, collName) {
- var res = db[collName].update(
- {$where: 'this.x === ' + this.randomBound + ' && this.tid === ' + this.tid},
- {$set: {x: Random.randInt(this.randomBound), tid: this.tid}},
- {upsert: true});
- assertWhenOwnColl.eq(res.nUpserted, 1);
- var upsertedDocument = db[collName].findOne({_id: res.getUpsertedId()._id});
- assertWhenOwnColl(function() {
- assertWhenOwnColl.eq(upsertedDocument.tid, this.tid);
- }.bind(this));
- this.insertedDocuments += res.nUpserted;
- };
+ $config.states.upsert = function upsert(db, collName) {
+ var res = db[collName].update(
+ {$where: 'this.x === ' + this.randomBound + ' && this.tid === ' + this.tid},
+ {$set: {x: Random.randInt(this.randomBound), tid: this.tid}},
+ {upsert: true});
+ assertWhenOwnColl.eq(res.nUpserted, 1);
+ var upsertedDocument = db[collName].findOne({_id: res.getUpsertedId()._id});
+ assertWhenOwnColl(function() {
+ assertWhenOwnColl.eq(upsertedDocument.tid, this.tid);
+ }.bind(this));
+ this.insertedDocuments += res.nUpserted;
+ };
- $config.transitions = {
- insert: {insert: 0.2, upsert: 0.4, query: 0.4},
- upsert: {insert: 0.4, upsert: 0.2, query: 0.4},
- query: {insert: 0.4, upsert: 0.4, query: 0.2}
- };
+ $config.transitions = {
+ insert: {insert: 0.2, upsert: 0.4, query: 0.4},
+ upsert: {insert: 0.4, upsert: 0.2, query: 0.4},
+ query: {insert: 0.4, upsert: 0.4, query: 0.2}
+ };
- $config.setup = function setup(db, collName, cluster) {
- /* no-op to prevent index from being created */
- };
+ $config.setup = function setup(db, collName, cluster) {
+ /* no-op to prevent index from being created */
+ };
- return $config;
- });
+ return $config;
+});
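
The upsert state above relies on WriteResult.getUpsertedId() to locate the document it just created. A minimal sketch of that round trip (collection name is illustrative):

    db.upsert_demo.drop();
    var res = db.upsert_demo.update({x: 'missing'}, {$set: {y: 1}}, {upsert: true});
    assert.eq(1, res.nUpserted);
    var doc = db.upsert_demo.findOne({_id: res.getUpsertedId()._id});
    assert.eq(1, doc.y);
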
diff --git a/jstests/concurrency/fsm_workloads/yield.js b/jstests/concurrency/fsm_workloads/yield.js
index 0ef6aa9b1a0..adbe1076ee6 100644
--- a/jstests/concurrency/fsm_workloads/yield.js
+++ b/jstests/concurrency/fsm_workloads/yield.js
@@ -44,9 +44,7 @@ var $config = (function() {
*/
genUpdateDoc: function genUpdateDoc() {
var newVal = Random.randInt(this.nDocs);
- return {
- $set: {a: newVal}
- };
+ return {$set: {a: newVal}};
}
};
diff --git a/jstests/concurrency/fsm_workloads/yield_and_hashed.js b/jstests/concurrency/fsm_workloads/yield_and_hashed.js
index 48a14d706e2..f9267b9c020 100644
--- a/jstests/concurrency/fsm_workloads/yield_and_hashed.js
+++ b/jstests/concurrency/fsm_workloads/yield_and_hashed.js
@@ -9,67 +9,63 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- /*
- * Issue a query that will use the AND_HASH stage. This is a little tricky, so use
- * stagedebug to force it to happen. Unfortunately this means it can't be batched.
- */
- $config.states.query = function andHash(db, collName) {
- var nMatches = 100;
- assertAlways.lte(nMatches, this.nDocs);
- // Construct the query plan: two ixscans under an andHashed.
- // Scan c <= nMatches
- var ixscan1 = {
- ixscan: {
- args: {
- name: 'stages_and_hashed',
- keyPattern: {c: 1},
- startKey: {'': nMatches},
- endKey: {},
- endKeyInclusive: true,
- direction: -1
- }
+ /*
+ * Issue a query that will use the AND_HASH stage. This is a little tricky, so use
+ * stagedebug to force it to happen. Unfortunately this means it can't be batched.
+ */
+ $config.states.query = function andHash(db, collName) {
+ var nMatches = 100;
+ assertAlways.lte(nMatches, this.nDocs);
+ // Construct the query plan: two ixscans under an andHashed.
+ // Scan c <= nMatches
+ var ixscan1 = {
+ ixscan: {
+ args: {
+ name: 'stages_and_hashed',
+ keyPattern: {c: 1},
+ startKey: {'': nMatches},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: -1
}
- };
+ }
+ };
- // Scan d >= this.nDocs - nMatches
- var ixscan2 = {
- ixscan: {
- args: {
- name: 'stages_and_hashed',
- keyPattern: {d: 1},
- startKey: {'': this.nDocs - nMatches},
- endKey: {},
- endKeyInclusive: true,
- direction: 1
- }
+ // Scan d >= this.nDocs - nMatches
+ var ixscan2 = {
+ ixscan: {
+ args: {
+ name: 'stages_and_hashed',
+ keyPattern: {d: 1},
+ startKey: {'': this.nDocs - nMatches},
+ endKey: {},
+ endKeyInclusive: true,
+ direction: 1
}
- };
-
- var andix1ix2 = {
- andHash: {args: {nodes: [ixscan1, ixscan2]}}
- };
+ }
+ };
- // On non-MMAP storage engines, index intersection plans will always re-filter
- // the docs to make sure we don't get any spurious matches.
- var fetch = {
- fetch: {
- filter: {c: {$lte: nMatches}, d: {$gte: (this.nDocs - nMatches)}},
- args: {node: andix1ix2}
- }
- };
+ var andix1ix2 = {andHash: {args: {nodes: [ixscan1, ixscan2]}}};
- var res = db.runCommand({stageDebug: {plan: fetch, collection: collName}});
- assertAlways.commandWorked(res);
- for (var i = 0; i < res.results.length; i++) {
- var result = res.results[i];
- assertAlways.lte(result.c, nMatches);
- assertAlways.gte(result.d, this.nDocs - nMatches);
+ // On non-MMAP storage engines, index intersection plans will always re-filter
+ // the docs to make sure we don't get any spurious matches.
+ var fetch = {
+ fetch: {
+ filter: {c: {$lte: nMatches}, d: {$gte: (this.nDocs - nMatches)}},
+ args: {node: andix1ix2}
}
};
- return $config;
- });
+ var res = db.runCommand({stageDebug: {plan: fetch, collection: collName}});
+ assertAlways.commandWorked(res);
+ for (var i = 0; i < res.results.length; i++) {
+ var result = res.results[i];
+ assertAlways.lte(result.c, nMatches);
+ assertAlways.gte(result.d, this.nDocs - nMatches);
+ }
+ };
+
+ return $config;
+});
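
stageDebug executes a hand-built query plan directly, which is how this workload forces an AND_HASH stage the optimizer might not otherwise pick. A hedged sketch mirroring the plan shape above (requires a mongod started with test commands enabled, plus the collection and the {c: 1} and {d: 1} indexes it scans):

    db.stages_and_hashed.drop();
    assert.writeOK(db.stages_and_hashed.insert({c: 1, d: 1}));
    assert.commandWorked(db.stages_and_hashed.ensureIndex({c: 1}));
    assert.commandWorked(db.stages_and_hashed.ensureIndex({d: 1}));
    var scanC = {ixscan: {args: {name: 'stages_and_hashed', keyPattern: {c: 1},
                                 startKey: {'': 5}, endKey: {},
                                 endKeyInclusive: true, direction: -1}}};  // c <= 5
    var scanD = {ixscan: {args: {name: 'stages_and_hashed', keyPattern: {d: 1},
                                 startKey: {'': 0}, endKey: {},
                                 endKeyInclusive: true, direction: 1}}};   // d >= 0
    var plan = {andHash: {args: {nodes: [scanC, scanD]}}};
    var res = db.runCommand({stageDebug: {collection: 'stages_and_hashed', plan: plan}});
    assert.commandWorked(res);  // res.results holds the intersected documents
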
diff --git a/jstests/concurrency/fsm_workloads/yield_and_sorted.js b/jstests/concurrency/fsm_workloads/yield_and_sorted.js
index 2bea4226ba0..aed15988b10 100644
--- a/jstests/concurrency/fsm_workloads/yield_and_sorted.js
+++ b/jstests/concurrency/fsm_workloads/yield_and_sorted.js
@@ -9,62 +9,58 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- /*
- * Issue a query that will use the AND_SORTED stage. This is a little tricky, so use
- * stagedebug to force it to happen. Unfortunately this means it can't be batched.
- */
- $config.states.query = function andSorted(db, collName) {
- // Not very many docs returned in this, so loop to increase chances of yielding in the
- // middle.
- for (var i = 0; i < 100; i++) {
- // Construct the query plan: two ixscans under an andSorted.
- // Scan a == 0
- var ixscan1 = {
- ixscan: {
- args: {
- name: 'stages_and_sorted',
- keyPattern: {c: 1},
- startKey: {'': 0},
- endKey: {'': 0},
- endKeyInclusive: false,
- direction: 1
- }
+ /*
+ * Issue a query that will use the AND_SORTED stage. This is a little tricky, so use
+ * stagedebug to force it to happen. Unfortunately this means it can't be batched.
+ */
+ $config.states.query = function andSorted(db, collName) {
+        // Not very many docs are returned by this query, so loop to increase the
+        // chances of yielding in the middle.

+ for (var i = 0; i < 100; i++) {
+ // Construct the query plan: two ixscans under an andSorted.
+            // Scan c == 0
+ var ixscan1 = {
+ ixscan: {
+ args: {
+ name: 'stages_and_sorted',
+ keyPattern: {c: 1},
+ startKey: {'': 0},
+ endKey: {'': 0},
+ endKeyInclusive: false,
+ direction: 1
}
- };
- // Scan b == this.nDocs
- var ixscan2 = {
- ixscan: {
- args: {
- name: 'stages_and_sorted',
- keyPattern: {d: 1},
- startKey: {'': this.nDocs},
- endKey: {'': this.nDocs},
- endKeyInclusive: false,
- direction: -1
- }
+ }
+ };
+            // Scan d == this.nDocs
+ var ixscan2 = {
+ ixscan: {
+ args: {
+ name: 'stages_and_sorted',
+ keyPattern: {d: 1},
+ startKey: {'': this.nDocs},
+ endKey: {'': this.nDocs},
+ endKeyInclusive: false,
+ direction: -1
}
- };
-
- // Intersect the two
- var andix1ix2 = {
- andSorted: {args: {nodes: [ixscan1, ixscan2]}}
- };
- var res = db.runCommand({stageDebug: {collection: collName, plan: andix1ix2}});
- assertAlways.commandWorked(res);
- for (var j = 0; j < res.results.length; j++) {
- var result = res.results[j];
- // These should always be true, since they're just verifying that the results
- // match
- // the query predicate.
- assertAlways.eq(result.c, 0);
- assertAlways.eq(result.d, this.nDocs);
}
+ };
+
+ // Intersect the two
+ var andix1ix2 = {andSorted: {args: {nodes: [ixscan1, ixscan2]}}};
+ var res = db.runCommand({stageDebug: {collection: collName, plan: andix1ix2}});
+ assertAlways.commandWorked(res);
+ for (var j = 0; j < res.results.length; j++) {
+ var result = res.results[j];
+                // These should always be true, since they're just verifying that the
+                // results match the query predicate.
+ assertAlways.eq(result.c, 0);
+ assertAlways.eq(result.d, this.nDocs);
}
- };
+ }
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/yield_fetch.js b/jstests/concurrency/fsm_workloads/yield_fetch.js
index b3f47a5fe5d..e802635af73 100644
--- a/jstests/concurrency/fsm_workloads/yield_fetch.js
+++ b/jstests/concurrency/fsm_workloads/yield_fetch.js
@@ -9,24 +9,22 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- /*
- * Issue a query that will use the FETCH stage.
- */
- $config.states.query = function fetch(db, collName) {
- var nMatches = 100;
+ /*
+ * Issue a query that will use the FETCH stage.
+ */
+ $config.states.query = function fetch(db, collName) {
+ var nMatches = 100;
- var cursor = db[collName].find({c: {$lt: nMatches}}).batchSize(this.batchSize);
+ var cursor = db[collName].find({c: {$lt: nMatches}}).batchSize(this.batchSize);
- var verifier = function fetchVerifier(doc, prevDoc) {
- return doc.c < nMatches;
- };
-
- this.advanceCursor(cursor, verifier);
+ var verifier = function fetchVerifier(doc, prevDoc) {
+ return doc.c < nMatches;
};
- return $config;
- });
+ this.advanceCursor(cursor, verifier);
+ };
+
+ return $config;
+});
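
The FETCH workload, like the other yield tests, iterates with an explicit batchSize so the query yields between getMore batches. A minimal sketch of that cursor pattern (collection name is illustrative):

    db.fetch_demo.drop();
    for (var i = 0; i < 5; i++) {
        assert.writeOK(db.fetch_demo.insert({c: i}));
    }
    var cursor = db.fetch_demo.find({c: {$lt: 100}}).batchSize(2);
    while (cursor.hasNext()) {
        assert.lt(cursor.next().c, 100);  // every returned doc satisfies the predicate
    }
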
diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near.js b/jstests/concurrency/fsm_workloads/yield_geo_near.js
index fd13bd31014..3ed79835906 100644
--- a/jstests/concurrency/fsm_workloads/yield_geo_near.js
+++ b/jstests/concurrency/fsm_workloads/yield_geo_near.js
@@ -8,80 +8,69 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- /*
- * Use geo $near query to find points near the origin. Note this should be done using the
- * geoNear command, rather than a $near query, as the $near query doesn't work in a sharded
- * environment. Unfortunately this means we cannot batch the request.
- */
- $config.states.query = function geoNear(db, collName) {
- // This distance gets about 80 docs around the origin. There is one doc inserted
- // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
- var maxDistance = 5;
+ /*
+ * Use geo $near query to find points near the origin. Note this should be done using the
+ * geoNear command, rather than a $near query, as the $near query doesn't work in a sharded
+ * environment. Unfortunately this means we cannot batch the request.
+ */
+ $config.states.query = function geoNear(db, collName) {
+ // This distance gets about 80 docs around the origin. There is one doc inserted
+ // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
+ var maxDistance = 5;
- var res = db.runCommand({geoNear: collName, near: [0, 0], maxDistance: maxDistance});
- assertWhenOwnColl.commandWorked(res); // Could fail if more than 1 2d index.
- assertWhenOwnColl(function verifyResults() {
- var results = res.results;
- var prevDoc = {
- dis: 0
- }; // distance should never be less than 0
- for (var i = 0; i < results.length; i++) {
- var doc = results[i];
- assertAlways.lte(NumberInt(doc.dis), maxDistance); // satisfies query
- assertAlways.lte(prevDoc.dis, doc.dis); // returned in the correct order
- prevDoc = doc;
- }
- });
- };
+ var res = db.runCommand({geoNear: collName, near: [0, 0], maxDistance: maxDistance});
+ assertWhenOwnColl.commandWorked(res); // Could fail if more than 1 2d index.
+ assertWhenOwnColl(function verifyResults() {
+ var results = res.results;
+ var prevDoc = {dis: 0}; // distance should never be less than 0
+ for (var i = 0; i < results.length; i++) {
+ var doc = results[i];
+ assertAlways.lte(NumberInt(doc.dis), maxDistance); // satisfies query
+ assertAlways.lte(prevDoc.dis, doc.dis); // returned in the correct order
+ prevDoc = doc;
+ }
+ });
+ };
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var P = Math.floor(Math.sqrt(this.nDocs));
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var P = Math.floor(Math.sqrt(this.nDocs));
- // Move the point to another location within the PxP grid.
- var newX = Random.randInt(P) - P / 2;
- var newY = Random.randInt(P) - P / 2;
- return {
- $set: {geo: [newX, newY]}
- };
- };
+ // Move the point to another location within the PxP grid.
+ var newX = Random.randInt(P) - P / 2;
+ var newY = Random.randInt(P) - P / 2;
+ return {$set: {geo: [newX, newY]}};
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- return {
- geo: '2d'
- };
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ return {geo: '2d'};
+ };
- $config.data.getReplaceSpec = function getReplaceSpec(i, coords) {
- return {
- _id: i,
- geo: coords
- };
- };
+ $config.data.getReplaceSpec = function getReplaceSpec(i, coords) {
+ return {_id: i, geo: coords};
+ };
- /*
- * Insert some docs in geo form and make a 2d index.
- */
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ /*
+ * Insert some docs in geo form and make a 2d index.
+ */
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
- var P = Math.floor(Math.sqrt(this.nDocs));
- var i = 0;
- // Set up some points to query (in a PxP grid around 0,0).
- var bulk = db[collName].initializeUnorderedBulkOp();
- for (var x = 0; x < P; x++) {
- for (var y = 0; y < P; y++) {
- var coords = [x - P / 2, y - P / 2];
- bulk.find({_id: i}).upsert().replaceOne(this.getReplaceSpec(i, coords));
- i++;
- }
+ var P = Math.floor(Math.sqrt(this.nDocs));
+ var i = 0;
+ // Set up some points to query (in a PxP grid around 0,0).
+ var bulk = db[collName].initializeUnorderedBulkOp();
+ for (var x = 0; x < P; x++) {
+ for (var y = 0; y < P; y++) {
+ var coords = [x - P / 2, y - P / 2];
+ bulk.find({_id: i}).upsert().replaceOne(this.getReplaceSpec(i, coords));
+ i++;
}
- assertAlways.writeOK(bulk.execute());
- assertAlways.commandWorked(db[collName].ensureIndex(this.getIndexSpec()));
- };
+ }
+ assertAlways.writeOK(bulk.execute());
+ assertAlways.commandWorked(db[collName].ensureIndex(this.getIndexSpec()));
+ };
- return $config;
- });
+ return $config;
+});
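
As the comment above explains, the geoNear command is used instead of a $near find because the latter does not work against sharded collections. A minimal sketch of the command form (collection name is illustrative; a 2d index on 'geo' is required):

    db.geo_demo.drop();
    assert.writeOK(db.geo_demo.insert({geo: [1, 1]}));
    assert.commandWorked(db.geo_demo.ensureIndex({geo: '2d'}));
    var res = db.runCommand({geoNear: 'geo_demo', near: [0, 0], maxDistance: 5});
    assert.commandWorked(res);
    // Results arrive nearest-first; each entry carries its distance in 'dis'.
    res.results.forEach(function(r) {
        assert.lte(r.dis, 5);
    });
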
diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
index 5efd9cf7242..9f3476873f7 100644
--- a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
+++ b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js
@@ -8,92 +8,78 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield_geo_near.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- $config.states.remove = function remove(db, collName) {
- var id = Random.randInt(this.nDocs);
- var doc = db[collName].findOne({_id: id});
- if (doc !== null) {
- var res = db[collName].remove({_id: id});
- assertAlways.writeOK(res);
- if (res.nRemoved > 0) {
- // Re-insert the document with the same '_id', but an incremented
- // 'timesInserted' to
- // distinguish it from the deleted document.
- doc.timesInserted++;
- assertAlways.writeOK(db[collName].insert(doc));
- }
+ $config.states.remove = function remove(db, collName) {
+ var id = Random.randInt(this.nDocs);
+ var doc = db[collName].findOne({_id: id});
+ if (doc !== null) {
+ var res = db[collName].remove({_id: id});
+ assertAlways.writeOK(res);
+ if (res.nRemoved > 0) {
+                // Re-insert the document with the same '_id', but an incremented
+                // 'timesInserted' to distinguish it from the deleted document.
+ doc.timesInserted++;
+ assertAlways.writeOK(db[collName].insert(doc));
}
- };
+ }
+ };
- /*
- * Use geo $nearSphere query to find points near the origin. Note this should be done using
- *the
- * geoNear command, rather than a $nearSphere query, as the $nearSphere query doesn't work
- *in a
- * sharded environment. Unfortunately this means we cannot batch the request.
- *
- * Only points are covered in this test as there is no guarantee that geometries indexed in
- * multiple cells will be deduplicated correctly with interspersed updates. If multiple
- *index
- * cells for the same geometry occur in the same search interval, an update may cause
- *geoNear
- * to return the same document multiple times.
- */
- $config.states.query = function geoNear(db, collName) {
- // This distance gets about 80 docs around the origin. There is one doc inserted
- // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
- var maxDistance = 5;
+    /*
+     * Use a geo $nearSphere query to find points near the origin. Note this should be done
+     * using the geoNear command, rather than a $nearSphere query, as the $nearSphere query
+     * doesn't work in a sharded environment. Unfortunately this means we cannot batch the
+     * request.
+     *
+     * Only points are covered in this test as there is no guarantee that geometries indexed
+     * in multiple cells will be deduplicated correctly with interspersed updates. If multiple
+     * index cells for the same geometry occur in the same search interval, an update may
+     * cause geoNear to return the same document multiple times.
+     */
+ $config.states.query = function geoNear(db, collName) {
+ // This distance gets about 80 docs around the origin. There is one doc inserted
+ // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79.
+ var maxDistance = 5;
- var res = db.runCommand(
- {geoNear: collName, near: [0, 0], maxDistance: maxDistance, spherical: true});
- assertWhenOwnColl.commandWorked(res);
- assertWhenOwnColl(function verifyResults() {
- var results = res.results;
- var seenObjs = [];
- for (var i = 0; i < results.length; i++) {
- var doc = results[i].obj;
+ var res = db.runCommand(
+ {geoNear: collName, near: [0, 0], maxDistance: maxDistance, spherical: true});
+ assertWhenOwnColl.commandWorked(res);
+ assertWhenOwnColl(function verifyResults() {
+ var results = res.results;
+ var seenObjs = [];
+ for (var i = 0; i < results.length; i++) {
+ var doc = results[i].obj;
- // The pair (_id, timesInserted) is the smallest set of attributes that uniquely
- // identifies a document.
- var objToSearchFor = {
- _id: doc._id,
- timesInserted: doc.timesInserted
- };
- var found = seenObjs.some(function(obj) {
- return bsonWoCompare(obj, objToSearchFor) === 0;
- });
- assertWhenOwnColl(!found,
- 'geoNear command returned the document ' + tojson(doc) +
- ' multiple times: ' + tojson(seenObjs));
- seenObjs.push(objToSearchFor);
- }
- });
- };
+ // The pair (_id, timesInserted) is the smallest set of attributes that uniquely
+ // identifies a document.
+ var objToSearchFor = {_id: doc._id, timesInserted: doc.timesInserted};
+ var found = seenObjs.some(function(obj) {
+ return bsonWoCompare(obj, objToSearchFor) === 0;
+ });
+ assertWhenOwnColl(!found,
+ 'geoNear command returned the document ' + tojson(doc) +
+ ' multiple times: ' + tojson(seenObjs));
+ seenObjs.push(objToSearchFor);
+ }
+ });
+ };
- $config.data.genUpdateDoc = function genUpdateDoc() {
- // Attempts to perform an in-place update to trigger an invalidation on MMAP v1.
- return {
- $inc: {timesUpdated: 1}
- };
- };
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ // Attempts to perform an in-place update to trigger an invalidation on MMAP v1.
+ return {$inc: {timesUpdated: 1}};
+ };
- $config.data.getIndexSpec = function getIndexSpec() {
- return {
- geo: '2dsphere'
- };
- };
+ $config.data.getIndexSpec = function getIndexSpec() {
+ return {geo: '2dsphere'};
+ };
- $config.data.getReplaceSpec = function getReplaceSpec(i, coords) {
- return {
- _id: i,
- geo: coords,
- timesUpdated: 0,
- timesInserted: 0
- };
- };
+ $config.data.getReplaceSpec = function getReplaceSpec(i, coords) {
+ return {_id: i, geo: coords, timesUpdated: 0, timesInserted: 0};
+ };
- return $config;
- });
+ return $config;
+});
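
The dedup check above treats the pair (_id, timesInserted) as a document's identity and compares candidates with bsonWoCompare, so a deleted-and-reinserted document counts as new rather than as a spurious duplicate. A small sketch of that comparison:

    var seenObjs = [];
    function isDuplicate(key) {
        return seenObjs.some(function(obj) {
            return bsonWoCompare(obj, key) === 0;
        });
    }
    seenObjs.push({_id: 1, timesInserted: 0});
    assert(isDuplicate({_id: 1, timesInserted: 0}));   // same identity: a true duplicate
    assert(!isDuplicate({_id: 1, timesInserted: 1}));  // re-inserted doc counts as new
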
diff --git a/jstests/concurrency/fsm_workloads/yield_id_hack.js b/jstests/concurrency/fsm_workloads/yield_id_hack.js
index 0d50eb7d350..eddb653c1d8 100644
--- a/jstests/concurrency/fsm_workloads/yield_id_hack.js
+++ b/jstests/concurrency/fsm_workloads/yield_id_hack.js
@@ -9,26 +9,25 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-var $config = extendWorkload($config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- /*
- * Issue a query that will use the ID_HACK stage. This cannot be
- * batched, so issue a
- * number of them to increase the chances of yielding between
- * getting the key and looking
- * up its value.
- */
- $config.states.query = function idHack(db, collName) {
- var nQueries = 100;
- for (var i = 0; i < nQueries; i++) {
- assertAlways.lte(db[collName].find({_id: i}).itcount(), 1);
- var res = db[collName].findOne({_id: i});
- if (res !== null) {
- assertAlways.eq(i, res._id);
- }
- }
- };
+    /*
+     * Issue a query that will use the ID_HACK stage. This cannot be batched, so issue a
+     * number of them to increase the chances of yielding between getting the key and
+     * looking up its value.
+     */
+ $config.states.query = function idHack(db, collName) {
+ var nQueries = 100;
+ for (var i = 0; i < nQueries; i++) {
+ assertAlways.lte(db[collName].find({_id: i}).itcount(), 1);
+ var res = db[collName].findOne({_id: i});
+ if (res !== null) {
+ assertAlways.eq(i, res._id);
+ }
+ }
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/yield_rooted_or.js b/jstests/concurrency/fsm_workloads/yield_rooted_or.js
index f7dd0dcffdf..2d21427b42b 100644
--- a/jstests/concurrency/fsm_workloads/yield_rooted_or.js
+++ b/jstests/concurrency/fsm_workloads/yield_rooted_or.js
@@ -10,41 +10,37 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-var $config =
- extendWorkload($config,
- function($config, $super) {
-
- /*
- * Issue a query with an or stage as the root.
- */
- $config.states.query = function rootedOr(db, collName) {
- var nMatches = 100;
-
- var cursor = db[collName].find({
- $or: [{c: {$lte: nMatches / 2}}, {d: {$lte: nMatches / 2}}]
- }).batchSize(this.batchSize);
-
- var verifier = function rootedOrVerifier(doc, prevDoc) {
- return (doc.c <= nMatches / 2 || doc.d <= nMatches / 2);
- };
-
- this.advanceCursor(cursor, verifier);
- };
-
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var newC = Random.randInt(this.nDocs);
- var newD = Random.randInt(this.nDocs);
- return {
- $set: {c: newC, d: newD}
- };
- };
-
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
-
- assertAlways.commandWorked(db[collName].ensureIndex({c: 1}));
- assertAlways.commandWorked(db[collName].ensureIndex({d: 1}));
- };
-
- return $config;
- });
+var $config = extendWorkload($config, function($config, $super) {
+
+ /*
+ * Issue a query with an or stage as the root.
+ */
+ $config.states.query = function rootedOr(db, collName) {
+ var nMatches = 100;
+
+ var cursor = db[collName]
+ .find({$or: [{c: {$lte: nMatches / 2}}, {d: {$lte: nMatches / 2}}]})
+ .batchSize(this.batchSize);
+
+ var verifier = function rootedOrVerifier(doc, prevDoc) {
+ return (doc.c <= nMatches / 2 || doc.d <= nMatches / 2);
+ };
+
+ this.advanceCursor(cursor, verifier);
+ };
+
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var newC = Random.randInt(this.nDocs);
+ var newD = Random.randInt(this.nDocs);
+ return {$set: {c: newC, d: newD}};
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
+
+ assertAlways.commandWorked(db[collName].ensureIndex({c: 1}));
+ assertAlways.commandWorked(db[collName].ensureIndex({d: 1}));
+ };
+
+ return $config;
+});
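
A rooted $or can answer each branch with its own index, which is why setup builds indexes on both c and d. A minimal sketch (collection name is illustrative):

    db.or_demo.drop();
    assert.commandWorked(db.or_demo.ensureIndex({c: 1}));
    assert.commandWorked(db.or_demo.ensureIndex({d: 1}));
    assert.writeOK(db.or_demo.insert({c: 10, d: 90}));
    assert.writeOK(db.or_demo.insert({c: 90, d: 90}));
    var n = db.or_demo.find({$or: [{c: {$lte: 50}}, {d: {$lte: 50}}]}).itcount();
    assert.eq(1, n);  // only the first document satisfies either branch
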
diff --git a/jstests/concurrency/fsm_workloads/yield_sort.js b/jstests/concurrency/fsm_workloads/yield_sort.js
index d0d905177f8..1c535ae6415 100644
--- a/jstests/concurrency/fsm_workloads/yield_sort.js
+++ b/jstests/concurrency/fsm_workloads/yield_sort.js
@@ -9,37 +9,33 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield_sort_merge.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
+var $config = extendWorkload($config, function($config, $super) {
- /*
- * Execute a query that will use the SORT stage.
- */
- $config.states.query = function sort(db, collName) {
- var nMatches = 100;
- // Sort on c, since it's not an indexed field.
- var cursor =
- db[collName].find({a: {$lt: nMatches}}).sort({c: -1}).batchSize(this.batchSize);
+ /*
+ * Execute a query that will use the SORT stage.
+ */
+ $config.states.query = function sort(db, collName) {
+ var nMatches = 100;
+ // Sort on c, since it's not an indexed field.
+ var cursor =
+ db[collName].find({a: {$lt: nMatches}}).sort({c: -1}).batchSize(this.batchSize);
- var verifier = function sortVerifier(doc, prevDoc) {
- var correctOrder = true;
- if (prevDoc !== null) {
- correctOrder = (doc.c <= prevDoc.c);
- }
- return doc.a < nMatches && correctOrder;
- };
-
- this.advanceCursor(cursor, verifier);
+ var verifier = function sortVerifier(doc, prevDoc) {
+ var correctOrder = true;
+ if (prevDoc !== null) {
+ correctOrder = (doc.c <= prevDoc.c);
+ }
+ return doc.a < nMatches && correctOrder;
};
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var newA = Random.randInt(this.nDocs);
- var newC = Random.randInt(this.nDocs);
- return {
- $set: {a: newA, c: newC}
- };
- };
+ this.advanceCursor(cursor, verifier);
+ };
+
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var newA = Random.randInt(this.nDocs);
+ var newC = Random.randInt(this.nDocs);
+ return {$set: {a: newA, c: newC}};
+ };
- return $config;
- });
+ return $config;
+});
diff --git a/jstests/concurrency/fsm_workloads/yield_sort_merge.js b/jstests/concurrency/fsm_workloads/yield_sort_merge.js
index c46163df492..d715a813701 100644
--- a/jstests/concurrency/fsm_workloads/yield_sort_merge.js
+++ b/jstests/concurrency/fsm_workloads/yield_sort_merge.js
@@ -10,49 +10,44 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
-
- /*
- * Execute a query that will use the SORT_MERGE stage.
- */
- $config.states.query = function sortMerge(db, collName) {
- var nMatches = 50; // Don't push this too high, or SORT_MERGE stage won't be selected.
-
- // Build an array [0, nMatches).
- var matches = [];
- for (var i = 0; i < nMatches; i++) {
- matches.push(i);
+var $config = extendWorkload($config, function($config, $super) {
+
+ /*
+ * Execute a query that will use the SORT_MERGE stage.
+ */
+ $config.states.query = function sortMerge(db, collName) {
+ var nMatches = 50; // Don't push this too high, or SORT_MERGE stage won't be selected.
+
+ // Build an array [0, nMatches).
+ var matches = [];
+ for (var i = 0; i < nMatches; i++) {
+ matches.push(i);
+ }
+
+ var cursor = db[collName].find({a: {$in: matches}}).sort({b: -1}).batchSize(this.batchSize);
+
+ var verifier = function sortMergeVerifier(doc, prevDoc) {
+ var correctOrder = true;
+ if (prevDoc !== null) {
+ correctOrder = (doc.b <= prevDoc.b);
}
-
- var cursor =
- db[collName].find({a: {$in: matches}}).sort({b: -1}).batchSize(this.batchSize);
-
- var verifier = function sortMergeVerifier(doc, prevDoc) {
- var correctOrder = true;
- if (prevDoc !== null) {
- correctOrder = (doc.b <= prevDoc.b);
- }
- return doc.a < nMatches && correctOrder;
- };
-
- this.advanceCursor(cursor, verifier);
+ return doc.a < nMatches && correctOrder;
};
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var newA = Random.randInt(this.nDocs);
- var newB = Random.randInt(this.nDocs);
- return {
- $set: {a: newA, b: newB}
- };
- };
+ this.advanceCursor(cursor, verifier);
+ };
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var newA = Random.randInt(this.nDocs);
+ var newB = Random.randInt(this.nDocs);
+ return {$set: {a: newA, b: newB}};
+ };
- assertAlways.commandWorked(db[collName].ensureIndex({a: 1, b: 1}));
- };
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
+
+ assertAlways.commandWorked(db[collName].ensureIndex({a: 1, b: 1}));
+ };
- return $config;
- });
+ return $config;
+});
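
SORT_MERGE applies when a small $in on the prefix of a compound index combines with a sort on its suffix: each $in value yields an already-sorted stream over {a: 1, b: 1}, and the streams are merged instead of block-sorted. A minimal sketch (collection name is illustrative):

    db.sm_demo.drop();
    assert.commandWorked(db.sm_demo.ensureIndex({a: 1, b: 1}));
    for (var i = 0; i < 10; i++) {
        assert.writeOK(db.sm_demo.insert({a: i % 3, b: i}));
    }
    var docs = db.sm_demo.find({a: {$in: [0, 1]}}).sort({b: -1}).toArray();
    for (var j = 1; j < docs.length; j++) {
        assert.lte(docs[j].b, docs[j - 1].b);  // merged output is globally sorted
    }
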
diff --git a/jstests/concurrency/fsm_workloads/yield_text.js b/jstests/concurrency/fsm_workloads/yield_text.js
index 9291c25e527..0ccf5b8a7d4 100644
--- a/jstests/concurrency/fsm_workloads/yield_text.js
+++ b/jstests/concurrency/fsm_workloads/yield_text.js
@@ -9,47 +9,42 @@
load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload
load('jstests/concurrency/fsm_workloads/yield.js'); // for $config
-var $config = extendWorkload(
- $config,
- function($config, $super) {
-
- /*
- * Pick a random word and search for it using full text search.
- */
- $config.states.query = function text(db, collName) {
- var word = this.words[Random.randInt(this.words.length)];
-
- var cursor = db[collName].find({
- $text: {$search: word},
- yield_text: {$exists: true}
- }).batchSize(this.batchSize);
-
- var verifier = function textVerifier(doc, prevDoc) {
- return doc.yield_text.indexOf(word) !== -1;
- };
-
- // If we don't have the right text index, or someone drops our text index, this
- // assertion
- // is either pointless or won't work. So only verify the results when we know no one
- // else
- // is messing with our indices.
- assertWhenOwnColl(function verifyTextResults() {
- this.advanceCursor(cursor, verifier);
- }.bind(this));
- };
+var $config = extendWorkload($config, function($config, $super) {
- $config.data.genUpdateDoc = function genUpdateDoc() {
- var newWord = this.words[Random.randInt(this.words.length)];
- return {
- $set: {yield_text: newWord}
- };
- };
+ /*
+ * Pick a random word and search for it using full text search.
+ */
+ $config.states.query = function text(db, collName) {
+ var word = this.words[Random.randInt(this.words.length)];
- $config.setup = function setup(db, collName, cluster) {
- $super.setup.apply(this, arguments);
+ var cursor = db[collName]
+ .find({$text: {$search: word}, yield_text: {$exists: true}})
+ .batchSize(this.batchSize);
- assertWhenOwnColl.commandWorked(db[collName].ensureIndex({yield_text: 'text'}));
+ var verifier = function textVerifier(doc, prevDoc) {
+ return doc.yield_text.indexOf(word) !== -1;
};
- return $config;
- });
+        // If we don't have the right text index, or someone drops our text index, this
+        // assertion is either pointless or won't work. So only verify the results when we
+        // know no one else is messing with our indices.
+ assertWhenOwnColl(function verifyTextResults() {
+ this.advanceCursor(cursor, verifier);
+ }.bind(this));
+ };
+
+ $config.data.genUpdateDoc = function genUpdateDoc() {
+ var newWord = this.words[Random.randInt(this.words.length)];
+ return {$set: {yield_text: newWord}};
+ };
+
+ $config.setup = function setup(db, collName, cluster) {
+ $super.setup.apply(this, arguments);
+
+ assertWhenOwnColl.commandWorked(db[collName].ensureIndex({yield_text: 'text'}));
+ };
+
+ return $config;
+});
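
The text workload pairs a $text predicate with an {$exists: true} clause on the indexed field so the verifier can inspect doc.yield_text directly. A minimal sketch of the query shape (collection name is illustrative; the text index is required):

    db.text_demo.drop();
    assert.commandWorked(db.text_demo.ensureIndex({yield_text: 'text'}));
    assert.writeOK(db.text_demo.insert({yield_text: 'orange'}));
    var docs = db.text_demo.find({$text: {$search: 'orange'}}).toArray();
    assert.eq(1, docs.length);
    assert.neq(-1, docs[0].yield_text.indexOf('orange'));
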
diff --git a/jstests/core/apitest_db.js b/jstests/core/apitest_db.js
index 2713a6cfd24..be90b9eb877 100644
--- a/jstests/core/apitest_db.js
+++ b/jstests/core/apitest_db.js
@@ -73,9 +73,9 @@ assert.docEq(validStorageEngineOptions,
// The indexOptionDefaults must be a document that contains only a storageEngine field.
db.idxOptions.drop();
assert.commandFailed(db.createCollection('idxOptions', {indexOptionDefaults: 'not a document'}));
-assert.commandFailed(db.createCollection('idxOptions',
- {indexOptionDefaults: {unknownOption: true}}),
- 'created a collection with an unknown option to indexOptionDefaults');
+assert.commandFailed(
+ db.createCollection('idxOptions', {indexOptionDefaults: {unknownOption: true}}),
+ 'created a collection with an unknown option to indexOptionDefaults');
assert.commandWorked(db.createCollection('idxOptions', {indexOptionDefaults: {}}),
'should have been able to specify an empty object for indexOptionDefaults');
assert(db.idxOptions.drop());
@@ -99,9 +99,7 @@ assert.commandFailed(
var alternateStorageEngine =
db.serverBuildInfo().storageEngines.find(engine => engine !== storageEngineName);
if (alternateStorageEngine) {
- var indexOptions = {
- storageEngine: {[alternateStorageEngine]: {}}
- };
+ var indexOptions = {storageEngine: {[alternateStorageEngine]: {}}};
assert.commandWorked(db.createCollection('idxOptions', {indexOptionDefaults: indexOptions}),
'should have been able to configure a non-active storage engine');
assert(db.idxOptions.drop());
diff --git a/jstests/core/apitest_dbcollection.js b/jstests/core/apitest_dbcollection.js
index 8f0129319f7..ac30128b90e 100644
--- a/jstests/core/apitest_dbcollection.js
+++ b/jstests/core/apitest_dbcollection.js
@@ -214,9 +214,7 @@ assert.eq(0, db.getCollection("test_db").getIndexes().length, "24");
}
// indexDetailsKey - show indexDetails results for this index key only.
- var indexKey = {
- a: 1
- };
+ var indexKey = {a: 1};
var indexName = getIndexName(indexKey);
checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
diff --git a/jstests/core/apply_ops1.js b/jstests/core/apply_ops1.js
index 8a19caa9f23..96840f125e9 100644
--- a/jstests/core/apply_ops1.js
+++ b/jstests/core/apply_ops1.js
@@ -156,10 +156,7 @@
assert.eq(1, t.find().count(), "Duplicate insert failed");
assert.eq(true, a.results[0], "Bad result value for duplicate insert");
- var o = {
- _id: 5,
- x: 17
- };
+ var o = {_id: 5, x: 17};
assert.eq(o, t.findOne(), "Mismatching document inserted.");
var res = db.runCommand({
@@ -243,8 +240,9 @@
res = t.getIndexes();
assert.eq(1,
res.filter(function(element, index, array) {
- return element.name == 'a_1';
- }).length,
+ return element.name == 'a_1';
+ })
+ .length,
'Foreground index not found in listIndexes result: ' + tojson(res));
// Background indexes are created in the foreground when processed by applyOps.
@@ -265,7 +263,8 @@
res = t.getIndexes();
assert.eq(1,
res.filter(function(element, index, array) {
- return element.name == 'b_1';
- }).length,
+ return element.name == 'b_1';
+ })
+ .length,
'Background index not found in listIndexes result: ' + tojson(res));
})();
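
The assertions above read a.results, the per-operation boolean array that applyOps returns alongside ok. A hedged sketch of a single-insert applyOps call (collection name is illustrative; applyOps requires admin privileges):

    db.ao_demo.drop();
    var a = db.adminCommand(
        {applyOps: [{op: 'i', ns: db.getName() + '.ao_demo', o: {_id: 5, x: 17}}]});
    assert.commandWorked(a);
    assert.eq(true, a.results[0]);  // the one insert op succeeded
    assert.eq({_id: 5, x: 17}, db.ao_demo.findOne());
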
diff --git a/jstests/core/array4.js b/jstests/core/array4.js
index c6fe1599880..fe131a04ca9 100644
--- a/jstests/core/array4.js
+++ b/jstests/core/array4.js
@@ -5,9 +5,7 @@ t.drop();
t.insert({"a": ["1", "2", "3"]});
t.insert({"a": ["2", "1"]});
-var x = {
- 'a.0': /1/
-};
+var x = {'a.0': /1/};
assert.eq(t.count(x), 1);
diff --git a/jstests/core/array_match4.js b/jstests/core/array_match4.js
index 4956fc1d8b2..b4cdec5143a 100644
--- a/jstests/core/array_match4.js
+++ b/jstests/core/array_match4.js
@@ -3,9 +3,7 @@ var t = db.array_match4;
t.drop();
t.save({a: [1, 2]});
-var query_gte = {
- a: {$gte: [1, 2]}
-};
+var query_gte = {a: {$gte: [1, 2]}};
//
// without index
diff --git a/jstests/core/arrayfind7.js b/jstests/core/arrayfind7.js
index f0dc2e2caa8..699f8c9586d 100644
--- a/jstests/core/arrayfind7.js
+++ b/jstests/core/arrayfind7.js
@@ -36,10 +36,9 @@ checkElemMatch({'a.b.c': 1},
{a: {$elemMatch: {b: {$elemMatch: {c: {$gte: 1, $in: [2]}}}}}});
// Two nested $elemMatch expressions.
-checkElemMatch(
- {'a.d.e': 1, 'a.b.c': 1},
- {a: [{b: [{c: 1}], d: [{e: 1}]}]},
- {a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}}, b: {$elemMatch: {c: {$gte: 1}}}}}});
+checkElemMatch({'a.d.e': 1, 'a.b.c': 1}, {a: [{b: [{c: 1}], d: [{e: 1}]}]}, {
+ a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}}, b: {$elemMatch: {c: {$gte: 1}}}}}
+});
// A non $elemMatch expression and a nested $elemMatch expression.
checkElemMatch({'a.x': 1, 'a.b.c': 1},
diff --git a/jstests/core/arrayfind8.js b/jstests/core/arrayfind8.js
index d322229a298..a687351b554 100644
--- a/jstests/core/arrayfind8.js
+++ b/jstests/core/arrayfind8.js
@@ -77,20 +77,14 @@ function checkQuery(subQuery, bothMatch, elemMatch, nonElemMatch, additionalCons
// Construct standard and elemMatch queries from subQuery.
firstSubQueryKey = Object.keySet(subQuery)[0];
if (firstSubQueryKey[0] == '$') {
- standardQuery = {
- $and: [{a: subQuery}, additionalConstraints]
- };
+ standardQuery = {$and: [{a: subQuery}, additionalConstraints]};
} else {
// If the subQuery contains a field rather than operators, append to the 'a' field.
modifiedSubQuery = {};
modifiedSubQuery['a.' + firstSubQueryKey] = subQuery[firstSubQueryKey];
- standardQuery = {
- $and: [modifiedSubQuery, additionalConstraints]
- };
+ standardQuery = {$and: [modifiedSubQuery, additionalConstraints]};
}
- elemMatchQuery = {
- $and: [{a: {$elemMatch: subQuery}}, additionalConstraints]
- };
+ elemMatchQuery = {$and: [{a: {$elemMatch: subQuery}}, additionalConstraints]};
debug(elemMatchQuery);
function maySave(aValue) {
diff --git a/jstests/core/basic3.js b/jstests/core/basic3.js
index ec0b48ec0cf..2fa26627bf9 100644
--- a/jstests/core/basic3.js
+++ b/jstests/core/basic3.js
@@ -35,7 +35,6 @@ t.update({"a": 0}, {$set: {"c.c": 1}});
t.update({"a": 0}, {$inc: {"c.c": 1}});
// edge cases
-assert.throws(doBadUpdate,
- [{a: 0}, {"": {"b.b": 1}}],
- "must deny '' embedded '.' in key of update");
+assert.throws(
+ doBadUpdate, [{a: 0}, {"": {"b.b": 1}}], "must deny '' embedded '.' in key of update");
t.update({"a": 0}, {});
diff --git a/jstests/core/basic9.js b/jstests/core/basic9.js
index bebaeb54740..6d368f46e3b 100644
--- a/jstests/core/basic9.js
+++ b/jstests/core/basic9.js
@@ -14,6 +14,5 @@ t.save({foo$foo: 5});
t.save({foo$: 5});
assert.throws(doBadSave, [{$foo: 5}], "key names aren't allowed to start with $ doesn't work");
-assert.throws(doBadSave,
- [{x: {$foo: 5}}],
- "embedded key names aren't allowed to start with $ doesn't work");
+assert.throws(
+ doBadSave, [{x: {$foo: 5}}], "embedded key names aren't allowed to start with $ doesn't work");
diff --git a/jstests/core/batch_size.js b/jstests/core/batch_size.js
index eca41e412ba..3dfbe0face6 100644
--- a/jstests/core/batch_size.js
+++ b/jstests/core/batch_size.js
@@ -95,12 +95,7 @@ while (bigStr.length < 1000000) {
// Insert enough documents to exceed the 32 MB in-memory sort limit.
for (var i = 0; i < 40; i++) {
- var doc = {
- x: 1,
- y: 1,
- z: i,
- big: bigStr
- };
+ var doc = {x: 1, y: 1, z: i, big: bigStr};
t.insert(doc);
}
diff --git a/jstests/core/big_object1.js b/jstests/core/big_object1.js
index 017fcdc9756..8029cbb868f 100644
--- a/jstests/core/big_object1.js
+++ b/jstests/core/big_object1.js
@@ -11,10 +11,7 @@ if (db.adminCommand("buildinfo").bits == 64) {
x = 0;
while (true) {
var result;
- n = {
- _id: x,
- a: []
- };
+ n = {_id: x, a: []};
for (i = 0; i < 14 + x; i++)
n.a.push(s);
try {
diff --git a/jstests/core/bittest.js b/jstests/core/bittest.js
index 45559d8f505..0a54215ee60 100644
--- a/jstests/core/bittest.js
+++ b/jstests/core/bittest.js
@@ -144,14 +144,13 @@
assertQueryCorrect({a: {$bitsAnyClear: BinData(0, "////////////////////////////")}}, 3);
// Tests with multiple predicates.
- assertQueryCorrect(
- {
- a: {
- $bitsAllSet: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA"),
- $bitsAllClear: BinData(0, "//yf////////////////////////")
- }
- },
- 1);
+ assertQueryCorrect({
+ a: {
+ $bitsAllSet: BinData(0, "AANgAAAAAAAAAAAAAAAAAAAAAAAA"),
+ $bitsAllClear: BinData(0, "//yf////////////////////////")
+ }
+ },
+ 1);
coll.drop();
})();
\ No newline at end of file
diff --git a/jstests/core/capped6.js b/jstests/core/capped6.js
index d7b8a60985a..e94e7ea44e8 100644
--- a/jstests/core/capped6.js
+++ b/jstests/core/capped6.js
@@ -38,9 +38,7 @@
var c = "";
for (i = 0; i < maxDocuments; ++i, c += "-") {
// The a values are strings of increasing length.
- valueArray[i] = {
- a: c
- };
+ valueArray[i] = {a: c};
}
if (shouldReverse) {
valueArray.reverse();
diff --git a/jstests/core/collation_shell_helpers.js b/jstests/core/collation_shell_helpers.js
index 99dda6047b1..e9308960164 100644
--- a/jstests/core/collation_shell_helpers.js
+++ b/jstests/core/collation_shell_helpers.js
@@ -47,49 +47,46 @@
db.createCollection("collation_shell_helpers", {collation: {locale: "fr_CA"}}));
var collectionInfos = db.getCollectionInfos({name: "collation_shell_helpers"});
assert.eq(collectionInfos.length, 1);
- assert.eq(collectionInfos[0].options.collation,
- {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true
- });
+ assert.eq(collectionInfos[0].options.collation, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true
+ });
// Ensure that an index with no collation inherits the collection-default collation.
assert.commandWorked(coll.ensureIndex({a: 1}));
- assertIndexHasCollation({a: 1},
- {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true
- });
+ assertIndexHasCollation({a: 1}, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true
+ });
// Ensure that an index which specifies an overriding collation does not use the collection
// default.
assert.commandWorked(coll.ensureIndex({b: 1}, {collation: {locale: "en_US"}}));
- assertIndexHasCollation({b: 1},
- {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false
- });
+ assertIndexHasCollation({b: 1}, {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false
+ });
coll.drop();
@@ -106,58 +103,54 @@
assert.commandFailed(coll.ensureIndex({a: 1}, {collation: {locale: "en", strength: 99}}));
assert.commandWorked(coll.ensureIndex({a: 1}, {collation: {locale: "en_US"}}));
- assertIndexHasCollation({a: 1},
- {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false
- });
+ assertIndexHasCollation({a: 1}, {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false
+ });
assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: "en_US"}}));
- assertIndexHasCollation({b: 1},
- {
- locale: "en_US",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: false
- });
+ assertIndexHasCollation({b: 1}, {
+ locale: "en_US",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: false
+ });
assert.commandWorked(coll.createIndexes([{c: 1}, {d: 1}], {collation: {locale: "fr_CA"}}));
- assertIndexHasCollation({c: 1},
- {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true
- });
- assertIndexHasCollation({d: 1},
- {
- locale: "fr_CA",
- caseLevel: false,
- caseFirst: "off",
- strength: 3,
- numericOrdering: false,
- alternate: "non-ignorable",
- maxVariable: "punct",
- normalization: false,
- backwards: true
- });
+ assertIndexHasCollation({c: 1}, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true
+ });
+ assertIndexHasCollation({d: 1}, {
+ locale: "fr_CA",
+ caseLevel: false,
+ caseFirst: "off",
+ strength: 3,
+ numericOrdering: false,
+ alternate: "non-ignorable",
+ maxVariable: "punct",
+ normalization: false,
+ backwards: true
+ });
// TODO SERVER-23791: Test that queries with matching collations can use these indices, and that
// the indices contain collator-generated comparison keys rather than the verbatim indexed
@@ -172,21 +165,8 @@
assert.writeOK(coll.insert({_id: 2, str: "bar"}));
// Aggregation.
- assert.eq(2,
- coll.aggregate([],
- {
- collation : {
- locale:
- "fr"
- }
- }).itcount());
- assert.commandWorked(coll.explain().aggregate([],
- {
- collation : {
- locale:
- "fr"
- }
- }));
+ assert.eq(2, coll.aggregate([], {collation: {locale: "fr"}}).itcount());
+ assert.commandWorked(coll.explain().aggregate([], {collation: {locale: "fr"}}));
// Count command.
assert.eq(0, coll.find({str: "FOO"}).count());
@@ -236,28 +216,24 @@
assert.commandWorked(coll.find().collation({locale: "fr"}).explain());
// findAndModify.
- assert.eq({_id: 2, str: "baz"},
- coll.findAndModify({
- query: {str: "bar"},
- update: {$set: {str: "baz"}}, new: true,
- collation: {locale: "fr"}
- }));
- assert.commandWorked(coll.explain().findAndModify({
+ assert.eq({_id: 2, str: "baz"}, coll.findAndModify({
query: {str: "bar"},
- update: {$set: {str: "baz"}}, new: true,
+ update: {$set: {str: "baz"}},
+ new: true,
collation: {locale: "fr"}
}));
+ assert.commandWorked(coll.explain().findAndModify(
+ {query: {str: "bar"}, update: {$set: {str: "baz"}}, new: true, collation: {locale: "fr"}}));
// Group.
- assert.eq([{str: "foo", count: 1}, {str: "baz", count: 1}],
- coll.group({
- key: {str: 1},
- initial: {count: 0},
- reduce: function(curr, result) {
- result.count += 1;
- },
- collation: {locale: "fr"}
- }));
+ assert.eq([{str: "foo", count: 1}, {str: "baz", count: 1}], coll.group({
+ key: {str: 1},
+ initial: {count: 0},
+ reduce: function(curr, result) {
+ result.count += 1;
+ },
+ collation: {locale: "fr"}
+ }));
assert.commandWorked(coll.explain().group({
key: {str: 1},
initial: {count: 0},
@@ -323,79 +299,95 @@
// String field not indexed.
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
assert.eq(0,
- assert.commandWorked(db.runCommand({
- geoNear: coll.getName(),
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- query: {str: "ABC"}
- })).results.length);
+ assert
+ .commandWorked(db.runCommand({
+ geoNear: coll.getName(),
+ near: {type: "Point", coordinates: [0, 0]},
+ spherical: true,
+ query: {str: "ABC"}
+ }))
+ .results.length);
assert.eq(1,
- assert.commandWorked(db.runCommand({
- geoNear: coll.getName(),
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- query: {str: "ABC"},
- collation: {locale: "en_US", strength: 2}
- })).results.length);
+ assert
+ .commandWorked(db.runCommand({
+ geoNear: coll.getName(),
+ near: {type: "Point", coordinates: [0, 0]},
+ spherical: true,
+ query: {str: "ABC"},
+ collation: {locale: "en_US", strength: 2}
+ }))
+ .results.length);
// String field indexed without collation.
assert.commandWorked(coll.dropIndexes());
assert.commandWorked(coll.ensureIndex({geo: "2dsphere", str: 1}));
assert.eq(0,
- assert.commandWorked(db.runCommand({
- geoNear: coll.getName(),
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- query: {str: "ABC"}
- })).results.length);
+ assert
+ .commandWorked(db.runCommand({
+ geoNear: coll.getName(),
+ near: {type: "Point", coordinates: [0, 0]},
+ spherical: true,
+ query: {str: "ABC"}
+ }))
+ .results.length);
assert.eq(1,
- assert.commandWorked(db.runCommand({
- geoNear: coll.getName(),
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- query: {str: "ABC"},
- collation: {locale: "en_US", strength: 2}
- })).results.length);
+ assert
+ .commandWorked(db.runCommand({
+ geoNear: coll.getName(),
+ near: {type: "Point", coordinates: [0, 0]},
+ spherical: true,
+ query: {str: "ABC"},
+ collation: {locale: "en_US", strength: 2}
+ }))
+ .results.length);
// String field indexed with non-matching collation.
assert.commandWorked(coll.dropIndexes());
assert.commandWorked(
coll.ensureIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 3}}));
assert.eq(0,
- assert.commandWorked(db.runCommand({
- geoNear: coll.getName(),
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- query: {str: "ABC"}
- })).results.length);
+ assert
+ .commandWorked(db.runCommand({
+ geoNear: coll.getName(),
+ near: {type: "Point", coordinates: [0, 0]},
+ spherical: true,
+ query: {str: "ABC"}
+ }))
+ .results.length);
assert.eq(1,
- assert.commandWorked(db.runCommand({
- geoNear: coll.getName(),
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- query: {str: "ABC"},
- collation: {locale: "en_US", strength: 2}
- })).results.length);
+ assert
+ .commandWorked(db.runCommand({
+ geoNear: coll.getName(),
+ near: {type: "Point", coordinates: [0, 0]},
+ spherical: true,
+ query: {str: "ABC"},
+ collation: {locale: "en_US", strength: 2}
+ }))
+ .results.length);
// String field indexed with matching collation.
assert.commandWorked(coll.dropIndexes());
assert.commandWorked(
coll.ensureIndex({geo: "2dsphere", str: 1}, {collation: {locale: "en_US", strength: 2}}));
assert.eq(0,
- assert.commandWorked(db.runCommand({
- geoNear: coll.getName(),
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- query: {str: "ABC"}
- })).results.length);
+ assert
+ .commandWorked(db.runCommand({
+ geoNear: coll.getName(),
+ near: {type: "Point", coordinates: [0, 0]},
+ spherical: true,
+ query: {str: "ABC"}
+ }))
+ .results.length);
assert.eq(1,
- assert.commandWorked(db.runCommand({
- geoNear: coll.getName(),
- near: {type: "Point", coordinates: [0, 0]},
- spherical: true,
- query: {str: "ABC"},
- collation: {locale: "en_US", strength: 2}
- })).results.length);
+ assert
+ .commandWorked(db.runCommand({
+ geoNear: coll.getName(),
+ near: {type: "Point", coordinates: [0, 0]},
+ spherical: true,
+ query: {str: "ABC"},
+ collation: {locale: "en_US", strength: 2}
+ }))
+ .results.length);
coll.drop();
@@ -407,14 +399,15 @@
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
assert.eq(0,
coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- }).itcount());
+ str: "ABC",
+ geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
+ })
+ .itcount());
assert.eq(1,
coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
+ str: "ABC",
+ geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
+ })
.collation({locale: "en_US", strength: 2})
.itcount());
@@ -423,14 +416,15 @@
assert.commandWorked(coll.ensureIndex({geo: "2dsphere", str: 1}));
assert.eq(0,
coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- }).itcount());
+ str: "ABC",
+ geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
+ })
+ .itcount());
assert.eq(1,
coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
+ str: "ABC",
+ geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
+ })
.collation({locale: "en_US", strength: 2})
.itcount());
@@ -440,14 +434,15 @@
{collation: {locale: "en_US", strength: 3}}));
assert.eq(0,
coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- }).itcount());
+ str: "ABC",
+ geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
+ })
+ .itcount());
assert.eq(1,
coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
+ str: "ABC",
+ geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
+ })
.collation({locale: "en_US", strength: 2})
.itcount());
@@ -457,14 +452,15 @@
{collation: {locale: "en_US", strength: 2}}));
assert.eq(0,
coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- }).itcount());
+ str: "ABC",
+ geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
+ })
+ .itcount());
assert.eq(1,
coll.find({
- str: "ABC",
- geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
- })
+ str: "ABC",
+ geo: {$nearSphere: {$geometry: {type: "Point", coordinates: [0, 0]}}}
+ })
.collation({locale: "en_US", strength: 2})
.itcount());
@@ -684,11 +680,8 @@
} else {
assert.throws(function() {
coll.bulkWrite([{
- replaceOne: {
- filter: {str: "foo"},
- replacement: {str: "bar"},
- collation: {locale: "fr"}
- }
+ replaceOne:
+ {filter: {str: "foo"}, replacement: {str: "bar"}, collation: {locale: "fr"}}
}]);
});
}
diff --git a/jstests/core/commands_that_do_not_write_do_not_accept_wc.js b/jstests/core/commands_that_do_not_write_do_not_accept_wc.js
index 3faab189bcb..ef5f8762a42 100644
--- a/jstests/core/commands_that_do_not_write_do_not_accept_wc.js
+++ b/jstests/core/commands_that_do_not_write_do_not_accept_wc.js
@@ -22,9 +22,7 @@
});
},
reduce: function(key, values) {
- return {
- count: values.length
- };
+ return {count: values.length};
},
out: {inline: 1}
});
diff --git a/jstests/core/connection_status.js b/jstests/core/connection_status.js
index 728e3d8a131..47aab3f0f43 100644
--- a/jstests/core/connection_status.js
+++ b/jstests/core/connection_status.js
@@ -70,14 +70,8 @@
}
function test(userName) {
- var user = {
- user: userName,
- db: dbName
- };
- var role = {
- role: "root",
- db: "admin"
- };
+ var user = {user: userName, db: dbName};
+ var role = {role: "root", db: "admin"};
myDB.createUser({user: userName, pwd: "weak password", roles: [role]});
myDB.auth(userName, "weak password");
diff --git a/jstests/core/constructors.js b/jstests/core/constructors.js
index 814766ee2c3..34e6cbed9a6 100644
--- a/jstests/core/constructors.js
+++ b/jstests/core/constructors.js
@@ -12,10 +12,7 @@ function addConstructorsWithNew(constructorList) {
// We use slice(0) here to make a copy of our lists
var validWithNew = valid.concat(valid.slice(0).map(prependNew));
var invalidWithNew = invalid.concat(invalid.slice(0).map(prependNew));
- return {
- "valid": validWithNew,
- "invalid": invalidWithNew
- };
+ return {"valid": validWithNew, "invalid": invalidWithNew};
}
function clientEvalConstructorTest(constructorList) {
@@ -142,12 +139,22 @@ var dbpointerConstructors = {
};
var objectidConstructors = {
- "valid": ['ObjectId()', 'ObjectId("FFFFFFFFFFFFFFFFFFFFFFFF")', ],
- "invalid": ['ObjectId(5)', 'ObjectId("FFFFFFFFFFFFFFFFFFFFFFFQ")', ]
+ "valid": [
+ 'ObjectId()',
+ 'ObjectId("FFFFFFFFFFFFFFFFFFFFFFFF")',
+ ],
+ "invalid": [
+ 'ObjectId(5)',
+ 'ObjectId("FFFFFFFFFFFFFFFFFFFFFFFQ")',
+ ]
};
var timestampConstructors = {
- "valid": ['Timestamp()', 'Timestamp(0,0)', 'Timestamp(1.0,1.0)', ],
+ "valid": [
+ 'Timestamp()',
+ 'Timestamp(0,0)',
+ 'Timestamp(1.0,1.0)',
+ ],
"invalid": [
'Timestamp(0)',
'Timestamp(0,0,0)',
@@ -161,7 +168,9 @@ var timestampConstructors = {
};
var bindataConstructors = {
- "valid": ['BinData(0,"test")', ],
+ "valid": [
+ 'BinData(0,"test")',
+ ],
"invalid": [
'BinData(0,"test", "test")',
'BinData()',
@@ -178,7 +187,9 @@ var bindataConstructors = {
};
var uuidConstructors = {
- "valid": ['UUID("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")', ],
+ "valid": [
+ 'UUID("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")',
+ ],
"invalid": [
'UUID("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 0)',
'UUID()',
@@ -195,7 +206,9 @@ var uuidConstructors = {
};
var md5Constructors = {
- "valid": ['MD5("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")', ],
+ "valid": [
+ 'MD5("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")',
+ ],
"invalid": [
'MD5("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", 0)',
'MD5()',
@@ -238,7 +251,13 @@ var hexdataConstructors = {
};
var dateConstructors = {
- "valid": ['Date()', 'Date(0)', 'Date(0,0)', 'Date(0,0,0)', 'Date("foo")', ],
+ "valid": [
+ 'Date()',
+ 'Date(0)',
+ 'Date(0,0)',
+ 'Date(0,0,0)',
+ 'Date("foo")',
+ ],
"invalid": []
};
diff --git a/jstests/core/convert_to_capped_nonexistant.js b/jstests/core/convert_to_capped_nonexistant.js
index a09ce553788..08996087da7 100644
--- a/jstests/core/convert_to_capped_nonexistant.js
+++ b/jstests/core/convert_to_capped_nonexistant.js
@@ -7,9 +7,8 @@ testDb.dropDatabase();
// Database does not exist here
var result = testDb.runCommand({convertToCapped: 'foo', size: 1024});
assert.eq(result.ok, 0, "converting a nonexistent to capped worked and should not have");
-assert.eq(result.code,
- 26,
- "converting a nonexistent database to capped failed, but code has changed");
+assert.eq(
+ result.code, 26, "converting a nonexistent database to capped failed, but code has changed");
assert.eq(result.errmsg,
"database convert_to_capped_nonexistent not found",
"converting a nonexistent to capped failed, but message has changed");
@@ -19,9 +18,8 @@ testDb.coll.insert({});
var result = testDb.runCommand({convertToCapped: 'foo', size: 1024});
assert.eq(result.ok, 0, "converting a nonexistent to capped worked and should not have");
-assert.eq(result.code,
- 26,
- "converting a nonexistent collection to capped failed, but code has changed");
+assert.eq(
+ result.code, 26, "converting a nonexistent collection to capped failed, but code has changed");
assert.eq(result.errmsg,
"source collection convert_to_capped_nonexistent.foo does not exist",
"converting a nonexistent to capped failed, but message has changed");
diff --git a/jstests/core/count11.js b/jstests/core/count11.js
index 4ce218bfc43..aeda1e4b1cf 100644
--- a/jstests/core/count11.js
+++ b/jstests/core/count11.js
@@ -5,12 +5,8 @@ var t = db.count11;
t.drop();
-var validQuery = {
- a: 1
-};
-var invalidQuery = {
- a: {$invalid: 1}
-};
+var validQuery = {a: 1};
+var invalidQuery = {a: {$invalid: 1}};
// Query non-existing collection with empty query.
assert.eq(0, t.find().count());
diff --git a/jstests/core/create_indexes.js b/jstests/core/create_indexes.js
index 3e069d7478e..a6cdb99ea89 100644
--- a/jstests/core/create_indexes.js
+++ b/jstests/core/create_indexes.js
@@ -114,9 +114,11 @@
res = t.runCommand("createIndexes", {indexes: [{key: {"c": 1}, sparse: true, name: "c_1"}]});
assert.eq(6, t.getIndexes().length);
assert.eq(1,
- t.getIndexes().filter(function(z) {
- return z.sparse;
- }).length);
+ t.getIndexes()
+ .filter(function(z) {
+ return z.sparse;
+ })
+ .length);
res = t.runCommand("createIndexes", {indexes: [{key: {"x": "foo"}, name: "x_1"}]});
assert(!res.ok);
diff --git a/jstests/core/crud_api.js b/jstests/core/crud_api.js
index f6cc77025c3..c9dbfb40c85 100644
--- a/jstests/core/crud_api.js
+++ b/jstests/core/crud_api.js
@@ -32,9 +32,7 @@
if (db.getMongo().writeMode() === 'commands') {
assert.docEq(first, second);
} else {
- var overrideModifiedCount = {
- modifiedCount: undefined
- };
+ var overrideModifiedCount = {modifiedCount: undefined};
assert.docEq(Object.merge(first, overrideModifiedCount),
Object.merge(second, overrideModifiedCount));
}
diff --git a/jstests/core/currentop_predicate.js b/jstests/core/currentop_predicate.js
index 98df3c9756e..73b8a006588 100644
--- a/jstests/core/currentop_predicate.js
+++ b/jstests/core/currentop_predicate.js
@@ -1,22 +1,20 @@
// Tests the use of a match predicate with the currentOp command.
(function() {
// Test a $where predicate that matches the currentOp operation we are running.
- var res = db.adminCommand("currentOp",
- {
- $where: function() {
- return true;
- }
- });
+ var res = db.adminCommand("currentOp", {
+ $where: function() {
+ return true;
+ }
+ });
assert.commandWorked(res);
assert.gt(res.inprog.length, 0, tojson(res));
// Test a $where predicate that matches no operations.
- res = db.adminCommand("currentOp",
- {
- $where: function() {
- return false;
- }
- });
+ res = db.adminCommand("currentOp", {
+ $where: function() {
+ return false;
+ }
+ });
assert.commandWorked(res);
assert.eq(res.inprog.length, 0, tojson(res));
})();
diff --git a/jstests/core/cursor5.js b/jstests/core/cursor5.js
index 64158eaccec..0232b92e4cc 100644
--- a/jstests/core/cursor5.js
+++ b/jstests/core/cursor5.js
@@ -24,16 +24,8 @@ function testBoundsWithSubobjectIndexes(db) {
];
for (i = 0; i < z.length; ++i)
r.save(z[i]);
- idx = {
- "a.d": 1,
- a: 1,
- e: -1
- };
- rIdx = {
- "a.d": -1,
- a: -1,
- e: 1
- };
+ idx = {"a.d": 1, a: 1, e: -1};
+ rIdx = {"a.d": -1, a: -1, e: 1};
r.ensureIndex(idx);
checkResults([z[0], z[4], z[2]], r.find({e: 4}).sort(idx).hint(idx));
diff --git a/jstests/core/cursor6.js b/jstests/core/cursor6.js
index f793d37bfe5..d373e952b2d 100644
--- a/jstests/core/cursor6.js
+++ b/jstests/core/cursor6.js
@@ -8,14 +8,9 @@ function eq(one, two) {
function check(indexed) {
var hint;
if (indexed) {
- hint = {
- a: 1,
- b: -1
- };
+ hint = {a: 1, b: -1};
} else {
- hint = {
- $natural: 1
- };
+ hint = {$natural: 1};
}
f = r.find().sort({a: 1, b: 1}).hint(hint);
diff --git a/jstests/core/cursor7.js b/jstests/core/cursor7.js
index 6e77a144ba3..7807dbe22f5 100644
--- a/jstests/core/cursor7.js
+++ b/jstests/core/cursor7.js
@@ -15,14 +15,8 @@ function testMultipleInequalities(db) {
z = [{a: 1, b: 2}, {a: 3, b: 4}, {a: 5, b: 6}, {a: 7, b: 8}];
for (i = 0; i < z.length; ++i)
r.save(z[i]);
- idx = {
- a: 1,
- b: 1
- };
- rIdx = {
- a: -1,
- b: -1
- };
+ idx = {a: 1, b: 1};
+ rIdx = {a: -1, b: -1};
r.ensureIndex(idx);
checkResults([z[2], z[3]], r.find({a: {$gt: 3}}).sort(idx).hint(idx));
@@ -33,35 +27,27 @@ function testMultipleInequalities(db) {
checkResults([z[2]], r.find({a: {$gt: 3, $lt: 7}}).sort(rIdx).hint(idx));
checkResults([z[2]], r.find({a: {$gt: 3, $lt: 7, $lte: 5}}).sort(rIdx).hint(idx));
- checkResults([z[1], z[2]],
- r.find({
- a: {$gt: 1, $lt: 7, $gte: 3, $lte: 5},
- b: {$gt: 2, $lt: 8, $gte: 4, $lte: 6}
- })
- .sort(idx)
- .hint(idx));
- checkResults([z[2], z[1]],
- r.find({
- a: {$gt: 1, $lt: 7, $gte: 3, $lte: 5},
- b: {$gt: 2, $lt: 8, $gte: 4, $lte: 6}
- })
- .sort(rIdx)
- .hint(idx));
-
- checkResults([z[1], z[2]],
- r.find({
- a: {$gte: 1, $lte: 7, $gt: 2, $lt: 6},
- b: {$gte: 2, $lte: 8, $gt: 3, $lt: 7}
- })
- .sort(idx)
- .hint(idx));
- checkResults([z[2], z[1]],
- r.find({
- a: {$gte: 1, $lte: 7, $gt: 2, $lt: 6},
- b: {$gte: 2, $lte: 8, $gt: 3, $lt: 7}
- })
- .sort(rIdx)
- .hint(idx));
+ checkResults(
+ [z[1], z[2]],
+ r.find({a: {$gt: 1, $lt: 7, $gte: 3, $lte: 5}, b: {$gt: 2, $lt: 8, $gte: 4, $lte: 6}})
+ .sort(idx)
+ .hint(idx));
+ checkResults(
+ [z[2], z[1]],
+ r.find({a: {$gt: 1, $lt: 7, $gte: 3, $lte: 5}, b: {$gt: 2, $lt: 8, $gte: 4, $lte: 6}})
+ .sort(rIdx)
+ .hint(idx));
+
+ checkResults(
+ [z[1], z[2]],
+ r.find({a: {$gte: 1, $lte: 7, $gt: 2, $lt: 6}, b: {$gte: 2, $lte: 8, $gt: 3, $lt: 7}})
+ .sort(idx)
+ .hint(idx));
+ checkResults(
+ [z[2], z[1]],
+ r.find({a: {$gte: 1, $lte: 7, $gt: 2, $lt: 6}, b: {$gte: 2, $lte: 8, $gt: 3, $lt: 7}})
+ .sort(rIdx)
+ .hint(idx));
}
testMultipleInequalities(db);
diff --git a/jstests/core/cursora.js b/jstests/core/cursora.js
index dfd9e28f281..1ee0a6d3e69 100644
--- a/jstests/core/cursora.js
+++ b/jstests/core/cursora.js
@@ -24,11 +24,11 @@ function run(n, atomic) {
try {
start = new Date();
num = t.find(function() {
- num = 2;
- for (var x = 0; x < 1000; x++)
- num += 2;
- return num > 0;
- })
+ num = 2;
+ for (var x = 0; x < 1000; x++)
+ num += 2;
+ return num > 0;
+ })
.sort({_id: -1})
.itcount();
end = new Date();
diff --git a/jstests/core/datasize2.js b/jstests/core/datasize2.js
index 6cb5b9b10d9..eb79b12a6c4 100644
--- a/jstests/core/datasize2.js
+++ b/jstests/core/datasize2.js
@@ -15,12 +15,8 @@
coll.insert({_id: i, s: "asdasdasdasdasdasdasd"});
}
- var dataSizeCommand = {
- "dataSize": "test.foo",
- "keyPattern": {"_id": 1},
- "min": {"_id": 0},
- "max": {"_id": N}
- };
+ var dataSizeCommand =
+ {"dataSize": "test.foo", "keyPattern": {"_id": 1}, "min": {"_id": 0}, "max": {"_id": N}};
assert.eq(N,
db.runCommand(dataSizeCommand).numObjects,
diff --git a/jstests/core/date2.js b/jstests/core/date2.js
index 2980f10bf7a..15993815b14 100644
--- a/jstests/core/date2.js
+++ b/jstests/core/date2.js
@@ -5,9 +5,7 @@ t.drop();
t.ensureIndex({a: 1});
-var obj = {
- a: new Timestamp(0, 1)
-}; // in old versions this was == to new Date(1)
+var obj = {a: new Timestamp(0, 1)}; // in old versions this was == to new Date(1)
t.save(obj);
assert.eq(0, t.find({a: {$gt: new Date(1)}}).itcount());
assert.eq(1, t.find(obj).itcount());
diff --git a/jstests/core/depth_limit.js b/jstests/core/depth_limit.js
index ddb648b4586..4e40c114369 100644
--- a/jstests/core/depth_limit.js
+++ b/jstests/core/depth_limit.js
@@ -17,9 +17,7 @@ function test() {
function objWithDepth(depth) {
var out = 1;
while (depth--) {
- out = {
- o: out
- };
+ out = {o: out};
}
return out;
}
diff --git a/jstests/core/distinct_index1.js b/jstests/core/distinct_index1.js
index 31faecd376e..026fdca4e0a 100644
--- a/jstests/core/distinct_index1.js
+++ b/jstests/core/distinct_index1.js
@@ -11,10 +11,7 @@ function d(k, q) {
}
for (i = 0; i < 1000; i++) {
- o = {
- a: r(i * 5),
- b: r(i)
- };
+ o = {a: r(i * 5), b: r(i)};
t.insert(o);
}
diff --git a/jstests/core/doc_validation_invalid_validators.js b/jstests/core/doc_validation_invalid_validators.js
index aeebae42820..b78b31c0977 100644
--- a/jstests/core/doc_validation_invalid_validators.js
+++ b/jstests/core/doc_validation_invalid_validators.js
@@ -23,8 +23,8 @@
assert.commandFailed(
db.getSiblingDB("admin").createCollection(collName, {validator: {a: {$exists: true}}}));
if (!db.runCommand("isdbgrid").isdbgrid) {
- assert.commandFailed(db.getSiblingDB("local")
- .createCollection(collName, {validator: {a: {$exists: true}}}));
+ assert.commandFailed(
+ db.getSiblingDB("local").createCollection(collName, {validator: {a: {$exists: true}}}));
}
assert.commandFailed(
db.getSiblingDB("config").createCollection(collName, {validator: {a: {$exists: true}}}));
diff --git a/jstests/core/dropdb_race.js b/jstests/core/dropdb_race.js
index b4666ecc3ad..bd5e7e5ddba 100644
--- a/jstests/core/dropdb_race.js
+++ b/jstests/core/dropdb_race.js
@@ -14,7 +14,7 @@ var start = new Date();
for (var pass = 0; pass < 100; pass++) {
if (pass % 2 == 0) {
// sometimes wait for create db first, to vary the timing of things
- var options = ( pass % 4 == 0 )? { writeConcern: { fsync: true }} : undefined;
+ var options = (pass % 4 == 0) ? {writeConcern: {fsync: true}} : undefined;
t.insert({}, options);
}
t.insert({x: 1});
diff --git a/jstests/core/elemMatchProjection.js b/jstests/core/elemMatchProjection.js
index 97d1be0f081..e7aa5194607 100644
--- a/jstests/core/elemMatchProjection.js
+++ b/jstests/core/elemMatchProjection.js
@@ -47,9 +47,8 @@ assert.eq(1,
t.find({group: 3, 'x.a': 2}, {'x.$': 1}).toArray()[0].x.length,
"single object match (array length match)");
-assert.eq(2,
- t.find({group: 3, 'x.a': 1}, {'x.$': 1}).toArray()[0].x[0].b,
- "single object match first");
+assert.eq(
+ 2, t.find({group: 3, 'x.a': 1}, {'x.$': 1}).toArray()[0].x[0].b, "single object match first");
assert.eq(undefined,
t.find({group: 3, 'x.a': 2}, {_id: 0, 'x.$': 1}).toArray()[0]._id,
@@ -149,9 +148,8 @@ if (false) {
//
// SERVER-2238: $elemMatch projections
//
-assert.eq(-6,
- t.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x[0].a,
- "single object match");
+assert.eq(
+ -6, t.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x[0].a, "single object match");
assert.eq(1,
t.find({group: 4}, {x: {$elemMatch: {a: -6}}}).toArray()[0].x.length,
@@ -184,9 +182,8 @@ assert.eq([1],
// but this may become a user assertion, since a single element of an array can't match more than
// one value
-assert.eq([1],
- t.find({group: 1}, {x: {$elemMatch: {$all: [1]}}}).toArray()[0].x,
- "$in number match");
+assert.eq(
+ [1], t.find({group: 1}, {x: {$elemMatch: {$all: [1]}}}).toArray()[0].x, "$in number match");
assert.eq([{a: 'string', b: date1}],
t.find({group: 6}, {x: {$elemMatch: {a: 'string'}}}).toArray()[0].x,
@@ -212,10 +209,10 @@ assert.eq([{a: 2, c: 3}],
t.find({group: 3}, {x: {$elemMatch: {a: {$mod: [2, 0]}}}}).toArray()[0].x,
"mixed object match on mod");
-assert.eq({"x": [{"a": 1, "b": 2}], "y": [{"c": 3, "d": 4}]},
- t.find({group: 10}, {_id: 0, x: {$elemMatch: {a: 1}}, y: {$elemMatch: {c: 3}}})
- .toArray()[0],
- "multiple $elemMatch on unique fields 1");
+assert.eq(
+ {"x": [{"a": 1, "b": 2}], "y": [{"c": 3, "d": 4}]},
+ t.find({group: 10}, {_id: 0, x: {$elemMatch: {a: 1}}, y: {$elemMatch: {c: 3}}}).toArray()[0],
+ "multiple $elemMatch on unique fields 1");
assert.eq({"x": [{"y": [{"a": 1, "b": 2}, {"a": 3, "b": 4}]}]},
t.find({group: 8}, {_id: 0, x: {$elemMatch: {y: {$elemMatch: {a: 3}}}}}).toArray()[0],
diff --git a/jstests/core/eval0.js b/jstests/core/eval0.js
index a0c93da2cab..57a60e77244 100644
--- a/jstests/core/eval0.js
+++ b/jstests/core/eval0.js
@@ -4,13 +4,9 @@ assert.eq(17,
return 11 + 6;
}),
"A");
-assert.eq(17,
- db.eval(
- function(x) {
- return 10 + x;
- },
- 7),
- "B");
+assert.eq(17, db.eval(function(x) {
+ return 10 + x;
+}, 7), "B");
// check that functions in system.js work
db.system.js.insert({
diff --git a/jstests/core/eval_mr.js b/jstests/core/eval_mr.js
index 84036b1e0d5..4a3dc8dad6c 100644
--- a/jstests/core/eval_mr.js
+++ b/jstests/core/eval_mr.js
@@ -6,16 +6,15 @@
assert.writeOK(db.eval_mr.insert({val: 1}));
assert.writeOK(db.eval_mr.insert({val: 2}));
var runBasicMapReduce = function() {
- return db.eval_mr.runCommand("mapReduce",
- {
- map: function() {
- emit(0, this.val);
- },
- reduce: function(id, values) {
- return Array.sum(values);
- },
- out: {replace: "eval_mr_out"}
- });
+ return db.eval_mr.runCommand("mapReduce", {
+ map: function() {
+ emit(0, this.val);
+ },
+ reduce: function(id, values) {
+ return Array.sum(values);
+ },
+ out: {replace: "eval_mr_out"}
+ });
};
assert.commandWorked(runBasicMapReduce());
assert.eq(3, db.eval_mr_out.findOne().value);
diff --git a/jstests/core/evalg.js b/jstests/core/evalg.js
index 570464cbce2..18503659217 100644
--- a/jstests/core/evalg.js
+++ b/jstests/core/evalg.js
@@ -3,10 +3,9 @@ db.evalg.drop();
for (var i = 0; i < 102; ++i) {
db.evalg.insert({});
}
-assert.eq(102,
- db.eval(function() {
- var cursor = db.evalg.aggregate();
- assert(cursor.hasNext());
- assert.eq(101, cursor.objsLeftInBatch());
- return cursor.itcount();
- }));
+assert.eq(102, db.eval(function() {
+ var cursor = db.evalg.aggregate();
+ assert(cursor.hasNext());
+ assert.eq(101, cursor.objsLeftInBatch());
+ return cursor.itcount();
+}));
diff --git a/jstests/core/exists4.js b/jstests/core/exists4.js
index 097a3462da9..2979a60f276 100644
--- a/jstests/core/exists4.js
+++ b/jstests/core/exists4.js
@@ -34,12 +34,10 @@ assert.eq(
3,
t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: {$exists: true}})
.count());
-assert.eq(2,
- t.find({
- date: new Date("08/27/2010"),
- country_code: {$exists: true},
- user_id: {$exists: false}
- }).count());
-assert.eq(2,
- t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: null})
- .count());
+assert.eq(
+ 2,
+ t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: {$exists: false}})
+ .count());
+assert.eq(
+ 2,
+ t.find({date: new Date("08/27/2010"), country_code: {$exists: true}, user_id: null}).count());
diff --git a/jstests/core/existsa.js b/jstests/core/existsa.js
index 45adb5d3172..e9430b489a3 100644
--- a/jstests/core/existsa.js
+++ b/jstests/core/existsa.js
@@ -38,9 +38,7 @@ function assertExists(query, expectedExists) {
// An $exists:true predicate generates no index filters. Add another predicate on the index key
// to trigger use of the index.
andClause = {};
- andClause[indexKeyField] = {
- $ne: null
- };
+ andClause[indexKeyField] = {$ne: null};
Object.extend(query, {$and: [andClause]});
assert.eq(expectedExists, t.count(query));
assert.eq(expectedExists, hintedCount(query));
@@ -52,9 +50,7 @@ function assertExistsUnindexed(query, expectedExists) {
assert.eq(expectedExists, t.count(query));
// Even with another predicate on the index key, the sparse index is disallowed.
andClause = {};
- andClause[indexKeyField] = {
- $ne: null
- };
+ andClause[indexKeyField] = {$ne: null};
Object.extend(query, {$and: [andClause]});
assert.eq(expectedExists, t.count(query));
assert.eq(expectedExists, hintedCount(query));
diff --git a/jstests/core/explain_distinct.js b/jstests/core/explain_distinct.js
index bc5b3635c95..57e5f55e065 100644
--- a/jstests/core/explain_distinct.js
+++ b/jstests/core/explain_distinct.js
@@ -10,10 +10,7 @@
var coll = db[collName];
function runDistinctExplain(collection, keyString, query) {
- var distinctCmd = {
- distinct: collection.getName(),
- key: keyString
- };
+ var distinctCmd = {distinct: collection.getName(), key: keyString};
if (typeof query !== 'undefined') {
distinctCmd.query = query;
diff --git a/jstests/core/explain_find.js b/jstests/core/explain_find.js
index 820e6dffbcd..87174e99a62 100644
--- a/jstests/core/explain_find.js
+++ b/jstests/core/explain_find.js
@@ -10,8 +10,8 @@ for (var i = 0; i < 10; i++) {
t.insert({_id: i, a: i});
}
-var explain = db.runCommand(
- {explain: {find: collName, filter: {a: {$lte: 2}}}, verbosity: "executionStats"});
+var explain =
+ db.runCommand({explain: {find: collName, filter: {a: {$lte: 2}}}, verbosity: "executionStats"});
printjson(explain);
assert.commandWorked(explain);
assert.eq(3, explain.executionStats.nReturned);
diff --git a/jstests/core/explain_find_and_modify.js b/jstests/core/explain_find_and_modify.js
index 346e7029cd1..1d30486ecb4 100644
--- a/jstests/core/explain_find_and_modify.js
+++ b/jstests/core/explain_find_and_modify.js
@@ -13,12 +13,8 @@
var t = db.getCollection(cName);
// Different types of findAndModify explain requests.
- var explainRemove = {
- explain: {findAndModify: cName, remove: true, query: {_id: 0}}
- };
- var explainUpdate = {
- explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}}
- };
+ var explainRemove = {explain: {findAndModify: cName, remove: true, query: {_id: 0}}};
+ var explainUpdate = {explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}}};
var explainUpsert = {
explain: {findAndModify: cName, update: {$inc: {i: 1}}, query: {_id: 0}, upsert: true}
};
@@ -60,39 +56,24 @@
assert.commandFailed(db.runCommand({remove: true, new: true}));
// 4. Explaining findAndModify should not modify any contents of the collection.
- var onlyDoc = {
- _id: 0,
- i: 1
- };
+ var onlyDoc = {_id: 0, i: 1};
assert.writeOK(t.insert(onlyDoc));
// Explaining a delete should not delete anything.
- var matchingRemoveCmd = {
- findAndModify: cName,
- remove: true,
- query: {_id: onlyDoc._id}
- };
+ var matchingRemoveCmd = {findAndModify: cName, remove: true, query: {_id: onlyDoc._id}};
var res = db.runCommand({explain: matchingRemoveCmd});
assert.commandWorked(res);
assert.eq(t.find().itcount(), 1, "Explaining a remove should not remove any documents.");
// Explaining an update should not update anything.
- var matchingUpdateCmd = {
- findAndModify: cName,
- update: {x: "x"},
- query: {_id: onlyDoc._id}
- };
+ var matchingUpdateCmd = {findAndModify: cName, update: {x: "x"}, query: {_id: onlyDoc._id}};
var res = db.runCommand({explain: matchingUpdateCmd});
assert.commandWorked(res);
assert.eq(t.findOne(), onlyDoc, "Explaining an update should not update any documents.");
// Explaining an upsert should not insert anything.
- var matchingUpsertCmd = {
- findAndModify: cName,
- update: {x: "x"},
- query: {_id: "non-match"},
- upsert: true
- };
+ var matchingUpsertCmd =
+ {findAndModify: cName, update: {x: "x"}, query: {_id: "non-match"}, upsert: true};
var res = db.runCommand({explain: matchingUpsertCmd});
assert.commandWorked(res);
assert.eq(t.find().itcount(), 1, "Explaining an upsert should not insert any documents.");
@@ -273,23 +254,21 @@
function assertExplainResultsMatch(explainOut, expectedMatches, preMsg, currentPath) {
// This is only used recursively, to keep track of where we are in the document.
var isRootLevel = typeof currentPath === "undefined";
- Object.keys(expectedMatches)
- .forEach(function(key) {
- var totalFieldName = isRootLevel ? key : currentPath + "." + key;
- assert(explainOut.hasOwnProperty(key),
- preMsg + "Explain's output does not have a value for " + key);
- if (typeof expectedMatches[key] === "object") {
- // Sub-doc, recurse to match on it's fields
- assertExplainResultsMatch(
- explainOut[key], expectedMatches[key], preMsg, totalFieldName);
- } else {
- assert.eq(explainOut[key],
- expectedMatches[key],
- preMsg + "Explain's " + totalFieldName + " (" + explainOut[key] +
- ")" + " does not match expected value (" + expectedMatches[key] +
- ").");
- }
- });
+ Object.keys(expectedMatches).forEach(function(key) {
+ var totalFieldName = isRootLevel ? key : currentPath + "." + key;
+ assert(explainOut.hasOwnProperty(key),
+ preMsg + "Explain's output does not have a value for " + key);
+ if (typeof expectedMatches[key] === "object") {
+            // Sub-doc, recurse to match on its fields
+ assertExplainResultsMatch(
+ explainOut[key], expectedMatches[key], preMsg, totalFieldName);
+ } else {
+ assert.eq(explainOut[key],
+ expectedMatches[key],
+ preMsg + "Explain's " + totalFieldName + " (" + explainOut[key] + ")" +
+ " does not match expected value (" + expectedMatches[key] + ").");
+ }
+ });
}
/**
diff --git a/jstests/core/explain_multi_plan.js b/jstests/core/explain_multi_plan.js
index f74078c717d..693237d9edc 100644
--- a/jstests/core/explain_multi_plan.js
+++ b/jstests/core/explain_multi_plan.js
@@ -49,13 +49,12 @@
});
assert.doesNotThrow(function() {
- coll.explain("allPlansExecution")
- .group({
- key: {a: 1},
- cond: {a: {$gte: 1}},
- reduce: function(curr, result) {},
- initial: {}
- });
+ coll.explain("allPlansExecution").group({
+ key: {a: 1},
+ cond: {a: {$gte: 1}},
+ reduce: function(curr, result) {},
+ initial: {}
+ });
});
// SERVER-21376: Make sure the 'rejectedPlans' field is filled in appropriately.
diff --git a/jstests/core/find4.js b/jstests/core/find4.js
index 7a5ebf79578..204e7c511e2 100644
--- a/jstests/core/find4.js
+++ b/jstests/core/find4.js
@@ -23,9 +23,11 @@ t.drop();
t.save({a: 1, b: 1});
t.save({a: 2, b: 2});
assert.eq("1-1,2-2",
- t.find().map(function(z) {
- return z.a + "-" + z.b;
- }).toString());
+ t.find()
+ .map(function(z) {
+ return z.a + "-" + z.b;
+ })
+ .toString());
assert.eq("1-undefined,2-undefined",
t.find({}, {a: 1})
.map(function(z) {
diff --git a/jstests/core/find_and_modify.js b/jstests/core/find_and_modify.js
index cf2f8804d9e..394b618f109 100644
--- a/jstests/core/find_and_modify.js
+++ b/jstests/core/find_and_modify.js
@@ -69,7 +69,8 @@ var cmdRes = db.runCommand({
query: {_id: "miss"},
update: {$inc: {y: 1}},
fields: {foo: {$pop: ["bar"]}},
- upsert: true, new: true
+ upsert: true,
+ new: true
});
assert.commandFailed(cmdRes);
@@ -81,7 +82,8 @@ cmdRes = db.runCommand({
query: {_id: "found"},
update: {$inc: {y: 1}},
fields: {foo: {$pop: ["bar"]}},
- upsert: true, new: true
+ upsert: true,
+ new: true
});
assert.commandFailed(cmdRes);
@@ -90,7 +92,8 @@ cmdRes = db.runCommand({
findAndModify: t.getName(),
query: {_id: "found"},
update: {$inc: {y: 1}},
- fields: {foo: {$pop: ["bar"]}}, new: true
+ fields: {foo: {$pop: ["bar"]}},
+ new: true
});
assert.commandFailed(cmdRes);
@@ -128,7 +131,8 @@ cmdRes = db.runCommand({
findAndModify: t.getName(),
query: {_id: "missagain"},
update: {$inc: {y: 1}},
- upsert: true, new: true
+ upsert: true,
+ new: true
});
assert.commandWorked(cmdRes);
assert("value" in cmdRes);
diff --git a/jstests/core/find_and_modify_server6865.js b/jstests/core/find_and_modify_server6865.js
index b38c0b1bee4..3583da42412 100644
--- a/jstests/core/find_and_modify_server6865.js
+++ b/jstests/core/find_and_modify_server6865.js
@@ -69,66 +69,60 @@
{_id: 42, c: 4});
// Simple query that uses $elemMatch in the projection.
- testFAMWorked(
- {
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {query: {_id: 42}, fields: {b: {$elemMatch: {value: 2}}}, remove: true},
- {_id: 42, b: [{name: 'second', value: 2}]});
+ testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+ },
+ {query: {_id: 42}, fields: {b: {$elemMatch: {value: 2}}}, remove: true},
+ {_id: 42, b: [{name: 'second', value: 2}]});
// Query on an array of values while using a positional projection.
- testFAMWorked({_id: 42, a: [1, 2]},
- {query: {a: 2}, fields: {'a.$': 1}, remove: true},
- {_id: 42, a: [2]});
+ testFAMWorked(
+ {_id: 42, a: [1, 2]}, {query: {a: 2}, fields: {'a.$': 1}, remove: true}, {_id: 42, a: [2]});
// Query on an array of objects while using a positional projection.
- testFAMWorked(
- {
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {query: {_id: 42, 'b.name': 'third'}, fields: {'b.$': 1}, remove: true},
- {_id: 42, b: [{name: 'third', value: 3}]});
+ testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+ },
+ {query: {_id: 42, 'b.name': 'third'}, fields: {'b.$': 1}, remove: true},
+ {_id: 42, b: [{name: 'third', value: 3}]});
// Query on an array of objects while using a position projection.
// Verifies that the projection {'b.$.value': 1} is treated the
// same as {'b.$': 1}.
- testFAMWorked(
- {
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {query: {_id: 42, 'b.name': 'third'}, fields: {'b.$.value': 1}, remove: true},
- {_id: 42, b: [{name: 'third', value: 3}]});
+ testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+ },
+ {query: {_id: 42, 'b.name': 'third'}, fields: {'b.$.value': 1}, remove: true},
+ {_id: 42, b: [{name: 'third', value: 3}]});
// Query on an array of objects using $elemMatch while using an inclusion projection.
- testFAMWorked(
- {
- _id: 42,
- a: 5,
- b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
- },
- {
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, a: 5},
- remove: true
- },
- {a: 5});
+ testFAMWorked({
+ _id: 42,
+ a: 5,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
+ },
+ {
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, a: 5},
+ remove: true
+ },
+ {a: 5});
// Query on an array of objects using $elemMatch while using the positional
// operator in the projection.
- testFAMWorked(
- {
- _id: 42,
- b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
- },
- {
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, 'b.$': 1},
- remove: true
- },
- {b: [{name: 'john', value: 1}]});
+ testFAMWorked({
+ _id: 42,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
+ },
+ {
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, 'b.$': 1},
+ remove: true
+ },
+ {b: [{name: 'john', value: 1}]});
//
// Update operations with new=false
@@ -145,17 +139,17 @@
{_id: 42, c: 4});
// Simple query that uses $elemMatch in the projection.
- testFAMWorked(
- {
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {
- query: {_id: 42},
- fields: {b: {$elemMatch: {value: 2}}},
- update: {$set: {name: '2nd'}}, new: false
- },
- {_id: 42, b: [{name: 'second', value: 2}]});
+ testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+ },
+ {
+ query: {_id: 42},
+ fields: {b: {$elemMatch: {value: 2}}},
+ update: {$set: {name: '2nd'}},
+ new: false
+ },
+ {_id: 42, b: [{name: 'second', value: 2}]});
// Query on an array of values while using a positional projection.
testFAMWorked(
@@ -164,17 +158,17 @@
{_id: 42, a: [2]});
// Query on an array of objects while using a positional projection.
- testFAMWorked(
- {
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {
- query: {_id: 42, 'b.name': 'third'},
- fields: {'b.$': 1},
- update: {$set: {'b.$.kind': 'xyz'}}, new: false
- },
- {_id: 42, b: [{name: 'third', value: 3}]});
+ testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+ },
+ {
+ query: {_id: 42, 'b.name': 'third'},
+ fields: {'b.$': 1},
+ update: {$set: {'b.$.kind': 'xyz'}},
+ new: false
+ },
+ {_id: 42, b: [{name: 'third', value: 3}]});
// Query on an array of objects while using $elemMatch in the projection,
// where the matched array element is modified.
@@ -184,32 +178,32 @@
{_id: 1, a: [{x: 1, y: 1}]});
// Query on an array of objects using $elemMatch while using an inclusion projection.
- testFAMWorked(
- {
- _id: 42,
- a: 5,
- b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
- },
- {
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, a: 5},
- update: {$inc: {a: 6}}, new: false
- },
- {a: 5});
+ testFAMWorked({
+ _id: 42,
+ a: 5,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
+ },
+ {
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, a: 5},
+ update: {$inc: {a: 6}},
+ new: false
+ },
+ {a: 5});
// Query on an array of objects using $elemMatch while using the positional
// operator in the projection.
- testFAMWorked(
- {
- _id: 42,
- b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
- },
- {
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, 'b.$': 1},
- update: {$set: {name: 'james'}}, new: false
- },
- {b: [{name: 'john', value: 1}]});
+ testFAMWorked({
+ _id: 42,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
+ },
+ {
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, 'b.$': 1},
+ update: {$set: {name: 'james'}},
+ new: false
+ },
+ {b: [{name: 'john', value: 1}]});
//
// Update operations with new=true
@@ -226,17 +220,17 @@
{_id: 42, c: 5});
// Simple query that uses $elemMatch in the projection.
- testFAMWorked(
- {
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {
- query: {_id: 42},
- fields: {b: {$elemMatch: {value: 2}}},
- update: {$set: {'b.1.name': '2nd'}}, new: true
- },
- {_id: 42, b: [{name: '2nd', value: 2}]});
+ testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+ },
+ {
+ query: {_id: 42},
+ fields: {b: {$elemMatch: {value: 2}}},
+ update: {$set: {'b.1.name': '2nd'}},
+ new: true
+ },
+ {_id: 42, b: [{name: '2nd', value: 2}]});
// Query on an array of values while using a positional projection.
testFAMFailed(
@@ -244,29 +238,29 @@
{query: {a: 2}, fields: {'a.$': 1}, update: {$set: {'b.kind': 'xyz'}}, new: true});
// Query on an array of objects while using a positional projection.
- testFAMFailed(
- {
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {
- query: {_id: 42, 'b.name': 'third'},
- fields: {'b.$': 1},
- update: {$set: {'b.$.kind': 'xyz'}}, new: true
- });
+ testFAMFailed({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+ },
+ {
+ query: {_id: 42, 'b.name': 'third'},
+ fields: {'b.$': 1},
+ update: {$set: {'b.$.kind': 'xyz'}},
+ new: true
+ });
// Query on an array of objects while using $elemMatch in the projection.
- testFAMWorked(
- {
- _id: 42,
- b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
- },
- {
- query: {_id: 42},
- fields: {b: {$elemMatch: {value: 2}}, c: 1},
- update: {$set: {c: 'xyz'}}, new: true
- },
- {_id: 42, b: [{name: 'second', value: 2}], c: 'xyz'});
+ testFAMWorked({
+ _id: 42,
+ b: [{name: 'first', value: 1}, {name: 'second', value: 2}, {name: 'third', value: 3}]
+ },
+ {
+ query: {_id: 42},
+ fields: {b: {$elemMatch: {value: 2}}, c: 1},
+ update: {$set: {c: 'xyz'}},
+ new: true
+ },
+ {_id: 42, b: [{name: 'second', value: 2}], c: 'xyz'});
// Query on an array of objects while using $elemMatch in the projection,
// where the matched array element is modified.
@@ -276,30 +270,30 @@
{_id: 1, a: [{x: 1, y: 2}]});
// Query on an array of objects using $elemMatch while using an inclusion projection.
- testFAMWorked(
- {
- _id: 42,
- a: 5,
- b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
- },
- {
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, a: 5},
- update: {$inc: {a: 6}}, new: true
- },
- {a: 11});
+ testFAMWorked({
+ _id: 42,
+ a: 5,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
+ },
+ {
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, a: 5},
+ update: {$inc: {a: 6}},
+ new: true
+ },
+ {a: 11});
// Query on an array of objects using $elemMatch while using the positional
// operator in the projection.
- testFAMFailed(
- {
- _id: 42,
- b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
- },
- {
- query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
- fields: {_id: 0, 'b.$': 1},
- update: {$set: {name: 'james'}}, new: true
- });
+ testFAMFailed({
+ _id: 42,
+ b: [{name: 'john', value: 1}, {name: 'jess', value: 2}, {name: 'jeff', value: 3}]
+ },
+ {
+ query: {b: {$elemMatch: {name: 'john', value: {$lt: 2}}}},
+ fields: {_id: 0, 'b.$': 1},
+ update: {$set: {name: 'james'}},
+ new: true
+ });
})();
diff --git a/jstests/core/find_dedup.js b/jstests/core/find_dedup.js
index a9160df7562..7a489bd185c 100644
--- a/jstests/core/find_dedup.js
+++ b/jstests/core/find_dedup.js
@@ -20,15 +20,14 @@ t.save({_id: 2, a: 1, b: 1});
t.save({_id: 3, a: 2, b: 2});
t.save({_id: 4, a: 3, b: 3});
t.save({_id: 5, a: 3, b: 3});
-checkDedup(
- {
- $or: [
- {a: {$gte: 0, $lte: 2}, b: {$gte: 0, $lte: 2}},
- {a: {$gte: 1, $lte: 3}, b: {$gte: 1, $lte: 3}},
- {a: {$gte: 1, $lte: 4}, b: {$gte: 1, $lte: 4}}
- ]
- },
- [1, 2, 3, 4, 5]);
+checkDedup({
+ $or: [
+ {a: {$gte: 0, $lte: 2}, b: {$gte: 0, $lte: 2}},
+ {a: {$gte: 1, $lte: 3}, b: {$gte: 1, $lte: 3}},
+ {a: {$gte: 1, $lte: 4}, b: {$gte: 1, $lte: 4}}
+ ]
+},
+ [1, 2, 3, 4, 5]);
// Deduping multikey
t.drop();
diff --git a/jstests/core/find_getmore_bsonsize.js b/jstests/core/find_getmore_bsonsize.js
index 904a9c33ab0..fdad2b1f1d6 100644
--- a/jstests/core/find_getmore_bsonsize.js
+++ b/jstests/core/find_getmore_bsonsize.js
@@ -74,10 +74,7 @@
bigStr += bigStr;
}
bigStr = bigStr.substring(0, (16 * oneMB) - 32);
- var maxSizeDoc = {
- _id: 0,
- padding: bigStr
- };
+ var maxSizeDoc = {_id: 0, padding: bigStr};
assert.eq(Object.bsonsize(maxSizeDoc), 16 * oneMB);
assert.writeOK(coll.insert(maxSizeDoc));
diff --git a/jstests/core/fts_blog.js b/jstests/core/fts_blog.js
index 9f35836ef37..68cca6fa3a5 100644
--- a/jstests/core/fts_blog.js
+++ b/jstests/core/fts_blog.js
@@ -9,8 +9,9 @@ t.save({_id: 3, title: "knives are Fun", text: "this is a new blog i am writing.
// specify weights if you want a field to be more meaningull
t.ensureIndex({"title": "text", text: "text"}, {weights: {title: 10}});
-res = t.find({"$text": {"$search": "blog"}}, {score: {"$meta": "textScore"}})
- .sort({score: {"$meta": "textScore"}});
+res = t.find({"$text": {"$search": "blog"}}, {score: {"$meta": "textScore"}}).sort({
+ score: {"$meta": "textScore"}
+});
assert.eq(3, res.length());
assert.eq(1, res[0]._id);
diff --git a/jstests/core/fts_blogwild.js b/jstests/core/fts_blogwild.js
index dad96cd2836..377e4529f3e 100644
--- a/jstests/core/fts_blogwild.js
+++ b/jstests/core/fts_blogwild.js
@@ -24,17 +24,20 @@ t.dropIndex("dummy_text");
assert.eq(1, t.getIndexKeys().length, "C1");
t.ensureIndex({dummy: "text"}, {weights: {"$**": 1, title: 2}});
-res = t.find({"$text": {"$search": "write"}}, {score: {"$meta": "textScore"}})
- .sort({score: {"$meta": "textScore"}});
+res = t.find({"$text": {"$search": "write"}}, {score: {"$meta": "textScore"}}).sort({
+ score: {"$meta": "textScore"}
+});
assert.eq(3, res.length(), "C2");
assert.eq(3, res[0]._id, "C3");
-res = t.find({"$text": {"$search": "blog"}}, {score: {"$meta": "textScore"}})
- .sort({score: {"$meta": "textScore"}});
+res = t.find({"$text": {"$search": "blog"}}, {score: {"$meta": "textScore"}}).sort({
+ score: {"$meta": "textScore"}
+});
assert.eq(3, res.length(), "D1");
assert.eq(1, res[0]._id, "D2");
-res = t.find({"$text": {"$search": "eliot"}}, {score: {"$meta": "textScore"}})
- .sort({score: {"$meta": "textScore"}});
+res = t.find({"$text": {"$search": "eliot"}}, {score: {"$meta": "textScore"}}).sort({
+ score: {"$meta": "textScore"}
+});
assert.eq(2, res.length(), "E1");
assert.eq(3, res[0]._id, "E2");
diff --git a/jstests/core/fts_diacritic_and_casesensitive.js b/jstests/core/fts_diacritic_and_casesensitive.js
index 397b6033f88..d5c15034dbc 100644
--- a/jstests/core/fts_diacritic_and_casesensitive.js
+++ b/jstests/core/fts_diacritic_and_casesensitive.js
@@ -19,8 +19,8 @@ load('jstests/libs/fts.js');
assert.eq(
[0],
queryIDS(coll, "próximo vôo à", null, {$diacriticSensitive: true, $caseSensitive: true}));
- assert.eq(
- [0], queryIDS(coll, "Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ assert.eq([0],
+ queryIDS(coll, "Atlântico", null, {$diacriticSensitive: true, $caseSensitive: true}));
assert.eq(
[0],
queryIDS(coll, "\"próximo\"", null, {$diacriticSensitive: true, $caseSensitive: true}));
@@ -47,8 +47,7 @@ load('jstests/libs/fts.js');
queryIDS(coll, "proximo vôo à", null, {$diacriticSensitive: true, $caseSensitive: true}));
assert.eq(
[],
- queryIDS(
- coll, "À -próximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true}));
+ queryIDS(coll, "À -próximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true}));
assert.eq(
[],
queryIDS(coll, "à proximo -Vôo", null, {$diacriticSensitive: true, $caseSensitive: true}));
diff --git a/jstests/core/fts_diacriticsensitive.js b/jstests/core/fts_diacriticsensitive.js
index 29e7784a785..e21d5360051 100644
--- a/jstests/core/fts_diacriticsensitive.js
+++ b/jstests/core/fts_diacriticsensitive.js
@@ -24,8 +24,7 @@ load('jstests/libs/fts.js');
assert.eq([0], queryIDS(coll, "\"próximo\"", null, {$diacriticSensitive: true}));
assert.eq([0], queryIDS(coll, "\"põe\" atlântico", null, {$diacriticSensitive: true}));
assert.eq(
- [0],
- queryIDS(coll, "\"próximo vôo\" \"único médico\"", null, {$diacriticSensitive: true}));
+ [0], queryIDS(coll, "\"próximo vôo\" \"único médico\"", null, {$diacriticSensitive: true}));
assert.eq(
[0],
queryIDS(coll, "\"próximo vôo\" -\"unico médico\"", null, {$diacriticSensitive: true}));
@@ -38,7 +37,6 @@ load('jstests/libs/fts.js');
assert.eq([], queryIDS(coll, "mo vô", null, {$diacriticSensitive: true}));
assert.eq([], queryIDS(coll, "\"unico medico\"", null, {$diacriticSensitive: true}));
assert.eq(
- [],
- queryIDS(coll, "\"próximo vôo\" -\"único médico\"", null, {$diacriticSensitive: true}));
+ [], queryIDS(coll, "\"próximo vôo\" -\"único médico\"", null, {$diacriticSensitive: true}));
})();
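queryIDS above comes from jstests/libs/fts.js; the behavior under test can be shown directly. A minimal sketch, assuming a scratch collection and a Portuguese text index (the index options here are an assumption, not taken from this patch):

// Hedged sketch: with $diacriticSensitive, accents must match exactly.
var dia = db.fts_diacritic_demo;  // assumed name
dia.drop();
assert.writeOK(dia.insert({_id: 0, body: "vôo próximo"}));
dia.ensureIndex({body: "text"}, {default_language: "portuguese"});
assert.eq(1, dia.find({$text: {$search: "vôo", $diacriticSensitive: true}}).itcount());
assert.eq(0, dia.find({$text: {$search: "voo", $diacriticSensitive: true}}).itcount());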
diff --git a/jstests/core/fts_index.js b/jstests/core/fts_index.js
index 8cda28096d2..ab953a88624 100644
--- a/jstests/core/fts_index.js
+++ b/jstests/core/fts_index.js
@@ -17,48 +17,60 @@ coll.getDB().createCollection(coll.getName());
// Spec passes text-specific index validation.
assert.commandWorked(coll.ensureIndex({a: "text"}, {name: indexName, default_language: "spanish"}));
assert.eq(1,
- coll.getIndexes().filter(function(z) {
- return z.name == indexName;
- }).length);
+ coll.getIndexes()
+ .filter(function(z) {
+ return z.name == indexName;
+ })
+ .length);
coll.dropIndexes();
// Spec fails text-specific index validation ("spanglish" unrecognized).
-assert.commandFailed(coll.ensureIndex({a: "text"},
- {name: indexName, default_language: "spanglish"}));
+assert.commandFailed(
+ coll.ensureIndex({a: "text"}, {name: indexName, default_language: "spanglish"}));
assert.eq(0,
- coll.getIndexes().filter(function(z) {
- return z.name == indexName;
- }).length);
+ coll.getIndexes()
+ .filter(function(z) {
+ return z.name == indexName;
+ })
+ .length);
coll.dropIndexes();
// Spec passes general index validation.
assert.commandWorked(coll.ensureIndex({"$**": "text"}, {name: indexName}));
assert.eq(1,
- coll.getIndexes().filter(function(z) {
- return z.name == indexName;
- }).length);
+ coll.getIndexes()
+ .filter(function(z) {
+ return z.name == indexName;
+ })
+ .length);
coll.dropIndexes();
// Spec fails general index validation ("a.$**" invalid field name for key).
assert.commandFailed(coll.ensureIndex({"a.$**": "text"}, {name: indexName}));
assert.eq(0,
- coll.getIndexes().filter(function(z) {
- return z.name == indexName;
- }).length);
+ coll.getIndexes()
+ .filter(function(z) {
+ return z.name == indexName;
+ })
+ .length);
coll.dropIndexes();
// SERVER-19519 Spec fails if '_fts' is specified on a non-text index.
assert.commandFailed(coll.ensureIndex({_fts: 1}, {name: indexName}));
assert.eq(0,
- coll.getIndexes().filter(function(z) {
- return z.name == indexName;
- }).length);
+ coll.getIndexes()
+ .filter(function(z) {
+ return z.name == indexName;
+ })
+ .length);
coll.dropIndexes();
assert.commandFailed(coll.ensureIndex({_fts: "text"}, {name: indexName}));
assert.eq(0,
- coll.getIndexes().filter(function(z) {
- return z.name == indexName;
- }).length);
+ coll.getIndexes()
+ .filter(function(z) {
+ return z.name == indexName;
+ })
+ .length);
coll.dropIndexes();
//
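The reflowed assertions above repeat one filter-and-count pattern; as a reading aid, it is equivalent to a small helper. indexCount is a hypothetical name, not something this patch introduces:

// Hedged sketch: count a collection's indexes with a given name.
function indexCount(coll, name) {
    return coll.getIndexes().filter(function(spec) {
        return spec.name === name;
    }).length;
}
// e.g. assert.eq(1, indexCount(coll, indexName)) after a successful ensureIndex.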
diff --git a/jstests/core/fts_phrase.js b/jstests/core/fts_phrase.js
index d36df8aaeb0..d93e53b0898 100644
--- a/jstests/core/fts_phrase.js
+++ b/jstests/core/fts_phrase.js
@@ -8,14 +8,16 @@ t.save({_id: 3, title: "knives are Fun", text: "this is a new blog i am writing.
t.ensureIndex({"title": "text", text: "text"}, {weights: {title: 10}});
-res = t.find({"$text": {"$search": "blog write"}}, {score: {"$meta": "textScore"}})
- .sort({score: {"$meta": "textScore"}});
+res = t.find({"$text": {"$search": "blog write"}}, {score: {"$meta": "textScore"}}).sort({
+ score: {"$meta": "textScore"}
+});
assert.eq(3, res.length());
assert.eq(1, res[0]._id);
assert(res[0].score > (res[1].score * 2), tojson(res.toArray()));
-res = t.find({"$text": {"$search": "write blog"}}, {score: {"$meta": "textScore"}})
- .sort({score: {"$meta": "textScore"}});
+res = t.find({"$text": {"$search": "write blog"}}, {score: {"$meta": "textScore"}}).sort({
+ score: {"$meta": "textScore"}
+});
assert.eq(3, res.length());
assert.eq(1, res[0]._id);
assert(res[0].score > (res[1].score * 2), tojson(res.toArray()));
diff --git a/jstests/core/fts_projection.js b/jstests/core/fts_projection.js
index 50fe4755fc3..6cb1471505b 100644
--- a/jstests/core/fts_projection.js
+++ b/jstests/core/fts_projection.js
@@ -11,8 +11,10 @@ t.insert({_id: 2, a: "irrelevant content"});
t.ensureIndex({a: "text"});
// Project the text score.
-var results = t.find({$text: {$search: "textual content -irrelevant"}},
- {_idCopy: 0, score: {$meta: "textScore"}}).toArray();
+var results = t.find({$text: {$search: "textual content -irrelevant"}}, {
+ _idCopy: 0,
+ score: {$meta: "textScore"}
+ }).toArray();
// printjson(results);
// Scores should exist.
assert.eq(results.length, 2);
@@ -29,8 +31,10 @@ scores[results[1]._id] = results[1].score;
//
// Project text score into 2 fields.
-results = t.find({$text: {$search: "textual content -irrelevant"}},
- {otherScore: {$meta: "textScore"}, score: {$meta: "textScore"}}).toArray();
+results = t.find({$text: {$search: "textual content -irrelevant"}}, {
+ otherScore: {$meta: "textScore"},
+ score: {$meta: "textScore"}
+ }).toArray();
assert.eq(2, results.length);
for (var i = 0; i < results.length; ++i) {
assert.close(scores[results[i]._id], results[i].score);
@@ -41,8 +45,9 @@ for (var i = 0; i < results.length; ++i) {
// Project text score into "x.$" shouldn't crash
assert.throws(function() {
- t.find({$text: {$search: "textual content -irrelevant"}}, {'x.$': {$meta: "textScore"}})
- .toArray();
+ t.find({$text: {$search: "textual content -irrelevant"}}, {
+ 'x.$': {$meta: "textScore"}
+ }).toArray();
});
// TODO: We can't project 'x.y':1 and 'x':1 (yet).
@@ -71,8 +76,10 @@ assert.throws(function() {
// SERVER-12173
// When the $text operator is in $or, it should be evaluated first
-results = t.find({$or: [{$text: {$search: "textual content -irrelevant"}}, {_id: 1}]},
- {_idCopy: 0, score: {$meta: "textScore"}}).toArray();
+results = t.find({$or: [{$text: {$search: "textual content -irrelevant"}}, {_id: 1}]}, {
+ _idCopy: 0,
+ score: {$meta: "textScore"}
+ }).toArray();
printjson(results);
assert.eq(2, results.length);
for (var i = 0; i < results.length; ++i) {
diff --git a/jstests/core/geo10.js b/jstests/core/geo10.js
index 10879fc5d80..640ae67e594 100644
--- a/jstests/core/geo10.js
+++ b/jstests/core/geo10.js
@@ -10,7 +10,13 @@ assert.writeOK(db.geo10.insert({c: [1, 1], t: 1}));
assert.writeOK(db.geo10.insert({c: [3600, 3600], t: 1}));
assert.writeOK(db.geo10.insert({c: [0.001, 0.001], t: 1}));
-printjson(db.geo10.find({
- c: {$within: {$box: [[0.001, 0.001], [Math.pow(2, 40) - 0.001, Math.pow(2, 40) - 0.001]]}},
- t: 1
-}).toArray());
+printjson(
+ db.geo10
+ .find({
+ c: {
+ $within:
+ {$box: [[0.001, 0.001], [Math.pow(2, 40) - 0.001, Math.pow(2, 40) - 0.001]]}
+ },
+ t: 1
+ })
+ .toArray());
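The $within/$box query being reflowed above reduces to a simple pattern. A minimal sketch, with an assumed collection name:

// Hedged sketch: legacy 2d points queried with $within and a $box.
var boxes = db.geo_box_demo;  // assumed name, not part of this patch
boxes.drop();
boxes.ensureIndex({loc: "2d"});
assert.writeOK(boxes.insert({loc: [1, 1]}));
assert.writeOK(boxes.insert({loc: [50, 50]}));
// Only [1, 1] falls inside the box spanning [0, 0] to [10, 10].
assert.eq(1, boxes.find({loc: {$within: {$box: [[0, 0], [10, 10]]}}}).itcount());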
diff --git a/jstests/core/geo3.js b/jstests/core/geo3.js
index da3d8641049..feb93b783cb 100644
--- a/jstests/core/geo3.js
+++ b/jstests/core/geo3.js
@@ -29,9 +29,7 @@ filtered1.results.forEach(function(z) {
function avgA(q, len) {
if (!len)
len = 10;
- var realq = {
- loc: {$near: [50, 50]}
- };
+ var realq = {loc: {$near: [50, 50]}};
if (q)
Object.extend(realq, q);
var as = t.find(realq).limit(len).map(function(z) {
diff --git a/jstests/core/geo9.js b/jstests/core/geo9.js
index 201bee7dfa5..7419615818e 100644
--- a/jstests/core/geo9.js
+++ b/jstests/core/geo9.js
@@ -15,9 +15,7 @@ t.ensureIndex({b: "2d"});
function check(field) {
var q = {};
- q[field] = {
- $near: [11, 11]
- };
+ q[field] = {$near: [11, 11]};
arr = t.find(q).limit(3).map(function(z) {
return Geo.distance([11, 11], z[field]);
});
diff --git a/jstests/core/geo_2d_with_geojson_point.js b/jstests/core/geo_2d_with_geojson_point.js
index aaadf4be333..23592e004f8 100644
--- a/jstests/core/geo_2d_with_geojson_point.js
+++ b/jstests/core/geo_2d_with_geojson_point.js
@@ -6,10 +6,7 @@ var t = db.geo_2d_with_geojson_point;
t.drop();
t.ensureIndex({loc: '2d'});
-var geoJSONPoint = {
- type: 'Point',
- coordinates: [0, 0]
-};
+var geoJSONPoint = {type: 'Point', coordinates: [0, 0]};
print(assert.throws(function() {
t.findOne({loc: {$near: {$geometry: geoJSONPoint}}});
diff --git a/jstests/core/geo_array2.js b/jstests/core/geo_array2.js
index 33aad98930a..6195e038de3 100644
--- a/jstests/core/geo_array2.js
+++ b/jstests/core/geo_array2.js
@@ -40,8 +40,7 @@ for (var t = 0; t < 2; t++) {
// Do near check
var nearResults =
- db.runCommand(
- {geoNear: "geoarray2", near: center, num: count, query: {type: type}})
+ db.runCommand({geoNear: "geoarray2", near: center, num: count, query: {type: type}})
.results;
// printjson( nearResults )
@@ -76,10 +75,11 @@ for (var t = 0; t < 2; t++) {
// Earth Radius from geoconstants.h
var eRad = 6378.1;
- nearResults = db.geoarray2.find({
- loc: {$nearSphere: center, $maxDistance: 500 /* km */ / eRad},
- type: type
- }).toArray();
+ nearResults =
+ db.geoarray2
+ .find(
+ {loc: {$nearSphere: center, $maxDistance: 500 /* km */ / eRad}, type: type})
+ .toArray();
assert.eq(nearResults.length, count);
diff --git a/jstests/core/geo_big_polygon.js b/jstests/core/geo_big_polygon.js
index 6f278c59147..9371e1954b2 100644
--- a/jstests/core/geo_big_polygon.js
+++ b/jstests/core/geo_big_polygon.js
@@ -9,10 +9,7 @@ coll.drop();
coll.getMongo().getDB("admin").runCommand({setParameter: 1, verboseQueryLogging: true});
-var bigCRS = {
- type: "name",
- properties: {name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}
-};
+var bigCRS = {type: "name", properties: {name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}};
var bigPoly20 = {
type: "Polygon",
@@ -36,20 +33,11 @@ var line10 = {
coordinates: [[5.0, 5.0], [5.0, -5.0], [-5.0, -5.0], [-5.0, 5.0], [5.0, 5.0]]
};
-var centerPoint = {
- type: "Point",
- coordinates: [0, 0]
-};
+var centerPoint = {type: "Point", coordinates: [0, 0]};
-var polarPoint = {
- type: "Point",
- coordinates: [85, 85]
-};
+var polarPoint = {type: "Point", coordinates: [85, 85]};
-var lineEquator = {
- type: "LineString",
- coordinates: [[-20, 0], [20, 0]]
-};
+var lineEquator = {type: "LineString", coordinates: [[-20, 0], [20, 0]]};
assert.writeOK(coll.insert({loc: poly10}));
assert.writeOK(coll.insert({loc: line10}));
@@ -100,16 +88,8 @@ assert.commandWorked(coll.ensureIndex({loc: "2dsphere"}));
assert.writeError(coll.insert({_id: "bigPoly10", loc: bigPoly10}));
// Query geometries that don't support big CRS should error out.
-var bigPoint = {
- type: "Point",
- coordinates: [0, 0],
- crs: bigCRS
-};
-var bigLine = {
- type: "LineString",
- coordinates: [[-20, 0], [20, 0]],
- crs: bigCRS
-};
+var bigPoint = {type: "Point", coordinates: [0, 0], crs: bigCRS};
+var bigLine = {type: "LineString", coordinates: [[-20, 0], [20, 0]], crs: bigCRS};
assert.throws(function() {
coll.find({loc: {$geoIntersects: {$geometry: bigPoint}}}).itcount();
diff --git a/jstests/core/geo_big_polygon2.js b/jstests/core/geo_big_polygon2.js
index 46ac327b7e0..2193229990d 100644
--- a/jstests/core/geo_big_polygon2.js
+++ b/jstests/core/geo_big_polygon2.js
@@ -5,23 +5,11 @@
// - Big polygon objects cannot be stored
// Try all different shapes queries against various stored geo points, line & polygons
-var crs84CRS = {
- type: "name",
- properties: {name: "urn:ogc:def:crs:OGC:1.3:CRS84"}
-};
-var epsg4326CRS = {
- type: "name",
- properties: {name: "EPSG:4326"}
-};
-var strictCRS = {
- type: "name",
- properties: {name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}
-};
+var crs84CRS = {type: "name", properties: {name: "urn:ogc:def:crs:OGC:1.3:CRS84"}};
+var epsg4326CRS = {type: "name", properties: {name: "EPSG:4326"}};
+var strictCRS = {type: "name", properties: {name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}};
// invalid CRS name
-var badCRS = {
- type: "name",
- properties: {name: "urn:x-mongodb:crs:invalid:EPSG:4326"}
-};
+var badCRS = {type: "name", properties: {name: "urn:x-mongodb:crs:invalid:EPSG:4326"}};
// helper to generate a line along a longitudinal
function genLonLine(lon, startLat, endLat, latStep) {
@@ -206,8 +194,7 @@ var objects = [
},
{
name: "two points (MultiPoint) but only one in: Shenzhen, Guangdong, China",
- geo:
- {type: "MultiPoint", coordinates: [[114.0538788, 22.5551603], [113.743858, 23.025815]]}
+ geo: {type: "MultiPoint", coordinates: [[114.0538788, 22.5551603], [113.743858, 23.025815]]}
},
{
name: "multi line string: new zealand bays",
@@ -606,9 +593,7 @@ indexes.forEach(function(index) {
// geoWithin query
var docArray = [];
- var q = {
- geo: {$geoWithin: {$geometry: p}}
- };
+ var q = {geo: {$geoWithin: {$geometry: p}}};
// Test query in aggregate
docArray = coll.aggregate({$match: q}).toArray();
assert.eq(p.nW, docArray.length, "aggregate within " + p.name);
@@ -616,9 +601,7 @@ indexes.forEach(function(index) {
assert.eq(p.nW, docArray.length, "within " + p.name);
// geoIntersects query
- q = {
- geo: {$geoIntersects: {$geometry: p}}
- };
+ q = {geo: {$geoIntersects: {$geometry: p}}};
// Test query in aggregate
docArray = coll.aggregate({$match: q}).toArray();
assert.eq(p.nI, docArray.length, "aggregate intersects " + p.name);
diff --git a/jstests/core/geo_big_polygon3.js b/jstests/core/geo_big_polygon3.js
index 049064ebc5b..cd59ed7a2fb 100644
--- a/jstests/core/geo_big_polygon3.js
+++ b/jstests/core/geo_big_polygon3.js
@@ -10,18 +10,9 @@
// MapReduce with a big polygon
// CRS84 & EPSG4326 objects should be retrieved from query with big polygon
-var crs84CRS = {
- type: "name",
- properties: {name: "urn:ogc:def:crs:OGC:1.3:CRS84"}
-};
-var epsg4326CRS = {
- type: "name",
- properties: {name: "EPSG:4326"}
-};
-var strictCRS = {
- type: "name",
- properties: {name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}
-};
+var crs84CRS = {type: "name", properties: {name: "urn:ogc:def:crs:OGC:1.3:CRS84"}};
+var epsg4326CRS = {type: "name", properties: {name: "EPSG:4326"}};
+var strictCRS = {type: "name", properties: {name: "urn:x-mongodb:crs:strictwinding:EPSG:4326"}};
var coll = db.geo_bigpoly_edgecases;
coll.drop();
@@ -125,12 +116,9 @@ objects = [
name: "NYC - Times Square to CitiField to JFK to Times Square - polygon",
geo: {
type: "Polygon",
- coordinates: [[
- [-73.9857, 40.7577],
- [-73.7789, 40.6397],
- [-73.8458, 40.7569],
- [-73.9857, 40.7577]
- ]],
+ coordinates: [
+ [[-73.9857, 40.7577], [-73.7789, 40.6397], [-73.8458, 40.7569], [-73.9857, 40.7577]]
+ ],
crs: strictCRS
}
}
@@ -150,9 +138,8 @@ var poly = {
crs: strictCRS
};
-assert.eq(0,
- coll.count({geo: {$geoWithin: {$geometry: poly}}}),
- "ignore objects with strictCRS within");
+assert.eq(
+ 0, coll.count({geo: {$geoWithin: {$geometry: poly}}}), "ignore objects with strictCRS within");
assert.eq(0,
coll.count({geo: {$geoIntersects: {$geometry: poly}}}),
"ignore objects with strictCRS intersects");
@@ -162,9 +149,8 @@ coll.update({}, {$unset: {"geo.crs": ""}}, {multi: true});
var totalDocs = coll.count();
assert.eq(totalDocs, coll.count({geo: {$geoWithin: {$geometry: poly}}}), "no strictCRS within");
-assert.eq(totalDocs,
- coll.count({geo: {$geoIntersects: {$geometry: poly}}}),
- "no strictCRS intersects");
+assert.eq(
+ totalDocs, coll.count({geo: {$geoIntersects: {$geometry: poly}}}), "no strictCRS intersects");
// Clear collection
coll.remove({});
diff --git a/jstests/core/geo_borders.js b/jstests/core/geo_borders.js
index f0a47339591..f8a94d997dd 100644
--- a/jstests/core/geo_borders.js
+++ b/jstests/core/geo_borders.js
@@ -33,81 +33,75 @@ assert.commandWorked(res);
// ************
// If the bounds are bigger than the box itself, just clip at the borders
-assert.eq(numItems,
- t.find({
- loc: {
- $within: {
- $box: [
- [overallMin - 2 * epsilon, overallMin - 2 * epsilon],
- [overallMax + 2 * epsilon, overallMax + 2 * epsilon]
- ]
- }
- }
- }).count());
+assert.eq(numItems, t.find({
+ loc: {
+ $within: {
+ $box: [
+ [overallMin - 2 * epsilon, overallMin - 2 * epsilon],
+ [overallMax + 2 * epsilon, overallMax + 2 * epsilon]
+ ]
+ }
+ }
+ }).count());
// Check that this also works for bounds where only a single dimension is off-bounds
-assert.eq(numItems - 5,
- t.find({
- loc: {
- $within: {
- $box: [
- [overallMin - 2 * epsilon, overallMin - 0.5 * epsilon],
- [overallMax - epsilon, overallMax - epsilon]
- ]
- }
- }
- }).count());
+assert.eq(numItems - 5, t.find({
+ loc: {
+ $within: {
+ $box: [
+ [overallMin - 2 * epsilon, overallMin - 0.5 * epsilon],
+ [overallMax - epsilon, overallMax - epsilon]
+ ]
+ }
+ }
+ }).count());
// Make sure we can get at least close to the bounds of the index
-assert.eq(numItems,
- t.find({
- loc: {
- $within: {
- $box: [
- [overallMin - epsilon / 2, overallMin - epsilon / 2],
- [overallMax + epsilon / 2, overallMax + epsilon / 2]
- ]
- }
- }
- }).count());
+assert.eq(numItems, t.find({
+ loc: {
+ $within: {
+ $box: [
+ [overallMin - epsilon / 2, overallMin - epsilon / 2],
+ [overallMax + epsilon / 2, overallMax + epsilon / 2]
+ ]
+ }
+ }
+ }).count());
// Make sure we can get at least close to the bounds of the index
-assert.eq(numItems,
- t.find({
- loc: {
- $within: {
- $box: [
- [overallMax + epsilon / 2, overallMax + epsilon / 2],
- [overallMin - epsilon / 2, overallMin - epsilon / 2]
- ]
- }
- }
- }).count());
+assert.eq(numItems, t.find({
+ loc: {
+ $within: {
+ $box: [
+ [overallMax + epsilon / 2, overallMax + epsilon / 2],
+ [overallMin - epsilon / 2, overallMin - epsilon / 2]
+ ]
+ }
+ }
+ }).count());
// Check that swapping min/max has good behavior
-assert.eq(numItems,
- t.find({
- loc: {
- $within: {
- $box: [
- [overallMax + epsilon / 2, overallMax + epsilon / 2],
- [overallMin - epsilon / 2, overallMin - epsilon / 2]
- ]
- }
- }
- }).count());
-
-assert.eq(numItems,
- t.find({
- loc: {
- $within: {
- $box: [
- [overallMax + epsilon / 2, overallMin - epsilon / 2],
- [overallMin - epsilon / 2, overallMax + epsilon / 2]
- ]
- }
- }
- }).count());
+assert.eq(numItems, t.find({
+ loc: {
+ $within: {
+ $box: [
+ [overallMax + epsilon / 2, overallMax + epsilon / 2],
+ [overallMin - epsilon / 2, overallMin - epsilon / 2]
+ ]
+ }
+ }
+ }).count());
+
+assert.eq(numItems, t.find({
+ loc: {
+ $within: {
+ $box: [
+ [overallMax + epsilon / 2, overallMin - epsilon / 2],
+ [overallMin - epsilon / 2, overallMax + epsilon / 2]
+ ]
+ }
+ }
+ }).count());
// **************
// Circle tests
@@ -206,6 +200,6 @@ assert.commandWorked(db.runCommand({geoNear: "borders", near: onBounds}));
// Make sure we can get all nearby points within one step (4 points in top
// corner)
-assert.eq(4,
- db.runCommand({geoNear: "borders", near: offCenter, maxDistance: step * 1.5})
- .results.length);
+assert.eq(
+ 4,
+ db.runCommand({geoNear: "borders", near: offCenter, maxDistance: step * 1.5}).results.length);
diff --git a/jstests/core/geo_box1.js b/jstests/core/geo_box1.js
index 45e9aab9118..b1949063568 100644
--- a/jstests/core/geo_box1.js
+++ b/jstests/core/geo_box1.js
@@ -5,25 +5,25 @@ t.drop();
num = 0;
for (x = 0; x <= 20; x++) {
for (y = 0; y <= 20; y++) {
- o = {
- _id: num++,
- loc: [x, y]
- };
+ o = {_id: num++, loc: [x, y]};
t.save(o);
}
}
t.ensureIndex({loc: "2d"});
-searches = [[[1, 2], [4, 5]], [[1, 1], [2, 2]], [[0, 2], [4, 5]], [[1, 1], [2, 8]], ];
+searches = [
+ [[1, 2], [4, 5]],
+ [[1, 1], [2, 2]],
+ [[0, 2], [4, 5]],
+ [[1, 1], [2, 8]],
+];
for (i = 0; i < searches.length; i++) {
b = searches[i];
// printjson( b );
- q = {
- loc: {$within: {$box: b}}
- };
+ q = {loc: {$within: {$box: b}}};
    numWanted = (1 + b[1][0] - b[0][0]) * (1 + b[1][1] - b[0][1]);
    assert.eq(numWanted, t.find(q).itcount(), "itcount: " + tojson(q));
printjson(t.find(q).explain());
diff --git a/jstests/core/geo_box1_noindex.js b/jstests/core/geo_box1_noindex.js
index 36e932105a6..a953149c59f 100644
--- a/jstests/core/geo_box1_noindex.js
+++ b/jstests/core/geo_box1_noindex.js
@@ -5,21 +5,21 @@ t.drop();
num = 0;
for (x = 0; x <= 20; x++) {
for (y = 0; y <= 20; y++) {
- o = {
- _id: num++,
- loc: [x, y]
- };
+ o = {_id: num++, loc: [x, y]};
t.save(o);
}
}
-searches = [[[1, 2], [4, 5]], [[1, 1], [2, 2]], [[0, 2], [4, 5]], [[1, 1], [2, 8]], ];
+searches = [
+ [[1, 2], [4, 5]],
+ [[1, 1], [2, 2]],
+ [[0, 2], [4, 5]],
+ [[1, 1], [2, 8]],
+];
for (i = 0; i < searches.length; i++) {
b = searches[i];
- q = {
- loc: {$within: {$box: b}}
- };
+ q = {loc: {$within: {$box: b}}};
numWanted = (1 + b[1][0] - b[0][0]) * (1 + b[1][1] - b[0][1]);
assert.eq(numWanted, t.find(q).itcount(), "itcount: " + tojson(q));
printjson(t.find(q).explain());
diff --git a/jstests/core/geo_center_sphere1.js b/jstests/core/geo_center_sphere1.js
index f3b39b552cd..1ee46b03486 100644
--- a/jstests/core/geo_center_sphere1.js
+++ b/jstests/core/geo_center_sphere1.js
@@ -27,10 +27,7 @@ function test(index) {
var bulk = t.initializeUnorderedBulkOp();
for (x = -179; x <= 179; x += skip) {
for (y = -89; y <= 89; y += skip) {
- o = {
- _id: num++,
- loc: [x, y]
- };
+ o = {_id: num++, loc: [x, y]};
bulk.insert(o);
for (i = 0; i < searches.length; i++) {
if (Geo.sphereDistance([x, y], searches[i][0]) <= searches[i][1])
@@ -48,9 +45,7 @@ function test(index) {
for (i = 0; i < searches.length; i++) {
print('------------');
print(tojson(searches[i]) + "\t" + correct[i].length);
- q = {
- loc: {$within: {$centerSphere: searches[i]}}
- };
+ q = {loc: {$within: {$centerSphere: searches[i]}}};
// correct[i].forEach( printjson )
// printjson( q );
diff --git a/jstests/core/geo_center_sphere2.js b/jstests/core/geo_center_sphere2.js
index f3dc465e350..79c69de1ab4 100644
--- a/jstests/core/geo_center_sphere2.js
+++ b/jstests/core/geo_center_sphere2.js
@@ -131,12 +131,12 @@ for (var test = 0; test < numTests; test++) {
// geoNear
results = db.runCommand({
- geoNear: "sphere",
- near: startPoint,
- maxDistance: radius,
- num: 2 * pointsIn,
- spherical: true
- }).results;
+ geoNear: "sphere",
+ near: startPoint,
+ maxDistance: radius,
+ num: 2 * pointsIn,
+ spherical: true
+ }).results;
/*
printjson( results );
diff --git a/jstests/core/geo_circle1.js b/jstests/core/geo_circle1.js
index a679a408b32..c5b6841b9a8 100644
--- a/jstests/core/geo_circle1.js
+++ b/jstests/core/geo_circle1.js
@@ -2,7 +2,12 @@
t = db.geo_circle1;
t.drop();
-searches = [[[5, 5], 3], [[5, 5], 1], [[5, 5], 5], [[0, 5], 5], ];
+searches = [
+ [[5, 5], 3],
+ [[5, 5], 1],
+ [[5, 5], 5],
+ [[0, 5], 5],
+];
correct = searches.map(function(z) {
return [];
});
@@ -11,10 +16,7 @@ num = 0;
for (x = 0; x <= 20; x++) {
for (y = 0; y <= 20; y++) {
- o = {
- _id: num++,
- loc: [x, y]
- };
+ o = {_id: num++, loc: [x, y]};
t.save(o);
for (i = 0; i < searches.length; i++)
if (Geo.distance([x, y], searches[i][0]) <= searches[i][1])
@@ -26,9 +28,7 @@ t.ensureIndex({loc: "2d"});
for (i = 0; i < searches.length; i++) {
// print( tojson( searches[i] ) + "\t" + correct[i].length )
- q = {
- loc: {$within: {$center: searches[i]}}
- };
+ q = {loc: {$within: {$center: searches[i]}}};
// correct[i].forEach( printjson )
// printjson( q );
diff --git a/jstests/core/geo_circle1_noindex.js b/jstests/core/geo_circle1_noindex.js
index 872883dbf74..6c3135855a5 100644
--- a/jstests/core/geo_circle1_noindex.js
+++ b/jstests/core/geo_circle1_noindex.js
@@ -2,7 +2,12 @@
t = db.geo_circle1_noindex;
t.drop();
-searches = [[[5, 5], 3], [[5, 5], 1], [[5, 5], 5], [[0, 5], 5], ];
+searches = [
+ [[5, 5], 3],
+ [[5, 5], 1],
+ [[5, 5], 5],
+ [[0, 5], 5],
+];
correct = searches.map(function(z) {
return [];
});
@@ -11,10 +16,7 @@ num = 0;
for (x = 0; x <= 20; x++) {
for (y = 0; y <= 20; y++) {
- o = {
- _id: num++,
- loc: [x, y]
- };
+ o = {_id: num++, loc: [x, y]};
t.save(o);
for (i = 0; i < searches.length; i++)
if (Geo.distance([x, y], searches[i][0]) <= searches[i][1])
@@ -23,9 +25,7 @@ for (x = 0; x <= 20; x++) {
}
for (i = 0; i < searches.length; i++) {
- q = {
- loc: {$within: {$center: searches[i]}}
- };
+ q = {loc: {$within: {$center: searches[i]}}};
assert.eq(correct[i].length, t.find(q).itcount(), "itcount : " + tojson(searches[i]));
assert.eq(correct[i].length, t.find(q).count(), "count : " + tojson(searches[i]));
}
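geo_circle1.js and this noindex twin walk the same grid; the core predicate is $within with a legacy $center, which works with or without a 2d index. A minimal sketch, assuming a scratch collection:

// Hedged sketch: $center takes [centerPoint, radius] in flat 2d units.
var circles = db.geo_circle_demo;  // assumed name
circles.drop();
assert.writeOK(circles.insert({loc: [5, 5]}));
assert.writeOK(circles.insert({loc: [9, 9]}));
// [9, 9] is sqrt(32) ~= 5.66 units from [5, 5], outside a radius-3 circle.
assert.eq(1, circles.find({loc: {$within: {$center: [[5, 5], 3]}}}).itcount());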
diff --git a/jstests/core/geo_distinct.js b/jstests/core/geo_distinct.js
index 705bf1cc7ce..33ea7dd3461 100644
--- a/jstests/core/geo_distinct.js
+++ b/jstests/core/geo_distinct.js
@@ -60,10 +60,7 @@ for (var i = 0; i < 50; ++i) {
coll.insert({zone: 4, loc: {type: 'Point', coordinates: [10, 10]}});
coll.insert({zone: 5, loc: {type: 'Point', coordinates: [20, 20]}});
}
-var originGeoJSON = {
- type: 'Point',
- coordinates: [0, 0]
-};
+var originGeoJSON = {type: 'Point', coordinates: [0, 0]};
// Test distinct with $nearSphere query predicate.
diff --git a/jstests/core/geo_fiddly_box.js b/jstests/core/geo_fiddly_box.js
index f5cd3ddcc6b..4e33780112d 100644
--- a/jstests/core/geo_fiddly_box.js
+++ b/jstests/core/geo_fiddly_box.js
@@ -19,9 +19,8 @@ t.insert({"loc": [3, -1]});
// OK!
print(t.count());
-assert.eq(7,
- t.count({"loc": {"$within": {"$box": [[2, -2], [46, 2]]}}}),
- "Not all locations found!");
+assert.eq(
+ 7, t.count({"loc": {"$within": {"$box": [[2, -2], [46, 2]]}}}), "Not all locations found!");
// Test normal lookup of a small square of points as a sanity check.
diff --git a/jstests/core/geo_group.js b/jstests/core/geo_group.js
index 9ee5a76b7ea..34ecc2c3a84 100644
--- a/jstests/core/geo_group.js
+++ b/jstests/core/geo_group.js
@@ -20,22 +20,18 @@ assert.eq(t.find({loc: {$near: [56, 8, 10]}}).count(), 81);
// Test basic group that effectively does a count
assert.eq(t.group({
reduce: function(obj, prev) {
- prev.sums = {
- count: prev.sums.count + 1
- };
+ prev.sums = {count: prev.sums.count + 1};
},
initial: {sums: {count: 0}}
}),
- [{"sums": {"count": 10000}}]);
+ [{"sums": {"count": 10000}}]);
// Test basic group + $near that does a count
assert.eq(t.group({
reduce: function(obj, prev) {
- prev.sums = {
- count: prev.sums.count + 1
- };
+ prev.sums = {count: prev.sums.count + 1};
},
initial: {sums: {count: 0}},
cond: {loc: {$near: [56, 8, 10]}}
}),
- [{"sums": {"count": 81}}]);
+ [{"sums": {"count": 81}}]);
diff --git a/jstests/core/geo_haystack1.js b/jstests/core/geo_haystack1.js
index 5abb166a6f9..97e746ccdfa 100644
--- a/jstests/core/geo_haystack1.js
+++ b/jstests/core/geo_haystack1.js
@@ -16,13 +16,12 @@ function distanceTotal(a, arr, f) {
return total;
}
-queries = [{near: [7, 8], maxDistance: 3, search: {z: 3}}, ];
+queries = [
+ {near: [7, 8], maxDistance: 3, search: {z: 3}},
+];
answers = queries.map(function() {
- return {
- totalDistance: 0,
- results: []
- };
+ return {totalDistance: 0, results: []};
});
n = 0;
diff --git a/jstests/core/geo_haystack2.js b/jstests/core/geo_haystack2.js
index cb684239a63..3420feeed1c 100644
--- a/jstests/core/geo_haystack2.js
+++ b/jstests/core/geo_haystack2.js
@@ -16,13 +16,12 @@ function distanceTotal(a, arr, f) {
return total;
}
-queries = [{near: [7, 8], maxDistance: 3, search: {z: 3}}, ];
+queries = [
+ {near: [7, 8], maxDistance: 3, search: {z: 3}},
+];
answers = queries.map(function() {
- return {
- totalDistance: 0,
- results: []
- };
+ return {totalDistance: 0, results: []};
});
n = 0;
diff --git a/jstests/core/geo_invalid_polygon.js b/jstests/core/geo_invalid_polygon.js
index c3d244a504f..0eab7ca5406 100644
--- a/jstests/core/geo_invalid_polygon.js
+++ b/jstests/core/geo_invalid_polygon.js
@@ -5,10 +5,7 @@ t.drop();
// Self-intersecting polygon, triggers
// "Exterior shell of polygon is invalid".
-var geometry = {
- type: "Polygon",
- coordinates: [[[0, 0], [0, 1], [1, 1], [-2, -1], [0, 0]]]
-};
+var geometry = {type: "Polygon", coordinates: [[[0, 0], [0, 1], [1, 1], [-2, -1], [0, 0]]]};
t.insert({_id: 42, geometry: geometry});
var err = t.createIndex({geometry: '2dsphere'});
diff --git a/jstests/core/geo_mapreduce.js b/jstests/core/geo_mapreduce.js
index e15a4911763..ebea034d00c 100644
--- a/jstests/core/geo_mapreduce.js
+++ b/jstests/core/geo_mapreduce.js
@@ -32,9 +32,7 @@ r = function(key, values) {
for (var i = 0; i < values.length; i++) {
total += values[i].apples;
}
- return {
- "apples": total
- };
+ return {"apples": total};
};
// mapreduce without geo query works fine
diff --git a/jstests/core/geo_mapreduce2.js b/jstests/core/geo_mapreduce2.js
index d71eb8ef216..679b6474cc9 100644
--- a/jstests/core/geo_mapreduce2.js
+++ b/jstests/core/geo_mapreduce2.js
@@ -21,19 +21,15 @@ r = function(key, values) {
total += values[i].count;
}
- return {
- count: total
- };
+ return {count: total};
};
try {
- coll.mapReduce(m,
- r,
- {
- out: coll.getName() + "_mr",
- sort: {_id: 1},
- query: {'location': {$within: {$centerSphere: [[10, 20], 0.01]}}}
- });
+ coll.mapReduce(m, r, {
+ out: coll.getName() + "_mr",
+ sort: {_id: 1},
+ query: {'location': {$within: {$centerSphere: [[10, 20], 0.01]}}}
+ });
} catch (e) {
    // This should occur, since we can't do an in-memory sort for mapReduce
diff --git a/jstests/core/geo_mindistance.js b/jstests/core/geo_mindistance.js
index 6a2329bc524..4bbb77db9bf 100644
--- a/jstests/core/geo_mindistance.js
+++ b/jstests/core/geo_mindistance.js
@@ -47,12 +47,7 @@ for (var x = 0; x <= 10; x += 1) {
/* $minDistance is supported for 2dsphere index only, not 2d or geoHaystack. */
t.ensureIndex({loc: "2dsphere"});
-var n_docs = t.count(), geoJSONPoint =
- {
- type: 'Point',
- coordinates: [0, 0]
- },
- legacyPoint = [0, 0];
+var n_docs = t.count(), geoJSONPoint = {type: 'Point', coordinates: [0, 0]}, legacyPoint = [0, 0];
//
// Test $near with GeoJSON point (required for $near with 2dsphere index).
@@ -67,9 +62,10 @@ assert.eq(n_docs - n_docs_within(1400),
"Expected " + (n_docs - n_docs_within(1400)) +
" points $near (0, 0) with $minDistance 1400 km, got " + n_min1400_count);
-var n_bw500_and_1000_count = t.find({
- loc: {$near: {$geometry: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km}}
-}).count();
+var n_bw500_and_1000_count =
+ t.find({
+ loc: {$near: {$geometry: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km}}
+ }).count();
assert.eq(n_docs_within(1000) - n_docs_within(500),
n_bw500_and_1000_count,
@@ -92,12 +88,12 @@ assert.eq(n_docs - n_docs_within(1400),
" points $nearSphere (0, 0) with $minDistance 1400 km, got " + n_min1400_count);
n_bw500_and_1000_count = t.find({
- loc: {
- $nearSphere: legacyPoint,
- $minDistance: metersToRadians(500 * km),
- $maxDistance: metersToRadians(1000 * km)
- }
-}).count();
+ loc: {
+ $nearSphere: legacyPoint,
+ $minDistance: metersToRadians(500 * km),
+ $maxDistance: metersToRadians(1000 * km)
+ }
+ }).count();
assert.eq(n_docs_within(1000) - n_docs_within(500),
n_bw500_and_1000_count,
@@ -118,8 +114,9 @@ assert.eq(n_docs - n_docs_within(1400),
" points $nearSphere (0, 0) with $minDistance 1400 km, got " + n_min1400_count);
n_bw500_and_1000_count =
- t.find({loc: {$nearSphere: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km}})
- .count();
+ t.find({
+ loc: {$nearSphere: geoJSONPoint, $minDistance: 500 * km, $maxDistance: 1000 * km}
+ }).count();
assert.eq(n_docs_within(1000) - n_docs_within(500),
n_bw500_and_1000_count,
diff --git a/jstests/core/geo_mindistance_boundaries.js b/jstests/core/geo_mindistance_boundaries.js
index 6cbae8015e9..7e97732dfd1 100644
--- a/jstests/core/geo_mindistance_boundaries.js
+++ b/jstests/core/geo_mindistance_boundaries.js
@@ -12,11 +12,7 @@ t.ensureIndex({loc: "2dsphere"});
// Useful constants.
//
-var km = 1000, earthRadiusMeters = 6378.1 * km, geoJSONPoint =
- {
- type: 'Point',
- coordinates: [0, 0]
- },
+var km = 1000, earthRadiusMeters = 6378.1 * km, geoJSONPoint = {type: 'Point', coordinates: [0, 0]},
// One degree of longitude at the equator, about 111 km.
degreeInMeters = 2 * Math.PI * earthRadiusMeters / 360, metersEpsilon = Number.MIN_VALUE;
@@ -43,33 +39,37 @@ assert.eq(1,
assert.eq(
1,
- t.find({loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters - metersEpsilon}}})
- .itcount(),
+ t.find({
+ loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters - metersEpsilon}}
+ }).itcount(),
"Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin");
assert.eq(
0,
- t.find({loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters + metersEpsilon}}})
- .itcount(),
+ t.find({
+ loc: {$near: {$geometry: geoJSONPoint, $minDistance: degreeInMeters + metersEpsilon}}
+ }).itcount(),
"Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin");
//
// Test boundary conditions for $nearSphere and GeoJSON, in meters.
//
-assert.eq(1,
- t.find({loc: {$nearSphere: {$geometry: geoJSONPoint, $minDistance: degreeInMeters}}})
- .itcount(),
- "Expected to find (0, 1) within $minDistance 1 degree from origin");
+assert.eq(
+ 1,
+ t.find({loc: {$nearSphere: {$geometry: geoJSONPoint, $minDistance: degreeInMeters}}}).itcount(),
+ "Expected to find (0, 1) within $minDistance 1 degree from origin");
assert.eq(1,
- t.find({loc: {$nearSphere: geoJSONPoint, $minDistance: degreeInMeters - metersEpsilon}})
- .itcount(),
+ t.find({
+ loc: {$nearSphere: geoJSONPoint, $minDistance: degreeInMeters - metersEpsilon}
+ }).itcount(),
"Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin");
assert.eq(0,
- t.find({loc: {$nearSphere: geoJSONPoint, $minDistance: degreeInMeters + metersEpsilon}})
- .itcount(),
+ t.find({
+ loc: {$nearSphere: geoJSONPoint, $minDistance: degreeInMeters + metersEpsilon}
+ }).itcount(),
"Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin");
//
@@ -90,11 +90,13 @@ assert.eq(1,
"Expected to find (0, 1) within $minDistance 1 degree from origin");
assert.eq(1,
- t.find({loc: {$nearSphere: legacyPoint, $minDistance: degreeInRadians - radiansEpsilon}})
- .itcount(),
+ t.find({
+ loc: {$nearSphere: legacyPoint, $minDistance: degreeInRadians - radiansEpsilon}
+ }).itcount(),
"Expected to find (0, 1) within $minDistance (1 degree - epsilon) from origin");
assert.eq(0,
- t.find({loc: {$nearSphere: legacyPoint, $minDistance: degreeInRadians + radiansEpsilon}})
- .itcount(),
+ t.find({
+ loc: {$nearSphere: legacyPoint, $minDistance: degreeInRadians + radiansEpsilon}
+ }).itcount(),
"Expected *not* to find (0, 1) within $minDistance (1 degree + epsilon) from origin");
diff --git a/jstests/core/geo_operator_crs.js b/jstests/core/geo_operator_crs.js
index b9e242309dc..13353f2262d 100644
--- a/jstests/core/geo_operator_crs.js
+++ b/jstests/core/geo_operator_crs.js
@@ -12,15 +12,9 @@ coll.drop();
assert.commandWorked(coll.ensureIndex({geo: "2dsphere"}));
var legacyZeroPt = [0, 0];
-var jsonZeroPt = {
- type: "Point",
- coordinates: [0, 0]
-};
+var jsonZeroPt = {type: "Point", coordinates: [0, 0]};
var legacy90Pt = [90, 0];
-var json90Pt = {
- type: "Point",
- coordinates: [90, 0]
-};
+var json90Pt = {type: "Point", coordinates: [90, 0]};
assert.writeOK(coll.insert({geo: json90Pt}));
diff --git a/jstests/core/geo_or.js b/jstests/core/geo_or.js
index 20eb7b7dce1..341ce32d397 100644
--- a/jstests/core/geo_or.js
+++ b/jstests/core/geo_or.js
@@ -30,54 +30,54 @@ assert.throws(function() {
assert.eq(2,
t.find({
- $or: [
- {loc: {$geoWithin: {$centerSphere: [p, 10]}}},
- {loc: {$geoWithin: {$centerSphere: [p, 10]}}}
- ]
- }).itcount(),
+ $or: [
+ {loc: {$geoWithin: {$centerSphere: [p, 10]}}},
+ {loc: {$geoWithin: {$centerSphere: [p, 10]}}}
+ ]
+ }).itcount(),
'multiple $geoWithin clauses not supported by $or. index type: ' + indexname);
+assert.eq(
+ 2,
+ t.find({
+ $or: [
+ {loc: {$geoIntersects: {$geometry: {type: 'LineString', coordinates: [p, q]}}}},
+ {
+ loc: {
+ $geoIntersects:
+ {$geometry: {type: 'LineString', coordinates: [[0, 0], [1, 1]]}}
+ }
+ }
+ ]
+ }).itcount(),
+ 'multiple $geoIntersects LineString clauses not supported by $or. index type: ' + indexname);
assert.eq(2,
t.find({
- $or: [
- {loc: {$geoIntersects: {$geometry: {type: 'LineString', coordinates: [p, q]}}}},
- {
- loc: {
- $geoIntersects:
- {$geometry: {type: 'LineString', coordinates: [[0, 0], [1, 1]]}}
- }
- }
- ]
- }).itcount(),
- 'multiple $geoIntersects LineString clauses not supported by $or. index type: ' +
- indexname);
-assert.eq(2,
- t.find({
- $or: [
- {loc: {$geoIntersects: {$geometry: {type: 'Point', coordinates: p}}}},
- {loc: {$geoIntersects: {$geometry: {type: 'Point', coordinates: q}}}}
- ]
- }).itcount(),
+ $or: [
+ {loc: {$geoIntersects: {$geometry: {type: 'Point', coordinates: p}}}},
+ {loc: {$geoIntersects: {$geometry: {type: 'Point', coordinates: q}}}}
+ ]
+ }).itcount(),
'multiple $geoIntersects Point clauses not supported by $or. index type: ' + indexname);
assert.eq(
2,
t.find({
- $or: [
- {
- loc: {
- $geoIntersects:
- {$geometry: {type: 'Polygon', coordinates: [[[0, 0], p, q, [0, 0]]]}}
- }
- },
- {
- loc: {
- $geoIntersects: {
- $geometry:
- {type: 'Polygon', coordinates: [[[0, 0], [1, 1], [0, 1], [0, 0]]]}
- }
- }
- }
- ]
- }).itcount(),
+ $or: [
+ {
+ loc: {
+ $geoIntersects:
+ {$geometry: {type: 'Polygon', coordinates: [[[0, 0], p, q, [0, 0]]]}}
+ }
+ },
+ {
+ loc: {
+ $geoIntersects: {
+ $geometry:
+ {type: 'Polygon', coordinates: [[[0, 0], [1, 1], [0, 1], [0, 0]]]}
+ }
+ }
+ }
+ ]
+ }).itcount(),
'multiple $geoIntersects Polygon clauses not supported by $or. index type: ' + indexname);
t.dropIndexes();
@@ -88,9 +88,9 @@ t.ensureIndex({loc: indexname});
assert.eq(2,
t.find({
- $or: [
- {loc: {$geoWithin: {$centerSphere: [p, 10]}}},
- {loc: {$geoWithin: {$centerSphere: [p, 10]}}}
- ]
- }).itcount(),
+ $or: [
+ {loc: {$geoWithin: {$centerSphere: [p, 10]}}},
+ {loc: {$geoWithin: {$centerSphere: [p, 10]}}}
+ ]
+ }).itcount(),
'multiple $geoWithin clauses not supported by $or. index type: ' + indexname);
diff --git a/jstests/core/geo_poly_edge.js b/jstests/core/geo_poly_edge.js
index 380fe533861..a8239cde0af 100644
--- a/jstests/core/geo_poly_edge.js
+++ b/jstests/core/geo_poly_edge.js
@@ -17,6 +17,6 @@ assert.eq(coll.find({loc: {$within: {$polygon: [[10, 10], [10, 10], [10, 10]]}}}
coll.insert({loc: [179, 0]});
coll.insert({loc: [0, 179]});
-assert.eq(coll.find({loc: {$within: {$polygon: [[0, 0], [1000, 0], [1000, 1000], [0, 1000]]}}})
- .itcount(),
- 3);
+assert.eq(
+ coll.find({loc: {$within: {$polygon: [[0, 0], [1000, 0], [1000, 1000], [0, 1000]]}}}).itcount(),
+ 3);
diff --git a/jstests/core/geo_polygon1.js b/jstests/core/geo_polygon1.js
index d1dbf0c19dc..45f0eb71d64 100644
--- a/jstests/core/geo_polygon1.js
+++ b/jstests/core/geo_polygon1.js
@@ -8,10 +8,7 @@ t.drop();
num = 0;
for (x = 1; x < 9; x++) {
for (y = 1; y < 9; y++) {
- o = {
- _id: num++,
- loc: [x, y]
- };
+ o = {_id: num++, loc: [x, y]};
t.save(o);
}
}
@@ -29,15 +26,14 @@ assert.eq(num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(), "Bou
// Make sure we can add object-based polygons
assert.eq(
- num,
- t.find({loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}}})
- .count());
+ num, t.find({
+ loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}}
+ }).count());
// Look in a box much bigger than the one we have data in
boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]];
-assert.eq(num,
- t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(),
- "Big Bounding Box Test");
+assert.eq(
+ num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(), "Big Bounding Box Test");
t.drop();
diff --git a/jstests/core/geo_polygon1_noindex.js b/jstests/core/geo_polygon1_noindex.js
index 22f90e7157c..e5aabb5043d 100644
--- a/jstests/core/geo_polygon1_noindex.js
+++ b/jstests/core/geo_polygon1_noindex.js
@@ -6,10 +6,7 @@ t.drop();
num = 0;
for (x = 1; x < 9; x++) {
for (y = 1; y < 9; y++) {
- o = {
- _id: num++,
- loc: [x, y]
- };
+ o = {_id: num++, loc: [x, y]};
t.save(o);
}
}
@@ -25,15 +22,14 @@ assert.eq(num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(), "Bou
// Make sure we can add object-based polygons
assert.eq(
- num,
- t.find({loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}}})
- .count());
+ num, t.find({
+ loc: {$within: {$polygon: {a: [-10, -10], b: [-10, 10], c: [10, 10], d: [10, -10]}}}
+ }).count());
// Look in a box much bigger than the one we have data in
boxBounds = [[-100, -100], [-100, 100], [100, 100], [100, -100]];
-assert.eq(num,
- t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(),
- "Big Bounding Box Test");
+assert.eq(
+ num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(), "Big Bounding Box Test");
t.drop();
diff --git a/jstests/core/geo_polygon3.js b/jstests/core/geo_polygon3.js
index ed8f040fa8d..1f81e70adfa 100644
--- a/jstests/core/geo_polygon3.js
+++ b/jstests/core/geo_polygon3.js
@@ -11,10 +11,7 @@ for (var n = 0; n < numTests; n++) {
num = 0;
for (x = 1; x < 9; x++) {
for (y = 1; y < 9; y++) {
- o = {
- _id: num++,
- loc: [x, y]
- };
+ o = {_id: num++, loc: [x, y]};
t.save(o);
}
}
diff --git a/jstests/core/geo_s2cursorlimitskip.js b/jstests/core/geo_s2cursorlimitskip.js
index 25ae81beb2d..427fbf8fe29 100644
--- a/jstests/core/geo_s2cursorlimitskip.js
+++ b/jstests/core/geo_s2cursorlimitskip.js
@@ -23,9 +23,7 @@ function insertRandomPoints(num, minDist, maxDist) {
for (var i = 0; i < num; i++) {
var lat = sign() * (minDist + random() * (maxDist - minDist));
var lng = sign() * (minDist + random() * (maxDist - minDist));
- var point = {
- geo: {type: "Point", coordinates: [lng, lat]}
- };
+ var point = {geo: {type: "Point", coordinates: [lng, lat]}};
assert.writeOK(t.insert(point));
}
}
@@ -37,8 +35,9 @@ var batchSize = 4;
// Insert points between 0.01 and 1.0 away.
insertRandomPoints(totalPointCount, 0.01, 1.0);
-var cursor = t.find({geo: {$geoNear: {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}})
- .batchSize(batchSize);
+var cursor = t.find({
+ geo: {$geoNear: {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}
+ }).batchSize(batchSize);
assert.eq(cursor.count(), totalPointCount);
// Disable profiling in order to drop the system.profile collection.
@@ -48,8 +47,8 @@ testDB.setProfilingLevel(0);
testDB.system.profile.drop();
// Create 4MB system.profile collection to prevent the 'getmore' operations from overwriting the
// original query.
-assert.commandWorked(testDB.createCollection("system.profile",
- {capped: true, size: 4 * 1024 * 1024}));
+assert.commandWorked(
+ testDB.createCollection("system.profile", {capped: true, size: 4 * 1024 * 1024}));
testDB.setProfilingLevel(2);
for (var j = 0; j < initialAdvance; j++) {
@@ -81,14 +80,16 @@ assert(!cursor.hasNext());
var someLimit = 23;
// Make sure limit does something.
-cursor = t.find({geo: {$geoNear: {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}})
- .limit(someLimit);
+cursor = t.find({
+ geo: {$geoNear: {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}
+ }).limit(someLimit);
// Count doesn't work here -- ignores limit/skip, so we use itcount.
assert.eq(cursor.itcount(), someLimit);
// Make sure skip works by skipping some stuff ourselves.
var someSkip = 3;
-cursor = t.find({geo: {$geoNear: {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}})
- .limit(someLimit + someSkip);
+cursor = t.find({
+ geo: {$geoNear: {$geometry: {type: "Point", coordinates: [0.0, 0.0]}}}
+ }).limit(someLimit + someSkip);
for (var i = 0; i < someSkip; ++i) {
cursor.next();
}
diff --git a/jstests/core/geo_s2descindex.js b/jstests/core/geo_s2descindex.js
index d6dca95213d..daf5531b31d 100644
--- a/jstests/core/geo_s2descindex.js
+++ b/jstests/core/geo_s2descindex.js
@@ -5,24 +5,14 @@
var coll = db.getCollection("twodspheredesc");
var descriptors = [["field1", -1], ["field2", -1], ["coordinates", "2dsphere"]];
-var docA = {
- field1: "a",
- field2: 1,
- coordinates: [-118.2400013, 34.073893]
-};
-var docB = {
- field1: "b",
- field2: 1,
- coordinates: [-118.2400012, 34.073894]
-};
+var docA = {field1: "a", field2: 1, coordinates: [-118.2400013, 34.073893]};
+var docB = {field1: "b", field2: 1, coordinates: [-118.2400012, 34.073894]};
// Try both regular and near index cursors
var query = {
coordinates: {$geoWithin: {$centerSphere: [[-118.240013, 34.073893], 0.44915760491198753]}}
};
-var queryNear = {
- coordinates: {$geoNear: {"type": "Point", "coordinates": [0, 0]}}
-};
+var queryNear = {coordinates: {$geoNear: {"type": "Point", "coordinates": [0, 0]}}};
//
// The idea here is we try "2dsphere" indexes in combination with descending
diff --git a/jstests/core/geo_s2disjoint_holes.js b/jstests/core/geo_s2disjoint_holes.js
index a3988e9a614..f7731b416e4 100644
--- a/jstests/core/geo_s2disjoint_holes.js
+++ b/jstests/core/geo_s2disjoint_holes.js
@@ -8,18 +8,14 @@
// http://geojson.org/geojson-spec.html#polygon
//
-var t = db.geo_s2disjoint_holes, coordinates = [
- // One square.
- [[9, 9], [9, 11], [11, 11], [11, 9], [9, 9]],
- // Another disjoint square.
- [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]
-],
- poly =
- {
- type: 'Polygon',
- coordinates: coordinates
- },
- multiPoly = {
+var t = db.geo_s2disjoint_holes, coordinates =
+ [
+ // One square.
+ [[9, 9], [9, 11], [11, 11], [11, 9], [9, 9]],
+ // Another disjoint square.
+ [[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]
+ ],
+ poly = {type: 'Polygon', coordinates: coordinates}, multiPoly = {
type: 'MultiPolygon',
// Multi-polygon's coordinates are wrapped in one more array.
coordinates: [coordinates]
diff --git a/jstests/core/geo_s2dupe_points.js b/jstests/core/geo_s2dupe_points.js
index 63e4369d2fa..406a7b1ff4c 100644
--- a/jstests/core/geo_s2dupe_points.js
+++ b/jstests/core/geo_s2dupe_points.js
@@ -28,10 +28,7 @@ var lineWithDupes = {
_id: "line",
geo: {type: "LineString", coordinates: [[40, 5], [40, 5], [40, 5], [41, 6], [41, 6]]}
};
-var lineWithoutDupes = {
- type: "LineString",
- coordinates: [[40, 5], [41, 6]]
-};
+var lineWithoutDupes = {type: "LineString", coordinates: [[40, 5], [41, 6]]};
// Polygon
var polygonWithDupes = {
diff --git a/jstests/core/geo_s2explain.js b/jstests/core/geo_s2explain.js
index c8d32e00379..97f45e89a68 100644
--- a/jstests/core/geo_s2explain.js
+++ b/jstests/core/geo_s2explain.js
@@ -4,12 +4,8 @@
var t = db.jstests_geo_s2explain;
t.drop();
-var point1 = {
- loc: {type: "Point", coordinates: [10, 10]}
-};
-var point2 = {
- loc: {type: "Point", coordinates: [10.001, 10]}
-};
+var point1 = {loc: {type: "Point", coordinates: [10, 10]}};
+var point2 = {loc: {type: "Point", coordinates: [10.001, 10]}};
assert.writeOK(t.insert([point1, point2]));
assert.commandWorked(t.ensureIndex({loc: "2dsphere"}));
diff --git a/jstests/core/geo_s2holesameasshell.js b/jstests/core/geo_s2holesameasshell.js
index 29f00b88f7a..5407fe45c26 100644
--- a/jstests/core/geo_s2holesameasshell.js
+++ b/jstests/core/geo_s2holesameasshell.js
@@ -3,18 +3,9 @@ var t = db.geo_s2holessameasshell;
t.drop();
t.ensureIndex({geo: "2dsphere"});
-var centerPoint = {
- "type": "Point",
- "coordinates": [0.5, 0.5]
-};
-var edgePoint = {
- "type": "Point",
- "coordinates": [0, 0.5]
-};
-var cornerPoint = {
- "type": "Point",
- "coordinates": [0, 0]
-};
+var centerPoint = {"type": "Point", "coordinates": [0.5, 0.5]};
+var edgePoint = {"type": "Point", "coordinates": [0, 0.5]};
+var cornerPoint = {"type": "Point", "coordinates": [0, 0]};
// Various "edge" cases. None of them should be returned by the non-polygon
// polygon below.
diff --git a/jstests/core/geo_s2index.js b/jstests/core/geo_s2index.js
index cc25b4fabfe..99c3852aae9 100644
--- a/jstests/core/geo_s2index.js
+++ b/jstests/core/geo_s2index.js
@@ -89,10 +89,12 @@ assert.throws(function() {
return t.count({loc: {$foo: [0, 0]}});
});
assert.throws(function() {
- return t.find({
- "nonGeo": "pointA",
- "geo": {"$geoIntersects": {"$geometry": somepoly}, "$near": {"$geometry": somepoly}}
- }).count();
+ return t
+ .find({
+ "nonGeo": "pointA",
+ "geo": {"$geoIntersects": {"$geometry": somepoly}, "$near": {"$geometry": somepoly}}
+ })
+ .count();
});
// If we specify a datum, it has to be valid (WGS84).
diff --git a/jstests/core/geo_s2indexversion1.js b/jstests/core/geo_s2indexversion1.js
index 49aa80dbbca..4fa58bb589f 100644
--- a/jstests/core/geo_s2indexversion1.js
+++ b/jstests/core/geo_s2indexversion1.js
@@ -106,15 +106,9 @@ coll.drop();
// Test compatibility of various GeoJSON objects with both 2dsphere index versions.
//
-var pointDoc = {
- geo: {type: "Point", coordinates: [40, 5]}
-};
-var lineStringDoc = {
- geo: {type: "LineString", coordinates: [[40, 5], [41, 6]]}
-};
-var polygonDoc = {
- geo: {type: "Polygon", coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}
-};
+var pointDoc = {geo: {type: "Point", coordinates: [40, 5]}};
+var lineStringDoc = {geo: {type: "LineString", coordinates: [[40, 5], [41, 6]]}};
+var polygonDoc = {geo: {type: "Polygon", coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}};
var multiPointDoc = {
geo: {
type: "MultiPoint",
@@ -144,12 +138,7 @@ var multiPolygonDoc = {
[-73.9814, 40.7681],
[-73.958, 40.8003]
]],
- [[
- [-73.958, 40.8003],
- [-73.9498, 40.7968],
- [-73.9737, 40.7648],
- [-73.958, 40.8003]
- ]]
+ [[[-73.958, 40.8003], [-73.9498, 40.7968], [-73.9737, 40.7648], [-73.958, 40.8003]]]
]
}
};
diff --git a/jstests/core/geo_s2intersection.js b/jstests/core/geo_s2intersection.js
index bf65c02c0c2..faf9304396c 100644
--- a/jstests/core/geo_s2intersection.js
+++ b/jstests/core/geo_s2intersection.js
@@ -10,10 +10,7 @@ var canonLine = {
geo: {type: "LineString", coordinates: [[0.0, 0.0], [1.0, 0.0]]}
};
-var canonPoint = {
- name: 'canonPoint',
- geo: {type: "Point", coordinates: [10.0, 10.0]}
-};
+var canonPoint = {name: 'canonPoint', geo: {type: "Point", coordinates: [10.0, 10.0]}};
var canonPoly = {
name: 'canonPoly',
@@ -28,10 +25,7 @@ t.insert(canonPoint);
t.insert(canonPoly);
// Case 1: Basic sanity intersection.
-var testLine = {
- type: "LineString",
- coordinates: [[0.5, 0.5], [0.5, -0.5]]
-};
+var testLine = {type: "LineString", coordinates: [[0.5, 0.5], [0.5, -0.5]]};
var result = t.find({geo: {$geoIntersects: {$geometry: testLine}}});
assert.eq(result.count(), 1);
@@ -127,10 +121,7 @@ assert.eq(result.count(), 1);
assert.eq(result[0]['name'], 'canonPoint');
// Case 10: Sanity point non-intersection.
-var testPoint = {
- type: "Point",
- coordinates: [12.0, 12.0]
-};
+var testPoint = {type: "Point", coordinates: [12.0, 12.0]};
result = t.find({geo: {$geoIntersects: {$geometry: testPoint}}});
assert.eq(result.count(), 0);
@@ -152,12 +143,8 @@ t.drop();
t.ensureIndex({a: "2dsphere"});
t.insert({a: {type: "Polygon", coordinates: [[[0, 0], [3, 6], [6, 0], [0, 0]]]}});
-var firstPoint = {
- $geometry: {type: "Point", coordinates: [3.0, 1.0]}
-};
-var secondPoint = {
- $geometry: {type: "Point", coordinates: [4.0, 1.0]}
-};
+var firstPoint = {$geometry: {type: "Point", coordinates: [3.0, 1.0]}};
+var secondPoint = {$geometry: {type: "Point", coordinates: [4.0, 1.0]}};
// First point should intersect with the polygon.
result = t.find({a: {$geoIntersects: firstPoint}});
diff --git a/jstests/core/geo_s2multi.js b/jstests/core/geo_s2multi.js
index 2cd6a3d73d7..8899c9d5561 100644
--- a/jstests/core/geo_s2multi.js
+++ b/jstests/core/geo_s2multi.js
@@ -28,39 +28,39 @@ multiPolygonA = {
};
assert.writeOK(t.insert({geo: multiPolygonA}));
-assert.eq(3,
- t.find({geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100, 0]}}}})
- .itcount());
+assert.eq(3, t.find({
+ geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100, 0]}}}
+ }).itcount());
assert.eq(3,
t.find({
- geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [101.0, 1.0]}}}
- }).itcount());
+ geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [101.0, 1.0]}}}
+ }).itcount());
// Inside the hole in multiPolygonA
assert.eq(
- 0,
- t.find({geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100.21, 0.21]}}}})
- .itcount());
+ 0, t.find({
+ geo: {$geoIntersects: {$geometry: {"type": "Point", "coordinates": [100.21, 0.21]}}}
+ }).itcount());
// One point inside the hole, one out.
assert.eq(
3,
t.find({
- geo: {
- $geoIntersects:
- {$geometry: {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21]]}}
- }
- }).itcount());
+ geo: {
+ $geoIntersects:
+ {$geometry: {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21]]}}
+ }
+ }).itcount());
assert.eq(
3,
t.find({
- geo: {
- $geoIntersects: {
- $geometry:
- {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21], [101, 1]]}
- }
- }
- }).itcount());
+ geo: {
+ $geoIntersects: {
+ $geometry:
+ {"type": "MultiPoint", "coordinates": [[100, 0], [100.21, 0.21], [101, 1]]}
+ }
+ }
+ }).itcount());
// Polygon contains itself and the multipoint.
assert.eq(2, t.find({geo: {$geoWithin: {$geometry: multiPolygonA}}}).itcount());
diff --git a/jstests/core/geo_s2near.js b/jstests/core/geo_s2near.js
index 08bf5ab9081..f8bacf350b4 100644
--- a/jstests/core/geo_s2near.js
+++ b/jstests/core/geo_s2near.js
@@ -90,8 +90,8 @@ function testRadAndDegreesOK(distance) {
assert.eq(resRadians.itcount(), resMeters.itcount());
// Also, geoNear should behave the same way.
- resGNMeters = db.runCommand(
- {geoNear: t.getName(), near: origin, maxDistance: distance, spherical: true});
+ resGNMeters =
+ db.runCommand({geoNear: t.getName(), near: origin, maxDistance: distance, spherical: true});
resGNRadians = db.runCommand({
geoNear: t.getName(),
near: [0, 0],
diff --git a/jstests/core/geo_s2nearComplex.js b/jstests/core/geo_s2nearComplex.js
index 0584c5e694a..bf9c76ffde4 100644
--- a/jstests/core/geo_s2nearComplex.js
+++ b/jstests/core/geo_s2nearComplex.js
@@ -11,15 +11,9 @@ var sin = Math.sin;
var cos = Math.cos;
var atan2 = Math.atan2;
-var originGeo = {
- type: "Point",
- coordinates: [20.0, 20.0]
-};
+var originGeo = {type: "Point", coordinates: [20.0, 20.0]};
// Center point for all tests.
-var origin = {
- name: "origin",
- geo: originGeo
-};
+var origin = {name: "origin", geo: originGeo};
/*
 * Convenience function for checking that coordinates match. threshold lets you
@@ -163,9 +157,7 @@ function validateOrdering(query) {
}
}
-var query = {
- geo: {$geoNear: {$geometry: originGeo}}
-};
+var query = {geo: {$geoNear: {$geometry: originGeo}}};
// Test a uniform distribution of 1000 points.
uniformPoints(origin, 1000, 0.5, 1.5);
diff --git a/jstests/core/geo_s2near_equator_opposite.js b/jstests/core/geo_s2near_equator_opposite.js
index 13bbc776daa..754c27e523d 100644
--- a/jstests/core/geo_s2near_equator_opposite.js
+++ b/jstests/core/geo_s2near_equator_opposite.js
@@ -14,12 +14,16 @@ t.ensureIndex({loc: '2dsphere'});
// upper bound for half of earth's circumference in meters
var dist = 40075000 / 2 + 1;
-var nearSphereCount = t.find({
- loc: {$nearSphere: {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}}
-}).itcount();
+var nearSphereCount =
+ t.find({
+ loc: {
+ $nearSphere: {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}
+ }
+ }).itcount();
var nearCount =
- t.find({loc: {$near: {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}}})
- .itcount();
+ t.find({
+ loc: {$near: {$geometry: {type: 'Point', coordinates: [180, 0]}, $maxDistance: dist}}
+ }).itcount();
var geoNearResult = db.runCommand(
{geoNear: t.getName(), near: {type: 'Point', coordinates: [180, 0]}, spherical: true});
diff --git a/jstests/core/geo_s2nongeoarray.js b/jstests/core/geo_s2nongeoarray.js
index 8684706d168..4b210f8f779 100644
--- a/jstests/core/geo_s2nongeoarray.js
+++ b/jstests/core/geo_s2nongeoarray.js
@@ -4,11 +4,7 @@ t = db.geo_s2nongeoarray;
oldPoint = [40, 5];
-var data = {
- geo: oldPoint,
- nonGeo: [123, 456],
- otherNonGeo: [{b: [1, 2]}, {b: [3, 4]}]
-};
+var data = {geo: oldPoint, nonGeo: [123, 456], otherNonGeo: [{b: [1, 2]}, {b: [3, 4]}]};
t.drop();
assert.writeOK(t.insert(data));
diff --git a/jstests/core/geo_s2nonstring.js b/jstests/core/geo_s2nonstring.js
index 43587f0c8e8..960f0c727a8 100644
--- a/jstests/core/geo_s2nonstring.js
+++ b/jstests/core/geo_s2nonstring.js
@@ -14,15 +14,13 @@ t.save({geo: {type: 'Point', coordinates: [0, 0]}, x: 'a'});
t.save({geo: {type: 'Point', coordinates: [0, 0]}});
// Expect 1 match, where x is 'a'
-assert.eq(1,
- t.count({
- geo: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 20}},
- x: 'a'
- }));
+assert.eq(1, t.count({
+ geo: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 20}},
+ x: 'a'
+}));
// Expect 1 match, where x matches null (missing matches null).
-assert.eq(1,
- t.count({
- geo: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 20}},
- x: null
- }));
+assert.eq(1, t.count({
+ geo: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 20}},
+ x: null
+}));
diff --git a/jstests/core/geo_s2oddshapes.js b/jstests/core/geo_s2oddshapes.js
index 6f14533c928..4ae953696a0 100644
--- a/jstests/core/geo_s2oddshapes.js
+++ b/jstests/core/geo_s2oddshapes.js
@@ -5,10 +5,7 @@ var t = db.geo_s2oddshapes;
t.drop();
t.ensureIndex({geo: "2dsphere"});
-var testPoint = {
- name: "origin",
- geo: {type: "Point", coordinates: [0.0, 0.0]}
-};
+var testPoint = {name: "origin", geo: {type: "Point", coordinates: [0.0, 0.0]}};
var testHorizLine = {
name: "horiz",
@@ -60,10 +57,7 @@ assert.eq(result.itcount(), 3);
t.drop();
t.ensureIndex({geo: "2dsphere"});
-var insidePoint = {
- name: "inside",
- geo: {type: "Point", name: "inside", coordinates: [100.0, 0.0]}
-};
+var insidePoint = {name: "inside", geo: {type: "Point", name: "inside", coordinates: [100.0, 0.0]}};
var outsidePoint = {
name: "inside",
diff --git a/jstests/core/geo_s2polywithholes.js b/jstests/core/geo_s2polywithholes.js
index 6ace711c718..80f7b0556c4 100644
--- a/jstests/core/geo_s2polywithholes.js
+++ b/jstests/core/geo_s2polywithholes.js
@@ -2,18 +2,9 @@ var t = db.geo_s2weirdpolys;
t.drop();
t.ensureIndex({geo: "2dsphere"});
-var centerPoint = {
- "type": "Point",
- "coordinates": [0.5, 0.5]
-};
-var edgePoint = {
- "type": "Point",
- "coordinates": [0, 0.5]
-};
-var cornerPoint = {
- "type": "Point",
- "coordinates": [0, 0]
-};
+var centerPoint = {"type": "Point", "coordinates": [0.5, 0.5]};
+var edgePoint = {"type": "Point", "coordinates": [0, 0.5]};
+var cornerPoint = {"type": "Point", "coordinates": [0, 0]};
t.insert({geo: centerPoint});
t.insert({geo: edgePoint});
diff --git a/jstests/core/geo_s2sparse.js b/jstests/core/geo_s2sparse.js
index ab3363b5860..d31905b5a92 100644
--- a/jstests/core/geo_s2sparse.js
+++ b/jstests/core/geo_s2sparse.js
@@ -3,15 +3,9 @@
var coll = db.geo_s2sparse;
-var point = {
- type: "Point",
- coordinates: [5, 5]
-};
-
-var indexSpec = {
- geo: "2dsphere",
- nonGeo: 1
-};
+var point = {type: "Point", coordinates: [5, 5]};
+
+var indexSpec = {geo: "2dsphere", nonGeo: 1};
var indexName = 'test.geo_s2sparse.$geo_2dsphere_nonGeo_1';
diff --git a/jstests/core/geo_s2twofields.js b/jstests/core/geo_s2twofields.js
index 1868287cf5b..564d6fcd91a 100644
--- a/jstests/core/geo_s2twofields.js
+++ b/jstests/core/geo_s2twofields.js
@@ -13,14 +13,8 @@ function randomCoord(center, minDistDeg, maxDistDeg) {
return [center[0] + dx, center[1] + dy];
}
-var nyc = {
- type: "Point",
- coordinates: [-74.0064, 40.7142]
-};
-var miami = {
- type: "Point",
- coordinates: [-80.1303, 25.7903]
-};
+var nyc = {type: "Point", coordinates: [-74.0064, 40.7142]};
+var miami = {type: "Point", coordinates: [-80.1303, 25.7903]};
var maxPoints = 10000;
var degrees = 5;
@@ -29,10 +23,8 @@ for (var i = 0; i < maxPoints; ++i) {
var fromCoord = randomCoord(nyc.coordinates, 0, degrees);
var toCoord = randomCoord(miami.coordinates, 0, degrees);
- arr.push({
- from: {type: "Point", coordinates: fromCoord},
- to: {type: "Point", coordinates: toCoord}
- });
+ arr.push(
+ {from: {type: "Point", coordinates: fromCoord}, to: {type: "Point", coordinates: toCoord}});
}
res = t.insert(arr);
assert.writeOK(res);
@@ -65,31 +57,25 @@ function timeWithoutAndWithAnIndex(index, query) {
var maxQueryRad = 0.5 * PI / 180.0;
// When we're not looking at ALL the data, anything indexed should beat not-indexed.
-var smallQuery =
- timeWithoutAndWithAnIndex({to: "2dsphere", from: "2dsphere"},
- {
- from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
- to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
- });
+var smallQuery = timeWithoutAndWithAnIndex({to: "2dsphere", from: "2dsphere"}, {
+ from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
+ to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
+});
print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]);
// assert(smallQuery[0] > smallQuery[1]);
// Let's just index one field.
-var smallQuery =
- timeWithoutAndWithAnIndex({to: "2dsphere"},
- {
- from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
- to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
- });
+var smallQuery = timeWithoutAndWithAnIndex({to: "2dsphere"}, {
+ from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
+ to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
+});
print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]);
// assert(smallQuery[0] > smallQuery[1]);
// And the other one.
-var smallQuery =
- timeWithoutAndWithAnIndex({from: "2dsphere"},
- {
- from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
- to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
- });
+var smallQuery = timeWithoutAndWithAnIndex({from: "2dsphere"}, {
+ from: {$within: {$centerSphere: [nyc.coordinates, maxQueryRad]}},
+ to: {$within: {$centerSphere: [miami.coordinates, maxQueryRad]}}
+});
print("Indexed time " + smallQuery[1] + " unindexed " + smallQuery[0]);
// assert(smallQuery[0] > smallQuery[1]);
diff --git a/jstests/core/geo_uniqueDocs.js b/jstests/core/geo_uniqueDocs.js
index 8c4e11fc82e..d66d5243c01 100644
--- a/jstests/core/geo_uniqueDocs.js
+++ b/jstests/core/geo_uniqueDocs.js
@@ -36,9 +36,9 @@ assert.eq(2, t.find({locs: {$within: {$center: [[5, 5], 7], $uniqueDocs: false}}
assert.eq(2, t.find({locs: {$within: {$centerSphere: [[5, 5], 1], $uniqueDocs: true}}}).itcount());
assert.eq(2, t.find({locs: {$within: {$centerSphere: [[5, 5], 1], $uniqueDocs: false}}}).itcount());
-assert.eq(2,
- t.find({locs: {$within: {$polygon: [[0, 0], [0, 9], [9, 9]], $uniqueDocs: true}}})
- .itcount());
-assert.eq(2,
- t.find({locs: {$within: {$polygon: [[0, 0], [0, 9], [9, 9]], $uniqueDocs: false}}})
- .itcount());
+assert.eq(
+ 2,
+ t.find({locs: {$within: {$polygon: [[0, 0], [0, 9], [9, 9]], $uniqueDocs: true}}}).itcount());
+assert.eq(
+ 2,
+ t.find({locs: {$within: {$polygon: [[0, 0], [0, 9], [9, 9]], $uniqueDocs: false}}}).itcount());
diff --git a/jstests/core/geo_uniqueDocs2.js b/jstests/core/geo_uniqueDocs2.js
index f6481b30f41..6f5ceed3478 100644
--- a/jstests/core/geo_uniqueDocs2.js
+++ b/jstests/core/geo_uniqueDocs2.js
@@ -54,30 +54,27 @@ assert(notUniqueInclude.results[0].loc);
assert(uniqueInclude.results[0].loc);
// For geoNear / uniqueDocs, 'num' limit seems to apply to locs.
-assert.eq(
- 1,
- db.runCommand(
- {geoNear: collName, near: [50, 50], num: 1, uniqueDocs: false, includeLocs: false})
- .results.length);
+assert.eq(1,
+ db.runCommand(
+ {geoNear: collName, near: [50, 50], num: 1, uniqueDocs: false, includeLocs: false})
+ .results.length);
// Check locs returned in includeLocs mode.
t.remove({});
objLocs = [{x: 20, y: 30, z: ['loc1', 'loca']}, {x: 40, y: 50, z: ['loc2', 'locb']}];
t.save({loc: objLocs});
-results =
- db.runCommand(
- {geoNear: collName, near: [50, 50], num: 10, uniqueDocs: false, includeLocs: true})
- .results;
+results = db.runCommand(
+ {geoNear: collName, near: [50, 50], num: 10, uniqueDocs: false, includeLocs: true})
+ .results;
assert.contains(results[0].loc, objLocs);
// Check locs returned in includeLocs mode, where locs are arrays.
t.remove({});
arrLocs = [[20, 30], [40, 50]];
t.save({loc: arrLocs});
-results =
- db.runCommand(
- {geoNear: collName, near: [50, 50], num: 10, uniqueDocs: false, includeLocs: true})
- .results;
+results = db.runCommand(
+ {geoNear: collName, near: [50, 50], num: 10, uniqueDocs: false, includeLocs: true})
+ .results;
// The original loc arrays are returned as objects.
expectedLocs = arrLocs;
diff --git a/jstests/core/geo_update_btree.js b/jstests/core/geo_update_btree.js
index ea1025b10a9..a85d4274415 100644
--- a/jstests/core/geo_update_btree.js
+++ b/jstests/core/geo_update_btree.js
@@ -19,16 +19,13 @@ var parallelInsert = startParallelShell(
" db.jstests_geo_update_btree.insert(doc);" + "}");
for (i = 0; i < 1000; i++) {
- coll.update(
- {
- loc: {
- $within:
- {$center: [[Random.rand() * 180, Random.rand() * 180], Random.rand() * 50]}
- }
- },
- {$set: {v: big}},
- false,
- true);
+ coll.update({
+ loc:
+ {$within: {$center: [[Random.rand() * 180, Random.rand() * 180], Random.rand() * 50]}}
+ },
+ {$set: {v: big}},
+ false,
+ true);
if (i % 10 == 0)
print(i);
diff --git a/jstests/core/geo_update_dedup.js b/jstests/core/geo_update_dedup.js
index b354f3ca7ae..a630954e8ca 100644
--- a/jstests/core/geo_update_dedup.js
+++ b/jstests/core/geo_update_dedup.js
@@ -9,9 +9,7 @@ t.drop();
t.ensureIndex({locs: "2d"});
t.save({locs: [[49.999, 49.999], [50.0, 50.0], [50.001, 50.001]]});
-var q = {
- locs: {$near: [50.0, 50.0]}
-};
+var q = {locs: {$near: [50.0, 50.0]}};
assert.eq(1, t.find(q).itcount(), 'duplicates returned from query');
var res = t.update({locs: {$near: [50.0, 50.0]}}, {$inc: {touchCount: 1}}, false, true);
diff --git a/jstests/core/geo_withinquery.js b/jstests/core/geo_withinquery.js
index 3a71608ab6d..7f712ba1e86 100644
--- a/jstests/core/geo_withinquery.js
+++ b/jstests/core/geo_withinquery.js
@@ -5,15 +5,16 @@ t.drop();
num = 0;
for (x = 0; x <= 20; x++) {
for (y = 0; y <= 20; y++) {
- o = {
- _id: num++,
- loc: [x, y]
- };
+ o = {_id: num++, loc: [x, y]};
t.save(o);
}
}
assert.eq(21 * 21 - 1,
- t.find({$and: [{loc: {$ne: [0, 0]}}, {loc: {$within: {$box: [[0, 0], [100, 100]]}}}, ]})
- .itcount(),
+ t.find({
+ $and: [
+ {loc: {$ne: [0, 0]}},
+ {loc: {$within: {$box: [[0, 0], [100, 100]]}}},
+ ]
+ }).itcount(),
"UHOH!");
diff --git a/jstests/core/geob.js b/jstests/core/geob.js
index b78eaa453df..c711a676b2b 100644
--- a/jstests/core/geob.js
+++ b/jstests/core/geob.js
@@ -1,18 +1,10 @@
var t = db.geob;
t.drop();
-var a = {
- p: [0, 0]
-};
-var b = {
- p: [1, 0]
-};
-var c = {
- p: [3, 4]
-};
-var d = {
- p: [0, 6]
-};
+var a = {p: [0, 0]};
+var b = {p: [1, 0]};
+var c = {p: [3, 4]};
+var d = {p: [0, 6]};
t.save(a);
t.save(b);
diff --git a/jstests/core/geof.js b/jstests/core/geof.js
index 1d7f13eb881..4eae803a856 100644
--- a/jstests/core/geof.js
+++ b/jstests/core/geof.js
@@ -13,9 +13,7 @@ t.insert({loc: [-0.9, 0]});
t.ensureIndex({loc: "2d"});
-t.find({loc: {$near: [0, 0]}})
- .limit(2)
- .forEach(function(o) {
- // printjson(o);
- assert.lt(Geo.distance([0, 0], o.loc), 0.95);
- });
+t.find({loc: {$near: [0, 0]}}).limit(2).forEach(function(o) {
+ // printjson(o);
+ assert.lt(Geo.distance([0, 0], o.loc), 0.95);
+});
diff --git a/jstests/core/geonear_cmd_input_validation.js b/jstests/core/geonear_cmd_input_validation.js
index ad3d56d240a..f9cfaa4ff21 100644
--- a/jstests/core/geonear_cmd_input_validation.js
+++ b/jstests/core/geonear_cmd_input_validation.js
@@ -24,11 +24,7 @@ indexTypes.forEach(function(indexType) {
pointDescription = (isLegacy ? "legacy coordinates" : "GeoJSON point");
function makeCommand(distance) {
- var command = {
- geoNear: t.getName(),
- near: pointType,
- spherical: spherical
- };
+ var command = {geoNear: t.getName(), near: pointType, spherical: spherical};
command[optionName] = distance;
return command;
}
@@ -59,16 +55,14 @@ indexTypes.forEach(function(indexType) {
}
// Try several bad values for min/maxDistance.
- badNumbers.concat(outOfRangeDistances)
- .forEach(function(badDistance) {
+ badNumbers.concat(outOfRangeDistances).forEach(function(badDistance) {
- var msg =
- ("geoNear with spherical=" + spherical + " and " + pointDescription +
- " and " + indexType + " index should've failed with " + optionName +
- " " + badDistance);
+ var msg = ("geoNear with spherical=" + spherical + " and " + pointDescription +
+ " and " + indexType + " index should've failed with " + optionName +
+ " " + badDistance);
- assert.commandFailed(db.runCommand(makeCommand(badDistance)), msg);
- });
+ assert.commandFailed(db.runCommand(makeCommand(badDistance)), msg);
+ });
// Bad values for limit / num.
['num', 'limit'].forEach(function(limitOptionName) {
diff --git a/jstests/core/getlog2.js b/jstests/core/getlog2.js
index b6cf223b967..597a85e20ee 100644
--- a/jstests/core/getlog2.js
+++ b/jstests/core/getlog2.js
@@ -26,15 +26,14 @@ if (db.isMaster().msg != "isdbgrid") {
});
// run a slow update
- glcol.update(
- {
- "SENTINEL": 1,
- "$where": function() {
- sleep(1000);
- return true;
- }
- },
- {"x": "x"});
+ glcol.update({
+ "SENTINEL": 1,
+ "$where": function() {
+ sleep(1000);
+ return true;
+ }
+ },
+ {"x": "x"});
var resp = db.adminCommand({getLog: "global"});
assert(resp.ok == 1, "error executing getLog command");
@@ -42,21 +41,19 @@ if (db.isMaster().msg != "isdbgrid") {
assert(resp.log.length > 0, "no log lines");
// ensure that slow query is logged in detail
- assert(contains(resp.log,
- function(v) {
- print(v);
- var opString = db.getMongo().useReadCommands() ? " find " : " query ";
- var filterString = db.getMongo().useReadCommands() ? "filter:" : "query:";
- return v.indexOf(opString) != -1 && v.indexOf(filterString) != -1 &&
- v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 &&
- v.indexOf("SENTINEL") != -1;
- }));
+ assert(contains(resp.log, function(v) {
+ print(v);
+ var opString = db.getMongo().useReadCommands() ? " find " : " query ";
+ var filterString = db.getMongo().useReadCommands() ? "filter:" : "query:";
+ return v.indexOf(opString) != -1 && v.indexOf(filterString) != -1 &&
+ v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 &&
+ v.indexOf("SENTINEL") != -1;
+ }));
// same, but for update
- assert(contains(resp.log,
- function(v) {
- return v.indexOf(" update ") != -1 && v.indexOf("query") != -1 &&
- v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 &&
- v.indexOf("SENTINEL") != -1;
- }));
+ assert(contains(resp.log, function(v) {
+ return v.indexOf(" update ") != -1 && v.indexOf("query") != -1 &&
+ v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 &&
+ v.indexOf("SENTINEL") != -1;
+ }));
}
diff --git a/jstests/core/group1.js b/jstests/core/group1.js
index 6100ee94c70..5e4fcdea245 100644
--- a/jstests/core/group1.js
+++ b/jstests/core/group1.js
@@ -88,9 +88,8 @@ p = {
initial: {count: 0},
finalize: "abc"
};
-assert.commandFailedWithCode(db.runCommand({group: p}),
- ErrorCodes.JSInterpreterFailure,
- "Illegal finalize function");
+assert.commandFailedWithCode(
+ db.runCommand({group: p}), ErrorCodes.JSInterpreterFailure, "Illegal finalize function");
p = {
ns: "group1",
@@ -103,9 +102,8 @@ p = {
ob;
}
};
-assert.commandFailedWithCode(db.runCommand({group: p}),
- ErrorCodes.JSInterpreterFailure,
- "Illegal finalize function 2");
+assert.commandFailedWithCode(
+ db.runCommand({group: p}), ErrorCodes.JSInterpreterFailure, "Illegal finalize function 2");
p = {
ns: "group1",
@@ -118,9 +116,8 @@ p = {
ob;
}
};
-assert.commandFailedWithCode(db.runCommand({group: p}),
- ErrorCodes.JSInterpreterFailure,
- "Illegal keyf function");
+assert.commandFailedWithCode(
+ db.runCommand({group: p}), ErrorCodes.JSInterpreterFailure, "Illegal keyf function");
p = {
ns: "group1",
@@ -128,9 +125,8 @@ p = {
$reduce: "abc",
initial: {count: 0}
};
-assert.commandFailedWithCode(db.runCommand({group: p}),
- ErrorCodes.JSInterpreterFailure,
- "Illegal reduce function");
+assert.commandFailedWithCode(
+ db.runCommand({group: p}), ErrorCodes.JSInterpreterFailure, "Illegal reduce function");
p = {
ns: "group1",
@@ -140,8 +136,7 @@ p = {
},
initial: {count: 0}
};
-assert.commandFailedWithCode(db.runCommand({group: p}),
- ErrorCodes.JSInterpreterFailure,
- "Illegal reduce function 2");
+assert.commandFailedWithCode(
+ db.runCommand({group: p}), ErrorCodes.JSInterpreterFailure, "Illegal reduce function 2");
t.drop();
diff --git a/jstests/core/group2.js b/jstests/core/group2.js
index ada675f6f69..1577932beca 100644
--- a/jstests/core/group2.js
+++ b/jstests/core/group2.js
@@ -25,9 +25,7 @@ assert.eq(1, result[1].count, "G");
assert.eq(1, result[2].count, "H");
var keyFn = function(x) {
- return {
- a: 'a' in x ? x.a : null
- };
+ return {a: 'a' in x ? x.a : null};
};
delete cmd.key;
diff --git a/jstests/core/grow_hash_table.js b/jstests/core/grow_hash_table.js
index b26baae31af..d35ffa65405 100644
--- a/jstests/core/grow_hash_table.js
+++ b/jstests/core/grow_hash_table.js
@@ -11,12 +11,8 @@ var testDB = db.getSiblingDB('grow_hash_table');
var doTest = function(count) {
print('Testing with count of ' + count);
testDB.dropDatabase();
- var id = {
- data: 1
- };
- var doc = {
- _id: id
- };
+ var id = {data: 1};
+ var doc = {_id: id};
var projection = {};
// Create a document and a projection with fields r1, r2, r3 ...
diff --git a/jstests/core/hashindex1.js b/jstests/core/hashindex1.js
index 778e31d84b2..93986ecfd5e 100644
--- a/jstests/core/hashindex1.js
+++ b/jstests/core/hashindex1.js
@@ -5,17 +5,12 @@ t.drop();
load("jstests/libs/analyze_plan.js");
// test non-single field hashed indexes don't get created (maybe change later)
-var badspec = {
- a: "hashed",
- b: 1
-};
+var badspec = {a: "hashed", b: 1};
t.ensureIndex(badspec);
assert.eq(t.getIndexes().length, 1, "only _id index should be created");
// test unique index not created (maybe change later)
-var goodspec = {
- a: "hashed"
-};
+var goodspec = {a: "hashed"};
t.ensureIndex(goodspec, {"unique": true});
assert.eq(t.getIndexes().length, 1, "unique index got created.");
@@ -67,9 +62,7 @@ var explain = t.find({$and: [{a: {$in: [1, 2]}}, {a: {$gt: 1}}]}).explain();
assert(isIxscan(explain.queryPlanner.winningPlan), "not using hashed index");
// test creation of index based on hash of _id index
-var goodspec2 = {
- '_id': "hashed"
-};
+var goodspec2 = {'_id': "hashed"};
t.ensureIndex(goodspec2);
assert.eq(t.getIndexes().length, 3, "_id index didn't get created");
@@ -79,9 +72,7 @@ assert.eq(t.find({_id: newid}).hint({_id: 1}).toArray()[0]._id,
"using hashed index and different index returns different docs");
// test creation of sparse hashed index
-var sparseindex = {
- b: "hashed"
-};
+var sparseindex = {b: "hashed"};
t.ensureIndex(sparseindex, {"sparse": true});
assert.eq(t.getIndexes().length, 4, "sparse index didn't get created");
diff --git a/jstests/core/hint1.js b/jstests/core/hint1.js
index ddee0f369be..6542f9752bc 100644
--- a/jstests/core/hint1.js
+++ b/jstests/core/hint1.js
@@ -4,13 +4,10 @@ p.drop();
p.save({ts: new Date(1), cls: "entry", verticals: "alleyinsider", live: true});
p.ensureIndex({ts: 1});
-assert.eq(1,
- p.find({
- live: true,
- ts: {$lt: new Date(1234119308272)},
- cls: "entry",
- verticals: "alleyinsider"
- })
- .sort({ts: -1})
- .hint({ts: 1})
- .count());
+assert.eq(
+ 1,
+ p.find(
+ {live: true, ts: {$lt: new Date(1234119308272)}, cls: "entry", verticals: "alleyinsider"})
+ .sort({ts: -1})
+ .hint({ts: 1})
+ .count());
diff --git a/jstests/core/idhack.js b/jstests/core/idhack.js
index 292c2ed86b6..e26427f4b3c 100644
--- a/jstests/core/idhack.js
+++ b/jstests/core/idhack.js
@@ -25,9 +25,7 @@ assert.eq(8, t.findOne({_id: 2}).z, "C2");
assert.eq(8, t.findOne({_id: 3}).z, "C3");
// explain output should show that the ID hack was applied.
-var query = {
- _id: {x: 2}
-};
+var query = {_id: {x: 2}};
var explain = t.find(query).explain(true);
print("explain for " + tojson(query, "", true) + " = " + tojson(explain));
assert.eq(1, explain.executionStats.nReturned, "D1");
@@ -67,11 +65,10 @@ assert.eq({_id: 0, a: 0}, t.find({_id: 0}, {_id: 1, a: 1}).next());
// Non-simple: exclusion.
assert.eq({_id: 1, a: 1}, t.find({_id: 1}, {b: 0}).next());
-assert.eq(
- {
- _id: 0,
- },
- t.find({_id: 0}, {a: 0, b: 0}).next());
+assert.eq({
+ _id: 0,
+},
+ t.find({_id: 0}, {a: 0, b: 0}).next());
// Non-simple: dotted fields.
assert.eq({b: [{c: 1}, {c: 2}]}, t.find({_id: 0}, {_id: 0, "b.c": 1}).next());
diff --git a/jstests/core/in5.js b/jstests/core/in5.js
index c56621c91f3..772427365b7 100644
--- a/jstests/core/in5.js
+++ b/jstests/core/in5.js
@@ -4,30 +4,20 @@ t = db.in5;
function go(fn) {
t.drop();
o = {};
- o[fn] = {
- a: 1,
- b: 2
- };
+ o[fn] = {a: 1, b: 2};
t.insert(o);
x = {};
- x[fn] = {
- a: 1,
- b: 2
- };
+ x[fn] = {a: 1, b: 2};
assert.eq(1, t.find(x).itcount(), "A1 - " + fn);
y = {};
- y[fn] = {
- $in: [{a: 1, b: 2}]
- };
+ y[fn] = {$in: [{a: 1, b: 2}]};
assert.eq(1, t.find(y).itcount(), "A2 - " + fn);
z = {};
z[fn + ".a"] = 1;
- z[fn + ".b"] = {
- $in: [2]
- };
+ z[fn + ".b"] = {$in: [2]};
assert.eq(1, t.find(z).itcount(), "A3 - " + fn); // SERVER-1366
i = {};
diff --git a/jstests/core/index_arr2.js b/jstests/core/index_arr2.js
index 952be73ff13..a7e541add09 100644
--- a/jstests/core/index_arr2.js
+++ b/jstests/core/index_arr2.js
@@ -7,10 +7,7 @@ function test(withIndex) {
t.drop();
// insert a bunch of items to force queries to use the index.
- newObject = {
- _id: 1,
- a: [{b: {c: 1}}]
- };
+ newObject = {_id: 1, a: [{b: {c: 1}}]};
now = (new Date()).getTime() / 1000;
for (created = now - NUM; created <= now; created++) {
@@ -20,9 +17,7 @@ function test(withIndex) {
}
// change the last M items.
- query = {
- 'created': {'$gte': now - M}
- };
+ query = {'created': {'$gte': now - M}};
Z = t.find(query).count();
diff --git a/jstests/core/index_check3.js b/jstests/core/index_check3.js
index 2c07ae6d50a..7ffcc8da411 100644
--- a/jstests/core/index_check3.js
+++ b/jstests/core/index_check3.js
@@ -19,9 +19,7 @@ assert.eq(1, t.find({a: {$gt: 2}}).itcount(), "D");
t.drop();
for (var i = 0; i < 100; i++) {
- var o = {
- i: i
- };
+ var o = {i: i};
if (i % 2 == 0)
o.foo = i;
t.save(o);
diff --git a/jstests/core/index_check6.js b/jstests/core/index_check6.js
index 4baeced8fb9..e85913aeec3 100644
--- a/jstests/core/index_check6.js
+++ b/jstests/core/index_check6.js
@@ -30,10 +30,10 @@ assert.eq(29,
assert.eq(5,
keysExamined({age: {$gte: 29, $lte: 30}, rating: 5}, {age: 1, rating: 1}),
"C"); // SERVER-371
-assert.eq(7,
- keysExamined({age: {$gte: 29, $lte: 30}, rating: {$gte: 4, $lte: 5}},
- {age: 1, rating: 1}),
- "D"); // SERVER-371
+assert.eq(
+ 7,
+ keysExamined({age: {$gte: 29, $lte: 30}, rating: {$gte: 4, $lte: 5}}, {age: 1, rating: 1}),
+ "D"); // SERVER-371
assert.eq.automsg("2",
"t.find( { age:30, rating:{ $gte:4, $lte:5} } )" + ".explain('executionStats')" +
@@ -91,11 +91,7 @@ for (var a = -1; a <= 1; a += 2) {
for (var b = -1; b <= 1; b += 2) {
for (var c = -1; c <= 1; c += 2) {
t.dropIndexes();
- var spec = {
- a: a,
- b: b,
- c: c
- };
+ var spec = {a: a, b: b, c: c};
t.ensureIndex(spec);
doTest(spec, spec);
doTest({a: -a, b: -b, c: -c}, spec);
diff --git a/jstests/core/index_create_too_many.js b/jstests/core/index_create_too_many.js
index 44d5016a7cf..0b2002d9cc1 100644
--- a/jstests/core/index_create_too_many.js
+++ b/jstests/core/index_create_too_many.js
@@ -6,10 +6,7 @@ coll.drop();
// create 62 indexes, which leaves us with 63 indexes total (+1 for the _id index)
for (var i = 0; i < 62; i++) {
var name = 'i' + i;
- var spec = {
- key: {},
- name: name
- };
+ var spec = {key: {}, name: name};
spec.key[name] = 1;
var res = coll.runCommand('createIndexes', {indexes: [spec]});
diff --git a/jstests/core/index_create_with_nul_in_name.js b/jstests/core/index_create_with_nul_in_name.js
index 9134649c086..c128dcc5880 100644
--- a/jstests/core/index_create_with_nul_in_name.js
+++ b/jstests/core/index_create_with_nul_in_name.js
@@ -6,11 +6,7 @@
var coll = db.create_index_with_nul_in_name;
coll.drop();
- var idx = {
- key: {'a': 1},
- name: 'foo\0bar',
- ns: coll.getFullName()
- };
+ var idx = {key: {'a': 1}, name: 'foo\0bar', ns: coll.getFullName()};
var res = coll.runCommand('createIndexes', {indexes: [idx]});
assert.commandFailed(res, tojson(res));
diff --git a/jstests/core/index_diag.js b/jstests/core/index_diag.js
index 3e25bf2a1eb..e458a590dda 100644
--- a/jstests/core/index_diag.js
+++ b/jstests/core/index_diag.js
@@ -16,10 +16,7 @@ function r(a) {
}
for (i = 1; i < 4; i++) {
- o = {
- _id: i,
- x: -i
- };
+ o = {_id: i, x: -i};
t.insert(o);
all.push(o);
ids.push({_id: i});
diff --git a/jstests/core/index_filter_commands.js b/jstests/core/index_filter_commands.js
index 027731e97cf..8d00d69cbe7 100644
--- a/jstests/core/index_filter_commands.js
+++ b/jstests/core/index_filter_commands.js
@@ -36,32 +36,16 @@ t.save({a: 1, b: 1});
// Add 2 indexes.
// 1st index is more efficient.
// 2nd and 3rd indexes will be used to test index filters.
-var indexA1 = {
- a: 1
-};
-var indexA1B1 = {
- a: 1,
- b: 1
-};
-var indexA1C1 = {
- a: 1,
- c: 1
-};
+var indexA1 = {a: 1};
+var indexA1B1 = {a: 1, b: 1};
+var indexA1C1 = {a: 1, c: 1};
t.ensureIndex(indexA1);
t.ensureIndex(indexA1B1);
t.ensureIndex(indexA1C1);
-var queryA1 = {
- a: 1,
- b: 1
-};
-var projectionA1 = {
- _id: 0,
- a: 1
-};
-var sortA1 = {
- a: -1
-};
+var queryA1 = {a: 1, b: 1};
+var projectionA1 = {_id: 0, a: 1};
+var sortA1 = {a: -1};
//
// Tests for planCacheListFilters, planCacheClearFilters, planCacheSetFilter
@@ -110,17 +94,12 @@ assert.eq(0, filters.length, 'unexpected number of index filters in planCacheLis
// Check details of winning plan in plan cache before setting index filter.
assert.eq(1, t.find(queryA1, projectionA1).sort(sortA1).itcount(), 'unexpected document count');
-var shape = {
- query: queryA1,
- sort: sortA1,
- projection: projectionA1
-};
+var shape = {query: queryA1, sort: sortA1, projection: projectionA1};
var planBeforeSetFilter = getPlans(shape)[0];
print('Winning plan (before setting index filters) = ' + tojson(planBeforeSetFilter));
// Check filterSet field in plan details
-assert.eq(false,
- planBeforeSetFilter.filterSet,
- 'missing or invalid filterSet field in plan details');
+assert.eq(
+ false, planBeforeSetFilter.filterSet, 'missing or invalid filterSet field in plan details');
// Adding index filters to a non-existent collection should be an error.
assert.commandFailed(missingCollection.runCommand(
@@ -132,9 +111,8 @@ assert.commandWorked(t.runCommand(
'planCacheSetFilter',
{query: queryA1, sort: sortA1, projection: projectionA1, indexes: [indexA1B1, indexA1C1]}));
filters = getFilters();
-assert.eq(1,
- filters.length,
- 'no change in query settings after successfully setting index filters');
+assert.eq(
+ 1, filters.length, 'no change in query settings after successfully setting index filters');
assert.eq(queryA1, filters[0].query, 'unexpected query in filters');
assert.eq(sortA1, filters[0].sort, 'unexpected sort in filters');
assert.eq(projectionA1, filters[0].projection, 'unexpected projection in filters');
@@ -196,13 +174,9 @@ if (db.isMaster().msg !== "isdbgrid") {
.queryPlanner.indexFilterSet);
// With two filters set.
- assert.commandWorked(t.runCommand('planCacheSetFilter',
- {
- query: queryA1,
- projection: projectionA1,
- sort: sortA1,
- indexes: [indexA1B1, indexA1C1]
- }));
+ assert.commandWorked(t.runCommand(
+ 'planCacheSetFilter',
+ {query: queryA1, projection: projectionA1, sort: sortA1, indexes: [indexA1B1, indexA1C1]}));
assert.eq(true, t.find({z: 1}).explain('queryPlanner').queryPlanner.indexFilterSet);
assert.eq(true,
t.find(queryA1, projectionA1)
diff --git a/jstests/core/index_many.js b/jstests/core/index_many.js
index 142c9bbc4a5..f2daa8c9fcf 100644
--- a/jstests/core/index_many.js
+++ b/jstests/core/index_many.js
@@ -15,13 +15,9 @@ function f() {
patt = {};
patt[x] = 1;
if (x == 20)
- patt = {
- x: 1
- };
+ patt = {x: 1};
if (x == 64)
- patt = {
- y: 1
- };
+ patt = {y: 1};
lastErr = t.ensureIndex(patt);
x++;
}
diff --git a/jstests/core/index_partial_create_drop.js b/jstests/core/index_partial_create_drop.js
index 483dc26f5e5..1c8552804b3 100644
--- a/jstests/core/index_partial_create_drop.js
+++ b/jstests/core/index_partial_create_drop.js
@@ -24,12 +24,10 @@
assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {x: {$asdasd: 3}}}));
assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {$and: 5}}));
assert.commandFailed(coll.ensureIndex({x: 1}, {partialFilterExpression: {x: /abc/}}));
- assert.commandFailed(coll.ensureIndex(
- {x: 1},
- {
- partialFilterExpression:
- {$and: [{$and: [{x: {$lt: 2}}, {x: {$gt: 0}}]}, {x: {$exists: true}}]}
- }));
+ assert.commandFailed(coll.ensureIndex({x: 1}, {
+ partialFilterExpression:
+ {$and: [{$and: [{x: {$lt: 2}}, {x: {$gt: 0}}]}, {x: {$exists: true}}]}
+ }));
for (var i = 0; i < 10; i++) {
assert.writeOK(coll.insert({x: i, a: i}));
diff --git a/jstests/core/index_stats.js b/jstests/core/index_stats.js
index 7db4559210c..16d5a16d8d2 100644
--- a/jstests/core/index_stats.js
+++ b/jstests/core/index_stats.js
@@ -78,8 +78,8 @@
//
// Confirm index stats tick on findAndModify() update.
//
- var res = db.runCommand(
- {findAndModify: colName, query: {a: 1}, update: {$set: {d: 1}}, 'new': true});
+ var res =
+ db.runCommand({findAndModify: colName, query: {a: 1}, update: {$set: {d: 1}}, 'new': true});
assert.commandWorked(res);
countA++;
assert.eq(countA, getUsageCount("a_1"));
diff --git a/jstests/core/indexu.js b/jstests/core/indexu.js
index 923356bf79b..088f0b86d44 100644
--- a/jstests/core/indexu.js
+++ b/jstests/core/indexu.js
@@ -4,15 +4,9 @@
t = db.jstests_indexu;
t.drop();
-var dupDoc = {
- a: [{'0': 1}]
-}; // There are two 'a.0' fields in this doc.
-var dupDoc2 = {
- a: [{'1': 1}, 'c']
-};
-var noDupDoc = {
- a: [{'1': 1}]
-};
+var dupDoc = {a: [{'0': 1}]}; // There are two 'a.0' fields in this doc.
+var dupDoc2 = {a: [{'1': 1}, 'c']};
+var noDupDoc = {a: [{'1': 1}]};
// Test that we can't index dupDoc.
assert.writeOK(t.save(dupDoc));
diff --git a/jstests/core/insert1.js b/jstests/core/insert1.js
index 0f4f6977a1a..d6886491999 100644
--- a/jstests/core/insert1.js
+++ b/jstests/core/insert1.js
@@ -1,9 +1,7 @@
t = db.insert1;
t.drop();
-var o = {
- a: 1
-};
+var o = {a: 1};
t.insert(o);
var doc = t.findOne();
assert.eq(1, doc.a);
diff --git a/jstests/core/js1.js b/jstests/core/js1.js
index 89910f4bd23..66462237417 100644
--- a/jstests/core/js1.js
+++ b/jstests/core/js1.js
@@ -6,17 +6,15 @@ t.remove({});
t.save({z: 1});
t.save({z: 2});
assert(2 == t.find().length());
-assert(2 ==
- t.find({
- $where: function() {
- return 1;
- }
- }).length());
-assert(1 ==
- t.find({
- $where: function() {
- return obj.z == 2;
- }
- }).length());
+assert(2 == t.find({
+ $where: function() {
+ return 1;
+ }
+ }).length());
+assert(1 == t.find({
+ $where: function() {
+ return obj.z == 2;
+ }
+ }).length());
assert(t.validate().valid);
diff --git a/jstests/core/js2.js b/jstests/core/js2.js
index 9dfb5c0b091..abd707fdbbd 100644
--- a/jstests/core/js2.js
+++ b/jstests/core/js2.js
@@ -11,11 +11,11 @@ t.save({z: 1});
t.save({z: 2});
assert.throws(function() {
t.find({
- $where: function() {
- db.jstests_js2_2.save({y: 1});
- return 1;
- }
- }).forEach(printjson);
+ $where: function() {
+ db.jstests_js2_2.save({y: 1});
+ return 1;
+ }
+ }).forEach(printjson);
}, null, "can't save from $where");
assert.eq(0, t2.find().length(), "B");
diff --git a/jstests/core/js3.js b/jstests/core/js3.js
index 36d16051135..4d46c25bbf7 100644
--- a/jstests/core/js3.js
+++ b/jstests/core/js3.js
@@ -18,14 +18,12 @@ for (z = 0; z < 2; z++) {
for (i = 0; i < 1000; i++)
t.save({
i: i,
- z:
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
});
- assert(33 ==
- db.dbEval(function() {
- return 33;
- }));
+ assert(33 == db.dbEval(function() {
+ return 33;
+ }));
db.dbEval(function() {
db.jstests_js3.save({i: -1, z: "server side"});
@@ -33,12 +31,11 @@ for (z = 0; z < 2; z++) {
assert(t.findOne({i: -1}));
- assert(2 ==
- t.find({
- $where: function() {
- return obj.i == 7 || obj.i == 8;
- }
- }).length());
+ assert(2 == t.find({
+ $where: function() {
+ return obj.i == 7 || obj.i == 8;
+ }
+ }).length());
// NPE test
var ok = false;
@@ -62,10 +59,10 @@ for (z = 0; z < 2; z++) {
debug("before indexed find");
arr = t.find({
- $where: function() {
- return obj.i == 7 || obj.i == 8;
- }
- }).toArray();
+ $where: function() {
+ return obj.i == 7 || obj.i == 8;
+ }
+ }).toArray();
debug(arr);
assert.eq(2, arr.length);
@@ -74,8 +71,7 @@ for (z = 0; z < 2; z++) {
for (i = 1000; i < 2000; i++)
t.save({
i: i,
- z:
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+ z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
});
assert(t.find().count() == 2001);
diff --git a/jstests/core/js7.js b/jstests/core/js7.js
index aeaec66ff47..810f4692d4f 100644
--- a/jstests/core/js7.js
+++ b/jstests/core/js7.js
@@ -1,7 +1,6 @@
t = db.jstests_js7;
t.drop();
-assert.eq(17,
- db.eval(function(foo) {
- return foo;
- }, 17));
+assert.eq(17, db.eval(function(foo) {
+ return foo;
+}, 17));
diff --git a/jstests/core/js8.js b/jstests/core/js8.js
index 15b7ff7d7af..d670abb0587 100644
--- a/jstests/core/js8.js
+++ b/jstests/core/js8.js
@@ -6,30 +6,30 @@ t.save({a: 1, b: [2, 3, 4]});
assert.eq(1, t.find().length(), "A");
assert.eq(1,
t.find(function() {
- return this.a == 1;
- }).length(),
+ return this.a == 1;
+ }).length(),
"B");
assert.eq(1,
t.find(function() {
- if (!this.b.length)
- return true;
- return this.b.length == 3;
- }).length(),
+ if (!this.b.length)
+ return true;
+ return this.b.length == 3;
+ }).length(),
"B2");
assert.eq(1,
t.find(function() {
- return this.b[0] == 2;
- }).length(),
+ return this.b[0] == 2;
+ }).length(),
"C");
assert.eq(0,
t.find(function() {
- return this.b[0] == 3;
- }).length(),
+ return this.b[0] == 3;
+ }).length(),
"D");
assert.eq(1,
t.find(function() {
- return this.b[1] == 3;
- }).length(),
+ return this.b[1] == 3;
+ }).length(),
"E");
assert(t.validate().valid);
diff --git a/jstests/core/js9.js b/jstests/core/js9.js
index b29a31afdc4..515fa883aea 100644
--- a/jstests/core/js9.js
+++ b/jstests/core/js9.js
@@ -7,11 +7,10 @@ c.save({a: 2});
assert.eq(2, c.find().length());
assert.eq(2, c.find().count());
-assert.eq(2,
- db.eval(function() {
- num = 0;
- db.jstests_js9.find().forEach(function(z) {
- num++;
- });
- return num;
- }));
+assert.eq(2, db.eval(function() {
+ num = 0;
+ db.jstests_js9.find().forEach(function(z) {
+ num++;
+ });
+ return num;
+}));
diff --git a/jstests/core/list_collections1.js b/jstests/core/list_collections1.js
index 04acb82290b..9745adc1f90 100644
--- a/jstests/core/list_collections1.js
+++ b/jstests/core/list_collections1.js
@@ -46,11 +46,9 @@
assert.commandWorked(mydb.dropDatabase());
assert.commandWorked(mydb.createCollection("foo"));
- assert.eq(1,
- cursorCountMatching(getListCollectionsCursor(),
- function(c) {
- return c.name === "foo";
- }));
+ assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) {
+ return c.name === "foo";
+ }));
//
// Test that the collection metadata object is returned correctly.
@@ -59,16 +57,12 @@
assert.commandWorked(mydb.dropDatabase());
assert.commandWorked(mydb.createCollection("foo"));
assert.commandWorked(mydb.createCollection("bar", {temp: true}));
- assert.eq(1,
- cursorCountMatching(getListCollectionsCursor(),
- function(c) {
- return c.name === "foo" && c.options.temp === undefined;
- }));
- assert.eq(1,
- cursorCountMatching(getListCollectionsCursor(),
- function(c) {
- return c.name === "bar" && c.options.temp === true;
- }));
+ assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) {
+ return c.name === "foo" && c.options.temp === undefined;
+ }));
+ assert.eq(1, cursorCountMatching(getListCollectionsCursor(), function(c) {
+ return c.name === "bar" && c.options.temp === true;
+ }));
//
// Test basic usage of "filter" option.
@@ -77,29 +71,23 @@
assert.commandWorked(mydb.dropDatabase());
assert.commandWorked(mydb.createCollection("foo"));
assert.commandWorked(mydb.createCollection("bar", {temp: true}));
- assert.eq(2,
- cursorCountMatching(getListCollectionsCursor({filter: {}}),
- function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
+ assert.eq(2, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
assert.eq(2, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount());
assert.eq(1, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount());
assert.eq(1, getListCollectionsCursor({filter: {"options.temp": true}}).itcount());
mydb.foo.drop();
- assert.eq(1,
- cursorCountMatching(getListCollectionsCursor({filter: {}}),
- function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
+ assert.eq(1, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
assert.eq(1, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount());
assert.eq(0, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount());
assert.eq(1, getListCollectionsCursor({filter: {"options.temp": true}}).itcount());
mydb.bar.drop();
- assert.eq(0,
- cursorCountMatching(getListCollectionsCursor({filter: {}}),
- function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
+ assert.eq(0, cursorCountMatching(getListCollectionsCursor({filter: {}}), function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
assert.eq(0, getListCollectionsCursor({filter: {name: {$in: ["foo", "bar"]}}}).itcount());
assert.eq(0, getListCollectionsCursor({filter: {name: /^foo$/}}).itcount());
assert.eq(0, getListCollectionsCursor({filter: {"options.temp": true}}).itcount());
@@ -130,50 +118,38 @@
assert.commandWorked(mydb.createCollection("bar"));
cursor = getListCollectionsCursor({cursor: {batchSize: 2}});
assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(2,
- cursorCountMatching(cursor,
- function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
+ assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
cursor = getListCollectionsCursor({cursor: {batchSize: 1}});
assert.eq(1, cursor.objsLeftInBatch());
- assert.eq(2,
- cursorCountMatching(cursor,
- function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
+ assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
cursor = getListCollectionsCursor({cursor: {batchSize: 0}});
assert.eq(0, cursor.objsLeftInBatch());
- assert.eq(2,
- cursorCountMatching(cursor,
- function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
+ assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
cursor = getListCollectionsCursor({cursor: {batchSize: NumberInt(2)}});
assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(2,
- cursorCountMatching(cursor,
- function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
+ assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
cursor = getListCollectionsCursor({cursor: {batchSize: NumberLong(2)}});
assert.eq(2, cursor.objsLeftInBatch());
- assert.eq(2,
- cursorCountMatching(cursor,
- function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
+ assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
// Test a large batch size, and assert that at least 2 results are returned in the initial
// batch.
cursor = getListCollectionsCursor({cursor: {batchSize: Math.pow(2, 62)}});
assert.lte(2, cursor.objsLeftInBatch());
- assert.eq(2,
- cursorCountMatching(cursor,
- function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
+ assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
// Ensure that the server accepts an empty object for "cursor". This is equivalent to not
// specifying "cursor" at all.
@@ -181,11 +157,9 @@
// We do not test for objsLeftInBatch() here, since the default batch size for this command
// is not specified.
cursor = getListCollectionsCursor({cursor: {}});
- assert.eq(2,
- cursorCountMatching(cursor,
- function(c) {
- return c.name === "foo" || c.name === "bar";
- }));
+ assert.eq(2, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo" || c.name === "bar";
+ }));
//
// Test for invalid values of "cursor" and "cursor.batchSize".
@@ -245,11 +219,9 @@
assert.commandWorked(mydb.dropDatabase());
cursor = getListCollectionsCursor();
- assert.eq(0,
- cursorCountMatching(cursor,
- function(c) {
- return c.name === "foo";
- }));
+ assert.eq(0, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo";
+ }));
//
// Test on empty database.
@@ -259,11 +231,9 @@
assert.commandWorked(mydb.createCollection("foo"));
mydb.foo.drop();
cursor = getListCollectionsCursor();
- assert.eq(0,
- cursorCountMatching(cursor,
- function(c) {
- return c.name === "foo";
- }));
+ assert.eq(0, cursorCountMatching(cursor, function(c) {
+ return c.name === "foo";
+ }));
//
// Test killCursors against a listCollections cursor.
diff --git a/jstests/core/list_collections_filter.js b/jstests/core/list_collections_filter.js
index 4b5c42bbc78..e0d18f055d0 100644
--- a/jstests/core/list_collections_filter.js
+++ b/jstests/core/list_collections_filter.js
@@ -67,10 +67,20 @@
// Filter with $and and $in.
testListCollections({name: {$in: ["lists", /.*_sets$/]}, options: {}},
["lists", "ordered_sets", "unordered_sets"]);
- testListCollections(
- {$and: [{name: {$in: ["lists", /.*_sets$/]}}, {name: "lists"}, {options: {}}, ]},
- ["lists"]);
- testListCollections(
- {$and: [{name: {$in: ["lists", /.*_sets$/]}}, {name: "non-existent"}, {options: {}}, ]},
- []);
+ testListCollections({
+ $and: [
+ {name: {$in: ["lists", /.*_sets$/]}},
+ {name: "lists"},
+ {options: {}},
+ ]
+ },
+ ["lists"]);
+ testListCollections({
+ $and: [
+ {name: {$in: ["lists", /.*_sets$/]}},
+ {name: "non-existent"},
+ {options: {}},
+ ]
+ },
+ []);
}());
diff --git a/jstests/core/long_index_rename.js b/jstests/core/long_index_rename.js
index df3397bbb46..a0bf96aa894 100644
--- a/jstests/core/long_index_rename.js
+++ b/jstests/core/long_index_rename.js
@@ -11,11 +11,9 @@ for (i = 1; i < 10; i++) {
}
t.createIndex({a: 1}, {name: "aaa"});
-var result = t.createIndex(
- {a: 1},
- {
- name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
- });
+var result = t.createIndex({a: 1}, {
+ name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+});
assert(!result.ok);
assert(result.errmsg.indexOf("too long") >= 0);
diff --git a/jstests/core/max_doc_size.js b/jstests/core/max_doc_size.js
index 03deeafb307..383e4b4b76d 100644
--- a/jstests/core/max_doc_size.js
+++ b/jstests/core/max_doc_size.js
@@ -53,10 +53,7 @@ assert.neq(null, res.writeErrors);
coll.drop();
id = new ObjectId();
coll.insert({_id: id});
-res = db.runCommand({
- update: coll.getName(),
- ordered: true,
- updates: [{q: {_id: id}, u: {$set: {x: overBigStr}}}]
-});
+res = db.runCommand(
+ {update: coll.getName(), ordered: true, updates: [{q: {_id: id}, u: {$set: {x: overBigStr}}}]});
assert(res.ok);
assert.neq(null, res.writeErrors);
diff --git a/jstests/core/mr1.js b/jstests/core/mr1.js
index 0225ab3cb62..00a5726db0d 100644
--- a/jstests/core/mr1.js
+++ b/jstests/core/mr1.js
@@ -34,9 +34,7 @@ r = function(key, values) {
for (var i = 0; i < values.length; i++) {
total += values[i].count;
}
- return {
- count: total
- };
+ return {count: total};
};
r2 = function(key, values) {
@@ -136,19 +134,14 @@ if (true) {
printjson(db.runCommand({mapreduce: "mr1", map: m, reduce: r, verbose: true, out: "mr1_out"}));
}
-print("t1: " +
- Date.timeFunc(
- function() {
- var out = db.runCommand({mapreduce: "mr1", map: m, reduce: r, out: "mr1_out"});
- if (ks == "_id")
- assert(out.ok, "XXX : " + tojson(out));
- db[out.result].drop();
- },
- 10) +
- " (~500 on 2.8ghz) - itcount: " +
- Date.timeFunc(function() {
- db.mr1.find().itcount();
- }, 10));
+print("t1: " + Date.timeFunc(function() {
+ var out = db.runCommand({mapreduce: "mr1", map: m, reduce: r, out: "mr1_out"});
+ if (ks == "_id")
+ assert(out.ok, "XXX : " + tojson(out));
+ db[out.result].drop();
+}, 10) + " (~500 on 2.8ghz) - itcount: " + Date.timeFunc(function() {
+ db.mr1.find().itcount();
+}, 10));
// test doesn't exist
res =
@@ -169,10 +162,9 @@ if (true) {
res = db.runCommand({mapreduce: "mr1", out: "mr1_foo", map: m, reduce: r});
d(res);
- print("t2: " + res.timeMillis + " (~3500 on 2.8ghz) - itcount: " +
- Date.timeFunc(function() {
- db.mr1.find().itcount();
- }));
+ print("t2: " + res.timeMillis + " (~3500 on 2.8ghz) - itcount: " + Date.timeFunc(function() {
+ db.mr1.find().itcount();
+ }));
x = db[res.result];
z = {};
x.find().forEach(function(a) {
diff --git a/jstests/core/mr2.js b/jstests/core/mr2.js
index c13ff447970..744fb916088 100644
--- a/jstests/core/mr2.js
+++ b/jstests/core/mr2.js
@@ -15,10 +15,7 @@ function m() {
}
function r(who, values) {
- var n = {
- totalSize: 0,
- num: 0
- };
+ var n = {totalSize: 0, num: 0};
for (var i = 0; i < values.length; i++) {
n.totalSize += values[i].totalSize;
n.num += values[i].num;
diff --git a/jstests/core/mr3.js b/jstests/core/mr3.js
index a2cee1f2d8f..f5a45e0fd1c 100644
--- a/jstests/core/mr3.js
+++ b/jstests/core/mr3.js
@@ -20,9 +20,7 @@ r = function(key, values) {
for (var i = 0; i < values.length; i++) {
total += values[i].count;
}
- return {
- count: total
- };
+ return {count: total};
};
res = t.mapReduce(m, r, {out: "mr3_out"});
diff --git a/jstests/core/mr4.js b/jstests/core/mr4.js
index ae5e11528af..58ea303f7e8 100644
--- a/jstests/core/mr4.js
+++ b/jstests/core/mr4.js
@@ -18,9 +18,7 @@ r = function(key, values) {
for (var i = 0; i < values.length; i++) {
total += values[i].count;
}
- return {
- count: total
- };
+ return {count: total};
};
res = t.mapReduce(m, r, {out: "mr4_out", scope: {xx: 1}});
diff --git a/jstests/core/mr5.js b/jstests/core/mr5.js
index 537625e954b..8552f891358 100644
--- a/jstests/core/mr5.js
+++ b/jstests/core/mr5.js
@@ -22,10 +22,7 @@ r = function(k, v) {
total += v[i].stats[j];
}
}
- return {
- stats: stats,
- total: total
- };
+ return {stats: stats, total: total};
};
res = t.mapReduce(m, r, {out: "mr5_out", scope: {xx: 1}});
@@ -41,11 +38,7 @@ res.drop();
m = function() {
var x = "partner";
var y = "visits";
- emit(this [x],
- {
- stats:
- [this[y]]
- });
+ emit(this[x], {stats: [this[y]]});
};
res = t.mapReduce(m, r, {out: "mr5_out", scope: {xx: 1}});
diff --git a/jstests/core/mr_bigobject.js b/jstests/core/mr_bigobject.js
index d87b2af4cdc..ff8e3cd1652 100644
--- a/jstests/core/mr_bigobject.js
+++ b/jstests/core/mr_bigobject.js
@@ -41,8 +41,7 @@ r = function(k, v) {
return total;
};
-assert.eq({1: t.count() * s.length},
- t.mapReduce(m, r, "mr_bigobject_out").convertToSingleObject(),
- "A1");
+assert.eq(
+ {1: t.count() * s.length}, t.mapReduce(m, r, "mr_bigobject_out").convertToSingleObject(), "A1");
t.drop();
diff --git a/jstests/core/mr_bigobject_replace.js b/jstests/core/mr_bigobject_replace.js
index 28a295c1b2e..c30b6e5de48 100644
--- a/jstests/core/mr_bigobject_replace.js
+++ b/jstests/core/mr_bigobject_replace.js
@@ -14,10 +14,7 @@
// Returns a document of the form { _id: ObjectId(...), value: '...' } with the specified
// 'targetSize' in bytes.
function makeDocWithSize(targetSize) {
- var doc = {
- _id: new ObjectId(),
- value: ''
- };
+ var doc = {_id: new ObjectId(), value: ''};
var size = Object.bsonsize(doc);
assert.gte(targetSize, size);
@@ -41,13 +38,12 @@
// Insert a document so the mapper gets run.
assert.writeOK(db.input.insert({}));
- var res = db.runCommand(Object.extend(
- {
- mapReduce: "input",
- map: mapper,
- out: {replace: "mr_bigobject_replace"},
- },
- testOptions));
+ var res = db.runCommand(Object.extend({
+ mapReduce: "input",
+ map: mapper,
+ out: {replace: "mr_bigobject_replace"},
+ },
+ testOptions));
assert.commandFailed(res, "creating a document larger than 16MB didn't fail");
assert.lte(0,
diff --git a/jstests/core/mr_index3.js b/jstests/core/mr_index3.js
index bac61cb6bc1..885089ade4d 100644
--- a/jstests/core/mr_index3.js
+++ b/jstests/core/mr_index3.js
@@ -17,20 +17,16 @@ r = function(key, values) {
};
a1 = db.runCommand({mapreduce: 'mr_index3', map: m, reduce: r, out: {inline: true}}).results;
-a2 = db.runCommand({
- mapreduce: 'mr_index3',
- map: m,
- reduce: r,
- query: {name: 'name1'},
- out: {inline: true}
-}).results;
+a2 = db.runCommand(
+ {mapreduce: 'mr_index3', map: m, reduce: r, query: {name: 'name1'}, out: {inline: true}})
+ .results;
a3 = db.runCommand({
- mapreduce: 'mr_index3',
- map: m,
- reduce: r,
- query: {name: {$gt: 'name'}},
- out: {inline: true}
-}).results;
+ mapreduce: 'mr_index3',
+ map: m,
+ reduce: r,
+ query: {name: {$gt: 'name'}},
+ out: {inline: true}
+ }).results;
assert.eq([{"_id": "cat", "value": 3}, {"_id": "dog", "value": 2}, {"_id": "mouse", "value": 1}],
a1,
@@ -41,20 +37,16 @@ assert.eq(a1, a3, "A3");
t.ensureIndex({name: 1, tags: 1});
b1 = db.runCommand({mapreduce: 'mr_index3', map: m, reduce: r, out: {inline: true}}).results;
-b2 = db.runCommand({
- mapreduce: 'mr_index3',
- map: m,
- reduce: r,
- query: {name: 'name1'},
- out: {inline: true}
-}).results;
+b2 = db.runCommand(
+ {mapreduce: 'mr_index3', map: m, reduce: r, query: {name: 'name1'}, out: {inline: true}})
+ .results;
b3 = db.runCommand({
- mapreduce: 'mr_index3',
- map: m,
- reduce: r,
- query: {name: {$gt: 'name'}},
- out: {inline: true}
-}).results;
+ mapreduce: 'mr_index3',
+ map: m,
+ reduce: r,
+ query: {name: {$gt: 'name'}},
+ out: {inline: true}
+ }).results;
assert.eq(a1, b1, "AB1");
assert.eq(a2, b2, "AB2");
diff --git a/jstests/core/mr_killop.js b/jstests/core/mr_killop.js
index 52865eacbe4..78e98f0bcaa 100644
--- a/jstests/core/mr_killop.js
+++ b/jstests/core/mr_killop.js
@@ -51,12 +51,7 @@ function testOne(map, reduce, finalize, scope, childLoop, wait) {
t.save({a: 1});
t.save({a: 1});
- spec = {
- mapreduce: "jstests_mr_killop",
- out: "jstests_mr_killop_out",
- map: map,
- reduce: reduce
- };
+ spec = {mapreduce: "jstests_mr_killop", out: "jstests_mr_killop_out", map: map, reduce: reduce};
if (finalize) {
spec["finalize"] = finalize;
}
diff --git a/jstests/core/mr_mutable_properties.js b/jstests/core/mr_mutable_properties.js
index 12c52385275..84066bf76c0 100644
--- a/jstests/core/mr_mutable_properties.js
+++ b/jstests/core/mr_mutable_properties.js
@@ -8,28 +8,20 @@ collection.insert({a: 1});
var map = function() {
// set property on receiver
- this.feed = {
- beef: 1
- };
+ this.feed = {beef: 1};
// modify property on receiver
- this.a = {
- cake: 1
- };
+ this.a = {cake: 1};
emit(this._id, this.feed);
emit(this._id, this.a);
};
var reduce = function(key, values) {
// set property on receiver
- this.feed = {
- beat: 1
- };
+ this.feed = {beat: 1};
// set property on key arg
- key.fed = {
- mochi: 1
- };
+ key.fed = {mochi: 1};
// push properties onto values array arg
values.push(this.feed);
@@ -39,21 +31,15 @@ var reduce = function(key, values) {
values.forEach(function(val) {
val.mod = 1;
});
- return {
- food: values
- };
+ return {food: values};
};
var finalize = function(key, values) {
// set property on receiver
- this.feed = {
- ice: 1
- };
+ this.feed = {ice: 1};
// set property on key arg
- key.fed = {
- cream: 1
- };
+ key.fed = {cream: 1};
// push properties onto values array arg
printjson(values);
diff --git a/jstests/core/mr_stored.js b/jstests/core/mr_stored.js
index 63fa301e66d..5969638b8b9 100644
--- a/jstests/core/mr_stored.js
+++ b/jstests/core/mr_stored.js
@@ -22,10 +22,7 @@ r = function(k, v) {
total += v[i].stats[j];
}
}
- return {
- stats: stats,
- total: total
- };
+ return {stats: stats, total: total};
};
// Test that map reduce works with stored javascript
diff --git a/jstests/core/nestedarr1.js b/jstests/core/nestedarr1.js
index 98ddc2193ea..720c1c27821 100644
--- a/jstests/core/nestedarr1.js
+++ b/jstests/core/nestedarr1.js
@@ -3,13 +3,9 @@
function makeNestArr(depth) {
if (depth == 1) {
- return {
- a: [depth]
- };
+ return {a: [depth]};
} else {
- return {
- a: [makeNestArr(depth - 1)]
- };
+ return {a: [makeNestArr(depth - 1)]};
}
}
diff --git a/jstests/core/nestedobj1.js b/jstests/core/nestedobj1.js
index 97b9460da6f..379224c1775 100644
--- a/jstests/core/nestedobj1.js
+++ b/jstests/core/nestedobj1.js
@@ -1,14 +1,10 @@
// SERVER-5127, SERVER-5036
function makeNestObj(depth) {
- toret = {
- a: 1
- };
+ toret = {a: 1};
for (i = 1; i < depth; i++) {
- toret = {
- a: toret
- };
+ toret = {a: toret};
}
return toret;
diff --git a/jstests/core/nin.js b/jstests/core/nin.js
index d6cd78ee7a4..e7bb66bed93 100644
--- a/jstests/core/nin.js
+++ b/jstests/core/nin.js
@@ -3,13 +3,9 @@ t.drop();
function checkEqual(name, key, value) {
var o = {};
- o[key] = {
- $in: [value]
- };
+ o[key] = {$in: [value]};
var i = t.find(o).count();
- o[key] = {
- $nin: [value]
- };
+ o[key] = {$nin: [value]};
var n = t.find(o).count();
assert.eq(t.find().count(),
diff --git a/jstests/core/not3.js b/jstests/core/not3.js
index 9699f3838d1..9f3014f2c1a 100644
--- a/jstests/core/not3.js
+++ b/jstests/core/not3.js
@@ -9,15 +9,11 @@ t.save({_id: 0, arr: [1, 2, 3]});
t.save({_id: 1, arr: [10, 11]});
// Case 1: simple $ne over array field.
-var case1 = {
- arr: {$ne: 3}
-};
+var case1 = {arr: {$ne: 3}};
assert.eq(1, t.find(case1).itcount(), "Case 1: wrong number of results");
assert.eq(1, t.findOne(case1)._id, "Case 1: wrong _id");
// Case 2: simple $not over array field.
-var case2 = {
- arr: {$not: {$gt: 6}}
-};
+var case2 = {arr: {$not: {$gt: 6}}};
assert.eq(1, t.find(case2).itcount(), "Case 2: wrong number of results");
assert.eq(0, t.findOne(case2)._id, "Case 2: wrong _id");
diff --git a/jstests/core/or1.js b/jstests/core/or1.js
index 0552524eb4c..e7c417800b6 100644
--- a/jstests/core/or1.js
+++ b/jstests/core/or1.js
@@ -41,14 +41,15 @@ doTest = function() {
checkArrs([{_id: 0, a: 1}, {_id: 4, a: 1, b: 1}, {_id: 5, a: 1, b: 2}], a1);
a1b2 = t.find({$or: [{a: 1}, {b: 2}]}).toArray();
- checkArrs([
- {_id: 0, a: 1},
- {_id: 3, b: 2},
- {_id: 4, a: 1, b: 1},
- {_id: 5, a: 1, b: 2},
- {_id: 7, a: 2, b: 2}
- ],
- a1b2);
+ checkArrs(
+ [
+ {_id: 0, a: 1},
+ {_id: 3, b: 2},
+ {_id: 4, a: 1, b: 1},
+ {_id: 5, a: 1, b: 2},
+ {_id: 7, a: 2, b: 2}
+ ],
+ a1b2);
t.drop();
t.save({a: [0, 1], b: [0, 1]});
diff --git a/jstests/core/or_inexact.js b/jstests/core/or_inexact.js
index 8c9db1cc7ba..3e7e374d7f5 100644
--- a/jstests/core/or_inexact.js
+++ b/jstests/core/or_inexact.js
@@ -190,8 +190,8 @@ t.drop();
t.ensureIndex({"a.b": 1});
t.insert({_id: 0, a: [{b: 1}, {b: 2}]});
t.insert({_id: 1, a: [{b: 2}, {b: 4}]});
-cursor = t.find(
- {"a.b": 2, $or: [{a: {$elemMatch: {b: {$lte: 1}}}}, {a: {$elemMatch: {b: {$gte: 4}}}}]});
+cursor =
+ t.find({"a.b": 2, $or: [{a: {$elemMatch: {b: {$lte: 1}}}}, {a: {$elemMatch: {b: {$gte: 4}}}}]});
assert.eq(2, cursor.itcount(), "case 14");
// Case 15: $or below $elemMatch.
diff --git a/jstests/core/orc.js b/jstests/core/orc.js
index 7d686972898..c68c6859651 100644
--- a/jstests/core/orc.js
+++ b/jstests/core/orc.js
@@ -22,24 +22,22 @@ t.ensureIndex({a: -1, b: 1, c: 1});
// sanity test
t.save({a: null, b: 4, c: 4});
-assert.eq(1,
- t.count({
- $or: [
- {a: null, b: {$gte: 0, $lte: 5}, c: {$gte: 0, $lte: 5}},
- {a: null, b: {$gte: 3, $lte: 8}, c: {$gte: 3, $lte: 8}}
- ]
- }));
+assert.eq(1, t.count({
+ $or: [
+ {a: null, b: {$gte: 0, $lte: 5}, c: {$gte: 0, $lte: 5}},
+ {a: null, b: {$gte: 3, $lte: 8}, c: {$gte: 3, $lte: 8}}
+ ]
+}));
// from here on is SERVER-2245
t.remove({});
t.save({b: 4, c: 4});
-assert.eq(1,
- t.count({
- $or: [
- {a: null, b: {$gte: 0, $lte: 5}, c: {$gte: 0, $lte: 5}},
- {a: null, b: {$gte: 3, $lte: 8}, c: {$gte: 3, $lte: 8}}
- ]
- }));
+assert.eq(1, t.count({
+ $or: [
+ {a: null, b: {$gte: 0, $lte: 5}, c: {$gte: 0, $lte: 5}},
+ {a: null, b: {$gte: 3, $lte: 8}, c: {$gte: 3, $lte: 8}}
+ ]
+}));
// t.remove({});
// t.save( {a:[],b:4,c:4} );
diff --git a/jstests/core/ork.js b/jstests/core/ork.js
index f367b6b4bad..7c9bc982776 100644
--- a/jstests/core/ork.js
+++ b/jstests/core/ork.js
@@ -7,17 +7,15 @@ t.ensureIndex({a: 1});
t.save({a: [1, 2], b: 5});
t.save({a: [2, 4], b: 5});
-assert.eq(2,
- t.find({
- $or: [
- {a: 1, $and: [{$or: [{a: 2}, {a: 3}]}, {$or: [{b: 5}]}]},
- {a: 2, $or: [{a: 3}, {a: 4}]}
- ]
- }).itcount());
-assert.eq(1,
- t.find({
- $or: [
- {a: 1, $and: [{$or: [{a: 2}, {a: 3}]}, {$or: [{b: 6}]}]},
- {a: 2, $or: [{a: 3}, {a: 4}]}
- ]
- }).itcount());
+assert.eq(2, t.find({
+ $or: [
+ {a: 1, $and: [{$or: [{a: 2}, {a: 3}]}, {$or: [{b: 5}]}]},
+ {a: 2, $or: [{a: 3}, {a: 4}]}
+ ]
+ }).itcount());
+assert.eq(1, t.find({
+ $or: [
+ {a: 1, $and: [{$or: [{a: 2}, {a: 3}]}, {$or: [{b: 6}]}]},
+ {a: 2, $or: [{a: 3}, {a: 4}]}
+ ]
+ }).itcount());
diff --git a/jstests/core/plan_cache_list_plans.js b/jstests/core/plan_cache_list_plans.js
index b4be4ad46c4..7ca599483ff 100644
--- a/jstests/core/plan_cache_list_plans.js
+++ b/jstests/core/plan_cache_list_plans.js
@@ -5,11 +5,7 @@ t.drop();
// Utility function to list plans for a query.
function getPlans(query, sort, projection) {
- var key = {
- query: query,
- sort: sort,
- projection: projection
- };
+ var key = {query: query, sort: sort, projection: projection};
var res = t.runCommand('planCacheListPlans', key);
assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed');
assert(res.hasOwnProperty('plans'),
@@ -32,9 +28,8 @@ assert.eq(0,
'planCacheListPlans should return empty results on unknown query shape');
// Create a cache entry.
-assert.eq(1,
- t.find({a: 1, b: 1}, {_id: 0, a: 1}).sort({a: -1}).itcount(),
- 'unexpected document count');
+assert.eq(
+ 1, t.find({a: 1, b: 1}, {_id: 0, a: 1}).sort({a: -1}).itcount(), 'unexpected document count');
// Retrieve plans for valid cache entry.
var plans = getPlans({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1});
diff --git a/jstests/core/plan_cache_list_shapes.js b/jstests/core/plan_cache_list_shapes.js
index 4711940870d..1c9ecdf9e1b 100644
--- a/jstests/core/plan_cache_list_shapes.js
+++ b/jstests/core/plan_cache_list_shapes.js
@@ -34,9 +34,8 @@ t.ensureIndex({a: 1});
t.ensureIndex({a: 1, b: 1});
// Run a query.
-assert.eq(1,
- t.find({a: 1, b: 1}, {_id: 1, a: 1}).sort({a: -1}).itcount(),
- 'unexpected document count');
+assert.eq(
+ 1, t.find({a: 1, b: 1}, {_id: 1, a: 1}).sort({a: -1}).itcount(), 'unexpected document count');
// We now expect the two indices to be compared and a cache entry to exist.
// Retrieve query shapes from the test collection
diff --git a/jstests/core/plan_cache_shell_helpers.js b/jstests/core/plan_cache_shell_helpers.js
index a61421afc7b..dc990b19dcc 100644
--- a/jstests/core/plan_cache_shell_helpers.js
+++ b/jstests/core/plan_cache_shell_helpers.js
@@ -16,11 +16,7 @@ function getShapes(collection) {
}
// Utility function to list plans for a query.
function getPlans(query, sort, projection) {
- var key = {
- query: query,
- sort: sort,
- projection: projection
- };
+ var key = {query: query, sort: sort, projection: projection};
var res = t.runCommand('planCacheListPlans', key);
assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed');
assert(res.hasOwnProperty('plans'),
@@ -37,17 +33,9 @@ t.ensureIndex({a: 1});
t.ensureIndex({b: 1});
// Populate plan cache.
-var queryB = {
- a: {$gte: 199},
- b: -1
-};
-var projectionB = {
- _id: 0,
- b: 1
-};
-var sortC = {
- c: -1
-};
+var queryB = {a: {$gte: 199}, b: -1};
+var projectionB = {_id: 0, b: 1};
+var sortC = {c: -1};
assert.eq(1, t.find(queryB, projectionB).sort(sortC).itcount(), 'unexpected document count');
assert.eq(1, t.find(queryB, projectionB).itcount(), 'unexpected document count');
assert.eq(1, t.find(queryB).sort(sortC).itcount(), 'unexpected document count');
@@ -124,11 +112,7 @@ assert.eq(getPlans(queryB, {}, {}),
// projection: <projection>,
// sort: <sort>
// }
-var shapeB = {
- query: queryB,
- projection: projectionB,
- sort: sortC
-};
+var shapeB = {query: queryB, projection: projectionB, sort: sortC};
assert.eq(getPlans(queryB, sortC, projectionB),
planCache.getPlansByQuery(shapeB),
'collection.getPlanCache().getPlansByQuery() did not accept query shape object');
@@ -141,12 +125,13 @@ assert.eq(0,
planCache.getPlansByQuery({query: queryB}).length,
'collection.getPlanCache.getPlansByQuery should return empty results on ' +
'incomplete query shape');
-assert.eq(0,
- planCache.getPlansByQuery(
- {query: queryB, sort: sortC, projection: projectionB, unknown_field: 1})
- .length,
- 'collection.getPlanCache.getPlansByQuery should return empty results on ' +
- 'invalid query shape');
+assert.eq(
+ 0,
+ planCache
+ .getPlansByQuery({query: queryB, sort: sortC, projection: projectionB, unknown_field: 1})
+ .length,
+ 'collection.getPlanCache.getPlansByQuery should return empty results on ' +
+ 'invalid query shape');
//
// collection.getPlanCache().clearPlansByQuery
diff --git a/jstests/core/pop_server_13516.js b/jstests/core/pop_server_13516.js
index 8d0bacbb3e5..008d0de79a1 100644
--- a/jstests/core/pop_server_13516.js
+++ b/jstests/core/pop_server_13516.js
@@ -4,10 +4,7 @@ var t = db.jstests_pop_server_13516;
t.drop();
var id = NumberInt(0);
-var object = {
- _id: id,
- data: []
-};
+var object = {_id: id, data: []};
for (var i = 0; i < 4096; i++) {
object.data[i] = 0;
diff --git a/jstests/core/profile1.js b/jstests/core/profile1.js
index 67f4a53d2b9..b1024f6668c 100644
--- a/jstests/core/profile1.js
+++ b/jstests/core/profile1.js
@@ -87,12 +87,8 @@
resetProfile(2);
db.profile1.drop();
- var q = {
- _id: 5
- };
- var u = {
- $inc: {x: 1}
- };
+ var q = {_id: 5};
+ var u = {$inc: {x: 1}};
db.profile1.update(q, u);
var r = profileCursor({ns: db.profile1.getFullName()}).sort({$natural: -1})[0];
assert.eq(q, r.query, "Y1: " + tojson(r));
diff --git a/jstests/core/profile_count.js b/jstests/core/profile_count.js
index 09ba6520828..d79fb94df85 100644
--- a/jstests/core/profile_count.js
+++ b/jstests/core/profile_count.js
@@ -44,9 +44,7 @@
assert.writeOK(coll.insert({a: i}));
}
- var query = {
- a: {$gte: 5}
- };
+ var query = {a: {$gte: 5}};
assert.eq(5, coll.count(query));
profileObj = getLatestProfilerEntry(testDB);
@@ -62,9 +60,7 @@
}
assert.commandWorked(coll.createIndex({a: 1}));
- query = {
- a: {$gte: 5}
- };
+ query = {a: {$gte: 5}};
assert.eq(5, coll.count(query));
profileObj = getLatestProfilerEntry(testDB);
diff --git a/jstests/core/profile_insert.js b/jstests/core/profile_insert.js
index 75a44fcb4e9..e49699a9d11 100644
--- a/jstests/core/profile_insert.js
+++ b/jstests/core/profile_insert.js
@@ -17,9 +17,7 @@
//
// Test single insert.
//
- var doc = {
- _id: 1
- };
+ var doc = {_id: 1};
var result = coll.insert(doc);
if (isWriteCommand) {
assert.writeOK(result);
@@ -76,9 +74,7 @@
// Test insert options.
//
coll.drop();
- doc = {
- _id: 1
- };
+ doc = {_id: 1};
var wtimeout = 60000;
assert.writeOK(coll.insert(doc, {writeConcern: {w: 1, wtimeout: wtimeout}, ordered: false}));
diff --git a/jstests/core/push_sort.js b/jstests/core/push_sort.js
index 0e407d969ba..b9676f6c963 100644
--- a/jstests/core/push_sort.js
+++ b/jstests/core/push_sort.js
@@ -53,10 +53,7 @@ assert.eq([{a: {b: 2}}, {a: {b: 3}}], t.findOne({_id: 7}).x);
//
// $push with $sort should not push a "$sort" field
-var doc8 = {
- _id: 8,
- x: [{a: 1}, {a: 2}]
-};
+var doc8 = {_id: 8, x: [{a: 1}, {a: 2}]};
t.save(doc8);
var res = t.update({_id: 8}, {$push: {x: {$sort: {a: -1}}}});
assert.writeError(res);
diff --git a/jstests/core/ref.js b/jstests/core/ref.js
index 02c4cb92a07..33d83738cd2 100644
--- a/jstests/core/ref.js
+++ b/jstests/core/ref.js
@@ -4,10 +4,7 @@
db.otherthings.drop();
db.things.drop();
-var other = {
- s: "other thing",
- n: 1
-};
+var other = {s: "other thing", n: 1};
db.otherthings.save(other);
db.things.save({name: "abc"});
diff --git a/jstests/core/ref3.js b/jstests/core/ref3.js
index 929e4152daf..4406863d899 100644
--- a/jstests/core/ref3.js
+++ b/jstests/core/ref3.js
@@ -4,10 +4,7 @@
db.otherthings3.drop();
db.things3.drop();
-var other = {
- s: "other thing",
- n: 1
-};
+var other = {s: "other thing", n: 1};
db.otherthings3.save(other);
db.things3.save({name: "abc"});
diff --git a/jstests/core/ref4.js b/jstests/core/ref4.js
index 07796d1e96a..882253f3883 100644
--- a/jstests/core/ref4.js
+++ b/jstests/core/ref4.js
@@ -5,10 +5,7 @@ b = db.ref4b;
a.drop();
b.drop();
-var other = {
- s: "other thing",
- n: 17
-};
+var other = {s: "other thing", n: 17};
b.save(other);
a.save({name: "abc", others: [new DBRef("ref4b", other._id), new DBPointer("ref4b", other._id)]});
diff --git a/jstests/core/regex2.js b/jstests/core/regex2.js
index 80dec55f184..fe933892834 100644
--- a/jstests/core/regex2.js
+++ b/jstests/core/regex2.js
@@ -29,9 +29,8 @@ assert.eq(1, t.find({a: a}).count(), "C B");
assert.eq(1, t.find({a: b}).count(), "C C");
assert.eq(1, t.find({a: new RegExp(a)}).count(), "C D");
assert.eq(1, t.find({a: new RegExp(b)}).count(), "C E");
-assert.eq(2,
- t.find({a: new RegExp(a, "i")}).count(),
- "C F is spidermonkey built with UTF-8 support?");
+assert.eq(
+ 2, t.find({a: new RegExp(a, "i")}).count(), "C F is spidermonkey built with UTF-8 support?");
// same tests as above but using {$regex: "a|b", $options: "imx"} syntax.
t.drop();
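// Editor's sketch, not part of this patch: the regex-literal and $regex
// spellings exercised in this file are two ways of issuing the same match.
// Collection name illustrative:
var reDemo = db.regex2_demo;
reDemo.drop();
reDemo.insert({a: "Apple"});
assert.eq(1, reDemo.find({a: /^a/i}).count());
assert.eq(1, reDemo.find({a: {$regex: "^a", $options: "i"}}).count());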
diff --git a/jstests/core/regex3.js b/jstests/core/regex3.js
index bc1623cecea..23b7f3c69cc 100644
--- a/jstests/core/regex3.js
+++ b/jstests/core/regex3.js
@@ -8,9 +8,8 @@ t.save({name: "bob"});
t.save({name: "aaron"});
assert.eq(2, t.find({name: /^e.*/}).itcount(), "no index count");
-assert.eq(4,
- t.find({name: /^e.*/}).explain(true).executionStats.totalDocsExamined,
- "no index explain");
+assert.eq(
+ 4, t.find({name: /^e.*/}).explain(true).executionStats.totalDocsExamined, "no index explain");
t.ensureIndex({name: 1});
assert.eq(2, t.find({name: /^e.*/}).itcount(), "index count");
assert.eq(2,
diff --git a/jstests/core/regex4.js b/jstests/core/regex4.js
index 112375e2e09..131f93bcc26 100644
--- a/jstests/core/regex4.js
+++ b/jstests/core/regex4.js
@@ -8,9 +8,8 @@ t.save({name: "bob"});
t.save({name: "aaron"});
assert.eq(2, t.find({name: /^e.*/}).count(), "no index count");
-assert.eq(4,
- t.find({name: /^e.*/}).explain(true).executionStats.totalDocsExamined,
- "no index explain");
+assert.eq(
+ 4, t.find({name: /^e.*/}).explain(true).executionStats.totalDocsExamined, "no index explain");
// assert.eq( 2 , t.find( { name : { $ne : /^e.*/ } } ).count() , "no index count ne" ); //
// SERVER-251
diff --git a/jstests/core/regex6.js b/jstests/core/regex6.js
index 7b9ed1910ed..b7e90664c4d 100644
--- a/jstests/core/regex6.js
+++ b/jstests/core/regex6.js
@@ -11,30 +11,24 @@ t.save({name: "[with]some?symbols"});
t.ensureIndex({name: 1});
assert.eq(0, t.find({name: /^\//}).count(), "index count");
-assert.eq(1,
- t.find({name: /^\//}).explain(true).executionStats.totalKeysExamined,
- "index explain 1");
-assert.eq(0,
- t.find({name: /^é/}).explain(true).executionStats.totalKeysExamined,
- "index explain 2");
-assert.eq(0,
- t.find({name: /^\é/}).explain(true).executionStats.totalKeysExamined,
- "index explain 3");
-assert.eq(1,
- t.find({name: /^\./}).explain(true).executionStats.totalKeysExamined,
- "index explain 4");
-assert.eq(5,
- t.find({name: /^./}).explain(true).executionStats.totalKeysExamined,
- "index explain 5");
+assert.eq(
+ 1, t.find({name: /^\//}).explain(true).executionStats.totalKeysExamined, "index explain 1");
+assert.eq(
+ 0, t.find({name: /^é/}).explain(true).executionStats.totalKeysExamined, "index explain 2");
+assert.eq(
+ 0, t.find({name: /^\é/}).explain(true).executionStats.totalKeysExamined, "index explain 3");
+assert.eq(
+ 1, t.find({name: /^\./}).explain(true).executionStats.totalKeysExamined, "index explain 4");
+assert.eq(
+ 5, t.find({name: /^./}).explain(true).executionStats.totalKeysExamined, "index explain 5");
// SERVER-2862
assert.eq(0, t.find({name: /^\Qblah\E/}).count(), "index explain 6");
assert.eq(1,
t.find({name: /^\Qblah\E/}).explain(true).executionStats.totalKeysExamined,
"index explain 6");
-assert.eq(1,
- t.find({name: /^blah/}).explain(true).executionStats.totalKeysExamined,
- "index explain 6");
+assert.eq(
+ 1, t.find({name: /^blah/}).explain(true).executionStats.totalKeysExamined, "index explain 6");
assert.eq(1, t.find({name: /^\Q[\Ewi\Qth]some?s\Eym/}).count(), "index count 2");
assert.eq(2,
t.find({name: /^\Q[\Ewi\Qth]some?s\Eym/}).explain(true).executionStats.totalKeysExamined,
@@ -43,13 +37,11 @@ assert.eq(2,
t.find({name: /^bob/}).explain(true).executionStats.totalKeysExamined,
"index explain 6"); // proof executionStats.totalKeysExamined == count+1
-assert.eq(1,
- t.find({name: {$regex: "^e", $gte: "emily"}})
- .explain(true)
- .executionStats.totalKeysExamined,
- "ie7");
-assert.eq(1,
- t.find({name: {$gt: "a", $regex: "^emily"}})
- .explain(true)
- .executionStats.totalKeysExamined,
- "ie7");
+assert.eq(
+ 1,
+ t.find({name: {$regex: "^e", $gte: "emily"}}).explain(true).executionStats.totalKeysExamined,
+ "ie7");
+assert.eq(
+ 1,
+ t.find({name: {$gt: "a", $regex: "^emily"}}).explain(true).executionStats.totalKeysExamined,
+ "ie7");
diff --git a/jstests/core/remove7.js b/jstests/core/remove7.js
index ef5500fa1fa..9c78b24eefa 100644
--- a/jstests/core/remove7.js
+++ b/jstests/core/remove7.js
@@ -22,9 +22,7 @@ t.ensureIndex({tags: 1});
for (i = 0; i < 200; i++) {
for (var j = 0; j < 10; j++)
t.save({tags: getTags(100)});
- var q = {
- tags: {$in: getTags(10)}
- };
+ var q = {tags: {$in: getTags(10)}};
var before = t.find(q).count();
var res = t.remove(q);
var after = t.find(q).count();
diff --git a/jstests/core/rename4.js b/jstests/core/rename4.js
index 904709175f9..185193deaa9 100644
--- a/jstests/core/rename4.js
+++ b/jstests/core/rename4.js
@@ -101,9 +101,8 @@ good({aa: 1, b: 2}, {$rename: {z: 'c'}, $set: {b: 5}}, {aa: 1, b: 5});
good({a: {z: 1, b: 1}}, {$rename: {'a.b': 'a.c'}}, {a: {c: 1, z: 1}});
good({a: {z: 1, tomato: 1}}, {$rename: {'a.tomato': 'a.potato'}}, {a: {potato: 1, z: 1}});
good({a: {z: 1, b: 1, c: 1}}, {$rename: {'a.b': 'a.c'}}, {a: {c: 1, z: 1}});
-good({a: {z: 1, tomato: 1, potato: 1}},
- {$rename: {'a.tomato': 'a.potato'}},
- {a: {potato: 1, z: 1}});
+good(
+ {a: {z: 1, tomato: 1, potato: 1}}, {$rename: {'a.tomato': 'a.potato'}}, {a: {potato: 1, z: 1}});
good({a: {z: 1, b: 1}}, {$rename: {'a.b': 'a.cc'}}, {a: {cc: 1, z: 1}});
good({a: {z: 1, b: 1, c: 1}}, {$rename: {'a.b': 'aa.c'}}, {a: {c: 1, z: 1}, aa: {c: 1}});
diff --git a/jstests/core/repair_database.js b/jstests/core/repair_database.js
index 45f936b022a..2005a7bf344 100644
--- a/jstests/core/repair_database.js
+++ b/jstests/core/repair_database.js
@@ -13,10 +13,7 @@ mydb.dropDatabase();
var myColl = mydb.a;
// 2
-var doc = {
- _id: 1,
- a: "hello world"
-};
+var doc = {_id: 1, a: "hello world"};
myColl.insert(doc);
myColl.ensureIndex({a: 1});
mydb.repairDatabase();
diff --git a/jstests/core/return_key.js b/jstests/core/return_key.js
index b39764846d4..3692521f58c 100644
--- a/jstests/core/return_key.js
+++ b/jstests/core/return_key.js
@@ -73,10 +73,9 @@ load("jstests/libs/analyze_plan.js");
.sort({b: 1})
.returnKey()
.toArray();
- assert.eq(results,
- [
- {a: 3, c: {'': 1}, d: {'': 1}},
- {a: 2, c: {'': 2}, d: {'': 2}},
- {a: 1, c: {'': 3}, d: {'': 3}}
- ]);
+ assert.eq(results, [
+ {a: 3, c: {'': 1}, d: {'': 1}},
+ {a: 2, c: {'': 2}, d: {'': 2}},
+ {a: 1, c: {'': 3}, d: {'': 3}}
+ ]);
})();
diff --git a/jstests/core/role_management_helpers.js b/jstests/core/role_management_helpers.js
index fa25d8a2d57..b47d87e5533 100644
--- a/jstests/core/role_management_helpers.js
+++ b/jstests/core/role_management_helpers.js
@@ -106,12 +106,10 @@ function assertHasPrivilege(privilegeArray, privilege) {
assertHasRole(roleObj.roles, "roleC", db.getName());
// Privileges on the same resource get collapsed
- db.grantPrivilegesToRole(
- "roleA",
- [
- {resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']},
- {resource: {db: db.getName(), collection: "foo"}, actions: ['insert']}
- ]);
+ db.grantPrivilegesToRole("roleA", [
+ {resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']},
+ {resource: {db: db.getName(), collection: "foo"}, actions: ['insert']}
+ ]);
roleObj = db.getRole("roleA", {showPrivileges: true});
assert.eq(0, roleObj.roles.length);
assert.eq(2, roleObj.privileges.length);
@@ -122,12 +120,10 @@ function assertHasPrivilege(privilegeArray, privilege) {
{resource: {db: db.getName(), collection: ""}, actions: ['dropDatabase']});
// Update role
- db.updateRole(
- "roleA",
- {
- roles: ['roleB'],
- privileges: [{resource: {db: db.getName(), collection: "foo"}, actions: ['find']}]
- });
+ db.updateRole("roleA", {
+ roles: ['roleB'],
+ privileges: [{resource: {db: db.getName(), collection: "foo"}, actions: ['find']}]
+ });
roleObj = db.getRole("roleA", {showPrivileges: true});
assert.eq(1, roleObj.roles.length);
assertHasRole(roleObj.roles, "roleB", db.getName());
diff --git a/jstests/core/set_param1.js b/jstests/core/set_param1.js
index 2df37442518..b0df18bccd3 100644
--- a/jstests/core/set_param1.js
+++ b/jstests/core/set_param1.js
@@ -28,14 +28,12 @@ assert.neq(undefined,
assert.commandFailed(db.adminCommand({"setParameter": 1, logComponentVerbosity: "not an object"}));
// Non-numeric verbosity for component should be rejected.
-assert.commandFailed(db.adminCommand({
- "setParameter": 1,
- logComponentVerbosity: {storage: {journal: {verbosity: "not a number"}}}
-}));
+assert.commandFailed(db.adminCommand(
+ {"setParameter": 1, logComponentVerbosity: {storage: {journal: {verbosity: "not a number"}}}}));
// Invalid component shall be rejected
-assert.commandFailed(db.adminCommand(
- {"setParameter": 1, logComponentVerbosity: {NoSuchComponent: {verbosity: 2}}}));
+assert.commandFailed(
+ db.adminCommand({"setParameter": 1, logComponentVerbosity: {NoSuchComponent: {verbosity: 2}}}));
// Set multiple component log levels at once.
(function() {
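// Editor's sketch, not part of this patch: a well-formed logComponentVerbosity
// argument, for contrast with the rejected shapes above. Component names and
// levels are illustrative:
assert.commandWorked(db.adminCommand({
    setParameter: 1,
    logComponentVerbosity: {verbosity: 1, storage: {journal: {verbosity: 2}}}
}));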
diff --git a/jstests/core/sort3.js b/jstests/core/sort3.js
index bfc1ee5134c..933f16da6cb 100644
--- a/jstests/core/sort3.js
+++ b/jstests/core/sort3.js
@@ -5,16 +5,13 @@ t.save({a: 1});
t.save({a: 5});
t.save({a: 3});
-assert.eq("1,5,3",
- t.find().toArray().map(function(z) {
- return z.a;
- }));
+assert.eq("1,5,3", t.find().toArray().map(function(z) {
+ return z.a;
+}));
-assert.eq("1,3,5",
- t.find().sort({a: 1}).toArray().map(function(z) {
- return z.a;
- }));
-assert.eq("5,3,1",
- t.find().sort({a: -1}).toArray().map(function(z) {
- return z.a;
- }));
+assert.eq("1,3,5", t.find().sort({a: 1}).toArray().map(function(z) {
+ return z.a;
+}));
+assert.eq("5,3,1", t.find().sort({a: -1}).toArray().map(function(z) {
+ return z.a;
+}));
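// Editor's note, not part of this patch: the asserts above compare a string
// such as "1,5,3" against an array; they pass because assert.eq() falls back
// to == comparison and [1, 5, 3] coerces to the string "1,5,3":
assert.eq("1,5,3", [1, 5, 3]);  // passes in the mongo shell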
diff --git a/jstests/core/sort5.js b/jstests/core/sort5.js
index 399c9fb4e28..05fe781ae32 100644
--- a/jstests/core/sort5.js
+++ b/jstests/core/sort5.js
@@ -9,37 +9,29 @@ t.save({_id: 9, x: 4, y: {a: 9, b: 3}});
// test compound sorting
assert.eq([4, 2, 3, 1],
- t.find()
- .sort({"y.b": 1, "y.a": -1})
- .map(function(z) {
- return z.x;
- }),
+ t.find().sort({"y.b": 1, "y.a": -1}).map(function(z) {
+ return z.x;
+ }),
"A no index");
t.ensureIndex({"y.b": 1, "y.a": -1});
assert.eq([4, 2, 3, 1],
- t.find()
- .sort({"y.b": 1, "y.a": -1})
- .map(function(z) {
- return z.x;
- }),
+ t.find().sort({"y.b": 1, "y.a": -1}).map(function(z) {
+ return z.x;
+ }),
"A index");
assert(t.validate().valid, "A valid");
// test sorting on compound key involving _id
assert.eq([4, 2, 3, 1],
- t.find()
- .sort({"y.b": 1, _id: -1})
- .map(function(z) {
- return z.x;
- }),
+ t.find().sort({"y.b": 1, _id: -1}).map(function(z) {
+ return z.x;
+ }),
"B no index");
t.ensureIndex({"y.b": 1, "_id": -1});
assert.eq([4, 2, 3, 1],
- t.find()
- .sort({"y.b": 1, _id: -1})
- .map(function(z) {
- return z.x;
- }),
+ t.find().sort({"y.b": 1, _id: -1}).map(function(z) {
+ return z.x;
+ }),
"B index");
assert(t.validate().valid, "B valid");
diff --git a/jstests/core/sortk.js b/jstests/core/sortk.js
index 7ecb86fa6df..76c381681eb 100644
--- a/jstests/core/sortk.js
+++ b/jstests/core/sortk.js
@@ -17,17 +17,10 @@ resetCollection();
t.ensureIndex({a: 1, b: 1});
function simpleQuery(extraFields, sort, hint) {
- query = {
- a: {$in: [1, 2]}
- };
+ query = {a: {$in: [1, 2]}};
Object.extend(query, extraFields);
- sort = sort || {
- b: 1
- };
- hint = hint || {
- a: 1,
- b: 1
- };
+ sort = sort || {b: 1};
+ hint = hint || {a: 1, b: 1};
return t.find(query).sort(sort).hint(hint);
}
@@ -133,9 +126,7 @@ assert.eq(0, andEqInQueryWithLimit(-2)[0].c);
assert.eq(1, andEqInQueryWithLimit(-2)[1].c);
function inQueryWithLimit(limit, sort) {
- sort = sort || {
- b: 1
- };
+ sort = sort || {b: 1};
return t.find({a: {$in: [0, 1]}}).sort(sort).hint({a: 1, b: 1, c: 1}).limit(limit);
}
diff --git a/jstests/core/splitvector.js b/jstests/core/splitvector.js
index 233911d29c6..9ba62e2df83 100644
--- a/jstests/core/splitvector.js
+++ b/jstests/core/splitvector.js
@@ -62,17 +62,17 @@ resetCollection();
// Case 1: missing parameters
assert.eq(false, db.runCommand({splitVector: "test.jstests_splitvector"}).ok, "1a");
-assert.eq(false,
- db.runCommand({splitVector: "test.jstests_splitvector", maxChunkSize: 1}).ok,
- "1b");
+assert.eq(
+ false, db.runCommand({splitVector: "test.jstests_splitvector", maxChunkSize: 1}).ok, "1b");
// -------------------------
// Case 2: missing index
-assert.eq(false,
- db.runCommand(
- {splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1}).ok,
- "2");
+assert.eq(
+ false,
+ db.runCommand({splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1})
+ .ok,
+ "2");
// -------------------------
// Case 3: empty collection
diff --git a/jstests/core/stages_delete.js b/jstests/core/stages_delete.js
index 1624b1fcc6a..f8e7380c75a 100644
--- a/jstests/core/stages_delete.js
+++ b/jstests/core/stages_delete.js
@@ -1,8 +1,6 @@
// Test basic delete stage functionality.
var coll = db.stages_delete;
-var collScanStage = {
- cscan: {args: {direction: 1}, filter: {deleteMe: true}}
-};
+var collScanStage = {cscan: {args: {direction: 1}, filter: {deleteMe: true}}};
var deleteStage;
// Test delete stage with isMulti: true.
diff --git a/jstests/core/stages_sort.js b/jstests/core/stages_sort.js
index b6cb5a456af..6d25279fa27 100644
--- a/jstests/core/stages_sort.js
+++ b/jstests/core/stages_sort.js
@@ -25,9 +25,7 @@ if (false) {
};
// Sort with foo ascending.
- sort1 = {
- sort: {args: {node: ixscan1, pattern: {foo: 1}}}
- };
+ sort1 = {sort: {args: {node: ixscan1, pattern: {foo: 1}}}};
res = db.runCommand({stageDebug: sort1});
assert.eq(res.ok, 1);
assert.eq(res.results.length, 21);
diff --git a/jstests/core/system_profile.js b/jstests/core/system_profile.js
index 73d303a3277..09baf334290 100644
--- a/jstests/core/system_profile.js
+++ b/jstests/core/system_profile.js
@@ -26,25 +26,24 @@ assert.writeError(testDB.system.profile.remove({}));
// Using findAndModify to write to "system.profile" should fail.
assert.commandWorked(testDB.dropDatabase());
assert.commandWorked(testDB.createCollection("system.profile"));
-assert.commandFailed(testDB.system.profile.runCommand("findAndModify",
- {query: {}, update: {a: 1}}));
-assert.commandFailed(testDB.system.profile.runCommand("findAndModify",
- {query: {}, update: {a: 1}, upsert: true}));
+assert.commandFailed(
+ testDB.system.profile.runCommand("findAndModify", {query: {}, update: {a: 1}}));
+assert.commandFailed(
+ testDB.system.profile.runCommand("findAndModify", {query: {}, update: {a: 1}, upsert: true}));
assert.commandFailed(testDB.system.profile.runCommand("findAndModify", {query: {}, remove: true}));
// Using mapReduce to write to "system.profile" should fail.
assert.commandWorked(testDB.dropDatabase());
assert.writeOK(testDB.foo.insert({val: 1}));
-assert.commandFailed(testDB.foo.runCommand("mapReduce",
- {
- map: function() {
- emit(0, this.val);
- },
- reduce: function(id, values) {
- return Array.sum(values);
- },
- out: "system.profile"
- }));
+assert.commandFailed(testDB.foo.runCommand("mapReduce", {
+ map: function() {
+ emit(0, this.val);
+ },
+ reduce: function(id, values) {
+ return Array.sum(values);
+ },
+ out: "system.profile"
+}));
// Using aggregate to write to "system.profile" should fail.
assert.commandWorked(testDB.dropDatabase());
diff --git a/jstests/core/update_find_and_modify_id.js b/jstests/core/update_find_and_modify_id.js
index a75a5595451..ca565cb0e8c 100644
--- a/jstests/core/update_find_and_modify_id.js
+++ b/jstests/core/update_find_and_modify_id.js
@@ -2,10 +2,7 @@
// an _id in the update document, as long as the _id will not be modified
var t = db.jstests_server4516;
-var startingDoc = {
- _id: 1,
- a: 1
-};
+var startingDoc = {_id: 1, a: 1};
function prepare() {
t.drop();
diff --git a/jstests/core/update_min_max_examples.js b/jstests/core/update_min_max_examples.js
index a8a86f22986..7bcb6197f93 100644
--- a/jstests/core/update_min_max_examples.js
+++ b/jstests/core/update_min_max_examples.js
@@ -46,10 +46,7 @@ assert.writeOK(res);
assert.eq(coll.findOne({_id: 6}).a, 1e-15);
// $max with positional operator
-var insertdoc = {
- _id: 7,
- y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}]
-};
+var insertdoc = {_id: 7, y: [{a: 2}, {a: 6}, {a: [9, 1, 1]}]};
coll.insert(insertdoc);
res = coll.update({_id: 7, "y.a": 6}, {$max: {"y.$.a": 7}});
assert.writeOK(res);
diff --git a/jstests/core/update_server-12848.js b/jstests/core/update_server-12848.js
index c33e8dd9f62..0f86e0135b3 100644
--- a/jstests/core/update_server-12848.js
+++ b/jstests/core/update_server-12848.js
@@ -8,10 +8,7 @@ var res;
var t = db.update_server_12848;
t.drop();
-var orig = {
- "_id": 1,
- "a": [1, []]
-};
+var orig = {"_id": 1, "a": [1, []]};
res = t.insert(orig);
assert.writeOK(res, "insert");
assert.eq(orig, t.findOne());
@@ -19,8 +16,5 @@ assert.eq(orig, t.findOne());
res = t.update({"_id": 1}, {$addToSet: {"a.1": 1}});
assert.writeOK(res, "update");
-var updated = {
- "_id": 1,
- "a": [1, [1]]
-};
+var updated = {"_id": 1, "a": [1, [1]]};
assert.eq(updated, t.findOne());
diff --git a/jstests/core/upsert_fields.js b/jstests/core/upsert_fields.js
index ae385980b1b..910ae6d3e11 100644
--- a/jstests/core/upsert_fields.js
+++ b/jstests/core/upsert_fields.js
@@ -60,13 +60,9 @@ for (var i = 0; i < 3; i++) {
// $op style
if (i == 1)
- expr = {
- $set: {a: 1}
- };
+ expr = {$set: {a: 1}};
if (i == 2)
- expr = {
- $setOnInsert: {a: 1}
- };
+ expr = {$setOnInsert: {a: 1}};
var isReplStyle = i == 0;
@@ -135,13 +131,9 @@ for (var i = 0; i < 3; i++) {
// $op style
if (i == 1)
- expr = {
- $set: {a: 1}
- };
+ expr = {$set: {a: 1}};
if (i == 2)
- expr = {
- $setOnInsert: {a: 1}
- };
+ expr = {$setOnInsert: {a: 1}};
var isReplStyle = i == 0;
@@ -187,9 +179,7 @@ for (var i = 0; i < 3; i++) {
}
// nested field extraction
- var docValue = isReplStyle ? undefined : {
- x: 1
- };
+ var docValue = isReplStyle ? undefined : {x: 1};
assert.docEq(docValue, upsertedXVal({"x.x": 1}, expr));
assert.docEq(docValue, upsertedXVal({"x.x": {$eq: 1}}, expr));
assert.docEq(docValue, upsertedXVal({"x.x": {$all: [1]}}, expr));
diff --git a/jstests/core/validate_user_documents.js b/jstests/core/validate_user_documents.js
index 9c12e6075a7..676c47a301f 100644
--- a/jstests/core/validate_user_documents.js
+++ b/jstests/core/validate_user_documents.js
@@ -34,8 +34,7 @@ assert.commandWorked(mydb.runCommand({createUser: "spencer", pwd: "password", ro
assert.commandWorked(mydb.runCommand({
createUser: "andy",
pwd: "password",
- roles:
- [{role: "dbAdmin", db: "validate_user_documents", hasRole: true, canDelegate: false}]
+ roles: [{role: "dbAdmin", db: "validate_user_documents", hasRole: true, canDelegate: false}]
}));
// Non-existent role; insert should fail
diff --git a/jstests/core/where1.js b/jstests/core/where1.js
index 85466901016..53441580b90 100644
--- a/jstests/core/where1.js
+++ b/jstests/core/where1.js
@@ -8,8 +8,8 @@ t.save({a: 3});
assert.eq(1,
t.find(function() {
- return this.a == 2;
- }).length(),
+ return this.a == 2;
+ }).length(),
"A");
assert.eq(1, t.find({$where: "return this.a == 2"}).toArray().length, "B");
diff --git a/jstests/core/where3.js b/jstests/core/where3.js
index 633276489a5..e26b36ffcf4 100644
--- a/jstests/core/where3.js
+++ b/jstests/core/where3.js
@@ -7,8 +7,8 @@ t.save({returned_date: 6});
assert.eq(1,
t.find(function() {
- return this.returned_date == 5;
- }).count(),
+ return this.returned_date == 5;
+ }).count(),
"A");
assert.eq(1, t.find({$where: "return this.returned_date == 5;"}).count(), "B");
assert.eq(1, t.find({$where: "this.returned_date == 5;"}).count(), "C");
diff --git a/jstests/core/where4.js b/jstests/core/where4.js
index 612dba59e67..3db37ae6fe5 100644
--- a/jstests/core/where4.js
+++ b/jstests/core/where4.js
@@ -5,15 +5,14 @@ myDB.dropDatabase();
assert.writeOK(myDB.where4.insert({x: 1, y: 1}));
assert.writeOK(myDB.where4.insert({x: 2, y: 1}));
-assert.writeOK(myDB.where4.update(
- {
- $where: function() {
- return this.x == 1;
- }
- },
- {$inc: {y: 1}},
- false,
- true));
+assert.writeOK(myDB.where4.update({
+ $where: function() {
+ return this.x == 1;
+ }
+},
+ {$inc: {y: 1}},
+ false,
+ true));
assert.eq(2, myDB.where4.findOne({x: 1}).y);
assert.eq(1, myDB.where4.findOne({x: 2}).y);
diff --git a/jstests/decimal/decimal_find_query.js b/jstests/decimal/decimal_find_query.js
index 47e5c7167d0..7f3d8c10284 100644
--- a/jstests/decimal/decimal_find_query.js
+++ b/jstests/decimal/decimal_find_query.js
@@ -29,11 +29,10 @@
assert.eq(col.find({'decimal': {$gte: NumberDecimal('2.000')}}).count(), 3);
assert.eq(col.find({'decimal': {$lte: NumberDecimal('0.9999999999999999')}}).count(), 4);
- assert.eq(
- col.find({'decimal': {$nin: [NumberDecimal('Infinity'), NumberDecimal('-Infinity')]}})
- .count(),
- 9,
- 'Infinity count incorrect');
+ assert.eq(col.find({'decimal': {$nin: [NumberDecimal('Infinity'), NumberDecimal('-Infinity')]}})
+ .count(),
+ 9,
+ 'Infinity count incorrect');
// Test $mod
col.drop();
diff --git a/jstests/disk/datafile_options.js b/jstests/disk/datafile_options.js
index a7639c43d7d..d495e61cff2 100644
--- a/jstests/disk/datafile_options.js
+++ b/jstests/disk/datafile_options.js
@@ -3,9 +3,7 @@ var baseName = "jstests_disk_datafile_options";
load('jstests/libs/command_line/test_parsed_options.js');
jsTest.log("Testing \"noprealloc\" command line option");
-var expectedResult = {
- "parsed": {"storage": {"mmapv1": {"preallocDataFiles": false}}}
-};
+var expectedResult = {"parsed": {"storage": {"mmapv1": {"preallocDataFiles": false}}}};
testGetCmdLineOptsMongod({noprealloc: ""}, expectedResult);
jsTest.log("Testing \"storage.mmapv1.preallocDataFiles\" config file option");
diff --git a/jstests/disk/dbNoCreate.js b/jstests/disk/dbNoCreate.js
index f3498fcedb4..66c2cc74ad6 100644
--- a/jstests/disk/dbNoCreate.js
+++ b/jstests/disk/dbNoCreate.js
@@ -12,6 +12,5 @@ t.drop();
MongoRunner.stopMongod(m);
m = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: m.dbpath});
-assert.eq(-1,
- m.getDBNames().indexOf(baseName),
- "found " + baseName + " in " + tojson(m.getDBNames()));
+assert.eq(
+ -1, m.getDBNames().indexOf(baseName), "found " + baseName + " in " + tojson(m.getDBNames()));
diff --git a/jstests/disk/index_options.js b/jstests/disk/index_options.js
index 68710de75a1..faa12e23fdd 100644
--- a/jstests/disk/index_options.js
+++ b/jstests/disk/index_options.js
@@ -3,9 +3,7 @@ var baseName = "jstests_disk_index_options";
load('jstests/libs/command_line/test_parsed_options.js');
jsTest.log("Testing \"noIndexBuildRetry\" command line option");
-var expectedResult = {
- "parsed": {"storage": {"indexBuildRetry": false}}
-};
+var expectedResult = {"parsed": {"storage": {"indexBuildRetry": false}}};
testGetCmdLineOptsMongod({noIndexBuildRetry: ""}, expectedResult);
jsTest.log("Testing \"storage.indexBuildRetry\" config file option");
diff --git a/jstests/dur/journaling_options.js b/jstests/dur/journaling_options.js
index d0600009a70..28402e9536c 100644
--- a/jstests/dur/journaling_options.js
+++ b/jstests/dur/journaling_options.js
@@ -5,27 +5,19 @@ function doTest() {
load('jstests/libs/command_line/test_parsed_options.js');
jsTest.log("Testing \"dur\" command line option");
- var expectedResult = {
- "parsed": {"storage": {"journal": {"enabled": true}}}
- };
+ var expectedResult = {"parsed": {"storage": {"journal": {"enabled": true}}}};
testGetCmdLineOptsMongod({dur: ""}, expectedResult);
jsTest.log("Testing \"nodur\" command line option");
- expectedResult = {
- "parsed": {"storage": {"journal": {"enabled": false}}}
- };
+ expectedResult = {"parsed": {"storage": {"journal": {"enabled": false}}}};
testGetCmdLineOptsMongod({nodur: ""}, expectedResult);
jsTest.log("Testing \"journal\" command line option");
- expectedResult = {
- "parsed": {"storage": {"journal": {"enabled": true}}}
- };
+ expectedResult = {"parsed": {"storage": {"journal": {"enabled": true}}}};
testGetCmdLineOptsMongod({journal: ""}, expectedResult);
jsTest.log("Testing \"nojournal\" command line option");
- expectedResult = {
- "parsed": {"storage": {"journal": {"enabled": false}}}
- };
+ expectedResult = {"parsed": {"storage": {"journal": {"enabled": false}}}};
testGetCmdLineOptsMongod({nojournal: ""}, expectedResult);
jsTest.log("Testing \"storage.journal.enabled\" config file option");
diff --git a/jstests/gle/gle_explicit_optime.js b/jstests/gle/gle_explicit_optime.js
index 476409c57b4..8a0bc0e4676 100644
--- a/jstests/gle/gle_explicit_optime.js
+++ b/jstests/gle/gle_explicit_optime.js
@@ -42,22 +42,13 @@ assert.eq(null, gleObj.err);
// Using an explicit optime on the new client should work if the optime is earlier than the
// secondary was locked
-var gleOpTimeBefore = {
- getLastError: true,
- w: 2,
- wOpTime: opTimeBeforeFailure
-};
+var gleOpTimeBefore = {getLastError: true, w: 2, wOpTime: opTimeBeforeFailure};
gleObj = newClientConn.getCollection(coll.toString()).getDB().runCommand(gleOpTimeBefore);
assert.eq(null, gleObj.err);
// Using an explicit optime on the new client should not work if the optime is later than the
// secondary was locked
-var gleOpTimeAfter = {
- getLastError: true,
- w: 2,
- wtimeout: 1000,
- wOpTime: opTimeAfterFailure
-};
+var gleOpTimeAfter = {getLastError: true, w: 2, wtimeout: 1000, wOpTime: opTimeAfterFailure};
gleObj = newClientConn.getCollection(coll.toString()).getDB().runCommand(gleOpTimeAfter);
assert.neq(null, gleObj.err);
assert(gleObj.wtimeout);
diff --git a/jstests/gle/opcounters_legacy.js b/jstests/gle/opcounters_legacy.js
index b243b8bc076..f4168305e2c 100644
--- a/jstests/gle/opcounters_legacy.js
+++ b/jstests/gle/opcounters_legacy.js
@@ -170,10 +170,7 @@ assert.eq(metricsObj.serverStatus.failed,
serverStatus = db.runCommand({serverStatus: 1});
opCounters = serverStatus.opcounters;
metricsObj = serverStatus.metrics.commands;
-var countVal = {
- "total": 0,
- "failed": 0
-};
+var countVal = {"total": 0, "failed": 0};
if (metricsObj.count != null) {
countVal = metricsObj.count;
}
diff --git a/jstests/httpinterface/network_options.js b/jstests/httpinterface/network_options.js
index 0302c9ac15b..ff5e453b4ec 100644
--- a/jstests/httpinterface/network_options.js
+++ b/jstests/httpinterface/network_options.js
@@ -6,9 +6,7 @@ load('jstests/libs/command_line/test_parsed_options.js');
// Object Check
jsTest.log("Testing \"objcheck\" command line option");
-var expectedResult = {
- "parsed": {"net": {"wireObjectCheck": true}}
-};
+var expectedResult = {"parsed": {"net": {"wireObjectCheck": true}}};
testGetCmdLineOptsMongod({objcheck: ""}, expectedResult);
jsTest.log("Testing \"noobjcheck\" command line option");
@@ -35,9 +33,7 @@ testGetCmdLineOptsMongod({}, expectedResult);
// HTTP Interface
jsTest.log("Testing \"httpinterface\" command line option");
-var expectedResult = {
- "parsed": {"net": {"http": {"enabled": true}}}
-};
+var expectedResult = {"parsed": {"net": {"http": {"enabled": true}}}};
testGetCmdLineOptsMongod({httpinterface: ""}, expectedResult);
jsTest.log("Testing \"nohttpinterface\" command line option");
@@ -77,9 +73,7 @@ testGetCmdLineOptsMongod({}, expectedResult);
// Unix Socket
if (!_isWindows()) {
jsTest.log("Testing \"nounixsocket\" command line option");
- expectedResult = {
- "parsed": {"net": {"unixDomainSocket": {"enabled": false}}}
- };
+ expectedResult = {"parsed": {"net": {"unixDomainSocket": {"enabled": false}}}};
testGetCmdLineOptsMongod({nounixsocket: ""}, expectedResult);
jsTest.log("Testing \"net.wireObjectCheck\" config file option");
@@ -93,9 +87,7 @@ if (!_isWindows()) {
expectedResult);
jsTest.log("Testing with no explicit network option setting");
- expectedResult = {
- "parsed": {"net": {}}
- };
+ expectedResult = {"parsed": {"net": {}}};
testGetCmdLineOptsMongod({}, expectedResult);
}
diff --git a/jstests/libs/chunk_manipulation_util.js b/jstests/libs/chunk_manipulation_util.js
index a334cbe8aec..dcf46fe4529 100644
--- a/jstests/libs/chunk_manipulation_util.js
+++ b/jstests/libs/chunk_manipulation_util.js
@@ -28,9 +28,7 @@ function moveChunkParallel(staticMongod, mongosURL, findCriteria, bounds, ns, to
assert((findCriteria || bounds) && !(findCriteria && bounds),
'Specify either findCriteria or bounds, but not both.');
- var mongos = new Mongo(mongosURL), admin = mongos.getDB('admin'), cmd = {
- moveChunk: ns
- };
+ var mongos = new Mongo(mongosURL), admin = mongos.getDB('admin'), cmd = {moveChunk: ns};
if (findCriteria) {
cmd.find = findCriteria;
diff --git a/jstests/libs/cleanup_orphaned_util.js b/jstests/libs/cleanup_orphaned_util.js
index cfd69ab128f..3990c148df4 100644
--- a/jstests/libs/cleanup_orphaned_util.js
+++ b/jstests/libs/cleanup_orphaned_util.js
@@ -94,12 +94,8 @@ function testCleanupOrphaned(options) {
assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: oneQuarter}));
- assert.commandWorked(admin.runCommand({
- moveChunk: coll.getFullName(),
- find: beginning,
- to: shards[1]._id,
- _waitForDelete: true
- }));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: coll.getFullName(), find: beginning, to: shards[1]._id, _waitForDelete: true}));
// 1/4 of the data is on the first shard.
// shard 0: [threeQuarters, middle)
diff --git a/jstests/libs/csrs_upgrade_util.js b/jstests/libs/csrs_upgrade_util.js
index 7dccccc3e7c..8d43507beee 100644
--- a/jstests/libs/csrs_upgrade_util.js
+++ b/jstests/libs/csrs_upgrade_util.js
@@ -127,12 +127,8 @@ var CSRSUpgradeCoordinator = function() {
*/
this.restartFirstConfigAsReplSet = function() {
jsTest.log("Restarting " + st.c0.name + " as a standalone replica set");
- csrsConfig = {
- _id: csrsName,
- version: 1,
- configsvr: true,
- members: [{_id: 0, host: st.c0.name}]
- };
+ csrsConfig =
+ {_id: csrsName, version: 1, configsvr: true, members: [{_id: 0, host: st.c0.name}]};
assert.commandWorked(st.c0.adminCommand({replSetInitiate: csrsConfig}));
csrs = [];
csrs0Opts = Object.extend({}, st.c0.fullOptions, /* deep */ true);
diff --git a/jstests/libs/election_timing_test.js b/jstests/libs/election_timing_test.js
index f40ad5f931b..16634530abf 100644
--- a/jstests/libs/election_timing_test.js
+++ b/jstests/libs/election_timing_test.js
@@ -48,10 +48,7 @@ var ElectionTimingTest = function(opts) {
ElectionTimingTest.prototype._runTimingTest = function() {
for (var run = 0; run < this.testRuns; run++) {
var collectionName = "test." + this.name;
- var cycleData = {
- testRun: run,
- results: []
- };
+ var cycleData = {testRun: run, results: []};
jsTestLog("Starting ReplSetTest for test " + this.name + " run: " + run);
this.rst =
diff --git a/jstests/libs/fts.js b/jstests/libs/fts.js
index eb5baec8a5a..e46b492564a 100644
--- a/jstests/libs/fts.js
+++ b/jstests/libs/fts.js
@@ -1,13 +1,9 @@
// Utility functions for FTS tests
//
function queryIDS(coll, search, filter, extra, limit) {
- var query = {
- "$text": {"$search": search}
- };
+ var query = {"$text": {"$search": search}};
if (extra)
- query = {
- "$text": Object.extend({"$search": search}, extra)
- };
+ query = {"$text": Object.extend({"$search": search}, extra)};
if (filter)
Object.extend(query, filter);
@@ -17,8 +13,8 @@ function queryIDS(coll, search, filter, extra, limit) {
.sort({score: {"$meta": "textScore"}})
.limit(limit);
else
- result = coll.find(query, {score: {"$meta": "textScore"}})
- .sort({score: {"$meta": "textScore"}});
+ result =
+ coll.find(query, {score: {"$meta": "textScore"}}).sort({score: {"$meta": "textScore"}});
return getIDS(result);
}
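// Editor's sketch, not part of this patch: queryIDS() assumes a text index on
// the collection; a minimal call under that assumption (names illustrative):
var ftsDemo = db.fts_demo;
ftsDemo.drop();
ftsDemo.ensureIndex({content: "text"});
ftsDemo.insert({_id: 1, content: "running shoes"});
assert.eq([1], queryIDS(ftsDemo, "run"));  // "run" stems to match "running"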
diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js
index 2af13814173..91f0167ea31 100644
--- a/jstests/libs/geo_near_random.js
+++ b/jstests/libs/geo_near_random.js
@@ -65,12 +65,7 @@ GeoNearRandomTest.prototype.testPt = function(pt, opts) {
print("testing point: " + tojson(pt) + " opts: " + tojson(opts));
- var cmd = {
- geoNear: this.t.getName(),
- near: pt,
- num: 1,
- spherical: opts.sphere
- };
+ var cmd = {geoNear: this.t.getName(), near: pt, num: 1, spherical: opts.sphere};
var last = db.runCommand(cmd).results;
for (var i = 2; i <= opts.nToTest; i++) {
@@ -97,9 +92,7 @@ GeoNearRandomTest.prototype.testPt = function(pt, opts) {
return x.obj;
});
- var query = {
- loc: {}
- };
+ var query = {loc: {}};
query.loc[opts.sphere ? '$nearSphere' : '$near'] = pt;
var near = this.t.find(query).limit(opts.nToTest).toArray();
diff --git a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
index 313bd7faf7c..65e32bd8a53 100644
--- a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
+++ b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js
@@ -14,7 +14,11 @@
var originalGetCollection = DB.prototype.getCollection;
// Blacklisted namespaces that should not be sharded.
- var blacklistedNamespaces = [/\$cmd/, /^admin\./, /\.system\./, ];
+ var blacklistedNamespaces = [
+ /\$cmd/,
+ /^admin\./,
+ /\.system\./,
+ ];
DB.prototype.getCollection = function() {
var dbName = this.getName();
diff --git a/jstests/libs/override_methods/set_majority_read_and_write_concerns.js b/jstests/libs/override_methods/set_majority_read_and_write_concerns.js
index 767134d43a4..232d97e6562 100644
--- a/jstests/libs/override_methods/set_majority_read_and_write_concerns.js
+++ b/jstests/libs/override_methods/set_majority_read_and_write_concerns.js
@@ -9,9 +9,7 @@
// Use a "signature" value that won't typically match a value assigned in normal use.
wtimeout: 60321
};
- var defaultReadConcern = {
- level: "majority"
- };
+ var defaultReadConcern = {level: "majority"};
var originalDBQuery = DBQuery;
@@ -82,11 +80,19 @@
// These commands do writes but do not support a writeConcern argument. Emulate it with a
// getLastError command.
- var commandsToEmulateWriteConcern = ["createIndexes", ];
+ var commandsToEmulateWriteConcern = [
+ "createIndexes",
+ ];
// These are reading commands that support majority readConcern.
- var commandsToForceReadConcern =
- ["count", "distinct", "find", "geoNear", "geoSearch", "group", ];
+ var commandsToForceReadConcern = [
+ "count",
+ "distinct",
+ "find",
+ "geoNear",
+ "geoSearch",
+ "group",
+ ];
var forceWriteConcern = Array.contains(commandsToForceWriteConcern, cmdName);
var emulateWriteConcern = Array.contains(commandsToEmulateWriteConcern, cmdName);
diff --git a/jstests/libs/override_methods/sharding_continuous_config_stepdown.js b/jstests/libs/override_methods/sharding_continuous_config_stepdown.js
index aea3e482961..d0d2814fc90 100644
--- a/jstests/libs/override_methods/sharding_continuous_config_stepdown.js
+++ b/jstests/libs/override_methods/sharding_continuous_config_stepdown.js
@@ -106,16 +106,10 @@ function retryOnNetworkError(func) {
}
print('*** Continuous stepdown thread completed successfully');
- return {
- ok: 1
- };
+ return {ok: 1};
} catch (e) {
print('*** Continuous stepdown thread caught exception: ' + tojson(e));
- return {
- ok: 0,
- error: e.toString(),
- stack: e.stack
- };
+ return {ok: 0, error: e.toString(), stack: e.stack};
}
}
@@ -209,12 +203,12 @@ function retryOnNetworkError(func) {
// Set electionTimeoutMillis to 5 seconds, from 10, so that chunk migrations don't
// time out because of the CSRS primary being down so often for so long.
- arguments[0].configReplSetTestOptions = Object.merge(arguments[0].configReplSetTestOptions,
- {
- settings: {
- electionTimeoutMillis: 5000,
- },
- });
+ arguments[0].configReplSetTestOptions =
+ Object.merge(arguments[0].configReplSetTestOptions, {
+ settings: {
+ electionTimeoutMillis: 5000,
+ },
+ });
// Construct the original object
originalShardingTest.apply(this, arguments);
diff --git a/jstests/libs/test_background_ops.js b/jstests/libs/test_background_ops.js
index 384e0bd5b64..db2361d67c8 100644
--- a/jstests/libs/test_background_ops.js
+++ b/jstests/libs/test_background_ops.js
@@ -43,9 +43,7 @@ var waitForLock = function(mongo, name) {
};
// Return an object we can invoke unlock on
- return {
- unlock: unlock
- };
+ return {unlock: unlock};
};
/**
diff --git a/jstests/libs/trace_missing_docs.js b/jstests/libs/trace_missing_docs.js
index 3bc9ef75333..d0052f55f6f 100644
--- a/jstests/libs/trace_missing_docs.js
+++ b/jstests/libs/trace_missing_docs.js
@@ -23,12 +23,8 @@ function traceMissingDoc(coll, doc, mongos) {
if (doc[k] == undefined) {
jsTest.log("Shard key " + tojson(shardKey) + " not found in doc " + tojson(doc) +
", falling back to _id search...");
- shardKeyPatt = {
- _id: 1
- };
- shardKey = {
- _id: doc['_id']
- };
+ shardKeyPatt = {_id: 1};
+ shardKey = {_id: doc['_id']};
break;
}
shardKey[k] = doc[k];
@@ -70,9 +66,7 @@ function traceMissingDoc(coll, doc, mongos) {
// Find ops
addToOps(oplog.find(addKeyQuery({op: 'i'}, 'o')));
- var updateQuery = {
- $or: [addKeyQuery({op: 'u'}, 'o2'), {op: 'u', 'o2._id': doc['_id']}]
- };
+ var updateQuery = {$or: [addKeyQuery({op: 'u'}, 'o2'), {op: 'u', 'o2._id': doc['_id']}]};
addToOps(oplog.find(updateQuery));
addToOps(oplog.find({op: 'd', 'o._id': doc['_id']}));
}
diff --git a/jstests/mmap_v1/capped2.js b/jstests/mmap_v1/capped2.js
index ae74a396f98..82a6dca3874 100644
--- a/jstests/mmap_v1/capped2.js
+++ b/jstests/mmap_v1/capped2.js
@@ -9,9 +9,7 @@ function debug(x) {
var val = new Array(2000);
var c = "";
for (i = 0; i < 2000; ++i, c += "---") { // bigger and bigger objects through the array...
- val[i] = {
- a: c
- };
+ val[i] = {a: c};
}
function checkIncreasing(i) {
diff --git a/jstests/mmap_v1/capped8.js b/jstests/mmap_v1/capped8.js
index 68b3deb0b2a..78b9d1b2017 100644
--- a/jstests/mmap_v1/capped8.js
+++ b/jstests/mmap_v1/capped8.js
@@ -10,10 +10,7 @@ function debug(x) {
/** Generate an object with a string field of specified length */
function obj(size, x) {
- return {
- X: x,
- a: new Array(size + 1).toString()
- };
+ return {X: x, a: new Array(size + 1).toString()};
}
function withinTwo(a, b) {
diff --git a/jstests/mmap_v1/collmod.js b/jstests/mmap_v1/collmod.js
index 53c83f7d927..0ac6e98df60 100644
--- a/jstests/mmap_v1/collmod.js
+++ b/jstests/mmap_v1/collmod.js
@@ -54,9 +54,8 @@ assert.eq(0, res.ok, "TTL mod shouldn't work with non-numeric expireAfterSeconds
var res =
db.runCommand({"collMod": coll, "index": {"keyPattern": {a: 1}, "expireAfterSeconds": 100}});
debug(res);
-assert.eq(1,
- db.system.indexes.count({key: {a: 1}, expireAfterSeconds: 100}),
- "TTL index not modified");
+assert.eq(
+ 1, db.system.indexes.count({key: {a: 1}, expireAfterSeconds: 100}), "TTL index not modified");
// try to modify a faulty TTL index with a non-numeric expireAfterSeconds field
t.dropIndex({a: 1});
diff --git a/jstests/mmap_v1/datasize.js b/jstests/mmap_v1/datasize.js
index 8c61b927748..d12527a8922 100644
--- a/jstests/mmap_v1/datasize.js
+++ b/jstests/mmap_v1/datasize.js
@@ -27,20 +27,16 @@ assert.eq(96,
db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'a'}, max: {qq: 'z'}}).size);
assert.eq(48,
db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'a'}, max: {qq: 'd'}}).size);
-assert.eq(48,
- db.runCommand({
- datasize: "test.jstests_datasize",
- min: {qq: 'a'},
- max: {qq: 'd'},
- keyPattern: {qq: 1}
- }).size);
-assert.eq(48,
- db.runCommand({
- datasize: "test.jstests_datasize",
- min: {qq: 'd'},
- max: {qq: 'z'},
- keyPattern: {qq: 1}
- }).size);
+assert.eq(
+ 48,
+ db.runCommand(
+ {datasize: "test.jstests_datasize", min: {qq: 'a'}, max: {qq: 'd'}, keyPattern: {qq: 1}})
+ .size);
+assert.eq(
+ 48,
+ db.runCommand(
+ {datasize: "test.jstests_datasize", min: {qq: 'd'}, max: {qq: 'z'}, keyPattern: {qq: 1}})
+ .size);
assert.eq(0,
db.runCommand({datasize: "test.jstests_datasize", min: {qq: 'c'}, max: {qq: 'c'}}).size);
@@ -50,5 +46,5 @@ assert.eq(48,
assert.eq(
0,
db.runCommand(
- {datasize: "test.jstests_datasize", min: {qq: 'a'}, max: {qq: 'd'}, keyPattern: {a: 1}})
+ {datasize: "test.jstests_datasize", min: {qq: 'a'}, max: {qq: 'd'}, keyPattern: {a: 1}})
.ok);
diff --git a/jstests/mmap_v1/datasize3.js b/jstests/mmap_v1/datasize3.js
index cefcdcf9949..da5d89384b2 100644
--- a/jstests/mmap_v1/datasize3.js
+++ b/jstests/mmap_v1/datasize3.js
@@ -3,9 +3,7 @@ t = db.datasize3;
t.drop();
function run(options) {
- var c = {
- dataSize: "test.datasize3"
- };
+ var c = {dataSize: "test.datasize3"};
if (options)
Object.extend(c, options);
return db.runCommand(c);
diff --git a/jstests/mmap_v1/update.js b/jstests/mmap_v1/update.js
index fd96337aacf..3e132ca666a 100644
--- a/jstests/mmap_v1/update.js
+++ b/jstests/mmap_v1/update.js
@@ -11,9 +11,7 @@ var iterations = _isWindows() ? 2500 : 5000;
// fill db
for (var i = 1; i <= iterations; i++) {
- var obj = {
- txt: txt
- };
+ var obj = {txt: txt};
asdf.save(obj);
var obj2 = {
@@ -36,7 +34,7 @@ var stats = db.runCommand({collstats: "asdf"});
// basic
// testing of the collstats command at the same time
assert(stats.count == iterations);
-assert(stats.size<140433012 * 5 && stats.size> 1000000);
+assert(stats.size < 140433012 * 5 && stats.size > 1000000);
assert(stats.numExtents < 20);
assert(stats.nindexes == 1);
diff --git a/jstests/mmap_v1/use_power_of_2.js b/jstests/mmap_v1/use_power_of_2.js
index a192a79653d..b3db7077e1d 100644
--- a/jstests/mmap_v1/use_power_of_2.js
+++ b/jstests/mmap_v1/use_power_of_2.js
@@ -5,14 +5,8 @@
*/
// prepare a doc of 14K
-var doc = {
- _id: new Object(),
- data: "a"
-};
-var bigDoc = {
- _id: new Object(),
- data: "a"
-};
+var doc = {_id: new Object(), data: "a"};
+var bigDoc = {_id: new Object(), data: "a"};
while (doc.data.length < 14 * 1024)
doc.data += "a";
diff --git a/jstests/multiVersion/geo_2dsphere_v2_to_v3.js b/jstests/multiVersion/geo_2dsphere_v2_to_v3.js
index 9fa3773e2a8..c00612a492f 100644
--- a/jstests/multiVersion/geo_2dsphere_v2_to_v3.js
+++ b/jstests/multiVersion/geo_2dsphere_v2_to_v3.js
@@ -3,9 +3,7 @@
function generatePoint() {
var longitude = Math.random() * 10 - 5;
var latitude = Math.random() * 10 - 5;
- var pt = {
- geometry: {type: "Point", coordinates: [longitude, latitude]}
- };
+ var pt = {geometry: {type: "Point", coordinates: [longitude, latitude]}};
return pt;
}
@@ -49,9 +47,7 @@ function get2dsphereIndexVersion(coll) {
return -1;
}
-var nearQuery = {
- geometry: {$near: {$geometry: {type: "Point", coordinates: [0, 0]}}}
-};
+var nearQuery = {geometry: {$near: {$geometry: {type: "Point", coordinates: [0, 0]}}}};
var mongod = MongoRunner.runMongod({binVersion: "3.0"});
var coll = getCollection(mongod);
diff --git a/jstests/multiVersion/initialsync.js b/jstests/multiVersion/initialsync.js
index bbc06c11490..e9a424fd05c 100644
--- a/jstests/multiVersion/initialsync.js
+++ b/jstests/multiVersion/initialsync.js
@@ -8,10 +8,7 @@ var newVersion = "latest";
var name = "multiversioninitsync";
var multitest = function(replSetVersion, newNodeVersion) {
- var nodes = {
- n1: {binVersion: replSetVersion},
- n2: {binVersion: replSetVersion}
- };
+ var nodes = {n1: {binVersion: replSetVersion}, n2: {binVersion: replSetVersion}};
print("Start up a two-node " + replSetVersion + " replica set.");
var rst = new ReplSetTest({name: name, nodes: nodes});
diff --git a/jstests/multiVersion/invalid_key_pattern_upgrade.js b/jstests/multiVersion/invalid_key_pattern_upgrade.js
index ce71333ef40..a3098e34c0a 100644
--- a/jstests/multiVersion/invalid_key_pattern_upgrade.js
+++ b/jstests/multiVersion/invalid_key_pattern_upgrade.js
@@ -8,7 +8,11 @@
(function() {
'use strict';
- var testCases = [{a: 0}, {a: NaN}, {a: true}, ];
+ var testCases = [
+ {a: 0},
+ {a: NaN},
+ {a: true},
+ ];
// The mongod should not start up when an index with an invalid key pattern exists.
testCases.forEach(function(indexKeyPattern) {
@@ -49,7 +53,10 @@
// replicates.
testCases.forEach(function(indexKeyPattern) {
var replSetName = 'invalid_key_pattern_replset';
- var nodes = [{binVersion: '3.2'}, {binVersion: 'latest'}, ];
+ var nodes = [
+ {binVersion: '3.2'},
+ {binVersion: 'latest'},
+ ];
var rst = new ReplSetTest({name: replSetName, nodes: nodes});
diff --git a/jstests/multiVersion/libs/data_generators.js b/jstests/multiVersion/libs/data_generators.js
index c2af0638a5f..bc48845fbb2 100644
--- a/jstests/multiVersion/libs/data_generators.js
+++ b/jstests/multiVersion/libs/data_generators.js
@@ -56,9 +56,7 @@ function DataGenerator() {
function GenObject(seed) {
var seed = seed || 0;
- return {
- "object": true
- };
+ return {"object": true};
}
// BSON Type: 4
function GenArray(seed) {
@@ -647,9 +645,7 @@ function CollectionMetadataGenerator(options) {
//"autoIndexId" : false // XXX: this doesn't exist in 2.4
};
// We need to explicitly enable usePowerOf2Sizes, since it's the default in 2.6 but not in 2.4
- var normalCollectionMetadata = {
- "usePowerOf2Sizes": true
- };
+ var normalCollectionMetadata = {"usePowerOf2Sizes": true};
return {
"get": function() {
diff --git a/jstests/multiVersion/libs/dumprestore_helpers.js b/jstests/multiVersion/libs/dumprestore_helpers.js
index 6ca7e3bd37f..bb552f855df 100644
--- a/jstests/multiVersion/libs/dumprestore_helpers.js
+++ b/jstests/multiVersion/libs/dumprestore_helpers.js
@@ -83,22 +83,20 @@ function multiVersionDumpRestoreTest(configObj) {
// Dump using the specified version of mongodump from the running mongod or mongos instance.
if (configObj.dumpType === "mongod") {
- MongoRunner.runMongoTool("mongodump",
- {
- out: configObj.dumpDir,
- binVersion: configObj.mongoDumpVersion,
- host: serverSource.host,
- db: testBaseName
- });
+ MongoRunner.runMongoTool("mongodump", {
+ out: configObj.dumpDir,
+ binVersion: configObj.mongoDumpVersion,
+ host: serverSource.host,
+ db: testBaseName
+ });
MongoRunner.stopMongod(serverSource.port);
} else { /* "mongos" */
- MongoRunner.runMongoTool("mongodump",
- {
- out: configObj.dumpDir,
- binVersion: configObj.mongoDumpVersion,
- host: serverSource.host,
- db: testBaseName
- });
+ MongoRunner.runMongoTool("mongodump", {
+ out: configObj.dumpDir,
+ binVersion: configObj.mongoDumpVersion,
+ host: serverSource.host,
+ db: testBaseName
+ });
shardingTest.stop();
}
@@ -106,13 +104,12 @@ function multiVersionDumpRestoreTest(configObj) {
if (configObj.restoreType === "mongod") {
var serverDest = MongoRunner.runMongod({binVersion: configObj.serverDestVersion});
- MongoRunner.runMongoTool("mongorestore",
- {
- dir: configObj.dumpDir + "/" + testBaseName,
- binVersion: configObj.mongoRestoreVersion,
- host: serverDest.host,
- db: testBaseName
- });
+ MongoRunner.runMongoTool("mongorestore", {
+ dir: configObj.dumpDir + "/" + testBaseName,
+ binVersion: configObj.mongoRestoreVersion,
+ host: serverDest.host,
+ db: testBaseName
+ });
} else { /* "mongos" */
var shardingTestConfig = {
name: testBaseName + "_sharded_dest",
@@ -124,13 +121,12 @@ function multiVersionDumpRestoreTest(configObj) {
};
var shardingTest = new ShardingTest(shardingTestConfig);
serverDest = shardingTest.s;
- MongoRunner.runMongoTool("mongorestore",
- {
- dir: configObj.dumpDir + "/" + testBaseName,
- binVersion: configObj.mongoRestoreVersion,
- host: serverDest.host,
- db: testBaseName
- });
+ MongoRunner.runMongoTool("mongorestore", {
+ dir: configObj.dumpDir + "/" + testBaseName,
+ binVersion: configObj.mongoRestoreVersion,
+ host: serverDest.host,
+ db: testBaseName
+ });
}
var destDB = serverDest.getDB(testBaseName);
diff --git a/jstests/multiVersion/minor_version_tags_new_old_new.js b/jstests/multiVersion/minor_version_tags_new_old_new.js
index f39b3da4c68..c0fd26343cd 100644
--- a/jstests/multiVersion/minor_version_tags_new_old_new.js
+++ b/jstests/multiVersion/minor_version_tags_new_old_new.js
@@ -109,9 +109,7 @@
replTest.waitForState(replTest.nodes[nodeId], ReplSetTest.State.PRIMARY, 60 * 1000);
primary = replTest.getPrimary();
primary.forceWriteMode('commands');
- var writeConcern = {
- writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}
- };
+ var writeConcern = {writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}};
assert.writeOK(primary.getDB('foo').bar.insert({x: 100}, writeConcern));
return primary;
};
@@ -136,9 +134,7 @@
jsTestLog('partitions: nodes with each set of brackets [N1, N2, N3] form a complete network.');
jsTestLog('partitions: [0-1-2] [3] [4] (only nodes 0 and 1 can replicate from primary node 2');
- var doc = {
- x: 1
- };
+ var doc = {x: 1};
// This timeout should be shorter in duration than the server parameter maxSyncSourceLagSecs.
// Some writes are expected to block for this 'timeout' duration before failing.
@@ -151,9 +147,7 @@
primary = ensurePrimary(2, 3);
jsTestLog('Non-existent write concern should be rejected.');
- options = {
- writeConcern: {w: 'blahblah', wtimeout: timeout}
- };
+ options = {writeConcern: {w: 'blahblah', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
var result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
@@ -162,9 +156,7 @@
tojson(result.getWriteConcernError()));
jsTestLog('Write concern "3 or 4" should fail - 3 and 4 are not connected to the primary.');
- var options = {
- writeConcern: {w: '3 or 4', wtimeout: timeout}
- };
+ var options = {writeConcern: {w: '3 or 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = primary.getDB('foo').bar.insert(doc, options);
assert.neq(null, result.getWriteConcernError());
@@ -177,16 +169,12 @@
jsTestLog('Write concern "3 or 4" should work - 4 is now connected to the primary ' +
primary.host + ' via node 1 ' + replTest.nodes[1].host);
- options = {
- writeConcern: {w: '3 or 4', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '3 or 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "3 and 4" should fail - 3 is not connected to the primary.');
- options = {
- writeConcern: {w: '3 and 4', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
@@ -201,31 +189,23 @@
jsTestLog('31003 should sync from 31004 (31024)');
jsTestLog('Write concern "3 and 4" should work - ' +
'nodes 3 and 4 are connected to primary via node 1.');
- options = {
- writeConcern: {w: '3 and 4', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" - writes to primary only.');
- options = {
- writeConcern: {w: '2', wtimeout: 0}
- };
+ options = {writeConcern: {w: '2', wtimeout: 0}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "1 and 2"');
- options = {
- writeConcern: {w: '1 and 2', wtimeout: 0}
- };
+ options = {writeConcern: {w: '1 and 2', wtimeout: 0}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2 dc and 3 server"');
primary = ensurePrimary(2, 5);
- options = {
- writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
@@ -250,17 +230,13 @@
primary = ensurePrimary(1, 4);
jsTestLog('Write concern "3 and 4" should still work with new primary node 1 ' + primary.host);
- options = {
- writeConcern: {w: '3 and 4', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" should fail because node 2 ' + replTest.nodes[2].host +
' is down.');
- options = {
- writeConcern: {w: '2', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '2', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
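A note on the write-concern strings these tests exercise: values like '3 or 4', '3 and 4', and '2 dc and 3 server' are not built-in w modes but custom modes declared in the replica-set configuration via getLastErrorModes. A minimal sketch of how such a mode is defined, assuming an existing ReplSetTest named rst and illustrative tag values:

var conf = rst.getReplSetConfig();
conf.members[0].tags = {dc: "east", server: "e1"};
conf.members[1].tags = {dc: "west", server: "w1"};
conf.members[2].tags = {dc: "west", server: "w2"};
// "2 dc and 3 server" is satisfied once the write is acknowledged by members
// covering 2 distinct 'dc' tag values and 3 distinct 'server' tag values.
conf.settings = {getLastErrorModes: {"2 dc and 3 server": {dc: 2, server: 3}}};
rst.initiate(conf);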
diff --git a/jstests/multiVersion/minor_version_tags_old_new_old.js b/jstests/multiVersion/minor_version_tags_old_new_old.js
index 056aab2972a..1c3097da061 100644
--- a/jstests/multiVersion/minor_version_tags_old_new_old.js
+++ b/jstests/multiVersion/minor_version_tags_old_new_old.js
@@ -109,9 +109,7 @@
replTest.waitForState(replTest.nodes[nodeId], ReplSetTest.State.PRIMARY, 60 * 1000);
primary = replTest.getPrimary();
primary.forceWriteMode('commands');
- var writeConcern = {
- writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}
- };
+ var writeConcern = {writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}};
assert.writeOK(primary.getDB('foo').bar.insert({x: 100}, writeConcern));
return primary;
};
@@ -136,9 +134,7 @@
jsTestLog('partitions: nodes with each set of brackets [N1, N2, N3] form a complete network.');
jsTestLog('partitions: [0-1-2] [3] [4] (only nodes 0 and 1 can replicate from primary node 2)');
- var doc = {
- x: 1
- };
+ var doc = {x: 1};
// This timeout should be shorter in duration than the server parameter maxSyncSourceLagSecs.
// Some writes are expected to block for this 'timeout' duration before failing.
@@ -151,9 +147,7 @@
primary = ensurePrimary(2, 3);
jsTestLog('Non-existent write concern should be rejected.');
- options = {
- writeConcern: {w: 'blahblah', wtimeout: timeout}
- };
+ options = {writeConcern: {w: 'blahblah', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
var result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
@@ -162,9 +156,7 @@
tojson(result.getWriteConcernError()));
jsTestLog('Write concern "3 or 4" should fail - 3 and 4 are not connected to the primary.');
- var options = {
- writeConcern: {w: '3 or 4', wtimeout: timeout}
- };
+ var options = {writeConcern: {w: '3 or 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = primary.getDB('foo').bar.insert(doc, options);
assert.neq(null, result.getWriteConcernError());
@@ -177,16 +169,12 @@
jsTestLog('Write concern "3 or 4" should work - 4 is now connected to the primary ' +
primary.host + ' via node 1 ' + replTest.nodes[1].host);
- options = {
- writeConcern: {w: '3 or 4', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '3 or 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "3 and 4" should fail - 3 is not connected to the primary.');
- options = {
- writeConcern: {w: '3 and 4', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
@@ -201,31 +189,23 @@
jsTestLog('31003 should sync from 31004 (31024)');
jsTestLog('Write concern "3 and 4" should work - ' +
'nodes 3 and 4 are connected to primary via node 1.');
- options = {
- writeConcern: {w: '3 and 4', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" - writes to primary only.');
- options = {
- writeConcern: {w: '2', wtimeout: 0}
- };
+ options = {writeConcern: {w: '2', wtimeout: 0}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "1 and 2"');
- options = {
- writeConcern: {w: '1 and 2', wtimeout: 0}
- };
+ options = {writeConcern: {w: '1 and 2', wtimeout: 0}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2 dc and 3 server"');
primary = ensurePrimary(2, 5);
- options = {
- writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
@@ -250,17 +230,13 @@
primary = ensurePrimary(1, 4);
jsTestLog('Write concern "3 and 4" should still work with new primary node 1 ' + primary.host);
- options = {
- writeConcern: {w: '3 and 4', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" should fail because node 2 ' + replTest.nodes[2].host +
' is down.');
- options = {
- writeConcern: {w: '2', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '2', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
diff --git a/jstests/multiVersion/mixed_storage_version_replication.js b/jstests/multiVersion/mixed_storage_version_replication.js
index 58c75803b4e..0973fbd85ff 100644
--- a/jstests/multiVersion/mixed_storage_version_replication.js
+++ b/jstests/multiVersion/mixed_storage_version_replication.js
@@ -211,9 +211,7 @@ var RandomOps = {
}
var field = this.randomChoice(this.fieldNames);
- var updateDoc = {
- $set: {}
- };
+ var updateDoc = {$set: {}};
updateDoc.$set[field] = this.randomChoice(this.fieldValues);
if (this.verbose) {
print("Updating:");
@@ -432,9 +430,8 @@ var RandomOps = {
if (this.verbose) {
print("Converting " + coll.getFullName() + " to a capped collection.");
}
- assert.commandWorked(
- conn.getDB(coll.getDB())
- .runCommand({convertToCapped: coll.getName(), size: 1024 * 1024}));
+ assert.commandWorked(conn.getDB(coll.getDB())
+ .runCommand({convertToCapped: coll.getName(), size: 1024 * 1024}));
if (this.verbose) {
print("done.");
}
@@ -671,18 +668,14 @@ function doMultiThreadedWork(primary, numThreads) {
nodes["n" + node] = setups[i];
node++;
}
- nodes["n" + 2 * setups.length] = {
- arbiter: true
- };
+ nodes["n" + 2 * setups.length] = {arbiter: true};
var replTest = new ReplSetTest({nodes: nodes, name: name});
var conns = replTest.startSet();
var config = replTest.getReplSetConfig();
// Make sure everyone is syncing from the primary, to ensure we have all combinations of
// primary/secondary syncing.
- config.settings = {
- chainingAllowed: false
- };
+ config.settings = {chainingAllowed: false};
config.protocolVersion = 0;
replTest.initiate(config);
// Ensure all are synced.
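The convertToCapped invocation reflowed above is an ordinary database command; a standalone sketch of the same call against a hypothetical 'events' collection:

var testDB = db.getSiblingDB("test");
assert.commandWorked(testDB.createCollection("events"));
// Rebuilds 'events' in place as a capped collection of roughly 1 MiB.
assert.commandWorked(testDB.runCommand({convertToCapped: "events", size: 1024 * 1024}));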
diff --git a/jstests/multiVersion/partial_index_upgrade.js b/jstests/multiVersion/partial_index_upgrade.js
index 474252b4dc8..b19631ae3b3 100644
--- a/jstests/multiVersion/partial_index_upgrade.js
+++ b/jstests/multiVersion/partial_index_upgrade.js
@@ -58,7 +58,10 @@
// The secondary should terminate when the command to build an invalid partial index replicates.
testCases.forEach(function(indexOptions) {
var replSetName = 'partial_index_replset';
- var nodes = [{binVersion: '3.0'}, {binVersion: 'latest'}, ];
+ var nodes = [
+ {binVersion: '3.0'},
+ {binVersion: 'latest'},
+ ];
var rst = new ReplSetTest({name: replSetName, nodes: nodes});
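The nodes array above is how multiversion tests pin each member to a binary. A minimal sketch of the pattern, assuming the named binary versions are installed on the test host:

var rst = new ReplSetTest({
    name: "mixed_version_example",
    nodes: [
        {binVersion: "3.0"},     // old binary
        {binVersion: "latest"},  // current binary
    ],
});
rst.startSet();
rst.initiate();
rst.stopSet();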
diff --git a/jstests/multiVersion/transitioning_to_and_from_WT.js b/jstests/multiVersion/transitioning_to_and_from_WT.js
index 97ac9b7af74..f51e21a2ef9 100644
--- a/jstests/multiVersion/transitioning_to_and_from_WT.js
+++ b/jstests/multiVersion/transitioning_to_and_from_WT.js
@@ -7,11 +7,10 @@
jsTestLog("Setting up initial data set with the last stable version of mongod");
- var toolTest = new ToolTest('transitioning_to_and_from_WT',
- {
- binVersion: MongoRunner.getBinVersionFor("last-stable"),
- storageEngine: "mmapv1",
- });
+ var toolTest = new ToolTest('transitioning_to_and_from_WT', {
+ binVersion: MongoRunner.getBinVersionFor("last-stable"),
+ storageEngine: "mmapv1",
+ });
toolTest.dbpath = toolTest.root + "/original/";
resetDbpath(toolTest.dbpath);
diff --git a/jstests/multiVersion/upgrade_cluster.js b/jstests/multiVersion/upgrade_cluster.js
index a7703d8c30d..033df67611e 100644
--- a/jstests/multiVersion/upgrade_cluster.js
+++ b/jstests/multiVersion/upgrade_cluster.js
@@ -32,8 +32,8 @@ load('./jstests/multiVersion/libs/multi_cluster.js');
var unshardedDB = mongos.getDB('unshareded');
assert.commandWorked(unshardedDB.runCommand({insert: 'foo', documents: [{x: 1}]}));
- assert.commandWorked(unshardedDB.runCommand(
- {update: 'foo', updates: [{q: {x: 1}, u: {$set: {y: 1}}}]}));
+ assert.commandWorked(
+ unshardedDB.runCommand({update: 'foo', updates: [{q: {x: 1}, u: {$set: {y: 1}}}]}));
var doc = unshardedDB.foo.findOne({x: 1});
assert.eq(1, doc.y);
assert.commandWorked(
diff --git a/jstests/multiVersion/wt_index_option_defaults_replset.js b/jstests/multiVersion/wt_index_option_defaults_replset.js
index 9156d0b06ae..af17bd182b8 100644
--- a/jstests/multiVersion/wt_index_option_defaults_replset.js
+++ b/jstests/multiVersion/wt_index_option_defaults_replset.js
@@ -32,9 +32,7 @@
var secondary30 = conns[1].getDB('test');
// Create a collection with "indexOptionDefaults" specified.
- var indexOptions = {
- storageEngine: {wiredTiger: {configString: 'prefix_compression=false'}}
- };
+ var indexOptions = {storageEngine: {wiredTiger: {configString: 'prefix_compression=false'}}};
assert.commandWorked(primary32.runCommand({create: 'coll', indexOptionDefaults: indexOptions}));
// Verify that the "indexOptionDefaults" field is present in the corresponding oplog entry.
@@ -90,9 +88,7 @@
var secondary32 = conns[1].getDB('test');
// Create a collection with "indexOptionDefaults" specified.
- var indexOptions = {
- storageEngine: {wiredTiger: {configString: 'prefix_compression=false'}}
- };
+ var indexOptions = {storageEngine: {wiredTiger: {configString: 'prefix_compression=false'}}};
assert.commandWorked(primary30.runCommand({create: 'coll', indexOptionDefaults: indexOptions}));
// Verify that the "indexOptionDefaults" field is present in the corresponding oplog entry.
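For context, indexOptionDefaults is set once at create time and then inherited by the collection's indexes. A minimal sketch mirroring the test above; it assumes a mongod running WiredTiger:

var indexOptions = {storageEngine: {wiredTiger: {configString: "prefix_compression=false"}}};
assert.commandWorked(db.runCommand({create: "coll", indexOptionDefaults: indexOptions}));
// Indexes built afterwards pick up the default configString unless they
// specify their own storageEngine options.
assert.commandWorked(db.coll.createIndex({a: 1}));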
diff --git a/jstests/noPassthrough/backup_restore.js b/jstests/noPassthrough/backup_restore.js
index 2a896dcb2c7..3e15e2b114b 100644
--- a/jstests/noPassthrough/backup_restore.js
+++ b/jstests/noPassthrough/backup_restore.js
@@ -229,12 +229,8 @@
rst.waitForState(rst.getSecondaries(), ReplSetTest.State.SECONDARY, 60 * 1000);
// Add new hidden node to replSetTest
- var hiddenCfg = {
- restart: true,
- oplogSize: 1024,
- dbpath: hiddenDbpath,
- replSet: replSetName
- };
+ var hiddenCfg =
+ {restart: true, oplogSize: 1024, dbpath: hiddenDbpath, replSet: replSetName};
rst.add(hiddenCfg);
var hiddenHost = rst.nodes[numNodes].host;
@@ -257,12 +253,7 @@
// Add new hidden secondary to replica set
var rsConfig = primary.getDB("local").system.replset.findOne();
rsConfig.version += 1;
- var hiddenMember = {
- _id: numNodes,
- host: hiddenHost,
- priority: 0,
- hidden: true
- };
+ var hiddenMember = {_id: numNodes, host: hiddenHost, priority: 0, hidden: true};
rsConfig.members.push(hiddenMember);
assert.commandWorked(primary.adminCommand({replSetReconfig: rsConfig}),
testName + ' failed to reconfigure replSet ' + tojson(rsConfig));
diff --git a/jstests/noPassthrough/commands_handle_kill.js b/jstests/noPassthrough/commands_handle_kill.js
index b7ad6ea1bea..d2aedb8f4ea 100644
--- a/jstests/noPassthrough/commands_handle_kill.js
+++ b/jstests/noPassthrough/commands_handle_kill.js
@@ -70,12 +70,8 @@
// group command errors if plan executor is killed.
res = db.runCommand({
- group: {
- ns: coll.getFullName(),
- key: "_id",
- $reduce: function(curr, result) {},
- initial: {}
- }
+ group:
+ {ns: coll.getFullName(), key: "_id", $reduce: function(curr, result) {}, initial: {}}
});
assert.commandFailed(res);
assert(res.errmsg.indexOf("hit planExecutorAlwaysDead fail point") > -1);
diff --git a/jstests/noPassthrough/count_helper_read_preference.js b/jstests/noPassthrough/count_helper_read_preference.js
index a049e586598..b07621f3498 100644
--- a/jstests/noPassthrough/count_helper_read_preference.js
+++ b/jstests/noPassthrough/count_helper_read_preference.js
@@ -10,10 +10,7 @@
MockMongo.prototype = Mongo.prototype;
MockMongo.prototype.runCommand = function(db, cmd, opts) {
commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {
- ok: 1,
- n: 100
- };
+ return {ok: 1, n: 100};
};
var db = new DB(new MockMongo(), "test");
diff --git a/jstests/noPassthrough/currentop_query.js b/jstests/noPassthrough/currentop_query.js
index deb470666af..b72da385564 100644
--- a/jstests/noPassthrough/currentop_query.js
+++ b/jstests/noPassthrough/currentop_query.js
@@ -47,9 +47,7 @@
testObj.currentOpFilter.ns = coll.getFullName();
testObj.currentOpFilter.planSummary = testObj.planSummary;
if (testObj.hasOwnProperty("command")) {
- testObj.currentOpFilter["query." + testObj.command] = {
- $exists: true
- };
+ testObj.currentOpFilter["query." + testObj.command] = {$exists: true};
} else if (testObj.hasOwnProperty("operation")) {
testObj.currentOpFilter.op = testObj.operation;
}
@@ -132,7 +130,7 @@
reduce: function() {},
initial: {}
}),
- [{"a": 1}]);
+ [{"a": 1}]);
},
command: "group",
planSummary: "COLLSCAN",
diff --git a/jstests/noPassthrough/cursor_timeout.js b/jstests/noPassthrough/cursor_timeout.js
index f74521b9bc9..46a054da0ea 100644
--- a/jstests/noPassthrough/cursor_timeout.js
+++ b/jstests/noPassthrough/cursor_timeout.js
@@ -39,12 +39,7 @@ for (x = 0; x < 200; x++) {
var chunkDoc = configDB.chunks.findOne();
var chunkOwner = chunkDoc.shard;
var toShard = configDB.shards.findOne({_id: {$ne: chunkOwner}})._id;
-var cmd = {
- moveChunk: coll.getFullName(),
- find: chunkDoc.min,
- to: toShard,
- _waitForDelete: true
-};
+var cmd = {moveChunk: coll.getFullName(), find: chunkDoc.min, to: toShard, _waitForDelete: true};
var res = adminDB.runCommand(cmd);
jsTest.log('move result: ' + tojson(res));
diff --git a/jstests/noPassthrough/exit_logging.js b/jstests/noPassthrough/exit_logging.js
index 0647f312cc4..f996766866f 100644
--- a/jstests/noPassthrough/exit_logging.js
+++ b/jstests/noPassthrough/exit_logging.js
@@ -25,23 +25,23 @@
function checkOutput() {
var logContents = "";
- assert.soon(() =>
- {
- logContents = rawMongoProgramOutput();
- return matchFn(logContents);
- },
- function() {
- // We can't just return a string because it will be well over the max
- // line length.
- // So we just print manually.
- print("================ BEGIN LOG CONTENTS ==================");
- logContents.split(/\n/).forEach((line) => {
- print(line);
- });
- print("================ END LOG CONTENTS =====================");
- return "";
- },
- 30000);
+ assert.soon(
+ () => {
+ logContents = rawMongoProgramOutput();
+ return matchFn(logContents);
+ },
+ function() {
+ // We can't just return a string because it will be well over the max
+ // line length.
+ // So we just print manually.
+ print("================ BEGIN LOG CONTENTS ==================");
+ logContents.split(/\n/).forEach((line) => {
+ print(line);
+ });
+ print("================ END LOG CONTENTS =====================");
+ return "";
+ },
+ 30000);
}
try {
@@ -55,12 +55,9 @@
function runAllTests(launcher) {
const SIGSEGV = 11;
const SIGABRT = 6;
- testShutdownLogging(launcher,
- function(conn) {
- conn.getDB('admin').shutdownServer();
- },
- makeRegExMatchFn(/shutdown command received/),
- MongoRunner.EXIT_CLEAN);
+ testShutdownLogging(launcher, function(conn) {
+ conn.getDB('admin').shutdownServer();
+ }, makeRegExMatchFn(/shutdown command received/), MongoRunner.EXIT_CLEAN);
testShutdownLogging(launcher,
makeShutdownByCrashFn('fault'),
@@ -88,9 +85,7 @@
runAllTests({
start: function(opts) {
- var actualOpts = {
- nojournal: ""
- };
+ var actualOpts = {nojournal: ""};
Object.extend(actualOpts, opts);
return MongoRunner.runMongod(actualOpts);
},
@@ -105,9 +100,7 @@
var st = new ShardingTest({shards: 1, other: {shardOptions: {nojournal: ""}}});
var mongosLauncher = {
start: function(opts) {
- var actualOpts = {
- configdb: st._configDB
- };
+ var actualOpts = {configdb: st._configDB};
Object.extend(actualOpts, opts);
return MongoRunner.runMongos(actualOpts);
},
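The reflowed assert.soon call in the exit_logging hunks above uses the three-argument form: a condition polled until it returns truthy, a message (string or function) reported on failure, and a timeout in milliseconds. A self-contained sketch:

var deadline = Date.now() + 500;  // illustrative condition
assert.soon(
    function() {
        return Date.now() > deadline;  // polled repeatedly until truthy
    },
    "condition did not become true within the timeout",  // failure message
    30 * 1000);                                          // timeout in ms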
diff --git a/jstests/noPassthrough/ftdc_setparam.js b/jstests/noPassthrough/ftdc_setparam.js
index 73e3f8720a6..d4cf4029426 100644
--- a/jstests/noPassthrough/ftdc_setparam.js
+++ b/jstests/noPassthrough/ftdc_setparam.js
@@ -7,9 +7,7 @@
// Check the defaults are correct
//
function getparam(field) {
- var q = {
- getParameter: 1
- };
+ var q = {getParameter: 1};
q[field] = 1;
var ret = m.getDB("admin").runCommand(q);
diff --git a/jstests/noPassthrough/geo_full.js b/jstests/noPassthrough/geo_full.js
index 505a0efab33..491229233f5 100644
--- a/jstests/noPassthrough/geo_full.js
+++ b/jstests/noPassthrough/geo_full.js
@@ -46,13 +46,7 @@ var randEnvironment = function() {
var range = max - min;
var bucketSize = range / (4 * 1024 * 1024 * 1024);
- return {
- max: max,
- min: min,
- bits: bits,
- earth: false,
- bucketSize: bucketSize
- };
+ return {max: max, min: min, bits: bits, earth: false, bucketSize: bucketSize};
};
var randPoint = function(env, query) {
@@ -107,10 +101,7 @@ var randDataType = function() {
maxLocs = Math.floor(Random.rand() * locScale) + 1;
}
- return {
- numDocs: numDocs,
- maxLocs: maxLocs
- };
+ return {numDocs: numDocs, maxLocs: maxLocs};
};
function deg2rad(arg) {
@@ -241,22 +232,13 @@ var queryResults = function(locs, query, results) {
if (!results["center"]) {
for (var type in resultTypes) {
- results[type] = {
- docsIn: 0,
- docsOut: 0,
- locsIn: 0,
- locsOut: 0
- };
+ results[type] = {docsIn: 0, docsOut: 0, locsIn: 0, locsOut: 0};
}
}
var indResults = {};
for (var type in resultTypes) {
- indResults[type] = {
- docIn: false,
- locsIn: 0,
- locsOut: 0
- };
+ indResults[type] = {docIn: false, locsIn: 0, locsOut: 0};
}
for (var type in resultTypes) {
@@ -313,29 +295,19 @@ var randYesQuery = function() {
var choice = Math.floor(Random.rand() * 7);
if (choice == 0)
- return {
- $ne: "no"
- };
+ return {$ne: "no"};
else if (choice == 1)
return "yes";
else if (choice == 2)
return /^yes/;
else if (choice == 3)
- return {
- $in: ["good", "yes", "ok"]
- };
+ return {$in: ["good", "yes", "ok"]};
else if (choice == 4)
- return {
- $exists: true
- };
+ return {$exists: true};
else if (choice == 5)
- return {
- $nin: ["bad", "no", "not ok"]
- };
+ return {$nin: ["bad", "no", "not ok"]};
else if (choice == 6)
- return {
- $not: /^no/
- };
+ return {$not: /^no/};
};
var locArray = function(loc) {
@@ -423,13 +395,9 @@ for (var test = 0; test < numTests; test++) {
var doc;
// Nest the keys differently
if (Random.rand() < 0.5)
- doc = {
- locs: {loc: randLocTypes(multiPoint)}
- };
+ doc = {locs: {loc: randLocTypes(multiPoint)}};
else
- doc = {
- locs: randLocTypes(multiPoint, "loc")
- };
+ doc = {locs: randLocTypes(multiPoint, "loc")};
randQueryAdditions(doc, indResults);
@@ -438,9 +406,7 @@ for (var test = 0; test < numTests; test++) {
}
assert.writeOK(bulk.execute());
- var indexDoc = {
- "locs.loc": "2d"
- };
+ var indexDoc = {"locs.loc": "2d"};
randIndexAdditions(indexDoc);
t.ensureIndex(indexDoc, env);
assert.isnull(db.getLastError());
@@ -472,19 +438,18 @@ for (var test = 0; test < numTests; test++) {
print("Min box : " + minBoxSize(env, query.radius));
assert.eq(results.center.docsIn,
t.find({
- "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: 1}},
- "center.docIn": randYesQuery()
- }).count());
+ "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: 1}},
+ "center.docIn": randYesQuery()
+ }).count());
print("Center query update...");
- var res = t.update(
- {
- "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: true}},
- "center.docIn": randYesQuery()
- },
- {$set: {centerPaddingA: padding}},
- false,
- true);
+ var res = t.update({
+ "locs.loc": {$within: {$center: [query.center, query.radius], $uniqueDocs: true}},
+ "center.docIn": randYesQuery()
+ },
+ {$set: {centerPaddingA: padding}},
+ false,
+ true);
assert.eq(results.center.docsIn, res.nModified);
if (query.sphereRadius >= 0) {
@@ -493,41 +458,37 @@ for (var test = 0; test < numTests; test++) {
assert.eq(
results.sphere.docsIn,
t.find({
- "locs.loc": {$within: {$centerSphere: [query.sphereCenter, query.sphereRadius]}},
- "sphere.docIn": randYesQuery()
- }).count());
+ "locs.loc": {$within: {$centerSphere: [query.sphereCenter, query.sphereRadius]}},
+ "sphere.docIn": randYesQuery()
+ }).count());
print("Center sphere query update...");
- res = t.update(
- {
- "locs.loc": {
- $within: {
- $centerSphere: [query.sphereCenter, query.sphereRadius],
- $uniqueDocs: true
- }
- },
- "sphere.docIn": randYesQuery()
+ res = t.update({
+ "locs.loc": {
+ $within:
+ {$centerSphere: [query.sphereCenter, query.sphereRadius], $uniqueDocs: true}
},
- {$set: {spherePaddingA: padding}},
- false,
- true);
+ "sphere.docIn": randYesQuery()
+ },
+ {$set: {spherePaddingA: padding}},
+ false,
+ true);
assert.eq(results.sphere.docsIn, res.nModified);
}
// $box
print("Box query...");
- assert.eq(results.box.docsIn,
- t.find({
- "locs.loc": {$within: {$box: query.box, $uniqueDocs: true}},
- "box.docIn": randYesQuery()
- }).count());
+ assert.eq(results.box.docsIn, t.find({
+ "locs.loc": {$within: {$box: query.box, $uniqueDocs: true}},
+ "box.docIn": randYesQuery()
+ }).count());
// $polygon
print("Polygon query...");
- assert.eq(
- results.poly.docsIn,
- t.find({"locs.loc": {$within: {$polygon: query.boxPoly}}, "poly.docIn": randYesQuery()})
- .count());
+ assert.eq(results.poly.docsIn, t.find({
+ "locs.loc": {$within: {$polygon: query.boxPoly}},
+ "poly.docIn": randYesQuery()
+ }).count());
var defaultDocLimit = 100;
@@ -544,8 +505,8 @@ for (var test = 0; test < numTests; test++) {
assert.eq(
results.sphere.docsIn,
t.find({
- "locs.loc": {$nearSphere: query.sphereCenter, $maxDistance: query.sphereRadius}
- }).count(true),
+ "locs.loc": {$nearSphere: query.sphereCenter, $maxDistance: query.sphereRadius}
+ }).count(true),
"Near sphere query: sphere center: " + query.sphereCenter + "; radius: " +
query.sphereRadius + "; docs: " + results.sphere.docsIn + "; locs: " +
results.sphere.locsIn);
@@ -568,12 +529,12 @@ for (var test = 0; test < numTests; test++) {
var num = Math.min(2 * defaultDocLimit, 2 * results.center.docsIn);
var output = db.runCommand({
- geoNear: "testAllGeo",
- near: query.center,
- maxDistance: query.radius,
- includeLocs: true,
- num: num
- }).results;
+ geoNear: "testAllGeo",
+ near: query.center,
+ maxDistance: query.radius,
+ includeLocs: true,
+ num: num
+ }).results;
assert.eq(Math.min(num, results.center.docsIn),
output.length,
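The geoNear command shown above returns ranked results, and includeLocs adds the specific matched location to each entry. A minimal sketch against a hypothetical 'places' collection with a 2d index:

db.places.insert({loc: [1, 1]});
db.places.ensureIndex({loc: "2d"});
var out = db.runCommand(
    {geoNear: "places", near: [0, 0], maxDistance: 5, includeLocs: true, num: 10});
assert.commandWorked(out);
printjson(out.results);  // each entry carries 'dis', 'loc', and the matched 'obj'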
diff --git a/jstests/noPassthrough/geo_mnypts_plus_fields.js b/jstests/noPassthrough/geo_mnypts_plus_fields.js
index 801c7dcfc8b..eb8a03ce739 100644
--- a/jstests/noPassthrough/geo_mnypts_plus_fields.js
+++ b/jstests/noPassthrough/geo_mnypts_plus_fields.js
@@ -17,9 +17,7 @@ for (var fields = 1; fields < maxFields; fields++) {
for (var i = 0; i < totalPts; i++) {
var ii = i % 10000;
- var doc = {
- loc: [ii % 100, Math.floor(ii / 100)]
- };
+ var doc = {loc: [ii % 100, Math.floor(ii / 100)]};
// Add fields with different kinds of data
for (var j = 0; j < fields; j++) {
@@ -49,9 +47,7 @@ for (var fields = 1; fields < maxFields; fields++) {
if (j % 3 == 0) {
field = "abcdefg";
} else if (j % 3 == 1) {
- field = {
- $lte: new Date()
- };
+ field = {$lte: new Date()};
} else {
field = true;
}
diff --git a/jstests/noPassthrough/initial_sync_cloner_dups.js b/jstests/noPassthrough/initial_sync_cloner_dups.js
index dd0e05a8673..8967e94722c 100644
--- a/jstests/noPassthrough/initial_sync_cloner_dups.js
+++ b/jstests/noPassthrough/initial_sync_cloner_dups.js
@@ -100,10 +100,9 @@
// Removed the assertion because it was too flaky. Printing a warning instead (dan)
jsTestLog("making sure we dropped some dups");
var res = secondary.adminCommand({getLog: "global"});
- var droppedDups = (contains(res.log,
- function(v) {
- return v.indexOf("index build dropped" /* NNN dups*/) != -1;
- }));
+ var droppedDups = (contains(res.log, function(v) {
+ return v.indexOf("index build dropped" /* NNN dups*/) != -1;
+ }));
if (!droppedDups) {
jsTestLog(
"Warning: Test did not trigger duplicate documents, this run will be a false negative");
diff --git a/jstests/noPassthrough/javascript_options.js b/jstests/noPassthrough/javascript_options.js
index e0f1690bd5d..52cc641b274 100644
--- a/jstests/noPassthrough/javascript_options.js
+++ b/jstests/noPassthrough/javascript_options.js
@@ -3,9 +3,7 @@ var baseName = "jstests_nopassthrough_javascript_options";
load('jstests/libs/command_line/test_parsed_options.js');
jsTest.log("Testing \"noscripting\" command line option");
-var expectedResult = {
- "parsed": {"security": {"javascriptEnabled": false}}
-};
+var expectedResult = {"parsed": {"security": {"javascriptEnabled": false}}};
testGetCmdLineOptsMongod({noscripting: ""}, expectedResult);
jsTest.log("Testing explicitly disabled \"noscripting\" config file option");
diff --git a/jstests/noPassthrough/js_protection.js b/jstests/noPassthrough/js_protection.js
index 1299131289d..eda42395cd9 100644
--- a/jstests/noPassthrough/js_protection.js
+++ b/jstests/noPassthrough/js_protection.js
@@ -55,15 +55,14 @@
assert.neq(null, doc);
assert.eq(0, doc.y, tojson(doc));
- res = t.update(
- {
- $where: function() {
- return this.val === 0;
- }
- },
- {$set: {y: 100}},
- false,
- true);
+ res = t.update({
+ $where: function() {
+ return this.val === 0;
+ }
+ },
+ {$set: {y: 100}},
+ false,
+ true);
assert.writeOK(res);
doc = t.findOne({name: "testdoc"});
diff --git a/jstests/noPassthrough/lock_stats.js b/jstests/noPassthrough/lock_stats.js
index 078a22ead2d..73a3027b33f 100644
--- a/jstests/noPassthrough/lock_stats.js
+++ b/jstests/noPassthrough/lock_stats.js
@@ -36,14 +36,10 @@
// The server was just started, so initial stats may be missing.
if (!startStats.acquireWaitCount || !startStats.acquireWaitCount.W) {
- startStats.acquireWaitCount = {
- W: 0
- };
+ startStats.acquireWaitCount = {W: 0};
}
if (!startStats.timeAcquiringMicros || !startStats.timeAcquiringMicros.W) {
- startStats.timeAcquiringMicros = {
- W: 0
- };
+ startStats.timeAcquiringMicros = {W: 0};
}
var acquireWaitCount = endStats.acquireWaitCount.W - startStats.acquireWaitCount.W;
diff --git a/jstests/noPassthrough/logging_options.js b/jstests/noPassthrough/logging_options.js
index 794680fa937..238faa3c618 100644
--- a/jstests/noPassthrough/logging_options.js
+++ b/jstests/noPassthrough/logging_options.js
@@ -4,34 +4,24 @@ load('jstests/libs/command_line/test_parsed_options.js');
// Verbosity testing
jsTest.log("Testing \"verbose\" command line option with no args");
-var expectedResult = {
- "parsed": {"systemLog": {"verbosity": 1}}
-};
+var expectedResult = {"parsed": {"systemLog": {"verbosity": 1}}};
testGetCmdLineOptsMongod({verbose: ""}, expectedResult);
jsTest.log("Testing \"verbose\" command line option with one \"v\"");
-var expectedResult = {
- "parsed": {"systemLog": {"verbosity": 1}}
-};
+var expectedResult = {"parsed": {"systemLog": {"verbosity": 1}}};
testGetCmdLineOptsMongod({verbose: "v"}, expectedResult);
jsTest.log("Testing \"verbose\" command line option with two \"v\"s");
-var expectedResult = {
- "parsed": {"systemLog": {"verbosity": 2}}
-};
+var expectedResult = {"parsed": {"systemLog": {"verbosity": 2}}};
testGetCmdLineOptsMongod({verbose: "vv"}, expectedResult);
jsTest.log("Testing \"v\" command line option");
-var expectedResult = {
- "parsed": {"systemLog": {"verbosity": 1}}
-};
+var expectedResult = {"parsed": {"systemLog": {"verbosity": 1}}};
// Currently the test converts "{ v : 1 }" to "-v" when it spawns the binary.
testGetCmdLineOptsMongod({v: 1}, expectedResult);
jsTest.log("Testing \"vv\" command line option");
-var expectedResult = {
- "parsed": {"systemLog": {"verbosity": 2}}
-};
+var expectedResult = {"parsed": {"systemLog": {"verbosity": 2}}};
// Currently the test converts "{ v : 2 }" to "-vv" when it spawns the binary.
testGetCmdLineOptsMongod({v: 2}, expectedResult);
diff --git a/jstests/noPassthrough/minvalid2.js b/jstests/noPassthrough/minvalid2.js
index a9096805b66..2eb167444ad 100644
--- a/jstests/noPassthrough/minvalid2.js
+++ b/jstests/noPassthrough/minvalid2.js
@@ -55,9 +55,8 @@ printjson(lastOp);
// Overwrite minvalid document to simulate an inconsistent state (as might result from a server
// crash).
-local.replset.minvalid.update({},
- {ts: new Timestamp(lastOp.ts.t, lastOp.ts.i + 1)},
- {upsert: true});
+local.replset.minvalid.update(
+ {}, {ts: new Timestamp(lastOp.ts.t, lastOp.ts.i + 1)}, {upsert: true});
printjson(local.replset.minvalid.findOne());
print("5: shut down master");
diff --git a/jstests/noPassthrough/parameters.js b/jstests/noPassthrough/parameters.js
index a4fe35446b4..ddc5def5864 100644
--- a/jstests/noPassthrough/parameters.js
+++ b/jstests/noPassthrough/parameters.js
@@ -2,17 +2,13 @@ var dbConn = MongoRunner.runMongod();
function setAndCheckParameter(dbConn, parameterName, newValue, expectedResult) {
jsTest.log("Test setting parameter: " + parameterName + " to value: " + newValue);
- var getParameterCommand = {
- getParameter: 1
- };
+ var getParameterCommand = {getParameter: 1};
getParameterCommand[parameterName] = 1;
var ret = dbConn.adminCommand(getParameterCommand);
assert.eq(ret.ok, 1, tojson(ret));
oldValue = ret[parameterName];
- var setParameterCommand = {
- setParameter: 1
- };
+ var setParameterCommand = {setParameter: 1};
setParameterCommand[parameterName] = newValue;
var ret = dbConn.adminCommand(setParameterCommand);
assert.eq(ret.ok, 1, tojson(ret));
@@ -45,9 +41,7 @@ setAndCheckParameter(dbConn, "replMonitorMaxFailedChecks", -30);
function ensureSetParameterFailure(dbConn, parameterName, newValue) {
jsTest.log("Test setting parameter: " + parameterName + " to invalid value: " + newValue);
- var setParameterCommand = {
- setParameter: 1
- };
+ var setParameterCommand = {setParameter: 1};
setParameterCommand[parameterName] = newValue;
var ret = dbConn.adminCommand(setParameterCommand);
assert.eq(ret.ok, 0, tojson(ret));
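Because the parameter name is itself a document key, these helpers build the command objects dynamically. A compact sketch of the same round-trip, using a parameter that appears in this test:

var name = "replMonitorMaxFailedChecks";
var getCmd = {getParameter: 1};
getCmd[name] = 1;
var before = db.adminCommand(getCmd);
assert.eq(1, before.ok, tojson(before));

var setCmd = {setParameter: 1};
setCmd[name] = 30;  // new value; the reply echoes the old one in 'was'
assert.commandWorked(db.adminCommand(setCmd));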
diff --git a/jstests/noPassthrough/profile_options.js b/jstests/noPassthrough/profile_options.js
index 0e45391a7ef..e3f9c8bcc03 100644
--- a/jstests/noPassthrough/profile_options.js
+++ b/jstests/noPassthrough/profile_options.js
@@ -3,21 +3,15 @@ var baseName = "jstests_core_profile_options";
load('jstests/libs/command_line/test_parsed_options.js');
jsTest.log("Testing \"profile\" command line option with profiling off");
-var expectedResult = {
- "parsed": {"operationProfiling": {"mode": "off"}}
-};
+var expectedResult = {"parsed": {"operationProfiling": {"mode": "off"}}};
testGetCmdLineOptsMongod({profile: "0"}, expectedResult);
jsTest.log("Testing \"profile\" command line option with profiling slow operations on");
-var expectedResult = {
- "parsed": {"operationProfiling": {"mode": "slowOp"}}
-};
+var expectedResult = {"parsed": {"operationProfiling": {"mode": "slowOp"}}};
testGetCmdLineOptsMongod({profile: "1"}, expectedResult);
jsTest.log("Testing \"profile\" command line option with profiling all on");
-var expectedResult = {
- "parsed": {"operationProfiling": {"mode": "all"}}
-};
+var expectedResult = {"parsed": {"operationProfiling": {"mode": "all"}}};
testGetCmdLineOptsMongod({profile: "2"}, expectedResult);
jsTest.log("Testing \"operationProfiling.mode\" config file option");
diff --git a/jstests/noPassthrough/read_committed_lookup.js b/jstests/noPassthrough/read_committed_lookup.js
index e66195739ee..a8b3ff8522d 100644
--- a/jstests/noPassthrough/read_committed_lookup.js
+++ b/jstests/noPassthrough/read_committed_lookup.js
@@ -63,9 +63,7 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
}
// Seed matching data.
- var majorityWriteConcernObj = {
- writeConcern: {w: "majority", wtimeout: 60 * 1000}
- };
+ var majorityWriteConcernObj = {writeConcern: {w: "majority", wtimeout: 60 * 1000}};
var localId = db.local.insertOne({foreignKey: "x"}, majorityWriteConcernObj).insertedId;
var foreignId = db.foreign.insertOne({matchedField: "x"}, majorityWriteConcernObj).insertedId;
@@ -90,7 +88,9 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
var expectedMatchedResult = [{
_id: localId,
foreignKey: "x",
- match: [{_id: foreignId, matchedField: "x"}, ],
+ match: [
+ {_id: foreignId, matchedField: "x"},
+ ],
}];
var expectedUnmatchedResult = [{
_id: localId,
diff --git a/jstests/noPassthrough/read_majority.js b/jstests/noPassthrough/read_majority.js
index 3d84fce0911..c2b20df292a 100644
--- a/jstests/noPassthrough/read_majority.js
+++ b/jstests/noPassthrough/read_majority.js
@@ -28,8 +28,8 @@ load("jstests/libs/analyze_plan.js");
var t = db.readMajority;
function assertNoReadMajoritySnapshotAvailable() {
- var res = t.runCommand('find',
- {batchSize: 2, readConcern: {level: "majority"}, maxTimeMS: 1000});
+ var res =
+ t.runCommand('find', {batchSize: 2, readConcern: {level: "majority"}, maxTimeMS: 1000});
assert.commandFailed(res);
assert.eq(res.code, ErrorCodes.ExceededTimeLimit);
}
diff --git a/jstests/noPassthrough/read_majority_reads.js b/jstests/noPassthrough/read_majority_reads.js
index 5c169e3bed7..92a0032a2cd 100644
--- a/jstests/noPassthrough/read_majority_reads.js
+++ b/jstests/noPassthrough/read_majority_reads.js
@@ -86,12 +86,11 @@
},
geoNear: {
run: function(coll) {
- var res = coll.runCommand('geoNear',
- {
- readConcern: {level: 'majority'},
- near: [0, 0],
- spherical: true,
- });
+ var res = coll.runCommand('geoNear', {
+ readConcern: {level: 'majority'},
+ near: [0, 0],
+ spherical: true,
+ });
assert.commandWorked(res);
assert.eq(res.results.length, 1, tojson(res));
return res.results[0].obj.state;
@@ -101,13 +100,12 @@
},
geoSearch: {
run: function(coll) {
- var res = coll.runCommand('geoSearch',
- {
- readConcern: {level: 'majority'},
- near: [0, 0],
- search: {_id: 1}, // Needed due to SERVER-23158.
- maxDistance: 1,
- });
+ var res = coll.runCommand('geoSearch', {
+ readConcern: {level: 'majority'},
+ near: [0, 0],
+ search: {_id: 1}, // Needed due to SERVER-23158.
+ maxDistance: 1,
+ });
assert.commandWorked(res);
assert.eq(res.results.length, 1, tojson(res));
return res.results[0].state;
diff --git a/jstests/noPassthrough/sync_write.js b/jstests/noPassthrough/sync_write.js
index 04c654cff72..b2f2fa24a45 100644
--- a/jstests/noPassthrough/sync_write.js
+++ b/jstests/noPassthrough/sync_write.js
@@ -11,11 +11,7 @@
var dbpath = MongoRunner.dataPath + 'sync_write';
resetDbpath(dbpath);
- var mongodArgs = {
- dbpath: dbpath,
- noCleanData: true,
- journal: ''
- };
+ var mongodArgs = {dbpath: dbpath, noCleanData: true, journal: ''};
// Start a mongod.
var conn = MongoRunner.runMongod(mongodArgs);
diff --git a/jstests/noPassthrough/update_server-5552.js b/jstests/noPassthrough/update_server-5552.js
index 146dd0dab31..453914d6b3d 100644
--- a/jstests/noPassthrough/update_server-5552.js
+++ b/jstests/noPassthrough/update_server-5552.js
@@ -16,16 +16,15 @@ assert.writeOK(bulk.execute());
join = startParallelShell(
"while( db.foo.findOne( { _id : 0 } ).x == 1 ); db.foo.ensureIndex( { x : 1 } );");
-t.update(
- {
- $where: function() {
- sleep(1);
- return true;
- }
- },
- {$set: {x: 5}},
- false,
- true);
+t.update({
+ $where: function() {
+ sleep(1);
+ return true;
+ }
+},
+ {$set: {x: 5}},
+ false,
+ true);
db.getLastError();
join();
diff --git a/jstests/noPassthrough/write_local.js b/jstests/noPassthrough/write_local.js
index 019b8c437f7..0cfda86003f 100644
--- a/jstests/noPassthrough/write_local.js
+++ b/jstests/noPassthrough/write_local.js
@@ -3,9 +3,7 @@
'use strict';
// Limit concurrent WiredTiger transactions to maximize locking issues, harmless for other SEs.
- var options = {
- verbose: 1
- };
+ var options = {verbose: 1};
// Create a new single node replicaSet
var replTest =
diff --git a/jstests/noPassthrough/wt_index_option_defaults.js b/jstests/noPassthrough/wt_index_option_defaults.js
index 2516cc28d24..8609666a14d 100644
--- a/jstests/noPassthrough/wt_index_option_defaults.js
+++ b/jstests/noPassthrough/wt_index_option_defaults.js
@@ -56,14 +56,13 @@
// Start a mongod with system-wide defaults for engine-specific index options.
var conn = MongoRunner.runMongod({
dbpath: dbpath,
- noCleanData: true, [engine + 'IndexConfigString']: systemWideConfigString,
+ noCleanData: true,
+ [engine + 'IndexConfigString']: systemWideConfigString,
});
assert.neq(null, conn, 'mongod was unable to start up');
var testDB = conn.getDB('test');
- var cmdObj = {
- create: 'coll'
- };
+ var cmdObj = {create: 'coll'};
// Apply collection-wide defaults for engine-specific index options if any were
// specified.
@@ -78,12 +77,10 @@
assert.commandWorked(testDB.coll.createIndex({a: 1}, {name: 'without_options'}));
// Create an index that specifies engine-specific index options.
- assert.commandWorked(testDB.coll.createIndex(
- {b: 1},
- {
- name: 'with_options',
- storageEngine: {[engine]: {configString: indexSpecificConfigString}}
- }));
+ assert.commandWorked(testDB.coll.createIndex({b: 1}, {
+ name: 'with_options',
+ storageEngine: {[engine]: {configString: indexSpecificConfigString}}
+ }));
var collStats = testDB.runCommand({collStats: 'coll'});
assert.commandWorked(collStats);
diff --git a/jstests/noPassthrough/wt_nojournal_repl.js b/jstests/noPassthrough/wt_nojournal_repl.js
index b9c58a516db..8e25d5923f8 100644
--- a/jstests/noPassthrough/wt_nojournal_repl.js
+++ b/jstests/noPassthrough/wt_nojournal_repl.js
@@ -73,10 +73,9 @@ if (jsTest.options().storageEngine && jsTest.options().storageEngine !== "wiredT
// Test that the restarted secondary did NOT do an initial sync by checking the log
var res = secondary1.adminCommand({getLog: "global"});
- assert(!contains(res.log,
- function(v) {
- return v.indexOf("initial sync") != -1;
- }));
+ assert(!contains(res.log, function(v) {
+ return v.indexOf("initial sync") != -1;
+ }));
jsTestLog("check data is in both collections");
assert.eq(secondary1.getDB("test").foo.count(), 100);
diff --git a/jstests/noPassthroughWithMongod/apply_ops_errors.js b/jstests/noPassthroughWithMongod/apply_ops_errors.js
index 31353523810..d2f584af787 100644
--- a/jstests/noPassthroughWithMongod/apply_ops_errors.js
+++ b/jstests/noPassthroughWithMongod/apply_ops_errors.js
@@ -22,8 +22,11 @@
coll.ensureIndex({x: 1}, {unique: true});
coll.insert({_id: 1, x: "init"});
- var res =
- db.runCommand({applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 2, x: "init"}}, ]});
+ var res = db.runCommand({
+ applyOps: [
+ {op: "i", ns: coll.getFullName(), o: {_id: 2, x: "init"}},
+ ]
+ });
assert.eq(1, res.applied);
assert(res.code);
diff --git a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
index 14ee7d0fdb7..7bbafa4e931 100644
--- a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
+++ b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
@@ -7,9 +7,7 @@
assert.commandWorked(coll.getDB().createCollection(coll.getName()));
function makeDocument(docSize) {
- var doc = {
- "fieldName": ""
- };
+ var doc = {"fieldName": ""};
var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
while (Object.bsonsize(doc) < docSize) {
if (Object.bsonsize(doc) < docSize - longString.length) {
@@ -22,12 +20,7 @@
}
function executeBenchRun(benchOps) {
- var benchArgs = {
- ops: benchOps,
- parallel: 2,
- seconds: 1,
- host: db.getMongo().host
- };
+ var benchArgs = {ops: benchOps, parallel: 2, seconds: 1, host: db.getMongo().host};
if (jsTest.options().auth) {
benchArgs['db'] = 'admin';
benchArgs['username'] = jsTest.options().adminUser;
@@ -73,8 +66,8 @@
assert.writeOK(coll.insert({}));
}
- var res = executeBenchRun(
- [{ns: coll.getFullName(), op: "findOne", query: {}, readCmd: readCmd}]);
+ var res =
+ executeBenchRun([{ns: coll.getFullName(), op: "findOne", query: {}, readCmd: readCmd}]);
assert.gt(res.findOne, 0, tojson(res));
}
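The executeBenchRun helper above wraps the shell's built-in benchRun; a minimal direct sketch with an illustrative collection name:

db.bench.insert({});
var res = benchRun({
    ops: [{ns: "test.bench", op: "findOne", query: {}}],
    parallel: 2,              // worker threads
    seconds: 1,               // run time
    host: db.getMongo().host
});
assert.gt(res.findOne, 0, tojson(res));  // per-op throughput counters in the result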
diff --git a/jstests/noPassthroughWithMongod/clonecollection.js b/jstests/noPassthroughWithMongod/clonecollection.js
index a3633a12e58..2f0ec45ad44 100644
--- a/jstests/noPassthroughWithMongod/clonecollection.js
+++ b/jstests/noPassthroughWithMongod/clonecollection.js
@@ -16,9 +16,8 @@ assert.eq(1000, t.a.find().count(), "A2");
t.a.drop();
-assert.commandWorked(t.cloneCollection("localhost:" + fromMongod.port,
- "a",
- {i: {$gte: 10, $lt: 20}}));
+assert.commandWorked(
+ t.cloneCollection("localhost:" + fromMongod.port, "a", {i: {$gte: 10, $lt: 20}}));
assert.eq(10, t.a.find().count(), "A3");
t.a.drop();
@@ -35,9 +34,8 @@ assert.eq(2, t.a.getIndexes().length, "expected index missing");
x = t.a.find({i: 50}).hint({i: 1}).explain("executionStats");
printjson(x);
assert.eq(1, x.executionStats.nReturned, "verify 1");
-assert.eq(1,
- t.a.find({i: 50}).hint({i: 1}).toArray().length,
- "match length did not match expected");
+assert.eq(
+ 1, t.a.find({i: 50}).hint({i: 1}).toArray().length, "match length did not match expected");
// Check that capped-ness is preserved on clone
f.a.drop();
diff --git a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
index 20b6501c459..22617b681c2 100644
--- a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
+++ b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
@@ -82,9 +82,7 @@
});
},
reduce: function(key, values) {
- return {
- count: values.length
- };
+ return {count: values.length};
},
out: "foo"
},
@@ -103,10 +101,7 @@
});
function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {
- w: 1,
- j: true
- };
+ cmd.req.writeConcern = {w: 1, j: true};
jsTest.log("Testing " + tojson(cmd.req));
coll.drop();
diff --git a/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js b/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
index f9f9f7b9f06..bca0cd05f54 100644
--- a/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
+++ b/jstests/noPassthroughWithMongod/create_indexes_shell_helper.js
@@ -20,15 +20,11 @@
},
runCommand: function(db, cmd, opts) {
commandsRan.push({db: db, cmd: cmd, opts: opts});
- return {
- ok: 1.0
- };
+ return {ok: 1.0};
},
insert: function(db, indexSpecs, opts) {
insertsRan.push({db: db, indexSpecs: indexSpecs, opts: opts});
- return {
- ok: 1.0
- };
+ return {ok: 1.0};
},
getWriteConcern: function() {
return null;
diff --git a/jstests/noPassthroughWithMongod/external_sort_text_agg.js b/jstests/noPassthroughWithMongod/external_sort_text_agg.js
index ecb843ae9e5..b08a7c79a44 100644
--- a/jstests/noPassthroughWithMongod/external_sort_text_agg.js
+++ b/jstests/noPassthroughWithMongod/external_sort_text_agg.js
@@ -8,12 +8,13 @@ for (i = 0; i < 100; i++) {
}
var score = t.find({$text: {$search: "asdf"}}, {score: {$meta: 'textScore'}}).next().score;
-var res = t.aggregate([
- {$match: {$text: {$search: "asdf"}}},
- {$sort: {"_id": 1}},
- {$project: {string: "$text", score: {$meta: "textScore"}}}
-],
- {allowDiskUse: true});
+var res = t.aggregate(
+ [
+ {$match: {$text: {$search: "asdf"}}},
+ {$sort: {"_id": 1}},
+ {$project: {string: "$text", score: {$meta: "textScore"}}}
+ ],
+ {allowDiskUse: true});
// we must use .next() rather than a $limit because a $limit will optimize away the external sort
printjson(res.next());
assert.eq(res.next().score, score);
diff --git a/jstests/noPassthroughWithMongod/ftdc_params.js b/jstests/noPassthroughWithMongod/ftdc_params.js
index 5fae9e77c49..732af83f7d6 100644
--- a/jstests/noPassthroughWithMongod/ftdc_params.js
+++ b/jstests/noPassthroughWithMongod/ftdc_params.js
@@ -7,9 +7,7 @@
// Check the defaults are correct
//
function getparam(field) {
- var q = {
- getParameter: 1
- };
+ var q = {getParameter: 1};
q[field] = 1;
var ret = admin.runCommand(q);
diff --git a/jstests/noPassthroughWithMongod/geo_axis_aligned.js b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
index 47c0369e5e0..5e08a6c1739 100644
--- a/jstests/noPassthroughWithMongod/geo_axis_aligned.js
+++ b/jstests/noPassthroughWithMongod/geo_axis_aligned.js
@@ -98,18 +98,17 @@ for (var b = 0; b < bits.length; b++) {
assert.gte(a[k].dis, distance);
}
- r = t.find(
- {
- loc: {
- $within: {
- $box: [
- [center[j][0] - radius[i], center[j][1] - radius[i]],
- [center[j][0] + radius[i], center[j][1] + radius[i]]
- ]
- }
- }
- },
- {_id: 1});
+ r = t.find({
+ loc: {
+ $within: {
+ $box: [
+ [center[j][0] - radius[i], center[j][1] - radius[i]],
+ [center[j][0] + radius[i], center[j][1] + radius[i]]
+ ]
+ }
+ }
+ },
+ {_id: 1});
assert.eq(9, r.count());
}
}
diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js
index 073ffdeb72d..d2b271c32d7 100644
--- a/jstests/noPassthroughWithMongod/geo_polygon.js
+++ b/jstests/noPassthroughWithMongod/geo_polygon.js
@@ -5,10 +5,7 @@ num = 0;
var bulk = t.initializeUnorderedBulkOp();
for (x = -180; x < 180; x += .5) {
for (y = -180; y < 180; y += .5) {
- o = {
- _id: num++,
- loc: [x, y]
- };
+ o = {_id: num++, loc: [x, y]};
bulk.insert(o);
}
}
@@ -27,8 +24,8 @@ for (var n = 0; n < numTests; n++) {
assert.eq(
num,
t.find({
- loc: {"$within": {"$polygon": [[-180, -180], [-180, 180], [180, 180], [180, -180]]}}
- }).count(),
+ loc: {"$within": {"$polygon": [[-180, -180], [-180, 180], [180, 180], [180, -180]]}}
+ }).count(),
"Bounding Box Test");
assert.eq(
@@ -44,15 +41,17 @@ for (var n = 0; n < numTests; n++) {
// slope falls.
assert.between(
341 - 18,
- t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 10], [10, 10], [10, 0], [5, 5]]}}})
- .count(),
+ t.find({
+ loc: {"$within": {"$polygon": [[0, 0], [0, 10], [10, 10], [10, 0], [5, 5]]}}
+ }).count(),
341,
"Square Missing Chunk Test",
true);
assert.between(
21 - 2,
- t.find({loc: {"$within": {"$polygon": [[0, 0], [0, 2], [2, 2], [2, 0], [1, 1]]}}})
- .count(),
+ t.find({
+ loc: {"$within": {"$polygon": [[0, 0], [0, 2], [2, 2], [2, 0], [1, 1]]}}
+ }).count(),
21,
"Square Missing Chunk Test 2",
true);
diff --git a/jstests/noPassthroughWithMongod/index_check10.js b/jstests/noPassthroughWithMongod/index_check10.js
index 30ed9c17eac..2816eecdb4b 100644
--- a/jstests/noPassthroughWithMongod/index_check10.js
+++ b/jstests/noPassthroughWithMongod/index_check10.js
@@ -69,9 +69,7 @@ function doIt() {
for (var j = 0; j < Random.randInt(15); ++j) {
vals.push(r());
}
- spec[fields[i]] = {
- $in: vals
- };
+ spec[fields[i]] = {$in: vals};
}
}
s = sort();
diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js
index a801b473a44..fe158efbdad 100644
--- a/jstests/noPassthroughWithMongod/index_check9.js
+++ b/jstests/noPassthroughWithMongod/index_check9.js
@@ -83,9 +83,7 @@ function doIt() {
for (var j = 0; j < inLength; ++j) {
vals.push(r(alphas[i]));
}
- spec[fields[i]] = {
- $in: vals
- };
+ spec[fields[i]] = {$in: vals};
break;
}
case 2 /* equality */: {
diff --git a/jstests/noPassthroughWithMongod/index_multi.js b/jstests/noPassthroughWithMongod/index_multi.js
index 68004f27678..a09b20fee96 100644
--- a/jstests/noPassthroughWithMongod/index_multi.js
+++ b/jstests/noPassthroughWithMongod/index_multi.js
@@ -9,9 +9,7 @@ db.results.drop();
var bulk = coll.initializeUnorderedBulkOp();
print("Populate the collection with random data");
for (var i = 0; i < 1e4; i++) {
- var doc = {
- "_id": i
- };
+ var doc = {"_id": i};
for (var j = 0; j < 100; j++) {
// Skip some of the fields
@@ -89,9 +87,7 @@ for (var i = 0; i < 30; i++) {
print("Do some sets and unsets");
bulk = coll.initializeUnorderedBulkOp();
for (i = 0; i < 1e4; i++) {
- var criteria = {
- _id: Random.randInt(1e5)
- };
+ var criteria = {_id: Random.randInt(1e5)};
var mod = {};
if (Random.rand() < .5) {
mod['$set'] = {};
diff --git a/jstests/noPassthroughWithMongod/indexbg_drop.js b/jstests/noPassthroughWithMongod/indexbg_drop.js
index df66d82aaeb..74722b0e631 100644
--- a/jstests/noPassthroughWithMongod/indexbg_drop.js
+++ b/jstests/noPassthroughWithMongod/indexbg_drop.js
@@ -39,10 +39,7 @@ var secondId = replTest.getNodeId(second);
var masterDB = master.getDB(dbname);
var secondDB = second.getDB(dbname);
-var dc = {
- dropIndexes: collection,
- index: "i_1"
-};
+var dc = {dropIndexes: collection, index: "i_1"};
// set up collections
masterDB.dropDatabase();
diff --git a/jstests/noPassthroughWithMongod/indexbg_updates.js b/jstests/noPassthroughWithMongod/indexbg_updates.js
index a660ffc6eeb..c3465f78047 100644
--- a/jstests/noPassthroughWithMongod/indexbg_updates.js
+++ b/jstests/noPassthroughWithMongod/indexbg_updates.js
@@ -15,10 +15,7 @@
var bulk = coll.initializeUnorderedBulkOp();
print("Populate the collection with random data");
for (var i = 0; i < numDocs; i++) {
- var doc = {
- "_id": i,
- "field0": Random.rand()
- };
+ var doc = {"_id": i, "field0": Random.rand()};
bulk.insert(doc);
}
@@ -28,9 +25,7 @@
// field being actively indexed in the background
bulk = coll.initializeUnorderedBulkOp();
for (i = 0; i < numDocs; i++) {
- var criteria = {
- "_id": 1000
- };
+ var criteria = {"_id": 1000};
var mod = {};
if (Random.rand() < .8) {
diff --git a/jstests/noPassthroughWithMongod/insertMulti.js b/jstests/noPassthroughWithMongod/insertMulti.js
index e2a70307550..2d6fb3a9df4 100644
--- a/jstests/noPassthroughWithMongod/insertMulti.js
+++ b/jstests/noPassthroughWithMongod/insertMulti.js
@@ -4,9 +4,7 @@
"use strict";
function makeDocument(docSize) {
- var doc = {
- "fieldName": ""
- };
+ var doc = {"fieldName": ""};
var longString = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
while (Object.bsonsize(doc) < docSize) {
if (Object.bsonsize(doc) < docSize - longString.length) {
diff --git a/jstests/noPassthroughWithMongod/mr_writeconflict.js b/jstests/noPassthroughWithMongod/mr_writeconflict.js
index baae608b59e..4afabfb0143 100644
--- a/jstests/noPassthroughWithMongod/mr_writeconflict.js
+++ b/jstests/noPassthroughWithMongod/mr_writeconflict.js
@@ -5,11 +5,7 @@
load('jstests/libs/parallelTester.js');
var makeDoc = function(keyLimit, valueLimit) {
- return {
- _id: ObjectId(),
- key: Random.randInt(keyLimit),
- value: Random.randInt(valueLimit)
- };
+ return {_id: ObjectId(), key: Random.randInt(keyLimit), value: Random.randInt(valueLimit)};
};
var main = function() {
diff --git a/jstests/noPassthroughWithMongod/replReads.js b/jstests/noPassthroughWithMongod/replReads.js
index 45e0a4d49a6..3010be8e80c 100644
--- a/jstests/noPassthroughWithMongod/replReads.js
+++ b/jstests/noPassthroughWithMongod/replReads.js
@@ -56,10 +56,7 @@ function testReadLoadBalancing(numReplicas) {
connections.push(conn);
}
- var profileCriteria = {
- op: 'query',
- ns: 'test.foo'
- };
+ var profileCriteria = {op: 'query', ns: 'test.foo'};
for (var i = 0; i < secondaries.length; i++) {
var profileCollection = secondaries[i].getDB('test').system.profile;
diff --git a/jstests/noPassthroughWithMongod/rpc_protocols.js b/jstests/noPassthroughWithMongod/rpc_protocols.js
index 7e33c3986d3..9650d57e421 100644
--- a/jstests/noPassthroughWithMongod/rpc_protocols.js
+++ b/jstests/noPassthroughWithMongod/rpc_protocols.js
@@ -4,10 +4,7 @@
// startup using the "--rpcProtocols" command line option, or at runtime using the
// "setClientRPCProtocols" method on the Mongo object.
-var RPC_PROTOCOLS = {
- OP_QUERY: "opQueryOnly",
- OP_COMMAND: "opCommandOnly"
-};
+var RPC_PROTOCOLS = {OP_QUERY: "opQueryOnly", OP_COMMAND: "opCommandOnly"};
(function() {
"use strict";
@@ -28,50 +25,43 @@ var RPC_PROTOCOLS = {
}
// Test that --rpcProtocols=opQueryOnly forces OP_QUERY commands.
- runInShell(
- RPC_PROTOCOLS.OP_QUERY,
- function() {
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opQueryCommandLine").itcount();
- });
+ runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opQueryCommandLine").itcount();
+ });
var profileDoc = db.system.profile.findOne({"query.comment": "opQueryCommandLine"});
assert(profileDoc !== null);
assert.eq(profileDoc.protocol, "op_query");
// Test that --rpcProtocols=opCommandOnly forces OP_COMMAND commands.
- runInShell(
- RPC_PROTOCOLS.OP_COMMAND,
- function() {
- assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opCommandCommandLine").itcount();
- });
+ runInShell(RPC_PROTOCOLS.OP_COMMAND, function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opCommandCommandLine").itcount();
+ });
profileDoc = db.system.profile.findOne({"query.comment": "opCommandCommandLine"});
assert(profileDoc !== null);
assert.eq(profileDoc.protocol, "op_command");
// Test that .setClientRPCProtocols("opQueryOnly") forces OP_QUERY commands. We start the shell
// in OP_COMMAND only mode, then switch it to OP_QUERY mode at runtime.
- runInShell(RPC_PROTOCOLS.OP_COMMAND,
- function() {
- assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
- db.getMongo().setClientRPCProtocols("opQueryOnly");
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opQueryRuntime").itcount();
- });
+ runInShell(RPC_PROTOCOLS.OP_COMMAND, function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
+ db.getMongo().setClientRPCProtocols("opQueryOnly");
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opQueryRuntime").itcount();
+ });
profileDoc = db.system.profile.findOne({"query.comment": "opQueryRuntime"});
assert(profileDoc !== null);
assert.eq(profileDoc.protocol, "op_query");
// Test that .setClientRPCProtocols("opCommandOnly") forces OP_COMMAND commands. We start the
// shell in OP_QUERY only mode, then switch it to OP_COMMAND mode at runtime.
- runInShell(
- RPC_PROTOCOLS.OP_QUERY,
- function() {
- assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
- db.getMongo().setClientRPCProtocols("opCommandOnly");
- assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
- db.getSiblingDB("test").rpcProtocols.find().comment("opCommandRuntime").itcount();
- });
+ runInShell(RPC_PROTOCOLS.OP_QUERY, function() {
+ assert(db.getMongo().getClientRPCProtocols() === "opQueryOnly");
+ db.getMongo().setClientRPCProtocols("opCommandOnly");
+ assert(db.getMongo().getClientRPCProtocols() === "opCommandOnly");
+ db.getSiblingDB("test").rpcProtocols.find().comment("opCommandRuntime").itcount();
+ });
profileDoc = db.system.profile.findOne({"query.comment": "opCommandRuntime"});
assert(profileDoc !== null);
assert.eq(profileDoc.protocol, "op_command");
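
The hunks above all reformat one recurring pattern in this test: force a wire protocol, run a commented query, then check which protocol the profiler recorded. A minimal sketch of that pattern, not part of the patch, assuming profiling is already enabled at level 2 and using an illustrative comment string:

    db.getMongo().setClientRPCProtocols("opQueryOnly");
    db.getSiblingDB("test").rpcProtocols.find().comment("sketch").itcount();
    var profileDoc = db.system.profile.findOne({"query.comment": "sketch"});
    assert.eq(profileDoc.protocol, "op_query");  // "op_command" when forced the other way
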
diff --git a/jstests/noPassthroughWithMongod/temp_namespace.js b/jstests/noPassthroughWithMongod/temp_namespace.js
index a2f1aa21a80..d84dcb302f3 100644
--- a/jstests/noPassthroughWithMongod/temp_namespace.js
+++ b/jstests/noPassthroughWithMongod/temp_namespace.js
@@ -17,9 +17,11 @@ d.runCommand({create: testname + 'keep3'});
d[testname + 'keep4'].insert({});
function countCollectionNames(theDB, regex) {
- return theDB.getCollectionNames().filter(function(z) {
- return z.match(regex);
- }).length;
+ return theDB.getCollectionNames()
+ .filter(function(z) {
+ return z.match(regex);
+ })
+ .length;
}
assert.eq(countCollectionNames(d, /temp\d$/), 2);
diff --git a/jstests/parallel/checkMultiThread.js b/jstests/parallel/checkMultiThread.js
index a6b92689bec..c4c6ccd48f0 100644
--- a/jstests/parallel/checkMultiThread.js
+++ b/jstests/parallel/checkMultiThread.js
@@ -12,9 +12,7 @@ a.start();
b.start();
a.join();
b.join();
-assert.lt(a.returnData().getMilliseconds(),
- start.getMilliseconds() + 15000,
- "A took more than 15s");
-assert.lt(b.returnData().getMilliseconds(),
- start.getMilliseconds() + 15000,
- "B took more than 15s");
+assert.lt(
+ a.returnData().getMilliseconds(), start.getMilliseconds() + 15000, "A took more than 15s");
+assert.lt(
+ b.returnData().getMilliseconds(), start.getMilliseconds() + 15000, "B took more than 15s");
diff --git a/jstests/perf/v8_mapreduce.js b/jstests/perf/v8_mapreduce.js
index 7ff329c5284..c2123c89403 100644
--- a/jstests/perf/v8_mapreduce.js
+++ b/jstests/perf/v8_mapreduce.js
@@ -13,10 +13,7 @@ if (/V8/.test(interpreterVersion()) && db.runCommand({buildinfo: 1}).javascriptE
var tid = tid || 0;
var threadStart = new Date();
job(tid);
- return {
- "threadStart": threadStart,
- "threadEnd": new Date()
- };
+ return {"threadStart": threadStart, "threadEnd": new Date()};
};
// function timeMultipleThreads
diff --git a/jstests/readonly/geo.js b/jstests/readonly/geo.js
index 73e91c64eeb..13705af0408 100644
--- a/jstests/readonly/geo.js
+++ b/jstests/readonly/geo.js
@@ -24,10 +24,7 @@ runReadOnlyTest(function() {
name: "The Counting Room",
loc: {type: "Point", coordinates: [40.7209601, -73.9588041]}
},
- {
- name: "Kinfolk 94",
- loc: {type: "Point", coordinates: [40.7217058, -73.9605489]}
- }
+ {name: "Kinfolk 94", loc: {type: "Point", coordinates: [40.7217058, -73.9605489]}}
];
writableCollection.insertMany(locDocs);
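
The collapsed document above is a GeoJSON point like the two kept multi-line. As context for why the test stores points in this shape, a minimal sketch of how such documents are typically indexed and queried; the index creation and $near query here are illustrative and not taken from the test:

    writableCollection.createIndex({loc: "2dsphere"});
    writableCollection.find({
        loc: {$near: {$geometry: {type: "Point", coordinates: [40.7217058, -73.9605489]}}}
    }).limit(1);
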
diff --git a/jstests/readonly/lib/read_only_test.js b/jstests/readonly/lib/read_only_test.js
index 37802ee66d2..e3b68671966 100644
--- a/jstests/readonly/lib/read_only_test.js
+++ b/jstests/readonly/lib/read_only_test.js
@@ -34,11 +34,7 @@ var StandaloneFixture, ShardedFixture, runReadOnlyTest, zip2, cycleN;
try {
makeDirectoryReadOnly(this.dbpath);
- var options = {
- queryableBackupMode: "",
- noCleanData: true,
- dbpath: this.dbpath
- };
+ var options = {queryableBackupMode: "", noCleanData: true, dbpath: this.dbpath};
this.mongod = MongoRunner.runMongod(options);
@@ -71,17 +67,12 @@ var StandaloneFixture, ShardedFixture, runReadOnlyTest, zip2, cycleN;
jsTest.log("restarting shards...");
try {
for (var i = 0; i < this.nShards; ++i) {
- var opts = {
- queryableBackupMode: "",
- dbpath: this.paths[i]
- };
+ var opts = {queryableBackupMode: "", dbpath: this.paths[i]};
assert.commandWorked(this.shardingTest["d" + i].getDB("local").dropDatabase());
- this.shardingTest.restartMongod(i,
- opts,
- () => {
- makeDirectoryReadOnly(this.paths[i]);
- });
+ this.shardingTest.restartMongod(i, opts, () => {
+ makeDirectoryReadOnly(this.paths[i]);
+ });
}
jsTest.log("restarting mongos...");
diff --git a/jstests/repl/basic1.js b/jstests/repl/basic1.js
index 515667c48e4..ff286b23721 100644
--- a/jstests/repl/basic1.js
+++ b/jstests/repl/basic1.js
@@ -92,9 +92,7 @@ block();
checkNumCollections("MR4");
var t = am.rpos;
-var writeOption = {
- writeConcern: {w: 2, wtimeout: 3000}
-};
+var writeOption = {writeConcern: {w: 2, wtimeout: 3000}};
t.insert({_id: 1, a: [{n: "a", c: 1}, {n: "b", c: 1}, {n: "c", c: 1}], b: [1, 2, 3]}, writeOption);
check("after pos 1 ");
@@ -114,11 +112,7 @@ printjson(as.rpos.findOne());
// ).forEach( printjson )
t = am.b;
-var updateOption = {
- upsert: true,
- multi: false,
- writeConcern: {w: 2, wtimeout: 3000}
-};
+var updateOption = {upsert: true, multi: false, writeConcern: {w: 2, wtimeout: 3000}};
t.update({_id: "fun"}, {$inc: {"a.b.c.x": 6743}}, updateOption);
check("b 1");
@@ -142,9 +136,8 @@ assert.soon(function() {
return am.lotOfIndexes.getIndexes().length == as.lotOfIndexes.getIndexes().length;
}, "lots of indexes a");
-assert.eq(am.lotOfIndexes.getIndexes().length,
- as.lotOfIndexes.getIndexes().length,
- "lots of indexes b");
+assert.eq(
+ am.lotOfIndexes.getIndexes().length, as.lotOfIndexes.getIndexes().length, "lots of indexes b");
// multi-update with $inc
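
The collapsed literals in this file's hunks (writeOption, updateOption) share the shell's write-options shape: a writeConcern document, optionally alongside update flags. A minimal sketch of how such objects are consumed by the write helpers, mirroring the test's collection handle t and using illustrative values:

    var writeOption = {writeConcern: {w: 2, wtimeout: 3000}};
    assert.writeOK(t.insert({x: 1}, writeOption));
    var updateOption = {upsert: true, multi: false, writeConcern: {w: 2, wtimeout: 3000}};
    assert.writeOK(t.update({_id: "fun"}, {$inc: {n: 1}}, updateOption));
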
diff --git a/jstests/repl/batch_write_command_wc_repl.js b/jstests/repl/batch_write_command_wc_repl.js
index 3e28b330bf1..167182ed6ae 100644
--- a/jstests/repl/batch_write_command_wc_repl.js
+++ b/jstests/repl/batch_write_command_wc_repl.js
@@ -18,7 +18,10 @@ var coll = mongod.getCollection("test.batch_write_command_wc_repl");
//
// Basic insert, default WC
coll.remove({});
-printjson(request = {insert: coll.getName(), documents: [{a: 1}]});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}]
+});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
@@ -27,7 +30,11 @@ assert.eq(1, coll.count());
//
// Basic insert, majority WC
coll.remove({});
-printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'majority'}});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 'majority'}
+});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
@@ -36,7 +43,11 @@ assert.eq(1, coll.count());
//
// Basic insert, immediate bad wMode error
coll.remove({});
-printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'invalid'}});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 'invalid'}
+});
printjson(result = coll.runCommand(request));
assert(!result.ok);
assert.eq(0, coll.count());
@@ -44,8 +55,11 @@ assert.eq(0, coll.count());
//
// Basic insert, error on WC with wtimeout
coll.remove({});
-printjson(
- request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 2, wtimeout: 1}});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 2, wtimeout: 1}
+});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
diff --git a/jstests/repl/repl13.js b/jstests/repl/repl13.js
index 78fe9adfc81..c560f285cf4 100644
--- a/jstests/repl/repl13.js
+++ b/jstests/repl/repl13.js
@@ -61,6 +61,5 @@ assert.eq(1, sc.findOne({_id: 90000}).a);
m_hash = m.getDB("d").runCommand("dbhash");
s_hash = s.getDB("d").runCommand("dbhash");
-assert.eq(m_hash.collections.c,
- s_hash.collections.c,
- "sad " + tojson(m_hash) + " " + tojson(s_hash));
+assert.eq(
+ m_hash.collections.c, s_hash.collections.c, "sad " + tojson(m_hash) + " " + tojson(s_hash));
diff --git a/jstests/repl/repl14.js b/jstests/repl/repl14.js
index 5bd806ef92f..e897d9376f1 100644
--- a/jstests/repl/repl14.js
+++ b/jstests/repl/repl14.js
@@ -16,9 +16,7 @@ function testWithCollectionIndexIds(capped, sparse, useIds) {
toInsert = {};
if (capped) {
// Add a singleton array as padding, so the push later on will not change document size.
- toInsert = {
- p: [1]
- };
+ toInsert = {p: [1]};
}
if (useIds) { // Insert with an auto-generated _id.
mc.insert(toInsert);
@@ -35,14 +33,10 @@ function testWithCollectionIndexIds(capped, sparse, useIds) {
return sc.count() > 0;
}, "doc not replicated soon enough", 60 * 1000);
- modifiers = {
- $push: {a: 1}
- };
+ modifiers = {$push: {a: 1}};
if (capped) {
// Delete our singleton array to balance the new singleton array we're going to create.
- modifiers['$unset'] = {
- p: 1
- };
+ modifiers['$unset'] = {p: 1};
}
assert.writeOK(mc.update({}, modifiers));
diff --git a/jstests/replsets/apply_batch_only_goes_forward.js b/jstests/replsets/apply_batch_only_goes_forward.js
index 4744fcf4342..a461e9fad12 100644
--- a/jstests/replsets/apply_batch_only_goes_forward.js
+++ b/jstests/replsets/apply_batch_only_goes_forward.js
@@ -35,10 +35,7 @@
var sLocal = slave.getDB("local");
var sMinvalid = sLocal["replset.minvalid"];
var stepDownSecs = 30;
- var stepDownCmd = {
- replSetStepDown: stepDownSecs,
- force: true
- };
+ var stepDownCmd = {replSetStepDown: stepDownSecs, force: true};
// Write op
assert.writeOK(mTest.foo.save({}, {writeConcern: {w: 3}}));
@@ -50,9 +47,11 @@
var farFutureTS = new Timestamp(
Math.floor(new Date().getTime() / 1000) + (60 * 60 * 24 * 5 /* in five days*/), 0);
var rsgs = assert.commandWorked(mLocal.adminCommand("replSetGetStatus"));
- var primaryOpTime = rsgs.members.filter(function(member) {
- return member.self;
- })[0].optime;
+ var primaryOpTime = rsgs.members
+ .filter(function(member) {
+ return member.self;
+ })[0]
+ .optime;
jsTest.log("future TS: " + tojson(farFutureTS) + ", date:" + tsToDate(farFutureTS));
// We do an update in case there is a minvalid document on the primary already.
// If the doc doesn't exist then upsert:true will create it, and the writeConcern ensures
diff --git a/jstests/replsets/apply_ops_lastop.js b/jstests/replsets/apply_ops_lastop.js
index 1e7df9a9035..e3ff0f19554 100644
--- a/jstests/replsets/apply_ops_lastop.js
+++ b/jstests/replsets/apply_ops_lastop.js
@@ -27,15 +27,12 @@
var insertApplyOps = [{op: "i", ns: 'foo.bar', o: {_id: 1, a: "b"}}];
var deleteApplyOps = [{op: "d", ns: 'foo.bar', o: {_id: 1, a: "b"}}];
var badPreCondition = [{ns: 'foo.bar', q: {_id: 10, a: "aaa"}, res: {a: "aaa"}}];
- var majorityWriteConcern = {
- w: 'majority',
- wtimeout: 30000
- };
+ var majorityWriteConcern = {w: 'majority', wtimeout: 30000};
// Set up some data
assert.writeOK(coll.insert({x: 1})); // creating the collection so applyOps works
- assert.commandWorked(m1.getDB('foo').runCommand(
- {applyOps: insertApplyOps, writeConcern: majorityWriteConcern}));
+ assert.commandWorked(
+ m1.getDB('foo').runCommand({applyOps: insertApplyOps, writeConcern: majorityWriteConcern}));
var insertOp = m1.getDB('foo').getLastErrorObj('majority', 30000).lastOp;
// No-op applyOps
diff --git a/jstests/replsets/apply_ops_wc.js b/jstests/replsets/apply_ops_wc.js
index 0b8a49e19bd..b0c4fed59c4 100644
--- a/jstests/replsets/apply_ops_wc.js
+++ b/jstests/replsets/apply_ops_wc.js
@@ -71,7 +71,10 @@
var secondaries = replTest.getSecondaries();
- var majorityWriteConcerns = [{w: 2, wtimeout: 30000}, {w: 'majority', wtimeout: 30000}, ];
+ var majorityWriteConcerns = [
+ {w: 2, wtimeout: 30000},
+ {w: 'majority', wtimeout: 30000},
+ ];
function testMajorityWriteConcerns(wc) {
jsTest.log("Testing " + tojson(wc));
diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js
index d41ef9ba5ef..13c5f1da260 100644
--- a/jstests/replsets/auth1.js
+++ b/jstests/replsets/auth1.js
@@ -32,9 +32,8 @@ m = runMongoProgram(
"mongod", "--keyFile", key1_644, "--port", port[0], "--dbpath", MongoRunner.dataPath + name);
print("should fail with wrong permissions");
-assert.eq(m,
- _isWindows() ? 100 : 1,
- "mongod should exit w/ 1 (EXIT_FAILURE): permissions too open");
+assert.eq(
+ m, _isWindows() ? 100 : 1, "mongod should exit w/ 1 (EXIT_FAILURE): permissions too open");
MongoRunner.stopMongod(port[0]);
print("add a user to server0: foo");
@@ -74,9 +73,14 @@ assert.eq(r.x, 1);
slave.setSlaveOk();
function doQueryOn(p) {
- var error = assert.throws(function() {
- r = p.getDB("test").foo.findOne();
- }, [], "find did not throw, returned: " + tojson(r)).toString();
+ var error = assert
+ .throws(
+ function() {
+ r = p.getDB("test").foo.findOne();
+ },
+ [],
+ "find did not throw, returned: " + tojson(r))
+ .toString();
printjson(error);
assert.gt(error.indexOf("not authorized"), -1, "error was non-auth");
}
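
For readers unfamiliar with the assert helper being re-wrapped above: assert.throws takes the function under test, an array of arguments forwarded to that function, and a failure message, and it returns the error that was caught. The call above is therefore equivalent to this flatter sketch (connection handle p as in the test):

    var err = assert.throws(function() {
        return p.getDB("test").foo.findOne();  // expected to fail authorization
    }, [], "find did not throw");
    assert.gt(err.toString().indexOf("not authorized"), -1);
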
diff --git a/jstests/replsets/auth2.js b/jstests/replsets/auth2.js
index f7b8d8ab468..2300b03bf9c 100644
--- a/jstests/replsets/auth2.js
+++ b/jstests/replsets/auth2.js
@@ -44,8 +44,8 @@ rs.initiate({
var master = rs.getPrimary();
print("add an admin user");
-master.getDB("admin")
- .createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles}, {w: 3, wtimeout: 30000});
+master.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 30000});
var m = rs.nodes[0];
print("starting 1 and 2 with key file");
diff --git a/jstests/replsets/auth3.js b/jstests/replsets/auth3.js
index 3ac812bcfa1..bebee24be1b 100644
--- a/jstests/replsets/auth3.js
+++ b/jstests/replsets/auth3.js
@@ -36,11 +36,9 @@
jsTest.log("make common point");
safeInsert();
- authutil.asCluster(rs.nodes,
- keyfile,
- function() {
- rs.awaitReplication();
- });
+ authutil.asCluster(rs.nodes, keyfile, function() {
+ rs.awaitReplication();
+ });
jsTest.log("write stuff to 0&2");
rs.stop(1);
@@ -63,10 +61,8 @@
jsTest.log("doing rollback!");
- authutil.asCluster(rs.nodes,
- keyfile,
- function() {
- rs.awaitSecondaryNodes();
- });
+ authutil.asCluster(rs.nodes, keyfile, function() {
+ rs.awaitSecondaryNodes();
+ });
}());
diff --git a/jstests/replsets/batch_write_command_wc.js b/jstests/replsets/batch_write_command_wc.js
index d6f83c08e3a..b86fd8c80a8 100644
--- a/jstests/replsets/batch_write_command_wc.js
+++ b/jstests/replsets/batch_write_command_wc.js
@@ -22,7 +22,10 @@ var coll = mongod.getCollection("test.batch_write_command_wc");
//
// Basic insert, default WC
coll.remove({});
-printjson(request = {insert: coll.getName(), documents: [{a: 1}]});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}]
+});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
@@ -31,7 +34,11 @@ assert.eq(1, coll.count());
//
// Basic insert, majority WC
coll.remove({});
-printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'majority'}});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 'majority'}
+});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
@@ -40,7 +47,11 @@ assert.eq(1, coll.count());
//
// Basic insert, w:2 WC
coll.remove({});
-printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 2}});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 2}
+});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
@@ -49,7 +60,11 @@ assert.eq(1, coll.count());
//
// Basic insert, immediate nojournal error
coll.remove({});
-printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {j: true}});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {j: true}
+});
printjson(result = coll.runCommand(request));
assert(!result.ok);
assert.eq(0, coll.count());
@@ -57,8 +72,11 @@ assert.eq(0, coll.count());
//
// Basic insert, timeout wc error
coll.remove({});
-printjson(
- request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 3, wtimeout: 1}});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 3, wtimeout: 1}
+});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
@@ -69,7 +87,11 @@ assert.eq(1, coll.count());
//
// Basic insert, wmode wc error
coll.remove({});
-printjson(request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 'invalid'}});
+printjson(request = {
+ insert: coll.getName(),
+ documents: [{a: 1}],
+ writeConcern: {w: 'invalid'}
+});
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1, result.n);
diff --git a/jstests/replsets/capped_id.js b/jstests/replsets/capped_id.js
index 8708f5752f8..ee05e810d59 100644
--- a/jstests/replsets/capped_id.js
+++ b/jstests/replsets/capped_id.js
@@ -40,9 +40,12 @@ var slave1db = slave1.getDB(dbname);
var slave2db = slave2.getDB(dbname);
function countIdIndexes(theDB, coll) {
- return theDB[coll].getIndexes().filter(function(idx) {
- return friendlyEqual(idx.key, {_id: 1});
- }).length;
+ return theDB[coll]
+ .getIndexes()
+ .filter(function(idx) {
+ return friendlyEqual(idx.key, {_id: 1});
+ })
+ .length;
}
var numtests = 4;
diff --git a/jstests/replsets/chaining_removal.js b/jstests/replsets/chaining_removal.js
index dbc80148745..929e452844f 100644
--- a/jstests/replsets/chaining_removal.js
+++ b/jstests/replsets/chaining_removal.js
@@ -49,9 +49,7 @@
// write that should reach all nodes
var timeout = 60 * 1000;
- var options = {
- writeConcern: {w: numNodes, wtimeout: timeout}
- };
+ var options = {writeConcern: {w: numNodes, wtimeout: timeout}};
assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
var config = primary.getDB("local").system.replset.findOne();
diff --git a/jstests/replsets/commands_that_write_accept_wc.js b/jstests/replsets/commands_that_write_accept_wc.js
index 9f35d5c6549..83a341767c2 100644
--- a/jstests/replsets/commands_that_write_accept_wc.js
+++ b/jstests/replsets/commands_that_write_accept_wc.js
@@ -99,9 +99,7 @@ load('jstests/libs/write_concern_util.js');
});
},
reduce: function(key, values) {
- return {
- count: values.length
- };
+ return {count: values.length};
},
out: "foo"
},
@@ -120,10 +118,7 @@ load('jstests/libs/write_concern_util.js');
});
function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {
- w: 'majority',
- wtimeout: 25000
- };
+ cmd.req.writeConcern = {w: 'majority', wtimeout: 25000};
jsTest.log("Testing " + tojson(cmd.req));
dropTestCollection();
@@ -136,9 +131,7 @@ load('jstests/libs/write_concern_util.js');
}
function testInvalidWriteConcern(cmd) {
- cmd.req.writeConcern = {
- w: 'invalid'
- };
+ cmd.req.writeConcern = {w: 'invalid'};
jsTest.log("Testing " + tojson(cmd.req));
dropTestCollection();
diff --git a/jstests/replsets/config_server_checks.js b/jstests/replsets/config_server_checks.js
index 041b3a29699..f5dfab883cf 100644
--- a/jstests/replsets/config_server_checks.js
+++ b/jstests/replsets/config_server_checks.js
@@ -89,11 +89,8 @@ function expectState(rst, state) {
// fail to
// start up and won't automatically add "configsvr" to the replset config (SERVER-21236).
jsTestLog("set initiated without configsvr, restarted adding --configsvr cmd line");
- var rst = new ReplSetTest({
- name: "configrs7",
- nodes: 1,
- nodeOptions: {journal: "", storageEngine: "wiredTiger"}
- });
+ var rst = new ReplSetTest(
+ {name: "configrs7", nodes: 1, nodeOptions: {journal: "", storageEngine: "wiredTiger"}});
rst.startSet();
var conf = rst.getReplSetConfig();
diff --git a/jstests/replsets/disallow_adding_initialized_node1.js b/jstests/replsets/disallow_adding_initialized_node1.js
index 8d4491975b6..f2a6b3053fb 100644
--- a/jstests/replsets/disallow_adding_initialized_node1.js
+++ b/jstests/replsets/disallow_adding_initialized_node1.js
@@ -7,11 +7,21 @@
'use strict';
var name = 'disallow_adding_initialized_node1';
- var replSetA = new ReplSetTest({name: name, nodes: [{rsConfig: {_id: 10}}, ]});
+ var replSetA = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {_id: 10}},
+ ]
+ });
replSetA.startSet({dbpath: "$set-A-$node"});
replSetA.initiate();
- var replSetB = new ReplSetTest({name: name, nodes: [{rsConfig: {_id: 20}}, ]});
+ var replSetB = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {_id: 20}},
+ ]
+ });
replSetB.startSet({dbpath: "$set-B-$node"});
replSetB.initiate();
diff --git a/jstests/replsets/disallow_adding_initialized_node2.js b/jstests/replsets/disallow_adding_initialized_node2.js
index c4125f7c069..5778a3c3e8b 100644
--- a/jstests/replsets/disallow_adding_initialized_node2.js
+++ b/jstests/replsets/disallow_adding_initialized_node2.js
@@ -12,12 +12,22 @@
'use strict';
var name = 'disallow_adding_initialized_node2';
- var replSetA = new ReplSetTest(
- {name: name, nodes: [{rsConfig: {_id: 10}}, {rsConfig: {_id: 11, arbiterOnly: true}}, ]});
+ var replSetA = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {_id: 10}},
+ {rsConfig: {_id: 11, arbiterOnly: true}},
+ ]
+ });
replSetA.startSet({dbpath: "$set-A-$node"});
replSetA.initiate();
- var replSetB = new ReplSetTest({name: name, nodes: [{rsConfig: {_id: 20}}, ]});
+ var replSetB = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {_id: 20}},
+ ]
+ });
replSetB.startSet({dbpath: "$set-B-$node"});
replSetB.initiate();
diff --git a/jstests/replsets/initial_sync1.js b/jstests/replsets/initial_sync1.js
index 3977445743e..ceb614b68a6 100644
--- a/jstests/replsets/initial_sync1.js
+++ b/jstests/replsets/initial_sync1.js
@@ -77,9 +77,8 @@ wait(function() {
return config2.version == config.version && (config3 && config3.version == config.version);
});
-replTest.waitForState(slave2,
- [ReplSetTest.State.SECONDARY, ReplSetTest.State.RECOVERING],
- 60 * 1000);
+replTest.waitForState(
+ slave2, [ReplSetTest.State.SECONDARY, ReplSetTest.State.RECOVERING], 60 * 1000);
print("7. Kill the secondary in the middle of syncing");
replTest.stop(slave1);
diff --git a/jstests/replsets/initial_sync_update_missing_doc2.js b/jstests/replsets/initial_sync_update_missing_doc2.js
index 559fda41534..f8333baa41a 100644
--- a/jstests/replsets/initial_sync_update_missing_doc2.js
+++ b/jstests/replsets/initial_sync_update_missing_doc2.js
@@ -60,10 +60,7 @@
checkLog(secondary,
'initial sync - initialSyncHangBeforeGettingMissingDocument fail point enabled');
- var doc = {
- _id: 0,
- x: 3
- };
+ var doc = {_id: 0, x: 3};
// Re-insert deleted document.
assert.writeOK(coll.insert(doc, {writeConcern: {w: 1}}));
diff --git a/jstests/replsets/localhostAuthBypass.js b/jstests/replsets/localhostAuthBypass.js
index 2b2332d258a..22a512f19d0 100644
--- a/jstests/replsets/localhostAuthBypass.js
+++ b/jstests/replsets/localhostAuthBypass.js
@@ -68,17 +68,13 @@ var assertCannotRunCommands = function(mongo, isPrimary) {
{param: "userCacheInvalidationIntervalSecs", val: 300}
];
params.forEach(function(p) {
- var cmd = {
- setParameter: 1
- };
+ var cmd = {setParameter: 1};
cmd[p.param] = p.val;
assert.commandFailedWithCode(
mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "setParameter: " + p.param);
});
params.forEach(function(p) {
- var cmd = {
- getParameter: 1
- };
+ var cmd = {getParameter: 1};
cmd[p.param] = 1;
assert.commandFailedWithCode(
mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "getParameter: " + p.param);
@@ -221,8 +217,8 @@ var runNonlocalTest = function(ipAddr) {
});
assert.throws(function() {
- mongo.getDB("admin")
- .createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
+ mongo.getDB("admin").createUser(
+ {user: username, pwd: password, roles: jsTest.adminUserRoles});
});
shutdown(rs);
diff --git a/jstests/replsets/oplog_format.js b/jstests/replsets/oplog_format.js
index e597197fed0..a2c37d294a5 100644
--- a/jstests/replsets/oplog_format.js
+++ b/jstests/replsets/oplog_format.js
@@ -169,8 +169,8 @@ assertLastOplog({$set: {"a.b": [{c: 1}, {c: 2}]}}, {_id: 1}, msg);
var msg = "bad array $push $slice $sort reversed first two";
coll.save({_id: 1, a: {b: [{c: 1}, {c: 2}]}});
-res = assert.writeOK(coll.update(
- {_id: {$gt: 0}}, {$push: {"a.b": {$each: [{c: -1}], $slice: -2, $sort: {c: -1}}}}));
+res = assert.writeOK(
+ coll.update({_id: {$gt: 0}}, {$push: {"a.b": {$each: [{c: -1}], $slice: -2, $sort: {c: -1}}}}));
assert.eq(res.nModified, 1, "update failed for '" + msg + "': " + res.toString());
assert.docEq({_id: 1, a: {b: [{c: 1}, {c: -1}]}}, coll.findOne({}), msg);
assertLastOplog({$set: {"a.b": [{c: 1}, {c: -1}]}}, {_id: 1}, msg);
diff --git a/jstests/replsets/optime.js b/jstests/replsets/optime.js
index a716ca3dbca..5403ab35bcd 100644
--- a/jstests/replsets/optime.js
+++ b/jstests/replsets/optime.js
@@ -42,9 +42,7 @@ var initialInfo = master.getDB('admin').serverStatus({oplog: true}).oplog;
// Do an insert to increment optime, but without rolling the oplog
// latestOptime should be updated, but earliestOptime should be unchanged
-var options = {
- writeConcern: {w: replTest.nodes.length}
-};
+var options = {writeConcern: {w: replTest.nodes.length}};
assert.writeOK(master.getDB('test').foo.insert({a: 1}, options));
assert(optimesAreEqual(replTest));
diff --git a/jstests/replsets/pipelineout.js b/jstests/replsets/pipelineout.js
index bb86f98c4e9..91e07d9a457 100644
--- a/jstests/replsets/pipelineout.js
+++ b/jstests/replsets/pipelineout.js
@@ -12,21 +12,21 @@ var secondary = replTest.liveNodes.slaves[0].getDB(name);
// populate the collection
for (i = 0; i < 5; i++) {
- primary.in.insert({x: i});
+ primary.in .insert({x: i});
}
replTest.awaitReplication();
// make sure $out cannot be run on a secondary
assert.throws(function() {
- secondary.in.aggregate({$out: "out"}).itcount;
+ secondary.in .aggregate({$out: "out"}).itcount;
});
// even if slaveOk
secondary.setSlaveOk();
assert.throws(function() {
- secondary.in.aggregate({$out: "out"}).itcount;
+ secondary.in .aggregate({$out: "out"}).itcount;
});
// run one and check for proper replication
-primary.in.aggregate({$out: "out"}).itcount;
+primary.in .aggregate({$out: "out"}).itcount;
replTest.awaitReplication();
assert.eq(primary.out.find().sort({x: 1}).toArray(), secondary.out.find().sort({x: 1}).toArray());
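
The odd "primary.in .insert" spacing introduced above appears to come from clang-format's JS formatter treating the property name "in" as the relational operator keyword and padding it with a space. The result is still valid JavaScript: whitespace around the member-access dot is insignificant, and ES5 allows reserved words as property names after a dot, so these sketch lines are equivalent:

    primary.in .insert({x: 0});    // as emitted by the formatter
    primary["in"].insert({x: 0});  // the same property access, spelled explicitly
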
diff --git a/jstests/replsets/priority_takeover_one_node_higher_priority.js b/jstests/replsets/priority_takeover_one_node_higher_priority.js
index e718ef131f9..8c909c643c3 100644
--- a/jstests/replsets/priority_takeover_one_node_higher_priority.js
+++ b/jstests/replsets/priority_takeover_one_node_higher_priority.js
@@ -8,8 +8,14 @@
load('jstests/replsets/rslib.js');
var name = 'priority_takeover_one_node_higher_priority';
- var replSet = new ReplSetTest(
- {name: name, nodes: [{rsConfig: {priority: 3}}, {}, {rsConfig: {arbiterOnly: true}}, ]});
+ var replSet = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {priority: 3}},
+ {},
+ {rsConfig: {arbiterOnly: true}},
+ ]
+ });
replSet.startSet();
replSet.initiate();
diff --git a/jstests/replsets/priority_takeover_two_nodes_equal_priority.js b/jstests/replsets/priority_takeover_two_nodes_equal_priority.js
index 42134f9430e..b6e8cc25b77 100644
--- a/jstests/replsets/priority_takeover_two_nodes_equal_priority.js
+++ b/jstests/replsets/priority_takeover_two_nodes_equal_priority.js
@@ -8,8 +8,14 @@ load('jstests/replsets/rslib.js');
'use strict';
var name = 'priority_takeover_two_nodes_equal_priority';
- var replSet = new ReplSetTest(
- {name: name, nodes: [{rsConfig: {priority: 3}}, {rsConfig: {priority: 3}}, {}, ]});
+ var replSet = new ReplSetTest({
+ name: name,
+ nodes: [
+ {rsConfig: {priority: 3}},
+ {rsConfig: {priority: 3}},
+ {},
+ ]
+ });
replSet.startSet();
replSet.initiate();
diff --git a/jstests/replsets/read_committed.js b/jstests/replsets/read_committed.js
index be051ffbee8..bed22c08248 100644
--- a/jstests/replsets/read_committed.js
+++ b/jstests/replsets/read_committed.js
@@ -11,9 +11,7 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
(function() {
"use strict";
- const majorityWriteConcern = {
- writeConcern: {w: "majority", wtimeout: 60 * 1000}
- };
+ const majorityWriteConcern = {writeConcern: {w: "majority", wtimeout: 60 * 1000}};
// Each test case includes a 'prepareCollection' method that sets up the initial state starting
// with an empty collection, a 'write' method that does some write, and two arrays,
@@ -105,13 +103,12 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
function readLatestOplogEntry(readConcernLevel) {
var oplog = primary.getDB('local').oplog.rs;
- var res = oplog.runCommand('find',
- {
- "readConcern": {"level": readConcernLevel},
- "maxTimeMS": 3000,
- sort: {$natural: -1},
- limit: 1,
- });
+ var res = oplog.runCommand('find', {
+ "readConcern": {"level": readConcernLevel},
+ "maxTimeMS": 3000,
+ sort: {$natural: -1},
+ limit: 1,
+ });
assert.commandWorked(res);
return new DBCommandCursor(coll.getMongo(), res).toArray()[0];
}
diff --git a/jstests/replsets/read_committed_with_catalog_changes.js b/jstests/replsets/read_committed_with_catalog_changes.js
index 14eec54cab4..03b9808f9a0 100644
--- a/jstests/replsets/read_committed_with_catalog_changes.js
+++ b/jstests/replsets/read_committed_with_catalog_changes.js
@@ -215,8 +215,8 @@ load("jstests/replsets/rslib.js"); // For startSetIfSupportsReadMajority.
}
function assertReadsSucceed(coll, timeoutMs = 10000) {
- var res = coll.runCommand('find',
- {"readConcern": {"level": "majority"}, "maxTimeMS": timeoutMs});
+ var res =
+ coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": timeoutMs});
assert.commandWorked(res, 'reading from ' + coll.getFullName());
// Exhaust the cursor to avoid leaking cursors on the server.
new DBCommandCursor(coll.getMongo(), res).itcount();
diff --git a/jstests/replsets/reconfig.js b/jstests/replsets/reconfig.js
index 1351aa1d54b..312a3cb6243 100644
--- a/jstests/replsets/reconfig.js
+++ b/jstests/replsets/reconfig.js
@@ -23,11 +23,7 @@
jsTestLog("Invalid reconfig");
config.version++;
- var badMember = {
- _id: numNodes,
- host: "localhost:12345",
- priority: "High"
- };
+ var badMember = {_id: numNodes, host: "localhost:12345", priority: "High"};
config.members.push(badMember);
var invalidConfigCode = 93;
assert.commandFailedWithCode(primary.adminCommand({replSetReconfig: config}),
diff --git a/jstests/replsets/reconfig_tags.js b/jstests/replsets/reconfig_tags.js
index 3c4d0e2616d..86a2419f4e9 100644
--- a/jstests/replsets/reconfig_tags.js
+++ b/jstests/replsets/reconfig_tags.js
@@ -12,15 +12,9 @@ var rsConfig = primary.getDB("local").system.replset.findOne();
jsTest.log('got rsconf ' + tojson(rsConfig));
rsConfig.members.forEach(function(member) {
if (member.host == primary.host) {
- member.tags = {
- dc: 'ny',
- tag: 'one'
- };
+ member.tags = {dc: 'ny', tag: 'one'};
} else {
- member.tags = {
- dc: 'ny',
- tag: 'two'
- };
+ member.tags = {dc: 'ny', tag: 'two'};
}
});
diff --git a/jstests/replsets/remove1.js b/jstests/replsets/remove1.js
index cde4974677a..f587275570a 100644
--- a/jstests/replsets/remove1.js
+++ b/jstests/replsets/remove1.js
@@ -55,9 +55,8 @@ assert.soon(function() {
});
// Now we should successfully reconnect to the secondary.
-assert.eq(secondary.getDB("admin").runCommand({ping: 1}).ok,
- 1,
- "we aren't connected to the secondary");
+assert.eq(
+ secondary.getDB("admin").runCommand({ping: 1}).ok, 1, "we aren't connected to the secondary");
reconnect(master);
diff --git a/jstests/replsets/repl_options.js b/jstests/replsets/repl_options.js
index 66a07787889..a0af2fa2573 100644
--- a/jstests/replsets/repl_options.js
+++ b/jstests/replsets/repl_options.js
@@ -3,9 +3,7 @@ var baseName = "jstests_repl_repl_options";
load('jstests/libs/command_line/test_parsed_options.js');
jsTest.log("Testing \"replSet\" command line option");
-var expectedResult = {
- "parsed": {"replication": {"replSet": "mycmdlinename"}}
-};
+var expectedResult = {"parsed": {"replication": {"replSet": "mycmdlinename"}}};
testGetCmdLineOptsMongod({replSet: "mycmdlinename"}, expectedResult);
jsTest.log("Testing \"replication.replSetName\" config file option");
diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js
index b50a939242d..d578fe0d4b4 100644
--- a/jstests/replsets/replset2.js
+++ b/jstests/replsets/replset2.js
@@ -66,9 +66,7 @@ doTest = function(signal) {
// Test write concern with a simple insert
print("replset2.js **** Try inserting a single record ****");
master.getDB(testDB).dropDatabase();
- var options = {
- writeConcern: {w: 3, wtimeout: 10000}
- };
+ var options = {writeConcern: {w: 3, wtimeout: 10000}};
assert.writeOK(master.getDB(testDB).foo.insert({n: 1}, options));
m1 = master.getDB(testDB).foo.findOne({n: 1});
diff --git a/jstests/replsets/replset5.js b/jstests/replsets/replset5.js
index c0aee6e1154..65f2b66212e 100644
--- a/jstests/replsets/replset5.js
+++ b/jstests/replsets/replset5.js
@@ -11,10 +11,7 @@ load("jstests/replsets/rslib.js");
// Initiate set with default for write concern
var config = replTest.getReplSetConfig();
config.settings = {};
- config.settings.getLastErrorDefaults = {
- 'w': 3,
- 'wtimeout': 20000
- };
+ config.settings.getLastErrorDefaults = {'w': 3, 'wtimeout': 20000};
config.settings.heartbeatTimeoutSecs = 15;
// Prevent node 2 from becoming primary, as we will attempt to set it to hidden later.
config.members[2].priority = 0;
diff --git a/jstests/replsets/replset7.js b/jstests/replsets/replset7.js
index 8b13f2ed7e2..94dff59b33f 100644
--- a/jstests/replsets/replset7.js
+++ b/jstests/replsets/replset7.js
@@ -45,7 +45,10 @@ rt.awaitSecondaryNodes();
// Do we have an index?
assert.eq(1,
- slave.getDB('d')['c'].getIndexes().filter(function(doc) {
- return (doc.v === 1 && JSON.stringify(doc.key) === JSON.stringify({x: 1}) &&
- doc.ns === 'd.c' && doc.name === 'x_1');
- }).length);
+ slave.getDB('d')['c']
+ .getIndexes()
+ .filter(function(doc) {
+ return (doc.v === 1 && JSON.stringify(doc.key) === JSON.stringify({x: 1}) &&
+ doc.ns === 'd.c' && doc.name === 'x_1');
+ })
+ .length);
diff --git a/jstests/replsets/rollback5.js b/jstests/replsets/rollback5.js
index e63b7ab34ea..42649dc5162 100644
--- a/jstests/replsets/rollback5.js
+++ b/jstests/replsets/rollback5.js
@@ -44,10 +44,7 @@ assert.soon(function() {
return res.myState == 7;
}, "Arbiter failed to initialize.");
-var options = {
- writeConcern: {w: 2, wtimeout: 60000},
- upsert: true
-};
+var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
assert.writeOK(A.foo.update({key: 'value1'}, {$set: {req: 'req'}}, options));
replTest.stop(AID);
diff --git a/jstests/replsets/rollback_auth.js b/jstests/replsets/rollback_auth.js
index 0c0b35b91ed..85f4a0c4991 100644
--- a/jstests/replsets/rollback_auth.js
+++ b/jstests/replsets/rollback_auth.js
@@ -125,15 +125,14 @@
// Modify the user and role in a way that will be rolled back.
b.grantPrivilegesToRole(
'myRole',
- [{resource: {db: 'test', collection: 'foo'}, actions: ['collStats']}],
- {}); // Default write concern will wait for majority, which will time out.
- b.createRole(
- {
- role: 'temporaryRole',
- roles: [],
- privileges: [{resource: {db: 'test', collection: 'bar'}, actions: ['collStats']}]
- },
+ [{resource: {db: 'test', collection: 'foo'}, actions: ['collStats']}],
{}); // Default write concern will wait for majority, which will time out.
+ b.createRole({
+ role: 'temporaryRole',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: 'bar'}, actions: ['collStats']}]
+ },
+ {}); // Default write concern will wait for majority, which will time out.
b.grantRolesToUser('spencer',
['temporaryRole'],
{}); // Default write concern will wait for majority, which will time out.
@@ -172,13 +171,12 @@
a.grantPrivilegesToRole(
'myRole', [{resource: {db: 'test', collection: 'baz'}, actions: ['collStats']}], {});
- a.createRole(
- {
- role: 'persistentRole',
- roles: [],
- privileges: [{resource: {db: 'test', collection: 'foobar'}, actions: ['collStats']}]
- },
- {});
+ a.createRole({
+ role: 'persistentRole',
+ roles: [],
+ privileges: [{resource: {db: 'test', collection: 'foobar'}, actions: ['collStats']}]
+ },
+ {});
a.grantRolesToUser('spencer', ['persistentRole'], {});
A.logout();
a.auth('spencer', 'pwd');
@@ -191,11 +189,9 @@
// bring B back in contact with A
// as A is primary, B will roll back and then catch up
replTest.restart(1);
- authutil.asCluster(replTest.nodes,
- 'jstests/libs/key1',
- function() {
- replTest.awaitReplication();
- });
+ authutil.asCluster(replTest.nodes, 'jstests/libs/key1', function() {
+ replTest.awaitReplication();
+ });
assert.soon(function() {
return b.auth('spencer', 'pwd');
});
diff --git a/jstests/replsets/rollback_cmd_unrollbackable.js b/jstests/replsets/rollback_cmd_unrollbackable.js
index 801d4c285a7..cf176ab312d 100644
--- a/jstests/replsets/rollback_cmd_unrollbackable.js
+++ b/jstests/replsets/rollback_cmd_unrollbackable.js
@@ -30,10 +30,7 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {
- writeConcern: {w: 2, wtimeout: 60000},
- upsert: true
-};
+var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
diff --git a/jstests/replsets/rollback_collMod_PowerOf2Sizes.js b/jstests/replsets/rollback_collMod_PowerOf2Sizes.js
index deab19b2f09..24177ebe6b7 100644
--- a/jstests/replsets/rollback_collMod_PowerOf2Sizes.js
+++ b/jstests/replsets/rollback_collMod_PowerOf2Sizes.js
@@ -43,10 +43,7 @@
};
assert.commandWorked(a_conn.getDB(name).createCollection('foo', originalCollectionOptions));
- var options = {
- writeConcern: {w: 2, wtimeout: 60000},
- upsert: true
- };
+ var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
assert.eq(getOptions(a_conn), originalCollectionOptions);
@@ -64,9 +61,8 @@
validationLevel: "moderate",
validationAction: "warn"
}));
- assert.eq(
- getOptions(a_conn),
- {flags: 2, validator: {a: 1}, validationLevel: "moderate", validationAction: "warn"});
+ assert.eq(getOptions(a_conn),
+ {flags: 2, validator: {a: 1}, validationLevel: "moderate", validationAction: "warn"});
// Shut down A and fail over to B.
replTest.stop(AID);
@@ -76,10 +72,7 @@
b_conn = master;
// Do a write on B so that A will have to roll back.
- options = {
- writeConcern: {w: 1, wtimeout: 60000},
- upsert: true
- };
+ options = {writeConcern: {w: 1, wtimeout: 60000}, upsert: true};
assert.writeOK(b_conn.getDB(name).foo.insert({x: 2}, options));
// Restart A, which should rollback the collMod before becoming primary.
diff --git a/jstests/replsets/rollback_collMod_fatal.js b/jstests/replsets/rollback_collMod_fatal.js
index c907213f05d..0fcec31f6fc 100644
--- a/jstests/replsets/rollback_collMod_fatal.js
+++ b/jstests/replsets/rollback_collMod_fatal.js
@@ -31,10 +31,7 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {
- writeConcern: {w: 2, wtimeout: 60000},
- upsert: true
-};
+var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
a_conn.getDB(name).foo.ensureIndex({x: 1}, {expireAfterSeconds: 3600});
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
diff --git a/jstests/replsets/rollback_different_h.js b/jstests/replsets/rollback_different_h.js
index 4b9aede1bbc..1d753ea46c6 100644
--- a/jstests/replsets/rollback_different_h.js
+++ b/jstests/replsets/rollback_different_h.js
@@ -42,10 +42,7 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {
- writeConcern: {w: 2, wtimeout: 60000},
- upsert: true
-};
+var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
diff --git a/jstests/replsets/rollback_dropdb.js b/jstests/replsets/rollback_dropdb.js
index c11b14ab06e..818d74b2ad2 100644
--- a/jstests/replsets/rollback_dropdb.js
+++ b/jstests/replsets/rollback_dropdb.js
@@ -31,10 +31,7 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {
- writeConcern: {w: 2, wtimeout: 60000},
- upsert: true
-};
+var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
diff --git a/jstests/replsets/rollback_empty_ns.js b/jstests/replsets/rollback_empty_ns.js
index f6a07319eb4..44be3b0bbb4 100644
--- a/jstests/replsets/rollback_empty_ns.js
+++ b/jstests/replsets/rollback_empty_ns.js
@@ -42,10 +42,7 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {
- writeConcern: {w: 2, wtimeout: 60000},
- upsert: true
-};
+var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
diff --git a/jstests/replsets/rollback_empty_o.js b/jstests/replsets/rollback_empty_o.js
index f3468fcde5e..ebce959f704 100644
--- a/jstests/replsets/rollback_empty_o.js
+++ b/jstests/replsets/rollback_empty_o.js
@@ -42,10 +42,7 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {
- writeConcern: {w: 2, wtimeout: 60000},
- upsert: true
-};
+var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
diff --git a/jstests/replsets/rollback_empty_o2.js b/jstests/replsets/rollback_empty_o2.js
index 56eb8512575..bf13aac67ad 100644
--- a/jstests/replsets/rollback_empty_o2.js
+++ b/jstests/replsets/rollback_empty_o2.js
@@ -42,10 +42,7 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {
- writeConcern: {w: 2, wtimeout: 60000},
- upsert: true
-};
+var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
diff --git a/jstests/replsets/rollback_fake_cmd.js b/jstests/replsets/rollback_fake_cmd.js
index 175359121f8..f6ce306ba08 100644
--- a/jstests/replsets/rollback_fake_cmd.js
+++ b/jstests/replsets/rollback_fake_cmd.js
@@ -42,10 +42,7 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {
- writeConcern: {w: 2, wtimeout: 60000},
- upsert: true
-};
+var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
diff --git a/jstests/replsets/rollback_index.js b/jstests/replsets/rollback_index.js
index 6fb3044b740..ca03075ad2b 100644
--- a/jstests/replsets/rollback_index.js
+++ b/jstests/replsets/rollback_index.js
@@ -44,10 +44,7 @@ replTest.waitForState(replTest.nodes[0], ReplSetTest.State.PRIMARY, 60 * 1000);
var master = replTest.getPrimary();
assert(master === conns[0], "conns[0] assumed to be master");
assert(a_conn.host === master.host, "a_conn assumed to be master");
-var options = {
- writeConcern: {w: 2, wtimeout: 60000},
- upsert: true
-};
+var options = {writeConcern: {w: 2, wtimeout: 60000}, upsert: true};
assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
// shut down master
@@ -97,9 +94,9 @@ assert.writeOK(a_conn.getDB(name).foo.insert({x: 1}, options));
assert.eq(3,
b_conn.getDB(name).foo.count(),
'Collection on B does not have the same number of documents as A');
-assert.eq(a_conn.getDB(name).foo.getIndexes().length,
- b_conn.getDB(name).foo.getIndexes().length,
- 'Unique index not dropped during rollback: ' +
- tojson(b_conn.getDB(name).foo.getIndexes()));
+assert.eq(
+ a_conn.getDB(name).foo.getIndexes().length,
+ b_conn.getDB(name).foo.getIndexes().length,
+ 'Unique index not dropped during rollback: ' + tojson(b_conn.getDB(name).foo.getIndexes()));
replTest.stopSet();
diff --git a/jstests/replsets/rollback_too_new.js b/jstests/replsets/rollback_too_new.js
index e0a88e12f31..2e8e4d3693d 100644
--- a/jstests/replsets/rollback_too_new.js
+++ b/jstests/replsets/rollback_too_new.js
@@ -28,17 +28,13 @@
// get master and do an initial write
var master = replTest.getPrimary();
- var options = {
- writeConcern: {w: 2, wtimeout: 60000}
- };
+ var options = {writeConcern: {w: 2, wtimeout: 60000}};
assert.writeOK(master.getDB(name).foo.insert({x: 1}, options));
// add an oplog entry from the distant future as the most recent entry on node C
var future_oplog_entry = conns[2].getDB("local").oplog.rs.find().sort({$natural: -1})[0];
future_oplog_entry["ts"] = new Timestamp(future_oplog_entry["ts"].getTime() + 200000, 1);
- options = {
- writeConcern: {w: 1, wtimeout: 60000}
- };
+ options = {writeConcern: {w: 1, wtimeout: 60000}};
assert.writeOK(conns[2].getDB("local").oplog.rs.insert(future_oplog_entry, options));
replTest.stop(CID);
diff --git a/jstests/replsets/server_status_metrics.js b/jstests/replsets/server_status_metrics.js
index fb7f92fe55f..05a3b4bccc0 100644
--- a/jstests/replsets/server_status_metrics.js
+++ b/jstests/replsets/server_status_metrics.js
@@ -56,11 +56,7 @@ assert.writeOK(bulk.execute({w: 2}));
testSecondaryMetrics(secondary, 1000, secondaryBaseOplogInserts);
-var options = {
- writeConcern: {w: 2},
- multi: true,
- upsert: true
-};
+var options = {writeConcern: {w: 2}, multi: true, upsert: true};
assert.writeOK(testDB.a.update({}, {$set: {d: new Date()}}, options));
testSecondaryMetrics(secondary, 2000, secondaryBaseOplogInserts);
diff --git a/jstests/replsets/stepdown3.js b/jstests/replsets/stepdown3.js
index d0da019f7a1..968877a2069 100644
--- a/jstests/replsets/stepdown3.js
+++ b/jstests/replsets/stepdown3.js
@@ -19,9 +19,7 @@
// do another write, because the first one might be longer than 10 seconds ago
// on the secondary (due to starting up), and we need to be within 10 seconds
// to step down.
- var options = {
- writeConcern: {w: 2, wtimeout: 30000}
- };
+ var options = {writeConcern: {w: 2, wtimeout: 30000}};
assert.writeOK(master.getDB("test").foo.insert({x: 2}, options));
// lock secondary, to pause replication
print("\nlock secondary");
diff --git a/jstests/replsets/stepdown_catch_up_opt.js b/jstests/replsets/stepdown_catch_up_opt.js
index 304927a7838..48bd11adde1 100644
--- a/jstests/replsets/stepdown_catch_up_opt.js
+++ b/jstests/replsets/stepdown_catch_up_opt.js
@@ -26,10 +26,10 @@
var stringNotIntCode = 14;
// Expect a failure with a string argument.
- assert.commandFailedWithCode(primary.getDB('admin').runCommand(
- {replSetStepDown: 10, secondaryCatchUpPeriodSecs: 'STR'}),
- stringNotIntCode,
- 'Expected string argument to secondaryCatchupPeriodSecs to fail.');
+ assert.commandFailedWithCode(
+ primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 'STR'}),
+ stringNotIntCode,
+ 'Expected string argument to secondaryCatchupPeriodSecs to fail.');
// Expect a failure with a longer secondaryCatchupPeriodSecs than the stepdown period.
assert.commandFailedWithCode(
@@ -44,8 +44,8 @@
'Failed to configure rsSyncApplyStop failpoint.');
function disableFailPoint() {
- assert.commandWorked(secondary.getDB('admin')
- .runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
+ assert.commandWorked(secondary.getDB('admin').runCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'off'}),
'Failed to disable rsSyncApplyStop failpoint.');
}
@@ -60,8 +60,7 @@
jsTestLog('Try to step down.');
var startTime = new Date();
assert.commandFailedWithCode(
- primary.getDB('admin')
- .runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 1}),
+ primary.getDB('admin').runCommand({replSetStepDown: 10, secondaryCatchUpPeriodSecs: 1}),
noCaughtUpSecondariesCode,
'Expected replSetStepDown to fail, since no secondaries should be caught up.');
var endTime = new Date();
diff --git a/jstests/replsets/stepdown_killop.js b/jstests/replsets/stepdown_killop.js
index c5fc593239b..9185517e6c7 100644
--- a/jstests/replsets/stepdown_killop.js
+++ b/jstests/replsets/stepdown_killop.js
@@ -38,11 +38,11 @@
// do a write then ask the PRIMARY to stepdown
jsTestLog("Initiating stepdown");
- assert.writeOK(primary.getDB(name)
- .foo.insert({myDoc: true, x: 1}, {writeConcern: {w: 1, wtimeout: 60000}}));
+ assert.writeOK(primary.getDB(name).foo.insert({myDoc: true, x: 1},
+ {writeConcern: {w: 1, wtimeout: 60000}}));
var stepDownCmd = function() {
- var res = db.getSiblingDB('admin')
- .runCommand({replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60});
+ var res = db.getSiblingDB('admin').runCommand(
+ {replSetStepDown: 60, secondaryCatchUpPeriodSecs: 60});
assert.commandFailedWithCode(res, 11601 /*interrupted*/);
};
var stepDowner = startParallelShell(stepDownCmd, primary.port);
diff --git a/jstests/replsets/stepdown_long_wait_time.js b/jstests/replsets/stepdown_long_wait_time.js
index 60e0fdb4247..8ef398224f2 100644
--- a/jstests/replsets/stepdown_long_wait_time.js
+++ b/jstests/replsets/stepdown_long_wait_time.js
@@ -32,9 +32,7 @@
'Failed to configure rsSyncApplyStop failpoint.');
jsTestLog("do a write then ask the PRIMARY to stepdown");
- var options = {
- writeConcern: {w: 1, wtimeout: 60000}
- };
+ var options = {writeConcern: {w: 1, wtimeout: 60000}};
assert.writeOK(primary.getDB(name).foo.insert({x: 1}, options));
var stepDownSecs = 60;
var secondaryCatchUpPeriodSecs = 60;
diff --git a/jstests/replsets/sync2.js b/jstests/replsets/sync2.js
index 481f59a13d9..8624d913538 100644
--- a/jstests/replsets/sync2.js
+++ b/jstests/replsets/sync2.js
@@ -35,9 +35,7 @@ assert.soon(function() {
replTest.awaitReplication();
jsTestLog("Checking that ops still replicate correctly");
-var option = {
- writeConcern: {w: 5, wtimeout: 30000}
-};
+var option = {writeConcern: {w: 5, wtimeout: 30000}};
// In PV0, this write can fail as a result of a bad spanning tree. If 2 was syncing from 4 prior to
// bridging, it will not change sync sources and receive the write in time. This was not a problem
// in 3.0 because the old version of mongobridge caused all the nodes to restart during
diff --git a/jstests/replsets/tags.js b/jstests/replsets/tags.js
index 55a0c4e2927..a079c4c9bb9 100644
--- a/jstests/replsets/tags.js
+++ b/jstests/replsets/tags.js
@@ -98,9 +98,7 @@
jsTestLog('Node ' + nodeId + ' (' + replTest.nodes[nodeId].host + ') should be primary.');
replTest.waitForState(replTest.nodes[nodeId], ReplSetTest.State.PRIMARY, 60 * 1000);
primary = replTest.getPrimary();
- var writeConcern = {
- writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}
- };
+ var writeConcern = {writeConcern: {w: expectedWritableNodes, wtimeout: 30 * 1000}};
assert.writeOK(primary.getDB('foo').bar.insert({x: 100}, writeConcern));
return primary;
};
@@ -125,9 +123,7 @@
jsTestLog('partitions: nodes with each set of brackets [N1, N2, N3] form a complete network.');
jsTestLog('partitions: [0-1-2] [3] [4] (only nodes 0 and 1 can replicate from primary node 2)');
- var doc = {
- x: 1
- };
+ var doc = {x: 1};
// This timeout should be shorter in duration than the server parameter maxSyncSourceLagSecs.
// Some writes are expected to block for this 'timeout' duration before failing.
@@ -140,9 +136,7 @@
primary = ensurePrimary(2, 3);
jsTestLog('Non-existent write concern should be rejected.');
- options = {
- writeConcern: {w: 'blahblah', wtimeout: timeout}
- };
+ options = {writeConcern: {w: 'blahblah', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
var result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
@@ -151,9 +145,7 @@
tojson(result.getWriteConcernError()));
jsTestLog('Write concern "3 or 4" should fail - 3 and 4 are not connected to the primary.');
- var options = {
- writeConcern: {w: '3 or 4', wtimeout: timeout}
- };
+ var options = {writeConcern: {w: '3 or 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = primary.getDB('foo').bar.insert(doc, options);
assert.neq(null, result.getWriteConcernError());
@@ -166,16 +158,12 @@
jsTestLog('Write concern "3 or 4" should work - 4 is now connected to the primary ' +
primary.host + ' via node 1 ' + replTest.nodes[1].host);
- options = {
- writeConcern: {w: '3 or 4', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '3 or 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "3 and 4" should fail - 3 is not connected to the primary.');
- options = {
- writeConcern: {w: '3 and 4', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
@@ -190,31 +178,23 @@
jsTestLog('31003 should sync from 31004 (31024)');
jsTestLog('Write concern "3 and 4" should work - ' +
'nodes 3 and 4 are connected to primary via node 1.');
- options = {
- writeConcern: {w: '3 and 4', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" - writes to primary only.');
- options = {
- writeConcern: {w: '2', wtimeout: 0}
- };
+ options = {writeConcern: {w: '2', wtimeout: 0}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "1 and 2"');
- options = {
- writeConcern: {w: '1 and 2', wtimeout: 0}
- };
+ options = {writeConcern: {w: '1 and 2', wtimeout: 0}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2 dc and 3 server"');
primary = ensurePrimary(2, 5);
- options = {
- writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '2 dc and 3 server', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
@@ -239,17 +219,13 @@
primary = ensurePrimary(1, 4);
jsTestLog('Write concern "3 and 4" should still work with new primary node 1 ' + primary.host);
- options = {
- writeConcern: {w: '3 and 4', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '3 and 4', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
assert.writeOK(primary.getDB('foo').bar.insert(doc, options));
jsTestLog('Write concern "2" should fail because node 2 ' + replTest.nodes[2].host +
' is down.');
- options = {
- writeConcern: {w: '2', wtimeout: timeout}
- };
+ options = {writeConcern: {w: '2', wtimeout: timeout}};
assert.writeOK(primary.getDB('foo').bar.insert(doc));
result = assert.writeError(primary.getDB('foo').bar.insert(doc, options));
assert.neq(null, result.getWriteConcernError());
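The named modes exercised above ('3 or 4', '3 and 4', '2 dc and 3 server') are not built in; they resolve through getLastErrorModes in the replica set configuration. A minimal sketch of declaring such a mode — the member indexes, the 'machine' tag name, and the ReplSetTest handle 'replTest' are assumptions, not part of this patch:

// Tag two members, then define a mode requiring acknowledgement from members
// covering two distinct values of the 'machine' tag.
var conf = replTest.getReplSetConfigFromNode();
conf.members[3].tags = {machine: "3"};
conf.members[4].tags = {machine: "4"};
conf.settings = conf.settings || {};
conf.settings.getLastErrorModes = {'3 and 4': {machine: 2}};
conf.version += 1;
assert.commandWorked(replTest.getPrimary().adminCommand({replSetReconfig: conf}));
// Now {writeConcern: {w: '3 and 4', wtimeout: timeout}} is a resolvable mode.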
diff --git a/jstests/replsets/two_initsync.js b/jstests/replsets/two_initsync.js
index 1f2b526d61e..a9d47eb3ff1 100644
--- a/jstests/replsets/two_initsync.js
+++ b/jstests/replsets/two_initsync.js
@@ -56,10 +56,8 @@ doTest = function(signal) {
var a = replTest.getPrimary().getDB("two");
for (var i = 0; i < 20000; i++)
- a.coll.insert({
- i: i,
- s: "a b"
- });
+ a.coll.insert(
+ {i: i, s: "a b"});
// Start a second node
var second = replTest.add();
diff --git a/jstests/replsets/user_management_wc.js b/jstests/replsets/user_management_wc.js
index 691a2520544..3aa186df277 100644
--- a/jstests/replsets/user_management_wc.js
+++ b/jstests/replsets/user_management_wc.js
@@ -132,10 +132,7 @@ load('jstests/multiVersion/libs/auth_helpers.js');
}
function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {
- w: 'majority',
- wtimeout: 25000
- };
+ cmd.req.writeConcern = {w: 'majority', wtimeout: 25000};
jsTest.log("Testing " + tojson(cmd.req));
dropUsersAndRoles();
@@ -148,9 +145,7 @@ load('jstests/multiVersion/libs/auth_helpers.js');
}
function testInvalidWriteConcern(cmd) {
- cmd.req.writeConcern = {
- w: 15
- };
+ cmd.req.writeConcern = {w: 15};
jsTest.log("Testing " + tojson(cmd.req));
dropUsersAndRoles();
diff --git a/jstests/serial_run/election_timing.js b/jstests/serial_run/election_timing.js
index 2fe83be02ed..16a3bf4e519 100644
--- a/jstests/serial_run/election_timing.js
+++ b/jstests/serial_run/election_timing.js
@@ -130,8 +130,8 @@
});
var resAvg = Array.avg(allResults);
- var resMin = Math.min(... allResults);
- var resMax = Math.max(... allResults);
+ var resMin = Math.min(...allResults);
+ var resMax = Math.max(...allResults);
var resStdDev = Array.stdDev(allResults);
jsTestLog("Results: " + tc.name + " Average over " + allResults.length + " runs: " +
diff --git a/jstests/sharding/SERVER-7379.js b/jstests/sharding/SERVER-7379.js
index bdf311cbf6e..a98161f101e 100644
--- a/jstests/sharding/SERVER-7379.js
+++ b/jstests/sharding/SERVER-7379.js
@@ -7,11 +7,7 @@ st.adminCommand(
var db = st.s.getDB('test');
var offerChange = db.getCollection('offerChange');
-var testDoc = {
- "_id": 123,
- "categoryId": 9881,
- "store": "NEW"
-};
+var testDoc = {"_id": 123, "categoryId": 9881, "store": "NEW"};
offerChange.remove({}, false);
offerChange.insert(testDoc);
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index 7af23a4ab5b..6579d89686d 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -55,13 +55,15 @@
assert.eq("add_shard2_rs1", shard._id, "t2 name");
// step 3. replica set w/ name given
- assert(s.admin.runCommand({
- "addshard": "add_shard2_rs2/" + getHostName() + ":" + master2.port,
- "name": "myshard"
- }).ok,
+ assert(s.admin
+ .runCommand({
+ "addshard": "add_shard2_rs2/" + getHostName() + ":" + master2.port,
+ "name": "myshard"
+ })
+ .ok,
"failed to add shard in step 4");
- shard = s.getDB("config")
- .shards.findOne({"_id": {"$nin": ["shard0000", "bar", "add_shard2_rs1"]}});
+ shard =
+ s.getDB("config").shards.findOne({"_id": {"$nin": ["shard0000", "bar", "add_shard2_rs1"]}});
assert(shard, "shard wasn't found");
assert.eq("myshard", shard._id, "t3 name");
@@ -77,15 +79,18 @@
// step 5. replica set w/ a wrong host
var portWithoutHostRunning = allocatePort();
- assert(!s.admin.runCommand(
- {addshard: "add_shard2_rs2/NonExistingHost:" + portWithoutHostRunning}).ok,
- "accepted bad hostname in step 5");
+ assert(
+ !s.admin.runCommand({addshard: "add_shard2_rs2/NonExistingHost:" + portWithoutHostRunning})
+ .ok,
+ "accepted bad hostname in step 5");
// step 6. replica set w/ mixed wrong/right hosts
- assert(!s.admin.runCommand({
- addshard: "add_shard2_rs2/" + getHostName() + ":" + master2.port + ",foo:" +
- portWithoutHostRunning
- }).ok,
+ assert(!s.admin
+ .runCommand({
+ addshard: "add_shard2_rs2/" + getHostName() + ":" + master2.port + ",foo:" +
+ portWithoutHostRunning
+ })
+ .ok,
"accepted bad hostname in step 6");
// Cannot add invalid stand alone host.
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index ea3ed974cc5..f3fe71a5950 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -3,23 +3,11 @@
(function() {
'use strict';
- var adminUser = {
- db: "admin",
- username: "foo",
- password: "bar"
- };
-
- var testUser = {
- db: "test",
- username: "bar",
- password: "baz"
- };
-
- var testUserReadOnly = {
- db: "test",
- username: "sad",
- password: "bat"
- };
+ var adminUser = {db: "admin", username: "foo", password: "bar"};
+
+ var testUser = {db: "test", username: "bar", password: "baz"};
+
+ var testUserReadOnly = {db: "test", username: "sad", password: "bat"};
function login(userObj, thingToUse) {
if (!thingToUse) {
@@ -58,9 +46,11 @@
}
print("Configuration: Add user " + tojson(adminUser));
- s.getDB(adminUser.db)
- .createUser(
- {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
+ s.getDB(adminUser.db).createUser({
+ user: adminUser.username,
+ pwd: adminUser.password,
+ roles: jsTest.adminUserRoles
+ });
login(adminUser);
// Set the chunk size, disable the secondary throttle (so the test doesn't run so slowly)
@@ -80,11 +70,9 @@
d1.initiate();
print("d1 initiated");
- var shardName = authutil.asCluster(d1.nodes,
- "jstests/libs/key2",
- function() {
- return getShardName(d1);
- });
+ var shardName = authutil.asCluster(d1.nodes, "jstests/libs/key2", function() {
+ return getShardName(d1);
+ });
print("adding shard w/out auth " + shardName);
logout(adminUser);
@@ -124,15 +112,16 @@
d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
- s.getDB(testUser.db)
- .createUser(
- {user: testUser.username, pwd: testUser.password, roles: jsTest.basicUserRoles});
- s.getDB(testUserReadOnly.db)
- .createUser({
- user: testUserReadOnly.username,
- pwd: testUserReadOnly.password,
- roles: jsTest.readOnlyUserRoles
- });
+ s.getDB(testUser.db).createUser({
+ user: testUser.username,
+ pwd: testUser.password,
+ roles: jsTest.basicUserRoles
+ });
+ s.getDB(testUserReadOnly.db).createUser({
+ user: testUserReadOnly.username,
+ pwd: testUserReadOnly.password,
+ roles: jsTest.readOnlyUserRoles
+ });
logout(adminUser);
@@ -162,11 +151,9 @@
d2.initiate();
d2.awaitSecondaryNodes();
- shardName = authutil.asCluster(d2.nodes,
- "jstests/libs/key1",
- function() {
- return getShardName(d2);
- });
+ shardName = authutil.asCluster(d2.nodes, "jstests/libs/key1", function() {
+ return getShardName(d2);
+ });
print("adding shard " + shardName);
login(adminUser);
@@ -254,16 +241,12 @@
d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
d2.waitForState(d2.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
- authutil.asCluster(d1.nodes,
- "jstests/libs/key1",
- function() {
- d1.awaitReplication(120000);
- });
- authutil.asCluster(d2.nodes,
- "jstests/libs/key1",
- function() {
- d2.awaitReplication(120000);
- });
+ authutil.asCluster(d1.nodes, "jstests/libs/key1", function() {
+ d1.awaitReplication(120000);
+ });
+ authutil.asCluster(d2.nodes, "jstests/libs/key1", function() {
+ d2.awaitReplication(120000);
+ });
// add admin on shard itself, hack to prevent localhost auth bypass
d1.getPrimary()
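authutil.asCluster, used throughout this test, authenticates against each given node with the keyfile's internal credentials, runs the callback, and logs out again. A sketch, assuming an already-running ReplSetTest 'rst':

var oplogEntries = authutil.asCluster(rst.nodes, "jstests/libs/key1", function() {
    // Runs with cluster-internal privileges on all of rst's nodes.
    return rst.getPrimary().getDB("local").oplog.rs.count();
});
print("oplog entries: " + oplogEntries);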
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index 03e77848974..deb6512a6b0 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -3,10 +3,7 @@
*/
var doTest = function() {
- var rsOpts = {
- oplogSize: 10,
- useHostname: false
- };
+ var rsOpts = {oplogSize: 10, useHostname: false};
var st = new ShardingTest({
keyFile: 'jstests/libs/key1',
shards: 2,
@@ -132,12 +129,10 @@ var doTest = function() {
assert.eq(100, res.results.length);
assert.eq(45, res.results[0].value);
- res = checkCommandSucceeded(
- testDB,
- {
- aggregate: 'foo',
- pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
- });
+ res = checkCommandSucceeded(testDB, {
+ aggregate: 'foo',
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
+ });
assert.eq(4500, res.result[0].sum);
} else {
print("Checking read operations, should fail");
@@ -148,12 +143,10 @@ var doTest = function() {
checkCommandFailed(testDB, {collstats: 'foo'});
checkCommandFailed(testDB,
{mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
- checkCommandFailed(
- testDB,
- {
- aggregate: 'foo',
- pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
- });
+ checkCommandFailed(testDB, {
+ aggregate: 'foo',
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
+ });
}
};
@@ -233,10 +226,7 @@ var doTest = function() {
checkCommandSucceeded(adminDB, {isdbgrid: 1});
checkCommandSucceeded(adminDB, {ismaster: 1});
checkCommandFailed(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
- chunkKey = {
- i: {$minKey: 1},
- j: {$minKey: 1}
- };
+ chunkKey = {i: {$minKey: 1}, j: {$minKey: 1}};
checkCommandFailed(
adminDB,
{moveChunk: 'test.foo', find: chunkKey, to: st.rs1.name, _waitForDelete: true});
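Both aggregate hunks above issue the raw command form so the result document can be inspected directly; the shell helper wraps the same request. A sketch with a hypothetical collection 'foo':

var raw = testDB.runCommand({
    aggregate: 'foo',
    pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
});
// Equivalent via the helper, which throws on error instead of returning ok: 0.
var viaHelper = testDB.foo.aggregate([
    {$project: {j: 1}},
    {$group: {_id: 'j', sum: {$sum: '$j'}}}
]).toArray();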
diff --git a/jstests/sharding/auth_add_shard.js b/jstests/sharding/auth_add_shard.js
index 4f0fec6de83..b24afd0172c 100644
--- a/jstests/sharding/auth_add_shard.js
+++ b/jstests/sharding/auth_add_shard.js
@@ -11,11 +11,7 @@
}
// admin user object
- var adminUser = {
- db: "admin",
- username: "foo",
- password: "bar"
- };
+ var adminUser = {db: "admin", username: "foo", password: "bar"};
// set up a 2 shard cluster with keyfile
var st = new ShardingTest(
@@ -28,9 +24,11 @@
// add the admin user
print("adding user");
- mongos.getDB(adminUser.db)
- .createUser(
- {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
+ mongos.getDB(adminUser.db).createUser({
+ user: adminUser.username,
+ pwd: adminUser.password,
+ roles: jsTest.adminUserRoles
+ });
// login as admin user
login(adminUser);
diff --git a/jstests/sharding/auth_no_config_primary.js b/jstests/sharding/auth_no_config_primary.js
index a4be8806f66..3bb1ea1cf4c 100644
--- a/jstests/sharding/auth_no_config_primary.js
+++ b/jstests/sharding/auth_no_config_primary.js
@@ -33,8 +33,8 @@
assert.eq('world', res.hello);
// Test authenticate through new mongos.
- var otherMongos = MongoRunner.runMongos(
- {keyFile: "jstests/libs/key1", configdb: st.s.savedOptions.configdb});
+ var otherMongos =
+ MongoRunner.runMongos({keyFile: "jstests/libs/key1", configdb: st.s.savedOptions.configdb});
assert.commandFailedWithCode(otherMongos.getDB('test').runCommand({find: 'user'}),
ErrorCodes.Unauthorized);
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index a01314fe405..c950730c799 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -30,9 +30,7 @@ function doesRouteToSec(coll, query) {
return cmdRes.secondary;
}
-var rsOpts = {
- oplogSize: 50
-};
+var rsOpts = {oplogSize: 50};
var st = new ShardingTest(
{keyFile: 'jstests/libs/key1', shards: 1, rs: rsOpts, other: {nopreallocj: 1}});
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index 6484c729474..67f87fc59b4 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -10,12 +10,8 @@
var adminUser = {
user: "admin",
pwd: "a",
- roles: [
- "readWriteAnyDatabase",
- "dbAdminAnyDatabase",
- "userAdminAnyDatabase",
- "clusterAdmin"
- ]
+ roles:
+ ["readWriteAnyDatabase", "dbAdminAnyDatabase", "userAdminAnyDatabase", "clusterAdmin"]
};
var test1User = {
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index df27078784b..f9af413f470 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -10,12 +10,8 @@
var adminUser = {
user: "admin",
pwd: "a",
- roles: [
- "readWriteAnyDatabase",
- "dbAdminAnyDatabase",
- "userAdminAnyDatabase",
- "clusterAdmin"
- ]
+ roles:
+ ["readWriteAnyDatabase", "dbAdminAnyDatabase", "userAdminAnyDatabase", "clusterAdmin"]
};
var test1Reader = {
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
index 3d21559f8d6..3c8eb168d7b 100644
--- a/jstests/sharding/auto2.js
+++ b/jstests/sharding/auto2.js
@@ -16,15 +16,14 @@
var i = 0;
for (var j = 0; j < 30; j++) {
- print("j:" + j + " : " +
- Date.timeFunc(function() {
- var bulk = coll.initializeUnorderedBulkOp();
- for (var k = 0; k < 100; k++) {
- bulk.insert({num: i, s: bigString});
- i++;
- }
- assert.writeOK(bulk.execute());
- }));
+ print("j:" + j + " : " + Date.timeFunc(function() {
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var k = 0; k < 100; k++) {
+ bulk.insert({num: i, s: bigString});
+ i++;
+ }
+ assert.writeOK(bulk.execute());
+ }));
}
s.startBalancer();
@@ -92,8 +91,8 @@
print("checkpoint C");
assert(Array.unique(s.config.chunks.find().toArray().map(function(z) {
- return z.shard;
- })).length == 2,
+ return z.shard;
+ })).length == 2,
"should be using both servers");
for (i = 0; i < 100; i++) {
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index 433e8167829..39c28b46448 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -12,10 +12,16 @@
other: {
chunkSize: 1,
rs0: {
- nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
},
rs1: {
- nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
}
}
});
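The rsConfig objects above merge into each member's entry in the replica set config; {priority: 0, votes: 0} yields a data-bearing node that can neither vote nor be elected, so majority acknowledgement rests on the first node alone. A sketch of the same node spec outside ShardingTest (names assumed):

var rst = new ReplSetTest({
    nodes: [
        {rsConfig: {votes: 1}},               // electable, counts toward majority
        {rsConfig: {priority: 0, votes: 0}},  // replicates only
    ]
});
rst.startSet();
rst.initiate();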
diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js
index 9c0dc61d21d..884d5bb85bb 100644
--- a/jstests/sharding/batch_write_command_sharded.js
+++ b/jstests/sharding/batch_write_command_sharded.js
@@ -65,10 +65,7 @@
assert.commandWorked(coll.getMongo().getDB("admin").runCommand({setParameter: 1, logLevel: 4}));
coll.remove({});
- request = {
- insert: coll.getName(),
- documents: documents
- };
+ request = {insert: coll.getName(), documents: documents};
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1000, result.n);
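The request object above is the raw insert command: the whole batch travels in 'documents' and the reply reports the inserted count in 'n'. A minimal sketch (collection name assumed):

var request = {insert: 'coll', documents: [{_id: 1}, {_id: 2}]};
var result = db.runCommand(request);
assert(result.ok);
assert.eq(2, result.n);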
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
index 242c1f28c09..e928eaebcf2 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
@@ -31,11 +31,7 @@ load('./jstests/libs/cleanup_orphaned_util.js');
var found = false;
for (var i = 0; i < 10000; i++) {
- var doc =
- {
- key: ObjectId()
- },
- hash = mongos.adminCommand({_hashBSONElement: doc.key}).out;
+ var doc = {key: ObjectId()}, hash = mongos.adminCommand({_hashBSONElement: doc.key}).out;
print('doc.key ' + doc.key + ' hashes to ' + hash);
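_hashBSONElement is the test-only command the loop above uses to predict where a document lands under a hashed shard key; a sketch:

var key = ObjectId();
var hash = db.adminCommand({_hashBSONElement: key}).out;  // the hash value
print(key + ' hashes to ' + hash);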
diff --git a/jstests/sharding/cleanup_orphaned_cmd_prereload.js b/jstests/sharding/cleanup_orphaned_cmd_prereload.js
index 7155baea970..05fbd8b741a 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_prereload.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_prereload.js
@@ -18,12 +18,14 @@ jsTest.log("Moving some chunks to shard1...");
assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
assert(admin.runCommand({split: coll + "", middle: {_id: 1}}).ok);
-assert(admin.runCommand(
- {moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id, _waitForDelete: true})
- .ok);
-assert(admin.runCommand(
- {moveChunk: coll + "", find: {_id: 1}, to: shards[1]._id, _waitForDelete: true})
- .ok);
+assert(
+ admin
+ .runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id, _waitForDelete: true})
+ .ok);
+assert(
+ admin
+ .runCommand({moveChunk: coll + "", find: {_id: 1}, to: shards[1]._id, _waitForDelete: true})
+ .ok);
var metadata =
st.shard1.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
@@ -52,9 +54,10 @@ assert.eq(metadata.shardVersion.t, 0);
assert.neq(metadata.collVersion.t, 0);
assert.eq(metadata.pending.length, 0);
-assert(admin.runCommand(
- {moveChunk: coll + "", find: {_id: 1}, to: shards[0]._id, _waitForDelete: true})
- .ok);
+assert(
+ admin
+ .runCommand({moveChunk: coll + "", find: {_id: 1}, to: shards[0]._id, _waitForDelete: true})
+ .ok);
var metadata =
st.shard0.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
diff --git a/jstests/sharding/coll_epoch_test0.js b/jstests/sharding/coll_epoch_test0.js
index 0ec0a5d3201..49fe99914a0 100644
--- a/jstests/sharding/coll_epoch_test0.js
+++ b/jstests/sharding/coll_epoch_test0.js
@@ -21,19 +21,18 @@ config.shards.find().forEach(function(doc) {
var createdEpoch = null;
var checkEpochs = function() {
- config.chunks.find({ns: coll + ""})
- .forEach(function(chunk) {
-
- // Make sure the epochs exist, are non-zero, and are consistent
- assert(chunk.lastmodEpoch);
- print(chunk.lastmodEpoch + "");
- assert.neq(chunk.lastmodEpoch + "", "000000000000000000000000");
- if (createdEpoch == null)
- createdEpoch = chunk.lastmodEpoch;
- else
- assert.eq(createdEpoch, chunk.lastmodEpoch);
-
- });
+ config.chunks.find({ns: coll + ""}).forEach(function(chunk) {
+
+ // Make sure the epochs exist, are non-zero, and are consistent
+ assert(chunk.lastmodEpoch);
+ print(chunk.lastmodEpoch + "");
+ assert.neq(chunk.lastmodEpoch + "", "000000000000000000000000");
+ if (createdEpoch == null)
+ createdEpoch = chunk.lastmodEpoch;
+ else
+ assert.eq(createdEpoch, chunk.lastmodEpoch);
+
+ });
};
checkEpochs();
diff --git a/jstests/sharding/commands_that_write_accept_wc_configRS.js b/jstests/sharding/commands_that_write_accept_wc_configRS.js
index c8dd99fb06d..4ae7071da82 100644
--- a/jstests/sharding/commands_that_write_accept_wc_configRS.js
+++ b/jstests/sharding/commands_that_write_accept_wc_configRS.js
@@ -59,8 +59,8 @@ load('jstests/multiVersion/libs/auth_helpers.js');
shardCollectionWithChunks(st, coll);
adminDB.system.version.update(
{_id: "authSchema"}, {"currentVersion": 3}, {upsert: true});
- localDB.getSiblingDB('admin')
- .system.version.update({_id: "authSchema"}, {"currentVersion": 3}, {upsert: true});
+ localDB.getSiblingDB('admin').system.version.update(
+ {_id: "authSchema"}, {"currentVersion": 3}, {upsert: true});
db.createUser({user: 'user1', pwd: 'pass', roles: jsTest.basicUserRoles});
assert(db.auth({mechanism: 'MONGODB-CR', user: 'user1', pwd: 'pass'}));
@@ -238,10 +238,7 @@ load('jstests/multiVersion/libs/auth_helpers.js');
var setupFunc = cmd.setupFunc;
var confirmFunc = cmd.confirmFunc;
- req.writeConcern = {
- w: 'majority',
- wtimeout: 25000
- };
+ req.writeConcern = {w: 'majority', wtimeout: 25000};
jsTest.log("Testing " + tojson(req));
dropTestData();
diff --git a/jstests/sharding/commands_that_write_accept_wc_shards.js b/jstests/sharding/commands_that_write_accept_wc_shards.js
index f5bd55550e2..e584b4ee264 100644
--- a/jstests/sharding/commands_that_write_accept_wc_shards.js
+++ b/jstests/sharding/commands_that_write_accept_wc_shards.js
@@ -404,10 +404,7 @@ load('jstests/libs/write_concern_util.js');
});
function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {
- w: 'majority',
- wtimeout: 25000
- };
+ cmd.req.writeConcern = {w: 'majority', wtimeout: 25000};
jsTest.log("Testing " + tojson(cmd.req));
dropTestDatabase();
@@ -420,9 +417,7 @@ load('jstests/libs/write_concern_util.js');
}
function testInvalidWriteConcern(cmd) {
- cmd.req.writeConcern = {
- w: 'invalid'
- };
+ cmd.req.writeConcern = {w: 'invalid'};
jsTest.log("Testing " + tojson(cmd.req));
dropTestDatabase();
diff --git a/jstests/sharding/conf_server_write_concern.js b/jstests/sharding/conf_server_write_concern.js
index c4e08939548..d6ca0e006e0 100644
--- a/jstests/sharding/conf_server_write_concern.js
+++ b/jstests/sharding/conf_server_write_concern.js
@@ -10,12 +10,12 @@ function writeToConfigTest() {
{_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 'majority'}}));
// w:1 should still work - it gets automatically upconverted to w:majority
- assert.writeOK(confDB.settings.update(
- {_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 1}}));
+ assert.writeOK(
+ confDB.settings.update({_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 1}}));
// Write concerns other than w:1 and w:majority should fail.
- assert.writeError(confDB.settings.update(
- {_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 2}}));
+ assert.writeError(
+ confDB.settings.update({_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 2}}));
st.stop();
}
diff --git a/jstests/sharding/covered_shard_key_indexes.js b/jstests/sharding/covered_shard_key_indexes.js
index 307dc241d9f..98168e7dccb 100644
--- a/jstests/sharding/covered_shard_key_indexes.js
+++ b/jstests/sharding/covered_shard_key_indexes.js
@@ -46,10 +46,9 @@ assert.eq(0, coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.t
assert.commandWorked(coll.dropIndexes());
assert.commandWorked(coll.ensureIndex({a: 1, b: 1, _id: 1}));
assert.eq(1, coll.find({a: true, b: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(0,
- coll.find({a: true, b: true}, {_id: 1, a: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
+assert.eq(
+ 0,
+ coll.find({a: true, b: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
//
//
@@ -126,20 +125,18 @@ assert.writeOK(coll.insert({_id: true, a: {b: true}, c: true}));
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({c: 1}));
assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(1,
- coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
+assert.eq(
+ 1,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1}).explain(true).executionStats.totalDocsExamined);
//
// Index with shard key query - nested query not covered even when projecting
assert.commandWorked(coll.dropIndex({c: 1}));
assert.commandWorked(coll.ensureIndex({c: 1, 'a.b': 1}));
assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(1,
- coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
+assert.eq(
+ 1,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1}).explain(true).executionStats.totalDocsExamined);
//
//
diff --git a/jstests/sharding/delete_during_migrate.js b/jstests/sharding/delete_during_migrate.js
index 982b0c00787..3926d180acb 100644
--- a/jstests/sharding/delete_during_migrate.js
+++ b/jstests/sharding/delete_during_migrate.js
@@ -38,8 +38,8 @@ startMongoProgramNoConnect("mongo",
dbname);
// migrate while deletions are happening
-var moveResult = s.adminCommand(
- {moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name});
+var moveResult =
+ s.adminCommand({moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name});
// check if migration worked
assert(moveResult.ok, "migration didn't work while doing deletes");
diff --git a/jstests/sharding/drop_sharded_db.js b/jstests/sharding/drop_sharded_db.js
index 962ff84fc40..9a0a56ca245 100644
--- a/jstests/sharding/drop_sharded_db.js
+++ b/jstests/sharding/drop_sharded_db.js
@@ -21,9 +21,7 @@
dbC.getCollection("data" + (i % numColls)).insert({_id: i});
}
- var key = {
- _id: 1
- };
+ var key = {_id: 1};
for (var i = 0; i < numColls; i++) {
st.shardColl(dbA.getCollection("data" + i), key);
st.shardColl(dbB.getCollection("data" + i), key);
diff --git a/jstests/sharding/empty_doc_results.js b/jstests/sharding/empty_doc_results.js
index be63f509532..2038a27c538 100644
--- a/jstests/sharding/empty_doc_results.js
+++ b/jstests/sharding/empty_doc_results.js
@@ -2,10 +2,7 @@
// Verifies that mongos correctly handles empty documents when all fields are projected out
//
-var options = {
- mongosOptions: {binVersion: ""},
- shardOptions: {binVersion: ""}
-};
+var options = {mongosOptions: {binVersion: ""}, shardOptions: {binVersion: ""}};
var st = new ShardingTest({shards: 2, other: options});
diff --git a/jstests/sharding/explain_cmd.js b/jstests/sharding/explain_cmd.js
index 767e26c7eb2..c638fccbced 100644
--- a/jstests/sharding/explain_cmd.js
+++ b/jstests/sharding/explain_cmd.js
@@ -50,10 +50,8 @@ assert.eq(2, explain.queryPlanner.winningPlan.shards.length);
assert.eq(2, explain.executionStats.executionStages.shards.length);
// An explain of a command that doesn't exist should fail gracefully.
-explain = db.runCommand({
- explain: {nonexistent: collSharded.getName(), query: {b: 1}},
- verbosity: "allPlansExecution"
-});
+explain = db.runCommand(
+ {explain: {nonexistent: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
printjson(explain);
assert.commandFailed(explain);
diff --git a/jstests/sharding/explain_find_and_modify_sharded.js b/jstests/sharding/explain_find_and_modify_sharded.js
index 7c1b10321c2..62ffa2d35f8 100644
--- a/jstests/sharding/explain_find_and_modify_sharded.js
+++ b/jstests/sharding/explain_find_and_modify_sharded.js
@@ -12,9 +12,7 @@
st.stopBalancer();
var testDB = st.s.getDB('test');
- var shardKey = {
- a: 1
- };
+ var shardKey = {a: 1};
// Create a collection with an index on the intended shard key.
var shardedColl = testDB.getCollection(collName);
diff --git a/jstests/sharding/explain_read_pref.js b/jstests/sharding/explain_read_pref.js
index cdf1d1e74a4..8ac4fc4ff49 100644
--- a/jstests/sharding/explain_read_pref.js
+++ b/jstests/sharding/explain_read_pref.js
@@ -32,28 +32,28 @@ var testAllModes = function(conn, isMongos) {
// { tag: 'two' } so we can test the interaction of modes and tags. Test
// a bunch of combinations.
[
- // mode, tagSets, expectedHost
- ['primary', undefined, false],
- ['primary', [{}], false],
+ // mode, tagSets, expectedHost
+ ['primary', undefined, false],
+ ['primary', [{}], false],
- ['primaryPreferred', undefined, false],
- ['primaryPreferred', [{tag: 'one'}], false],
- // Correctly uses primary and ignores the tag
- ['primaryPreferred', [{tag: 'two'}], false],
+ ['primaryPreferred', undefined, false],
+ ['primaryPreferred', [{tag: 'one'}], false],
+ // Correctly uses primary and ignores the tag
+ ['primaryPreferred', [{tag: 'two'}], false],
- ['secondary', undefined, true],
- ['secondary', [{tag: 'two'}], true],
- ['secondary', [{tag: 'doesntexist'}, {}], true],
- ['secondary', [{tag: 'doesntexist'}, {tag: 'two'}], true],
+ ['secondary', undefined, true],
+ ['secondary', [{tag: 'two'}], true],
+ ['secondary', [{tag: 'doesntexist'}, {}], true],
+ ['secondary', [{tag: 'doesntexist'}, {tag: 'two'}], true],
- ['secondaryPreferred', undefined, true],
- ['secondaryPreferred', [{tag: 'one'}], false],
- ['secondaryPreferred', [{tag: 'two'}], true],
+ ['secondaryPreferred', undefined, true],
+ ['secondaryPreferred', [{tag: 'one'}], false],
+ ['secondaryPreferred', [{tag: 'two'}], true],
- // We don't have a way to alter ping times so we can't predict where an
- // untagged 'nearest' command should go, hence only test with tags.
- ['nearest', [{tag: 'one'}], false],
- ['nearest', [{tag: 'two'}], true]
+ // We don't have a way to alter ping times so we can't predict where an
+ // untagged 'nearest' command should go, hence only test with tags.
+ ['nearest', [{tag: 'one'}], false],
+ ['nearest', [{tag: 'two'}], true]
].forEach(function(args) {
var mode = args[0], tagSets = args[1], secExpected = args[2];
@@ -101,14 +101,8 @@ ReplSetTest.awaitRSClientHosts(st.s, st.rs0.nodes);
// Tag primary with { dc: 'ny', tag: 'one' }, secondary with { dc: 'ny', tag: 'two' }
var primary = st.rs0.getPrimary();
var secondary = st.rs0.getSecondary();
-var PRIMARY_TAG = {
- dc: 'ny',
- tag: 'one'
-};
-var SECONDARY_TAG = {
- dc: 'ny',
- tag: 'two'
-};
+var PRIMARY_TAG = {dc: 'ny', tag: 'one'};
+var SECONDARY_TAG = {dc: 'ny', tag: 'two'};
var rsConfig = primary.getDB("local").system.replset.findOne();
jsTest.log('got rsconf ' + tojson(rsConfig));
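Each triple in the table above is fed to a cursor's read preference; tag sets are tried in order, and an empty document {} matches any eligible member. A sketch using the tags this test defines (namespace assumed):

var coll = st.s.getCollection('test.user');
// Prefer a member tagged 'two'; the trailing {} falls back to any member.
coll.find().readPref('secondaryPreferred', [{tag: 'two'}, {}]).itcount();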
diff --git a/jstests/sharding/fair_balancer_round.js b/jstests/sharding/fair_balancer_round.js
index 90fc345c8cb..6b477efac78 100644
--- a/jstests/sharding/fair_balancer_round.js
+++ b/jstests/sharding/fair_balancer_round.js
@@ -2,9 +2,7 @@
// Tests that a balancer round loads newly sharded collection data
//
-var options = {
- mongosOptions: {verbose: 1}
-};
+var options = {mongosOptions: {verbose: 1}};
var st = new ShardingTest({shards: 2, mongos: 2, other: options});
@@ -30,8 +28,8 @@ for (var i = 0; i < numSplits; i++) {
st.stopMongos(0);
// Start balancer, which lets the stale mongos balance
-assert.writeOK(staleMongos.getDB("config")
- .settings.update({_id: "balancer"}, {$set: {stopped: false}}, true));
+assert.writeOK(
+ staleMongos.getDB("config").settings.update({_id: "balancer"}, {$set: {stopped: false}}, true));
// Make sure we eventually start moving chunks
assert.soon(function() {
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index dc547ddad74..d545ea096d6 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -182,13 +182,15 @@
printjson(db.foo6.getIndexes());
assert.eq(2,
- db.foo6.group({
- key: {a: 1},
- initial: {count: 0},
- reduce: function(z, prev) {
- prev.count++;
- }
- }).length);
+ db.foo6
+ .group({
+ key: {a: 1},
+ initial: {count: 0},
+ reduce: function(z, prev) {
+ prev.count++;
+ }
+ })
+ .length);
assert.eq(3, db.foo6.find().count());
assert(s.admin.runCommand({shardcollection: "test.foo6", key: {a: 1}}).ok);
@@ -202,11 +204,8 @@
// Remove when SERVER-10232 is fixed
assert.soon(function() {
- var cmdRes = s.admin.runCommand({
- movechunk: "test.foo6",
- find: {a: 3},
- to: s.getOther(s.getPrimaryShard("test")).name
- });
+ var cmdRes = s.admin.runCommand(
+ {movechunk: "test.foo6", find: {a: 3}, to: s.getOther(s.getPrimaryShard("test")).name});
return cmdRes.ok;
}, 'move chunk test.foo6', 60000, 1000);
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index 010289ac1cc..fd28882213b 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -86,9 +86,7 @@
for (var i = 0; i < values.length; i++) {
total += values[i].count;
}
- return {
- count: total
- };
+ return {count: total};
};
doMR = function(n) {
@@ -146,12 +144,7 @@
doMR("after extra split");
- cmd = {
- mapreduce: "mr",
- map: "emit( ",
- reduce: "fooz + ",
- out: "broken1"
- };
+ cmd = {mapreduce: "mr", map: "emit( ", reduce: "fooz + ", out: "broken1"};
x = db.runCommand(cmd);
y = s._connections[0].getDB("test").runCommand(cmd);
diff --git a/jstests/sharding/fts_score_sort_sharded.js b/jstests/sharding/fts_score_sort_sharded.js
index e6bf01503be..8ce5c3383ad 100644
--- a/jstests/sharding/fts_score_sort_sharded.js
+++ b/jstests/sharding/fts_score_sort_sharded.js
@@ -51,8 +51,9 @@ assert.throws(function() {
});
// Projection specified with incorrect field name.
-cursor = coll.find({$text: {$search: "pizza"}}, {t: {$meta: "textScore"}})
- .sort({s: {$meta: "textScore"}});
+cursor = coll.find({$text: {$search: "pizza"}}, {t: {$meta: "textScore"}}).sort({
+ s: {$meta: "textScore"}
+});
assert.throws(function() {
cursor.next();
});
diff --git a/jstests/sharding/geo_near_random1.js b/jstests/sharding/geo_near_random1.js
index 0229c84555c..707d3c550a0 100644
--- a/jstests/sharding/geo_near_random1.js
+++ b/jstests/sharding/geo_near_random1.js
@@ -38,9 +38,7 @@
printShardingSizes();
- var opts = {
- sharded: true
- };
+ var opts = {sharded: true};
test.testPt([0, 0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
diff --git a/jstests/sharding/geo_near_random2.js b/jstests/sharding/geo_near_random2.js
index cdf8543274a..4833f5bc0d0 100644
--- a/jstests/sharding/geo_near_random2.js
+++ b/jstests/sharding/geo_near_random2.js
@@ -36,11 +36,7 @@
// Turn balancer back on, for actual tests
// s.startBalancer(); // SERVER-13365
- opts = {
- sphere: 0,
- nToTest: test.nPts * 0.01,
- sharded: true
- };
+ opts = {sphere: 0, nToTest: test.nPts * 0.01, sharded: true};
test.testPt([0, 0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
diff --git a/jstests/sharding/geo_shardedgeonear.js b/jstests/sharding/geo_shardedgeonear.js
index 54bda17cf16..123b4b174cc 100644
--- a/jstests/sharding/geo_shardedgeonear.js
+++ b/jstests/sharding/geo_shardedgeonear.js
@@ -39,12 +39,7 @@ function test(db, sharded, indexType) {
assert.commandWorked(db[coll].ensureIndex({loc: indexType}));
var queryPoint = [0, 0];
- geoCmd = {
- geoNear: coll,
- near: queryPoint,
- spherical: true,
- includeLocs: true
- };
+ geoCmd = {geoNear: coll, near: queryPoint, spherical: true, includeLocs: true};
assert.commandWorked(db.runCommand(geoCmd), tojson({sharded: sharded, indexType: indexType}));
}
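The geoNear command form above returns matches ordered by distance, and includeLocs adds the matched location to each entry; a sketch over a hypothetical 'points' collection with a geo index:

var res = db.runCommand(
    {geoNear: 'points', near: [0, 0], spherical: true, includeLocs: true});
assert.commandWorked(res);
res.results.forEach(function(r) {
    print(r.dis + ' -> ' + tojson(r.loc));  // distance plus matched location
});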
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
index 0d9221ed896..2a18cd7a22e 100644
--- a/jstests/sharding/group_slaveok.js
+++ b/jstests/sharding/group_slaveok.js
@@ -40,12 +40,13 @@
// Should not throw exception, since slaveOk'd
assert.eq(10,
coll.group({
- key: {i: true},
- reduce: function(obj, ctx) {
- ctx.count += 1;
- },
- initial: {count: 0}
- }).length);
+ key: {i: true},
+ reduce: function(obj, ctx) {
+ ctx.count += 1;
+ },
+ initial: {count: 0}
+ })
+ .length);
try {
conn.setSlaveOk(false);
diff --git a/jstests/sharding/hash_shard1.js b/jstests/sharding/hash_shard1.js
index 10ab1b1308b..21b69472e3c 100644
--- a/jstests/sharding/hash_shard1.js
+++ b/jstests/sharding/hash_shard1.js
@@ -35,8 +35,8 @@ assert.neq(chunk, null, "all chunks on shard0000!");
printjson(chunk);
// try to move the chunk using an invalid specification method. should fail.
-var res = db.adminCommand(
- {movechunk: ns, find: {a: 0}, bounds: [chunk.min, chunk.max], to: "shard0000"});
+var res =
+ db.adminCommand({movechunk: ns, find: {a: 0}, bounds: [chunk.min, chunk.max], to: "shard0000"});
assert.eq(res.ok, 0, "moveChunk shouldn't work with invalid specification method");
// now move a chunk using the lower/upper bound method. should work.
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
index 766bd96e260..9b57c3e43f8 100644
--- a/jstests/sharding/index1.js
+++ b/jstests/sharding/index1.js
@@ -118,9 +118,11 @@
// Make sure the index created is unique!
assert.eq(1,
- coll.getIndexes().filter(function(z) {
- return friendlyEqual(z.key, {num: 1}) && z.unique;
- }).length);
+ coll.getIndexes()
+ .filter(function(z) {
+ return friendlyEqual(z.key, {num: 1}) && z.unique;
+ })
+ .length);
}
if (i == 7) {
coll.remove({});
@@ -156,9 +158,11 @@
// Make sure the index created is unique!
assert.eq(1,
- coll.getIndexes().filter(function(z) {
- return friendlyEqual(z.key, {num: 1}) && z.unique;
- }).length);
+ coll.getIndexes()
+ .filter(function(z) {
+ return friendlyEqual(z.key, {num: 1}) && z.unique;
+ })
+ .length);
}
if (i == 9) {
// Unique index exists on a different field as well
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index 93ce1b4d64a..96e351c25e8 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -115,9 +115,7 @@
function makeInQuery() {
if (curT.compound) {
// cheating a bit...
- return {
- 'o.a': {$in: [1, 2]}
- };
+ return {'o.a': {$in: [1, 2]}};
} else {
return makeObjectDotted({$in: curT.values});
}
@@ -178,26 +176,26 @@
assert.eq(
6, c.find().sort(makeObjectDotted(1)).count(), curT.name + " total count with count()");
- assert.eq(
- 2,
- c.find({$or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
- .count(),
- curT.name + " $or count()");
- assert.eq(
- 2,
- c.find({$or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
- .itcount(),
- curT.name + " $or itcount()");
- assert.eq(
- 4,
- c.find({$nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
- .count(),
- curT.name + " $nor count()");
- assert.eq(
- 4,
- c.find({$nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
- .itcount(),
- curT.name + " $nor itcount()");
+ assert.eq(2,
+ c.find({
+ $or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).count(),
+ curT.name + " $or count()");
+ assert.eq(2,
+ c.find({
+ $or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).itcount(),
+ curT.name + " $or itcount()");
+ assert.eq(4,
+ c.find({
+ $nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).count(),
+ curT.name + " $nor count()");
+ assert.eq(4,
+ c.find({
+ $nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).itcount(),
+ curT.name + " $nor itcount()");
var stats = c.stats();
printjson(stats);
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index 414e056bf1f..43a270e0175 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -49,12 +49,9 @@
}),
"sort 1");
assert.eq("sara,mark,joe,eliot,bob,allan",
- db.foo.find()
- .sort({name: -1})
- .toArray()
- .map(function(z) {
- return z.name;
- }),
+ db.foo.find().sort({name: -1}).toArray().map(function(z) {
+ return z.name;
+ }),
"sort 2");
// make sure we can't force a split on an extreme key
diff --git a/jstests/sharding/lagged_config_secondary.js b/jstests/sharding/lagged_config_secondary.js
index 5c28f79f24f..92f0453b941 100644
--- a/jstests/sharding/lagged_config_secondary.js
+++ b/jstests/sharding/lagged_config_secondary.js
@@ -9,8 +9,8 @@
var configSecondaryToKill = configSecondaryList[0];
var delayedConfigSecondary = configSecondaryList[1];
- delayedConfigSecondary.getDB('admin')
- .adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+ delayedConfigSecondary.getDB('admin').adminCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
var testDB = st.s.getDB('test');
testDB.adminCommand({enableSharding: 'test'});
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
index 055b5c8b788..b681f328298 100644
--- a/jstests/sharding/limit_push.js
+++ b/jstests/sharding/limit_push.js
@@ -31,9 +31,7 @@
// The query is asking for the maximum value below a given value
// db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
- q = {
- x: {$lt: 60}
- };
+ q = {x: {$lt: 60}};
// Make sure the basic queries are correct
assert.eq(60, db.limit_push.find(q).count(), "Did not find 60 documents");
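The query the comment spells out — the maximum x below a bound — is a descending sort plus limit(1), which mongos can push down to each shard and merge; a sketch, assuming x holds the integers 0..99, one per document:

var top = db.limit_push.find({x: {$lt: 60}}).sort({x: -1}).limit(1).next();
assert.eq(59, top.x);  // the largest value below the bound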
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 459e627fb3f..b36972da685 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -107,17 +107,13 @@ var assertCannotRunCommands = function(mongo, st) {
{param: "userCacheInvalidationIntervalSecs", val: 300}
];
params.forEach(function(p) {
- var cmd = {
- setParameter: 1
- };
+ var cmd = {setParameter: 1};
cmd[p.param] = p.val;
assert.commandFailedWithCode(
mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "setParameter: " + p.param);
});
params.forEach(function(p) {
- var cmd = {
- getParameter: 1
- };
+ var cmd = {getParameter: 1};
cmd[p.param] = 1;
assert.commandFailedWithCode(
mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "getParameter: " + p.param);
diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js
index 7194c98750c..36ccf7d1d6f 100644
--- a/jstests/sharding/max_time_ms_sharded.js
+++ b/jstests/sharding/max_time_ms_sharded.js
@@ -160,17 +160,16 @@
// Positive test for "mapReduce".
configureMaxTimeAlwaysTimeOut("alwaysOn");
- res = coll.runCommand("mapReduce",
- {
- map: function() {
- emit(0, 0);
- },
- reduce: function(key, values) {
- return 0;
- },
- out: {inline: 1},
- maxTimeMS: 60 * 1000
- });
+ res = coll.runCommand("mapReduce", {
+ map: function() {
+ emit(0, 0);
+ },
+ reduce: function(key, values) {
+ return 0;
+ },
+ out: {inline: 1},
+ maxTimeMS: 60 * 1000
+ });
assert.commandFailed(
res, "expected mapReduce to fail in mongod due to maxTimeAlwaysTimeOut fail point");
assert.eq(
@@ -180,17 +179,16 @@
// Negative test for "mapReduce".
configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(coll.runCommand("mapReduce",
- {
- map: function() {
- emit(0, 0);
- },
- reduce: function(key, values) {
- return 0;
- },
- out: {inline: 1},
- maxTimeMS: 60 * 1000
- }),
+ assert.commandWorked(coll.runCommand("mapReduce", {
+ map: function() {
+ emit(0, 0);
+ },
+ reduce: function(key, values) {
+ return 0;
+ },
+ out: {inline: 1},
+ maxTimeMS: 60 * 1000
+ }),
"expected mapReduce to not hit time limit in mongod");
// Positive test for "aggregate".
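configureMaxTimeAlwaysTimeOut above toggles the maxTimeAlwaysTimeOut fail point, which makes any operation carrying a maxTimeMS report time-limit expiry immediately; a sketch of the underlying commands (collection name assumed, db pointing at the mongod under test):

assert.commandWorked(db.adminCommand(
    {configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'alwaysOn'}));
assert.commandFailedWithCode(db.runCommand({find: 'coll', maxTimeMS: 60 * 1000}),
                             ErrorCodes.ExceededTimeLimit);
assert.commandWorked(
    db.adminCommand({configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'off'}));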
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 8895d14c0d6..01260123b67 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -42,11 +42,8 @@
s.printShardingStatus();
assert.throws(function() {
- s.adminCommand({
- movechunk: "test.foo",
- find: {x: 50},
- to: s.getOther(s.getPrimaryShard("test")).name
- });
+ s.adminCommand(
+ {movechunk: "test.foo", find: {x: 50}, to: s.getOther(s.getPrimaryShard("test")).name});
}, [], "move should fail");
for (i = 0; i < 20; i += 2) {
diff --git a/jstests/sharding/migration_failure.js b/jstests/sharding/migration_failure.js
index 6198c8d2cef..aee7fdc97db 100644
--- a/jstests/sharding/migration_failure.js
+++ b/jstests/sharding/migration_failure.js
@@ -53,8 +53,8 @@
assert.neq(version.global, failVersion.global);
- assert.commandWorked(st.shard0.getDB("admin")
- .runCommand({configureFailPoint: 'failApplyChunkOps', mode: 'off'}));
+ assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failApplyChunkOps', mode: 'off'}));
assert.commandWorked(st.shard0.getDB("admin").runCommand(
{configureFailPoint: 'failCommitMigrationCommand', mode: 'off'}));
diff --git a/jstests/sharding/min_optime_recovery.js b/jstests/sharding/min_optime_recovery.js
index d77f1e2ad42..69ccc78f02c 100644
--- a/jstests/sharding/min_optime_recovery.js
+++ b/jstests/sharding/min_optime_recovery.js
@@ -47,12 +47,10 @@
assert.eq(null, doc);
}
- var restartCmdLineOptions = Object.merge(
- st.d0.fullOptions,
- {
- setParameter: 'recoverShardingState=' + (withRecovery ? 'true' : 'false'),
- restart: true
- });
+ var restartCmdLineOptions = Object.merge(st.d0.fullOptions, {
+ setParameter: 'recoverShardingState=' + (withRecovery ? 'true' : 'false'),
+ restart: true
+ });
// Restart the shard that donated a chunk to trigger the optime recovery logic.
st.stopMongod(0);
diff --git a/jstests/sharding/mongos_no_replica_set_refresh.js b/jstests/sharding/mongos_no_replica_set_refresh.js
index 3d9af893b55..53809fd88aa 100644
--- a/jstests/sharding/mongos_no_replica_set_refresh.js
+++ b/jstests/sharding/mongos_no_replica_set_refresh.js
@@ -10,7 +10,11 @@ load("jstests/replsets/rslib.js");
mongos: 1,
other: {
rs0: {
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}, ],
+ nodes: [
+ {},
+ {rsConfig: {priority: 0}},
+ {rsConfig: {priority: 0}},
+ ],
}
}
});
diff --git a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
index 8eaf9653f11..0acb2cc5609 100644
--- a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
@@ -11,11 +11,7 @@
// (connection connected after shard change).
//
-var options = {
- rs: true,
- rsOptions: {nodes: 2},
- keyFile: "jstests/libs/key1"
-};
+var options = {rs: true, rsOptions: {nodes: 2}, keyFile: "jstests/libs/key1"};
var st = new ShardingTest({shards: 3, mongos: 1, other: options});
@@ -82,9 +78,7 @@ authDBUsers(mongosConnActive);
var mongosConnIdle = null;
var mongosConnNew = null;
-var wc = {
- writeConcern: {w: 2, wtimeout: 60000}
-};
+var wc = {writeConcern: {w: 2, wtimeout: 60000}};
assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
index e24566605ce..f811c9ad443 100644
--- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
@@ -50,9 +50,7 @@
var mongosConnIdle = null;
var mongosConnNew = null;
- var wc = {
- writeConcern: {w: 2, wtimeout: 60000}
- };
+ var wc = {writeConcern: {w: 2, wtimeout: 60000}};
assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
diff --git a/jstests/sharding/mongos_shard_failure_tolerance.js b/jstests/sharding/mongos_shard_failure_tolerance.js
index 73455666635..b3e480ae3ed 100644
--- a/jstests/sharding/mongos_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_shard_failure_tolerance.js
@@ -32,8 +32,8 @@
// Create the unsharded database
assert.writeOK(collUnsharded.insert({some: "doc"}));
assert.writeOK(collUnsharded.remove({}));
- assert.commandWorked(admin.runCommand(
- {movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
+ assert.commandWorked(
+ admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
//
// Setup is complete
diff --git a/jstests/sharding/move_chunk_basic.js b/jstests/sharding/move_chunk_basic.js
index 69bdf4d8c90..267ca74f718 100644
--- a/jstests/sharding/move_chunk_basic.js
+++ b/jstests/sharding/move_chunk_basic.js
@@ -33,10 +33,10 @@
assert(aChunk);
// Error if either of the bounds is not a valid shard key (BSON object - 1 yields a NaN)
- assert.commandFailed(mongos.adminCommand(
- {moveChunk: ns, bounds: [aChunk.min - 1, aChunk.max], to: shard1}));
- assert.commandFailed(mongos.adminCommand(
- {moveChunk: ns, bounds: [aChunk.min, aChunk.max - 1], to: shard1}));
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min - 1, aChunk.max], to: shard1}));
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max - 1], to: shard1}));
// Fail if find and bounds are both set.
assert.commandFailed(mongos.adminCommand(
diff --git a/jstests/sharding/move_chunk_wc.js b/jstests/sharding/move_chunk_wc.js
index 9f7d4b2b646..1ebf1be6b36 100644
--- a/jstests/sharding/move_chunk_wc.js
+++ b/jstests/sharding/move_chunk_wc.js
@@ -57,10 +57,7 @@ load('jstests/libs/write_concern_util.js');
_waitForDelete: true
};
- req.writeConcern = {
- w: 1,
- wtimeout: 30000
- };
+ req.writeConcern = {w: 1, wtimeout: 30000};
jsTest.log("Testing " + tojson(req));
var res = db.adminCommand(req);
assert.commandWorked(res);
@@ -68,10 +65,7 @@ load('jstests/libs/write_concern_util.js');
checkChunkCount(2, 0);
// This should pass because w: majority is always passed to config servers.
- req.writeConcern = {
- w: 2,
- wtimeout: 30000
- };
+ req.writeConcern = {w: 2, wtimeout: 30000};
jsTest.log("Testing " + tojson(req));
req.to = s1;
res = db.adminCommand(req);
@@ -80,10 +74,7 @@ load('jstests/libs/write_concern_util.js');
checkChunkCount(1, 1);
// This should fail because the writeConcern cannot be satisfied on the to shard.
- req.writeConcern = {
- w: 4,
- wtimeout: 3000
- };
+ req.writeConcern = {w: 4, wtimeout: 3000};
jsTest.log("Testing " + tojson(req));
req.to = s0;
res = db.adminCommand(req);
@@ -92,10 +83,7 @@ load('jstests/libs/write_concern_util.js');
checkChunkCount(1, 1);
// This should fail because the writeConcern cannot be satisfied on the from shard.
- req.writeConcern = {
- w: 6,
- wtimeout: 3000
- };
+ req.writeConcern = {w: 6, wtimeout: 3000};
jsTest.log("Testing " + tojson(req));
req.to = s0;
res = db.adminCommand(req);
@@ -104,10 +92,7 @@ load('jstests/libs/write_concern_util.js');
checkChunkCount(1, 1);
// This should fail because the writeConcern is invalid and cannot be satisfied anywhere.
- req.writeConcern = {
- w: "invalid",
- wtimeout: 3000
- };
+ req.writeConcern = {w: "invalid", wtimeout: 3000};
jsTest.log("Testing " + tojson(req));
req.to = s0;
res = db.adminCommand(req);
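The pattern above mutates one request object between runs; fully spelled out, a moveChunk carrying a write concern that both shards must satisfy looks like this sketch (namespace and shard name assumed):

var req = {
    moveChunk: 'test.foo',
    find: {_id: 0},
    to: 'shard0001',
    _waitForDelete: true,
    writeConcern: {w: 'majority', wtimeout: 30000}
};
assert.commandWorked(db.adminCommand(req));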
diff --git a/jstests/sharding/move_stale_mongos.js b/jstests/sharding/move_stale_mongos.js
index 63482d894c3..ceb805ee674 100644
--- a/jstests/sharding/move_stale_mongos.js
+++ b/jstests/sharding/move_stale_mongos.js
@@ -17,12 +17,8 @@ for (var i = 0; i < 100; i += 10) {
assert.commandWorked(st.s0.getDB('admin').runCommand({split: testNs, middle: {_id: i}}));
st.configRS.awaitLastOpCommitted(); // Ensure that other mongos sees the split
var nextShardIndex = (curShardIndex + 1) % shards.length;
- assert.commandWorked(st.s1.getDB('admin').runCommand({
- moveChunk: testNs,
- find: {_id: i + 5},
- to: shards[nextShardIndex],
- _waitForDelete: true
- }));
+ assert.commandWorked(st.s1.getDB('admin').runCommand(
+ {moveChunk: testNs, find: {_id: i + 5}, to: shards[nextShardIndex], _waitForDelete: true}));
curShardIndex = nextShardIndex;
st.configRS.awaitLastOpCommitted(); // Ensure that other mongos sees the move
}
diff --git a/jstests/sharding/movechunk_with_default_paranoia.js b/jstests/sharding/movechunk_with_default_paranoia.js
index a6f4704ec90..52597fec149 100644
--- a/jstests/sharding/movechunk_with_default_paranoia.js
+++ b/jstests/sharding/movechunk_with_default_paranoia.js
@@ -10,9 +10,11 @@ var shards = [st.shard0, st.shard1];
for (i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
var hasMoveChunkDir = 0 !=
- ls(dbpath).filter(function(a) {
- return null != a.match("moveChunk");
- }).length;
+ ls(dbpath)
+ .filter(function(a) {
+ return null != a.match("moveChunk");
+ })
+ .length;
assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath));
}
st.stop();
diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js
index 96348d827bf..f8c2fd0fbd8 100644
--- a/jstests/sharding/movechunk_with_moveParanoia.js
+++ b/jstests/sharding/movechunk_with_moveParanoia.js
@@ -12,9 +12,11 @@ var foundMoveChunk = false;
for (i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
var hasMoveChunkDir = 0 !=
- ls(dbpath).filter(function(a) {
- return null != a.match("moveChunk");
- }).length;
+ ls(dbpath)
+ .filter(function(a) {
+ return null != a.match("moveChunk");
+ })
+ .length;
foundMoveChunk = foundMoveChunk || hasMoveChunkDir;
}
diff --git a/jstests/sharding/movechunk_with_noMoveParanoia.js b/jstests/sharding/movechunk_with_noMoveParanoia.js
index ae8ef5899a8..4e75421543a 100644
--- a/jstests/sharding/movechunk_with_noMoveParanoia.js
+++ b/jstests/sharding/movechunk_with_noMoveParanoia.js
@@ -11,9 +11,11 @@ var shards = [st.shard0, st.shard1];
for (i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
var hasMoveChunkDir = 0 !=
- ls(dbpath).filter(function(a) {
- return null != a.match("moveChunk");
- }).length;
+ ls(dbpath)
+ .filter(function(a) {
+ return null != a.match("moveChunk");
+ })
+ .length;
assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath));
}
st.stop();
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index ab3bae28d74..39def771ef9 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -64,9 +64,8 @@ assert.eq(numDocs,
// Make sure it's sharded and split
var newNumChunks = config.chunks.count({ns: testDB.mrShardedOut._fullName});
-assert.gt(newNumChunks,
- 1,
- "Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
+assert.gt(
+ newNumChunks, 1, "Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
// Check that there are no "jumbo" chunks.
var objSize = Object.bsonsize(testDB.mrShardedOut.findOne());
@@ -74,13 +73,11 @@ var docsPerChunk = 1024 * 1024 / objSize * 1.1; // 1MB chunk size + allowance
st.printShardingStatus(true);
-config.chunks.find({ns: testDB.mrShardedOut.getFullName()})
- .forEach(function(chunkDoc) {
- var count =
- testDB.mrShardedOut.find({_id: {$gte: chunkDoc.min._id, $lt: chunkDoc.max._id}})
- .itcount();
- assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
- });
+config.chunks.find({ns: testDB.mrShardedOut.getFullName()}).forEach(function(chunkDoc) {
+ var count =
+ testDB.mrShardedOut.find({_id: {$gte: chunkDoc.min._id, $lt: chunkDoc.max._id}}).itcount();
+ assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
+});
// Check that chunks for the newly created sharded output collection are well distributed.
var shard0Chunks =
@@ -127,9 +124,8 @@ assert.eq(numDocs,
// Make sure it's sharded and split
newNumChunks = config.chunks.count({ns: testDB.mrShardedOut._fullName});
-assert.gt(newNumChunks,
- 1,
- "Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
+assert.gt(
+ newNumChunks, 1, "Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
st.printShardingStatus(true);
diff --git a/jstests/sharding/mr_shard_version.js b/jstests/sharding/mr_shard_version.js
index fc2f7f02e4b..5040fc17ef2 100644
--- a/jstests/sharding/mr_shard_version.js
+++ b/jstests/sharding/mr_shard_version.js
@@ -31,11 +31,7 @@
jsTest.log("Starting migrations...");
- var migrateOp = {
- op: "command",
- ns: "admin",
- command: {moveChunk: "" + coll}
- };
+ var migrateOp = {op: "command", ns: "admin", command: {moveChunk: "" + coll}};
var checkMigrate = function() {
print("Result of migrate : ");
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index 2112408afc0..dcbd5a66655 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -28,9 +28,9 @@
assert.eq(1, res.ok, tojson(res));
printjson(s2.adminCommand({"getShardVersion": "test.existing"}));
- printjson(new Mongo(s1.getPrimaryShard("test").name)
- .getDB("admin")
- .adminCommand({"getShardVersion": "test.existing"}));
+ printjson(new Mongo(s1.getPrimaryShard("test").name).getDB("admin").adminCommand({
+ "getShardVersion": "test.existing"
+ }));
assert.eq(1, s1.getDB('test').existing.count({_id: 1})); // SERVER-2828
assert.eq(1, s2.getDB('test').existing.count({_id: 1}));
diff --git a/jstests/sharding/no_empty_reset.js b/jstests/sharding/no_empty_reset.js
index 61fe5905cc0..da77597ecae 100644
--- a/jstests/sharding/no_empty_reset.js
+++ b/jstests/sharding/no_empty_reset.js
@@ -31,12 +31,8 @@ var emptyShard = st.getShard(coll, {_id: -1});
var admin = st.s.getDB("admin");
assert.soon(
function() {
- var result = admin.runCommand({
- moveChunk: "" + coll,
- find: {_id: -1},
- to: fullShard.shardName,
- _waitForDelete: true
- });
+ var result = admin.runCommand(
+ {moveChunk: "" + coll, find: {_id: -1}, to: fullShard.shardName, _waitForDelete: true});
jsTestLog('moveChunk result = ' + tojson(result));
return result.ok;
},
diff --git a/jstests/sharding/pending_chunk.js b/jstests/sharding/pending_chunk.js
index 21107fe370d..96089b6d491 100644
--- a/jstests/sharding/pending_chunk.js
+++ b/jstests/sharding/pending_chunk.js
@@ -23,10 +23,10 @@
assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 0}, to: shards[1]._id, _waitForDelete: true}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 1}, to: shards[1]._id, _waitForDelete: true}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: ns, find: {_id: 0}, to: shards[1]._id, _waitForDelete: true}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: ns, find: {_id: 1}, to: shards[1]._id, _waitForDelete: true}));
function getMetadata(shard) {
var admin = shard.getDB('admin'),
@@ -50,8 +50,8 @@
assert.neq(metadata.collVersion.t, 0);
assert.eq(metadata.pending.length, 0);
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 1}, to: shards[0]._id, _waitForDelete: true}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: ns, find: {_id: 1}, to: shards[0]._id, _waitForDelete: true}));
metadata = getMetadata(shard0);
assert.eq(metadata.shardVersion.t, 0);
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index 92a077d73b5..a13b133e3ef 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -109,27 +109,23 @@
_waitForDelete: true
}));
- var expectedShardCount = {
- shard0000: 0,
- shard0001: 0
- };
- config.chunks.find({ns: 'test.user'})
- .forEach(function(chunkDoc) {
- var min = chunkDoc.min.num;
- var max = chunkDoc.max.num;
-
- if (min < 0 || min == MinKey) {
- min = 0;
- }
+ var expectedShardCount = {shard0000: 0, shard0001: 0};
+ config.chunks.find({ns: 'test.user'}).forEach(function(chunkDoc) {
+ var min = chunkDoc.min.num;
+ var max = chunkDoc.max.num;
- if (max > 1000 || max == MaxKey) {
- max = 1000;
- }
+ if (min < 0 || min == MinKey) {
+ min = 0;
+ }
- if (max > 0) {
- expectedShardCount[chunkDoc.shard] += (max - min);
- }
- });
+ if (max > 1000 || max == MaxKey) {
+ max = 1000;
+ }
+
+ if (max > 0) {
+ expectedShardCount[chunkDoc.shard] += (max - min);
+ }
+ });
assert.eq(expectedShardCount['shard0000'], shard0.getDB('test').user.find().count());
assert.eq(expectedShardCount['shard0001'], shard1.getDB('test').user.find().count());
diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js
index 05e6eca0d4f..63b5ef3090c 100644
--- a/jstests/sharding/printShardingStatus.js
+++ b/jstests/sharding/printShardingStatus.js
@@ -162,10 +162,7 @@
};
var collName = getCollName(testCollDetailsNum);
- var cmdObj = {
- shardCollection: collName,
- key: {_id: 1}
- };
+ var cmdObj = {shardCollection: collName, key: {_id: 1}};
if (args.unique) {
cmdObj.unique = true;
}
diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js
index c6b08b8b7c0..7547eef007a 100644
--- a/jstests/sharding/query_config.js
+++ b/jstests/sharding/query_config.js
@@ -164,12 +164,13 @@
assert(!cursor.hasNext());
// Aggregate query.
- cursor = configDB.collections.aggregate([
- {$match: {"key.b": 1}},
- {$sort: {"_id": 1}},
- {$project: {"keyb": "$key.b", "keyc": "$key.c"}}
- ],
- {cursor: {batchSize: 2}});
+ cursor = configDB.collections.aggregate(
+ [
+ {$match: {"key.b": 1}},
+ {$sort: {"_id": 1}},
+ {$project: {"keyb": "$key.b", "keyc": "$key.c"}}
+ ],
+ {cursor: {batchSize: 2}});
assert.eq(cursor.objsLeftInBatch(), 2);
assert.eq(cursor.next(), {_id: testNamespaces[3], keyb: 1, keyc: 1});
assert.eq(cursor.next(), {_id: testNamespaces[2], keyb: 1});
@@ -220,8 +221,9 @@
st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 12}, to: shard2}));
// Find query.
- cursor = configDB.chunks.find({ns: testColl.getFullName()},
- {_id: 0, min: 1, max: 1, shard: 1}).sort({"min.e": 1});
+ cursor =
+ configDB.chunks.find({ns: testColl.getFullName()}, {_id: 0, min: 1, max: 1, shard: 1})
+ .sort({"min.e": 1});
assert.eq(cursor.next(), {min: {e: {"$minKey": 1}}, "max": {"e": 2}, shard: shard2});
assert.eq(cursor.next(), {min: {e: 2}, max: {e: 6}, shard: shard1});
assert.eq(cursor.next(), {min: {e: 6}, max: {e: 8}, shard: shard1});
@@ -258,9 +260,7 @@
}
};
var reduceFunction = function(key, values) {
- return {
- chunks: values.length
- };
+ return {chunks: values.length};
};
result = configDB.chunks.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
assert.eq(result.ok, 1);
@@ -322,13 +322,14 @@
assert(!cursor.hasNext());
// Aggregate query.
- cursor = userColl.aggregate([
- {$match: {c: {$gt: 1}}},
- {$unwind: "$u"},
- {$group: {_id: "$u", sum: {$sum: "$c"}}},
- {$sort: {_id: 1}}
- ],
- {cursor: {batchSize: 2}});
+ cursor = userColl.aggregate(
+ [
+ {$match: {c: {$gt: 1}}},
+ {$unwind: "$u"},
+ {$group: {_id: "$u", sum: {$sum: "$c"}}},
+ {$sort: {_id: 1}}
+ ],
+ {cursor: {batchSize: 2}});
assert.eq(cursor.objsLeftInBatch(), 2);
assert.eq(cursor.next(), {_id: 1, sum: 11});
assert.eq(cursor.next(), {_id: 2, sum: 15});
@@ -365,18 +366,15 @@
emit(this.g, 1);
};
var reduceFunction = function(key, values) {
- return {
- count: values.length
- };
+ return {count: values.length};
};
result = userColl.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
assert.eq(result.ok, 1);
- assert.eq(sortArrayById(result.results),
- [
- {_id: 1, value: {count: 2}},
- {_id: 2, value: {count: 3}},
- {_id: 3, value: {count: 2}}
- ]);
+ assert.eq(sortArrayById(result.results), [
+ {_id: 1, value: {count: 2}},
+ {_id: 2, value: {count: 3}},
+ {_id: 3, value: {count: 2}}
+ ]);
assert(userColl.drop());
};
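The reduce functions above return exactly the shape that map emits; a minimal sketch of the mapReduce contract this relies on, with illustrative collection and field names:

// reduce may be re-invoked on its own output, so it must accept and
// return the same {chunks: N} shape that map emits.
var mapFunction = function() {
    emit(this.shard, {chunks: 1});
};
var reduceFunction = function(key, values) {
    var total = 0;
    values.forEach(function(v) {
        total += v.chunks;
    });
    return {chunks: total};
};
var result = db.chunks.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
assert.eq(result.ok, 1);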
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index aadd8903344..973f02993ac 100644
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -3,9 +3,7 @@
* can be found in dbtests/replica_set_monitor_test.cpp.
*/
-var PRI_TAG = {
- dc: 'ny'
-};
+var PRI_TAG = {dc: 'ny'};
var SEC_TAGS = [{dc: 'sf', s: "1"}, {dc: 'ma', s: "2"}, {dc: 'eu', s: "3"}, {dc: 'jp', s: "4"}];
var NODES = SEC_TAGS.length + 1;
@@ -95,9 +93,7 @@ var doTest = function(useDollarQuerySyntax) {
var getExplain = function(readPrefMode, readPrefTags) {
if (useDollarQuerySyntax) {
- var readPrefObj = {
- mode: readPrefMode
- };
+ var readPrefObj = {mode: readPrefMode};
if (readPrefTags) {
readPrefObj.tags = readPrefTags;
diff --git a/jstests/sharding/read_pref_cmd.js b/jstests/sharding/read_pref_cmd.js
index 1e4aa48ee25..3cf6a694cd0 100644
--- a/jstests/sharding/read_pref_cmd.js
+++ b/jstests/sharding/read_pref_cmd.js
@@ -60,9 +60,7 @@ var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secEx
assert(cmdResult.ok);
var testedAtLeastOnce = false;
- var query = {
- op: 'command'
- };
+ var query = {op: 'command'};
Object.extend(query, profileQuery);
hostList.forEach(function(node) {
@@ -216,28 +214,28 @@ var testAllModes = function(conn, hostList, isMongos) {
// { tag: 'two' } so we can test the interaction of modes and tags. Test
// a bunch of combinations.
[
- // mode, tagSets, expectedHost
- ['primary', undefined, false],
- ['primary', [], false],
+ // mode, tagSets, expectedHost
+ ['primary', undefined, false],
+ ['primary', [], false],
- ['primaryPreferred', undefined, false],
- ['primaryPreferred', [{tag: 'one'}], false],
- // Correctly uses primary and ignores the tag
- ['primaryPreferred', [{tag: 'two'}], false],
+ ['primaryPreferred', undefined, false],
+ ['primaryPreferred', [{tag: 'one'}], false],
+ // Correctly uses primary and ignores the tag
+ ['primaryPreferred', [{tag: 'two'}], false],
- ['secondary', undefined, true],
- ['secondary', [{tag: 'two'}], true],
- ['secondary', [{tag: 'doesntexist'}, {}], true],
- ['secondary', [{tag: 'doesntexist'}, {tag: 'two'}], true],
+ ['secondary', undefined, true],
+ ['secondary', [{tag: 'two'}], true],
+ ['secondary', [{tag: 'doesntexist'}, {}], true],
+ ['secondary', [{tag: 'doesntexist'}, {tag: 'two'}], true],
- ['secondaryPreferred', undefined, true],
- ['secondaryPreferred', [{tag: 'one'}], false],
- ['secondaryPreferred', [{tag: 'two'}], true],
+ ['secondaryPreferred', undefined, true],
+ ['secondaryPreferred', [{tag: 'one'}], false],
+ ['secondaryPreferred', [{tag: 'two'}], true],
- // We don't have a way to alter ping times so we can't predict where an
- // untagged 'nearest' command should go, hence only test with tags.
- ['nearest', [{tag: 'one'}], false],
- ['nearest', [{tag: 'two'}], true]
+ // We don't have a way to alter ping times so we can't predict where an
+ // untagged 'nearest' command should go, hence only test with tags.
+ ['nearest', [{tag: 'one'}], false],
+ ['nearest', [{tag: 'two'}], true]
].forEach(function(args) {
var mode = args[0], tagSets = args[1], secExpected = args[2];
@@ -248,17 +246,17 @@ var testAllModes = function(conn, hostList, isMongos) {
});
[
- // Tags not allowed with primary
- ['primary', [{dc: 'doesntexist'}]],
- ['primary', [{dc: 'ny'}]],
- ['primary', [{dc: 'one'}]],
+ // Tags not allowed with primary
+ ['primary', [{dc: 'doesntexist'}]],
+ ['primary', [{dc: 'ny'}]],
+ ['primary', [{dc: 'one'}]],
- // No matching node
- ['secondary', [{tag: 'one'}]],
- ['nearest', [{tag: 'doesntexist'}]],
+ // No matching node
+ ['secondary', [{tag: 'one'}]],
+ ['nearest', [{tag: 'doesntexist'}]],
- ['invalid-mode', undefined],
- ['secondary', ['misformatted-tags']]
+ ['invalid-mode', undefined],
+ ['secondary', ['misformatted-tags']]
].forEach(function(args) {
var mode = args[0], tagSets = args[1];
@@ -278,14 +276,8 @@ ReplSetTest.awaitRSClientHosts(st.s, st.rs0.nodes);
// Tag primary with { dc: 'ny', tag: 'one' }, secondary with { dc: 'ny', tag: 'two' }
var primary = st.rs0.getPrimary();
var secondary = st.rs0.getSecondary();
-var PRIMARY_TAG = {
- dc: 'ny',
- tag: 'one'
-};
-var SECONDARY_TAG = {
- dc: 'ny',
- tag: 'two'
-};
+var PRIMARY_TAG = {dc: 'ny', tag: 'one'};
+var SECONDARY_TAG = {dc: 'ny', tag: 'two'};
var rsConfig = primary.getDB("local").system.replset.findOne();
jsTest.log('got rsconf ' + tojson(rsConfig));
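The mode/tagSets matrix above is exercised through two equivalent client syntaxes; a hedged sketch of both, reusing the 'two' tag the test assigns to the secondary:

// Connection-level helper: every subsequent read uses this preference.
db.getMongo().setReadPref('secondary', [{tag: 'two'}]);
db.user.find().itcount();

// Legacy $-query syntax (the useDollarQuerySyntax branch above): the
// preference travels inside the query document itself.
db.user.find({$query: {}, $readPreference: {mode: 'secondary', tags: [{tag: 'two'}]}}).itcount();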
diff --git a/jstests/sharding/regex_targeting.js b/jstests/sharding/regex_targeting.js
index 7dd927d8aab..33411447721 100644
--- a/jstests/sharding/regex_targeting.js
+++ b/jstests/sharding/regex_targeting.js
@@ -162,17 +162,14 @@ collSharded.remove({});
collCompound.remove({});
collNested.remove({});
assert.writeError(collSharded.update({a: /abcde.*/}, {$set: {a: /abcde.*/}}, {upsert: true}));
-assert.writeError(collCompound.update({a: /abcde.*/},
- {$set: {a: /abcde.*/, b: 1}},
- {upsert: true}));
+assert.writeError(
+ collCompound.update({a: /abcde.*/}, {$set: {a: /abcde.*/, b: 1}}, {upsert: true}));
// Exact regex in query never equality
-assert.writeError(collNested.update({'a.b': /abcde.*/},
- {$set: {'a.b': /abcde.*/}},
- {upsert: true}));
+assert.writeError(
+ collNested.update({'a.b': /abcde.*/}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
// Even nested regexes are not extracted in queries
-assert.writeError(collNested.update({a: {b: /abcde.*/}},
- {$set: {'a.b': /abcde.*/}},
- {upsert: true}));
+assert.writeError(
+ collNested.update({a: {b: /abcde.*/}}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
assert.writeError(collNested.update({c: 1}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
//
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index 1d52ac47abc..95d6d7526a8 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -13,9 +13,7 @@
* was able to refresh before proceeding to check.
*/
-var rsOpt = {
- oplogSize: 10
-};
+var rsOpt = {oplogSize: 10};
var st = new ShardingTest({shards: 1, rs: rsOpt});
var mongos = st.s;
var replTest = st.rs0;
diff --git a/jstests/sharding/secondary_query_routing.js b/jstests/sharding/secondary_query_routing.js
index ff0dfcb22d9..3eb706022aa 100644
--- a/jstests/sharding/secondary_query_routing.js
+++ b/jstests/sharding/secondary_query_routing.js
@@ -4,9 +4,7 @@
*/
(function() {
- var rsOpts = {
- nodes: 2
- };
+ var rsOpts = {nodes: 2};
var st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
st.s0.adminCommand({enableSharding: 'test'});
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index abe91508650..6f563aebbb0 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -40,9 +40,8 @@ db.foo.save({num: 1, name: "eliot"});
db.foo.save({num: 2, name: "sara"});
db.foo.save({num: -1, name: "joe"});
-assert.eq(3,
- s.getPrimaryShard("test").getDB("test").foo.find().length(),
- "not right directly to db A");
+assert.eq(
+ 3, s.getPrimaryShard("test").getDB("test").foo.find().length(), "not right directly to db A");
assert.eq(3, db.foo.find().length(), "not right on shard");
primary = s.getPrimaryShard("test").getDB("test");
@@ -58,12 +57,8 @@ placeCheck(2);
// test move shard
assert.throws(function() {
- s.adminCommand({
- movechunk: "test.foo",
- find: {num: 1},
- to: primary.getMongo().name,
- _waitForDelete: true
- });
+ s.adminCommand(
+ {movechunk: "test.foo", find: {num: 1}, to: primary.getMongo().name, _waitForDelete: true});
});
assert.throws(function() {
s.adminCommand({movechunk: "test.foo", find: {num: 1}, to: "adasd", _waitForDelete: true});
@@ -74,9 +69,8 @@ s.adminCommand(
assert.eq(2, secondary.foo.find().length(), "secondary should have 2 after move shard");
assert.eq(1, primary.foo.find().length(), "primary should only have 1 after move shard");
-assert.eq(2,
- s.config.chunks.count(),
- "still should have 2 shards after move not:" + s.getChunksString());
+assert.eq(
+ 2, s.config.chunks.count(), "still should have 2 shards after move not:" + s.getChunksString());
chunks = s.config.chunks.find().toArray();
assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same after the move");
diff --git a/jstests/sharding/shard_aware_init.js b/jstests/sharding/shard_aware_init.js
index 199eb369557..5dda623bb77 100644
--- a/jstests/sharding/shard_aware_init.js
+++ b/jstests/sharding/shard_aware_init.js
@@ -40,8 +40,8 @@
mongodConn = MongoRunner.runMongod(options);
waitForMaster(mongodConn);
- var res = mongodConn.getDB('admin')
- .system.version.update({_id: 'shardIdentity'}, shardIdentityDoc);
+ var res = mongodConn.getDB('admin').system.version.update({_id: 'shardIdentity'},
+ shardIdentityDoc);
assert.eq(1, res.nModified);
MongoRunner.stopMongod(mongodConn.port);
@@ -60,8 +60,8 @@
return mongodConn;
};
- assert.writeOK(mongodConn.getDB('admin')
- .system.version.update({_id: 'shardIdentity'}, shardIdentityDoc, true));
+ assert.writeOK(mongodConn.getDB('admin').system.version.update(
+ {_id: 'shardIdentity'}, shardIdentityDoc, true));
var res = mongodConn.getDB('admin').runCommand({shardingState: 1});
@@ -98,8 +98,7 @@
waitForMaster(mongodConn);
assert.writeOK(mongodConn.getDB('admin').system.version.update(
- {_id: 'shardIdentity'},
- {_id: 'shardIdentity', shardName: 'x', clusterId: ObjectId()}));
+ {_id: 'shardIdentity'}, {_id: 'shardIdentity', shardName: 'x', clusterId: ObjectId()}));
MongoRunner.stopMongod(mongodConn.port);
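The shardIdentity document upserted above is what makes a mongod shard-aware on startup; a hedged sketch of its full shape (the shard name and config server string are hypothetical):

// Lives in admin.system.version under _id 'shardIdentity'. A
// shard-aware mongod reads it at startup to learn which shard it is
// and where the config servers are.
var shardIdentityDoc = {
    _id: 'shardIdentity',
    shardName: 'shard0000',
    clusterId: ObjectId(),
    configsvrConnectionString: 'configRS/localhost:27019'
};
assert.writeOK(conn.getDB('admin').system.version.update(
    {_id: 'shardIdentity'}, shardIdentityDoc, true /* upsert */));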
diff --git a/jstests/sharding/shard_aware_primary_failover.js b/jstests/sharding/shard_aware_primary_failover.js
index 0d939c6e1ea..127e74b948c 100644
--- a/jstests/sharding/shard_aware_primary_failover.js
+++ b/jstests/sharding/shard_aware_primary_failover.js
@@ -29,8 +29,8 @@
clusterId: ObjectId()
};
- assert.writeOK(primaryConn.getDB('admin')
- .system.version.insert(shardIdentityDoc, {writeConcern: {w: 'majority'}}));
+ assert.writeOK(primaryConn.getDB('admin').system.version.insert(
+ shardIdentityDoc, {writeConcern: {w: 'majority'}}));
replTest.stopMaster();
replTest.waitForMaster();
diff --git a/jstests/sharding/shard_identity_config_update.js b/jstests/sharding/shard_identity_config_update.js
index 4eb142d7f20..678a04c79fa 100644
--- a/jstests/sharding/shard_identity_config_update.js
+++ b/jstests/sharding/shard_identity_config_update.js
@@ -19,8 +19,8 @@
clusterId: ObjectId()
};
- var res = conn.getDB('admin')
- .system.version.update({_id: 'shardIdentity'}, shardIdentityDoc, true);
+ var res = conn.getDB('admin').system.version.update(
+ {_id: 'shardIdentity'}, shardIdentityDoc, true);
assert.eq(1, res.nUpserted);
};
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index d2fad545bba..22a8aaa6210 100644
--- a/jstests/sharding/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -39,16 +39,12 @@
assert.lt(20, diff1(), "big differential here");
print(diff1());
- assert.soon(
- function() {
- var d = diff1();
- return d < 5;
- // Make sure there's enough time here, since balancing can sleep for 15s or so between
- // balances.
- },
- "balance didn't happen",
- 1000 * 60 * 5,
- 5000);
+ assert.soon(function() {
+ var d = diff1();
+ return d < 5;
+ // Make sure there's enough time here, since balancing can sleep for 15s or so between
+ // balances.
+ }, "balance didn't happen", 1000 * 60 * 5, 5000);
s.stop();
})();
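assert.soon, compacted above, is the shell's polling primitive; a minimal sketch of its signature as these tests use it:

// assert.soon(func, msg, timeout, interval): re-run func every
// `interval` ms until it returns truthy, failing with `msg` once
// `timeout` ms elapse. The balancer check allows five minutes because
// a balancing round can sleep ~15s between passes.
assert.soon(function() {
    return diff1() < 5;   // diff1() is the shard-count delta defined in the test
}, "balance didn't happen", 1000 * 60 * 5, 5000);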
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index 91c9d5ce9ad..170448cbf11 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -35,13 +35,9 @@
//
function doUpdate(bulk, includeString, optionalId) {
- var up = {
- $inc: {x: 1}
- };
+ var up = {$inc: {x: 1}};
if (includeString) {
- up["$set"] = {
- s: bigString
- };
+ up["$set"] = {s: bigString};
}
var myid = optionalId == undefined ? Random.randInt(N) : optionalId;
bulk.find({_id: myid}).upsert().update(up);
diff --git a/jstests/sharding/sharding_options.js b/jstests/sharding/sharding_options.js
index 0841967b18e..333b53e9cfd 100644
--- a/jstests/sharding/sharding_options.js
+++ b/jstests/sharding/sharding_options.js
@@ -4,9 +4,7 @@ load('jstests/libs/command_line/test_parsed_options.js');
// Move Paranoia
jsTest.log("Testing \"moveParanoia\" command line option");
-var expectedResult = {
- "parsed": {"sharding": {"archiveMovedChunks": true}}
-};
+var expectedResult = {"parsed": {"sharding": {"archiveMovedChunks": true}}};
testGetCmdLineOptsMongod({moveParanoia: ""}, expectedResult);
jsTest.log("Testing \"noMoveParanoia\" command line option");
@@ -51,9 +49,7 @@ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/set_shardingrole.js
// Auto Splitting
jsTest.log("Testing \"noAutoSplit\" command line option");
-var expectedResult = {
- "parsed": {"sharding": {"autoSplit": false}}
-};
+var expectedResult = {"parsed": {"sharding": {"autoSplit": false}}};
testGetCmdLineOptsMongos({noAutoSplit: ""}, expectedResult);
jsTest.log("Testing \"sharding.autoSplit\" config file option");
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index 7c323ac5d44..ed15592a9b2 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -19,10 +19,16 @@
other: {
chunkSize: 1,
rs0: {
- nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
},
rs1: {
- nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
}
}
});
diff --git a/jstests/sharding/sharding_state_after_stepdown.js b/jstests/sharding/sharding_state_after_stepdown.js
index 3007b4b08a2..6bd2f4927cc 100644
--- a/jstests/sharding/sharding_state_after_stepdown.js
+++ b/jstests/sharding/sharding_state_after_stepdown.js
@@ -38,22 +38,12 @@
st.rs0.stop(rs0Primary);
st.rs1.stop(rs1Primary);
- ReplSetTest.awaitRSClientHosts(mongos,
- [rs0Primary, rs1Primary],
- {
- ok:
- false
- });
+ ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], {ok: false});
st.rs0.start(rs0Primary, Object.extend(rs0Primary.savedOptions, {restart: true}));
st.rs1.start(rs1Primary, Object.extend(rs1Primary.savedOptions, {restart: true}));
- ReplSetTest.awaitRSClientHosts(mongos,
- [rs0Primary, rs1Primary],
- {
- ismaster:
- true
- });
+ ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], {ismaster: true});
};
restartPrimaries();
@@ -109,12 +99,7 @@
// Expected connection exception, will check for stepdown later
}
- ReplSetTest.awaitRSClientHosts(mongos,
- [rs0Primary, rs1Primary],
- {
- secondary:
- true
- });
+ ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], {secondary: true});
assert.commandWorked(new Mongo(rs0Primary.host).adminCommand({replSetFreeze: 0}));
assert.commandWorked(new Mongo(rs1Primary.host).adminCommand({replSetFreeze: 0}));
@@ -126,12 +111,7 @@
assert.commandWorked(rs0Primary.adminCommand({connPoolSync: true}));
assert.commandWorked(rs1Primary.adminCommand({connPoolSync: true}));
- ReplSetTest.awaitRSClientHosts(mongos,
- [rs0Primary, rs1Primary],
- {
- ismaster:
- true
- });
+ ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], {ismaster: true});
};
stepDownPrimaries();
@@ -140,30 +120,26 @@
//
// No sharding metadata until shards are hit by a metadata operation
assert.eq({},
- st.rs0.getPrimary().adminCommand({
- getShardVersion: collSharded.toString(),
- fullMetadata: true
- }).metadata);
+ st.rs0.getPrimary()
+ .adminCommand({getShardVersion: collSharded.toString(), fullMetadata: true})
+ .metadata);
assert.eq({},
- st.rs1.getPrimary().adminCommand({
- getShardVersion: collSharded.toString(),
- fullMetadata: true
- }).metadata);
+ st.rs1.getPrimary()
+ .adminCommand({getShardVersion: collSharded.toString(), fullMetadata: true})
+ .metadata);
//
//
// Metadata commands should enable sharding data implicitly
assert.commandWorked(mongos.adminCommand({split: collSharded.toString(), middle: {_id: 0}}));
assert.eq({},
- st.rs0.getPrimary().adminCommand({
- getShardVersion: collSharded.toString(),
- fullMetadata: true
- }).metadata);
+ st.rs0.getPrimary()
+ .adminCommand({getShardVersion: collSharded.toString(), fullMetadata: true})
+ .metadata);
assert.neq({},
- st.rs1.getPrimary().adminCommand({
- getShardVersion: collSharded.toString(),
- fullMetadata: true
- }).metadata);
+ st.rs1.getPrimary()
+ .adminCommand({getShardVersion: collSharded.toString(), fullMetadata: true})
+ .metadata);
//
//
@@ -171,15 +147,13 @@
assert.commandWorked(mongos.adminCommand(
{moveChunk: collSharded.toString(), find: {_id: 0}, to: shards[0]._id}));
assert.neq({},
- st.rs0.getPrimary().adminCommand({
- getShardVersion: collSharded.toString(),
- fullMetadata: true
- }).metadata);
+ st.rs0.getPrimary()
+ .adminCommand({getShardVersion: collSharded.toString(), fullMetadata: true})
+ .metadata);
assert.neq({},
- st.rs1.getPrimary().adminCommand({
- getShardVersion: collSharded.toString(),
- fullMetadata: true
- }).metadata);
+ st.rs1.getPrimary()
+ .adminCommand({getShardVersion: collSharded.toString(), fullMetadata: true})
+ .metadata);
st.stop();
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index 57bae9dc390..5cf8dcfd901 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -54,10 +54,9 @@
z = db.data.find().sort({'sub.num': 1}).toArray();
}, 200);
assert.eq(100, z.length, "C1");
- b = 1.5 *
- Date.timeFunc(function() {
- z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
- }, 200);
+ b = 1.5 * Date.timeFunc(function() {
+ z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
+ }, 200);
assert.eq(67, z.length, "C2");
print("a: " + a + " b:" + b + " mongos slow down: " + Math.ceil(100 * ((a - b) / b)) + "%");
diff --git a/jstests/sharding/split_large_key.js b/jstests/sharding/split_large_key.js
index 5a8fe060c67..0468fce757b 100644
--- a/jstests/sharding/split_large_key.js
+++ b/jstests/sharding/split_large_key.js
@@ -38,10 +38,7 @@
tests.forEach(function(test) {
var collName = "split_large_key_" + test.name;
var midKey = {};
- var chunkKeys = {
- min: {},
- max: {}
- };
+ var chunkKeys = {min: {}, max: {}};
for (var k in test.key) {
// new Array with join creates string length 1 less than size, so add 1
midKey[k] = new Array(test.keyFieldSize + 1).join('a');
diff --git a/jstests/sharding/split_with_force.js b/jstests/sharding/split_with_force.js
index c66d2f145eb..35e25b5803e 100644
--- a/jstests/sharding/split_with_force.js
+++ b/jstests/sharding/split_with_force.js
@@ -40,13 +40,15 @@ jsTest.log("Get split points of the chunk using force : true...");
var maxChunkSizeBytes = 1024 * 1024;
-var splitKeys = shardAdmin.runCommand({
- splitVector: coll + "",
- keyPattern: {_id: 1},
- min: {_id: 0},
- max: {_id: MaxKey},
- force: true
-}).splitKeys;
+var splitKeys = shardAdmin
+ .runCommand({
+ splitVector: coll + "",
+ keyPattern: {_id: 1},
+ min: {_id: 0},
+ max: {_id: MaxKey},
+ force: true
+ })
+ .splitKeys;
printjson(splitKeys);
printjson(coll.stats());
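splitVector, reflowed above, is the shard-local command that computes split points; a sketch of a direct call, assuming a connection to the shard's admin database ("test.foo" is illustrative):

// With force: true the command is asked for the middle of the given
// range rather than size-based split points, which is presumably why
// the test passes no maxChunkSize in this call.
var res = shardAdmin.runCommand({
    splitVector: "test.foo",
    keyPattern: {_id: 1},
    min: {_id: 0},
    max: {_id: MaxKey},
    force: true
});
printjson(res.splitKeys);   // array of split-point keys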
diff --git a/jstests/sharding/stale_mongos_updates_and_removes.js b/jstests/sharding/stale_mongos_updates_and_removes.js
index 791120f6f82..d5d03fcb442 100644
--- a/jstests/sharding/stale_mongos_updates_and_removes.js
+++ b/jstests/sharding/stale_mongos_updates_and_removes.js
@@ -76,12 +76,8 @@ var makeStaleMongosTargetSingleShard = function() {
};
var checkAllRemoveQueries = function(makeMongosStaleFunc) {
- var multi = {
- justOne: false
- };
- var single = {
- justOne: true
- };
+ var multi = {justOne: false};
+ var single = {justOne: true};
var doRemove = function(query, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
@@ -119,23 +115,12 @@ var checkAllRemoveQueries = function(makeMongosStaleFunc) {
};
var checkAllUpdateQueries = function(makeMongosStaleFunc) {
- var oUpdate = {
- $inc: {fieldToUpdate: 1}
- }; // op-style update (non-idempotent)
- var rUpdate = {
- x: 0,
- fieldToUpdate: 1
- }; // replacement-style update (idempotent)
- var queryAfterUpdate = {
- fieldToUpdate: 1
- };
+ var oUpdate = {$inc: {fieldToUpdate: 1}}; // op-style update (non-idempotent)
+ var rUpdate = {x: 0, fieldToUpdate: 1}; // replacement-style update (idempotent)
+ var queryAfterUpdate = {fieldToUpdate: 1};
- var multi = {
- multi: true
- };
- var single = {
- multi: false
- };
+ var multi = {multi: true};
+ var single = {multi: false};
var doUpdate = function(query, update, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
@@ -200,20 +185,14 @@ var freshMongos = st.s0;
var staleMongos = st.s1;
var emptyQuery = {};
-var pointQuery = {
- x: 0
-};
+var pointQuery = {x: 0};
// Choose a range that would fall on only one shard.
// Use (splitPoint - 1) because of SERVER-20768.
-var rangeQuery = {
- x: {$gte: 0, $lt: splitPoint - 1}
-};
+var rangeQuery = {x: {$gte: 0, $lt: splitPoint - 1}};
// Choose points that would fall on two different shards.
-var multiPointQuery = {
- $or: [{x: 0}, {x: numShardKeys}]
-};
+var multiPointQuery = {$or: [{x: 0}, {x: numShardKeys}]};
checkAllRemoveQueries(makeStaleMongosTargetSingleShard);
checkAllRemoveQueries(makeStaleMongosTargetMultipleShards);
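The oUpdate/rUpdate split above is about retry safety; a minimal sketch of the difference, with an illustrative collection name:

// Replacement-style: applying it twice yields the same document, so a
// retried write through a stale mongos is harmless (idempotent).
db.coll.update({x: 0}, {x: 0, fieldToUpdate: 1});

// Op-style: every application increments again, so a retry changes
// the outcome (non-idempotent).
db.coll.update({x: 0}, {$inc: {fieldToUpdate: 1}});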
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index da6d842fb99..36ce75b520e 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -192,9 +192,7 @@
}
// indexDetailsKey - show indexDetails results for this index key only.
- var indexKey = {
- a: 1
- };
+ var indexKey = {a: 1};
var indexName = getIndexName(indexKey);
checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index 74a3e942cae..b4bf2c96e60 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -72,10 +72,7 @@ function runTest(test) {
// Insert one doc at a time until first auto-split occurs on top chunk
var xval = test.inserts.value;
do {
- var doc = {
- x: xval,
- val: largeStr
- };
+ var doc = {x: xval, val: largeStr};
coll.insert(doc);
xval += test.inserts.inc;
} while (getNumberOfChunks(configDB) <= numChunks);
@@ -108,44 +105,17 @@ var configDB = st.s.getDB('config');
// Define shard key ranges for each of the shard nodes
var MINVAL = -500;
var MAXVAL = 1500;
-var lowChunkRange = {
- min: MINVAL,
- max: 0
-};
-var midChunkRange1 = {
- min: 0,
- max: 500
-};
-var midChunkRange2 = {
- min: 500,
- max: 1000
-};
-var highChunkRange = {
- min: 1000,
- max: MAXVAL
-};
-
-var lowChunkTagRange = {
- min: MinKey,
- max: 0
-};
-var highChunkTagRange = {
- min: 1000,
- max: MaxKey
-};
-
-var lowChunkInserts = {
- value: 0,
- inc: -1
-};
-var midChunkInserts = {
- value: 1,
- inc: 1
-};
-var highChunkInserts = {
- value: 1000,
- inc: 1
-};
+var lowChunkRange = {min: MINVAL, max: 0};
+var midChunkRange1 = {min: 0, max: 500};
+var midChunkRange2 = {min: 500, max: 1000};
+var highChunkRange = {min: 1000, max: MAXVAL};
+
+var lowChunkTagRange = {min: MinKey, max: 0};
+var highChunkTagRange = {min: 1000, max: MaxKey};
+
+var lowChunkInserts = {value: 0, inc: -1};
+var midChunkInserts = {value: 1, inc: 1};
+var highChunkInserts = {value: 1000, inc: 1};
var lowChunk = 1;
var highChunk = -1;
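The lowChunkTagRange/highChunkTagRange pairs above use MinKey/MaxKey so a tag covers everything below or above the data; a hedged sketch of how such ranges are attached to shards (the shard id and tag name are hypothetical):

// Shell helpers for tag-aware balancing in this server generation.
sh.addShardTag("shard0000", "low");
sh.addTagRange("test.user", {x: MinKey}, {x: 0}, "low");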
diff --git a/jstests/slow1/mr_during_migrate.js b/jstests/slow1/mr_during_migrate.js
index cb439aeb241..61a06f11249 100644
--- a/jstests/slow1/mr_during_migrate.js
+++ b/jstests/slow1/mr_during_migrate.js
@@ -83,9 +83,7 @@ for (var t = 0; t < numTests; t++) {
var total = 0;
for (var i = 0; i < vals.length; i++)
total += vals[i].c;
- return {
- c: total
- };
+ return {c: total};
};
printjson(coll.find({_id: 0}).itcount());
diff --git a/jstests/slow1/remove_during_mr.js b/jstests/slow1/remove_during_mr.js
index 9b632a11a56..508a17e6f2a 100644
--- a/jstests/slow1/remove_during_mr.js
+++ b/jstests/slow1/remove_during_mr.js
@@ -20,10 +20,7 @@ function client2() {
}
for (var i = 0; i < 1000; i++) {
- var options = {
- out: {replace: 'bar'},
- sort: {_id: -1}
- };
+ var options = {out: {replace: 'bar'}, sort: {_id: -1}};
db.remove_during_mr.mapReduce(mapper, reducer, options);
}
diff --git a/jstests/ssl/disable_x509.js b/jstests/ssl/disable_x509.js
index 5663f6a6196..c386b84c591 100644
--- a/jstests/ssl/disable_x509.js
+++ b/jstests/ssl/disable_x509.js
@@ -29,9 +29,12 @@ if (cmdOut.ok) {
});
// Localhost exception should not be in place anymore
- assert.throws(function() {
- test.foo.findOne();
- }, {}, "read without login");
+ assert.throws(
+ function() {
+ test.foo.findOne();
+ },
+ {},
+ "read without login");
assert(external.auth({user: CLIENT_USER, mechanism: 'MONGODB-X509'}),
"authentication with valid user failed");
diff --git a/jstests/ssl/libs/ssl_helpers.js b/jstests/ssl/libs/ssl_helpers.js
index 8067076610e..50463d8dec9 100644
--- a/jstests/ssl/libs/ssl_helpers.js
+++ b/jstests/ssl/libs/ssl_helpers.js
@@ -9,9 +9,7 @@ var CLIENT_CERT = "jstests/libs/client.pem";
// Note: "sslAllowInvalidCertificates" is enabled to avoid
// hostname conflicts with our testing certificates
-var disabled = {
- sslMode: "disabled"
-};
+var disabled = {sslMode: "disabled"};
var allowSSL = {
sslMode: "allowSSL",
sslAllowInvalidCertificates: "",
@@ -81,10 +79,13 @@ function testShardedLookup(shardingTest) {
assert.writeOK(fooBulk.execute());
assert.writeOK(barBulk.execute());
- var docs = lookupdb.foo.aggregate([
- {$sort: {_id: 1}},
- {$lookup: {from: "bar", localField: "_id", foreignField: "_id", as: "bar_docs"}}
- ]).toArray();
+ var docs =
+ lookupdb.foo
+ .aggregate([
+ {$sort: {_id: 1}},
+ {$lookup: {from: "bar", localField: "_id", foreignField: "_id", as: "bar_docs"}}
+ ])
+ .toArray();
assert.eq(lookupShouldReturn, docs, "error $lookup failed in this configuration");
assert.commandWorked(lookupdb.dropDatabase());
}
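The $lookup pipeline being reindented above performs a left outer join from foo into bar; a minimal sketch of the stage on its own, keeping the test's collection names:

// For each foo document, collect the bar documents whose _id matches
// and embed them as the array field "bar_docs".
var docs = db.foo.aggregate([
    {$sort: {_id: 1}},
    {$lookup: {from: "bar", localField: "_id", foreignField: "_id", as: "bar_docs"}}
]).toArray();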
diff --git a/jstests/ssl/ssl_hostname_validation.js b/jstests/ssl/ssl_hostname_validation.js
index ab727320744..9801e14807c 100644
--- a/jstests/ssl/ssl_hostname_validation.js
+++ b/jstests/ssl/ssl_hostname_validation.js
@@ -8,8 +8,8 @@ var CLIENT_CERT = "jstests/libs/client.pem";
var BAD_SAN_CERT = "jstests/libs/badSAN.pem";
function testCombination(certPath, allowInvalidHost, allowInvalidCert, shouldSucceed) {
- var mongod = MongoRunner.runMongod(
- {sslMode: "requireSSL", sslPEMKeyFile: certPath, sslCAFile: CA_CERT});
+ var mongod =
+ MongoRunner.runMongod({sslMode: "requireSSL", sslPEMKeyFile: certPath, sslCAFile: CA_CERT});
var mongo;
if (allowInvalidCert) {
diff --git a/jstests/ssl/ssl_without_ca.js b/jstests/ssl/ssl_without_ca.js
index 0e865cc5fee..2255e019883 100644
--- a/jstests/ssl/ssl_without_ca.js
+++ b/jstests/ssl/ssl_without_ca.js
@@ -36,11 +36,7 @@ MongoRunner.stopMongod(conn.port);
jsTest.log("Assert mongod doesn\'t start with CA file missing and clusterAuthMode=x509.");
-var sslParams = {
- clusterAuthMode: 'x509',
- sslMode: 'requireSSL',
- sslPEMKeyFile: SERVER_CERT
-};
+var sslParams = {clusterAuthMode: 'x509', sslMode: 'requireSSL', sslPEMKeyFile: SERVER_CERT};
var conn = MongoRunner.runMongod(sslParams);
assert.isnull(conn, "server started with x509 clusterAuthMode but no CA file");
diff --git a/jstests/ssl/upgrade_to_x509_ssl.js b/jstests/ssl/upgrade_to_x509_ssl.js
index e9d79859077..2072d6c51c5 100644
--- a/jstests/ssl/upgrade_to_x509_ssl.js
+++ b/jstests/ssl/upgrade_to_x509_ssl.js
@@ -44,17 +44,16 @@ assert.eq(2, rstConn1.getDB("test").a.count(), "Error interacting with replSet")
print("===== UPGRADE allowSSL,sendKeyfile -> preferSSL,sendX509 =====");
authAllNodes();
rst.awaitReplication();
-rst.upgradeSet(
- {
- sslMode: "preferSSL",
- sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates: "",
- clusterAuthMode: "sendX509",
- keyFile: KEYFILE,
- sslCAFile: CA_CERT
- },
- "root",
- "pwd");
+rst.upgradeSet({
+ sslMode: "preferSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ clusterAuthMode: "sendX509",
+ keyFile: KEYFILE,
+ sslCAFile: CA_CERT
+},
+ "root",
+ "pwd");
// The upgradeSet call restarts the nodes so we need to reauthenticate.
authAllNodes();
var rstConn3 = rst.getPrimary();
@@ -66,17 +65,16 @@ var canConnectNoSSL = runMongoProgram("mongo", "--port", rst.ports[0], "--eval",
assert.eq(0, canConnectNoSSL, "SSL Connection attempt failed when it should succeed");
print("===== UPGRADE preferSSL,sendX509 -> requireSSL,x509 =====");
-rst.upgradeSet(
- {
- sslMode: "requireSSL",
- sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates: "",
- clusterAuthMode: "x509",
- keyFile: KEYFILE,
- sslCAFile: CA_CERT
- },
- "root",
- "pwd");
+rst.upgradeSet({
+ sslMode: "requireSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ clusterAuthMode: "x509",
+ keyFile: KEYFILE,
+ sslCAFile: CA_CERT
+},
+ "root",
+ "pwd");
authAllNodes();
var rstConn4 = rst.getPrimary();
rstConn4.getDB("test").a.insert({a: 4, str: "TESTTESTTEST"});
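The two upgradeSet calls above are consecutive rungs of the rolling SSL upgrade; a hedged sketch of the whole ladder these tests climb:

// One rolling restart of the replica set per step:
//   disabled  -> allowSSL   (accept SSL, still talk plain)
//   allowSSL  -> preferSSL  (use SSL between members)
//   preferSSL -> requireSSL (refuse non-SSL connections)
// clusterAuthMode advances in lockstep:
//   keyFile -> sendKeyFile -> sendX509 -> x509
rst.upgradeSet({sslMode: "requireSSL", sslPEMKeyFile: SERVER_CERT, clusterAuthMode: "x509",
                keyFile: KEYFILE, sslCAFile: CA_CERT},
               "root", "pwd");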
diff --git a/jstests/ssl/x509_client.js b/jstests/ssl/x509_client.js
index 82c726fc0fd..8c3b7254198 100644
--- a/jstests/ssl/x509_client.js
+++ b/jstests/ssl/x509_client.js
@@ -29,16 +29,22 @@ function authAndTest(mongo) {
test = mongo.getDB("test");
// It should be impossible to create users with the same name as the server's subject
- assert.throws(function() {
- external.createUser(
- {user: SERVER_USER, roles: [{'role': 'userAdminAnyDatabase', 'db': 'admin'}]});
- }, {}, "Created user with same name as the server's x.509 subject");
+ assert.throws(
+ function() {
+ external.createUser(
+ {user: SERVER_USER, roles: [{'role': 'userAdminAnyDatabase', 'db': 'admin'}]});
+ },
+ {},
+ "Created user with same name as the server's x.509 subject");
// It should be impossible to create users with names recognized as cluster members
- assert.throws(function() {
- external.createUser(
- {user: INTERNAL_USER, roles: [{'role': 'userAdminAnyDatabase', 'db': 'admin'}]});
- }, {}, "Created user which would be recognized as a cluster member");
+ assert.throws(
+ function() {
+ external.createUser(
+ {user: INTERNAL_USER, roles: [{'role': 'userAdminAnyDatabase', 'db': 'admin'}]});
+ },
+ {},
+ "Created user which would be recognized as a cluster member");
// Add user using localhost exception
external.createUser({
@@ -56,9 +62,12 @@ function authAndTest(mongo) {
});
// Localhost exception should not be in place anymore
- assert.throws(function() {
- test.foo.findOne();
- }, {}, "read without login");
+ assert.throws(
+ function() {
+ test.foo.findOne();
+ },
+ {},
+ "read without login");
assert(!external.auth({user: INVALID_CLIENT_USER, mechanism: 'MONGODB-X509'}),
"authentication with invalid user failed");
@@ -71,17 +80,16 @@ function authAndTest(mongo) {
test.foo.findOne();
external.logout();
- assert.throws(function() {
- test.foo.findOne();
- }, {}, "read after logout");
+ assert.throws(
+ function() {
+ test.foo.findOne();
+ },
+ {},
+ "read after logout");
}
print("1. Testing x.509 auth to mongod");
-var x509_options = {
- sslMode: "requireSSL",
- sslPEMKeyFile: SERVER_CERT,
- sslCAFile: CA_CERT
-};
+var x509_options = {sslMode: "requireSSL", sslPEMKeyFile: SERVER_CERT, sslCAFile: CA_CERT};
var mongo = MongoRunner.runMongod(Object.merge(x509_options, {auth: ""}));
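assert.throws, expanded above into one argument per line, takes three arguments; a minimal sketch of the helper's shape as these tests use it:

// assert.throws(func, params, msg): fails with msg unless func throws
// when invoked; params is an argument list for func (the tests above
// pass {} because the closures take no arguments).
assert.throws(function() {
    test.foo.findOne();   // expected to fail: no authenticated user
}, [], "read without login");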
diff --git a/jstests/sslSpecial/mixed_mode_sharded_transition_nossl.js b/jstests/sslSpecial/mixed_mode_sharded_transition_nossl.js
index 4978b56e11f..85f775c3b6b 100644
--- a/jstests/sslSpecial/mixed_mode_sharded_transition_nossl.js
+++ b/jstests/sslSpecial/mixed_mode_sharded_transition_nossl.js
@@ -12,9 +12,7 @@ load('jstests/ssl/libs/ssl_helpers.js');
'use strict';
// Disable auth explicitly
- var noAuthOptions = {
- noauth: ''
- };
+ var noAuthOptions = {noauth: ''};
var transitionToX509AllowSSL =
Object.merge(allowSSL, {transitionToAuth: '', clusterAuthMode: 'x509'});
var x509RequireSSL = Object.merge(requireSSL, {clusterAuthMode: 'x509'});
diff --git a/jstests/sslSpecial/ssl_mixedmode.js b/jstests/sslSpecial/ssl_mixedmode.js
index 085740f0a26..a9eefa16a2c 100644
--- a/jstests/sslSpecial/ssl_mixedmode.js
+++ b/jstests/sslSpecial/ssl_mixedmode.js
@@ -6,9 +6,7 @@
load("jstests/libs/ssl_test.js");
function testCombination(sslMode, sslShell, shouldSucceed) {
- var serverOptionOverrides = {
- sslMode: sslMode
- };
+ var serverOptionOverrides = {sslMode: sslMode};
var clientOptions =
sslShell ? SSLTest.prototype.defaultSSLClientOptions : SSLTest.prototype.noSSLClientOptions;
diff --git a/jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js b/jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js
index 964397397de..9e56c43f81e 100644
--- a/jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js
+++ b/jstests/sslSpecial/upgrade_noauth_to_x509_nossl.js
@@ -16,9 +16,7 @@ load('jstests/ssl/libs/ssl_helpers.js');
var dbName = 'upgradeToX509';
// Disable auth explicitly
- var noAuth = {
- noauth: ''
- };
+ var noAuth = {noauth: ''};
// Undefine the flags we're replacing, otherwise upgradeSet will keep old values.
var transitionToX509AllowSSL =
diff --git a/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js b/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js
index ec29c991e9d..47cc303069c 100644
--- a/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js
+++ b/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js
@@ -38,17 +38,16 @@ assert.eq(1, rstConn1.getDB("test").a.count(), "Error interacting with replSet")
print("===== UPGRADE disabled,keyFile -> allowSSL,sendKeyfile =====");
authAllNodes();
-rst.upgradeSet(
- {
- sslMode: "allowSSL",
- sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates: "",
- clusterAuthMode: "sendKeyFile",
- keyFile: KEYFILE,
- sslCAFile: CA_CERT
- },
- "root",
- "pwd");
+rst.upgradeSet({
+ sslMode: "allowSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ clusterAuthMode: "sendKeyFile",
+ keyFile: KEYFILE,
+ sslCAFile: CA_CERT
+},
+ "root",
+ "pwd");
authAllNodes();
rst.awaitReplication();
@@ -57,17 +56,16 @@ rstConn2.getDB("test").a.insert({a: 2, str: "CHECKCHECKCHECK"});
assert.eq(2, rstConn2.getDB("test").a.count(), "Error interacting with replSet");
print("===== UPGRADE allowSSL,sendKeyfile -> preferSSL,sendX509 =====");
-rst.upgradeSet(
- {
- sslMode: "preferSSL",
- sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates: "",
- clusterAuthMode: "sendX509",
- keyFile: KEYFILE,
- sslCAFile: CA_CERT
- },
- "root",
- "pwd");
+rst.upgradeSet({
+ sslMode: "preferSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ clusterAuthMode: "sendX509",
+ keyFile: KEYFILE,
+ sslCAFile: CA_CERT
+},
+ "root",
+ "pwd");
authAllNodes();
rst.awaitReplication();
@@ -88,17 +86,16 @@ assert.eq(0, canConnectSSL, "SSL Connection attempt failed when it should succee
print("===== UPGRADE preferSSL,sendX509 -> preferSSL,x509 =====");
// we cannot upgrade past preferSSL here because it will break the test client
-rst.upgradeSet(
- {
- sslMode: "preferSSL",
- sslPEMKeyFile: SERVER_CERT,
- sslAllowInvalidCertificates: "",
- clusterAuthMode: "x509",
- keyFile: KEYFILE,
- sslCAFile: CA_CERT
- },
- "root",
- "pwd");
+rst.upgradeSet({
+ sslMode: "preferSSL",
+ sslPEMKeyFile: SERVER_CERT,
+ sslAllowInvalidCertificates: "",
+ clusterAuthMode: "x509",
+ keyFile: KEYFILE,
+ sslCAFile: CA_CERT
+},
+ "root",
+ "pwd");
authAllNodes();
rst.awaitReplication();
var rstConn4 = rst.getPrimary();
diff --git a/jstests/tool/csvimport1.js b/jstests/tool/csvimport1.js
index 28258bbe37f..0bcdcc8e789 100644
--- a/jstests/tool/csvimport1.js
+++ b/jstests/tool/csvimport1.js
@@ -17,8 +17,7 @@ base.push({
});
base.push({
a: 3,
- b:
- " This line contains the empty string and has leading and trailing whitespace inside the quotes! ",
+ b: " This line contains the empty string and has leading and trailing whitespace inside the quotes! ",
"c": ""
});
base.push({a: 4, b: "", "c": "How are empty entries handled?"});
diff --git a/jstests/tool/dumprestore7.js b/jstests/tool/dumprestore7.js
index 0598e73c0a8..9235dd9c450 100644
--- a/jstests/tool/dumprestore7.js
+++ b/jstests/tool/dumprestore7.js
@@ -48,14 +48,13 @@ step("try mongodump with $timestamp");
var data = MongoRunner.dataDir + "/dumprestore7-dump1/";
var query = "{\"ts\":{\"$gt\":{\"$timestamp\":{\"t\":" + time.ts.t + ",\"i\":" + time.ts.i + "}}}}";
-MongoRunner.runMongoTool("mongodump",
- {
- "host": "127.0.0.1:" + replTest.ports[0],
- "db": "local",
- "collection": "oplog.rs",
- "query": query,
- "out": data
- });
+MongoRunner.runMongoTool("mongodump", {
+ "host": "127.0.0.1:" + replTest.ports[0],
+ "db": "local",
+ "collection": "oplog.rs",
+ "query": query,
+ "out": data
+});
step("try mongorestore from $timestamp");
diff --git a/jstests/tool/dumprestore8.js b/jstests/tool/dumprestore8.js
index 9cdae87df80..4e76e8bd60c 100644
--- a/jstests/tool/dumprestore8.js
+++ b/jstests/tool/dumprestore8.js
@@ -31,9 +31,8 @@ db.bar.ensureIndex({x: 1});
barDocCount = db.bar.count();
assert.gt(barDocCount, 0, "No documents inserted");
assert.lt(db.bar.count(), 1000, "Capped collection didn't evict documents");
-assert.eq(5,
- db.foo.getIndexes().length + db.bar.getIndexes().length,
- "Indexes weren't created right");
+assert.eq(
+ 5, db.foo.getIndexes().length + db.bar.getIndexes().length, "Indexes weren't created right");
// Full dump/restore
diff --git a/jstests/tool/dumprestoreWithNoOptions.js b/jstests/tool/dumprestoreWithNoOptions.js
index 1062abd1e94..a1782d3ff7c 100644
--- a/jstests/tool/dumprestoreWithNoOptions.js
+++ b/jstests/tool/dumprestoreWithNoOptions.js
@@ -24,11 +24,7 @@ db.dropDatabase();
var defaultFlags = {};
-var options = {
- capped: true,
- size: 4096,
- autoIndexId: true
-};
+var options = {capped: true, size: 4096, autoIndexId: true};
db.createCollection('capped', options);
assert.eq(1, db.capped.getIndexes().length, "auto index not created");
var cappedOptions = db.capped.exists().options;
@@ -58,11 +54,7 @@ assert.eq(defaultFlags,
// Dump/restore single DB
db.dropDatabase();
-var options = {
- capped: true,
- size: 4096,
- autoIndexId: true
-};
+var options = {capped: true, size: 4096, autoIndexId: true};
db.createCollection('capped', options);
assert.eq(1, db.capped.getIndexes().length, "auto index not created");
var cappedOptions = db.capped.exists().options;
@@ -92,11 +84,7 @@ assert.eq(defaultFlags,
// Dump/restore single collection
db.dropDatabase();
-var options = {
- capped: true,
- size: 4096,
- autoIndexId: true
-};
+var options = {capped: true, size: 4096, autoIndexId: true};
db.createCollection('capped', options);
assert.eq(1, db.capped.getIndexes().length, "auto index not created");
var cappedOptions = db.capped.exists().options;
diff --git a/jstests/tool/dumprestore_auth3.js b/jstests/tool/dumprestore_auth3.js
index b59c5c1a951..8fb70d2a91b 100644
--- a/jstests/tool/dumprestore_auth3.js
+++ b/jstests/tool/dumprestore_auth3.js
@@ -4,9 +4,7 @@
// Runs the tool with the given name against the given mongod.
function runTool(toolName, mongod, options) {
- var opts = {
- host: mongod.host
- };
+ var opts = {host: mongod.host};
Object.extend(opts, options);
MongoRunner.runMongoTool(toolName, opts);
}
@@ -138,15 +136,13 @@ var dumpRestoreAuth3 = function(backup_role, restore_role) {
jsTestLog("Restore foo database (and user data) with --drop so it overrides the changes made");
// Restore with --drop to override the changes to user data
- runTool("mongorestore",
- mongod,
- {
- dir: dumpDir + "foo/",
- db: 'foo',
- drop: "",
- restoreDbUsersAndRoles: "",
- writeConcern: "0"
- });
+ runTool("mongorestore", mongod, {
+ dir: dumpDir + "foo/",
+ db: 'foo',
+ drop: "",
+ restoreDbUsersAndRoles: "",
+ writeConcern: "0"
+ });
db = mongod.getDB('foo');
admindb = mongod.getDB('admin');
diff --git a/jstests/tool/dumprestore_excludecollections.js b/jstests/tool/dumprestore_excludecollections.js
index 4563b8ffc03..c3f18065ce3 100644
--- a/jstests/tool/dumprestore_excludecollections.js
+++ b/jstests/tool/dumprestore_excludecollections.js
@@ -23,37 +23,35 @@ ret = MongoRunner.runMongoTool("mongodump",
assert.neq(ret, 0, "mongodump started successfully with --excludeCollection but no --db option");
resetDbpath(dumpDir);
-ret = MongoRunner.runMongoTool("mongodump",
- {
- out: dumpDir,
- db: testBaseName,
- collection: "foo",
- excludeCollection: "test",
- host: mongodSource.host
- });
+ret = MongoRunner.runMongoTool("mongodump", {
+ out: dumpDir,
+ db: testBaseName,
+ collection: "foo",
+ excludeCollection: "test",
+ host: mongodSource.host
+});
assert.neq(ret, 0, "mongodump started successfully with --excludeCollection and --collection");
resetDbpath(dumpDir);
ret = MongoRunner.runMongoTool(
"mongodump", {out: dumpDir, excludeCollectionsWithPrefix: "test", host: mongodSource.host});
-assert.neq(ret,
- 0,
- "mongodump started successfully with --excludeCollectionsWithPrefix but " +
- "no --db option");
+assert.neq(
+ ret,
+ 0,
+ "mongodump started successfully with --excludeCollectionsWithPrefix but " + "no --db option");
resetDbpath(dumpDir);
-ret = MongoRunner.runMongoTool("mongodump",
- {
- out: dumpDir,
- db: testBaseName,
- collection: "foo",
- excludeCollectionsWithPrefix: "test",
- host: mongodSource.host
- });
-assert.neq(ret,
- 0,
- "mongodump started successfully with --excludeCollectionsWithPrefix and " +
- "--collection");
+ret = MongoRunner.runMongoTool("mongodump", {
+ out: dumpDir,
+ db: testBaseName,
+ collection: "foo",
+ excludeCollectionsWithPrefix: "test",
+ host: mongodSource.host
+});
+assert.neq(
+ ret,
+ 0,
+ "mongodump started successfully with --excludeCollectionsWithPrefix and " + "--collection");
jsTest.log("Testing proper behavior of collection exclusion");
resetDbpath(dumpDir);
@@ -75,13 +73,12 @@ assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
destDB.dropDatabase();
resetDbpath(dumpDir);
-ret = MongoRunner.runMongoTool("mongodump",
- {
- out: dumpDir,
- db: testBaseName,
- excludeCollectionsWithPrefix: "test",
- host: mongodSource.host
- });
+ret = MongoRunner.runMongoTool("mongodump", {
+ out: dumpDir,
+ db: testBaseName,
+ excludeCollectionsWithPrefix: "test",
+ host: mongodSource.host
+});
ret = MongoRunner.runMongoTool("mongorestore", {dir: dumpDir, host: mongodDest.host});
assert.eq(ret, 0, "failed to run mongodump on expected successful call");
@@ -95,14 +92,13 @@ assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
destDB.dropDatabase();
resetDbpath(dumpDir);
-ret = MongoRunner.runMongoTool("mongodump",
- {
- out: dumpDir,
- db: testBaseName,
- excludeCollection: "foo",
- excludeCollectionsWithPrefix: "test",
- host: mongodSource.host
- });
+ret = MongoRunner.runMongoTool("mongodump", {
+ out: dumpDir,
+ db: testBaseName,
+ excludeCollection: "foo",
+ excludeCollectionsWithPrefix: "test",
+ host: mongodSource.host
+});
ret = MongoRunner.runMongoTool("mongorestore", {dir: dumpDir, host: mongodDest.host});
assert.eq(ret, 0, "failed to run mongodump on expected successful call");
diff --git a/jstests/tool/exportimport_bigarray.js b/jstests/tool/exportimport_bigarray.js
index 0b801699d1b..60642b6afb1 100644
--- a/jstests/tool/exportimport_bigarray.js
+++ b/jstests/tool/exportimport_bigarray.js
@@ -11,10 +11,7 @@ dst.drop();
// Calculate the number of documents it takes to get above 16MB (here using 20MB just to be safe)
var bigString = new Array(1025).toString();
-var doc = {
- _id: new ObjectId(),
- x: bigString
-};
+var doc = {_id: new ObjectId(), x: bigString};
var docSize = Object.bsonsize(doc);
var numDocs = Math.floor(20 * 1024 * 1024 / docSize);
diff --git a/jstests/tool/tool_replset.js b/jstests/tool/tool_replset.js
index efe55b46605..3b0338a02d4 100644
--- a/jstests/tool/tool_replset.js
+++ b/jstests/tool/tool_replset.js
@@ -67,17 +67,8 @@
var x = master.getDB("foo").getCollection("bar").count();
assert.eq(x, 100, "mongoimport should have successfully imported the collection");
- var doc = {
- _id: 5,
- x: 17
- };
- var oplogEntry = {
- ts: new Timestamp(),
- "op": "i",
- "ns": "foo.bar",
- "o": doc,
- "v": NumberInt(2)
- };
+ var doc = {_id: 5, x: 17};
+ var oplogEntry = {ts: new Timestamp(), "op": "i", "ns": "foo.bar", "o": doc, "v": NumberInt(2)};
assert.writeOK(master.getDB("local").oplog.rs.insert(oplogEntry));
assert.eq(100,
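The hand-built oplogEntry above is the smallest insert record the oplog will accept; a hedged gloss of its fields:

// ts: logical Timestamp of the operation
// op: "i" insert ("u" update, "d" delete, "n" no-op, "c" command)
// ns: target namespace as "db.collection"
// o:  the document the operation carries
// v:  oplog entry version (2 in this server generation)
var oplogEntry = {ts: new Timestamp(), op: "i", ns: "foo.bar", o: {_id: 5, x: 17}, v: NumberInt(2)};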
diff --git a/src/mongo/base/data_range_cursor.h b/src/mongo/base/data_range_cursor.h
index 36eb4681eaf..862e9e8f5a0 100644
--- a/src/mongo/base/data_range_cursor.h
+++ b/src/mongo/base/data_range_cursor.h
@@ -31,8 +31,8 @@
#include <cstring>
#include <limits>
-#include "mongo/base/data_type.h"
#include "mongo/base/data_range.h"
+#include "mongo/base/data_type.h"
#include "mongo/platform/endian.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/base/data_type_string_data_test.cpp b/src/mongo/base/data_type_string_data_test.cpp
index a452bfe4447..601623124fe 100644
--- a/src/mongo/base/data_type_string_data_test.cpp
+++ b/src/mongo/base/data_type_string_data_test.cpp
@@ -28,9 +28,9 @@
#include "mongo/base/data_type_string_data.h"
-#include "mongo/base/data_type_terminated.h"
#include "mongo/base/data_range.h"
#include "mongo/base/data_range_cursor.h"
+#include "mongo/base/data_type_terminated.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
diff --git a/src/mongo/base/init.h b/src/mongo/base/init.h
index 2bae4441319..c68eb027e0f 100644
--- a/src/mongo/base/init.h
+++ b/src/mongo/base/init.h
@@ -40,11 +40,11 @@
#pragma once
+#include "mongo/base/global_initializer.h"
+#include "mongo/base/global_initializer_registerer.h"
#include "mongo/base/initializer.h"
#include "mongo/base/initializer_context.h"
#include "mongo/base/initializer_function.h"
-#include "mongo/base/global_initializer.h"
-#include "mongo/base/global_initializer_registerer.h"
#include "mongo/base/make_string_vector.h"
#include "mongo/base/status.h"
diff --git a/src/mongo/base/initializer.cpp b/src/mongo/base/initializer.cpp
index 9bd95915d4d..8de4eb41533 100644
--- a/src/mongo/base/initializer.cpp
+++ b/src/mongo/base/initializer.cpp
@@ -27,10 +27,10 @@
#include "mongo/base/initializer.h"
-#include <iostream>
+#include "mongo/base/global_initializer.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/quick_exit.h"
-#include "mongo/base/global_initializer.h"
+#include <iostream>
namespace mongo {
diff --git a/src/mongo/base/initializer_dependency_graph.h b/src/mongo/base/initializer_dependency_graph.h
index d125ddcd41d..2be99a304f9 100644
--- a/src/mongo/base/initializer_dependency_graph.h
+++ b/src/mongo/base/initializer_dependency_graph.h
@@ -28,8 +28,8 @@
#pragma once
#include <string>
-#include <vector>
#include <utility>
+#include <vector>
#include "mongo/base/disallow_copying.h"
#include "mongo/base/initializer_function.h"
diff --git a/src/mongo/base/parse_number_test.cpp b/src/mongo/base/parse_number_test.cpp
index 01522ad517b..df2902800e0 100644
--- a/src/mongo/base/parse_number_test.cpp
+++ b/src/mongo/base/parse_number_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/base/parse_number.h"
#include "mongo/base/status.h"
-#include "mongo/util/mongoutils/str.h" // for str::stream()!
#include "mongo/unittest/unittest.h"
+#include "mongo/util/mongoutils/str.h" // for str::stream()!
#define ASSERT_PARSES(TYPE, INPUT_STRING, EXPECTED_VALUE) \
do { \
diff --git a/src/mongo/base/secure_allocator.cpp b/src/mongo/base/secure_allocator.cpp
index 336edbf6364..ecfc6b12f0d 100644
--- a/src/mongo/base/secure_allocator.cpp
+++ b/src/mongo/base/secure_allocator.cpp
@@ -225,9 +225,8 @@ std::shared_ptr<Allocation> lastAllocation = nullptr;
} // namespace
-MONGO_INITIALIZER_GENERAL(SecureAllocator,
- ("SystemInfo"),
- MONGO_NO_DEPENDENTS)(InitializerContext* context) {
+MONGO_INITIALIZER_GENERAL(SecureAllocator, ("SystemInfo"), MONGO_NO_DEPENDENTS)
+(InitializerContext* context) {
return Status::OK();
}
diff --git a/src/mongo/base/string_data.h b/src/mongo/base/string_data.h
index 53cfc637bf9..86c809fc7a7 100644
--- a/src/mongo/base/string_data.h
+++ b/src/mongo/base/string_data.h
@@ -80,8 +80,7 @@ public:
*/
struct LiteralTag {};
template <size_t N>
- StringData(const char(&val)[N], LiteralTag)
- : StringData(&val[0], N - 1) {}
+ StringData(const char (&val)[N], LiteralTag) : StringData(&val[0], N - 1) {}
/**
* Constructs a StringData, for the case of a std::string. We can
diff --git a/src/mongo/bson/bson_obj_data_type_test.cpp b/src/mongo/bson/bson_obj_data_type_test.cpp
index cbf5305e45c..21476a7149e 100644
--- a/src/mongo/bson/bson_obj_data_type_test.cpp
+++ b/src/mongo/bson/bson_obj_data_type_test.cpp
@@ -25,8 +25,8 @@
* then also delete it in the license file.
*/
-#include "mongo/base/data_range_cursor.h"
#include "mongo/base/data_range.h"
+#include "mongo/base/data_range_cursor.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
diff --git a/src/mongo/bson/bson_obj_test.cpp b/src/mongo/bson/bson_obj_test.cpp
index 6cf98fd998b..843dfb13829 100644
--- a/src/mongo/bson/bson_obj_test.cpp
+++ b/src/mongo/bson/bson_obj_test.cpp
@@ -527,7 +527,10 @@ TEST(BSONObj, getFields) {
TEST(BSONObj, getFieldsWithDuplicates) {
auto e = BSON("a" << 2 << "b"
<< "3"
- << "a" << 9 << "b" << 10);
+ << "a"
+ << 9
+ << "b"
+ << 10);
std::array<StringData, 2> fieldNames{"a", "b"};
std::array<BSONElement, 2> fields;
e.getFields(fieldNames, &fields);
diff --git a/src/mongo/bson/bson_validate.cpp b/src/mongo/bson/bson_validate.cpp
index 0f5012d01f3..c578231e435 100644
--- a/src/mongo/bson/bson_validate.cpp
+++ b/src/mongo/bson/bson_validate.cpp
@@ -28,8 +28,8 @@
*/
#include <cstring>
-#include <vector>
#include <limits>
+#include <vector>
#include "mongo/base/data_view.h"
#include "mongo/bson/bson_validate.h"
diff --git a/src/mongo/bson/bson_validate_test.cpp b/src/mongo/bson/bson_validate_test.cpp
index 06a3ee098c3..1d44b237d35 100644
--- a/src/mongo/bson/bson_validate_test.cpp
+++ b/src/mongo/bson/bson_validate_test.cpp
@@ -30,10 +30,10 @@
#include "mongo/platform/basic.h"
#include "mongo/base/data_view.h"
+#include "mongo/bson/bson_validate.h"
#include "mongo/db/jsobj.h"
-#include "mongo/unittest/unittest.h"
#include "mongo/platform/random.h"
-#include "mongo/bson/bson_validate.h"
+#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
namespace {
@@ -148,15 +148,23 @@ TEST(BSONValidate, Fuzz) {
log() << "BSONValidate Fuzz random seed: " << seed << endl;
PseudoRandom randomSource(seed);
- BSONObj original =
- BSON("one" << 3 << "two" << 5 << "three" << BSONObj() << "four"
- << BSON("five" << BSON("six" << 11)) << "seven" << BSON_ARRAY("a"
- << "bb"
- << "ccc" << 5)
- << "eight" << BSONDBRef("rrr", OID("01234567890123456789aaaa")) << "_id"
- << OID("deadbeefdeadbeefdeadbeef") << "nine"
- << BSONBinData("\x69\xb7", 2, BinDataGeneral) << "ten"
- << Date_t::fromMillisSinceEpoch(44) << "eleven" << BSONRegEx("foooooo", "i"));
+ BSONObj original = BSON("one" << 3 << "two" << 5 << "three" << BSONObj() << "four"
+ << BSON("five" << BSON("six" << 11))
+ << "seven"
+ << BSON_ARRAY("a"
+ << "bb"
+ << "ccc"
+ << 5)
+ << "eight"
+ << BSONDBRef("rrr", OID("01234567890123456789aaaa"))
+ << "_id"
+ << OID("deadbeefdeadbeefdeadbeef")
+ << "nine"
+ << BSONBinData("\x69\xb7", 2, BinDataGeneral)
+ << "ten"
+ << Date_t::fromMillisSinceEpoch(44)
+ << "eleven"
+ << BSONRegEx("foooooo", "i"));
int32_t fuzzFrequencies[] = {2, 10, 20, 100, 1000};
for (size_t i = 0; i < sizeof(fuzzFrequencies) / sizeof(int32_t); ++i) {
diff --git a/src/mongo/bson/bsonelement.h b/src/mongo/bson/bsonelement.h
index 8798fc58794..876e4a40050 100644
--- a/src/mongo/bson/bsonelement.h
+++ b/src/mongo/bson/bsonelement.h
@@ -37,9 +37,9 @@
#include "mongo/base/data_type_endian.h"
#include "mongo/base/data_view.h"
+#include "mongo/base/string_data_comparator_interface.h"
#include "mongo/bson/bsontypes.h"
#include "mongo/bson/oid.h"
-#include "mongo/base/string_data_comparator_interface.h"
#include "mongo/bson/timestamp.h"
#include "mongo/config.h"
#include "mongo/platform/decimal128.h"
diff --git a/src/mongo/bson/bsonobj.cpp b/src/mongo/bson/bsonobj.cpp
index 5c19d28a3c3..8a492643608 100644
--- a/src/mongo/bson/bsonobj.cpp
+++ b/src/mongo/bson/bsonobj.cpp
@@ -844,7 +844,7 @@ bool BSONIteratorSorted::ElementFieldCmp::operator()(const char* s1, const char*
}
BSONIteratorSorted::BSONIteratorSorted(const BSONObj& o, const ElementFieldCmp& cmp)
- : _nfields(o.nFields()), _fields(new const char* [_nfields]) {
+ : _nfields(o.nFields()), _fields(new const char*[_nfields]) {
int x = 0;
BSONObjIterator i(o);
while (i.more()) {
diff --git a/src/mongo/bson/bsonobjbuilder.h b/src/mongo/bson/bsonobjbuilder.h
index a3bf857095f..3a3f91a60cf 100644
--- a/src/mongo/bson/bsonobjbuilder.h
+++ b/src/mongo/bson/bsonobjbuilder.h
@@ -34,9 +34,9 @@
#pragma once
-#include <map>
#include <cmath>
#include <limits>
+#include <map>
#include "mongo/base/data_view.h"
#include "mongo/base/parse_number.h"
diff --git a/src/mongo/bson/bsonobjbuilder_test.cpp b/src/mongo/bson/bsonobjbuilder_test.cpp
index fdee41b8ba1..f9bad62059f 100644
--- a/src/mongo/bson/bsonobjbuilder_test.cpp
+++ b/src/mongo/bson/bsonobjbuilder_test.cpp
@@ -33,8 +33,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
-#include <sstream>
#include "mongo/unittest/unittest.h"
+#include <sstream>
namespace {
@@ -300,7 +300,9 @@ TEST(BSONObjBuilderTest, ResumeBuildingWithNesting) {
auto obj = BSONObj(b.buf());
ASSERT_EQ(obj,
BSON("ll" << BSON("f" << BSON("cc"
- << "dd")) << "a" << BSON("c" << 3)));
+ << "dd"))
+ << "a"
+ << BSON("c" << 3)));
}
TEST(BSONObjBuilderTest, ResetToEmptyResultsInEmptyObj) {
diff --git a/src/mongo/bson/bsontypes.h b/src/mongo/bson/bsontypes.h
index fa21b9e5020..1685249e9ef 100644
--- a/src/mongo/bson/bsontypes.h
+++ b/src/mongo/bson/bsontypes.h
@@ -29,9 +29,9 @@
#pragma once
+#include "mongo/config.h"
#include "mongo/platform/decimal128.h"
#include "mongo/util/assert_util.h"
-#include "mongo/config.h"
namespace mongo {
diff --git a/src/mongo/bson/json.cpp b/src/mongo/bson/json.cpp
index 796771b41aa..fffbd042328 100644
--- a/src/mongo/bson/json.cpp
+++ b/src/mongo/bson/json.cpp
@@ -76,9 +76,9 @@ enum {
DATE_RESERVE_SIZE = 64
};
-static const char* LBRACE = "{", * RBRACE = "}", * LBRACKET = "[", * RBRACKET = "]", * LPAREN = "(",
- * RPAREN = ")", * COLON = ":", * COMMA = ",", * FORWARDSLASH = "/",
- * SINGLEQUOTE = "'", * DOUBLEQUOTE = "\"";
+static const char *LBRACE = "{", *RBRACE = "}", *LBRACKET = "[", *RBRACKET = "]", *LPAREN = "(",
+ *RPAREN = ")", *COLON = ":", *COMMA = ",", *FORWARDSLASH = "/",
+ *SINGLEQUOTE = "'", *DOUBLEQUOTE = "\"";
JParse::JParse(StringData str)
: _buf(str.rawData()), _input(_buf), _input_end(_input + str.size()) {}
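The json.cpp hunk shows how left pointer alignment degrades when several declarators share one specifier: a lone declarator gets `const char* p`, but in a multi-declarator statement each name must carry its own `*`, so clang-format binds the star to the names instead. A sketch:

    // Single declarator: '*' binds to the type.
    const char* kSingle = "{";

    // Multiple declarators share the 'static const char' specifier, so
    // each name carries its own '*'.
    static const char *kOpen = "{", *kClose = "}", *kComma = ",";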
diff --git a/src/mongo/bson/json.h b/src/mongo/bson/json.h
index 8553f79408e..5e5516607a1 100644
--- a/src/mongo/bson/json.h
+++ b/src/mongo/bson/json.h
@@ -30,8 +30,8 @@
#include <string>
-#include "mongo/bson/bsonobj.h"
#include "mongo/base/status.h"
+#include "mongo/bson/bsonobj.h"
namespace mongo {
diff --git a/src/mongo/bson/mutable/algorithm.h b/src/mongo/bson/mutable/algorithm.h
index 66cec29956a..850f37d9f00 100644
--- a/src/mongo/bson/mutable/algorithm.h
+++ b/src/mongo/bson/mutable/algorithm.h
@@ -27,8 +27,8 @@
#pragma once
-#include <cstddef>
#include <algorithm>
+#include <cstddef>
#include <vector>
#include "mongo/bson/mutable/const_element.h"
diff --git a/src/mongo/bson/mutable/document.cpp b/src/mongo/bson/mutable/document.cpp
index 2afe8c8de96..5ae92341217 100644
--- a/src/mongo/bson/mutable/document.cpp
+++ b/src/mongo/bson/mutable/document.cpp
@@ -777,8 +777,9 @@ public:
// It should be impossible to have an opaque left child and be non-serialized,
dassert(rep->serialized);
- BSONElement childElt = (hasValue(*rep) ? getSerializedElement(*rep).embeddedObject()
- : getObject(rep->objIdx)).firstElement();
+ BSONElement childElt =
+ (hasValue(*rep) ? getSerializedElement(*rep).embeddedObject() : getObject(rep->objIdx))
+ .firstElement();
if (!childElt.eoo()) {
// Do this now before other writes so compiler can exploit knowing
diff --git a/src/mongo/bson/oid.h b/src/mongo/bson/oid.h
index 0cc1e2629e3..7debe105409 100644
--- a/src/mongo/bson/oid.h
+++ b/src/mongo/bson/oid.h
@@ -89,7 +89,7 @@ public:
}
/** init from a reference to a 12-byte array */
- explicit OID(const unsigned char(&arr)[kOIDSize]) {
+ explicit OID(const unsigned char (&arr)[kOIDSize]) {
std::memcpy(_data, arr, sizeof(arr));
}
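The OID constructor change adds clang-format's space before a parenthesized array-reference declarator. Taking the array by reference (rather than letting it decay to a pointer) keeps the bound visible to sizeof. A standalone sketch with hypothetical names:

    #include <cstring>

    constexpr int kOIDSize = 12;

    // 'unsigned char (&src)[kOIDSize]' is a reference to a 12-byte array,
    // so sizeof(src) is 12 here, not the size of a pointer.
    void copyAll(unsigned char (&dst)[kOIDSize], const unsigned char (&src)[kOIDSize]) {
        std::memcpy(dst, src, sizeof(src));
    }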
diff --git a/src/mongo/bson/timestamp.cpp b/src/mongo/bson/timestamp.cpp
index 2abd3a8990a..45230597ddf 100644
--- a/src/mongo/bson/timestamp.cpp
+++ b/src/mongo/bson/timestamp.cpp
@@ -25,8 +25,8 @@
* then also delete it in the license file.
*/
-#include "mongo/bson/bsontypes.h"
#include "mongo/bson/timestamp.h"
+#include "mongo/bson/bsontypes.h"
#include <cstdint>
#include <ctime>
diff --git a/src/mongo/bson/util/bson_check.h b/src/mongo/bson/util/bson_check.h
index 4f2585e9e75..dd4f59816ba 100644
--- a/src/mongo/bson/util/bson_check.h
+++ b/src/mongo/bson/util/bson_check.h
@@ -64,7 +64,8 @@ Status bsonCheckOnlyHasFields(StringData objectName,
if (occurrences[i] > 1) {
return Status(ErrorCodes::DuplicateKey,
str::stream() << "Field " << *curr << " appears " << occurrences[i]
- << " times in " << objectName);
+ << " times in "
+ << objectName);
}
}
return Status::OK();
@@ -77,7 +78,7 @@ Status bsonCheckOnlyHasFields(StringData objectName,
template <typename StringType, int N>
Status bsonCheckOnlyHasFields(StringData objectName,
const BSONObj& o,
- const StringType(&legals)[N]) {
+ const StringType (&legals)[N]) {
return bsonCheckOnlyHasFields(objectName, o, &legals[0], legals + N);
}
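Same declarator-spacing rule as above, this time on a template parameter whose array bound N is deduced from the caller's array. A minimal deduction sketch (arraySize is hypothetical):

    #include <cstddef>

    // N is deduced from the argument's array type, so callers never pass
    // an explicit element count.
    template <typename T, std::size_t N>
    constexpr std::size_t arraySize(const T (&)[N]) {
        return N;
    }

    static_assert(arraySize("abc") == 4, "string literal includes the NUL");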
diff --git a/src/mongo/bson/util/bson_check_test.cpp b/src/mongo/bson/util/bson_check_test.cpp
index f14da7fa459..18365f9ee62 100644
--- a/src/mongo/bson/util/bson_check_test.cpp
+++ b/src/mongo/bson/util/bson_check_test.cpp
@@ -49,19 +49,26 @@ TEST(BsonCheck, CheckHasOnlyLegalFields) {
ASSERT_OK(bsonCheckOnlyHasFields("",
BSON("aField"
<< "value"
- << "thirdField" << 1 << "anotherField" << 2),
+ << "thirdField"
+ << 1
+ << "anotherField"
+ << 2),
legals));
ASSERT_OK(bsonCheckOnlyHasFields("",
BSON("aField"
<< "value"
- << "thirdField" << 1),
+ << "thirdField"
+ << 1),
legals));
ASSERT_EQUALS(ErrorCodes::BadValue,
bsonCheckOnlyHasFields("",
BSON("aField"
<< "value"
- << "illegal" << 4 << "thirdField" << 1),
+ << "illegal"
+ << 4
+ << "thirdField"
+ << 1),
legals));
}
diff --git a/src/mongo/bson/util/bson_extract.cpp b/src/mongo/bson/util/bson_extract.cpp
index 095185e4baa..b3ff37ca1b2 100644
--- a/src/mongo/bson/util/bson_extract.cpp
+++ b/src/mongo/bson/util/bson_extract.cpp
@@ -37,7 +37,8 @@ Status bsonExtractField(const BSONObj& object, StringData fieldName, BSONElement
if (element.eoo())
return Status(ErrorCodes::NoSuchKey,
mongoutils::str::stream() << "Missing expected field \""
- << fieldName.toString() << "\"");
+ << fieldName.toString()
+ << "\"");
*outElement = element;
return Status::OK();
}
@@ -51,9 +52,11 @@ Status bsonExtractTypedField(const BSONObj& object,
return status;
if (type != outElement->type()) {
return Status(ErrorCodes::TypeMismatch,
- mongoutils::str::stream()
- << "\"" << fieldName << "\" had the wrong type. Expected "
- << typeName(type) << ", found " << typeName(outElement->type()));
+ mongoutils::str::stream() << "\"" << fieldName
+ << "\" had the wrong type. Expected "
+ << typeName(type)
+ << ", found "
+ << typeName(outElement->type()));
}
return Status::OK();
}
@@ -81,7 +84,8 @@ Status bsonExtractBooleanFieldWithDefault(const BSONObj& object,
} else if (!value.isNumber() && !value.isBoolean()) {
return Status(ErrorCodes::TypeMismatch,
mongoutils::str::stream() << "Expected boolean or number type for field \""
- << fieldName << "\", found "
+ << fieldName
+ << "\", found "
<< typeName(value.type()));
} else {
*out = value.trueValue();
@@ -155,11 +159,12 @@ Status bsonExtractIntegerField(const BSONObj& object, StringData fieldName, long
}
long long result = value.safeNumberLong();
if (result != value.numberDouble()) {
- return Status(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Expected field \"" << fieldName
- << "\" to have a value "
- "exactly representable as a 64-bit integer, but found " << value);
+ return Status(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Expected field \"" << fieldName
+ << "\" to have a value "
+ "exactly representable as a 64-bit integer, but found "
+ << value);
}
*out = result;
return Status::OK();
@@ -188,9 +193,11 @@ Status bsonExtractIntegerFieldWithDefaultIf(const BSONObj& object,
return status;
}
if (!pred(*out)) {
- return Status(ErrorCodes::BadValue,
- mongoutils::str::stream() << "Invalid value in field \"" << fieldName
- << "\": " << *out << ": " << predDescription);
+ return Status(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Invalid value in field \"" << fieldName << "\": " << *out
+ << ": "
+ << predDescription);
}
return Status::OK();
}
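Several rewrapped messages in this file rely on adjacent string-literal concatenation: two stacked literals with no `<<` between them form a single compile-time string, so the chain has fewer operands than lines. A sketch:

    #include <iostream>

    // The two stacked literals concatenate at compile time into one
    // argument; only 'value' is a separate '<<' operand.
    void warnNotExact(double value) {
        std::cerr << "Expected field to have a value "
                     "exactly representable as a 64-bit integer, but found "
                  << value << '\n';
    }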
diff --git a/src/mongo/bson/util/bson_extract_test.cpp b/src/mongo/bson/util/bson_extract_test.cpp
index 8ec13d1fbd1..9ef6a448e80 100644
--- a/src/mongo/bson/util/bson_extract_test.cpp
+++ b/src/mongo/bson/util/bson_extract_test.cpp
@@ -89,10 +89,12 @@ TEST(ExtractBSON, ExtractStringFieldWithDefault) {
TEST(ExtractBSON, ExtractBooleanFieldWithDefault) {
BSONObj obj1 = BSON("a" << 1 << "b"
<< "hello"
- << "c" << true);
+ << "c"
+ << true);
BSONObj obj2 = BSON("a" << 0 << "b"
<< "hello"
- << "c" << false);
+ << "c"
+ << false);
bool b;
b = false;
ASSERT_OK(bsonExtractBooleanFieldWithDefault(obj1, "a", false, &b));
diff --git a/src/mongo/bson/util/builder.h b/src/mongo/bson/util/builder.h
index 1b43a69ad27..f440e9b8fcb 100644
--- a/src/mongo/bson/util/builder.h
+++ b/src/mongo/bson/util/builder.h
@@ -32,8 +32,8 @@
#include <cfloat>
#include <sstream>
#include <stdio.h>
-#include <string>
#include <string.h>
+#include <string>
#include "mongo/base/data_type_endian.h"
diff --git a/src/mongo/client/authenticate.cpp b/src/mongo/client/authenticate.cpp
index 498b37480b5..c5b37fc16c6 100644
--- a/src/mongo/client/authenticate.cpp
+++ b/src/mongo/client/authenticate.cpp
@@ -31,9 +31,9 @@
#include "mongo/client/authenticate.h"
-#include "mongo/bson/json.h"
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
+#include "mongo/bson/json.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/sasl_client_authenticate.h"
#include "mongo/config.h"
@@ -160,26 +160,25 @@ void authMongoCR(RunCommandHook runCommand, const BSONObj& params, AuthCompletio
if (!nonceRequest.isOK())
return handler(std::move(nonceRequest.getStatus()));
- runCommand(nonceRequest.getValue(),
- [runCommand, params, handler](AuthResponse response) {
- if (!response.isOK())
- return handler(std::move(response));
-
- // Ensure response was valid
- std::string nonce;
- BSONObj nonceResponse = response.getValue().data;
- auto valid = bsonExtractStringField(nonceResponse, "nonce", &nonce);
- if (!valid.isOK())
- return handler({ErrorCodes::AuthenticationFailed,
- "Invalid nonce response: " + nonceResponse.toString()});
-
- // Step 2: send authenticate command, receive response
- auto authRequest = createMongoCRAuthenticateCmd(params, nonce);
- if (!authRequest.isOK())
- return handler(std::move(authRequest.getStatus()));
-
- runCommand(authRequest.getValue(), handler);
- });
+ runCommand(nonceRequest.getValue(), [runCommand, params, handler](AuthResponse response) {
+ if (!response.isOK())
+ return handler(std::move(response));
+
+ // Ensure response was valid
+ std::string nonce;
+ BSONObj nonceResponse = response.getValue().data;
+ auto valid = bsonExtractStringField(nonceResponse, "nonce", &nonce);
+ if (!valid.isOK())
+ return handler({ErrorCodes::AuthenticationFailed,
+ "Invalid nonce response: " + nonceResponse.toString()});
+
+ // Step 2: send authenticate command, receive response
+ auto authRequest = createMongoCRAuthenticateCmd(params, nonce);
+ if (!authRequest.isOK())
+ return handler(std::move(authRequest.getStatus()));
+
+ runCommand(authRequest.getValue(), handler);
+ });
}
//
@@ -216,7 +215,8 @@ AuthRequest createX509AuthCmd(const BSONObj& params, StringData clientName) {
request.cmdObj = BSON("authenticate" << 1 << "mechanism"
<< "MONGODB-X509"
- << "user" << username);
+ << "user"
+ << username);
return std::move(request);
}
@@ -332,19 +332,15 @@ void authenticateClient(const BSONObj& params,
} else {
// Run synchronously through async framework
// NOTE: this assumes that runCommand executes synchronously.
- asyncAuth(runCommand,
- params,
- hostname,
- clientName,
- [](AuthResponse response) {
- // DBClient expects us to throw in case of an auth error.
- uassertStatusOK(response);
-
- auto serverResponse = response.getValue().data;
- uassert(ErrorCodes::AuthenticationFailed,
- serverResponse["errmsg"].str(),
- isOk(serverResponse));
- });
+ asyncAuth(runCommand, params, hostname, clientName, [](AuthResponse response) {
+ // DBClient expects us to throw in case of an auth error.
+ uassertStatusOK(response);
+
+ auto serverResponse = response.getValue().data;
+ uassert(ErrorCodes::AuthenticationFailed,
+ serverResponse["errmsg"].str(),
+ isOk(serverResponse));
+ });
}
}
@@ -352,10 +348,14 @@ BSONObj buildAuthParams(StringData dbname,
StringData username,
StringData passwordText,
bool digestPassword) {
- return BSON(saslCommandMechanismFieldName
- << "SCRAM-SHA-1" << saslCommandUserDBFieldName << dbname << saslCommandUserFieldName
- << username << saslCommandPasswordFieldName << passwordText
- << saslCommandDigestPasswordFieldName << digestPassword);
+ return BSON(saslCommandMechanismFieldName << "SCRAM-SHA-1" << saslCommandUserDBFieldName
+ << dbname
+ << saslCommandUserFieldName
+ << username
+ << saslCommandPasswordFieldName
+ << passwordText
+ << saslCommandDigestPasswordFieldName
+ << digestPassword);
}
StringData getSaslCommandUserDBFieldName() {
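The two lambda hunks in this file show clang-format's special case for a lambda as the final call argument: the call head stays on one line and the lambda body indents like an ordinary block, instead of hanging under the opening parenthesis. A sketch with hypothetical names:

    #include <functional>
    #include <string>

    void runAsync(const std::string& cmd, std::function<void(int)> done);

    void example() {
        // The call head and '[](int status) {' share a line; the body gets
        // one normal indent level and '});' closes at the call's indentation.
        runAsync("ping", [](int status) {
            (void)status;
        });
    }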
diff --git a/src/mongo/client/authenticate_test.cpp b/src/mongo/client/authenticate_test.cpp
index 0de2a1e9670..adca0ec582b 100644
--- a/src/mongo/client/authenticate_test.cpp
+++ b/src/mongo/client/authenticate_test.cpp
@@ -63,10 +63,10 @@ public:
_nonce("7ca422a24f326f2a"),
_requests(),
_responses() {
- _runCommandCallback =
- [this](RemoteCommandRequest request, RunCommandResultHandler handler) {
- runCommand(std::move(request), handler);
- };
+ _runCommandCallback = [this](RemoteCommandRequest request,
+ RunCommandResultHandler handler) {
+ runCommand(std::move(request), handler);
+ };
// create our digest
md5digest d;
@@ -130,7 +130,11 @@ public:
<< "MONGODB-CR"
<< "db"
<< "admin"
- << "user" << _username << "pwd" << _password << "digest"
+ << "user"
+ << _username
+ << "pwd"
+ << _password
+ << "digest"
<< "true");
}
@@ -140,7 +144,8 @@ public:
pushRequest("$external",
BSON("authenticate" << 1 << "mechanism"
<< "MONGODB-X509"
- << "user" << _username));
+ << "user"
+ << _username));
// 2. Client receives 'ok'
pushResponse(BSON("ok" << 1));
@@ -150,7 +155,8 @@ public:
<< "MONGODB-X509"
<< "db"
<< "$external"
- << "user" << _username);
+ << "user"
+ << _username);
}
@@ -178,11 +184,10 @@ TEST_F(AuthClientTest, MongoCR) {
TEST_F(AuthClientTest, asyncMongoCR) {
auto params = loadMongoCRConversation();
- auth::authenticateClient(std::move(params),
- "",
- "",
- _runCommandCallback,
- [this](auth::AuthResponse response) { ASSERT(response.isOK()); });
+ auth::authenticateClient(
+ std::move(params), "", "", _runCommandCallback, [this](auth::AuthResponse response) {
+ ASSERT(response.isOK());
+ });
}
#ifdef MONGO_CONFIG_SSL
@@ -193,11 +198,10 @@ TEST_F(AuthClientTest, X509) {
TEST_F(AuthClientTest, asyncX509) {
auto params = loadX509Conversation();
- auth::authenticateClient(std::move(params),
- "",
- _username,
- _runCommandCallback,
- [this](auth::AuthResponse response) { ASSERT(response.isOK()); });
+ auth::authenticateClient(
+ std::move(params), "", _username, _runCommandCallback, [this](auth::AuthResponse response) {
+ ASSERT(response.isOK());
+ });
}
#endif
diff --git a/src/mongo/client/cyrus_sasl_client_session.cpp b/src/mongo/client/cyrus_sasl_client_session.cpp
index 22875070e23..7fe8896588c 100644
--- a/src/mongo/client/cyrus_sasl_client_session.cpp
+++ b/src/mongo/client/cyrus_sasl_client_session.cpp
@@ -141,7 +141,8 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(CyrusSaslClientContext,
if (result != SASL_OK) {
return Status(ErrorCodes::UnknownError,
mongoutils::str::stream() << "Could not initialize sasl client components ("
- << sasl_errstring(result, NULL, NULL) << ")");
+ << sasl_errstring(result, NULL, NULL)
+ << ")");
}
SaslClientSession::create = createCyrusSaslClientSession;
diff --git a/src/mongo/client/dbclient.cpp b/src/mongo/client/dbclient.cpp
index f42f1c3a2e3..6c7c1ab26e8 100644
--- a/src/mongo/client/dbclient.cpp
+++ b/src/mongo/client/dbclient.cpp
@@ -200,16 +200,23 @@ rpc::UniqueReply DBClientWithCommands::runCommandWithMetadata(StringData databas
// more helpful error message. Note that call() can itself throw a socket exception.
uassert(ErrorCodes::HostUnreachable,
str::stream() << "network error while attempting to run "
- << "command '" << command << "' "
- << "on host '" << host << "' ",
+ << "command '"
+ << command
+ << "' "
+ << "on host '"
+ << host
+ << "' ",
call(requestMsg, replyMsg, false, &host));
auto commandReply = rpc::makeReply(&replyMsg);
uassert(ErrorCodes::RPCProtocolNegotiationFailed,
str::stream() << "Mismatched RPC protocols - request was '"
- << networkOpToString(requestMsg.operation()) << "' '"
- << " but reply was '" << networkOpToString(replyMsg.operation()) << "' ",
+ << networkOpToString(requestMsg.operation())
+ << "' '"
+ << " but reply was '"
+ << networkOpToString(replyMsg.operation())
+ << "' ",
requestBuilder->getProtocol() == commandReply->getProtocol());
if (ErrorCodes::SendStaleConfig ==
@@ -281,7 +288,8 @@ bool DBClientWithCommands::runPseudoCommand(StringData db,
if (status == ErrorCodes::CommandResultSchemaViolation) {
msgasserted(28624,
str::stream() << "Received bad " << realCommandName
- << " response from server: " << info);
+ << " response from server: "
+ << info);
} else if (status == ErrorCodes::CommandNotFound) {
NamespaceString pseudoCommandNss(db, pseudoCommandCol);
// if this throws we just let it escape as that's how runCommand works.
@@ -689,7 +697,10 @@ void DBClientInterface::findN(vector<BSONObj>& out,
uassert(10276,
str::stream() << "DBClientBase::findN: transport error: " << getServerAddress()
- << " ns: " << ns << " query: " << query.toString(),
+ << " ns: "
+ << ns
+ << " query: "
+ << query.toString(),
c.get());
if (c->hasResultFlag(ResultFlag_ShardConfigStale)) {
@@ -844,7 +855,8 @@ Status DBClientConnection::connectSocketOnly(const HostAndPort& serverAddress) {
if (!osAddr.isValid()) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "couldn't initialize connection to host "
- << serverAddress.host() << ", address is invalid");
+ << serverAddress.host()
+ << ", address is invalid");
}
if (isMessagePortImplASIO()) {
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index 5c1af64d8fd..9718e0b8fc8 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -152,7 +152,8 @@ ReplicaSetMonitorPtr DBClientReplicaSet::_getMonitor() const {
// If you can't get a ReplicaSetMonitor then this connection isn't valid
uassert(16340,
str::stream() << "No replica set monitor active and no cached seed "
- "found for set: " << _setName,
+ "found for set: "
+ << _setName,
rsm);
return rsm;
}
@@ -162,7 +163,8 @@ string DBClientReplicaSet::getServerAddress() const {
ReplicaSetMonitorPtr rsm = ReplicaSetMonitor::get(_setName);
if (!rsm) {
warning() << "Trying to get server address for DBClientReplicaSet, but no "
- "ReplicaSetMonitor exists for " << _setName;
+ "ReplicaSetMonitor exists for "
+ << _setName;
return str::stream() << _setName << "/";
}
return rsm->getServerAddress();
@@ -312,8 +314,10 @@ DBClientConnection* DBClientReplicaSet::checkMaster() {
monitor->failedHost(_masterHost);
uasserted(ErrorCodes::FailedToSatisfyReadPreference,
str::stream() << "can't connect to new replica set master ["
- << _masterHost.toString() << "]"
- << (errmsg.empty() ? "" : ", err: ") << errmsg);
+ << _masterHost.toString()
+ << "]"
+ << (errmsg.empty() ? "" : ", err: ")
+ << errmsg);
}
resetMaster();
@@ -509,10 +513,12 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const string& ns,
if (_isSecondaryQuery(ns, query.obj, *readPref)) {
LOG(3) << "dbclient_rs query using secondary or tagged node selection in "
<< _getMonitor()->getName() << ", read pref is " << readPref->toBSON()
- << " (primary : " << (_master.get() != NULL ? _master->getServerAddress()
- : "[not cached]") << ", lastTagged : "
- << (_lastSlaveOkConn.get() != NULL ? _lastSlaveOkConn->getServerAddress()
- : "[not cached]") << ")" << endl;
+ << " (primary : "
+ << (_master.get() != NULL ? _master->getServerAddress() : "[not cached]")
+ << ", lastTagged : " << (_lastSlaveOkConn.get() != NULL
+ ? _lastSlaveOkConn->getServerAddress()
+ : "[not cached]")
+ << ")" << endl;
string lastNodeErrMsg;
for (size_t retry = 0; retry < MAX_RETRY; retry++) {
@@ -561,10 +567,12 @@ BSONObj DBClientReplicaSet::findOne(const string& ns,
if (_isSecondaryQuery(ns, query.obj, *readPref)) {
LOG(3) << "dbclient_rs findOne using secondary or tagged node selection in "
<< _getMonitor()->getName() << ", read pref is " << readPref->toBSON()
- << " (primary : " << (_master.get() != NULL ? _master->getServerAddress()
- : "[not cached]") << ", lastTagged : "
- << (_lastSlaveOkConn.get() != NULL ? _lastSlaveOkConn->getServerAddress()
- : "[not cached]") << ")" << endl;
+ << " (primary : "
+ << (_master.get() != NULL ? _master->getServerAddress() : "[not cached]")
+ << ", lastTagged : " << (_lastSlaveOkConn.get() != NULL
+ ? _lastSlaveOkConn->getServerAddress()
+ : "[not cached]")
+ << ")" << endl;
string lastNodeErrMsg;
@@ -742,7 +750,8 @@ void DBClientReplicaSet::say(Message& toSend, bool isRetry, string* actualServer
<< (_master.get() != NULL ? _master->getServerAddress() : "[not cached]")
<< ", lastTagged : " << (_lastSlaveOkConn.get() != NULL
? _lastSlaveOkConn->getServerAddress()
- : "[not cached]") << ")" << endl;
+ : "[not cached]")
+ << ")" << endl;
string lastNodeErrMsg;
@@ -936,7 +945,8 @@ rpc::UniqueReply DBClientReplicaSet::runCommandWithMetadata(StringData database,
}
uasserted(ErrorCodes::NodeNotFound,
str::stream() << "Could not satisfy $readPreference of '" << readPref.toBSON() << "' "
- << "while attempting to run command " << command);
+ << "while attempting to run command "
+ << command);
}
bool DBClientReplicaSet::call(Message& toSend,
@@ -959,7 +969,8 @@ bool DBClientReplicaSet::call(Message& toSend,
<< (_master.get() != NULL ? _master->getServerAddress() : "[not cached]")
<< ", lastTagged : " << (_lastSlaveOkConn.get() != NULL
? _lastSlaveOkConn->getServerAddress()
- : "[not cached]") << ")" << endl;
+ : "[not cached]")
+ << ")" << endl;
for (size_t retry = 0; retry < MAX_RETRY; retry++) {
try {
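The dbclient_rs.cpp logging hunks all wrap a conditional inside the `<<` chain; the parentheses around it are mandatory because `<<` binds tighter than `?:`. A standalone sketch:

    #include <iostream>
    #include <string>

    // Without the parentheses, everything up through the comparison would
    // become the ternary's condition and the code would not compile.
    void logPrimary(const std::string* primary) {
        std::cout << " (primary : "
                  << (primary != nullptr ? *primary : std::string("[not cached]")) << ")\n";
    }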
diff --git a/src/mongo/client/dbclientinterface.h b/src/mongo/client/dbclientinterface.h
index ed0e312673f..6244db6c4ed 100644
--- a/src/mongo/client/dbclientinterface.h
+++ b/src/mongo/client/dbclientinterface.h
@@ -32,13 +32,13 @@
#include "mongo/base/string_data.h"
#include "mongo/client/connection_string.h"
-#include "mongo/client/read_preference.h"
#include "mongo/client/query.h"
+#include "mongo/client/read_preference.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/rpc/protocol.h"
#include "mongo/rpc/metadata.h"
+#include "mongo/rpc/protocol.h"
#include "mongo/rpc/unique_message.h"
#include "mongo/stdx/functional.h"
#include "mongo/util/mongoutils/str.h"
@@ -232,9 +232,14 @@ public:
}
std::string toString() const {
- return str::stream() << "QSpec " << BSON("ns" << _ns << "n2skip" << _ntoskip << "n2return"
- << _ntoreturn << "options" << _options
- << "query" << _query << "fields" << _fields);
+ return str::stream() << "QSpec "
+ << BSON("ns" << _ns << "n2skip" << _ntoskip << "n2return" << _ntoreturn
+ << "options"
+ << _options
+ << "query"
+ << _query
+ << "fields"
+ << _fields);
}
};
diff --git a/src/mongo/client/fetcher.cpp b/src/mongo/client/fetcher.cpp
index c25202da605..57fef47f068 100644
--- a/src/mongo/client/fetcher.cpp
+++ b/src/mongo/client/fetcher.cpp
@@ -69,12 +69,13 @@ Status parseCursorResponse(const BSONObj& obj,
if (cursorElement.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "cursor response must contain '" << kCursorFieldName
- << "' field: " << obj);
+ << "' field: "
+ << obj);
}
if (!cursorElement.isABSONObj()) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "'" << kCursorFieldName
- << "' field must be an object: " << obj);
+ return Status(
+ ErrorCodes::FailedToParse,
+ str::stream() << "'" << kCursorFieldName << "' field must be an object: " << obj);
}
BSONObj cursorObj = cursorElement.Obj();
@@ -82,13 +83,17 @@ Status parseCursorResponse(const BSONObj& obj,
if (cursorIdElement.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "cursor response must contain '" << kCursorFieldName << "."
- << kCursorIdFieldName << "' field: " << obj);
+ << kCursorIdFieldName
+ << "' field: "
+ << obj);
}
if (cursorIdElement.type() != mongo::NumberLong) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << kCursorFieldName << "." << kCursorIdFieldName
<< "' field must be a 'long' but was a '"
- << typeName(cursorIdElement.type()) << "': " << obj);
+ << typeName(cursorIdElement.type())
+ << "': "
+ << obj);
}
batchData->cursorId = cursorIdElement.numberLong();
@@ -96,19 +101,25 @@ Status parseCursorResponse(const BSONObj& obj,
if (namespaceElement.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "cursor response must contain "
- << "'" << kCursorFieldName << "." << kNamespaceFieldName
- << "' field: " << obj);
+ << "'"
+ << kCursorFieldName
+ << "."
+ << kNamespaceFieldName
+ << "' field: "
+ << obj);
}
if (namespaceElement.type() != mongo::String) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << kCursorFieldName << "." << kNamespaceFieldName
- << "' field must be a string: " << obj);
+ << "' field must be a string: "
+ << obj);
}
NamespaceString tempNss(namespaceElement.valuestrsafe());
if (!tempNss.isValid()) {
return Status(ErrorCodes::BadValue,
str::stream() << "'" << kCursorFieldName << "." << kNamespaceFieldName
- << "' contains an invalid namespace: " << obj);
+ << "' contains an invalid namespace: "
+ << obj);
}
batchData->nss = tempNss;
@@ -116,20 +127,27 @@ Status parseCursorResponse(const BSONObj& obj,
if (batchElement.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "cursor response must contain '" << kCursorFieldName << "."
- << batchFieldName << "' field: " << obj);
+ << batchFieldName
+ << "' field: "
+ << obj);
}
if (!batchElement.isABSONObj()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << kCursorFieldName << "." << batchFieldName
- << "' field must be an array: " << obj);
+ << "' field must be an array: "
+ << obj);
}
BSONObj batchObj = batchElement.Obj();
for (auto itemElement : batchObj) {
if (!itemElement.isABSONObj()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "found non-object " << itemElement << " in "
- << "'" << kCursorFieldName << "." << batchFieldName
- << "' field: " << obj);
+ << "'"
+ << kCursorFieldName
+ << "."
+ << batchFieldName
+ << "' field: "
+ << obj);
}
batchData->documents.push_back(itemElement.Obj().getOwned());
}
diff --git a/src/mongo/client/fetcher.h b/src/mongo/client/fetcher.h
index 222e5ebf193..a8e7dc98dac 100644
--- a/src/mongo/client/fetcher.h
+++ b/src/mongo/client/fetcher.h
@@ -40,8 +40,8 @@
#include "mongo/db/clientcursor.h"
#include "mongo/db/namespace_string.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/functional.h"
#include "mongo/stdx/condition_variable.h"
+#include "mongo/stdx/functional.h"
#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
diff --git a/src/mongo/client/fetcher_test.cpp b/src/mongo/client/fetcher_test.cpp
index f9398e22547..9aa484da892 100644
--- a/src/mongo/client/fetcher_test.cpp
+++ b/src/mongo/client/fetcher_test.cpp
@@ -32,8 +32,8 @@
#include "mongo/client/fetcher.h"
#include "mongo/db/jsobj.h"
-#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
+#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/rpc/metadata.h"
#include "mongo/unittest/unittest.h"
@@ -423,7 +423,8 @@ TEST_F(FetcherTest, FindCommandFailed2) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "bad hint"
- << "code" << int(ErrorCodes::BadValue)),
+ << "code"
+ << int(ErrorCodes::BadValue)),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
@@ -449,7 +450,10 @@ TEST_F(FetcherTest, CursorIdFieldMissing) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("ns"
<< "db.coll"
- << "firstBatch" << BSONArray()) << "ok" << 1),
+ << "firstBatch"
+ << BSONArray())
+ << "ok"
+ << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -460,7 +464,10 @@ TEST_F(FetcherTest, CursorIdNotLongNumber) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 123.1 << "ns"
<< "db.coll"
- << "firstBatch" << BSONArray()) << "ok" << 1),
+ << "firstBatch"
+ << BSONArray())
+ << "ok"
+ << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -480,10 +487,11 @@ TEST_F(FetcherTest, NamespaceFieldMissing) {
TEST_F(FetcherTest, NamespaceNotAString) {
ASSERT_OK(fetcher->schedule());
- processNetworkResponse(BSON("cursor" << BSON("id" << 123LL << "ns" << 123 << "firstBatch"
- << BSONArray()) << "ok" << 1),
- ReadyQueueState::kEmpty,
- FetcherState::kInactive);
+ processNetworkResponse(
+ BSON("cursor" << BSON("id" << 123LL << "ns" << 123 << "firstBatch" << BSONArray()) << "ok"
+ << 1),
+ ReadyQueueState::kEmpty,
+ FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_STRING_CONTAINS(status.reason(), "'cursor.ns' field must be a string");
}
@@ -492,7 +500,10 @@ TEST_F(FetcherTest, NamespaceEmpty) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 123LL << "ns"
<< ""
- << "firstBatch" << BSONArray()) << "ok" << 1),
+ << "firstBatch"
+ << BSONArray())
+ << "ok"
+ << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
@@ -503,7 +514,10 @@ TEST_F(FetcherTest, NamespaceMissingCollectionName) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 123LL << "ns"
<< "db."
- << "firstBatch" << BSONArray()) << "ok" << 1),
+ << "firstBatch"
+ << BSONArray())
+ << "ok"
+ << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::BadValue, status.code());
@@ -513,7 +527,9 @@ TEST_F(FetcherTest, NamespaceMissingCollectionName) {
TEST_F(FetcherTest, FirstBatchFieldMissing) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
- << "db.coll") << "ok" << 1),
+ << "db.coll")
+ << "ok"
+ << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -524,7 +540,10 @@ TEST_F(FetcherTest, FirstBatchNotAnArray) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch" << 123) << "ok" << 1),
+ << "firstBatch"
+ << 123)
+ << "ok"
+ << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
@@ -533,12 +552,14 @@ TEST_F(FetcherTest, FirstBatchNotAnArray) {
TEST_F(FetcherTest, FirstBatchArrayContainsNonObject) {
ASSERT_OK(fetcher->schedule());
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 0LL << "ns"
- << "db.coll"
- << "firstBatch" << BSON_ARRAY(8)) << "ok" << 1),
- ReadyQueueState::kEmpty,
- FetcherState::kInactive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON_ARRAY(8))
+ << "ok"
+ << 1),
+ ReadyQueueState::kEmpty,
+ FetcherState::kInactive);
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.code());
ASSERT_STRING_CONTAINS(status.reason(), "found non-object");
ASSERT_STRING_CONTAINS(status.reason(), "in 'cursor.firstBatch' field");
@@ -548,7 +569,10 @@ TEST_F(FetcherTest, FirstBatchEmptyArray) {
ASSERT_OK(fetcher->schedule());
processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
<< "db.coll"
- << "firstBatch" << BSONArray()) << "ok" << 1),
+ << "firstBatch"
+ << BSONArray())
+ << "ok"
+ << 1),
ReadyQueueState::kEmpty,
FetcherState::kInactive);
ASSERT_OK(status);
@@ -560,12 +584,14 @@ TEST_F(FetcherTest, FirstBatchEmptyArray) {
TEST_F(FetcherTest, FetchOneDocument) {
ASSERT_OK(fetcher->schedule());
const BSONObj doc = BSON("_id" << 1);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 0LL << "ns"
- << "db.coll"
- << "firstBatch" << BSON_ARRAY(doc)) << "ok" << 1),
- ReadyQueueState::kEmpty,
- FetcherState::kInactive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON_ARRAY(doc))
+ << "ok"
+ << 1),
+ ReadyQueueState::kEmpty,
+ FetcherState::kInactive);
ASSERT_OK(status);
ASSERT_EQUALS(0, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
@@ -586,12 +612,14 @@ TEST_F(FetcherTest, SetNextActionToContinueWhenNextBatchIsNotAvailable) {
*nextAction = Fetcher::NextAction::kGetMore;
ASSERT_FALSE(getMoreBob);
};
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 0LL << "ns"
- << "db.coll"
- << "firstBatch" << BSON_ARRAY(doc)) << "ok" << 1),
- ReadyQueueState::kEmpty,
- FetcherState::kInactive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON_ARRAY(doc))
+ << "ok"
+ << 1),
+ ReadyQueueState::kEmpty,
+ FetcherState::kInactive);
ASSERT_OK(status);
ASSERT_EQUALS(0, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
@@ -617,13 +645,15 @@ TEST_F(FetcherTest, FetchMultipleBatches) {
const BSONObj doc = BSON("_id" << 1);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 1LL << "ns"
- << "db.coll"
- << "firstBatch" << BSON_ARRAY(doc)) << "ok" << 1),
- Milliseconds(100),
- ReadyQueueState::kHasReadyRequests,
- FetcherState::kActive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON_ARRAY(doc))
+ << "ok"
+ << 1),
+ Milliseconds(100),
+ ReadyQueueState::kHasReadyRequests,
+ FetcherState::kActive);
ASSERT_OK(status);
ASSERT_EQUALS(1LL, cursorId);
@@ -636,13 +666,15 @@ TEST_F(FetcherTest, FetchMultipleBatches) {
const BSONObj doc2 = BSON("_id" << 2);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 1LL << "ns"
- << "db.coll"
- << "nextBatch" << BSON_ARRAY(doc2)) << "ok" << 1),
- Milliseconds(200),
- ReadyQueueState::kHasReadyRequests,
- FetcherState::kActive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON_ARRAY(doc2))
+ << "ok"
+ << 1),
+ Milliseconds(200),
+ ReadyQueueState::kHasReadyRequests,
+ FetcherState::kActive);
ASSERT_OK(status);
ASSERT_EQUALS(1LL, cursorId);
@@ -655,13 +687,15 @@ TEST_F(FetcherTest, FetchMultipleBatches) {
const BSONObj doc3 = BSON("_id" << 3);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 0LL << "ns"
- << "db.coll"
- << "nextBatch" << BSON_ARRAY(doc3)) << "ok" << 1),
- Milliseconds(300),
- ReadyQueueState::kEmpty,
- FetcherState::kInactive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 0LL << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON_ARRAY(doc3))
+ << "ok"
+ << 1),
+ Milliseconds(300),
+ ReadyQueueState::kEmpty,
+ FetcherState::kInactive);
ASSERT_OK(status);
ASSERT_EQUALS(0, cursorId);
@@ -680,12 +714,14 @@ TEST_F(FetcherTest, ScheduleGetMoreAndCancel) {
const BSONObj doc = BSON("_id" << 1);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 1LL << "ns"
- << "db.coll"
- << "firstBatch" << BSON_ARRAY(doc)) << "ok" << 1),
- ReadyQueueState::kHasReadyRequests,
- FetcherState::kActive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON_ARRAY(doc))
+ << "ok"
+ << 1),
+ ReadyQueueState::kHasReadyRequests,
+ FetcherState::kActive);
ASSERT_OK(status);
ASSERT_EQUALS(1LL, cursorId);
@@ -695,12 +731,14 @@ TEST_F(FetcherTest, ScheduleGetMoreAndCancel) {
ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction);
const BSONObj doc2 = BSON("_id" << 2);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 1LL << "ns"
- << "db.coll"
- << "nextBatch" << BSON_ARRAY(doc2)) << "ok" << 1),
- ReadyQueueState::kHasReadyRequests,
- FetcherState::kActive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON_ARRAY(doc2))
+ << "ok"
+ << 1),
+ ReadyQueueState::kHasReadyRequests,
+ FetcherState::kActive);
ASSERT_OK(status);
ASSERT_EQUALS(1LL, cursorId);
@@ -727,12 +765,14 @@ TEST_F(FetcherTest, ScheduleGetMoreButShutdown) {
const BSONObj doc = BSON("_id" << 1);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 1LL << "ns"
- << "db.coll"
- << "firstBatch" << BSON_ARRAY(doc)) << "ok" << 1),
- ReadyQueueState::kHasReadyRequests,
- FetcherState::kActive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON_ARRAY(doc))
+ << "ok"
+ << 1),
+ ReadyQueueState::kHasReadyRequests,
+ FetcherState::kActive);
ASSERT_OK(status);
ASSERT_EQUALS(1LL, cursorId);
@@ -743,12 +783,14 @@ TEST_F(FetcherTest, ScheduleGetMoreButShutdown) {
const BSONObj doc2 = BSON("_id" << 2);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 1LL << "ns"
- << "db.coll"
- << "nextBatch" << BSON_ARRAY(doc2)) << "ok" << 1),
- ReadyQueueState::kHasReadyRequests,
- FetcherState::kActive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON_ARRAY(doc2))
+ << "ok"
+ << 1),
+ ReadyQueueState::kHasReadyRequests,
+ FetcherState::kActive);
ASSERT_OK(status);
ASSERT_EQUALS(1LL, cursorId);
@@ -780,12 +822,14 @@ TEST_F(FetcherTest, EmptyGetMoreRequestAfterFirstBatchMakesFetcherInactiveAndKil
const BSONObj doc = BSON("_id" << 1);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 1LL << "ns"
- << "db.coll"
- << "firstBatch" << BSON_ARRAY(doc)) << "ok" << 1),
- ReadyQueueState::kHasReadyRequests,
- FetcherState::kInactive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON_ARRAY(doc))
+ << "ok"
+ << 1),
+ ReadyQueueState::kHasReadyRequests,
+ FetcherState::kInactive);
ASSERT_OK(status);
ASSERT_EQUALS(1LL, cursorId);
@@ -835,12 +879,14 @@ TEST_F(FetcherTest, UpdateNextActionAfterSecondBatch) {
const BSONObj doc = BSON("_id" << 1);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 1LL << "ns"
- << "db.coll"
- << "firstBatch" << BSON_ARRAY(doc)) << "ok" << 1),
- ReadyQueueState::kHasReadyRequests,
- FetcherState::kActive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON_ARRAY(doc))
+ << "ok"
+ << 1),
+ ReadyQueueState::kHasReadyRequests,
+ FetcherState::kActive);
ASSERT_OK(status);
ASSERT_EQUALS(1LL, cursorId);
@@ -853,12 +899,14 @@ TEST_F(FetcherTest, UpdateNextActionAfterSecondBatch) {
callbackHook = setNextActionToNoAction;
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 1LL << "ns"
- << "db.coll"
- << "nextBatch" << BSON_ARRAY(doc2)) << "ok" << 1),
- ReadyQueueState::kHasReadyRequests,
- FetcherState::kInactive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON_ARRAY(doc2))
+ << "ok"
+ << 1),
+ ReadyQueueState::kHasReadyRequests,
+ FetcherState::kInactive);
ASSERT_OK(status);
ASSERT_EQUALS(1LL, cursorId);
@@ -928,12 +976,14 @@ TEST_F(FetcherTest, ShutdownDuringSecondBatch) {
const BSONObj doc = BSON("_id" << 1);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 1LL << "ns"
- << "db.coll"
- << "firstBatch" << BSON_ARRAY(doc)) << "ok" << 1),
- ReadyQueueState::kHasReadyRequests,
- FetcherState::kActive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON_ARRAY(doc))
+ << "ok"
+ << 1),
+ ReadyQueueState::kHasReadyRequests,
+ FetcherState::kActive);
ASSERT_OK(status);
ASSERT_EQUALS(1LL, cursorId);
@@ -953,12 +1003,14 @@ TEST_F(FetcherTest, ShutdownDuringSecondBatch) {
&getExecutor(),
&isShutdownCalled);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 1LL << "ns"
- << "db.coll"
- << "nextBatch" << BSON_ARRAY(doc2)) << "ok" << 1),
- ReadyQueueState::kEmpty,
- FetcherState::kInactive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON_ARRAY(doc2))
+ << "ok"
+ << 1),
+ ReadyQueueState::kEmpty,
+ FetcherState::kInactive);
// Fetcher should attempt (unsuccessfully) to schedule a killCursors command.
ASSERT_EQUALS(
@@ -996,12 +1048,14 @@ TEST_F(FetcherTest, FetcherAppliesRetryPolicyToFirstCommandButNotToGetMoreReques
"second",
ReadyQueueState::kHasReadyRequests,
FetcherState::kActive);
- processNetworkResponse(
- BSON("cursor" << BSON("id" << 1LL << "ns"
- << "db.coll"
- << "firstBatch" << BSON_ARRAY(doc)) << "ok" << 1),
- ReadyQueueState::kHasReadyRequests,
- FetcherState::kActive);
+ processNetworkResponse(BSON("cursor" << BSON("id" << 1LL << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON_ARRAY(doc))
+ << "ok"
+ << 1),
+ ReadyQueueState::kHasReadyRequests,
+ FetcherState::kActive);
ASSERT_OK(status);
ASSERT_EQUALS(1LL, cursorId);
ASSERT_EQUALS("db.coll", nss.ns());
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index 6f91f80c940..8f1d0582f49 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -36,8 +36,8 @@
#include "mongo/client/connpool.h"
#include "mongo/client/constants.h"
-#include "mongo/client/dbclientcursor.h"
#include "mongo/client/dbclient_rs.h"
+#include "mongo/client/dbclientcursor.h"
#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/s/catalog/catalog_cache.h"
@@ -316,7 +316,12 @@ BSONObj ParallelConnectionState::toBSON() const {
BSONObj ParallelConnectionMetadata::toBSON() const {
return BSON("state" << (pcState ? pcState->toBSON() : BSONObj()) << "retryNext" << retryNext
- << "init" << initialized << "finish" << finished << "errored" << errored);
+ << "init"
+ << initialized
+ << "finish"
+ << finished
+ << "errored"
+ << errored);
}
void ParallelSortClusteredCursor::fullInit(OperationContext* txn) {
@@ -1040,13 +1045,14 @@ void ParallelSortClusteredCursor::_oldInit() {
conns[i]->done();
// Version is zero b/c this is deprecated codepath
- staleConfigExs.push_back(
- str::stream() << "stale config detected for "
- << RecvStaleConfigException(_ns,
- "ParallelCursor::_init",
- ChunkVersion(0, 0, OID()),
- ChunkVersion(0, 0, OID())).what()
- << errLoc);
+ staleConfigExs.push_back(str::stream()
+ << "stale config detected for "
+ << RecvStaleConfigException(_ns,
+ "ParallelCursor::_init",
+ ChunkVersion(0, 0, OID()),
+ ChunkVersion(0, 0, OID()))
+ .what()
+ << errLoc);
break;
}
@@ -1107,8 +1113,8 @@ void ParallelSortClusteredCursor::_oldInit() {
_cursors[i].reset(NULL, NULL);
if (!retry) {
- socketExs.push_back(str::stream()
- << "error querying server: " << servers[i]);
+ socketExs.push_back(str::stream() << "error querying server: "
+ << servers[i]);
conns[i]->done();
} else {
retryQueries.insert(i);
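The parallel.cpp hunk moves a `.what()` call onto its own line: when the object expression is itself multi-line, clang-format breaks before the trailing member access and gives it an extra indent. A hypothetical sketch of the same shape:

    #include <stdexcept>
    #include <string>

    // The temporary runtime_error spans two lines, so '.what()' drops to
    // its own line, indented past the object expression.
    std::string staleConfigMessage(const std::string& ns) {
        return std::string("stale config detected for ") +
            std::runtime_error(
                "ParallelCursor::_init: " + ns)
                .what();
    }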
diff --git a/src/mongo/client/read_preference.cpp b/src/mongo/client/read_preference.cpp
index 1ab5c5c13e1..9c58e95a1d4 100644
--- a/src/mongo/client/read_preference.cpp
+++ b/src/mongo/client/read_preference.cpp
@@ -84,9 +84,16 @@ StatusWith<ReadPreference> parseReadPreferenceMode(StringData prefStr) {
}
return Status(ErrorCodes::FailedToParse,
str::stream() << "Could not parse $readPreference mode '" << prefStr
- << "'. Only the modes '" << kPrimaryOnly << "', '"
- << kPrimaryPreferred << "', " << kSecondaryOnly << "', '"
- << kSecondaryPreferred << "', and '" << kNearest
+ << "'. Only the modes '"
+ << kPrimaryOnly
+ << "', '"
+ << kPrimaryPreferred
+ << "', "
+ << kSecondaryOnly
+ << "', '"
+ << kSecondaryPreferred
+ << "', and '"
+ << kNearest
<< "' are supported.");
}
diff --git a/src/mongo/client/read_preference_test.cpp b/src/mongo/client/read_preference_test.cpp
index 447f37d51ca..afc96632e26 100644
--- a/src/mongo/client/read_preference_test.cpp
+++ b/src/mongo/client/read_preference_test.cpp
@@ -52,13 +52,15 @@ TEST(ReadPreferenceSetting, ParseValid) {
// that the tags are parsed as the empty TagSet.
checkParse(BSON("mode"
<< "primary"
- << "tags" << BSON_ARRAY(BSONObj())),
+ << "tags"
+ << BSON_ARRAY(BSONObj())),
ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet::primaryOnly()));
checkParse(BSON("mode"
<< "secondaryPreferred"
- << "tags" << BSON_ARRAY(BSON("dc"
- << "ny"))),
+ << "tags"
+ << BSON_ARRAY(BSON("dc"
+ << "ny"))),
ReadPreferenceSetting(ReadPreference::SecondaryPreferred,
TagSet(BSON_ARRAY(BSON("dc"
<< "ny")))));
@@ -73,8 +75,9 @@ TEST(ReadPreferenceSetting, ParseInvalid) {
// mode primary can not have tags
checkParseFails(BSON("mode"
<< "primary"
- << "tags" << BSON_ARRAY(BSON("foo"
- << "bar"))));
+ << "tags"
+ << BSON_ARRAY(BSON("foo"
+ << "bar"))));
// bad mode
checkParseFails(BSON("mode"
<< "khalesi"));
diff --git a/src/mongo/client/remote_command_retry_scheduler.h b/src/mongo/client/remote_command_retry_scheduler.h
index e8bba705edf..d830ac2a582 100644
--- a/src/mongo/client/remote_command_retry_scheduler.h
+++ b/src/mongo/client/remote_command_retry_scheduler.h
@@ -35,8 +35,8 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/base/error_codes.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/condition_variable.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/time_support.h"
namespace mongo {
diff --git a/src/mongo/client/remote_command_retry_scheduler_test.cpp b/src/mongo/client/remote_command_retry_scheduler_test.cpp
index 3e3b3d3c513..c5eb97a381a 100644
--- a/src/mongo/client/remote_command_retry_scheduler_test.cpp
+++ b/src/mongo/client/remote_command_retry_scheduler_test.cpp
@@ -382,11 +382,11 @@ TEST_F(RemoteCommandRetrySchedulerTest, SchedulerIgnoresEmbeddedErrorInSuccessfu
// Scheduler does not parse document in a successful response for embedded errors.
// This is the case with some commands (e.g. find) which do not always return errors using the
// wire protocol.
- executor::RemoteCommandResponse response(BSON("ok" << 0 << "code"
- << int(ErrorCodes::FailedToParse) << "errmsg"
- << "injected error"),
- BSON("z" << 456),
- Milliseconds(100));
+ executor::RemoteCommandResponse response(
+ BSON("ok" << 0 << "code" << int(ErrorCodes::FailedToParse) << "errmsg"
+ << "injected error"),
+ BSON("z" << 456),
+ Milliseconds(100));
processNetworkResponse(response);
checkCompletionStatus(&scheduler, callback, response);
diff --git a/src/mongo/client/remote_command_runner_impl.cpp b/src/mongo/client/remote_command_runner_impl.cpp
index 36bf7283d90..353d133c58c 100644
--- a/src/mongo/client/remote_command_runner_impl.cpp
+++ b/src/mongo/client/remote_command_runner_impl.cpp
@@ -60,7 +60,8 @@ StatusWith<Milliseconds> getTimeoutMillis(const Date_t expDate, const Date_t now
if (expDate <= nowDate) {
return {ErrorCodes::ExceededTimeLimit,
str::stream() << "Went to run command, but it was too late. "
- "Expiration was set to " << dateToISOStringUTC(expDate)};
+ "Expiration was set to "
+ << dateToISOStringUTC(expDate)};
}
return expDate - nowDate;
}
@@ -80,10 +81,10 @@ Status getStatusFromCursorResult(DBClientCursor& cursor) {
getErrField(error).valuestrsafe());
}
-using RequestDownconverter = StatusWith<Message>(*)(const RemoteCommandRequest&);
-using ReplyUpconverter = StatusWith<RemoteCommandResponse>(*)(std::int32_t requestId,
- StringData cursorNamespace,
- const Message& response);
+using RequestDownconverter = StatusWith<Message> (*)(const RemoteCommandRequest&);
+using ReplyUpconverter = StatusWith<RemoteCommandResponse> (*)(std::int32_t requestId,
+ StringData cursorNamespace,
+ const Message& response);
template <RequestDownconverter downconvertRequest, ReplyUpconverter upconvertReply>
StatusWith<RemoteCommandResponse> runDownconvertedCommand(DBClientConnection* conn,
@@ -210,8 +211,11 @@ StatusWith<RemoteCommandResponse> RemoteCommandRunnerImpl::runCommand(
return StatusWith<RemoteCommandResponse>(
ErrorCodes::UnknownError,
str::stream() << "Sending command " << request.cmdObj << " on database "
- << request.dbname << " over network to " << request.target.toString()
- << " received exception " << ex.what());
+ << request.dbname
+ << " over network to "
+ << request.target.toString()
+ << " received exception "
+ << ex.what());
}
}
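The alias hunk above is another pure-spacing change: clang-format writes one space between the return type and the `(*)` declarator of a function-pointer alias. A sketch (Parser and parseLength are hypothetical):

    #include <string>

    // Function-pointer alias: note the space in 'int (*)(...)'.
    using Parser = int (*)(const std::string& input);

    inline int parseLength(const std::string& input) {
        return static_cast<int>(input.size());
    }

    constexpr Parser kDefaultParser = &parseLength;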
diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp
index a852d145ca4..30b6aca60fa 100644
--- a/src/mongo/client/replica_set_monitor.cpp
+++ b/src/mongo/client/replica_set_monitor.cpp
@@ -48,8 +48,8 @@
#include "mongo/util/debug_util.h"
#include "mongo/util/exit.h"
#include "mongo/util/log.h"
-#include "mongo/util/string_map.h"
#include "mongo/util/static_observer.h"
+#include "mongo/util/string_map.h"
#include "mongo/util/timer.h"
namespace mongo {
@@ -300,7 +300,9 @@ StatusWith<HostAndPort> ReplicaSetMonitor::getHostOrRefresh(const ReadPreference
return Status(ErrorCodes::FailedToSatisfyReadPreference,
str::stream() << "could not find host matching read preference "
- << criteria.toString() << " for set " << getName());
+ << criteria.toString()
+ << " for set "
+ << getName());
}
HostAndPort ReplicaSetMonitor::getMasterOrUassert() {
diff --git a/src/mongo/client/replica_set_monitor.h b/src/mongo/client/replica_set_monitor.h
index fc415806b0f..289244ea9a6 100644
--- a/src/mongo/client/replica_set_monitor.h
+++ b/src/mongo/client/replica_set_monitor.h
@@ -29,9 +29,9 @@
#include <atomic>
#include <memory>
+#include <memory>
#include <set>
#include <string>
-#include <memory>
#include "mongo/base/disallow_copying.h"
#include "mongo/base/string_data.h"
diff --git a/src/mongo/client/replica_set_monitor_internal.h b/src/mongo/client/replica_set_monitor_internal.h
index 6dda1935d29..4dbf9cb7996 100644
--- a/src/mongo/client/replica_set_monitor_internal.h
+++ b/src/mongo/client/replica_set_monitor_internal.h
@@ -44,8 +44,8 @@
#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/random.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/condition_variable.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
diff --git a/src/mongo/client/replica_set_monitor_manager.cpp b/src/mongo/client/replica_set_monitor_manager.cpp
index 2789d8c4df9..0f5513c0a8f 100644
--- a/src/mongo/client/replica_set_monitor_manager.cpp
+++ b/src/mongo/client/replica_set_monitor_manager.cpp
@@ -80,7 +80,8 @@ shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorManager::getOrCreateMonitor(
_taskExecutor = stdx::make_unique<ThreadPoolTaskExecutor>(
stdx::make_unique<NetworkInterfaceThreadPool>(netPtr), std::move(net));
LOG(1) << "Starting up task executor for monitoring replica sets in response to request to "
- "monitor set: " << connStr.toString();
+ "monitor set: "
+ << connStr.toString();
_taskExecutor->startup();
}
diff --git a/src/mongo/client/replica_set_monitor_test.cpp b/src/mongo/client/replica_set_monitor_test.cpp
index 845b66e8366..a27c3e00692 100644
--- a/src/mongo/client/replica_set_monitor_test.cpp
+++ b/src/mongo/client/replica_set_monitor_test.cpp
@@ -88,10 +88,22 @@ TEST(ReplicaSetMonitor, IsMasterReplyRSNotInitiated) {
BSONObj ismaster = BSON(
"ismaster" << false << "secondary" << false << "info"
<< "can't get local.system.replset config from self or any seed (EMPTYCONFIG)"
- << "isreplicaset" << true << "maxBsonObjectSize" << 16777216
- << "maxMessageSizeBytes" << 48000000 << "maxWriteBatchSize" << 1000
- << "localTime" << mongo::jsTime() << "maxWireVersion" << 2 << "minWireVersion"
- << 0 << "ok" << 1);
+ << "isreplicaset"
+ << true
+ << "maxBsonObjectSize"
+ << 16777216
+ << "maxMessageSizeBytes"
+ << 48000000
+ << "maxWriteBatchSize"
+ << 1000
+ << "localTime"
+ << mongo::jsTime()
+ << "maxWireVersion"
+ << 2
+ << "minWireVersion"
+ << 0
+ << "ok"
+ << 1);
IsMasterReply imr(HostAndPort(), -1, ismaster);
@@ -110,15 +122,34 @@ TEST(ReplicaSetMonitor, IsMasterReplyRSNotInitiated) {
TEST(ReplicaSetMonitor, IsMasterReplyRSPrimary) {
BSONObj ismaster = BSON("setName"
<< "test"
- << "setVersion" << 1 << "electionId" << OID("7fffffff0000000000000001")
- << "ismaster" << true << "secondary" << false << "hosts"
- << BSON_ARRAY("mongo.example:3000") << "primary"
+ << "setVersion"
+ << 1
+ << "electionId"
+ << OID("7fffffff0000000000000001")
+ << "ismaster"
+ << true
+ << "secondary"
+ << false
+ << "hosts"
+ << BSON_ARRAY("mongo.example:3000")
+ << "primary"
<< "mongo.example:3000"
<< "me"
<< "mongo.example:3000"
- << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
- << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
- << "maxWireVersion" << 2 << "minWireVersion" << 0 << "ok" << 1);
+ << "maxBsonObjectSize"
+ << 16777216
+ << "maxMessageSizeBytes"
+ << 48000000
+ << "maxWriteBatchSize"
+ << 1000
+ << "localTime"
+ << mongo::jsTime()
+ << "maxWireVersion"
+ << 2
+ << "minWireVersion"
+ << 0
+ << "ok"
+ << 1);
IsMasterReply imr(HostAndPort("mongo.example:3000"), -1, ismaster);
@@ -138,16 +169,38 @@ TEST(ReplicaSetMonitor, IsMasterReplyRSPrimary) {
TEST(ReplicaSetMonitor, IsMasterReplyPassiveSecondary) {
BSONObj ismaster = BSON("setName"
<< "test"
- << "setVersion" << 2 << "electionId" << OID("7fffffff0000000000000001")
- << "ismaster" << false << "secondary" << true << "hosts"
- << BSON_ARRAY("mongo.example:3000") << "passives"
- << BSON_ARRAY("mongo.example:3001") << "primary"
+ << "setVersion"
+ << 2
+ << "electionId"
+ << OID("7fffffff0000000000000001")
+ << "ismaster"
+ << false
+ << "secondary"
+ << true
+ << "hosts"
+ << BSON_ARRAY("mongo.example:3000")
+ << "passives"
+ << BSON_ARRAY("mongo.example:3001")
+ << "primary"
<< "mongo.example:3000"
- << "passive" << true << "me"
+ << "passive"
+ << true
+ << "me"
<< "mongo.example:3001"
- << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
- << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
- << "maxWireVersion" << 2 << "minWireVersion" << 0 << "ok" << 1);
+ << "maxBsonObjectSize"
+ << 16777216
+ << "maxMessageSizeBytes"
+ << 48000000
+ << "maxWriteBatchSize"
+ << 1000
+ << "localTime"
+ << mongo::jsTime()
+ << "maxWireVersion"
+ << 2
+ << "minWireVersion"
+ << 0
+ << "ok"
+ << 1);
IsMasterReply imr(HostAndPort("mongo.example:3001"), -1, ismaster);
@@ -168,15 +221,38 @@ TEST(ReplicaSetMonitor, IsMasterReplyPassiveSecondary) {
TEST(ReplicaSetMonitor, IsMasterReplyHiddenSecondary) {
BSONObj ismaster = BSON("setName"
<< "test"
- << "setVersion" << 2 << "electionId" << OID("7fffffff0000000000000001")
- << "ismaster" << false << "secondary" << true << "hosts"
- << BSON_ARRAY("mongo.example:3000") << "primary"
+ << "setVersion"
+ << 2
+ << "electionId"
+ << OID("7fffffff0000000000000001")
+ << "ismaster"
+ << false
+ << "secondary"
+ << true
+ << "hosts"
+ << BSON_ARRAY("mongo.example:3000")
+ << "primary"
<< "mongo.example:3000"
- << "passive" << true << "hidden" << true << "me"
+ << "passive"
+ << true
+ << "hidden"
+ << true
+ << "me"
<< "mongo.example:3001"
- << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
- << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
- << "maxWireVersion" << 2 << "minWireVersion" << 0 << "ok" << 1);
+ << "maxBsonObjectSize"
+ << 16777216
+ << "maxMessageSizeBytes"
+ << 48000000
+ << "maxWriteBatchSize"
+ << 1000
+ << "localTime"
+ << mongo::jsTime()
+ << "maxWireVersion"
+ << 2
+ << "minWireVersion"
+ << 0
+ << "ok"
+ << 1);
IsMasterReply imr(HostAndPort("mongo.example:3001"), -1, ismaster);
@@ -196,20 +272,40 @@ TEST(ReplicaSetMonitor, IsMasterReplyHiddenSecondary) {
TEST(ReplicaSetMonitor, IsMasterSecondaryWithTags) {
BSONObj ismaster = BSON("setName"
<< "test"
- << "setVersion" << 2 << "electionId" << OID("7fffffff0000000000000001")
- << "ismaster" << false << "secondary" << true << "hosts"
+ << "setVersion"
+ << 2
+ << "electionId"
+ << OID("7fffffff0000000000000001")
+ << "ismaster"
+ << false
+ << "secondary"
+ << true
+ << "hosts"
<< BSON_ARRAY("mongo.example:3000"
- << "mongo.example:3001") << "primary"
+ << "mongo.example:3001")
+ << "primary"
<< "mongo.example:3000"
<< "me"
<< "mongo.example:3001"
- << "maxBsonObjectSize" << 16777216 << "maxMessageSizeBytes" << 48000000
- << "maxWriteBatchSize" << 1000 << "localTime" << mongo::jsTime()
- << "maxWireVersion" << 2 << "minWireVersion" << 0 << "tags"
+ << "maxBsonObjectSize"
+ << 16777216
+ << "maxMessageSizeBytes"
+ << 48000000
+ << "maxWriteBatchSize"
+ << 1000
+ << "localTime"
+ << mongo::jsTime()
+ << "maxWireVersion"
+ << 2
+ << "minWireVersion"
+ << 0
+ << "tags"
<< BSON("dc"
<< "nyc"
<< "use"
- << "production") << "ok" << 1);
+ << "production")
+ << "ok"
+ << 1);
IsMasterReply imr(HostAndPort("mongo.example:3001"), -1, ismaster);
@@ -249,10 +345,16 @@ TEST(ReplicaSetMonitor, CheckAllSeedsSerial) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << primary << "secondary" << !primary
- << "hosts" << BSON_ARRAY("a"
- << "b"
- << "c") << "ok" << true));
+ << "ismaster"
+ << primary
+ << "secondary"
+ << !primary
+ << "hosts"
+ << BSON_ARRAY("a"
+ << "b"
+ << "c")
+ << "ok"
+ << true));
}
NextStep ns = refresher.getNextStep();
@@ -299,10 +401,16 @@ TEST(ReplicaSetMonitor, CheckAllSeedsParallel) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << primary << "secondary" << !primary
- << "hosts" << BSON_ARRAY("a"
- << "b"
- << "c") << "ok" << true));
+ << "ismaster"
+ << primary
+ << "secondary"
+ << !primary
+ << "hosts"
+ << BSON_ARRAY("a"
+ << "b"
+ << "c")
+ << "ok"
+ << true));
}
// Now all hosts have returned data
@@ -340,10 +448,16 @@ TEST(ReplicaSetMonitor, NoMasterInitAllUp) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << false << "secondary" << true << "hosts"
+ << "ismaster"
+ << false
+ << "secondary"
+ << true
+ << "hosts"
<< BSON_ARRAY("a"
<< "b"
- << "c") << "ok" << true));
+ << "c")
+ << "ok"
+ << true));
}
NextStep ns = refresher.getNextStep();
@@ -380,11 +494,17 @@ TEST(ReplicaSetMonitor, MasterNotInSeeds_NoPrimaryInIsMaster) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << false << "secondary" << true << "hosts"
+ << "ismaster"
+ << false
+ << "secondary"
+ << true
+ << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c"
- << "d") << "ok" << true));
+ << "d")
+ << "ok"
+ << true));
}
// Only look at "d" after exhausting all other hosts
@@ -395,11 +515,17 @@ TEST(ReplicaSetMonitor, MasterNotInSeeds_NoPrimaryInIsMaster) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << true << "secondary" << false << "hosts"
+ << "ismaster"
+ << true
+ << "secondary"
+ << false
+ << "hosts"
<< BSON_ARRAY("a"
<< "b"
<< "c"
- << "d") << "ok" << true));
+ << "d")
+ << "ok"
+ << true));
ns = refresher.getNextStep();
@@ -449,13 +575,19 @@ TEST(ReplicaSetMonitor, MasterNotInSeeds_PrimaryInIsMaster) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << primary << "secondary" << !primary
+ << "ismaster"
+ << primary
+ << "secondary"
+ << !primary
<< "primary"
<< "d"
- << "hosts" << BSON_ARRAY("a"
- << "b"
- << "c"
- << "d") << "ok" << true));
+ << "hosts"
+ << BSON_ARRAY("a"
+ << "b"
+ << "c"
+ << "d")
+ << "ok"
+ << true));
}
NextStep ns = refresher.getNextStep();
@@ -501,8 +633,14 @@ TEST(ReplicaSetMonitor, SlavesUsableEvenIfNoMaster) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << false << "secondary" << true << "hosts"
- << BSON_ARRAY("a") << "ok" << true));
+ << "ismaster"
+ << false
+ << "secondary"
+ << true
+ << "hosts"
+ << BSON_ARRAY("a")
+ << "ok"
+ << true));
// Check intended conditions for entry to refreshUntilMatches.
ASSERT(state->currentScan->hostsToScan.empty());
@@ -547,10 +685,16 @@ TEST(ReplicaSetMonitor, MultipleMasterLastNodeWins) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << true << "secondary" << false << "hosts"
+ << "ismaster"
+ << true
+ << "secondary"
+ << false
+ << "hosts"
<< BSON_ARRAY("a"
<< "b"
- << "c") << "ok" << true));
+ << "c")
+ << "ok"
+ << true));
// Ensure the set primary is the host we just got a reply from
HostAndPort currentPrimary = state->getMatchingHost(primaryOnly);
@@ -594,9 +738,14 @@ TEST(ReplicaSetMonitor, MasterIsSourceOfTruth) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << primary << "secondary" << !primary
- << "hosts" << (primary ? primaryHosts : secondaryHosts)
- << "ok" << true));
+ << "ismaster"
+ << primary
+ << "secondary"
+ << !primary
+ << "hosts"
+ << (primary ? primaryHosts : secondaryHosts)
+ << "ok"
+ << true));
ns = refresher.getNextStep();
}
@@ -643,8 +792,14 @@ TEST(ReplicaSetMonitor, MultipleMastersDisagree) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << true << "secondary" << false << "hosts"
- << hostsForSeed[i % 2] << "ok" << true));
+ << "ismaster"
+ << true
+ << "secondary"
+ << false
+ << "hosts"
+ << hostsForSeed[i % 2]
+ << "ok"
+ << true));
// Ensure the primary is the host we just got a reply from
HostAndPort currentPrimary = state->getMatchingHost(primaryOnly);
@@ -673,8 +828,14 @@ TEST(ReplicaSetMonitor, MultipleMastersDisagree) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << false << "secondary" << true << "hosts"
- << hostsForSeed[0] << "ok" << true));
+ << "ismaster"
+ << false
+ << "secondary"
+ << true
+ << "hosts"
+ << hostsForSeed[0]
+ << "ok"
+ << true));
// scan should be complete
ns = refresher.getNextStep();
@@ -721,10 +882,16 @@ TEST(ReplicaSetMonitor, GetMatchingDuringScan) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << primary << "secondary" << !primary
- << "hosts" << BSON_ARRAY("a"
- << "b"
- << "c") << "ok" << true));
+ << "ismaster"
+ << primary
+ << "secondary"
+ << !primary
+ << "hosts"
+ << BSON_ARRAY("a"
+ << "b"
+ << "c")
+ << "ok"
+ << true));
bool hasPrimary = !(state->getMatchingHost(primaryOnly).empty());
bool hasSecondary = !(state->getMatchingHost(secondaryOnly).empty());
@@ -761,10 +928,16 @@ TEST(ReplicaSetMonitor, OutOfBandFailedHost) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << primary << "secondary" << !primary
- << "hosts" << BSON_ARRAY("a"
- << "b"
- << "c") << "ok" << true));
+ << "ismaster"
+ << primary
+ << "secondary"
+ << !primary
+ << "hosts"
+ << BSON_ARRAY("a"
+ << "b"
+ << "c")
+ << "ok"
+ << true));
if (i >= 1) {
HostAndPort a("a");
@@ -811,10 +984,17 @@ TEST(ReplicaSetMonitorTests, NewPrimaryWithMaxElectionId) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << true << "secondary" << false << "hosts"
+ << "ismaster"
+ << true
+ << "secondary"
+ << false
+ << "hosts"
<< BSON_ARRAY("a"
<< "b"
- << "c") << "electionId" << OID::gen() << "ok"
+ << "c")
+ << "electionId"
+ << OID::gen()
+ << "ok"
<< true));
// Ensure the set primary is the host we just got a reply from
@@ -862,12 +1042,18 @@ TEST(ReplicaSetMonitorTests, IgnoreElectionIdFromSecondaries) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << primary << "secondary" << !primary
+ << "ismaster"
+ << primary
+ << "secondary"
+ << !primary
<< "electionId"
- << (primary ? primaryElectionId : OID::gen()) << "hosts"
+ << (primary ? primaryElectionId : OID::gen())
+ << "hosts"
<< BSON_ARRAY("a"
<< "b"
- << "c") << "ok" << true));
+ << "c")
+ << "ok"
+ << true));
}
// check that the SetState's maxElectionId == primary's electionId
@@ -901,11 +1087,20 @@ TEST(ReplicaSetMonitorTests, StalePrimaryWithObsoleteElectionId) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << true << "secondary" << false
- << "setVersion" << 1 << "electionId" << secondElectionId
- << "hosts" << BSON_ARRAY("a"
- << "b"
- << "c") << "ok" << true));
+ << "ismaster"
+ << true
+ << "secondary"
+ << false
+ << "setVersion"
+ << 1
+ << "electionId"
+ << secondElectionId
+ << "hosts"
+ << BSON_ARRAY("a"
+ << "b"
+ << "c")
+ << "ok"
+ << true));
Node* node = state->findNode(ns.host);
ASSERT(node);
@@ -925,11 +1120,18 @@ TEST(ReplicaSetMonitorTests, StalePrimaryWithObsoleteElectionId) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << true << "secondary" << false
- << "electionId" << firstElectionId << "hosts"
+ << "ismaster"
+ << true
+ << "secondary"
+ << false
+ << "electionId"
+ << firstElectionId
+ << "hosts"
<< BSON_ARRAY("a"
<< "b"
- << "c") << "ok" << true));
+ << "c")
+ << "ok"
+ << true));
Node* node = state->findNode(ns.host);
ASSERT(node);
@@ -951,10 +1153,16 @@ TEST(ReplicaSetMonitorTests, StalePrimaryWithObsoleteElectionId) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << false << "secondary" << true << "hosts"
+ << "ismaster"
+ << false
+ << "secondary"
+ << true
+ << "hosts"
<< BSON_ARRAY("a"
<< "b"
- << "c") << "ok" << true));
+ << "c")
+ << "ok"
+ << true));
Node* node = state->findNode(ns.host);
ASSERT(node);
@@ -997,11 +1205,20 @@ TEST(ReplicaSetMonitorTests, TwoPrimaries2ndHasNewerConfigVersion) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << true << "secondary" << false << "setVersion"
- << 1 << "electionId" << OID("7fffffff0000000000000001")
- << "hosts" << BSON_ARRAY("a"
- << "b"
- << "c") << "ok" << true));
+ << "ismaster"
+ << true
+ << "secondary"
+ << false
+ << "setVersion"
+ << 1
+ << "electionId"
+ << OID("7fffffff0000000000000001")
+ << "hosts"
+ << BSON_ARRAY("a"
+ << "b"
+ << "c")
+ << "ok"
+ << true));
// check that the SetState's maxElectionId == primary's electionId
ASSERT_EQUALS(state->maxElectionId, OID("7fffffff0000000000000001"));
@@ -1014,11 +1231,20 @@ TEST(ReplicaSetMonitorTests, TwoPrimaries2ndHasNewerConfigVersion) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << true << "secondary" << false << "setVersion"
- << 2 << "electionId" << primaryElectionId << "hosts"
+ << "ismaster"
+ << true
+ << "secondary"
+ << false
+ << "setVersion"
+ << 2
+ << "electionId"
+ << primaryElectionId
+ << "hosts"
<< BSON_ARRAY("a"
<< "b"
- << "c") << "ok" << true));
+ << "c")
+ << "ok"
+ << true));
ASSERT_EQUALS(state->maxElectionId, primaryElectionId);
ASSERT_EQUALS(state->configVersion, 2);
@@ -1040,11 +1266,20 @@ TEST(ReplicaSetMonitorTests, TwoPrimaries2ndHasOlderConfigVersion) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << true << "secondary" << false << "electionId"
- << primaryElectionId << "setVersion" << 2 << "hosts"
+ << "ismaster"
+ << true
+ << "secondary"
+ << false
+ << "electionId"
+ << primaryElectionId
+ << "setVersion"
+ << 2
+ << "hosts"
<< BSON_ARRAY("a"
<< "b"
- << "c") << "ok" << true));
+ << "c")
+ << "ok"
+ << true));
ASSERT_EQUALS(state->maxElectionId, primaryElectionId);
ASSERT_EQUALS(state->configVersion, 2);
@@ -1054,11 +1289,20 @@ TEST(ReplicaSetMonitorTests, TwoPrimaries2ndHasOlderConfigVersion) {
-1,
BSON("setName"
<< "name"
- << "ismaster" << true << "secondary" << false << "setVersion"
- << 1 << "electionId" << OID("7fffffff0000000000000001")
- << "hosts" << BSON_ARRAY("a"
- << "b"
- << "c") << "ok" << true));
+ << "ismaster"
+ << true
+ << "secondary"
+ << false
+ << "setVersion"
+ << 1
+ << "electionId"
+ << OID("7fffffff0000000000000001")
+ << "hosts"
+ << BSON_ARRAY("a"
+ << "b"
+ << "c")
+ << "ok"
+ << true));
ASSERT_EQUALS(state->maxElectionId, primaryElectionId);
ASSERT_EQUALS(state->configVersion, 2);
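The replica_set_monitor_test.cpp hunks above all follow one mechanical pattern: once a chained << expression inside a BSON() macro no longer fits the project's column limit (presumably 100, per the .clang-format in use), the upgraded formatter breaks before every << instead of packing several key/value operands per line. A minimal, self-contained sketch of that shape, using std::ostringstream as a stand-in for the BSON() macro (nothing here is MongoDB API):

    #include <sstream>
    #include <string>

    // Illustrative stand-in for the BSON() chains above: once the chained
    // << expression wraps at all, the formatter breaks before every
    // operator, producing the one-token-per-line layout that dominates
    // this file's hunks. The contents are unchanged, only the layout.
    std::string buildReply(bool primary) {
        std::ostringstream reply;
        reply << "setName"
              << "name"
              << "ismaster"
              << primary
              << "secondary"
              << !primary;
        return reply.str();
    }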
diff --git a/src/mongo/client/sasl_scramsha1_client_conversation.cpp b/src/mongo/client/sasl_scramsha1_client_conversation.cpp
index e7d8643668a..ba5bd94389b 100644
--- a/src/mongo/client/sasl_scramsha1_client_conversation.cpp
+++ b/src/mongo/client/sasl_scramsha1_client_conversation.cpp
@@ -69,9 +69,9 @@ StatusWith<bool> SaslSCRAMSHA1ClientConversation::step(StringData inputData,
case 3:
return _thirdStep(input, outputData);
default:
- return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
- mongoutils::str::stream()
- << "Invalid SCRAM-SHA-1 authentication step: " << _step);
+ return StatusWith<bool>(
+ ErrorCodes::AuthenticationFailed,
+ mongoutils::str::stream() << "Invalid SCRAM-SHA-1 authentication step: " << _step);
}
}
@@ -135,19 +135,20 @@ StatusWith<bool> SaslSCRAMSHA1ClientConversation::_secondStep(const std::vector<
ErrorCodes::BadValue,
mongoutils::str::stream()
<< "Incorrect number of arguments for first SCRAM-SHA-1 server message, got "
- << input.size() << " expected 3");
+ << input.size()
+ << " expected 3");
} else if (!str::startsWith(input[0], "r=") || input[0].size() < 2) {
- return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 client|server nonce: " << input[0]);
+ return StatusWith<bool>(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 client|server nonce: " << input[0]);
} else if (!str::startsWith(input[1], "s=") || input[1].size() < 6) {
return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 salt: " << input[1]);
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 salt: "
+ << input[1]);
} else if (!str::startsWith(input[2], "i=") || input[2].size() < 3) {
- return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 iteration count: " << input[2]);
+ return StatusWith<bool>(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 iteration count: " << input[2]);
}
std::string nonce = input[0].substr(2);
@@ -165,7 +166,8 @@ StatusWith<bool> SaslSCRAMSHA1ClientConversation::_secondStep(const std::vector<
if (status != Status::OK()) {
return StatusWith<bool>(ErrorCodes::BadValue,
mongoutils::str::stream()
- << "Failed to parse SCRAM-SHA-1 iteration count: " << input[2]);
+ << "Failed to parse SCRAM-SHA-1 iteration count: "
+ << input[2]);
}
// Append client-final-message-without-proof to _authMessage
@@ -208,19 +210,21 @@ StatusWith<bool> SaslSCRAMSHA1ClientConversation::_thirdStep(const std::vector<s
ErrorCodes::BadValue,
mongoutils::str::stream()
<< "Incorrect number of arguments for final SCRAM-SHA-1 server message, got "
- << input.size() << " expected 1");
+ << input.size()
+ << " expected 1");
} else if (input[0].size() < 3) {
return StatusWith<bool>(ErrorCodes::BadValue,
mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 server message length: " << input[0]);
+ << "Incorrect SCRAM-SHA-1 server message length: "
+ << input[0]);
} else if (str::startsWith(input[0], "e=")) {
return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
mongoutils::str::stream() << "SCRAM-SHA-1 authentication failure: "
<< input[0].substr(2));
} else if (!str::startsWith(input[0], "v=")) {
- return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 ServerSignature: " << input[0]);
+ return StatusWith<bool>(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 ServerSignature: " << input[0]);
}
bool validServerSignature =
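The sasl_scramsha1_client_conversation.cpp hunks show the same operator-splitting applied to error paths: where a mongoutils::str::stream() message overflows, the formatter either hoists the whole stream expression onto a continuation line after the error code or breaks before each <<. A self-contained stand-in for the shape (std::runtime_error replaces StatusWith<bool> purely for illustration; checkNonce is an invented name):

    #include <sstream>
    #include <stdexcept>
    #include <string>

    // Mirrors the reformatted nonce check in _secondStep(): the message is
    // assembled with a stream, and the formatter decides per expression
    // whether the chain fits on one line or breaks before each <<.
    void checkNonce(const std::string& field) {
        // rfind(prefix, 0) == 0 is the standard startsWith idiom.
        if (field.rfind("r=", 0) != 0 || field.size() < 2) {
            std::ostringstream msg;
            msg << "Incorrect SCRAM-SHA-1 client|server nonce: " << field;
            throw std::runtime_error(msg.str());
        }
    }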
diff --git a/src/mongo/client/sasl_sspi.cpp b/src/mongo/client/sasl_sspi.cpp
index c1ee9fce981..67852c5098d 100644
--- a/src/mongo/client/sasl_sspi.cpp
+++ b/src/mongo/client/sasl_sspi.cpp
@@ -487,7 +487,8 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(SaslSspiClientPlugin,
if (SASL_OK != ret) {
return Status(ErrorCodes::UnknownError,
mongoutils::str::stream() << "could not add SASL Client SSPI plugin "
- << sspiPluginName << ": "
+ << sspiPluginName
+ << ": "
<< sasl_errstring(ret, NULL, NULL));
}
@@ -500,7 +501,8 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(SaslCramClientPlugin,
if (SASL_OK != ret) {
return Status(ErrorCodes::UnknownError,
mongoutils::str::stream() << "Could not add SASL Client CRAM-MD5 plugin "
- << sspiPluginName << ": "
+ << sspiPluginName
+ << ": "
<< sasl_errstring(ret, NULL, NULL));
}
@@ -514,7 +516,8 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(SaslPlainClientPlugin,
if (SASL_OK != ret) {
return Status(ErrorCodes::UnknownError,
mongoutils::str::stream() << "Could not add SASL Client PLAIN plugin "
- << sspiPluginName << ": "
+ << sspiPluginName
+ << ": "
<< sasl_errstring(ret, NULL, NULL));
}
diff --git a/src/mongo/client/sasl_sspi_options.cpp b/src/mongo/client/sasl_sspi_options.cpp
index c63a3f408ba..60368522c9a 100644
--- a/src/mongo/client/sasl_sspi_options.cpp
+++ b/src/mongo/client/sasl_sspi_options.cpp
@@ -36,8 +36,8 @@
#include <vector>
#include "mongo/base/status.h"
-#include "mongo/util/options_parser/startup_options.h"
#include "mongo/util/options_parser/startup_option_init.h"
+#include "mongo/util/options_parser/startup_options.h"
namespace mongo {
@@ -45,16 +45,19 @@ SASLSSPIGlobalParams saslSSPIGlobalParams;
Status addSASLSSPIOptions(moe::OptionSection* options) {
moe::OptionSection sspiOptions("Kerberos Options");
- sspiOptions.addOptionChaining("security.sspiHostnameCanonicalization",
- "sspiHostnameCanonicalization",
- moe::String,
- "DNS resolution strategy to use for hostname canonicalization. "
- "May be one of: {none, forward, forwardAndReverse}")
+ sspiOptions
+ .addOptionChaining("security.sspiHostnameCanonicalization",
+ "sspiHostnameCanonicalization",
+ moe::String,
+ "DNS resolution strategy to use for hostname canonicalization. "
+ "May be one of: {none, forward, forwardAndReverse}")
.setDefault(moe::Value(std::string("none")));
- sspiOptions.addOptionChaining("security.sspiRealmOverride",
- "sspiRealmOverride",
- moe::String,
- "Override the detected realm with the provided string").hidden();
+ sspiOptions
+ .addOptionChaining("security.sspiRealmOverride",
+ "sspiRealmOverride",
+ moe::String,
+ "Override the detected realm with the provided string")
+ .hidden();
return options->addSection(sspiOptions);
}
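The sasl_sspi_options.cpp hunk above shows the fluent-chain rule: when a call chain such as addOptionChaining(...).hidden() wraps, the receiver is left alone on its line and each .method() call breaks onto its own line. A minimal sketch of that shape, with an invented OptionBuilder standing in for moe::OptionSection (the real signatures differ):

    #include <string>

    // Stand-in for the fluent interface; it only models the chain shape.
    struct OptionBuilder {
        OptionBuilder& addOption(const std::string& /*name*/) { return *this; }
        OptionBuilder& hidden() { return *this; }
    };

    void configure(OptionBuilder& options) {
        // Post-format shape: receiver alone on its line, each chained
        // call broken out, matching the sspiRealmOverride hunk above.
        options
            .addOption("security.sspiRealmOverride")
            .hidden();
    }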
diff --git a/src/mongo/client/scoped_db_conn_test.cpp b/src/mongo/client/scoped_db_conn_test.cpp
index 7057b2ed024..59d0bfe5009 100644
--- a/src/mongo/client/scoped_db_conn_test.cpp
+++ b/src/mongo/client/scoped_db_conn_test.cpp
@@ -29,8 +29,8 @@
#include "mongo/platform/basic.h"
-#include <vector>
#include <string>
+#include <vector>
#include "mongo/client/connpool.h"
#include "mongo/client/global_conn_pool.h"
@@ -207,8 +207,8 @@ public:
break;
}
if (timer.seconds() > 20) {
- FAIL(str::stream()
- << "Timed out connecting to dummy server: " << connectStatus.toString());
+ FAIL(str::stream() << "Timed out connecting to dummy server: "
+ << connectStatus.toString());
}
}
}
diff --git a/src/mongo/crypto/crypto_openssl.cpp b/src/mongo/crypto/crypto_openssl.cpp
index 4dc1e5d02c4..ca6844acb9e 100644
--- a/src/mongo/crypto/crypto_openssl.cpp
+++ b/src/mongo/crypto/crypto_openssl.cpp
@@ -35,9 +35,9 @@
#error This file should only be included in SSL-enabled builds
#endif
-#include <openssl/sha.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>
+#include <openssl/sha.h>
namespace mongo {
namespace crypto {
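Several of the smaller hunks in this commit, including the OpenSSL header swap above and the <string>/<vector> swap in scoped_db_conn_test.cpp, are pure include reordering, presumably via clang-format's SortIncludes option: each contiguous #include block is alphabetized, and only the order changes, never the set of headers. Reproduced from the scoped_db_conn_test.cpp hunk:

    // Before: out of alphabetical order within the contiguous block.
    #include <vector>
    #include <string>

    // After: SortIncludes alphabetizes the block.
    #include <string>
    #include <vector>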
diff --git a/src/mongo/crypto/crypto_test.cpp b/src/mongo/crypto/crypto_test.cpp
index 52d488b92cf..b3a1a6badb7 100644
--- a/src/mongo/crypto/crypto_test.cpp
+++ b/src/mongo/crypto/crypto_test.cpp
@@ -37,49 +37,12 @@ const int digestLen = 20;
const struct {
const char* msg;
unsigned char hash[digestLen];
-} sha1Tests[] = {{"abc",
- {0xa9,
- 0x99,
- 0x3e,
- 0x36,
- 0x47,
- 0x06,
- 0x81,
- 0x6a,
- 0xba,
- 0x3e,
- 0x25,
- 0x71,
- 0x78,
- 0x50,
- 0xc2,
- 0x6c,
- 0x9c,
- 0xd0,
- 0xd8,
- 0x9d}},
+} sha1Tests[] = {{"abc", {0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81, 0x6a, 0xba, 0x3e,
+ 0x25, 0x71, 0x78, 0x50, 0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d}},
{"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
- {0x84,
- 0x98,
- 0x3E,
- 0x44,
- 0x1C,
- 0x3B,
- 0xD2,
- 0x6E,
- 0xBA,
- 0xAE,
- 0x4A,
- 0xA1,
- 0xF9,
- 0x51,
- 0x29,
- 0xE5,
- 0xE5,
- 0x46,
- 0x70,
- 0xF1}}};
+ {0x84, 0x98, 0x3E, 0x44, 0x1C, 0x3B, 0xD2, 0x6E, 0xBA, 0xAE,
+ 0x4A, 0xA1, 0xF9, 0x51, 0x29, 0xE5, 0xE5, 0x46, 0x70, 0xF1}}};
TEST(CryptoVectors, SHA1) {
unsigned char sha1Result[digestLen];
@@ -105,400 +68,53 @@ const struct {
unsigned char hash[digestLen];
} hmacSha1Tests[] = {
// RFC test case 1
- {{0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b,
- 0x0b},
+ {{0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b},
20,
{0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65},
8,
- {0xb6,
- 0x17,
- 0x31,
- 0x86,
- 0x55,
- 0x05,
- 0x72,
- 0x64,
- 0xe2,
- 0x8b,
- 0xc0,
- 0xb6,
- 0xfb,
- 0x37,
- 0x8c,
- 0x8e,
- 0xf1,
- 0x46,
- 0xbe,
- 0x00}},
+ {0xb6, 0x17, 0x31, 0x86, 0x55, 0x05, 0x72, 0x64, 0xe2, 0x8b,
+ 0xc0, 0xb6, 0xfb, 0x37, 0x8c, 0x8e, 0xf1, 0x46, 0xbe, 0x00}},
// RFC test case 3
- {{0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa},
+ {{0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa},
20,
- {0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd,
- 0xdd},
+ {0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd,
+ 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd},
50,
- {0x12,
- 0x5d,
- 0x73,
- 0x42,
- 0xb9,
- 0xac,
- 0x11,
- 0xcd,
- 0x91,
- 0xa3,
- 0x9a,
- 0xf4,
- 0x8a,
- 0xa1,
- 0x7b,
- 0x4f,
- 0x63,
- 0xf1,
- 0x75,
- 0xd3}},
+ {0x12, 0x5d, 0x73, 0x42, 0xb9, 0xac, 0x11, 0xcd, 0x91, 0xa3,
+ 0x9a, 0xf4, 0x8a, 0xa1, 0x7b, 0x4f, 0x63, 0xf1, 0x75, 0xd3}},
// RFC test case 4
- {{0x01,
- 0x02,
- 0x03,
- 0x04,
- 0x05,
- 0x06,
- 0x07,
- 0x08,
- 0x09,
- 0x0a,
- 0x0b,
- 0x0c,
- 0x0d,
- 0x0e,
- 0x0f,
- 0x10,
- 0x11,
- 0x12,
- 0x13,
- 0x14,
- 0x15,
- 0x16,
- 0x17,
- 0x18,
- 0x19},
+ {{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d,
+ 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19},
25,
- {0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd,
- 0xcd},
+ {0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd,
+ 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd, 0xcd},
50,
- {0x4c,
- 0x90,
- 0x07,
- 0xf4,
- 0x02,
- 0x62,
- 0x50,
- 0xc6,
- 0xbc,
- 0x84,
- 0x14,
- 0xf9,
- 0xbf,
- 0x50,
- 0xc8,
- 0x6c,
- 0x2d,
- 0x72,
- 0x35,
- 0xda}},
+ {0x4c, 0x90, 0x07, 0xf4, 0x02, 0x62, 0x50, 0xc6, 0xbc, 0x84,
+ 0x14, 0xf9, 0xbf, 0x50, 0xc8, 0x6c, 0x2d, 0x72, 0x35, 0xda}},
// RFC test case 6
- {{0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa,
- 0xaa},
+ {{0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa},
80,
- {0x54,
- 0x65,
- 0x73,
- 0x74,
- 0x20,
- 0x55,
- 0x73,
- 0x69,
- 0x6e,
- 0x67,
- 0x20,
- 0x4c,
- 0x61,
- 0x72,
- 0x67,
- 0x65,
- 0x72,
- 0x20,
- 0x54,
- 0x68,
- 0x61,
- 0x6e,
- 0x20,
- 0x42,
- 0x6c,
- 0x6f,
- 0x63,
- 0x6b,
- 0x2d,
- 0x53,
- 0x69,
- 0x7a,
- 0x65,
- 0x20,
- 0x4b,
- 0x65,
- 0x79,
- 0x20,
- 0x2d,
- 0x20,
- 0x48,
- 0x61,
- 0x73,
- 0x68,
- 0x20,
- 0x4b,
- 0x65,
- 0x79,
- 0x20,
- 0x46,
- 0x69,
- 0x72,
- 0x73,
- 0x74},
+ {0x54, 0x65, 0x73, 0x74, 0x20, 0x55, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x4c, 0x61, 0x72,
+ 0x67, 0x65, 0x72, 0x20, 0x54, 0x68, 0x61, 0x6e, 0x20, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x2d, 0x53, 0x69, 0x7a, 0x65, 0x20, 0x4b, 0x65, 0x79, 0x20, 0x2d, 0x20, 0x48, 0x61,
+ 0x73, 0x68, 0x20, 0x4b, 0x65, 0x79, 0x20, 0x46, 0x69, 0x72, 0x73, 0x74},
54,
- {0xaa,
- 0x4a,
- 0xe5,
- 0xe1,
- 0x52,
- 0x72,
- 0xd0,
- 0x0e,
- 0x95,
- 0x70,
- 0x56,
- 0x37,
- 0xce,
- 0x8a,
- 0x3b,
- 0x55,
- 0xed,
- 0x40,
- 0x21,
- 0x12}}};
+ {0xaa, 0x4a, 0xe5, 0xe1, 0x52, 0x72, 0xd0, 0x0e, 0x95, 0x70,
+ 0x56, 0x37, 0xce, 0x8a, 0x3b, 0x55, 0xed, 0x40, 0x21, 0x12}}};
TEST(CryptoVectors, HMACSHA1) {
unsigned char hmacSha1Result[digestLen];
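crypto_test.cpp is the one place this commit makes code denser rather than taller: initializer lists that previously held one byte per line are repacked into rows that fill the column limit. The bytes are taken verbatim from the sha1Tests vector above, the SHA-1 digest of "abc"; only the name kSha1OfAbc is a placeholder:

    // One hex byte per line (pre-format) collapses to packed rows
    // (post-format); the contents are unchanged, only the layout.
    const unsigned char kSha1OfAbc[20] = {0xa9, 0x99, 0x3e, 0x36, 0x47, 0x06, 0x81,
                                          0x6a, 0xba, 0x3e, 0x25, 0x71, 0x78, 0x50,
                                          0xc2, 0x6c, 0x9c, 0xd0, 0xd8, 0x9d};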
diff --git a/src/mongo/crypto/mechanism_scram.cpp b/src/mongo/crypto/mechanism_scram.cpp
index 086e9943c4c..329da56c9f8 100644
--- a/src/mongo/crypto/mechanism_scram.cpp
+++ b/src/mongo/crypto/mechanism_scram.cpp
@@ -156,8 +156,10 @@ BSONObj generateCredentials(const std::string& hashedPassword, int iterationCoun
std::string encodedServerKey = base64::encode(reinterpret_cast<char*>(serverKey), hashSize);
return BSON(iterationCountFieldName << iterationCount << saltFieldName << encodedUserSalt
- << storedKeyFieldName << encodedStoredKey
- << serverKeyFieldName << encodedServerKey);
+ << storedKeyFieldName
+ << encodedStoredKey
+ << serverKeyFieldName
+ << encodedServerKey);
}
std::string generateClientProof(const unsigned char saltedPassword[hashSize],
diff --git a/src/mongo/crypto/tom/tomcrypt.h b/src/mongo/crypto/tom/tomcrypt.h
index f707cbef616..aa4d5c02df5 100644
--- a/src/mongo/crypto/tom/tomcrypt.h
+++ b/src/mongo/crypto/tom/tomcrypt.h
@@ -16,12 +16,12 @@
#ifndef TOMCRYPT_H_
#define TOMCRYPT_H_
#include <assert.h>
+#include <ctype.h>
+#include <limits.h>
#include <stdio.h>
-#include <string.h>
#include <stdlib.h>
+#include <string.h>
#include <time.h>
-#include <ctype.h>
-#include <limits.h>
/* use configuration data */
#include "tomcrypt_custom.h"
diff --git a/src/mongo/crypto/tom/tomcrypt_cfg.h b/src/mongo/crypto/tom/tomcrypt_cfg.h
index c599bab88ca..37dc04780d9 100644
--- a/src/mongo/crypto/tom/tomcrypt_cfg.h
+++ b/src/mongo/crypto/tom/tomcrypt_cfg.h
@@ -43,8 +43,10 @@ LTC_EXPORT void* LTC_CALL XREALLOC(void* p, size_t n);
LTC_EXPORT void* LTC_CALL XCALLOC(size_t n, size_t s);
LTC_EXPORT void LTC_CALL XFREE(void* p);
-LTC_EXPORT void LTC_CALL
-XQSORT(void* base, size_t nmemb, size_t size, int (*compar)(const void*, const void*));
+LTC_EXPORT void LTC_CALL XQSORT(void* base,
+ size_t nmemb,
+ size_t size,
+ int (*compar)(const void*, const void*));
/* change the clock function too */
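The XQSORT hunk above shows the declaration rule: once a parameter list overflows, each parameter gets its own line aligned to the opening parenthesis, rather than breaking before the function name as the old layout did. A compilable equivalent with an invented name:

    #include <cstddef>

    // Hypothetical declaration mirroring XQSORT: one parameter per line,
    // aligned to the opening parenthesis.
    void myQsort(void* base,
                 std::size_t nmemb,
                 std::size_t size,
                 int (*compar)(const void*, const void*));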
diff --git a/src/mongo/db/auth/action_set.cpp b/src/mongo/db/auth/action_set.cpp
index 7d3dc9f1712..924ec1e1439 100644
--- a/src/mongo/db/auth/action_set.cpp
+++ b/src/mongo/db/auth/action_set.cpp
@@ -37,8 +37,8 @@
#include "mongo/base/status.h"
#include "mongo/bson/util/builder.h"
#include "mongo/util/log.h"
-#include "mongo/util/stringutils.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/stringutils.h"
namespace mongo {
@@ -97,9 +97,9 @@ Status ActionSet::parseActionSetFromString(const std::string& actionsString, Act
}
std::string unrecognizedActionsString;
joinStringDelim(unrecognizedActions, &unrecognizedActionsString, ',');
- return Status(
- ErrorCodes::FailedToParse,
- str::stream() << "Unrecognized action privilege strings: " << unrecognizedActionsString);
+ return Status(ErrorCodes::FailedToParse,
+ str::stream() << "Unrecognized action privilege strings: "
+ << unrecognizedActionsString);
}
Status ActionSet::parseActionSetFromStringVector(const std::vector<std::string>& actionsVector,
diff --git a/src/mongo/db/auth/auth_decorations.cpp b/src/mongo/db/auth/auth_decorations.cpp
index 2bd2264e0f9..60b148d1ad0 100644
--- a/src/mongo/db/auth/auth_decorations.cpp
+++ b/src/mongo/db/auth/auth_decorations.cpp
@@ -34,8 +34,8 @@
#include "mongo/base/init.h"
#include "mongo/db/auth/authentication_session.h"
#include "mongo/db/auth/authorization_manager.h"
-#include "mongo/db/auth/authz_manager_external_state.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/authz_manager_external_state.h"
#include "mongo/db/client.h"
#include "mongo/db/server_options.h"
#include "mongo/db/service_context.h"
diff --git a/src/mongo/db/auth/auth_index_d.cpp b/src/mongo/db/auth/auth_index_d.cpp
index ac3d38ebf03..8f45cfda0e3 100644
--- a/src/mongo/db/auth/auth_index_d.cpp
+++ b/src/mongo/db/auth/auth_index_d.cpp
@@ -61,16 +61,20 @@ std::string v3SystemRolesIndexName;
MONGO_INITIALIZER(AuthIndexKeyPatterns)(InitializerContext*) {
v1SystemUsersKeyPattern = BSON("user" << 1 << "userSource" << 1);
- v3SystemUsersKeyPattern = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
- v3SystemRolesKeyPattern = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
+ v3SystemUsersKeyPattern = BSON(
+ AuthorizationManager::USER_NAME_FIELD_NAME << 1 << AuthorizationManager::USER_DB_FIELD_NAME
+ << 1);
+ v3SystemRolesKeyPattern = BSON(
+ AuthorizationManager::ROLE_NAME_FIELD_NAME << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << 1);
v3SystemUsersIndexName =
std::string(str::stream() << AuthorizationManager::USER_NAME_FIELD_NAME << "_1_"
- << AuthorizationManager::USER_DB_FIELD_NAME << "_1");
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << "_1");
v3SystemRolesIndexName =
std::string(str::stream() << AuthorizationManager::ROLE_NAME_FIELD_NAME << "_1_"
- << AuthorizationManager::ROLE_DB_FIELD_NAME << "_1");
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << "_1");
return Status::OK();
}
@@ -113,12 +117,16 @@ void createSystemIndexes(OperationContext* txn, Collection* collection) {
collection->getIndexCatalog()->createIndexOnEmptyCollection(
txn,
BSON("name" << v3SystemUsersIndexName << "ns" << collection->ns().ns() << "key"
- << v3SystemUsersKeyPattern << "unique" << true));
+ << v3SystemUsersKeyPattern
+ << "unique"
+ << true));
} else if (ns == AuthorizationManager::rolesCollectionNamespace) {
collection->getIndexCatalog()->createIndexOnEmptyCollection(
txn,
BSON("name" << v3SystemRolesIndexName << "ns" << collection->ns().ns() << "key"
- << v3SystemRolesKeyPattern << "unique" << true));
+ << v3SystemRolesKeyPattern
+ << "unique"
+ << true));
}
}
diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp
index df65782689e..046ed24a1bc 100644
--- a/src/mongo/db/auth/authorization_manager.cpp
+++ b/src/mongo/db/auth/authorization_manager.cpp
@@ -69,8 +69,8 @@ using std::vector;
AuthInfo internalSecurity;
-MONGO_INITIALIZER_WITH_PREREQUISITES(SetupInternalSecurityUser,
- MONGO_NO_PREREQUISITES)(InitializerContext* context) {
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetupInternalSecurityUser, MONGO_NO_PREREQUISITES)
+(InitializerContext* context) {
User* user = new User(UserName("__system", "local"));
user->incrementRefCount(); // Pin this user so the ref count never drops below 1.
@@ -381,7 +381,8 @@ Status AuthorizationManager::_initializeUserFromPrivilegeDocument(User* user,
mongoutils::str::stream() << "User name from privilege document \""
<< userName
<< "\" doesn't match name of provided User \""
- << user->getName().getUser() << "\"",
+ << user->getName().getUser()
+ << "\"",
0);
}
@@ -484,7 +485,8 @@ Status AuthorizationManager::acquireUser(OperationContext* txn,
case schemaVersion24:
status = Status(ErrorCodes::AuthSchemaIncompatible,
mongoutils::str::stream()
- << "Authorization data schema version " << schemaVersion24
+ << "Authorization data schema version "
+ << schemaVersion24
<< " not supported after MongoDB version 2.6.");
break;
}
@@ -669,7 +671,8 @@ StatusWith<UserName> extractUserNameFromIdString(StringData idstr) {
return StatusWith<UserName>(ErrorCodes::FailedToParse,
mongoutils::str::stream()
<< "_id entries for user documents must be of "
- "the form <dbname>.<username>. Found: " << idstr);
+ "the form <dbname>.<username>. Found: "
+ << idstr);
}
return StatusWith<UserName>(
UserName(idstr.substr(splitPoint + 1), idstr.substr(0, splitPoint)));
@@ -702,7 +705,8 @@ void AuthorizationManager::_invalidateRelevantCacheData(const char* op,
if (!userName.isOK()) {
warning() << "Invalidating user cache based on user being updated failed, will "
- "invalidate the entire cache instead: " << userName.getStatus() << endl;
+ "invalidate the entire cache instead: "
+ << userName.getStatus() << endl;
invalidateUserCache();
return;
}
diff --git a/src/mongo/db/auth/authorization_manager_global.cpp b/src/mongo/db/auth/authorization_manager_global.cpp
index 2fc20deef25..9dd114247cb 100644
--- a/src/mongo/db/auth/authorization_manager_global.cpp
+++ b/src/mongo/db/auth/authorization_manager_global.cpp
@@ -50,7 +50,8 @@ public:
MONGO_INITIALIZER_GENERAL(AuthzSchemaParameter,
MONGO_NO_PREREQUISITES,
- ("BeginStartupOptionParsing"))(InitializerContext*) {
+ ("BeginStartupOptionParsing"))
+(InitializerContext*) {
new AuthzVersionParameter(ServerParameterSet::getGlobal(), authSchemaVersionServerParameter);
return Status::OK();
}
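The two MONGO_INITIALIZER hunks above illustrate a macro-specific quirk: the formatter cannot see that the macro expands to a function header, so it treats the invocation and the (InitializerContext*) parameter list as separate expressions and drops the latter to its own line. A reduced, compilable model of the shape (REGISTER_INIT and Context are invented stand-ins, not MongoDB names):

    // Stand-in macro expanding to a function header, like
    // MONGO_INITIALIZER_WITH_PREREQUISITES.
    #define REGISTER_INIT(NAME) void initializer_##NAME

    struct Context {};

    // The parameter list lands on its own continuation line, exactly the
    // shape in the two hunks above.
    REGISTER_INIT(SetupThing)
    (Context* context) {
        (void)context;
    }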
diff --git a/src/mongo/db/auth/authorization_manager_test.cpp b/src/mongo/db/auth/authorization_manager_test.cpp
index 2faf6d0fe10..cd2b83fa6b1 100644
--- a/src/mongo/db/auth/authorization_manager_test.cpp
+++ b/src/mongo/db/auth/authorization_manager_test.cpp
@@ -34,10 +34,10 @@
#include "mongo/bson/mutable/document.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
-#include "mongo/db/auth/authz_session_external_state_mock.h"
-#include "mongo/db/auth/authz_manager_external_state_mock.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/authz_manager_external_state_mock.h"
+#include "mongo/db/auth/authz_session_external_state_mock.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context_noop.h"
@@ -175,36 +175,38 @@ public:
TEST_F(AuthorizationManagerTest, testAcquireV2User) {
OperationContextNoop txn;
- ASSERT_OK(
- externalState->insertPrivilegeDocument(&txn,
- BSON("_id"
- << "admin.v2read"
- << "user"
- << "v2read"
- << "db"
- << "test"
- << "credentials" << BSON("MONGODB-CR"
- << "password")
- << "roles" << BSON_ARRAY(BSON("role"
- << "read"
- << "db"
- << "test"))),
- BSONObj()));
- ASSERT_OK(
- externalState->insertPrivilegeDocument(&txn,
- BSON("_id"
- << "admin.v2cluster"
- << "user"
- << "v2cluster"
- << "db"
- << "admin"
- << "credentials" << BSON("MONGODB-CR"
- << "password")
- << "roles" << BSON_ARRAY(BSON("role"
- << "clusterAdmin"
- << "db"
- << "admin"))),
- BSONObj()));
+ ASSERT_OK(externalState->insertPrivilegeDocument(&txn,
+ BSON("_id"
+ << "admin.v2read"
+ << "user"
+ << "v2read"
+ << "db"
+ << "test"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "password")
+ << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "read"
+ << "db"
+ << "test"))),
+ BSONObj()));
+ ASSERT_OK(externalState->insertPrivilegeDocument(&txn,
+ BSON("_id"
+ << "admin.v2cluster"
+ << "user"
+ << "v2cluster"
+ << "db"
+ << "admin"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "password")
+ << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "clusterAdmin"
+ << "db"
+ << "admin"))),
+ BSONObj()));
User* v2read;
ASSERT_OK(authzManager->acquireUser(&txn, UserName("v2read", "test"), &v2read));
@@ -260,13 +262,13 @@ public:
private:
Status _getUserDocument(OperationContext* txn, const UserName& userName, BSONObj* userDoc) {
- Status status =
- findOne(txn,
- AuthorizationManager::usersCollectionNamespace,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << userName.getUser() << AuthorizationManager::USER_DB_FIELD_NAME
- << userName.getDB()),
- userDoc);
+ Status status = findOne(txn,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << userName.getUser()
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getDB()),
+ userDoc);
if (status == ErrorCodes::NoMatchingDocument) {
status = Status(ErrorCodes::UserNotFound,
mongoutils::str::stream() << "Could not find user "
@@ -301,27 +303,33 @@ public:
TEST_F(AuthorizationManagerTest, testAcquireV2UserWithUnrecognizedActions) {
OperationContextNoop txn;
- ASSERT_OK(externalState->insertPrivilegeDocument(
- &txn,
- BSON("_id"
- << "admin.myUser"
- << "user"
- << "myUser"
- << "db"
- << "test"
- << "credentials" << BSON("MONGODB-CR"
- << "password") << "roles" << BSON_ARRAY(BSON("role"
- << "myRole"
- << "db"
- << "test"))
- << "inheritedPrivileges" << BSON_ARRAY(BSON("resource" << BSON("db"
- << "test"
- << "collection"
- << "") << "actions"
- << BSON_ARRAY("find"
- << "fakeAction"
- << "insert")))),
- BSONObj()));
+ ASSERT_OK(
+ externalState->insertPrivilegeDocument(&txn,
+ BSON("_id"
+ << "admin.myUser"
+ << "user"
+ << "myUser"
+ << "db"
+ << "test"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "password")
+ << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "myRole"
+ << "db"
+ << "test"))
+ << "inheritedPrivileges"
+ << BSON_ARRAY(BSON(
+ "resource" << BSON("db"
+ << "test"
+ << "collection"
+ << "")
+ << "actions"
+ << BSON_ARRAY("find"
+ << "fakeAction"
+ << "insert")))),
+ BSONObj()));
User* myUser;
ASSERT_OK(authzManager->acquireUser(&txn, UserName("myUser", "test"), &myUser));
diff --git a/src/mongo/db/auth/authorization_session.cpp b/src/mongo/db/auth/authorization_session.cpp
index c2474ac5199..7a620253cc6 100644
--- a/src/mongo/db/auth/authorization_session.cpp
+++ b/src/mongo/db/auth/authorization_session.cpp
@@ -38,8 +38,8 @@
#include "mongo/base/status.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
-#include "mongo/db/auth/authz_session_external_state.h"
#include "mongo/db/auth/authorization_manager.h"
+#include "mongo/db/auth/authz_session_external_state.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/auth/security_key.h"
#include "mongo/db/auth/user_management_commands_parser.h"
@@ -338,7 +338,8 @@ Status AuthorizationSession::checkAuthorizedToGrantPrivilege(const Privilege& pr
ActionType::grantRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to grant privileges on the "
- << resource.databaseToMatch() << "database");
+ << resource.databaseToMatch()
+ << "database");
}
} else if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName("admin"),
ActionType::grantRole)) {
@@ -358,7 +359,8 @@ Status AuthorizationSession::checkAuthorizedToRevokePrivilege(const Privilege& p
ActionType::revokeRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to revoke privileges on the "
- << resource.databaseToMatch() << "database");
+ << resource.databaseToMatch()
+ << "database");
}
} else if (!isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName("admin"),
ActionType::revokeRole)) {
diff --git a/src/mongo/db/auth/authorization_session_test.cpp b/src/mongo/db/auth/authorization_session_test.cpp
index 8dfc448910f..9098c5a8e1f 100644
--- a/src/mongo/db/auth/authorization_session_test.cpp
+++ b/src/mongo/db/auth/authorization_session_test.cpp
@@ -31,10 +31,10 @@
* Unit tests of the AuthorizationSession type.
*/
#include "mongo/base/status.h"
-#include "mongo/db/auth/authz_session_external_state_mock.h"
-#include "mongo/db/auth/authz_manager_external_state_mock.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/authz_manager_external_state_mock.h"
+#include "mongo/db/auth/authz_session_external_state_mock.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context_noop.h"
@@ -144,8 +144,10 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -171,8 +173,10 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) {
<< "admin"
<< "db"
<< "admin"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWriteAnyDatabase"
<< "db"
@@ -216,8 +220,10 @@ TEST_F(AuthorizationSessionTest, DuplicateRolesOK) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -247,8 +253,10 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "rw"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -263,8 +271,10 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "useradmin"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "userAdmin"
<< "db"
@@ -276,8 +286,10 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "rwany"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWriteAnyDatabase"
<< "db"
@@ -293,8 +305,10 @@ TEST_F(AuthorizationSessionTest, SystemCollectionsAccessControl) {
<< "useradminany"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "userAdminAnyDatabase"
<< "db"
@@ -387,8 +401,10 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -413,8 +429,10 @@ TEST_F(AuthorizationSessionTest, InvalidateUser) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
@@ -452,8 +470,10 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "readWrite"
<< "db"
@@ -479,8 +499,10 @@ TEST_F(AuthorizationSessionTest, UseOldUserInfoInFaceOfConnectivityProblems) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "read"
<< "db"
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp
index 601c14decff..bd24c6c5b19 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp
@@ -87,7 +87,8 @@ Status AuthzManagerExternalStateMongod::findOne(OperationContext* txn,
}
return Status(ErrorCodes::NoMatchingDocument,
mongoutils::str::stream() << "No document in " << collectionName.ns()
- << " matches " << query);
+ << " matches "
+ << query);
}
} // namespace mongo
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp
index 5d76027fc22..82bd5c29440 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp
@@ -53,7 +53,8 @@ Status AuthzManagerExternalStateLocal::initialize(OperationContext* txn) {
<< status.reason();
} else {
error() << "Could not generate role graph from admin.system.roles; "
- "only system roles available: " << status;
+ "only system roles available: "
+ << status;
}
}
@@ -81,8 +82,11 @@ Status AuthzManagerExternalStateLocal::getStoredAuthorizationVersion(OperationCo
return Status(ErrorCodes::TypeMismatch,
mongoutils::str::stream()
<< "Could not determine schema version of authorization data. "
- "Bad (non-numeric) type " << typeName(versionElement.type())
- << " (" << versionElement.type() << ") for "
+ "Bad (non-numeric) type "
+ << typeName(versionElement.type())
+ << " ("
+ << versionElement.type()
+ << ") for "
<< AuthorizationManager::schemaVersionFieldName
<< " field in version document");
}
@@ -123,7 +127,8 @@ void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privileges
std::string(mongoutils::str::stream()
<< "Skipped privileges on resource "
<< privileges[i].getResourcePattern().toString()
- << ". Reason: " << errmsg)));
+ << ". Reason: "
+ << errmsg)));
}
}
}
@@ -222,7 +227,8 @@ Status AuthzManagerExternalStateLocal::_getUserDocument(OperationContext* txn,
Status status = findOne(txn,
AuthorizationManager::usersCollectionNamespace,
BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << userName.getUser() << AuthorizationManager::USER_DB_FIELD_NAME
+ << userName.getUser()
+ << AuthorizationManager::USER_DB_FIELD_NAME
<< userName.getDB()),
userDoc);
if (status == ErrorCodes::NoMatchingDocument) {
@@ -324,7 +330,8 @@ void addRoleFromDocumentOrWarn(RoleGraph* roleGraph, const BSONObj& doc) {
Status status = roleGraph->addRoleFromDocument(doc);
if (!status.isOK()) {
warning() << "Skipping invalid admin.system.roles document while calculating privileges"
- " for user-defined roles: " << status << "; document " << doc;
+ " for user-defined roles: "
+ << status << "; document " << doc;
}
}
@@ -352,7 +359,8 @@ Status AuthzManagerExternalStateLocal::_initializeRoleGraph(OperationContext* tx
RoleGraphState newState;
if (status == ErrorCodes::GraphContainsCycle) {
error() << "Inconsistent role graph during authorization manager initialization. Only "
- "direct privileges available. " << status.reason();
+ "direct privileges available. "
+ << status.reason();
newState = roleGraphStateHasCycle;
status = Status::OK();
} else if (status.isOK()) {
@@ -400,8 +408,8 @@ public:
if (_isO2Set)
oplogEntryBuilder << "o2" << _o2;
error() << "Unsupported modification to roles collection in oplog; "
- "restart this process to reenable user-defined roles; " << status.reason()
- << "; Oplog entry: " << oplogEntryBuilder.done();
+ "restart this process to reenable user-defined roles; "
+ << status.reason() << "; Oplog entry: " << oplogEntryBuilder.done();
} else if (!status.isOK()) {
warning() << "Skipping bad update to roles collection in oplog. " << status
<< " Oplog entry: " << _op;
@@ -410,8 +418,8 @@ public:
if (status == ErrorCodes::GraphContainsCycle) {
_externalState->_roleGraphState = _externalState->roleGraphStateHasCycle;
error() << "Inconsistent role graph during authorization manager initialization. "
- "Only direct privileges available. " << status.reason()
- << " after applying oplog entry " << _op;
+ "Only direct privileges available. "
+ << status.reason() << " after applying oplog entry " << _op;
} else {
fassert(17183, status);
_externalState->_roleGraphState = _externalState->roleGraphStateConsistent;
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
index 6bab48f91e9..6c2fe3f9398 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp
@@ -77,7 +77,8 @@ void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privileges
std::string(mongoutils::str::stream()
<< "Skipped privileges on resource "
<< privileges[i].getResourcePattern().toString()
- << ". Reason: " << errmsg)));
+ << ". Reason: "
+ << errmsg)));
}
}
}
diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.h b/src/mongo/db/auth/authz_manager_external_state_mock.h
index d6b457e0de9..0b8fa3e0b3c 100644
--- a/src/mongo/db/auth/authz_manager_external_state_mock.h
+++ b/src/mongo/db/auth/authz_manager_external_state_mock.h
@@ -28,8 +28,8 @@
#pragma once
-#include <string>
#include <map>
+#include <string>
#include <vector>
#include "mongo/base/disallow_copying.h"
diff --git a/src/mongo/db/auth/authz_manager_external_state_s.cpp b/src/mongo/db/auth/authz_manager_external_state_s.cpp
index 48800c500c4..4bdb2648688 100644
--- a/src/mongo/db/auth/authz_manager_external_state_s.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_s.cpp
@@ -69,8 +69,8 @@ Status AuthzManagerExternalStateMongos::getStoredAuthorizationVersion(OperationC
// that runs this command
BSONObj getParameterCmd = BSON("getParameter" << 1 << authSchemaVersionServerParameter << 1);
BSONObjBuilder builder;
- const bool ok = grid.catalogManager(txn)
- ->runUserManagementReadCommand(txn, "admin", getParameterCmd, &builder);
+ const bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ txn, "admin", getParameterCmd, &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -92,11 +92,14 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* txn
BSON("usersInfo" << BSON_ARRAY(BSON(AuthorizationManager::USER_NAME_FIELD_NAME
<< userName.getUser()
<< AuthorizationManager::USER_DB_FIELD_NAME
- << userName.getDB())) << "showPrivileges" << true
- << "showCredentials" << true);
+ << userName.getDB()))
+ << "showPrivileges"
+ << true
+ << "showCredentials"
+ << true);
BSONObjBuilder builder;
- const bool ok = grid.catalogManager(txn)
- ->runUserManagementReadCommand(txn, "admin", usersInfoCmd, &builder);
+ const bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ txn, "admin", usersInfoCmd, &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -110,7 +113,9 @@ Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* txn
if (foundUsers.size() > 1) {
return Status(ErrorCodes::UserDataInconsistent,
str::stream() << "Found multiple users on the \"" << userName.getDB()
- << "\" database with name \"" << userName.getUser() << "\"");
+ << "\" database with name \""
+ << userName.getUser()
+ << "\"");
}
*result = foundUsers[0].Obj().getOwned();
return Status::OK();
@@ -121,13 +126,15 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(OperationContext* txn
bool showPrivileges,
BSONObj* result) {
BSONObj rolesInfoCmd =
- BSON("rolesInfo" << BSON_ARRAY(BSON(
- AuthorizationManager::ROLE_NAME_FIELD_NAME
- << roleName.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
- << roleName.getDB())) << "showPrivileges" << showPrivileges);
+ BSON("rolesInfo" << BSON_ARRAY(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << roleName.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << roleName.getDB()))
+ << "showPrivileges"
+ << showPrivileges);
BSONObjBuilder builder;
- const bool ok = grid.catalogManager(txn)
- ->runUserManagementReadCommand(txn, "admin", rolesInfoCmd, &builder);
+ const bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ txn, "admin", rolesInfoCmd, &builder);
BSONObj cmdResult = builder.obj();
if (!ok) {
return getStatusFromCommandResult(cmdResult);
@@ -141,7 +148,9 @@ Status AuthzManagerExternalStateMongos::getRoleDescription(OperationContext* txn
if (foundRoles.size() > 1) {
return Status(ErrorCodes::RoleDataInconsistent,
str::stream() << "Found multiple roles on the \"" << roleName.getDB()
- << "\" database with name \"" << roleName.getRole() << "\"");
+ << "\" database with name \""
+ << roleName.getRole()
+ << "\"");
}
*result = foundRoles[0].Obj().getOwned();
return Status::OK();
@@ -152,8 +161,9 @@ Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(OperationContex
bool showPrivileges,
bool showBuiltinRoles,
std::vector<BSONObj>* result) {
- BSONObj rolesInfoCmd = BSON("rolesInfo" << 1 << "showPrivileges" << showPrivileges
- << "showBuiltinRoles" << showBuiltinRoles);
+ BSONObj rolesInfoCmd =
+ BSON("rolesInfo" << 1 << "showPrivileges" << showPrivileges << "showBuiltinRoles"
+ << showBuiltinRoles);
BSONObjBuilder builder;
const bool ok =
grid.catalogManager(txn)->runUserManagementReadCommand(txn, dbname, rolesInfoCmd, &builder);
@@ -170,8 +180,8 @@ Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(OperationContex
bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext* txn) {
BSONObj usersInfoCmd = BSON("usersInfo" << 1);
BSONObjBuilder userBuilder;
- bool ok = grid.catalogManager(txn)
- ->runUserManagementReadCommand(txn, "admin", usersInfoCmd, &userBuilder);
+ bool ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ txn, "admin", usersInfoCmd, &userBuilder);
if (!ok) {
// If we were unable to complete the query,
// it's best to assume that there _are_ privilege documents. This might happen
@@ -188,8 +198,8 @@ bool AuthzManagerExternalStateMongos::hasAnyPrivilegeDocuments(OperationContext*
BSONObj rolesInfoCmd = BSON("rolesInfo" << 1);
BSONObjBuilder roleBuilder;
- ok = grid.catalogManager(txn)
- ->runUserManagementReadCommand(txn, "admin", rolesInfoCmd, &roleBuilder);
+ ok = grid.catalogManager(txn)->runUserManagementReadCommand(
+ txn, "admin", rolesInfoCmd, &roleBuilder);
if (!ok) {
return true;
}
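The authz_manager_external_state_s.cpp hunks run the fluent-chain rule in reverse: a chain that was previously broken before -> is rejoined, and the overflow moves into the argument list instead. A self-contained sketch of the post-format shape, with invented stand-ins for grid.catalogManager(txn)->runUserManagementReadCommand:

    #include <string>

    struct Catalog {
        bool runReadCommand(const std::string& db, const std::string& cmd) {
            return !db.empty() && !cmd.empty();
        }
    };

    Catalog* catalog() {
        static Catalog instance;
        return &instance;
    }

    bool hasUsers(const std::string& cmd) {
        // Post-format shape: keep the arrow chain intact and break inside
        // the argument list, not before `->`.
        return catalog()->runReadCommand(
            "admin", cmd);
    }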
diff --git a/src/mongo/db/auth/authz_session_external_state_server_common.cpp b/src/mongo/db/auth/authz_session_external_state_server_common.cpp
index a85ab1c5ac2..16fb107f2f3 100644
--- a/src/mongo/db/auth/authz_session_external_state_server_common.cpp
+++ b/src/mongo/db/auth/authz_session_external_state_server_common.cpp
@@ -69,7 +69,8 @@ void AuthzSessionExternalStateServerCommon::_checkShouldAllowLocalhost(Operation
if (_allowLocalhost) {
ONCE {
log() << "note: no users configured in admin.system.users, allowing localhost "
- "access" << std::endl;
+ "access"
+ << std::endl;
}
}
}
diff --git a/src/mongo/db/auth/native_sasl_authentication_session.cpp b/src/mongo/db/auth/native_sasl_authentication_session.cpp
index 9566ba37487..9e21ffe8d9b 100644
--- a/src/mongo/db/auth/native_sasl_authentication_session.cpp
+++ b/src/mongo/db/auth/native_sasl_authentication_session.cpp
@@ -37,7 +37,6 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/sasl_client_authenticate.h"
-#include "mongo/db/commands.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/authorization_session.h"
@@ -46,6 +45,7 @@
#include "mongo/db/auth/sasl_options.h"
#include "mongo/db/auth/sasl_plain_server_conversation.h"
#include "mongo/db/auth/sasl_scramsha1_server_conversation.h"
+#include "mongo/db/commands.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/auth/privilege_parser_test.cpp b/src/mongo/db/auth/privilege_parser_test.cpp
index 1192e911386..74bace49c7e 100644
--- a/src/mongo/db/auth/privilege_parser_test.cpp
+++ b/src/mongo/db/auth/privilege_parser_test.cpp
@@ -51,23 +51,28 @@ TEST(PrivilegeParserTest, IsValidTest) {
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
// resource can't have cluster as well as db or collection
- parsedPrivilege.parseBSON(
- BSON("resource" << BSON("cluster" << true << "db"
- << ""
- << "collection"
- << "") << "actions" << BSON_ARRAY("find")),
- &errmsg);
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("cluster" << true << "db"
+ << ""
+ << "collection"
+ << "")
+ << "actions"
+ << BSON_ARRAY("find")),
+ &errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
// resource can't have db without collection
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
- << "") << "actions" << BSON_ARRAY("find")),
+ << "")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
// resource can't have collection without db
parsedPrivilege.parseBSON(BSON("resource" << BSON("collection"
- << "") << "actions" << BSON_ARRAY("find")),
+ << "")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT_FALSE(parsedPrivilege.isValid(&errmsg));
@@ -75,7 +80,9 @@ TEST(PrivilegeParserTest, IsValidTest) {
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< ""
<< "collection"
- << "") << "actions" << BSON_ARRAY("find")),
+ << "")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
@@ -83,7 +90,9 @@ TEST(PrivilegeParserTest, IsValidTest) {
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< "test"
<< "collection"
- << "foo") << "actions" << BSON_ARRAY("find")),
+ << "foo")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
@@ -105,7 +114,9 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< ""
<< "collection"
- << "") << "actions" << BSON_ARRAY("find")),
+ << "")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -130,7 +141,9 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< "test"
<< "collection"
- << "foo") << "actions" << BSON_ARRAY("find")),
+ << "foo")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -156,7 +169,9 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< "test"
<< "collection"
- << "") << "actions" << BSON_ARRAY("find")),
+ << "")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -181,7 +196,9 @@ TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) {
parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
<< ""
<< "collection"
- << "foo") << "actions" << BSON_ARRAY("find")),
+ << "foo")
+ << "actions"
+ << BSON_ARRAY("find")),
&errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
@@ -232,13 +249,14 @@ TEST(PrivilegeParserTest, ParseInvalidActionsTest) {
std::vector<std::string> unrecognizedActions;
actionsVector.push_back("find");
- parsedPrivilege.parseBSON(
- BSON("resource" << BSON("db"
- << ""
- << "collection"
- << "") << "actions" << BSON_ARRAY("find"
- << "fakeAction")),
- &errmsg);
+ parsedPrivilege.parseBSON(BSON("resource" << BSON("db"
+ << ""
+ << "collection"
+ << "")
+ << "actions"
+ << BSON_ARRAY("find"
+ << "fakeAction")),
+ &errmsg);
ASSERT(parsedPrivilege.isValid(&errmsg));
ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege(
parsedPrivilege, &privilege, &unrecognizedActions));
diff --git a/src/mongo/db/auth/role_graph.cpp b/src/mongo/db/auth/role_graph.cpp
index a0861b98236..15e8fc87646 100644
--- a/src/mongo/db/auth/role_graph.cpp
+++ b/src/mongo/db/auth/role_graph.cpp
@@ -119,8 +119,8 @@ Status RoleGraph::deleteRole(const RoleName& role) {
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot delete built-in role: " << role.getFullName(),
+ mongoutils::str::stream() << "Cannot delete built-in role: "
+ << role.getFullName(),
0);
}
@@ -183,8 +183,8 @@ Status RoleGraph::addRoleToRole(const RoleName& recipient, const RoleName& role)
}
if (isBuiltinRole(recipient)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot grant roles to built-in role: " << role.getFullName());
+ mongoutils::str::stream() << "Cannot grant roles to built-in role: "
+ << role.getFullName());
}
if (!roleExists(role)) {
return Status(ErrorCodes::RoleNotFound,
@@ -212,8 +212,8 @@ Status RoleGraph::removeRoleFromRole(const RoleName& recipient, const RoleName&
}
if (isBuiltinRole(recipient)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot remove roles from built-in role: " << role.getFullName(),
+ mongoutils::str::stream() << "Cannot remove roles from built-in role: "
+ << role.getFullName(),
0);
}
if (!roleExists(role)) {
@@ -252,8 +252,8 @@ Status RoleGraph::removeAllRolesFromRole(const RoleName& victim) {
}
if (isBuiltinRole(victim)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot remove roles from built-in role: " << victim.getFullName(),
+ mongoutils::str::stream() << "Cannot remove roles from built-in role: "
+ << victim.getFullName(),
0);
}
@@ -281,8 +281,8 @@ Status RoleGraph::addPrivilegeToRole(const RoleName& role, const Privilege& priv
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot grant privileges to built-in role: " << role.getFullName(),
+ mongoutils::str::stream() << "Cannot grant privileges to built-in role: "
+ << role.getFullName(),
0);
}
@@ -308,8 +308,8 @@ Status RoleGraph::addPrivilegesToRole(const RoleName& role,
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot grant privileges to built-in role: " << role.getFullName(),
+ mongoutils::str::stream() << "Cannot grant privileges to built-in role: "
+ << role.getFullName(),
0);
}
@@ -330,8 +330,8 @@ Status RoleGraph::removePrivilegeFromRole(const RoleName& role,
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot remove privileges from built-in role: " << role.getFullName());
+ mongoutils::str::stream() << "Cannot remove privileges from built-in role: "
+ << role.getFullName());
}
PrivilegeVector& currentPrivileges = _directPrivilegesForRole[role];
@@ -343,13 +343,14 @@ Status RoleGraph::removePrivilegeFromRole(const RoleName& role,
if (!curActions.isSupersetOf(privilegeToRemove.getActions())) {
// Didn't possess all the actions being removed.
- return Status(ErrorCodes::PrivilegeNotFound,
- mongoutils::str::stream()
- << "Role: " << role.getFullName()
- << " does not contain a privilege on "
- << privilegeToRemove.getResourcePattern().toString()
- << " with actions: " << privilegeToRemove.getActions().toString(),
- 0);
+ return Status(
+ ErrorCodes::PrivilegeNotFound,
+ mongoutils::str::stream() << "Role: " << role.getFullName()
+ << " does not contain a privilege on "
+ << privilegeToRemove.getResourcePattern().toString()
+ << " with actions: "
+ << privilegeToRemove.getActions().toString(),
+ 0);
}
curPrivilege.removeActions(privilegeToRemove.getActions());
@@ -389,8 +390,8 @@ Status RoleGraph::removeAllPrivilegesFromRole(const RoleName& role) {
}
if (isBuiltinRole(role)) {
return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Cannot remove privileges from built-in role: " << role.getFullName());
+ mongoutils::str::stream() << "Cannot remove privileges from built-in role: "
+ << role.getFullName());
}
_directPrivilegesForRole[role].clear();
return Status::OK();
diff --git a/src/mongo/db/auth/role_graph_update.cpp b/src/mongo/db/auth/role_graph_update.cpp
index f9dd3b10efe..21be828753c 100644
--- a/src/mongo/db/auth/role_graph_update.cpp
+++ b/src/mongo/db/auth/role_graph_update.cpp
@@ -87,7 +87,9 @@ Status checkIdMatchesRoleName(const BSONElement& idElement, const RoleName& role
return Status(ErrorCodes::FailedToParse,
mongoutils::str::stream()
<< "Role document _id fields must be encoded as the string "
- "dbname.rolename. Found " << idField << " for "
+ "dbname.rolename. Found "
+ << idField
+ << " for "
<< roleName.getFullName());
}
return Status::OK();
diff --git a/src/mongo/db/auth/sasl_authentication_session.cpp b/src/mongo/db/auth/sasl_authentication_session.cpp
index c74bba6fadb..c64e4be8100 100644
--- a/src/mongo/db/auth/sasl_authentication_session.cpp
+++ b/src/mongo/db/auth/sasl_authentication_session.cpp
@@ -36,12 +36,12 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/client/sasl_client_authenticate.h"
-#include "mongo/db/commands.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/authz_manager_external_state_mock.h"
#include "mongo/db/auth/authz_session_external_state_mock.h"
+#include "mongo/db/commands.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/auth/sasl_options.cpp b/src/mongo/db/auth/sasl_options.cpp
index 69bfb504e83..fdb64f044e2 100644
--- a/src/mongo/db/auth/sasl_options.cpp
+++ b/src/mongo/db/auth/sasl_options.cpp
@@ -60,27 +60,31 @@ SASLGlobalParams::SASLGlobalParams() {
Status addSASLOptions(moe::OptionSection* options) {
moe::OptionSection saslOptions("SASL Options");
- saslOptions.addOptionChaining("security.authenticationMechanisms",
- "",
- moe::StringVector,
- "List of supported authentication mechanisms. "
- "Default is MONGODB-CR, SCRAM-SHA-1 and MONGODB-X509.")
+ saslOptions
+ .addOptionChaining("security.authenticationMechanisms",
+ "",
+ moe::StringVector,
+ "List of supported authentication mechanisms. "
+ "Default is MONGODB-CR, SCRAM-SHA-1 and MONGODB-X509.")
.setSources(moe::SourceYAMLConfig);
- saslOptions.addOptionChaining(
- "security.sasl.hostName", "", moe::String, "Fully qualified server domain name")
+ saslOptions
+ .addOptionChaining(
+ "security.sasl.hostName", "", moe::String, "Fully qualified server domain name")
.setSources(moe::SourceYAMLConfig);
- saslOptions.addOptionChaining("security.sasl.serviceName",
- "",
- moe::String,
- "Registered name of the service using SASL")
+ saslOptions
+ .addOptionChaining("security.sasl.serviceName",
+ "",
+ moe::String,
+ "Registered name of the service using SASL")
.setSources(moe::SourceYAMLConfig);
- saslOptions.addOptionChaining("security.sasl.saslauthdSocketPath",
- "",
- moe::String,
- "Path to Unix domain socket file for saslauthd")
+ saslOptions
+ .addOptionChaining("security.sasl.saslauthdSocketPath",
+ "",
+ moe::String,
+ "Path to Unix domain socket file for saslauthd")
.setSources(moe::SourceYAMLConfig);
Status ret = options->addSection(saslOptions);
@@ -178,11 +182,11 @@ public:
virtual Status validate(const int& newValue) {
if (newValue < minimumScramIterationCount) {
- return Status(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Invalid value for SCRAM iteration count: " << newValue
- << " is less than the minimum SCRAM iteration count, "
- << minimumScramIterationCount);
+ return Status(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Invalid value for SCRAM iteration count: " << newValue
+ << " is less than the minimum SCRAM iteration count, "
+ << minimumScramIterationCount);
}
return Status::OK();
diff --git a/src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp b/src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp
index 9fd8496b7bc..ed812ddb27f 100644
--- a/src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp
+++ b/src/mongo/db/auth/sasl_scramsha1_server_conversation.cpp
@@ -61,9 +61,9 @@ StatusWith<bool> SaslSCRAMSHA1ServerConversation::step(StringData inputData,
_step++;
if (_step > 3 || _step <= 0) {
- return StatusWith<bool>(ErrorCodes::AuthenticationFailed,
- mongoutils::str::stream()
- << "Invalid SCRAM-SHA-1 authentication step: " << _step);
+ return StatusWith<bool>(
+ ErrorCodes::AuthenticationFailed,
+ mongoutils::str::stream() << "Invalid SCRAM-SHA-1 authentication step: " << _step);
}
if (_step == 1) {
return _firstStep(input, outputData);
@@ -109,8 +109,8 @@ StatusWith<bool> SaslSCRAMSHA1ServerConversation::_firstStep(std::vector<string>
*/
if (!str::startsWith(input[1], "a=") || input[1].size() < 3) {
return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 authzid: " << input[1]);
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 authzid: "
+ << input[1]);
}
authzId = input[1].substr(2);
input.erase(input.begin() + 1);
@@ -121,26 +121,29 @@ StatusWith<bool> SaslSCRAMSHA1ServerConversation::_firstStep(std::vector<string>
ErrorCodes::BadValue,
mongoutils::str::stream()
<< "Incorrect number of arguments for first SCRAM-SHA-1 client message, got "
- << input.size() << " expected 4");
+ << input.size()
+ << " expected 4");
} else if (input[0] != "n") {
return StatusWith<bool>(ErrorCodes::BadValue,
mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 client message prefix: " << input[0]);
+ << "Incorrect SCRAM-SHA-1 client message prefix: "
+ << input[0]);
} else if (!str::startsWith(input[1], "n=") || input[1].size() < 3) {
return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 user name: " << input[1]);
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 user name: "
+ << input[1]);
} else if (!str::startsWith(input[2], "r=") || input[2].size() < 6) {
return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 client nonce: " << input[2]);
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 client nonce: "
+ << input[2]);
}
_user = input[1].substr(2);
if (!authzId.empty() && _user != authzId) {
return StatusWith<bool>(ErrorCodes::BadValue,
mongoutils::str::stream() << "SCRAM-SHA-1 user name " << _user
- << " does not match authzid " << authzId);
+ << " does not match authzid "
+ << authzId);
}
decodeSCRAMUsername(_user);
@@ -237,19 +240,20 @@ StatusWith<bool> SaslSCRAMSHA1ServerConversation::_secondStep(const std::vector<
ErrorCodes::BadValue,
mongoutils::str::stream()
<< "Incorrect number of arguments for second SCRAM-SHA-1 client message, got "
- << input.size() << " expected 3");
+ << input.size()
+ << " expected 3");
} else if (!str::startsWith(input[0], "c=") || input[0].size() < 3) {
- return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 channel binding: " << input[0]);
+ return StatusWith<bool>(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 channel binding: " << input[0]);
} else if (!str::startsWith(input[1], "r=") || input[1].size() < 6) {
- return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 client|server nonce: " << input[1]);
+ return StatusWith<bool>(
+ ErrorCodes::BadValue,
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 client|server nonce: " << input[1]);
} else if (!str::startsWith(input[2], "p=") || input[2].size() < 3) {
return StatusWith<bool>(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Incorrect SCRAM-SHA-1 ClientProof: " << input[2]);
+ mongoutils::str::stream() << "Incorrect SCRAM-SHA-1 ClientProof: "
+ << input[2]);
}
// add client-final-message-without-proof to authMessage
@@ -262,7 +266,9 @@ StatusWith<bool> SaslSCRAMSHA1ServerConversation::_secondStep(const std::vector<
ErrorCodes::BadValue,
mongoutils::str::stream()
<< "Unmatched SCRAM-SHA-1 nonce received from client in second step, expected "
- << _nonce << " but received " << nonce);
+ << _nonce
+ << " but received "
+ << nonce);
}
std::string clientProof = input[2].substr(2);
diff --git a/src/mongo/db/auth/security_file.cpp b/src/mongo/db/auth/security_file.cpp
index fd31a13a6f3..2538259bcae 100644
--- a/src/mongo/db/auth/security_file.cpp
+++ b/src/mongo/db/auth/security_file.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/auth/security_key.h"
-#include <sys/stat.h>
#include <string>
+#include <sys/stat.h>
#include "mongo/base/status_with.h"
#include "mongo/util/mongoutils/str.h"
@@ -92,9 +92,9 @@ StatusWith<std::string> readSecurityFile(const std::string& filename) {
if ((buf < 'A' || buf > 'Z') && (buf < 'a' || buf > 'z') && (buf < '0' || buf > '9') &&
buf != '+' && buf != '/' && buf != '=') {
fclose(file);
- return StatusWith<std::string>(ErrorCodes::UnsupportedFormat,
- str::stream() << "invalid char in key file " << filename
- << ": " << buf);
+ return StatusWith<std::string>(
+ ErrorCodes::UnsupportedFormat,
+ str::stream() << "invalid char in key file " << filename << ": " << buf);
}
str += buf;
diff --git a/src/mongo/db/auth/security_key.cpp b/src/mongo/db/auth/security_key.cpp
index a8e5611e1c0..97e7076c447 100644
--- a/src/mongo/db/auth/security_key.cpp
+++ b/src/mongo/db/auth/security_key.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/auth/security_key.h"
-#include <sys/stat.h>
#include <string>
+#include <sys/stat.h>
#include <vector>
#include "mongo/base/status_with.h"
@@ -89,11 +89,14 @@ bool setUpSecurityKey(const string& filename) {
if (clusterAuthMode == ServerGlobalParams::ClusterAuthMode_keyFile ||
clusterAuthMode == ServerGlobalParams::ClusterAuthMode_sendKeyFile) {
setInternalUserAuthParams(
- BSON(saslCommandMechanismFieldName
- << "SCRAM-SHA-1" << saslCommandUserDBFieldName
- << internalSecurity.user->getName().getDB() << saslCommandUserFieldName
- << internalSecurity.user->getName().getUser() << saslCommandPasswordFieldName
- << credentials.password << saslCommandDigestPasswordFieldName << false));
+ BSON(saslCommandMechanismFieldName << "SCRAM-SHA-1" << saslCommandUserDBFieldName
+ << internalSecurity.user->getName().getDB()
+ << saslCommandUserFieldName
+ << internalSecurity.user->getName().getUser()
+ << saslCommandPasswordFieldName
+ << credentials.password
+ << saslCommandDigestPasswordFieldName
+ << false));
}
return true;
diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp
index ef64311cbe6..5f58d956c5c 100644
--- a/src/mongo/db/auth/user_cache_invalidator_job.cpp
+++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp
@@ -130,7 +130,8 @@ void UserCacheInvalidator::initialize(OperationContext* txn) {
"running an outdated version of mongod on the config servers";
} else {
warning() << "An error occurred while fetching initial user cache generation from "
- "config servers: " << currentGeneration.getStatus();
+ "config servers: "
+ << currentGeneration.getStatus();
}
_previousCacheGeneration = OID();
}
@@ -162,7 +163,8 @@ void UserCacheInvalidator::run() {
if (currentGeneration.getStatus().code() == ErrorCodes::CommandNotFound) {
warning() << "_getUserCacheGeneration command not found on config server(s), "
"this most likely means you are running an outdated version of mongod "
- "on the config servers" << std::endl;
+ "on the config servers"
+ << std::endl;
} else {
warning() << "An error occurred while fetching current user cache generation "
"to check if user cache needs invalidation: "
diff --git a/src/mongo/db/auth/user_document_parser_test.cpp b/src/mongo/db/auth/user_document_parser_test.cpp
index c3a1e0a490f..273eaff86f5 100644
--- a/src/mongo/db/auth/user_document_parser_test.cpp
+++ b/src/mongo/db/auth/user_document_parser_test.cpp
@@ -74,7 +74,8 @@ TEST_F(V1UserDocumentParsing, testParsingV0UserDocuments) {
<< "spencer"
<< "pwd"
<< "passwordHash"
- << "readOnly" << true);
+ << "readOnly"
+ << true);
BSONObj readWriteAdmin = BSON("user"
<< "admin"
<< "pwd"
@@ -83,7 +84,8 @@ TEST_F(V1UserDocumentParsing, testParsingV0UserDocuments) {
<< "admin"
<< "pwd"
<< "passwordHash"
- << "readOnly" << true);
+ << "readOnly"
+ << true);
ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(user.get(), readOnly, "test"));
RoleNameIterator roles = user->getRoles();
@@ -124,15 +126,15 @@ TEST_F(V1UserDocumentParsing, VerifyRolesFieldMustBeAnArray) {
}
TEST_F(V1UserDocumentParsing, VerifySemanticallyInvalidRolesStillParse) {
- ASSERT_OK(
- v1parser.initializeUserRolesFromUserDocument(user.get(),
- BSON("user"
- << "spencer"
- << "pwd"
- << ""
- << "roles" << BSON_ARRAY("read"
- << "frim")),
- "test"));
+ ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "pwd"
+ << ""
+ << "roles"
+ << BSON_ARRAY("read"
+ << "frim")),
+ "test"));
RoleNameIterator roles = user->getRoles();
RoleName role = roles.next();
if (role == RoleName("read", "test")) {
@@ -145,26 +147,28 @@ TEST_F(V1UserDocumentParsing, VerifySemanticallyInvalidRolesStillParse) {
}
TEST_F(V1UserDocumentParsing, VerifyOtherDBRolesMustBeAnObjectOfArraysOfStrings) {
- ASSERT_NOT_OK(
- v1parser.initializeUserRolesFromUserDocument(adminUser.get(),
- BSON("user"
- << "admin"
- << "pwd"
- << ""
- << "roles" << BSON_ARRAY("read")
- << "otherDBRoles" << BSON_ARRAY("read")),
- "admin"));
+ ASSERT_NOT_OK(v1parser.initializeUserRolesFromUserDocument(adminUser.get(),
+ BSON("user"
+ << "admin"
+ << "pwd"
+ << ""
+ << "roles"
+ << BSON_ARRAY("read")
+ << "otherDBRoles"
+ << BSON_ARRAY("read")),
+ "admin"));
- ASSERT_NOT_OK(
- v1parser.initializeUserRolesFromUserDocument(adminUser.get(),
- BSON("user"
- << "admin"
- << "pwd"
- << ""
- << "roles" << BSON_ARRAY("read")
- << "otherDBRoles" << BSON("test2"
- << "read")),
- "admin"));
+ ASSERT_NOT_OK(v1parser.initializeUserRolesFromUserDocument(adminUser.get(),
+ BSON("user"
+ << "admin"
+ << "pwd"
+ << ""
+ << "roles"
+ << BSON_ARRAY("read")
+ << "otherDBRoles"
+ << BSON("test2"
+ << "read")),
+ "admin"));
}
TEST_F(V1UserDocumentParsing, VerifyCannotGrantPrivilegesOnOtherDatabasesNormally) {
@@ -175,7 +179,8 @@ TEST_F(V1UserDocumentParsing, VerifyCannotGrantPrivilegesOnOtherDatabasesNormall
<< "spencer"
<< "pwd"
<< ""
- << "roles" << BSONArrayBuilder().arr()
+ << "roles"
+ << BSONArrayBuilder().arr()
<< "otherDBRoles"
<< BSON("test2" << BSON_ARRAY("read"))),
"test"));
@@ -184,15 +189,17 @@ TEST_F(V1UserDocumentParsing, VerifyCannotGrantPrivilegesOnOtherDatabasesNormall
TEST_F(V1UserDocumentParsing, GrantUserAdminOnTestViaAdmin) {
// Grant userAdmin on test via admin.
- ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(
- adminUser.get(),
- BSON("user"
- << "admin"
- << "pwd"
- << ""
- << "roles" << BSONArrayBuilder().arr() << "otherDBRoles"
- << BSON("test" << BSON_ARRAY("userAdmin"))),
- "admin"));
+ ASSERT_OK(v1parser.initializeUserRolesFromUserDocument(adminUser.get(),
+ BSON("user"
+ << "admin"
+ << "pwd"
+ << ""
+ << "roles"
+ << BSONArrayBuilder().arr()
+ << "otherDBRoles"
+ << BSON("test" << BSON_ARRAY(
+ "userAdmin"))),
+ "admin"));
RoleNameIterator roles = adminUser->getRoles();
ASSERT_EQUALS(RoleName("userAdmin", "test"), roles.next());
ASSERT_FALSE(roles.more());
@@ -200,15 +207,16 @@ TEST_F(V1UserDocumentParsing, GrantUserAdminOnTestViaAdmin) {
TEST_F(V1UserDocumentParsing, MixedV0V1UserDocumentsAreInvalid) {
// Try to mix fields from V0 and V1 user documents and make sure it fails.
- ASSERT_NOT_OK(
- v1parser.initializeUserRolesFromUserDocument(user.get(),
- BSON("user"
- << "spencer"
- << "pwd"
- << "passwordHash"
- << "readOnly" << false << "roles"
- << BSON_ARRAY("read")),
- "test"));
+ ASSERT_NOT_OK(v1parser.initializeUserRolesFromUserDocument(user.get(),
+ BSON("user"
+ << "spencer"
+ << "pwd"
+ << "passwordHash"
+ << "readOnly"
+ << false
+ << "roles"
+ << BSON_ARRAY("read")),
+ "test"));
ASSERT_FALSE(user->getRoles().more());
}
@@ -235,20 +243,25 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "pwd"
<< "a"
- << "roles" << BSON_ARRAY("read"))));
+ << "roles"
+ << BSON_ARRAY("read"))));
// Need name field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< emptyArray)));
// Need source field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< emptyArray)));
// Need credentials field
@@ -256,23 +269,27 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "roles" << emptyArray)));
+ << "roles"
+ << emptyArray)));
// Need roles field
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a"))));
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a"))));
// Empty roles arrays are OK
ASSERT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< emptyArray)));
// Need credentials of {external: true} if user's db is $external
@@ -280,16 +297,20 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "$external"
- << "credentials" << BSON("external" << true)
- << "roles" << emptyArray)));
+ << "credentials"
+ << BSON("external" << true)
+ << "roles"
+ << emptyArray)));
// Roles must be objects
ASSERT_NOT_OK(v2parser.checkValidUserDocument(BSON("user"
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY("read"))));
// Role needs name
@@ -297,8 +318,10 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("db"
<< "dbA")))));
@@ -307,8 +330,10 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA")))));
@@ -318,8 +343,10 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
@@ -330,8 +357,10 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "roles"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
@@ -346,10 +375,13 @@ TEST_F(V2UserDocumentParsing, V2DocumentValidation) {
<< "spencer"
<< "db"
<< "test"
- << "credentials" << BSON("MONGODB-CR"
- << "a") << "extraData"
+ << "credentials"
+ << BSON("MONGODB-CR"
+ << "a")
+ << "extraData"
<< BSON("foo"
- << "bar") << "roles"
+ << "bar")
+ << "roles"
<< BSON_ARRAY(BSON("role"
<< "roleA"
<< "db"
@@ -424,44 +456,45 @@ TEST_F(V2UserDocumentParsing, V2RoleExtraction) {
user.get()));
// V1-style roles arrays no longer work
- ASSERT_NOT_OK(
- v2parser.initializeUserRolesFromUserDocument(BSON("user"
- << "spencer"
- << "roles" << BSON_ARRAY("read")),
- user.get()));
+ ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles"
+ << BSON_ARRAY("read")),
+ user.get()));
// Roles must have "db" field
- ASSERT_NOT_OK(
- v2parser.initializeUserRolesFromUserDocument(BSON("user"
- << "spencer"
- << "roles" << BSON_ARRAY(BSONObj())),
- user.get()));
-
ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
<< "spencer"
- << "roles" << BSON_ARRAY(BSON(
- "role"
- << "roleA"))),
+ << "roles"
+ << BSON_ARRAY(BSONObj())),
user.get()));
ASSERT_NOT_OK(
v2parser.initializeUserRolesFromUserDocument(BSON("user"
<< "spencer"
- << "roles" << BSON_ARRAY(BSON("user"
- << "roleA"
- << "db"
- << "dbA"))),
+ << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "roleA"))),
user.get()));
+ ASSERT_NOT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles"
+ << BSON_ARRAY(BSON("user"
+ << "roleA"
+ << "db"
+ << "dbA"))),
+ user.get()));
+
// Valid role names are extracted successfully
- ASSERT_OK(
- v2parser.initializeUserRolesFromUserDocument(BSON("user"
- << "spencer"
- << "roles" << BSON_ARRAY(BSON("role"
- << "roleA"
- << "db"
- << "dbA"))),
- user.get()));
+ ASSERT_OK(v2parser.initializeUserRolesFromUserDocument(BSON("user"
+ << "spencer"
+ << "roles"
+ << BSON_ARRAY(BSON("role"
+ << "roleA"
+ << "db"
+ << "dbA"))),
+ user.get()));
RoleNameIterator roles = user->getRoles();
ASSERT_EQUALS(RoleName("roleA", "dbA"), roles.next());
ASSERT_FALSE(roles.more());
diff --git a/src/mongo/db/auth/user_management_commands_parser.cpp b/src/mongo/db/auth/user_management_commands_parser.cpp
index 6707a70b8ca..db6a2f96f9a 100644
--- a/src/mongo/db/auth/user_management_commands_parser.cpp
+++ b/src/mongo/db/auth/user_management_commands_parser.cpp
@@ -684,8 +684,11 @@ Status parseAuthSchemaUpgradeCommand(const BSONObj& cmdObj,
if (steps < minUpgradeSteps || steps > maxUpgradeSteps) {
return Status(ErrorCodes::BadValue,
mongoutils::str::stream() << "Legal values for \"maxSteps\" are at least "
- << minUpgradeSteps << " and no more than "
- << maxUpgradeSteps << "; found " << steps);
+ << minUpgradeSteps
+ << " and no more than "
+ << maxUpgradeSteps
+ << "; found "
+ << steps);
}
parsedArgs->maxSteps = static_cast<int>(steps);
diff --git a/src/mongo/db/auth/user_management_commands_parser.h b/src/mongo/db/auth/user_management_commands_parser.h
index ff65eca69e4..94dc3b7b2ae 100644
--- a/src/mongo/db/auth/user_management_commands_parser.h
+++ b/src/mongo/db/auth/user_management_commands_parser.h
@@ -31,9 +31,9 @@
#include <string>
#include <vector>
+#include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h"
#include "mongo/base/string_data.h"
-#include "mongo/base/disallow_copying.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/auth/role_name.h"
#include "mongo/db/auth/user.h"
diff --git a/src/mongo/db/background.cpp b/src/mongo/db/background.cpp
index e3869b1bd6b..18af0509631 100644
--- a/src/mongo/db/background.cpp
+++ b/src/mongo/db/background.cpp
@@ -133,7 +133,8 @@ void BackgroundOperation::assertNoBgOpInProgForDb(StringData db) {
uassert(ErrorCodes::BackgroundOperationInProgressForDatabase,
mongoutils::str::stream()
<< "cannot perform operation: a background operation is currently running for "
- "database " << db,
+ "database "
+ << db,
!inProgForDb(db));
}
@@ -141,7 +142,8 @@ void BackgroundOperation::assertNoBgOpInProgForNs(StringData ns) {
uassert(ErrorCodes::BackgroundOperationInProgressForNamespace,
mongoutils::str::stream()
<< "cannot perform operation: a background operation is currently running for "
- "collection " << ns,
+ "collection "
+ << ns,
!inProgForNs(ns));
}
diff --git a/src/mongo/db/background.h b/src/mongo/db/background.h
index b510c165a96..f8cad335a34 100644
--- a/src/mongo/db/background.h
+++ b/src/mongo/db/background.h
@@ -33,9 +33,9 @@
#pragma once
+#include <iosfwd>
#include <map>
#include <set>
-#include <iosfwd>
#include "mongo/base/disallow_copying.h"
#include "mongo/base/string_data.h"
diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp
index 2883cb26439..c18a7b0975f 100644
--- a/src/mongo/db/catalog/capped_utils.cpp
+++ b/src/mongo/db/catalog/capped_utils.cpp
@@ -38,8 +38,8 @@
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/index_catalog.h"
-#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/client.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/index_builder.h"
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index 0f2304a6f49..b73b732b723 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -105,9 +105,9 @@ Status collMod(OperationContext* txn,
const IndexDescriptor* idx =
coll->getIndexCatalog()->findIndexByKeyPattern(txn, keyPattern);
if (idx == NULL) {
- errorStatus = Status(ErrorCodes::InvalidOptions,
- str::stream() << "cannot find index " << keyPattern
- << " for ns " << nss.ns());
+ errorStatus = Status(
+ ErrorCodes::InvalidOptions,
+ str::stream() << "cannot find index " << keyPattern << " for ns " << nss.ns());
continue;
}
BSONElement oldExpireSecs = idx->infoObj().getField("expireAfterSeconds");
diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp
index 883bf5deb17..02bb9bbaa1d 100644
--- a/src/mongo/db/catalog/collection.cpp
+++ b/src/mongo/db/catalog/collection.cpp
@@ -306,7 +306,9 @@ StatusWithMatchExpression Collection::parseValidator(const BSONObj& validator) c
if (ns().isOnInternalDb()) {
return {ErrorCodes::InvalidOptions,
str::stream() << "Document validators are not allowed on collections in"
- << " the " << ns().db() << " database"};
+ << " the "
+ << ns().db()
+ << " database"};
}
{
@@ -358,7 +360,8 @@ Status Collection::insertDocuments(OperationContext* txn,
if (hasIdIndex && (*it)["_id"].eoo()) {
return Status(ErrorCodes::InternalError,
str::stream() << "Collection::insertDocument got "
- "document without _id for ns:" << _ns.ns());
+ "document without _id for ns:"
+ << _ns.ns());
}
auto status = checkValidation(txn, *it);
@@ -600,7 +603,9 @@ StatusWith<RecordId> Collection::updateDocument(OperationContext* txn,
if (_recordStore->isCapped() && oldSize != newDoc.objsize())
return {ErrorCodes::CannotGrowDocumentInCappedNamespace,
str::stream() << "Cannot change the size of a document in a capped collection: "
- << oldSize << " != " << newDoc.objsize()};
+ << oldSize
+ << " != "
+ << newDoc.objsize()};
// At the end of this step, we will have a map of UpdateTickets, one per index, which
// represent the index updates needed to be done, based on the changes between oldDoc and
diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp
index b9ea06a2aef..b8fbffe2f69 100644
--- a/src/mongo/db/catalog/collection_compact.cpp
+++ b/src/mongo/db/catalog/collection_compact.cpp
@@ -34,13 +34,13 @@
#include "mongo/base/counter.h"
#include "mongo/base/owned_pointer_map.h"
+#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/index_create.h"
+#include "mongo/db/catalog/index_key_validate.h"
#include "mongo/db/clientcursor.h"
#include "mongo/db/commands/server_status.h"
#include "mongo/db/curop.h"
-#include "mongo/db/catalog/database.h"
-#include "mongo/db/catalog/document_validation.h"
-#include "mongo/db/catalog/index_key_validate.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/operation_context.h"
#include "mongo/util/log.h"
@@ -150,7 +150,9 @@ StatusWith<CompactStats> Collection::compact(OperationContext* txn,
return StatusWith<CompactStats>(
ErrorCodes::CannotCreateIndex,
str::stream() << "Cannot compact collection due to invalid index " << spec
- << ": " << keyStatus.reason() << " For more info see"
+ << ": "
+ << keyStatus.reason()
+ << " For more info see"
<< " http://dochub.mongodb.org/core/index-validation");
}
indexSpecs.push_back(spec);
diff --git a/src/mongo/db/catalog/collection_options_test.cpp b/src/mongo/db/catalog/collection_options_test.cpp
index 4e773f1b027..25a03c16270 100644
--- a/src/mongo/db/catalog/collection_options_test.cpp
+++ b/src/mongo/db/catalog/collection_options_test.cpp
@@ -131,9 +131,9 @@ TEST(CollectionOptions, InvalidStorageEngineField) {
TEST(CollectionOptions, ParseEngineField) {
CollectionOptions opts;
- ASSERT_OK(opts.parse(fromjson(
- "{unknownField: 1, "
- "storageEngine: {storageEngine1: {x: 1, y: 2}, storageEngine2: {a: 1, b:2}}}")));
+ ASSERT_OK(opts.parse(
+ fromjson("{unknownField: 1, "
+ "storageEngine: {storageEngine1: {x: 1, y: 2}, storageEngine2: {a: 1, b:2}}}")));
checkRoundTrip(opts);
// Unrecognized field should not be present in BSON representation.
diff --git a/src/mongo/db/catalog/cursor_manager.cpp b/src/mongo/db/catalog/cursor_manager.cpp
index b93c0bb4332..14a7042a13c 100644
--- a/src/mongo/db/catalog/cursor_manager.cpp
+++ b/src/mongo/db/catalog/cursor_manager.cpp
@@ -40,9 +40,9 @@
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/query/plan_executor.h"
+#include "mongo/db/service_context.h"
#include "mongo/platform/random.h"
#include "mongo/util/exit.h"
#include "mongo/util/startup_test.h"
diff --git a/src/mongo/db/catalog/database.cpp b/src/mongo/db/catalog/database.cpp
index 080d37e0eb8..26eb99c0a9c 100644
--- a/src/mongo/db/catalog/database.cpp
+++ b/src/mongo/db/catalog/database.cpp
@@ -500,7 +500,9 @@ Collection* Database::createCollection(OperationContext* txn,
// This check only applies for actual collections, not indexes or other types of ns.
uassert(17381,
str::stream() << "fully qualified namespace " << ns << " is too long "
- << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)",
+ << "(max is "
+ << NamespaceString::MaxNsCollectionLen
+ << " bytes)",
ns.size() <= NamespaceString::MaxNsCollectionLen);
}
diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp
index 7ba373fd362..cf27d3da9a7 100644
--- a/src/mongo/db/catalog/database_holder.cpp
+++ b/src/mongo/db/catalog/database_holder.cpp
@@ -35,12 +35,12 @@
#include "mongo/db/audit.h"
#include "mongo/db/auth/auth_index_d.h"
#include "mongo/db/background.h"
-#include "mongo/db/client.h"
-#include "mongo/db/clientcursor.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/database_catalog_entry.h"
-#include "mongo/db/service_context.h"
+#include "mongo/db/client.h"
+#include "mongo/db/clientcursor.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp
index 85430ff6f10..7d6387e4ae8 100644
--- a/src/mongo/db/catalog/drop_indexes.cpp
+++ b/src/mongo/db/catalog/drop_indexes.cpp
@@ -107,8 +107,8 @@ Status wrappedRun(OperationContext* txn,
collection->getIndexCatalog()->findIndexByKeyPattern(txn, f.embeddedObject());
if (desc == NULL) {
return Status(ErrorCodes::IndexNotFound,
- str::stream()
- << "can't find index with key: " << f.embeddedObject().toString());
+ str::stream() << "can't find index with key: "
+ << f.embeddedObject().toString());
}
if (desc->isIdIndex()) {
diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp
index 7887759486b..58efa054129 100644
--- a/src/mongo/db/catalog/index_catalog.cpp
+++ b/src/mongo/db/catalog/index_catalog.cpp
@@ -38,8 +38,8 @@
#include "mongo/db/audit.h"
#include "mongo/db/background.h"
-#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/database_catalog_entry.h"
#include "mongo/db/catalog/index_create.h"
#include "mongo/db/catalog/index_key_validate.h"
@@ -47,7 +47,6 @@
#include "mongo/db/clientcursor.h"
#include "mongo/db/curop.h"
#include "mongo/db/field_ref.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index_legacy.h"
@@ -56,12 +55,13 @@
#include "mongo/db/keypattern.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/query/collation/collation_serializer.h"
#include "mongo/db/query/collation/collator_factory_interface.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -186,7 +186,8 @@ Status IndexCatalog::checkUnfinished() const {
return Status(ErrorCodes::InternalError,
str::stream() << "IndexCatalog has left over indexes that must be cleared"
- << " ns: " << _collection->ns().ns());
+ << " ns: "
+ << _collection->ns().ns());
}
bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn, const BSONObj& keyPattern) const {
@@ -199,7 +200,8 @@ bool IndexCatalog::_shouldOverridePlugin(OperationContext* txn, const BSONObj& k
// supports an index plugin unsupported by this version.
uassert(17197,
str::stream() << "Invalid index type '" << pluginName << "' "
- << "in index " << keyPattern,
+ << "in index "
+ << keyPattern,
known);
return false;
}
@@ -483,7 +485,8 @@ Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const
if (v != 0 && v != 1) {
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "this version of mongod cannot build new indexes "
- << "of version number " << v);
+ << "of version number "
+ << v);
}
}
@@ -508,7 +511,9 @@ Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "the \"ns\" field of the index spec '"
<< specNamespace.valueStringData()
- << "' does not match the collection name '" << nss.ns() << "'");
+ << "' does not match the collection name '"
+ << nss.ns()
+ << "'");
// logical name of the index
const BSONElement nameElem = spec["name"];
@@ -526,7 +531,8 @@ Status IndexCatalog::_isSpecOk(OperationContext* txn, const BSONObj& spec) const
if (indexNamespace.length() > NamespaceString::MaxNsLen)
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "namespace name generated from index name \""
- << indexNamespace << "\" is too long (127 byte max)");
+ << indexNamespace
+ << "\" is too long (127 byte max)");
const BSONObj key = spec.getObjectField("key");
const Status keyStatus = validateKeyPattern(key);
@@ -650,9 +656,12 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
if (!desc->keyPattern().equal(key))
return Status(ErrorCodes::IndexKeySpecsConflict,
str::stream() << "Trying to create an index "
- << "with same name " << name
- << " with different key spec " << key
- << " vs existing spec " << desc->keyPattern());
+ << "with same name "
+ << name
+ << " with different key spec "
+ << key
+ << " vs existing spec "
+ << desc->keyPattern());
IndexDescriptor temp(_collection, _getAccessMethodName(txn, key), spec);
if (!desc->areIndexOptionsEquivalent(&temp))
@@ -702,7 +711,8 @@ Status IndexCatalog::_doesSpecConflictWithExisting(OperationContext* txn,
return Status(ErrorCodes::CannotCreateIndex,
str::stream() << "only one text index per collection allowed, "
<< "found existing text index \""
- << textIndexes[0]->indexName() << "\"");
+ << textIndexes[0]->indexName()
+ << "\"");
}
}
return Status::OK();
diff --git a/src/mongo/db/catalog/index_create.cpp b/src/mongo/db/catalog/index_create.cpp
index 72c0bc6a28a..4007879a247 100644
--- a/src/mongo/db/catalog/index_create.cpp
+++ b/src/mongo/db/catalog/index_create.cpp
@@ -44,9 +44,9 @@
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/curop.h"
#include "mongo/db/exec/working_set_common.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/operation_context.h"
#include "mongo/stdx/mutex.h"
#include "mongo/util/fail_point.h"
#include "mongo/util/fail_point_service.h"
diff --git a/src/mongo/db/catalog/index_key_validate_test.cpp b/src/mongo/db/catalog/index_key_validate_test.cpp
index 1cfe8c03d69..46c8bd394a2 100644
--- a/src/mongo/db/catalog/index_key_validate_test.cpp
+++ b/src/mongo/db/catalog/index_key_validate_test.cpp
@@ -87,7 +87,8 @@ TEST(IndexKeyValidateTest, KeyElementBooleanValueFails) {
ASSERT_EQ(ErrorCodes::CannotCreateIndex,
validateKeyPattern(BSON("a"
<< "2dsphere"
- << "b" << true)));
+ << "b"
+ << true)));
}
TEST(IndexKeyValidateTest, KeyElementNullValueFails) {
diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp
index 1fd09e158cc..2f3517e8159 100644
--- a/src/mongo/db/catalog/rename_collection.cpp
+++ b/src/mongo/db/catalog/rename_collection.cpp
@@ -78,7 +78,8 @@ Status renameCollection(OperationContext* txn,
if (userInitiatedWritesAndNotPrimary) {
return Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while renaming collection " << source.ns()
- << " to " << target.ns());
+ << " to "
+ << target.ns());
}
Database* const sourceDB = dbHolder().get(txn, source.db());
diff --git a/src/mongo/db/clientlistplugin.cpp b/src/mongo/db/clientlistplugin.cpp
index 2aef3af715c..713e9a176f0 100644
--- a/src/mongo/db/clientlistplugin.cpp
+++ b/src/mongo/db/clientlistplugin.cpp
@@ -63,7 +63,8 @@ public:
<< th(a("", "Connections to the database, both internal and external.", "Client"))
<< th(a("http://dochub.mongodb.org/core/viewingandterminatingcurrentoperation",
"",
- "OpId")) << "<th>Locking</th>"
+ "OpId"))
+ << "<th>Locking</th>"
<< "<th>Waiting</th>"
<< "<th>SecsRunning</th>"
<< "<th>Op</th>"
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index 635381749f0..e4ae99ee19d 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -125,7 +125,8 @@ struct Cloner::Fun {
unique_ptr<Lock::GlobalWrite> globalWriteLock(new Lock::GlobalWrite(txn->lockState()));
uassert(ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from_collection.ns()
- << " to " << to_collection.ns(),
+ << " to "
+ << to_collection.ns(),
!txn->writesAreReplicated() ||
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(to_collection));
@@ -279,7 +280,10 @@ void Cloner::copy(OperationContext* txn,
uassert(ErrorCodes::NotMaster,
str::stream() << "Not primary while cloning collection " << from_collection.ns()
- << " to " << to_collection.ns() << " with filter " << query.toString(),
+ << " to "
+ << to_collection.ns()
+ << " with filter "
+ << query.toString(),
!txn->writesAreReplicated() ||
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(to_collection));
}
@@ -308,7 +312,9 @@ void Cloner::copyIndexes(OperationContext* txn,
uassert(ErrorCodes::NotMaster,
str::stream() << "Not primary while copying indexes from " << from_collection.ns()
- << " to " << to_collection.ns() << " (Cloner)",
+ << " to "
+ << to_collection.ns()
+ << " (Cloner)",
!txn->writesAreReplicated() ||
repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(to_collection));
diff --git a/src/mongo/db/cloner.h b/src/mongo/db/cloner.h
index 7b091e4f29b..7f1629fafde 100644
--- a/src/mongo/db/cloner.h
+++ b/src/mongo/db/cloner.h
@@ -30,8 +30,8 @@
#pragma once
-#include <vector>
#include <string>
+#include <vector>
#include "mongo/base/disallow_copying.h"
#include "mongo/client/dbclientinterface.h"
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
index 1f59a884410..abfd3c221f5 100644
--- a/src/mongo/db/commands.cpp
+++ b/src/mongo/db/commands.cpp
@@ -78,9 +78,9 @@ Command::~Command() = default;
string Command::parseNsFullyQualified(const string& dbname, const BSONObj& cmdObj) const {
BSONElement first = cmdObj.firstElement();
uassert(17005,
- mongoutils::str::stream()
- << "Main argument to " << first.fieldNameStringData()
- << " must be a fully qualified namespace string. Found: " << first.toString(false),
+ mongoutils::str::stream() << "Main argument to " << first.fieldNameStringData()
+ << " must be a fully qualified namespace string. Found: "
+ << first.toString(false),
first.type() == mongo::String &&
NamespaceString::validCollectionComponent(first.valuestr()));
return first.String();
@@ -108,7 +108,9 @@ NamespaceString Command::parseNsCollectionRequired(const string& dbname,
#if defined(CLC)
DEV if (mongoutils::str::startsWith(coll, dbname + '.')) {
log() << "DEBUG parseNs Command's collection name looks like it includes the db name\n"
- << dbname << '\n' << coll << '\n' << cmdObj.toString();
+ << dbname << '\n'
+ << coll << '\n'
+ << cmdObj.toString();
dassert(false);
}
#endif
diff --git a/src/mongo/db/commands/apply_ops_cmd.cpp b/src/mongo/db/commands/apply_ops_cmd.cpp
index 12df18040a9..21947850f6c 100644
--- a/src/mongo/db/commands/apply_ops_cmd.cpp
+++ b/src/mongo/db/commands/apply_ops_cmd.cpp
@@ -43,16 +43,16 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/dbhash.h"
-#include "mongo/db/db_raii.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/matcher.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -147,8 +147,8 @@ private:
// op - operation type
BSONElement opElement = obj.getField("op");
if (opElement.eoo()) {
- errmsg = str::stream()
- << "op does not contain required \"op\" field: " << e.fieldName();
+ errmsg = str::stream() << "op does not contain required \"op\" field: "
+ << e.fieldName();
return false;
}
if (opElement.type() != mongo::String) {
@@ -166,8 +166,8 @@ private:
// Only operations of type 'n' are allowed to have an empty namespace.
BSONElement nsElement = obj.getField("ns");
if (nsElement.eoo()) {
- errmsg = str::stream()
- << "op does not contain required \"ns\" field: " << e.fieldName();
+ errmsg = str::stream() << "op does not contain required \"ns\" field: "
+ << e.fieldName();
return false;
}
if (nsElement.type() != mongo::String) {
@@ -179,8 +179,8 @@ private:
return false;
}
if (*opType != 'n' && nsElement.String().empty()) {
- errmsg = str::stream()
- << "\"ns\" field value cannot be empty when op type is not 'n': " << e.fieldName();
+ errmsg = str::stream() << "\"ns\" field value cannot be empty when op type is not 'n': "
+ << e.fieldName();
return false;
}
return true;
diff --git a/src/mongo/db/commands/clone.cpp b/src/mongo/db/commands/clone.cpp
index 9ce858feb3d..ddb251bc4c9 100644
--- a/src/mongo/db/commands/clone.cpp
+++ b/src/mongo/db/commands/clone.cpp
@@ -30,8 +30,8 @@
#include "mongo/base/status.h"
#include "mongo/db/auth/action_set.h"
-#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/cloner.h"
#include "mongo/db/commands.h"
diff --git a/src/mongo/db/commands/clone_collection.cpp b/src/mongo/db/commands/clone_collection.cpp
index 87673050a2a..b8b475b6926 100644
--- a/src/mongo/db/commands/clone_collection.cpp
+++ b/src/mongo/db/commands/clone_collection.cpp
@@ -35,8 +35,8 @@
#include "mongo/bson/util/builder.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/auth/action_set.h"
-#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/cloner.h"
@@ -48,8 +48,8 @@
#include "mongo/db/instance.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/repl/isself.h"
#include "mongo/db/ops/insert.h"
+#include "mongo/db/repl/isself.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp
index 59866778ab1..5484395cd6e 100644
--- a/src/mongo/db/commands/collection_to_capped.cpp
+++ b/src/mongo/db/commands/collection_to_capped.cpp
@@ -36,12 +36,12 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index_builder.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/query/find.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
namespace mongo {
@@ -100,11 +100,12 @@ public:
NamespaceString nss(dbname, to);
if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(nss)) {
- return appendCommandStatus(result,
- Status(ErrorCodes::NotMaster,
- str::stream()
- << "Not primary while cloning collection " << from
- << " to " << to << " (as capped)"));
+ return appendCommandStatus(
+ result,
+ Status(ErrorCodes::NotMaster,
+ str::stream() << "Not primary while cloning collection " << from << " to "
+ << to
+ << " (as capped)"));
}
Database* const db = autoDb.getDb();
diff --git a/src/mongo/db/commands/copydb.cpp b/src/mongo/db/commands/copydb.cpp
index 7619c87a20d..92d721b70e1 100644
--- a/src/mongo/db/commands/copydb.cpp
+++ b/src/mongo/db/commands/copydb.cpp
@@ -31,8 +31,8 @@
#include "mongo/base/status.h"
#include "mongo/client/sasl_client_authenticate.h"
#include "mongo/db/auth/action_set.h"
-#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/cloner.h"
#include "mongo/db/commands.h"
@@ -165,10 +165,11 @@ public:
uassert(13008, "must call copydbgetnonce first", authConn.get());
BSONObj ret;
{
- if (!authConn->runCommand(cloneOptions.fromDB,
- BSON("authenticate" << 1 << "user" << username << "nonce"
- << nonce << "key" << key),
- ret)) {
+ if (!authConn->runCommand(
+ cloneOptions.fromDB,
+ BSON("authenticate" << 1 << "user" << username << "nonce" << nonce << "key"
+ << key),
+ ret)) {
errmsg = "unable to login " + ret.toString();
authConn.reset();
return false;
@@ -179,11 +180,11 @@ public:
cmdObj.hasField(saslCommandPayloadFieldName)) {
uassert(25487, "must call copydbsaslstart first", authConn.get());
BSONObj ret;
- if (!authConn->runCommand(cloneOptions.fromDB,
- BSON("saslContinue"
- << 1 << cmdObj[saslCommandConversationIdFieldName]
- << cmdObj[saslCommandPayloadFieldName]),
- ret)) {
+ if (!authConn->runCommand(
+ cloneOptions.fromDB,
+ BSON("saslContinue" << 1 << cmdObj[saslCommandConversationIdFieldName]
+ << cmdObj[saslCommandPayloadFieldName]),
+ ret)) {
errmsg = "unable to login " + ret.toString();
authConn.reset();
return false;
diff --git a/src/mongo/db/commands/copydb_common.cpp b/src/mongo/db/commands/copydb_common.cpp
index 5f033aede73..2a690ae0a17 100644
--- a/src/mongo/db/commands/copydb_common.cpp
+++ b/src/mongo/db/commands/copydb_common.cpp
@@ -69,8 +69,8 @@ Status checkAuthForCopydbCommand(ClientBasic* client,
actions.addAction(ActionType::bypassDocumentValidation);
}
- if (!AuthorizationSession::get(client)
- ->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(todb), actions)) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource(
+ ResourcePattern::forDatabaseName(todb), actions)) {
return Status(ErrorCodes::Unauthorized, "Unauthorized");
}
diff --git a/src/mongo/db/commands/copydb_start_commands.cpp b/src/mongo/db/commands/copydb_start_commands.cpp
index 609c1167184..8426b14d072 100644
--- a/src/mongo/db/commands/copydb_start_commands.cpp
+++ b/src/mongo/db/commands/copydb_start_commands.cpp
@@ -37,8 +37,8 @@
#include "mongo/client/dbclientinterface.h"
#include "mongo/client/sasl_client_authenticate.h"
#include "mongo/db/auth/action_set.h"
-#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/client.h"
#include "mongo/db/cloner.h"
#include "mongo/db/commands.h"
diff --git a/src/mongo/db/commands/create_indexes.cpp b/src/mongo/db/commands/create_indexes.cpp
index f18c15bce74..c7e42889772 100644
--- a/src/mongo/db/commands/create_indexes.cpp
+++ b/src/mongo/db/commands/create_indexes.cpp
@@ -41,13 +41,13 @@
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/curop.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/ops/insert.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/service_context.h"
#include "mongo/s/shard_key_pattern.h"
#include "mongo/util/scopeguard.h"
@@ -130,8 +130,8 @@ public:
BSONElement e = it.next();
StringData fieldName(e.fieldName(), e.fieldNameSize());
if (std::find(keys.begin(), keys.end(), fieldName) != keys.end()) {
- errmsg = str::stream()
- << "duplicate keys detected in index spec: " << indexKey;
+ errmsg = str::stream() << "duplicate keys detected in index spec: "
+ << indexKey;
return false;
}
keys.push_back(fieldName);
@@ -286,7 +286,8 @@ public:
Status(ErrorCodes::NotMaster,
str::stream()
<< "Not primary while creating background indexes in "
- << ns.ns() << ": cleaning up index build failure due to "
+ << ns.ns()
+ << ": cleaning up index build failure due to "
<< e.toString()));
}
} catch (...) {
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
index 83aed3c3d0e..a280f059f23 100644
--- a/src/mongo/db/commands/distinct.cpp
+++ b/src/mongo/db/commands/distinct.cpp
@@ -127,8 +127,10 @@ public:
return Status(ErrorCodes::TypeMismatch,
str::stream() << "\"" << kQueryField
<< "\" had the wrong type. Expected "
- << typeName(BSONType::Object) << " or "
- << typeName(BSONType::jstNULL) << ", found "
+ << typeName(BSONType::Object)
+ << " or "
+ << typeName(BSONType::jstNULL)
+ << ", found "
<< typeName(queryElt.type()));
}
}
@@ -142,7 +144,8 @@ public:
return Status(ErrorCodes::TypeMismatch,
str::stream() << "\"" << kCollationField
<< "\" had the wrong type. Expected "
- << typeName(BSONType::Object) << ", found "
+ << typeName(BSONType::Object)
+ << ", found "
<< typeName(collationElt.type()));
}
collation = collationElt.embeddedObject();
@@ -198,8 +201,8 @@ public:
{
stdx::lock_guard<Client>(*txn->getClient());
- CurOp::get(txn)
- ->setPlanSummary_inlock(Explain::getPlanSummary(executor.getValue().get()));
+ CurOp::get(txn)->setPlanSummary_inlock(
+ Explain::getPlanSummary(executor.getValue().get()));
}
string key = cmdObj[kKeyField].valuestrsafe();
diff --git a/src/mongo/db/commands/drop_indexes.cpp b/src/mongo/db/commands/drop_indexes.cpp
index 9829ce29e3c..2b334a51e87 100644
--- a/src/mongo/db/commands/drop_indexes.cpp
+++ b/src/mongo/db/commands/drop_indexes.cpp
@@ -49,11 +49,11 @@
#include "mongo/db/curop.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbdirectclient.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index_builder.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/log.h"
namespace mongo {
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
index 0691b451f3e..dd1e04f311b 100644
--- a/src/mongo/db/commands/find_and_modify.cpp
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/commands/find_and_modify.h"
-#include <memory>
#include <boost/optional.hpp>
+#include <memory>
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
@@ -127,7 +127,8 @@ StatusWith<boost::optional<BSONObj>> advanceExecutor(OperationContext* txn,
const std::string opstr = isRemove ? "delete" : "update";
return {ErrorCodes::OperationFailed,
str::stream() << "executor returned " << PlanExecutor::statestr(state)
- << " while executing " << opstr};
+ << " while executing "
+ << opstr};
}
invariant(state == PlanExecutor::IS_EOF);
@@ -376,8 +377,8 @@ public:
// Attach the namespace and database profiling level to the current op.
{
stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)
- ->enter_inlock(nsString.ns().c_str(), autoDb.getDb()->getProfilingLevel());
+ CurOp::get(txn)->enter_inlock(nsString.ns().c_str(),
+ autoDb.getDb()->getProfilingLevel());
}
auto css = CollectionShardingState::get(txn, nsString);
@@ -444,8 +445,8 @@ public:
// Attach the namespace and database profiling level to the current op.
{
stdx::lock_guard<Client> lk(*txn->getClient());
- CurOp::get(txn)
- ->enter_inlock(nsString.ns().c_str(), autoDb.getDb()->getProfilingLevel());
+ CurOp::get(txn)->enter_inlock(nsString.ns().c_str(),
+ autoDb.getDb()->getProfilingLevel());
}
auto css = CollectionShardingState::get(txn, nsString);
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 547ebc4bfa1..5573e8fc819 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -41,16 +41,16 @@
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/matcher/extensions_callback_real.h"
-#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/query/explain.h"
#include "mongo/db/query/find.h"
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/get_executor.h"
+#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/server_parameters.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/stats/counters.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/commands/generic.cpp b/src/mongo/db/commands/generic.cpp
index 05724e33705..941ac0d2503 100644
--- a/src/mongo/db/commands/generic.cpp
+++ b/src/mongo/db/commands/generic.cpp
@@ -281,9 +281,9 @@ public:
if (it->first == it->second->getName())
commands.push_back(it->second);
}
- std::sort(commands.begin(),
- commands.end(),
- [](Command* lhs, Command* rhs) { return (lhs->getName()) < (rhs->getName()); });
+ std::sort(commands.begin(), commands.end(), [](Command* lhs, Command* rhs) {
+ return (lhs->getName()) < (rhs->getName());
+ });
BSONObjBuilder b(result.subobjStart("commands"));
for (const auto& c : commands) {
@@ -414,7 +414,8 @@ public:
result,
Status(ErrorCodes::TypeMismatch,
str::stream() << "Argument to getLog must be of type String; found "
- << val.toString(false) << " of type "
+ << val.toString(false)
+ << " of type "
<< typeName(val.type())));
}
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 1ffc0342330..c2d4bd0da2f 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -49,8 +49,8 @@
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/getmore_request.h"
#include "mongo/db/query/plan_summary_stats.h"
-#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/repl/oplog.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/service_context.h"
#include "mongo/db/stats/counters.h"
@@ -137,8 +137,8 @@ public:
}
const GetMoreRequest& request = parseStatus.getValue();
- return AuthorizationSession::get(client)
- ->checkAuthForGetMore(request.nss, request.cursorid, request.term.is_initialized());
+ return AuthorizationSession::get(client)->checkAuthForGetMore(
+ request.nss, request.cursorid, request.term.is_initialized());
}
bool run(OperationContext* txn,
diff --git a/src/mongo/db/commands/group_cmd.cpp b/src/mongo/db/commands/group_cmd.cpp
index 63308fc27ed..ba9ff7d2ead 100644
--- a/src/mongo/db/commands/group_cmd.cpp
+++ b/src/mongo/db/commands/group_cmd.cpp
@@ -94,8 +94,8 @@ private:
const std::string& dbname,
const BSONObj& cmdObj) {
std::string ns = parseNs(dbname, cmdObj);
- if (!AuthorizationSession::get(client)
- ->isAuthorizedForActionsOnNamespace(NamespaceString(ns), ActionType::find)) {
+ if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnNamespace(
+ NamespaceString(ns), ActionType::find)) {
return Status(ErrorCodes::Unauthorized, "unauthorized");
}
return Status::OK();
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index ab8c0634747..ecf27a3f1d8 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -30,8 +30,8 @@
#include "mongo/platform/basic.h"
-#include <string>
#include <sstream>
+#include <string>
#include "mongo/base/init.h"
#include "mongo/base/owned_pointer_vector.h"
@@ -105,8 +105,8 @@ static Status getQuerySettingsAndPlanCache(OperationContext* txn,
// available to the client.
//
-MONGO_INITIALIZER_WITH_PREREQUISITES(SetupIndexFilterCommands,
- MONGO_NO_PREREQUISITES)(InitializerContext* context) {
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetupIndexFilterCommands, MONGO_NO_PREREQUISITES)
+(InitializerContext* context) {
new ListFilters();
new ClearFilters();
new SetFilter();
diff --git a/src/mongo/db/commands/index_filter_commands.h b/src/mongo/db/commands/index_filter_commands.h
index 7ba1157bef7..80fed645c5e 100644
--- a/src/mongo/db/commands/index_filter_commands.h
+++ b/src/mongo/db/commands/index_filter_commands.h
@@ -29,8 +29,8 @@
#pragma once
#include "mongo/db/commands.h"
-#include "mongo/db/query/query_settings.h"
#include "mongo/db/query/plan_cache.h"
+#include "mongo/db/query/query_settings.h"
namespace mongo {
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 7f00a1521f4..765871ac134 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -308,13 +308,13 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
addQueryShapeToPlanCache(txn.get(), &planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}");
ASSERT_TRUE(planCacheContains(planCache, "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}"));
- ASSERT_OK(SetFilter::set(txn.get(),
- &querySettings,
- &planCache,
- nss.ns(),
- fromjson(
- "{query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
- "indexes: [{a: 1}]}")));
+ ASSERT_OK(
+ SetFilter::set(txn.get(),
+ &querySettings,
+ &planCache,
+ nss.ns(),
+ fromjson("{query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
+ "indexes: [{a: 1}]}")));
vector<BSONObj> filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
@@ -328,13 +328,13 @@ TEST(IndexFilterCommandsTest, SetAndClearFilters) {
// Replacing the hint for the same query shape ({a: 1, b: 1} and {b: 2, a: 3}
// share same shape) should not change the query settings size.
- ASSERT_OK(SetFilter::set(txn.get(),
- &querySettings,
- &planCache,
- nss.ns(),
- fromjson(
- "{query: {b: 2, a: 3}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
- "indexes: [{a: 1, b: 1}]}")));
+ ASSERT_OK(
+ SetFilter::set(txn.get(),
+ &querySettings,
+ &planCache,
+ nss.ns(),
+ fromjson("{query: {b: 2, a: 3}, sort: {a: -1}, projection: {_id: 0, a: 1}, "
+ "indexes: [{a: 1, b: 1}]}")));
filters = getFilters(querySettings);
ASSERT_EQUALS(filters.size(), 1U);
diff --git a/src/mongo/db/commands/list_indexes.cpp b/src/mongo/db/commands/list_indexes.cpp
index f6d144de358..724c0f5f1f0 100644
--- a/src/mongo/db/commands/list_indexes.cpp
+++ b/src/mongo/db/commands/list_indexes.cpp
@@ -107,8 +107,8 @@ public:
}
return Status(ErrorCodes::Unauthorized,
- str::stream()
- << "Not authorized to list indexes on collection: " << ns.coll());
+ str::stream() << "Not authorized to list indexes on collection: "
+ << ns.coll());
}
CmdListIndexes() : Command("listIndexes") {}
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 62d92ca1120..a55d60d0fb0 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -416,7 +416,9 @@ void State::prepTempCollection() {
if (!status.isOK()) {
uasserted(17305,
str::stream() << "createIndex failed for mr incLong ns: "
- << _config.incLong << " err: " << status.code());
+ << _config.incLong
+ << " err: "
+ << status.code());
}
wuow.commit();
}
@@ -511,7 +513,9 @@ void State::appendResults(BSONObjBuilder& final) {
BSONObj idKey = BSON("_id" << 1);
if (!_db.runCommand("admin",
BSON("splitVector" << _config.outputOptions.finalNamespace
- << "keyPattern" << idKey << "maxChunkSizeBytes"
+ << "keyPattern"
+ << idKey
+ << "maxChunkSizeBytes"
<< _config.splitInfo),
res)) {
uasserted(15921, str::stream() << "splitVector failed: " << res);
@@ -622,7 +626,8 @@ long long State::postProcessCollectionNonAtomic(OperationContext* txn,
if (!_db.runCommand("admin",
BSON("renameCollection" << _config.tempNamespace << "to"
<< _config.outputOptions.finalNamespace
- << "stayTemp" << _config.shardedFirstPass),
+ << "stayTemp"
+ << _config.shardedFirstPass),
info)) {
uasserted(10076, str::stream() << "rename failed: " << info);
}
@@ -749,8 +754,10 @@ void State::_insertToInc(BSONObj& o) {
if (o.objsize() > BSONObjMaxUserSize) {
uasserted(ErrorCodes::BadValue,
str::stream() << "object to insert too large for incremental collection"
- << ". size in bytes: " << o.objsize()
- << ", max size: " << BSONObjMaxUserSize);
+ << ". size in bytes: "
+ << o.objsize()
+ << ", max size: "
+ << BSONObjMaxUserSize);
}
// TODO: Consider whether to pass OpDebug for stats tracking under SERVER-23261.
diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp
index d58ca5326f2..8987af00474 100644
--- a/src/mongo/db/commands/mr_test.cpp
+++ b/src/mongo/db/commands/mr_test.cpp
@@ -57,7 +57,11 @@ void _compareOutputOptionField(const std::string& dbname,
if (actual == expected)
return;
FAIL(str::stream() << "parseOutputOptions(\"" << dbname << ", " << cmdObjStr << "): "
- << fieldName << ": Expected: " << expected << ". Actual: " << actual);
+ << fieldName
+ << ": Expected: "
+ << expected
+ << ". Actual: "
+ << actual);
}
/**
diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp
index bebf6d4d13c..784781405b4 100644
--- a/src/mongo/db/commands/oplog_note.cpp
+++ b/src/mongo/db/commands/oplog_note.cpp
@@ -32,13 +32,13 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/resource_pattern.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/commands.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/operation_context.h"
#include "mongo/db/op_observer.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
namespace mongo {
diff --git a/src/mongo/db/commands/parallel_collection_scan.cpp b/src/mongo/db/commands/parallel_collection_scan.cpp
index 425d5eb8791..2dfe8fdf614 100644
--- a/src/mongo/db/commands/parallel_collection_scan.cpp
+++ b/src/mongo/db/commands/parallel_collection_scan.cpp
@@ -28,6 +28,7 @@
#include "mongo/platform/basic.h"
+#include "mongo/base/checked_cast.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
@@ -38,7 +39,6 @@
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/service_context.h"
#include "mongo/stdx/memory.h"
-#include "mongo/base/checked_cast.h"
namespace mongo {
@@ -103,7 +103,8 @@ public:
Status(ErrorCodes::BadValue,
str::stream()
<< "numCursors has to be between 1 and 10000"
- << " was: " << numCursors));
+ << " was: "
+ << numCursors));
auto iterators = collection->getManyCursors(txn);
if (iterators.size() < numCursors) {
diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp
index c2a5bf23d91..acc8128536d 100644
--- a/src/mongo/db/commands/parameters.cpp
+++ b/src/mongo/db/commands/parameters.cpp
@@ -255,8 +255,8 @@ public:
int newValue;
if (!newValueElement.coerce(&newValue) || newValue < 0)
return Status(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Invalid value for logLevel: " << newValueElement);
+ mongoutils::str::stream() << "Invalid value for logLevel: "
+ << newValueElement);
LogSeverity newSeverity =
(newValue > 0) ? LogSeverity::Debug(newValue) : LogSeverity::Log();
globalLogDomain()->setMinimumLoggedSeverity(newSeverity);
@@ -483,7 +483,8 @@ public:
if (str != "disabled" && str != "allowSSL" && str != "preferSSL" && str != "requireSSL") {
return Status(ErrorCodes::BadValue,
mongoutils::str::stream()
- << "Invalid value for sslMode via setParameter command: " << str);
+ << "Invalid value for sslMode via setParameter command: "
+ << str);
}
int oldMode = sslGlobalParams.sslMode.load();
@@ -495,7 +496,9 @@ public:
return Status(ErrorCodes::BadValue,
mongoutils::str::stream()
<< "Illegal state transition for sslMode, attempt to change from "
- << sslModeStr() << " to " << str);
+ << sslModeStr()
+ << " to "
+ << str);
}
return Status::OK();
}
@@ -566,7 +569,9 @@ public:
#ifdef MONGO_CONFIG_SSL
setInternalUserAuthParams(
BSON(saslCommandMechanismFieldName
- << "MONGODB-X509" << saslCommandUserDBFieldName << "$external"
+ << "MONGODB-X509"
+ << saslCommandUserDBFieldName
+ << "$external"
<< saslCommandUserFieldName
<< getSSLManager()->getSSLConfiguration().clientSubjectName));
#endif
@@ -576,7 +581,9 @@ public:
return Status(ErrorCodes::BadValue,
mongoutils::str::stream()
<< "Illegal state transition for clusterAuthMode, change from "
- << clusterAuthModeStr() << " to " << str);
+ << clusterAuthModeStr()
+ << " to "
+ << str);
}
return Status::OK();
}
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
index c3154f96391..a2962ae712b 100644
--- a/src/mongo/db/commands/pipeline_command.cpp
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/pipeline_proxy.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/pipeline/accumulator.h"
#include "mongo/db/pipeline/document.h"
#include "mongo/db/pipeline/document_source.h"
@@ -52,6 +51,7 @@
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/plan_summary_stats.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/stdx/memory.h"
@@ -127,7 +127,9 @@ static bool handleCursorCommand(OperationContext* txn,
msgasserted(
17391,
str::stream() << "Aggregation has more results than fit in initial batch, but can't "
- << "create cursor since collection " << ns << " doesn't exist");
+ << "create cursor since collection "
+ << ns
+ << " doesn't exist");
}
if (cursor) {
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 440889255ba..0d4d11793f2 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -30,8 +30,8 @@
#include "mongo/platform/basic.h"
-#include <string>
#include <sstream>
+#include <string>
#include "mongo/base/init.h"
#include "mongo/base/status.h"
@@ -96,8 +96,8 @@ static Status getPlanCache(OperationContext* txn,
// available to the client.
//
-MONGO_INITIALIZER_WITH_PREREQUISITES(SetupPlanCacheCommands,
- MONGO_NO_PREREQUISITES)(InitializerContext* context) {
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetupPlanCacheCommands, MONGO_NO_PREREQUISITES)
+(InitializerContext* context) {
// PlanCacheCommand constructors refer to static ActionType instances.
// Registering commands in a mongo static initializer ensures that
// the ActionType construction will be completed first.
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 21cbc937e73..bb8e2f0ad7e 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -202,8 +202,9 @@ TEST(PlanCacheCommandsTest, Canonicalize) {
ASSERT_NOT_OK(PlanCacheCommand::canonicalize(&txn, nss.ns(), fromjson("{query: {}, sort: 1}"))
.getStatus());
// Bad query (invalid sort order)
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(
- &txn, nss.ns(), fromjson("{query: {}, sort: {a: 0}}")).getStatus());
+ ASSERT_NOT_OK(
+ PlanCacheCommand::canonicalize(&txn, nss.ns(), fromjson("{query: {}, sort: {a: 0}}"))
+ .getStatus());
// Valid parameters
auto statusWithCQ =
@@ -307,10 +308,12 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
// Check keys in cache before dropping {b: 1}
vector<BSONObj> shapesBefore = getShapes(planCache);
ASSERT_EQUALS(shapesBefore.size(), 2U);
- BSONObj shapeA = BSON("query" << cqA->getQueryObj() << "sort" << cqA->getParsed().getSort()
- << "projection" << cqA->getParsed().getProj());
- BSONObj shapeB = BSON("query" << cqB->getQueryObj() << "sort" << cqB->getParsed().getSort()
- << "projection" << cqB->getParsed().getProj());
+ BSONObj shapeA =
+ BSON("query" << cqA->getQueryObj() << "sort" << cqA->getParsed().getSort() << "projection"
+ << cqA->getParsed().getProj());
+ BSONObj shapeB =
+ BSON("query" << cqB->getQueryObj() << "sort" << cqB->getParsed().getSort() << "projection"
+ << cqB->getParsed().getProj());
ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeA) != shapesBefore.end());
ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeB) != shapesBefore.end());
diff --git a/src/mongo/db/commands/rename_collection_cmd.cpp b/src/mongo/db/commands/rename_collection_cmd.cpp
index 210e279607f..746aeb49db1 100644
--- a/src/mongo/db/commands/rename_collection_cmd.cpp
+++ b/src/mongo/db/commands/rename_collection_cmd.cpp
@@ -31,21 +31,21 @@
#include "mongo/client/dbclientcursor.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog_entry.h"
-#include "mongo/db/catalog/rename_collection.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/index_catalog.h"
#include "mongo/db/catalog/index_create.h"
+#include "mongo/db/catalog/rename_collection.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/rename_collection.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index_builder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/ops/insert.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
diff --git a/src/mongo/db/commands/server_status.cpp b/src/mongo/db/commands/server_status.cpp
index 9570b212f64..bbe7e9930a7 100644
--- a/src/mongo/db/commands/server_status.cpp
+++ b/src/mongo/db/commands/server_status.cpp
@@ -32,13 +32,13 @@
#include "mongo/platform/basic.h"
+#include "mongo/config.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/client_basic.h"
-#include "mongo/config.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/server_status.h"
#include "mongo/db/commands/server_status_internal.h"
diff --git a/src/mongo/db/commands/server_status.h b/src/mongo/db/commands/server_status.h
index 1ebe57280d7..862cf1960e9 100644
--- a/src/mongo/db/commands/server_status.h
+++ b/src/mongo/db/commands/server_status.h
@@ -30,11 +30,11 @@
#pragma once
-#include <string>
#include "mongo/db/commands.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/stats/counters.h"
#include "mongo/platform/atomic_word.h"
+#include <string>
namespace mongo {
diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp
index 5bcfe71e365..c32cc208090 100644
--- a/src/mongo/db/commands/test_commands.cpp
+++ b/src/mongo/db/commands/test_commands.cpp
@@ -41,11 +41,11 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index_builder.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -263,11 +263,11 @@ public:
for (int i = 0; i < n + 1; ++i) {
PlanExecutor::ExecState state = exec->getNext(nullptr, &end);
if (PlanExecutor::ADVANCED != state) {
- return appendCommandStatus(result,
- {ErrorCodes::IllegalOperation,
- str::stream()
- << "invalid n, collection contains fewer than "
- << n << " documents"});
+ return appendCommandStatus(
+ result,
+ {ErrorCodes::IllegalOperation,
+ str::stream() << "invalid n, collection contains fewer than " << n
+ << " documents"});
}
}
}
diff --git a/src/mongo/db/commands/top_command.cpp b/src/mongo/db/commands/top_command.cpp
index e4b788dc711..6f236de90da 100644
--- a/src/mongo/db/commands/top_command.cpp
+++ b/src/mongo/db/commands/top_command.cpp
@@ -33,10 +33,10 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/privilege.h"
#include "mongo/db/client.h"
+#include "mongo/db/commands.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/stats/top.h"
-#include "mongo/db/commands.h"
namespace {
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 696339496eb..2bc8c1d1b51 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -95,7 +95,8 @@ BSONArray roleSetToBSONArray(const unordered_set<RoleName>& roles) {
for (unordered_set<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
const RoleName& role = *it;
rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
<< role.getDB()));
}
return rolesArrayBuilder.arr();
@@ -106,7 +107,8 @@ BSONArray rolesVectorToBSONArray(const std::vector<RoleName>& roles) {
for (std::vector<RoleName>::const_iterator it = roles.begin(); it != roles.end(); ++it) {
const RoleName& role = *it;
rolesArrayBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
<< role.getDB()));
}
return rolesArrayBuilder.arr();
@@ -172,9 +174,9 @@ Status checkOkayToGrantRolesToRole(OperationContext* txn,
}
if (role.getDB() != "admin" && roleToAdd.getDB() != role.getDB()) {
- return Status(ErrorCodes::InvalidRoleModification,
- str::stream()
- << "Roles on the \'" << role.getDB()
+ return Status(
+ ErrorCodes::InvalidRoleModification,
+ str::stream() << "Roles on the \'" << role.getDB()
<< "\' database cannot be granted roles from other databases");
}
@@ -195,11 +197,11 @@ Status checkOkayToGrantRolesToRole(OperationContext* txn,
}
if (sequenceContains(indirectRoles, role)) {
- return Status(ErrorCodes::InvalidRoleModification,
- mongoutils::str::stream()
- << "Granting " << roleToAdd.getFullName() << " to "
- << role.getFullName()
- << " would introduce a cycle in the role graph.");
+ return Status(
+ ErrorCodes::InvalidRoleModification,
+ mongoutils::str::stream() << "Granting " << roleToAdd.getFullName() << " to "
+ << role.getFullName()
+ << " would introduce a cycle in the role graph.");
}
}
return Status::OK();
@@ -421,13 +423,14 @@ Status insertRoleDocument(OperationContext* txn, const BSONObj& roleObj) {
* Updates the given role object with the given update modifier.
*/
Status updateRoleDocument(OperationContext* txn, const RoleName& role, const BSONObj& updateObj) {
- Status status = updateOneAuthzDocument(
- txn,
- AuthorizationManager::rolesCollectionNamespace,
- BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << role.getRole() << AuthorizationManager::ROLE_DB_FIELD_NAME << role.getDB()),
- updateObj,
- false);
+ Status status = updateOneAuthzDocument(txn,
+ AuthorizationManager::rolesCollectionNamespace,
+ BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
+ << role.getRole()
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << role.getDB()),
+ updateObj,
+ false);
if (status.isOK()) {
return status;
}
@@ -481,13 +484,14 @@ Status insertPrivilegeDocument(OperationContext* txn, const BSONObj& userObj) {
Status updatePrivilegeDocument(OperationContext* txn,
const UserName& user,
const BSONObj& updateObj) {
- Status status = updateOneAuthzDocument(
- txn,
- AuthorizationManager::usersCollectionNamespace,
- BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << user.getUser() << AuthorizationManager::USER_DB_FIELD_NAME << user.getDB()),
- updateObj,
- false);
+ Status status = updateOneAuthzDocument(txn,
+ AuthorizationManager::usersCollectionNamespace,
+ BSON(AuthorizationManager::USER_NAME_FIELD_NAME
+ << user.getUser()
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << user.getDB()),
+ updateObj,
+ false);
if (status.isOK()) {
return status;
}
@@ -554,7 +558,8 @@ Status requireAuthSchemaVersion26Final(OperationContext* txn, AuthorizationManag
str::stream()
<< "User and role management commands require auth data to have "
<< "at least schema version "
- << AuthorizationManager::schemaVersion26Final << " but found "
+ << AuthorizationManager::schemaVersion26Final
+ << " but found "
<< foundSchemaVersion);
}
return writeAuthSchemaVersionIfNeeded(txn, authzManager, foundSchemaVersion);
@@ -577,7 +582,8 @@ Status requireAuthSchemaVersion26UpgradeOrFinal(OperationContext* txn,
return Status(ErrorCodes::AuthSchemaIncompatible,
str::stream() << "The usersInfo and rolesInfo commands require auth data to "
<< "have at least schema version "
- << AuthorizationManager::schemaVersion26Upgrade << " but found "
+ << AuthorizationManager::schemaVersion26Upgrade
+ << " but found "
<< foundSchemaVersion);
}
return Status::OK();
@@ -1901,7 +1907,8 @@ public:
ss << "Drops a single role. Before deleting the role completely it must remove it "
"from any users or roles that reference it. If any errors occur in the middle "
"of that process it's possible to be left in a state where the role has been "
- "removed from some user/roles but otherwise still exists." << endl;
+ "removed from some user/roles but otherwise still exists."
+ << endl;
}
virtual Status checkAuthForCommand(ClientBasic* client,
@@ -1967,11 +1974,12 @@ public:
ErrorCodes::Error code = status.code() == ErrorCodes::UnknownError
? ErrorCodes::UserModificationFailed
: status.code();
- return appendCommandStatus(
- result,
- Status(code,
- str::stream() << "Failed to remove role " << roleName.getFullName()
- << " from all users: " << status.reason()));
+ return appendCommandStatus(result,
+ Status(code,
+ str::stream() << "Failed to remove role "
+ << roleName.getFullName()
+ << " from all users: "
+ << status.reason()));
}
// Remove this role from all other roles
@@ -2019,7 +2027,8 @@ public:
Status(status.code(),
str::stream() << "Removed role " << roleName.getFullName()
<< " from all users and roles but failed to actually delete"
- " the role itself: " << status.reason()));
+ " the role itself: "
+ << status.reason()));
}
dassert(nMatched == 0 || nMatched == 1);
@@ -2052,7 +2061,8 @@ public:
"it must remove them from any users or other roles that reference them. If any "
"errors occur in the middle of that process it's possible to be left in a state "
"where the roles have been removed from some user/roles but otherwise still "
- "exist." << endl;
+ "exist."
+ << endl;
}
virtual Status checkAuthForCommand(ClientBasic* client,
@@ -2100,9 +2110,10 @@ public:
: status.code();
return appendCommandStatus(result,
Status(code,
- str::stream()
- << "Failed to remove roles from \"" << dbname
- << "\" db from all users: " << status.reason()));
+ str::stream() << "Failed to remove roles from \""
+ << dbname
+ << "\" db from all users: "
+ << status.reason()));
}
// Remove these roles from all other roles
@@ -2125,9 +2136,10 @@ public:
: status.code();
return appendCommandStatus(result,
Status(code,
- str::stream()
- << "Failed to remove roles from \"" << dbname
- << "\" db from all roles: " << status.reason()));
+ str::stream() << "Failed to remove roles from \""
+ << dbname
+ << "\" db from all roles: "
+ << status.reason()));
}
audit::logDropAllRolesFromDatabase(ClientBasic::getCurrent(), dbname);
@@ -2143,7 +2155,8 @@ public:
str::stream() << "Removed roles from \"" << dbname
<< "\" db "
" from all users and roles but failed to actually delete"
- " those roles themselves: " << status.reason()));
+ " those roles themselves: "
+ << status.reason()));
}
result.append("n", nMatched);
@@ -2535,7 +2548,9 @@ public:
BSONObj query =
db.empty() ? BSONObj() : BSON(AuthorizationManager::USER_DB_FIELD_NAME << db);
BSONObj fields = BSON(AuthorizationManager::USER_NAME_FIELD_NAME
- << 1 << AuthorizationManager::USER_DB_FIELD_NAME << 1);
+ << 1
+ << AuthorizationManager::USER_DB_FIELD_NAME
+ << 1);
Status status =
queryAuthzDocument(txn,
@@ -2613,7 +2628,9 @@ public:
BSONObj query =
db.empty() ? BSONObj() : BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << db);
BSONObj fields = BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
- << 1 << AuthorizationManager::ROLE_DB_FIELD_NAME << 1);
+ << 1
+ << AuthorizationManager::ROLE_DB_FIELD_NAME
+ << 1);
Status status =
queryAuthzDocument(txn,
@@ -2741,7 +2758,8 @@ void updateUserCredentials(OperationContext* txn,
mongoutils::str::stream()
<< "While preparing to upgrade user doc from "
"2.6/3.0 user data schema to the 3.0+ SCRAM only schema, found a user doc "
- "with missing or incorrectly formatted credentials: " << userDoc.toString(),
+ "with missing or incorrectly formatted credentials: "
+ << userDoc.toString(),
credentialsElement.type() == Object);
BSONObj credentialsObj = credentialsElement.Obj();
@@ -2758,7 +2776,8 @@ void updateUserCredentials(OperationContext* txn,
mongoutils::str::stream()
<< "While preparing to upgrade user doc from "
"2.6/3.0 user data schema to the 3.0+ SCRAM only schema, found a user doc "
- "missing MONGODB-CR credentials :" << userDoc.toString(),
+ "missing MONGODB-CR credentials :"
+ << userDoc.toString(),
!mongoCRElement.eoo());
std::string hashedPassword = mongoCRElement.String();
diff --git a/src/mongo/db/commands/user_management_commands_common.cpp b/src/mongo/db/commands/user_management_commands_common.cpp
index 03a6a129fdc..292c21a8255 100644
--- a/src/mongo/db/commands/user_management_commands_common.cpp
+++ b/src/mongo/db/commands/user_management_commands_common.cpp
@@ -66,8 +66,8 @@ Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession,
for (size_t i = 0; i < roles.size(); ++i) {
if (!authzSession->isAuthorizedToGrantRole(roles[i])) {
return Status(ErrorCodes::Unauthorized,
- str::stream()
- << "Not authorized to grant role: " << roles[i].getFullName());
+ str::stream() << "Not authorized to grant role: "
+ << roles[i].getFullName());
}
}
@@ -91,8 +91,8 @@ Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession,
for (size_t i = 0; i < roles.size(); ++i) {
if (!authzSession->isAuthorizedToRevokeRole(roles[i])) {
return Status(ErrorCodes::Unauthorized,
- str::stream()
- << "Not authorized to revoke role: " << roles[i].getFullName());
+ str::stream() << "Not authorized to revoke role: "
+ << roles[i].getFullName());
}
}
return Status::OK();
@@ -123,8 +123,8 @@ Status checkAuthForCreateUserCommand(ClientBasic* client,
if (!authzSession->isAuthorizedForActionsOnResource(
ResourcePattern::forDatabaseName(args.userName.getDB()), ActionType::createUser)) {
return Status(ErrorCodes::Unauthorized,
- str::stream()
- << "Not authorized to create users on db: " << args.userName.getDB());
+ str::stream() << "Not authorized to create users on db: "
+ << args.userName.getDB());
}
return checkAuthorizedToGrantRoles(authzSession, args.roles);
@@ -205,8 +205,8 @@ Status checkAuthForCreateRoleCommand(ClientBasic* client,
if (!authzSession->isAuthorizedToCreateRole(args)) {
return Status(ErrorCodes::Unauthorized,
- str::stream()
- << "Not authorized to create roles on db: " << args.roleName.getDB());
+ str::stream() << "Not authorized to create roles on db: "
+ << args.roleName.getDB());
}
status = checkAuthorizedToGrantRoles(authzSession, args.roles);
@@ -441,7 +441,8 @@ Status checkAuthForRolesInfoCommand(ClientBasic* client,
ActionType::viewRole)) {
return Status(ErrorCodes::Unauthorized,
str::stream() << "Not authorized to view roles from the "
- << args.roleNames[i].getDB() << " database");
+ << args.roleNames[i].getDB()
+ << " database");
}
}
}
diff --git a/src/mongo/db/commands/write_commands/write_commands_common.cpp b/src/mongo/db/commands/write_commands/write_commands_common.cpp
index 82f3ab4db67..aa208a1d3c7 100644
--- a/src/mongo/db/commands/write_commands/write_commands_common.cpp
+++ b/src/mongo/db/commands/write_commands/write_commands_common.cpp
@@ -33,9 +33,9 @@
#include <string>
#include <vector>
-#include "mongo/db/auth/privilege.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
+#include "mongo/db/auth/privilege.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/util/assert_util.h"
diff --git a/src/mongo/db/commands/write_commands/write_commands_common.h b/src/mongo/db/commands/write_commands/write_commands_common.h
index cf47bdc02b1..53ba02aad05 100644
--- a/src/mongo/db/commands/write_commands/write_commands_common.h
+++ b/src/mongo/db/commands/write_commands/write_commands_common.h
@@ -28,9 +28,9 @@
#pragma once
+#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/auth/authorization_session.h"
#include "mongo/s/write_ops/batched_command_request.h"
/**
diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp
index 2477b58f02d..672266146e4 100644
--- a/src/mongo/db/concurrency/d_concurrency.cpp
+++ b/src/mongo/db/concurrency/d_concurrency.cpp
@@ -32,9 +32,9 @@
#include <string>
-#include "mongo/db/service_context.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/server_parameters.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/stacktrace.h"
diff --git a/src/mongo/db/concurrency/lock_manager_defs.h b/src/mongo/db/concurrency/lock_manager_defs.h
index 48fcb073bbe..d794c7c2031 100644
--- a/src/mongo/db/concurrency/lock_manager_defs.h
+++ b/src/mongo/db/concurrency/lock_manager_defs.h
@@ -29,8 +29,8 @@
#pragma once
#include <cstdint>
-#include <string>
#include <limits>
+#include <string>
#include "mongo/base/string_data.h"
#include "mongo/config.h"
diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp
index ff77d6021c3..d16c9f4e11d 100644
--- a/src/mongo/db/concurrency/lock_state.cpp
+++ b/src/mongo/db/concurrency/lock_state.cpp
@@ -34,8 +34,8 @@
#include <vector>
-#include "mongo/db/service_context.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/service_context.h"
#include "mongo/platform/compiler.h"
#include "mongo/util/background.h"
#include "mongo/util/concurrency/synchronization.h"
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
index 3c2a30cd932..e3906d29de9 100644
--- a/src/mongo/db/curop.cpp
+++ b/src/mongo/db/curop.cpp
@@ -134,7 +134,8 @@ BSONObj upconvertGetMoreEntry(const NamespaceString& nss, CursorId cursorId, int
boost::none, // awaitDataTimeout
boost::none, // term
boost::none // lastKnownCommittedOpTime
- ).toBSON();
+ )
+ .toBSON();
}
} // namespace
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
index 09a761d639a..eee95967b96 100644
--- a/src/mongo/db/curop.h
+++ b/src/mongo/db/curop.h
@@ -35,9 +35,9 @@
#include "mongo/db/operation_context.h"
#include "mongo/db/server_options.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/util/net/message.h"
#include "mongo/util/progress_meter.h"
#include "mongo/util/time_support.h"
-#include "mongo/util/net/message.h"
namespace mongo {
diff --git a/src/mongo/db/curop_metrics.cpp b/src/mongo/db/curop_metrics.cpp
index 558659ae554..be2ed8e00de 100644
--- a/src/mongo/db/curop_metrics.cpp
+++ b/src/mongo/db/curop_metrics.cpp
@@ -29,9 +29,9 @@
#include "mongo/platform/basic.h"
#include "mongo/base/counter.h"
+#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/curop.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/commands/server_status_metric.h"
namespace mongo {
namespace {
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index 547b946492d..b67e6b2b11d 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -441,7 +441,8 @@ static void repairDatabasesAndCheckVersion(OperationContext* txn) {
status = {ErrorCodes::MustUpgrade, status.reason()};
}
severe() << "Unable to start mongod due to an incompatibility with the data files and"
- " this version of mongod: " << status;
+ " this version of mongod: "
+ << status;
severe() << "Please consult our documentation when trying to downgrade to a previous"
" major release";
quickExit(EXIT_NEED_UPGRADE);
@@ -835,9 +836,8 @@ int main(int argc, char* argv[], char** envp) {
}
#endif
-MONGO_INITIALIZER_GENERAL(ForkServer,
- ("EndStartupOptionHandling"),
- ("default"))(InitializerContext* context) {
+MONGO_INITIALIZER_GENERAL(ForkServer, ("EndStartupOptionHandling"), ("default"))
+(InitializerContext* context) {
mongo::forkServerOrDie();
return Status::OK();
}
@@ -949,9 +949,8 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(CreateReplicationManager,
}
#ifdef MONGO_CONFIG_SSL
-MONGO_INITIALIZER_GENERAL(setSSLManagerType,
- MONGO_NO_PREREQUISITES,
- ("SSLManager"))(InitializerContext* context) {
+MONGO_INITIALIZER_GENERAL(setSSLManagerType, MONGO_NO_PREREQUISITES, ("SSLManager"))
+(InitializerContext* context) {
isSSLServer = true;
return Status::OK();
}
diff --git a/src/mongo/db/db.h b/src/mongo/db/db.h
index 5a9da9cafb6..f5ca27cd748 100644
--- a/src/mongo/db/db.h
+++ b/src/mongo/db/db.h
@@ -30,9 +30,9 @@
#include "mongo/platform/basic.h"
+#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/curop.h"
-#include "mongo/db/catalog/database_holder.h"
#include "mongo/util/net/message.h"
namespace mongo {
diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp
index e25c3d09ae0..f1bc7208092 100644
--- a/src/mongo/db/db_raii.cpp
+++ b/src/mongo/db/db_raii.cpp
@@ -30,9 +30,9 @@
#include "mongo/db/db_raii.h"
-#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/client.h"
#include "mongo/db/curop.h"
#include "mongo/db/repl/replication_coordinator_global.h"
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 4b4a6089364..a7def1cb6c5 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -63,9 +63,9 @@
#include "mongo/db/dbdirectclient.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/index_builder.h"
-#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/index_access_method.h"
+#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/index_builder.h"
#include "mongo/db/instance.h"
#include "mongo/db/introspect.h"
#include "mongo/db/jsobj.h"
@@ -90,13 +90,13 @@
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/write_concern.h"
-#include "mongo/rpc/request_interface.h"
-#include "mongo/rpc/reply_builder_interface.h"
#include "mongo/rpc/metadata.h"
#include "mongo/rpc/metadata/config_server_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/rpc/metadata/sharding_metadata.h"
#include "mongo/rpc/protocol.h"
+#include "mongo/rpc/reply_builder_interface.h"
+#include "mongo/rpc/request_interface.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
@@ -1379,7 +1379,8 @@ void Command::execCommand(OperationContext* txn,
34422,
str::stream()
<< "Received a command with sharding chunk version information but this "
- "node is not sharding aware: " << request.getCommandArgs().jsonString(),
+ "node is not sharding aware: "
+ << request.getCommandArgs().jsonString(),
!oss.hasShardVersion() ||
ChunkVersion::isIgnoredVersion(oss.getShardVersion(commandNS)));
}
@@ -1405,8 +1406,8 @@ void Command::execCommand(OperationContext* txn,
// If we got a stale config, wait in case the operation is stuck in a critical section
if (e.getCode() == ErrorCodes::SendStaleConfig) {
auto& sce = static_cast<const StaleConfigException&>(e);
- ShardingState::get(txn)
- ->onStaleShardVersion(txn, NamespaceString(sce.getns()), sce.getVersionReceived());
+ ShardingState::get(txn)->onStaleShardVersion(
+ txn, NamespaceString(sce.getns()), sce.getVersionReceived());
}
BSONObjBuilder metadataBob;
@@ -1509,8 +1510,8 @@ bool Command::run(OperationContext* txn,
// Wait until a snapshot is available.
while (status == ErrorCodes::ReadConcernMajorityNotAvailableYet) {
- LOG(debugLevel)
- << "Snapshot not available for readConcern: " << readConcernArgs;
+ LOG(debugLevel) << "Snapshot not available for readConcern: "
+ << readConcernArgs;
replCoord->waitUntilSnapshotCommitted(txn, SnapshotName::min());
status = txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
}
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index e0a5b6ffcd3..dc88905ae63 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -40,7 +40,6 @@
#include "mongo/db/db.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index/btree_access_method.h"
#include "mongo/db/json.h"
#include "mongo/db/keypattern.h"
@@ -57,16 +56,17 @@
#include "mongo/db/range_arithmetic.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/s/collection_metadata.h"
+#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/data_protector.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h"
#include "mongo/db/write_concern.h"
#include "mongo/db/write_concern_options.h"
-#include "mongo/db/s/collection_metadata.h"
-#include "mongo/db/s/sharding_state.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
#include "mongo/util/scopeguard.h"
namespace mongo {
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index 53d0839711b..6386a943552 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -28,8 +28,8 @@
#pragma once
-#include <memory>
#include <boost/filesystem/path.hpp>
+#include <memory>
#include "mongo/db/db.h"
#include "mongo/db/record_id.h"
diff --git a/src/mongo/db/dbwebserver.cpp b/src/mongo/db/dbwebserver.cpp
index c924c0a6ddb..f6088f8be2c 100644
--- a/src/mongo/db/dbwebserver.cpp
+++ b/src/mongo/db/dbwebserver.cpp
@@ -42,14 +42,14 @@
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/privilege.h"
-#include "mongo/db/auth/user_name.h"
#include "mongo/db/auth/user.h"
+#include "mongo/db/auth/user_name.h"
#include "mongo/db/background.h"
#include "mongo/db/commands.h"
#include "mongo/db/db.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/instance.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/stats/snapshots.h"
#include "mongo/rpc/command_reply.h"
#include "mongo/rpc/command_reply_builder.h"
@@ -451,7 +451,8 @@ void DbWebServer::doRequest(const char* rq,
"These read-only context-less commands can be executed from the web "
"interface. Results are json format, unless ?text=1 is appended in which "
"case the result is output as text for easier human viewing",
- "Commands") << ": ";
+ "Commands")
+ << ": ";
auto m = Command::commandsByBestName();
diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp
index a9bcb8466f2..033949827bc 100644
--- a/src/mongo/db/exec/and_hash.cpp
+++ b/src/mongo/db/exec/and_hash.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/exec/and_common-inl.h"
#include "mongo/db/exec/scoped_timer.h"
-#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/exec/working_set.h"
+#include "mongo/db/exec/working_set_common.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h
index b984f390590..456062dc41e 100644
--- a/src/mongo/db/exec/and_hash.h
+++ b/src/mongo/db/exec/and_hash.h
@@ -30,8 +30,8 @@
#include <vector>
-#include "mongo/db/jsobj.h"
#include "mongo/db/exec/plan_stage.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/record_id.h"
#include "mongo/platform/unordered_map.h"
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 3425a2562b5..5ec0329f8b1 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -209,7 +209,8 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache) {
if (!status.isOK()) {
return Status(ErrorCodes::BadValue,
str::stream() << "error processing query: " << _canonicalQuery->toString()
- << " planner returned error: " << status.reason());
+ << " planner returned error: "
+ << status.reason());
}
OwnedPointerVector<QuerySolution> solutions(rawSolutions);
diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h
index e065b955012..99cfaf42e6c 100644
--- a/src/mongo/db/exec/cached_plan.h
+++ b/src/mongo/db/exec/cached_plan.h
@@ -31,9 +31,9 @@
#include <list>
#include <memory>
-#include "mongo/db/jsobj.h"
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/working_set.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/query_planner_params.h"
#include "mongo/db/query/query_solution.h"
diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp
index a4179218638..b130d128023 100644
--- a/src/mongo/db/exec/collection_scan.cpp
+++ b/src/mongo/db/exec/collection_scan.cpp
@@ -30,14 +30,14 @@
#include "mongo/db/exec/collection_scan.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/exec/collection_scan_common.h"
#include "mongo/db/exec/filter.h"
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/concurrency/write_conflict_exception.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/storage/record_fetcher.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/fail_point_service.h"
@@ -74,7 +74,8 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
ErrorCodes::CappedPositionLost,
str::stream()
<< "CollectionScan died due to position in capped collection being deleted. "
- << "Last seen record id: " << _lastSeenId);
+ << "Last seen record id: "
+ << _lastSeenId);
*out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
return PlanStage::DEAD;
}
@@ -107,7 +108,8 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) {
Status status(ErrorCodes::CappedPositionLost,
str::stream() << "CollectionScan died due to failure to restore "
<< "tailable cursor position. "
- << "Last seen record id: " << _lastSeenId);
+ << "Last seen record id: "
+ << _lastSeenId);
*out = WorkingSetCommon::allocateStatusMember(_workingSet, status);
return PlanStage::DEAD;
}
diff --git a/src/mongo/db/exec/delete.cpp b/src/mongo/db/exec/delete.cpp
index e1e88a3333a..0f3e09314e7 100644
--- a/src/mongo/db/exec/delete.cpp
+++ b/src/mongo/db/exec/delete.cpp
@@ -38,10 +38,10 @@
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/exec/write_stage_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp
index b40e9be02eb..c53573706f0 100644
--- a/src/mongo/db/exec/geo_near.cpp
+++ b/src/mongo/db/exec/geo_near.cpp
@@ -35,8 +35,8 @@
#include "third_party/s2/s2regionintersection.h"
#include "mongo/base/owned_pointer_vector.h"
-#include "mongo/db/exec/index_scan.h"
#include "mongo/db/exec/fetch.h"
+#include "mongo/db/exec/index_scan.h"
#include "mongo/db/exec/working_set_computed_data.h"
#include "mongo/db/geo/geoconstants.h"
#include "mongo/db/geo/geoparser.h"
diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h
index 8040a57f183..5d58f616248 100644
--- a/src/mongo/db/exec/geo_near.h
+++ b/src/mongo/db/exec/geo_near.h
@@ -35,8 +35,8 @@
#include "mongo/db/geo/r2_region_coverer.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/s2_common.h"
-#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/matcher/expression.h"
+#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/query/index_bounds.h"
#include "third_party/s2/s2cellunion.h"
diff --git a/src/mongo/db/exec/keep_mutations.h b/src/mongo/db/exec/keep_mutations.h
index 07883c5a7a9..cadfc02940c 100644
--- a/src/mongo/db/exec/keep_mutations.h
+++ b/src/mongo/db/exec/keep_mutations.h
@@ -29,8 +29,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/exec/plan_stage.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/record_id.h"
diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h
index b5e2e690b80..bc62d78d82c 100644
--- a/src/mongo/db/exec/limit.h
+++ b/src/mongo/db/exec/limit.h
@@ -29,8 +29,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/exec/plan_stage.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/record_id.h"
namespace mongo {
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index c57e0a5f02f..7f9185435c9 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -38,8 +38,8 @@
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
-#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/client.h"
+#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/query/explain.h"
@@ -47,8 +47,8 @@
#include "mongo/db/query/plan_ranker.h"
#include "mongo/db/storage/record_fetcher.h"
#include "mongo/stdx/memory.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h
index 3aff4852c19..1363847caf5 100644
--- a/src/mongo/db/exec/multi_plan.h
+++ b/src/mongo/db/exec/multi_plan.h
@@ -29,14 +29,14 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/working_set.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/query/canonical_query.h"
-#include "mongo/db/query/query_solution.h"
#include "mongo/db/query/plan_ranker.h"
#include "mongo/db/query/plan_yield_policy.h"
+#include "mongo/db/query/query_solution.h"
#include "mongo/db/record_id.h"
namespace mongo {
diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h
index e2eba0c6b53..d9ccdc4b92a 100644
--- a/src/mongo/db/exec/near.h
+++ b/src/mongo/db/exec/near.h
@@ -30,8 +30,8 @@
#include <queue>
-#include "mongo/base/string_data.h"
#include "mongo/base/status_with.h"
+#include "mongo/base/string_data.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/plan_stats.h"
diff --git a/src/mongo/db/exec/pipeline_proxy.h b/src/mongo/db/exec/pipeline_proxy.h
index 791a1ebfa62..bf30e2c8eb5 100644
--- a/src/mongo/db/exec/pipeline_proxy.h
+++ b/src/mongo/db/exec/pipeline_proxy.h
@@ -28,8 +28,8 @@
#pragma once
-#include <boost/optional/optional.hpp>
#include <boost/intrusive_ptr.hpp>
+#include <boost/optional/optional.hpp>
#include "mongo/db/catalog/collection.h"
#include "mongo/db/exec/plan_stage.h"
diff --git a/src/mongo/db/exec/projection_exec_test.cpp b/src/mongo/db/exec/projection_exec_test.cpp
index 4c9689f544e..2399e35fd73 100644
--- a/src/mongo/db/exec/projection_exec_test.cpp
+++ b/src/mongo/db/exec/projection_exec_test.cpp
@@ -32,13 +32,13 @@
#include "mongo/db/exec/projection_exec.h"
-#include <memory>
-#include "mongo/db/json.h"
#include "mongo/db/exec/working_set_computed_data.h"
+#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/unittest/unittest.h"
+#include <memory>
using namespace mongo;
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index ee7d4ab4d15..1b604e1085e 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -33,11 +33,11 @@
#include <algorithm>
#include "mongo/db/catalog/collection.h"
-#include "mongo/db/index_names.h"
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/exec/working_set_computed_data.h"
#include "mongo/db/index/btree_key_generator.h"
+#include "mongo/db/index_names.h"
#include "mongo/db/query/collation/collator_interface.h"
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/lite_parsed_query.h"
diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h
index 650eb9174d1..e2efeb87337 100644
--- a/src/mongo/db/exec/sort.h
+++ b/src/mongo/db/exec/sort.h
@@ -28,8 +28,8 @@
#pragma once
-#include <vector>
#include <set>
+#include <vector>
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/sort_key_generator.h"
diff --git a/src/mongo/db/exec/sort_key_generator.h b/src/mongo/db/exec/sort_key_generator.h
index b63b56b0c4d..c4d168f02e3 100644
--- a/src/mongo/db/exec/sort_key_generator.h
+++ b/src/mongo/db/exec/sort_key_generator.h
@@ -31,8 +31,8 @@
#include <memory>
#include "mongo/bson/bsonobj.h"
-#include "mongo/db/index/btree_key_generator.h"
#include "mongo/db/exec/plan_stage.h"
+#include "mongo/db/index/btree_key_generator.h"
#include "mongo/db/query/index_bounds.h"
#include "mongo/db/query/stage_types.h"
diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp
index f5316a7a94c..837300e3ace 100644
--- a/src/mongo/db/exec/stagedebug_cmd.cpp
+++ b/src/mongo/db/exec/stagedebug_cmd.cpp
@@ -212,12 +212,12 @@ public:
<< PlanExecutor::statestr(state)
<< ", stats: " << Explain::getWinningPlanStats(exec.get());
- return appendCommandStatus(
- result,
- Status(ErrorCodes::OperationFailed,
- str::stream()
- << "Executor error during "
- << "StageDebug command: " << WorkingSetCommon::toStatusString(obj)));
+ return appendCommandStatus(result,
+ Status(ErrorCodes::OperationFailed,
+ str::stream()
+ << "Executor error during "
+ << "StageDebug command: "
+ << WorkingSetCommon::toStatusString(obj)));
}
return true;
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 8be5da178dc..5d1fee15703 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -38,8 +38,8 @@
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/plan_executor.h"
-#include "mongo/db/query/planner_analysis.h"
#include "mongo/db/query/planner_access.h"
+#include "mongo/db/query/planner_analysis.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/db/query/stage_builder.h"
diff --git a/src/mongo/db/exec/text.cpp b/src/mongo/db/exec/text.cpp
index fdc53a11a25..ddbcc1d9a46 100644
--- a/src/mongo/db/exec/text.cpp
+++ b/src/mongo/db/exec/text.cpp
@@ -32,9 +32,9 @@
#include "mongo/db/exec/filter.h"
#include "mongo/db/exec/index_scan.h"
-#include "mongo/db/exec/text_or.h"
-#include "mongo/db/exec/text_match.h"
#include "mongo/db/exec/scoped_timer.h"
+#include "mongo/db/exec/text_match.h"
+#include "mongo/db/exec/text_or.h"
#include "mongo/db/exec/working_set.h"
#include "mongo/db/fts/fts_index_format.h"
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/db/exec/text_match.cpp b/src/mongo/db/exec/text_match.cpp
index 0d94a8a5bfb..f9aabab4f2a 100644
--- a/src/mongo/db/exec/text_match.cpp
+++ b/src/mongo/db/exec/text_match.cpp
@@ -34,8 +34,8 @@
#include "mongo/db/exec/working_set.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/jsobj.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/stdx/memory.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
diff --git a/src/mongo/db/exec/update.cpp b/src/mongo/db/exec/update.cpp
index b06197460ac..7fca4f3906b 100644
--- a/src/mongo/db/exec/update.cpp
+++ b/src/mongo/db/exec/update.cpp
@@ -37,11 +37,11 @@
#include "mongo/db/exec/scoped_timer.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/exec/write_stage_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/ops/update_lifecycle.h"
#include "mongo/db/query/explain.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -146,7 +146,8 @@ Status validateDollarPrefixElement(const mb::ConstElement elem, const bool deep)
// not an okay, $ prefixed field name.
return Status(ErrorCodes::DollarPrefixedFieldName,
str::stream() << "The dollar ($) prefixed field '" << elem.getFieldName()
- << "' in '" << mb::getFullName(elem)
+ << "' in '"
+ << mb::getFullName(elem)
<< "' is not valid for storage.");
}
@@ -198,7 +199,8 @@ Status storageValid(const mb::ConstElement& elem, const bool deep) {
// Field name cannot have a "." in it.
return Status(ErrorCodes::DottedFieldName,
str::stream() << "The dotted field '" << elem.getFieldName() << "' in '"
- << mb::getFullName(elem) << "' is not valid for storage.");
+ << mb::getFullName(elem)
+ << "' is not valid for storage.");
}
}
@@ -340,9 +342,12 @@ inline Status validate(const BSONObj& original,
return Status(ErrorCodes::ImmutableField,
mongoutils::str::stream()
<< "After applying the update to the document with "
- << newIdElem.toString() << ", the '" << current.dottedField()
+ << newIdElem.toString()
+ << ", the '"
+ << current.dottedField()
<< "' (required and immutable) field was "
- "found to have been removed --" << original);
+ "found to have been removed --"
+ << original);
}
} else {
// Find the potentially affected field in the original document.
@@ -358,7 +363,8 @@ inline Status validate(const BSONObj& original,
mongoutils::str::stream()
<< "After applying the update to the document {"
<< (oldIdElem.ok() ? oldIdElem.toString() : newIdElem.toString())
- << " , ...}, the (immutable) field '" << current.dottedField()
+ << " , ...}, the (immutable) field '"
+ << current.dottedField()
<< "' was found to be an array or array descendant.");
}
currElem = currElem.parent();
@@ -369,8 +375,10 @@ inline Status validate(const BSONObj& original,
return Status(ErrorCodes::ImmutableField,
mongoutils::str::stream()
<< "After applying the update to the document {"
- << oldElem.toString() << " , ...}, the (immutable) field '"
- << current.dottedField() << "' was found to have been altered to "
+ << oldElem.toString()
+ << " , ...}, the (immutable) field '"
+ << current.dottedField()
+ << "' was found to have been altered to "
<< newElem.toString());
}
}
diff --git a/src/mongo/db/exec/working_set.h b/src/mongo/db/exec/working_set.h
index 57e3bdc9a5a..8dab9943f3d 100644
--- a/src/mongo/db/exec/working_set.h
+++ b/src/mongo/db/exec/working_set.h
@@ -28,8 +28,8 @@
#pragma once
-#include <vector>
#include <unordered_set>
+#include <vector>
#include "mongo/base/disallow_copying.h"
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp
index 1981985a831..a4052435d63 100644
--- a/src/mongo/db/exec/working_set_common.cpp
+++ b/src/mongo/db/exec/working_set_common.cpp
@@ -31,11 +31,11 @@
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/catalog/collection.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/exec/working_set.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/service_context.h"
namespace mongo {
diff --git a/src/mongo/db/exec/working_set_test.cpp b/src/mongo/db/exec/working_set_test.cpp
index c0c98facdf7..378e4b08907 100644
--- a/src/mongo/db/exec/working_set_test.cpp
+++ b/src/mongo/db/exec/working_set_test.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/exec/working_set.h"
-#include "mongo/db/json.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/json.h"
#include "mongo/db/storage/snapshot.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
diff --git a/src/mongo/db/field_parser_test.cpp b/src/mongo/db/field_parser_test.cpp
index 2e9027362c8..428699484b9 100644
--- a/src/mongo/db/field_parser_test.cpp
+++ b/src/mongo/db/field_parser_test.cpp
@@ -26,9 +26,9 @@
* it in the license file.
*/
+#include <map>
#include <string>
#include <vector>
-#include <map>
#include "mongo/db/field_parser.h"
#include "mongo/db/jsobj.h"
@@ -78,7 +78,9 @@ protected:
valLong = 1LL;
doc = BSON(aBool(valBool) << anArray(valArray) << anObj(valObj) << aDate(valDate)
- << aString(valString) << anOID(valOID) << aLong(valLong));
+ << aString(valString)
+ << anOID(valOID)
+ << aLong(valLong));
}
void tearDown() {}
@@ -315,9 +317,13 @@ TEST(ComplexExtraction, GetObjectMap) {
BSONObjBuilder bob;
bob << mapField() << BSON("a" << BSON("a"
- << "a") << "b" << BSON("b"
- << "b") << "c" << BSON("c"
- << "c"));
+ << "a")
+ << "b"
+ << BSON("b"
+ << "b")
+ << "c"
+ << BSON("c"
+ << "c"));
BSONObj obj = bob.obj();
map<string, BSONObj> parsedMap;
@@ -342,7 +348,9 @@ TEST(ComplexExtraction, GetBadMap) {
BSONObjBuilder bob;
bob << mapField() << BSON("a"
<< "a"
- << "b" << 123 << "c"
+ << "b"
+ << 123
+ << "c"
<< "c");
BSONObj obj = bob.obj();
@@ -421,7 +429,9 @@ TEST(ComplexExtraction, GetBadNestedMap) {
BSONObj nestedMapObj = BSON("a"
<< "a"
- << "b" << 123 << "c"
+ << "b"
+ << 123
+ << "c"
<< "c");
BSONObjBuilder bob;
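The reflowed tests in this file all build documents with the BSON() macro, which wraps a temporary BSONObjBuilder: alternating string << value pairs become field name and field value, and nested BSON() calls become subdocuments. A short sketch of the map object under construction above:

// Builds {a: {a: "a"}, b: {b: "b"}, c: {c: "c"}}
BSONObj m = BSON("a" << BSON("a" << "a") << "b" << BSON("b" << "b") << "c"
                 << BSON("c" << "c"));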
diff --git a/src/mongo/db/ftdc/compressor_test.cpp b/src/mongo/db/ftdc/compressor_test.cpp
index 6950dd93f66..688197a392a 100644
--- a/src/mongo/db/ftdc/compressor_test.cpp
+++ b/src/mongo/db/ftdc/compressor_test.cpp
@@ -67,12 +67,18 @@ TEST(FTDCCompressor, TestBasic) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42),
+ << "key1"
+ << 33
+ << "key2"
+ << 42),
Date_t());
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t());
ASSERT_HAS_SPACE(st);
@@ -179,89 +185,141 @@ TEST(FTDCCompressor, TestSchemaChanges) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42));
+ << "key1"
+ << 33
+ << "key2"
+ << 42));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
ASSERT_HAS_SPACE(st);
// Add Field
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45 << "key3" << 47));
+ << "key1"
+ << 34
+ << "key2"
+ << 45
+ << "key3"
+ << 47));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45 << "key3" << 47));
+ << "key1"
+ << 34
+ << "key2"
+ << 45
+ << "key3"
+ << 47));
ASSERT_HAS_SPACE(st);
// Rename field
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key5" << 45 << "key3" << 47));
+ << "key1"
+ << 34
+ << "key5"
+ << 45
+ << "key3"
+ << 47));
ASSERT_SCHEMA_CHANGED(st);
// Change type
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key5"
+ << "key1"
+ << 34
+ << "key5"
<< "45"
- << "key3" << 47));
+ << "key3"
+ << 47));
ASSERT_SCHEMA_CHANGED(st);
// Add Field
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45 << "key3" << 47 << "key7" << 34 << "key9"
- << 45 << "key13" << 47));
+ << "key1"
+ << 34
+ << "key2"
+ << 45
+ << "key3"
+ << 47
+ << "key7"
+ << 34
+ << "key9"
+ << 45
+ << "key13"
+ << 47));
ASSERT_SCHEMA_CHANGED(st);
// Remove Field
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << 34 << "key9" << 45 << "key13" << 47));
+ << "key7"
+ << 34
+ << "key9"
+ << 45
+ << "key13"
+ << 47));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << 34 << "key9" << 45 << "key13" << 47));
+ << "key7"
+ << 34
+ << "key9"
+ << 45
+ << "key13"
+ << 47));
ASSERT_HAS_SPACE(st);
// Start new batch
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << 5));
+ << "key7"
+ << 5));
ASSERT_SCHEMA_CHANGED(st);
// Change field to object
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << BSON( // nested object
- "a" << 1)));
+ << "key7"
+ << BSON( // nested object
+ "a" << 1)));
ASSERT_SCHEMA_CHANGED(st);
// Change field from object to number
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << 7));
+ << "key7"
+ << 7));
ASSERT_SCHEMA_CHANGED(st);
// Change field from number to array
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << BSON_ARRAY(13 << 17)));
+ << "key7"
+ << BSON_ARRAY(13 << 17)));
ASSERT_SCHEMA_CHANGED(st);
// Change field from array to number
st = c.addSample(BSON("name"
<< "joe"
- << "key7" << 19));
+ << "key7"
+ << 19));
ASSERT_SCHEMA_CHANGED(st);
@@ -288,15 +346,24 @@ TEST(FTDCCompressor, TestNumbersCompat) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42LL));
+ << "key1"
+ << 33
+ << "key2"
+ << 42LL));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34LL << "key2" << 45.0f));
+ << "key1"
+ << 34LL
+ << "key2"
+ << 45.0f));
ASSERT_HAS_SPACE(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << static_cast<char>(32) << "key2" << 45.0F));
+ << "key1"
+ << static_cast<char>(32)
+ << "key2"
+ << 45.0F));
ASSERT_HAS_SPACE(st);
}
@@ -320,31 +387,49 @@ TEST(FTDCCompressor, Types) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42LL));
+ << "key1"
+ << 33
+ << "key2"
+ << 42LL));
ASSERT_HAS_SPACE(st);
const char bytes[] = {0x1, 0x2, 0x3};
- BSONObj o = BSON("created" << DATENOW // date_t
- << "null" << BSONNULL // { a : null }
- << "undefined" << BSONUndefined // { a : undefined }
- << "obj" << BSON( // nested object
- "a"
- << "abc"
- << "b" << 123LL) << "foo"
+ BSONObj o = BSON("created" << DATENOW // date_t
+ << "null"
+ << BSONNULL // { a : null }
+ << "undefined"
+ << BSONUndefined // { a : undefined }
+ << "obj"
+ << BSON( // nested object
+ "a"
+ << "abc"
+ << "b"
+ << 123LL)
+ << "foo"
<< BSON_ARRAY("bar"
<< "baz"
- << "qux") // array of strings
- << "foo2" << BSON_ARRAY(5 << 6 << 7) // array of ints
- << "bindata" << BSONBinData(&bytes[0], 3, bdtCustom) // bindata
- << "oid" << OID("010203040506070809101112") // oid
- << "bool" << true // bool
- << "regex" << BSONRegEx("mongodb") // regex
- << "ref" << BSONDBRef("c", OID("010203040506070809101112")) // ref
- << "code" << BSONCode("func f() { return 1; }") // code
- << "codewscope" << BSONCodeWScope("func f() { return 1; }",
- BSON("c" << true)) // codew
- << "minkey" << MINKEY // minkey
- << "maxkey" << MAXKEY // maxkey
+ << "qux") // array of strings
+ << "foo2"
+ << BSON_ARRAY(5 << 6 << 7) // array of ints
+ << "bindata"
+ << BSONBinData(&bytes[0], 3, bdtCustom) // bindata
+ << "oid"
+ << OID("010203040506070809101112") // oid
+ << "bool"
+ << true // bool
+ << "regex"
+ << BSONRegEx("mongodb") // regex
+ << "ref"
+ << BSONDBRef("c", OID("010203040506070809101112")) // ref
+ << "code"
+ << BSONCode("func f() { return 1; }") // code
+ << "codewscope"
+ << BSONCodeWScope("func f() { return 1; }",
+ BSON("c" << true)) // codew
+ << "minkey"
+ << MINKEY // minkey
+ << "maxkey"
+ << MAXKEY // maxkey
);
st = c.addSample(o);
@@ -355,11 +440,17 @@ TEST(FTDCCompressor, Types) {
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34LL << "key2" << 45.0f));
+ << "key1"
+ << 34LL
+ << "key2"
+ << 45.0f));
ASSERT_SCHEMA_CHANGED(st);
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << static_cast<char>(32) << "key2" << 45.0F));
+ << "key1"
+ << static_cast<char>(32)
+ << "key2"
+ << 45.0F));
ASSERT_HAS_SPACE(st);
}
@@ -371,25 +462,37 @@ TEST(FTDCCompressor, TestFull) {
auto st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42));
+ << "key1"
+ << 33
+ << "key2"
+ << 42));
ASSERT_HAS_SPACE(st);
for (size_t i = 0; i != FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << static_cast<long long int>(i * j) << "key2" << 45));
+ << "key1"
+ << static_cast<long long int>(i * j)
+ << "key2"
+ << 45));
ASSERT_HAS_SPACE(st);
}
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
ASSERT_FULL(st);
// Add Value
st = c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
ASSERT_HAS_SPACE(st);
}
}
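For readers skimming the churn: these tests exercise the rule that FTDCCompressor delta-compresses consecutive samples only while field names, order, and numeric kinds match the reference document. A hypothetical usage sketch (the constructor shape is an assumption; ASSERT_HAS_SPACE, ASSERT_SCHEMA_CHANGED, and ASSERT_FULL are local test helpers, not public API):

FTDCConfig config;          // assumed default-constructible, as in TestFull above
FTDCCompressor c(&config);  // hypothetical constructor shape
// Same schema, new values: the sample is delta-compressed into the current chunk.
auto st = c.addSample(BSON("name" << "joe" << "key1" << 33 << "key2" << 42), Date_t());
st = c.addSample(BSON("name" << "joe" << "key1" << 34 << "key2" << 45), Date_t());
// Adding, renaming, or retyping a field breaks the reference schema and starts a new chunk.
st = c.addSample(BSON("name" << "joe" << "key1" << 34 << "key3" << 47), Date_t());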
diff --git a/src/mongo/db/ftdc/file_manager.cpp b/src/mongo/db/ftdc/file_manager.cpp
index dbefc3fd451..146ad60120c 100644
--- a/src/mongo/db/ftdc/file_manager.cpp
+++ b/src/mongo/db/ftdc/file_manager.cpp
@@ -71,8 +71,8 @@ StatusWith<std::unique_ptr<FTDCFileManager>> FTDCFileManager::create(
boost::filesystem::create_directories(dir, ec);
if (ec) {
return {ErrorCodes::NonExistentPath,
- str::stream() << "\'" << dir.generic_string()
- << "\' could not be created: " << ec.message()};
+ str::stream() << "\'" << dir.generic_string() << "\' could not be created: "
+ << ec.message()};
}
}
@@ -241,7 +241,8 @@ FTDCFileManager::recoverInterimFile() {
log() << "Unclean full-time diagnostic data capture shutdown detected, found interim file, "
"but failed "
"to open it, some "
- "metrics may have been lost. " << s;
+ "metrics may have been lost. "
+ << s;
// Note: We ignore any actual errors as reading from the interim files is a best-effort
return docs;
@@ -258,7 +259,8 @@ FTDCFileManager::recoverInterimFile() {
if (!m.isOK() || !docs.empty()) {
log() << "Unclean full-time diagnostic data capture shutdown detected, found interim file, "
"some "
- "metrics may have been lost. " << m.getStatus();
+ "metrics may have been lost. "
+ << m.getStatus();
}
// Note: We ignore any actual errors as reading from the interim files is a best-effort
diff --git a/src/mongo/db/ftdc/file_manager_test.cpp b/src/mongo/db/ftdc/file_manager_test.cpp
index 4d10c659bbc..6c2e5c220a6 100644
--- a/src/mongo/db/ftdc/file_manager_test.cpp
+++ b/src/mongo/db/ftdc/file_manager_test.cpp
@@ -28,8 +28,8 @@
#include "mongo/platform/basic.h"
-#include <boost/filesystem.hpp>
#include <algorithm>
+#include <boost/filesystem.hpp>
#include <iostream>
#include <string>
@@ -69,34 +69,45 @@ TEST(FTDCFileManagerTest, TestFull) {
// Test a large numbers of zeros, and incremental numbers in a full buffer
for (int j = 0; j < 10; j++) {
- ASSERT_OK(
- mgr->writeSampleAndRotateIfNeeded(client,
- BSON("name"
- << "joe"
- << "key1" << 3230792343LL << "key2" << 235135),
- Date_t()));
+ ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
+ BSON("name"
+ << "joe"
+ << "key1"
+ << 3230792343LL
+ << "key2"
+ << 235135),
+ Date_t()));
for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
- ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(
- client,
- BSON("name"
- << "joe"
- << "key1" << static_cast<long long int>(i * j * 37) << "key2"
- << static_cast<long long int>(i * (645 << j))),
- Date_t()));
+ ASSERT_OK(
+ mgr->writeSampleAndRotateIfNeeded(client,
+ BSON("name"
+ << "joe"
+ << "key1"
+ << static_cast<long long int>(i * j * 37)
+ << "key2"
+ << static_cast<long long int>(i *
+ (645 << j))),
+ Date_t()));
}
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t()));
}
@@ -161,7 +172,9 @@ TEST(FTDCFileManagerTest, TestNormalRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 3230792343LL << "key2"
+ << "key1"
+ << 3230792343LL
+ << "key2"
<< 235135),
Date_t()));
@@ -171,7 +184,9 @@ TEST(FTDCFileManagerTest, TestNormalRestart) {
client,
BSON("name"
<< "joe"
- << "key1" << static_cast<long long int>(i * j * 37) << "key2"
+ << "key1"
+ << static_cast<long long int>(i * j * 37)
+ << "key2"
<< static_cast<long long int>(i * (645 << j))),
Date_t()));
}
@@ -179,14 +194,20 @@ TEST(FTDCFileManagerTest, TestNormalRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t()));
}
@@ -221,7 +242,9 @@ TEST(FTDCFileManagerTest, TestCorruptCrashRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 3230792343LL << "key2"
+ << "key1"
+ << 3230792343LL
+ << "key2"
<< 235135),
Date_t()));
@@ -231,7 +254,9 @@ TEST(FTDCFileManagerTest, TestCorruptCrashRestart) {
client,
BSON("name"
<< "joe"
- << "key1" << static_cast<long long int>(i * j * 37) << "key2"
+ << "key1"
+ << static_cast<long long int>(i * j * 37)
+ << "key2"
<< static_cast<long long int>(i * (645 << j))),
Date_t()));
}
@@ -239,14 +264,20 @@ TEST(FTDCFileManagerTest, TestCorruptCrashRestart) {
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t()));
// Add Value
ASSERT_OK(mgr->writeSampleAndRotateIfNeeded(client,
BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45),
+ << "key1"
+ << 34
+ << "key2"
+ << 45),
Date_t()));
}
@@ -277,14 +308,23 @@ TEST(FTDCFileManagerTest, TestNormalCrashInterim) {
BSONObj mdoc1 = BSON("name"
<< "some_metadata"
- << "key1" << 34 << "something" << 98);
+ << "key1"
+ << 34
+ << "something"
+ << 98);
BSONObj sdoc1 = BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45);
+ << "key1"
+ << 34
+ << "key2"
+ << 45);
BSONObj sdoc2 = BSON("name"
<< "joe"
- << "key3" << 34 << "key5" << 45);
+ << "key3"
+ << 34
+ << "key5"
+ << 45);
boost::filesystem::path fileOut;
diff --git a/src/mongo/db/ftdc/file_reader.cpp b/src/mongo/db/ftdc/file_reader.cpp
index 55af92327d5..54b79151958 100644
--- a/src/mongo/db/ftdc/file_reader.cpp
+++ b/src/mongo/db/ftdc/file_reader.cpp
@@ -194,7 +194,8 @@ StatusWith<BSONObj> FTDCFileReader::readDocument() {
if (readSize != _stream.gcount()) {
return {ErrorCodes::FileStreamFailed,
str::stream() << "Failed to read " << readSize << " bytes from file \'"
- << _file.generic_string() << "\'"};
+ << _file.generic_string()
+ << "\'"};
}
ConstDataRange cdr(_buffer.data(), _buffer.data() + bsonLength);
diff --git a/src/mongo/db/ftdc/file_writer_test.cpp b/src/mongo/db/ftdc/file_writer_test.cpp
index 6555a13c115..00124eb2c36 100644
--- a/src/mongo/db/ftdc/file_writer_test.cpp
+++ b/src/mongo/db/ftdc/file_writer_test.cpp
@@ -56,10 +56,16 @@ TEST(FTDCFileTest, TestFileBasicMetadata) {
BSONObj doc1 = BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45);
+ << "key1"
+ << 34
+ << "key2"
+ << 45);
BSONObj doc2 = BSON("name"
<< "joe"
- << "key3" << 34 << "key5" << 45);
+ << "key3"
+ << 34
+ << "key5"
+ << 45);
FTDCConfig config;
FTDCFileWriter writer(&config);
@@ -101,10 +107,16 @@ TEST(FTDCFileTest, TestFileBasicCompress) {
BSONObj doc1 = BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45);
+ << "key1"
+ << 34
+ << "key2"
+ << 45);
BSONObj doc2 = BSON("name"
<< "joe"
- << "key3" << 34 << "key5" << 45);
+ << "key3"
+ << 34
+ << "key5"
+ << 45);
FTDCConfig config;
FTDCFileWriter writer(&config);
@@ -180,41 +192,69 @@ TEST(FTDCFileTest, TestSchemaChanges) {
c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42));
+ << "key1"
+ << 33
+ << "key2"
+ << 42));
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
// Add Value
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45 << "key3" << 47));
+ << "key1"
+ << 34
+ << "key2"
+ << 45
+ << "key3"
+ << 47));
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45 << "key3" << 47));
+ << "key1"
+ << 34
+ << "key2"
+ << 45
+ << "key3"
+ << 47));
// Rename field
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key5" << 45 << "key3" << 47));
+ << "key1"
+ << 34
+ << "key5"
+ << 45
+ << "key3"
+ << 47));
// Change type
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key5"
+ << "key1"
+ << 34
+ << "key5"
<< "45"
- << "key3" << 47));
+ << "key3"
+ << 47));
// RemoveField
c.addSample(BSON("name"
<< "joe"
<< "key5"
<< "45"
- << "key3" << 47));
+ << "key3"
+ << 47));
}
// Test a full buffer
@@ -225,22 +265,34 @@ TEST(FTDCFileTest, TestFull) {
c.addSample(BSON("name"
<< "joe"
- << "key1" << 33 << "key2" << 42));
+ << "key1"
+ << 33
+ << "key2"
+ << 42));
for (size_t i = 0; i <= FTDCConfig::kMaxSamplesPerArchiveMetricChunkDefault - 2; i++) {
c.addSample(BSON("name"
<< "joe"
- << "key1" << static_cast<long long int>(i * j) << "key2" << 45));
+ << "key1"
+ << static_cast<long long int>(i * j)
+ << "key2"
+ << 45));
}
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
// Add Value
c.addSample(BSON("name"
<< "joe"
- << "key1" << 34 << "key2" << 45));
+ << "key1"
+ << 34
+ << "key2"
+ << 45));
}
}
diff --git a/src/mongo/db/ftdc/ftdc_test.cpp b/src/mongo/db/ftdc/ftdc_test.cpp
index b09aa2b6ef5..3a010ab32c5 100644
--- a/src/mongo/db/ftdc/ftdc_test.cpp
+++ b/src/mongo/db/ftdc/ftdc_test.cpp
@@ -109,8 +109,8 @@ void createDirectoryClean(const boost::filesystem::path& dir) {
boost::filesystem::create_directory(dir);
}
-MONGO_INITIALIZER_WITH_PREREQUISITES(FTDCTestInit,
- ("ThreadNameInitializer"))(InitializerContext* context) {
+MONGO_INITIALIZER_WITH_PREREQUISITES(FTDCTestInit, ("ThreadNameInitializer"))
+(InitializerContext* context) {
setGlobalServiceContext(stdx::make_unique<ServiceContextNoop>());
getGlobalServiceContext()->setFastClockSource(stdx::make_unique<ClockSourceMock>());
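The odd-looking split above is deliberate: MONGO_INITIALIZER_WITH_PREREQUISITES(...) expands to the head of a function definition, so the (InitializerContext* context) parameter list and the braced body belong to that function, and clang-format now breaks between the macro and its parameters. A sketch with a hypothetical initializer name:

MONGO_INITIALIZER_WITH_PREREQUISITES(MyTestInit, ("ThreadNameInitializer"))
(InitializerContext* context) {
    // Runs at startup after its prerequisites; must return a Status.
    return Status::OK();
}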
diff --git a/src/mongo/db/ftdc/util.cpp b/src/mongo/db/ftdc/util.cpp
index ea87e6b58e5..d56eb8ca380 100644
--- a/src/mongo/db/ftdc/util.cpp
+++ b/src/mongo/db/ftdc/util.cpp
@@ -152,7 +152,8 @@ StatusWith<bool> extractMetricsFromDocument(const BSONObj& referenceDoc,
!(referenceElement.isNumber() == true &&
currentElement.isNumber() == referenceElement.isNumber())) {
LOG(4) << "full-time diagnostic data capture schema change: field type change for "
- "field '" << referenceElement.fieldNameStringData() << "' from '"
+ "field '"
+ << referenceElement.fieldNameStringData() << "' from '"
<< static_cast<int>(referenceElement.type()) << "' to '"
<< static_cast<int>(currentElement.type()) << "'";
matches = false;
@@ -371,7 +372,9 @@ StatusWith<FTDCType> getBSONDocumentType(const BSONObj& obj) {
static_cast<FTDCType>(value) != FTDCType::kMetadata) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << std::string(kFTDCTypeField)
- << "' is not an expected value, found '" << value << "'"};
+ << "' is not an expected value, found '"
+ << value
+ << "'"};
}
return {static_cast<FTDCType>(value)};
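Both hunks in this file reflow the same idiom: returning a StatusWith<T> through a braced initializer, where {code, reason} builds the error state and {value} the success state. A minimal sketch under that assumption (kMetricChunk is assumed here to be the other FTDCType enumerator):

StatusWith<FTDCType> toFTDCType(long long value) {
    if (static_cast<FTDCType>(value) != FTDCType::kMetricChunk &&
        static_cast<FTDCType>(value) != FTDCType::kMetadata) {
        return {ErrorCodes::BadValue,
                str::stream() << "not an expected value, found '" << value << "'"};
    }
    return {static_cast<FTDCType>(value)};
}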
diff --git a/src/mongo/db/ftdc/varint.h b/src/mongo/db/ftdc/varint.h
index 0dd4c73fb1b..beb0313b9ac 100644
--- a/src/mongo/db/ftdc/varint.h
+++ b/src/mongo/db/ftdc/varint.h
@@ -31,8 +31,8 @@
#include <cstddef>
#include <cstdint>
-#include "mongo/base/status.h"
#include "mongo/base/data_type.h"
+#include "mongo/base/status.h"
namespace mongo {
/**
diff --git a/src/mongo/db/fts/fts_element_iterator.cpp b/src/mongo/db/fts/fts_element_iterator.cpp
index 4df642dc66a..0e2d0b8d463 100644
--- a/src/mongo/db/fts/fts_element_iterator.cpp
+++ b/src/mongo/db/fts/fts_element_iterator.cpp
@@ -47,7 +47,8 @@ extern const double MAX_WEIGHT;
std::ostream& operator<<(std::ostream& os, FTSElementIterator::FTSIteratorFrame& frame) {
BSONObjIterator it = frame._it;
return os << "FTSIteratorFrame["
- " element=" << (*it).toString() << ", _language=" << frame._language->str()
+ " element="
+ << (*it).toString() << ", _language=" << frame._language->str()
<< ", _parentPath=" << frame._parentPath << ", _isArray=" << frame._isArray << "]";
}
diff --git a/src/mongo/db/fts/fts_index_format.cpp b/src/mongo/db/fts/fts_index_format.cpp
index 30814b54d78..c9698d59c93 100644
--- a/src/mongo/db/fts/fts_index_format.cpp
+++ b/src/mongo/db/fts/fts_index_format.cpp
@@ -139,7 +139,8 @@ void FTSIndexFormat::getKeys(const FTSSpec& spec, const BSONObj& obj, BSONObjSet
uassert(16732,
mongoutils::str::stream() << "too many unique keys for a single document to"
- << " have a text index, max is " << term_freqs.size()
+ << " have a text index, max is "
+ << term_freqs.size()
<< obj["_id"],
term_freqs.size() <= 400000);
@@ -173,7 +174,9 @@ void FTSIndexFormat::getKeys(const FTSSpec& spec, const BSONObj& obj, BSONObjSet
uassert(16733,
mongoutils::str::stream()
<< "trying to index text where term list is too big, max is "
- << MaxKeyBSONSizeMB << "mb " << obj["_id"],
+ << MaxKeyBSONSizeMB
+ << "mb "
+ << obj["_id"],
keyBSONSize <= (MaxKeyBSONSizeMB * 1024 * 1024));
}
}
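Both hunks here reflow uassert() calls. A minimal sketch of the idiom, assuming the usual uassert(code, msg, expr) macro: the message expression is evaluated only when expr is false, so assembling it with str::stream costs nothing on the success path.

void checkKeyCount(size_t nKeys) {
    uassert(16732,
            mongoutils::str::stream() << "too many unique keys for a single document to"
                                      << " have a text index, max is " << 400000,
            nKeys <= 400000);
}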
diff --git a/src/mongo/db/fts/fts_index_format_test.cpp b/src/mongo/db/fts/fts_index_format_test.cpp
index af353d51f26..03eb7406a79 100644
--- a/src/mongo/db/fts/fts_index_format_test.cpp
+++ b/src/mongo/db/fts/fts_index_format_test.cpp
@@ -36,9 +36,9 @@
#include "mongo/db/fts/fts_index_format.h"
#include "mongo/db/fts/fts_spec.h"
+#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
-#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -67,12 +67,14 @@ TEST(FTSIndexFormat, Simple1) {
TEST(FTSIndexFormat, ExtraBack1) {
FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
<< "text"
- << "x" << 1)))));
+ << "x"
+ << 1)))));
BSONObjSet keys;
FTSIndexFormat::getKeys(spec,
BSON("data"
<< "cat"
- << "x" << 5),
+ << "x"
+ << 5),
&keys);
ASSERT_EQUALS(1U, keys.size());
@@ -91,7 +93,8 @@ TEST(FTSIndexFormat, ExtraFront1) {
FTSIndexFormat::getKeys(spec,
BSON("data"
<< "cat"
- << "x" << 5),
+ << "x"
+ << 5),
&keys);
ASSERT_EQUALS(1U, keys.size());
@@ -152,9 +155,10 @@ void assertEqualsIndexKeys(std::set<std::string>& expectedKeys, const BSONObjSet
* Terms that are too long are not truncated in version 1.
*/
TEST(FTSIndexFormat, LongWordsTextIndexVersion1) {
- FTSSpec spec(
- assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
- << "text") << "textIndexVersion" << 1))));
+ FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
+ << "text")
+ << "textIndexVersion"
+ << 1))));
BSONObjSet keys;
string longPrefix(1024U, 'a');
// "aaa...aaacat"
@@ -181,9 +185,10 @@ TEST(FTSIndexFormat, LongWordsTextIndexVersion1) {
* characters of the term to form the index key.
*/
TEST(FTSIndexFormat, LongWordTextIndexVersion2) {
- FTSSpec spec(
- assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
- << "text") << "textIndexVersion" << 2))));
+ FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
+ << "text")
+ << "textIndexVersion"
+ << 2))));
BSONObjSet keys;
string longPrefix(1024U, 'a');
// "aaa...aaacat"
@@ -215,9 +220,10 @@ TEST(FTSIndexFormat, LongWordTextIndexVersion2) {
* characters of the term to form the index key.
*/
TEST(FTSIndexFormat, LongWordTextIndexVersion3) {
- FTSSpec spec(
- assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
- << "text") << "textIndexVersion" << 3))));
+ FTSSpec spec(assertGet(FTSSpec::fixSpec(BSON("key" << BSON("data"
+ << "text")
+ << "textIndexVersion"
+ << 3))));
BSONObjSet keys;
string longPrefix(1024U, 'a');
// "aaa...aaacat"
diff --git a/src/mongo/db/fts/fts_language.cpp b/src/mongo/db/fts/fts_language.cpp
index b01e9de6508..f52002b9be0 100644
--- a/src/mongo/db/fts/fts_language.cpp
+++ b/src/mongo/db/fts/fts_language.cpp
@@ -279,10 +279,11 @@ StatusWithFTSLanguage FTSLanguage::make(StringData langName, TextIndexVersion te
if (it == languageMap->end()) {
// TEXT_INDEX_VERSION_2 and above reject unrecognized language strings.
- Status status = Status(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "unsupported language: \"" << langName
- << "\" for text index version " << textIndexVersion);
+ Status status =
+ Status(ErrorCodes::BadValue,
+ mongoutils::str::stream() << "unsupported language: \"" << langName
+ << "\" for text index version "
+ << textIndexVersion);
return StatusWithFTSLanguage(status);
}
diff --git a/src/mongo/db/fts/fts_language.h b/src/mongo/db/fts/fts_language.h
index 062a3255ba1..49da2439529 100644
--- a/src/mongo/db/fts/fts_language.h
+++ b/src/mongo/db/fts/fts_language.h
@@ -30,11 +30,11 @@
#pragma once
+#include "mongo/base/status_with.h"
#include "mongo/db/fts/fts_basic_phrase_matcher.h"
#include "mongo/db/fts/fts_phrase_matcher.h"
#include "mongo/db/fts/fts_unicode_phrase_matcher.h"
#include "mongo/db/fts/fts_util.h"
-#include "mongo/base/status_with.h"
#include <string>
diff --git a/src/mongo/db/fts/fts_language_test.cpp b/src/mongo/db/fts/fts_language_test.cpp
index 87e37272850..3049d8d4af8 100644
--- a/src/mongo/db/fts/fts_language_test.cpp
+++ b/src/mongo/db/fts/fts_language_test.cpp
@@ -28,9 +28,9 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
#include "mongo/db/fts/fts_language.h"
#include "mongo/db/fts/fts_spec.h"
+#include "mongo/platform/basic.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
diff --git a/src/mongo/db/fts/fts_matcher.cpp b/src/mongo/db/fts/fts_matcher.cpp
index 6a782e730cc..b3107418542 100644
--- a/src/mongo/db/fts/fts_matcher.cpp
+++ b/src/mongo/db/fts/fts_matcher.cpp
@@ -30,10 +30,10 @@
#include "mongo/platform/basic.h"
+#include "mongo/db/fts/fts_element_iterator.h"
#include "mongo/db/fts/fts_matcher.h"
#include "mongo/db/fts/fts_phrase_matcher.h"
#include "mongo/db/fts/fts_tokenizer.h"
-#include "mongo/db/fts/fts_element_iterator.h"
namespace mongo {
diff --git a/src/mongo/db/fts/fts_query_impl.cpp b/src/mongo/db/fts/fts_query_impl.cpp
index bc879375e88..040333de5c7 100644
--- a/src/mongo/db/fts/fts_query_impl.cpp
+++ b/src/mongo/db/fts/fts_query_impl.cpp
@@ -32,12 +32,12 @@
#include "mongo/db/fts/fts_query_impl.h"
-#include "mongo/db/fts/fts_spec.h"
#include "mongo/db/fts/fts_query_parser.h"
+#include "mongo/db/fts/fts_spec.h"
#include "mongo/db/fts/fts_tokenizer.h"
+#include "mongo/stdx/memory.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/stringutils.h"
-#include "mongo/stdx/memory.h"
namespace mongo {
diff --git a/src/mongo/db/fts/fts_query_impl_test.cpp b/src/mongo/db/fts/fts_query_impl_test.cpp
index 25f0f0fd211..43585e8a982 100644
--- a/src/mongo/db/fts/fts_query_impl_test.cpp
+++ b/src/mongo/db/fts/fts_query_impl_test.cpp
@@ -159,9 +159,8 @@ TEST(FTSQueryImpl, Phrase1) {
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
ASSERT_EQUALS(q.toBSON(),
- fromjson(
- "{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
- "test'], negatedPhrases: []}"));
+ fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
+ "test'], negatedPhrases: []}"));
ASSERT_TRUE(q.getTermsForBounds() == q.getPositiveTerms());
}
@@ -197,9 +196,8 @@ TEST(FTSQueryImpl, HyphenSurroundedByWhitespaceBeforePhraseShouldNotNegateEntire
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
ASSERT_EQUALS(q.toBSON(),
- fromjson(
- "{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
- "test'], negatedPhrases: []}"));
+ fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
+ "test'], negatedPhrases: []}"));
}
TEST(FTSQueryImpl, HyphenBetweenTermAndPhraseShouldBeTreatedAsDelimiter) {
@@ -210,9 +208,8 @@ TEST(FTSQueryImpl, HyphenBetweenTermAndPhraseShouldBeTreatedAsDelimiter) {
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
ASSERT_EQUALS(q.toBSON(),
- fromjson(
- "{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
- "test'], negatedPhrases: []}"));
+ fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], phrases: ['phrase "
+ "test'], negatedPhrases: []}"));
}
TEST(FTSQueryImpl, HyphenShouldNegateAllSucceedingPhrasesSeparatedByHyphens) {
@@ -223,9 +220,8 @@ TEST(FTSQueryImpl, HyphenShouldNegateAllSucceedingPhrasesSeparatedByHyphens) {
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
ASSERT_EQUALS(q.toBSON(),
- fromjson(
- "{terms: ['anoth', 'phrase'], negatedTerms: [], phrases: ['another "
- "phrase'], negatedPhrases: ['really fun', 'stuff here']}"));
+ fromjson("{terms: ['anoth', 'phrase'], negatedTerms: [], phrases: ['another "
+ "phrase'], negatedPhrases: ['really fun', 'stuff here']}"));
}
TEST(FTSQueryImpl, CaseSensitiveOption) {
@@ -309,9 +305,8 @@ TEST(FTSQueryImpl, Mix1) {
q.setDiacriticSensitive(false);
ASSERT(q.parse(TEXT_INDEX_VERSION_3).isOK());
ASSERT_EQUALS(q.toBSON(),
- fromjson(
- "{terms: ['industri'], negatedTerms: ['melbourn', 'physic'], phrases: "
- "['industry'], negatedPhrases: []}"));
+ fromjson("{terms: ['industri'], negatedTerms: ['melbourn', 'physic'], phrases: "
+ "['industry'], negatedPhrases: []}"));
}
TEST(FTSQueryImpl, NegPhrase2) {
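Every hunk in this file is the same reflow: fromjson() (from mongo/db/json.h) parses relaxed JSON into a BSONObj, and the long literal is split via C++ adjacent-string-literal concatenation, which clang-format now keeps aligned with the argument. A sketch:

#include "mongo/db/json.h"

BSONObj expected = fromjson("{terms: ['fun', 'phrase', 'test'], negatedTerms: [], "
                            "phrases: ['phrase test'], negatedPhrases: []}");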
diff --git a/src/mongo/db/fts/fts_spec.cpp b/src/mongo/db/fts/fts_spec.cpp
index 05248d157b7..0c03ed8aa18 100644
--- a/src/mongo/db/fts/fts_spec.cpp
+++ b/src/mongo/db/fts/fts_spec.cpp
@@ -58,7 +58,7 @@ const std::string moduleDefaultLanguage("english");
bool validateOverride(const string& override) {
// The override field can't be empty, can't be prefixed with a dollar sign, and
// can't contain a dot.
- return !override.empty() && override[0] != '$' && override.find('.') == std::string::npos;
+ return !override.empty()&& override[0] != '$' && override.find('.') == std::string::npos;
}
}
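Restated as a standalone predicate, the rule above: a language-override field name is valid only if it is non-empty, does not start with '$', and contains no '.'. (The space clang-format drops after empty() is cosmetic; ()&& is still a plain logical-and.)

#include <string>

bool isValidOverrideName(const std::string& name) {
    return !name.empty() && name[0] != '$' && name.find('.') == std::string::npos;
}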
@@ -90,8 +90,12 @@ FTSSpec::FTSSpec(const BSONObj& indexInfo) {
msgasserted(17364,
str::stream() << "attempt to use unsupported textIndexVersion "
<< textIndexVersionElt.numberInt()
- << "; versions supported: " << TEXT_INDEX_VERSION_3 << ", "
- << TEXT_INDEX_VERSION_2 << ", " << TEXT_INDEX_VERSION_1);
+ << "; versions supported: "
+ << TEXT_INDEX_VERSION_3
+ << ", "
+ << TEXT_INDEX_VERSION_2
+ << ", "
+ << TEXT_INDEX_VERSION_1);
}
// Initialize _defaultLanguage. Note that the FTSLanguage constructor requires
@@ -401,7 +405,9 @@ StatusWith<BSONObj> FTSSpec::fixSpec(const BSONObj& spec) {
if (i->second <= 0 || i->second >= MAX_WORD_WEIGHT) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "text index weight must be in the exclusive interval (0,"
- << MAX_WORD_WEIGHT << ") but found: " << i->second};
+ << MAX_WORD_WEIGHT
+ << ") but found: "
+ << i->second};
}
// Verify weight refers to a valid field.
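The weight check reflowed above enforces an interval that is exclusive at both ends. A sketch (the real MAX_WORD_WEIGHT constant is defined elsewhere in the FTS code; the value below is a placeholder assumption):

const double kMaxWordWeight = 1.0e9;  // placeholder, not the real constant

bool weightIsStorable(double w) {
    return w > 0 && w < kMaxWordWeight;  // (0, MAX_WORD_WEIGHT), both endpoints excluded
}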
diff --git a/src/mongo/db/fts/fts_spec.h b/src/mongo/db/fts/fts_spec.h
index a00e04f7052..8cd293e70cf 100644
--- a/src/mongo/db/fts/fts_spec.h
+++ b/src/mongo/db/fts/fts_spec.h
@@ -31,8 +31,8 @@
#pragma once
#include <map>
-#include <vector>
#include <string>
+#include <vector>
#include "mongo/base/status_with.h"
#include "mongo/db/fts/fts_language.h"
diff --git a/src/mongo/db/fts/fts_spec_legacy.cpp b/src/mongo/db/fts/fts_spec_legacy.cpp
index 15739d8787d..f660c00f526 100644
--- a/src/mongo/db/fts/fts_spec_legacy.cpp
+++ b/src/mongo/db/fts/fts_spec_legacy.cpp
@@ -241,7 +241,9 @@ StatusWith<BSONObj> FTSSpec::_fixSpecV1(const BSONObj& spec) {
if (i->second <= 0 || i->second >= MAX_WORD_WEIGHT) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "text index weight must be in the exclusive interval (0,"
- << MAX_WORD_WEIGHT << ") but found: " << i->second};
+ << MAX_WORD_WEIGHT
+ << ") but found: "
+ << i->second};
}
b.append(i->first, i->second);
}
diff --git a/src/mongo/db/fts/fts_spec_test.cpp b/src/mongo/db/fts/fts_spec_test.cpp
index 5ecc0109333..3c041cbd363 100644
--- a/src/mongo/db/fts/fts_spec_test.cpp
+++ b/src/mongo/db/fts/fts_spec_test.cpp
@@ -184,7 +184,9 @@ TEST(FTSSpec, ScoreSingleField1) {
BSONObj user = BSON("key" << BSON("title"
<< "text"
<< "text"
- << "text") << "weights" << BSON("title" << 10));
+ << "text")
+ << "weights"
+ << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -202,7 +204,9 @@ TEST(FTSSpec, ScoreMultipleField1) {
BSONObj user = BSON("key" << BSON("title"
<< "text"
<< "text"
- << "text") << "weights" << BSON("title" << 10));
+ << "text")
+ << "weights"
+ << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -243,7 +247,9 @@ TEST(FTSSpec, ScoreRepeatWord) {
BSONObj user = BSON("key" << BSON("title"
<< "text"
<< "text"
- << "text") << "weights" << BSON("title" << 10));
+ << "text")
+ << "weights"
+ << BSON("title" << 10));
FTSSpec spec(assertGet(FTSSpec::fixSpec(user)));
@@ -268,7 +274,8 @@ TEST(FTSSpec, Extra1) {
TEST(FTSSpec, Extra2) {
BSONObj user = BSON("key" << BSON("data"
<< "text"
- << "x" << 1));
+ << "x"
+ << 1));
BSONObj fixed = assertGet(FTSSpec::fixSpec(user));
FTSSpec spec(fixed);
ASSERT_EQUALS(0U, spec.numExtraBefore());
@@ -286,7 +293,8 @@ TEST(FTSSpec, Extra3) {
ASSERT_EQUALS(BSON("x" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1),
+ << "_ftsx"
+ << 1),
fixed["key"].Obj());
ASSERT_EQUALS(BSON("data" << 1), fixed["weights"].Obj());
@@ -512,7 +520,9 @@ TEST(FTSSpec, NestedLanguages_Wildcard) {
// Multi-language test_6: test wildcard spec with override
TEST(FTSSpec, NestedLanguages_WildcardOverride) {
BSONObj indexSpec = BSON("key" << BSON("$**"
- << "text") << "weights" << BSON("d.e.f" << 20));
+ << "text")
+ << "weights"
+ << BSON("d.e.f" << 20));
FTSSpec spec(assertGet(FTSSpec::fixSpec(indexSpec)));
TermFrequencyMap tfm;
diff --git a/src/mongo/db/geo/big_polygon_test.cpp b/src/mongo/db/geo/big_polygon_test.cpp
index 3ac82b03768..26146654f40 100644
--- a/src/mongo/db/geo/big_polygon_test.cpp
+++ b/src/mongo/db/geo/big_polygon_test.cpp
@@ -28,8 +28,8 @@
#include "mongo/db/geo/big_polygon.h"
-#include "mongo/bson/util/builder.h"
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/util/builder.h"
#include "mongo/unittest/unittest.h"
namespace {
@@ -80,7 +80,8 @@ typedef PointBuilder points;
TEST(BigSimplePolygon, Basic) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
// A 10x10 square centered at [0,0]
S2Polygon poly10(loopVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
@@ -93,7 +94,8 @@ TEST(BigSimplePolygon, Basic) {
// A 20x20 square centered at [0,20]
BigSimplePolygon bigPoly20Offset(loop(points() << LatLng(10.0, 30.0) << LatLng(10.0, 10.0)
- << LatLng(-10.0, 10.0) << LatLng(-10.0, 30.0)));
+ << LatLng(-10.0, 10.0)
+ << LatLng(-10.0, 30.0)));
ASSERT_LESS_THAN(bigPoly20Offset.GetArea(), 2 * M_PI);
ASSERT_LESS_THAN(poly10.GetArea(), bigPoly20Offset.GetArea());
@@ -105,15 +107,18 @@ TEST(BigSimplePolygon, BasicWithHole) {
// A 30x30 square centered at [0,0] with a 20X20 hole
vector<S2Loop*> loops;
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0)
+ << LatLng(-15.0, 15.0)));
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
S2Polygon holePoly(&loops);
// A 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0)
+ << LatLng(-8.0, 8.0)));
ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16.Contains(holePoly));
@@ -121,7 +126,8 @@ TEST(BigSimplePolygon, BasicWithHole) {
// A big polygon bigger than the hole.
BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0)
+ << LatLng(-12.0, 12.0)));
ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24.Contains(holePoly));
ASSERT_TRUE(bigPoly24.Intersects(holePoly));
@@ -132,10 +138,12 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
vector<S2Loop*> loops;
// Border
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0)
+ << LatLng(-15.0, 15.0)));
// Hole
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
// Shell
loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
<< LatLng(-5.0, 5.0)));
@@ -143,21 +151,24 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
// A 16X16 square centered at [0,0] containing the shell
BigSimplePolygon bigPoly16(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0)
+ << LatLng(-8.0, 8.0)));
ASSERT_LESS_THAN(bigPoly16.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16.Contains(shellPoly));
ASSERT_TRUE(bigPoly16.Intersects(shellPoly));
// Try a big polygon bigger than the hole.
BigSimplePolygon bigPoly24(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0)
+ << LatLng(-12.0, 12.0)));
ASSERT_LESS_THAN(bigPoly24.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24.Contains(shellPoly));
ASSERT_TRUE(bigPoly24.Intersects(shellPoly));
// Try a big polygon smaller than the shell.
BigSimplePolygon bigPoly8(loop(points() << LatLng(4.0, 4.0) << LatLng(4.0, -4.0)
- << LatLng(-4.0, -4.0) << LatLng(-4.0, 4.0)));
+ << LatLng(-4.0, -4.0)
+ << LatLng(-4.0, 4.0)));
ASSERT_LESS_THAN(bigPoly8.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly8.Contains(shellPoly));
ASSERT_TRUE(bigPoly8.Intersects(shellPoly));
@@ -166,7 +177,8 @@ TEST(BigSimplePolygon, BasicWithHoleAndShell) {
TEST(BigSimplePolygon, BasicComplement) {
// Everything *not* in a 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
bigPoly20Comp.Invert();
// A 10x10 square centered at [0,0]
@@ -179,7 +191,8 @@ TEST(BigSimplePolygon, BasicComplement) {
// A 10x10 square centered at [0,20], contained by bigPoly20Comp
S2Polygon poly10Contained(loopVec(points() << LatLng(25.0, 25.0) << LatLng(25.0, 15.0)
- << LatLng(15.0, 15.0) << LatLng(15.0, 25.0)));
+ << LatLng(15.0, 15.0)
+ << LatLng(15.0, 25.0)));
ASSERT_LESS_THAN(poly10Contained.GetArea(), bigPoly20Comp.GetArea());
ASSERT(bigPoly20Comp.Contains(poly10Contained));
@@ -188,7 +201,8 @@ TEST(BigSimplePolygon, BasicComplement) {
// A 30x30 square centered at [0,0], so that bigPoly20Comp contains its complement entirely,
// which is not allowed by S2.
S2Polygon poly30(loopVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0)
+ << LatLng(-15.0, 15.0)));
ASSERT_LESS_THAN(poly30.GetArea(), bigPoly20Comp.GetArea());
ASSERT_FALSE(bigPoly20Comp.Contains(poly30));
ASSERT_TRUE(bigPoly20Comp.Intersects(poly30));
@@ -197,7 +211,8 @@ TEST(BigSimplePolygon, BasicComplement) {
TEST(BigSimplePolygon, BasicIntersects) {
// Everything *not* in a 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
bigPoly20.Invert();
// A 10x10 square centered at [10,10] (partial overlap)
@@ -212,16 +227,19 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// A 30x30 square centered at [0,0] with a 20X20 hole
vector<S2Loop*> loops;
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0)
+ << LatLng(-15.0, 15.0)));
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
S2Polygon holePoly(&loops);
// 1. BigPolygon doesn't touch holePoly
// Everything *not* in a 40x40 square centered at [0,0]
BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
- << LatLng(-20.0, -20.0) << LatLng(-20.0, 20.0)));
+ << LatLng(-20.0, -20.0)
+ << LatLng(-20.0, 20.0)));
bigPoly40Comp.Invert();
ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40Comp.Contains(holePoly));
@@ -230,7 +248,8 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 2. BigPolygon intersects holePoly
// Everything *not* in a 24X24 square centered at [0,0]
BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0)
+ << LatLng(-12.0, 12.0)));
bigPoly24Comp.Invert();
ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24Comp.Contains(holePoly));
@@ -239,7 +258,8 @@ TEST(BigSimplePolygon, BasicComplementWithHole) {
// 3. BigPolygon contains holePoly
// Everything *not* in a 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0)
+ << LatLng(-8.0, 8.0)));
bigPoly16Comp.Invert();
ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
ASSERT_TRUE(bigPoly16Comp.Contains(holePoly));
@@ -261,10 +281,12 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
vector<S2Loop*> loops;
// Border
loops.push_back(loop(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0)
+ << LatLng(-15.0, 15.0)));
// Hole
loops.push_back(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
// Shell
loops.push_back(loop(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0) << LatLng(-5.0, -5.0)
<< LatLng(-5.0, 5.0)));
@@ -273,7 +295,8 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 1. BigPolygon doesn't touch shellPoly
// Everything *not* in a 40x40 square centered at [0,0]
BigSimplePolygon bigPoly40Comp(loop(points() << LatLng(20.0, 20.0) << LatLng(20.0, -20.0)
- << LatLng(-20.0, -20.0) << LatLng(-20.0, 20.0)));
+ << LatLng(-20.0, -20.0)
+ << LatLng(-20.0, 20.0)));
bigPoly40Comp.Invert();
ASSERT_GREATER_THAN(bigPoly40Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly40Comp.Contains(shellPoly));
@@ -282,7 +305,8 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 2. BigPolygon intersects shellPoly
// Everything *not* in a 24X24 square centered at [0,0]
BigSimplePolygon bigPoly24Comp(loop(points() << LatLng(12.0, 12.0) << LatLng(12.0, -12.0)
- << LatLng(-12.0, -12.0) << LatLng(-12.0, 12.0)));
+ << LatLng(-12.0, -12.0)
+ << LatLng(-12.0, 12.0)));
bigPoly24Comp.Invert();
ASSERT_GREATER_THAN(bigPoly24Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly24Comp.Contains(shellPoly));
@@ -291,7 +315,8 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 3. BigPolygon contains shellPoly's outer ring
// Everything *not* in a 16X16 square centered at [0,0]
BigSimplePolygon bigPoly16Comp(loop(points() << LatLng(8.0, 8.0) << LatLng(8.0, -8.0)
- << LatLng(-8.0, -8.0) << LatLng(-8.0, 8.0)));
+ << LatLng(-8.0, -8.0)
+ << LatLng(-8.0, 8.0)));
bigPoly16Comp.Invert();
ASSERT_GREATER_THAN(bigPoly16Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly16Comp.Contains(shellPoly));
@@ -309,7 +334,8 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
// 5. BigPolygon contain shellPoly (CW)
BigSimplePolygon bigPolyCompOffset(loop(points() << LatLng(6.0, 6.0) << LatLng(6.0, 8.0)
- << LatLng(-6.0, 8.0) << LatLng(-6.0, 6.0)));
+ << LatLng(-6.0, 8.0)
+ << LatLng(-6.0, 6.0)));
ASSERT_GREATER_THAN(bigPolyCompOffset.GetArea(), 2 * M_PI);
ASSERT_TRUE(bigPolyCompOffset.Contains(shellPoly));
ASSERT_TRUE(bigPolyCompOffset.Intersects(shellPoly));
@@ -318,11 +344,13 @@ TEST(BigSimplePolygon, BasicComplementWithHoleAndShell) {
TEST(BigSimplePolygon, BasicWinding) {
// A 20x20 square centered at [0,0] (CCW)
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
// Everything *not* in a 20x20 square centered at [0,0] (CW)
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
- << LatLng(-10.0, -10.0) << LatLng(10.0, -10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(10.0, -10.0)));
ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
@@ -331,11 +359,13 @@ TEST(BigSimplePolygon, BasicWinding) {
TEST(BigSimplePolygon, LineRelations) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0)
+ << LatLng(-5.0, 5.0)));
ASSERT_LESS_THAN(bigPoly20.GetArea(), 2 * M_PI);
ASSERT(bigPoly20.Contains(line10));
@@ -355,12 +385,14 @@ TEST(BigSimplePolygon, LineRelations) {
TEST(BigSimplePolygon, LineRelationsComplement) {
// A 20x20 square centered at [0,0]
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(10.0, -10.0)
- << LatLng(-10.0, -10.0) << LatLng(-10.0, 10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(-10.0, 10.0)));
bigPoly20Comp.Invert();
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0)
+ << LatLng(-5.0, 5.0)));
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly20Comp.Contains(line10));
@@ -373,7 +405,8 @@ TEST(BigSimplePolygon, LineRelationsComplement) {
// A 30x30 line circling [0,0]
S2Polyline line30(pointVec(points() << LatLng(15.0, 15.0) << LatLng(15.0, -15.0)
- << LatLng(-15.0, -15.0) << LatLng(-15.0, 15.0)));
+ << LatLng(-15.0, -15.0)
+ << LatLng(-15.0, 15.0)));
ASSERT_TRUE(bigPoly20Comp.Contains(line30));
ASSERT_TRUE(bigPoly20Comp.Intersects(line30));
}
@@ -381,11 +414,13 @@ TEST(BigSimplePolygon, LineRelationsComplement) {
TEST(BigSimplePolygon, LineRelationsWinding) {
// Everything *not* in a 20x20 square centered at [0,0] (CW winding)
BigSimplePolygon bigPoly20Comp(loop(points() << LatLng(10.0, 10.0) << LatLng(-10.0, 10.0)
- << LatLng(-10.0, -10.0) << LatLng(10.0, -10.0)));
+ << LatLng(-10.0, -10.0)
+ << LatLng(10.0, -10.0)));
// A 10x10 line circling [0,0]
S2Polyline line10(pointVec(points() << LatLng(5.0, 5.0) << LatLng(5.0, -5.0)
- << LatLng(-5.0, -5.0) << LatLng(-5.0, 5.0)));
+ << LatLng(-5.0, -5.0)
+ << LatLng(-5.0, 5.0)));
ASSERT_GREATER_THAN(bigPoly20Comp.GetArea(), 2 * M_PI);
ASSERT_FALSE(bigPoly20Comp.Contains(line10));
@@ -395,11 +430,13 @@ TEST(BigSimplePolygon, LineRelationsWinding) {
TEST(BigSimplePolygon, PolarContains) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0)
+ << LatLng(80.0, -90.0)));
// Square 5 degrees from the north pole [90, 0]
S2Polygon northPoly(loopVec(points() << LatLng(85.0, 0.0) << LatLng(85.0, 90.0)
- << LatLng(85.0, 180.0) << LatLng(85.0, -90.0)));
+ << LatLng(85.0, 180.0)
+ << LatLng(85.0, -90.0)));
ASSERT_LESS_THAN(bigNorthPoly.GetArea(), 2 * M_PI);
ASSERT_LESS_THAN(northPoly.GetArea(), bigNorthPoly.GetArea());
@@ -410,7 +447,8 @@ TEST(BigSimplePolygon, PolarContains) {
TEST(BigSimplePolygon, PolarContainsWithHoles) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0)
+ << LatLng(80.0, -90.0)));
// Square 5 degrees from the north pole [90, 0] with a concentric hole 1 degree from the
// north pole
@@ -429,7 +467,8 @@ TEST(BigSimplePolygon, PolarContainsWithHoles) {
TEST(BigSimplePolygon, PolarIntersectsWithHoles) {
// Square 10 degrees from the north pole [90,0]
BigSimplePolygon bigNorthPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(80.0, 90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, -90.0)));
+ << LatLng(80.0, 180.0)
+ << LatLng(80.0, -90.0)));
// 5-degree square with 1-degree-wide concentric hole, centered on [80.0, 0.0]
vector<S2Loop*> loops;
@@ -472,7 +511,8 @@ void checkConsistency(const BigSimplePolygon& bigPoly,
TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Big polygon smaller than a hemisphere.
BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0)
+ << LatLng(80.0, 90.0)));
ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
// Vertex point and collinear point
@@ -481,10 +521,12 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Polygon shares one edge
S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, -10.0) << LatLng(80.0, -10.0)));
+ << LatLng(-80.0, -10.0)
+ << LatLng(80.0, -10.0)));
// Polygon shares a segment of one edge
S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, -10.0) << LatLng(50.0, -10.0)));
+ << LatLng(-50.0, -10.0)
+ << LatLng(50.0, -10.0)));
// Line
S2Polyline line(
@@ -495,9 +537,12 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
// Big polygon larger than a hemisphere.
BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0) << LatLng(-80.0, 180.0)
- << LatLng(-80.0, -90.0) << LatLng(80.0, -90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0)
+ << LatLng(-80.0, 180.0)
+ << LatLng(-80.0, -90.0)
+ << LatLng(80.0, -90.0)
+ << LatLng(80.0, 180.0)
+ << LatLng(80.0, 90.0)));
ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
checkConsistency(bigPoly, expandedBigPoly, point);
@@ -525,15 +570,18 @@ TEST(BigSimplePolygon, ShareEdgeDisjoint) {
TEST(BigSimplePolygon, ShareEdgeContained) {
// Big polygon smaller than a hemisphere.
BigSimplePolygon bigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0) << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0)
+ << LatLng(80.0, 90.0)));
ASSERT_LESS_THAN(bigPoly.GetArea(), 2 * M_PI);
// Polygon
S2Polygon poly(loopVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 10.0) << LatLng(80.0, 10.0)));
+ << LatLng(-80.0, 10.0)
+ << LatLng(80.0, 10.0)));
// Polygon shares a segment of one edge
S2Polygon collinearPoly(loopVec(points() << LatLng(50.0, 0.0) << LatLng(-50.0, 0.0)
- << LatLng(-50.0, 10.0) << LatLng(50.0, 10.0)));
+ << LatLng(-50.0, 10.0)
+ << LatLng(50.0, 10.0)));
// Line
S2Polyline line(
pointVec(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0) << LatLng(0.0, 10.0)));
@@ -543,9 +591,12 @@ TEST(BigSimplePolygon, ShareEdgeContained) {
// Big polygon larger than a hemisphere.
BigSimplePolygon expandedBigPoly(loop(points() << LatLng(80.0, 0.0) << LatLng(-80.0, 0.0)
- << LatLng(-80.0, 90.0) << LatLng(-80.0, 180.0)
- << LatLng(-80.0, -90.0) << LatLng(80.0, -90.0)
- << LatLng(80.0, 180.0) << LatLng(80.0, 90.0)));
+ << LatLng(-80.0, 90.0)
+ << LatLng(-80.0, 180.0)
+ << LatLng(-80.0, -90.0)
+ << LatLng(80.0, -90.0)
+ << LatLng(80.0, 180.0)
+ << LatLng(80.0, 90.0)));
ASSERT_GREATER_THAN(expandedBigPoly.GetArea(), 2 * M_PI);
checkConsistency(bigPoly, expandedBigPoly, poly);
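The big_polygon_test hunks above are all the same mechanical change: once a builder chain such as points() << LatLng(...) << ... exceeds the configured column limit, clang-format breaks the chain one operand per line, aligned under the first <<. A minimal standalone sketch of the before/after shape (the Chain type below is a hypothetical stand-in for the points() builder in these tests, not a mongo type):

    #include <vector>

    struct LatLng {
        double lat, lng;
    };

    // Hypothetical stand-in for the points() builder used in the tests above.
    struct Chain {
        std::vector<LatLng> pts;
        Chain& operator<<(LatLng p) {
            pts.push_back(p);
            return *this;
        }
    };

    int main() {
        Chain c;
        // Before: operands were packed two per line. After clang-format, each
        // operand that no longer fits is broken onto its own line, aligned
        // under the first '<<':
        c << LatLng{80.0, 0.0}
          << LatLng{80.0, 90.0}
          << LatLng{80.0, 180.0}
          << LatLng{80.0, -90.0};
        return c.pts.size() == 4 ? 0 : 1;
    }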
diff --git a/src/mongo/db/geo/geoparser.cpp b/src/mongo/db/geo/geoparser.cpp
index 12831f640bc..7e58a768aa8 100644
--- a/src/mongo/db/geo/geoparser.cpp
+++ b/src/mongo/db/geo/geoparser.cpp
@@ -30,9 +30,9 @@
#include "mongo/db/geo/geoparser.h"
+#include <cmath>
#include <string>
#include <vector>
-#include <cmath>
#include "mongo/db/geo/shapes.h"
#include "mongo/db/jsobj.h"
@@ -227,7 +227,8 @@ static Status parseGeoJSONPolygonCoordinates(const BSONElement& elem,
"Secondary loops not contained by first exterior loop - "
"secondary loops must be holes: "
<< coordinateElt.toString(false)
- << " first loop: " << elem.Obj().firstElement().toString(false));
+ << " first loop: "
+ << elem.Obj().firstElement().toString(false));
}
}
diff --git a/src/mongo/db/geo/geoparser_test.cpp b/src/mongo/db/geo/geoparser_test.cpp
index a8fc1397659..4f90986d3c1 100644
--- a/src/mongo/db/geo/geoparser_test.cpp
+++ b/src/mongo/db/geo/geoparser_test.cpp
@@ -30,13 +30,13 @@
* This file contains tests for mongo/db/geo/geoparser.cpp.
*/
-#include <string>
#include <sstream>
+#include <string>
#include "mongo/db/geo/geoparser.h"
#include "mongo/db/geo/shapes.h"
-#include "mongo/db/json.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/json.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
@@ -148,16 +148,14 @@ TEST(GeoParser, parseGeoJSONPolygon) {
&polygon));
// And one with a hole.
ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson(
- "{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]],"
- " [[1,1],[4,1],[4,4],[1,4],[1,1]] ]}"),
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]],"
+ " [[1,1],[4,1],[4,4],[1,4],[1,1]] ]}"),
false,
&polygon));
// Latitudes must be OK
ASSERT_NOT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson(
- "{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,91],[0,91],[0,0]],"
- " [[1,1],[4,1],[4,4],[1,4],[1,1]] ]}"),
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,91],[0,91],[0,0]],"
+ " [[1,1],[4,1],[4,4],[1,4],[1,1]] ]}"),
false,
&polygon));
// First point must be the same as the last.
@@ -165,9 +163,8 @@ TEST(GeoParser, parseGeoJSONPolygon) {
fromjson("{'type':'Polygon', 'coordinates':[ [[1,2],[3,4],[5,6]] ]}"), false, &polygon));
// Extra elements are allowed
ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson(
- "{'type':'Polygon', 'coordinates':[ [[0,0,0,0],[5,0,0],[5,5,1],"
- " [0,5],[0,0]] ]}"),
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0,0,0],[5,0,0],[5,5,1],"
+ " [0,5],[0,0]] ]}"),
false,
&polygon));
@@ -185,9 +182,8 @@ TEST(GeoParser, parseGeoJSONPolygon) {
PolygonWithCRS polygonB;
ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson(
- "{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]],"
- " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[5,0],[5,5],[0,5],[0,0]],"
+ " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
false,
&polygonB));
// We removed this in the hole.
@@ -204,9 +200,8 @@ TEST(GeoParser, parseGeoJSONPolygon) {
PolygonWithCRS polygonD;
ASSERT_OK(GeoParser::parseGeoJSONPolygon(
- fromjson(
- "{'type':'Polygon', 'coordinates':[ [[0,0],[0,5],[5,5],[5,0],[0,0]],"
- " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
+ fromjson("{'type':'Polygon', 'coordinates':[ [[0,0],[0,5],[5,5],[5,0],[0,0]],"
+ " [[1,1],[1,4],[4,4],[4,1],[1,1]] ]}"),
false,
&polygonD));
// Also removed in the loop.
@@ -324,31 +319,28 @@ TEST(GeoParser, parseMultiLine) {
mongo::MultiLineWithCRS ml;
ASSERT_OK(GeoParser::parseMultiLine(
- fromjson(
- "{'type':'MultiLineString','coordinates':[ [[1,1],[2,2],[3,3]],"
- "[[4,5],[6,7]]]}"),
+ fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2],[3,3]],"
+ "[[4,5],[6,7]]]}"),
false,
&ml));
ASSERT_EQUALS(ml.lines.size(), (size_t)2);
- ASSERT_OK(
- GeoParser::parseMultiLine(fromjson(
- "{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]],"
- "[[4,5],[6,7]]]}"),
- false,
- &ml));
+ ASSERT_OK(GeoParser::parseMultiLine(
+ fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]],"
+ "[[4,5],[6,7]]]}"),
+ false,
+ &ml));
ASSERT_EQUALS(ml.lines.size(), (size_t)2);
ASSERT_OK(GeoParser::parseMultiLine(
fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]]]}"), false, &ml));
ASSERT_EQUALS(ml.lines.size(), (size_t)1);
- ASSERT_OK(
- GeoParser::parseMultiLine(fromjson(
- "{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]],"
- "[[2,2],[1,1]]]}"),
- false,
- &ml));
+ ASSERT_OK(GeoParser::parseMultiLine(
+ fromjson("{'type':'MultiLineString','coordinates':[ [[1,1],[2,2]],"
+ "[[2,2],[1,1]]]}"),
+ false,
+ &ml));
ASSERT_EQUALS(ml.lines.size(), (size_t)2);
ASSERT_NOT_OK(GeoParser::parseMultiLine(
@@ -365,22 +357,20 @@ TEST(GeoParser, parseMultiPolygon) {
mongo::MultiPolygonWithCRS mp;
ASSERT_OK(GeoParser::parseMultiPolygon(
- fromjson(
- "{'type':'MultiPolygon','coordinates':["
- "[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],"
- "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
- "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
- "]}"),
+ fromjson("{'type':'MultiPolygon','coordinates':["
+ "[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],"
+ "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
+ "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
+ "]}"),
false,
&mp));
ASSERT_EQUALS(mp.polygons.size(), (size_t)2);
ASSERT_OK(GeoParser::parseMultiPolygon(
- fromjson(
- "{'type':'MultiPolygon','coordinates':["
- "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
- "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
- "]}"),
+ fromjson("{'type':'MultiPolygon','coordinates':["
+ "[[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],"
+ "[[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]"
+ "]}"),
false,
&mp));
ASSERT_EQUALS(mp.polygons.size(), (size_t)1);
diff --git a/src/mongo/db/geo/hash.cpp b/src/mongo/db/geo/hash.cpp
index 9b96a98a957..affc42e7fba 100644
--- a/src/mongo/db/geo/hash.cpp
+++ b/src/mongo/db/geo/hash.cpp
@@ -26,11 +26,11 @@
* it in the license file.
*/
+#include "mongo/db/geo/hash.h"
#include "mongo/config.h"
#include "mongo/db/field_parser.h"
-#include "mongo/db/jsobj.h"
-#include "mongo/db/geo/hash.h"
#include "mongo/db/geo/shapes.h"
+#include "mongo/db/jsobj.h"
#include "mongo/util/mongoutils/str.h"
#include <algorithm> // for max()
@@ -669,13 +669,19 @@ Status GeoHashConverter::parseParameters(const BSONObj& paramDoc,
if (params->bits < 1 || params->bits > 32) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "bits for hash must be > 0 and <= 32, "
- << "but " << params->bits << " bits were specified");
+ << "but "
+ << params->bits
+ << " bits were specified");
}
if (params->min >= params->max) {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "region for hash must be valid and have positive area, "
- << "but [" << params->min << ", " << params->max << "] "
+ << "but ["
+ << params->min
+ << ", "
+ << params->max
+ << "] "
<< "was specified");
}
@@ -770,7 +776,8 @@ GeoHash GeoHashConverter::hash(const BSONObj& o, const BSONObj* src) const {
GeoHash GeoHashConverter::hash(double x, double y) const {
uassert(16433,
str::stream() << "point not in interval of [ " << _params.min << ", " << _params.max
- << " ]" << causedBy(BSON_ARRAY(x << y).toString()),
+ << " ]"
+ << causedBy(BSON_ARRAY(x << y).toString()),
x <= _params.max && x >= _params.min && y <= _params.max && y >= _params.min);
return GeoHash(convertToHashScale(x), convertToHashScale(y), _params.bits);
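The same one-operand-per-line rule is what reshapes the str::stream() error messages in parseParameters and hash above: the message text is unchanged, but every streamed operand now lands on its own line. A sketch of the pattern, with std::ostringstream standing in for mongoutils' str::stream (assumption: only the layout differs, the accumulated string is identical):

    #include <iostream>
    #include <sstream>

    int main() {
        const double min = -180.0, max = 180.0;
        std::ostringstream msg;  // stand-in for str::stream()
        // Post-format layout: one streamed operand per line once the chain
        // no longer fits. The resulting string is byte-for-byte the same.
        msg << "region for hash must be valid and have positive area, "
            << "but ["
            << min
            << ", "
            << max
            << "] "
            << "was specified";
        std::cout << msg.str() << '\n';
        return 0;
    }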
diff --git a/src/mongo/db/geo/hash.h b/src/mongo/db/geo/hash.h
index 0fbbaa2ac38..f772145ede8 100644
--- a/src/mongo/db/geo/hash.h
+++ b/src/mongo/db/geo/hash.h
@@ -28,8 +28,8 @@
#pragma once
-#include "mongo/platform/basic.h"
#include "mongo/db/jsobj.h"
+#include "mongo/platform/basic.h"
namespace mongo {
diff --git a/src/mongo/db/geo/hash_test.cpp b/src/mongo/db/geo/hash_test.cpp
index bfe6050fd9d..23aed0a6d93 100644
--- a/src/mongo/db/geo/hash_test.cpp
+++ b/src/mongo/db/geo/hash_test.cpp
@@ -30,12 +30,12 @@
* This file contains tests for mongo/db/geo/hash.cpp.
*/
+#include <algorithm> // For max()
#include <bitset>
-#include <string>
-#include <sstream>
-#include <iomanip>
#include <cmath>
-#include <algorithm> // For max()
+#include <iomanip>
+#include <sstream>
+#include <string>
#include "mongo/db/geo/hash.h"
#include "mongo/db/geo/shapes.h"
diff --git a/src/mongo/db/geo/r2_region_coverer.cpp b/src/mongo/db/geo/r2_region_coverer.cpp
index e9cbc789833..c593498e683 100644
--- a/src/mongo/db/geo/r2_region_coverer.cpp
+++ b/src/mongo/db/geo/r2_region_coverer.cpp
@@ -32,8 +32,8 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/geo/shapes.h"
#include "mongo/db/geo/r2_region_coverer.h"
+#include "mongo/db/geo/shapes.h"
#include "mongo/util/log.h"
namespace mongo {
diff --git a/src/mongo/db/geo/r2_region_coverer_test.cpp b/src/mongo/db/geo/r2_region_coverer_test.cpp
index ba487d231a9..69b6abba563 100644
--- a/src/mongo/db/geo/r2_region_coverer_test.cpp
+++ b/src/mongo/db/geo/r2_region_coverer_test.cpp
@@ -34,10 +34,10 @@
#include "mongo/db/geo/r2_region_coverer.h"
#include "mongo/base/init.h"
-#include "mongo/unittest/unittest.h"
-#include "mongo/platform/random.h"
#include "mongo/bson/bsonmisc.h"
#include "mongo/db/geo/geometry_container.h"
+#include "mongo/platform/random.h"
+#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
namespace {
@@ -278,7 +278,8 @@ GeometryContainer* getRandomCircle(double radius) {
container->parseFromQuery(
BSON("$center" << BSON_ARRAY(BSON_ARRAY(randDouble(radius, MAXBOUND - radius)
<< randDouble(radius, MAXBOUND - radius))
- << radius)).firstElement());
+ << radius))
+ .firstElement());
return container;
}
diff --git a/src/mongo/db/geo/shapes.cpp b/src/mongo/db/geo/shapes.cpp
index fa6018877bb..5ccc0fb85d4 100644
--- a/src/mongo/db/geo/shapes.cpp
+++ b/src/mongo/db/geo/shapes.cpp
@@ -26,8 +26,8 @@
* it in the license file.
*/
-#include "mongo/db/jsobj.h"
#include "mongo/db/geo/shapes.h"
+#include "mongo/db/jsobj.h"
#include "mongo/util/mongoutils/str.h"
using std::abs;
diff --git a/src/mongo/db/geo/shapes.h b/src/mongo/db/geo/shapes.h
index 3d8863ff964..7c610d94bf8 100644
--- a/src/mongo/db/geo/shapes.h
+++ b/src/mongo/db/geo/shapes.h
@@ -33,9 +33,9 @@
#include <vector>
#include "mongo/base/owned_pointer_vector.h"
-#include "mongo/db/jsobj.h"
#include "mongo/db/geo/big_polygon.h"
#include "mongo/db/geo/s2.h"
+#include "mongo/db/jsobj.h"
#include "third_party/s2/s2cap.h"
#include "third_party/s2/s2cell.h"
#include "third_party/s2/s2latlng.h"
diff --git a/src/mongo/db/hasher_test.cpp b/src/mongo/db/hasher_test.cpp
index a837126bebd..874181a3100 100644
--- a/src/mongo/db/hasher_test.cpp
+++ b/src/mongo/db/hasher_test.cpp
@@ -30,10 +30,10 @@
#include "mongo/platform/basic.h"
+#include "mongo/bson/bsontypes.h"
#include "mongo/db/hasher.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
-#include "mongo/bson/bsontypes.h"
#include "mongo/unittest/unittest.h"
@@ -271,7 +271,8 @@ TEST(BSONElementHasher, HashString) {
TEST(BSONElementHasher, HashObject) {
BSONObj o = BSON("check" << BSON("a"
<< "abc"
- << "b" << 123LL));
+ << "b"
+ << 123LL));
ASSERT_EQUALS(hashIt(o), 4771603801758380216LL);
o = BSON("check" << BSONObj());
diff --git a/src/mongo/db/index/2d_access_method.cpp b/src/mongo/db/index/2d_access_method.cpp
index c4e5e6e8843..ed63659593a 100644
--- a/src/mongo/db/index/2d_access_method.cpp
+++ b/src/mongo/db/index/2d_access_method.cpp
@@ -31,10 +31,10 @@
#include <string>
#include <vector>
-#include "mongo/db/index_names.h"
#include "mongo/db/index/2d_common.h"
#include "mongo/db/index/expression_keys_private.h"
#include "mongo/db/index/expression_params.h"
+#include "mongo/db/index_names.h"
#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/db/index/btree_access_method.h b/src/mongo/db/index/btree_access_method.h
index 5873514c01c..ed5389d5f79 100644
--- a/src/mongo/db/index/btree_access_method.h
+++ b/src/mongo/db/index/btree_access_method.h
@@ -30,9 +30,9 @@
#include "mongo/base/status.h"
-#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/btree_key_generator.h"
#include "mongo/db/index/index_access_method.h"
+#include "mongo/db/index/index_access_method.h"
#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/db/index/btree_key_generator.cpp b/src/mongo/db/index/btree_key_generator.cpp
index 1f1eb949b06..5e847e77438 100644
--- a/src/mongo/db/index/btree_key_generator.cpp
+++ b/src/mongo/db/index/btree_key_generator.cpp
@@ -243,8 +243,10 @@ BSONElement BtreeKeyGeneratorV1::extractNextElement(const BSONObj& obj,
uassert(16746,
mongoutils::str::stream()
<< "Ambiguous field name found in array (do not use numeric field names in "
- "embedded elements in an array), field: '" << arrField.fieldName()
- << "' for array: " << positionalInfo.arrayObj,
+ "embedded elements in an array), field: '"
+ << arrField.fieldName()
+ << "' for array: "
+ << positionalInfo.arrayObj,
!haveObjField || !positionalInfo.hasPositionallyIndexedElt());
*arrayNestedArray = false;
diff --git a/src/mongo/db/index/expression_keys_private.cpp b/src/mongo/db/index/expression_keys_private.cpp
index 99f2b889c01..9afc653127f 100644
--- a/src/mongo/db/index/expression_keys_private.cpp
+++ b/src/mongo/db/index/expression_keys_private.cpp
@@ -37,9 +37,9 @@
#include "mongo/db/geo/geometry_container.h"
#include "mongo/db/geo/geoparser.h"
#include "mongo/db/geo/s2.h"
-#include "mongo/db/index_names.h"
#include "mongo/db/index/2d_common.h"
#include "mongo/db/index/s2_common.h"
+#include "mongo/db/index_names.h"
#include "mongo/db/query/collation/collation_index_key.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/index/expression_keys_private.h b/src/mongo/db/index/expression_keys_private.h
index b84acbcae11..5206f4a6768 100644
--- a/src/mongo/db/index/expression_keys_private.h
+++ b/src/mongo/db/index/expression_keys_private.h
@@ -30,8 +30,8 @@
#include <vector>
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/db/hasher.h"
namespace mongo {
diff --git a/src/mongo/db/index/expression_params.cpp b/src/mongo/db/index/expression_params.cpp
index e0ab49c303e..956a99e6f69 100644
--- a/src/mongo/db/index/expression_params.cpp
+++ b/src/mongo/db/index/expression_params.cpp
@@ -31,9 +31,9 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/geo/geoconstants.h"
#include "mongo/db/hasher.h"
-#include "mongo/db/index_names.h"
#include "mongo/db/index/2d_common.h"
#include "mongo/db/index/s2_common.h"
+#include "mongo/db/index_names.h"
#include "mongo/util/mongoutils/str.h"
#include "third_party/s2/s2.h"
@@ -192,8 +192,14 @@ void ExpressionParams::initialize2dsphereParams(const BSONObj& infoObj,
massert(17395,
stream() << "unsupported geo index version { " << kIndexVersionFieldName << " : "
- << out->indexVersion << " }, only support versions: [" << S2_INDEX_VERSION_1
- << "," << S2_INDEX_VERSION_2 << "," << S2_INDEX_VERSION_3 << "]",
+ << out->indexVersion
+ << " }, only support versions: ["
+ << S2_INDEX_VERSION_1
+ << ","
+ << S2_INDEX_VERSION_2
+ << ","
+ << S2_INDEX_VERSION_3
+ << "]",
out->indexVersion == S2_INDEX_VERSION_3 || out->indexVersion == S2_INDEX_VERSION_2 ||
out->indexVersion == S2_INDEX_VERSION_1);
}
diff --git a/src/mongo/db/index/expression_params.h b/src/mongo/db/index/expression_params.h
index 21cf7c298c0..d8a12323abc 100644
--- a/src/mongo/db/index/expression_params.h
+++ b/src/mongo/db/index/expression_params.h
@@ -31,8 +31,8 @@
#include <string>
#include <vector>
-#include "mongo/db/jsobj.h"
#include "mongo/db/hasher.h"
+#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/db/index/external_key_generator.cpp b/src/mongo/db/index/external_key_generator.cpp
index de1aec11d64..1ab9c1ad9ae 100644
--- a/src/mongo/db/index/external_key_generator.cpp
+++ b/src/mongo/db/index/external_key_generator.cpp
@@ -32,12 +32,12 @@
#include <string>
#include "mongo/db/fts/fts_spec.h"
-#include "mongo/db/index_names.h"
#include "mongo/db/index/2d_common.h"
#include "mongo/db/index/btree_key_generator.h"
#include "mongo/db/index/expression_keys_private.h"
#include "mongo/db/index/expression_params.h"
#include "mongo/db/index/s2_common.h"
+#include "mongo/db/index_names.h"
#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/db/index/hash_access_method.cpp b/src/mongo/db/index/hash_access_method.cpp
index 10339f7eae9..34f4323fede 100644
--- a/src/mongo/db/index/hash_access_method.cpp
+++ b/src/mongo/db/index/hash_access_method.cpp
@@ -26,10 +26,10 @@
* it in the license file.
*/
+#include "mongo/db/index/hash_access_method.h"
#include "mongo/db/hasher.h"
#include "mongo/db/index/expression_keys_private.h"
#include "mongo/db/index/expression_params.h"
-#include "mongo/db/index/hash_access_method.h"
namespace mongo {
diff --git a/src/mongo/db/index/hash_access_method.h b/src/mongo/db/index/hash_access_method.h
index 8fc5db36636..e73fc2c623e 100644
--- a/src/mongo/db/index/hash_access_method.h
+++ b/src/mongo/db/index/hash_access_method.h
@@ -32,8 +32,8 @@
#include "mongo/base/status.h"
#include "mongo/db/hasher.h" // For HashSeed.
-#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/index_access_method.h"
+#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp
index 2abb95870d3..d3ca3c0c808 100644
--- a/src/mongo/db/index/index_access_method.cpp
+++ b/src/mongo/db/index/index_access_method.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/index/btree_access_method.h"
-#include <vector>
#include <utility>
+#include <vector>
#include "mongo/base/error_codes.h"
#include "mongo/base/status.h"
@@ -110,8 +110,8 @@ IndexAccessMethod::IndexAccessMethod(IndexCatalogEntry* btreeState, SortedDataIn
bool IndexAccessMethod::ignoreKeyTooLong(OperationContext* txn) {
// Ignore this error if we're on a secondary or if the user requested it
- const auto canAcceptWritesForNs = repl::ReplicationCoordinator::get(txn)
- ->canAcceptWritesFor(NamespaceString(_btreeState->ns()));
+ const auto canAcceptWritesForNs = repl::ReplicationCoordinator::get(txn)->canAcceptWritesFor(
+ NamespaceString(_btreeState->ns()));
return !canAcceptWritesForNs || !failIndexKeyTooLong;
}
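The index_access_method.cpp hunk above shows how the new style handles a member call that does not fit: instead of breaking before the -> as the old code did, clang-format keeps the call open on the first line and wraps its arguments. A self-contained sketch of the two layouts (Coordinator and get() are hypothetical stand-ins for the ReplicationCoordinator accessors):

    #include <string>

    struct Coordinator {
        bool canAcceptWritesFor(const std::string& ns) const {
            return !ns.empty();
        }
    };

    Coordinator* get() {
        static Coordinator c;
        return &c;
    }

    int main() {
        // Old layout broke before the arrow:
        //     const bool ok = get()
        //         ->canAcceptWritesFor("db.coll");
        // New layout keeps '->canAcceptWritesFor(' on the first line and
        // wraps the argument instead:
        const bool ok = get()->canAcceptWritesFor(
            "db.coll");
        return ok ? 0 : 1;
    }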
diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h
index 3bb46d78a57..cead1965b9e 100644
--- a/src/mongo/db/index/index_descriptor.h
+++ b/src/mongo/db/index/index_descriptor.h
@@ -32,9 +32,9 @@
#include <string>
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/index/multikey_paths.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/util/stacktrace.h"
diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp
index a05f72855dc..eee92fa8037 100644
--- a/src/mongo/db/index/s2_access_method.cpp
+++ b/src/mongo/db/index/s2_access_method.cpp
@@ -33,11 +33,11 @@
#include <vector>
#include "mongo/base/status.h"
-#include "mongo/db/geo/geoparser.h"
#include "mongo/db/geo/geoconstants.h"
-#include "mongo/db/index_names.h"
+#include "mongo/db/geo/geoparser.h"
#include "mongo/db/index/expression_keys_private.h"
#include "mongo/db/index/expression_params.h"
+#include "mongo/db/index_names.h"
#include "mongo/db/jsobj.h"
#include "mongo/util/log.h"
@@ -95,18 +95,30 @@ StatusWith<BSONObj> S2AccessMethod::fixSpec(const BSONObj& specObj) {
if (!indexVersionElt.isNumber()) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid type for geo index version { " << kIndexVersionFieldName
- << " : " << indexVersionElt << " }, only versions: ["
- << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
- << S2_INDEX_VERSION_3 << "] are supported"};
+ << " : "
+ << indexVersionElt
+ << " }, only versions: ["
+ << S2_INDEX_VERSION_1
+ << ","
+ << S2_INDEX_VERSION_2
+ << ","
+ << S2_INDEX_VERSION_3
+ << "] are supported"};
}
if (indexVersionElt.type() == BSONType::NumberDouble &&
!std::isnormal(indexVersionElt.numberDouble())) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "Invalid value for geo index version { " << kIndexVersionFieldName
- << " : " << indexVersionElt << " }, only versions: ["
- << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
- << S2_INDEX_VERSION_3 << "] are supported"};
+ << " : "
+ << indexVersionElt
+ << " }, only versions: ["
+ << S2_INDEX_VERSION_1
+ << ","
+ << S2_INDEX_VERSION_2
+ << ","
+ << S2_INDEX_VERSION_3
+ << "] are supported"};
}
const auto indexVersion = indexVersionElt.numberLong();
@@ -114,9 +126,15 @@ StatusWith<BSONObj> S2AccessMethod::fixSpec(const BSONObj& specObj) {
indexVersion != S2_INDEX_VERSION_3) {
return {ErrorCodes::CannotCreateIndex,
str::stream() << "unsupported geo index version { " << kIndexVersionFieldName
- << " : " << indexVersionElt << " }, only versions: ["
- << S2_INDEX_VERSION_1 << "," << S2_INDEX_VERSION_2 << ","
- << S2_INDEX_VERSION_3 << "] are supported"};
+ << " : "
+ << indexVersionElt
+ << " }, only versions: ["
+ << S2_INDEX_VERSION_1
+ << ","
+ << S2_INDEX_VERSION_2
+ << ","
+ << S2_INDEX_VERSION_3
+ << "] are supported"};
}
return specObj;
diff --git a/src/mongo/db/index/s2_key_generator_test.cpp b/src/mongo/db/index/s2_key_generator_test.cpp
index a26a43337e8..1002ea1ecde 100644
--- a/src/mongo/db/index/s2_key_generator_test.cpp
+++ b/src/mongo/db/index/s2_key_generator_test.cpp
@@ -33,8 +33,8 @@
#include "mongo/db/index/expression_keys_private.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/db/index/s2_common.h"
#include "mongo/db/index/expression_params.h"
+#include "mongo/db/index/s2_common.h"
#include "mongo/db/json.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/unittest/unittest.h"
@@ -67,7 +67,8 @@ bool assertKeysetsEqual(const BSONObjSet& expectedKeys, const BSONObjSet& actual
long long getCellID(int x, int y) {
BSONObj obj = BSON("a" << BSON("type"
<< "Point"
- << "coordinates" << BSON_ARRAY(x << y)));
+ << "coordinates"
+ << BSON_ARRAY(x << y)));
BSONObj keyPattern = fromjson("{a: '2dsphere'}");
BSONObj infoObj = fromjson("{key: {a: '2dsphere'}, '2dsphereIndexVersion': 3}");
S2IndexingParams params;
@@ -109,7 +110,8 @@ TEST(S2KeyGeneratorTest, CollationAppliedToNonGeoStringFieldBeforeGeoField) {
BSONObjSet expectedKeys;
expectedKeys.insert(BSON(""
<< "gnirts"
- << "" << getCellID(0, 0)));
+ << ""
+ << getCellID(0, 0)));
ASSERT(assertKeysetsEqual(expectedKeys, actualKeys));
}
@@ -127,7 +129,9 @@ TEST(S2KeyGeneratorTest, CollationAppliedToAllNonGeoStringFields) {
BSONObjSet expectedKeys;
expectedKeys.insert(BSON(""
<< "gnirts"
- << "" << getCellID(0, 0) << ""
+ << ""
+ << getCellID(0, 0)
+ << ""
<< "2gnirts"));
ASSERT(assertKeysetsEqual(expectedKeys, actualKeys));
diff --git a/src/mongo/db/index_rebuilder.cpp b/src/mongo/db/index_rebuilder.cpp
index cbc90157404..ed9e82a6edb 100644
--- a/src/mongo/db/index_rebuilder.cpp
+++ b/src/mongo/db/index_rebuilder.cpp
@@ -43,8 +43,8 @@
#include "mongo/db/catalog/index_create.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/instance.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
diff --git a/src/mongo/db/initialize_server_global_state.cpp b/src/mongo/db/initialize_server_global_state.cpp
index c9a313491f8..acd16f3577d 100644
--- a/src/mongo/db/initialize_server_global_state.cpp
+++ b/src/mongo/db/initialize_server_global_state.cpp
@@ -38,9 +38,9 @@
#include <signal.h>
#ifndef _WIN32
-#include <syslog.h>
#include <sys/types.h>
#include <sys/wait.h>
+#include <syslog.h>
#endif
#include "mongo/base/init.h"
@@ -51,8 +51,8 @@
#include "mongo/db/auth/internal_user_auth.h"
#include "mongo/db/auth/security_key.h"
#include "mongo/db/server_options.h"
-#include "mongo/logger/logger.h"
#include "mongo/logger/console_appender.h"
+#include "mongo/logger/logger.h"
#include "mongo/logger/message_event.h"
#include "mongo/logger/message_event_utf8_encoder.h"
#include "mongo/logger/ramlog.h"
@@ -66,8 +66,8 @@
#include "mongo/util/net/listen.h"
#include "mongo/util/net/ssl_manager.h"
#include "mongo/util/processinfo.h"
-#include "mongo/util/signal_handlers_synchronous.h"
#include "mongo/util/quick_exit.h"
+#include "mongo/util/signal_handlers_synchronous.h"
namespace fs = boost::filesystem;
@@ -200,7 +200,8 @@ void forkServerOrDie() {
MONGO_INITIALIZER_GENERAL(ServerLogRedirection,
("GlobalLogManager", "EndStartupOptionHandling", "ForkServer"),
- ("default"))(InitializerContext*) {
+ ("default"))
+(InitializerContext*) {
using logger::LogManager;
using logger::MessageEventEphemeral;
using logger::MessageEventDetailsEncoder;
@@ -230,8 +231,9 @@ MONGO_INITIALIZER_GENERAL(ServerLogRedirection,
#endif // defined(_WIN32)
} else if (!serverGlobalParams.logpath.empty()) {
fassert(16448, !serverGlobalParams.logWithSyslog);
- std::string absoluteLogpath = boost::filesystem::absolute(serverGlobalParams.logpath,
- serverGlobalParams.cwd).string();
+ std::string absoluteLogpath =
+ boost::filesystem::absolute(serverGlobalParams.logpath, serverGlobalParams.cwd)
+ .string();
bool exists;
@@ -240,15 +242,16 @@ MONGO_INITIALIZER_GENERAL(ServerLogRedirection,
} catch (boost::filesystem::filesystem_error& e) {
return Status(ErrorCodes::FileNotOpen,
mongoutils::str::stream() << "Failed probe for \"" << absoluteLogpath
- << "\": " << e.code().message());
+ << "\": "
+ << e.code().message());
}
if (exists) {
if (boost::filesystem::is_directory(absoluteLogpath)) {
- return Status(ErrorCodes::FileNotOpen,
- mongoutils::str::stream()
- << "logpath \"" << absoluteLogpath
- << "\" should name a file, not a directory.");
+ return Status(
+ ErrorCodes::FileNotOpen,
+ mongoutils::str::stream() << "logpath \"" << absoluteLogpath
+ << "\" should name a file, not a directory.");
}
if (!serverGlobalParams.logAppend && boost::filesystem::is_regular(absoluteLogpath)) {
@@ -260,7 +263,9 @@ MONGO_INITIALIZER_GENERAL(ServerLogRedirection,
return Status(ErrorCodes::FileRenameFailed,
mongoutils::str::stream()
<< "Could not rename preexisting log file \""
- << absoluteLogpath << "\" to \"" << renameTarget
+ << absoluteLogpath
+ << "\" to \""
+ << renameTarget
<< "\"; run with --logappend or manually remove file: "
<< errnoWithDescription());
}
@@ -362,7 +367,9 @@ bool initializeServerGlobalState() {
clusterAuthMode == ServerGlobalParams::ClusterAuthMode_sendX509) {
setInternalUserAuthParams(
BSON(saslCommandMechanismFieldName
- << "MONGODB-X509" << saslCommandUserDBFieldName << "$external"
+ << "MONGODB-X509"
+ << saslCommandUserDBFieldName
+ << "$external"
<< saslCommandUserFieldName
<< getSSLManager()->getSSLConfiguration().clientSubjectName));
}
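Two more recurring patterns appear in the initialize_server_global_state.cpp hunks above and throughout the rest of the diff: when the object expression is itself a wrapped multi-line call, a trailing member access such as .string() or .getStatus() now moves onto its own line; and MONGO_INITIALIZER_GENERAL's (InitializerContext*) parameter list is pushed to a fresh line, apparently because clang-format does not treat the macro invocation as a function header. A sketch of the trailing-call layout (Status and StatusWith here are simplified stand-ins, not the real mongo types):

    #include <string>

    struct Status {
        bool ok;
    };

    // Simplified stand-in for mongo's StatusWith<T>.
    struct StatusWith {
        Status status;
        Status getStatus() const {
            return status;
        }
    };

    StatusWith parse(const std::string& json, bool strict) {
        return StatusWith{Status{!json.empty() && !strict}};
    }

    int main() {
        // When the call wraps across lines, the trailing '.getStatus()' is
        // now placed on its own line under the wrapped expression:
        Status s = parse("{a: {$type: null}}",
                         /*strict=*/false)
                       .getStatus();
        return s.ok ? 0 : 1;
    }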
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
index 386b925e653..bae59911976 100644
--- a/src/mongo/db/instance.cpp
+++ b/src/mongo/db/instance.cpp
@@ -40,8 +40,8 @@
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/authz_manager_external_state_d.h"
-#include "mongo/db/client.h"
#include "mongo/db/catalog/cursor_manager.h"
+#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/commands/fsync.h"
#include "mongo/db/concurrency/d_concurrency.h"
@@ -370,8 +370,8 @@ void receivedQuery(OperationContext* txn,
// If we got a stale config, wait in case the operation is stuck in a critical section
if (e.getCode() == ErrorCodes::SendStaleConfig) {
auto& sce = static_cast<const StaleConfigException&>(e);
- ShardingState::get(txn)
- ->onStaleShardVersion(txn, NamespaceString(sce.getns()), sce.getVersionReceived());
+ ShardingState::get(txn)->onStaleShardVersion(
+ txn, NamespaceString(sce.getns()), sce.getVersionReceived());
}
dbResponse.response.reset();
@@ -651,9 +651,12 @@ void assembleResponse(OperationContext* txn,
const ShardedConnectionInfo* connInfo = ShardedConnectionInfo::get(&c, false);
uassert(18663,
str::stream() << "legacy writeOps not longer supported for "
- << "versioned connections, ns: " << nsString.ns()
- << ", op: " << networkOpToString(op)
- << ", remote: " << remote.toString(),
+ << "versioned connections, ns: "
+ << nsString.ns()
+ << ", op: "
+ << networkOpToString(op)
+ << ", remote: "
+ << remote.toString(),
connInfo == NULL);
}
diff --git a/src/mongo/db/jsobj.h b/src/mongo/db/jsobj.h
index 1135b34aa5d..4691f7c157f 100644
--- a/src/mongo/db/jsobj.h
+++ b/src/mongo/db/jsobj.h
@@ -42,13 +42,13 @@
#include "mongo/platform/basic.h"
-#include "mongo/bson/util/builder.h"
-#include "mongo/bson/timestamp.h"
-#include "mongo/bson/bsontypes.h"
-#include "mongo/bson/oid.h"
+#include "mongo/base/string_data.h"
#include "mongo/bson/bsonelement.h"
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/bsontypes.h"
+#include "mongo/bson/oid.h"
#include "mongo/bson/ordering.h"
-#include "mongo/base/string_data.h"
+#include "mongo/bson/timestamp.h"
+#include "mongo/bson/util/builder.h"
diff --git a/src/mongo/db/keypattern.cpp b/src/mongo/db/keypattern.cpp
index 9ef77deb639..2c1bf09497d 100644
--- a/src/mongo/db/keypattern.cpp
+++ b/src/mongo/db/keypattern.cpp
@@ -69,7 +69,8 @@ BSONObj KeyPattern::extendRangeBound(const BSONObj& bound, bool makeUpperInclusi
BSONElement patElt = pat.next();
massert(16634,
str::stream() << "field names of bound " << bound
- << " do not match those of keyPattern " << _pattern,
+ << " do not match those of keyPattern "
+ << _pattern,
str::equals(srcElt.fieldName(), patElt.fieldName()));
newBound.append(srcElt);
}
diff --git a/src/mongo/db/keypattern_test.cpp b/src/mongo/db/keypattern_test.cpp
index 300843ecfea..4b83648fa7d 100644
--- a/src/mongo/db/keypattern_test.cpp
+++ b/src/mongo/db/keypattern_test.cpp
@@ -122,10 +122,12 @@ TEST(KeyPattern, GlobalMinMax) {
BSON("a" << MAXKEY << "b" << MINKEY));
ASSERT_EQUALS(KeyPattern(BSON("a"
- << "hashed")).globalMin(),
+ << "hashed"))
+ .globalMin(),
BSON("a" << MINKEY));
ASSERT_EQUALS(KeyPattern(BSON("a"
- << "hashed")).globalMax(),
+ << "hashed"))
+ .globalMax(),
BSON("a" << MAXKEY));
//
diff --git a/src/mongo/db/matcher/expression.cpp b/src/mongo/db/matcher/expression.cpp
index 187050c5eb5..76ff3261dbb 100644
--- a/src/mongo/db/matcher/expression.cpp
+++ b/src/mongo/db/matcher/expression.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/matcher/expression.h"
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/expression.h b/src/mongo/db/matcher/expression.h
index ad10d17621c..515e25ab5e1 100644
--- a/src/mongo/db/matcher/expression.h
+++ b/src/mongo/db/matcher/expression.h
@@ -35,8 +35,8 @@
#include "mongo/base/status.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/db/matcher/matchable.h"
#include "mongo/db/matcher/match_details.h"
+#include "mongo/db/matcher/matchable.h"
#include "mongo/stdx/memory.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_algo_test.cpp b/src/mongo/db/matcher/expression_algo_test.cpp
index 73d3a5b8b88..95e7bcd62fc 100644
--- a/src/mongo/db/matcher/expression_algo_test.cpp
+++ b/src/mongo/db/matcher/expression_algo_test.cpp
@@ -73,9 +73,10 @@ TEST(ExpressionAlgoIsSubsetOf, NullAndOmittedField) {
// an Undefined type.
BSONObj undefined = fromjson("{a: undefined}");
const CollatorInterface* collator = nullptr;
- ASSERT_EQUALS(ErrorCodes::BadValue,
- MatchExpressionParser::parse(
- undefined, ExtensionsCallbackDisallowExtensions(), collator).getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::BadValue,
+ MatchExpressionParser::parse(undefined, ExtensionsCallbackDisallowExtensions(), collator)
+ .getStatus());
ParsedMatchExpression empty("{}");
ParsedMatchExpression null("{a: null}");
@@ -839,11 +840,11 @@ TEST(SplitMatchExpression, ComplexMatchExpressionSplitsCorrectly) {
splitExpr.second->serialize(&secondBob);
ASSERT_EQUALS(firstBob.obj(), fromjson("{$or: [{'a.b': {$eq: 3}}, {'a.b.c': {$eq: 4}}]}"));
- ASSERT_EQUALS(secondBob.obj(),
- fromjson(
- "{$and: [{$nor: [{$and: [{x: {$size: 2}}]}]}, {$nor: [{x: {$gt: 4}}, {$and: "
- "[{$nor: [{$and: [{x: "
- "{$eq: 1}}]}]}, {y: {$eq: 3}}]}]}]}"));
+ ASSERT_EQUALS(
+ secondBob.obj(),
+ fromjson("{$and: [{$nor: [{$and: [{x: {$size: 2}}]}]}, {$nor: [{x: {$gt: 4}}, {$and: "
+ "[{$nor: [{$and: [{x: "
+ "{$eq: 1}}]}]}, {y: {$eq: 3}}]}]}]}"));
}
TEST(MapOverMatchExpression, DoesMapOverLogicalNodes) {
@@ -909,9 +910,9 @@ TEST(MapOverMatchExpression, DoesMapOverNodesWithMultipleChildren) {
ASSERT_OK(swMatchExpression.getStatus());
size_t nodeCount = 0;
- expression::mapOver(swMatchExpression.getValue().get(),
- [&nodeCount](MatchExpression* expression, std::string path)
- -> void { ++nodeCount; });
+ expression::mapOver(
+ swMatchExpression.getValue().get(),
+ [&nodeCount](MatchExpression* expression, std::string path) -> void { ++nodeCount; });
ASSERT_EQ(nodeCount, 3U);
}
diff --git a/src/mongo/db/matcher/expression_array.h b/src/mongo/db/matcher/expression_array.h
index a51f6c7ade5..0ade4faeab0 100644
--- a/src/mongo/db/matcher/expression_array.h
+++ b/src/mongo/db/matcher/expression_array.h
@@ -33,8 +33,8 @@
#include <vector>
#include "mongo/base/status.h"
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_leaf.h"
diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp
index 222bece16a4..9970384577b 100644
--- a/src/mongo/db/matcher/expression_geo.cpp
+++ b/src/mongo/db/matcher/expression_geo.cpp
@@ -30,11 +30,11 @@
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault
-#include "mongo/platform/basic.h"
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/geo/geoparser.h"
-#include "mongo/util/mongoutils/str.h"
+#include "mongo/platform/basic.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
@@ -133,8 +133,8 @@ Status GeoExpression::parseFrom(const BSONObj& obj) {
if (GeoExpression::INTERSECT == predicate) {
if (!geoContainer->supportsProject(SPHERE)) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "$geoIntersect not supported with provided geometry: " << obj);
+ str::stream() << "$geoIntersect not supported with provided geometry: "
+ << obj);
}
geoContainer->projectInto(SPHERE);
}
@@ -219,7 +219,8 @@ Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
return Status(ErrorCodes::BadValue,
mongoutils::str::stream()
<< "geo near accepts just one argument when querying for a GeoJSON "
- << "point. Extra field found: " << objIt.next());
+ << "point. Extra field found: "
+ << objIt.next());
}
// Parse "new" near:
@@ -231,8 +232,8 @@ Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
BSONObj::MatchType matchType = static_cast<BSONObj::MatchType>(e.getGtLtOp());
if (BSONObj::opNEAR != matchType) {
return Status(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "invalid geo near query operator: " << e.fieldName());
+ mongoutils::str::stream() << "invalid geo near query operator: "
+ << e.fieldName());
}
// Iterate over the argument.
@@ -247,7 +248,9 @@ Status GeoNearExpression::parseNewQuery(const BSONObj& obj) {
return Status(ErrorCodes::BadValue,
str::stream()
<< "invalid point in geo near query $geometry argument: "
- << embeddedObj << " " << status.reason());
+ << embeddedObj
+ << " "
+ << status.reason());
}
uassert(16681,
"$near requires geojson point, given " + embeddedObj.toString(),
diff --git a/src/mongo/db/matcher/expression_geo_test.cpp b/src/mongo/db/matcher/expression_geo_test.cpp
index 0bc96f33e7f..52ed8ac77e1 100644
--- a/src/mongo/db/matcher/expression_geo_test.cpp
+++ b/src/mongo/db/matcher/expression_geo_test.cpp
@@ -34,9 +34,9 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
-#include "mongo/db/matcher/matcher.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_geo.h"
+#include "mongo/db/matcher/matcher.h"
#include "mongo/stdx/memory.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp
index 04529e40e48..26a3e7e4846 100644
--- a/src/mongo/db/matcher/expression_leaf.cpp
+++ b/src/mongo/db/matcher/expression_leaf.cpp
@@ -31,11 +31,11 @@
#include "mongo/db/matcher/expression_leaf.h"
#include <cmath>
-#include <unordered_map>
#include <pcrecpp.h>
+#include <unordered_map>
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/config.h"
#include "mongo/db/field_ref.h"
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/db/matcher/expression_leaf.h b/src/mongo/db/matcher/expression_leaf.h
index 21d0b63365d..bb7b1f54c2f 100644
--- a/src/mongo/db/matcher/expression_leaf.h
+++ b/src/mongo/db/matcher/expression_leaf.h
@@ -32,8 +32,8 @@
#include <unordered_map>
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/db/matcher/expression_leaf_test.cpp b/src/mongo/db/matcher/expression_leaf_test.cpp
index a1a1fe6cb7d..5abec471bcb 100644
--- a/src/mongo/db/matcher/expression_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_leaf_test.cpp
@@ -32,9 +32,9 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
-#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_leaf.h"
+#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_parser.cpp b/src/mongo/db/matcher/expression_parser.cpp
index e0b3b86fd62..97af471a317 100644
--- a/src/mongo/db/matcher/expression_parser.cpp
+++ b/src/mongo/db/matcher/expression_parser.cpp
@@ -358,8 +358,8 @@ StatusWithMatchExpression MatchExpressionParser::_parse(const BSONObj& obj, int
root->add(eq.release());
} else {
return {Status(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "unknown top level operator: " << e.fieldName())};
+ mongoutils::str::stream() << "unknown top level operator: "
+ << e.fieldName())};
}
continue;
diff --git a/src/mongo/db/matcher/expression_parser.h b/src/mongo/db/matcher/expression_parser.h
index 823412ecc73..3c150ef9c9d 100644
--- a/src/mongo/db/matcher/expression_parser.h
+++ b/src/mongo/db/matcher/expression_parser.h
@@ -160,6 +160,7 @@ private:
};
typedef stdx::function<StatusWithMatchExpression(
- const char* name, int type, const BSONObj& section)> MatchExpressionParserGeoCallback;
+ const char* name, int type, const BSONObj& section)>
+ MatchExpressionParserGeoCallback;
extern MatchExpressionParserGeoCallback expressionParserGeoCallback;
}
diff --git a/src/mongo/db/matcher/expression_parser_array_test.cpp b/src/mongo/db/matcher/expression_parser_array_test.cpp
index e37f6e8adc6..2a9d0467ce3 100644
--- a/src/mongo/db/matcher/expression_parser_array_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_array_test.cpp
@@ -214,12 +214,16 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef1) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << BSON("$eq" << match)));
@@ -237,12 +241,16 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef2) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << match));
@@ -261,11 +269,17 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef3) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "foo" << 12345);
+ << "$id"
+ << oid
+ << "foo"
+ << 12345);
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oidx << "foo" << 12345);
+ << "$id"
+ << oidx
+ << "foo"
+ << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << match));
const CollatorInterface* collator = nullptr;
@@ -278,10 +292,14 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef3) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "foo"
+ << 12345
+ << "bar"
+ << 678)))));
}
// Query with DBRef fields out of order.
@@ -289,16 +307,22 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef4) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db");
BSONObj matchOutOfOrder = BSON("$db"
<< "db"
- << "$id" << oid << "$ref"
+ << "$id"
+ << oid
+ << "$ref"
<< "coll");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db");
BSONObj query = BSON("x" << BSON("$elemMatch" << matchOutOfOrder));
@@ -318,13 +342,19 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef5) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "foo" << 12345);
+ << "$id"
+ << oid
+ << "foo"
+ << 12345);
BSONObj matchOutOfOrder = BSON("foo" << 12345 << "$id" << oid << "$ref"
<< "coll");
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oidx << "foo" << 12345);
+ << "$id"
+ << oidx
+ << "foo"
+ << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchOutOfOrder));
const CollatorInterface* collator = nullptr;
@@ -337,10 +367,14 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef5) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "foo"
+ << 12345
+ << "bar"
+ << 678)))));
}
// Incomplete DBRef - $id missing.
@@ -348,13 +382,20 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef6) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "foo" << 12345);
+ << "$id"
+ << oid
+ << "foo"
+ << 12345);
BSONObj matchMissingID = BSON("$ref"
<< "coll"
- << "foo" << 12345);
+ << "foo"
+ << 12345);
BSONObj notMatch = BSON("$ref"
<< "collx"
- << "$id" << oid << "foo" << 12345);
+ << "$id"
+ << oid
+ << "foo"
+ << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchMissingID));
const CollatorInterface* collator = nullptr;
@@ -367,10 +408,14 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef6) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "foo"
+ << 12345
+ << "bar"
+ << 678)))));
}
// Incomplete DBRef - $ref missing.
@@ -378,12 +423,18 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef7) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "foo" << 12345);
+ << "$id"
+ << oid
+ << "foo"
+ << 12345);
BSONObj matchMissingRef = BSON("$id" << oid << "foo" << 12345);
OID oidx = OID::gen();
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oidx << "foo" << 12345);
+ << "$id"
+ << oidx
+ << "foo"
+ << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchMissingRef));
const CollatorInterface* collator = nullptr;
@@ -396,10 +447,14 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef7) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id" << oid << "foo" << 12345 << "bar" << 678)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "foo"
+ << 12345
+ << "bar"
+ << 678)))));
}
// Incomplete DBRef - $db only.
@@ -407,17 +462,24 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef8) {
OID oid = OID::gen();
BSONObj match = BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db"
- << "foo" << 12345);
+ << "foo"
+ << 12345);
BSONObj matchDBOnly = BSON("$db"
<< "db"
- << "foo" << 12345);
+ << "foo"
+ << 12345);
BSONObj notMatch = BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "dbx"
- << "foo" << 12345);
+ << "foo"
+ << 12345);
BSONObj query = BSON("x" << BSON("$elemMatch" << matchDBOnly));
const CollatorInterface* collator = nullptr;
@@ -430,12 +492,16 @@ TEST(MatchExpressionParserArrayTest, ElemMatchDBRef8) {
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(match))));
// Document contains fields not referred to in $elemMatch query.
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id" << oid << "$db"
- << "db"
- << "foo" << 12345 << "bar" << 678)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "$db"
+ << "db"
+ << "foo"
+ << 12345
+ << "bar"
+ << 678)))));
}
TEST(MatchExpressionParserArrayTest, All1) {
diff --git a/src/mongo/db/matcher/expression_parser_leaf_test.cpp b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
index b0897f8468e..d644a2c9e31 100644
--- a/src/mongo/db/matcher/expression_parser_leaf_test.cpp
+++ b/src/mongo/db/matcher/expression_parser_leaf_test.cpp
@@ -446,7 +446,9 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
OID oid = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db"))));
const CollatorInterface* collator = nullptr;
StatusWithMatchExpression result =
@@ -456,11 +458,15 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
<< "coll"
@@ -476,28 +482,39 @@ TEST(MatchExpressionParserLeafTest, INSingleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "dbx"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$db"
<< "db"
<< "$ref"
<< "coll"
- << "$id" << oid))));
+ << "$id"
+ << oid))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db")))));
}
@@ -506,11 +523,15 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
OID oidy = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id" << oidy << "$db"
+ << "$id"
+ << oidy
+ << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db"))));
const CollatorInterface* collator = nullptr;
StatusWithMatchExpression result =
@@ -520,11 +541,15 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$id" << oid << "$ref"
<< "coll"
@@ -532,11 +557,15 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db"))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id" << oidy << "$db"
+ << "$id"
+ << oidy
+ << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oid << "$ref"
<< "coll"
@@ -544,7 +573,9 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "dbx")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$id" << oidy << "$ref"
<< "colly"
@@ -552,59 +583,87 @@ TEST(MatchExpressionParserLeafTest, INMultipleDBRef) {
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")
<< BSON("$ref"
<< "colly"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")))));
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "dbx")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "colly"
- << "$id" << oidy << "$db"
+ << "$id"
+ << oidy
+ << "$db"
<< "db"))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "colly"
- << "$id" << oidy << "$db"
+ << "$id"
+ << oidy
+ << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")
<< BSON("$ref"
<< "coll"
- << "$id" << oid << "$db"
+ << "$id"
+ << oid
+ << "$db"
<< "db")))));
ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
<< "collx"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db")
<< BSON("$ref"
<< "colly"
- << "$id" << oidy << "$db"
+ << "$id"
+ << oidy
+ << "$db"
<< "db")))));
}
@@ -612,7 +671,10 @@ TEST(MatchExpressionParserLeafTest, INDBRefWithOptionalField1) {
OID oid = OID::gen();
BSONObj query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$id" << oid << "foo" << 12345))));
+ << "$id"
+ << oid
+ << "foo"
+ << 12345))));
const CollatorInterface* collator = nullptr;
StatusWithMatchExpression result =
MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator);
@@ -621,19 +683,28 @@ TEST(MatchExpressionParserLeafTest, INDBRefWithOptionalField1) {
OID oidx = OID::gen();
ASSERT(!result.getValue()->matchesBSON(BSON("x" << BSON("$ref"
<< "coll"
- << "$id" << oidx << "$db"
+ << "$id"
+ << oidx
+ << "$db"
<< "db"))));
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "coll"
- << "$id" << oid << "foo" << 12345)))));
- ASSERT(result.getValue()->matchesBSON(
- BSON("x" << BSON_ARRAY(BSON("$ref"
- << "collx"
- << "$id" << oidx << "foo" << 12345)
- << BSON("$ref"
- << "coll"
- << "$id" << oid << "foo" << 12345)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "foo"
+ << 12345)))));
+ ASSERT(result.getValue()->matchesBSON(BSON("x" << BSON_ARRAY(BSON("$ref"
+ << "collx"
+ << "$id"
+ << oidx
+ << "foo"
+ << 12345)
+ << BSON("$ref"
+ << "coll"
+ << "$id"
+ << oid
+ << "foo"
+ << 12345)))));
}
TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
@@ -648,7 +719,8 @@ TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
// second field is not $id
query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$ref"
<< "coll"
- << "$foo" << 1))));
+ << "$foo"
+ << 1))));
result = MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator);
ASSERT_FALSE(result.isOK());
@@ -662,7 +734,8 @@ TEST(MatchExpressionParserLeafTest, INInvalidDBRefs) {
// missing $id and $ref field
query = BSON("x" << BSON("$in" << BSON_ARRAY(BSON("$db"
<< "test"
- << "foo" << 3))));
+ << "foo"
+ << 3))));
result = MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator);
ASSERT_FALSE(result.isOK());
}
@@ -1011,20 +1084,25 @@ TEST(MatchExpressionParserLeafTest, TypeBadString) {
const CollatorInterface* collator = nullptr;
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$type: null}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$type: true}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$type: {}}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(fromjson("{a: {$type: ObjectId('000000000000000000000000')}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$type: []}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
TEST(MatchExpressionParserLeafTest, TypeStringnameDouble) {
@@ -1198,59 +1276,75 @@ TEST(MatchExpressionParserTest, BitTestMatchExpressionValidMask) {
const CollatorInterface* collator = nullptr;
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << 54)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllSet" << std::numeric_limits<long long>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << k2Power53)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << k2Power53 - 1)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllClear" << 54)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllClear" << std::numeric_limits<long long>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllClear" << k2Power53)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllClear" << k2Power53 - 1)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnySet" << 54)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnySet" << std::numeric_limits<long long>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnySet" << k2Power53)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnySet" << k2Power53 - 1)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnyClear" << 54)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnyClear" << std::numeric_limits<long long>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnyClear" << k2Power53)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnyClear" << k2Power53 - 1)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
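
All of the masks above are checked only for parse status. A parsed bit-test can also be probed directly with matchesBSON(); a minimal sketch, assuming mask 1 ($bitsAllSet on bit 0) behaves as documented:

    auto result = MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << 1)),
                                               ExtensionsCallbackDisallowExtensions(),
                                               collator);
    ASSERT_OK(result.getStatus());
    ASSERT(result.getValue()->matchesBSON(BSON("a" << 1)));   // bit 0 set: matches
    ASSERT(!result.getValue()->matchesBSON(BSON("a" << 2)));  // bit 0 clear: no match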
TEST(MatchExpressionParserTest, BitTestMatchExpressionValidArray) {
@@ -1263,63 +1357,79 @@ TEST(MatchExpressionParserTest, BitTestMatchExpressionValidArray) {
const CollatorInterface* collator = nullptr;
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << BSON_ARRAY(0))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllSet" << BSON_ARRAY(0 << 1 << 2 << 3))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << bsonArrayLongLong)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllSet" << BSON_ARRAY(std::numeric_limits<int>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllClear" << BSON_ARRAY(0))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllClear" << BSON_ARRAY(0 << 1 << 2 << 3))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllClear" << bsonArrayLongLong)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllClear" << BSON_ARRAY(std::numeric_limits<int>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnySet" << BSON_ARRAY(0))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnySet" << BSON_ARRAY(0 << 1 << 2 << 3))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnySet" << bsonArrayLongLong)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnySet" << BSON_ARRAY(std::numeric_limits<int>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnyClear" << BSON_ARRAY(0))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnyClear" << BSON_ARRAY(0 << 1 << 2 << 3))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnyClear" << bsonArrayLongLong)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnyClear" << BSON_ARRAY(std::numeric_limits<int>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
TEST(MatchExpressionParserTest, BitTestMatchExpressionValidBinData) {
@@ -1328,94 +1438,117 @@ TEST(MatchExpressionParserTest, BitTestMatchExpressionValidBinData) {
MatchExpressionParser::parse(
fromjson("{a: {$bitsAllSet: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(
MatchExpressionParser::parse(
fromjson(
"{a: {$bitsAllClear: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(
MatchExpressionParser::parse(
fromjson("{a: {$bitsAnySet: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_OK(
MatchExpressionParser::parse(
fromjson(
"{a: {$bitsAnyClear: {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
TEST(MatchExpressionParserTest, BitTestMatchExpressionInvalidMaskType) {
const CollatorInterface* collator = nullptr;
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: null}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: true}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: {}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: ''}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: null}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: true}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: {}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: ''}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
fromjson("{a: {$bitsAllClear: ObjectId('000000000000000000000000')}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: null}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: true}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: {}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: ''}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
fromjson("{a: {$bitsAnySet: ObjectId('000000000000000000000000')}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: null}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: true}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: {}}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: ''}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
fromjson("{a: {$bitsAnyClear: ObjectId('000000000000000000000000')}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
TEST(MatchExpressionParserTest, BitTestMatchExpressionInvalidMaskValue) {
@@ -1424,296 +1557,376 @@ TEST(MatchExpressionParserTest, BitTestMatchExpressionInvalidMaskValue) {
const CollatorInterface* collator = nullptr;
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: NaN}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: -54}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllSet" << std::numeric_limits<double>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllSet" << kLongLongMaxAsDouble)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: 2.5}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: NaN}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: -54}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllClear" << std::numeric_limits<double>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(BSON("a" << BSON("$bitsAllClear" << kLongLongMaxAsDouble)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: 2.5}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: NaN}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: -54}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnySet" << std::numeric_limits<double>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnySet" << kLongLongMaxAsDouble)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: 2.5}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: NaN}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: -54}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnyClear" << std::numeric_limits<double>::max())),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(BSON("a" << BSON("$bitsAnyClear" << kLongLongMaxAsDouble)),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: 2.5}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
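
Taken together, the cases above reject NaN, negative values, fractional doubles, and doubles that cannot be represented exactly as a signed 64-bit integer. By the same rule a whole-valued double such as 2.0 should be accepted, as the k2Power53 cases in the valid-mask test suggest; a hedged sketch of that assumption (not asserted by this commit):

    ASSERT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: 2.0}}"),
                                           ExtensionsCallbackDisallowExtensions(),
                                           collator)
                  .getStatus());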
TEST(MatchExpressionParserTest, BitTestMatchExpressionInvalidArray) {
const CollatorInterface* collator = nullptr;
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [null]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [true]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: ['']}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [{}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [[]]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [-1]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
fromjson(
"{a: {$bitsAllSet: [{$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [null]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [true]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: ['']}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [{}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [[]]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [-1]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
fromjson(
"{a: {$bitsAllClear: [{$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [null]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [true]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: ['']}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [{}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [[]]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [-1]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
fromjson(
"{a: {$bitsAnySet: [{$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [null]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [true]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: ['']}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [{}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [[]]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [-1]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
fromjson(
"{a: {$bitsAnyClear: [{$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
TEST(MatchExpressionParserTest, BitTestMatchExpressionInvalidArrayValue) {
const CollatorInterface* collator = nullptr;
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [-54]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [NaN]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllSet: [-1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllSet" << BSON_ARRAY(std::numeric_limits<long long>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllSet" << BSON_ARRAY(std::numeric_limits<long long>::min()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [-54]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [NaN]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAllClear: [-1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllClear" << BSON_ARRAY(std::numeric_limits<long long>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAllClear" << BSON_ARRAY(std::numeric_limits<long long>::min()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [-54]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [NaN]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnySet: [-1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnySet" << BSON_ARRAY(std::numeric_limits<long long>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnySet" << BSON_ARRAY(std::numeric_limits<long long>::min()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [-54]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [NaN]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [2.5]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(MatchExpressionParser::parse(fromjson("{a: {$bitsAnyClear: [-1e100]}}"),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnyClear" << BSON_ARRAY(std::numeric_limits<long long>::max()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
ASSERT_NOT_OK(
MatchExpressionParser::parse(
BSON("a" << BSON("$bitsAnyClear" << BSON_ARRAY(std::numeric_limits<long long>::min()))),
ExtensionsCallbackDisallowExtensions(),
- collator).getStatus());
+ collator)
+ .getStatus());
}
}
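
Nearly every assertion in this file repeats the same parse-then-getStatus idiom. A hypothetical helper (not present in the tree) that would make the pattern explicit:

    static Status parseStatus(const BSONObj& query) {
        const CollatorInterface* collator = nullptr;
        return MatchExpressionParser::parse(
                   query, ExtensionsCallbackDisallowExtensions(), collator)
            .getStatus();
    }
    // Usage: ASSERT_OK(parseStatus(fromjson("{a: {$bitsAllSet: 1}}")));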
diff --git a/src/mongo/db/matcher/expression_serialization_test.cpp b/src/mongo/db/matcher/expression_serialization_test.cpp
index 3a764e420cd..ce321f326d7 100644
--- a/src/mongo/db/matcher/expression_serialization_test.cpp
+++ b/src/mongo/db/matcher/expression_serialization_test.cpp
@@ -666,19 +666,18 @@ TEST(SerializeBasic, ExpressionNotWithRegexValueAndOptionsSerializesCorrectly) {
TEST(SerializeBasic, ExpressionNotWithGeoSerializesCorrectly) {
const CollatorInterface* collator = nullptr;
- Matcher original(fromjson(
- "{x: {$not: {$geoIntersects: {$geometry: {type: 'Polygon', "
- "coordinates: [[[0,0], [5,0], "
- "[5, 5], [0, 5], [0, 0]]]}}}}}"),
+ Matcher original(fromjson("{x: {$not: {$geoIntersects: {$geometry: {type: 'Polygon', "
+ "coordinates: [[[0,0], [5,0], "
+ "[5, 5], [0, 5], [0, 0]]]}}}}}"),
ExtensionsCallbackNoop(),
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{$nor: [{$and: [{x: {$geoIntersects: {$geometry: {type: 'Polygon', coordinates: "
- "[[[0,0], "
- "[5,0], [5, 5], [0, 5], [0, 0]]]}}}}]}]}"));
+ ASSERT_EQ(
+ *reserialized.getQuery(),
+ fromjson("{$nor: [{$and: [{x: {$geoIntersects: {$geometry: {type: 'Polygon', coordinates: "
+ "[[[0,0], "
+ "[5,0], [5, 5], [0, 5], [0, 0]]]}}}}]}]}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj =
@@ -803,10 +802,10 @@ TEST(SerializeBasic, ExpressionGeoWithinSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{x: {$geoWithin: {$geometry: {type: 'Polygon', coordinates: [[[0,0], [10,0], "
- "[10, 10], [0, 10], [0, 0]]]}}}}"));
+ ASSERT_EQ(
+ *reserialized.getQuery(),
+ fromjson("{x: {$geoWithin: {$geometry: {type: 'Polygon', coordinates: [[[0,0], [10,0], "
+ "[10, 10], [0, 10], [0, 0]]]}}}}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj = fromjson("{x: {type: 'Point', coordinates: [5, 5]}}");
@@ -826,10 +825,10 @@ TEST(SerializeBasic, ExpressionGeoIntersectsSerializesCorrectly) {
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{x: {$geoIntersects: {$geometry: {type: 'Polygon', coordinates: [[[0,0], [5,0], "
- "[5, 5], [0, 5], [0, 0]]]}}}}"));
+ ASSERT_EQ(
+ *reserialized.getQuery(),
+ fromjson("{x: {$geoIntersects: {$geometry: {type: 'Polygon', coordinates: [[[0,0], [5,0], "
+ "[5, 5], [0, 5], [0, 0]]]}}}}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
BSONObj obj =
@@ -849,17 +848,16 @@ TEST(SerializeBasic, ExpressionGeoIntersectsSerializesCorrectly) {
TEST(SerializeBasic, ExpressionNearSerializesCorrectly) {
const CollatorInterface* collator = nullptr;
Matcher original(
- fromjson(
- "{x: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 10, "
- "$minDistance: 1}}}"),
+ fromjson("{x: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 10, "
+ "$minDistance: 1}}}"),
ExtensionsCallbackNoop(),
collator);
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
- ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{x: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 10, "
- "$minDistance: 1}}}"));
+ ASSERT_EQ(
+ *reserialized.getQuery(),
+ fromjson("{x: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}, $maxDistance: 10, "
+ "$minDistance: 1}}}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
@@ -874,9 +872,8 @@ TEST(SerializeBasic, ExpressionNearSphereSerializesCorrectly) {
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{x: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0, 0]}, "
- "$maxDistance: 10, $minDistance: 1}}}"));
+ fromjson("{x: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0, 0]}, "
+ "$maxDistance: 10, $minDistance: 1}}}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
@@ -888,9 +885,8 @@ TEST(SerializeBasic, ExpressionTextSerializesCorrectly) {
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{$text: {$search: 'a', $language: 'en', $caseSensitive: true, "
- "$diacriticSensitive: false}}"));
+ fromjson("{$text: {$search: 'a', $language: 'en', $caseSensitive: true, "
+ "$diacriticSensitive: false}}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
@@ -902,9 +898,8 @@ TEST(SerializeBasic, ExpressionTextWithDefaultLanguageSerializesCorrectly) {
Matcher reserialized(
serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
ASSERT_EQ(*reserialized.getQuery(),
- fromjson(
- "{$text: {$search: 'a', $language: '', $caseSensitive: false, "
- "$diacriticSensitive: false}}"));
+ fromjson("{$text: {$search: 'a', $language: '', $caseSensitive: false, "
+ "$diacriticSensitive: false}}"));
ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));
}
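
Each serialization test above is an instance of one round-trip shape: parse, serialize, reparse, then check that serialization has reached a fixed point. A minimal sketch of that shape with a hypothetical query:

    const CollatorInterface* collator = nullptr;
    Matcher original(fromjson("{x: {$gt: 1}}"), ExtensionsCallbackNoop(), collator);
    Matcher reserialized(
        serialize(original.getMatchExpression()), ExtensionsCallbackNoop(), collator);
    ASSERT_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression()));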
diff --git a/src/mongo/db/matcher/expression_text.cpp b/src/mongo/db/matcher/expression_text.cpp
index 0a95a6e798d..8c8bd979cf8 100644
--- a/src/mongo/db/matcher/expression_text.cpp
+++ b/src/mongo/db/matcher/expression_text.cpp
@@ -59,13 +59,15 @@ Status TextMatchExpression::init(OperationContext* txn,
if (!db) {
return {ErrorCodes::IndexNotFound,
str::stream() << "text index required for $text query (no such collection '"
- << nss.ns() << "')"};
+ << nss.ns()
+ << "')"};
}
Collection* collection = db->getCollection(nss);
if (!collection) {
return {ErrorCodes::IndexNotFound,
str::stream() << "text index required for $text query (no such collection '"
- << nss.ns() << "')"};
+ << nss.ns()
+ << "')"};
}
std::vector<IndexDescriptor*> idxMatches;
collection->getIndexCatalog()->findIndexByType(txn, IndexNames::TEXT, idxMatches);
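
Both error returns above stream the namespace into the message before wrapping it in a Status. The same construction stands alone; a minimal sketch with a hypothetical namespace value:

    NamespaceString nss("test.coll");  // hypothetical
    Status status{ErrorCodes::IndexNotFound,
                  str::stream() << "text index required for $text query (no such collection '"
                                << nss.ns()
                                << "')"};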
diff --git a/src/mongo/db/matcher/expression_text_base.cpp b/src/mongo/db/matcher/expression_text_base.cpp
index afed1a33e6f..625107d4aca 100644
--- a/src/mongo/db/matcher/expression_text_base.cpp
+++ b/src/mongo/db/matcher/expression_text_base.cpp
@@ -58,8 +58,10 @@ void TextMatchExpressionBase::serialize(BSONObjBuilder* out) const {
const fts::FTSQuery& ftsQuery = getFTSQuery();
out->append("$text",
BSON("$search" << ftsQuery.getQuery() << "$language" << ftsQuery.getLanguage()
- << "$caseSensitive" << ftsQuery.getCaseSensitive()
- << "$diacriticSensitive" << ftsQuery.getDiacriticSensitive()));
+ << "$caseSensitive"
+ << ftsQuery.getCaseSensitive()
+ << "$diacriticSensitive"
+ << ftsQuery.getDiacriticSensitive()));
}
bool TextMatchExpressionBase::equivalent(const MatchExpression* other) const {
diff --git a/src/mongo/db/matcher/expression_tree.cpp b/src/mongo/db/matcher/expression_tree.cpp
index 02da8991465..7cd2f8b2d60 100644
--- a/src/mongo/db/matcher/expression_tree.cpp
+++ b/src/mongo/db/matcher/expression_tree.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/matcher/expression_tree.h"
-#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_tree_test.cpp b/src/mongo/db/matcher/expression_tree_test.cpp
index 9799f25ccc4..8ede86c0f0d 100644
--- a/src/mongo/db/matcher/expression_tree_test.cpp
+++ b/src/mongo/db/matcher/expression_tree_test.cpp
@@ -33,8 +33,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression.h"
-#include "mongo/db/matcher/expression_tree.h"
#include "mongo/db/matcher/expression_leaf.h"
+#include "mongo/db/matcher/expression_tree.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/expression_where.cpp b/src/mongo/db/matcher/expression_where.cpp
index 63b705f6127..1e24dbc2501 100644
--- a/src/mongo/db/matcher/expression_where.cpp
+++ b/src/mongo/db/matcher/expression_where.cpp
@@ -34,11 +34,11 @@
#include "mongo/base/init.h"
#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/namespace_string.h"
#include "mongo/db/client.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_parser.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/scripting/engine.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/db/matcher/matchable.cpp b/src/mongo/db/matcher/matchable.cpp
index bb5671ea801..b64ebdd572a 100644
--- a/src/mongo/db/matcher/matchable.cpp
+++ b/src/mongo/db/matcher/matchable.cpp
@@ -28,9 +28,9 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
-#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/matchable.h"
+#include "mongo/db/jsobj.h"
+#include "mongo/platform/basic.h"
namespace mongo {
diff --git a/src/mongo/db/matcher/matcher.cpp b/src/mongo/db/matcher/matcher.cpp
index 6fa7eec44b5..faf0f6f1170 100644
--- a/src/mongo/db/matcher/matcher.cpp
+++ b/src/mongo/db/matcher/matcher.cpp
@@ -31,11 +31,11 @@
#include "mongo/platform/basic.h"
#include "mongo/base/init.h"
+#include "mongo/db/exec/working_set.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/matcher/matcher.h"
#include "mongo/db/matcher/path.h"
-#include "mongo/db/exec/working_set.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/stacktrace.h"
diff --git a/src/mongo/db/matcher/path.cpp b/src/mongo/db/matcher/path.cpp
index 3dd9374faa7..deeb7a75933 100644
--- a/src/mongo/db/matcher/path.cpp
+++ b/src/mongo/db/matcher/path.cpp
@@ -28,10 +28,10 @@
* it in the license file.
*/
-#include "mongo/platform/basic.h"
+#include "mongo/db/matcher/path.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/path_internal.h"
-#include "mongo/db/matcher/path.h"
+#include "mongo/platform/basic.h"
namespace mongo {
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 59ceb56e187..32f88c57d51 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -45,9 +45,9 @@
#include "mongo/db/server_options.h"
#include "mongo/db/server_options_helpers.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
-#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/logger/console_appender.h"
#include "mongo/logger/message_event_utf8_encoder.h"
+#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/net/ssl_options.h"
@@ -106,13 +106,13 @@ Status addMongodOptions(moe::OptionSection* options) {
// Way to enable or disable auth in JSON Config
general_options
.addOptionChaining(
- "security.authorization",
- "",
- moe::String,
- "How the database behaves with respect to authorization of clients. "
- "Options are \"disabled\", which means that authorization checks are not "
- "performed, and \"enabled\" which means that a client cannot perform actions it is "
- "not authorized to do.")
+ "security.authorization",
+ "",
+ moe::String,
+ "How the database behaves with respect to authorization of clients. "
+ "Options are \"disabled\", which means that authorization checks are not "
+ "performed, and \"enabled\" which means that a client cannot perform actions it is "
+ "not authorized to do.")
.setSources(moe::SourceYAMLConfig)
.format("(:?disabled)|(:?enabled)", "(disabled/enabled)");
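
Every registration in this function is the same chain hung off addOptionChaining. A minimal sketch of the chain with a hypothetical option name (shown for shape only):

    general_options
        .addOptionChaining(
            "example.flag", "exampleFlag", moe::Switch, "hypothetical switch for illustration")
        .hidden()
        .setSources(moe::SourceAllLegacy);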
@@ -137,35 +137,35 @@ Status addMongodOptions(moe::OptionSection* options) {
// Diagnostic Options
- general_options.addOptionChaining("diaglog",
- "diaglog",
- moe::Int,
- "DEPRECATED: 0=off 1=W 2=R 3=both 7=W+some reads")
+ general_options
+ .addOptionChaining(
+ "diaglog", "diaglog", moe::Int, "DEPRECATED: 0=off 1=W 2=R 3=both 7=W+some reads")
.hidden()
.setSources(moe::SourceAllLegacy);
- general_options.addOptionChaining("operationProfiling.slowOpThresholdMs",
- "slowms",
- moe::Int,
- "value of slow for profile and console log")
+ general_options
+ .addOptionChaining("operationProfiling.slowOpThresholdMs",
+ "slowms",
+ moe::Int,
+ "value of slow for profile and console log")
.setDefault(moe::Value(100));
general_options.addOptionChaining("profile", "profile", moe::Int, "0=off 1=slow, 2=all")
.setSources(moe::SourceAllLegacy);
- general_options.addOptionChaining(
- "operationProfiling.mode", "", moe::String, "(off/slowOp/all)")
+ general_options
+ .addOptionChaining("operationProfiling.mode", "", moe::String, "(off/slowOp/all)")
.setSources(moe::SourceYAMLConfig)
.format("(:?off)|(:?slowOp)|(:?all)", "(off/slowOp/all)");
- general_options.addOptionChaining(
- "cpu", "cpu", moe::Switch, "periodically show cpu and iowait utilization")
+ general_options
+ .addOptionChaining(
+ "cpu", "cpu", moe::Switch, "periodically show cpu and iowait utilization")
.setSources(moe::SourceAllLegacy);
- general_options.addOptionChaining("sysinfo",
- "sysinfo",
- moe::Switch,
- "print some diagnostic system information")
+ general_options
+ .addOptionChaining(
+ "sysinfo", "sysinfo", moe::Switch, "print some diagnostic system information")
.setSources(moe::SourceAllLegacy);
// Storage Options
@@ -201,55 +201,59 @@ Status addMongodOptions(moe::OptionSection* options) {
moe::Switch,
"each database will be stored in a separate directory");
- storage_options.addOptionChaining(
- "storage.queryableBackupMode",
- "queryableBackupMode",
- moe::Switch,
- "enable read-only mode - if true the server will not accept writes.")
+ storage_options
+ .addOptionChaining("storage.queryableBackupMode",
+ "queryableBackupMode",
+ moe::Switch,
+ "enable read-only mode - if true the server will not accept writes.")
.setSources(moe::SourceAll)
.hidden();
- general_options.addOptionChaining(
- "noIndexBuildRetry",
- "noIndexBuildRetry",
- moe::Switch,
- "don't retry any index builds that were interrupted by shutdown")
+ general_options
+ .addOptionChaining("noIndexBuildRetry",
+ "noIndexBuildRetry",
+ moe::Switch,
+ "don't retry any index builds that were interrupted by shutdown")
.setSources(moe::SourceAllLegacy);
- general_options.addOptionChaining(
- "storage.indexBuildRetry",
- "",
- moe::Bool,
- "don't retry any index builds that were interrupted by shutdown")
+ general_options
+ .addOptionChaining("storage.indexBuildRetry",
+ "",
+ moe::Bool,
+ "don't retry any index builds that were interrupted by shutdown")
.setSources(moe::SourceYAMLConfig);
- storage_options.addOptionChaining(
- "noprealloc",
- "noprealloc",
- moe::Switch,
- "disable data file preallocation - will often hurt performance")
+ storage_options
+ .addOptionChaining("noprealloc",
+ "noprealloc",
+ moe::Switch,
+ "disable data file preallocation - will often hurt performance")
.setSources(moe::SourceAllLegacy);
- storage_options.addOptionChaining(
- "storage.mmapv1.preallocDataFiles",
- "",
- moe::Bool,
- "disable data file preallocation - will often hurt performance",
- "storage.preallocDataFiles").setSources(moe::SourceYAMLConfig);
-
- storage_options.addOptionChaining("storage.mmapv1.nsSize",
- "nssize",
- moe::Int,
- ".ns file size (in MB) for new databases",
- "storage.nsSize").setDefault(moe::Value(16));
+ storage_options
+ .addOptionChaining("storage.mmapv1.preallocDataFiles",
+ "",
+ moe::Bool,
+ "disable data file preallocation - will often hurt performance",
+ "storage.preallocDataFiles")
+ .setSources(moe::SourceYAMLConfig);
- storage_options.addOptionChaining(
- "storage.mmapv1.quota.enforced",
- "quota",
- moe::Switch,
- "limits each database to a certain number of files (8 default)",
- "storage.quota.enforced").incompatibleWith("keyFile");
+ storage_options
+ .addOptionChaining("storage.mmapv1.nsSize",
+ "nssize",
+ moe::Int,
+ ".ns file size (in MB) for new databases",
+ "storage.nsSize")
+ .setDefault(moe::Value(16));
+
+ storage_options
+ .addOptionChaining("storage.mmapv1.quota.enforced",
+ "quota",
+ moe::Switch,
+ "limits each database to a certain number of files (8 default)",
+ "storage.quota.enforced")
+ .incompatibleWith("keyFile");
storage_options.addOptionChaining("storage.mmapv1.quota.maxFilesPerDB",
"quotaFiles",
@@ -263,10 +267,11 @@ Status addMongodOptions(moe::OptionSection* options) {
"use a smaller default file size",
"storage.smallFiles");
- storage_options.addOptionChaining("storage.syncPeriodSecs",
- "syncdelay",
- moe::Double,
- "seconds between disk syncs (0=never, but not recommended)")
+ storage_options
+ .addOptionChaining("storage.syncPeriodSecs",
+ "syncdelay",
+ moe::Double,
+ "seconds between disk syncs (0=never, but not recommended)")
.setDefault(moe::Value(60.0));
// Upgrade and repair are disallowed in JSON configs since they trigger very heavyweight
@@ -284,18 +289,19 @@ Status addMongodOptions(moe::OptionSection* options) {
// Javascript Options
- general_options.addOptionChaining(
- "noscripting", "noscripting", moe::Switch, "disable scripting engine")
+ general_options
+ .addOptionChaining("noscripting", "noscripting", moe::Switch, "disable scripting engine")
.setSources(moe::SourceAllLegacy);
- general_options.addOptionChaining(
- "security.javascriptEnabled", "", moe::Bool, "Enable javascript execution")
+ general_options
+ .addOptionChaining(
+ "security.javascriptEnabled", "", moe::Bool, "Enable javascript execution")
.setSources(moe::SourceYAMLConfig);
// Query Options
- general_options.addOptionChaining(
- "notablescan", "notablescan", moe::Switch, "do not allow table scans")
+ general_options
+ .addOptionChaining("notablescan", "notablescan", moe::Switch, "do not allow table scans")
.setSources(moe::SourceAllLegacy);
// Journaling Options
@@ -304,10 +310,11 @@ Status addMongodOptions(moe::OptionSection* options) {
storage_options.addOptionChaining("journal", "journal", moe::Switch, "enable journaling")
.setSources(moe::SourceAllLegacy);
- storage_options.addOptionChaining("nojournal",
- "nojournal",
- moe::Switch,
- "disable journaling (journaling is on by default for 64 bit)")
+ storage_options
+ .addOptionChaining("nojournal",
+ "nojournal",
+ moe::Switch,
+ "disable journaling (journaling is on by default for 64 bit)")
.setSources(moe::SourceAllLegacy);
storage_options.addOptionChaining("dur", "dur", moe::Switch, "enable journaling")
@@ -323,14 +330,16 @@ Status addMongodOptions(moe::OptionSection* options) {
.setSources(moe::SourceYAMLConfig);
// Two ways to set durability diagnostic options. durOptions is deprecated
- storage_options.addOptionChaining("storage.mmapv1.journal.debugFlags",
- "journalOptions",
- moe::Int,
- "journal diagnostic options",
- "storage.journal.debugFlags").incompatibleWith("durOptions");
-
- storage_options.addOptionChaining(
- "durOptions", "durOptions", moe::Int, "durability diagnostic options")
+ storage_options
+ .addOptionChaining("storage.mmapv1.journal.debugFlags",
+ "journalOptions",
+ moe::Int,
+ "journal diagnostic options",
+ "storage.journal.debugFlags")
+ .incompatibleWith("durOptions");
+
+ storage_options
+ .addOptionChaining("durOptions", "durOptions", moe::Int, "durability diagnostic options")
.hidden()
.setSources(moe::SourceAllLegacy)
.incompatibleWith("storage.mmapv1.journal.debugFlags");
@@ -342,10 +351,9 @@ Status addMongodOptions(moe::OptionSection* options) {
"storage.mmapv1.journal.commitIntervalMs");
// Deprecated option that we don't want people to use for performance reasons
- storage_options.addOptionChaining("nopreallocj",
- "nopreallocj",
- moe::Switch,
- "don't preallocate journal files")
+ storage_options
+ .addOptionChaining(
+ "nopreallocj", "nopreallocj", moe::Switch, "don't preallocate journal files")
.hidden()
.setSources(moe::SourceAllLegacy);
@@ -367,33 +375,33 @@ Status addMongodOptions(moe::OptionSection* options) {
.incompatibleWith("replication.replSetName")
.setSources(moe::SourceAllLegacy);
- ms_options.addOptionChaining(
- "source", "source", moe::String, "when slave: specify master as <server:port>")
+ ms_options
+ .addOptionChaining(
+ "source", "source", moe::String, "when slave: specify master as <server:port>")
.incompatibleWith("replication.replSet")
.incompatibleWith("replication.replSetName")
.setSources(moe::SourceAllLegacy);
- ms_options.addOptionChaining("only",
- "only",
- moe::String,
- "when slave: specify a single database to replicate")
+ ms_options
+ .addOptionChaining(
+ "only", "only", moe::String, "when slave: specify a single database to replicate")
.incompatibleWith("replication.replSet")
.incompatibleWith("replication.replSetName")
.setSources(moe::SourceAllLegacy);
- ms_options.addOptionChaining(
- "slavedelay",
- "slavedelay",
- moe::Int,
- "specify delay (in seconds) to be used when applying master ops to slave")
+ ms_options
+ .addOptionChaining(
+ "slavedelay",
+ "slavedelay",
+ moe::Int,
+ "specify delay (in seconds) to be used when applying master ops to slave")
.incompatibleWith("replication.replSet")
.incompatibleWith("replication.replSetName")
.setSources(moe::SourceAllLegacy);
- ms_options.addOptionChaining("autoresync",
- "autoresync",
- moe::Switch,
- "automatically resync if slave data is stale")
+ ms_options
+ .addOptionChaining(
+ "autoresync", "autoresync", moe::Switch, "automatically resync if slave data is stale")
.incompatibleWith("replication.replSet")
.incompatibleWith("replication.replSetName")
.setSources(moe::SourceAllLegacy);
@@ -407,21 +415,22 @@ Status addMongodOptions(moe::OptionSection* options) {
"size to use (in MB) for replication op log. default is 5% of disk space "
"(i.e. large is good)");
- rs_options.addOptionChaining("replication.replSet",
- "replSet",
- moe::String,
- "arg is <setname>[/<optionalseedhostlist>]")
+ rs_options
+ .addOptionChaining("replication.replSet",
+ "replSet",
+ moe::String,
+ "arg is <setname>[/<optionalseedhostlist>]")
.setSources(moe::SourceAllLegacy);
rs_options.addOptionChaining("replication.replSetName", "", moe::String, "arg is <setname>")
.setSources(moe::SourceYAMLConfig)
.format("[^/]+", "[replica set name with no \"/\"]");
- rs_options.addOptionChaining(
- "replication.secondaryIndexPrefetch",
- "replIndexPrefetch",
- moe::String,
- "specify index prefetching behavior (if secondary) [none|_id_only|all]")
+ rs_options
+ .addOptionChaining("replication.secondaryIndexPrefetch",
+ "replIndexPrefetch",
+ moe::String,
+ "specify index prefetching behavior (if secondary) [none|_id_only|all]")
.format("(:?none)|(:?_id_only)|(:?all)", "(none/_id_only/all)");
rs_options.addOptionChaining("replication.enableMajorityReadConcern",
@@ -431,73 +440,73 @@ Status addMongodOptions(moe::OptionSection* options) {
// Sharding Options
- sharding_options.addOptionChaining(
- "configsvr",
- "configsvr",
- moe::Switch,
- "declare this is a config db of a cluster; default port 27019; "
- "default dir /data/configdb")
+ sharding_options
+ .addOptionChaining("configsvr",
+ "configsvr",
+ moe::Switch,
+ "declare this is a config db of a cluster; default port 27019; "
+ "default dir /data/configdb")
.setSources(moe::SourceAllLegacy)
.incompatibleWith("shardsvr")
.incompatibleWith("nojournal");
- sharding_options.addOptionChaining(
- "shardsvr",
- "shardsvr",
- moe::Switch,
- "declare this is a shard db of a cluster; default port 27018")
+ sharding_options
+ .addOptionChaining("shardsvr",
+ "shardsvr",
+ moe::Switch,
+ "declare this is a shard db of a cluster; default port 27018")
.setSources(moe::SourceAllLegacy)
.incompatibleWith("configsvr");
sharding_options
.addOptionChaining(
- "sharding.clusterRole",
- "",
- moe::String,
- "Choose what role this mongod has in a sharded cluster. Possible values are:\n"
- " \"configsvr\": Start this node as a config server. Starts on port 27019 by "
- "default."
- " \"shardsvr\": Start this node as a shard server. Starts on port 27018 by "
- "default.")
+ "sharding.clusterRole",
+ "",
+ moe::String,
+ "Choose what role this mongod has in a sharded cluster. Possible values are:\n"
+ " \"configsvr\": Start this node as a config server. Starts on port 27019 by "
+ "default."
+ " \"shardsvr\": Start this node as a shard server. Starts on port 27018 by "
+ "default.")
.setSources(moe::SourceYAMLConfig)
.format("(:?configsvr)|(:?shardsvr)", "(configsvr/shardsvr)");
sharding_options
.addOptionChaining(
- "sharding._overrideShardIdentity",
- "",
- moe::String,
- "overrides the shardIdentity document settings stored in the local storage with "
- "a MongoDB Extended JSON document in string format")
+ "sharding._overrideShardIdentity",
+ "",
+ moe::String,
+ "overrides the shardIdentity document settings stored in the local storage with "
+ "a MongoDB Extended JSON document in string format")
.setSources(moe::SourceYAMLConfig)
.incompatibleWith("configsvr")
.requires("storage.queryableBackupMode");
- sharding_options.addOptionChaining(
- "noMoveParanoia",
- "noMoveParanoia",
- moe::Switch,
- "turn off paranoid saving of data for the moveChunk command; default")
+ sharding_options
+ .addOptionChaining("noMoveParanoia",
+ "noMoveParanoia",
+ moe::Switch,
+ "turn off paranoid saving of data for the moveChunk command; default")
.hidden()
.setSources(moe::SourceAllLegacy)
.incompatibleWith("moveParanoia");
- sharding_options.addOptionChaining(
- "moveParanoia",
- "moveParanoia",
- moe::Switch,
- "turn on paranoid saving of data during the moveChunk command "
- "(used for internal system diagnostics)")
+ sharding_options
+ .addOptionChaining("moveParanoia",
+ "moveParanoia",
+ moe::Switch,
+ "turn on paranoid saving of data during the moveChunk command "
+ "(used for internal system diagnostics)")
.hidden()
.setSources(moe::SourceAllLegacy)
.incompatibleWith("noMoveParanoia");
- sharding_options.addOptionChaining(
- "sharding.archiveMovedChunks",
- "",
- moe::Bool,
- "config file option to turn on paranoid saving of data during the "
- "moveChunk command (used for internal system diagnostics)")
+ sharding_options
+ .addOptionChaining("sharding.archiveMovedChunks",
+ "",
+ moe::Bool,
+ "config file option to turn on paranoid saving of data during the "
+ "moveChunk command (used for internal system diagnostics)")
.hidden()
.setSources(moe::SourceYAMLConfig);
@@ -517,18 +526,20 @@ Status addMongodOptions(moe::OptionSection* options) {
// The following are legacy options that are disallowed in the JSON config file
- options->addOptionChaining(
- "fastsync",
- "fastsync",
- moe::Switch,
- "indicate that this instance is starting from a dbpath snapshot of the repl peer")
+ options
+ ->addOptionChaining(
+ "fastsync",
+ "fastsync",
+ moe::Switch,
+ "indicate that this instance is starting from a dbpath snapshot of the repl peer")
.hidden()
.setSources(moe::SourceAllLegacy);
- options->addOptionChaining("pretouch",
- "pretouch",
- moe::Int,
- "n pretouch threads for applying master/slave operations")
+ options
+ ->addOptionChaining("pretouch",
+ "pretouch",
+ moe::Int,
+ "n pretouch threads for applying master/slave operations")
.hidden()
.setSources(moe::SourceAllLegacy);
@@ -541,8 +552,8 @@ Status addMongodOptions(moe::OptionSection* options) {
.positional(1, 3)
.setSources(moe::SourceAllLegacy);
- options->addOptionChaining(
- "cacheSize", "cacheSize", moe::Long, "cache size (in MB) for rec store")
+ options
+ ->addOptionChaining("cacheSize", "cacheSize", moe::Long, "cache size (in MB) for rec store")
.hidden()
.setSources(moe::SourceAllLegacy);
@@ -1203,7 +1214,8 @@ Status storeMongodOptions(const moe::Environment& params, const std::vector<std:
if (x <= 0) {
return Status(ErrorCodes::BadValue,
str::stream() << "bad --oplogSize, arg must be greater than 0,"
- "found: " << x);
+ "found: "
+ << x);
}
// note a small size such as x==1 is ok for an arbiter.
if (x > 1000 && sizeof(void*) == 4) {
diff --git a/src/mongo/db/op_observer.cpp b/src/mongo/db/op_observer.cpp
index 25bf9866c7f..5910edd37bf 100644
--- a/src/mongo/db/op_observer.cpp
+++ b/src/mongo/db/op_observer.cpp
@@ -216,7 +216,9 @@ void OpObserver::onRenameCollection(OperationContext* txn,
std::string dbName = fromCollection.db().toString() + ".$cmd";
BSONObj cmdObj =
BSON("renameCollection" << fromCollection.ns() << "to" << toCollection.ns() << "stayTemp"
- << stayTemp << "dropTarget" << dropTarget);
+ << stayTemp
+ << "dropTarget"
+ << dropTarget);
repl::logOp(txn, "c", dbName.c_str(), cmdObj, nullptr, false);
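
The oplog command above is built inline with the BSON streaming macro. An equivalent standalone construction, with hypothetical namespaces substituted for the real ones:

    BSONObj cmdObj = BSON("renameCollection"
                          << "test.from"
                          << "to"
                          << "test.to"
                          << "stayTemp"
                          << false
                          << "dropTarget"
                          << false);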
diff --git a/src/mongo/db/operation_context_impl.cpp b/src/mongo/db/operation_context_impl.cpp
index f20d0a4eab8..139cfddd869 100644
--- a/src/mongo/db/operation_context_impl.cpp
+++ b/src/mongo/db/operation_context_impl.cpp
@@ -35,9 +35,9 @@
#include "mongo/db/client.h"
#include "mongo/db/concurrency/lock_state.h"
#include "mongo/db/curop.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/db/operation_context_noop.h b/src/mongo/db/operation_context_noop.h
index 75fdc841e20..92efb01802f 100644
--- a/src/mongo/db/operation_context_noop.h
+++ b/src/mongo/db/operation_context_noop.h
@@ -28,10 +28,10 @@
#pragma once
-#include "mongo/db/operation_context.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/locker_noop.h"
#include "mongo/db/curop.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/storage/recovery_unit_noop.h"
namespace mongo {
diff --git a/src/mongo/db/ops/field_checker.cpp b/src/mongo/db/ops/field_checker.cpp
index 0c71c7e5d07..4e8c8b82de6 100644
--- a/src/mongo/db/ops/field_checker.cpp
+++ b/src/mongo/db/ops/field_checker.cpp
@@ -51,7 +51,8 @@ Status isUpdatable(const FieldRef& field) {
if (part.empty()) {
return Status(ErrorCodes::EmptyFieldName,
mongoutils::str::stream()
- << "The update path '" << field.dottedField()
+ << "The update path '"
+ << field.dottedField()
<< "' contains an empty field name, which is not allowed.");
}
}
diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp
index de198beac53..c7648b0f254 100644
--- a/src/mongo/db/ops/insert.cpp
+++ b/src/mongo/db/ops/insert.cpp
@@ -42,8 +42,10 @@ StatusWith<BSONObj> fixDocumentForInsert(const BSONObj& doc) {
if (doc.objsize() > BSONObjMaxUserSize)
return StatusWith<BSONObj>(ErrorCodes::BadValue,
str::stream() << "object to insert too large"
- << ". size in bytes: " << doc.objsize()
- << ", max size: " << BSONObjMaxUserSize);
+ << ". size in bytes: "
+ << doc.objsize()
+ << ", max size: "
+ << BSONObjMaxUserSize);
bool firstElementIsId = false;
bool hasTimestampToFix = false;
@@ -162,9 +164,11 @@ Status userAllowedCreateNS(StringData db, StringData coll) {
if (db.size() + 1 /* dot */ + coll.size() > NamespaceString::MaxNsCollectionLen)
return Status(ErrorCodes::BadValue,
- str::stream()
- << "fully qualified namespace " << db << '.' << coll << " is too long "
- << "(max is " << NamespaceString::MaxNsCollectionLen << " bytes)");
+ str::stream() << "fully qualified namespace " << db << '.' << coll
+ << " is too long "
+ << "(max is "
+ << NamespaceString::MaxNsCollectionLen
+ << " bytes)");
    // check special areas
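
The guard above counts the separating dot explicitly when checking the combined name length. A minimal sketch of the same arithmetic with hypothetical inputs:

    StringData db("db");
    StringData coll("coll");
    bool tooLong = db.size() + 1 /* dot */ + coll.size() > NamespaceString::MaxNsCollectionLen;
    // When tooLong is true, the function returns ErrorCodes::BadValue as shown above.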
diff --git a/src/mongo/db/ops/log_builder.cpp b/src/mongo/db/ops/log_builder.cpp
index 21baffe246c..355ccb092e8 100644
--- a/src/mongo/db/ops/log_builder.cpp
+++ b/src/mongo/db/ops/log_builder.cpp
@@ -89,8 +89,10 @@ Status LogBuilder::addToSetsWithNewFieldName(StringData name, const mutablebson:
if (!elemToSet.ok())
return Status(ErrorCodes::InternalError,
str::stream() << "Could not create new '" << name
- << "' element from existing element '" << val.getFieldName()
- << "' of type " << typeName(val.getType()));
+ << "' element from existing element '"
+ << val.getFieldName()
+ << "' of type "
+ << typeName(val.getType()));
return addToSets(elemToSet);
}
@@ -100,8 +102,10 @@ Status LogBuilder::addToSetsWithNewFieldName(StringData name, const BSONElement&
if (!elemToSet.ok())
return Status(ErrorCodes::InternalError,
str::stream() << "Could not create new '" << name
- << "' element from existing element '" << val.fieldName()
- << "' of type " << typeName(val.type()));
+ << "' element from existing element '"
+ << val.fieldName()
+ << "' of type "
+ << typeName(val.type()));
return addToSets(elemToSet);
}
diff --git a/src/mongo/db/ops/log_builder_test.cpp b/src/mongo/db/ops/log_builder_test.cpp
index f2a3d20aa78..a957194e56f 100644
--- a/src/mongo/db/ops/log_builder_test.cpp
+++ b/src/mongo/db/ops/log_builder_test.cpp
@@ -106,11 +106,10 @@ TEST(LogBuilder, AddOneToEach) {
ASSERT_OK(lb.addToUnsets("x.y"));
- ASSERT_EQUALS(mongo::fromjson(
- "{ "
- " $set : { 'a.b' : 1 }, "
- " $unset : { 'x.y' : true } "
- "}"),
+ ASSERT_EQUALS(mongo::fromjson("{ "
+ " $set : { 'a.b' : 1 }, "
+ " $unset : { 'x.y' : true } "
+ "}"),
doc);
}
@@ -164,11 +163,10 @@ TEST(LogBuilder, VerifySetsAreGrouped) {
ASSERT_TRUE(elt_xy.ok());
ASSERT_OK(lb.addToSets(elt_xy));
- ASSERT_EQUALS(mongo::fromjson(
- "{ $set : {"
- " 'a.b' : 1, "
- " 'x.y' : 1 "
- "} }"),
+ ASSERT_EQUALS(mongo::fromjson("{ $set : {"
+ " 'a.b' : 1, "
+ " 'x.y' : 1 "
+ "} }"),
doc);
}
@@ -179,11 +177,10 @@ TEST(LogBuilder, VerifyUnsetsAreGrouped) {
ASSERT_OK(lb.addToUnsets("a.b"));
ASSERT_OK(lb.addToUnsets("x.y"));
- ASSERT_EQUALS(mongo::fromjson(
- "{ $unset : {"
- " 'a.b' : true, "
- " 'x.y' : true "
- "} }"),
+ ASSERT_EQUALS(mongo::fromjson("{ $unset : {"
+ " 'a.b' : true, "
+ " 'x.y' : true "
+ "} }"),
doc);
}
diff --git a/src/mongo/db/ops/modifier_add_to_set.cpp b/src/mongo/db/ops/modifier_add_to_set.cpp
index 383991c2b1f..ffa9c28ddf4 100644
--- a/src/mongo/db/ops/modifier_add_to_set.cpp
+++ b/src/mongo/db/ops/modifier_add_to_set.cpp
@@ -124,7 +124,8 @@ Status ModifierAddToSet::init(const BSONElement& modExpr, const Options& opts, b
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
// TODO: The driver could potentially do this re-writing.
@@ -247,11 +248,12 @@ Status ModifierAddToSet::prepare(mb::Element root, StringData matchedField, Exec
if (_preparedState->elemFound.getType() != mongo::Array) {
mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
return Status(ErrorCodes::BadValue,
- str::stream()
- << "Cannot apply $addToSet to a non-array field. Field named '"
- << _preparedState->elemFound.getFieldName() << "' has a non-array type "
- << typeName(_preparedState->elemFound.getType()) << " in the document "
- << idElem.toString());
+ str::stream() << "Cannot apply $addToSet to a non-array field. Field named '"
+ << _preparedState->elemFound.getFieldName()
+ << "' has a non-array type "
+ << typeName(_preparedState->elemFound.getType())
+ << " in the document "
+ << idElem.toString());
}
// If the array is empty, then we don't need to check anything: all of the values are
@@ -387,7 +389,8 @@ Status ModifierAddToSet::log(LogBuilder* logBuilder) const {
if (!status.isOK()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Could not append entry for $addToSet oplog entry."
- << "Underlying cause: " << status.toString());
+ << "Underlying cause: "
+ << status.toString());
}
curr = curr.rightSibling();
}
diff --git a/src/mongo/db/ops/modifier_bit.cpp b/src/mongo/db/ops/modifier_bit.cpp
index d6acbfe1ff8..fb7ae3f45eb 100644
--- a/src/mongo/db/ops/modifier_bit.cpp
+++ b/src/mongo/db/ops/modifier_bit.cpp
@@ -84,7 +84,8 @@ Status ModifierBit::init(const BSONElement& modExpr, const Options& opts, bool*
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
if (modExpr.type() != mongo::Object)
@@ -120,7 +121,9 @@ Status ModifierBit::init(const BSONElement& modExpr, const Options& opts, bool*
return Status(ErrorCodes::BadValue,
str::stream()
<< "The $bit modifier only supports 'and', 'or', and 'xor', not '"
- << payloadFieldName << "' which is an unknown operator: {" << curOp
+ << payloadFieldName
+ << "' which is an unknown operator: {"
+ << curOp
<< "}");
}
@@ -128,7 +131,9 @@ Status ModifierBit::init(const BSONElement& modExpr, const Options& opts, bool*
return Status(ErrorCodes::BadValue,
str::stream()
<< "The $bit modifier field must be an Integer(32/64 bit); a '"
- << typeName(curOp.type()) << "' is not supported here: {" << curOp
+ << typeName(curOp.type())
+ << "' is not supported here: {"
+ << curOp
<< "}");
const OpEntry entry = {SafeNum(curOp), op};
@@ -191,7 +196,8 @@ Status ModifierBit::prepare(mutablebson::Element root,
mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
return Status(ErrorCodes::BadValue,
str::stream() << "Cannot apply $bit to a value of non-integral type."
- << idElem.toString() << " has the field "
+ << idElem.toString()
+ << " has the field "
<< _preparedState->elemFound.getFieldName()
<< " of non-integer type "
<< typeName(_preparedState->elemFound.getType()));
@@ -260,7 +266,9 @@ Status ModifierBit::log(LogBuilder* logBuilder) const {
if (!logElement.ok()) {
return Status(ErrorCodes::InternalError,
str::stream() << "Could not append entry to $bit oplog entry: "
- << "set '" << _fieldRef.dottedField() << "' -> "
+ << "set '"
+ << _fieldRef.dottedField()
+ << "' -> "
<< _preparedState->newValue.debugString());
}
return logBuilder->addToSets(logElement);
diff --git a/src/mongo/db/ops/modifier_compare.cpp b/src/mongo/db/ops/modifier_compare.cpp
index 36f800202e4..2d05d29f360 100644
--- a/src/mongo/db/ops/modifier_compare.cpp
+++ b/src/mongo/db/ops/modifier_compare.cpp
@@ -77,7 +77,8 @@ Status ModifierCompare::init(const BSONElement& modExpr, const Options& opts, bo
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _updatePath.dottedField() << "'");
+ << _updatePath.dottedField()
+ << "'");
}
// Store value for later.
diff --git a/src/mongo/db/ops/modifier_current_date.cpp b/src/mongo/db/ops/modifier_current_date.cpp
index 75d0be014e3..cd328f5fe94 100644
--- a/src/mongo/db/ops/modifier_current_date.cpp
+++ b/src/mongo/db/ops/modifier_current_date.cpp
@@ -84,7 +84,8 @@ Status ModifierCurrentDate::init(const BSONElement& modExpr,
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _updatePath.dottedField() << "'");
+ << _updatePath.dottedField()
+ << "'");
}
// Validate and store the type to produce
@@ -113,7 +114,8 @@ Status ModifierCurrentDate::init(const BSONElement& modExpr,
str::stream()
<< "The only valid field of the option is '$type': "
"{$currentDate: {field : {$type: 'date/timestamp'}}}; "
- << "arg: " << argObj);
+ << "arg: "
+ << argObj);
}
}
}
diff --git a/src/mongo/db/ops/modifier_inc.cpp b/src/mongo/db/ops/modifier_inc.cpp
index 8bc6e2ff9a4..314ac6a5024 100644
--- a/src/mongo/db/ops/modifier_inc.cpp
+++ b/src/mongo/db/ops/modifier_inc.cpp
@@ -89,7 +89,8 @@ Status ModifierInc::init(const BSONElement& modExpr, const Options& opts, bool*
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
//
@@ -101,7 +102,9 @@ Status ModifierInc::init(const BSONElement& modExpr, const Options& opts, bool*
// include mod code, etc.
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Cannot " << (_mode == MODE_INC ? "increment" : "multiply")
- << " with non-numeric argument: {" << modExpr << "}");
+ << " with non-numeric argument: {"
+ << modExpr
+ << "}");
}
_val = modExpr;
@@ -172,7 +175,8 @@ Status ModifierInc::prepare(mutablebson::Element root,
mb::Element idElem = mb::findFirstChildNamed(root, "_id");
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Cannot apply " << (_mode == MODE_INC ? "$inc" : "$mul")
- << " to a value of non-numeric type. {" << idElem.toString()
+ << " to a value of non-numeric type. {"
+ << idElem.toString()
<< "} has the field '"
<< _preparedState->elemFound.getFieldName()
<< "' of non-numeric type "
@@ -191,8 +195,10 @@ Status ModifierInc::prepare(mutablebson::Element root,
mb::Element idElem = mb::findFirstChildNamed(root, "_id");
return Status(ErrorCodes::BadValue,
str::stream() << "Failed to apply $inc operations to current value ("
- << currentValue.debugString() << ") for document {"
- << idElem.toString() << "}");
+ << currentValue.debugString()
+ << ") for document {"
+ << idElem.toString()
+ << "}");
}
// If the values are identical (same type, same value), then this is a no-op.
@@ -254,8 +260,11 @@ Status ModifierInc::log(LogBuilder* logBuilder) const {
if (!logElement.ok()) {
return Status(ErrorCodes::InternalError,
str::stream() << "Could not append entry to "
- << (_mode == MODE_INC ? "$inc" : "$mul") << " oplog entry: "
- << "set '" << _fieldRef.dottedField() << "' -> "
+ << (_mode == MODE_INC ? "$inc" : "$mul")
+ << " oplog entry: "
+ << "set '"
+ << _fieldRef.dottedField()
+ << "' -> "
<< _preparedState->newValue.debugString());
}
diff --git a/src/mongo/db/ops/modifier_object_replace.cpp b/src/mongo/db/ops/modifier_object_replace.cpp
index 64bec6283d7..0cecd5a0d1e 100644
--- a/src/mongo/db/ops/modifier_object_replace.cpp
+++ b/src/mongo/db/ops/modifier_object_replace.cpp
@@ -86,7 +86,8 @@ Status ModifierObjectReplace::init(const BSONElement& modExpr,
// Impossible, really, since the caller checks this already...
return Status(ErrorCodes::BadValue,
str::stream() << "Document replacement expects a complete document"
- " but the type supplied was " << modExpr.type());
+ " but the type supplied was "
+ << modExpr.type());
}
// Object replacements never have positional operator.
@@ -150,8 +151,10 @@ Status ModifierObjectReplace::apply() const {
if (srcIdElement.compareWithBSONElement(dstIdElement, true) != 0) {
return Status(ErrorCodes::ImmutableField,
str::stream() << "The _id field cannot be changed from {"
- << srcIdElement.toString() << "} to {"
- << dstIdElement.toString() << "}.");
+ << srcIdElement.toString()
+ << "} to {"
+ << dstIdElement.toString()
+ << "}.");
}
continue;
}
diff --git a/src/mongo/db/ops/modifier_pop.cpp b/src/mongo/db/ops/modifier_pop.cpp
index c46fdd7a9bf..07682c976bf 100644
--- a/src/mongo/db/ops/modifier_pop.cpp
+++ b/src/mongo/db/ops/modifier_pop.cpp
@@ -89,7 +89,8 @@ Status ModifierPop::init(const BSONElement& modExpr, const Options& opts, bool*
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
//
@@ -136,9 +137,9 @@ Status ModifierPop::prepare(mutablebson::Element root,
// array.
if (_preparedState->pathFoundElement.getType() != Array) {
mb::Element idElem = mb::findFirstChildNamed(root, "_id");
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "Can only $pop from arrays. {" << idElem.toString()
+ return Status(
+ ErrorCodes::BadValue,
+ str::stream() << "Can only $pop from arrays. {" << idElem.toString()
<< "} has the field '"
<< _preparedState->pathFoundElement.getFieldName()
<< "' of non-array type "
@@ -190,7 +191,9 @@ Status ModifierPop::log(LogBuilder* logBuilder) const {
if (!logElement.ok()) {
return Status(ErrorCodes::InternalError,
str::stream() << "Could not append entry to $pop oplog entry: "
- << "set '" << _fieldRef.dottedField() << "' -> "
+ << "set '"
+ << _fieldRef.dottedField()
+ << "' -> "
<< _preparedState->pathFoundElement.toString());
}
return logBuilder->addToSets(logElement);
diff --git a/src/mongo/db/ops/modifier_pop_test.cpp b/src/mongo/db/ops/modifier_pop_test.cpp
index 06dd60a029e..8e288c73aa4 100644
--- a/src/mongo/db/ops/modifier_pop_test.cpp
+++ b/src/mongo/db/ops/modifier_pop_test.cpp
@@ -37,8 +37,8 @@
#include "mongo/bson/mutable/document.h"
#include "mongo/bson/mutable/mutable_bson_test_utils.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/ops/log_builder.h"
#include "mongo/db/json.h"
+#include "mongo/db/ops/log_builder.h"
#include "mongo/unittest/unittest.h"
namespace {
diff --git a/src/mongo/db/ops/modifier_pull.cpp b/src/mongo/db/ops/modifier_pull.cpp
index ce87c03e0f3..a172251eea6 100644
--- a/src/mongo/db/ops/modifier_pull.cpp
+++ b/src/mongo/db/ops/modifier_pull.cpp
@@ -94,7 +94,8 @@ Status ModifierPull::init(const BSONElement& modExpr, const Options& opts, bool*
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
_exprElt = modExpr;
diff --git a/src/mongo/db/ops/modifier_pull_all.cpp b/src/mongo/db/ops/modifier_pull_all.cpp
index 287dc4828b4..681769fd195 100644
--- a/src/mongo/db/ops/modifier_pull_all.cpp
+++ b/src/mongo/db/ops/modifier_pull_all.cpp
@@ -105,7 +105,8 @@ Status ModifierPullAll::init(const BSONElement& modExpr, const Options& opts, bo
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
//
@@ -155,9 +156,9 @@ Status ModifierPullAll::prepare(mutablebson::Element root,
// array.
if (_preparedState->pathFoundElement.getType() != Array) {
mb::Element idElem = mb::findElementNamed(root.leftChild(), "_id");
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "Can only apply $pullAll to an array. " << idElem.toString()
+ return Status(
+ ErrorCodes::BadValue,
+ str::stream() << "Can only apply $pullAll to an array. " << idElem.toString()
<< " has the field "
<< _preparedState->pathFoundElement.getFieldName()
<< " of non-array type "
@@ -227,7 +228,9 @@ Status ModifierPullAll::log(LogBuilder* logBuilder) const {
if (!logElement.ok()) {
return Status(ErrorCodes::InternalError,
str::stream() << "Could not append entry to $pullAll oplog entry: "
- << "set '" << _fieldRef.dottedField() << "' -> "
+ << "set '"
+ << _fieldRef.dottedField()
+ << "' -> "
<< _preparedState->pathFoundElement.toString());
}
return logBuilder->addToSets(logElement);
diff --git a/src/mongo/db/ops/modifier_push.cpp b/src/mongo/db/ops/modifier_push.cpp
index bf3886cbe24..7dc0f24e712 100644
--- a/src/mongo/db/ops/modifier_push.cpp
+++ b/src/mongo/db/ops/modifier_push.cpp
@@ -104,9 +104,9 @@ Status parseEachMode(ModifierPush::ModifierPushMode pushMode,
*eachElem = modExpr.embeddedObject()[kEach];
if (eachElem->type() != Array) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "The argument to $each in $push must be"
- " an array but it was of type: " << typeName(eachElem->type()));
+ str::stream() << "The argument to $each in $push must be"
+ " an array but it was of type: "
+ << typeName(eachElem->type()));
}
// There must be only one $each clause.
@@ -149,8 +149,8 @@ Status parseEachMode(ModifierPush::ModifierPushMode pushMode,
seenPosition = true;
} else if (!mongoutils::str::equals(elem.fieldName(), kEach)) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "Unrecognized clause in $push: " << elem.fieldNameStringData());
+ str::stream() << "Unrecognized clause in $push: "
+ << elem.fieldNameStringData());
}
}
@@ -214,7 +214,8 @@ Status ModifierPush::init(const BSONElement& modExpr, const Options& opts, bool*
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
//
@@ -264,7 +265,8 @@ Status ModifierPush::init(const BSONElement& modExpr, const Options& opts, bool*
if (_pushMode == PUSH_ALL) {
return Status(ErrorCodes::BadValue,
str::stream() << "$pushAll requires an array of values "
- "but was given type: " << typeName(modExpr.type()));
+ "but was given type: "
+ << typeName(modExpr.type()));
}
_val = modExpr;
@@ -379,9 +381,9 @@ Status ModifierPush::init(const BSONElement& modExpr, const Options& opts, bool*
for (size_t i = 0; i < sortField.numParts(); i++) {
if (sortField.getPart(i).size() == 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "The $sort field is a dotted field "
- "but has an empty part: " << sortField.dottedField());
+ str::stream() << "The $sort field is a dotted field "
+ "but has an empty part: "
+ << sortField.dottedField());
}
}
}
@@ -442,7 +444,9 @@ Status ModifierPush::prepare(mutablebson::Element root,
str::stream() << "The field '" << _fieldRef.dottedField() << "'"
<< " must be an array but is of type "
<< typeName(_preparedState->elemFound.getType())
- << " in document {" << idElem.toString() << "}");
+ << " in document {"
+ << idElem.toString()
+ << "}");
}
} else {
return status;
@@ -477,7 +481,8 @@ Status pushFirstElement(mb::Element& arrayElem,
if (!fromElem.ok()) {
return Status(ErrorCodes::InvalidLength,
str::stream() << "The specified position (" << appendPos << "/" << pos
- << ") is invalid based on the length ( " << arraySize
+ << ") is invalid based on the length ( "
+ << arraySize
<< ") of the array");
}
diff --git a/src/mongo/db/ops/modifier_push_sorter.h b/src/mongo/db/ops/modifier_push_sorter.h
index c942f4e5da3..6d795ec372d 100644
--- a/src/mongo/db/ops/modifier_push_sorter.h
+++ b/src/mongo/db/ops/modifier_push_sorter.h
@@ -28,9 +28,9 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/bson/mutable/document.h"
#include "mongo/bson/mutable/element.h"
+#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/db/ops/modifier_push_test.cpp b/src/mongo/db/ops/modifier_push_test.cpp
index e148ce5f6ef..6e37fc24d74 100644
--- a/src/mongo/db/ops/modifier_push_test.cpp
+++ b/src/mongo/db/ops/modifier_push_test.cpp
@@ -35,10 +35,10 @@
#include "mongo/base/status.h"
#include "mongo/base/string_data.h"
-#include "mongo/bson/ordering.h"
#include "mongo/bson/mutable/algorithm.h"
#include "mongo/bson/mutable/document.h"
#include "mongo/bson/mutable/mutable_bson_test_utils.h"
+#include "mongo/bson/ordering.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/ops/log_builder.h"
@@ -659,13 +659,13 @@ TEST(SimpleObjMod, PrepareApplyNormal) {
}
TEST(SimpleObjMod, PrepareApplyDotted) {
- Document doc(fromjson(
- "{ _id : 1 , "
- " question : 'a', "
- " choices : { "
- " first : { choice : 'b' }, "
- " second : { choice : 'c' } }"
- "}"));
+ Document doc(
+ fromjson("{ _id : 1 , "
+ " question : 'a', "
+ " choices : { "
+ " first : { choice : 'b' }, "
+ " second : { choice : 'c' } }"
+ "}"));
Mod pushMod(fromjson("{$push: {'choices.first.votes': 1}}"));
ModifierInterface::ExecInfo execInfo;
@@ -676,13 +676,12 @@ TEST(SimpleObjMod, PrepareApplyDotted) {
ASSERT_OK(pushMod.apply());
ASSERT_FALSE(doc.isInPlaceModeEnabled());
- ASSERT_EQUALS(fromjson(
- "{ _id : 1 , "
- " question : 'a', "
- " choices : { "
- " first : { choice : 'b', votes: [1]}, "
- " second : { choice : 'c' } }"
- "}"),
+ ASSERT_EQUALS(fromjson("{ _id : 1 , "
+ " question : 'a', "
+ " choices : { "
+ " first : { choice : 'b', votes: [1]}, "
+ " second : { choice : 'c' } }"
+ "}"),
doc);
Document logDoc;
@@ -1059,8 +1058,9 @@ public:
arrBuilder.append(*it);
}
- _modObj = BSON("$push" << BSON("a" << BSON("$each" << arrBuilder.arr() << "$slice" << slice
- << "$sort" << sort)));
+ _modObj = BSON(
+ "$push" << BSON(
+ "a" << BSON("$each" << arrBuilder.arr() << "$slice" << slice << "$sort" << sort)));
ASSERT_OK(_mod.init(_modObj["$push"].embeddedObject().firstElement(),
ModifierInterface::Options::normal()));
diff --git a/src/mongo/db/ops/modifier_rename.cpp b/src/mongo/db/ops/modifier_rename.cpp
index 26dc2b23df9..c2935fb1906 100644
--- a/src/mongo/db/ops/modifier_rename.cpp
+++ b/src/mongo/db/ops/modifier_rename.cpp
@@ -29,8 +29,8 @@
#include "mongo/db/ops/modifier_rename.h"
#include "mongo/base/error_codes.h"
-#include "mongo/bson/mutable/document.h"
#include "mongo/bson/mutable/algorithm.h"
+#include "mongo/bson/mutable/document.h"
#include "mongo/db/ops/field_checker.h"
#include "mongo/db/ops/log_builder.h"
#include "mongo/db/ops/path_support.h"
@@ -95,15 +95,16 @@ Status ModifierRename::init(const BSONElement& modExpr, const Options& opts, boo
// Old restriction is that if the fields are the same then it is not allowed.
if (_fromFieldRef == _toFieldRef)
return Status(ErrorCodes::BadValue,
- str::stream()
- << "The source and target field for $rename must differ: " << modExpr);
+ str::stream() << "The source and target field for $rename must differ: "
+ << modExpr);
// TODO: Remove this restriction by allowing moving deeper from the 'from' path
// Old restriction is that if the to/from is on the same path it fails
if (_fromFieldRef.isPrefixOf(_toFieldRef) || _toFieldRef.isPrefixOf(_fromFieldRef)) {
return Status(ErrorCodes::BadValue,
str::stream() << "The source and target field for $rename must "
- "not be on the same path: " << modExpr);
+ "not be on the same path: "
+ << modExpr);
}
// TODO: We can remove this restriction as long as there is only one,
// or it is the same array -- should think on this a bit.
@@ -161,9 +162,11 @@ Status ModifierRename::prepare(mutablebson::Element root,
if (curr.getType() == Array)
return Status(ErrorCodes::BadValue,
str::stream() << "The source field cannot be an array element, '"
- << _fromFieldRef.dottedField() << "' in doc with "
+ << _fromFieldRef.dottedField()
+ << "' in doc with "
<< findElementNamed(root.leftChild(), "_id").toString()
- << " has an array field called '" << curr.getFieldName()
+ << " has an array field called '"
+ << curr.getFieldName()
<< "'");
curr = curr.parent();
}
@@ -191,9 +194,11 @@ Status ModifierRename::prepare(mutablebson::Element root,
if (curr.getType() == Array)
return Status(ErrorCodes::BadValue,
str::stream() << "The destination field cannot be an array element, '"
- << _fromFieldRef.dottedField() << "' in doc with "
+ << _fromFieldRef.dottedField()
+ << "' in doc with "
<< findElementNamed(root.leftChild(), "_id").toString()
- << " has an array field called '" << curr.getFieldName()
+ << " has an array field called '"
+ << curr.getFieldName()
<< "'");
curr = curr.parent();
}
diff --git a/src/mongo/db/ops/modifier_set.cpp b/src/mongo/db/ops/modifier_set.cpp
index c6966fae079..59f59c555ce 100644
--- a/src/mongo/db/ops/modifier_set.cpp
+++ b/src/mongo/db/ops/modifier_set.cpp
@@ -88,7 +88,8 @@ Status ModifierSet::init(const BSONElement& modExpr, const Options& opts, bool*
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
//
diff --git a/src/mongo/db/ops/modifier_unset.cpp b/src/mongo/db/ops/modifier_unset.cpp
index 673cbdb8d16..453b2d60d1c 100644
--- a/src/mongo/db/ops/modifier_unset.cpp
+++ b/src/mongo/db/ops/modifier_unset.cpp
@@ -83,7 +83,8 @@ Status ModifierUnset::init(const BSONElement& modExpr, const Options& opts, bool
if (foundDollar && foundCount > 1) {
return Status(ErrorCodes::BadValue,
str::stream() << "Too many positional (i.e. '$') elements found in path '"
- << _fieldRef.dottedField() << "'");
+ << _fieldRef.dottedField()
+ << "'");
}
diff --git a/src/mongo/db/ops/parsed_delete.cpp b/src/mongo/db/ops/parsed_delete.cpp
index 4e027390a79..b2f723e455e 100644
--- a/src/mongo/db/ops/parsed_delete.cpp
+++ b/src/mongo/db/ops/parsed_delete.cpp
@@ -35,8 +35,8 @@
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
#include "mongo/db/exec/delete.h"
-#include "mongo/db/ops/delete_request.h"
#include "mongo/db/matcher/extensions_callback_real.h"
+#include "mongo/db/ops/delete_request.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/query_planner_common.h"
diff --git a/src/mongo/db/ops/parsed_update.h b/src/mongo/db/ops/parsed_update.h
index c9bb03edf9a..eabef19b483 100644
--- a/src/mongo/db/ops/parsed_update.h
+++ b/src/mongo/db/ops/parsed_update.h
@@ -30,9 +30,9 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h"
+#include "mongo/db/ops/update_driver.h"
#include "mongo/db/query/collation/collator_interface.h"
#include "mongo/db/query/plan_executor.h"
-#include "mongo/db/ops/update_driver.h"
namespace mongo {
diff --git a/src/mongo/db/ops/path_support.cpp b/src/mongo/db/ops/path_support.cpp
index e430c7d57cc..a95b700acd0 100644
--- a/src/mongo/db/ops/path_support.cpp
+++ b/src/mongo/db/ops/path_support.cpp
@@ -66,7 +66,8 @@ Status maybePadTo(mutablebson::Element* elemArray, size_t sizeRequired) {
if (toPad > kMaxPaddingAllowed) {
return Status(ErrorCodes::CannotBackfillArray,
mongoutils::str::stream() << "can't backfill more than "
- << kMaxPaddingAllowed << " elements");
+ << kMaxPaddingAllowed
+ << " elements");
}
for (size_t i = 0; i < toPad; i++) {
@@ -139,8 +140,10 @@ Status findLongestPrefix(const FieldRef& prefix,
*elemFound = prev;
return Status(ErrorCodes::PathNotViable,
mongoutils::str::stream() << "cannot use the part (" << prefix.getPart(i - 1)
- << " of " << prefix.dottedField()
- << ") to traverse the element ({" << curr.toString()
+ << " of "
+ << prefix.dottedField()
+ << ") to traverse the element ({"
+ << curr.toString()
<< "})");
} else if (curr.ok()) {
*idxFound = i - 1;
diff --git a/src/mongo/db/ops/path_support_test.cpp b/src/mongo/db/ops/path_support_test.cpp
index 01345564ccd..f1de975850f 100644
--- a/src/mongo/db/ops/path_support_test.cpp
+++ b/src/mongo/db/ops/path_support_test.cpp
@@ -537,7 +537,9 @@ static void assertContains(const EqualityMatches& equalities, const BSONObj& wra
}
if (!it->second->getData().valuesEqual(value)) {
FAIL(stream() << "Equality match at path \"" << path << "\" contains value "
- << it->second->getData() << ", not value " << value);
+ << it->second->getData()
+ << ", not value "
+ << value);
}
}
@@ -827,12 +829,17 @@ static void assertParent(const EqualityMatches& equalities,
StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
if (foundParentPath != parentPath) {
FAIL(stream() << "Equality match parent at path \"" << foundParentPath
- << "\" does not match \"" << parentPath << "\"");
+ << "\" does not match \""
+ << parentPath
+ << "\"");
}
if (!parentEl.valuesEqual(value)) {
FAIL(stream() << "Equality match parent for \"" << pathStr << "\" at path \"" << parentPath
- << "\" contains value " << parentEl << ", not value " << value);
+ << "\" contains value "
+ << parentEl
+ << ", not value "
+ << value);
}
}
@@ -852,7 +859,8 @@ static void assertNoParent(const EqualityMatches& equalities, StringData pathStr
if (!parentEl.eoo()) {
StringData foundParentPath = path.dottedSubstring(0, parentPathPart);
FAIL(stream() << "Equality matches contained parent for \"" << pathStr << "\" at \""
- << foundParentPath << "\"");
+ << foundParentPath
+ << "\"");
}
}
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
index d5b83ae666b..c808d9aab34 100644
--- a/src/mongo/db/ops/update.cpp
+++ b/src/mongo/db/ops/update.cpp
@@ -96,7 +96,8 @@ UpdateResult update(OperationContext* txn, Database* db, const UpdateRequest& re
if (userInitiatedWritesAndNotPrimary) {
uassertStatusOK(Status(ErrorCodes::NotMaster,
str::stream() << "Not primary while creating collection "
- << nsString.ns() << " during upsert"));
+ << nsString.ns()
+ << " during upsert"));
}
WriteUnitOfWork wuow(txn);
collection = db->createCollection(txn, nsString.ns(), CollectionOptions());
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
index ff21054e25a..8ff64538a9d 100644
--- a/src/mongo/db/ops/update.h
+++ b/src/mongo/db/ops/update.h
@@ -30,8 +30,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/curop.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/ops/update_request.h"
#include "mongo/db/ops/update_result.h"
diff --git a/src/mongo/db/ops/update_driver.cpp b/src/mongo/db/ops/update_driver.cpp
index 19abae7b96f..fad283493cd 100644
--- a/src/mongo/db/ops/update_driver.cpp
+++ b/src/mongo/db/ops/update_driver.cpp
@@ -112,7 +112,9 @@ Status UpdateDriver::parse(const BSONObj& updateExpr, const bool multi) {
str::stream() << "Modifiers operate on fields but we found type "
<< typeName(outerModElem.type())
<< " instead. For example: {$mod: {<field>: ...}}"
- << " not {" << outerModElem.toString() << "}");
+ << " not {"
+ << outerModElem.toString()
+ << "}");
}
// Check whether there are indeed mods under this modifier.
@@ -120,7 +122,9 @@ Status UpdateDriver::parse(const BSONObj& updateExpr, const bool multi) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << outerModElem.fieldName()
<< "' is empty. You must specify a field like so: "
- "{" << outerModElem.fieldName() << ": {<field>: ...}}");
+ "{"
+ << outerModElem.fieldName()
+ << ": {<field>: ...}}");
}
BSONObjIterator innerIter(outerModElem.embeddedObject());
@@ -146,7 +150,9 @@ inline Status UpdateDriver::addAndParse(const modifiertable::ModifierType type,
if (elem.eoo()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "'" << elem.fieldName() << "' has no value in : " << elem
- << " which is not allowed for any $" << type << " mod.");
+ << " which is not allowed for any $"
+ << type
+ << " mod.");
}
unique_ptr<ModifierInterface> mod(modifiertable::makeUpdateMod(type));
@@ -275,7 +281,8 @@ Status UpdateDriver::update(StringData matchedField,
if (!targetFields->insert(execInfo.fieldRef[i], &other)) {
return Status(ErrorCodes::ConflictingUpdateOperators,
str::stream() << "Cannot update '" << other->dottedField()
- << "' and '" << execInfo.fieldRef[i]->dottedField()
+ << "' and '"
+ << execInfo.fieldRef[i]->dottedField()
<< "' at the same time");
}
@@ -371,7 +378,8 @@ BSONObj UpdateDriver::makeOplogEntryQuery(const BSONObj& doc, bool multi) const
} else {
uassert(16980,
str::stream() << "Multi-update operations require all documents to "
- "have an '_id' field. " << doc.toString(false, false),
+ "have an '_id' field. "
+ << doc.toString(false, false),
!multi);
return doc;
}
diff --git a/src/mongo/db/ops/update_lifecycle_impl.cpp b/src/mongo/db/ops/update_lifecycle_impl.cpp
index a5202948963..8f714519ea1 100644
--- a/src/mongo/db/ops/update_lifecycle_impl.cpp
+++ b/src/mongo/db/ops/update_lifecycle_impl.cpp
@@ -30,10 +30,10 @@
#include "mongo/db/ops/update_lifecycle_impl.h"
-#include "mongo/db/client.h"
+#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/client.h"
#include "mongo/db/field_ref.h"
-#include "mongo/db/catalog/collection.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
diff --git a/src/mongo/db/ops/update_request.h b/src/mongo/db/ops/update_request.h
index 731c1195606..f6aa0e31d10 100644
--- a/src/mongo/db/ops/update_request.h
+++ b/src/mongo/db/ops/update_request.h
@@ -28,8 +28,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/curop.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/explain.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/ops/update_result.h b/src/mongo/db/ops/update_result.h
index 9c1c27c5a93..2c3107e3ea1 100644
--- a/src/mongo/db/ops/update_result.h
+++ b/src/mongo/db/ops/update_result.h
@@ -28,8 +28,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/curop.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp
index 82e712b39ab..5371736b7bd 100644
--- a/src/mongo/db/ops/write_ops_exec.cpp
+++ b/src/mongo/db/ops/write_ops_exec.cpp
@@ -209,8 +209,8 @@ bool handleError(OperationContext* txn,
<< demangleName(typeid(ex)));
}
- ShardingState::get(txn)
- ->onStaleShardVersion(txn, wholeOp.ns, staleConfigException->getVersionReceived());
+ ShardingState::get(txn)->onStaleShardVersion(
+ txn, wholeOp.ns, staleConfigException->getVersionReceived());
out->staleConfigException =
stdx::make_unique<SendStaleConfigException>(*staleConfigException);
return false;
@@ -230,7 +230,8 @@ static WriteResult::SingleResult createIndex(OperationContext* txn,
uassert(ErrorCodes::TypeMismatch,
str::stream() << "Expected \"ns\" field of index description to be a "
"string, "
- "but found a " << typeName(nsElement.type()),
+ "but found a "
+ << typeName(nsElement.type()),
nsElement.type() == String);
const NamespaceString ns(nsElement.valueStringData());
uassert(ErrorCodes::InvalidOptions,
diff --git a/src/mongo/db/ops/write_ops_parsers.cpp b/src/mongo/db/ops/write_ops_parsers.cpp
index 1eb3e8693d2..a4ade410010 100644
--- a/src/mongo/db/ops/write_ops_parsers.cpp
+++ b/src/mongo/db/ops/write_ops_parsers.cpp
@@ -50,22 +50,31 @@ void checkTypeInArray(BSONType expectedType,
const BSONElement& arrayElem) {
uassert(ErrorCodes::TypeMismatch,
str::stream() << "Wrong type for " << arrayElem.fieldNameStringData() << '['
- << elem.fieldNameStringData() << "]. Expected a "
- << typeName(expectedType) << ", got a " << typeName(elem.type()) << '.',
+ << elem.fieldNameStringData()
+ << "]. Expected a "
+ << typeName(expectedType)
+ << ", got a "
+ << typeName(elem.type())
+ << '.',
elem.type() == expectedType);
}
void checkType(BSONType expectedType, const BSONElement& elem) {
uassert(ErrorCodes::TypeMismatch,
str::stream() << "Wrong type for '" << elem.fieldNameStringData() << "'. Expected a "
- << typeName(expectedType) << ", got a " << typeName(elem.type()) << '.',
+ << typeName(expectedType)
+ << ", got a "
+ << typeName(elem.type())
+ << '.',
elem.type() == expectedType);
}
void checkOpCountForCommand(size_t numOps) {
uassert(ErrorCodes::InvalidLength,
str::stream() << "Write batch sizes must be between 1 and " << kMaxWriteBatchSize
- << ". Got " << numOps << " operations.",
+ << ". Got "
+ << numOps
+ << " operations.",
numOps != 0 && numOps <= kMaxWriteBatchSize);
}
@@ -108,7 +117,8 @@ void parseWriteCommand(StringData dbName,
"writeConcern", "maxTimeMS", "shardVersion"};
uassert(ErrorCodes::FailedToParse,
str::stream() << "Unknown option to " << cmd.firstElementFieldName()
- << " command: " << fieldName,
+ << " command: "
+ << fieldName,
std::find(ignoredFields.begin(), ignoredFields.end(), fieldName) !=
ignoredFields.end());
}
@@ -116,7 +126,8 @@ void parseWriteCommand(StringData dbName,
uassert(ErrorCodes::FailedToParse,
str::stream() << "The " << uniqueFieldName << " option is required to the "
- << cmd.firstElementFieldName() << " command.",
+ << cmd.firstElementFieldName()
+ << " command.",
haveUniqueField);
}
}
diff --git a/src/mongo/db/ops/write_ops_parsers.h b/src/mongo/db/ops/write_ops_parsers.h
index 526703fadd3..376fac81874 100644
--- a/src/mongo/db/ops/write_ops_parsers.h
+++ b/src/mongo/db/ops/write_ops_parsers.h
@@ -29,8 +29,8 @@
#pragma once
#include "mongo/db/jsobj.h"
-#include "mongo/util/net/message.h"
#include "mongo/db/ops/write_ops.h"
+#include "mongo/util/net/message.h"
namespace mongo {
diff --git a/src/mongo/db/ops/write_ops_parsers_test.cpp b/src/mongo/db/ops/write_ops_parsers_test.cpp
index 252479b4915..78eb0212609 100644
--- a/src/mongo/db/ops/write_ops_parsers_test.cpp
+++ b/src/mongo/db/ops/write_ops_parsers_test.cpp
@@ -39,7 +39,9 @@ TEST(CommandWriteOpsParsers, CommonFields_BypassDocumentValidation) {
for (BSONElement bypassDocumentValidation : BSON_ARRAY(true << false << 1 << 0 << 1.0 << 0.0)) {
auto cmd = BSON("insert"
<< "bar"
- << "documents" << BSON_ARRAY(BSONObj()) << "bypassDocumentValidation"
+ << "documents"
+ << BSON_ARRAY(BSONObj())
+ << "bypassDocumentValidation"
<< bypassDocumentValidation);
auto op = parseInsertCommand("foo", cmd);
ASSERT_EQ(op.bypassDocumentValidation, shouldBypassDocumentValidationForCommand(cmd));
@@ -50,7 +52,10 @@ TEST(CommandWriteOpsParsers, CommonFields_Ordered) {
for (bool ordered : {true, false}) {
auto cmd = BSON("insert"
<< "bar"
- << "documents" << BSON_ARRAY(BSONObj()) << "ordered" << ordered);
+ << "documents"
+ << BSON_ARRAY(BSONObj())
+ << "ordered"
+ << ordered);
auto op = parseInsertCommand("foo", cmd);
ASSERT_EQ(op.continueOnError, !ordered);
}
@@ -60,45 +65,55 @@ TEST(CommandWriteOpsParsers, CommonFields_IgnoredFields) {
// These flags are ignored, so there is nothing to check other than that this doesn't throw.
auto cmd = BSON("insert"
<< "bar"
- << "documents" << BSON_ARRAY(BSONObj()) << "maxTimeMS" << 1000 << "shardVersion"
- << BSONObj() << "writeConcern" << BSONObj());
+ << "documents"
+ << BSON_ARRAY(BSONObj())
+ << "maxTimeMS"
+ << 1000
+ << "shardVersion"
+ << BSONObj()
+ << "writeConcern"
+ << BSONObj());
parseInsertCommand("foo", cmd);
}
TEST(CommandWriteOpsParsers, GarbageFieldsAtTopLevel) {
auto cmd = BSON("insert"
<< "bar"
- << "documents" << BSON_ARRAY(BSONObj()) << "GARBAGE" << 1);
+ << "documents"
+ << BSON_ARRAY(BSONObj())
+ << "GARBAGE"
+ << 1);
ASSERT_THROWS_CODE(parseInsertCommand("foo", cmd), UserException, ErrorCodes::FailedToParse);
}
TEST(CommandWriteOpsParsers, GarbageFieldsInUpdateDoc) {
- auto cmd =
- BSON("update"
- << "bar"
- << "updates" << BSON_ARRAY("q" << BSONObj() << "u" << BSONObj() << "GARBAGE" << 1));
+ auto cmd = BSON("update"
+ << "bar"
+ << "updates"
+ << BSON_ARRAY("q" << BSONObj() << "u" << BSONObj() << "GARBAGE" << 1));
ASSERT_THROWS_CODE(parseInsertCommand("foo", cmd), UserException, ErrorCodes::FailedToParse);
}
TEST(CommandWriteOpsParsers, GarbageFieldsInDeleteDoc) {
auto cmd = BSON("delete"
<< "bar"
- << "deletes" << BSON_ARRAY("q" << BSONObj() << "limit" << 0 << "GARBAGE" << 1));
+ << "deletes"
+ << BSON_ARRAY("q" << BSONObj() << "limit" << 0 << "GARBAGE" << 1));
}
TEST(CommandWriteOpsParsers, BadCollationFieldInUpdateDoc) {
- auto cmd =
- BSON("update"
- << "bar"
- << "updates" << BSON_ARRAY("q" << BSONObj() << "u" << BSONObj() << "collation" << 1));
+ auto cmd = BSON("update"
+ << "bar"
+ << "updates"
+ << BSON_ARRAY("q" << BSONObj() << "u" << BSONObj() << "collation" << 1));
ASSERT_THROWS_CODE(parseInsertCommand("foo", cmd), UserException, ErrorCodes::FailedToParse);
}
TEST(CommandWriteOpsParsers, BadCollationFieldInDeleteDoc) {
- auto cmd =
- BSON("delete"
- << "bar"
- << "deletes" << BSON_ARRAY("q" << BSONObj() << "limit" << 0 << "collation" << 1));
+ auto cmd = BSON("delete"
+ << "bar"
+ << "deletes"
+ << BSON_ARRAY("q" << BSONObj() << "limit" << 0 << "collation" << 1));
ASSERT_THROWS_CODE(parseInsertCommand("foo", cmd), UserException, ErrorCodes::FailedToParse);
}
@@ -144,8 +159,11 @@ TEST(CommandWriteOpsParsers, Update) {
for (bool multi : {false, true}) {
auto cmd = BSON("update" << ns.coll() << "updates"
<< BSON_ARRAY(BSON("q" << query << "u" << update << "collation"
- << collation << "upsert" << upsert
- << "multi" << multi)));
+ << collation
+ << "upsert"
+ << upsert
+ << "multi"
+ << multi)));
auto op = parseUpdateCommand(ns.db(), cmd);
ASSERT_EQ(op.ns.ns(), ns.ns());
ASSERT(!op.bypassDocumentValidation);
@@ -166,9 +184,10 @@ TEST(CommandWriteOpsParsers, Remove) {
const BSONObj collation = BSON("locale"
<< "en_US");
for (bool multi : {false, true}) {
- auto cmd = BSON("delete" << ns.coll() << "deletes"
- << BSON_ARRAY(BSON("q" << query << "collation" << collation
- << "limit" << (multi ? 0 : 1))));
+ auto cmd =
+ BSON("delete" << ns.coll() << "deletes"
+ << BSON_ARRAY(BSON("q" << query << "collation" << collation << "limit"
+ << (multi ? 0 : 1))));
auto op = parseDeleteCommand(ns.db(), cmd);
ASSERT_EQ(op.ns.ns(), ns.ns());
ASSERT(!op.bypassDocumentValidation);
@@ -185,7 +204,8 @@ TEST(CommandWriteOpsParsers, RemoveErrorsWithBadLimit) {
for (BSONElement limit : BSON_ARRAY(-1 << 2 << 0.5)) {
auto cmd = BSON("delete"
<< "bar"
- << "deletes" << BSON_ARRAY("q" << BSONObj() << "limit" << limit));
+ << "deletes"
+ << BSON_ARRAY("q" << BSONObj() << "limit" << limit));
ASSERT_THROWS_CODE(
parseInsertCommand("foo", cmd), UserException, ErrorCodes::FailedToParse);
}
diff --git a/src/mongo/db/pipeline/accumulator.cpp b/src/mongo/db/pipeline/accumulator.cpp
index f5b21798f5a..b2fc55a455e 100644
--- a/src/mongo/db/pipeline/accumulator.cpp
+++ b/src/mongo/db/pipeline/accumulator.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/pipeline/accumulator.h"
#include "mongo/db/pipeline/value.h"
-#include "mongo/util/string_map.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/string_map.h"
namespace mongo {
diff --git a/src/mongo/db/pipeline/accumulator.h b/src/mongo/db/pipeline/accumulator.h
index afcce43669e..7dd604b82b4 100644
--- a/src/mongo/db/pipeline/accumulator.h
+++ b/src/mongo/db/pipeline/accumulator.h
@@ -56,7 +56,7 @@ namespace mongo {
class Accumulator : public RefCountable {
public:
- using Factory = boost::intrusive_ptr<Accumulator>(*)();
+ using Factory = boost::intrusive_ptr<Accumulator> (*)();
Accumulator() = default;
diff --git a/src/mongo/db/pipeline/document_internal.h b/src/mongo/db/pipeline/document_internal.h
index 78f4dac02da..3ad0d1ee577 100644
--- a/src/mongo/db/pipeline/document_internal.h
+++ b/src/mongo/db/pipeline/document_internal.h
@@ -30,11 +30,11 @@
#include <third_party/murmurhash3/MurmurHash3.h>
-#include <boost/intrusive_ptr.hpp>
#include <bitset>
+#include <boost/intrusive_ptr.hpp>
-#include "mongo/util/intrusive_counter.h"
#include "mongo/db/pipeline/value.h"
+#include "mongo/util/intrusive_counter.h"
namespace mongo {
/** Helper class to make the position in a document abstract
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
index a2a534e7aca..6fc931c09f4 100644
--- a/src/mongo/db/pipeline/document_source.h
+++ b/src/mongo/db/pipeline/document_source.h
@@ -44,11 +44,11 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/matcher.h"
#include "mongo/db/pipeline/accumulator.h"
-#include "mongo/db/pipeline/lookup_set_cache.h"
#include "mongo/db/pipeline/dependencies.h"
#include "mongo/db/pipeline/document.h"
#include "mongo/db/pipeline/expression.h"
#include "mongo/db/pipeline/expression_context.h"
+#include "mongo/db/pipeline/lookup_set_cache.h"
#include "mongo/db/pipeline/pipeline.h"
#include "mongo/db/pipeline/value.h"
#include "mongo/db/sorter/sorter.h"
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index 183ea58fda7..68cf11ba20e 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -125,10 +125,10 @@ void DocumentSourceCursor::loadBatch() {
<< WorkingSetCommon::toStatusString(obj),
state != PlanExecutor::DEAD);
- uassert(
- 17285,
- str::stream() << "cursor encountered an error: " << WorkingSetCommon::toStatusString(obj),
- state != PlanExecutor::FAILURE);
+ uassert(17285,
+ str::stream() << "cursor encountered an error: "
+ << WorkingSetCommon::toStatusString(obj),
+ state != PlanExecutor::FAILURE);
massert(17286,
str::stream() << "Unexpected return from PlanExecutor::getNext: " << state,
diff --git a/src/mongo/db/pipeline/document_source_geo_near.cpp b/src/mongo/db/pipeline/document_source_geo_near.cpp
index da30c2adaa3..35133d6a400 100644
--- a/src/mongo/db/pipeline/document_source_geo_near.cpp
+++ b/src/mongo/db/pipeline/document_source_geo_near.cpp
@@ -30,8 +30,8 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/pipeline/document_source.h"
#include "mongo/db/pipeline/document.h"
+#include "mongo/db/pipeline/document_source.h"
#include "mongo/util/log.h"
namespace mongo {
diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.cpp b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
index f2821b9107c..3c5fccf47bd 100644
--- a/src/mongo/db/pipeline/document_source_graph_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_graph_lookup.cpp
@@ -358,9 +358,12 @@ void DocumentSourceGraphLookUp::checkMemoryUsage() {
void DocumentSourceGraphLookUp::serializeToArray(std::vector<Value>& array, bool explain) const {
// Serialize default options.
MutableDocument spec(DOC("from" << _from.coll() << "as" << _as.getPath(false)
- << "connectToField" << _connectToField.getPath(false)
- << "connectFromField" << _connectFromField.getPath(false)
- << "startWith" << _startWith->serialize(false)));
+ << "connectToField"
+ << _connectToField.getPath(false)
+ << "connectFromField"
+ << _connectFromField.getPath(false)
+ << "startWith"
+ << _startWith->serialize(false)));
// depthField is optional; serialize it if it was specified.
if (_depthField) {
@@ -376,7 +379,8 @@ void DocumentSourceGraphLookUp::serializeToArray(std::vector<Value>& array, bool
const boost::optional<FieldPath> indexPath = (*_unwind)->indexPath();
spec["unwinding"] =
Value(DOC("preserveNullAndEmptyArrays"
- << (*_unwind)->preserveNullAndEmptyArrays() << "includeArrayIndex"
+ << (*_unwind)->preserveNullAndEmptyArrays()
+ << "includeArrayIndex"
<< (indexPath ? Value((*indexPath).getPath(false)) : Value())));
}
@@ -432,14 +436,14 @@ intrusive_ptr<DocumentSource> DocumentSourceGraphLookUp::createFromBson(
<< typeName(argument.type()),
argument.isNumber());
maxDepth = argument.safeNumberLong();
- uassert(
- 40101,
- str::stream() << "maxDepth requires a nonnegative argument, found: " << *maxDepth,
- *maxDepth >= 0);
- uassert(
- 40102,
- str::stream() << "maxDepth could not be represented as a long long: " << *maxDepth,
- *maxDepth == argument.number());
+ uassert(40101,
+ str::stream() << "maxDepth requires a nonnegative argument, found: "
+ << *maxDepth,
+ *maxDepth >= 0);
+ uassert(40102,
+ str::stream() << "maxDepth could not be represented as a long long: "
+ << *maxDepth,
+ *maxDepth == argument.number());
continue;
}
@@ -447,8 +451,8 @@ intrusive_ptr<DocumentSource> DocumentSourceGraphLookUp::createFromBson(
argName == "depthField" || argName == "connectToField") {
// All remaining arguments to $graphLookup are expected to be strings.
uassert(40103,
- str::stream() << "expected string as argument for " << argName
- << ", found: " << argument.toString(false, false),
+ str::stream() << "expected string as argument for " << argName << ", found: "
+ << argument.toString(false, false),
argument.type() == String);
}
@@ -464,8 +468,8 @@ intrusive_ptr<DocumentSource> DocumentSourceGraphLookUp::createFromBson(
depthField = boost::optional<FieldPath>(FieldPath(argument.String()));
} else {
uasserted(40104,
- str::stream()
- << "Unknown argument to $graphLookup: " << argument.fieldName());
+ str::stream() << "Unknown argument to $graphLookup: "
+ << argument.fieldName());
}
}
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp
index c4e6b72947a..277ea77d0fc 100644
--- a/src/mongo/db/pipeline/document_source_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup.cpp
@@ -93,9 +93,9 @@ boost::optional<Document> DocumentSourceLookUp::getNext() {
if (!_additionalFilter && _matchSrc) {
// We have internalized a $match, but have not yet computed the descended $match that should
// be applied to our queries.
- _additionalFilter = DocumentSourceMatch::descendMatchOnPath(_matchSrc->getMatchExpression(),
- _as.getPath(false),
- pExpCtx)->getQuery();
+ _additionalFilter = DocumentSourceMatch::descendMatchOnPath(
+ _matchSrc->getMatchExpression(), _as.getPath(false), pExpCtx)
+ ->getQuery();
}
if (_handlingUnwind) {
@@ -120,7 +120,8 @@ boost::optional<Document> DocumentSourceLookUp::getNext() {
objsize += result.objsize();
uassert(4568,
str::stream() << "Total size of documents in " << _fromNs.coll() << " matching "
- << query << " exceeds maximum document size",
+ << query
+ << " exceeds maximum document size",
objsize <= BSONObjMaxInternalSize);
results.push_back(Value(result));
}
@@ -224,23 +225,23 @@ Pipeline::SourceContainer::iterator DocumentSourceLookUp::optimizeAt(
}
bool isMatchOnlyOnAs = true;
- auto computeWhetherMatchOnAs =
- [&isMatchOnlyOnAs, &outputPath](MatchExpression* expression, std::string path) -> void {
- // If 'expression' is the child of a $elemMatch, we cannot internalize the $match. For
- // example, {b: {$elemMatch: {$gt: 1, $lt: 4}}}, where "b" is our "_as" field. This is
- // because there's no way to modify the expression to be a match just on 'b'--we cannot
- // change the path to an empty string, or remove the node entirely.
- if (expression->matchType() == MatchExpression::ELEM_MATCH_VALUE ||
- expression->matchType() == MatchExpression::ELEM_MATCH_OBJECT) {
- isMatchOnlyOnAs = false;
- }
- if (expression->numChildren() == 0) {
- // 'expression' is a leaf node; examine the path. It is important that 'outputPath'
- // not equal 'path', because we cannot change the expression {b: {$eq: 3}}, where
- // 'path' is 'b', to be a match on a subfield, since no subfield exists.
- isMatchOnlyOnAs = isMatchOnlyOnAs && expression::isPathPrefixOf(outputPath, path);
- }
- };
+ auto computeWhetherMatchOnAs = [&isMatchOnlyOnAs, &outputPath](MatchExpression* expression,
+ std::string path) -> void {
+ // If 'expression' is the child of a $elemMatch, we cannot internalize the $match. For
+ // example, {b: {$elemMatch: {$gt: 1, $lt: 4}}}, where "b" is our "_as" field. This is
+ // because there's no way to modify the expression to be a match just on 'b'--we cannot
+ // change the path to an empty string, or remove the node entirely.
+ if (expression->matchType() == MatchExpression::ELEM_MATCH_VALUE ||
+ expression->matchType() == MatchExpression::ELEM_MATCH_OBJECT) {
+ isMatchOnlyOnAs = false;
+ }
+ if (expression->numChildren() == 0) {
+ // 'expression' is a leaf node; examine the path. It is important that 'outputPath'
+ // not equal 'path', because we cannot change the expression {b: {$eq: 3}}, where
+ // 'path' is 'b', to be a match on a subfield, since no subfield exists.
+ isMatchOnlyOnAs = isMatchOnlyOnAs && expression::isPathPrefixOf(outputPath, path);
+ }
+ };
expression::mapOver(dependent->getMatchExpression(), computeWhetherMatchOnAs);
@@ -375,23 +376,27 @@ boost::optional<Document> DocumentSourceLookUp::unwindResult() {
void DocumentSourceLookUp::serializeToArray(std::vector<Value>& array, bool explain) const {
MutableDocument output(
DOC(getSourceName() << DOC("from" << _fromNs.coll() << "as" << _as.getPath(false)
- << "localField" << _localField.getPath(false)
- << "foreignField" << _foreignField.getPath(false))));
+ << "localField"
+ << _localField.getPath(false)
+ << "foreignField"
+ << _foreignField.getPath(false))));
if (explain) {
if (_handlingUnwind) {
const boost::optional<FieldPath> indexPath = _unwindSrc->indexPath();
output[getSourceName()]["unwinding"] =
Value(DOC("preserveNullAndEmptyArrays"
- << _unwindSrc->preserveNullAndEmptyArrays() << "includeArrayIndex"
+ << _unwindSrc->preserveNullAndEmptyArrays()
+ << "includeArrayIndex"
<< (indexPath ? Value(indexPath->getPath(false)) : Value())));
}
if (_matchSrc) {
// Our output does not have to be parseable, so include a "matching" field with the
// descended match expression.
- output[getSourceName()]["matching"] = Value(
- DocumentSourceMatch::descendMatchOnPath(
- _matchSrc->getMatchExpression(), _as.getPath(false), pExpCtx)->getQuery());
+ output[getSourceName()]["matching"] =
+ Value(DocumentSourceMatch::descendMatchOnPath(
+ _matchSrc->getMatchExpression(), _as.getPath(false), pExpCtx)
+ ->getQuery());
}
array.push_back(Value(output.freeze()));
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
index 21f7dab580c..b754caa3af9 100644
--- a/src/mongo/db/pipeline/document_source_match.cpp
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -404,31 +404,29 @@ boost::intrusive_ptr<DocumentSourceMatch> DocumentSourceMatch::descendMatchOnPat
MatchExpression* matchExpr,
const std::string& descendOn,
intrusive_ptr<ExpressionContext> expCtx) {
- expression::mapOver(matchExpr,
- [&descendOn](MatchExpression* node, std::string path) -> void {
- // Cannot call this method on a $match including a $elemMatch.
- invariant(node->matchType() != MatchExpression::ELEM_MATCH_OBJECT &&
- node->matchType() != MatchExpression::ELEM_MATCH_VALUE);
- // Logical nodes do not have a path, but both 'leaf' and 'array' nodes
- // do.
- if (node->isLogical()) {
- return;
- }
-
- auto leafPath = node->path();
- invariant(expression::isPathPrefixOf(descendOn, leafPath));
-
- auto newPath = leafPath.substr(descendOn.size() + 1);
- if (node->isLeaf() &&
- node->matchType() != MatchExpression::TYPE_OPERATOR &&
- node->matchType() != MatchExpression::WHERE) {
- auto leafNode = static_cast<LeafMatchExpression*>(node);
- leafNode->setPath(newPath);
- } else if (node->isArray()) {
- auto arrayNode = static_cast<ArrayMatchingMatchExpression*>(node);
- arrayNode->setPath(newPath);
- }
- });
+ expression::mapOver(matchExpr, [&descendOn](MatchExpression* node, std::string path) -> void {
+ // Cannot call this method on a $match including a $elemMatch.
+ invariant(node->matchType() != MatchExpression::ELEM_MATCH_OBJECT &&
+ node->matchType() != MatchExpression::ELEM_MATCH_VALUE);
+ // Logical nodes do not have a path, but both 'leaf' and 'array' nodes
+ // do.
+ if (node->isLogical()) {
+ return;
+ }
+
+ auto leafPath = node->path();
+ invariant(expression::isPathPrefixOf(descendOn, leafPath));
+
+ auto newPath = leafPath.substr(descendOn.size() + 1);
+ if (node->isLeaf() && node->matchType() != MatchExpression::TYPE_OPERATOR &&
+ node->matchType() != MatchExpression::WHERE) {
+ auto leafNode = static_cast<LeafMatchExpression*>(node);
+ leafNode->setPath(newPath);
+ } else if (node->isArray()) {
+ auto arrayNode = static_cast<ArrayMatchingMatchExpression*>(node);
+ arrayNode->setPath(newPath);
+ }
+ });
BSONObjBuilder query;
matchExpr->serialize(&query);
@@ -479,15 +477,13 @@ DocumentSource::GetDepsReturn DocumentSourceMatch::getDependencies(DepsTracker*
}
void DocumentSourceMatch::addDependencies(DepsTracker* deps) const {
- expression::mapOver(_expression.get(),
- [deps](MatchExpression* node, std::string path) -> void {
- if (!path.empty() &&
- (node->numChildren() == 0 ||
- node->matchType() == MatchExpression::ELEM_MATCH_VALUE ||
- node->matchType() == MatchExpression::ELEM_MATCH_OBJECT)) {
- deps->fields.insert(path);
- }
- });
+ expression::mapOver(_expression.get(), [deps](MatchExpression* node, std::string path) -> void {
+ if (!path.empty() &&
+ (node->numChildren() == 0 || node->matchType() == MatchExpression::ELEM_MATCH_VALUE ||
+ node->matchType() == MatchExpression::ELEM_MATCH_OBJECT)) {
+ deps->fields.insert(path);
+ }
+ });
}
DocumentSourceMatch::DocumentSourceMatch(const BSONObj& query,
diff --git a/src/mongo/db/pipeline/document_source_merge_cursors.cpp b/src/mongo/db/pipeline/document_source_merge_cursors.cpp
index 6056a2f7646..01f11cb0c9f 100644
--- a/src/mongo/db/pipeline/document_source_merge_cursors.cpp
+++ b/src/mongo/db/pipeline/document_source_merge_cursors.cpp
@@ -84,9 +84,11 @@ intrusive_ptr<DocumentSource> DocumentSourceMergeCursors::createFromBson(
Value DocumentSourceMergeCursors::serialize(bool explain) const {
vector<Value> cursors;
for (size_t i = 0; i < _cursorDescriptors.size(); i++) {
- cursors.push_back(Value(
- DOC("host" << Value(_cursorDescriptors[i].connectionString.toString()) << "ns"
- << _cursorDescriptors[i].ns << "id" << _cursorDescriptors[i].cursorId)));
+ cursors.push_back(
+ Value(DOC("host" << Value(_cursorDescriptors[i].connectionString.toString()) << "ns"
+ << _cursorDescriptors[i].ns
+ << "id"
+ << _cursorDescriptors[i].cursorId)));
}
return Value(DOC(getSourceName() << Value(cursors)));
}
@@ -137,7 +139,8 @@ Document DocumentSourceMergeCursors::nextSafeFrom(DBClientCursor* cursor) {
const int code = next.hasField("code") ? next["code"].numberInt() : 17029;
uasserted(code,
str::stream() << "Received error in response from " << cursor->originalHost()
- << ": " << next);
+ << ": "
+ << next);
}
return Document::fromBsonWithMetaData(next);
}
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index 148f7f4fc7e..252d894ba04 100644
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -87,7 +87,8 @@ void DocumentSourceOut::prepTempCollection() {
bool ok = conn->runCommand(_outputNs.db().toString(), cmd.done(), info);
uassert(16994,
str::stream() << "failed to create temporary $out collection '" << _tempNs.ns()
- << "': " << info.toString(),
+ << "': "
+ << info.toString(),
ok);
}
@@ -103,7 +104,10 @@ void DocumentSourceOut::prepTempCollection() {
BSONObj err = conn->getLastErrorDetailed();
uassert(16995,
str::stream() << "copying index for $out failed."
- << " index: " << indexBson << " error: " << err,
+ << " index: "
+ << indexBson
+ << " error: "
+ << err,
DBClientWithCommands::getLastErrorString(err).empty());
}
}
diff --git a/src/mongo/db/pipeline/document_source_redact.cpp b/src/mongo/db/pipeline/document_source_redact.cpp
index b1d2c9e54f2..38f1e9fa8b8 100644
--- a/src/mongo/db/pipeline/document_source_redact.cpp
+++ b/src/mongo/db/pipeline/document_source_redact.cpp
@@ -150,7 +150,8 @@ boost::optional<Document> DocumentSourceRedact::redactObject() {
uasserted(17053,
str::stream() << "$redact's expression should not return anything "
<< "aside from the variables $$KEEP, $$DESCEND, and "
- << "$$PRUNE, but returned " << expressionResult.toString());
+ << "$$PRUNE, but returned "
+ << expressionResult.toString());
}
}
diff --git a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
index 802a5d05aab..dafae4ed111 100644
--- a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
@@ -106,9 +106,12 @@ boost::optional<Document> DocumentSourceSampleFromRandomCursor::getNextNonDuplic
uassert(
28793,
str::stream()
- << "The optimized $sample stage requires all documents have a " << _idField
+ << "The optimized $sample stage requires all documents have a "
+ << _idField
<< " field in order to de-duplicate results, but encountered a document without a "
- << _idField << " field: " << (*doc).toString(),
+ << _idField
+ << " field: "
+ << (*doc).toString(),
!idField.missing());
if (_seenDocs.insert(std::move(idField)).second) {
@@ -118,8 +121,9 @@ boost::optional<Document> DocumentSourceSampleFromRandomCursor::getNextNonDuplic
}
uasserted(28799,
str::stream() << "$sample stage could not find a non-duplicate document after "
- << kMaxAttempts << " while using a random cursor. This is likely a "
- "sporadic failure, please try again.");
+ << kMaxAttempts
+ << " while using a random cursor. This is likely a "
+ "sporadic failure, please try again.");
}
Value DocumentSourceSampleFromRandomCursor::serialize(bool explain) const {
diff --git a/src/mongo/db/pipeline/document_source_sort.cpp b/src/mongo/db/pipeline/document_source_sort.cpp
index f2e5dca0347..c9c21043497 100644
--- a/src/mongo/db/pipeline/document_source_sort.cpp
+++ b/src/mongo/db/pipeline/document_source_sort.cpp
@@ -76,7 +76,8 @@ void DocumentSourceSort::serializeToArray(vector<Value>& array, bool explain) co
array.push_back(
Value(DOC(getSourceName()
<< DOC("sortKey" << serializeSortKey(explain) << "mergePresorted"
- << (_mergingPresorted ? Value(true) : Value()) << "limit"
+ << (_mergingPresorted ? Value(true) : Value())
+ << "limit"
<< (limitSrc ? Value(limitSrc->getLimit()) : Value())))));
} else { // one Value for $sort and maybe a Value for $limit
MutableDocument inner(serializeSortKey(explain));
diff --git a/src/mongo/db/pipeline/document_source_test.cpp b/src/mongo/db/pipeline/document_source_test.cpp
index e0c6782883e..b5ed007d1f9 100644
--- a/src/mongo/db/pipeline/document_source_test.cpp
+++ b/src/mongo/db/pipeline/document_source_test.cpp
@@ -40,9 +40,9 @@
#include "mongo/db/storage/storage_options.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/stdx/memory.h"
+#include "mongo/unittest/temp_dir.h"
#include "mongo/util/clock_source_mock.h"
#include "mongo/util/tick_source_mock.h"
-#include "mongo/unittest/temp_dir.h"
namespace mongo {
bool isMongos() {
@@ -117,7 +117,7 @@ TEST(TruncateSort, TruncateSortDedupsSortCorrectly) {
}
template <size_t ArrayLen>
-set<string> arrayToSet(const char*(&array)[ArrayLen]) {
+set<string> arrayToSet(const char* (&array)[ArrayLen]) {
set<string> out;
for (size_t i = 0; i < ArrayLen; i++)
out.insert(array[i]);
@@ -837,8 +837,9 @@ class TwoValuesTwoKeys : public CheckResultsBase {
virtual BSONObj groupSpec() {
return BSON("_id"
<< "$_id"
- << "a" << BSON("$push"
- << "$a"));
+ << "a"
+ << BSON("$push"
+ << "$a"));
}
virtual string expectedResultSetString() {
return "[{_id:0,a:[1]},{_id:1,a:[2]}]";
@@ -856,8 +857,9 @@ class FourValuesTwoKeys : public CheckResultsBase {
virtual BSONObj groupSpec() {
return BSON("_id"
<< "$id"
- << "a" << BSON("$push"
- << "$a"));
+ << "a"
+ << BSON("$push"
+ << "$a"));
}
virtual string expectedResultSetString() {
return "[{_id:0,a:[1,3]},{_id:1,a:[2,4]}]";
@@ -875,8 +877,10 @@ class FourValuesTwoKeysTwoAccumulators : public CheckResultsBase {
virtual BSONObj groupSpec() {
return BSON("_id"
<< "$id"
- << "list" << BSON("$push"
- << "$a") << "sum"
+ << "list"
+ << BSON("$push"
+ << "$a")
+ << "sum"
<< BSON("$sum" << BSON("$divide" << BSON_ARRAY("$a" << 2))));
}
virtual string expectedResultSetString() {
@@ -892,8 +896,9 @@ class GroupNullUndefinedIds : public CheckResultsBase {
virtual BSONObj groupSpec() {
return BSON("_id"
<< "$a"
- << "sum" << BSON("$sum"
- << "$b"));
+ << "sum"
+ << BSON("$sum"
+ << "$b"));
}
virtual string expectedResultSetString() {
return "[{_id:null,sum:110}]";
@@ -957,8 +962,9 @@ public:
// Create a group source.
createGroup(BSON("_id"
<< "$x"
- << "list" << BSON("$push"
- << "$y")));
+ << "list"
+ << BSON("$push"
+ << "$y")));
// Create a merger version of the source.
intrusive_ptr<DocumentSource> group = createMerger();
// Attach the merger to the synthetic shard results.
@@ -2348,7 +2354,8 @@ private:
void createUnwind(bool preserveNullAndEmptyArrays, bool includeArrayIndex) {
auto specObj =
DOC("$unwind" << DOC("path" << unwindFieldPath() << "preserveNullAndEmptyArrays"
- << preserveNullAndEmptyArrays << "includeArrayIndex"
+ << preserveNullAndEmptyArrays
+ << "includeArrayIndex"
<< (includeArrayIndex ? Value(indexPath()) : Value())));
_unwind = static_cast<DocumentSourceUnwind*>(
DocumentSourceUnwind::createFromBson(specObj.toBson().firstElement(), ctx()).get());
@@ -2396,11 +2403,12 @@ private:
}
BSONObj expectedSerialization(bool preserveNullAndEmptyArrays, bool includeArrayIndex) const {
- return DOC("$unwind" << DOC(
- "path" << Value(unwindFieldPath()) << "preserveNullAndEmptyArrays"
- << (preserveNullAndEmptyArrays ? Value(true) : Value())
- << "includeArrayIndex"
- << (includeArrayIndex ? Value(indexPath()) : Value()))).toBson();
+ return DOC("$unwind" << DOC("path" << Value(unwindFieldPath())
+ << "preserveNullAndEmptyArrays"
+ << (preserveNullAndEmptyArrays ? Value(true) : Value())
+ << "includeArrayIndex"
+ << (includeArrayIndex ? Value(indexPath()) : Value())))
+ .toBson();
}
/** Assert that iterator state accessors consistently report the source is exhausted. */
@@ -2911,7 +2919,8 @@ TEST_F(InvalidUnwindSpec, NonDollarPrefixedPath) {
TEST_F(InvalidUnwindSpec, NonBoolPreserveNullAndEmptyArrays) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays" << 2))),
+ << "preserveNullAndEmptyArrays"
+ << 2))),
UserException,
28809);
}
@@ -2919,7 +2928,8 @@ TEST_F(InvalidUnwindSpec, NonBoolPreserveNullAndEmptyArrays) {
TEST_F(InvalidUnwindSpec, NonStringIncludeArrayIndex) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "includeArrayIndex" << 2))),
+ << "includeArrayIndex"
+ << 2))),
UserException,
28810);
}
@@ -2951,13 +2961,16 @@ TEST_F(InvalidUnwindSpec, DollarPrefixedIncludeArrayIndex) {
TEST_F(InvalidUnwindSpec, UnrecognizedOption) {
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "preserveNullAndEmptyArrays" << true
- << "foo" << 3))),
+ << "preserveNullAndEmptyArrays"
+ << true
+ << "foo"
+ << 3))),
UserException,
28811);
ASSERT_THROWS_CODE(createUnwind(BSON("$unwind" << BSON("path"
<< "$x"
- << "foo" << 3))),
+ << "foo"
+ << 3))),
UserException,
28811);
}
@@ -3306,9 +3319,8 @@ public:
match1->optimizeAt(container.begin(), &container);
ASSERT_EQUALS(container.size(), 1U);
ASSERT_EQUALS(match1->getQuery(),
- fromjson(
- "{'$and': [{'$and': [{a:1}, {b:1}]},"
- "{c:1}]}"));
+ fromjson("{'$and': [{'$and': [{a:1}, {b:1}]},"
+ "{c:1}]}"));
}
};
@@ -3367,7 +3379,8 @@ public:
<< "foreignField"
<< "c"
<< "as"
- << "d.e")).firstElement(),
+ << "d.e"))
+ .firstElement(),
ctx());
lookup->setSource(source.get());
@@ -3391,7 +3404,8 @@ public:
<< "foreignField"
<< "c"
<< "as"
- << "d")).firstElement(),
+ << "d"))
+ .firstElement(),
ctx());
lookup->setSource(source.get());
diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp
index bba51298389..58a4d979146 100644
--- a/src/mongo/db/pipeline/document_source_unwind.cpp
+++ b/src/mongo/db/pipeline/document_source_unwind.cpp
@@ -309,7 +309,8 @@ intrusive_ptr<DocumentSource> DocumentSourceUnwind::createFromBson(
indexPath = subElem.String();
uassert(28822,
str::stream() << "includeArrayIndex option to $unwind stage should not be "
- "prefixed with a '$': " << (*indexPath),
+ "prefixed with a '$': "
+ << (*indexPath),
(*indexPath)[0] != '$');
} else {
uasserted(28811,
diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp
index 8024ec39cf2..9c68924d476 100644
--- a/src/mongo/db/pipeline/expression.cpp
+++ b/src/mongo/db/pipeline/expression.cpp
@@ -40,9 +40,9 @@
#include "mongo/db/pipeline/document.h"
#include "mongo/db/pipeline/expression_context.h"
#include "mongo/db/pipeline/value.h"
-#include "mongo/util/string_map.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/platform/bits.h"
+#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/string_map.h"
namespace mongo {
using Parser = Expression::Parser;
@@ -85,7 +85,9 @@ void Variables::uassertValidNameForUserWrite(StringData varName) {
uassert(16868,
str::stream() << "'" << varName << "' contains an invalid character "
- << "for a variable name: '" << varName[i] << "'",
+ << "for a variable name: '"
+ << varName[i]
+ << "'",
charIsValid);
}
}
@@ -110,7 +112,9 @@ void Variables::uassertValidNameForUserRead(StringData varName) {
uassert(16871,
str::stream() << "'" << varName << "' contains an invalid character "
- << "for a variable name: '" << varName[i] << "'",
+ << "for a variable name: '"
+ << varName[i]
+ << "'",
charIsValid);
}
}
@@ -182,7 +186,8 @@ bool Expression::ObjectCtx::inclusionOk() const {
string Expression::removeFieldPrefix(const string& prefixedField) {
uassert(16419,
str::stream() << "field path must not contain embedded null characters"
- << prefixedField.find("\0") << ",",
+ << prefixedField.find("\0")
+ << ",",
prefixedField.find('\0') == string::npos);
const char* pPrefixedField = prefixedField.c_str();
@@ -220,7 +225,8 @@ intrusive_ptr<Expression> Expression::parseObject(BSONObj obj,
uassert(
15983,
str::stream() << "the operator must be the only field in a pipeline object (at '"
- << pFieldName << "'",
+ << pFieldName
+ << "'",
fieldCount == 0);
uassert(16404,
@@ -234,7 +240,9 @@ intrusive_ptr<Expression> Expression::parseObject(BSONObj obj,
} else {
uassert(15990,
str::stream() << "this object is already an operator expression, and can't be "
- "used as a document expression (at '" << pFieldName << "')",
+ "used as a document expression (at '"
+ << pFieldName
+ << "')",
kind != OPERATOR);
uassert(16405,
@@ -299,7 +307,9 @@ intrusive_ptr<Expression> Expression::parseObject(BSONObj obj,
default:
uassert(15992,
str::stream() << "disallowed field type " << typeName(fieldType)
- << " in object expression (at '" << fieldName << "')",
+ << " in object expression (at '"
+ << fieldName
+ << "')",
false);
}
}
@@ -637,11 +647,13 @@ Value ExpressionArrayElemAt::evaluateInternal(Variables* vars) const {
array.isArray());
uassert(28690,
str::stream() << getOpName() << "'s second argument must be a numeric value,"
- << " but is " << typeName(indexArg.getType()),
+ << " but is "
+ << typeName(indexArg.getType()),
indexArg.numeric());
uassert(28691,
str::stream() << getOpName() << "'s second argument must be representable as"
- << " a 32-bit integer: " << indexArg.coerceToDouble(),
+ << " a 32-bit integer: "
+ << indexArg.coerceToDouble(),
indexArg.integral());
long long i = indexArg.coerceToLong();
@@ -969,8 +981,8 @@ intrusive_ptr<Expression> ExpressionDateToString::parse(BSONElement expr,
dateElem = arg;
} else {
uasserted(18534,
- str::stream()
- << "Unrecognized argument to $dateToString: " << arg.fieldName());
+ str::stream() << "Unrecognized argument to $dateToString: "
+ << arg.fieldName());
}
}
@@ -1070,7 +1082,8 @@ string ExpressionDateToString::formatDate(const string& format,
const int year = ExpressionYear::extract(tm);
uassert(18537,
str::stream() << "$dateToString is only defined on year 0-9999,"
- << " tried to use year " << year,
+ << " tried to use year "
+ << year,
(year >= 0) && (year <= 9999));
insertPadded(formatted, year, 4);
break;
@@ -1201,7 +1214,9 @@ Value ExpressionDivide::evaluateInternal(Variables* vars) const {
} else {
uasserted(16609,
str::stream() << "$divide only supports numeric types, not "
- << typeName(lhs.getType()) << " and " << typeName(rhs.getType()));
+ << typeName(lhs.getType())
+ << " and "
+ << typeName(rhs.getType()));
}
}
@@ -1683,8 +1698,9 @@ intrusive_ptr<Expression> ExpressionFilter::optimize() {
}
Value ExpressionFilter::serialize(bool explain) const {
- return Value(DOC("$filter" << DOC("input" << _input->serialize(explain) << "as" << _varName
- << "cond" << _filter->serialize(explain))));
+ return Value(
+ DOC("$filter" << DOC("input" << _input->serialize(explain) << "as" << _varName << "cond"
+ << _filter->serialize(explain))));
}
Value ExpressionFilter::evaluateInternal(Variables* vars) const {
@@ -2038,7 +2054,9 @@ Value ExpressionMod::evaluateInternal(Variables* vars) const {
} else {
uasserted(16611,
str::stream() << "$mod only supports numeric types, not "
- << typeName(lhs.getType()) << " and " << typeName(rhs.getType()));
+ << typeName(lhs.getType())
+ << " and "
+ << typeName(rhs.getType()));
}
}
@@ -2165,12 +2183,15 @@ void uassertIfNotIntegralAndNonNegative(Value val,
StringData argumentName) {
uassert(40096,
str::stream() << expressionName << "requires an integral " << argumentName
- << ", found a value of type: " << typeName(val.getType())
- << ", with value: " << val.toString(),
+ << ", found a value of type: "
+ << typeName(val.getType())
+ << ", with value: "
+ << val.toString(),
val.integral());
uassert(40097,
str::stream() << expressionName << " requires a nonnegative " << argumentName
- << ", found: " << val.toString(),
+ << ", found: "
+ << val.toString(),
val.coerceToInt() >= 0);
}
@@ -2796,7 +2817,8 @@ Value ExpressionRange::evaluateInternal(Variables* vars) const {
startVal.numeric());
uassert(34444,
str::stream() << "$range requires a starting value that can be represented as a 32-bit "
- "integer, found value: " << startVal.toString(),
+ "integer, found value: "
+ << startVal.toString(),
startVal.integral());
uassert(34445,
str::stream() << "$range requires a numeric ending value, found value of type: "
@@ -2804,7 +2826,8 @@ Value ExpressionRange::evaluateInternal(Variables* vars) const {
endVal.numeric());
uassert(34446,
str::stream() << "$range requires an ending value that can be represented as a 32-bit "
- "integer, found value: " << endVal.toString(),
+ "integer, found value: "
+ << endVal.toString(),
endVal.integral());
int current = startVal.coerceToInt();
@@ -2821,7 +2844,8 @@ Value ExpressionRange::evaluateInternal(Variables* vars) const {
stepVal.numeric());
uassert(34448,
str::stream() << "$range requires a step value that can be represented as a 32-bit "
- "integer, found value: " << stepVal.toString(),
+ "integer, found value: "
+ << stepVal.toString(),
stepVal.integral());
step = stepVal.coerceToInt();
@@ -2984,11 +3008,13 @@ Value ExpressionSetDifference::evaluateInternal(Variables* vars) const {
uassert(17048,
str::stream() << "both operands of $setDifference must be arrays. First "
- << "argument is of type: " << typeName(lhs.getType()),
+ << "argument is of type: "
+ << typeName(lhs.getType()),
lhs.isArray());
uassert(17049,
str::stream() << "both operands of $setDifference must be arrays. Second "
- << "argument is of type: " << typeName(rhs.getType()),
+ << "argument is of type: "
+ << typeName(rhs.getType()),
rhs.isArray());
ValueSet rhsSet = arrayToSet(rhs);
@@ -3026,7 +3052,8 @@ Value ExpressionSetEquals::evaluateInternal(Variables* vars) const {
const Value nextEntry = vpOperand[i]->evaluateInternal(vars);
uassert(17044,
str::stream() << "All operands of $setEquals must be arrays. One "
- << "argument is of type: " << typeName(nextEntry.getType()),
+ << "argument is of type: "
+ << typeName(nextEntry.getType()),
nextEntry.isArray());
if (i == 0) {
@@ -3058,7 +3085,8 @@ Value ExpressionSetIntersection::evaluateInternal(Variables* vars) const {
}
uassert(17047,
str::stream() << "All operands of $setIntersection must be arrays. One "
- << "argument is of type: " << typeName(nextEntry.getType()),
+ << "argument is of type: "
+ << typeName(nextEntry.getType()),
nextEntry.isArray());
if (i == 0) {
@@ -3113,11 +3141,13 @@ Value ExpressionSetIsSubset::evaluateInternal(Variables* vars) const {
uassert(17046,
str::stream() << "both operands of $setIsSubset must be arrays. First "
- << "argument is of type: " << typeName(lhs.getType()),
+ << "argument is of type: "
+ << typeName(lhs.getType()),
lhs.isArray());
uassert(17042,
str::stream() << "both operands of $setIsSubset must be arrays. Second "
- << "argument is of type: " << typeName(rhs.getType()),
+ << "argument is of type: "
+ << typeName(rhs.getType()),
rhs.isArray());
return setIsSubsetHelper(lhs.getArray(), arrayToSet(rhs));
@@ -3142,7 +3172,8 @@ public:
uassert(17310,
str::stream() << "both operands of $setIsSubset must be arrays. First "
- << "argument is of type: " << typeName(lhs.getType()),
+ << "argument is of type: "
+ << typeName(lhs.getType()),
lhs.isArray());
return setIsSubsetHelper(lhs.getArray(), _cachedRhsSet);
@@ -3164,7 +3195,8 @@ intrusive_ptr<Expression> ExpressionSetIsSubset::optimize() {
const Value rhs = ec->getValue();
uassert(17311,
str::stream() << "both operands of $setIsSubset must be arrays. Second "
- << "argument is of type: " << typeName(rhs.getType()),
+ << "argument is of type: "
+ << typeName(rhs.getType()),
rhs.isArray());
return new Optimized(arrayToSet(rhs), vpOperand);
@@ -3189,7 +3221,8 @@ Value ExpressionSetUnion::evaluateInternal(Variables* vars) const {
}
uassert(17043,
str::stream() << "All operands of $setUnion must be arrays. One argument"
- << " is of type: " << typeName(newEntries.getType()),
+ << " is of type: "
+ << typeName(newEntries.getType()),
newEntries.isArray());
unionedSet.insert(newEntries.getArray().begin(), newEntries.getArray().end());
@@ -3229,15 +3262,18 @@ Value ExpressionSlice::evaluateInternal(Variables* vars) const {
uassert(28724,
str::stream() << "First argument to $slice must be an array, but is"
- << " of type: " << typeName(arrayVal.getType()),
+ << " of type: "
+ << typeName(arrayVal.getType()),
arrayVal.isArray());
uassert(28725,
str::stream() << "Second argument to $slice must be a numeric value,"
- << " but is of type: " << typeName(arg2.getType()),
+ << " but is of type: "
+ << typeName(arg2.getType()),
arg2.numeric());
uassert(28726,
str::stream() << "Second argument to $slice can't be represented as"
- << " a 32-bit integer: " << arg2.coerceToDouble(),
+ << " a 32-bit integer: "
+ << arg2.coerceToDouble(),
arg2.integral());
const auto& array = arrayVal.getArray();
@@ -3277,11 +3313,13 @@ Value ExpressionSlice::evaluateInternal(Variables* vars) const {
uassert(28727,
str::stream() << "Third argument to $slice must be numeric, but "
- << "is of type: " << typeName(countVal.getType()),
+ << "is of type: "
+ << typeName(countVal.getType()),
countVal.numeric());
uassert(28728,
str::stream() << "Third argument to $slice can't be represented"
- << " as a 32-bit integer: " << countVal.coerceToDouble(),
+ << " as a 32-bit integer: "
+ << countVal.coerceToDouble(),
countVal.integral());
uassert(28729,
str::stream() << "Third argument to $slice must be positive: "
@@ -3329,11 +3367,13 @@ Value ExpressionSplit::evaluateInternal(Variables* vars) const {
uassert(40085,
str::stream() << "$split requires an expression that evaluates to a string as a first "
- "argument, found: " << typeName(inputArg.getType()),
+ "argument, found: "
+ << typeName(inputArg.getType()),
inputArg.getType() == BSONType::String);
uassert(40086,
str::stream() << "$split requires an expression that evaluates to a string as a second "
- "argument, found: " << typeName(separatorArg.getType()),
+ "argument, found: "
+ << typeName(separatorArg.getType()),
separatorArg.getType() == BSONType::String);
std::string input = inputArg.getString();
@@ -3421,12 +3461,14 @@ Value ExpressionSubstrBytes::evaluateInternal(Variables* vars) const {
uassert(16034,
str::stream() << getOpName()
<< ": starting index must be a numeric type (is BSON type "
- << typeName(pLower.getType()) << ")",
+ << typeName(pLower.getType())
+ << ")",
(pLower.getType() == NumberInt || pLower.getType() == NumberLong ||
pLower.getType() == NumberDouble));
uassert(16035,
str::stream() << getOpName() << ": length must be a numeric type (is BSON type "
- << typeName(pLength.getType()) << ")",
+ << typeName(pLength.getType())
+ << ")",
(pLength.getType() == NumberInt || pLength.getType() == NumberLong ||
pLength.getType() == NumberDouble));
@@ -3471,7 +3513,8 @@ Value ExpressionSubstrCP::evaluateInternal(Variables* vars) const {
std::string str = inputVal.coerceToString();
uassert(34450,
str::stream() << getOpName() << ": starting index must be a numeric type (is BSON type "
- << typeName(lowerVal.getType()) << ")",
+ << typeName(lowerVal.getType())
+ << ")",
lowerVal.numeric());
uassert(34451,
str::stream() << getOpName()
@@ -3480,7 +3523,8 @@ Value ExpressionSubstrCP::evaluateInternal(Variables* vars) const {
lowerVal.integral());
uassert(34452,
str::stream() << getOpName() << ": length must be a numeric type (is BSON type "
- << typeName(lengthVal.getType()) << ")",
+ << typeName(lengthVal.getType())
+ << ")",
lengthVal.numeric());
uassert(34453,
str::stream() << getOpName()
@@ -3562,10 +3606,10 @@ const char* ExpressionStrLenBytes::getOpName() const {
Value ExpressionStrLenCP::evaluateInternal(Variables* vars) const {
Value val(vpOperand[0]->evaluateInternal(vars));
- uassert(
- 34471,
- str::stream() << "$strLenCP requires a string argument, found: " << typeName(val.getType()),
- val.getType() == String);
+ uassert(34471,
+ str::stream() << "$strLenCP requires a string argument, found: "
+ << typeName(val.getType()),
+ val.getType() == String);
std::string stringVal = val.getString();
@@ -4076,10 +4120,10 @@ Value ExpressionZip::evaluateInternal(Variables* vars) const {
return Value(BSONNULL);
}
- uassert(
- 34468,
- str::stream() << "$zip found a non-array expression in input: " << evalExpr.toString(),
- evalExpr.isArray());
+ uassert(34468,
+ str::stream() << "$zip found a non-array expression in input: "
+ << evalExpr.toString(),
+ evalExpr.isArray());
inputValues.push_back(evalExpr.getArray());
@@ -4136,14 +4180,16 @@ boost::intrusive_ptr<Expression> ExpressionZip::optimize() {
std::transform(_inputs.begin(),
_inputs.end(),
_inputs.begin(),
- [](intrusive_ptr<Expression> inputExpression)
- -> intrusive_ptr<Expression> { return inputExpression->optimize(); });
+ [](intrusive_ptr<Expression> inputExpression) -> intrusive_ptr<Expression> {
+ return inputExpression->optimize();
+ });
std::transform(_defaults.begin(),
_defaults.end(),
_defaults.begin(),
- [](intrusive_ptr<Expression> defaultExpression)
- -> intrusive_ptr<Expression> { return defaultExpression->optimize(); });
+ [](intrusive_ptr<Expression> defaultExpression) -> intrusive_ptr<Expression> {
+ return defaultExpression->optimize();
+ });
return this;
}
@@ -4162,19 +4208,21 @@ Value ExpressionZip::serialize(bool explain) const {
}
return Value(DOC("$zip" << DOC("inputs" << Value(serializedInput) << "defaults"
- << Value(serializedDefaults) << "useLongestLength"
+ << Value(serializedDefaults)
+ << "useLongestLength"
<< serializedUseLongestLength)));
}
void ExpressionZip::addDependencies(DepsTracker* deps, std::vector<std::string>* path) const {
- std::for_each(_inputs.begin(),
- _inputs.end(),
- [&deps](intrusive_ptr<Expression> inputExpression)
- -> void { inputExpression->addDependencies(deps); });
+ std::for_each(
+ _inputs.begin(), _inputs.end(), [&deps](intrusive_ptr<Expression> inputExpression) -> void {
+ inputExpression->addDependencies(deps);
+ });
std::for_each(_defaults.begin(),
_defaults.end(),
- [&deps](intrusive_ptr<Expression> defaultExpression)
- -> void { defaultExpression->addDependencies(deps); });
+ [&deps](intrusive_ptr<Expression> defaultExpression) -> void {
+ defaultExpression->addDependencies(deps);
+ });
}
const char* ExpressionZip::getOpName() const {
diff --git a/src/mongo/db/pipeline/expression.h b/src/mongo/db/pipeline/expression.h
index 87a9536e689..8ed6f95f6eb 100644
--- a/src/mongo/db/pipeline/expression.h
+++ b/src/mongo/db/pipeline/expression.h
@@ -394,10 +394,14 @@ class ExpressionRangedArity : public ExpressionNaryBase<SubClass> {
public:
void validateArguments(const Expression::ExpressionVector& args) const override {
uassert(28667,
- mongoutils::str::stream()
- << "Expression " << this->getOpName() << " takes at least " << MinArgs
- << " arguments, and at most " << MaxArgs << ", but " << args.size()
- << " were passed in.",
+ mongoutils::str::stream() << "Expression " << this->getOpName()
+ << " takes at least "
+ << MinArgs
+ << " arguments, and at most "
+ << MaxArgs
+ << ", but "
+ << args.size()
+ << " were passed in.",
MinArgs <= args.size() && args.size() <= MaxArgs);
}
};
@@ -409,7 +413,9 @@ public:
void validateArguments(const Expression::ExpressionVector& args) const override {
uassert(16020,
mongoutils::str::stream() << "Expression " << this->getOpName() << " takes exactly "
- << NArgs << " arguments. " << args.size()
+ << NArgs
+ << " arguments. "
+ << args.size()
<< " were passed in.",
args.size() == NArgs);
}
diff --git a/src/mongo/db/pipeline/expression_test.cpp b/src/mongo/db/pipeline/expression_test.cpp
index d4b566e2bb4..2487bfc18f1 100644
--- a/src/mongo/db/pipeline/expression_test.cpp
+++ b/src/mongo/db/pipeline/expression_test.cpp
@@ -528,8 +528,8 @@ TEST_F(ExpressionNaryTest, FlattenInnerOperandsOptimizationOnAssociativeOnlyMidd
intrusive_ptr<Expression> optimized = _associativeOnly->optimize();
ASSERT(_associativeOnly == optimized);
- BSONArray expectedContent = BSON_ARRAY(200 << "$path3" << BSON_ARRAY(201 << 100) << "$path1"
- << BSON_ARRAY(101 << 99) << "$path2");
+ BSONArray expectedContent = BSON_ARRAY(
+ 200 << "$path3" << BSON_ARRAY(201 << 100) << "$path1" << BSON_ARRAY(101 << 99) << "$path2");
assertContents(_associativeOnly, expectedContent);
}
@@ -1368,7 +1368,8 @@ class NonConstantZero : public OptimizeBase {
class NonConstantNonConstantOne : public OptimizeBase {
BSONObj spec() {
return BSON("$and" << BSON_ARRAY("$a"
- << "$b" << 1));
+ << "$b"
+ << 1));
}
BSONObj expectedOptimized() {
return BSON("$and" << BSON_ARRAY("$a"
@@ -1380,7 +1381,8 @@ class NonConstantNonConstantOne : public OptimizeBase {
class NonConstantNonConstantZero : public OptimizeBase {
BSONObj spec() {
return BSON("$and" << BSON_ARRAY("$a"
- << "$b" << 0));
+ << "$b"
+ << 0));
}
BSONObj expectedOptimized() {
return BSON("$const" << false);
@@ -2250,12 +2252,11 @@ public:
void run() {
intrusive_ptr<Expression> expression = ExpressionFieldPath::create("a.b.c");
assertBinaryEqual(fromjson("{'':[[1,2],3,[4],[[5]],[6,7]]}"),
- toBson(expression->evaluate(fromBson(fromjson(
- "{a:[{b:[{c:1},{c:2}]},"
- "{b:{c:3}},"
- "{b:[{c:4}]},"
- "{b:[{c:[5]}]},"
- "{b:{c:[6,7]}}]}")))));
+ toBson(expression->evaluate(fromBson(fromjson("{a:[{b:[{c:1},{c:2}]},"
+ "{b:{c:3}},"
+ "{b:[{c:4}]},"
+ "{b:[{c:[5]}]},"
+ "{b:{c:[6,7]}}]}")))));
}
};
@@ -3386,7 +3387,8 @@ class NonConstantZero : public OptimizeBase {
class NonConstantNonConstantOne : public OptimizeBase {
BSONObj spec() {
return BSON("$or" << BSON_ARRAY("$a"
- << "$b" << 1));
+ << "$b"
+ << 1));
}
BSONObj expectedOptimized() {
return BSON("$const" << true);
@@ -3397,7 +3399,8 @@ class NonConstantNonConstantOne : public OptimizeBase {
class NonConstantNonConstantZero : public OptimizeBase {
BSONObj spec() {
return BSON("$or" << BSON_ARRAY("$a"
- << "$b" << 0));
+ << "$b"
+ << 0));
}
BSONObj expectedOptimized() {
return BSON("$or" << BSON_ARRAY("$a"
@@ -3996,13 +3999,15 @@ public:
const BSONObj obj = BSON(asserters[i].getString() << args);
VariablesIdGenerator idGenerator;
VariablesParseState vps(&idGenerator);
- ASSERT_THROWS({
- // NOTE: parse and evaluatation failures are treated the
- // same
- const intrusive_ptr<Expression> expr =
- Expression::parseExpression(obj.firstElement(), vps);
- expr->evaluate(Document());
- }, UserException);
+ ASSERT_THROWS(
+ {
+ // NOTE: parse and evaluation failures are treated the
+ // same
+ const intrusive_ptr<Expression> expr =
+ Expression::parseExpression(obj.firstElement(), vps);
+ expr->evaluate(Document());
+ },
+ UserException);
}
}
}
@@ -4015,9 +4020,12 @@ class Same : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection" << DOC_ARRAY(1 << 2)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>()));
+ << "$setIntersection"
+ << DOC_ARRAY(1 << 2)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << vector<Value>()));
}
};
@@ -4025,9 +4033,12 @@ class Redundant : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1 << 2 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection" << DOC_ARRAY(1 << 2)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>()));
+ << "$setIntersection"
+ << DOC_ARRAY(1 << 2)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << vector<Value>()));
}
};
@@ -4036,8 +4047,11 @@ class DoubleRedundant : public ExpectedResultBase {
return DOC(
"input" << DOC_ARRAY(DOC_ARRAY(1 << 1 << 2) << DOC_ARRAY(1 << 2 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true << "$setIntersection"
- << DOC_ARRAY(1 << 2) << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>()));
+ << DOC_ARRAY(1 << 2)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << vector<Value>()));
}
};
@@ -4045,9 +4059,12 @@ class Super : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(1)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection" << DOC_ARRAY(1)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << DOC_ARRAY(2)));
+ << "$setIntersection"
+ << DOC_ARRAY(1)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << DOC_ARRAY(2)));
}
};
@@ -4055,9 +4072,12 @@ class SuperWithRedundant : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 2) << DOC_ARRAY(1)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection" << DOC_ARRAY(1)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << DOC_ARRAY(2)));
+ << "$setIntersection"
+ << DOC_ARRAY(1)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << DOC_ARRAY(2)));
}
};
@@ -4065,9 +4085,12 @@ class Sub : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << false
- << "$setIntersection" << DOC_ARRAY(1)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>()));
+ << "$setIntersection"
+ << DOC_ARRAY(1)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << vector<Value>()));
}
};
@@ -4075,9 +4098,12 @@ class SameBackwards : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(2 << 1)) << "expected"
<< DOC("$setIsSubset" << true << "$setEquals" << true
- << "$setIntersection" << DOC_ARRAY(1 << 2)
- << "$setUnion" << DOC_ARRAY(1 << 2)
- << "$setDifference" << vector<Value>()));
+ << "$setIntersection"
+ << DOC_ARRAY(1 << 2)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2)
+ << "$setDifference"
+ << vector<Value>()));
}
};
@@ -4085,9 +4111,12 @@ class NoOverlap : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(8 << 4)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection" << vector<Value>()
- << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
- << "$setDifference" << DOC_ARRAY(1 << 2)));
+ << "$setIntersection"
+ << vector<Value>()
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2 << 4 << 8)
+ << "$setDifference"
+ << DOC_ARRAY(1 << 2)));
}
};
@@ -4095,9 +4124,12 @@ class Overlap : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << DOC_ARRAY(8 << 2 << 4)) << "expected"
<< DOC("$setIsSubset" << false << "$setEquals" << false
- << "$setIntersection" << DOC_ARRAY(2)
- << "$setUnion" << DOC_ARRAY(1 << 2 << 4 << 8)
- << "$setDifference" << DOC_ARRAY(1)));
+ << "$setIntersection"
+ << DOC_ARRAY(2)
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2 << 4 << 8)
+ << "$setDifference"
+ << DOC_ARRAY(1)));
}
};
@@ -4105,7 +4137,9 @@ class LastNull : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << Value(BSONNULL)) << "expected"
<< DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
- << "$setDifference" << BSONNULL) << "error"
+ << "$setDifference"
+ << BSONNULL)
+ << "error"
<< DOC_ARRAY("$setEquals"
<< "$setIsSubset"));
}
@@ -4115,7 +4149,9 @@ class FirstNull : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(Value(BSONNULL) << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIntersection" << BSONNULL << "$setUnion" << BSONNULL
- << "$setDifference" << BSONNULL) << "error"
+ << "$setDifference"
+ << BSONNULL)
+ << "error"
<< DOC_ARRAY("$setEquals"
<< "$setIsSubset"));
}
@@ -4126,9 +4162,10 @@ class NoArg : public ExpectedResultBase {
return DOC(
"input" << vector<Value>() << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion" << vector<Value>())
- << "error" << DOC_ARRAY("$setEquals"
- << "$setIsSubset"
- << "$setDifference"));
+ << "error"
+ << DOC_ARRAY("$setEquals"
+ << "$setIsSubset"
+ << "$setDifference"));
}
};
@@ -4136,7 +4173,8 @@ class OneArg : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIntersection" << DOC_ARRAY(1 << 2) << "$setUnion"
- << DOC_ARRAY(1 << 2)) << "error"
+ << DOC_ARRAY(1 << 2))
+ << "error"
<< DOC_ARRAY("$setEquals"
<< "$setIsSubset"
<< "$setDifference"));
@@ -4148,9 +4186,10 @@ class EmptyArg : public ExpectedResultBase {
return DOC(
"input" << DOC_ARRAY(vector<Value>()) << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion" << vector<Value>())
- << "error" << DOC_ARRAY("$setEquals"
- << "$setIsSubset"
- << "$setDifference"));
+ << "error"
+ << DOC_ARRAY("$setEquals"
+ << "$setIsSubset"
+ << "$setDifference"));
}
};
@@ -4158,8 +4197,12 @@ class LeftArgEmpty : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(vector<Value>() << DOC_ARRAY(1 << 2)) << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion"
- << DOC_ARRAY(1 << 2) << "$setIsSubset" << true
- << "$setEquals" << false << "$setDifference"
+ << DOC_ARRAY(1 << 2)
+ << "$setIsSubset"
+ << true
+ << "$setEquals"
+ << false
+ << "$setDifference"
<< vector<Value>()));
}
};
@@ -4168,8 +4211,12 @@ class RightArgEmpty : public ExpectedResultBase {
Document getSpec() {
return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2) << vector<Value>()) << "expected"
<< DOC("$setIntersection" << vector<Value>() << "$setUnion"
- << DOC_ARRAY(1 << 2) << "$setIsSubset" << false
- << "$setEquals" << false << "$setDifference"
+ << DOC_ARRAY(1 << 2)
+ << "$setIsSubset"
+ << false
+ << "$setEquals"
+ << false
+ << "$setDifference"
<< DOC_ARRAY(1 << 2)));
}
};
@@ -4177,27 +4224,34 @@ class RightArgEmpty : public ExpectedResultBase {
class ManyArgs : public ExpectedResultBase {
Document getSpec() {
return DOC(
- "input" << DOC_ARRAY(DOC_ARRAY(8 << 3)
- << DOC_ARRAY("asdf"
- << "foo") << DOC_ARRAY(80.3 << 34) << vector<Value>()
- << DOC_ARRAY(80.3 << "foo" << 11 << "yay")) << "expected"
- << DOC("$setIntersection"
- << vector<Value>() << "$setEquals" << false << "$setUnion"
- << DOC_ARRAY(3 << 8 << 11 << 34 << 80.3 << "asdf"
- << "foo"
- << "yay")) << "error" << DOC_ARRAY("$setIsSubset"
- << "$setDifference"));
+ "input" << DOC_ARRAY(DOC_ARRAY(8 << 3) << DOC_ARRAY("asdf"
+ << "foo")
+ << DOC_ARRAY(80.3 << 34)
+ << vector<Value>()
+ << DOC_ARRAY(80.3 << "foo" << 11 << "yay"))
+ << "expected"
+ << DOC("$setIntersection" << vector<Value>() << "$setEquals" << false
+ << "$setUnion"
+ << DOC_ARRAY(3 << 8 << 11 << 34 << 80.3 << "asdf"
+ << "foo"
+ << "yay"))
+ << "error"
+ << DOC_ARRAY("$setIsSubset"
+ << "$setDifference"));
}
};
class ManyArgsEqual : public ExpectedResultBase {
Document getSpec() {
- return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 4)
- << DOC_ARRAY(1 << 2 << 2 << 4) << DOC_ARRAY(4 << 1 << 2)
- << DOC_ARRAY(2 << 1 << 1 << 4)) << "expected"
+ return DOC("input" << DOC_ARRAY(DOC_ARRAY(1 << 2 << 4) << DOC_ARRAY(1 << 2 << 2 << 4)
+ << DOC_ARRAY(4 << 1 << 2)
+ << DOC_ARRAY(2 << 1 << 1 << 4))
+ << "expected"
<< DOC("$setIntersection" << DOC_ARRAY(1 << 2 << 4) << "$setEquals"
- << true << "$setUnion"
- << DOC_ARRAY(1 << 2 << 4)) << "error"
+ << true
+ << "$setUnion"
+ << DOC_ARRAY(1 << 2 << 4))
+ << "error"
<< DOC_ARRAY("$setIsSubset"
<< "$setDifference"));
}
@@ -4757,13 +4811,15 @@ public:
const BSONObj obj = BSON(asserters[i].getString() << args);
VariablesIdGenerator idGenerator;
VariablesParseState vps(&idGenerator);
- ASSERT_THROWS({
- // NOTE: parse and evaluatation failures are treated the
- // same
- const intrusive_ptr<Expression> expr =
- Expression::parseExpression(obj.firstElement(), vps);
- expr->evaluate(Document());
- }, UserException);
+ ASSERT_THROWS(
+ {
+ // NOTE: parse and evaluation failures are treated the
+ // same
+ const intrusive_ptr<Expression> expr =
+ Expression::parseExpression(obj.firstElement(), vps);
+ expr->evaluate(Document());
+ },
+ UserException);
}
}
}
diff --git a/src/mongo/db/pipeline/field_path_test.cpp b/src/mongo/db/pipeline/field_path_test.cpp
index 63c0216a76d..92ba167f562 100644
--- a/src/mongo/db/pipeline/field_path_test.cpp
+++ b/src/mongo/db/pipeline/field_path_test.cpp
@@ -29,9 +29,9 @@
#include "mongo/platform/basic.h"
#include "mongo/db/pipeline/field_path.h"
+#include "mongo/dbtests/dbtests.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
-#include "mongo/dbtests/dbtests.h"
namespace mongo {
using std::string;
diff --git a/src/mongo/db/pipeline/lookup_set_cache.h b/src/mongo/db/pipeline/lookup_set_cache.h
index a40ac28155b..3150d7bc1af 100644
--- a/src/mongo/db/pipeline/lookup_set_cache.h
+++ b/src/mongo/db/pipeline/lookup_set_cache.h
@@ -29,15 +29,15 @@
#include "mongo/platform/basic.h"
-#include <unordered_map>
-#include <unordered_set>
-#include <iostream>
#include <boost/intrusive_ptr.hpp>
-#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/sequenced_index.hpp>
+#include <boost/multi_index_container.hpp>
#include <boost/optional.hpp>
+#include <iostream>
+#include <unordered_map>
+#include <unordered_set>
#include "mongo/bson/bsonobj.h"
#include "mongo/db/pipeline/value.h"
diff --git a/src/mongo/db/pipeline/lookup_set_cache_test.cpp b/src/mongo/db/pipeline/lookup_set_cache_test.cpp
index 4d5ec28ad56..2903a3632f5 100644
--- a/src/mongo/db/pipeline/lookup_set_cache_test.cpp
+++ b/src/mongo/db/pipeline/lookup_set_cache_test.cpp
@@ -28,9 +28,9 @@
#include "mongo/platform/basic.h"
+#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/pipeline/lookup_set_cache.h"
#include "mongo/unittest/unittest.h"
-#include "mongo/bson/bsonobjbuilder.h"
namespace mongo {
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index 32285652e24..9f5d6e1fd67 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -499,7 +499,8 @@ void Pipeline::run(BSONObjBuilder& result) {
// object will be too large, assert. the extra 1KB is for headers
uassert(16389,
str::stream() << "aggregation result exceeds maximum document size ("
- << BSONObjMaxUserSize / (1024 * 1024) << "MB)",
+ << BSONObjMaxUserSize / (1024 * 1024)
+ << "MB)",
resultArray.len() < BSONObjMaxUserSize - 1024);
}
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 6e4c8c817a3..f30e513a2f4 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -37,13 +37,13 @@
#include "mongo/db/catalog/database.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/dbdirectclient.h"
#include "mongo/db/exec/fetch.h"
#include "mongo/db/exec/index_iterator.h"
#include "mongo/db/exec/multi_iterator.h"
#include "mongo/db/exec/shard_filter.h"
#include "mongo/db/exec/working_set.h"
-#include "mongo/db/db_raii.h"
-#include "mongo/db/dbdirectclient.h"
#include "mongo/db/index/index_access_method.h"
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/pipeline/document_source.h"
@@ -51,11 +51,11 @@
#include "mongo/db/query/collation/collation_serializer.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/query_planner.h"
+#include "mongo/db/s/sharded_connection_info.h"
+#include "mongo/db/s/sharding_state.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/sorted_data_interface.h"
-#include "mongo/db/s/sharded_connection_info.h"
-#include "mongo/db/s/sharding_state.h"
#include "mongo/s/chunk_version.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
@@ -84,9 +84,8 @@ public:
bool isSharded(const NamespaceString& ns) final {
const ChunkVersion unsharded(0, 0, OID());
- return !(ShardingState::get(_ctx->opCtx)
- ->getVersion(ns.ns())
- .isWriteCompatibleWith(unsharded));
+ return !(
+ ShardingState::get(_ctx->opCtx)->getVersion(ns.ns()).isWriteCompatibleWith(unsharded));
}
bool isCapped(const NamespaceString& ns) final {
diff --git a/src/mongo/db/pipeline/value.cpp b/src/mongo/db/pipeline/value.cpp
index 9d98f8d6f02..8a37c51067c 100644
--- a/src/mongo/db/pipeline/value.cpp
+++ b/src/mongo/db/pipeline/value.cpp
@@ -30,9 +30,9 @@
#include "mongo/db/pipeline/value.h"
+#include <boost/functional/hash.hpp>
#include <cmath>
#include <limits>
-#include <boost/functional/hash.hpp>
#include "mongo/base/compare_numbers.h"
#include "mongo/base/data_type_endian.h"
diff --git a/src/mongo/db/pipeline/value.h b/src/mongo/db/pipeline/value.h
index 19b951f852c..c6d4b90c0cd 100644
--- a/src/mongo/db/pipeline/value.h
+++ b/src/mongo/db/pipeline/value.h
@@ -303,8 +303,7 @@ inline void swap(mongo::Value& lhs, mongo::Value& rhs) {
class ImplicitValue : public Value {
public:
template <typename T>
- ImplicitValue(T arg)
- : Value(std::move(arg)) {}
+ ImplicitValue(T arg) : Value(std::move(arg)) {}
};
}
diff --git a/src/mongo/db/pipeline/value_internal.h b/src/mongo/db/pipeline/value_internal.h
index 556eebec060..fe34b97e0a7 100644
--- a/src/mongo/db/pipeline/value_internal.h
+++ b/src/mongo/db/pipeline/value_internal.h
@@ -32,13 +32,13 @@
#include <boost/config.hpp>
#include <boost/intrusive_ptr.hpp>
+#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsontypes.h"
-#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/oid.h"
+#include "mongo/bson/timestamp.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/intrusive_counter.h"
-#include "mongo/bson/timestamp.h"
namespace mongo {
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index 3dc4386f412..c5e54fedf31 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -112,12 +112,12 @@ TEST(CanonicalQueryTest, IsValidText) {
ASSERT_OK(isValid("{$text: {$search: 's'}}", *lpq));
// Valid: TEXT inside OR.
- ASSERT_OK(isValid(
- "{$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- "]}",
- *lpq));
+ ASSERT_OK(
+ isValid("{$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ "]}",
+ *lpq));
// Valid: TEXT outside NOR.
ASSERT_OK(isValid("{$text: {$search: 's'}, $nor: [{a: 1}, {b: 1}]}", *lpq));
@@ -126,37 +126,37 @@ TEST(CanonicalQueryTest, IsValidText) {
ASSERT_NOT_OK(isValid("{$nor: [{$text: {$search: 's'}}, {a: 1}]}", *lpq));
// Invalid: TEXT inside NOR.
- ASSERT_NOT_OK(isValid(
- "{$nor: ["
- " {$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- " ]},"
- " {a: 2}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$nor: ["
+ " {$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ " ]},"
+ " {a: 2}"
+ "]}",
+ *lpq));
// Invalid: >1 TEXT.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {$text: {$search: 's'}},"
- " {$text: {$search: 't'}}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$and: ["
+ " {$text: {$search: 's'}},"
+ " {$text: {$search: 't'}}"
+ "]}",
+ *lpq));
// Invalid: >1 TEXT.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- " ]},"
- " {$or: ["
- " {$text: {$search: 't'}},"
- " {b: 1}"
- " ]}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$and: ["
+ " {$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ " ]},"
+ " {$or: ["
+ " {$text: {$search: 't'}},"
+ " {b: 1}"
+ " ]}"
+ "]}",
+ *lpq));
}
TEST(CanonicalQueryTest, IsValidTextTailable) {
@@ -178,61 +178,61 @@ TEST(CanonicalQueryTest, IsValidGeo) {
ASSERT_OK(isValid("{a: {$near: [0, 0]}}", *lpq));
// Valid: GEO_NEAR inside nested AND.
- ASSERT_OK(isValid(
- "{$and: ["
- " {$and: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- " ]},"
- " {c: 1}"
- "]}",
- *lpq));
+ ASSERT_OK(
+ isValid("{$and: ["
+ " {$and: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ " ]},"
+ " {c: 1}"
+ "]}",
+ *lpq));
// Invalid: >1 GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {a: {$near: [0, 0]}},"
- " {b: {$near: [0, 0]}}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$and: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: {$near: [0, 0]}}"
+ "]}",
+ *lpq));
// Invalid: >1 GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {a: {$geoNear: [0, 0]}},"
- " {b: {$near: [0, 0]}}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$and: ["
+ " {a: {$geoNear: [0, 0]}},"
+ " {b: {$near: [0, 0]}}"
+ "]}",
+ *lpq));
// Invalid: >1 GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$and: ["
- " {$and: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- " ]},"
- " {$and: ["
- " {c: {$near: [0, 0]}},"
- " {d: 1}"
- " ]}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$and: ["
+ " {$and: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ " ]},"
+ " {$and: ["
+ " {c: {$near: [0, 0]}},"
+ " {d: 1}"
+ " ]}"
+ "]}",
+ *lpq));
// Invalid: GEO_NEAR inside NOR.
- ASSERT_NOT_OK(isValid(
- "{$nor: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$nor: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ "]}",
+ *lpq));
// Invalid: GEO_NEAR inside OR.
- ASSERT_NOT_OK(isValid(
- "{$or: ["
- " {a: {$near: [0, 0]}},"
- " {b: 1}"
- "]}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$or: ["
+ " {a: {$near: [0, 0]}},"
+ " {b: 1}"
+ "]}",
+ *lpq));
}
TEST(CanonicalQueryTest, IsValidTextAndGeo) {
@@ -247,13 +247,13 @@ TEST(CanonicalQueryTest, IsValidTextAndGeo) {
ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$geoNear: [0, 0]}}", *lpq));
// Invalid: TEXT and GEO_NEAR.
- ASSERT_NOT_OK(isValid(
- "{$or: ["
- " {$text: {$search: 's'}},"
- " {a: 1}"
- " ],"
- " b: {$near: [0, 0]}}",
- *lpq));
+ ASSERT_NOT_OK(
+ isValid("{$or: ["
+ " {$text: {$search: 's'}},"
+ " {a: 1}"
+ " ],"
+ " b: {$near: [0, 0]}}",
+ *lpq));
}
TEST(CanonicalQueryTest, IsValidTextAndNaturalAscending) {
diff --git a/src/mongo/db/query/collation/collation_serializer_test.cpp b/src/mongo/db/query/collation/collation_serializer_test.cpp
index 60e1e63d9f6..fd961b158a0 100644
--- a/src/mongo/db/query/collation/collation_serializer_test.cpp
+++ b/src/mongo/db/query/collation/collation_serializer_test.cpp
@@ -44,13 +44,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesDefaults) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 3
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -62,13 +71,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesCaseFirstUpper) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "upper"
- << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 3
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -80,13 +98,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesCaseFirstLower) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "lower"
- << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 3
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -98,13 +125,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesPrimaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 1 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 1
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -116,13 +152,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesSecondaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 2 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 2
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -134,13 +179,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesQuaternaryStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 4 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 4
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -152,13 +206,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesIdenticalStrength) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 5 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 5
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -170,13 +233,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesAlternateShifted) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 3
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "shifted"
<< "maxVariable"
<< "punct"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
@@ -188,13 +260,22 @@ TEST(CollationSerializerTest, ToBSONCorrectlySerializesMaxVariableSpace) {
BSONObj expectedObj = BSON("locale"
<< "myLocale"
- << "caseLevel" << false << "caseFirst"
+ << "caseLevel"
+ << false
+ << "caseFirst"
<< "off"
- << "strength" << 3 << "numericOrdering" << false << "alternate"
+ << "strength"
+ << 3
+ << "numericOrdering"
+ << false
+ << "alternate"
<< "non-ignorable"
<< "maxVariable"
<< "space"
- << "normalization" << false << "backwards" << false);
+ << "normalization"
+ << false
+ << "backwards"
+ << false);
ASSERT_EQ(expectedObj, CollationSerializer::specToBSON(collationSpec));
}
diff --git a/src/mongo/db/query/collation/collator_factory_icu.cpp b/src/mongo/db/query/collation/collator_factory_icu.cpp
index f72731d6e9b..5a4470d4bf5 100644
--- a/src/mongo/db/query/collation/collator_factory_icu.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu.cpp
@@ -183,9 +183,13 @@ StatusWith<CollationSpec::CaseFirstType> stringToCaseFirstType(const std::string
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kCaseFirstField << "' must be '"
- << CollationSpec::kCaseFirstUpper << "', '"
- << CollationSpec::kCaseFirstLower << "', or '"
- << CollationSpec::kCaseFirstOff << "'. Got: " << caseFirst};
+ << CollationSpec::kCaseFirstUpper
+ << "', '"
+ << CollationSpec::kCaseFirstLower
+ << "', or '"
+ << CollationSpec::kCaseFirstOff
+ << "'. Got: "
+ << caseFirst};
}
}
@@ -204,7 +208,8 @@ StatusWith<CollationSpec::StrengthType> integerToStrengthType(long long strength
}
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kStrengthField
- << "' must be an integer 1 through 5. Got: " << strength};
+ << "' must be an integer 1 through 5. Got: "
+ << strength};
}
StatusWith<CollationSpec::AlternateType> stringToAlternateType(const std::string& alternate) {
@@ -215,8 +220,11 @@ StatusWith<CollationSpec::AlternateType> stringToAlternateType(const std::string
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kAlternateField << "' must be '"
- << CollationSpec::kAlternateNonIgnorable << "' or '"
- << CollationSpec::kAlternateShifted << "'. Got: " << alternate};
+ << CollationSpec::kAlternateNonIgnorable
+ << "' or '"
+ << CollationSpec::kAlternateShifted
+ << "'. Got: "
+ << alternate};
}
}
@@ -228,8 +236,11 @@ StatusWith<CollationSpec::MaxVariableType> stringToMaxVariableType(const std::st
} else {
return {ErrorCodes::FailedToParse,
str::stream() << "Field '" << CollationSpec::kMaxVariableField << "' must be '"
- << CollationSpec::kMaxVariablePunct << "' or '"
- << CollationSpec::kMaxVariableSpace << "'. Got: " << maxVariable};
+ << CollationSpec::kMaxVariablePunct
+ << "' or '"
+ << CollationSpec::kMaxVariableSpace
+ << "'. Got: "
+ << maxVariable};
}
}
@@ -259,8 +270,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kCaseLevelField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.caseLevel = attributeToBool(caseLevelAttribute);
} else if (!parseStatus.isOK()) {
@@ -274,8 +287,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kCaseLevelField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -290,8 +305,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kCaseFirstField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.caseFirst = getCaseFirstFromAttribute(caseFirstAttribute);
} else if (!parseStatus.isOK()) {
@@ -313,8 +330,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kCaseFirstField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -329,8 +348,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kStrengthField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.strength = getStrengthFromAttribute(strengthAttribute);
} else if (!parseStatus.isOK()) {
@@ -351,8 +372,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kStrengthField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -368,8 +391,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kNumericOrderingField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.numericOrdering = attributeToBool(numericOrderingAttribute);
} else if (!parseStatus.isOK()) {
@@ -384,8 +409,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kNumericOrderingField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -401,8 +428,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kAlternateField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.alternate = getAlternateFromAttribute(alternateAttribute);
} else if (!parseStatus.isOK()) {
@@ -424,8 +453,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kAlternateField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -452,8 +483,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kMaxVariableField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -469,8 +502,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kNormalizationField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.normalization = attributeToBool(normalizationAttribute);
} else if (!parseStatus.isOK()) {
@@ -485,8 +520,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kNormalizationField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -502,8 +539,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get '" << CollationSpec::kBackwardsField
- << "' attribute from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute from icu::Collator: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
parsedSpec.backwards = attributeToBool(backwardsAttribute);
} else if (!parseStatus.isOK()) {
@@ -518,8 +557,10 @@ StatusWith<CollationSpec> parseToCollationSpec(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to set '" << CollationSpec::kBackwardsField
- << "' attribute: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << "' attribute: "
+ << icuError.errorName()
+ << ". Collation spec: "
+ << spec};
}
}
@@ -543,7 +584,8 @@ StatusWith<std::string> parseLocaleID(const BSONObj& spec) {
if (localeID.find('\0') != std::string::npos) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << CollationSpec::kLocaleField
- << "' cannot contain null byte. Collation spec: " << spec};
+ << "' cannot contain null byte. Collation spec: "
+ << spec};
}
return localeID;
}
@@ -559,13 +601,15 @@ Status validateLocaleID(const BSONObj& spec,
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to get locale from icu::Collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << ". Collation spec: "
+ << spec};
}
if (originalID.empty()) {
return {ErrorCodes::BadValue,
str::stream() << "Field '" << CollationSpec::kLocaleField
- << "' cannot be the empty string in: " << spec};
+ << "' cannot be the empty string in: "
+ << spec};
}
// Check that each component of the locale ID is recognized by ICU. If ICU 1) cannot parse the
@@ -607,7 +651,8 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
return {ErrorCodes::FailedToParse,
str::stream() << "If " << CollationSpec::kLocaleField << "="
<< CollationSpec::kSimpleBinaryComparison
- << ", no other fields should be present in: " << spec};
+ << ", no other fields should be present in: "
+ << spec};
}
return {nullptr};
}
@@ -616,8 +661,8 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
auto userLocale = icu::Locale::createFromName(parsedLocaleID.getValue().c_str());
if (userLocale.isBogus()) {
return {ErrorCodes::BadValue,
- str::stream() << "Field '" << CollationSpec::kLocaleField
- << "' is not valid in: " << spec};
+ str::stream() << "Field '" << CollationSpec::kLocaleField << "' is not valid in: "
+ << spec};
}
// Construct an icu::Collator.
@@ -628,7 +673,8 @@ StatusWith<std::unique_ptr<CollatorInterface>> CollatorFactoryICU::makeFromBSON(
icuError.set(status);
return {ErrorCodes::OperationFailed,
str::stream() << "Failed to create collator: " << icuError.errorName()
- << ". Collation spec: " << spec};
+ << ". Collation spec: "
+ << spec};
}
Status localeValidationStatus = validateLocaleID(spec, parsedLocaleID.getValue(), *icuCollator);
diff --git a/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp b/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
index 62d1432b899..46ddbf54d8b 100644
--- a/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp
@@ -37,9 +37,8 @@ namespace mongo {
namespace {
-MONGO_INITIALIZER_WITH_PREREQUISITES(CreateCollatorFactory,
- ("SetGlobalEnvironment",
- "LoadICUData"))(InitializerContext* context) {
+MONGO_INITIALIZER_WITH_PREREQUISITES(CreateCollatorFactory, ("SetGlobalEnvironment", "LoadICUData"))
+(InitializerContext* context) {
CollatorFactoryInterface::set(getGlobalServiceContext(),
stdx::make_unique<CollatorFactoryICU>());
return Status::OK();
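
// Illustrative sketch (not part of the commit): this hunk is one of the few
// places the tool reshapes a declaration rather than a stream chain. When the
// MONGO_INITIALIZER_WITH_PREREQUISITES(...) invocation fits on one line,
// clang-format 3.8 keeps it whole and pushes the function parameter list that
// follows the macro onto its own line. ExampleInit is a hypothetical name.
#include "mongo/base/init.h"
#include "mongo/base/status.h"

MONGO_INITIALIZER_WITH_PREREQUISITES(ExampleInit, ("SetGlobalEnvironment"))
(mongo::InitializerContext* context) {
    // A real initializer would register something here; the sketch only shows
    // where the brace-delimited body lands relative to the macro.
    return mongo::Status::OK();
}
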
diff --git a/src/mongo/db/query/collation/collator_factory_icu_test.cpp b/src/mongo/db/query/collation/collator_factory_icu_test.cpp
index db829fb359a..aa77665040f 100644
--- a/src/mongo/db/query/collation/collator_factory_icu_test.cpp
+++ b/src/mongo/db/query/collation/collator_factory_icu_test.cpp
@@ -59,7 +59,8 @@ TEST(CollatorFactoryICUTest, SimpleLocaleWithOtherFieldsFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "simple"
- << "caseLevel" << true));
+ << "caseLevel"
+ << true));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -354,8 +355,10 @@ TEST(CollatorFactoryICUTest, TaiwanLocaleWithCollationStrokeDisallowed) {
TEST(CollatorFactoryICUTest, LocaleWithValidLanguageCountryAndVariantAllowed) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "en_US_POSIX")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "en_US_POSIX"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, USLocaleWithCollationPhonebookDisallowed) {
@@ -368,14 +371,18 @@ TEST(CollatorFactoryICUTest, USLocaleWithCollationPhonebookDisallowed) {
TEST(CollatorFactoryICUTest, GermanLocaleWithCollationPhonebookAllowed) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "de@collation=phonebook")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "de@collation=phonebook"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, ChineseTraditionalLocaleWithCollationPinyinAllowed) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "zh_Hant@collation=pinyin")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "zh_Hant@collation=pinyin"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, LocaleStringCannotContainNullByte) {
@@ -435,7 +442,8 @@ TEST(CollatorFactoryICUTest, CaseLevelFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseLevel" << false));
+ << "caseLevel"
+ << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().caseLevel);
}
@@ -444,7 +452,8 @@ TEST(CollatorFactoryICUTest, CaseLevelTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseLevel" << true));
+ << "caseLevel"
+ << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().caseLevel);
}
@@ -486,7 +495,8 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1));
+ << "strength"
+ << 1));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -496,7 +506,8 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 2));
+ << "strength"
+ << 2));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kSecondary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -506,7 +517,8 @@ TEST(CollatorFactoryICUTest, TertiaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 3));
+ << "strength"
+ << 3));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kTertiary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -516,7 +528,8 @@ TEST(CollatorFactoryICUTest, QuaternaryStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 4));
+ << "strength"
+ << 4));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kQuaternary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -526,7 +539,8 @@ TEST(CollatorFactoryICUTest, IdenticalStrengthParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 5));
+ << "strength"
+ << 5));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kIdentical),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -536,7 +550,8 @@ TEST(CollatorFactoryICUTest, NumericOrderingFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering" << false));
+ << "numericOrdering"
+ << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().numericOrdering);
}
@@ -545,7 +560,8 @@ TEST(CollatorFactoryICUTest, NumericOrderingTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering" << true));
+ << "numericOrdering"
+ << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().numericOrdering);
}
@@ -598,7 +614,8 @@ TEST(CollatorFactoryICUTest, NormalizationFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "normalization" << false));
+ << "normalization"
+ << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().normalization);
}
@@ -607,7 +624,8 @@ TEST(CollatorFactoryICUTest, NormalizationTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "normalization" << true));
+ << "normalization"
+ << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().normalization);
}
@@ -616,7 +634,8 @@ TEST(CollatorFactoryICUTest, BackwardsFalseParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards" << false));
+ << "backwards"
+ << false));
ASSERT_OK(collator.getStatus());
ASSERT_FALSE(collator.getValue()->getSpec().backwards);
}
@@ -625,7 +644,8 @@ TEST(CollatorFactoryICUTest, BackwardsTrueParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "backwards" << true));
+ << "backwards"
+ << true));
ASSERT_OK(collator.getStatus());
ASSERT_TRUE(collator.getValue()->getSpec().backwards);
}
@@ -634,7 +654,8 @@ TEST(CollatorFactoryICUTest, LongStrengthFieldParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1LL));
+ << "strength"
+ << 1LL));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -644,7 +665,8 @@ TEST(CollatorFactoryICUTest, DoubleStrengthFieldParsesSuccessfully) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1.0));
+ << "strength"
+ << 1.0));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(static_cast<int>(CollationSpec::StrengthType::kPrimary),
static_cast<int>(collator.getValue()->getSpec().strength));
@@ -664,7 +686,8 @@ TEST(CollatorFactoryICUTest, NonStringCaseFirstFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "caseFirst" << 1));
+ << "caseFirst"
+ << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -693,7 +716,8 @@ TEST(CollatorFactoryICUTest, TooLargeStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 2147483648LL));
+ << "strength"
+ << 2147483648LL));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -702,7 +726,8 @@ TEST(CollatorFactoryICUTest, FractionalStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 0.5));
+ << "strength"
+ << 0.5));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::BadValue);
}
@@ -711,7 +736,8 @@ TEST(CollatorFactoryICUTest, NegativeStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << -1));
+ << "strength"
+ << -1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -720,7 +746,8 @@ TEST(CollatorFactoryICUTest, InvalidIntegerStrengthFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 6));
+ << "strength"
+ << 6));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::FailedToParse);
}
@@ -739,7 +766,8 @@ TEST(CollatorFactoryICUTest, NonStringAlternateFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "alternate" << 1));
+ << "alternate"
+ << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -758,7 +786,8 @@ TEST(CollatorFactoryICUTest, NonStringMaxVariableFieldFailsToParse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "maxVariable" << 1));
+ << "maxVariable"
+ << 1));
ASSERT_NOT_OK(collator.getStatus());
ASSERT_EQ(collator.getStatus(), ErrorCodes::TypeMismatch);
}
@@ -810,7 +839,8 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCollatorIgnoresCaseAndAccents) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1));
+ << "strength"
+ << 1));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -822,7 +852,8 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthCollatorsIgnoresCaseButNotAccents)
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 2));
+ << "strength"
+ << 2));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -834,7 +865,8 @@ TEST(CollatorFactoryICUTest, TertiaryStrengthCollatorConsidersCaseAndAccents) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 3));
+ << "strength"
+ << 3));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -846,7 +878,10 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1 << "caseLevel" << true));
+ << "strength"
+ << 1
+ << "caseLevel"
+ << true));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -856,11 +891,14 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrue) {
TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrueCaseFirstUpper) {
CollatorFactoryICU factory;
- auto collator =
- factory.makeFromBSON(BSON("locale"
- << "en_US"
- << "strength" << 1 << "caseLevel" << true << "caseFirst"
- << "upper"));
+ auto collator = factory.makeFromBSON(BSON("locale"
+ << "en_US"
+ << "strength"
+ << 1
+ << "caseLevel"
+ << true
+ << "caseFirst"
+ << "upper"));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -870,11 +908,14 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthCaseLevelTrueCaseFirstUpper) {
TEST(CollatorFactoryICUTest, TertiaryStrengthCaseLevelTrueCaseFirstUpper) {
CollatorFactoryICU factory;
- auto collator =
- factory.makeFromBSON(BSON("locale"
- << "en_US"
- << "strength" << 3 << "caseLevel" << true << "caseFirst"
- << "upper"));
+ auto collator = factory.makeFromBSON(BSON("locale"
+ << "en_US"
+ << "strength"
+ << 3
+ << "caseLevel"
+ << true
+ << "caseFirst"
+ << "upper"));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("A", "a"), 0);
}
@@ -891,7 +932,8 @@ TEST(CollatorFactoryICUTest, NumericOrderingTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "numericOrdering" << true));
+ << "numericOrdering"
+ << true));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("2", "10"), 0);
}
@@ -900,7 +942,9 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthAlternateShifted) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1 << "alternate"
+ << "strength"
+ << 1
+ << "alternate"
<< "shifted"));
ASSERT_OK(collator.getStatus());
ASSERT_EQ(collator.getValue()->compare("a b", "ab"), 0);
@@ -911,7 +955,9 @@ TEST(CollatorFactoryICUTest, QuaternaryStrengthAlternateShifted) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 4 << "alternate"
+ << "strength"
+ << 4
+ << "alternate"
<< "shifted"));
ASSERT_OK(collator.getStatus());
ASSERT_LT(collator.getValue()->compare("a b", "ab"), 0);
@@ -922,7 +968,9 @@ TEST(CollatorFactoryICUTest, PrimaryStrengthAlternateShiftedMaxVariableSpace) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 1 << "alternate"
+ << "strength"
+ << 1
+ << "alternate"
<< "shifted"
<< "maxVariable"
<< "space"));
@@ -935,7 +983,8 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthBackwardsFalse) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 2));
+ << "strength"
+ << 2));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -946,7 +995,10 @@ TEST(CollatorFactoryICUTest, SecondaryStrengthBackwardsTrue) {
CollatorFactoryICU factory;
auto collator = factory.makeFromBSON(BSON("locale"
<< "en_US"
- << "strength" << 2 << "backwards" << true));
+ << "strength"
+ << 2
+ << "backwards"
+ << true));
ASSERT_OK(collator.getStatus());
// u8"\u00E1" is latin small letter a with acute.
@@ -975,320 +1027,426 @@ TEST(CollatorFactoryICUTest, FactoryMadeCollatorComparisonKeysCorrectEnUS) {
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithArabicLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ar")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ar"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithArmenianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "hy")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "hy"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithBengaliLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "bn")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "bn"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithCatalanLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ca")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ca"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithChineseLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "zh")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "zh"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithChineseTraditionalLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "zh_Hant")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "zh_Hant"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithCroatianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "hr")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "hr"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithCzechLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "cs")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "cs"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithDanishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "da")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "da"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithEnglishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "en")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "en"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithEnglishUnitedStatesLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "en_US")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "en_US"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithEnglishUnitedStatesComputerLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "en_US_POSIX")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "en_US_POSIX"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithEstonianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "et")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "et"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithFilipinoLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "fil")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "fil"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithFinnishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "fi")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "fi"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithFrenchLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "fr")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "fr"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithFrenchCanadaLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "fr_CA")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "fr_CA"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithGeorgianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ka")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ka"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithGermanLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "de")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "de"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithGermanAustriaLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "de_AT")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "de_AT"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithGreekLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "el")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "el"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithHebrewLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "he")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "he"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithHindiLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "hi")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "hi"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithHungarianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "hu")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "hu"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithIcelandicLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "is")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "is"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithIndonesianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "id")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "id"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithIrishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ga")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ga"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithItalianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "it")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "it"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithJapaneseLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ja")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ja"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithKoreanLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ko")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ko"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithLatvianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "lv")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "lv"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithLithuanianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "lt")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "lt"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithNorwegianNynorskLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "nn")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "nn"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithPashtoLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ps")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ps"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithPersianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "fa")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "fa"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithPersianAfghanistanLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "fa_AF")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "fa_AF"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithPolishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "pl")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "pl"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithPortugueseLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "pt")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "pt"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithPunjabiLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "pa")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "pa"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithRomanianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ro")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ro"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithRussianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ru")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ru"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithSlovakLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "sk")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "sk"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithSlovenianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "sl")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "sl"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithSpanishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "es")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "es"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithSwedishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "sv")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "sv"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithThaiLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "th")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "th"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithTurkishLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "tr")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "tr"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithUkrainianLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "uk")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "uk"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithUrduLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "ur")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "ur"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationSucceedsWithVietnameseLocale) {
CollatorFactoryICU factory;
- ASSERT_OK(factory.makeFromBSON(BSON("locale"
- << "vi")).getStatus());
+ ASSERT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "vi"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationFailsWithAfrikaansLocale) {
CollatorFactoryICU factory;
- ASSERT_NOT_OK(factory.makeFromBSON(BSON("locale"
- << "af")).getStatus());
+ ASSERT_NOT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "af"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationFailsWithEsperantoLocale) {
CollatorFactoryICU factory;
- ASSERT_NOT_OK(factory.makeFromBSON(BSON("locale"
- << "eo")).getStatus());
+ ASSERT_NOT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "eo"))
+ .getStatus());
}
TEST(CollatorFactoryICUTest, FactoryInitializationFailsWithSwahiliLocale) {
CollatorFactoryICU factory;
- ASSERT_NOT_OK(factory.makeFromBSON(BSON("locale"
- << "sw")).getStatus());
+ ASSERT_NOT_OK(factory
+ .makeFromBSON(BSON("locale"
+ << "sw"))
+ .getStatus());
}
} // namespace
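
// Illustrative sketch (not part of the commit): every locale test above
// changed the same way. Once the BSON("locale" << "...") argument wraps, the
// full factory.makeFromBSON(...).getStatus() expression exceeds the column
// limit, so clang-format 3.8 breaks before each '.' and stacks the member
// calls vertically under the object. The test name below is hypothetical;
// en_US is a locale the factory accepts per the tests above.
#include "mongo/db/query/collation/collator_factory_icu.h"
#include "mongo/unittest/unittest.h"

namespace mongo {
namespace {

TEST(CollatorFactoryICUTest, ExampleChainShape) {
    CollatorFactoryICU factory;
    // Each member access of the chain sits on its own line, indented under
    // 'factory', exactly as in the reformatted locale tests.
    ASSERT_OK(factory
                  .makeFromBSON(BSON("locale"
                                     << "en_US"))
                  .getStatus());
}

}  // namespace
}  // namespace mongo
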
diff --git a/src/mongo/db/query/collation/collator_factory_mock.cpp b/src/mongo/db/query/collation/collator_factory_mock.cpp
index 503b84c7f79..f6d10450de9 100644
--- a/src/mongo/db/query/collation/collator_factory_mock.cpp
+++ b/src/mongo/db/query/collation/collator_factory_mock.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/query/collation/collator_factory_mock.h"
-#include "mongo/bson/bsonobj.h"
#include "mongo/base/status_with.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/db/query/count_request_test.cpp b/src/mongo/db/query/count_request_test.cpp
index cea7d0e3885..3b7ceeb400b 100644
--- a/src/mongo/db/query/count_request_test.cpp
+++ b/src/mongo/db/query/count_request_test.cpp
@@ -30,8 +30,8 @@
#include "mongo/bson/json.h"
#include "mongo/db/query/count_request.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
namespace {
@@ -41,7 +41,8 @@ TEST(CountRequest, ParseDefaults) {
CountRequest::parseFromBSON("TestDB",
BSON("count"
<< "TestColl"
- << "query" << BSON("a" << BSON("$lte" << 10))));
+ << "query"
+ << BSON("a" << BSON("$lte" << 10))));
ASSERT_OK(countRequestStatus.getStatus());
@@ -62,10 +63,17 @@ TEST(CountRequest, ParseComplete) {
CountRequest::parseFromBSON("TestDB",
BSON("count"
<< "TestColl"
- << "query" << BSON("a" << BSON("$gte" << 11)) << "limit"
- << 100 << "skip" << 1000 << "hint" << BSON("b" << 5)
- << "collation" << BSON("locale"
- << "en_US")));
+ << "query"
+ << BSON("a" << BSON("$gte" << 11))
+ << "limit"
+ << 100
+ << "skip"
+ << 1000
+ << "hint"
+ << BSON("b" << 5)
+ << "collation"
+ << BSON("locale"
+ << "en_US")));
ASSERT_OK(countRequestStatus.getStatus());
@@ -84,10 +92,17 @@ TEST(CountRequest, ParseNegativeLimit) {
CountRequest::parseFromBSON("TestDB",
BSON("count"
<< "TestColl"
- << "query" << BSON("a" << BSON("$gte" << 11)) << "limit"
- << -100 << "skip" << 1000 << "hint" << BSON("b" << 5)
- << "collation" << BSON("locale"
- << "en_US")));
+ << "query"
+ << BSON("a" << BSON("$gte" << 11))
+ << "limit"
+ << -100
+ << "skip"
+ << 1000
+ << "hint"
+ << BSON("b" << 5)
+ << "collation"
+ << BSON("locale"
+ << "en_US")));
ASSERT_OK(countRequestStatus.getStatus());
@@ -113,7 +128,9 @@ TEST(CountRequest, FailParseBadSkipValue) {
CountRequest::parseFromBSON("TestDB",
BSON("count"
<< "TestColl"
- << "query" << BSON("a" << BSON("$gte" << 11)) << "skip"
+ << "query"
+ << BSON("a" << BSON("$gte" << 11))
+ << "skip"
<< -1000));
ASSERT_EQUALS(countRequestStatus.getStatus(), ErrorCodes::BadValue);
@@ -124,7 +141,8 @@ TEST(CountRequest, FailParseBadCollationValue) {
CountRequest::parseFromBSON("TestDB",
BSON("count"
<< "TestColl"
- << "query" << BSON("a" << BSON("$gte" << 11))
+ << "query"
+ << BSON("a" << BSON("$gte" << 11))
<< "collation"
<< "en_US"));
@@ -140,13 +158,13 @@ TEST(CountRequest, ToBSON) {
<< "en_US"));
BSONObj actualObj = countRequest.toBSON();
- BSONObj expectedObj(fromjson(
- "{ count : 'TestDB.TestColl',"
- " query : { a : { '$gte' : 11 } },"
- " limit : 100,"
- " skip : 1000,"
- " hint : { b : 5 },"
- " collation : { locale : 'en_US' } },"));
+ BSONObj expectedObj(
+ fromjson("{ count : 'TestDB.TestColl',"
+ " query : { a : { '$gte' : 11 } },"
+ " limit : 100,"
+ " skip : 1000,"
+ " hint : { b : 5 },"
+ " collation : { locale : 'en_US' } },"));
ASSERT_EQUALS(actualObj, expectedObj);
}
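
// Illustrative sketch (not part of the commit): the ToBSON hunk shows the
// string-literal variant of the same rule. Adjacent literals are a single
// token to the compiler; clang-format 3.8 now breaks after the outer
// constructor's '(' so the fromjson(...) call stays intact, and aligns each
// continuation literal under the first. kExpected and the JSON fields here
// are hypothetical.
#include "mongo/bson/json.h"

namespace {

const mongo::BSONObj kExpected(
    mongo::fromjson("{ count : 'TestDB.TestColl',"
                    " limit : 100,"
                    " skip : 1000 }"));

}  // namespace
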
diff --git a/src/mongo/db/query/cursor_response.cpp b/src/mongo/db/query/cursor_response.cpp
index bf812d302a8..33a1661ea0b 100644
--- a/src/mongo/db/query/cursor_response.cpp
+++ b/src/mongo/db/query/cursor_response.cpp
@@ -123,24 +123,24 @@ StatusWith<CursorResponse> CursorResponse::parseFromBSON(const BSONObj& cmdRespo
BSONElement cursorElt = cmdResponse[kCursorField];
if (cursorElt.type() != BSONType::Object) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kCursorField
- << "' must be a nested object in: " << cmdResponse};
+ str::stream() << "Field '" << kCursorField << "' must be a nested object in: "
+ << cmdResponse};
}
BSONObj cursorObj = cursorElt.Obj();
BSONElement idElt = cursorObj[kIdField];
if (idElt.type() != BSONType::NumberLong) {
- return {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kIdField
- << "' must be of type long in: " << cmdResponse};
+ return {
+ ErrorCodes::TypeMismatch,
+ str::stream() << "Field '" << kIdField << "' must be of type long in: " << cmdResponse};
}
cursorId = idElt.Long();
BSONElement nsElt = cursorObj[kNsField];
if (nsElt.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream() << "Field '" << kNsField
- << "' must be of type string in: " << cmdResponse};
+ str::stream() << "Field '" << kNsField << "' must be of type string in: "
+ << cmdResponse};
}
fullns = nsElt.String();
@@ -152,16 +152,18 @@ StatusWith<CursorResponse> CursorResponse::parseFromBSON(const BSONObj& cmdRespo
if (batchElt.type() != BSONType::Array) {
return {ErrorCodes::TypeMismatch,
str::stream() << "Must have array field '" << kBatchFieldInitial << "' or '"
- << kBatchField << "' in: " << cmdResponse};
+ << kBatchField
+ << "' in: "
+ << cmdResponse};
}
batchObj = batchElt.Obj();
std::vector<BSONObj> batch;
for (BSONElement elt : batchObj) {
if (elt.type() != BSONType::Object) {
- return {
- ErrorCodes::BadValue,
- str::stream() << "getMore response batch contains a non-object element: " << elt};
+ return {ErrorCodes::BadValue,
+ str::stream() << "getMore response batch contains a non-object element: "
+ << elt};
}
batch.push_back(elt.Obj().getOwned());
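
// Illustrative sketch (not part of the commit): the parse-error hunks in this
// file all share one shape -- a brace-initialized return of an error code plus
// a str::stream() message. Over-long stream chains now carry one operand per
// line; when even the first fragment cannot fit, clang-format 3.8 instead
// breaks immediately after 'return {'. kExampleField and parseIdShape are
// hypothetical names; the logic mirrors the id-field check above.
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/util/mongoutils/str.h"

namespace mongo {
namespace {

const char kExampleField[] = "cursor";

StatusWith<long long> parseIdShape(const BSONObj& cmdResponse) {
    BSONElement idElt = cmdResponse[kExampleField];
    if (idElt.type() != BSONType::NumberLong) {
        // The error code stays on the 'return {' line; the stream's trailing
        // operand wraps to its own line once the chain overflows.
        return {ErrorCodes::TypeMismatch,
                str::stream() << "Field '" << kExampleField << "' must be of type long in: "
                              << cmdResponse};
    }
    return idElt.Long();
}

}  // namespace
}  // namespace mongo
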
diff --git a/src/mongo/db/query/cursor_response_test.cpp b/src/mongo/db/query/cursor_response_test.cpp
index 0229f526ca4..711d83d4213 100644
--- a/src/mongo/db/query/cursor_response_test.cpp
+++ b/src/mongo/db/query/cursor_response_test.cpp
@@ -37,11 +37,13 @@ namespace mongo {
namespace {
TEST(CursorResponseTest, parseFromBSONFirstBatch) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -53,11 +55,13 @@ TEST(CursorResponseTest, parseFromBSONFirstBatch) {
}
TEST(CursorResponseTest, parseFromBSONNextBatch) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -69,11 +73,13 @@ TEST(CursorResponseTest, parseFromBSONNextBatch) {
}
TEST(CursorResponseTest, parseFromBSONCursorIdZero) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
- "cursor" << BSON("id" << CursorId(0) << "ns"
- << "db.coll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(0) << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -85,10 +91,13 @@ TEST(CursorResponseTest, parseFromBSONCursorIdZero) {
}
TEST(CursorResponseTest, parseFromBSONEmptyBatch) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch" << BSONArrayBuilder().arr()) << "ok" << 1));
+ StatusWith<CursorResponse> result =
+ CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSONArrayBuilder().arr())
+ << "ok"
+ << 1));
ASSERT_OK(result.getStatus());
CursorResponse response = std::move(result.getValue());
@@ -109,16 +118,20 @@ TEST(CursorResponseTest, parseFromBSONCursorFieldWrongType) {
}
TEST(CursorResponseTest, parseFromBSONNsFieldMissing) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
- "cursor" << BSON("id" << CursorId(123) << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2))) << "ok" << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "firstBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONNsFieldWrongType) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns" << 456 << "firstBatch"
- << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2))) << "ok" << 1));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns" << 456 << "firstBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -126,8 +139,10 @@ TEST(CursorResponseTest, parseFromBSONIdFieldMissing) {
StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
BSON("cursor" << BSON("ns"
<< "db.coll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1));
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
@@ -137,39 +152,50 @@ TEST(CursorResponseTest, parseFromBSONIdFieldWrongType) {
<< "123"
<< "ns"
<< "db.coll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1));
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONBatchFieldMissing) {
StatusWith<CursorResponse> result =
CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll") << "ok" << 1));
+ << "db.coll")
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONFirstBatchFieldWrongType) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "firstBatch" << BSON("_id" << 1)) << "ok" << 1));
+ StatusWith<CursorResponse> result =
+ CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "firstBatch"
+ << BSON("_id" << 1))
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONNextBatchFieldWrongType) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
- BSON("cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch" << BSON("_id" << 1)) << "ok" << 1));
+ StatusWith<CursorResponse> result =
+ CursorResponse::parseFromBSON(BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON("_id" << 1))
+ << "ok"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
}
TEST(CursorResponseTest, parseFromBSONOkFieldMissing) {
- StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "db.coll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))));
+ StatusWith<CursorResponse> result = CursorResponse::parseFromBSON(
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "db.coll"
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))));
ASSERT_NOT_OK(result.getStatus());
}
@@ -186,11 +212,13 @@ TEST(CursorResponseTest, toBSONInitialResponse) {
std::vector<BSONObj> batch = {BSON("_id" << 1), BSON("_id" << 2)};
CursorResponse response(NamespaceString("testdb.testcoll"), CursorId(123), batch);
BSONObj responseObj = response.toBSON(CursorResponse::ResponseType::InitialResponse);
- BSONObj expectedResponse = BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1.0);
+ BSONObj expectedResponse =
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1.0);
ASSERT_EQ(responseObj, expectedResponse);
}
@@ -198,11 +226,13 @@ TEST(CursorResponseTest, toBSONSubsequentResponse) {
std::vector<BSONObj> batch = {BSON("_id" << 1), BSON("_id" << 2)};
CursorResponse response(NamespaceString("testdb.testcoll"), CursorId(123), batch);
BSONObj responseObj = response.toBSON(CursorResponse::ResponseType::SubsequentResponse);
- BSONObj expectedResponse = BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1.0);
+ BSONObj expectedResponse =
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1.0);
ASSERT_EQ(responseObj, expectedResponse);
}
@@ -214,11 +244,13 @@ TEST(CursorResponseTest, addToBSONInitialResponse) {
response.addToBSON(CursorResponse::ResponseType::InitialResponse, &builder);
BSONObj responseObj = builder.obj();
- BSONObj expectedResponse = BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "firstBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1.0);
+ BSONObj expectedResponse =
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "firstBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1.0);
ASSERT_EQ(responseObj, expectedResponse);
}
@@ -230,11 +262,13 @@ TEST(CursorResponseTest, addToBSONSubsequentResponse) {
response.addToBSON(CursorResponse::ResponseType::SubsequentResponse, &builder);
BSONObj responseObj = builder.obj();
- BSONObj expectedResponse = BSON(
- "cursor" << BSON("id" << CursorId(123) << "ns"
- << "testdb.testcoll"
- << "nextBatch" << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
- << "ok" << 1.0);
+ BSONObj expectedResponse =
+ BSON("cursor" << BSON("id" << CursorId(123) << "ns"
+ << "testdb.testcoll"
+ << "nextBatch"
+ << BSON_ARRAY(BSON("_id" << 1) << BSON("_id" << 2)))
+ << "ok"
+ << 1.0);
ASSERT_EQ(responseObj, expectedResponse);
}
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 4e0c82aaa81..6475b5aaa5f 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -39,13 +39,13 @@
#include "mongo/db/exec/multi_plan.h"
#include "mongo/db/exec/near.h"
#include "mongo/db/exec/text.h"
+#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/query/get_executor.h"
#include "mongo/db/query/plan_executor.h"
#include "mongo/db/query/plan_summary_stats.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_settings.h"
#include "mongo/db/query/stage_builder.h"
-#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/server_options.h"
#include "mongo/db/server_parameters.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/query/expression_index.cpp b/src/mongo/db/query/expression_index.cpp
index 6212c28016c..ff24396baeb 100644
--- a/src/mongo/db/query/expression_index.cpp
+++ b/src/mongo/db/query/expression_index.cpp
@@ -34,8 +34,8 @@
#include "mongo/db/geo/r2_region_coverer.h"
#include "mongo/db/hasher.h"
#include "mongo/db/index/expression_params.h"
-#include "mongo/db/server_parameters.h"
#include "mongo/db/query/expression_index_knobs.h"
+#include "mongo/db/server_parameters.h"
#include "third_party/s2/s2cellid.h"
#include "third_party/s2/s2region.h"
#include "third_party/s2/s2regioncoverer.h"
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 0888b5e7085..88f09fcfa19 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -42,7 +42,6 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/filter.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/keypattern.h"
#include "mongo/db/matcher/extensions_callback_real.h"
#include "mongo/db/query/explain.h"
@@ -55,6 +54,7 @@
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/server_options.h"
#include "mongo/db/server_parameters.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/stale_exception.h"
@@ -311,7 +311,9 @@ QueryResult::View getMore(OperationContext* txn,
// there for the cursor.
uassert(ErrorCodes::Unauthorized,
str::stream() << "Requested getMore on namespace " << ns << ", but cursor "
- << cursorid << " belongs to namespace " << cc->ns(),
+ << cursorid
+ << " belongs to namespace "
+ << cc->ns(),
ns == cc->ns());
*isCursorAuthorized = true;
@@ -504,9 +506,9 @@ std::string runQuery(OperationContext* txn,
auto statusWithCQ = CanonicalQuery::canonicalize(txn, q, ExtensionsCallbackReal(txn, &nss));
if (!statusWithCQ.isOK()) {
- uasserted(
- 17287,
- str::stream() << "Can't canonicalize query: " << statusWithCQ.getStatus().toString());
+ uasserted(17287,
+ str::stream() << "Can't canonicalize query: "
+ << statusWithCQ.getStatus().toString());
}
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
invariant(cq.get());
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index f97810a90e0..c2330958e76 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -51,8 +51,8 @@
#include "mongo/db/exec/sort_key_generator.h"
#include "mongo/db/exec/subplan.h"
#include "mongo/db/exec/update.h"
-#include "mongo/db/index_names.h"
#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/index_names.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
#include "mongo/db/matcher/extensions_callback_noop.h"
#include "mongo/db/matcher/extensions_callback_real.h"
@@ -71,14 +71,14 @@
#include "mongo/db/query/query_settings.h"
#include "mongo/db/query/stage_builder.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/server_options.h"
-#include "mongo/db/server_parameters.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
-#include "mongo/db/storage/storage_options.h"
+#include "mongo/db/server_options.h"
+#include "mongo/db/server_parameters.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/oplog_hack.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/scripting/engine.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
@@ -774,7 +774,8 @@ inline void validateUpdate(const char* ns, const BSONObj& updateobj, const BSONO
has pointers into it */
uassert(10156,
str::stream() << "cannot update system collection: " << ns << " q: " << patternOrig
- << " u: " << updateobj,
+ << " u: "
+ << updateobj,
legalClientSystemNS(ns, true));
}
}
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h
index 0488aeccf7d..c15c7144370 100644
--- a/src/mongo/db/query/get_executor.h
+++ b/src/mongo/db/query/get_executor.h
@@ -26,16 +26,16 @@
* it in the license file.
*/
-#include "mongo/db/query/canonical_query.h"
-#include "mongo/db/query/plan_executor.h"
-#include "mongo/db/query/query_planner_params.h"
-#include "mongo/db/query/query_settings.h"
-#include "mongo/db/query/query_solution.h"
#include "mongo/db/ops/delete_request.h"
#include "mongo/db/ops/parsed_delete.h"
#include "mongo/db/ops/parsed_update.h"
#include "mongo/db/ops/update_driver.h"
#include "mongo/db/ops/update_request.h"
+#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/plan_executor.h"
+#include "mongo/db/query/query_planner_params.h"
+#include "mongo/db/query/query_settings.h"
+#include "mongo/db/query/query_solution.h"
namespace mongo {
diff --git a/src/mongo/db/query/getmore_request.cpp b/src/mongo/db/query/getmore_request.cpp
index 4662942294c..b1a97f32c5e 100644
--- a/src/mongo/db/query/getmore_request.cpp
+++ b/src/mongo/db/query/getmore_request.cpp
@@ -81,7 +81,8 @@ Status GetMoreRequest::isValid() const {
if (batchSize && *batchSize <= 0) {
return Status(ErrorCodes::BadValue,
str::stream() << "Batch size for getMore must be positive, "
- << "but received: " << *batchSize);
+ << "but received: "
+ << *batchSize);
}
return Status::OK();
@@ -122,8 +123,8 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
} else if (str::equals(fieldName, kCollectionField)) {
if (el.type() != BSONType::String) {
return {ErrorCodes::TypeMismatch,
- str::stream()
- << "Field 'collection' must be of type string in: " << cmdObj};
+ str::stream() << "Field 'collection' must be of type string in: "
+ << cmdObj};
}
fullns = parseNs(dbname, cmdObj);
@@ -159,7 +160,9 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
} else if (!str::startsWith(fieldName, "$")) {
return {ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj << ". "
- << "Unrecognized field '" << fieldName << "'."};
+ << "Unrecognized field '"
+ << fieldName
+ << "'."};
}
}
diff --git a/src/mongo/db/query/getmore_request_test.cpp b/src/mongo/db/query/getmore_request_test.cpp
index 997310c9645..94dfb9ba366 100644
--- a/src/mongo/db/query/getmore_request_test.cpp
+++ b/src/mongo/db/query/getmore_request_test.cpp
@@ -30,9 +30,9 @@
#include <string>
-#include "mongo/db/repl/optime.h"
-#include "mongo/db/query/getmore_request.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/query/getmore_request.h"
+#include "mongo/db/repl/optime.h"
#include "mongo/unittest/unittest.h"
@@ -60,7 +60,8 @@ TEST(GetMoreRequestTest, parseFromBSONCursorIdNotLongLong) {
StatusWith<GetMoreRequest> result = GetMoreRequest::parseFromBSON("db",
BSON("getMore"
<< "not a number"
- << "collection" << 123));
+ << "collection"
+ << 123));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result.getStatus().code());
}
@@ -115,7 +116,8 @@ TEST(GetMoreRequestTest, parseFromBSONUnrecognizedFieldName) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "unknown_field" << 1));
+ << "unknown_field"
+ << 1));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::FailedToParse, result.getStatus().code());
}
@@ -125,7 +127,8 @@ TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSize) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize" << -1));
+ << "batchSize"
+ << -1));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
}
@@ -135,7 +138,8 @@ TEST(GetMoreRequestTest, parseFromBSONInvalidBatchSizeOfZero) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize" << 0));
+ << "batchSize"
+ << 0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQUALS(ErrorCodes::BadValue, result.getStatus().code());
}
@@ -156,7 +160,8 @@ TEST(GetMoreRequestTest, parseFromBSONBatchSizeProvided) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "batchSize" << 200));
+ << "batchSize"
+ << 200));
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
ASSERT(result.getValue().batchSize);
@@ -180,7 +185,8 @@ TEST(GetMoreRequestTest, parseFromBSONHasMaxTimeMS) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "maxTimeMS" << 100));
+ << "maxTimeMS"
+ << 100));
ASSERT_OK(result.getStatus());
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT(result.getValue().awaitDataTimeout);
@@ -193,7 +199,8 @@ TEST(GetMoreRequestTest, parseFromBSONHasMaxTimeMSOfZero) {
GetMoreRequest::parseFromBSON("db",
BSON("getMore" << CursorId(123) << "collection"
<< "coll"
- << "maxTimeMS" << 0));
+ << "maxTimeMS"
+ << 0));
ASSERT_OK(result.getStatus());
ASSERT_EQUALS("db.coll", result.getValue().nss.toString());
ASSERT_EQUALS(CursorId(123), result.getValue().cursorid);
@@ -208,7 +215,8 @@ TEST(GetMoreRequestTest, toBSONHasBatchSize) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "batchSize" << 99);
+ << "batchSize"
+ << 99);
ASSERT_EQ(requestObj, expectedRequest);
}
@@ -231,7 +239,10 @@ TEST(GetMoreRequestTest, toBSONHasTerm) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "batchSize" << 99 << "term" << 1);
+ << "batchSize"
+ << 99
+ << "term"
+ << 1);
ASSERT_EQ(requestObj, expectedRequest);
}
@@ -243,11 +254,14 @@ TEST(GetMoreRequestTest, toBSONHasCommitLevel) {
1,
repl::OpTime(Timestamp(0, 10), 2));
BSONObj requestObj = request.toBSON();
- BSONObj expectedRequest =
- BSON("getMore" << CursorId(123) << "collection"
- << "testcoll"
- << "batchSize" << 99 << "term" << 1 << "lastKnownCommittedOpTime"
- << BSON("ts" << Timestamp(0, 10) << "t" << 2LL));
+ BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
+ << "testcoll"
+ << "batchSize"
+ << 99
+ << "term"
+ << 1
+ << "lastKnownCommittedOpTime"
+ << BSON("ts" << Timestamp(0, 10) << "t" << 2LL));
ASSERT_EQ(requestObj, expectedRequest);
}
@@ -261,7 +275,8 @@ TEST(GetMoreRequestTest, toBSONHasMaxTimeMS) {
BSONObj requestObj = request.toBSON();
BSONObj expectedRequest = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "maxTimeMS" << 789);
+ << "maxTimeMS"
+ << 789);
ASSERT_EQ(requestObj, expectedRequest);
}
diff --git a/src/mongo/db/query/index_bounds_builder.h b/src/mongo/db/query/index_bounds_builder.h
index b37901f6d2d..5d3c02e029f 100644
--- a/src/mongo/db/query/index_bounds_builder.h
+++ b/src/mongo/db/query/index_bounds_builder.h
@@ -28,8 +28,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/hasher.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/query/index_bounds.h"
#include "mongo/db/query/index_entry.h"
diff --git a/src/mongo/db/query/index_bounds_builder_test.cpp b/src/mongo/db/query/index_bounds_builder_test.cpp
index 4d93b8b63c9..af346b5f908 100644
--- a/src/mongo/db/query/index_bounds_builder_test.cpp
+++ b/src/mongo/db/query/index_bounds_builder_test.cpp
@@ -571,9 +571,8 @@ TEST(IndexBoundsBuilderTest, TranslateLteBinData) {
ASSERT_EQ(oil.intervals.size(), 1U);
ASSERT_EQ(Interval::INTERVAL_EQUALS,
oil.intervals[0].compare(
- Interval(fromjson(
- "{'': {$binary: '', $type: '00'},"
- "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
+ Interval(fromjson("{'': {$binary: '', $type: '00'},"
+ "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
true,
true)));
ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
@@ -593,9 +592,8 @@ TEST(IndexBoundsBuilderTest, TranslateLtBinData) {
ASSERT_EQ(oil.intervals.size(), 1U);
ASSERT_EQ(Interval::INTERVAL_EQUALS,
oil.intervals[0].compare(
- Interval(fromjson(
- "{'': {$binary: '', $type: '00'},"
- "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
+ Interval(fromjson("{'': {$binary: '', $type: '00'},"
+ "'': {$binary: 'AAAAAAAAAAAAAAAAAAAAAAAAAAAA', $type: '00'}}"),
true,
false)));
ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
@@ -615,9 +613,8 @@ TEST(IndexBoundsBuilderTest, TranslateGtBinData) {
ASSERT_EQ(oil.intervals.size(), 1U);
ASSERT_EQ(Interval::INTERVAL_EQUALS,
oil.intervals[0].compare(
- Interval(fromjson(
- "{'': {$binary: '////////////////////////////', $type: '00'},"
- "'': ObjectId('000000000000000000000000')}"),
+ Interval(fromjson("{'': {$binary: '////////////////////////////', $type: '00'},"
+ "'': ObjectId('000000000000000000000000')}"),
false,
false)));
ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
@@ -637,9 +634,8 @@ TEST(IndexBoundsBuilderTest, TranslateGteBinData) {
ASSERT_EQ(oil.intervals.size(), 1U);
ASSERT_EQ(Interval::INTERVAL_EQUALS,
oil.intervals[0].compare(
- Interval(fromjson(
- "{'': {$binary: '////////////////////////////', $type: '00'},"
- "'': ObjectId('000000000000000000000000')}"),
+ Interval(fromjson("{'': {$binary: '////////////////////////////', $type: '00'},"
+ "'': ObjectId('000000000000000000000000')}"),
true,
false)));
ASSERT_EQ(tightness, IndexBoundsBuilder::EXACT);
diff --git a/src/mongo/db/query/index_bounds_test.cpp b/src/mongo/db/query/index_bounds_test.cpp
index 75b49f6429e..250563b54ce 100644
--- a/src/mongo/db/query/index_bounds_test.cpp
+++ b/src/mongo/db/query/index_bounds_test.cpp
@@ -32,13 +32,13 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/query/index_bounds.h"
-#include "mongo/db/json.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/json.h"
+#include "mongo/db/query/index_bounds.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
-#include "mongo/util/text.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/text.h"
using namespace mongo;
diff --git a/src/mongo/db/query/killcursors_request.cpp b/src/mongo/db/query/killcursors_request.cpp
index 6d95311accd..c446998eaa1 100644
--- a/src/mongo/db/query/killcursors_request.cpp
+++ b/src/mongo/db/query/killcursors_request.cpp
@@ -66,8 +66,8 @@ StatusWith<KillCursorsRequest> KillCursorsRequest::parseFromBSON(const std::stri
if (cmdObj[kCursorsField].type() != BSONType::Array) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Field '" << kCursorsField
- << "' must be of type array in: " << cmdObj};
+ str::stream() << "Field '" << kCursorsField << "' must be of type array in: "
+ << cmdObj};
}
std::vector<CursorId> cursorIds;
diff --git a/src/mongo/db/query/killcursors_request_test.cpp b/src/mongo/db/query/killcursors_request_test.cpp
index 74ce8bfa31e..19c220a7408 100644
--- a/src/mongo/db/query/killcursors_request_test.cpp
+++ b/src/mongo/db/query/killcursors_request_test.cpp
@@ -94,7 +94,8 @@ TEST(KillCursorsRequestTest, parseFromBSONCursorFieldNotArray) {
KillCursorsRequest::parseFromBSON("db",
BSON("killCursors"
<< "coll"
- << "cursors" << CursorId(123)));
+ << "cursors"
+ << CursorId(123)));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -104,7 +105,8 @@ TEST(KillCursorsRequestTest, parseFromBSONCursorFieldEmptyArray) {
KillCursorsRequest::parseFromBSON("db",
BSON("killCursors"
<< "coll"
- << "cursors" << BSONArrayBuilder().arr()));
+ << "cursors"
+ << BSONArrayBuilder().arr()));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::BadValue);
}
@@ -129,7 +131,8 @@ TEST(KillCursorsRequestTest, toBSON) {
BSONObj requestObj = request.toBSON();
BSONObj expectedObj = BSON("killCursors"
<< "coll"
- << "cursors" << BSON_ARRAY(CursorId(123) << CursorId(456)));
+ << "cursors"
+ << BSON_ARRAY(CursorId(123) << CursorId(456)));
ASSERT_EQ(requestObj, expectedObj);
}
diff --git a/src/mongo/db/query/killcursors_response.cpp b/src/mongo/db/query/killcursors_response.cpp
index 0484a1100fa..2cf7d998d8f 100644
--- a/src/mongo/db/query/killcursors_response.cpp
+++ b/src/mongo/db/query/killcursors_response.cpp
@@ -50,8 +50,8 @@ Status fillOutCursorArray(const BSONObj& cmdResponse,
if (elt.type() != BSONType::Array) {
return {ErrorCodes::FailedToParse,
- str::stream() << "Field '" << fieldName
- << "' must be of type array in: " << cmdResponse};
+ str::stream() << "Field '" << fieldName << "' must be of type array in: "
+ << cmdResponse};
}
for (BSONElement cursorElt : elt.Obj()) {
diff --git a/src/mongo/db/query/killcursors_response_test.cpp b/src/mongo/db/query/killcursors_response_test.cpp
index e2a56af98c4..0fe3d996edf 100644
--- a/src/mongo/db/query/killcursors_response_test.cpp
+++ b/src/mongo/db/query/killcursors_response_test.cpp
@@ -40,9 +40,13 @@ namespace {
TEST(KillCursorsResponseTest, parseFromBSONSuccess) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
+ << BSON_ARRAY(CursorId(456) << CursorId(6))
+ << "cursorsAlive"
<< BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "cursorsUnknown" << BSONArray() << "ok" << 1.0));
+ << "cursorsUnknown"
+ << BSONArray()
+ << "ok"
+ << 1.0));
ASSERT_OK(result.getStatus());
KillCursorsResponse response = result.getValue();
ASSERT_EQ(response.cursorsKilled.size(), 1U);
@@ -60,8 +64,11 @@ TEST(KillCursorsResponseTest, parseFromBSONSuccess) {
TEST(KillCursorsResponseTest, parseFromBSONSuccessOmitCursorsAlive) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsUnknown"
- << BSON_ARRAY(CursorId(789)) << "ok" << 1.0));
+ << BSON_ARRAY(CursorId(456) << CursorId(6))
+ << "cursorsUnknown"
+ << BSON_ARRAY(CursorId(789))
+ << "ok"
+ << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -76,11 +83,13 @@ TEST(KillCursorsResponseTest, parseFromBSONCommandNotOk) {
}
TEST(KillCursorsResponseTest, parseFromBSONFieldNotArray) {
- StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(BSON(
- "cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << "foobar"
- << "cursorsAlive" << BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "ok" << 1.0));
+ StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
+ BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
+ << "foobar"
+ << "cursorsAlive"
+ << BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
+ << "ok"
+ << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -88,8 +97,11 @@ TEST(KillCursorsResponseTest, parseFromBSONFieldNotArray) {
TEST(KillCursorsResponseTest, parseFromBSONArrayContainsInvalidElement) {
StatusWith<KillCursorsResponse> result = KillCursorsResponse::parseFromBSON(
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
- << BSON_ARRAY(CursorId(7) << "foobar" << CursorId(9)) << "ok" << 1.0));
+ << BSON_ARRAY(CursorId(456) << CursorId(6))
+ << "cursorsAlive"
+ << BSON_ARRAY(CursorId(7) << "foobar" << CursorId(9))
+ << "ok"
+ << 1.0));
ASSERT_NOT_OK(result.getStatus());
ASSERT_EQ(result.getStatus().code(), ErrorCodes::FailedToParse);
}
@@ -103,9 +115,13 @@ TEST(KillCursorsResponseTest, toBSON) {
BSONObj responseObj = response.toBSON();
BSONObj expectedResponse =
BSON("cursorsKilled" << BSON_ARRAY(CursorId(123)) << "cursorsNotFound"
- << BSON_ARRAY(CursorId(456) << CursorId(6)) << "cursorsAlive"
+ << BSON_ARRAY(CursorId(456) << CursorId(6))
+ << "cursorsAlive"
<< BSON_ARRAY(CursorId(7) << CursorId(8) << CursorId(9))
- << "cursorsUnknown" << BSONArray() << "ok" << 1.0);
+ << "cursorsUnknown"
+ << BSONArray()
+ << "ok"
+ << 1.0);
ASSERT_EQ(responseObj, expectedResponse);
}
diff --git a/src/mongo/db/query/lite_parsed_query.cpp b/src/mongo/db/query/lite_parsed_query.cpp
index 00ad7091397..4e7e036a4cc 100644
--- a/src/mongo/db/query/lite_parsed_query.cpp
+++ b/src/mongo/db/query/lite_parsed_query.cpp
@@ -351,7 +351,9 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
} else if (!str::startsWith(fieldName, '$')) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
- << "Unrecognized field '" << fieldName << "'.");
+ << "Unrecognized field '"
+ << fieldName
+ << "'.");
}
}
@@ -553,32 +555,32 @@ Status LiteParsedQuery::validate() const {
if (_limit && *_limit < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "Limit value must be non-negative, but received: " << *_limit);
+ str::stream() << "Limit value must be non-negative, but received: "
+ << *_limit);
}
if (_batchSize && *_batchSize < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "BatchSize value must be non-negative, but received: " << *_batchSize);
+ str::stream() << "BatchSize value must be non-negative, but received: "
+ << *_batchSize);
}
if (_ntoreturn && *_ntoreturn < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "NToReturn value must be non-negative, but received: " << *_ntoreturn);
+ str::stream() << "NToReturn value must be non-negative, but received: "
+ << *_ntoreturn);
}
if (_maxScan < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "MaxScan value must be non-negative, but received: " << _maxScan);
+ str::stream() << "MaxScan value must be non-negative, but received: "
+ << _maxScan);
}
if (_maxTimeMS < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "MaxTimeMS value must be non-negative, but received: " << _maxTimeMS);
+ str::stream() << "MaxTimeMS value must be non-negative, but received: "
+ << _maxTimeMS);
}
if (_tailable) {
@@ -618,9 +620,10 @@ StatusWith<int> LiteParsedQuery::parseMaxTimeMS(BSONElement maxTimeMSElt) {
}
double maxTimeMSDouble = maxTimeMSElt.numberDouble();
if (maxTimeMSElt.type() == mongo::NumberDouble && floor(maxTimeMSDouble) != maxTimeMSDouble) {
- return StatusWith<int>(ErrorCodes::BadValue,
- (StringBuilder() << maxTimeMSElt.fieldNameStringData()
- << " has non-integral value").str());
+ return StatusWith<int>(
+ ErrorCodes::BadValue,
+ (StringBuilder() << maxTimeMSElt.fieldNameStringData() << " has non-integral value")
+ .str());
}
return StatusWith<int>(static_cast<int>(maxTimeMSLongLong));
}
diff --git a/src/mongo/db/query/lite_parsed_query_test.cpp b/src/mongo/db/query/lite_parsed_query_test.cpp
index d94ec230cb6..17affe6f9c3 100644
--- a/src/mongo/db/query/lite_parsed_query_test.cpp
+++ b/src/mongo/db/query/lite_parsed_query_test.cpp
@@ -346,9 +346,9 @@ TEST(LiteParsedQueryTest, ValidateSortOrder) {
ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: 1}}")));
ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"image\"}}")));
ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$world: \"textScore\"}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson(
- "{a: {$meta: \"textScore\","
- " b: 1}}")));
+ ASSERT_FALSE(
+ LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"textScore\","
+ " b: 1}}")));
ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': 1}")));
ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': -1}")));
}
diff --git a/src/mongo/db/query/parsed_projection_test.cpp b/src/mongo/db/query/parsed_projection_test.cpp
index 90f1b9bfb5c..b1b1b59324f 100644
--- a/src/mongo/db/query/parsed_projection_test.cpp
+++ b/src/mongo/db/query/parsed_projection_test.cpp
@@ -28,11 +28,11 @@
#include "mongo/db/query/parsed_projection.h"
-#include <memory>
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
#include "mongo/unittest/unittest.h"
+#include <memory>
namespace {
@@ -56,8 +56,10 @@ unique_ptr<ParsedProjection> createParsedProjection(const BSONObj& query, const
Status status = ParsedProjection::make(
projObj, queryMatchExpr.get(), &out, ExtensionsCallbackDisallowExtensions());
if (!status.isOK()) {
- FAIL(mongoutils::str::stream() << "failed to parse projection " << projObj
- << " (query: " << query << "): " << status.toString());
+ FAIL(mongoutils::str::stream() << "failed to parse projection " << projObj << " (query: "
+ << query
+ << "): "
+ << status.toString());
}
ASSERT(out);
return unique_ptr<ParsedProjection>(out);
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index ab64b8f1d26..ca1f93d9d6f 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -32,19 +32,19 @@
#include "mongo/db/query/plan_cache.h"
-#include <algorithm>
-#include <math.h>
-#include <memory>
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/client/dbclientinterface.h" // For QueryOption_foobar
#include "mongo/db/matcher/expression_array.h"
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/query/plan_ranker.h"
-#include "mongo/db/query/query_solution.h"
#include "mongo/db/query/query_knobs.h"
+#include "mongo/db/query/query_solution.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
+#include <algorithm>
+#include <math.h>
+#include <memory>
namespace mongo {
namespace {
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 02ab8ef64cb..332e7b79cea 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -28,8 +28,8 @@
#pragma once
-#include <set>
#include <boost/optional/optional.hpp>
+#include <set>
#include "mongo/db/exec/plan_stats.h"
#include "mongo/db/query/canonical_query.h"
diff --git a/src/mongo/db/query/plan_cache_indexability.cpp b/src/mongo/db/query/plan_cache_indexability.cpp
index 4e61e9ba595..066d7a2782b 100644
--- a/src/mongo/db/query/plan_cache_indexability.cpp
+++ b/src/mongo/db/query/plan_cache_indexability.cpp
@@ -30,14 +30,14 @@
#include "mongo/db/query/plan_cache_indexability.h"
-#include <memory>
#include "mongo/base/init.h"
#include "mongo/base/owned_pointer_vector.h"
-#include "mongo/db/query/index_entry.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_algo.h"
#include "mongo/db/matcher/expression_leaf.h"
+#include "mongo/db/query/index_entry.h"
#include "mongo/stdx/memory.h"
+#include <memory>
namespace mongo {
diff --git a/src/mongo/db/query/plan_cache_indexability_test.cpp b/src/mongo/db/query/plan_cache_indexability_test.cpp
index bfc1d786878..e5db935d3a3 100644
--- a/src/mongo/db/query/plan_cache_indexability_test.cpp
+++ b/src/mongo/db/query/plan_cache_indexability_test.cpp
@@ -42,8 +42,8 @@ std::unique_ptr<MatchExpression> parseMatchExpression(const BSONObj& obj) {
StatusWithMatchExpression status =
MatchExpressionParser::parse(obj, ExtensionsCallbackDisallowExtensions(), collator);
if (!status.isOK()) {
- FAIL(str::stream() << "failed to parse query: " << obj.toString()
- << ". Reason: " << status.getStatus().toString());
+ FAIL(str::stream() << "failed to parse query: " << obj.toString() << ". Reason: "
+ << status.getStatus().toString());
}
return std::move(status.getValue());
}
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 4192f748362..708ff2b69e6 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -33,8 +33,8 @@
#include "mongo/db/query/plan_cache.h"
#include <algorithm>
-#include <ostream>
#include <memory>
+#include <ostream>
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
@@ -318,9 +318,9 @@ TEST(PlanCacheTest, ShouldNotCacheQueryWithMax) {
* the planner is able to come up with a cacheable solution.
*/
TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{a: {$geoWithin: "
- "{$box: [[-180, -90], [180, 90]]}}}"));
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{a: {$geoWithin: "
+ "{$box: [[-180, -90], [180, 90]]}}}"));
assertShouldCacheQuery(*cq);
}
@@ -328,10 +328,10 @@ TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
* $geoWithin queries with GeoJSON coordinates are supported by the index bounds builder.
*/
TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{a: {$geoWithin: "
- "{$geometry: {type: 'Polygon', coordinates: "
- "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{a: {$geoWithin: "
+ "{$geometry: {type: 'Polygon', coordinates: "
+ "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
assertShouldCacheQuery(*cq);
}
@@ -339,11 +339,11 @@ TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
* $geoWithin queries with both legacy and GeoJSON coordinates are cacheable.
*/
TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{$or: [{a: {$geoWithin: {$geometry: {type: 'Polygon', "
- "coordinates: [[[0, 0], [0, 90], "
- "[90, 0], [0, 0]]]}}}},"
- "{a: {$geoWithin: {$box: [[-180, -90], [180, 90]]}}}]}"));
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{$or: [{a: {$geoWithin: {$geometry: {type: 'Polygon', "
+ "coordinates: [[[0, 0], [0, 90], "
+ "[90, 0], [0, 0]]]}}}},"
+ "{a: {$geoWithin: {$box: [[-180, -90], [180, 90]]}}}]}"));
assertShouldCacheQuery(*cq);
}
@@ -351,10 +351,10 @@ TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
* $geoIntersects queries are always cacheable because they support GeoJSON coordinates only.
*/
TEST(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{a: {$geoIntersects: "
- "{$geometry: {type: 'Point', coordinates: "
- "[10.0, 10.0]}}}}"));
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{a: {$geoIntersects: "
+ "{$geometry: {type: 'Point', coordinates: "
+ "[10.0, 10.0]}}}}"));
assertShouldCacheQuery(*cq);
}
@@ -363,9 +363,9 @@ TEST(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
* between flat and spherical queries.
*/
TEST(PlanCacheTest, ShouldNotCacheQueryWithGeoNear) {
- unique_ptr<CanonicalQuery> cq(canonicalize(
- "{a: {$geoNear: {$geometry: {type: 'Point',"
- "coordinates: [0,0]}, $maxDistance:100}}}"));
+ unique_ptr<CanonicalQuery> cq(
+ canonicalize("{a: {$geoNear: {$geometry: {type: 'Point',"
+ "coordinates: [0,0]}, $maxDistance:100}}}"));
assertShouldCacheQuery(*cq);
}
@@ -1279,14 +1279,14 @@ TEST(PlanCacheTest, ComputeKeyGeoWithin) {
PlanCache planCache;
// Legacy coordinates.
- unique_ptr<CanonicalQuery> cqLegacy(canonicalize(
- "{a: {$geoWithin: "
- "{$box: [[-180, -90], [180, 90]]}}}"));
+ unique_ptr<CanonicalQuery> cqLegacy(
+ canonicalize("{a: {$geoWithin: "
+ "{$box: [[-180, -90], [180, 90]]}}}"));
// GeoJSON coordinates.
- unique_ptr<CanonicalQuery> cqNew(canonicalize(
- "{a: {$geoWithin: "
- "{$geometry: {type: 'Polygon', coordinates: "
- "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
+ unique_ptr<CanonicalQuery> cqNew(
+ canonicalize("{a: {$geoWithin: "
+ "{$geometry: {type: 'Polygon', coordinates: "
+ "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
ASSERT_NOT_EQUALS(planCache.computeKey(*cqLegacy), planCache.computeKey(*cqNew));
}
diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp
index c6fe53a1c37..4b843baabe4 100644
--- a/src/mongo/db/query/plan_enumerator.cpp
+++ b/src/mongo/db/query/plan_enumerator.cpp
@@ -32,8 +32,8 @@
#include <set>
-#include "mongo/db/query/indexability.h"
#include "mongo/db/query/index_tag.h"
+#include "mongo/db/query/indexability.h"
#include "mongo/util/log.h"
#include "mongo/util/string_map.h"
@@ -332,8 +332,7 @@ bool PlanEnumerator::getNext(MatchExpression** tree) {
sortUsingTags(*tree);
_root->resetTag();
- LOG(5) << "Enumerator: memo just before moving:" << endl
- << dumpMemo();
+ LOG(5) << "Enumerator: memo just before moving:" << endl << dumpMemo();
_done = nextMemo(memoIDForNode(_root));
return true;
}
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 84602dc3c29..4a121926b9e 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -40,8 +40,8 @@
#include "mongo/db/exec/subplan.h"
#include "mongo/db/exec/working_set.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/query/plan_yield_policy.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/record_fetcher.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/fail_point_service.h"
@@ -556,7 +556,8 @@ Status PlanExecutor::executePlan() {
if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) {
return Status(ErrorCodes::OperationFailed,
str::stream() << "Exec error: " << WorkingSetCommon::toStatusString(obj)
- << ", state: " << PlanExecutor::statestr(state));
+ << ", state: "
+ << PlanExecutor::statestr(state));
}
invariant(PlanExecutor::IS_EOF == state);
diff --git a/src/mongo/db/query/plan_ranker.cpp b/src/mongo/db/query/plan_ranker.cpp
index d4f68eb0602..9836e980fbb 100644
--- a/src/mongo/db/query/plan_ranker.cpp
+++ b/src/mongo/db/query/plan_ranker.cpp
@@ -32,8 +32,8 @@
#include <algorithm>
#include <cmath>
-#include <vector>
#include <utility>
+#include <vector>
#include "mongo/db/query/plan_ranker.h"
diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp
index 9b28a933131..c3172d5387d 100644
--- a/src/mongo/db/query/planner_access.cpp
+++ b/src/mongo/db/query/planner_access.cpp
@@ -39,9 +39,9 @@
#include "mongo/db/matcher/expression_array.h"
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/matcher/expression_text.h"
-#include "mongo/db/query/indexability.h"
#include "mongo/db/query/index_bounds_builder.h"
#include "mongo/db/query/index_tag.h"
+#include "mongo/db/query/indexability.h"
#include "mongo/db/query/query_knobs.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_planner_common.h"
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index b789395cb44..1a507d16ef2 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -33,12 +33,12 @@
#include <set>
#include <vector>
-#include "mongo/db/jsobj.h"
#include "mongo/db/index/expression_params.h"
#include "mongo/db/index/s2_common.h"
+#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression_geo.h"
-#include "mongo/db/query/query_planner_common.h"
#include "mongo/db/query/query_planner.h"
+#include "mongo/db/query/query_planner_common.h"
#include "mongo/util/log.h"
namespace mongo {
diff --git a/src/mongo/db/query/planner_analysis_test.cpp b/src/mongo/db/query/planner_analysis_test.cpp
index 78b82845afa..c02c9c25cfb 100644
--- a/src/mongo/db/query/planner_analysis_test.cpp
+++ b/src/mongo/db/query/planner_analysis_test.cpp
@@ -90,16 +90,14 @@ TEST(QueryPlannerAnalysis, GetSortPatternSpecialIndexTypes) {
ASSERT_EQUALS(fromjson("{a: 1}"),
QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 'text', c: 1}")));
ASSERT_EQUALS(fromjson("{a: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson(
- "{a: 1, b: '2dsphere',"
- " c: 1}")));
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: '2dsphere',"
+ " c: 1}")));
ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text'}")));
ASSERT_EQUALS(fromjson("{a: 1, b: 1}"),
- QueryPlannerAnalysis::getSortPattern(fromjson(
- "{a: 1, b: 1, c: 'text',"
- " d: 1}")));
+ QueryPlannerAnalysis::getSortPattern(fromjson("{a: 1, b: 1, c: 'text',"
+ " d: 1}")));
}
// Test the generation of sort orders provided by an index scan done by
diff --git a/src/mongo/db/query/planner_ixselect.cpp b/src/mongo/db/query/planner_ixselect.cpp
index 8af46a211af..7c32d516260 100644
--- a/src/mongo/db/query/planner_ixselect.cpp
+++ b/src/mongo/db/query/planner_ixselect.cpp
@@ -40,8 +40,8 @@
#include "mongo/db/matcher/expression_geo.h"
#include "mongo/db/matcher/expression_text.h"
#include "mongo/db/query/collation/collator_interface.h"
-#include "mongo/db/query/indexability.h"
#include "mongo/db/query/index_tag.h"
+#include "mongo/db/query/indexability.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/query/planner_ixselect_test.cpp b/src/mongo/db/query/planner_ixselect_test.cpp
index 14c46e47970..098c921e5bd 100644
--- a/src/mongo/db/query/planner_ixselect_test.cpp
+++ b/src/mongo/db/query/planner_ixselect_test.cpp
@@ -32,7 +32,6 @@
#include "mongo/db/query/planner_ixselect.h"
-#include <memory>
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
@@ -40,6 +39,7 @@
#include "mongo/db/query/index_tag.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/text.h"
+#include <memory>
using namespace mongo;
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 6560e54a125..faad7ca6aae 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -40,10 +40,10 @@
#include "mongo/db/matcher/expression_text.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/plan_cache.h"
+#include "mongo/db/query/plan_enumerator.h"
#include "mongo/db/query/planner_access.h"
#include "mongo/db/query/planner_analysis.h"
#include "mongo/db/query/planner_ixselect.h"
-#include "mongo/db/query/plan_enumerator.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/util/log.h"
@@ -398,8 +398,7 @@ Status QueryPlanner::planFromCache(const CanonicalQuery& query,
// The planner requires a defined sort order.
sortUsingTags(clone.get());
- LOG(5) << "Tagged tree:" << endl
- << clone->toString();
+ LOG(5) << "Tagged tree:" << endl << clone->toString();
// Use the cached index assignments to build solnRoot.
QuerySolutionNode* solnRoot = QueryPlannerAccess::buildIndexedDataAccess(
@@ -415,8 +414,8 @@ Status QueryPlanner::planFromCache(const CanonicalQuery& query,
QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
if (!soln) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "Failed to analyze plan from cache. Query: " << query.toStringShort());
+ str::stream() << "Failed to analyze plan from cache. Query: "
+ << query.toStringShort());
}
LOG(5) << "Planner: solution constructed from the cache:\n" << soln->toString();
@@ -677,8 +676,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
}
// query.root() is now annotated with RelevantTag(s).
- LOG(5) << "Rated tree:" << endl
- << query.root()->toString();
+ LOG(5) << "Rated tree:" << endl << query.root()->toString();
// If there is a GEO_NEAR it must have an index it can use directly.
const MatchExpression* gnNode = NULL;
@@ -744,8 +742,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
MatchExpression* rawTree;
while (isp.getNext(&rawTree) && (out->size() < params.maxIndexedSolutions)) {
- LOG(5) << "About to build solntree from tagged tree:" << endl
- << rawTree->toString();
+ LOG(5) << "About to build solntree from tagged tree:" << endl << rawTree->toString();
// The tagged tree produced by the plan enumerator is not guaranteed
// to be canonically sorted. In order to be compatible with the cached
@@ -771,8 +768,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
QuerySolution* soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, solnRoot);
if (NULL != soln) {
- LOG(5) << "Planner: adding solution:" << endl
- << soln->toString();
+ LOG(5) << "Planner: adding solution:" << endl << soln->toString();
if (indexTreeStatus.isOK()) {
SolutionCacheData* scd = new SolutionCacheData();
scd->tree.reset(autoData.release());
@@ -918,8 +914,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
scd->solnType = SolutionCacheData::COLLSCAN_SOLN;
collscan->cacheData.reset(scd);
out->push_back(collscan);
- LOG(5) << "Planner: outputting a collscan:" << endl
- << collscan->toString();
+ LOG(5) << "Planner: outputting a collscan:" << endl << collscan->toString();
}
}
diff --git a/src/mongo/db/query/query_planner_array_test.cpp b/src/mongo/db/query/query_planner_array_test.cpp
index d5f7fa741a3..3bc52263b13 100644
--- a/src/mongo/db/query/query_planner_array_test.cpp
+++ b/src/mongo/db/query/query_planner_array_test.cpp
@@ -92,9 +92,9 @@ TEST_F(QueryPlannerTest, AllElemMatchCompound) {
// true means multikey
addIndex(BSON("d" << 1 << "a.b" << 1 << "a.c" << 1), true);
- runQuery(fromjson(
- "{d: 1, a: {$all: [{$elemMatch: {b: 2, c: 2}},"
- "{$elemMatch: {b: 3, c: 3}}]}}"));
+ runQuery(
+ fromjson("{d: 1, a: {$all: [{$elemMatch: {b: 2, c: 2}},"
+ "{$elemMatch: {b: 3, c: 3}}]}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -121,9 +121,9 @@ TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild) {
TEST_F(QueryPlannerTest, ElemMatchWithAllElemMatchChild2) {
// true means multikey
addIndex(BSON("a.b.c.d" << 1), true);
- runQuery(fromjson(
- "{'a.b': {$elemMatch: {c: {$all: "
- "[{$elemMatch: {d: {$gt: 1, $lt: 3}}}]}}}}"));
+ runQuery(
+ fromjson("{'a.b': {$elemMatch: {c: {$all: "
+ "[{$elemMatch: {d: {$gt: 1, $lt: 3}}}]}}}}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -202,9 +202,9 @@ TEST_F(QueryPlannerTest, ElemMatchNested) {
TEST_F(QueryPlannerTest, TwoElemMatchNested) {
addIndex(BSON("a.d.e" << 1));
addIndex(BSON("a.b.c" << 1));
- runQuery(fromjson(
- "{ a:{ $elemMatch:{ d:{ $elemMatch:{ e:{ $lte:1 } } },"
- "b:{ $elemMatch:{ c:{ $gte:1 } } } } } }"));
+ runQuery(
+ fromjson("{ a:{ $elemMatch:{ d:{ $elemMatch:{ e:{ $lte:1 } } },"
+ "b:{ $elemMatch:{ c:{ $gte:1 } } } } } }"));
ASSERT_EQUALS(getNumSolutions(), 3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -486,9 +486,9 @@ TEST_F(QueryPlannerTest, MultikeyNestedElemMatchIn) {
// The bounds can be compounded because the index is not multikey.
TEST_F(QueryPlannerTest, TwoNestedElemMatchBounds) {
addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1));
- runQuery(fromjson(
- "{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
- "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
+ runQuery(
+ fromjson("{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
+ "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -505,9 +505,9 @@ TEST_F(QueryPlannerTest, TwoNestedElemMatchBounds) {
TEST_F(QueryPlannerTest, MultikeyTwoNestedElemMatchBounds) {
// true means multikey
addIndex(BSON("a.d.e" << 1 << "a.b.c" << 1), true);
- runQuery(fromjson(
- "{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
- "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
+ runQuery(
+ fromjson("{a: {$elemMatch: {d: {$elemMatch: {e: {$lte: 1}}},"
+ "b: {$elemMatch: {c: {$gte: 1}}}}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -535,9 +535,9 @@ TEST_F(QueryPlannerTest, MultikeyElemMatchValue) {
// the index is not multikey.
TEST_F(QueryPlannerTest, ElemMatchIntersectBoundsNotMultikey) {
addIndex(BSON("a.b" << 1));
- runQuery(fromjson(
- "{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
- "'a.b': {$in: [2,5]}}"));
+ runQuery(
+ fromjson("{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
+ "'a.b': {$in: [2,5]}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -554,9 +554,9 @@ TEST_F(QueryPlannerTest, ElemMatchIntersectBoundsNotMultikey) {
TEST_F(QueryPlannerTest, ElemMatchIntersectBoundsMultikey) {
// true means multikey
addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson(
- "{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
- "'a.b': {$in: [2,5]}}"));
+ runQuery(
+ fromjson("{a: {$elemMatch: {b: {$elemMatch: {$gte: 1, $lte: 4}}}},"
+ "'a.b': {$in: [2,5]}}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -848,9 +848,9 @@ TEST_F(QueryPlannerTest, MultikeyDoubleDottedElemMatchOnDotted) {
TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted) {
// true means multikey
addIndex(BSON("a.b.c" << 1 << "a.e.f" << 1 << "a.b.d" << 1 << "a.e.g" << 1), true);
- runQuery(fromjson(
- "{'a.b': {$elemMatch: {c: 1, d: 1}}, "
- "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
+ runQuery(
+ fromjson("{'a.b': {$elemMatch: {c: 1, d: 1}}, "
+ "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -866,9 +866,9 @@ TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted) {
TEST_F(QueryPlannerTest, MultikeyComplexDoubleDotted2) {
// true means multikey
addIndex(BSON("a.b.c" << 1 << "a.e.c" << 1 << "a.b.d" << 1 << "a.e.d" << 1), true);
- runQuery(fromjson(
- "{'a.b': {$elemMatch: {c: 1, d: 1}}, "
- "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
+ runQuery(
+ fromjson("{'a.b': {$elemMatch: {c: 1, d: 1}}, "
+ "'a.e': {$elemMatch: {f: 1, g: 1}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -889,9 +889,9 @@ TEST_F(QueryPlannerTest, ElemMatchIndexIntersection) {
addIndex(BSON("a.b.startDate" << 1), true);
addIndex(BSON("a.b.endDate" << 1), true);
- runQuery(fromjson(
- "{shortId: 3, 'a.b': {$elemMatch: {startDate: {$lte: 3},"
- "endDate: {$gt: 6}}}}"));
+ runQuery(
+ fromjson("{shortId: 3, 'a.b': {$elemMatch: {startDate: {$lte: 3},"
+ "endDate: {$gt: 6}}}}"));
assertNumSolutions(6U);
@@ -1071,9 +1071,9 @@ TEST_F(QueryPlannerTest, MultikeyElemMatchAll) {
// SERVER-16042
TEST_F(QueryPlannerTest, MultikeyElemMatchAllCompound) {
addIndex(BSON("a.b" << 1 << "c" << 1), true);
- runQuery(fromjson(
- "{a: {$all: [{$elemMatch: {b: {$gt: 1}}}, "
- "{$elemMatch: {b: {$lt: 0}}}]}, c: 3}"));
+ runQuery(
+ fromjson("{a: {$all: [{$elemMatch: {b: {$gt: 1}}}, "
+ "{$elemMatch: {b: {$lt: 0}}}]}, c: 3}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -1090,9 +1090,9 @@ TEST_F(QueryPlannerTest, MultikeyElemMatchAllCompound) {
// SERVER-16042
TEST_F(QueryPlannerTest, MultikeyElemMatchAllCompound2) {
addIndex(BSON("a.b" << 1 << "c" << 1), true);
- runQuery(fromjson(
- "{a: {$all: [{$elemMatch: {b: {$gt: 1}}}, "
- "{$elemMatch: {b: {$lt: 0}}}]}, c: {$gte: 3, $lte: 4}}"));
+ runQuery(
+ fromjson("{a: {$all: [{$elemMatch: {b: {$gt: 1}}}, "
+ "{$elemMatch: {b: {$lt: 0}}}]}, c: {$gte: 3, $lte: 4}}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -1348,9 +1348,9 @@ TEST_F(QueryPlannerTest, CannotIntersectBoundsOfTwoSeparateElemMatches) {
MultikeyPaths multikeyPaths{{0U}, {0U}};
addIndex(BSON("a.b" << 1 << "a.c" << 1), multikeyPaths);
- runQuery(fromjson(
- "{$and: [{a: {$elemMatch: {b: {$gte: 0}, c: {$lt: 20}}}}, "
- "{a: {$elemMatch: {b: {$lt: 10}, c: {$gte: 5}}}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$elemMatch: {b: {$gte: 0}, c: {$lt: 20}}}}, "
+ "{a: {$elemMatch: {b: {$lt: 10}, c: {$gte: 5}}}}]}"));
assertNumSolutions(3U);
assertSolutionExists(
diff --git a/src/mongo/db/query/query_planner_collation_test.cpp b/src/mongo/db/query/query_planner_collation_test.cpp
index 7f9d784e6c5..996c644e609 100644
--- a/src/mongo/db/query/query_planner_collation_test.cpp
+++ b/src/mongo/db/query/query_planner_collation_test.cpp
@@ -90,9 +90,9 @@ TEST_F(QueryPlannerTest, StringComparisonAndNonStringComparisonCanUseSeparateInd
// The string predicate can use index {a: 1}, since the collators match. The non-string
// comparison can use index {b: 1}, even though the collators don't match.
- runQueryAsCommand(fromjson(
- "{find: 'testns', filter: {a: {$lt: 'foo'}, b: {$lte: 4}}, collation: {locale: "
- "'reverse'}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'testns', filter: {a: {$lt: 'foo'}, b: {$lte: 4}}, collation: {locale: "
+ "'reverse'}}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -189,9 +189,9 @@ TEST_F(QueryPlannerTest, OrQueryResultsInCollscanWhenOnlyOneBranchHasIndexWithMa
addIndex(fromjson("{a: 1}"), &reverseStringCollator);
addIndex(fromjson("{b: 1}"), &alwaysEqualCollator);
- runQueryAsCommand(fromjson(
- "{find: 'testns', filter: {$or: [{a: 'foo'}, {b: 'bar'}]}, collation: {locale: "
- "'reverse'}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'testns', filter: {$or: [{a: 'foo'}, {b: 'bar'}]}, collation: {locale: "
+ "'reverse'}}"));
assertNumSolutions(1U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -202,9 +202,9 @@ TEST_F(QueryPlannerTest, OrQueryCanBeIndexedWhenBothBranchesHaveIndexWithMatchin
addIndex(fromjson("{a: 1}"), &collator);
addIndex(fromjson("{b: 1}"), &collator);
- runQueryAsCommand(fromjson(
- "{find: 'testns', filter: {$or: [{a: 'foo'}, {b: 'bar'}]}, collation: {locale: "
- "'reverse'}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'testns', filter: {$or: [{a: 'foo'}, {b: 'bar'}]}, collation: {locale: "
+ "'reverse'}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
diff --git a/src/mongo/db/query/query_planner_geo_test.cpp b/src/mongo/db/query/query_planner_geo_test.cpp
index f4218f20fb9..9e087444079 100644
--- a/src/mongo/db/query/query_planner_geo_test.cpp
+++ b/src/mongo/db/query/query_planner_geo_test.cpp
@@ -75,10 +75,10 @@ TEST_F(QueryPlannerTest, Basic2DSphereCompound) {
addIndex(BSON("loc"
<< "2dsphere"));
- runQuery(fromjson(
- "{loc:{$near:{$geometry:{type:'Point',"
- "coordinates : [-81.513743,28.369947] },"
- " $maxDistance :100}},a: 'mouse'}"));
+ runQuery(
+ fromjson("{loc:{$near:{$geometry:{type:'Point',"
+ "coordinates : [-81.513743,28.369947] },"
+ " $maxDistance :100}},a: 'mouse'}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {node: {geoNear2dsphere: {pattern: {loc: '2dsphere'}, "
@@ -88,11 +88,12 @@ TEST_F(QueryPlannerTest, Basic2DSphereCompound) {
TEST_F(QueryPlannerTest, Basic2DCompound) {
addIndex(BSON("loc"
<< "2d"
- << "a" << 1));
+ << "a"
+ << 1));
- runQuery(fromjson(
- "{ loc: { $geoWithin: { $box : [[0, 0],[10, 10]] } },"
- "a: 'mouse' }"));
+ runQuery(
+ fromjson("{ loc: { $geoWithin: { $box : [[0, 0],[10, 10]] } },"
+ "a: 'mouse' }"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists(
@@ -110,10 +111,10 @@ TEST_F(QueryPlannerTest, Multikey2DSphereCompound) {
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{loc:{$near:{$geometry:{type:'Point',"
- "coordinates : [-81.513743,28.369947] },"
- " $maxDistance :100}},a: 'mouse'}"));
+ runQuery(
+ fromjson("{loc:{$near:{$geometry:{type:'Point',"
+ "coordinates : [-81.513743,28.369947] },"
+ " $maxDistance :100}},a: 'mouse'}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {node: {geoNear2dsphere: {pattern: {loc: '2dsphere'}, "
@@ -125,9 +126,9 @@ TEST_F(QueryPlannerTest, Basic2DSphereNonNear) {
addIndex(BSON("a"
<< "2dsphere"));
- runQuery(fromjson(
- "{a: {$geoIntersects: {$geometry: {type: 'Point',"
- "coordinates: [10.0, 10.0]}}}}"));
+ runQuery(
+ fromjson("{a: {$geoIntersects: {$geometry: {type: 'Point',"
+ "coordinates: [10.0, 10.0]}}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
@@ -147,9 +148,9 @@ TEST_F(QueryPlannerTest, Multikey2DSphereNonNear) {
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{a: {$geoIntersects: {$geometry: {type: 'Point',"
- "coordinates: [10.0, 10.0]}}}}"));
+ runQuery(
+ fromjson("{a: {$geoIntersects: {$geometry: {type: 'Point',"
+ "coordinates: [10.0, 10.0]}}}}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
assertSolutionExists("{fetch: {node: {ixscan: {pattern: {a: '2dsphere'}}}}}");
@@ -182,9 +183,9 @@ TEST_F(QueryPlannerTest, Basic2DSphereGeoNear) {
"{geoNear2dsphere: {pattern: {a: '2dsphere'}, "
"bounds: {a: [['MinKey', 'MaxKey', true, true]]}}}");
- runQuery(fromjson(
- "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}"));
+ runQuery(
+ fromjson("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{geoNear2dsphere: {pattern: {a: '2dsphere'}, "
@@ -204,9 +205,9 @@ TEST_F(QueryPlannerTest, Multikey2DSphereGeoNear) {
"{geoNear2dsphere: {pattern: {a: '2dsphere'}, "
"bounds: {a: [['MinKey', 'MaxKey', true, true]]}}}");
- runQuery(fromjson(
- "{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
- "$maxDistance:100}}}"));
+ runQuery(
+ fromjson("{a: {$geoNear: {$geometry: {type: 'Point', coordinates: [0,0]},"
+ "$maxDistance:100}}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{geoNear2dsphere: {pattern: {a: '2dsphere'}, "
@@ -277,9 +278,9 @@ TEST_F(QueryPlannerTest, GeoNearMultipleRelevantIndicesButOnlyOneCompatible) {
addIndex(BSON("b" << 1 << "a"
<< "2dsphere"));
- runQuery(fromjson(
- "{a: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0,0]}}},"
- " b: {$exists: false}}"));
+ runQuery(
+ fromjson("{a: {$nearSphere: {$geometry: {type: 'Point', coordinates: [0,0]}}},"
+ " b: {$exists: false}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -294,9 +295,9 @@ TEST_F(QueryPlannerTest, Or2DNonNear) {
<< "2d"));
addIndex(BSON("b"
<< "2d"));
- runQuery(fromjson(
- "{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
+ runQuery(
+ fromjson("{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {b : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -309,9 +310,9 @@ TEST_F(QueryPlannerTest, Or2DNonNear) {
TEST_F(QueryPlannerTest, Or2DSameFieldNonNear) {
addIndex(BSON("a"
<< "2d"));
- runQuery(fromjson(
- "{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
+ runQuery(
+ fromjson("{$or: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -344,10 +345,10 @@ TEST_F(QueryPlannerTest, Or2DSphereNonNearMultikey) {
addIndex(BSON("b"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{$or: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [10.0, 10.0]}}}},"
- " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}"));
+ runQuery(
+ fromjson("{$or: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [10.0, 10.0]}}}},"
+ " {b: {$geoWithin: { $centerSphere: [[ 10, 20 ], 0.01 ] } }} ]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -360,9 +361,9 @@ TEST_F(QueryPlannerTest, Or2DSphereNonNearMultikey) {
TEST_F(QueryPlannerTest, And2DSameFieldNonNear) {
addIndex(BSON("a"
<< "2d"));
- runQuery(fromjson(
- "{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
+ runQuery(
+ fromjson("{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {a : { $within : { $center : [[ 5, 5 ], 7 ] } }} ]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -374,9 +375,9 @@ TEST_F(QueryPlannerTest, And2DSameFieldNonNear) {
TEST_F(QueryPlannerTest, And2DWith2DNearSameField) {
addIndex(BSON("a"
<< "2d"));
- runQuery(fromjson(
- "{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
- " {a : { $near : [ 5, 5 ] } } ]}"));
+ runQuery(
+ fromjson("{$and: [ {a : { $within : { $polygon : [[0,0], [2,0], [4,0]] } }},"
+ " {a : { $near : [ 5, 5 ] } } ]}"));
// GEO_NEAR must use the index, and GEO predicate becomes a filter.
assertNumSolutions(1U);
@@ -388,9 +389,9 @@ TEST_F(QueryPlannerTest, And2DWith2DNearSameFieldMultikey) {
addIndex(BSON("geo"
<< "2d"),
multikey);
- runQuery(fromjson(
- "{$and: [{geo: {$near: [0, 0]}}, "
- "{geo: {$within: {$polygon: [[0, 0], [1, 0], [1, 1]]}}}]}"));
+ runQuery(
+ fromjson("{$and: [{geo: {$near: [0, 0]}}, "
+ "{geo: {$within: {$polygon: [[0, 0], [1, 0], [1, 1]]}}}]}"));
// GEO_NEAR must use the index, and GEO predicate becomes a filter.
assertNumSolutions(1U);
@@ -402,11 +403,11 @@ TEST_F(QueryPlannerTest, And2DWith2DNearSameFieldMultikey) {
TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNear) {
addIndex(BSON("a"
<< "2dsphere"));
- runQuery(fromjson(
- "{$and: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+ runQuery(
+ fromjson("{$and: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -420,11 +421,11 @@ TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNearMultikey) {
addIndex(BSON("a"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{$and: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+ runQuery(
+ fromjson("{$and: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -434,11 +435,11 @@ TEST_F(QueryPlannerTest, And2DSphereSameFieldNonNearMultikey) {
TEST_F(QueryPlannerTest, And2DSphereWithNearSameField) {
addIndex(BSON("a"
<< "2dsphere"));
- runQuery(fromjson(
- "{$and: [{a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- "{a: {$near: {$geometry: "
- "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ "{a: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
// GEO_NEAR must use the index, and GEO predicate becomes a filter.
assertNumSolutions(1U);
@@ -450,11 +451,11 @@ TEST_F(QueryPlannerTest, And2DSphereWithNearSameFieldMultikey) {
addIndex(BSON("a"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{$and: [{a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- "{a: {$near: {$geometry: "
- "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ "{a: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [10.0, 10.0]}}}}]}"));
// GEO_NEAR must use the index, and GEO predicate becomes a filter.
assertNumSolutions(1U);
@@ -466,11 +467,11 @@ TEST_F(QueryPlannerTest, And2DSphereWithNearSameFieldMultikey) {
TEST_F(QueryPlannerTest, Or2DSphereSameFieldNonNear) {
addIndex(BSON("a"
<< "2dsphere"));
- runQuery(fromjson(
- "{$or: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+ runQuery(
+ fromjson("{$or: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -482,11 +483,11 @@ TEST_F(QueryPlannerTest, Or2DSphereSameFieldNonNearMultikey) {
addIndex(BSON("a"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{$or: [ {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
- " {a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
+ runQuery(
+ fromjson("{$or: [ {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}},"
+ " {a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [4.0, 1.0]}}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -498,9 +499,9 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNear) {
addIndex(BSON("a" << 1 << "b"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{a: {$gte: 0}, b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
+ runQuery(
+ fromjson("{a: {$gte: 0}, b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -513,9 +514,9 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearFetchRequired) {
addIndex(BSON("a" << 1 << "b"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{a: {$gte: 0, $lt: 5}, b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
+ runQuery(
+ fromjson("{a: {$gte: 0, $lt: 5}, b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -532,9 +533,9 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleIndices) {
addIndex(BSON("c" << 1 << "b"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{a: {$gte: 0}, c: 3, b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
+ runQuery(
+ fromjson("{a: {$gte: 0}, c: 3, b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
assertNumSolutions(2U);
assertSolutionExists(
@@ -552,9 +553,9 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleLeadingFields) {
addIndex(BSON("a" << 1 << "b" << 1 << "c"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{a: {$lt: 5, $gt: 1}, b: 6, c: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
+ runQuery(
+ fromjson("{a: {$lt: 5, $gt: 1}, b: 6, c: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -569,10 +570,10 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearMultipleGeoPreds) {
addIndex(BSON("a" << 1 << "b" << 1 << "c"
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{a: 1, b: 6, $and: ["
- "{c: {$near: {$geometry: {type: 'Point', coordinates: [2, 2]}}}},"
- "{c: {$geoWithin: {$box: [ [1, 1], [3, 3] ] } } } ] }"));
+ runQuery(
+ fromjson("{a: 1, b: 6, $and: ["
+ "{c: {$near: {$geometry: {type: 'Point', coordinates: [2, 2]}}}},"
+ "{c: {$geoWithin: {$box: [ [1, 1], [3, 3] ] } } } ] }"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -585,12 +586,15 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DSphereNearCompoundTest) {
// true means multikey
addIndex(BSON("a" << 1 << "b"
<< "2dsphere"
- << "c" << 1 << "d" << 1),
+ << "c"
+ << 1
+ << "d"
+ << 1),
true);
- runQuery(fromjson(
- "{a: {$gte: 0}, c: {$gte: 0, $lt: 4}, d: {$gt: 1, $lt: 5},"
- "b: {$near: {$geometry: "
- "{type: 'Point', coordinates: [2, 2]}}}}"));
+ runQuery(
+ fromjson("{a: {$gte: 0}, c: {$gte: 0, $lt: 4}, d: {$gt: 1, $lt: 5},"
+ "b: {$near: {$geometry: "
+ "{type: 'Point', coordinates: [2, 2]}}}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -604,7 +608,8 @@ TEST_F(QueryPlannerTest, CompoundMultikey2DNear) {
// true means multikey
addIndex(BSON("a"
<< "2d"
- << "b" << 1),
+ << "b"
+ << 1),
true);
runQuery(fromjson("{a: {$near: [0, 0]}, b: {$gte: 0}}"));
@@ -698,9 +703,8 @@ TEST_F(QueryPlannerTest, CantUseNonCompoundGeoIndexToProvideSort) {
TEST_F(QueryPlannerTest, CantUseNonCompoundGeoIndexToProvideSortWithIndexablePred) {
addIndex(BSON("x"
<< "2dsphere"));
- runQuerySortProj(fromjson(
- "{x: {$geoIntersects: {$geometry: {type: 'Point',"
- " coordinates: [0, 0]}}}}"),
+ runQuerySortProj(fromjson("{x: {$geoIntersects: {$geometry: {type: 'Point',"
+ " coordinates: [0, 0]}}}}"),
BSON("x" << 1),
BSONObj());
@@ -727,9 +731,8 @@ TEST_F(QueryPlannerTest, CantUseCompoundGeoIndexToProvideSortIfNoGeoPred) {
TEST_F(QueryPlannerTest, CanUseCompoundGeoIndexToProvideSortWithGeoPred) {
addIndex(BSON("x" << 1 << "y"
<< "2dsphere"));
- runQuerySortProj(fromjson(
- "{x: 1, y: {$geoIntersects: {$geometry: {type: 'Point',"
- " coordinates: [0, 0]}}}}"),
+ runQuerySortProj(fromjson("{x: 1, y: {$geoIntersects: {$geometry: {type: 'Point',"
+ " coordinates: [0, 0]}}}}"),
BSON("x" << 1),
BSONObj());
@@ -768,19 +771,19 @@ TEST_F(QueryPlannerTest, Negation2DSphereGeoNear) {
addIndex(BSON("a"
<< "2dsphere"));
- runQuery(fromjson(
- "{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
- "{b: {$ne: 1}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
+ "{b: {$ne: 1}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {node: {geoNear2dsphere: {pattern: {a: '2dsphere'}, "
"bounds: {a: [['MinKey', 'MaxKey', true, true]]}}}}}");
- runQuery(fromjson(
- "{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
- "coordinates: [0, 0]},"
- "$maxDistance: 100}}},"
- "{b: {$ne: 1}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
+ "coordinates: [0, 0]},"
+ "$maxDistance: 100}}},"
+ "{b: {$ne: 1}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {node: {geoNear2dsphere: {pattern: {a: '2dsphere'}, "
@@ -798,19 +801,19 @@ TEST_F(QueryPlannerTest, Negation2DSphereGeoNearMultikey) {
<< "2dsphere"),
true);
- runQuery(fromjson(
- "{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
- "{b: {$ne: 1}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$nearSphere: [0,0], $maxDistance: 0.31}}, "
+ "{b: {$ne: 1}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {node: {geoNear2dsphere: {pattern: {a: '2dsphere'}, "
"bounds: {a: [['MinKey', 'MaxKey', true, true]]}}}}}");
- runQuery(fromjson(
- "{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
- "coordinates: [0, 0]},"
- "$maxDistance: 100}}},"
- "{b: {$ne: 1}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$geoNear: {$geometry: {type: 'Point', "
+ "coordinates: [0, 0]},"
+ "$maxDistance: 100}}},"
+ "{b: {$ne: 1}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {node: {geoNear2dsphere: {pattern: {a: '2dsphere'}, "
@@ -1065,10 +1068,10 @@ TEST_F(QueryPlannerGeo2dsphereTest, CannotIntersectBoundsOfTwoSeparateElemMatche
<< "2dsphere"),
multikeyPaths);
- runQuery(fromjson(
- "{$and: [{a: {$elemMatch: {b: {$gte: 0}, c: {$lt: 20}}}}, "
- "{a: {$elemMatch: {b: {$lt: 10}, c: {$gte: 5}}}}, "
- "{'a.geo': {$nearSphere: [0, 0]}}]}"));
+ runQuery(
+ fromjson("{$and: [{a: {$elemMatch: {b: {$gte: 0}, c: {$lt: 20}}}}, "
+ "{a: {$elemMatch: {b: {$lt: 10}, c: {$gte: 5}}}}, "
+ "{'a.geo': {$nearSphere: [0, 0]}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -1096,7 +1099,10 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{1U}, {1U}, {1U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b" << 1 << "a.c" << 1),
+ << "a.b"
+ << 1
+ << "a.c"
+ << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, 'a.b': 2, 'a.c': 3}"));
@@ -1126,7 +1132,10 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U}, {0U}, {0U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b" << 1 << "a.c" << 1),
+ << "a.b"
+ << 1
+ << "a.c"
+ << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, 'a.b': 2, 'a.c': 3}"));
@@ -1157,7 +1166,10 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U}, {0U}, {0U}};
addIndex(BSON("a.geo"
<< "2dsphere"
- << "a.b" << 1 << "a.c" << 1),
+ << "a.b"
+ << 1
+ << "a.c"
+ << 1),
multikeyPaths);
runQuery(fromjson("{'a.geo': {$nearSphere: [0, 0]}, a: {$elemMatch: {b: 2, c: 3}}}"));
@@ -1189,7 +1201,10 @@ TEST_F(QueryPlannerGeo2dsphereTest,
MultikeyPaths multikeyPaths{{0U, 1U}, {0U, 1U}, {0U, 1U}};
addIndex(BSON("a.b.geo"
<< "2dsphere"
- << "a.b.c" << 1 << "a.b.d" << 1),
+ << "a.b.c"
+ << 1
+ << "a.b.d"
+ << 1),
multikeyPaths);
runQuery(fromjson("{'a.b.geo': {$nearSphere: [0, 0]}, a: {$elemMatch: {'b.c': 2, 'b.d': 3}}}"));
@@ -1207,9 +1222,9 @@ TEST_F(QueryPlannerGeo2dsphereTest, CanIntersectBoundsOn2dsphereFieldWhenItIsNot
addIndex(BSON("geo"
<< "2dsphere"),
multikeyPaths);
- runQuery(fromjson(
- "{$and: [{geo: {$nearSphere: [0, 0]}}, "
- "{geo: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}]}"));
+ runQuery(
+ fromjson("{$and: [{geo: {$nearSphere: [0, 0]}}, "
+ "{geo: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -1223,9 +1238,9 @@ TEST_F(QueryPlannerGeo2dsphereTest, CannotIntersectBoundsOn2dsphereFieldWhenItIs
addIndex(BSON("geo"
<< "2dsphere"),
multikeyPaths);
- runQuery(fromjson(
- "{$and: [{geo: {$nearSphere: [0, 0]}}, "
- "{geo: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}]}"));
+ runQuery(
+ fromjson("{$and: [{geo: {$nearSphere: [0, 0]}}, "
+ "{geo: {$geoIntersects: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -1353,7 +1368,8 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDNearCompound) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("geo"
<< "2dsphere"
- << "nongeo" << 1)};
+ << "nongeo"
+ << 1)};
BSONObj predicate = fromjson("{geo: {$nearSphere: [-71.34895, 42.46037]}}");
testMultiple2dsphereIndexVersions(versions, keyPatterns, predicate, 1U);
}
@@ -1364,10 +1380,16 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDSphereSparseBelowOr) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("geo1"
<< "2dsphere"
- << "a" << 1 << "b" << 1),
+ << "a"
+ << 1
+ << "b"
+ << 1),
BSON("geo2"
<< "2dsphere"
- << "a" << 1 << "b" << 1)};
+ << "a"
+ << 1
+ << "b"
+ << 1)};
BSONObj predicate = fromjson(
"{a: 4, b: 5, $or: ["
@@ -1389,7 +1411,8 @@ TEST_F(QueryPlanner2dsphereVersionTest, TwoDSphereSparseBelowElemMatch) {
std::vector<int> versions{2, 3};
std::vector<BSONObj> keyPatterns = {BSON("a.b"
<< "2dsphere"
- << "a.c" << 1)};
+ << "a.c"
+ << 1)};
BSONObj predicate = fromjson(
"{a: {$elemMatch: {b: {$geoWithin: {$centerSphere: [[10,20], 0.01]}},"
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index 108d4002195..de70d351035 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -666,9 +666,9 @@ TEST_F(QueryPlannerTest, OrOfAnd3) {
// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
TEST_F(QueryPlannerTest, OrOfAnd4) {
addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson(
- "{$or: [{a:{$gt:1,$lt:5}, b:{$gt:0,$lt:3}, c:6}, "
- "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
+ runQuery(
+ fromjson("{$or: [{a:{$gt:1,$lt:5}, b:{$gt:0,$lt:3}, c:6}, "
+ "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -684,9 +684,9 @@ TEST_F(QueryPlannerTest, OrOfAnd4) {
// SERVER-12594: we don't yet collapse an OR of ANDs into a single ixscan.
TEST_F(QueryPlannerTest, OrOfAnd5) {
addIndex(BSON("a" << 1 << "b" << 1));
- runQuery(fromjson(
- "{$or: [{a:{$gt:1,$lt:5}, c:6}, "
- "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
+ runQuery(
+ fromjson("{$or: [{a:{$gt:1,$lt:5}, c:6}, "
+ "{a:3, b:{$gt:1,$lt:2}, c:{$gt:0,$lt:10}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -870,9 +870,9 @@ TEST_F(QueryPlannerTest, OrInexactWithExact2) {
// SERVER-13960: an exact, inexact covered, and inexact fetch predicate.
TEST_F(QueryPlannerTest, OrAllThreeTightnesses) {
addIndex(BSON("names" << 1));
- runQuery(fromjson(
- "{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/},"
- "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
+ runQuery(
+ fromjson("{$or: [{names: 'frank'}, {names: /^al(ice)|(ex)/},"
+ "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -887,9 +887,9 @@ TEST_F(QueryPlannerTest, OrAllThreeTightnesses) {
TEST_F(QueryPlannerTest, OrTwoInexactFetch) {
// true means multikey
addIndex(BSON("names" << 1), true);
- runQuery(fromjson(
- "{$or: [{names: {$elemMatch: {$eq: 'alexandra'}}},"
- "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
+ runQuery(
+ fromjson("{$or: [{names: {$elemMatch: {$eq: 'alexandra'}}},"
+ "{names: {$elemMatch: {$eq: 'thomas'}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -917,9 +917,9 @@ TEST_F(QueryPlannerTest, OrInexactCoveredMultikey) {
TEST_F(QueryPlannerTest, OrElemMatchObject) {
// true means multikey
addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson(
- "{$or: [{a: {$elemMatch: {b: {$lte: 1}}}},"
- "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
+ runQuery(
+ fromjson("{$or: [{a: {$elemMatch: {b: {$lte: 1}}}},"
+ "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -935,9 +935,9 @@ TEST_F(QueryPlannerTest, OrElemMatchObject) {
TEST_F(QueryPlannerTest, OrElemMatchObjectBeneathAnd) {
// true means multikey
addIndex(BSON("a.b" << 1), true);
- runQuery(fromjson(
- "{$or: [{'a.b': 0, a: {$elemMatch: {b: {$lte: 1}}}},"
- "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
+ runQuery(
+ fromjson("{$or: [{'a.b': 0, a: {$elemMatch: {b: {$lte: 1}}}},"
+ "{a: {$elemMatch: {b: {$gte: 4}}}}]}"));
assertNumSolutions(3U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -988,9 +988,9 @@ TEST_F(QueryPlannerTest, OrWithExactAndInexact) {
// SERVER-13960: $in with exact, inexact covered, and inexact fetch predicates.
TEST_F(QueryPlannerTest, OrWithExactAndInexact2) {
addIndex(BSON("name" << 1));
- runQuery(fromjson(
- "{$or: [{name: {$in: ['thomas', /^alexand(er|ra)/]}},"
- "{name: {$exists: false}}]}"));
+ runQuery(
+ fromjson("{$or: [{name: {$in: ['thomas', /^alexand(er|ra)/]}},"
+ "{name: {$exists: false}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -1005,9 +1005,9 @@ TEST_F(QueryPlannerTest, OrWithExactAndInexact2) {
TEST_F(QueryPlannerTest, OrWithExactAndInexact3) {
addIndex(BSON("a" << 1));
addIndex(BSON("b" << 1));
- runQuery(fromjson(
- "{$or: [{a: {$in: [/z/, /x/]}}, {a: 'w'},"
- "{b: {$exists: false}}, {b: {$in: ['p']}}]}"));
+ runQuery(
+ fromjson("{$or: [{a: {$in: [/z/, /x/]}}, {a: 'w'},"
+ "{b: {$exists: false}}, {b: {$in: ['p']}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -1420,7 +1420,8 @@ TEST_F(QueryPlannerTest, CantUseHashedIndexToProvideSortWithIndexablePred) {
TEST_F(QueryPlannerTest, CantUseTextIndexToProvideSort) {
addIndex(BSON("x" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuerySortProj(BSONObj(), BSON("x" << 1), BSONObj());
ASSERT_EQUALS(getNumSolutions(), 1U);
@@ -1766,10 +1767,9 @@ TEST_F(QueryPlannerTest, ManyInWithSort) {
// SERVER-1205
TEST_F(QueryPlannerTest, TooManyToExplode) {
addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1 << "d" << 1));
- runQuerySortProjSkipNToReturn(fromjson(
- "{a: {$in: [1,2,3,4,5,6]},"
- "b:{$in:[1,2,3,4,5,6,7,8]},"
- "c:{$in:[1,2,3,4,5,6,7,8]}}"),
+ runQuerySortProjSkipNToReturn(fromjson("{a: {$in: [1,2,3,4,5,6]},"
+ "b:{$in:[1,2,3,4,5,6,7,8]},"
+ "c:{$in:[1,2,3,4,5,6,7,8]}}"),
BSON("d" << 1),
BSONObj(),
0,
@@ -1962,11 +1962,10 @@ TEST_F(QueryPlannerTest, TooManyToExplodeOr) {
addIndex(BSON("b" << 1 << "e" << 1));
addIndex(BSON("c" << 1 << "e" << 1));
addIndex(BSON("d" << 1 << "e" << 1));
- runQuerySortProj(fromjson(
- "{$or: [{a: {$in: [1,2,3,4,5,6]},"
- "b: {$in: [1,2,3,4,5,6]}},"
- "{c: {$in: [1,2,3,4,5,6]},"
- "d: {$in: [1,2,3,4,5,6]}}]}"),
+ runQuerySortProj(fromjson("{$or: [{a: {$in: [1,2,3,4,5,6]},"
+ "b: {$in: [1,2,3,4,5,6]}},"
+ "{c: {$in: [1,2,3,4,5,6]},"
+ "d: {$in: [1,2,3,4,5,6]}}]}"),
BSON("e" << 1),
BSONObj());
@@ -2004,9 +2003,8 @@ TEST_F(QueryPlannerTest, TooManyToExplodeOr) {
TEST_F(QueryPlannerTest, ExplodeIxscanWithFilter) {
addIndex(BSON("a" << 1 << "b" << 1));
- runQuerySortProj(fromjson(
- "{$and: [{b: {$regex: 'foo', $options: 'i'}},"
- "{a: {$in: [1, 2]}}]}"),
+ runQuerySortProj(fromjson("{$and: [{b: {$regex: 'foo', $options: 'i'}},"
+ "{a: {$in: [1, 2]}}]}"),
BSON("b" << 1),
BSONObj());
@@ -2106,9 +2104,9 @@ TEST_F(QueryPlannerTest, TwoPlansElemMatch) {
addIndex(BSON("a" << 1 << "b" << 1));
addIndex(BSON("arr.x" << 1 << "a" << 1));
- runQuery(fromjson(
- "{arr: { $elemMatch : { x : 5 , y : 5 } },"
- " a : 55 , b : { $in : [ 1 , 5 , 8 ] } }"));
+ runQuery(
+ fromjson("{arr: { $elemMatch : { x : 5 , y : 5 } },"
+ " a : 55 , b : { $in : [ 1 , 5 , 8 ] } }"));
// 2 indexed solns and one non-indexed
ASSERT_EQUALS(getNumSolutions(), 3U);
@@ -2763,9 +2761,9 @@ TEST_F(QueryPlannerTest, NegatedRangeIntGTE) {
TEST_F(QueryPlannerTest, TwoNegatedRanges) {
addIndex(BSON("i" << 1));
- runQuery(fromjson(
- "{$and: [{i: {$not: {$lte: 'b'}}}, "
- "{i: {$not: {$gte: 'f'}}}]}"));
+ runQuery(
+ fromjson("{$and: [{i: {$not: {$lte: 'b'}}}, "
+ "{i: {$not: {$gte: 'f'}}}]}"));
assertNumSolutions(2U);
assertSolutionExists("{cscan: {dir: 1}}");
@@ -3273,14 +3271,14 @@ TEST_F(QueryPlannerTest, IntersectCanBeVeryBig) {
addIndex(BSON("b" << 1));
addIndex(BSON("c" << 1));
addIndex(BSON("d" << 1));
- runQuery(fromjson(
- "{$or: [{ 'a' : null, 'b' : 94, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 98, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 1, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 2, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 7, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 9, 'c' : null, 'd' : null },"
- "{ 'a' : null, 'b' : 16, 'c' : null, 'd' : null }]}"));
+ runQuery(
+ fromjson("{$or: [{ 'a' : null, 'b' : 94, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 98, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 1, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 2, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 7, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 9, 'c' : null, 'd' : null },"
+ "{ 'a' : null, 'b' : 16, 'c' : null, 'd' : null }]}"));
assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
}
@@ -3549,13 +3547,13 @@ TEST_F(QueryPlannerTest, OrEnumerationLimit) {
// 6 $or clauses, each with 2 indexed predicates
// means 2^6 = 64 possibilities. We should hit the limit.
- runQuery(fromjson(
- "{$or: [{a: 1, b: 1},"
- "{a: 2, b: 2},"
- "{a: 3, b: 3},"
- "{a: 4, b: 4},"
- "{a: 5, b: 5},"
- "{a: 6, b: 6}]}"));
+ runQuery(
+ fromjson("{$or: [{a: 1, b: 1},"
+ "{a: 2, b: 2},"
+ "{a: 3, b: 3},"
+ "{a: 4, b: 4},"
+ "{a: 5, b: 5},"
+ "{a: 6, b: 6}]}"));
assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
}
@@ -3569,10 +3567,10 @@ TEST_F(QueryPlannerTest, OrEnumerationLimit2) {
// 3 $or clauses, and a few other preds. Each $or clause can
// generate up to the max number of allowed $or enumerations.
- runQuery(fromjson(
- "{$or: [{a: 1, b: 1, c: 1, d: 1},"
- "{a: 2, b: 2, c: 2, d: 2},"
- "{a: 3, b: 3, c: 3, d: 3}]}"));
+ runQuery(
+ fromjson("{$or: [{a: 1, b: 1, c: 1, d: 1},"
+ "{a: 2, b: 2, c: 2, d: 2},"
+ "{a: 3, b: 3, c: 3, d: 3}]}"));
assertNumSolutions(internalQueryEnumerationMaxOrSolutions);
}
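The runQuery(fromjson(...)) reflows throughout this file rely on compile-time concatenation of adjacent string literals, so only the line breaks change, never the JSON that fromjson() receives. A minimal standalone sketch of that mechanism (example values hypothetical, not from the tests above):

#include <cassert>
#include <string>

int main() {
    // Adjacent string literals are merged by the compiler before runtime,
    // so a long JSON filter can be wrapped freely across lines; the string
    // handed to fromjson() is identical before and after the reformat.
    const std::string query =
        "{$or: [{a: 1, b: 1},"
        "{a: 2, b: 2}]}";
    assert(query == "{$or: [{a: 1, b: 1},{a: 2, b: 2}]}");
    return 0;
}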
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index 2258a818547..aae4d95372f 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -34,10 +34,10 @@
#include <algorithm>
-#include "mongo/db/namespace_string.h"
#include "mongo/db/matcher/expression_parser.h"
-#include "mongo/db/matcher/extensions_callback_noop.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
+#include "mongo/db/matcher/extensions_callback_noop.h"
+#include "mongo/db/namespace_string.h"
#include "mongo/db/query/query_knobs.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/db/query/query_planner_test_lib.h"
@@ -404,8 +404,8 @@ std::unique_ptr<MatchExpression> QueryPlannerTest::parseMatchExpression(const BS
StatusWithMatchExpression status =
MatchExpressionParser::parse(obj, ExtensionsCallbackDisallowExtensions(), collator);
if (!status.isOK()) {
- FAIL(str::stream() << "failed to parse query: " << obj.toString()
- << ". Reason: " << status.getStatus().toString());
+ FAIL(str::stream() << "failed to parse query: " << obj.toString() << ". Reason: "
+ << status.getStatus().toString());
}
return std::move(status.getValue());
}
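The FAIL(str::stream() << ...) hunk above only moves where the << chain breaks; the chain itself just accumulates operands into one message string. A rough standard-library analogue, with std::ostringstream standing in for mongo's str::stream:

#include <iostream>
#include <sstream>

int main() {
    // Each << appends to the buffer; re-wrapping the chain across lines
    // leaves the assembled message unchanged.
    std::ostringstream msg;
    msg << "failed to parse query: " << "{a: 1}" << ". Reason: "
        << "BadValue";
    std::cout << msg.str() << '\n';
    return 0;
}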
diff --git a/src/mongo/db/query/query_planner_test_lib.cpp b/src/mongo/db/query/query_planner_test_lib.cpp
index 2e6d9c000d2..a52505acc23 100644
--- a/src/mongo/db/query/query_planner_test_lib.cpp
+++ b/src/mongo/db/query/query_planner_test_lib.cpp
@@ -32,7 +32,6 @@
#include "mongo/db/query/query_planner_test_lib.h"
-#include <ostream>
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
@@ -42,6 +41,7 @@
#include "mongo/db/query/query_solution.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
+#include <ostream>
namespace {
diff --git a/src/mongo/db/query/query_planner_test_lib.h b/src/mongo/db/query/query_planner_test_lib.h
index 0a1931d1828..e8ca0e5c360 100644
--- a/src/mongo/db/query/query_planner_test_lib.h
+++ b/src/mongo/db/query/query_planner_test_lib.h
@@ -30,7 +30,6 @@
* This file contains tests for mongo/db/query/query_planner.cpp
*/
-#include <ostream>
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/matcher/expression_parser.h"
@@ -38,6 +37,7 @@
#include "mongo/db/query/query_solution.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
+#include <ostream>
namespace mongo {
diff --git a/src/mongo/db/query/query_planner_text_test.cpp b/src/mongo/db/query/query_planner_text_test.cpp
index 884b8d1dac9..5050653292a 100644
--- a/src/mongo/db/query/query_planner_text_test.cpp
+++ b/src/mongo/db/query/query_planner_text_test.cpp
@@ -51,7 +51,8 @@ using namespace mongo;
TEST_F(QueryPlannerTest, SimpleText) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{$text: {$search: 'blah'}}"));
assertNumSolutions(1);
@@ -63,7 +64,8 @@ TEST_F(QueryPlannerTest, CantUseTextUnlessHaveTextPred) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{a:1}"));
// No table scans allowed so there is no solution.
@@ -76,7 +78,8 @@ TEST_F(QueryPlannerTest, HaveOKPrefixOnTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -95,7 +98,8 @@ TEST_F(QueryPlannerTest, HaveBadPrefixOnTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runInvalidQuery(fromjson("{a:{$gt: 1}, $text:{$search: 'blah'}}"));
runInvalidQuery(fromjson("{$text: {$search: 'blah'}}"));
@@ -108,7 +112,8 @@ TEST_F(QueryPlannerTest, ManyPrefixTextIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
// Both points.
runQuery(fromjson("{a:1, b:1, $text:{$search: 'blah'}}"));
@@ -133,7 +138,10 @@ TEST_F(QueryPlannerTest, SuffixOptional) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1 << "b" << 1));
+ << "_ftsx"
+ << 1
+ << "b"
+ << 1));
runQuery(fromjson("{a:1, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -148,7 +156,10 @@ TEST_F(QueryPlannerTest, RemoveFromSubtree) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1 << "b" << 1));
+ << "_ftsx"
+ << 1
+ << "b"
+ << 1));
runQuery(fromjson("{a:1, $or: [{a:1}, {b:7}], $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -164,7 +175,8 @@ TEST_F(QueryPlannerTest, CompoundPrefixEvenIfMultikey) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "b" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1),
+ << "_ftsx"
+ << 1),
true);
// Both points.
@@ -177,7 +189,10 @@ TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafPrefix) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1 << "b" << 1));
+ << "_ftsx"
+ << 1
+ << "b"
+ << 1));
// 'a' is not an EQ so it doesn't compound w/the text pred. We also shouldn't use the text
// index to satisfy it w/o the text query.
@@ -188,7 +203,10 @@ TEST_F(QueryPlannerTest, IndexOnOwnFieldButNotLeafSuffixNoPrefix) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1 << "b" << 1));
+ << "_ftsx"
+ << 1
+ << "b"
+ << 1));
runQuery(fromjson("{b:{$elemMatch:{$gt: 0, $lt: 2}}, $text:{$search: 'blah'}}"));
assertNumSolutions(1);
@@ -198,7 +216,8 @@ TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndex) {
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{$and: [{a: 3}, {$text: {$search: 'foo'}}], a: 3}"));
assertNumSolutions(1U);
@@ -211,7 +230,8 @@ TEST_F(QueryPlannerTest, TextInsideAndWithCompoundIndexAndMultiplePredsOnIndexPr
params.options = QueryPlannerParams::NO_TABLE_SCAN;
addIndex(BSON("a" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{$and: [{a: 1}, {a: 2}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
@@ -225,7 +245,8 @@ TEST_F(QueryPlannerTest, TextInsideOrBasic) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{a: 0, $or: [{_id: 1}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
@@ -241,10 +262,11 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
- runQuery(fromjson(
- "{$and: [{$or: [{a: 3}, {a: 4}]}, "
- "{$or: [{$text: {$search: 'foo'}}, {a: 5}]}]}"));
+ << "_ftsx"
+ << 1));
+ runQuery(
+ fromjson("{$and: [{$or: [{a: 3}, {a: 4}]}, "
+ "{$or: [{$text: {$search: 'foo'}}, {a: 5}]}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -260,10 +282,11 @@ TEST_F(QueryPlannerTest, TextInsideOrOfAnd) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
- runQuery(fromjson(
- "{$or: [{a: {$gt: 1, $gt: 2}}, "
- "{a: {$gt: 3}, $text: {$search: 'foo'}}]}"));
+ << "_ftsx"
+ << 1));
+ runQuery(
+ fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
+ "{a: {$gt: 3}, $text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -281,10 +304,11 @@ TEST_F(QueryPlannerTest, TextInsideAndOrAnd) {
addIndex(BSON("b" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
- runQuery(fromjson(
- "{a: 1, $or: [{a:2}, {b:2}, "
- "{a: 1, $text: {$search: 'foo'}}]}"));
+ << "_ftsx"
+ << 1));
+ runQuery(
+ fromjson("{a: 1, $or: [{a:2}, {b:2}, "
+ "{a: 1, $text: {$search: 'foo'}}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -300,12 +324,13 @@ TEST_F(QueryPlannerTest, TextInsideAndOrAndOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
- runQuery(fromjson(
- "{$or: [{a: {$gt: 1, $gt: 2}}, "
- "{a: {$gt: 3}, $or: [{$text: {$search: 'foo'}}, "
- "{a: 6}]}], "
- "a: 5}"));
+ << "_ftsx"
+ << 1));
+ runQuery(
+ fromjson("{$or: [{a: {$gt: 1, $gt: 2}}, "
+ "{a: {$gt: 3}, $or: [{$text: {$search: 'foo'}}, "
+ "{a: 6}]}], "
+ "a: 5}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -323,7 +348,8 @@ TEST_F(QueryPlannerTest, TextInsideOrOneBranchNotIndexed) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{a: 1, $or: [{b: 2}, {$text: {$search: 'foo'}}]}"));
assertNumSolutions(0);
@@ -336,10 +362,11 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
addIndex(BSON("a" << 1));
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
- runQuery(fromjson(
- "{$and: [{$or: [{a: 1}, {b: 1}]}, "
- "{$or: [{a: 2}, {$text: {$search: 'foo'}}]}]}"));
+ << "_ftsx"
+ << 1));
+ runQuery(
+ fromjson("{$and: [{$or: [{a: 1}, {b: 1}]}, "
+ "{$or: [{a: 2}, {$text: {$search: 'foo'}}]}]}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -351,10 +378,11 @@ TEST_F(QueryPlannerTest, TextInsideOrWithAnotherUnindexableOr) {
TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
- runQuery(fromjson(
- "{$text: {$search: 'foo'}, a: {$geoIntersects: {$geometry: "
- "{type: 'Point', coordinates: [3.0, 1.0]}}}}"));
+ << "_ftsx"
+ << 1));
+ runQuery(
+ fromjson("{$text: {$search: 'foo'}, a: {$geoIntersects: {$geometry: "
+ "{type: 'Point', coordinates: [3.0, 1.0]}}}}"));
// Mandatory text index is used, and geo predicate becomes a filter.
assertNumSolutions(1U);
@@ -365,7 +393,8 @@ TEST_F(QueryPlannerTest, AndTextWithGeoNonNear) {
TEST_F(QueryPlannerTest, OrTextExact) {
addIndex(BSON("pre" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
addIndex(BSON("other" << 1));
runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: 2}]}"));
@@ -380,7 +409,8 @@ TEST_F(QueryPlannerTest, OrTextExact) {
TEST_F(QueryPlannerTest, OrTextInexactCovered) {
addIndex(BSON("pre" << 1 << "_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
addIndex(BSON("other" << 1));
runQuery(fromjson("{$or: [{$text: {$search: 'dave'}, pre: 3}, {other: /bar/}]}"));
@@ -395,7 +425,8 @@ TEST_F(QueryPlannerTest, OrTextInexactCovered) {
TEST_F(QueryPlannerTest, TextCaseSensitive) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{$text: {$search: 'blah', $caseSensitive: true}}"));
assertNumSolutions(1);
@@ -405,7 +436,8 @@ TEST_F(QueryPlannerTest, TextCaseSensitive) {
TEST_F(QueryPlannerTest, TextDiacriticSensitive) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuery(fromjson("{$text: {$search: 'blah', $diacriticSensitive: true}}"));
assertNumSolutions(1);
@@ -415,7 +447,8 @@ TEST_F(QueryPlannerTest, TextDiacriticSensitive) {
TEST_F(QueryPlannerTest, SortKeyMetaProjectionWithTextScoreMetaSort) {
addIndex(BSON("_fts"
<< "text"
- << "_ftsx" << 1));
+ << "_ftsx"
+ << 1));
runQuerySortProj(fromjson("{$text: {$search: 'foo'}}"),
fromjson("{a: {$meta: 'textScore'}}"),
diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h
index f64e5d263dc..0a63f8a10e0 100644
--- a/src/mongo/db/query/query_solution.h
+++ b/src/mongo/db/query/query_solution.h
@@ -30,9 +30,9 @@
#include <memory>
+#include "mongo/db/fts/fts_query.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
-#include "mongo/db/fts/fts_query.h"
#include "mongo/db/query/index_bounds.h"
#include "mongo/db/query/plan_cache.h"
#include "mongo/db/query/stage_types.h"
diff --git a/src/mongo/db/query/query_solution_test.cpp b/src/mongo/db/query/query_solution_test.cpp
index ac1b6abbac1..e5d01e4a091 100644
--- a/src/mongo/db/query/query_solution_test.cpp
+++ b/src/mongo/db/query/query_solution_test.cpp
@@ -259,8 +259,10 @@ std::unique_ptr<ParsedProjection> createParsedProjection(const BSONObj& query,
Status status = ParsedProjection::make(
projObj, queryMatchExpr.getValue().get(), &out, ExtensionsCallbackDisallowExtensions());
if (!status.isOK()) {
- FAIL(mongoutils::str::stream() << "failed to parse projection " << projObj
- << " (query: " << query << "): " << status.toString());
+ FAIL(mongoutils::str::stream() << "failed to parse projection " << projObj << " (query: "
+ << query
+ << "): "
+ << status.toString());
}
ASSERT(out);
return std::unique_ptr<ParsedProjection>(out);
diff --git a/src/mongo/db/query/stage_builder.cpp b/src/mongo/db/query/stage_builder.cpp
index 74e5c58d4e7..2701dc420ec 100644
--- a/src/mongo/db/query/stage_builder.cpp
+++ b/src/mongo/db/query/stage_builder.cpp
@@ -32,6 +32,8 @@
#include "mongo/db/query/stage_builder.h"
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/database.h"
#include "mongo/db/client.h"
#include "mongo/db/exec/and_hash.h"
#include "mongo/db/exec/and_sorted.h"
@@ -48,14 +50,12 @@
#include "mongo/db/exec/or.h"
#include "mongo/db/exec/projection.h"
#include "mongo/db/exec/shard_filter.h"
+#include "mongo/db/exec/skip.h"
#include "mongo/db/exec/sort.h"
#include "mongo/db/exec/sort_key_generator.h"
-#include "mongo/db/exec/skip.h"
#include "mongo/db/exec/text.h"
#include "mongo/db/index/fts_access_method.h"
#include "mongo/db/matcher/extensions_callback_real.h"
-#include "mongo/db/catalog/collection.h"
-#include "mongo/db/catalog/database.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/range_arithmetic.h b/src/mongo/db/range_arithmetic.h
index 0032fc5b996..09682ada033 100644
--- a/src/mongo/db/range_arithmetic.h
+++ b/src/mongo/db/range_arithmetic.h
@@ -28,8 +28,8 @@
#pragma once
-#include <string>
#include <map>
+#include <string>
#include <vector>
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/db/range_deleter.cpp b/src/mongo/db/range_deleter.cpp
index 8d05a926991..666c8ee45a9 100644
--- a/src/mongo/db/range_deleter.cpp
+++ b/src/mongo/db/range_deleter.cpp
@@ -36,8 +36,8 @@
#include <memory>
#include "mongo/db/client.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/repl/replication_coordinator_global.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/write_concern_options.h"
#include "mongo/util/concurrency/synchronization.h"
#include "mongo/util/exit.h"
diff --git a/src/mongo/db/repair_database.cpp b/src/mongo/db/repair_database.cpp
index 9039f488f95..8e726434f7c 100644
--- a/src/mongo/db/repair_database.cpp
+++ b/src/mongo/db/repair_database.cpp
@@ -36,10 +36,10 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/bson_validate.h"
#include "mongo/db/background.h"
-#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/collection.h"
-#include "mongo/db/catalog/database_catalog_entry.h"
+#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/database.h"
+#include "mongo/db/catalog/database_catalog_entry.h"
#include "mongo/db/catalog/database_holder.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/catalog/index_create.h"
@@ -77,7 +77,10 @@ Status rebuildIndexesOnCollection(OperationContext* txn,
return Status(
ErrorCodes::CannotCreateIndex,
str::stream()
- << "Cannot rebuild index " << spec << ": " << keyStatus.reason()
+ << "Cannot rebuild index "
+ << spec
+ << ": "
+ << keyStatus.reason()
<< " For more info see http://dochub.mongodb.org/core/index-validation");
}
}
@@ -194,8 +197,8 @@ Status repairDatabase(OperationContext* txn,
if (engine->isMmapV1()) {
// MMAPv1 is a layering violation so it implements its own repairDatabase.
- return static_cast<MMAPV1Engine*>(engine)
- ->repairDatabase(txn, dbName, preserveClonedFilesOnFailure, backupOriginalFiles);
+ return static_cast<MMAPV1Engine*>(engine)->repairDatabase(
+ txn, dbName, preserveClonedFilesOnFailure, backupOriginalFiles);
}
// These are MMAPv1 specific
diff --git a/src/mongo/db/repl/applier_test.cpp b/src/mongo/db/repl/applier_test.cpp
index 2b49a92dc19..8ef0bf4db30 100644
--- a/src/mongo/db/repl/applier_test.cpp
+++ b/src/mongo/db/repl/applier_test.cpp
@@ -304,13 +304,16 @@ TEST_F(ApplierTest, ApplyOperationSuccessful) {
Applier::Operations operationsToApply{
OplogEntry(BSON("op"
<< "a"
- << "ts" << Timestamp(Seconds(123), 0))),
+ << "ts"
+ << Timestamp(Seconds(123), 0))),
OplogEntry(BSON("op"
<< "b"
- << "ts" << Timestamp(Seconds(456), 0))),
+ << "ts"
+ << Timestamp(Seconds(456), 0))),
OplogEntry(BSON("op"
<< "c"
- << "ts" << Timestamp(Seconds(789), 0))),
+ << "ts"
+ << Timestamp(Seconds(789), 0))),
};
stdx::mutex mutex;
StatusWith<Timestamp> result = getDetectableErrorStatus();
@@ -352,13 +355,16 @@ void ApplierTest::_testApplyOperationFailed(size_t opIndex, stdx::function<Statu
Applier::Operations operationsToApply{
OplogEntry(BSON("op"
<< "a"
- << "ts" << Timestamp(Seconds(123), 0))),
+ << "ts"
+ << Timestamp(Seconds(123), 0))),
OplogEntry(BSON("op"
<< "b"
- << "ts" << Timestamp(Seconds(456), 0))),
+ << "ts"
+ << Timestamp(Seconds(456), 0))),
OplogEntry(BSON("op"
<< "c"
- << "ts" << Timestamp(Seconds(789), 0))),
+ << "ts"
+ << Timestamp(Seconds(789), 0))),
};
stdx::mutex mutex;
StatusWith<Timestamp> result = getDetectableErrorStatus();
@@ -403,12 +409,11 @@ TEST_F(ApplierTest, ApplyOperationFailedOnFirstOperation) {
}
TEST_F(ApplierTest, ApplyOperationThrowsExceptionOnFirstOperation) {
- _testApplyOperationFailed(0U,
- []() {
- uasserted(ErrorCodes::OperationFailed, "");
- MONGO_UNREACHABLE;
- return Status(ErrorCodes::InternalError, "unreachable");
- });
+ _testApplyOperationFailed(0U, []() {
+ uasserted(ErrorCodes::OperationFailed, "");
+ MONGO_UNREACHABLE;
+ return Status(ErrorCodes::InternalError, "unreachable");
+ });
}
TEST_F(ApplierTest, ApplyOperationFailedOnSecondOperation) {
@@ -416,12 +421,11 @@ TEST_F(ApplierTest, ApplyOperationFailedOnSecondOperation) {
}
TEST_F(ApplierTest, ApplyOperationThrowsExceptionOnSecondOperation) {
- _testApplyOperationFailed(1U,
- []() {
- uasserted(ErrorCodes::OperationFailed, "");
- MONGO_UNREACHABLE;
- return Status(ErrorCodes::InternalError, "unreachable");
- });
+ _testApplyOperationFailed(1U, []() {
+ uasserted(ErrorCodes::OperationFailed, "");
+ MONGO_UNREACHABLE;
+ return Status(ErrorCodes::InternalError, "unreachable");
+ });
}
TEST_F(ApplierTest, ApplyOperationFailedOnLastOperation) {
@@ -429,12 +433,11 @@ TEST_F(ApplierTest, ApplyOperationFailedOnLastOperation) {
}
TEST_F(ApplierTest, ApplyOperationThrowsExceptionOnLastOperation) {
- _testApplyOperationFailed(2U,
- []() {
- uasserted(ErrorCodes::OperationFailed, "");
- MONGO_UNREACHABLE;
- return Status(ErrorCodes::InternalError, "unreachable");
- });
+ _testApplyOperationFailed(2U, []() {
+ uasserted(ErrorCodes::OperationFailed, "");
+ MONGO_UNREACHABLE;
+ return Status(ErrorCodes::InternalError, "unreachable");
+ });
}
class ApplyUntilAndPauseTest : public ApplierTest {};
@@ -454,8 +457,8 @@ TEST_F(ApplyUntilAndPauseTest, NoOperationsInRange) {
auto result = applyUntilAndPause(
&getReplExecutor(),
{
- OplogEntry(BSON("ts" << Timestamp(Seconds(456), 0))),
- OplogEntry(BSON("ts" << Timestamp(Seconds(789), 0))),
+ OplogEntry(BSON("ts" << Timestamp(Seconds(456), 0))),
+ OplogEntry(BSON("ts" << Timestamp(Seconds(789), 0))),
},
[](OperationContext* txn, const OplogEntry& operation) { return Status::OK(); },
Timestamp(Seconds(123), 0),
@@ -594,13 +597,16 @@ void _testApplyUntilAndPauseDiscardOperations(ReplicationExecutor* executor,
Applier::Operations operationsToApply{
OplogEntry(BSON("op"
<< "a"
- << "ts" << Timestamp(Seconds(123), 0))),
+ << "ts"
+ << Timestamp(Seconds(123), 0))),
OplogEntry(BSON("op"
<< "b"
- << "ts" << Timestamp(Seconds(456), 0))),
+ << "ts"
+ << Timestamp(Seconds(456), 0))),
OplogEntry(BSON("op"
<< "c"
- << "ts" << Timestamp(Seconds(789), 0))),
+ << "ts"
+ << Timestamp(Seconds(789), 0))),
};
stdx::mutex mutex;
StatusWith<Timestamp> completionResult = ApplyUntilAndPauseTest::getDetectableErrorStatus();
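The applier hunks above pull a trailing lambda argument up onto the call line, the layout clang-format prefers when the last argument is a callable. A minimal sketch with hypothetical names (runWithCallback stands in for a helper like _testApplyOperationFailed):

#include <cstddef>
#include <functional>
#include <iostream>

// Hypothetical stand-in for the test helper: an index plus a callback
// supplied at the call site.
void runWithCallback(std::size_t index, const std::function<int()>& fn) {
    std::cout << "op " << index << " -> " << fn() << '\n';
}

int main() {
    // Post-format layout: the lambda opens on the call line and its body is
    // indented one level, rather than hanging under the first argument.
    runWithCallback(0, []() {
        return 42;
    });
    return 0;
}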
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.cpp b/src/mongo/db/repl/base_cloner_test_fixture.cpp
index ea57b00133a..90c845c62f0 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.cpp
+++ b/src/mongo/db/repl/base_cloner_test_fixture.cpp
@@ -32,8 +32,8 @@
#include <memory>
-#include "mongo/stdx/thread.h"
#include "mongo/db/jsobj.h"
+#include "mongo/stdx/thread.h"
namespace mongo {
namespace repl {
@@ -45,7 +45,8 @@ const HostAndPort BaseClonerTest::target("localhost", -1);
const NamespaceString BaseClonerTest::nss("db.coll");
const BSONObj BaseClonerTest::idIndexSpec = BSON("v" << 1 << "key" << BSON("_id" << 1) << "name"
<< "_id_"
- << "ns" << nss.ns());
+ << "ns"
+ << nss.ns());
// static
BSONObj BaseClonerTest::createCursorResponse(CursorId cursorId,
diff --git a/src/mongo/db/repl/base_cloner_test_fixture.h b/src/mongo/db/repl/base_cloner_test_fixture.h
index cab5c517916..1451adb4960 100644
--- a/src/mongo/db/repl/base_cloner_test_fixture.h
+++ b/src/mongo/db/repl/base_cloner_test_fixture.h
@@ -38,8 +38,8 @@
#include "mongo/db/repl/collection_cloner.h"
#include "mongo/db/repl/replication_executor_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
-#include "mongo/stdx/mutex.h"
#include "mongo/stdx/condition_variable.h"
+#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index c5a5b08a0e7..fb6e689a019 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -304,8 +304,8 @@ void BackgroundSync::_produce(
log() << "Our newest OpTime : " << lastOpTimeFetched;
log() << "Earliest OpTime available is " << syncSourceResp.earliestOpTimeSeen;
log() << "See http://dochub.mongodb.org/core/resyncingaverystalereplicasetmember";
- StorageInterface::get(txn)
- ->setMinValid(txn, {lastOpTimeFetched, syncSourceResp.earliestOpTimeSeen});
+ StorageInterface::get(txn)->setMinValid(
+ txn, {lastOpTimeFetched, syncSourceResp.earliestOpTimeSeen});
auto status = _replCoord->setMaintenanceMode(true);
if (!status.isOK()) {
warning() << "Failed to transition into maintenance mode.";
@@ -439,10 +439,11 @@ void BackgroundSync::_produce(
if (!boundaries.start.isNull() || boundaries.end > lastApplied) {
fassertNoTrace(18750,
Status(ErrorCodes::UnrecoverableRollbackError,
- str::stream()
- << "need to rollback, but in inconsistent state. "
- << "minvalid: " << boundaries.end.toString()
- << " > our last optime: " << lastApplied.toString()));
+ str::stream() << "need to rollback, but in inconsistent state. "
+ << "minvalid: "
+ << boundaries.end.toString()
+ << " > our last optime: "
+ << lastApplied.toString()));
}
_rollback(txn, source, getConnection);
diff --git a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
index b8da58e4372..86527def28a 100644
--- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
+++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp
@@ -46,18 +46,18 @@
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/hostandport.h"
-#define ASSERT_REASON_CONTAINS(STATUS, PATTERN) \
- do { \
- const mongo::Status s_ = (STATUS); \
- ASSERT_FALSE(s_.reason().find(PATTERN) == std::string::npos) \
- << #STATUS ".reason() == " << s_.reason(); \
+#define ASSERT_REASON_CONTAINS(STATUS, PATTERN) \
+ do { \
+ const mongo::Status s_ = (STATUS); \
+ ASSERT_FALSE(s_.reason().find(PATTERN) == std::string::npos) << #STATUS ".reason() == " \
+ << s_.reason(); \
} while (false)
-#define ASSERT_NOT_REASON_CONTAINS(STATUS, PATTERN) \
- do { \
- const mongo::Status s_ = (STATUS); \
- ASSERT_TRUE(s_.reason().find(PATTERN) == std::string::npos) \
- << #STATUS ".reason() == " << s_.reason(); \
+#define ASSERT_NOT_REASON_CONTAINS(STATUS, PATTERN) \
+ do { \
+ const mongo::Status s_ = (STATUS); \
+ ASSERT_TRUE(s_.reason().find(PATTERN) == std::string::npos) << #STATUS ".reason() == " \
+ << s_.reason(); \
} while (false)
namespace mongo {
@@ -155,7 +155,9 @@ ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBson) {
TEST_F(CheckQuorumForInitiate, ValidSingleNodeSet) {
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1"))));
startQuorumCheck(config, 0);
@@ -166,7 +168,9 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckCanceledByShutdown) {
_executor->shutdown();
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1"))));
startQuorumCheck(config, 0);
@@ -177,18 +181,21 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSeveralDownNodes) {
// In this test, "we" are host "h3:1". All other nodes time out on
// their heartbeat request, and so the quorum check for initiate
// will fail because some members were unavailable.
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
- << BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
startQuorumCheck(config, 2);
_net->enterNetwork();
const Date_t startDate = _net->now();
@@ -231,15 +238,19 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckSuccessForFiveNodes) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -274,18 +285,25 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToOneDownNode) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
<< "h2:1"
- << "priority" << 0 << "votes" << 0)
+ << "priority"
+ << 0
+ << "votes"
+ << 0)
<< BSON("_id" << 3 << "host"
- << "h3:1") << BSON("_id" << 4 << "host"
- << "h4:1")
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1")
<< BSON("_id" << 5 << "host"
- << "h5:1") << BSON("_id" << 6 << "host"
- << "h6:1"))));
+ << "h5:1")
+ << BSON("_id" << 6 << "host"
+ << "h6:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -334,15 +352,19 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetNameMismatch) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -392,16 +414,21 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))
- << "settings" << BSON("replicaSetId" << replicaSetId)));
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))
+ << "settings"
+ << BSON("replicaSetId" << replicaSetId)));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -449,8 +476,10 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToSetIdMismatch) {
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible, status);
ASSERT_REASON_CONTAINS(status,
str::stream() << "Our replica set ID of " << replicaSetId
- << " did not match that of " << incompatibleHost.toString()
- << ", which is " << unexpectedId);
+ << " did not match that of "
+ << incompatibleHost.toString()
+ << ", which is "
+ << unexpectedId);
ASSERT_NOT_REASON_CONTAINS(status, "h1:1");
ASSERT_NOT_REASON_CONTAINS(status, "h2:1");
ASSERT_NOT_REASON_CONTAINS(status, "h3:1");
@@ -466,15 +495,19 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -495,7 +528,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNode) {
startDate + Milliseconds(10),
ResponseStatus(RemoteCommandResponse(BSON("ok" << 0 << "set"
<< "rs0"
- << "v" << 1),
+ << "v"
+ << 1),
BSONObj(),
Milliseconds(8))));
} else {
@@ -527,15 +561,19 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespo
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -556,7 +594,8 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToInitializedNodeOnlyOneRespo
startDate + Milliseconds(10),
ResponseStatus(RemoteCommandResponse(BSON("ok" << 0 << "set"
<< "rs0"
- << "v" << 1),
+ << "v"
+ << 1),
BSONObj(),
Milliseconds(8))));
} else {
@@ -583,15 +622,19 @@ TEST_F(CheckQuorumForInitiate, QuorumCheckFailedDueToNodeWithData) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
- << "h4:1") << BSON("_id" << 5 << "host"
- << "h5:1"))));
+ << "h4:1")
+ << BSON("_id" << 5 << "host"
+ << "h5:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -637,12 +680,15 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1"))));
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -663,7 +709,8 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToHigherConfigVersion) {
startDate + Milliseconds(10),
ResponseStatus(RemoteCommandResponse(BSON("ok" << 0 << "set"
<< "rs0"
- << "v" << 5),
+ << "v"
+ << 5),
BSONObj(),
Milliseconds(8))));
} else {
@@ -688,12 +735,15 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckVetoedDueToIncompatibleSetName) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1"))));
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))));
const int myConfigIndex = 2;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -740,18 +790,27 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToInsufficientVoters) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "votes" << 0 << "priority" << 0))));
+ << "votes"
+ << 0
+ << "priority"
+ << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -797,18 +856,23 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckFailsDueToNoElectableNodeResponding) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "priority" << 0)
+ << "priority"
+ << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "priority" << 0))));
+ << "priority"
+ << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
@@ -850,18 +914,27 @@ TEST_F(CheckQuorumForReconfig, QuorumCheckSucceedsWithAsSoonAsPossible) {
const ReplicaSetConfig rsConfig =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
<< BSON("_id" << 4 << "host"
<< "h4:1"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 5 << "host"
<< "h5:1"
- << "votes" << 0 << "priority" << 0))));
+ << "votes"
+ << 0
+ << "priority"
+ << 0))));
const int myConfigIndex = 3;
const BSONObj hbRequest = makeHeartbeatRequest(rsConfig, myConfigIndex);
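The ASSERT_REASON_CONTAINS hunks near the top of this file only realign the backslash continuations; the macro shape itself is the usual do { ... } while (false) idiom, which makes a multi-statement macro behave as a single statement. A simplified standalone sketch (EXPECT_CONTAINS is hypothetical, not the test macro above):

#include <iostream>
#include <string>

// The do/while(false) wrapper keeps the macro safe after an unbraced if,
// and the trailing backslashes are exactly what clang-format realigns.
#define EXPECT_CONTAINS(HAYSTACK, NEEDLE)                          \
    do {                                                           \
        const std::string s_ = (HAYSTACK);                         \
        if (s_.find(NEEDLE) == std::string::npos)                  \
            std::cerr << #HAYSTACK " missing: " << NEEDLE << '\n'; \
    } while (false)

int main() {
    EXPECT_CONTAINS(std::string("replica set ID mismatch"), "mismatch");
    return 0;
}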
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 95c2fd66baf..ceae2886956 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -107,8 +107,8 @@ std::string CollectionCloner::getDiagnosticString() const {
output << " active: " << _active;
output << " listIndexes fetcher: " << _listIndexesFetcher.getDiagnosticString();
output << " find fetcher: " << _findFetcher.getDiagnosticString();
- output << " database worked callback handle: " << (_dbWorkCallbackHandle.isValid() ? "valid"
- : "invalid");
+ output << " database worked callback handle: "
+ << (_dbWorkCallbackHandle.isValid() ? "valid" : "invalid");
return output;
}
diff --git a/src/mongo/db/repl/collection_cloner.h b/src/mongo/db/repl/collection_cloner.h
index cf69d7f44ef..a7d9000bbfb 100644
--- a/src/mongo/db/repl/collection_cloner.h
+++ b/src/mongo/db/repl/collection_cloner.h
@@ -40,8 +40,8 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/base_cloner.h"
#include "mongo/db/repl/replication_executor.h"
-#include "mongo/stdx/functional.h"
#include "mongo/stdx/condition_variable.h"
+#include "mongo/stdx/functional.h"
#include "mongo/stdx/mutex.h"
#include "mongo/util/net/hostandport.h"
diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp
index 5ac2c71c992..c0320dc16b6 100644
--- a/src/mongo/db/repl/collection_cloner_test.cpp
+++ b/src/mongo/db/repl/collection_cloner_test.cpp
@@ -140,7 +140,8 @@ TEST_F(CollectionClonerTest, RemoteCollectionMissing) {
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< ""
- << "code" << ErrorCodes::NamespaceNotFound));
+ << "code"
+ << ErrorCodes::NamespaceNotFound));
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, getStatus().code());
ASSERT_FALSE(collectionCloner->isActive());
@@ -238,10 +239,12 @@ TEST_F(CollectionClonerTest, BeginCollection) {
const std::vector<BSONObj> specs = {idIndexSpec,
BSON("v" << 1 << "key" << BSON("a" << 1) << "name"
<< "a_1"
- << "ns" << nss.ns()),
+ << "ns"
+ << nss.ns()),
BSON("v" << 1 << "key" << BSON("b" << 1) << "name"
<< "b_1"
- << "ns" << nss.ns())};
+ << "ns"
+ << nss.ns())};
processNetworkResponse(createListIndexesResponse(1, BSON_ARRAY(specs[0] << specs[1])));
@@ -329,7 +332,8 @@ TEST_F(CollectionClonerTest, FindCommandFailed) {
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< ""
- << "code" << ErrorCodes::CursorNotFound));
+ << "code"
+ << ErrorCodes::CursorNotFound));
ASSERT_EQUALS(ErrorCodes::CursorNotFound, getStatus().code());
ASSERT_FALSE(collectionCloner->isActive());
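The BSON() reflows in this file and in the quorum tests above put one << operand per line because the whole builder expression is a single operator<< chain. MiniBuilder below is an illustrative standalone analogue of that pattern, not MongoDB's BSONObjBuilder:

#include <iostream>
#include <sstream>
#include <string>

// Tiny stand-in for the BSON() builder macro's chained operator<<: each
// operand extends the object under construction, which is why clang-format
// treats the whole expression as one breakable << chain.
class MiniBuilder {
public:
    MiniBuilder& operator<<(const std::string& v) {
        _out << (_first ? "" : ", ") << v;
        _first = false;
        return *this;
    }
    MiniBuilder& operator<<(int v) {
        _out << (_first ? "" : ", ") << v;
        _first = false;
        return *this;
    }
    std::string str() const {
        return "{" + _out.str() + "}";
    }

private:
    std::ostringstream _out;
    bool _first = true;
};

int main() {
    // One operand per line, mirroring the post-format layout above.
    MiniBuilder b;
    b << "_id"
      << "rs0"
      << "version"
      << 1;
    std::cout << b.str() << '\n';  // prints: {_id, rs0, version, 1}
    return 0;
}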
diff --git a/src/mongo/db/repl/data_replicator.cpp b/src/mongo/db/repl/data_replicator.cpp
index f09013af383..ff99eb594b2 100644
--- a/src/mongo/db/repl/data_replicator.cpp
+++ b/src/mongo/db/repl/data_replicator.cpp
@@ -775,8 +775,8 @@ void DataReplicator::_onDataClonerFinish(const Status& status) {
return;
}
- BSONObj query = BSON("find" << _opts.remoteOplogNS.coll() << "sort" << BSON("$natural" << -1)
- << "limit" << 1);
+ BSONObj query = BSON(
+ "find" << _opts.remoteOplogNS.coll() << "sort" << BSON("$natural" << -1) << "limit" << 1);
TimestampStatus timestampStatus(ErrorCodes::BadValue, "");
_tmpFetcher = stdx::make_unique<Fetcher>(
diff --git a/src/mongo/db/repl/data_replicator.h b/src/mongo/db/repl/data_replicator.h
index e1c491ebd4d..bde976acd9c 100644
--- a/src/mongo/db/repl/data_replicator.h
+++ b/src/mongo/db/repl/data_replicator.h
@@ -37,10 +37,10 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/timestamp.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/repl/multiapplier.h"
#include "mongo/db/repl/collection_cloner.h"
-#include "mongo/db/repl/database_cloner.h"
#include "mongo/db/repl/data_replicator_external_state.h"
+#include "mongo/db/repl/database_cloner.h"
+#include "mongo/db/repl/multiapplier.h"
#include "mongo/db/repl/oplog_fetcher.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_executor.h"
diff --git a/src/mongo/db/repl/data_replicator_test.cpp b/src/mongo/db/repl/data_replicator_test.cpp
index 300100d4726..43e42f7cc5e 100644
--- a/src/mongo/db/repl/data_replicator_test.cpp
+++ b/src/mongo/db/repl/data_replicator_test.cpp
@@ -40,18 +40,18 @@
#include "mongo/db/repl/data_replicator_external_state_mock.h"
#include "mongo/db/repl/member_state.h"
#include "mongo/db/repl/optime.h"
-#include "mongo/db/repl/update_position_args.h"
-#include "mongo/db/repl/replication_executor_test_fixture.h"
#include "mongo/db/repl/replication_executor.h"
+#include "mongo/db/repl/replication_executor_test_fixture.h"
#include "mongo/db/repl/reporter.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/db/repl/storage_interface_mock.h"
-#include "mongo/db/repl/sync_source_selector.h"
#include "mongo/db/repl/sync_source_resolver.h"
+#include "mongo/db/repl/sync_source_selector.h"
+#include "mongo/db/repl/update_position_args.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/stdx/mutex.h"
-#include "mongo/util/fail_point_service.h"
#include "mongo/util/concurrency/thread_name.h"
+#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -105,8 +105,9 @@ public:
* clear/reset state
*/
void reset() {
- _rollbackFn = [](OperationContext*, const OpTime&, const HostAndPort&)
- -> Status { return Status::OK(); };
+ _rollbackFn = [](OperationContext*, const OpTime&, const HostAndPort&) -> Status {
+ return Status::OK();
+ };
_setMyLastOptime = [this](const OpTime& opTime) { _myLastOpTime = opTime; };
_myLastOpTime = OpTime();
_memberState = MemberState::RS_UNKNOWN;
@@ -198,7 +199,7 @@ protected:
options.prepareReplSetUpdatePositionCommandFn =
[](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle commandStyle)
- -> StatusWith<BSONObj> { return BSON(UpdatePositionArgs::kCommandFieldName << 1); };
+ -> StatusWith<BSONObj> { return BSON(UpdatePositionArgs::kCommandFieldName << 1); };
options.getMyLastOptime = [this]() { return _myLastOpTime; };
options.setMyLastOptime = [this](const OpTime& opTime) { _setMyLastOptime(opTime); };
options.setFollowerMode = [this](const MemberState& state) {
@@ -209,13 +210,17 @@ protected:
options.syncSourceSelector = this;
options.getReplSetConfig = []() {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "myset"
- << "version" << 1 << "protocolVersion" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
- << BSON("electionTimeoutMillis" << 10000))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "myset"
+ << "version"
+ << 1
+ << "protocolVersion"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings"
+ << BSON("electionTimeoutMillis" << 10000))));
return config;
};
@@ -333,10 +338,9 @@ protected:
_storage.beginCollectionFn = _beginCollectionFn;
_storage.insertDocumentsFn = _insertCollectionFn;
- _storage.insertMissingDocFn =
- [&](OperationContext* txn, const NamespaceString& nss, const BSONObj& doc) {
- return Status::OK();
- };
+ _storage.insertMissingDocFn = [&](OperationContext* txn,
+ const NamespaceString& nss,
+ const BSONObj& doc) { return Status::OK(); };
dr->_setInitialSyncStorageInterface(&_storage);
_isbr.reset(new InitialSyncBackgroundRunner(dr));
@@ -366,11 +370,15 @@ protected:
const long long cursorId = cmdElem.numberLong();
if (isGetMore && cursorId == 1LL) {
// process getmore requests from the oplog fetcher
- auto respBSON = fromjson(str::stream()
- << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs'"
- " , nextBatch:[{ts:Timestamp(" << ++c
- << ",1), h:1, ns:'test.a', v:" << OplogEntry::kOplogVersion
- << ", op:'u', o2:{_id:" << c << "}, o:{$set:{a:1}}}]}}");
+ auto respBSON =
+ fromjson(str::stream() << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs'"
+ " , nextBatch:[{ts:Timestamp("
+ << ++c
+ << ",1), h:1, ns:'test.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'u', o2:{_id:"
+ << c
+ << "}, o:{$set:{a:1}}}]}}");
net->scheduleResponse(
noi,
net->now(),
@@ -446,47 +454,50 @@ TEST_F(InitialSyncTest, Complete) {
*
*/
- const std::vector<BSONObj> responses = {
- // get rollback id
- fromjson(str::stream() << "{ok: 1, rbid:1}"),
- // get latest oplog ts
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, a:1}}]}}"),
- // oplog fetcher find
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, a:1}}]}}"),
- // Clone Start
- // listDatabases
- fromjson("{ok:1, databases:[{name:'a'}]}"),
- // listCollections for "a"
- fromjson(
- "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listCollections', firstBatch:["
- "{name:'a', options:{}} "
- "]}}"),
- // listIndexes:a
- fromjson(str::stream()
- << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
- "{v:" << OplogEntry::kOplogVersion
- << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}"),
- // find:a
- fromjson(
- "{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
- "{_id:1, a:1} "
- "]}}"),
- // Clone Done
- // get latest oplog ts
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(2,2), h:1, ns:'b.c', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, c:1}}]}}"),
- // Applier starts ...
- // check for rollback
- fromjson(str::stream() << "{ok: 1, rbid:1}"),
- };
+ const std::vector<BSONObj> responses =
+ {
+ // get rollback id
+ fromjson(str::stream() << "{ok: 1, rbid:1}"),
+ // get latest oplog ts
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, a:1}}]}}"),
+ // oplog fetcher find
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, a:1}}]}}"),
+ // Clone Start
+ // listDatabases
+ fromjson("{ok:1, databases:[{name:'a'}]}"),
+ // listCollections for "a"
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listCollections', firstBatch:["
+ "{name:'a', options:{}} "
+ "]}}"),
+ // listIndexes:a
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
+ "{v:"
+ << OplogEntry::kOplogVersion
+ << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}"),
+ // find:a
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
+ "{_id:1, a:1} "
+ "]}}"),
+ // Clone Done
+ // get latest oplog ts
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(2,2), h:1, ns:'b.c', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, c:1}}]}}"),
+ // Applier starts ...
+ // check for rollback
+ fromjson(str::stream() << "{ok: 1, rbid:1}"),
+ };
// Initial sync flag should not be set before starting.
ASSERT_FALSE(StorageInterface::get(getGlobalServiceContext())
@@ -516,58 +527,61 @@ TEST_F(InitialSyncTest, Complete) {
TEST_F(InitialSyncTest, MissingDocOnMultiApplyCompletes) {
DataReplicatorOptions opts;
int applyCounter{0};
- getExternalState()->multiApplyFn =
- [&](OperationContext*, const MultiApplier::Operations& ops, MultiApplier::ApplyOperationFn)
- -> StatusWith<OpTime> {
- if (++applyCounter == 1) {
- return Status(ErrorCodes::NoMatchingDocument, "failed: missing doc.");
- }
- return ops.back().getOpTime();
- };
-
- const std::vector<BSONObj> responses = {
- // get rollback id
- fromjson(str::stream() << "{ok: 1, rbid:1}"),
- // get latest oplog ts
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, a:1}}]}}"),
- // oplog fetcher find
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
- << ", op:'u', o2:{_id:1}, o:{$set:{a:1}}}]}}"),
- // Clone Start
- // listDatabases
- fromjson("{ok:1, databases:[{name:'a'}]}"),
- // listCollections for "a"
- fromjson(
- "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listCollections', firstBatch:["
- "{name:'a', options:{}} "
- "]}}"),
- // listIndexes:a
- fromjson(str::stream()
- << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
- "{v:" << OplogEntry::kOplogVersion
- << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}"),
- // find:a -- empty
- fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:[]}}"),
- // Clone Done
- // get latest oplog ts
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(2,2), h:1, ns:'b.c', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, c:1}}]}}"),
- // Applier starts ...
- // missing doc fetch -- find:a {_id:1}
- fromjson(
- "{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
- "{_id:1, a:1} "
- "]}}"),
- // check for rollback
- fromjson(str::stream() << "{ok: 1, rbid:1}"),
+ getExternalState()->multiApplyFn = [&](OperationContext*,
+ const MultiApplier::Operations& ops,
+ MultiApplier::ApplyOperationFn) -> StatusWith<OpTime> {
+ if (++applyCounter == 1) {
+ return Status(ErrorCodes::NoMatchingDocument, "failed: missing doc.");
+ }
+ return ops.back().getOpTime();
};
+
+ const std::vector<BSONObj> responses =
+ {
+ // get rollback id
+ fromjson(str::stream() << "{ok: 1, rbid:1}"),
+ // get latest oplog ts
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, a:1}}]}}"),
+ // oplog fetcher find
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'u', o2:{_id:1}, o:{$set:{a:1}}}]}}"),
+ // Clone Start
+ // listDatabases
+ fromjson("{ok:1, databases:[{name:'a'}]}"),
+ // listCollections for "a"
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listCollections', firstBatch:["
+ "{name:'a', options:{}} "
+ "]}}"),
+ // listIndexes:a
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
+ "{v:"
+ << OplogEntry::kOplogVersion
+ << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}"),
+ // find:a -- empty
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:[]}}"),
+ // Clone Done
+ // get latest oplog ts
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(2,2), h:1, ns:'b.c', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, c:1}}]}}"),
+ // Applier starts ...
+ // missing doc fetch -- find:a {_id:1}
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
+ "{_id:1, a:1} "
+ "]}}"),
+ // check for rollback
+ fromjson(str::stream() << "{ok: 1, rbid:1}"),
+ };
startSync();
setResponses(responses);
playResponses(true);
@@ -581,7 +595,9 @@ TEST_F(InitialSyncTest, Failpoint) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -611,12 +627,14 @@ TEST_F(InitialSyncTest, FailsOnClone) {
// get latest oplog ts
fromjson(
str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
<< ", op:'i', o:{_id:1, a:1}}]}}"),
// oplog fetcher find
fromjson(
str::stream() << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
<< ", op:'i', o:{_id:1, a:1}}]}}"),
// Clone Start
// listDatabases
@@ -631,47 +649,50 @@ TEST_F(InitialSyncTest, FailsOnClone) {
}
TEST_F(InitialSyncTest, FailOnRollback) {
- const std::vector<BSONObj> responses = {
- // get rollback id
- fromjson(str::stream() << "{ok: 1, rbid:1}"),
- // get latest oplog ts
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, a:1}}]}}"),
- // oplog fetcher find
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(1,1), h:1, ns:'a.a', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, a:1}}]}}"),
- // Clone Start
- // listDatabases
- fromjson("{ok:1, databases:[{name:'a'}]}"),
- // listCollections for "a"
- fromjson(
- "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listCollections', firstBatch:["
- "{name:'a', options:{}} "
- "]}}"),
- // listIndexes:a
- fromjson(str::stream()
- << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
- "{v:" << OplogEntry::kOplogVersion
- << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}"),
- // find:a
- fromjson(
- "{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
- "{_id:1, a:1} "
- "]}}"),
- // Clone Done
- // get latest oplog ts
- fromjson(
- str::stream() << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
- "{ts:Timestamp(2,2), h:1, ns:'b.c', v:" << OplogEntry::kOplogVersion
- << ", op:'i', o:{_id:1, c:1}}]}}"),
- // Applier starts ...
- // check for rollback
- fromjson(str::stream() << "{ok: 1, rbid:2}"),
- };
+ const std::vector<BSONObj> responses =
+ {
+ // get rollback id
+ fromjson(str::stream() << "{ok: 1, rbid:1}"),
+ // get latest oplog ts
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, a:1}}]}}"),
+ // oplog fetcher find
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(1), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, a:1}}]}}"),
+ // Clone Start
+ // listDatabases
+ fromjson("{ok:1, databases:[{name:'a'}]}"),
+ // listCollections for "a"
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listCollections', firstBatch:["
+ "{name:'a', options:{}} "
+ "]}}"),
+ // listIndexes:a
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'a.$cmd.listIndexes.a', firstBatch:["
+ "{v:"
+ << OplogEntry::kOplogVersion
+ << ", key:{_id:1}, name:'_id_', ns:'a.a'}]}}"),
+ // find:a
+ fromjson("{ok:1, cursor:{id:NumberLong(0), ns:'a.a', firstBatch:["
+ "{_id:1, a:1} "
+ "]}}"),
+ // Clone Done
+ // get latest oplog ts
+ fromjson(str::stream()
+ << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
+ "{ts:Timestamp(2,2), h:1, ns:'b.c', v:"
+ << OplogEntry::kOplogVersion
+ << ", op:'i', o:{_id:1, c:1}}]}}"),
+ // Applier starts ...
+ // check for rollback
+ fromjson(str::stream() << "{ok: 1, rbid:2}"),
+ };
startSync();
setResponses({responses});
@@ -984,26 +1005,30 @@ TEST_F(SteadyStateTest, RollbackTwoSyncSourcesSecondRollbackSucceeds) {
TEST_F(SteadyStateTest, PauseDataReplicator) {
auto lastOperationApplied = BSON("op"
<< "a"
- << "v" << OplogEntry::kOplogVersion << "ts"
+ << "v"
+ << OplogEntry::kOplogVersion
+ << "ts"
<< Timestamp(Seconds(123), 0));
auto operationToApply = BSON("op"
<< "a"
- << "v" << OplogEntry::kOplogVersion << "ts"
+ << "v"
+ << OplogEntry::kOplogVersion
+ << "ts"
<< Timestamp(Seconds(456), 0));
stdx::mutex mutex;
unittest::Barrier barrier(2U);
Timestamp lastTimestampApplied;
BSONObj operationApplied;
- getExternalState()->multiApplyFn =
- [&](OperationContext*, const MultiApplier::Operations& ops, MultiApplier::ApplyOperationFn)
- -> StatusWith<OpTime> {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- operationApplied = ops.back().raw;
- barrier.countDownAndWait();
- return ops.back().getOpTime();
- };
+ getExternalState()->multiApplyFn = [&](OperationContext*,
+ const MultiApplier::Operations& ops,
+ MultiApplier::ApplyOperationFn) -> StatusWith<OpTime> {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ operationApplied = ops.back().raw;
+ barrier.countDownAndWait();
+ return ops.back().getOpTime();
+ };
DataReplicatorOptions::SetMyLastOptimeFn oldSetMyLastOptime = _setMyLastOptime;
_setMyLastOptime = [&](const OpTime& opTime) {
oldSetMyLastOptime(opTime);
@@ -1076,26 +1101,30 @@ TEST_F(SteadyStateTest, PauseDataReplicator) {
TEST_F(SteadyStateTest, ApplyOneOperation) {
auto lastOperationApplied = BSON("op"
<< "a"
- << "v" << OplogEntry::kOplogVersion << "ts"
+ << "v"
+ << OplogEntry::kOplogVersion
+ << "ts"
<< Timestamp(Seconds(123), 0));
auto operationToApply = BSON("op"
<< "a"
- << "v" << OplogEntry::kOplogVersion << "ts"
+ << "v"
+ << OplogEntry::kOplogVersion
+ << "ts"
<< Timestamp(Seconds(456), 0));
stdx::mutex mutex;
unittest::Barrier barrier(2U);
Timestamp lastTimestampApplied;
BSONObj operationApplied;
- getExternalState()->multiApplyFn =
- [&](OperationContext*, const MultiApplier::Operations& ops, MultiApplier::ApplyOperationFn)
- -> StatusWith<OpTime> {
- stdx::lock_guard<stdx::mutex> lock(mutex);
- operationApplied = ops.back().raw;
- barrier.countDownAndWait();
- return ops.back().getOpTime();
- };
+ getExternalState()->multiApplyFn = [&](OperationContext*,
+ const MultiApplier::Operations& ops,
+ MultiApplier::ApplyOperationFn) -> StatusWith<OpTime> {
+ stdx::lock_guard<stdx::mutex> lock(mutex);
+ operationApplied = ops.back().raw;
+ barrier.countDownAndWait();
+ return ops.back().getOpTime();
+ };
DataReplicatorOptions::SetMyLastOptimeFn oldSetMyLastOptime = _setMyLastOptime;
_setMyLastOptime = [&](const OpTime& opTime) {
oldSetMyLastOptime(opTime);
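
One detail behind the fromjson(str::stream() << ...) re-wraps above: quoted fragments with no << between them are adjacent string literals, joined by the compiler into a single literal, while the << pieces are streamed at run time. A standalone sketch, with std::ostringstream standing in for str::stream() and an assumed stand-in value for OplogEntry::kOplogVersion:

    #include <iostream>
    #include <sstream>

    int main() {
        std::ostringstream ss;        // standing in for mongo::str::stream()
        const int kOplogVersion = 2;  // assumed stand-in for OplogEntry::kOplogVersion
        // The two quoted fragments below have no << between them, so the compiler
        // joins them into one literal; the streamed pieces are appended at run time.
        ss << "{ok:1, cursor:{id:NumberLong(0), ns:'local.oplog.rs', firstBatch:["
              "{ts:Timestamp(1,1), h:1, ns:'a.a', v:"
           << kOplogVersion << ", op:'i', o:{_id:1, a:1}}]}}";
        std::cout << ss.str() << '\n';
        return 0;
    }
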
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index da4d3c33887..37e53a97776 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -219,16 +219,17 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
for (auto&& info : _collectionInfos) {
BSONElement nameElement = info.getField(kNameFieldName);
if (nameElement.eoo()) {
- _finishCallback(Status(ErrorCodes::FailedToParse,
- str::stream() << "collection info must contain '"
- << kNameFieldName << "' "
- << "field : " << info));
+ _finishCallback(
+ Status(ErrorCodes::FailedToParse,
+ str::stream() << "collection info must contain '" << kNameFieldName << "' "
+ << "field : "
+ << info));
return;
}
if (nameElement.type() != mongo::String) {
- _finishCallback(Status(ErrorCodes::TypeMismatch,
- str::stream() << "'" << kNameFieldName
- << "' field must be a string: " << info));
+ _finishCallback(Status(
+ ErrorCodes::TypeMismatch,
+ str::stream() << "'" << kNameFieldName << "' field must be a string: " << info));
return;
}
const std::string collectionName = nameElement.String();
@@ -236,22 +237,27 @@ void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::QueryRes
_finishCallback(Status(ErrorCodes::DuplicateKey,
str::stream()
<< "collection info contains duplicate collection name "
- << "'" << collectionName << "': " << info));
+ << "'"
+ << collectionName
+ << "': "
+ << info));
return;
}
BSONElement optionsElement = info.getField(kOptionsFieldName);
if (optionsElement.eoo()) {
- _finishCallback(Status(ErrorCodes::FailedToParse,
- str::stream() << "collection info must contain '"
- << kOptionsFieldName << "' "
- << "field : " << info));
+ _finishCallback(Status(
+ ErrorCodes::FailedToParse,
+ str::stream() << "collection info must contain '" << kOptionsFieldName << "' "
+ << "field : "
+ << info));
return;
}
if (!optionsElement.isABSONObj()) {
_finishCallback(Status(ErrorCodes::TypeMismatch,
str::stream() << "'" << kOptionsFieldName
- << "' field must be an object: " << info));
+ << "' field must be an object: "
+ << info));
return;
}
const BSONObj optionsObj = optionsElement.Obj();
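
The _finishCallback hunks above only re-wrap how an error message is streamed into a Status. A minimal standalone sketch of that pattern; the Status here is a two-field stand-in, not mongo's real type:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Two-field stand-in for mongo's Status; ErrorCodes is reduced to a string.
    struct Status {
        std::string code;
        std::string reason;
    };

    int main() {
        const std::string kNameFieldName = "name";   // mirrors the constant above
        const std::string info = "{ options: {} }";  // a collection info doc, as text
        std::ostringstream msg;                      // plays the role of str::stream()
        msg << "collection info must contain '" << kNameFieldName << "' "
            << "field : " << info;
        Status s{"FailedToParse", msg.str()};
        std::cout << s.code << ": " << s.reason << '\n';
        return 0;
    }
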
diff --git a/src/mongo/db/repl/database_cloner.h b/src/mongo/db/repl/database_cloner.h
index 954f816cdaa..79dcf1529e2 100644
--- a/src/mongo/db/repl/database_cloner.h
+++ b/src/mongo/db/repl/database_cloner.h
@@ -37,8 +37,8 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/client/fetcher.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/repl/collection_cloner.h"
#include "mongo/db/repl/base_cloner.h"
+#include "mongo/db/repl/collection_cloner.h"
#include "mongo/db/repl/replication_executor.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/mutex.h"
diff --git a/src/mongo/db/repl/database_cloner_test.cpp b/src/mongo/db/repl/database_cloner_test.cpp
index d5494d80345..78d70018ae3 100644
--- a/src/mongo/db/repl/database_cloner_test.cpp
+++ b/src/mongo/db/repl/database_cloner_test.cpp
@@ -190,7 +190,8 @@ TEST_F(DatabaseClonerTest, InvalidListCollectionsFilter) {
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< "unknown operator"
- << "code" << ErrorCodes::BadValue));
+ << "code"
+ << ErrorCodes::BadValue));
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
ASSERT_FALSE(databaseCloner->isActive());
@@ -214,8 +215,9 @@ TEST_F(DatabaseClonerTest, ListCollectionsReturnedNoCollections) {
}
TEST_F(DatabaseClonerTest, ListCollectionsPredicate) {
- DatabaseCloner::ListCollectionsPredicateFn pred =
- [](const BSONObj& info) { return info["name"].String() != "b"; };
+ DatabaseCloner::ListCollectionsPredicateFn pred = [](const BSONObj& info) {
+ return info["name"].String() != "b";
+ };
databaseCloner.reset(new DatabaseCloner(
&getReplExecutor(),
target,
@@ -232,13 +234,16 @@ TEST_F(DatabaseClonerTest, ListCollectionsPredicate) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options" << BSONObj()),
+ << "options"
+ << BSONObj()),
BSON("name"
<< "b"
- << "options" << BSONObj()),
+ << "options"
+ << BSONObj()),
BSON("name"
<< "c"
- << "options" << BSONObj())};
+ << "options"
+ << BSONObj())};
processNetworkResponse(createListCollectionsResponse(
0, BSON_ARRAY(sourceInfos[0] << sourceInfos[1] << sourceInfos[2])));
@@ -256,10 +261,12 @@ TEST_F(DatabaseClonerTest, ListCollectionsMultipleBatches) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options" << BSONObj()),
+ << "options"
+ << BSONObj()),
BSON("name"
<< "b"
- << "options" << BSONObj())};
+ << "options"
+ << BSONObj())};
processNetworkResponse(createListCollectionsResponse(1, BSON_ARRAY(sourceInfos[0])));
ASSERT_EQUALS(getDetectableErrorStatus(), getStatus());
@@ -305,11 +312,11 @@ TEST_F(DatabaseClonerTest, CollectionInfoNameNotAString) {
TEST_F(DatabaseClonerTest, CollectionInfoNameEmpty) {
ASSERT_OK(databaseCloner->start());
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << ""
- << "options" << BSONObj()))));
+ processNetworkResponse(createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << ""
+ << "options"
+ << BSONObj()))));
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
ASSERT_STRING_CONTAINS(getStatus().reason(), "invalid collection namespace: db.");
ASSERT_FALSE(databaseCloner->isActive());
@@ -317,14 +324,15 @@ TEST_F(DatabaseClonerTest, CollectionInfoNameEmpty) {
TEST_F(DatabaseClonerTest, CollectionInfoNameDuplicate) {
ASSERT_OK(databaseCloner->start());
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options" << BSONObj())
- << BSON("name"
- << "a"
- << "options" << BSONObj()))));
+ processNetworkResponse(createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options"
+ << BSONObj())
+ << BSON("name"
+ << "a"
+ << "options"
+ << BSONObj()))));
ASSERT_EQUALS(ErrorCodes::DuplicateKey, getStatus().code());
ASSERT_STRING_CONTAINS(getStatus().reason(), "duplicate collection name 'a'");
ASSERT_FALSE(databaseCloner->isActive());
@@ -345,7 +353,8 @@ TEST_F(DatabaseClonerTest, CollectionInfoOptionsNotAnObject) {
processNetworkResponse(createListCollectionsResponse(0,
BSON_ARRAY(BSON("name"
<< "a"
- << "options" << 123))));
+ << "options"
+ << 123))));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, getStatus().code());
ASSERT_STRING_CONTAINS(getStatus().reason(), "'options' field must be an object");
ASSERT_FALSE(databaseCloner->isActive());
@@ -355,11 +364,11 @@ TEST_F(DatabaseClonerTest, InvalidCollectionOptions) {
ASSERT_OK(databaseCloner->start());
processNetworkResponse(
- createListCollectionsResponse(
- 0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options" << BSON("storageEngine" << 1)))));
+ createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options"
+ << BSON("storageEngine" << 1)))));
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
ASSERT_FALSE(databaseCloner->isActive());
@@ -380,11 +389,11 @@ TEST_F(DatabaseClonerTest, ListCollectionsReturnsEmptyCollectionName) {
stdx::bind(&DatabaseClonerTest::setStatus, this, stdx::placeholders::_1)));
ASSERT_OK(databaseCloner->start());
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << ""
- << "options" << BSONObj()))));
+ processNetworkResponse(createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << ""
+ << "options"
+ << BSONObj()))));
ASSERT_EQUALS(ErrorCodes::BadValue, getStatus().code());
ASSERT_STRING_CONTAINS(getStatus().reason(), "invalid collection namespace: db.");
@@ -397,11 +406,11 @@ TEST_F(DatabaseClonerTest, StartFirstCollectionClonerFailed) {
databaseCloner->setStartCollectionClonerFn(
[](CollectionCloner& cloner) { return Status(ErrorCodes::OperationFailed, ""); });
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options" << BSONObj()))));
+ processNetworkResponse(createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options"
+ << BSONObj()))));
ASSERT_EQUALS(ErrorCodes::OperationFailed, getStatus().code());
ASSERT_FALSE(databaseCloner->isActive());
@@ -424,14 +433,15 @@ TEST_F(DatabaseClonerTest, StartSecondCollectionClonerFailed) {
return cloner.start();
});
- processNetworkResponse(
- createListCollectionsResponse(0,
- BSON_ARRAY(BSON("name"
- << "a"
- << "options" << BSONObj())
- << BSON("name"
- << "b"
- << "options" << BSONObj()))));
+ processNetworkResponse(createListCollectionsResponse(0,
+ BSON_ARRAY(BSON("name"
+ << "a"
+ << "options"
+ << BSONObj())
+ << BSON("name"
+ << "b"
+ << "options"
+ << BSONObj()))));
processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
processNetworkResponse(createCursorResponse(0, BSONArray()));
@@ -452,10 +462,12 @@ TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options" << BSONObj()),
+ << "options"
+ << BSONObj()),
BSON("name"
<< "b"
- << "options" << BSONObj())};
+ << "options"
+ << BSONObj())};
processNetworkResponse(
createListCollectionsResponse(0, BSON_ARRAY(sourceInfos[0] << sourceInfos[1])));
@@ -466,7 +478,8 @@ TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) {
// This affects the order of the network responses.
processNetworkResponse(BSON("ok" << 0 << "errmsg"
<< ""
- << "code" << ErrorCodes::NamespaceNotFound));
+ << "code"
+ << ErrorCodes::NamespaceNotFound));
processNetworkResponse(createListIndexesResponse(0, BSON_ARRAY(idIndexSpec)));
processNetworkResponse(createCursorResponse(0, BSONArray()));
@@ -497,10 +510,12 @@ TEST_F(DatabaseClonerTest, CreateCollections) {
const std::vector<BSONObj> sourceInfos = {BSON("name"
<< "a"
- << "options" << BSONObj()),
+ << "options"
+ << BSONObj()),
BSON("name"
<< "b"
- << "options" << BSONObj())};
+ << "options"
+ << BSONObj())};
processNetworkResponse(
createListCollectionsResponse(0, BSON_ARRAY(sourceInfos[0] << sourceInfos[1])));
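
The ListCollectionsPredicateFn above is just a filter over collection-info documents, and the reformat gives its body its own lines. The same shape with standard types only (names illustrative):

    #include <algorithm>
    #include <iostream>
    #include <iterator>
    #include <string>
    #include <vector>

    int main() {
        // Same shape as the predicate above: keep every collection except "b".
        auto pred = [](const std::string& name) { return name != "b"; };
        const std::vector<std::string> names = {"a", "b", "c"};
        std::vector<std::string> kept;
        std::copy_if(names.begin(), names.end(), std::back_inserter(kept), pred);
        for (const auto& n : kept) {
            std::cout << n << ' ';  // prints: a c
        }
        std::cout << '\n';
        return 0;
    }
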
diff --git a/src/mongo/db/repl/database_task.h b/src/mongo/db/repl/database_task.h
index 29f10f2902c..bde2df64c09 100644
--- a/src/mongo/db/repl/database_task.h
+++ b/src/mongo/db/repl/database_task.h
@@ -31,8 +31,8 @@
#include <string>
#include "mongo/db/concurrency/lock_manager_defs.h"
-#include "mongo/db/repl/task_runner.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/repl/task_runner.h"
namespace mongo {
diff --git a/src/mongo/db/repl/elect_cmd_runner_test.cpp b/src/mongo/db/repl/elect_cmd_runner_test.cpp
index 1b4a82902c2..c92f931cf34 100644
--- a/src/mongo/db/repl/elect_cmd_runner_test.cpp
+++ b/src/mongo/db/repl/elect_cmd_runner_test.cpp
@@ -99,9 +99,13 @@ ReplicaSetConfig assertMakeRSConfig(const BSONObj& configBson) {
const BSONObj makeElectRequest(const ReplicaSetConfig& rsConfig, int selfIndex) {
const MemberConfig& myConfig = rsConfig.getMemberAt(selfIndex);
return BSON("replSetElect" << 1 << "set" << rsConfig.getReplSetName() << "who"
- << myConfig.getHostAndPort().toString() << "whoid"
- << myConfig.getId() << "cfgver" << rsConfig.getConfigVersion()
- << "round" << 380865962699346850ll);
+ << myConfig.getHostAndPort().toString()
+ << "whoid"
+ << myConfig.getId()
+ << "cfgver"
+ << rsConfig.getConfigVersion()
+ << "round"
+ << 380865962699346850ll);
}
BSONObj stripRound(const BSONObj& orig) {
@@ -158,7 +162,9 @@ TEST_F(ElectCmdRunnerTest, OneNode) {
// Only one node in the config.
const ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1"))));
@@ -174,7 +180,9 @@ TEST_F(ElectCmdRunnerTest, TwoNodes) {
const ReplicaSetConfig config =
assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -210,7 +218,9 @@ TEST_F(ElectCmdRunnerTest, ShuttingDown) {
// Two nodes, we are node h1. Shutdown happens while we're scheduling remote commands.
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -307,26 +317,33 @@ protected:
BSONObj threeNodesTwoArbitersConfig() {
return BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0")
<< BSON("_id" << 1 << "host"
<< "host1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "host2"
- << "arbiterOnly" << true)));
+ << "arbiterOnly"
+ << true)));
}
BSONObj basicThreeNodeConfig() {
return BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0")
<< BSON("_id" << 1 << "host"
- << "host1") << BSON("_id" << 2 << "host"
- << "host2")));
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")));
}
private:
diff --git a/src/mongo/db/repl/freshness_checker_test.cpp b/src/mongo/db/repl/freshness_checker_test.cpp
index 45fde9ef816..2d0917254ec 100644
--- a/src/mongo/db/repl/freshness_checker_test.cpp
+++ b/src/mongo/db/repl/freshness_checker_test.cpp
@@ -121,9 +121,13 @@ const BSONObj makeFreshRequest(const ReplicaSetConfig& rsConfig,
int selfIndex) {
const MemberConfig& myConfig = rsConfig.getMemberAt(selfIndex);
return BSON("replSetFresh" << 1 << "set" << rsConfig.getReplSetName() << "opTime"
- << Date_t::fromMillisSinceEpoch(lastOpTimeApplied.asLL()) << "who"
- << myConfig.getHostAndPort().toString() << "cfgver"
- << rsConfig.getConfigVersion() << "id" << myConfig.getId());
+ << Date_t::fromMillisSinceEpoch(lastOpTimeApplied.asLL())
+ << "who"
+ << myConfig.getHostAndPort().toString()
+ << "cfgver"
+ << rsConfig.getConfigVersion()
+ << "id"
+ << myConfig.getId());
}
// This is necessary because the run method must be scheduled in the Replication Executor
@@ -159,7 +163,9 @@ TEST_F(FreshnessCheckerTest, TwoNodes) {
// Two nodes, we are node h1. We are freshest, but we tie with h2.
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -177,16 +183,19 @@ TEST_F(FreshnessCheckerTest, TwoNodes) {
ASSERT_EQUALS("admin", noi->getRequest().dbname);
ASSERT_EQUALS(freshRequest, noi->getRequest().cmdObj);
ASSERT_EQUALS(HostAndPort("h1"), noi->getRequest().target);
- _net->scheduleResponse(noi,
- startDate + Milliseconds(10),
- ResponseStatus(RemoteCommandResponse(
- BSON("ok" << 1 << "id" << 2 << "set"
- << "rs0"
- << "who"
- << "h1"
- << "cfgver" << 1 << "opTime" << Date_t()),
- BSONObj(),
- Milliseconds(8))));
+ _net->scheduleResponse(
+ noi,
+ startDate + Milliseconds(10),
+ ResponseStatus(RemoteCommandResponse(BSON("ok" << 1 << "id" << 2 << "set"
+ << "rs0"
+ << "who"
+ << "h1"
+ << "cfgver"
+ << 1
+ << "opTime"
+ << Date_t()),
+ BSONObj(),
+ Milliseconds(8))));
}
_net->runUntil(startDate + Milliseconds(10));
_net->exitNetwork();
@@ -199,7 +208,9 @@ TEST_F(FreshnessCheckerTest, ShuttingDown) {
// Two nodes, we are node h1. Shutdown happens while we're scheduling remote commands.
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -222,7 +233,9 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshest) {
startCapturingLogMessages();
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -248,8 +261,12 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshest) {
<< "rs0"
<< "who"
<< "h1"
- << "cfgver" << 1 << "fresher" << true
- << "opTime" << Date_t()),
+ << "cfgver"
+ << 1
+ << "fresher"
+ << true
+ << "opTime"
+ << Date_t()),
BSONObj(),
Milliseconds(8))));
}
@@ -268,7 +285,9 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTime) {
startCapturingLogMessages();
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -295,7 +314,9 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTime) {
<< "rs0"
<< "who"
<< "h1"
- << "cfgver" << 1 << "opTime"
+ << "cfgver"
+ << 1
+ << "opTime"
<< Date_t::fromMillisSinceEpoch(Timestamp(10, 0).asLL())),
BSONObj(),
Milliseconds(8))));
@@ -314,7 +335,9 @@ TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponse) {
startCapturingLogMessages();
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -340,7 +363,10 @@ TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponse) {
<< "rs0"
<< "who"
<< "h1"
- << "cfgver" << 1 << "opTime" << 3),
+ << "cfgver"
+ << 1
+ << "opTime"
+ << 3),
BSONObj(),
Milliseconds(8))));
}
@@ -353,9 +379,8 @@ TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponse) {
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "wrong type for opTime argument in replSetFresh "
- "response: int"));
+ countLogLinesContaining("wrong type for opTime argument in replSetFresh "
+ "response: int"));
}
TEST_F(FreshnessCheckerTest, ElectVetoed) {
@@ -363,7 +388,9 @@ TEST_F(FreshnessCheckerTest, ElectVetoed) {
startCapturingLogMessages();
ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h0")
<< BSON("_id" << 2 << "host"
@@ -390,9 +417,14 @@ TEST_F(FreshnessCheckerTest, ElectVetoed) {
<< "rs0"
<< "who"
<< "h1"
- << "cfgver" << 1 << "veto" << true << "errmsg"
+ << "cfgver"
+ << 1
+ << "veto"
+ << true
+ << "errmsg"
<< "I'd rather you didn't"
- << "opTime" << Date_t::fromMillisSinceEpoch(Timestamp(0, 0).asLL())),
+ << "opTime"
+ << Date_t::fromMillisSinceEpoch(Timestamp(0, 0).asLL())),
BSONObj(),
Milliseconds(8))));
}
@@ -405,9 +437,8 @@ TEST_F(FreshnessCheckerTest, ElectVetoed) {
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "not electing self, h1:27017 would veto with "
- "'I'd rather you didn't'"));
+ countLogLinesContaining("not electing self, h1:27017 would veto with "
+ "'I'd rather you didn't'"));
}
int findIdForMember(const ReplicaSetConfig& rsConfig, const HostAndPort& host) {
@@ -419,18 +450,21 @@ int findIdForMember(const ReplicaSetConfig& rsConfig, const HostAndPort& host) {
TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestManyNodes) {
// one other responds as fresher than us
startCapturingLogMessages();
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h0")
- << BSON("_id" << 2 << "host"
- << "h1") << BSON("_id" << 3 << "host"
- << "h2")
- << BSON("_id" << 4 << "host"
- << "h3") << BSON("_id" << 5 << "host"
- << "h4"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1")
+ << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3")
+ << BSON("_id" << 5 << "host"
+ << "h4"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin(); mem != config.membersEnd();
@@ -475,18 +509,21 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestManyNodes) {
TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTimeManyNodes) {
// one other responds with a later optime than ours
startCapturingLogMessages();
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h0")
- << BSON("_id" << 2 << "host"
- << "h1") << BSON("_id" << 3 << "host"
- << "h2")
- << BSON("_id" << 4 << "host"
- << "h3") << BSON("_id" << 5 << "host"
- << "h4"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1")
+ << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3")
+ << BSON("_id" << 5 << "host"
+ << "h4"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = config.membersBegin(); mem != config.membersEnd();
@@ -545,18 +582,21 @@ TEST_F(FreshnessCheckerTest, ElectNotElectingSelfWeAreNotFreshestOpTimeManyNodes
TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponseManyNodes) {
// one other responds with "opTime" field of non-Date value, causing not freshest
startCapturingLogMessages();
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h0")
- << BSON("_id" << 2 << "host"
- << "h1") << BSON("_id" << 3 << "host"
- << "h2")
- << BSON("_id" << 4 << "host"
- << "h3") << BSON("_id" << 5 << "host"
- << "h4"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1")
+ << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3")
+ << BSON("_id" << 5 << "host"
+ << "h4"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin(); mem != config.membersEnd();
@@ -597,26 +637,28 @@ TEST_F(FreshnessCheckerTest, ElectWrongTypeInFreshnessResponseManyNodes) {
stopCapturingLogMessages();
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "wrong type for opTime argument in replSetFresh "
- "response: int"));
+ countLogLinesContaining("wrong type for opTime argument in replSetFresh "
+ "response: int"));
}
TEST_F(FreshnessCheckerTest, ElectVetoedManyNodes) {
// one other responds with veto
startCapturingLogMessages();
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h0")
- << BSON("_id" << 2 << "host"
- << "h1") << BSON("_id" << 3 << "host"
- << "h2")
- << BSON("_id" << 4 << "host"
- << "h3") << BSON("_id" << 5 << "host"
- << "h4"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1")
+ << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3")
+ << BSON("_id" << 5 << "host"
+ << "h4"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin(); mem != config.membersEnd();
@@ -657,26 +699,28 @@ TEST_F(FreshnessCheckerTest, ElectVetoedManyNodes) {
stopCapturingLogMessages();
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "not electing self, h1:27017 would veto with "
- "'I'd rather you didn't'"));
+ countLogLinesContaining("not electing self, h1:27017 would veto with "
+ "'I'd rather you didn't'"));
}
TEST_F(FreshnessCheckerTest, ElectVetoedAndTiedFreshnessManyNodes) {
// one other responds with veto and another responds with tie
startCapturingLogMessages();
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h0")
- << BSON("_id" << 2 << "host"
- << "h1") << BSON("_id" << 3 << "host"
- << "h2")
- << BSON("_id" << 4 << "host"
- << "h3") << BSON("_id" << 5 << "host"
- << "h4"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1")
+ << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3")
+ << BSON("_id" << 5 << "host"
+ << "h4"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = config.membersBegin(); mem != config.membersEnd();
@@ -726,9 +770,8 @@ TEST_F(FreshnessCheckerTest, ElectVetoedAndTiedFreshnessManyNodes) {
_net->runUntil(startDate + Milliseconds(10));
ASSERT_EQUALS(startDate + Milliseconds(10), _net->now());
ASSERT_EQUALS(0,
- countLogLinesContaining(
- "not electing self, h4:27017 would veto with '"
- "errmsg: \"I'd rather you didn't\"'"));
+ countLogLinesContaining("not electing self, h4:27017 would veto with '"
+ "errmsg: \"I'd rather you didn't\"'"));
_net->runUntil(startDate + Milliseconds(20));
ASSERT_EQUALS(startDate + Milliseconds(20), _net->now());
_net->exitNetwork();
@@ -736,24 +779,26 @@ TEST_F(FreshnessCheckerTest, ElectVetoedAndTiedFreshnessManyNodes) {
stopCapturingLogMessages();
ASSERT_EQUALS(shouldAbortElection(), FreshnessChecker::FresherNodeFound);
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "not electing self, h4:27017 would veto with "
- "'I'd rather you didn't'"));
+ countLogLinesContaining("not electing self, h4:27017 would veto with "
+ "'I'd rather you didn't'"));
}
TEST_F(FreshnessCheckerTest, ElectManyNodesNotAllRespond) {
- ReplicaSetConfig config =
- assertMakeRSConfig(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h0")
- << BSON("_id" << 2 << "host"
- << "h1") << BSON("_id" << 3 << "host"
- << "h2")
- << BSON("_id" << 4 << "host"
- << "h3") << BSON("_id" << 5 << "host"
- << "h4"))));
+ ReplicaSetConfig config = assertMakeRSConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h0")
+ << BSON("_id" << 2 << "host"
+ << "h1")
+ << BSON("_id" << 3 << "host"
+ << "h2")
+ << BSON("_id" << 4 << "host"
+ << "h3")
+ << BSON("_id" << 5 << "host"
+ << "h4"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin(); mem != config.membersEnd();
@@ -806,12 +851,15 @@ public:
ReplicaSetConfig config;
config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host0")
<< BSON("_id" << 1 << "host"
- << "host1") << BSON("_id" << 2 << "host"
- << "host2"))));
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2"))));
std::vector<HostAndPort> hosts;
for (ReplicaSetConfig::MemberIterator mem = ++config.membersBegin();
diff --git a/src/mongo/db/repl/freshness_scanner.cpp b/src/mongo/db/repl/freshness_scanner.cpp
index ed4f160ee8f..fcba850e021 100644
--- a/src/mongo/db/repl/freshness_scanner.cpp
+++ b/src/mongo/db/repl/freshness_scanner.cpp
@@ -86,8 +86,9 @@ void FreshnessScanner::Algorithm::processResponse(const RemoteCommandRequest& re
int index = _rsConfig.findMemberIndexByHostAndPort(request.target);
FreshnessInfo freshnessInfo{index, lastOpTime};
- auto cmp =
- [](const FreshnessInfo& a, const FreshnessInfo& b) { return a.opTime > b.opTime; };
+ auto cmp = [](const FreshnessInfo& a, const FreshnessInfo& b) {
+ return a.opTime > b.opTime;
+ };
auto iter =
std::upper_bound(_freshnessInfos.begin(), _freshnessInfos.end(), freshnessInfo, cmp);
_freshnessInfos.insert(iter, freshnessInfo);
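
The cmp lambda above keeps _freshnessInfos sorted by descending opTime, and std::upper_bound picks the insertion point so the insert preserves that order. A self-contained version, with opTime simplified to a long long where the real code uses repl::OpTime:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    struct FreshnessInfo {  // simplified: the real opTime is a repl::OpTime
        int index;
        long long opTime;
    };

    int main() {
        std::vector<FreshnessInfo> infos = {{0, 30}, {1, 20}, {2, 10}};  // descending
        const FreshnessInfo incoming{3, 25};
        // The comparator orders by descending opTime, matching the lambda above.
        auto cmp = [](const FreshnessInfo& a, const FreshnessInfo& b) {
            return a.opTime > b.opTime;
        };
        auto iter = std::upper_bound(infos.begin(), infos.end(), incoming, cmp);
        infos.insert(iter, incoming);
        for (const auto& fi : infos) {
            std::cout << fi.index << ':' << fi.opTime << ' ';  // 0:30 3:25 1:20 2:10
        }
        std::cout << '\n';
        return 0;
    }
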
diff --git a/src/mongo/db/repl/freshness_scanner_test.cpp b/src/mongo/db/repl/freshness_scanner_test.cpp
index 5096b4ce9f2..53314298b5b 100644
--- a/src/mongo/db/repl/freshness_scanner_test.cpp
+++ b/src/mongo/db/repl/freshness_scanner_test.cpp
@@ -58,22 +58,29 @@ public:
}
virtual void setUp() {
- ASSERT_OK(
- _config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(
- BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1") << BSON("_id" << 2 << "host"
- << "host2")
- << BSON("_id" << 3 << "host"
- << "host3"
- << "votes" << 0 << "priority" << 0)
- << BSON("_id" << 4 << "host"
- << "host4"
- << "votes" << 0 << "priority" << 0)))));
+ ASSERT_OK(_config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)))));
ASSERT_OK(_config.validate());
_net = new NetworkInterfaceMock;
diff --git a/src/mongo/db/repl/is_master_response.cpp b/src/mongo/db/repl/is_master_response.cpp
index efd8bd5466e..78fe98e45de 100644
--- a/src/mongo/db/repl/is_master_response.cpp
+++ b/src/mongo/db/repl/is_master_response.cpp
@@ -219,7 +219,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Found \"" << kIsReplicaSetFieldName
<< "\" field which should indicate that no valid config "
"is loaded, but we didn't also have an \""
- << kInfoFieldName << "\" field as we expected");
+ << kInfoFieldName
+ << "\" field as we expected");
}
}
@@ -246,7 +247,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kHostsFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String) << " but found type "
+ << typeName(String)
+ << " but found type "
<< typeName(hostElement.type()));
}
_hosts.push_back(HostAndPort(hostElement.String()));
@@ -266,7 +268,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kPassivesFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String) << " but found type "
+ << typeName(String)
+ << " but found type "
<< typeName(passiveElement.type()));
}
_passives.push_back(HostAndPort(passiveElement.String()));
@@ -286,7 +289,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Elements in \"" << kArbitersFieldName
<< "\" array of isMaster response must be of type "
- << typeName(String) << " but found type "
+ << typeName(String)
+ << " but found type "
<< typeName(arbiterElement.type()));
}
_arbiters.push_back(HostAndPort(arbiterElement.String()));
@@ -359,7 +363,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kTagsFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(String) << " but found type "
+ << typeName(String)
+ << " but found type "
<< typeName(tagsElement.type()));
}
_tags[tagElement.fieldNameStringData().toString()] = tagElement.String();
@@ -391,7 +396,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastWriteOpTimeFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Object) << " but found type "
+ << typeName(Object)
+ << " but found type "
<< typeName(lastWriteOpTimeElement.type()));
}
auto lastWriteOpTime = OpTime::parseFromOplogEntry(lastWriteOpTimeElement.Obj());
@@ -411,7 +417,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastWriteDateFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Date) << " but found type "
+ << typeName(Date)
+ << " but found type "
<< typeName(lastWriteDateElement.type()));
}
if (_lastWrite) {
@@ -431,7 +438,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastMajorityWriteOpTimeFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Object) << " but found type "
+ << typeName(Object)
+ << " but found type "
<< typeName(lastMajorityWriteOpTimeElement.type()));
}
auto lastMajorityWriteOpTime =
@@ -452,7 +460,8 @@ Status IsMasterResponse::initialize(const BSONObj& doc) {
str::stream() << "Elements in \"" << kLastMajorityWriteDateFieldName
<< "\" obj "
"of isMaster response must be of type "
- << typeName(Date) << " but found type "
+ << typeName(Date)
+ << " but found type "
<< typeName(lastMajorityWriteDateElement.type()));
}
if (_lastMajorityWrite) {
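
Each hunk in this file re-wraps the same TypeMismatch message shape: typeName(expected) versus typeName(found). A hypothetical miniature of that helper, reduced to a three-value enum (mongo's real typeName takes a BSONType):

    #include <iostream>
    #include <string>

    // Hypothetical miniature of mongo's typeName(); the real one takes a BSONType.
    enum class Type { String, Object, Date };

    std::string typeName(Type t) {
        switch (t) {
            case Type::String: return "string";
            case Type::Object: return "object";
            case Type::Date:   return "date";
        }
        return "unknown";
    }

    int main() {
        std::cout << "Elements in \"hosts\" array of isMaster response must be of type "
                  << typeName(Type::String) << " but found type "
                  << typeName(Type::Object) << '\n';
        return 0;
    }
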
diff --git a/src/mongo/db/repl/isself.cpp b/src/mongo/db/repl/isself.cpp
index 87cba6fe03b..50be2827f35 100644
--- a/src/mongo/db/repl/isself.cpp
+++ b/src/mongo/db/repl/isself.cpp
@@ -37,15 +37,15 @@
#include "mongo/base/init.h"
#include "mongo/bson/util/builder.h"
#include "mongo/client/dbclientinterface.h"
-#include "mongo/db/commands.h"
#include "mongo/db/auth/action_set.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/internal_user_auth.h"
#include "mongo/db/auth/privilege.h"
-#include "mongo/util/scopeguard.h"
+#include "mongo/db/commands.h"
#include "mongo/util/log.h"
+#include "mongo/util/scopeguard.h"
#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__sun) || \
defined(__OpenBSD__)
@@ -66,11 +66,11 @@
#endif
#elif defined(_WIN32)
+#include <Ws2tcpip.h>
#include <boost/asio/detail/socket_ops.hpp>
#include <boost/system/error_code.hpp>
#include <iphlpapi.h>
#include <winsock2.h>
-#include <Ws2tcpip.h>
#endif // defined(_WIN32)
namespace mongo {
diff --git a/src/mongo/db/repl/master_slave.cpp b/src/mongo/db/repl/master_slave.cpp
index a368b4b91c6..d7695947687 100644
--- a/src/mongo/db/repl/master_slave.cpp
+++ b/src/mongo/db/repl/master_slave.cpp
@@ -975,8 +975,8 @@ int ReplSource::_sync_pullOpLog(OperationContext* txn, int& nApplied) {
<< ((nextOpTime < syncedTo) ? "<??" : ">") << " syncedTo "
<< syncedTo.toStringLong() << '\n'
<< "time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs()) << "sec\n"
- << "tailing: " << tailing << '\n' << "data too stale, halting replication"
- << endl;
+ << "tailing: " << tailing << '\n'
+ << "data too stale, halting replication" << endl;
replInfo = replAllDead = "data too stale halted replication";
verify(syncedTo < nextOpTime);
throw SyncException();
diff --git a/src/mongo/db/repl/member_config_test.cpp b/src/mongo/db/repl/member_config_test.cpp
index b051451524c..eda31743ba7 100644
--- a/src/mongo/db/repl/member_config_test.cpp
+++ b/src/mongo/db/repl/member_config_test.cpp
@@ -62,7 +62,8 @@ TEST(MemberConfig, ParseFailsWithIllegalFieldName) {
ASSERT_EQUALS(ErrorCodes::BadValue,
mc.initialize(BSON("_id" << 0 << "host"
<< "localhost"
- << "frim" << 1),
+ << "frim"
+ << 1),
&tagConfig));
}
@@ -121,12 +122,14 @@ TEST(MemberConfig, ParseArbiterOnly) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "arbiterOnly" << 1.0),
+ << "arbiterOnly"
+ << 1.0),
&tagConfig));
ASSERT_TRUE(mc.isArbiter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "arbiterOnly" << false),
+ << "arbiterOnly"
+ << false),
&tagConfig));
ASSERT_TRUE(!mc.isArbiter());
}
@@ -136,12 +139,14 @@ TEST(MemberConfig, ParseHidden) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "hidden" << 1.0),
+ << "hidden"
+ << 1.0),
&tagConfig));
ASSERT_TRUE(mc.isHidden());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "hidden" << false),
+ << "hidden"
+ << false),
&tagConfig));
ASSERT_TRUE(!mc.isHidden());
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
@@ -157,12 +162,14 @@ TEST(MemberConfig, ParseBuildIndexes) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "buildIndexes" << 1.0),
+ << "buildIndexes"
+ << 1.0),
&tagConfig));
ASSERT_TRUE(mc.shouldBuildIndexes());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "buildIndexes" << false),
+ << "buildIndexes"
+ << false),
&tagConfig));
ASSERT_TRUE(!mc.shouldBuildIndexes());
}
@@ -172,40 +179,49 @@ TEST(MemberConfig, ParseVotes) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 1.0),
+ << "votes"
+ << 1.0),
&tagConfig));
ASSERT_TRUE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 0 << "priority" << 0),
+ << "votes"
+ << 0
+ << "priority"
+ << 0),
&tagConfig));
ASSERT_FALSE(mc.isVoter());
// For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 1.5),
+ << "votes"
+ << 1.5),
&tagConfig));
ASSERT_TRUE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 0.5),
+ << "votes"
+ << 0.5),
&tagConfig));
ASSERT_FALSE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << -0.5),
+ << "votes"
+ << -0.5),
&tagConfig));
ASSERT_FALSE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 2),
+ << "votes"
+ << 2),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << Date_t::fromMillisSinceEpoch(2)),
+ << "votes"
+ << Date_t::fromMillisSinceEpoch(2)),
&tagConfig));
}
@@ -214,24 +230,28 @@ TEST(MemberConfig, ParsePriority) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1),
+ << "priority"
+ << 1),
&tagConfig));
ASSERT_EQUALS(1.0, mc.getPriority());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 0),
+ << "priority"
+ << 0),
&tagConfig));
ASSERT_EQUALS(0.0, mc.getPriority());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 100.8),
+ << "priority"
+ << 100.8),
&tagConfig));
ASSERT_EQUALS(100.8, mc.getPriority());
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << Date_t::fromMillisSinceEpoch(2)),
+ << "priority"
+ << Date_t::fromMillisSinceEpoch(2)),
&tagConfig));
}
@@ -240,7 +260,8 @@ TEST(MemberConfig, ParseSlaveDelay) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "slaveDelay" << 100),
+ << "slaveDelay"
+ << 100),
&tagConfig));
ASSERT_EQUALS(Seconds(100), mc.getSlaveDelay());
}
@@ -250,10 +271,11 @@ TEST(MemberConfig, ParseTags) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "tags" << BSON("k1"
- << "v1"
- << "k2"
- << "v2")),
+ << "tags"
+ << BSON("k1"
+ << "v1"
+ << "k2"
+ << "v2")),
&tagConfig));
ASSERT_EQUALS(5U, mc.getNumTags());
ASSERT_EQUALS(5, std::distance(mc.tagsBegin(), mc.tagsEnd()));
@@ -284,14 +306,18 @@ TEST(MemberConfig, ValidateVotes) {
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 1.0),
+ << "votes"
+ << 1.0),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_TRUE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 0 << "priority" << 0),
+ << "votes"
+ << 0
+ << "priority"
+ << 0),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -299,21 +325,28 @@ TEST(MemberConfig, ValidateVotes) {
// For backwards compatibility, truncate 1.X to 1, and 0.X to 0 (and -0.X to 0).
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 1.5),
+ << "votes"
+ << 1.5),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_TRUE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 0.5 << "priority" << 0),
+ << "votes"
+ << 0.5
+ << "priority"
+ << 0),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << -0.5 << "priority" << 0),
+ << "votes"
+ << -0.5
+ << "priority"
+ << 0),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_FALSE(mc.isVoter());
@@ -321,13 +354,15 @@ TEST(MemberConfig, ValidateVotes) {
// Invalid values
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 2),
+ << "votes"
+ << 2),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << -1),
+ << "votes"
+ << -1),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -337,22 +372,26 @@ TEST(MemberConfig, ValidatePriorityRanges) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 0),
+ << "priority"
+ << 0),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1000),
+ << "priority"
+ << 1000),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << -1),
+ << "priority"
+ << -1),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1001),
+ << "priority"
+ << 1001),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -362,22 +401,34 @@ TEST(MemberConfig, ValidateSlaveDelays) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 0 << "slaveDelay" << 0),
+ << "priority"
+ << 0
+ << "slaveDelay"
+ << 0),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 0 << "slaveDelay" << 3600 * 10),
+ << "priority"
+ << 0
+ << "slaveDelay"
+ << 3600 * 10),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 0 << "slaveDelay" << -1),
+ << "priority"
+ << 0
+ << "slaveDelay"
+ << -1),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 0 << "slaveDelay" << 3600 * 24 * 400),
+ << "priority"
+ << 0
+ << "slaveDelay"
+ << 3600 * 24 * 400),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -387,7 +438,10 @@ TEST(MemberConfig, ValidatePriorityAndSlaveDelayRelationship) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1 << "slaveDelay" << 60),
+ << "priority"
+ << 1
+ << "slaveDelay"
+ << 60),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
@@ -397,12 +451,18 @@ TEST(MemberConfig, ValidatePriorityAndHiddenRelationship) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1 << "hidden" << true),
+ << "priority"
+ << 1
+ << "hidden"
+ << true),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1 << "hidden" << false),
+ << "priority"
+ << 1
+ << "hidden"
+ << false),
&tagConfig));
ASSERT_OK(mc.validate());
}
@@ -412,13 +472,19 @@ TEST(MemberConfig, ValidatePriorityAndBuildIndexesRelationship) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1 << "buildIndexes" << false),
+ << "priority"
+ << 1
+ << "buildIndexes"
+ << false),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "priority" << 1 << "buildIndexes" << true),
+ << "priority"
+ << 1
+ << "buildIndexes"
+ << true),
&tagConfig));
ASSERT_OK(mc.validate());
}
@@ -428,25 +494,38 @@ TEST(MemberConfig, ValidateArbiterVotesRelationship) {
MemberConfig mc;
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 1 << "arbiterOnly" << true),
+ << "votes"
+ << 1
+ << "arbiterOnly"
+ << true),
&tagConfig));
ASSERT_OK(mc.validate());
- ASSERT_OK(
- mc.initialize(BSON("_id" << 0 << "host"
- << "h"
- << "votes" << 0 << "priority" << 0 << "arbiterOnly" << false),
- &tagConfig));
+ ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
+ << "h"
+ << "votes"
+ << 0
+ << "priority"
+ << 0
+ << "arbiterOnly"
+ << false),
+ &tagConfig));
ASSERT_OK(mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 1 << "arbiterOnly" << false),
+ << "votes"
+ << 1
+ << "arbiterOnly"
+ << false),
&tagConfig));
ASSERT_OK(mc.validate());
ASSERT_OK(mc.initialize(BSON("_id" << 0 << "host"
<< "h"
- << "votes" << 0 << "arbiterOnly" << true),
+ << "votes"
+ << 0
+ << "arbiterOnly"
+ << true),
&tagConfig));
ASSERT_EQUALS(ErrorCodes::BadValue, mc.validate());
}
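
The member_config_test.cpp hunks above all follow one rule: once a BSON() stream spans multiple lines, the reformatter gives every << operand its own line instead of packing key/value pairs together. A minimal sketch of the resulting shape (hypothetical field values; assuming the BSON macro and BSONObj from mongo/db/jsobj.h):

    #include "mongo/db/jsobj.h"  // BSON macro and BSONObj

    mongo::BSONObj makeMember() {
        // One operand per line once the chain wraps; packed pairs
        // like `<< "votes" << 1.0` are no longer kept together.
        return BSON("_id" << 0 << "host"
                          << "h"
                          << "votes"
                          << 1.0);
    }
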
diff --git a/src/mongo/db/repl/old_update_position_args.cpp b/src/mongo/db/repl/old_update_position_args.cpp
index 67575d7e280..92ffd74db79 100644
--- a/src/mongo/db/repl/old_update_position_args.cpp
+++ b/src/mongo/db/repl/old_update_position_args.cpp
@@ -141,8 +141,10 @@ BSONObj OldUpdatePositionArgs::toBSON() const {
++update) {
updateArray.append(BSON(kMemberRIDFieldName << update->rid << kOpTimeFieldName
<< update->ts.getTimestamp()
- << kConfigVersionFieldName << update->cfgver
- << kMemberIdFieldName << update->memberId));
+ << kConfigVersionFieldName
+ << update->cfgver
+ << kMemberIdFieldName
+ << update->memberId));
}
updateArray.doneFast();
}
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 0d66d00aad6..2a9e7e0b40c 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -431,7 +431,7 @@ void logOps(OperationContext* txn,
txn, opstr, nss, begin[i], NULL, fromMigrate, slots[i].opTime, slots[i].hash));
}
- std::unique_ptr<DocWriter const* []> basePtrs(new DocWriter const* [count]);
+ std::unique_ptr<DocWriter const* []> basePtrs(new DocWriter const*[count]);
for (size_t i = 0; i < count; i++) {
basePtrs[i] = &writers[i];
}
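
The one non-BSON change in this hunk is the spacing rule for array-new of a pointer type: the brackets now attach directly to the declarator, `const*[count]` rather than `const* [count]`. A standalone sketch of the same construct (Widget is a hypothetical type for illustration):

    #include <memory>

    struct Widget {};  // hypothetical element type

    // New layout: no space before the brackets in the array-new,
    // matching the DocWriter change above.
    std::unique_ptr<Widget const*[]> ptrs(new Widget const*[8]);
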
@@ -605,72 +605,73 @@ struct ApplyOpMetadata {
std::map<std::string, ApplyOpMetadata> opsMap = {
{"create",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd)
- -> Status { return createCollection(txn, NamespaceString(ns).db().toString(), cmd); },
- {ErrorCodes::NamespaceExists}}},
- {"collMod",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ return createCollection(txn, NamespaceString(ns).db().toString(), cmd);
+ },
+ {ErrorCodes::NamespaceExists}}},
+ {"collMod", {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
BSONObjBuilder resultWeDontCareAbout;
return collMod(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
}}},
{"dropDatabase",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd)
- -> Status { return dropDatabase(txn, NamespaceString(ns).db().toString()); },
+ {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ return dropDatabase(txn, NamespaceString(ns).db().toString());
+ },
{ErrorCodes::NamespaceNotFound}}},
{"drop",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropCollection(txn, parseNs(ns, cmd), resultWeDontCareAbout);
- },
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropCollection(txn, parseNs(ns, cmd), resultWeDontCareAbout);
+ },
// IllegalOperation is necessary because in 3.0 we replicate drops of system.profile
// TODO(dannenberg) remove IllegalOperation once we no longer need 3.0 compatibility
{ErrorCodes::NamespaceNotFound, ErrorCodes::IllegalOperation}}},
// deleteIndex(es) is deprecated but still works as of April 10, 2015
{"deleteIndex",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- },
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ },
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"deleteIndexes",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- },
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ },
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"dropIndex",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- },
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ },
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"dropIndexes",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
- },
+ BSONObjBuilder resultWeDontCareAbout;
+ return dropIndexes(txn, parseNs(ns, cmd), cmd, &resultWeDontCareAbout);
+ },
{ErrorCodes::NamespaceNotFound, ErrorCodes::IndexNotFound}}},
{"renameCollection",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- return renameCollection(txn,
- NamespaceString(cmd.firstElement().valuestrsafe()),
- NamespaceString(cmd["to"].valuestrsafe()),
- cmd["dropTarget"].trueValue(),
- cmd["stayTemp"].trueValue());
- },
+ return renameCollection(txn,
+ NamespaceString(cmd.firstElement().valuestrsafe()),
+ NamespaceString(cmd["to"].valuestrsafe()),
+ cmd["dropTarget"].trueValue(),
+ cmd["stayTemp"].trueValue());
+ },
{ErrorCodes::NamespaceNotFound, ErrorCodes::NamespaceExists}}},
{"applyOps",
{[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
- BSONObjBuilder resultWeDontCareAbout;
- return applyOps(txn, nsToDatabase(ns), cmd, &resultWeDontCareAbout);
- },
+ BSONObjBuilder resultWeDontCareAbout;
+ return applyOps(txn, nsToDatabase(ns), cmd, &resultWeDontCareAbout);
+ },
{ErrorCodes::UnknownError}}},
- {"convertToCapped",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd)
- -> Status { return convertToCapped(txn, parseNs(ns, cmd), cmd["size"].number()); }}},
- {"emptycapped",
- {[](OperationContext* txn, const char* ns, BSONObj& cmd)
- -> Status { return emptyCapped(txn, parseNs(ns, cmd)); }}},
+ {"convertToCapped", {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ return convertToCapped(txn, parseNs(ns, cmd), cmd["size"].number());
+ }}},
+ {"emptycapped", {[](OperationContext* txn, const char* ns, BSONObj& cmd) -> Status {
+ return emptyCapped(txn, parseNs(ns, cmd));
+ }}},
};
} // namespace
@@ -742,7 +743,9 @@ Status applyOperation_inlock(OperationContext* txn,
indexNss.isValid());
uassert(ErrorCodes::InvalidNamespace,
str::stream() << "Database name mismatch for database ("
- << nsToDatabaseSubstring(ns) << ") while creating index: " << op,
+ << nsToDatabaseSubstring(ns)
+ << ") while creating index: "
+ << op,
nsToDatabaseSubstring(ns) == indexNss.db());
opCounters->gotInsert();
@@ -773,10 +776,10 @@ Status applyOperation_inlock(OperationContext* txn,
}
return Status::OK();
}
- uassert(
- ErrorCodes::NamespaceNotFound,
- str::stream() << "Failed to apply insert due to missing collection: " << op.toString(),
- collection);
+ uassert(ErrorCodes::NamespaceNotFound,
+ str::stream() << "Failed to apply insert due to missing collection: "
+ << op.toString(),
+ collection);
if (fieldO.type() == Array) {
// Batched inserts.
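
The opsMap hunks show how the reformatter lays out lambdas inside a braced initializer list: some entries keep the lambda on the key's line, and in either case the lambda body indents relative to the lambda's own opening line rather than the map's column. The real ApplyOpMetadata pairs a Status-returning lambda with a set of acceptable error codes; the sketch below reduces this to a hypothetical int-returning handler map to show only the brace and indent shape:

    #include <functional>
    #include <map>
    #include <string>

    using OpHandler = std::function<int(const std::string&)>;

    // Hypothetical handler map; the body indents from the lambda,
    // not from the surrounding map column.
    std::map<std::string, OpHandler> handlers = {
        {"drop",
         [](const std::string& ns) -> int {
             return ns.empty() ? -1 : 0;
         }},
        {"emptycapped", [](const std::string& ns) -> int {
             return 0;
         }},
    };
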
diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h
index d009083c709..c04fdd85579 100644
--- a/src/mongo/db/repl/oplog.h
+++ b/src/mongo/db/repl/oplog.h
@@ -32,8 +32,8 @@
#include <deque>
#include <string>
-#include "mongo/base/status.h"
#include "mongo/base/disallow_copying.h"
+#include "mongo/base/status.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/stdx/functional.h"
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index ec02d16ce1a..f43e53817f7 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -127,17 +127,26 @@ Status checkRemoteOplogStart(const Fetcher::Documents& documents, OpTimeWithHash
if (!opTimeResult.isOK()) {
return Status(ErrorCodes::OplogStartMissing,
str::stream() << "our last op time fetched: " << lastFetched.opTime.toString()
- << " (hash: " << lastFetched.value << ")"
+ << " (hash: "
+ << lastFetched.value
+ << ")"
<< ". failed to parse optime from first oplog on source: "
- << o.toString() << ": " << opTimeResult.getStatus().toString());
+ << o.toString()
+ << ": "
+ << opTimeResult.getStatus().toString());
}
auto opTime = opTimeResult.getValue();
long long hash = o["h"].numberLong();
if (opTime != lastFetched.opTime || hash != lastFetched.value) {
return Status(ErrorCodes::OplogStartMissing,
str::stream() << "our last op time fetched: " << lastFetched.opTime.toString()
- << ". source's GTE: " << opTime.toString() << " hashes: ("
- << lastFetched.value << "/" << hash << ")");
+ << ". source's GTE: "
+ << opTime.toString()
+ << " hashes: ("
+ << lastFetched.value
+ << "/"
+ << hash
+ << ")");
}
return Status::OK();
}
@@ -149,7 +158,8 @@ StatusWith<OplogFetcher::DocumentsInfo> OplogFetcher::validateDocuments(
if (first && documents.empty()) {
return Status(ErrorCodes::OplogStartMissing,
str::stream() << "The first batch of oplog entries is empty, but expected at "
- "least 1 document matching ts: " << lastTS.toString());
+ "least 1 document matching ts: "
+ << lastTS.toString());
}
DocumentsInfo info;
@@ -178,8 +188,11 @@ StatusWith<OplogFetcher::DocumentsInfo> OplogFetcher::validateDocuments(
if (lastTS >= docTS) {
return Status(ErrorCodes::OplogOutOfOrder,
str::stream() << "Out of order entries in oplog. lastTS: "
- << lastTS.toString() << " outOfOrderTS:" << docTS.toString()
- << " at count:" << info.networkDocumentCount);
+ << lastTS.toString()
+ << " outOfOrderTS:"
+ << docTS.toString()
+ << " at count:"
+ << info.networkDocumentCount);
}
lastTS = docTS;
}
@@ -348,12 +361,14 @@ void OplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
if (_dataReplicatorExternalState->shouldStopFetching(_fetcher.getSource(), metadata)) {
_onShutdown(Status(ErrorCodes::InvalidSyncSource,
- str::stream()
- << "sync source " << _fetcher.getSource().toString()
- << " (last optime: " << metadata.getLastOpVisible().toString()
- << "; sync source index: " << metadata.getSyncSourceIndex()
- << "; primary index: " << metadata.getPrimaryIndex()
- << ") is no longer valid"),
+ str::stream() << "sync source " << _fetcher.getSource().toString()
+ << " (last optime: "
+ << metadata.getLastOpVisible().toString()
+ << "; sync source index: "
+ << metadata.getSyncSourceIndex()
+ << "; primary index: "
+ << metadata.getPrimaryIndex()
+ << ") is no longer valid"),
opTimeWithHash);
return;
}
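
The same one-operand-per-line rule governs the str::stream() error messages in this file: once the chain breaks, every << piece, down to short literals like " (hash: " and ")", gets its own line aligned under the first operand. A minimal sketch of the shape, using std::ostringstream as a stand-in for mongo's str::stream:

    #include <sstream>
    #include <string>

    std::string describe(const std::string& opTime, long long hash) {
        std::ostringstream ss;  // stand-in for str::stream
        // One operand per line once the chain wraps; even the short
        // literals get their own lines.
        ss << "our last op time fetched: " << opTime
           << " (hash: "
           << hash
           << ")";
        return ss.str();
    }
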
diff --git a/src/mongo/db/repl/oplog_fetcher_test.cpp b/src/mongo/db/repl/oplog_fetcher_test.cpp
index 49875369842..6ec6ab36bc4 100644
--- a/src/mongo/db/repl/oplog_fetcher_test.cpp
+++ b/src/mongo/db/repl/oplog_fetcher_test.cpp
@@ -316,7 +316,8 @@ TEST_F(
_createConfig(true),
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getCommandObject_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getCommandObject_forTest();
ASSERT_EQUALS(mongo::BSONType::Object, cmdObj["filter"].type());
ASSERT_EQUALS(BSON("ts" << BSON("$gte" << lastFetched.opTime.getTimestamp())),
cmdObj["filter"].Obj());
@@ -335,7 +336,8 @@ TEST_F(
_createConfig(true),
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getCommandObject_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getCommandObject_forTest();
ASSERT_EQUALS(mongo::BSONType::Object, cmdObj["filter"].type());
ASSERT_EQUALS(BSON("ts" << BSON("$gte" << lastFetched.opTime.getTimestamp())),
cmdObj["filter"].Obj());
@@ -351,7 +353,8 @@ TEST_F(OplogFetcherTest, MetadataObjectContainsReplSetMetadataFieldUnderProtocol
_createConfig(true),
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getMetadataObject_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getMetadataObject_forTest();
ASSERT_EQUALS(1, metadataObj.nFields());
ASSERT_EQUALS(1, metadataObj[rpc::kReplSetMetadataFieldName].numberInt());
}
@@ -364,7 +367,8 @@ TEST_F(OplogFetcherTest, MetadataObjectIsEmptyUnderProtocolVersion0) {
_createConfig(false),
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getMetadataObject_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getMetadataObject_forTest();
ASSERT_EQUALS(BSONObj(), metadataObj);
}
@@ -377,7 +381,8 @@ TEST_F(OplogFetcherTest, RemoteCommandTimeoutShouldEqualElectionTimeout) {
config,
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getRemoteCommandTimeout_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getRemoteCommandTimeout_forTest();
ASSERT_EQUALS(config.getElectionTimeoutPeriod(), timeout);
}
@@ -390,7 +395,8 @@ TEST_F(OplogFetcherTest, AwaitDataTimeoutShouldEqualHalfElectionTimeoutUnderProt
config,
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getAwaitDataTimeout_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getAwaitDataTimeout_forTest();
ASSERT_EQUALS(config.getElectionTimeoutPeriod() / 2, timeout);
}
@@ -402,7 +408,8 @@ TEST_F(OplogFetcherTest, AwaitDataTimeoutShouldBeAConstantUnderProtocolVersion0)
_createConfig(false),
dataReplicatorExternalState.get(),
enqueueDocumentsFn,
- [](Status, OpTimeWithHash) {}).getAwaitDataTimeout_forTest();
+ [](Status, OpTimeWithHash) {})
+ .getAwaitDataTimeout_forTest();
ASSERT_EQUALS(OplogFetcher::kDefaultProtocolZeroAwaitDataTimeout, timeout);
}
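
These test hunks all exercise one rule: a member call on a multi-line call expression (.getCommandObject_forTest() and friends) now drops to its own line below the closing parenthesis instead of riding on the last argument. Sketched on hypothetical Result and makeResult names:

    struct Result {
        int getStatus() const { return 0; }
    };

    Result makeResult(int a, int b, int c, int d) {
        return Result{};
    }

    // The trailing member call moves below the closing parenthesis
    // of the wrapped argument list.
    int status = makeResult(1,
                            2,
                            3,
                            4)
                     .getStatus();
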
diff --git a/src/mongo/db/repl/oplog_interface_local.cpp b/src/mongo/db/repl/oplog_interface_local.cpp
index 43a74b15d54..88aa50436ee 100644
--- a/src/mongo/db/repl/oplog_interface_local.cpp
+++ b/src/mongo/db/repl/oplog_interface_local.cpp
@@ -91,8 +91,8 @@ OplogInterfaceLocal::OplogInterfaceLocal(OperationContext* txn, const std::strin
std::string OplogInterfaceLocal::toString() const {
return str::stream() << "LocalOplogInterface: "
- "operation context: " << _txn->getOpID()
- << "; collection: " << _collectionName;
+ "operation context: "
+ << _txn->getOpID() << "; collection: " << _collectionName;
}
std::unique_ptr<OplogInterface::Iterator> OplogInterfaceLocal::makeIterator() const {
diff --git a/src/mongo/db/repl/oplog_interface_mock.h b/src/mongo/db/repl/oplog_interface_mock.h
index 524ab3c8d2f..7c1b32c506f 100644
--- a/src/mongo/db/repl/oplog_interface_mock.h
+++ b/src/mongo/db/repl/oplog_interface_mock.h
@@ -28,8 +28,8 @@
#pragma once
-#include <initializer_list>
#include "mongo/db/repl/oplog_interface.h"
+#include <initializer_list>
namespace mongo {
namespace repl {
diff --git a/src/mongo/db/repl/optime_extract_test.cpp b/src/mongo/db/repl/optime_extract_test.cpp
index ef1d82dff7d..5f5f5800e24 100644
--- a/src/mongo/db/repl/optime_extract_test.cpp
+++ b/src/mongo/db/repl/optime_extract_test.cpp
@@ -49,7 +49,8 @@ TEST(ExtractBSON, ExtractOpTimeField) {
// Missing timestamp field.
obj = BSON("a" << BSON("ts"
<< "notATimestamp"
- << "t" << 2));
+ << "t"
+ << 2));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, bsonExtractOpTimeField(obj, "a", &opTime));
// Wrong typed timestamp field.
obj = BSON("a" << BSON("t" << 2));
diff --git a/src/mongo/db/repl/read_concern_args.cpp b/src/mongo/db/repl/read_concern_args.cpp
index 097bdc78655..79c9d7b65b4 100644
--- a/src/mongo/db/repl/read_concern_args.cpp
+++ b/src/mongo/db/repl/read_concern_args.cpp
@@ -125,7 +125,8 @@ Status ReadConcernArgs::initialize(const BSONElement& readConcernElem) {
} else {
return Status(ErrorCodes::InvalidOptions,
str::stream() << "Unrecognized option in " << kReadConcernFieldName
- << ": " << fieldName);
+ << ": "
+ << fieldName);
}
}
diff --git a/src/mongo/db/repl/read_concern_args_test.cpp b/src/mongo/db/repl/read_concern_args_test.cpp
index 57364f07d14..75843f5a945 100644
--- a/src/mongo/db/repl/read_concern_args_test.cpp
+++ b/src/mongo/db/repl/read_concern_args_test.cpp
@@ -38,12 +38,13 @@ namespace {
TEST(ReadAfterParse, ReadAfterOnly) {
ReadConcernArgs readAfterOpTime;
- ASSERT_OK(readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName
- << Timestamp(20, 30)
- << OpTime::kTermFieldName << 2)))));
+ ASSERT_OK(readAfterOpTime.initialize(BSON(
+ "find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
+ << 2)))));
ASSERT_EQ(Timestamp(20, 30), readAfterOpTime.getOpTime().getTimestamp());
ASSERT_EQ(2, readAfterOpTime.getOpTime().getTerm());
@@ -54,7 +55,8 @@ TEST(ReadAfterParse, ReadCommitLevelOnly) {
ReadConcernArgs readAfterOpTime;
ASSERT_OK(
readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << "majority"))));
ASSERT_TRUE(readAfterOpTime.getOpTime().isNull());
@@ -63,13 +65,15 @@ TEST(ReadAfterParse, ReadCommitLevelOnly) {
TEST(ReadAfterParse, ReadCommittedFullSpecification) {
ReadConcernArgs readAfterOpTime;
- ASSERT_OK(readAfterOpTime.initialize(
- BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30)
- << OpTime::kTermFieldName << 2)
- << ReadConcernArgs::kLevelFieldName << "majority"))));
+ ASSERT_OK(readAfterOpTime.initialize(BSON(
+ "find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
+ << 2)
+ << ReadConcernArgs::kLevelFieldName
+ << "majority"))));
ASSERT_EQ(Timestamp(20, 30), readAfterOpTime.getOpTime().getTimestamp());
ASSERT_EQ(2, readAfterOpTime.getOpTime().getTerm());
@@ -87,24 +91,26 @@ TEST(ReadAfterParse, Empty) {
TEST(ReadAfterParse, BadRootType) {
ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(
- readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
- << "x")));
+ ASSERT_NOT_OK(readAfterOpTime.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << "x")));
}
TEST(ReadAfterParse, BadOpTimeType) {
ReadConcernArgs readAfterOpTime;
ASSERT_NOT_OK(
readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName << 2))));
}
TEST(ReadAfterParse, OpTimeNotNeededForValidReadConcern) {
ReadConcernArgs readAfterOpTime;
ASSERT_OK(readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSONObj())));
}
@@ -112,47 +118,49 @@ TEST(ReadAfterParse, NoOpTimeTS) {
ReadConcernArgs readAfterOpTime;
ASSERT_NOT_OK(
readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kAfterOpTimeFieldName
<< BSON(OpTime::kTimestampFieldName << 2)))));
}
TEST(ReadAfterParse, NoOpTimeTerm) {
ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(
- readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTermFieldName << 2)))));
+ ASSERT_NOT_OK(readAfterOpTime.initialize(BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTermFieldName << 2)))));
}
TEST(ReadAfterParse, BadOpTimeTSType) {
ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(
- readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName
- << BSON("x" << 1) << OpTime::kTermFieldName
- << 2)))));
+ ASSERT_NOT_OK(readAfterOpTime.initialize(
+ BSON("find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName << BSON("x" << 1) << OpTime::kTermFieldName
+ << 2)))));
}
TEST(ReadAfterParse, BadOpTimeTermType) {
ReadConcernArgs readAfterOpTime;
- ASSERT_NOT_OK(
- readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName
- << Timestamp(1, 0) << OpTime::kTermFieldName
- << "y")))));
+ ASSERT_NOT_OK(readAfterOpTime.initialize(BSON(
+ "find"
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName << Timestamp(1, 0) << OpTime::kTermFieldName
+ << "y")))));
}
TEST(ReadAfterParse, BadLevelType) {
ReadConcernArgs readAfterOpTime;
ASSERT_EQ(ErrorCodes::TypeMismatch,
readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName << 7))));
}
@@ -160,7 +168,8 @@ TEST(ReadAfterParse, BadLevelValue) {
ReadConcernArgs readAfterOpTime;
ASSERT_EQ(ErrorCodes::FailedToParse,
readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSON(ReadConcernArgs::kLevelFieldName
<< "seven is not a real level"))));
}
@@ -169,7 +178,8 @@ TEST(ReadAfterParse, BadOption) {
ReadConcernArgs readAfterOpTime;
ASSERT_EQ(ErrorCodes::InvalidOptions,
readAfterOpTime.initialize(BSON("find"
- << "test" << ReadConcernArgs::kReadConcernFieldName
+ << "test"
+ << ReadConcernArgs::kReadConcernFieldName
<< BSON("asdf" << 1))));
}
@@ -188,10 +198,10 @@ TEST(ReadAfterSerialize, ReadAfterOnly) {
ReadConcernArgs readAfterOpTime(OpTime(Timestamp(20, 30), 2), boost::none);
readAfterOpTime.appendInfo(&builder);
- BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName
- << BSON(ReadConcernArgs::kAfterOpTimeFieldName << BSON(
- OpTime::kTimestampFieldName
- << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
+ BSONObj expectedObj(BSON(
+ ReadConcernArgs::kReadConcernFieldName << BSON(
+ ReadConcernArgs::kAfterOpTimeFieldName << BSON(
+ OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
ASSERT_EQ(expectedObj, builder.done());
}
@@ -213,11 +223,13 @@ TEST(ReadAfterSerialize, FullSpecification) {
ReadConcernLevel::kMajorityReadConcern);
readAfterOpTime.appendInfo(&builder);
- BSONObj expectedObj(BSON(ReadConcernArgs::kReadConcernFieldName << BSON(
- ReadConcernArgs::kLevelFieldName
- << "majority" << ReadConcernArgs::kAfterOpTimeFieldName
- << BSON(OpTime::kTimestampFieldName
- << Timestamp(20, 30) << OpTime::kTermFieldName << 2))));
+ BSONObj expectedObj(BSON(
+ ReadConcernArgs::kReadConcernFieldName
+ << BSON(ReadConcernArgs::kLevelFieldName
+ << "majority"
+ << ReadConcernArgs::kAfterOpTimeFieldName
+ << BSON(OpTime::kTimestampFieldName << Timestamp(20, 30) << OpTime::kTermFieldName
+ << 2))));
ASSERT_EQ(expectedObj, builder.done());
}
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.cpp b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
index f7388355c98..032ef5e3b1d 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response.cpp
@@ -261,18 +261,18 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc, long long term)
if (memberStateElement.eoo()) {
_stateSet = false;
} else if (memberStateElement.type() != NumberInt && memberStateElement.type() != NumberLong) {
- return Status(ErrorCodes::TypeMismatch,
- str::stream()
- << "Expected \"" << kMemberStateFieldName
+ return Status(
+ ErrorCodes::TypeMismatch,
+ str::stream() << "Expected \"" << kMemberStateFieldName
<< "\" field in response to replSetHeartbeat "
"command to have type NumberInt or NumberLong, but found type "
<< typeName(memberStateElement.type()));
} else {
long long stateInt = memberStateElement.numberLong();
if (stateInt < 0 || stateInt > MemberState::RS_MAX) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "Value for \"" << kMemberStateFieldName
+ return Status(
+ ErrorCodes::BadValue,
+ str::stream() << "Value for \"" << kMemberStateFieldName
<< "\" in response to replSetHeartbeat is "
"out of range; legal values are non-negative and no more than "
<< MemberState::RS_MAX);
@@ -312,7 +312,8 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc, long long term)
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected \"" << kHbMessageFieldName
<< "\" field in response to replSetHeartbeat to have "
- "type String, but found " << typeName(hbMsgElement.type()));
+ "type String, but found "
+ << typeName(hbMsgElement.type()));
} else {
_hbmsg = hbMsgElement.String();
}
@@ -339,7 +340,8 @@ Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc, long long term)
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected \"" << kConfigFieldName
<< "\" in response to replSetHeartbeat to have type "
- "Object, but found " << typeName(rsConfigElement.type()));
+ "Object, but found "
+ << typeName(rsConfigElement.type()));
}
_configSet = true;
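
The hunks above also show how the reformatter treats an implicitly concatenated string literal inside a stream: the two adjacent literals are a single operand, so the continuation literal aligns under the first and the next << starts a fresh line. A sketch, again with std::ostringstream standing in for str::stream:

    #include <sstream>
    #include <string>

    std::string typeError(const std::string& typeName) {
        std::ostringstream ss;  // stand-in for str::stream
        // Adjacent literals form one operand; the second aligns under
        // the first, and the following << begins a new line.
        ss << "Expected \"hbmsg\" field to have "
              "type String, but found "
           << typeName;
        return ss.str();
    }
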
diff --git a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
index 87634a155a5..80548c1aacf 100644
--- a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
+++ b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp
@@ -713,7 +713,8 @@ TEST(ReplSetHeartbeatResponse, InitializeHeartbeatMeessageWrongType) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj =
BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
- "hbmsg" << 4);
+ "hbmsg"
+ << 4);
Status result = hbResponse.initialize(initializerObj, 0);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -726,7 +727,8 @@ TEST(ReplSetHeartbeatResponse, InitializeSyncingToWrongType) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj =
BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
- "syncingTo" << 4);
+ "syncingTo"
+ << 4);
Status result = hbResponse.initialize(initializerObj, 0);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -739,7 +741,8 @@ TEST(ReplSetHeartbeatResponse, InitializeConfigWrongType) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj =
BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
- "config" << 4);
+ "config"
+ << 4);
Status result = hbResponse.initialize(initializerObj, 0);
ASSERT_EQUALS(ErrorCodes::TypeMismatch, result);
ASSERT_EQUALS(
@@ -752,7 +755,8 @@ TEST(ReplSetHeartbeatResponse, InitializeBadConfig) {
ReplSetHeartbeatResponse hbResponse;
BSONObj initializerObj =
BSON("ok" << 1.0 << "v" << 2 << // needs a version to get this far in initialize()
- "config" << BSON("illegalFieldName" << 2));
+ "config"
+ << BSON("illegalFieldName" << 2));
Status result = hbResponse.initialize(initializerObj, 0);
ASSERT_EQUALS(ErrorCodes::BadValue, result);
ASSERT_EQUALS("Unexpected field illegalFieldName in replica set configuration",
diff --git a/src/mongo/db/repl/repl_set_html_summary.cpp b/src/mongo/db/repl/repl_set_html_summary.cpp
index 218dff908fd..14c2ff81b7d 100644
--- a/src/mongo/db/repl/repl_set_html_summary.cpp
+++ b/src/mongo/db/repl/repl_set_html_summary.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/repl/repl_set_html_summary.h"
-#include <string>
#include <sstream>
+#include <string>
#include "mongo/util/mongoutils/html.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/repl/repl_settings.h b/src/mongo/db/repl/repl_settings.h
index 36705f1ce74..b7c6918d75d 100644
--- a/src/mongo/db/repl/repl_settings.h
+++ b/src/mongo/db/repl/repl_settings.h
@@ -32,8 +32,8 @@
#include <string>
#include "mongo/db/jsobj.h"
-#include "mongo/util/concurrency/mutex.h"
#include "mongo/db/repl/bgsync.h"
+#include "mongo/util/concurrency/mutex.h"
namespace mongo {
namespace repl {
diff --git a/src/mongo/db/repl/replica_set_config.cpp b/src/mongo/db/repl/replica_set_config.cpp
index 8f619f6a5d9..b3d0d8f4fc8 100644
--- a/src/mongo/db/repl/replica_set_config.cpp
+++ b/src/mongo/db/repl/replica_set_config.cpp
@@ -128,7 +128,8 @@ Status ReplicaSetConfig::_initialize(const BSONObj& cfg,
if (memberElement.type() != Object) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected type of " << kMembersFieldName << "."
- << memberElement.fieldName() << " to be Object, but found "
+ << memberElement.fieldName()
+ << " to be Object, but found "
<< typeName(memberElement.type()));
}
_members.resize(_members.size() + 1);
@@ -200,7 +201,8 @@ Status ReplicaSetConfig::_initialize(const BSONObj& cfg,
str::stream() << "replica set configuration cannot contain '"
<< kReplicaSetIdFieldName
<< "' "
- "field when called from replSetInitiate: " << cfg);
+ "field when called from replSetInitiate: "
+ << cfg);
}
_replicaSetId = OID::gen();
} else if (!_replicaSetId.isSet()) {
@@ -312,8 +314,10 @@ Status ReplicaSetConfig::_parseSettingsSubdocument(const BSONObj& settings) {
if (modeElement.type() != Object) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName << '.'
- << modeElement.fieldName() << " to be an Object, not "
+ << kGetLastErrorModesFieldName
+ << '.'
+ << modeElement.fieldName()
+ << " to be an Object, not "
<< typeName(modeElement.type()));
}
ReplicaSetTagPattern pattern = _tagConfig.makePattern();
@@ -321,20 +325,26 @@ Status ReplicaSetConfig::_parseSettingsSubdocument(const BSONObj& settings) {
const BSONElement constraintElement = constraintIter.next();
if (!constraintElement.isNumber()) {
return Status(ErrorCodes::TypeMismatch,
- str::stream()
- << "Expected " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName << '.' << modeElement.fieldName()
- << '.' << constraintElement.fieldName() << " to be a number, not "
- << typeName(constraintElement.type()));
+ str::stream() << "Expected " << kSettingsFieldName << '.'
+ << kGetLastErrorModesFieldName
+ << '.'
+ << modeElement.fieldName()
+ << '.'
+ << constraintElement.fieldName()
+ << " to be a number, not "
+ << typeName(constraintElement.type()));
}
const int minCount = constraintElement.numberInt();
if (minCount <= 0) {
return Status(ErrorCodes::BadValue,
str::stream() << "Value of " << kSettingsFieldName << '.'
- << kGetLastErrorModesFieldName << '.'
- << modeElement.fieldName() << '.'
+ << kGetLastErrorModesFieldName
+ << '.'
+ << modeElement.fieldName()
+ << '.'
<< constraintElement.fieldName()
- << " must be positive, but found " << minCount);
+ << " must be positive, but found "
+ << minCount);
}
status = _tagConfig.addTagCountConstraintToPattern(
&pattern, constraintElement.fieldNameStringData(), minCount);
@@ -370,7 +380,8 @@ Status ReplicaSetConfig::validate() const {
if (_replSetName.empty()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Replica set configuration must have non-empty "
- << kIdFieldName << " field");
+ << kIdFieldName
+ << " field");
}
if (_heartbeatInterval < Milliseconds(0)) {
return Status(ErrorCodes::BadValue,
@@ -413,22 +424,41 @@ Status ReplicaSetConfig::validate() const {
const MemberConfig& memberJ = _members[j];
if (memberI.getId() == memberJ.getId()) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "Found two member configurations with same "
- << MemberConfig::kIdFieldName << " field, " << kMembersFieldName
- << "." << i << "." << MemberConfig::kIdFieldName
- << " == " << kMembersFieldName << "." << j << "."
- << MemberConfig::kIdFieldName << " == " << memberI.getId());
+ str::stream() << "Found two member configurations with same "
+ << MemberConfig::kIdFieldName
+ << " field, "
+ << kMembersFieldName
+ << "."
+ << i
+ << "."
+ << MemberConfig::kIdFieldName
+ << " == "
+ << kMembersFieldName
+ << "."
+ << j
+ << "."
+ << MemberConfig::kIdFieldName
+ << " == "
+ << memberI.getId());
}
if (memberI.getHostAndPort() == memberJ.getHostAndPort()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Found two member configurations with same "
- << MemberConfig::kHostFieldName << " field, "
- << kMembersFieldName << "." << i << "."
<< MemberConfig::kHostFieldName
- << " == " << kMembersFieldName << "." << j << "."
+ << " field, "
+ << kMembersFieldName
+ << "."
+ << i
+ << "."
+ << MemberConfig::kHostFieldName
+ << " == "
+ << kMembersFieldName
+ << "."
+ << j
+ << "."
<< MemberConfig::kHostFieldName
- << " == " << memberI.getHostAndPort().toString());
+ << " == "
+ << memberI.getHostAndPort().toString());
}
}
}
@@ -438,7 +468,9 @@ Status ReplicaSetConfig::validate() const {
ErrorCodes::BadValue,
str::stream()
<< "Either all host names in a replica set configuration must be localhost "
- "references, or none must be; found " << localhostCount << " out of "
+ "references, or none must be; found "
+ << localhostCount
+ << " out of "
<< _members.size());
}
@@ -474,7 +506,8 @@ Status ReplicaSetConfig::validate() const {
if (_protocolVersion != 0 && _protocolVersion != 1) {
return Status(ErrorCodes::BadValue,
str::stream() << kProtocolVersionFieldName << " field value of "
- << _protocolVersion << " is not 1 or 0");
+ << _protocolVersion
+ << " is not 1 or 0");
}
if (_configServer) {
@@ -546,7 +579,8 @@ Status ReplicaSetConfig::checkIfWriteConcernCanBeSatisfied(
// write concern mode.
return Status(ErrorCodes::CannotSatisfyWriteConcern,
str::stream() << "Not enough nodes match write concern mode \""
- << writeConcern.wMode << "\"");
+ << writeConcern.wMode
+ << "\"");
} else {
int nodesRemaining = writeConcern.wNumNodes;
for (size_t j = 0; j < _members.size(); ++j) {
diff --git a/src/mongo/db/repl/replica_set_config_checks.cpp b/src/mongo/db/repl/replica_set_config_checks.cpp
index 6539bd08e3f..a45985d3cb4 100644
--- a/src/mongo/db/repl/replica_set_config_checks.cpp
+++ b/src/mongo/db/repl/replica_set_config_checks.cpp
@@ -60,8 +60,10 @@ StatusWith<int> findSelfInConfig(ReplicationCoordinatorExternalState* externalSt
if (meConfigs.empty()) {
return StatusWith<int>(ErrorCodes::NodeNotFound,
str::stream() << "No host described in new configuration "
- << newConfig.getConfigVersion() << " for replica set "
- << newConfig.getReplSetName() << " maps to this node");
+ << newConfig.getConfigVersion()
+ << " for replica set "
+ << newConfig.getReplSetName()
+ << " maps to this node");
}
if (meConfigs.size() > 1) {
str::stream message;
@@ -90,9 +92,11 @@ Status checkElectable(const ReplicaSetConfig& newConfig, int configIndex) {
if (!myConfig.isElectable()) {
return Status(ErrorCodes::NodeNotElectable,
str::stream() << "This node, " << myConfig.getHostAndPort().toString()
- << ", with _id " << myConfig.getId()
+ << ", with _id "
+ << myConfig.getId()
<< " is not electable under the new configuration version "
- << newConfig.getConfigVersion() << " for replica set "
+ << newConfig.getConfigVersion()
+ << " for replica set "
<< newConfig.getReplSetName());
}
return Status::OK();
@@ -138,22 +142,28 @@ Status validateOldAndNewConfigsCompatible(const ReplicaSetConfig& oldConfig,
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream()
<< "New replica set configuration version must be greater than old, but "
- << newConfig.getConfigVersion() << " is not greater than "
- << oldConfig.getConfigVersion() << " for replica set "
+ << newConfig.getConfigVersion()
+ << " is not greater than "
+ << oldConfig.getConfigVersion()
+ << " for replica set "
<< newConfig.getReplSetName());
}
if (oldConfig.getReplSetName() != newConfig.getReplSetName()) {
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream() << "New and old configurations differ in replica set name; "
- "old was " << oldConfig.getReplSetName() << ", and new is "
+ "old was "
+ << oldConfig.getReplSetName()
+ << ", and new is "
<< newConfig.getReplSetName());
}
if (oldConfig.getReplicaSetId() != newConfig.getReplicaSetId()) {
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
str::stream() << "New and old configurations differ in replica set ID; "
- "old was " << oldConfig.getReplicaSetId() << ", and new is "
+ "old was "
+ << oldConfig.getReplicaSetId()
+ << ", and new is "
<< newConfig.getReplicaSetId());
}
@@ -185,14 +195,18 @@ Status validateOldAndNewConfigsCompatible(const ReplicaSetConfig& oldConfig,
}
if (hostsEqual && !idsEqual) {
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- str::stream()
- << "New and old configurations both have members with "
- << MemberConfig::kHostFieldName << " of "
- << mOld->getHostAndPort().toString()
- << " but in the new configuration the "
- << MemberConfig::kIdFieldName << " field is " << mNew->getId()
- << " and in the old configuration it is " << mOld->getId()
- << " for replica set " << newConfig.getReplSetName());
+ str::stream() << "New and old configurations both have members with "
+ << MemberConfig::kHostFieldName
+ << " of "
+ << mOld->getHostAndPort().toString()
+ << " but in the new configuration the "
+ << MemberConfig::kIdFieldName
+ << " field is "
+ << mNew->getId()
+ << " and in the old configuration it is "
+ << mOld->getId()
+ << " for replica set "
+ << newConfig.getReplSetName());
}
// At this point, the _id and host fields are equal, so we're looking at the old and
// new configurations for the same member node.
diff --git a/src/mongo/db/repl/replica_set_config_checks_test.cpp b/src/mongo/db/repl/replica_set_config_checks_test.cpp
index ab43959d37d..394d6535dbd 100644
--- a/src/mongo/db/repl/replica_set_config_checks_test.cpp
+++ b/src/mongo/db/repl/replica_set_config_checks_test.cpp
@@ -48,7 +48,9 @@ TEST(ValidateConfigForInitiate, VersionMustBe1) {
ReplicaSetConfig config;
ASSERT_OK(config.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")))));
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
@@ -59,7 +61,9 @@ TEST(ValidateConfigForInitiate, MustFindSelf) {
ReplicaSetConfig config;
ASSERT_OK(config.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -85,12 +89,15 @@ TEST(ValidateConfigForInitiate, SelfMustBeElectable) {
ReplicaSetConfig config;
ASSERT_OK(config.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0)
+ << "priority"
+ << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
@@ -108,25 +115,29 @@ TEST(ValidateConfigForReconfig, NewConfigVersionNumberMustBeHigherThanOld) {
ReplicaSetConfig newConfig;
// Two configurations, identical except for version.
- ASSERT_OK(
- oldConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
- ASSERT_OK(
- newConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -162,25 +173,29 @@ TEST(ValidateConfigForReconfig, NewConfigMustNotChangeSetName) {
ReplicaSetConfig newConfig;
// Two configurations, compatible except for set name.
- ASSERT_OK(
- oldConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
- ASSERT_OK(
- newConfig.initialize(BSON("_id"
- << "rs1"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs1"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -201,27 +216,33 @@ TEST(ValidateConfigForReconfig, NewConfigMustNotChangeSetId) {
ReplicaSetConfig newConfig;
// Two configurations, compatible except for set ID.
- ASSERT_OK(
- oldConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3"))
- << "settings" << BSON("replicaSetId" << OID::gen()))));
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3"))
+ << "settings"
+ << BSON("replicaSetId" << OID::gen()))));
- ASSERT_OK(
- newConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3"))
- << "settings" << BSON("replicaSetId" << OID::gen()))));
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3"))
+ << "settings"
+ << BSON("replicaSetId" << OID::gen()))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -248,40 +269,51 @@ TEST(ValidateConfigForReconfig, NewConfigMustNotFlipBuildIndexesFlag) {
// The third, compatible with the first.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "buildIndexes" << false
- << "priority" << 0)
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "buildIndexes" << true
- << "priority" << 0)
+ << "buildIndexes"
+ << true
+ << "priority"
+ << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
- ASSERT_OK(
- oldConfigRefresh.initialize(BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "buildIndexes" << false
- << "priority" << 0)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfigRefresh.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -310,37 +342,45 @@ TEST(ValidateConfigForReconfig, NewConfigMustNotFlipArbiterFlag) {
// The third, compatible with the first.
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly" << false)
+ << "arbiterOnly"
+ << false)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 3 << "host"
<< "h3")))));
- ASSERT_OK(
- oldConfigRefresh.initialize(BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2"
- << "arbiterOnly" << false)
- << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfigRefresh.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "arbiterOnly"
+ << false)
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
ASSERT_OK(newConfig.validate());
@@ -369,15 +409,17 @@ TEST(ValidateConfigForReconfig, HostAndIdRemappingRestricted) {
ReplicaSetConfig illegalNewConfigReusingHost;
ReplicaSetConfig illegalNewConfigReusingId;
- ASSERT_OK(
- oldConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ASSERT_OK(oldConfig.validate());
//
@@ -387,7 +429,9 @@ TEST(ValidateConfigForReconfig, HostAndIdRemappingRestricted) {
ASSERT_OK(
legalNewConfigWithNewHostAndId.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 4 << "host"
@@ -395,8 +439,9 @@ TEST(ValidateConfigForReconfig, HostAndIdRemappingRestricted) {
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(legalNewConfigWithNewHostAndId.validate());
- ASSERT_OK(validateConfigForReconfig(
- &externalState, oldConfig, legalNewConfigWithNewHostAndId, false).getStatus());
+ ASSERT_OK(
+ validateConfigForReconfig(&externalState, oldConfig, legalNewConfigWithNewHostAndId, false)
+ .getStatus());
//
// Here, the new config is invalid because we've reused host name "h2" with
@@ -404,7 +449,9 @@ TEST(ValidateConfigForReconfig, HostAndIdRemappingRestricted) {
//
ASSERT_OK(illegalNewConfigReusingHost.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 4 << "host"
@@ -412,20 +459,24 @@ TEST(ValidateConfigForReconfig, HostAndIdRemappingRestricted) {
<< BSON("_id" << 3 << "host"
<< "h3")))));
ASSERT_OK(illegalNewConfigReusingHost.validate());
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(
- &externalState, oldConfig, illegalNewConfigReusingHost, false).getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, oldConfig, illegalNewConfigReusingHost, false)
+ .getStatus());
// Forced reconfigs also do not allow this.
- ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
- validateConfigForReconfig(
- &externalState, oldConfig, illegalNewConfigReusingHost, true).getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::NewReplicaSetConfigurationIncompatible,
+ validateConfigForReconfig(&externalState, oldConfig, illegalNewConfigReusingHost, true)
+ .getStatus());
//
// Here, the new config is valid, because all we've changed is the name of
// the host representing _id 2.
//
ASSERT_OK(illegalNewConfigReusingId.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
@@ -441,26 +492,30 @@ TEST(ValidateConfigForReconfig, MustFindSelf) {
// Old and new config are same except for version change; this is just testing that we can
// find ourself in the new config.
ReplicaSetConfig oldConfig;
- ASSERT_OK(
- oldConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ReplicaSetConfig newConfig;
- ASSERT_OK(
- newConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(newConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ReplicationCoordinatorExternalStateMock notPresentExternalState;
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
presentOnceExternalState.addSelf(HostAndPort("h2"));
@@ -472,9 +527,10 @@ TEST(ValidateConfigForReconfig, MustFindSelf) {
ASSERT_EQUALS(ErrorCodes::NodeNotFound,
validateConfigForReconfig(&notPresentExternalState, oldConfig, newConfig, false)
.getStatus());
- ASSERT_EQUALS(ErrorCodes::DuplicateKey,
- validateConfigForReconfig(
- &presentThriceExternalState, oldConfig, newConfig, false).getStatus());
+ ASSERT_EQUALS(
+ ErrorCodes::DuplicateKey,
+ validateConfigForReconfig(&presentThriceExternalState, oldConfig, newConfig, false)
+ .getStatus());
ASSERT_EQUALS(1,
unittest::assertGet(validateConfigForReconfig(
&presentOnceExternalState, oldConfig, newConfig, false)));
@@ -494,25 +550,30 @@ TEST(ValidateConfigForReconfig, SelfMustEndElectable) {
// Old and new config are same except for version change and the electability of one node;
// this is just testing that we must be electable in the new config.
ReplicaSetConfig oldConfig;
- ASSERT_OK(
- oldConfig.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1")
- << BSON("_id" << 2 << "host"
- << "h2") << BSON("_id" << 3 << "host"
- << "h3")))));
+ ASSERT_OK(oldConfig.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2")
+ << BSON("_id" << 3 << "host"
+ << "h3")))));
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1")
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0)
+ << "priority"
+ << 0)
<< BSON("_id" << 3 << "host"
<< "h3")))));
ReplicationCoordinatorExternalStateMock presentOnceExternalState;
@@ -533,7 +594,9 @@ TEST(ValidateConfigForInitiate, NewConfigInvalid) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -552,14 +615,18 @@ TEST(ValidateConfigForReconfig, NewConfigInvalid) {
ReplicaSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")))));
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -583,14 +650,18 @@ TEST(ValidateConfigForStartUp, NewConfigInvalid) {
ReplicaSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")))));
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -610,7 +681,9 @@ TEST(ValidateConfigForStartUp, OldAndNewConfigIncompatible) {
ReplicaSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -620,7 +693,9 @@ TEST(ValidateConfigForStartUp, OldAndNewConfigIncompatible) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 2 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -640,7 +715,9 @@ TEST(ValidateConfigForStartUp, OldAndNewConfigCompatible) {
ReplicaSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -650,10 +727,13 @@ TEST(ValidateConfigForStartUp, OldAndNewConfigCompatible) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2"
- << "priority" << 3)
+ << "priority"
+ << 3)
<< BSON("_id" << 1 << "host"
<< "h3")))));
@@ -670,7 +750,9 @@ TEST(ValidateConfigForHeartbeatReconfig, NewConfigInvalid) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -689,7 +771,9 @@ TEST(ValidateConfigForHeartbeatReconfig, NewConfigValid) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -706,7 +790,9 @@ TEST(ValidateForReconfig, ForceStillNeedsValidConfig) {
ReplicaSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -716,7 +802,9 @@ TEST(ValidateForReconfig, ForceStillNeedsValidConfig) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 0 << "host"
@@ -735,7 +823,9 @@ TEST(ValidateForReconfig, ForceStillNeedsSelfPresent) {
ReplicaSetConfig oldConfig;
ASSERT_OK(oldConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h2")
<< BSON("_id" << 1 << "host"
@@ -745,7 +835,9 @@ TEST(ValidateForReconfig, ForceStillNeedsSelfPresent) {
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h3")
<< BSON("_id" << 2 << "host"
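
Every test in this block follows one harness: parse an old and a new config, register "self" with a ReplicationCoordinatorExternalStateMock, then assert on the Status returned by validateConfigForReconfig. A condensed sketch of that skeleton, assuming the headers and fixtures this file already uses (the test name HarnessSketch is illustrative):

    TEST(ValidateConfigForReconfig, HarnessSketch) {
        ReplicaSetConfig oldConfig;
        ASSERT_OK(oldConfig.initialize(BSON("_id"
                                            << "rs0"
                                            << "version"
                                            << 1
                                            << "members"
                                            << BSON_ARRAY(BSON("_id" << 1 << "host"
                                                                     << "h1")))));
        ReplicaSetConfig newConfig;
        ASSERT_OK(newConfig.initialize(BSON("_id"
                                            << "rs0"
                                            << "version"
                                            << 2
                                            << "members"
                                            << BSON_ARRAY(BSON("_id" << 1 << "host"
                                                                     << "h1")))));
        ReplicationCoordinatorExternalStateMock externalState;
        externalState.addSelf(HostAndPort("h1"));
        // Final argument false selects a non-forced reconfig; passing true still
        // runs validation (see ForceStillNeedsValidConfig above).
        ASSERT_OK(
            validateConfigForReconfig(&externalState, oldConfig, newConfig, false).getStatus());
    }
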
diff --git a/src/mongo/db/repl/replica_set_config_test.cpp b/src/mongo/db/repl/replica_set_config_test.cpp
index 100ca89383f..6e5bb69b40b 100644
--- a/src/mongo/db/repl/replica_set_config_test.cpp
+++ b/src/mongo/db/repl/replica_set_config_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/replica_set_config.h"
#include "mongo/db/server_options.h"
-#include "mongo/util/scopeguard.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/scopeguard.h"
namespace mongo {
namespace repl {
@@ -62,7 +62,9 @@ TEST(ReplicaSetConfig, ParseMinimalConfigAndCheckDefaults) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
@@ -88,20 +90,32 @@ TEST(ReplicaSetConfig, ParseMinimalConfigAndCheckDefaults) {
TEST(ReplicaSetConfig, ParseLargeConfigAndCheckAccessors) {
ReplicaSetConfig config;
- ASSERT_OK(config.initialize(BSON(
- "_id"
- << "rs0"
- << "version" << 1234 << "members" << BSON_ARRAY(BSON("_id" << 234 << "host"
- << "localhost:12345"
- << "tags" << BSON("NYC"
- << "NY")))
- << "protocolVersion" << 1 << "settings"
- << BSON("getLastErrorDefaults" << BSON("w"
- << "majority") << "getLastErrorModes"
- << BSON("eastCoast" << BSON("NYC" << 1)) << "chainingAllowed"
- << false << "heartbeatIntervalMillis" << 5000
- << "heartbeatTimeoutSecs" << 120 << "electionTimeoutMillis"
- << 10))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1234
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 234 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("NYC"
+ << "NY")))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("getLastErrorDefaults"
+ << BSON("w"
+ << "majority")
+ << "getLastErrorModes"
+ << BSON("eastCoast" << BSON("NYC" << 1))
+ << "chainingAllowed"
+ << false
+ << "heartbeatIntervalMillis"
+ << 5000
+ << "heartbeatTimeoutSecs"
+ << 120
+ << "electionTimeoutMillis"
+ << 10))));
ASSERT_OK(config.validate());
ASSERT_EQUALS("rs0", config.getReplSetName());
ASSERT_EQUALS(1234, config.getConfigVersion());
@@ -123,44 +137,57 @@ TEST(ReplicaSetConfig, ParseLargeConfigAndCheckAccessors) {
TEST(ReplicaSetConfig, GetConnectionStringFiltersHiddenNodes) {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:11111")
- << BSON("_id" << 1 << "host"
- << "localhost:22222"
- << "arbiterOnly" << true)
- << BSON("_id" << 2 << "host"
- << "localhost:33333"
- << "hidden" << true << "priority" << 0)
- << BSON("_id" << 3 << "host"
- << "localhost:44444")))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:11111")
+ << BSON("_id" << 1 << "host"
+ << "localhost:22222"
+ << "arbiterOnly"
+ << true)
+ << BSON("_id" << 2 << "host"
+ << "localhost:33333"
+ << "hidden"
+ << true
+ << "priority"
+ << 0)
+ << BSON("_id" << 3 << "host"
+ << "localhost:44444")))));
ASSERT_OK(config.validate());
- ASSERT_EQUALS(
- ConnectionString::forReplicaSet(
- "rs0", {HostAndPort{"localhost:11111"}, HostAndPort{"localhost:44444"}}).toString(),
- config.getConnectionString().toString());
+ ASSERT_EQUALS(ConnectionString::forReplicaSet(
+ "rs0", {HostAndPort{"localhost:11111"}, HostAndPort{"localhost:44444"}})
+ .toString(),
+ config.getConnectionString().toString());
}
TEST(ReplicaSetConfig, MajorityCalculationThreeVotersNoArbiters) {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1")
- << BSON("_id" << 4 << "host"
- << "h4:1"
- << "votes" << 0 << "priority" << 0)
- << BSON("_id" << 5 << "host"
- << "h5:1"
- << "votes" << 0 << "priority" << 0)))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1")
+ << BSON("_id" << 4 << "host"
+ << "h4:1"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("_id" << 5 << "host"
+ << "h5:1"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(2, config.getWriteMajority());
@@ -168,24 +195,35 @@ TEST(ReplicaSetConfig, MajorityCalculationThreeVotersNoArbiters) {
TEST(ReplicaSetConfig, MajorityCalculationNearlyHalfArbiters) {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0)
- << BSON("host"
- << "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2)
- << BSON("host"
- << "node4:12345"
- << "_id" << 3 << "arbiterOnly" << true)
- << BSON("host"
- << "node5:12345"
- << "_id" << 4 << "arbiterOnly" << true)))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id"
+ << 3
+ << "arbiterOnly"
+ << true)
+ << BSON("host"
+ << "node5:12345"
+ << "_id"
+ << 4
+ << "arbiterOnly"
+ << true)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(3, config.getWriteMajority());
}
@@ -194,43 +232,64 @@ TEST(ReplicaSetConfig, MajorityCalculationEvenNumberOfMembers) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2)
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
<< BSON("host"
<< "node4:12345"
- << "_id" << 3)))));
+ << "_id"
+ << 3)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(3, config.getWriteMajority());
}
TEST(ReplicaSetConfig, MajorityCalculationNearlyHalfSecondariesNoVotes) {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(
- BSON("host"
- << "node1:12345"
- << "_id" << 0)
- << BSON("host"
- << "node2:12345"
- << "_id" << 1 << "votes" << 0 << "priority" << 0)
- << BSON("host"
- << "node3:12345"
- << "_id" << 2 << "votes" << 0 << "priority" << 0)
- << BSON("host"
- << "node4:12345"
- << "_id" << 3) << BSON("host"
- << "node5:12345"
- << "_id" << 4)))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("host"
+ << "node4:12345"
+ << "_id"
+ << 3)
+ << BSON("host"
+ << "node5:12345"
+ << "_id"
+ << 4)))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(2, config.getWriteMajority());
}
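
The expected values in these four majority tests are mutually consistent with one rule: the write majority is a strict majority of the voting members, whether or not some voters are arbiters. A sketch of that rule with the worked numbers (the function is illustrative, not from the patch):

    // Worked against the assertions above:
    //   3 voters (h4, h5 carry votes: 0)        -> 3 / 2 + 1 == 2
    //   5 voters, two of them arbiters          -> 5 / 2 + 1 == 3
    //   4 voters                                -> 4 / 2 + 1 == 3
    //   3 voters (node2, node3 carry votes: 0)  -> 3 / 2 + 1 == 2
    int expectedWriteMajority(int votingMembers) {
        return votingMembers / 2 + 1;  // integer division yields a strict majority
    }
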
@@ -253,7 +312,9 @@ TEST(ReplicaSetConfig, ParseFailsWithBadOrMissingIdField) {
// Empty repl set name parses, but does not validate.
ASSERT_OK(config.initialize(BSON("_id"
<< ""
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
@@ -263,40 +324,44 @@ TEST(ReplicaSetConfig, ParseFailsWithBadOrMissingIdField) {
TEST(ReplicaSetConfig, ParseFailsWithBadOrMissingVersionField) {
ReplicaSetConfig config;
// Config version field must be present.
- ASSERT_EQUALS(
- ErrorCodes::NoSuchKey,
- config.initialize(BSON("_id"
- << "rs0"
- << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
- ASSERT_EQUALS(
- ErrorCodes::TypeMismatch,
- config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << "1"
- << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_EQUALS(ErrorCodes::NoSuchKey,
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
+ ASSERT_EQUALS(ErrorCodes::TypeMismatch,
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << "1"
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1.0 << "members"
+ << "version"
+ << 1.0
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 0.0 << "members"
+ << "version"
+ << 0.0
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "rs0"
- << "version"
- << static_cast<long long>(std::numeric_limits<int>::max()) + 1
- << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << static_cast<long long>(std::numeric_limits<int>::max()) + 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
}
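
These version-field cases make the two-phase contract visible: initialize() rejects only shape problems (missing field gives NoSuchKey, wrong type gives TypeMismatch), while range rules such as "version must be positive and fit in an int" wait for validate(), which reports BadValue. A sketch of that contract, assuming the same ReplicaSetConfig API exercised above (parseAndValidate is an illustrative helper, not from the patch):

    Status parseAndValidate(const BSONObj& configBson, ReplicaSetConfig* config) {
        Status parsed = config->initialize(configBson);  // shape/type errors only
        if (!parsed.isOK()) {
            return parsed;  // e.g. ErrorCodes::NoSuchKey, ErrorCodes::TypeMismatch
        }
        return config->validate();  // semantic/range errors, e.g. ErrorCodes::BadValue
    }
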
@@ -305,13 +370,17 @@ TEST(ReplicaSetConfig, ParseFailsWithBadMembers) {
ASSERT_EQUALS(ErrorCodes::TypeMismatch,
config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< "localhost:23456"))));
ASSERT_NOT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "localhost:12345")))));
}
@@ -320,7 +389,9 @@ TEST(ReplicaSetConfig, ParseFailsWithLocalNonLocalHostMix) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost")
<< BSON("_id" << 1 << "host"
@@ -332,10 +403,13 @@ TEST(ReplicaSetConfig, ParseFailsWithNoElectableNodes) {
ReplicaSetConfig config;
const BSONObj configBsonNoElectableNodes = BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "priority" << 0)
+ << "priority"
+ << 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
<< "priority"
@@ -344,38 +418,51 @@ TEST(ReplicaSetConfig, ParseFailsWithNoElectableNodes) {
ASSERT_OK(config.initialize(configBsonNoElectableNodes));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- const BSONObj configBsonNoElectableNodesOneArbiter =
- BSON("_id"
- << "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:1"
- << "arbiterOnly" << 1)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "priority" << 0)));
+ const BSONObj configBsonNoElectableNodesOneArbiter = BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(
+ BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "arbiterOnly"
+ << 1)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "priority"
+ << 0)));
ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- const BSONObj configBsonNoElectableNodesTwoArbiters =
- BSON("_id"
- << "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:1"
- << "arbiterOnly" << 1)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "arbiterOnly" << 1)));
+ const BSONObj configBsonNoElectableNodesTwoArbiters = BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(
+ BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "arbiterOnly"
+ << 1)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "arbiterOnly"
+ << 1)));
ASSERT_OK(config.initialize(configBsonNoElectableNodesOneArbiter));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
const BSONObj configBsonOneElectableNode = BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "priority" << 0)
+ << "priority"
+ << 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
<< "priority"
@@ -386,30 +473,42 @@ TEST(ReplicaSetConfig, ParseFailsWithNoElectableNodes) {
TEST(ReplicaSetConfig, ParseFailsWithTooFewVoters) {
ReplicaSetConfig config;
- const BSONObj configBsonNoVoters =
- BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:1"
- << "votes" << 0 << "priority" << 0)
- << BSON("_id" << 1 << "host"
- << "localhost:2"
- << "votes" << 0 << "priority" << 0)));
+ const BSONObj configBsonNoVoters = BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:1"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("_id" << 1 << "host"
+ << "localhost:2"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)));
ASSERT_OK(config.initialize(configBsonNoVoters));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
const BSONObj configBsonOneVoter = BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1"
- << "votes" << 0 << "priority"
+ << "votes"
+ << 0
+ << "priority"
<< 0)
<< BSON("_id" << 1 << "host"
<< "localhost:2"
- << "votes" << 1)));
+ << "votes"
+ << 1)));
ASSERT_OK(config.initialize(configBsonOneVoter));
ASSERT_OK(config.validate());
}
@@ -426,7 +525,9 @@ TEST(ReplicaSetConfig, ParseFailsWithDuplicateHost) {
ReplicaSetConfig config;
const BSONObj configBson = BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:1")
<< BSON("_id" << 1 << "host"
@@ -477,7 +578,9 @@ TEST(ReplicaSetConfig, ParseFailsWithUnexpectedField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "unexpectedfield"
+ << "version"
+ << 1
+ << "unexpectedfield"
<< "value"));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
}
@@ -486,7 +589,9 @@ TEST(ReplicaSetConfig, ParseFailsWithNonArrayMembersField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< "value"));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -495,11 +600,14 @@ TEST(ReplicaSetConfig, ParseFailsWithNonNumericHeartbeatIntervalMillisField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("heartbeatIntervalMillis"
- << "no")));
+ << "settings"
+ << BSON("heartbeatIntervalMillis"
+ << "no")));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
ASSERT_FALSE(config.isInitialized());
@@ -512,11 +620,14 @@ TEST(ReplicaSetConfig, ParseFailsWithNonNumericElectionTimeoutMillisField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("electionTimeoutMillis"
- << "no")));
+ << "settings"
+ << BSON("electionTimeoutMillis"
+ << "no")));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -524,11 +635,14 @@ TEST(ReplicaSetConfig, ParseFailsWithNonNumericHeartbeatTimeoutSecsField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("heartbeatTimeoutSecs"
- << "no")));
+ << "settings"
+ << BSON("heartbeatTimeoutSecs"
+ << "no")));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -536,48 +650,57 @@ TEST(ReplicaSetConfig, ParseFailsWithNonBoolChainingAllowedField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("chainingAllowed"
- << "no")));
+ << "settings"
+ << BSON("chainingAllowed"
+ << "no")));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
TEST(ReplicaSetConfig, ParseFailsWithNonBoolConfigServerField) {
ReplicaSetConfig config;
- Status status =
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "configsvr"
- << "no"));
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "configsvr"
+ << "no"));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
TEST(ReplicaSetConfig, ParseFailsWithNonObjectSettingsField) {
ReplicaSetConfig config;
- Status status =
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
- << "none"));
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings"
+ << "none"));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
TEST(ReplicaSetConfig, ParseFailsWithGetLastErrorDefaultsFieldUnparseable) {
ReplicaSetConfig config;
- Status status =
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
- << BSON("getLastErrorDefaults" << BSON("fsync"
- << "seven"))));
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings"
+ << BSON("getLastErrorDefaults" << BSON("fsync"
+ << "seven"))));
ASSERT_EQUALS(ErrorCodes::FailedToParse, status);
}
@@ -585,11 +708,14 @@ TEST(ReplicaSetConfig, ParseFailsWithNonObjectGetLastErrorDefaultsField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("getLastErrorDefaults"
- << "no")));
+ << "settings"
+ << BSON("getLastErrorDefaults"
+ << "no")));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -597,41 +723,50 @@ TEST(ReplicaSetConfig, ParseFailsWithNonObjectGetLastErrorModesField) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("getLastErrorModes"
- << "no")));
+ << "settings"
+ << BSON("getLastErrorModes"
+ << "no")));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
TEST(ReplicaSetConfig, ParseFailsWithDuplicateGetLastErrorModesField) {
ReplicaSetConfig config;
- Status status =
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "tags" << BSON("tag"
- << "yes"))) << "settings"
- << BSON("getLastErrorModes"
- << BSON("one" << BSON("tag" << 1) << "one"
- << BSON("tag" << 1)))));
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings"
+ << BSON("getLastErrorModes"
+ << BSON("one" << BSON("tag" << 1) << "one"
+ << BSON("tag" << 1)))));
ASSERT_EQUALS(ErrorCodes::DuplicateKey, status);
}
TEST(ReplicaSetConfig, ParseFailsWithNonObjectGetLastErrorModesEntryField) {
ReplicaSetConfig config;
- Status status =
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "tags" << BSON("tag"
- << "yes"))) << "settings"
- << BSON("getLastErrorModes" << BSON("one" << 1))));
+ Status status = config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings"
+ << BSON("getLastErrorModes" << BSON("one" << 1))));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
}
@@ -640,11 +775,15 @@ TEST(ReplicaSetConfig, ParseFailsWithNonNumericGetLastErrorModesConstraintValue)
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "tags" << BSON("tag"
- << "yes"))) << "settings"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings"
<< BSON("getLastErrorModes" << BSON("one" << BSON("tag"
<< "no")))));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
@@ -655,11 +794,15 @@ TEST(ReplicaSetConfig, ParseFailsWithNegativeGetLastErrorModesConstraintValue) {
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "tags" << BSON("tag"
- << "yes"))) << "settings"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings"
<< BSON("getLastErrorModes" << BSON("one" << BSON("tag" << -1)))));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
}
@@ -669,11 +812,15 @@ TEST(ReplicaSetConfig, ParseFailsWithNonExistentGetLastErrorModesConstraintTag)
Status status =
config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "tags" << BSON("tag"
- << "yes"))) << "settings"
+ << "tags"
+ << BSON("tag"
+ << "yes")))
+ << "settings"
<< BSON("getLastErrorModes" << BSON("one" << BSON("tag2" << 1)))));
ASSERT_EQUALS(ErrorCodes::NoSuchKey, status);
}
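
Taken together, these failure cases pin down the expected shape of settings.getLastErrorModes: it must be a document rather than a scalar (TypeMismatch), mode names must be unique (DuplicateKey), each mode's value must itself be a document of tag-name to count pairs (TypeMismatch for `"one" << 1`), each count must be a non-negative number (TypeMismatch for "no", BadValue for -1), and every referenced tag must exist on some member (NoSuchKey for "tag2"). A well-formed counterpart already appears in ParseLargeConfigAndCheckAccessors earlier in this file; its settings sub-document reduces to:

    // One mode, "eastCoast", satisfied by one member carrying the NYC tag.
    BSON("getLastErrorModes" << BSON("eastCoast" << BSON("NYC" << 1)))
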
@@ -682,7 +829,11 @@ TEST(ReplicaSetConfig, ValidateFailsWithBadProtocolVersion) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 3 << "version" << 1 << "members"
+ << "protocolVersion"
+ << 3
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
@@ -697,7 +848,9 @@ TEST(ReplicaSetConfig, ValidateFailsWithDuplicateMemberId) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 0 << "host"
@@ -712,10 +865,13 @@ TEST(ReplicaSetConfig, ValidateFailsWithInvalidMember) {
ReplicaSetConfig config;
Status status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "hidden" << true))));
+ << "hidden"
+ << true))));
ASSERT_OK(status);
status = config.validate();
@@ -726,18 +882,24 @@ TEST(ReplicaSetConfig, ChainingAllowedField) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("chainingAllowed" << true))));
ASSERT_OK(config.validate());
ASSERT_TRUE(config.isChainingAllowed());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("chainingAllowed" << false))));
ASSERT_OK(config.validate());
ASSERT_FALSE(config.isChainingAllowed());
@@ -745,18 +907,27 @@ TEST(ReplicaSetConfig, ChainingAllowedField) {
TEST(ReplicaSetConfig, ConfigServerField) {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
- << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_TRUE(config.isConfigServer());
ReplicaSetConfig config2;
ASSERT_OK(config2.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "configsvr" << false << "members"
+ << "version"
+ << 1
+ << "configsvr"
+ << false
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config2.isConfigServer());
@@ -779,18 +950,25 @@ TEST(ReplicaSetConfig, ConfigServerFieldDefaults) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config.isConfigServer());
ReplicaSetConfig config2;
- ASSERT_OK(
- config2.initializeForInitiate(BSON("_id"
- << "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(config2.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_FALSE(config2.isConfigServer());
serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
@@ -799,18 +977,25 @@ TEST(ReplicaSetConfig, ConfigServerFieldDefaults) {
ReplicaSetConfig config3;
ASSERT_OK(config3.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_FALSE(config3.isConfigServer());
ReplicaSetConfig config4;
- ASSERT_OK(
- config4.initializeForInitiate(BSON("_id"
- << "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")))));
+ ASSERT_OK(config4.initializeForInitiate(BSON("_id"
+ << "rs0"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")))));
ASSERT_TRUE(config4.isConfigServer());
}
@@ -818,18 +1003,24 @@ TEST(ReplicaSetConfig, HeartbeatIntervalField) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("heartbeatIntervalMillis" << 5000))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Seconds(5), config.getHeartbeatInterval());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("heartbeatIntervalMillis" << -5000))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
}
@@ -838,19 +1029,25 @@ TEST(ReplicaSetConfig, ElectionTimeoutField) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("electionTimeoutMillis" << 20))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Milliseconds(20), config.getElectionTimeoutPeriod());
auto status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("electionTimeoutMillis" << -20)));
+ << "settings"
+ << BSON("electionTimeoutMillis" << -20)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "election timeout must be greater than 0");
}
@@ -859,19 +1056,25 @@ TEST(ReplicaSetConfig, HeartbeatTimeoutField) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 20))));
ASSERT_OK(config.validate());
ASSERT_EQUALS(Seconds(20), config.getHeartbeatTimeoutPeriod());
auto status = config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "settings" << BSON("heartbeatTimeoutSecs" << -20)));
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << -20)));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "heartbeat timeout must be greater than 0");
}
@@ -880,9 +1083,12 @@ TEST(ReplicaSetConfig, GleDefaultField) {
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("getLastErrorDefaults" << BSON("w"
<< "majority")))));
ASSERT_OK(config.validate());
@@ -890,32 +1096,43 @@ TEST(ReplicaSetConfig, GleDefaultField) {
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("getLastErrorDefaults" << BSON("w"
<< "frim")))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
+ << "localhost:12345"))
+ << "settings"
<< BSON("getLastErrorDefaults" << BSON("w" << 0)))));
ASSERT_EQUALS(ErrorCodes::BadValue, config.validate());
- ASSERT_OK(config.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "tags" << BSON("a"
- << "v")))
- << "settings" << BSON("getLastErrorDefaults"
- << BSON("w"
- << "frim") << "getLastErrorModes"
- << BSON("frim" << BSON("a" << 1))))));
+ ASSERT_OK(
+ config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "tags"
+ << BSON("a"
+ << "v")))
+ << "settings"
+ << BSON("getLastErrorDefaults" << BSON("w"
+ << "frim")
+ << "getLastErrorModes"
+ << BSON("frim" << BSON("a" << 1))))));
ASSERT_OK(config.validate());
ASSERT_EQUALS("frim", config.getDefaultWriteConcern().wMode);
ASSERT_OK(config.findCustomWriteMode("frim").getStatus());
@@ -992,14 +1209,17 @@ bool operator==(const ReplicaSetConfig& a, const ReplicaSetConfig& b) {
TEST(ReplicaSetConfig, toBSONRoundTripAbility) {
ReplicaSetConfig configA;
ReplicaSetConfig configB;
- ASSERT_OK(configA.initialize(BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")) << "settings"
- << BSON("heartbeatIntervalMillis"
- << 5000 << "heartbeatTimeoutSecs" << 20
- << "replicaSetId" << OID::gen()))));
+ ASSERT_OK(configA.initialize(BSON(
+ "_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20 << "replicaSetId"
+ << OID::gen()))));
ASSERT_OK(configB.initialize(configA.toBSON()));
ASSERT_TRUE(configA == configB);
}
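
The three toBSONRoundTripAbility* tests share this skeleton: serialize with toBSON(), re-parse, and compare with the test-local operator== defined just above. A condensed sketch (kAnyParseableConfig is an illustrative stand-in for the literal documents):

    ReplicaSetConfig configA;
    ReplicaSetConfig configB;
    ASSERT_OK(configA.initialize(kAnyParseableConfig));
    ASSERT_OK(configB.initialize(configA.toBSON()));  // round-trips even configs
                                                      // that fail validate(); see
                                                      // toBSONRoundTripAbilityInvalid below
    ASSERT_TRUE(configA == configB);
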
@@ -1007,35 +1227,66 @@ TEST(ReplicaSetConfig, toBSONRoundTripAbility) {
TEST(ReplicaSetConfig, toBSONRoundTripAbilityLarge) {
ReplicaSetConfig configA;
ReplicaSetConfig configB;
- ASSERT_OK(configA.initialize(BSON(
- "_id"
- << "asdf"
- << "version" << 9 << "writeConcernMajorityJournalDefault" << true << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "arbiterOnly" << true << "votes" << 1)
- << BSON("_id" << 3 << "host"
- << "localhost:3828"
- << "arbiterOnly" << false << "hidden" << true << "buildIndexes"
- << false << "priority" << 0 << "slaveDelay" << 17 << "votes"
- << 0 << "tags" << BSON("coast"
- << "east"
- << "ssd"
- << "true"))
- << BSON("_id" << 2 << "host"
- << "foo.com:3828"
- << "votes" << 0 << "priority" << 0 << "tags"
- << BSON("coast"
- << "west"
- << "hdd"
- << "true"))) << "protocolVersion" << 0 << "settings"
-
- << BSON("heartbeatIntervalMillis"
- << 5000 << "heartbeatTimeoutSecs" << 20 << "electionTimeoutMillis" << 4
- << "chainingAllowd" << true << "getLastErrorDefaults" << BSON("w"
- << "majority")
- << "getLastErrorModes" << BSON("disks" << BSON("ssd" << 1 << "hdd" << 1) << "coasts"
- << BSON("coast" << 2))))));
+ ASSERT_OK(configA.initialize(
+ BSON("_id"
+ << "asdf"
+ << "version"
+ << 9
+ << "writeConcernMajorityJournalDefault"
+ << true
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "arbiterOnly"
+ << true
+ << "votes"
+ << 1)
+ << BSON("_id" << 3 << "host"
+ << "localhost:3828"
+ << "arbiterOnly"
+ << false
+ << "hidden"
+ << true
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0
+ << "slaveDelay"
+ << 17
+ << "votes"
+ << 0
+ << "tags"
+ << BSON("coast"
+ << "east"
+ << "ssd"
+ << "true"))
+ << BSON("_id" << 2 << "host"
+ << "foo.com:3828"
+ << "votes"
+ << 0
+ << "priority"
+ << 0
+ << "tags"
+ << BSON("coast"
+ << "west"
+ << "hdd"
+ << "true")))
+ << "protocolVersion"
+ << 0
+ << "settings"
+
+ << BSON("heartbeatIntervalMillis" << 5000 << "heartbeatTimeoutSecs" << 20
+ << "electionTimeoutMillis"
+ << 4
+ << "chainingAllowd"
+ << true
+ << "getLastErrorDefaults"
+ << BSON("w"
+ << "majority")
+ << "getLastErrorModes"
+ << BSON("disks" << BSON("ssd" << 1 << "hdd" << 1)
+ << "coasts"
+ << BSON("coast" << 2))))));
BSONObj configObjA = configA.toBSON();
// Ensure a protocolVersion does not show up if it is 0 to maintain cross version compatibility.
ASSERT_FALSE(configObjA.hasField("protocolVersion"));
@@ -1046,22 +1297,39 @@ TEST(ReplicaSetConfig, toBSONRoundTripAbilityLarge) {
TEST(ReplicaSetConfig, toBSONRoundTripAbilityInvalid) {
ReplicaSetConfig configA;
ReplicaSetConfig configB;
- ASSERT_OK(configA.initialize(
- BSON("_id"
- << ""
- << "version" << -3 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345"
- << "arbiterOnly" << true << "votes" << 0 << "priority" << 0)
- << BSON("_id" << 0 << "host"
- << "localhost:3828"
- << "arbiterOnly" << false << "buildIndexes" << false
- << "priority" << 2)
- << BSON("_id" << 2 << "host"
- << "localhost:3828"
- << "votes" << 0 << "priority" << 0)) << "settings"
- << BSON("heartbeatIntervalMillis" << -5000 << "heartbeatTimeoutSecs" << 20
- << "electionTimeoutMillis" << 2))));
+ ASSERT_OK(
+ configA.initialize(BSON("_id"
+ << ""
+ << "version"
+ << -3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345"
+ << "arbiterOnly"
+ << true
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("_id" << 0 << "host"
+ << "localhost:3828"
+ << "arbiterOnly"
+ << false
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 2)
+ << BSON("_id" << 2 << "host"
+ << "localhost:3828"
+ << "votes"
+ << 0
+ << "priority"
+ << 0))
+ << "settings"
+ << BSON("heartbeatIntervalMillis" << -5000 << "heartbeatTimeoutSecs"
+ << 20
+ << "electionTimeoutMillis"
+ << 2))));
ASSERT_OK(configB.initialize(configA.toBSON()));
ASSERT_NOT_OK(configA.validate());
ASSERT_NOT_OK(configB.validate());
@@ -1070,46 +1338,57 @@ TEST(ReplicaSetConfig, toBSONRoundTripAbilityInvalid) {
TEST(ReplicaSetConfig, CheckIfWriteConcernCanBeSatisfied) {
ReplicaSetConfig configA;
- ASSERT_OK(configA.initialize(BSON(
- "_id"
- << "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node0"
- << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA1"))
- << BSON("_id" << 1 << "host"
- << "node1"
- << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA2"))
- << BSON("_id" << 2 << "host"
- << "node2"
- << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA3"))
- << BSON("_id" << 3 << "host"
- << "node3"
- << "tags" << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU1"))
- << BSON("_id" << 4 << "host"
- << "node4"
- << "tags" << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU2"))
- << BSON("_id" << 5 << "host"
- << "node5"
- << "arbiterOnly" << true))
- << "settings" << BSON("getLastErrorModes"
- << BSON("valid" << BSON("dc" << 2 << "rack" << 3)
- << "invalidNotEnoughValues" << BSON("dc" << 3)
- << "invalidNotEnoughNodes" << BSON("rack" << 6))))));
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node0"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA1"))
+ << BSON("_id" << 1 << "host"
+ << "node1"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA2"))
+ << BSON("_id" << 2 << "host"
+ << "node2"
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA3"))
+ << BSON("_id" << 3 << "host"
+ << "node3"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU1"))
+ << BSON("_id" << 4 << "host"
+ << "node4"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU2"))
+ << BSON("_id" << 5 << "host"
+ << "node5"
+ << "arbiterOnly"
+ << true))
+ << "settings"
+ << BSON("getLastErrorModes"
+ << BSON("valid" << BSON("dc" << 2 << "rack" << 3)
+ << "invalidNotEnoughValues"
+ << BSON("dc" << 3)
+ << "invalidNotEnoughNodes"
+ << BSON("rack" << 6))))));
WriteConcernOptions validNumberWC;
validNumberWC.wNumNodes = 5;
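
Read together with the member tags above, the mode names explain the expected outcomes: the set has two distinct "dc" values (NA on nodes 0-2, EU on nodes 3-4) and five distinct "rack" values, while node 5, the arbiter, is untagged. So "valid" ({dc: 2, rack: 3}) is satisfiable, "invalidNotEnoughValues" ({dc: 3}) asks for a third datacenter no member provides, and "invalidNotEnoughNodes" ({rack: 6}) asks for one more distinct rack than exists. This is a reading of the test data, not text from the patch.
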
@@ -1170,13 +1449,19 @@ TEST(ReplicaSetConfig, CheckConfigServerCantBeProtocolVersion0) {
ReplicaSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 0 << "version" << 1 << "configsvr"
- << true << "members"
+ << "protocolVersion"
+ << 0
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "arbiterOnly" << true)))));
+ << "arbiterOnly"
+ << true)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "cannot run in protocolVersion 0");
@@ -1186,13 +1471,19 @@ TEST(ReplicaSetConfig, CheckConfigServerCantHaveArbiters) {
ReplicaSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr"
- << true << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "arbiterOnly" << true)))));
+ << "arbiterOnly"
+ << true)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "Arbiters are not allowed");
@@ -1202,14 +1493,21 @@ TEST(ReplicaSetConfig, CheckConfigServerMustBuildIndexes) {
ReplicaSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr"
- << true << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority" << 0
- << "buildIndexes" << false)))));
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "must build indexes");
@@ -1217,16 +1515,23 @@ TEST(ReplicaSetConfig, CheckConfigServerMustBuildIndexes) {
TEST(ReplicaSetConfig, CheckConfigServerCantHaveSlaveDelay) {
ReplicaSetConfig configA;
- ASSERT_OK(
- configA.initialize(BSON("_id"
- << "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
- << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")
- << BSON("_id" << 1 << "host"
- << "localhost:54321"
- << "priority" << 0
- << "slaveDelay" << 3)))));
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")
+ << BSON("_id" << 1 << "host"
+ << "localhost:54321"
+ << "priority"
+ << 0
+ << "slaveDelay"
+ << 3)))));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "cannot have a non-zero slaveDelay");
@@ -1236,15 +1541,21 @@ TEST(ReplicaSetConfig, CheckConfigServerMustHaveTrueForWriteConcernMajorityJourn
serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
ON_BLOCK_EXIT([&] { serverGlobalParams.clusterRole = ClusterRole::None; });
ReplicaSetConfig configA;
- ASSERT_OK(
- configA.initialize(BSON("_id"
- << "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
- << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "localhost:12345")
- << BSON("_id" << 1 << "host"
- << "localhost:54321"))
- << "writeConcernMajorityJournalDefault" << false)));
+ ASSERT_OK(configA.initialize(BSON("_id"
+ << "rs0"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "localhost:12345")
+ << BSON("_id" << 1 << "host"
+ << "localhost:54321"))
+ << "writeConcernMajorityJournalDefault"
+ << false)));
Status status = configA.validate();
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), " must be true in replica set configurations being ");
@@ -1254,22 +1565,30 @@ TEST(ReplicaSetConfig, GetPriorityTakeoverDelay) {
ReplicaSetConfig configA;
ASSERT_OK(configA.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1)
+ << "priority"
+ << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority" << 2)
+ << "priority"
+ << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority" << 3)
+ << "priority"
+ << 3)
<< BSON("_id" << 3 << "host"
<< "localhost:5421"
- << "priority" << 4)
+ << "priority"
+ << 4)
<< BSON("_id" << 4 << "host"
<< "localhost:5431"
- << "priority" << 5)) << "settings"
+ << "priority"
+ << 5))
+ << "settings"
<< BSON("electionTimeoutMillis" << 1000))));
ASSERT_OK(configA.validate());
ASSERT_EQUALS(Milliseconds(5000), configA.getPriorityTakeoverDelay(0));
@@ -1281,22 +1600,30 @@ TEST(ReplicaSetConfig, GetPriorityTakeoverDelay) {
ReplicaSetConfig configB;
ASSERT_OK(configB.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1)
+ << "priority"
+ << 1)
<< BSON("_id" << 1 << "host"
<< "localhost:54321"
- << "priority" << 2)
+ << "priority"
+ << 2)
<< BSON("_id" << 2 << "host"
<< "localhost:5321"
- << "priority" << 2)
+ << "priority"
+ << 2)
<< BSON("_id" << 3 << "host"
<< "localhost:5421"
- << "priority" << 3)
+ << "priority"
+ << 3)
<< BSON("_id" << 4 << "host"
<< "localhost:5431"
- << "priority" << 3)) << "settings"
+ << "priority"
+ << 3))
+ << "settings"
<< BSON("electionTimeoutMillis" << 1000))));
ASSERT_OK(configB.validate());
ASSERT_EQUALS(Milliseconds(5000), configB.getPriorityTakeoverDelay(0));
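
Only the delay(0) assertions are visible in these hunks; both are consistent with a rank-scaled delay of the form

    // Inference from the visible data, not a formula stated in the patch:
    // delay(i) = (membersWithStrictlyHigherPriority(i) + 1) * electionTimeoutMillis
    // Member 0 has four higher-priority peers in each config, so
    // (4 + 1) * 1000 ms == 5000 ms.

The remaining assertions of these tests fall outside the hunk context, so treat this as a hedged reading rather than the documented behavior.
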
@@ -1311,7 +1638,9 @@ TEST(ReplicaSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajority
ReplicaSetConfig config;
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
@@ -1321,10 +1650,13 @@ TEST(ReplicaSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajority
// Should be able to set it true in PV0.
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "writeConcernMajorityJournalDefault" << true)));
+ << "writeConcernMajorityJournalDefault"
+ << true)));
ASSERT_OK(config.validate());
ASSERT_TRUE(config.getWriteConcernMajorityShouldJournal());
ASSERT_TRUE(config.toBSON().hasField("writeConcernMajorityJournalDefault"));
@@ -1332,7 +1664,11 @@ TEST(ReplicaSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajority
// PV1, should default to true.
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345")))));
ASSERT_OK(config.validate());
@@ -1342,10 +1678,15 @@ TEST(ReplicaSetConfig, ConfirmDefaultValuesOfAndAbilityToSetWriteConcernMajority
// Should be able to set it false in PV1.
ASSERT_OK(config.initialize(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"))
- << "writeConcernMajorityJournalDefault" << false)));
+ << "writeConcernMajorityJournalDefault"
+ << false)));
ASSERT_OK(config.validate());
ASSERT_FALSE(config.getWriteConcernMajorityShouldJournal());
ASSERT_TRUE(config.toBSON().hasField("writeConcernMajorityJournalDefault"));
@@ -1360,10 +1701,13 @@ TEST(ReplicaSetConfig, ReplSetId) {
auto status =
ReplicaSetConfig().initializeForInitiate(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1))
+ << "priority"
+ << 1))
<< "settings"
<< BSON("replicaSetId" << OID::gen())));
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, status);
@@ -1377,10 +1721,13 @@ TEST(ReplicaSetConfig, ReplSetId) {
ASSERT_OK(
configInitiate.initializeForInitiate(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1)))));
+ << "priority"
+ << 1)))));
ASSERT_OK(configInitiate.validate());
ASSERT_TRUE(configInitiate.hasReplicaSetId());
OID replicaSetId = configInitiate.getReplicaSetId();
@@ -1389,11 +1736,15 @@ TEST(ReplicaSetConfig, ReplSetId) {
ReplicaSetConfig configLocal;
ASSERT_OK(configLocal.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1))
- << "settings" << BSON("replicaSetId" << replicaSetId))));
+ << "priority"
+ << 1))
+ << "settings"
+ << BSON("replicaSetId" << replicaSetId))));
ASSERT_OK(configLocal.validate());
ASSERT_TRUE(configLocal.hasReplicaSetId());
ASSERT_EQUALS(replicaSetId, configLocal.getReplicaSetId());
@@ -1402,10 +1753,13 @@ TEST(ReplicaSetConfig, ReplSetId) {
OID defaultReplicaSetId = OID::gen();
ASSERT_OK(configLocal.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1))),
+ << "priority"
+ << 1))),
true,
defaultReplicaSetId));
ASSERT_OK(configLocal.validate());
@@ -1415,10 +1769,14 @@ TEST(ReplicaSetConfig, ReplSetId) {
// 'replicaSetId' field cannot be null.
status = configLocal.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1)) << "settings"
+ << "priority"
+ << 1))
+ << "settings"
<< BSON("replicaSetId" << OID())));
ASSERT_EQUALS(ErrorCodes::BadValue, status);
ASSERT_STRING_CONTAINS(status.reason(), "replicaSetId field value cannot be null");
@@ -1426,10 +1784,14 @@ TEST(ReplicaSetConfig, ReplSetId) {
// 'replicaSetId' field must be an OID.
status = configLocal.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "localhost:12345"
- << "priority" << 1)) << "settings"
+ << "priority"
+ << 1))
+ << "settings"
<< BSON("replicaSetId" << 12345)));
ASSERT_EQUALS(ErrorCodes::TypeMismatch, status);
ASSERT_STRING_CONTAINS(status.reason(),
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index e705fd17c9d..292f0ef9ae4 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -53,18 +53,18 @@
#include "mongo/db/repl/oplog.h"
#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/repl/rs_sync.h"
#include "mongo/db/repl/rs_initialsync.h"
+#include "mongo/db/repl/rs_sync.h"
#include "mongo/db/repl/snapshot_thread.h"
#include "mongo/db/repl/storage_interface.h"
-#include "mongo/db/server_parameters.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/sharding_state_recovery.h"
+#include "mongo/db/server_parameters.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/executor/network_interface.h"
-#include "mongo/s/grid.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/grid.h"
#include "mongo/stdx/functional.h"
#include "mongo/stdx/memory.h"
#include "mongo/stdx/thread.h"
@@ -350,12 +350,15 @@ StatusWith<OpTime> ReplicationCoordinatorExternalStateImpl::loadLastOpTime(Opera
if (tsElement.eoo()) {
return StatusWith<OpTime>(ErrorCodes::NoSuchKey,
str::stream() << "Most recent entry in " << rsOplogName
- << " missing \"" << tsFieldName << "\" field");
+ << " missing \""
+ << tsFieldName
+ << "\" field");
}
if (tsElement.type() != bsonTimestamp) {
return StatusWith<OpTime>(ErrorCodes::TypeMismatch,
str::stream() << "Expected type of \"" << tsFieldName
- << "\" in most recent " << rsOplogName
+ << "\" in most recent "
+ << rsOplogName
<< " entry to have type Timestamp, but found "
<< typeName(tsElement.type()));
}
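
The same operand-per-line rule drives this hunk: str::stream() builds an error string through chained operator<<, and the formatter now treats it like any other wrapped chain. A standalone sketch, assuming str::stream is in scope as it is in this file (the variable msg is illustrative):

    // Each operand lands on its own line once the expression wraps.
    std::string msg = str::stream() << "Expected type of \"" << tsFieldName
                                    << "\" in most recent "
                                    << rsOplogName
                                    << " entry to have type Timestamp";
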
@@ -410,8 +413,8 @@ void ReplicationCoordinatorExternalStateImpl::updateShardIdentityConfigString(
if (ShardingState::get(txn)->enabled()) {
const auto configsvrConnStr =
Grid::get(txn)->shardRegistry()->getConfigShard()->getConnString();
- auto status = ShardingState::get(txn)
- ->updateShardIdentityConfigString(txn, configsvrConnStr.toString());
+ auto status = ShardingState::get(txn)->updateShardIdentityConfigString(
+ txn, configsvrConnStr.toString());
if (!status.isOK()) {
warning() << "error encountered while trying to update config connection string to "
<< configsvrConnStr << causedBy(status);
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index c33326031b7..8b89de49f60 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -203,18 +203,21 @@ ReplicationCoordinator::Mode getReplicationModeFromSettings(const ReplSettings&
DataReplicatorOptions createDataReplicatorOptions(ReplicationCoordinator* replCoord) {
DataReplicatorOptions options;
- options.rollbackFn =
- [](OperationContext*, const OpTime&, const HostAndPort&) -> Status { return Status::OK(); };
+ options.rollbackFn = [](OperationContext*, const OpTime&, const HostAndPort&) -> Status {
+ return Status::OK();
+ };
options.prepareReplSetUpdatePositionCommandFn =
[replCoord](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle commandStyle)
- -> StatusWith<BSONObj> {
- return replCoord->prepareReplSetUpdatePositionCommand(commandStyle);
- };
+ -> StatusWith<BSONObj> {
+ return replCoord->prepareReplSetUpdatePositionCommand(commandStyle);
+ };
options.getMyLastOptime = [replCoord]() { return replCoord->getMyLastAppliedOpTime(); };
- options.setMyLastOptime =
- [replCoord](const OpTime& opTime) { replCoord->setMyLastAppliedOpTime(opTime); };
- options.setFollowerMode =
- [replCoord](const MemberState& newState) { return replCoord->setFollowerMode(newState); };
+ options.setMyLastOptime = [replCoord](const OpTime& opTime) {
+ replCoord->setMyLastAppliedOpTime(opTime);
+ };
+ options.setFollowerMode = [replCoord](const MemberState& newState) {
+ return replCoord->setFollowerMode(newState);
+ };
options.getSlaveDelay = [replCoord]() { return replCoord->getSlaveDelaySecs(); };
options.syncSourceSelector = replCoord;
options.replBatchLimitBytes = dur::UncommittedBytesLimit;
@@ -367,8 +370,8 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* txn) {
if (!status.isOK()) {
error() << "Locally stored replica set configuration does not parse; See "
"http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
- "for information on how to recover from this. Got \"" << status
- << "\" while parsing " << cfg.getValue();
+ "for information on how to recover from this. Got \""
+ << status << "\" while parsing " << cfg.getValue();
fassertFailedNoTrace(28545);
}
@@ -417,8 +420,8 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
} else {
error() << "Locally stored replica set configuration is invalid; See "
"http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config"
- " for information on how to recover from this. Got \"" << myIndex.getStatus()
- << "\" while validating " << localConfig.toBSON();
+ " for information on how to recover from this. Got \""
+ << myIndex.getStatus() << "\" while validating " << localConfig.toBSON();
fassertFailedNoTrace(28544);
}
}
@@ -603,7 +606,8 @@ Status ReplicationCoordinatorImpl::waitForMemberState(MemberState expectedState,
if (!_memberStateChange.wait_for(lk, timeout.toSystemDuration(), pred)) {
return Status(ErrorCodes::ExceededTimeLimit,
str::stream() << "Timed out waiting for state to become "
- << expectedState.toString() << ". Current state is "
+ << expectedState.toString()
+ << ". Current state is "
<< _memberState.toString());
}
return Status::OK();
@@ -835,7 +839,8 @@ void ReplicationCoordinatorImpl::_updateSlaveInfoDurableOpTime_inlock(SlaveInfo*
if (slaveInfo->lastAppliedOpTime < opTime) {
log() << "Durable progress (" << opTime << ") is ahead of the applied progress ("
<< slaveInfo->lastAppliedOpTime << ". This is likely due to a "
- "rollback. slaveInfo: " << slaveInfo->toString();
+ "rollback. slaveInfo: "
+ << slaveInfo->toString();
return;
}
slaveInfo->lastDurableOpTime = opTime;
@@ -1009,9 +1014,9 @@ void ReplicationCoordinatorImpl::_setMyLastDurableOpTime_inlock(const OpTime& op
// lastAppliedOpTime cannot be behind lastDurableOpTime.
if (mySlaveInfo->lastAppliedOpTime < opTime) {
log() << "My durable progress (" << opTime << ") is ahead of my applied progress ("
- << mySlaveInfo->lastAppliedOpTime
- << ". This is likely due to a "
- "rollback. slaveInfo: " << mySlaveInfo->toString();
+ << mySlaveInfo->lastAppliedOpTime << ". This is likely due to a "
+ "rollback. slaveInfo: "
+ << mySlaveInfo->toString();
return;
}
_updateSlaveInfoDurableOpTime_inlock(mySlaveInfo, opTime);
@@ -2927,21 +2932,24 @@ SyncSourceResolverResponse ReplicationCoordinatorImpl::selectSyncSource(
// Candidate found.
Status queryStatus(ErrorCodes::NotYetInitialized, "not mutated");
BSONObj firstObjFound;
- auto work =
- [&firstObjFound, &queryStatus](const StatusWith<Fetcher::QueryResponse>& queryResult,
-                                       NextAction* nextAction,
- BSONObjBuilder* bob) {
- queryStatus = queryResult.getStatus();
- if (queryResult.isOK() && !queryResult.getValue().documents.empty()) {
- firstObjFound = queryResult.getValue().documents.front();
- }
- };
+ auto work = [&firstObjFound,
+ &queryStatus](const StatusWith<Fetcher::QueryResponse>& queryResult,
+                               NextAction* nextAction,
+ BSONObjBuilder* bob) {
+ queryStatus = queryResult.getStatus();
+ if (queryResult.isOK() && !queryResult.getValue().documents.empty()) {
+ firstObjFound = queryResult.getValue().documents.front();
+ }
+ };
Fetcher candidateProber(&_replExecutor,
candidate,
"local",
BSON("find"
<< "oplog.rs"
- << "limit" << 1 << "sort" << BSON("$natural" << 1)),
+ << "limit"
+ << 1
+ << "sort"
+ << BSON("$natural" << 1)),
work,
rpc::ServerSelectionMetadata(true, boost::none).toBSON(),
Milliseconds(30000));
@@ -3414,8 +3422,9 @@ void ReplicationCoordinatorImpl::_resetElectionInfoOnProtocolVersionUpgrade(
}
CallbackHandle ReplicationCoordinatorImpl::_scheduleWork(const CallbackFn& work) {
- auto scheduleFn =
- [this](const CallbackFn& workWrapped) { return _replExecutor.scheduleWork(workWrapped); };
+ auto scheduleFn = [this](const CallbackFn& workWrapped) {
+ return _replExecutor.scheduleWork(workWrapped);
+ };
return _wrapAndScheduleWork(scheduleFn, work);
}
@@ -3440,8 +3449,9 @@ void ReplicationCoordinatorImpl::_scheduleWorkAtAndWaitForCompletion(Date_t when
}
CallbackHandle ReplicationCoordinatorImpl::_scheduleDBWork(const CallbackFn& work) {
- auto scheduleFn =
- [this](const CallbackFn& workWrapped) { return _replExecutor.scheduleDBWork(workWrapped); };
+ auto scheduleFn = [this](const CallbackFn& workWrapped) {
+ return _replExecutor.scheduleDBWork(workWrapped);
+ };
return _wrapAndScheduleWork(scheduleFn, work);
}
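The lambda hunks in this file all follow one rule: an assignment such as options.setMyLastOptime = [replCoord](...) { ... }; stays on a single line only while the whole statement fits the column limit; otherwise the capture list and parameters stay on the assignment line and the body is broken onto its own lines. A self-contained sketch with hypothetical names:

// The short lambda fits and stays inline; the longer one is
// reflowed with its body on separate lines, as in the hunks above.
#include <functional>

struct Options {
    std::function<int(int)> transform;
};

int main() {
    Options opts;
    opts.transform = [](int x) { return x + 1; };  // fits: stays inline
    opts.transform = [](int someRatherLongParameterName) {
        return someRatherLongParameterName * someRatherLongParameterName;
    };
    return opts.transform(3) == 9 ? 0 : 1;
}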
diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h
index 53f0ffde4c3..61fda88b59a 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.h
+++ b/src/mongo/db/repl/replication_coordinator_impl.h
@@ -28,9 +28,9 @@
#pragma once
-#include <vector>
#include <memory>
#include <utility>
+#include <vector>
#include "mongo/base/status.h"
#include "mongo/bson/timestamp.h"
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
index a70c8963af8..494b8f7a0d6 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect.cpp
@@ -31,10 +31,10 @@
#include "mongo/platform/basic.h"
#include "mongo/base/disallow_copying.h"
-#include "mongo/db/repl/replication_coordinator_impl.h"
-#include "mongo/db/repl/topology_coordinator_impl.h"
#include "mongo/db/repl/elect_cmd_runner.h"
#include "mongo/db/repl/freshness_checker.h"
+#include "mongo/db/repl/replication_coordinator_impl.h"
+#include "mongo/db/repl/topology_coordinator_impl.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
index a414ac0aabb..e3779fd3dee 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_test.cpp
@@ -77,7 +77,8 @@ void ReplCoordElectTest::simulateFreshEnoughForElectability() {
net->now(),
makeResponseStatus(BSON("ok" << 1 << "fresher" << false << "opTime"
<< Date_t::fromMillisSinceEpoch(Timestamp(0, 0).asLL())
- << "veto" << false)));
+ << "veto"
+ << false)));
} else {
error() << "Black holing unexpected request to " << request.target << ": "
<< request.cmdObj;
@@ -95,7 +96,9 @@ TEST_F(ReplCoordElectTest, StartElectionDoesNotStartAnElectionWhenNodeHasNoOplog
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -112,16 +115,22 @@ TEST_F(ReplCoordElectTest, StartElectionDoesNotStartAnElectionWhenNodeHasNoOplog
* vote(s) to win.
*/
TEST_F(ReplCoordElectTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"
- << "votes" << 0 << "hidden" << true << "priority" << 0))),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"
+ << "votes"
+ << 0
+ << "hidden"
+ << true
+ << "priority"
+ << 0))),
+ HostAndPort("node1", 12345));
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
// Fake OpTime from initiate, or a write op.
@@ -166,7 +175,9 @@ TEST_F(ReplCoordElectTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
@@ -197,7 +208,9 @@ TEST_F(ReplCoordElectTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
TEST_F(ReplCoordElectTest, ElectionSucceedsWhenAllNodesVoteYea) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -221,7 +234,9 @@ TEST_F(ReplCoordElectTest, ElectionFailsWhenOneNodeVotesNay) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -266,7 +281,9 @@ TEST_F(ReplCoordElectTest, VotesWithStringValuesAreNotCountedAsYeas) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -298,7 +315,8 @@ TEST_F(ReplCoordElectTest, VotesWithStringValuesAreNotCountedAsYeas) {
net->now(),
makeResponseStatus(BSON("ok" << 1 << "vote"
<< "yea"
- << "round" << OID())));
+ << "round"
+ << OID())));
}
net->runReadyNetworkOperations();
}
@@ -311,7 +329,9 @@ TEST_F(ReplCoordElectTest, VotesWithStringValuesAreNotCountedAsYeas) {
TEST_F(ReplCoordElectTest, ElectionsAbortWhenNodeTransitionsToRollbackState) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -344,19 +364,22 @@ TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
// start up, receive reconfig via heartbeat while at the same time, become candidate.
// candidate state should be cleared.
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345") << BSON("_id" << 3 << "host"
- << "node3:12345")
- << BSON("_id" << 4 << "host"
- << "node4:12345") << BSON("_id" << 5 << "host"
- << "node5:12345"))),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 4 << "host"
+ << "node4:12345")
+ << BSON("_id" << 5 << "host"
+ << "node5:12345"))),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
@@ -370,7 +393,9 @@ TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
ReplicaSetConfig config;
config.initialize(BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -428,19 +453,21 @@ TEST_F(ReplCoordElectTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
stopCapturingLogMessages();
// ensure node does not stand for election
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "Not standing for election; processing "
- "a configuration change"));
+ countLogLinesContaining("Not standing for election; processing "
+ "a configuration change"));
getExternalState()->setStoreLocalConfigDocumentToHang(false);
}
TEST_F(ReplCoordElectTest, StepsDownRemoteIfNodeHasHigherPriorityThanCurrentPrimary) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority" << 2)
+ << "priority"
+ << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
@@ -501,23 +528,27 @@ TEST_F(ReplCoordElectTest, StepsDownRemoteIfNodeHasHigherPriorityThanCurrentPrim
net->exitNetwork();
ASSERT_EQUALS(1,
countLogLinesContaining(str::stream() << "stepdown of primary("
- << target.toString() << ") succeeded"));
+ << target.toString()
+ << ") succeeded"));
}
TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringFreshnessCheckingPhase) {
// Start up and become electable.
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 3 << "host"
- << "node3:12345") << BSON("_id" << 2 << "host"
- << "node2:12345"))
- << "settings" << BSON("heartbeatIntervalMillis" << 100)),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis" << 100)),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 0), 0));
@@ -542,10 +573,13 @@ TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringFresh
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version" << 4 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))),
+ << "version"
+ << 4
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
true};
BSONObjBuilder result;
@@ -560,17 +594,20 @@ TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringFresh
TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringElectionPhase) {
// Start up and become electable.
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 3 << "host"
- << "node3:12345") << BSON("_id" << 2 << "host"
- << "node2:12345"))
- << "settings" << BSON("heartbeatIntervalMillis" << 100)),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis" << 100)),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 0), 0));
@@ -582,10 +619,13 @@ TEST_F(ReplCoordElectTest, NodeCancelsElectionUponReceivingANewConfigDuringElect
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version" << 4 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))),
+ << "version"
+ << 4
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
true};
BSONObjBuilder result;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
index beaf238fcef..100f44d4156 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp
@@ -53,17 +53,24 @@ using executor::RemoteCommandRequest;
using executor::RemoteCommandResponse;
TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"
- << "votes" << 0 << "hidden" << true << "priority" << 0))
- << "protocolVersion" << 1),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"
+ << "votes"
+ << 0
+ << "hidden"
+ << true
+ << "priority"
+ << 0))
+ << "protocolVersion"
+ << 1),
+ HostAndPort("node1", 12345));
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
@@ -117,11 +124,14 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) {
TEST_F(ReplCoordTest, StartElectionDoesNotStartAnElectionWhenNodeIsRecovering) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
- << "node2:12345")) << "protocolVersion"
+ << "node2:12345"))
+ << "protocolVersion"
<< 1),
HostAndPort("node1", 12345));
@@ -142,9 +152,13 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")) << "protocolVersion" << 1),
+ << "node1:12345"))
+ << "protocolVersion"
+ << 1),
HostAndPort("node1", 12345));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(10, 0), 0));
@@ -172,13 +186,16 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) {
TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
OperationContextNoop txn;
@@ -202,20 +219,25 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) {
TEST_F(ReplCoordTest, ElectionSucceedsWhenMaxSevenNodesVoteYea) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(
- BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345") << BSON("_id" << 3 << "host"
- << "node3:12345")
- << BSON("_id" << 4 << "host"
- << "node4:12345") << BSON("_id" << 5 << "host"
- << "node5:12345")
- << BSON("_id" << 6 << "host"
- << "node6:12345") << BSON("_id" << 7 << "host"
- << "node7:12345"))
- << "protocolVersion" << 1);
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 4 << "host"
+ << "node4:12345")
+ << BSON("_id" << 5 << "host"
+ << "node5:12345")
+ << BSON("_id" << 6 << "host"
+ << "node6:12345")
+ << BSON("_id" << 7 << "host"
+ << "node7:12345"))
+ << "protocolVersion"
+ << 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
OperationContextNoop txn;
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0));
@@ -239,13 +261,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun)
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -278,9 +303,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun)
} else {
net->scheduleResponse(noi,
net->now(),
- makeResponseStatus(BSON("ok" << 1 << "term" << 0 << "voteGranted"
- << false << "reason"
- << "don't like him much")));
+ makeResponseStatus(BSON(
+ "ok" << 1 << "term" << 0 << "voteGranted" << false << "reason"
+ << "don't like him much")));
voteRequests++;
}
net->runReadyNetworkOperations();
@@ -295,13 +320,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -336,7 +364,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) {
noi,
net->now(),
makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long() + 1
- << "voteGranted" << false << "reason"
+ << "voteGranted"
+ << false
+ << "reason"
<< "quit living in the past")));
voteRequests++;
}
@@ -353,20 +383,24 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
// start up, receive reconfig via heartbeat while at the same time, become candidate.
// candidate state should be cleared.
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345") << BSON("_id" << 3 << "host"
- << "node3:12345")
- << BSON("_id" << 4 << "host"
- << "node4:12345") << BSON("_id" << 5 << "host"
- << "node5:12345"))
- << "protocolVersion" << 1),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 4 << "host"
+ << "node4:12345")
+ << BSON("_id" << 5 << "host"
+ << "node5:12345"))
+ << "protocolVersion"
+ << 1),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 0), 0));
@@ -381,11 +415,14 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
ReplicaSetConfig config;
config.initialize(BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
- << "node2:12345")) << "protocolVersion"
+ << "node2:12345"))
+ << "protocolVersion"
<< 1));
hbResp2.setConfig(config);
hbResp2.setConfigVersion(3);
@@ -451,9 +488,8 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) {
stopCapturingLogMessages();
// ensure node does not stand for election
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "Not standing for election; processing "
- "a configuration change"));
+ countLogLinesContaining("Not standing for election; processing "
+ "a configuration change"));
getExternalState()->setStoreLocalConfigDocumentToHang(false);
}
@@ -461,13 +497,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -492,9 +531,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
} else {
net->scheduleResponse(noi,
net->now(),
- makeResponseStatus(BSON("ok" << 1 << "term" << 1 << "voteGranted"
- << false << "reason"
- << "don't like him much")));
+ makeResponseStatus(BSON(
+ "ok" << 1 << "term" << 1 << "voteGranted" << false << "reason"
+ << "don't like him much")));
}
net->runReadyNetworkOperations();
}
@@ -509,13 +548,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest
TEST_F(ReplCoordTest, ElectionsAbortWhenNodeTransitionsToRollbackState) {
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -544,13 +586,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenVoteRequestResponseContainsANewerTerm) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -577,7 +622,9 @@ TEST_F(ReplCoordTest, ElectionFailsWhenVoteRequestResponseContainsANewerTerm) {
noi,
net->now(),
makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long() + 1
- << "voteGranted" << false << "reason"
+ << "voteGranted"
+ << false
+ << "reason"
<< "quit living in the past")));
}
net->runReadyNetworkOperations();
@@ -594,13 +641,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringDryRun) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
@@ -631,13 +681,16 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -665,9 +718,10 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) {
net->scheduleResponse(
noi,
net->now(),
- makeResponseStatus(BSON("ok" << 1 << "term" << request.cmdObj["term"].Long()
- << "voteGranted" << true << "reason"
- << "")));
+ makeResponseStatus(BSON(
+ "ok" << 1 << "term" << request.cmdObj["term"].Long() << "voteGranted" << true
+ << "reason"
+ << "")));
}
net->runReadyNetworkOperations();
}
@@ -682,14 +736,18 @@ TEST_F(ReplCoordTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurren
startCapturingLogMessages();
BSONObj configObj = BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "priority" << 2)
+ << "priority"
+ << 2)
<< BSON("_id" << 2 << "host"
<< "node2:12345")
<< BSON("_id" << 3 << "host"
- << "node3:12345")) << "protocolVersion"
+ << "node3:12345"))
+ << "protocolVersion"
<< 1);
assertStartSuccess(configObj, HostAndPort("node1", 12345));
ReplicaSetConfig config = assertMakeRSConfig(configObj);
@@ -746,17 +804,20 @@ TEST_F(ReplCoordTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurren
TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
// Start up and become electable.
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 3 << "host"
- << "node3:12345") << BSON("_id" << 2 << "host"
- << "node2:12345"))
- << "settings" << BSON("heartbeatIntervalMillis" << 100)),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis" << 100)),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 0), 0));
@@ -779,10 +840,13 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version" << 4 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))),
+ << "version"
+ << 4
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
true};
BSONObjBuilder result;
@@ -797,17 +861,20 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) {
TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringVotePhase) {
// Start up and become electable.
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 3 << "host"
- << "node3:12345") << BSON("_id" << 2 << "host"
- << "node2:12345"))
- << "settings" << BSON("heartbeatIntervalMillis" << 100)),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 3 << "host"
+ << "node3:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))
+ << "settings"
+ << BSON("heartbeatIntervalMillis" << 100)),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 0), 0));
@@ -819,10 +886,13 @@ TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringVotePhase)
ReplicationCoordinatorImpl::ReplSetReconfigArgs config = {
BSON("_id"
<< "mySet"
- << "version" << 4 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "node1:12345")
- << BSON("_id" << 2 << "host"
- << "node2:12345"))),
+ << "version"
+ << 4
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "node1:12345")
+ << BSON("_id" << 2 << "host"
+ << "node2:12345"))),
true};
BSONObjBuilder result;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index d9b9297bfb7..4c8945ac064 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -146,11 +146,11 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
if (replMetadata.isOK() && _rsConfig.isInitialized() && _rsConfig.hasReplicaSetId() &&
replMetadata.getValue().getReplicaSetId().isSet() &&
_rsConfig.getReplicaSetId() != replMetadata.getValue().getReplicaSetId()) {
- responseStatus =
- Status(ErrorCodes::InvalidReplicaSetConfig,
- str::stream()
- << "replica set IDs do not match, ours: " << _rsConfig.getReplicaSetId()
- << "; remote node's: " << replMetadata.getValue().getReplicaSetId());
+ responseStatus = Status(ErrorCodes::InvalidReplicaSetConfig,
+ str::stream() << "replica set IDs do not match, ours: "
+ << _rsConfig.getReplicaSetId()
+ << "; remote node's: "
+ << replMetadata.getValue().getReplicaSetId());
// Ignore metadata.
replMetadata = responseStatus;
}
@@ -435,14 +435,16 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
if (!myIndex.getStatus().isOK() && myIndex.getStatus() != ErrorCodes::NodeNotFound) {
warning() << "Not persisting new configuration in heartbeat response to disk because "
- "it is invalid: " << myIndex.getStatus();
+ "it is invalid: "
+ << myIndex.getStatus();
} else {
Status status = _externalState->storeLocalConfigDocument(cbd.txn, newConfig.toBSON());
lk.lock();
if (!status.isOK()) {
error() << "Ignoring new configuration in heartbeat response because we failed to"
- " write it to stable storage; " << status;
+ " write it to stable storage; "
+ << status;
invariant(_rsConfigState == kConfigHBReconfiguring);
if (_rsConfig.isInitialized()) {
_setConfigState_inlock(kConfigSteady);
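The warning() and error() hunks above move the tail of a split message string down next to the streamed value. Adjacent C++ string literals are concatenated at compile time, so this too is layout-only. A minimal sketch, with the message text paraphrased from the hunk above:

#include <iostream>

int main() {
    // One string literal written across two source lines; the
    // compiler joins the pieces before anything runs.
    const char* msg =
        "Not persisting new configuration to disk because "
        "it is invalid";
    std::cout << msg << '\n';
    return 0;
}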
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
index 73c9e77c77c..91697d5ad01 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_test.cpp
@@ -90,7 +90,9 @@ TEST_F(ReplCoordHBTest, NodeJoinsExistingReplSetWhenReceivingAConfigContainingTh
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
ReplicaSetConfig rsConfig = assertMakeRSConfigV0(BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -154,7 +156,9 @@ TEST_F(ReplCoordHBTest,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
ReplicaSetConfig rsConfig = assertMakeRSConfigV0(BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "h1:1")
<< BSON("_id" << 2 << "host"
@@ -230,7 +234,9 @@ TEST_F(ReplCoordHBTest,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -243,12 +249,12 @@ TEST_F(ReplCoordHBTest,
const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
log() << request.target.toString() << " processing " << request.cmdObj;
- getNet()->scheduleResponse(
- noi,
- getNet()->now(),
- makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
- << "unauth'd"
- << "code" << ErrorCodes::Unauthorized)));
+ getNet()->scheduleResponse(noi,
+ getNet()->now(),
+ makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
+ << "unauth'd"
+ << "code"
+ << ErrorCodes::Unauthorized)));
if (request.target != HostAndPort("node2", 12345) &&
request.cmdObj.firstElement().fieldNameStringData() != "replSetHeartbeat") {
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
index 2233e21cc21..2290786e343 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp
@@ -86,16 +86,19 @@ ReplSetHeartbeatResponse ReplCoordHBV1Test::receiveHeartbeatFrom(const ReplicaSe
TEST_F(ReplCoordHBV1Test,
NodeJoinsExistingReplSetWhenReceivingAConfigContainingTheNodeViaHeartbeat) {
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplicaSetConfig rsConfig =
- assertMakeRSConfig(BSON("_id"
- << "mySet"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1"))
- << "protocolVersion" << 1));
+ ReplicaSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))
+ << "protocolVersion"
+ << 1));
init("mySet");
addSelf(HostAndPort("h2", 1));
const Date_t startDate = getNet()->now();
@@ -150,17 +153,21 @@ TEST_F(ReplCoordHBV1Test,
TEST_F(ReplCoordHBV1Test,
ArbiterJoinsExistingReplSetWhenReceivingAConfigContainingTheArbiterViaHeartbeat) {
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplicaSetConfig rsConfig =
- assertMakeRSConfig(BSON("_id"
- << "mySet"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1"
- << "arbiterOnly" << true)
- << BSON("_id" << 3 << "host"
- << "h3:1")) << "protocolVersion" << 1));
+ ReplicaSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1"
+ << "arbiterOnly"
+ << true)
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))
+ << "protocolVersion"
+ << 1));
init("mySet");
addSelf(HostAndPort("h2", 1));
const Date_t startDate = getNet()->now();
@@ -217,16 +224,19 @@ TEST_F(ReplCoordHBV1Test,
// Tests that a node in RS_STARTUP will not transition to RS_REMOVED if it receives a
// configuration that does not contain it.
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
- ReplicaSetConfig rsConfig =
- assertMakeRSConfig(BSON("_id"
- << "mySet"
- << "version" << 3 << "members"
- << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "h1:1")
- << BSON("_id" << 2 << "host"
- << "h2:1") << BSON("_id" << 3 << "host"
- << "h3:1"))
- << "protocolVersion" << 1));
+ ReplicaSetConfig rsConfig = assertMakeRSConfig(BSON("_id"
+ << "mySet"
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "h1:1")
+ << BSON("_id" << 2 << "host"
+ << "h2:1")
+ << BSON("_id" << 3 << "host"
+ << "h3:1"))
+ << "protocolVersion"
+ << 1));
init("mySet");
addSelf(HostAndPort("h4", 1));
const Date_t startDate = getNet()->now();
@@ -296,7 +306,9 @@ TEST_F(ReplCoordHBV1Test,
logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(3));
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -309,12 +321,12 @@ TEST_F(ReplCoordHBV1Test,
const NetworkInterfaceMock::NetworkOperationIterator noi = getNet()->getNextReadyRequest();
const RemoteCommandRequest& request = noi->getRequest();
log() << request.target.toString() << " processing " << request.cmdObj;
- getNet()->scheduleResponse(
- noi,
- getNet()->now(),
- makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
- << "unauth'd"
- << "code" << ErrorCodes::Unauthorized)));
+ getNet()->scheduleResponse(noi,
+ getNet()->now(),
+ makeResponseStatus(BSON("ok" << 0.0 << "errmsg"
+ << "unauth'd"
+ << "code"
+ << ErrorCodes::Unauthorized)));
if (request.target != HostAndPort("node2", 12345) &&
request.cmdObj.firstElement().fieldNameStringData() != "replSetHeartbeat") {
@@ -331,10 +343,9 @@ TEST_F(ReplCoordHBV1Test,
TEST_F(ReplCoordHBV1Test, ArbiterRecordsCommittedOpTimeFromHeartbeatMetadata) {
// Tests that an arbiter will update its committed optime from the heartbeat metadata
- assertStartSuccess(fromjson(
- "{_id:'mySet', version:1, protocolVersion:1, members:["
- "{_id:1, host:'node1:12345', arbiterOnly:true}, "
- "{_id:2, host:'node2:12345'}]}"),
+ assertStartSuccess(fromjson("{_id:'mySet', version:1, protocolVersion:1, members:["
+ "{_id:1, host:'node1:12345', arbiterOnly:true}, "
+ "{_id:2, host:'node2:12345'}]}"),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_ARBITER));
@@ -342,14 +353,21 @@ TEST_F(ReplCoordHBV1Test, ArbiterRecordsCommittedOpTimeFromHeartbeatMetadata) {
// its current optime to 'expected'
auto test = [this](OpTime committedOpTime, OpTime expected) {
// process heartbeat metadata directly
- StatusWith<rpc::ReplSetMetadata> metadata = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName
- << BSON("lastOpCommitted" << BSON("ts" << committedOpTime.getTimestamp() << "t"
- << committedOpTime.getTerm()) << "lastOpVisible"
- << BSON("ts" << committedOpTime.getTimestamp() << "t"
- << committedOpTime.getTerm()) << "configVersion"
- << 1 << "primaryIndex" << 1 << "term"
- << committedOpTime.getTerm() << "syncSourceIndex" << 1)));
+ StatusWith<rpc::ReplSetMetadata> metadata = rpc::ReplSetMetadata::readFromMetadata(
+ BSON(rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << committedOpTime.getTimestamp() << "t"
+ << committedOpTime.getTerm())
+ << "lastOpVisible"
+ << BSON("ts" << committedOpTime.getTimestamp() << "t"
+ << committedOpTime.getTerm())
+ << "configVersion"
+ << 1
+ << "primaryIndex"
+ << 1
+ << "term"
+ << committedOpTime.getTerm()
+ << "syncSourceIndex"
+ << 1)));
ASSERT_OK(metadata.getStatus());
getReplCoord()->processReplSetMetadata(metadata.getValue());
@@ -368,11 +386,15 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
HostAndPort host2("node2:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host" << host2.toString()))
- << "settings" << BSON("replicaSetId" << OID::gen()) << "protocolVersion"
+ << "settings"
+ << BSON("replicaSetId" << OID::gen())
+ << "protocolVersion"
<< 1),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -438,9 +460,10 @@ TEST_F(ReplCoordHBV1Test, IgnoreTheContentsOfMetadataWhenItsReplicaSetIdDoesNotM
ASSERT_EQ(MemberState(MemberState::RS_DOWN).toString(),
MemberState(member["state"].numberInt()).toString());
ASSERT_EQ(member["lastHeartbeatMessage"].String(),
- std::string(str::stream()
- << "replica set IDs do not match, ours: " << rsConfig.getReplicaSetId()
- << "; remote node's: " << unexpectedId));
+ std::string(str::stream() << "replica set IDs do not match, ours: "
+ << rsConfig.getReplicaSetId()
+ << "; remote node's: "
+ << unexpectedId));
}
} // namespace
diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
index 5c97e3bc976..e91aa8cb1e0 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp
@@ -35,10 +35,10 @@
#include "mongo/db/repl/repl_set_heartbeat_args.h"
#include "mongo/db/repl/repl_set_heartbeat_response.h"
#include "mongo/db/repl/replica_set_config.h"
+#include "mongo/db/repl/replication_coordinator.h" // ReplSetReconfigArgs
#include "mongo/db/repl/replication_coordinator_external_state_mock.h"
#include "mongo/db/repl/replication_coordinator_impl.h"
#include "mongo/db/repl/replication_coordinator_test_fixture.h"
-#include "mongo/db/repl/replication_coordinator.h" // ReplSetReconfigArgs
#include "mongo/executor/network_interface_mock.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/log.h"
@@ -72,7 +72,9 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenReconfigReceivedWhileSecondary) {
init();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -96,7 +98,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -112,13 +116,19 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 2 << "invalidlyNamedField" << 3 << "members"
+ << "version"
+ << 2
+ << "invalidlyNamedField"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "node2:12345"
- << "arbiterOnly" << true)));
+ << "arbiterOnly"
+ << true)));
// ErrorCodes::BadValue should be propagated from ReplicaSetConfig::initialize()
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
getReplCoord()->processReplSetReconfig(&txn, args, &result));
@@ -130,7 +140,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -146,7 +158,9 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "notMySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -162,11 +176,14 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
- << "node2:12345")) << "settings"
+ << "node2:12345"))
+ << "settings"
<< BSON("replicaSetId" << OID::gen())),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -179,11 +196,14 @@ TEST_F(ReplCoordTest, NodeReturnsInvalidReplicaSetConfigWhenReconfigReceivedWith
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
- << "node2:12345")) << "settings"
+ << "node2:12345"))
+ << "settings"
<< BSON("replicaSetId" << OID::gen()));
ASSERT_EQUALS(ErrorCodes::NewReplicaSetConfigurationIncompatible,
@@ -197,7 +217,9 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -213,7 +235,9 @@ TEST_F(ReplCoordTest,
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << -3 << "members"
+ << "version"
+ << -3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -231,7 +255,9 @@ void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) {
replCoord->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -247,12 +273,15 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
// Replica set id will be copied from existing configuration.
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
<< "node2:12345"
- << "priority" << 3)));
+ << "priority"
+ << 3)));
*status = replCoord->processReplSetReconfig(&txn, args, &garbage);
}
@@ -263,7 +292,9 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -302,7 +333,9 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenSavingANewConfigFailsDuringRe
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -329,7 +362,9 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -353,7 +388,9 @@ TEST_F(ReplCoordTest,
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -389,7 +426,9 @@ TEST_F(ReplCoordTest, NodeReturnsConfigurationInProgressWhenReceivingAReconfigWh
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -408,11 +447,14 @@ TEST_F(ReplCoordTest, PrimaryNodeAcceptsNewConfigWhenReceivingAReconfigWithAComp
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
- << "node2:12345")) << "settings"
+ << "node2:12345"))
+ << "settings"
<< BSON("replicaSetId" << OID::gen())),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -451,7 +493,9 @@ TEST_F(
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -473,7 +517,9 @@ TEST_F(
ReplicaSetConfig config;
config.initialize(BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -507,7 +553,9 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -536,7 +584,9 @@ TEST_F(ReplCoordTest, NodeDoesNotAcceptHeartbeatReconfigWhileInTheMidstOfReconfi
ReplicaSetConfig config;
config.initialize(BSON("_id"
<< "mySet"
- << "version" << 4 << "members"
+ << "version"
+ << 4
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -570,7 +620,9 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary
init();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -586,7 +638,9 @@ TEST_F(ReplCoordTest, NodeAcceptsConfigFromAReconfigWithForceTrueWhileNotPrimary
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
index 41bbb2a2d4b..0343865aa6c 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp
@@ -37,8 +37,8 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/operation_context_noop.h"
-#include "mongo/db/repl/handshake_args.h"
#include "mongo/db/repl/bson_extract_optime.h"
+#include "mongo/db/repl/handshake_args.h"
#include "mongo/db/repl/is_master_response.h"
#include "mongo/db/repl/old_update_position_args.h"
#include "mongo/db/repl/optime.h"
@@ -112,7 +112,9 @@ void runSingleNodeElection(ServiceContext::UniqueOperationContext txn,
TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
@@ -123,10 +125,13 @@ TEST_F(ReplCoordTest, NodeEntersStartup2StateWhenStartingUpWithValidLocalConfig)
TEST_F(ReplCoordTest, NodeEntersArbiterStateWhenStartingUpWithValidLocalConfigWhereItIsAnArbiter) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "node2:12345"))),
HostAndPort("node1", 12345));
@@ -138,7 +143,9 @@ TEST_F(ReplCoordTest, NodeEntersRemovedStateWhenStartingUpWithALocalConfigWhichL
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -155,7 +162,9 @@ TEST_F(ReplCoordTest,
startCapturingLogMessages();
assertStartSuccess(BSON("_id"
<< "notMySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "node1:12345"))),
HostAndPort("node1", 12345));
@@ -195,7 +204,9 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -209,7 +220,9 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result2));
@@ -227,16 +240,20 @@ TEST_F(ReplCoordTest,
// Starting uninitialized, show that we can perform the initiate behavior.
BSONObjBuilder result1;
- auto status = getReplCoord()->processReplSetInitiate(
- &txn,
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "node1:12345"
- << "arbiterOnly" << true)
- << BSON("_id" << 1 << "host"
- << "node2:12345"))),
- &result1);
+ auto status =
+ getReplCoord()->processReplSetInitiate(&txn,
+ BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "node1:12345"
+ << "arbiterOnly"
+ << true)
+ << BSON("_id" << 1 << "host"
+ << "node2:12345"))),
+ &result1);
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig, status);
ASSERT_STRING_CONTAINS(status.reason(), "is not electable under the new configuration version");
ASSERT_FALSE(getExternalState()->threadsStarted());
@@ -258,7 +275,9 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -276,7 +295,9 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node4"))),
&result));
@@ -289,7 +310,9 @@ void doReplSetInitiate(ReplicationCoordinatorImpl* replCoord, Status* status) {
replCoord->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345")
<< BSON("_id" << 1 << "host"
@@ -376,7 +399,9 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "wrongSet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -425,7 +450,9 @@ TEST_F(ReplCoordTest,
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1);
@@ -446,7 +473,9 @@ TEST_F(ReplCoordTest, InitiateFailsWithoutReplSetFlag) {
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -467,7 +496,9 @@ TEST_F(ReplCoordTest, NodeReturnsOutOfDiskSpaceWhenInitiateCannotWriteConfigToDi
getReplCoord()->processReplSetInitiate(&txn,
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"))),
&result1));
@@ -519,9 +550,13 @@ TEST_F(
TEST_F(ReplCoordTest, NodeReturnsOkWhenCheckReplEnabledForCommandAfterReceivingAConfig) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
// check status OK and result is empty
@@ -580,15 +615,21 @@ TEST_F(ReplCoordTest, NodeReturnsImmediatelyWhenAwaitReplicationIsRanAgainstAMas
TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASecondaryNode) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
OperationContextNoop txn;
@@ -608,15 +649,21 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenRunningAwaitReplicationAgainstASec
TEST_F(ReplCoordTest, NodeReturnsOkWhenRunningAwaitReplicationAgainstPrimaryWithWZero) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
OperationContextNoop txn;
@@ -644,21 +691,28 @@ TEST_F(ReplCoordTest, NodeReturnsOkWhenRunningAwaitReplicationAgainstPrimaryWith
TEST_F(ReplCoordTest,
NodeReturnsWriteConcernFailedUntilASufficientNumberOfNodesHaveTheWriteDurable) {
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0)
- << BSON("host"
- << "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2) << BSON("host"
- << "node4:12345"
- << "_id" << 3))),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id"
+ << 3))),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
getReplCoord()->setMyLastDurableOpTime(OpTimeWithTermZero(100, 0));
@@ -717,21 +771,28 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, NodeReturnsWriteConcernFailedUntilASufficientNumberOfNodesHaveTheWrite) {
OperationContextNoop txn;
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0)
- << BSON("host"
- << "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2) << BSON("host"
- << "node4:12345"
- << "_id" << 3))),
- HostAndPort("node1", 12345));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id"
+ << 3))),
+ HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
getReplCoord()->setMyLastDurableOpTime(OpTimeWithTermZero(100, 0));
@@ -790,15 +851,19 @@ TEST_F(ReplCoordTest,
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node0")
<< BSON("_id" << 1 << "host"
- << "node1") << BSON("_id" << 2 << "host"
- << "node2")
+ << "node1")
+ << BSON("_id" << 2 << "host"
+ << "node2")
<< BSON("_id" << 3 << "host"
- << "node3") << BSON("_id" << 4 << "host"
- << "node4"))),
+ << "node3")
+ << BSON("_id" << 4 << "host"
+ << "node4"))),
HostAndPort("node0"));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 0), 0));
@@ -828,37 +893,45 @@ TEST_F(
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node0"
- << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA1"))
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA1"))
<< BSON("_id" << 1 << "host"
<< "node1"
- << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA2"))
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA2"))
<< BSON("_id" << 2 << "host"
<< "node2"
- << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rackNA3"))
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rackNA3"))
<< BSON("_id" << 3 << "host"
<< "node3"
- << "tags" << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU1"))
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU1"))
<< BSON("_id" << 4 << "host"
<< "node4"
- << "tags" << BSON("dc"
- << "EU"
- << "rack"
- << "rackEU2"))) << "settings"
+ << "tags"
+ << BSON("dc"
+ << "EU"
+ << "rack"
+ << "rackEU2")))
+ << "settings"
<< BSON("getLastErrorModes" << BSON("multiDC" << BSON("dc" << 2) << "multiDCAndRack"
<< BSON("dc" << 2 << "rack" << 3)))),
HostAndPort("node0"));
@@ -1020,15 +1093,21 @@ TEST_F(ReplCoordTest, NodeReturnsOkWhenAWriteConcernWithNoTimeoutHasBeenSatisfie
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1079,15 +1158,21 @@ TEST_F(ReplCoordTest, NodeReturnsWriteConcernFailedWhenAWriteConcernTimesOutBefo
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1120,15 +1205,21 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1161,15 +1252,21 @@ TEST_F(ReplCoordTest, NodeReturnsNotMasterWhenSteppingDownBeforeSatisfyingAWrite
// if the node steps down while it is waiting.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1203,12 +1300,15 @@ TEST_F(ReplCoordTest,
// Tests that a thread blocked in awaitReplication can be killed by a killOp operation
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1")
<< BSON("_id" << 1 << "host"
- << "node2") << BSON("_id" << 2 << "host"
- << "node3"))),
+ << "node2")
+ << BSON("_id" << 2 << "host"
+ << "node3"))),
HostAndPort("node1"));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1254,7 +1354,9 @@ private:
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
@@ -1277,17 +1379,20 @@ TEST_F(ReplCoordTest, NodeReturnsBadValueWhenUpdateTermIsRunAgainstANonReplNode)
TEST_F(ReplCoordTest, NodeChangesTermAndStepsDownWhenAndOnlyWhenUpdateTermSuppliesAHigherTerm) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))
- << "protocolVersion" << 1),
- HostAndPort("test1", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))
+ << "protocolVersion"
+ << 1),
+ HostAndPort("test1", 1234));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -1324,17 +1429,20 @@ TEST_F(ReplCoordTest, NodeChangesTermAndStepsDownWhenAndOnlyWhenUpdateTermSuppli
TEST_F(ReplCoordTest, ConcurrentStepDownShouldNotSignalTheSameFinishEventMoreThanOnce) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))
- << "protocolVersion" << 1),
- HostAndPort("test1", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))
+ << "protocolVersion"
+ << 1),
+ HostAndPort("test1", 1234));
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -1484,7 +1592,9 @@ TEST_F(ReplCoordTest, NodeBecomesPrimaryAgainWhenStepDownTimeoutExpiresInASingle
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -1759,27 +1869,33 @@ TEST_F(ReplCoordTest,
ASSERT_EQUALS(MemberState::RS_STARTUP, getReplCoord()->getMemberState().s);
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
}
TEST_F(ReplCoordTest, NodeIncludesOtherMembersProgressInUpdatePositionCommand) {
OperationContextNoop txn;
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234")
- << BSON("_id" << 3 << "host"
- << "test4:1234"))),
- HostAndPort("test1", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234")
+ << BSON("_id" << 3 << "host"
+ << "test4:1234"))),
+ HostAndPort("test1", 1234));
OpTime optime1({2, 1}, 1);
OpTime optime2({100, 1}, 1);
OpTime optime3({100, 2}, 1);
@@ -1846,16 +1962,18 @@ TEST_F(ReplCoordTest, NodeIncludesOtherMembersProgressInUpdatePositionCommand) {
TEST_F(ReplCoordTest, NodeIncludesOtherMembersProgressInOldUpdatePositionCommand) {
OperationContextNoop txn;
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test1", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test1", 1234));
OpTimeWithTermZero optime1(100, 1);
OpTimeWithTermZero optime2(100, 2);
OpTimeWithTermZero optime3(2, 1);
@@ -1899,16 +2017,20 @@ TEST_F(ReplCoordTest, NodeIncludesOtherMembersProgressInOldUpdatePositionCommand
TEST_F(ReplCoordTest,
NodeReturnsOperationFailedWhenSettingMaintenanceModeFalseWhenItHasNotBeenSetTrue) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
OperationContextNoop txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1923,16 +2045,20 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest,
ReportRollbackWhileInBothRollbackAndMaintenanceModeAndRecoveryAfterFinishingRollback) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
OperationContextNoop txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1952,16 +2078,20 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, AllowAsManyUnsetMaintenanceModesAsThereHaveBeenSetMaintenanceModes) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
OperationContextNoop txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -1983,16 +2113,20 @@ TEST_F(ReplCoordTest, AllowAsManyUnsetMaintenanceModesAsThereHaveBeenSetMaintena
TEST_F(ReplCoordTest, SettingAndUnsettingMaintenanceModeShouldNotAffectRollbackState) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
OperationContextNoop txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2022,16 +2156,20 @@ TEST_F(ReplCoordTest, SettingAndUnsettingMaintenanceModeShouldNotAffectRollbackS
TEST_F(ReplCoordTest, DoNotAllowMaintenanceModeWhilePrimary) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
OperationContextNoop txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2055,16 +2193,20 @@ TEST_F(ReplCoordTest, DoNotAllowMaintenanceModeWhilePrimary) {
TEST_F(ReplCoordTest, DoNotAllowSettingMaintenanceModeWhileConductingAnElection) {
init("mySet/test1:1234,test2:1234,test3:1234");
- assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "protocolVersion" << 1 << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test1:1234")
- << BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))),
- HostAndPort("test2", 1234));
+ assertStartSuccess(BSON("_id"
+ << "mySet"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test1:1234")
+ << BSON("_id" << 1 << "host"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))),
+ HostAndPort("test2", 1234));
OperationContextNoop txn;
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2120,7 +2262,9 @@ TEST_F(ReplCoordTest,
HostAndPort client2Host("node3:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << myHost.toString())
<< BSON("_id" << 1 << "host" << client1Host.toString())
<< BSON("_id" << 2 << "host" << client2Host.toString()))),
@@ -2164,7 +2308,9 @@ TEST_F(ReplCoordTest,
HostAndPort client2Host("node3:12345");
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host" << myHost.toString())
<< BSON("_id" << 1 << "host" << client1Host.toString())
<< BSON("_id" << 2 << "host" << client2Host.toString()))),
@@ -2230,14 +2376,19 @@ TEST_F(ReplCoordTest, NodeReturnsNoNodesWhenGetOtherNodesInReplSetIsRunBeforeHav
TEST_F(ReplCoordTest, NodeReturnsListOfNodesOtherThanItselfInResponseToGetOtherNodesInReplSet) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "h1")
<< BSON("_id" << 1 << "host"
<< "h2")
<< BSON("_id" << 2 << "host"
<< "h3"
- << "priority" << 0 << "hidden" << true))),
+ << "priority"
+ << 0
+ << "hidden"
+ << true))),
HostAndPort("h1"));
std::vector<HostAndPort> otherNodes = getReplCoord()->getOtherNodesInReplSet();
@@ -2272,17 +2423,20 @@ TEST_F(ReplCoordTest, IsMaster) {
HostAndPort h3("h3");
HostAndPort h4("h4");
assertStartSuccess(
- BSON("_id"
- << "mySet"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host" << h1.toString())
- << BSON("_id" << 1 << "host" << h2.toString())
- << BSON("_id" << 2 << "host" << h3.toString() << "arbiterOnly" << true)
- << BSON("_id" << 3 << "host" << h4.toString() << "priority" << 0
- << "tags" << BSON("key1"
- << "value1"
- << "key2"
- << "value2")))),
+ BSON(
+ "_id"
+ << "mySet"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host" << h1.toString())
+ << BSON("_id" << 1 << "host" << h2.toString())
+ << BSON("_id" << 2 << "host" << h3.toString() << "arbiterOnly" << true)
+ << BSON("_id" << 3 << "host" << h4.toString() << "priority" << 0 << "tags"
+ << BSON("key1"
+ << "value1"
+ << "key2"
+ << "value2")))),
h4);
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
ASSERT_TRUE(getReplCoord()->getMemberState().secondary());
@@ -2337,7 +2491,9 @@ TEST_F(ReplCoordTest, IsMasterWithCommittedSnapshot) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -2374,15 +2530,21 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2405,9 +2567,12 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenUpdatePositionContainsInfoAboutSelf) {
UpdatePositionArgs args;
ASSERT_OK(args.initialize(
BSON(UpdatePositionArgs::kCommandFieldName
- << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1
+ << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 0
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 0
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << time2.getTimestamp() << "t" << 2)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
@@ -2422,15 +2587,21 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenOldUpdatePositionContainsInfoAboutSelf
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2452,12 +2623,15 @@ TEST_F(ReplCoordTest, DoNotProcessSelfWhenOldUpdatePositionContainsInfoAboutSelf
// receive updatePosition containing ourself, should not process the update for self
OldUpdatePositionArgs args;
- ASSERT_OK(args.initialize(
- BSON(OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 0
- << OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp)))));
+ ASSERT_OK(args.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 0
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << time2.timestamp)))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
@@ -2468,15 +2642,21 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect)
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2496,9 +2676,12 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionWhenItsConfigVersionIsIncorrect)
UpdatePositionArgs args;
ASSERT_OK(args.initialize(
BSON(UpdatePositionArgs::kCommandFieldName
- << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1
+ << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 3 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << 3
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 1
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << time2.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
@@ -2515,15 +2698,21 @@ TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionWhenItsConfigVersionIsIncorre
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2542,12 +2731,15 @@ TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionWhenItsConfigVersionIsIncorre
// receive updatePosition with incorrect config version
OldUpdatePositionArgs args;
- ASSERT_OK(args.initialize(
- BSON(OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 3 << OldUpdatePositionArgs::kMemberIdFieldName << 1
- << OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp)))));
+ ASSERT_OK(args.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 3
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << time2.timestamp)))));
long long cfgver;
ASSERT_EQUALS(ErrorCodes::InvalidReplicaSetConfig,
@@ -2560,15 +2752,21 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConf
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2588,9 +2786,12 @@ TEST_F(ReplCoordTest, DoNotProcessUpdatePositionOfMembersWhoseIdsAreNotInTheConf
UpdatePositionArgs args;
ASSERT_OK(args.initialize(
BSON(UpdatePositionArgs::kCommandFieldName
- << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1
+ << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 9
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 9
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << time2.getTimestamp() << "t" << 2)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
@@ -2605,15 +2806,21 @@ TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionOfMembersWhoseIdsAreNotInTheC
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2632,12 +2839,15 @@ TEST_F(ReplCoordTest, DoNotProcessOldUpdatePositionOfMembersWhoseIdsAreNotInTheC
// receive updatePosition with nonexistent member id
OldUpdatePositionArgs args;
- ASSERT_OK(args.initialize(
- BSON(OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 9
- << OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp)))));
+ ASSERT_OK(args.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 9
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << time2.timestamp)))));
ASSERT_EQUALS(ErrorCodes::NodeNotFound, getReplCoord()->processReplSetUpdatePosition(args, 0));
ASSERT_EQUALS(ErrorCodes::WriteConcernFailed,
@@ -2649,15 +2859,21 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -2678,15 +2894,22 @@ TEST_F(ReplCoordTest,
getReplCoord()->setMyLastAppliedOpTime(time2);
getReplCoord()->setMyLastDurableOpTime(time2);
OldUpdatePositionArgs args;
- ASSERT_OK(args.initialize(BSON(
- OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 1
- << OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp)
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 2
- << OldUpdatePositionArgs::kOpTimeFieldName << time2.timestamp)))));
+ ASSERT_OK(
+ args.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << time2.timestamp)
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 2
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << time2.timestamp)))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
ASSERT_OK(getReplCoord()->awaitReplication(&txn, time2, writeConcern).status);
@@ -2702,10 +2925,13 @@ void doReplSetReconfig(ReplicationCoordinatorImpl* replCoord, Status* status) {
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345"
- << "priority" << 3)
+ << "priority"
+ << 3)
<< BSON("_id" << 1 << "host"
<< "node2:12345")
<< BSON("_id" << 2 << "host"
@@ -2717,15 +2943,21 @@ TEST_F(ReplCoordTest, AwaitReplicationShouldResolveAsNormalDuringAReconfig) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
// Turn off readconcern majority support, and snapshots.
@@ -2790,7 +3022,9 @@ void doReplSetReconfigToFewer(ReplicationCoordinatorImpl* replCoord, Status* sta
args.force = false;
args.newConfigObj = BSON("_id"
<< "mySet"
- << "version" << 3 << "members"
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "node1:12345")
<< BSON("_id" << 2 << "host"
@@ -2804,15 +3038,21 @@ TEST_F(
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 2));
@@ -2860,20 +3100,29 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2)
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
<< BSON("host"
<< "node4:12345"
- << "_id" << 3) << BSON("host"
- << "node5:12345"
- << "_id" << 4))),
+ << "_id"
+ << 3)
+ << BSON("host"
+ << "node5:12345"
+ << "_id"
+ << 4))),
HostAndPort("node1", 12345));
// Turn off readconcern majority support, and snapshots.
@@ -2934,21 +3183,35 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2)
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
<< BSON("host"
<< "node4:12345"
- << "_id" << 3 << "votes" << 0 << "priority" << 0)
+ << "_id"
+ << 3
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("host"
<< "node5:12345"
- << "_id" << 4 << "arbiterOnly" << true))),
+ << "_id"
+ << 4
+ << "arbiterOnly"
+ << true))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
OpTime time(Timestamp(100, 0), 1);
@@ -2990,21 +3253,35 @@ TEST_F(ReplCoordTest,
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2)
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
<< BSON("host"
<< "node4:12345"
- << "_id" << 3 << "votes" << 0 << "priority" << 0)
+ << "_id"
+ << 3
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("host"
<< "node5:12345"
- << "_id" << 4 << "arbiterOnly" << true))),
+ << "_id"
+ << 4
+ << "arbiterOnly"
+ << true))),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
OpTime zero(Timestamp(0, 0), 0);
@@ -3048,9 +3325,13 @@ TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDurin
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(10, 0));
@@ -3068,9 +3349,13 @@ TEST_F(ReplCoordTest, NodeReturnsShutdownInProgressWhenWaitingUntilAnOpTimeDurin
TEST_F(ReplCoordTest, NodeReturnsInterruptedWhenWaitingUntilAnOpTimeIsInterrupted) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(10, 0));
@@ -3092,9 +3377,13 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesNoOpTi
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
auto result = getReplCoord()->waitUntilOpTime(&txn, ReadConcernArgs());
@@ -3107,9 +3396,13 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTi
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
getReplCoord()->setMyLastAppliedOpTime(OpTimeWithTermZero(100, 0));
@@ -3125,9 +3418,13 @@ TEST_F(ReplCoordTest, NodeReturnsOkImmediatelyWhenWaitingUntilOpTimePassesAnOpTi
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
@@ -3170,9 +3467,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord());
@@ -3191,9 +3492,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedWhileShutdown) {
TEST_F(ReplCoordTest, ReadAfterCommittedInterrupted) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord());
const auto txnPtr = makeOperationContext();
@@ -3215,9 +3520,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedGreaterOpTime) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord());
@@ -3235,9 +3544,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedEqualOpTime) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord());
OpTime time(Timestamp(100, 0), 1);
@@ -3255,22 +3568,24 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredGreaterOpTime) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord());
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(0, 0), 1));
getReplCoord()->setMyLastDurableOpTime(OpTime(Timestamp(0, 0), 1));
OpTime committedOpTime(Timestamp(200, 0), 1);
- auto pseudoLogOp =
- stdx::async(stdx::launch::async,
- [this, &committedOpTime]() {
- // Not guaranteed to be scheduled after waitUntil blocks...
- getReplCoord()->setMyLastAppliedOpTime(committedOpTime);
- getReplCoord()->setMyLastDurableOpTime(committedOpTime);
- getReplCoord()->onSnapshotCreate(committedOpTime, SnapshotName(1));
- });
+ auto pseudoLogOp = stdx::async(stdx::launch::async, [this, &committedOpTime]() {
+ // Not guaranteed to be scheduled after waitUntil blocks...
+ getReplCoord()->setMyLastAppliedOpTime(committedOpTime);
+ getReplCoord()->setMyLastDurableOpTime(committedOpTime);
+ getReplCoord()->onSnapshotCreate(committedOpTime, SnapshotName(1));
+ });
auto result = getReplCoord()->waitUntilOpTime(
&txn,
@@ -3285,9 +3600,13 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
OperationContextNoop txn;
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
runSingleNodeElection(makeOperationContext(), getReplCoord());
getReplCoord()->setMyLastAppliedOpTime(OpTime(Timestamp(0, 0), 1));
@@ -3295,14 +3614,12 @@ TEST_F(ReplCoordTest, ReadAfterCommittedDeferredEqualOpTime) {
OpTime opTimeToWait(Timestamp(100, 0), 1);
- auto pseudoLogOp =
- stdx::async(stdx::launch::async,
- [this, &opTimeToWait]() {
- // Not guaranteed to be scheduled after waitUntil blocks...
- getReplCoord()->setMyLastAppliedOpTime(opTimeToWait);
- getReplCoord()->setMyLastDurableOpTime(opTimeToWait);
- getReplCoord()->onSnapshotCreate(opTimeToWait, SnapshotName(1));
- });
+ auto pseudoLogOp = stdx::async(stdx::launch::async, [this, &opTimeToWait]() {
+ // Not guaranteed to be scheduled after waitUntil blocks...
+ getReplCoord()->setMyLastAppliedOpTime(opTimeToWait);
+ getReplCoord()->setMyLastDurableOpTime(opTimeToWait);
+ getReplCoord()->onSnapshotCreate(opTimeToWait, SnapshotName(1));
+ });
auto result = getReplCoord()->waitUntilOpTime(
&txn, ReadConcernArgs(opTimeToWait, ReadConcernLevel::kMajorityReadConcern));
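// --- Aside (illustrative sketch, not part of the commit) ---
// The two hunks above show the other notable clang-format 3.8 change: a lambda
// passed as the trailing argument now stays attached to the call instead of
// being wrapped onto its own deeply indented block. A minimal self-contained
// sketch of the resulting style; runAsync() is a hypothetical stand-in for
// stdx::async(stdx::launch::async, ...):

#include <utility>

template <typename F>
void runAsync(F&& f) {
    std::forward<F>(f)();  // invoke inline; a real impl would spawn a thread
}

void formatExample() {
    int opTime = 0;
    // Post-format: `[...]() {` opens on the call line, the body is indented
    // like an ordinary block, and `});` closes at statement level.
    runAsync([&opTime]() {
        opTime = 100;  // body reads as normal code, not a hanging column
    });
}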
@@ -3316,34 +3633,53 @@ TEST_F(ReplCoordTest, IgnoreTheContentsOfMetadataWhenItsConfigVersionDoesNotMatc
// Ensure that we do not process ReplSetMetadata when ConfigVersions do not match.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
// lower configVersion
StatusWith<rpc::ReplSetMetadata> metadata = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "configVersion" << 1
- << "primaryIndex" << 2 << "term" << 2 << "syncSourceIndex" << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2)
+ << "configVersion"
+ << 1
+ << "primaryIndex"
+ << 2
+ << "term"
+ << 2
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata.getValue());
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
// higher configVersion
- StatusWith<rpc::ReplSetMetadata> metadata2 = rpc::ReplSetMetadata::readFromMetadata(
- BSON(rpc::kReplSetMetadataFieldName
- << BSON("lastOpCommitted"
- << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "configVersion" << 100
- << "primaryIndex" << 2 << "term" << 2 << "syncSourceIndex" << 1)));
+ StatusWith<rpc::ReplSetMetadata> metadata2 = rpc::ReplSetMetadata::readFromMetadata(BSON(
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 2) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 2)
+ << "configVersion"
+ << 100
+ << "primaryIndex"
+ << 2
+ << "term"
+ << 2
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata2.getValue());
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
}
@@ -3353,16 +3689,23 @@ TEST_F(ReplCoordTest, UpdateLastCommittedOpTimeWhenTheLastCommittedOpTimeFromMet
// but not if the OpTime is older than the current LastCommittedOpTime.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))
- << "protocolVersion" << 1),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))
+ << "protocolVersion"
+ << 1),
HostAndPort("node1", 12345));
getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY);
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
@@ -3375,20 +3718,34 @@ TEST_F(ReplCoordTest, UpdateLastCommittedOpTimeWhenTheLastCommittedOpTimeFromMet
// higher OpTime, should change
StatusWith<rpc::ReplSetMetadata> metadata = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 1) << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 1) << "configVersion" << 2
- << "primaryIndex" << 2 << "term" << 1 << "syncSourceIndex" << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 1) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 1)
+ << "configVersion"
+ << 2
+ << "primaryIndex"
+ << 2
+ << "term"
+ << 1
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata.getValue());
ASSERT_EQUALS(OpTime(Timestamp(10, 0), 1), getReplCoord()->getLastCommittedOpTime());
ASSERT_EQUALS(OpTime(Timestamp(10, 0), 1), getReplCoord()->getCurrentCommittedSnapshotOpTime());
// lower OpTime, should not change
StatusWith<rpc::ReplSetMetadata> metadata2 = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(9, 0) << "t" << 1) << "lastOpVisible"
- << BSON("ts" << Timestamp(9, 0) << "t" << 1) << "configVersion" << 2
- << "primaryIndex" << 2 << "term" << 1 << "syncSourceIndex" << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(9, 0) << "t" << 1) << "lastOpVisible"
+ << BSON("ts" << Timestamp(9, 0) << "t" << 1)
+ << "configVersion"
+ << 2
+ << "primaryIndex"
+ << 2
+ << "term"
+ << 1
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata2.getValue());
ASSERT_EQUALS(OpTime(Timestamp(10, 0), 1), getReplCoord()->getLastCommittedOpTime());
}
@@ -3398,16 +3755,23 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
// Ensure that currentPrimaryIndex is never altered by ReplSetMetadata.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2))
- << "protocolVersion" << 1),
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2))
+ << "protocolVersion"
+ << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
OperationContextNoop txn;
@@ -3416,10 +3780,17 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
// higher term, should change
StatusWith<rpc::ReplSetMetadata> metadata = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "configVersion" << 2
- << "primaryIndex" << 2 << "term" << 3 << "syncSourceIndex" << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "lastOpVisible"
+ << BSON("ts" << Timestamp(10, 0) << "t" << 3)
+ << "configVersion"
+ << 2
+ << "primaryIndex"
+ << 2
+ << "term"
+ << 3
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata.getValue());
ASSERT_EQUALS(OpTime(Timestamp(10, 0), 3), getReplCoord()->getLastCommittedOpTime());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
@@ -3427,10 +3798,17 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
// lower term, should not change
StatusWith<rpc::ReplSetMetadata> metadata2 = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastOpVisible"
- << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "configVersion" << 2
- << "primaryIndex" << 1 << "term" << 2 << "syncSourceIndex" << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastOpVisible"
+ << BSON("ts" << Timestamp(11, 0) << "t" << 3)
+ << "configVersion"
+ << 2
+ << "primaryIndex"
+ << 1
+ << "term"
+ << 2
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata2.getValue());
ASSERT_EQUALS(OpTime(Timestamp(11, 0), 3), getReplCoord()->getLastCommittedOpTime());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
@@ -3438,10 +3816,17 @@ TEST_F(ReplCoordTest, UpdateTermWhenTheTermFromMetadataIsNewerButNeverUpdateCurr
// same term, should not change
StatusWith<rpc::ReplSetMetadata> metadata3 = rpc::ReplSetMetadata::readFromMetadata(BSON(
- rpc::kReplSetMetadataFieldName << BSON(
- "lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastOpVisible"
- << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "configVersion" << 2
- << "primaryIndex" << 1 << "term" << 3 << "syncSourceIndex" << 1)));
+ rpc::kReplSetMetadataFieldName
+ << BSON("lastOpCommitted" << BSON("ts" << Timestamp(11, 0) << "t" << 3) << "lastOpVisible"
+ << BSON("ts" << Timestamp(11, 0) << "t" << 3)
+ << "configVersion"
+ << 2
+ << "primaryIndex"
+ << 1
+ << "term"
+ << 3
+ << "syncSourceIndex"
+ << 1)));
getReplCoord()->processReplSetMetadata(metadata3.getValue());
ASSERT_EQUALS(OpTime(Timestamp(11, 0), 3), getReplCoord()->getLastCommittedOpTime());
ASSERT_EQUALS(3, getReplCoord()->getTerm());
@@ -3453,13 +3838,19 @@ TEST_F(ReplCoordTest,
// Ensure that the metadata is processed if it is contained in a heartbeat response.
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0)
- << BSON("host"
- << "node2:12345"
- << "_id" << 1))
- << "protocolVersion" << 1),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0)
+ << BSON("host"
+ << "node2:12345"
+ << "_id"
+ << 1))
+ << "protocolVersion"
+ << 1),
HostAndPort("node1", 12345));
ASSERT_EQUALS(OpTime(Timestamp(0, 0), 0), getReplCoord()->getLastCommittedOpTime());
OperationContextNoop txn;
@@ -3473,9 +3864,15 @@ TEST_F(ReplCoordTest,
StatusWith<rpc::ReplSetMetadata> metadata = rpc::ReplSetMetadata::readFromMetadata(BSON(
rpc::kReplSetMetadataFieldName
<< BSON("lastOpCommitted" << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "lastOpVisible"
- << BSON("ts" << Timestamp(10, 0) << "t" << 3) << "configVersion"
- << config.getConfigVersion() << "primaryIndex" << 1 << "term" << 3
- << "syncSourceIndex" << 1)));
+ << BSON("ts" << Timestamp(10, 0) << "t" << 3)
+ << "configVersion"
+ << config.getConfigVersion()
+ << "primaryIndex"
+ << 1
+ << "term"
+ << 3
+ << "syncSourceIndex"
+ << 1)));
BSONObjBuilder metadataBuilder;
ASSERT_OK(metadata.getValue().writeToMetadata(&metadataBuilder));
auto metadataObj = metadataBuilder.obj();
@@ -3506,13 +3903,19 @@ TEST_F(ReplCoordTest,
ScheduleElectionToBeRunInElectionTimeoutFromNowWhenCancelAndRescheduleElectionTimeoutIsRun) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -3551,13 +3954,19 @@ TEST_F(ReplCoordTest,
TEST_F(ReplCoordTest, DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunInPV0) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 0 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 0
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
ASSERT_TRUE(replCoord->setFollowerMode(MemberState::RS_SECONDARY));
@@ -3571,13 +3980,19 @@ TEST_F(ReplCoordTest, DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeou
TEST_F(ReplCoordTest, DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunInRollback) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
ASSERT_TRUE(replCoord->setFollowerMode(MemberState::RS_ROLLBACK));
@@ -3592,13 +4007,23 @@ TEST_F(ReplCoordTest,
DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunWhileUnelectable) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0 << "priority" << 0 << "hidden" << true)
+ << "_id"
+ << 0
+ << "priority"
+ << 0
+ << "hidden"
+ << true)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
ASSERT_TRUE(replCoord->setFollowerMode(MemberState::RS_SECONDARY));
@@ -3613,13 +4038,19 @@ TEST_F(ReplCoordTest,
DoNotScheduleElectionWhenCancelAndRescheduleElectionTimeoutIsRunWhileRemoved) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -3644,10 +4075,15 @@ TEST_F(ReplCoordTest,
ReplicaSetConfig config;
config.initialize(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 3 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 3
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node2:12345"
- << "_id" << 1))));
+ << "_id"
+ << 1))));
hbResp.setConfig(config);
hbResp.setConfigVersion(3);
hbResp.setSetName("mySet");
@@ -3668,13 +4104,19 @@ TEST_F(ReplCoordTest,
CancelAndRescheduleElectionTimeoutWhenProcessingHeartbeatResponseFromPrimary) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -3714,13 +4156,19 @@ TEST_F(ReplCoordTest,
CancelAndRescheduleElectionTimeoutWhenProcessingHeartbeatResponseWithoutState) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "protocolVersion" << 1 << "version" << 2 << "members"
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1))),
+ << "_id"
+ << 1))),
HostAndPort("node1", 12345));
ReplicationCoordinatorImpl* replCoord = getReplCoord();
@@ -3760,7 +4208,9 @@ TEST_F(ReplCoordTest, AdvanceCommittedSnapshotToMostRecentSnapshotPriorToOpTimeW
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3791,7 +4241,9 @@ TEST_F(ReplCoordTest, DoNotAdvanceCommittedSnapshotWhenAnOpTimeIsNewerThanOurLat
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3820,7 +4272,9 @@ TEST_F(ReplCoordTest,
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3851,7 +4305,9 @@ TEST_F(ReplCoordTest, ZeroCommittedSnapshotWhenAllSnapshotsAreDropped) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3878,7 +4334,9 @@ TEST_F(ReplCoordTest, DoNotAdvanceCommittedSnapshotWhenAppliedOpTimeChanges) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -3902,9 +4360,13 @@ TEST_F(ReplCoordTest,
NodeChangesMyLastOpTimeWhenAndOnlyWhensetMyLastDurableOpTimeReceivesANewerOpTime4DurableSE) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("host"
- << "node1:12345"
- << "_id" << 0))),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("host"
+ << "node1:12345"
+ << "_id"
+ << 0))),
HostAndPort("node1", 12345));
@@ -3925,13 +4387,18 @@ TEST_F(ReplCoordTest, OnlyForwardSyncProgressForOtherNodesWhenTheNodesAreBelieve
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234")
<< BSON("_id" << 1 << "host"
- << "test2:1234") << BSON("_id" << 2 << "host"
- << "test3:1234"))
- << "protocolVersion" << 1 << "settings"
+ << "test2:1234")
+ << BSON("_id" << 2 << "host"
+ << "test3:1234"))
+ << "protocolVersion"
+ << 1
+ << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("test1", 1234));
OpTime optime(Timestamp(100, 2), 0);
@@ -4020,20 +4487,32 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
assertStartSuccess(
BSON("_id"
<< "mySet"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("host"
<< "node1:12345"
- << "_id" << 0)
+ << "_id"
+ << 0)
<< BSON("host"
<< "node2:12345"
- << "_id" << 1) << BSON("host"
- << "node3:12345"
- << "_id" << 2) << BSON("host"
- << "node4:12345"
- << "_id" << 3)
+ << "_id"
+ << 1)
+ << BSON("host"
+ << "node3:12345"
+ << "_id"
+ << 2)
+ << BSON("host"
+ << "node4:12345"
+ << "_id"
+ << 3)
<< BSON("host"
<< "node5:12345"
- << "_id" << 4)) << "protocolVersion" << 1 << "settings"
+ << "_id"
+ << 4))
+ << "protocolVersion"
+ << 1
+ << "settings"
<< BSON("electionTimeoutMillis" << 2000 << "heartbeatIntervalMillis" << 40000)),
HostAndPort("node1", 12345));
ASSERT(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY));
@@ -4043,23 +4522,34 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
// Receive notification that every node is up.
OldUpdatePositionArgs args;
- ASSERT_OK(args.initialize(BSON(
- OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(
- BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 1
- << OldUpdatePositionArgs::kOpTimeFieldName << startingOpTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 2
- << OldUpdatePositionArgs::kOpTimeFieldName << startingOpTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 3
- << OldUpdatePositionArgs::kOpTimeFieldName << startingOpTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 4
- << OldUpdatePositionArgs::kOpTimeFieldName
- << startingOpTime.getTimestamp())))));
+ ASSERT_OK(
+ args.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << startingOpTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 2
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << startingOpTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 3
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << startingOpTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 4
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << startingOpTime.getTimestamp())))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args, 0));
// Become PRIMARY.
@@ -4067,17 +4557,22 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
// Keep two nodes alive.
OldUpdatePositionArgs args1;
- ASSERT_OK(args1.initialize(
- BSON(OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 1
- << OldUpdatePositionArgs::kOpTimeFieldName
- << startingOpTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 2
- << OldUpdatePositionArgs::kOpTimeFieldName
- << startingOpTime.getTimestamp())))));
+ ASSERT_OK(
+ args1.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << startingOpTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 2
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << startingOpTime.getTimestamp())))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args1, 0));
// Confirm that the node remains PRIMARY after the other two nodes are marked DOWN.
@@ -4091,10 +4586,13 @@ TEST_F(ReplCoordTest, StepDownWhenHandleLivenessTimeoutMarksAMajorityOfVotingNod
OldUpdatePositionArgs args2;
ASSERT_OK(
args2.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName
- << 1 << OldUpdatePositionArgs::kOpTimeFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
<< startingOpTime.getTimestamp())))));
ASSERT_OK(getReplCoord()->processReplSetUpdatePosition(args2, 0));
@@ -4131,7 +4629,9 @@ TEST_F(ReplCoordTest, WaitForMemberState) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -4166,7 +4666,9 @@ TEST_F(ReplCoordTest, WaitForDrainFinish) {
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))),
HostAndPort("test1", 1234));
@@ -4205,39 +4707,62 @@ TEST_F(ReplCoordTest, UpdatePositionArgsReturnsNoSuchKeyWhenParsingOldUpdatePosi
OpTime opTime = OpTime(Timestamp(100, 1), 0);
ASSERT_EQUALS(
ErrorCodes::NoSuchKey,
- args2.initialize(BSON(
- OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(
- BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 1
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 2
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 3
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 4
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())))));
-
- ASSERT_OK(args.initialize(BSON(
- OldUpdatePositionArgs::kCommandFieldName
- << 1 << OldUpdatePositionArgs::kUpdateArrayFieldName
- << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 1
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 2
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 3
- << OldUpdatePositionArgs::kOpTimeFieldName << opTime.getTimestamp())
- << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
- << 2 << OldUpdatePositionArgs::kMemberIdFieldName << 4
- << OldUpdatePositionArgs::kOpTimeFieldName
- << opTime.getTimestamp())))));
+ args2.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 2
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 3
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 4
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())))));
+
+ ASSERT_OK(
+ args.initialize(BSON(OldUpdatePositionArgs::kCommandFieldName
+ << 1
+ << OldUpdatePositionArgs::kUpdateArrayFieldName
+ << BSON_ARRAY(BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 1
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 2
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 3
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())
+ << BSON(OldUpdatePositionArgs::kConfigVersionFieldName
+ << 2
+ << OldUpdatePositionArgs::kMemberIdFieldName
+ << 4
+ << OldUpdatePositionArgs::kOpTimeFieldName
+ << opTime.getTimestamp())))));
}
@@ -4248,54 +4773,72 @@ TEST_F(ReplCoordTest, OldUpdatePositionArgsReturnsBadValueWhenParsingUpdatePosit
ASSERT_EQUALS(ErrorCodes::BadValue,
args.initialize(BSON(
UpdatePositionArgs::kCommandFieldName
- << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1
+ << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 1
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 2
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 3
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 3
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 4
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 4
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))))));
ASSERT_OK(args2.initialize(
BSON(UpdatePositionArgs::kCommandFieldName
- << 1 << UpdatePositionArgs::kUpdateArrayFieldName
+ << 1
+ << UpdatePositionArgs::kUpdateArrayFieldName
<< BSON_ARRAY(BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 1
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 1
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 2
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 2
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 3
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 3
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3))
<< BSON(UpdatePositionArgs::kConfigVersionFieldName
- << 2 << UpdatePositionArgs::kMemberIdFieldName << 4
+ << 2
+ << UpdatePositionArgs::kMemberIdFieldName
+ << 4
<< UpdatePositionArgs::kDurableOpTimeFieldName
<< BSON("ts" << opTime.getTimestamp() << "t" << 3)
<< UpdatePositionArgs::kAppliedOpTimeFieldName
@@ -4309,10 +4852,13 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault" << false),
+ << "writeConcernMajorityJournalDefault"
+ << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -4329,10 +4875,13 @@ TEST_F(
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault" << true),
+ << "writeConcernMajorityJournalDefault"
+ << true),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -4347,10 +4896,13 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfSync
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault" << false),
+ << "writeConcernMajorityJournalDefault"
+ << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
@@ -4372,10 +4924,13 @@ TEST_F(ReplCoordTest, PopulateUnsetWriteConcernOptionsSyncModeReturnsInputIfWMod
assertStartSuccess(BSON("_id"
<< "mySet"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "test1:1234"))
- << "writeConcernMajorityJournalDefault" << false),
+ << "writeConcernMajorityJournalDefault"
+ << false),
HostAndPort("test1", 1234));
WriteConcernOptions wc;
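
Note on the hunks above: the bulk of the churn in replication_coordinator_impl_test.cpp is mechanical — clang-format splits a stream-style BSON builder chain so that each `<<` operand takes its own line once the full expression overflows the column limit. A minimal in-tree sketch of the resulting shape; the function name is invented for illustration, not code from this commit:

#include "mongo/db/jsobj.h"

namespace mongo {
BSONObj exampleReplConfig() {
    // Short chains stay packed; once a chain overflows the column
    // limit, every operand after the first drops to its own line.
    return BSON("_id"
                << "exampleSet"
                << "version"
                << 2
                << "members"
                << BSON_ARRAY(BSON("host"
                                   << "node1:12345"
                                   << "_id"
                                   << 0)));
}
}  // namespace mongo

The same rule accounts for the taller OldUpdatePositionArgs and UpdatePositionArgs hunks: nothing semantic changed, only where the lines break.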
diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp
index fc0f9866f55..e8617423953 100644
--- a/src/mongo/db/repl/replication_coordinator_mock.cpp
+++ b/src/mongo/db/repl/replication_coordinator_mock.cpp
@@ -32,12 +32,12 @@
#include "mongo/base/status.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/write_concern_options.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/read_concern_response.h"
#include "mongo/db/repl/replica_set_config.h"
#include "mongo/db/repl/sync_source_resolver.h"
#include "mongo/db/storage/snapshot_name.h"
+#include "mongo/db/write_concern_options.h"
#include "mongo/util/assert_util.h"
namespace mongo {
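
Note: the replication_coordinator_mock.cpp hunk above, like several hunks below, changes nothing but #include order — clang-format's include sorting orders each contiguous include block lexicographically. Sketch of the rule as applied above:

// Blocks are sorted lexicographically, so write_concern_options.h
// now follows the repl/ and storage/ headers it used to precede:
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/repl/replica_set_config.h"
#include "mongo/db/storage/snapshot_name.h"
#include "mongo/db/write_concern_options.h"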
diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
index 7d6d29614d2..319f67c3893 100644
--- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
+++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp
@@ -34,9 +34,9 @@
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/repl/is_master_response.h"
-#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/repl_set_heartbeat_args.h"
#include "mongo/db/repl/repl_set_heartbeat_args_v1.h"
+#include "mongo/db/repl/repl_settings.h"
#include "mongo/db/repl/replication_coordinator_external_state_mock.h"
#include "mongo/db/repl/replication_coordinator_impl.h"
#include "mongo/db/repl/storage_interface_mock.h"
@@ -238,13 +238,14 @@ void ReplCoordTest::simulateSuccessfulDryRun(
if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
ASSERT_TRUE(request.cmdObj.getBoolField("dryRun"));
onDryRunRequest(request);
- net->scheduleResponse(
- noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term" << request.cmdObj["term"].Long()
- << "voteGranted" << true)));
+ net->scheduleResponse(noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term"
+ << request.cmdObj["term"].Long()
+ << "voteGranted"
+ << true)));
voteRequests++;
} else {
error() << "Black holing unexpected request to " << request.target << ": "
@@ -298,13 +299,14 @@ void ReplCoordTest::simulateSuccessfulV1Election() {
hbResp.setConfigVersion(rsConfig.getConfigVersion());
net->scheduleResponse(noi, net->now(), makeResponseStatus(hbResp.toBSON(true)));
} else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetRequestVotes") {
- net->scheduleResponse(
- noi,
- net->now(),
- makeResponseStatus(BSON("ok" << 1 << "reason"
- << ""
- << "term" << request.cmdObj["term"].Long()
- << "voteGranted" << true)));
+ net->scheduleResponse(noi,
+ net->now(),
+ makeResponseStatus(BSON("ok" << 1 << "reason"
+ << ""
+ << "term"
+ << request.cmdObj["term"].Long()
+ << "voteGranted"
+ << true)));
} else {
error() << "Black holing unexpected request to " << request.target << ": "
<< request.cmdObj;
@@ -359,8 +361,8 @@ void ReplCoordTest::simulateSuccessfulElection() {
net->scheduleResponse(
noi,
net->now(),
- makeResponseStatus(BSON("ok" << 1 << "fresher" << false << "opTime" << Date_t()
- << "veto" << false)));
+ makeResponseStatus(BSON(
+ "ok" << 1 << "fresher" << false << "opTime" << Date_t() << "veto" << false)));
} else if (request.cmdObj.firstElement().fieldNameStringData() == "replSetElect") {
net->scheduleResponse(noi,
net->now(),
diff --git a/src/mongo/db/repl/replication_executor.cpp b/src/mongo/db/repl/replication_executor.cpp
index 0caabfe808d..0154c5b53e7 100644
--- a/src/mongo/db/repl/replication_executor.cpp
+++ b/src/mongo/db/repl/replication_executor.cpp
@@ -310,8 +310,8 @@ void ReplicationExecutor::_finishRemoteCommand(const RemoteCommandRequest& reque
return;
}
- LOG(4) << "Received remote response: " << (response.isOK() ? response.getValue().toString()
- : response.getStatus().toString());
+ LOG(4) << "Received remote response: "
+ << (response.isOK() ? response.getValue().toString() : response.getStatus().toString());
callback->_callbackFn =
stdx::bind(remoteCommandFinished, stdx::placeholders::_1, cb, request, response);
diff --git a/src/mongo/db/repl/replication_executor_test.cpp b/src/mongo/db/repl/replication_executor_test.cpp
index ceb4ec89beb..7b2de1e1fe1 100644
--- a/src/mongo/db/repl/replication_executor_test.cpp
+++ b/src/mongo/db/repl/replication_executor_test.cpp
@@ -31,14 +31,14 @@
#include <map>
#include "mongo/base/init.h"
-#include "mongo/executor/task_executor_test_common.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/replication_executor.h"
#include "mongo/db/repl/replication_executor_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
-#include "mongo/stdx/memory.h"
+#include "mongo/executor/task_executor_test_common.h"
#include "mongo/stdx/functional.h"
+#include "mongo/stdx/memory.h"
#include "mongo/stdx/thread.h"
#include "mongo/unittest/barrier.h"
#include "mongo/unittest/unittest.h"
@@ -56,11 +56,10 @@ using unittest::assertGet;
const int64_t prngSeed = 1;
MONGO_INITIALIZER(ReplExecutorCommonTests)(InitializerContext*) {
- mongo::executor::addTestsForExecutor("ReplicationExecutorCommon",
- [](std::unique_ptr<executor::NetworkInterfaceMock>* net) {
- return stdx::make_unique<ReplicationExecutor>(
- net->release(), prngSeed);
- });
+ mongo::executor::addTestsForExecutor(
+ "ReplicationExecutorCommon", [](std::unique_ptr<executor::NetworkInterfaceMock>* net) {
+ return stdx::make_unique<ReplicationExecutor>(net->release(), prngSeed);
+ });
return Status::OK();
}
@@ -71,16 +70,19 @@ TEST_F(ReplicationExecutorTest, ScheduleDBWorkAndExclusiveWorkConcurrently) {
Status status1 = getDetectableErrorStatus();
OperationContext* txn = nullptr;
using CallbackData = ReplicationExecutor::CallbackArgs;
- ASSERT_OK(executor.scheduleDBWork([&](const CallbackData& cbData) {
- status1 = cbData.status;
- txn = cbData.txn;
- barrier.countDownAndWait();
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }).getStatus());
- ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
- barrier.countDownAndWait();
- }).getStatus());
+ ASSERT_OK(executor
+ .scheduleDBWork([&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ txn = cbData.txn;
+ barrier.countDownAndWait();
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ })
+ .getStatus());
+ ASSERT_OK(executor
+ .scheduleWorkWithGlobalExclusiveLock(
+ [&](const CallbackData& cbData) { barrier.countDownAndWait(); })
+ .getStatus());
executor.run();
ASSERT_OK(status1);
ASSERT(txn);
@@ -93,14 +95,20 @@ TEST_F(ReplicationExecutorTest, ScheduleDBWorkWithCollectionLock) {
OperationContext* txn = nullptr;
bool collectionIsLocked = false;
using CallbackData = ReplicationExecutor::CallbackArgs;
- ASSERT_OK(executor.scheduleDBWork([&](const CallbackData& cbData) {
- status1 = cbData.status;
- txn = cbData.txn;
- collectionIsLocked =
- txn ? txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X) : false;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }, nss, MODE_X).getStatus());
+ ASSERT_OK(executor
+ .scheduleDBWork(
+ [&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ txn = cbData.txn;
+ collectionIsLocked = txn
+ ? txn->lockState()->isCollectionLockedForMode(nss.ns(), MODE_X)
+ : false;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ },
+ nss,
+ MODE_X)
+ .getStatus());
executor.run();
ASSERT_OK(status1);
ASSERT(txn);
@@ -113,13 +121,15 @@ TEST_F(ReplicationExecutorTest, ScheduleExclusiveLockOperation) {
OperationContext* txn = nullptr;
bool lockIsW = false;
using CallbackData = ReplicationExecutor::CallbackArgs;
- ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
- status1 = cbData.status;
- txn = cbData.txn;
- lockIsW = txn ? txn->lockState()->isW() : false;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }).getStatus());
+ ASSERT_OK(executor
+ .scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ txn = cbData.txn;
+ lockIsW = txn ? txn->lockState()->isW() : false;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ })
+ .getStatus());
executor.run();
ASSERT_OK(status1);
ASSERT(txn);
@@ -130,20 +140,24 @@ TEST_F(ReplicationExecutorTest, ShutdownBeforeRunningSecondExclusiveLockOperatio
ReplicationExecutor& executor = getReplExecutor();
using CallbackData = ReplicationExecutor::CallbackArgs;
Status status1 = getDetectableErrorStatus();
- ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
- status1 = cbData.status;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }).getStatus());
+ ASSERT_OK(executor
+ .scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ })
+ .getStatus());
// Second db work item is invoked by the main executor thread because the work item is
// moved from the exclusive lock queue to the ready work item queue when the first callback
// cancels the executor.
Status status2 = getDetectableErrorStatus();
- ASSERT_OK(executor.scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
- status2 = cbData.status;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- }).getStatus());
+ ASSERT_OK(executor
+ .scheduleWorkWithGlobalExclusiveLock([&](const CallbackData& cbData) {
+ status2 = cbData.status;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ })
+ .getStatus());
executor.run();
ASSERT_OK(status1);
ASSERT_EQUALS(ErrorCodes::CallbackCanceled, status2.code());
@@ -153,13 +167,12 @@ TEST_F(ReplicationExecutorTest, CancelBeforeRunningFutureWork) {
ReplicationExecutor& executor = getReplExecutor();
using CallbackData = ReplicationExecutor::CallbackArgs;
Status status1 = getDetectableErrorStatus();
- auto cbhWithStatus =
- executor.scheduleWorkAt(executor.now() + Milliseconds(1000),
- [&](const CallbackData& cbData) {
- status1 = cbData.status;
- if (cbData.status != ErrorCodes::CallbackCanceled)
- cbData.executor->shutdown();
- });
+ auto cbhWithStatus = executor.scheduleWorkAt(
+ executor.now() + Milliseconds(1000), [&](const CallbackData& cbData) {
+ status1 = cbData.status;
+ if (cbData.status != ErrorCodes::CallbackCanceled)
+ cbData.executor->shutdown();
+ });
ASSERT_OK(cbhWithStatus.getStatus());
ASSERT_EQUALS(1, executor.getDiagnosticBSON().getFieldDotted("queues.sleepers").Int());
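
Note: the replication_executor_test.cpp hunks above show how the formatter handles a lambda passed to a member call whose result is immediately chained — it breaks before each `.`, so the lambda hangs under the object rather than wrapping after the assignment. A standalone, compilable sketch of the same shape; Scheduler is an invented stand-in, not a MongoDB type:

#include <functional>
#include <iostream>

struct Scheduler {
    // Returns *this so the call can be chained, mirroring
    // executor.scheduleDBWork(...).getStatus() above in shape only.
    Scheduler& scheduleDBWork(std::function<void()> fn) {
        fn();
        return *this;
    }
    int getStatus() const {
        return 0;
    }
};

int main() {
    Scheduler executor;
    // The formatter breaks before each `.` once the chain overflows:
    int status = executor
                     .scheduleDBWork([&] {
                         std::cout << "work item ran\n";
                     })
                     .getStatus();
    return status;
}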
diff --git a/src/mongo/db/repl/replication_executor_test_fixture.h b/src/mongo/db/repl/replication_executor_test_fixture.h
index 73e5ae2c504..7b7845302d7 100644
--- a/src/mongo/db/repl/replication_executor_test_fixture.h
+++ b/src/mongo/db/repl/replication_executor_test_fixture.h
@@ -28,9 +28,9 @@
#pragma once
-#include "mongo/stdx/memory.h"
#include "mongo/db/repl/replication_executor.h"
#include "mongo/executor/task_executor_test_fixture.h"
+#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
diff --git a/src/mongo/db/repl/replset_commands.cpp b/src/mongo/db/repl/replset_commands.cpp
index 70aaa8b7b6d..c91e28b71a6 100644
--- a/src/mongo/db/repl/replset_commands.cpp
+++ b/src/mongo/db/repl/replset_commands.cpp
@@ -47,8 +47,8 @@
#include "mongo/db/repl/initial_sync.h"
#include "mongo/db/repl/old_update_position_args.h"
#include "mongo/db/repl/oplog.h"
-#include "mongo/db/repl/repl_set_heartbeat_args_v1.h"
#include "mongo/db/repl/repl_set_heartbeat_args.h"
+#include "mongo/db/repl/repl_set_heartbeat_args_v1.h"
#include "mongo/db/repl/repl_set_heartbeat_response.h"
#include "mongo/db/repl/replication_coordinator_external_state_impl.h"
#include "mongo/db/repl/replication_coordinator_global.h"
@@ -451,7 +451,8 @@ public:
txn,
BSON("msg"
<< "Reconfig set"
- << "version" << parsedArgs.newConfigObj["version"]));
+ << "version"
+ << parsedArgs.newConfigObj["version"]));
}
wuow.commit();
@@ -837,7 +838,8 @@ public:
BSONElement cfgverElement = cmdObj["cfgver"];
uassert(28525,
str::stream() << "Expected cfgver argument to replSetFresh command to have "
- "numeric type, but found " << typeName(cfgverElement.type()),
+ "numeric type, but found "
+ << typeName(cfgverElement.type()),
cfgverElement.isNumber());
parsedArgs.cfgver = cfgverElement.safeNumberLong();
parsedArgs.opTime = Timestamp(cmdObj["opTime"].Date());
@@ -871,7 +873,8 @@ private:
BSONElement cfgverElement = cmdObj["cfgver"];
uassert(28526,
str::stream() << "Expected cfgver argument to replSetElect command to have "
- "numeric type, but found " << typeName(cfgverElement.type()),
+ "numeric type, but found "
+ << typeName(cfgverElement.type()),
cfgverElement.isNumber());
parsedArgs.cfgver = cfgverElement.safeNumberLong();
parsedArgs.round = cmdObj["round"].OID();
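
Note: the uassert hunks in replset_commands.cpp above look like string edits but are not — the message is two adjacent string literals, which the compiler concatenates, and only the break before the following `<<` moved. Standalone sketch; typeName here is a stand-in for typeName(cfgverElement.type()):

#include <iostream>
#include <sstream>
#include <string>

std::string typeName() {
    return "String";
}

int main() {
    std::ostringstream stream;
    // One logical string, split across two source lines; the `<<`
    // that follows it now starts its own line.
    stream << "Expected cfgver argument to replSetFresh command to have "
              "numeric type, but found "
           << typeName();
    std::cout << stream.str() << '\n';
    return 0;
}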
diff --git a/src/mongo/db/repl/replset_web_handler.cpp b/src/mongo/db/repl/replset_web_handler.cpp
index 6ec53363396..3f67cd9a45c 100644
--- a/src/mongo/db/repl/replset_web_handler.cpp
+++ b/src/mongo/db/repl/replset_web_handler.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/dbwebserver.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/repl/repl_set_html_summary.h"
+#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/repl/rslog.h"
#include "mongo/util/mongoutils/html.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/repl/reporter_test.cpp b/src/mongo/db/repl/reporter_test.cpp
index 3d6b8b81b43..926df8f3a62 100644
--- a/src/mongo/db/repl/reporter_test.cpp
+++ b/src/mongo/db/repl/reporter_test.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/reporter.h"
#include "mongo/db/repl/update_position_args.h"
-#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/executor/network_interface_mock.h"
+#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/stdx/memory.h"
#include "mongo/unittest/task_executor_proxy.h"
#include "mongo/unittest/unittest.h"
@@ -370,7 +370,8 @@ TEST_F(ReporterTestNoTriggerAtSetUp,
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "newer config"
- << "configVersion" << 100));
+ << "configVersion"
+ << 100));
ASSERT_EQUALS(Status(ErrorCodes::InvalidReplicaSetConfig, "invalid config"), reporter->join());
assertReporterDone();
@@ -389,7 +390,8 @@ TEST_F(ReporterTest, InvalidReplicaSetResponseWithSameConfigVersionOnSyncTargetS
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "invalid config"
- << "configVersion" << posUpdater->getConfigVersion()));
+ << "configVersion"
+ << posUpdater->getConfigVersion()));
ASSERT_EQUALS(Status(ErrorCodes::InvalidReplicaSetConfig, "invalid config"), reporter->join());
assertReporterDone();
@@ -406,7 +408,8 @@ TEST_F(
processNetworkResponse(BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig)
<< "errmsg"
<< "newer config"
- << "configVersion" << posUpdater->getConfigVersion() + 1));
+ << "configVersion"
+ << posUpdater->getConfigVersion() + 1));
ASSERT_TRUE(reporter->isActive());
}
@@ -430,7 +433,8 @@ TEST_F(
commandRequest = processNetworkResponse(
BSON("ok" << 0 << "code" << int(ErrorCodes::InvalidReplicaSetConfig) << "errmsg"
<< "newer config"
- << "configVersion" << posUpdater->getConfigVersion() + 1));
+ << "configVersion"
+ << posUpdater->getConfigVersion() + 1));
ASSERT_EQUALS(expectedOldStyleCommandRequest, commandRequest);
ASSERT_TRUE(reporter->isActive());
@@ -526,7 +530,7 @@ TEST_F(ReporterTestNoTriggerAtSetUp, CommandPreparationFailureStopsTheReporter)
Status expectedStatus(ErrorCodes::UnknownError, "unknown error");
prepareReplSetUpdatePositionCommandFn =
[expectedStatus](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle commandStyle)
- -> StatusWith<BSONObj> { return expectedStatus; };
+ -> StatusWith<BSONObj> { return expectedStatus; };
ASSERT_OK(reporter->trigger());
ASSERT_EQUALS(expectedStatus, reporter->join());
@@ -544,7 +548,7 @@ TEST_F(ReporterTest, CommandPreparationFailureDuringRescheduleStopsTheReporter)
Status expectedStatus(ErrorCodes::UnknownError, "unknown error");
prepareReplSetUpdatePositionCommandFn =
[expectedStatus](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle commandStyle)
- -> StatusWith<BSONObj> { return expectedStatus; };
+ -> StatusWith<BSONObj> { return expectedStatus; };
processNetworkResponse(BSON("ok" << 1));
@@ -704,7 +708,7 @@ TEST_F(ReporterTest, KeepAliveTimeoutFailingToScheduleRemoteCommandShouldMakeRep
Status expectedStatus(ErrorCodes::UnknownError, "failed to prepare update command");
prepareReplSetUpdatePositionCommandFn =
[expectedStatus](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle commandStyle)
- -> StatusWith<BSONObj> { return expectedStatus; };
+ -> StatusWith<BSONObj> { return expectedStatus; };
runUntil(until);
diff --git a/src/mongo/db/repl/resync.cpp b/src/mongo/db/repl/resync.cpp
index 674b43f969a..86797724eed 100644
--- a/src/mongo/db/repl/resync.cpp
+++ b/src/mongo/db/repl/resync.cpp
@@ -28,10 +28,10 @@
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/repl/bgsync.h"
#include "mongo/db/repl/master_slave.h" // replSettings
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/operation_context.h"
namespace mongo {
diff --git a/src/mongo/db/repl/roll_back_local_operations_test.cpp b/src/mongo/db/repl/roll_back_local_operations_test.cpp
index 06af9890571..87e888a62d0 100644
--- a/src/mongo/db/repl/roll_back_local_operations_test.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations_test.cpp
@@ -121,8 +121,9 @@ TEST(RollBackLocalOperationsTest, RollbackOperationFailed) {
makeOpAndRecordId(2, 1), commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
- auto rollbackOperation =
- [&](const BSONObj& operation) { return Status(ErrorCodes::OperationFailed, ""); };
+ auto rollbackOperation = [&](const BSONObj& operation) {
+ return Status(ErrorCodes::OperationFailed, "");
+ };
RollBackLocalOperations finder(localOplog, rollbackOperation);
auto result = finder.onRemoteOperation(commonOperation.first);
ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
@@ -211,8 +212,9 @@ TEST(RollBackLocalOperationsTest, SameTimestampDifferentHashesRollbackOperationF
makeOpAndRecordId(1, 3), commonOperation,
});
OplogInterfaceMock localOplog(localOperations);
- auto rollbackOperation =
- [&](const BSONObj& operation) { return Status(ErrorCodes::OperationFailed, ""); };
+ auto rollbackOperation = [&](const BSONObj& operation) {
+ return Status(ErrorCodes::OperationFailed, "");
+ };
RollBackLocalOperations finder(localOplog, rollbackOperation);
auto result = finder.onRemoteOperation(makeOp(1, 2));
ASSERT_EQUALS(ErrorCodes::OperationFailed, result.getStatus().code());
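
Note: the roll_back_local_operations_test.cpp hunks above show the rule for a lambda bound to a local variable — the introducer and parameter list stay on the declaration line and the body indents beneath it, instead of the whole lambda wrapping after the `=`. Standalone sketch with invented names:

#include <string>

struct Result {
    int code;
    std::string reason;
};

int main() {
    // Introducer on the declaration line; body one level in.
    auto rollbackOperation = [](const std::string& operation) {
        return Result{1, "OperationFailed: " + operation};
    };
    return rollbackOperation("noop").code == 1 ? 0 : 1;
}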
diff --git a/src/mongo/db/repl/rollback_checker.cpp b/src/mongo/db/repl/rollback_checker.cpp
index 7d1a710982a..cb86eb2a811 100644
--- a/src/mongo/db/repl/rollback_checker.cpp
+++ b/src/mongo/db/repl/rollback_checker.cpp
@@ -49,32 +49,34 @@ RollbackChecker::RollbackChecker(executor::TaskExecutor* executor, HostAndPort s
RollbackChecker::~RollbackChecker() {}
RollbackChecker::CallbackHandle RollbackChecker::checkForRollback(const CallbackFn& nextAction) {
- return _scheduleGetRollbackId([this, nextAction](const RemoteCommandCallbackArgs& args) {
- if (args.response.getStatus() == ErrorCodes::CallbackCanceled) {
- return;
- }
- if (!args.response.isOK()) {
- nextAction(args.response.getStatus());
- return;
- }
- if (auto rbidElement = args.response.getValue().data["rbid"]) {
- int remoteRBID = rbidElement.numberInt();
-
- UniqueLock lk(_mutex);
- bool hadRollback = _checkForRollback_inlock(remoteRBID);
- lk.unlock();
-
- if (hadRollback) {
- nextAction(Status(ErrorCodes::UnrecoverableRollbackError,
- "RollbackChecker detected rollback occurred"));
+ return _scheduleGetRollbackId(
+ [this, nextAction](const RemoteCommandCallbackArgs& args) {
+ if (args.response.getStatus() == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ if (!args.response.isOK()) {
+ nextAction(args.response.getStatus());
+ return;
+ }
+ if (auto rbidElement = args.response.getValue().data["rbid"]) {
+ int remoteRBID = rbidElement.numberInt();
+
+ UniqueLock lk(_mutex);
+ bool hadRollback = _checkForRollback_inlock(remoteRBID);
+ lk.unlock();
+
+ if (hadRollback) {
+ nextAction(Status(ErrorCodes::UnrecoverableRollbackError,
+ "RollbackChecker detected rollback occurred"));
+ } else {
+ nextAction(Status::OK());
+ }
} else {
- nextAction(Status::OK());
+ nextAction(Status(ErrorCodes::CommandFailed,
+ "replSetGetRBID command failed when checking for rollback"));
}
- } else {
- nextAction(Status(ErrorCodes::CommandFailed,
- "replSetGetRBID command failed when checking for rollback"));
- }
- }, nextAction);
+ },
+ nextAction);
}
bool RollbackChecker::hasHadRollback() {
@@ -87,27 +89,29 @@ bool RollbackChecker::hasHadRollback() {
}
RollbackChecker::CallbackHandle RollbackChecker::reset(const CallbackFn& nextAction) {
- return _scheduleGetRollbackId([this, nextAction](const RemoteCommandCallbackArgs& args) {
- if (args.response.getStatus() == ErrorCodes::CallbackCanceled) {
- return;
- }
- if (!args.response.isOK()) {
- nextAction(args.response.getStatus());
- return;
- }
- if (auto rbidElement = args.response.getValue().data["rbid"]) {
- int newRBID = rbidElement.numberInt();
-
- UniqueLock lk(_mutex);
- _setRBID_inlock(newRBID);
- lk.unlock();
-
- nextAction(Status::OK());
- } else {
- nextAction(Status(ErrorCodes::CommandFailed,
- "replSetGetRBID command failed when checking for rollback"));
- }
- }, nextAction);
+ return _scheduleGetRollbackId(
+ [this, nextAction](const RemoteCommandCallbackArgs& args) {
+ if (args.response.getStatus() == ErrorCodes::CallbackCanceled) {
+ return;
+ }
+ if (!args.response.isOK()) {
+ nextAction(args.response.getStatus());
+ return;
+ }
+ if (auto rbidElement = args.response.getValue().data["rbid"]) {
+ int newRBID = rbidElement.numberInt();
+
+ UniqueLock lk(_mutex);
+ _setRBID_inlock(newRBID);
+ lk.unlock();
+
+ nextAction(Status::OK());
+ } else {
+ nextAction(Status(ErrorCodes::CommandFailed,
+ "replSetGetRBID command failed when checking for rollback"));
+ }
+ },
+ nextAction);
}
Status RollbackChecker::reset_sync() {
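
Note: rollback_checker.cpp above shows the complementary case — when the lambda is not the final argument (nextAction follows it), the formatter gives every argument its own line and indents the lambda as a block instead of letting it hang off the call. Standalone sketch; scheduleWithFallback is invented to mirror _scheduleGetRollbackId(callback, nextAction) in shape only:

#include <functional>
#include <iostream>

void scheduleWithFallback(std::function<void(int)> callback,
                          std::function<void(int)> fallback) {
    callback(0);
    fallback(1);
}

int main() {
    auto log = [](int code) { std::cout << "status " << code << '\n'; };
    // The lambda is followed by another argument, so each argument
    // gets its own line under the opening parenthesis:
    scheduleWithFallback(
        [&](int status) {
            log(status);
        },
        log);
    return 0;
}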
diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp
index 443e242deb2..f416af8c716 100644
--- a/src/mongo/db/repl/rollback_source_impl.cpp
+++ b/src/mongo/db/repl/rollback_source_impl.cpp
@@ -35,8 +35,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/oplogreader.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
namespace repl {
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
index 2f31e9a63d7..fe25036f399 100644
--- a/src/mongo/db/repl/rs_initialsync.cpp
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -40,10 +40,9 @@
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/client.h"
#include "mongo/db/cloner.h"
-#include "mongo/db/db_raii.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/db_raii.h"
#include "mongo/db/dbhelpers.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/op_observer.h"
#include "mongo/db/repl/bgsync.h"
#include "mongo/db/repl/initial_sync.h"
@@ -52,6 +51,7 @@
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/repl/storage_interface.h"
+#include "mongo/db/service_context.h"
#include "mongo/util/exit.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
@@ -143,7 +143,8 @@ void checkAdminDatabasePostClone(OperationContext* txn, Database* adminDb) {
<< " but could not find an auth schema version document in "
<< AuthorizationManager::versionCollectionNamespace;
severe() << "This indicates that the primary of this replica set was not successfully "
- "upgraded to schema version " << AuthorizationManager::schemaVersion26Final
+ "upgraded to schema version "
+ << AuthorizationManager::schemaVersion26Final
<< ", which is the minimum supported schema version in this version of MongoDB";
fassertFailedNoTrace(28620);
}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index bb5531dbf34..0663c13e4dd 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -37,17 +37,17 @@
#include <memory>
#include "mongo/bson/util/bson_extract.h"
-#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/authorization_manager.h"
+#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/document_validation.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
+#include "mongo/db/db_raii.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/db_raii.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/ops/update.h"
#include "mongo/db/ops/update_lifecycle_impl.h"
@@ -454,7 +454,8 @@ void syncFixUp(OperationContext* txn,
auto status = options.parse(optionsField.Obj());
if (!status.isOK()) {
throw RSFatalException(str::stream() << "Failed to parse options " << info
- << ": " << status.toString());
+ << ": "
+ << status.toString());
}
} else {
// Use default options.
@@ -467,19 +468,19 @@ void syncFixUp(OperationContext* txn,
auto status = collection->setValidator(txn, options.validator);
if (!status.isOK()) {
- throw RSFatalException(str::stream()
- << "Failed to set validator: " << status.toString());
+ throw RSFatalException(str::stream() << "Failed to set validator: "
+ << status.toString());
}
status = collection->setValidationAction(txn, options.validationAction);
if (!status.isOK()) {
- throw RSFatalException(str::stream()
- << "Failed to set validationAction: " << status.toString());
+ throw RSFatalException(str::stream() << "Failed to set validationAction: "
+ << status.toString());
}
status = collection->setValidationLevel(txn, options.validationLevel);
if (!status.isOK()) {
- throw RSFatalException(str::stream()
- << "Failed to set validationLevel: " << status.toString());
+ throw RSFatalException(str::stream() << "Failed to set validationLevel: "
+ << status.toString());
}
wuow.commit();
@@ -822,7 +823,8 @@ Status _syncRollback(OperationContext* txn,
if (!replCoord->setFollowerMode(MemberState::RS_ROLLBACK)) {
return Status(ErrorCodes::OperationFailed,
str::stream() << "Cannot transition from "
- << replCoord->getMemberState().toString() << " to "
+ << replCoord->getMemberState().toString()
+ << " to "
<< MemberState(MemberState::RS_ROLLBACK).toString());
}
}
@@ -833,8 +835,9 @@ Status _syncRollback(OperationContext* txn,
{
log() << "rollback 2 FindCommonPoint";
try {
- auto processOperationForFixUp =
- [&how](const BSONObj& operation) { return refetch(how, operation); };
+ auto processOperationForFixUp = [&how](const BSONObj& operation) {
+ return refetch(how, operation);
+ };
auto res = syncRollBackLocalOperations(
localOplog, rollbackSource.getOplog(), processOperationForFixUp);
if (!res.isOK()) {
@@ -856,7 +859,8 @@ Status _syncRollback(OperationContext* txn,
return Status(ErrorCodes::UnrecoverableRollbackError,
str::stream()
<< "need to rollback, but unable to determine common point between"
- " local and remote oplog: " << e.what(),
+ " local and remote oplog: "
+ << e.what(),
18752);
} catch (const DBException& e) {
warning() << "rollback 2 exception " << e.toString() << "; sleeping 1 min";
@@ -912,11 +916,9 @@ Status syncRollback(OperationContext* txn,
const OplogInterface& localOplog,
const RollbackSource& rollbackSource,
ReplicationCoordinator* replCoord) {
- return syncRollback(txn,
- localOplog,
- rollbackSource,
- replCoord,
- [](Seconds seconds) { sleepsecs(durationCount<Seconds>(seconds)); });
+ return syncRollback(txn, localOplog, rollbackSource, replCoord, [](Seconds seconds) {
+ sleepsecs(durationCount<Seconds>(seconds));
+ });
}
} // namespace repl
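
Note: the syncRollback hunk above shows the opposite case from the calling side — when the lambda is the final argument, the preceding arguments stay packed on the call line and only the lambda body hangs at one level of indentation. Standalone sketch; runWithSleeper is invented to mirror the sleep hook syncRollback threads through:

#include <chrono>
#include <functional>
#include <thread>

void runWithSleeper(int seconds, std::function<void(int)> sleeper) {
    sleeper(seconds);
}

int main() {
    // Final-argument lambdas attach to the call line rather than
    // forcing one-argument-per-line formatting:
    runWithSleeper(0, [](int seconds) {
        std::this_thread::sleep_for(std::chrono::seconds(seconds));
    });
    return 0;
}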
diff --git a/src/mongo/db/repl/rs_rollback_test.cpp b/src/mongo/db/repl/rs_rollback_test.cpp
index 5387be5eb7c..096e3902bed 100644
--- a/src/mongo/db/repl/rs_rollback_test.cpp
+++ b/src/mongo/db/repl/rs_rollback_test.cpp
@@ -199,7 +199,8 @@ TEST_F(RSRollbackTest, SetFollowerModeFailed) {
RollbackSourceMock(std::unique_ptr<OplogInterface>(
new OplogInterfaceMock(kEmptyMockOperations))),
_coordinator,
- noSleep).code());
+ noSleep)
+ .code());
}
TEST_F(RSRollbackTest, OplogStartMissing) {
@@ -214,7 +215,8 @@ TEST_F(RSRollbackTest, OplogStartMissing) {
operation,
}))),
_coordinator,
- noSleep).code());
+ noSleep)
+ .code());
}
TEST_F(RSRollbackTest, NoRemoteOpLog) {
@@ -307,7 +309,8 @@ int _testRollbackDelete(OperationContext* txn,
<< "d"
<< "ns"
<< "test.t"
- << "o" << BSON("_id" << 0)),
+ << "o"
+ << BSON("_id" << 0)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -383,7 +386,8 @@ TEST_F(RSRollbackTest, RollbackInsertDocumentWithNoId) {
<< "i"
<< "ns"
<< "test.t"
- << "o" << BSON("a" << 1)),
+ << "o"
+ << BSON("a" << 1)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -419,7 +423,9 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
auto collection = _createCollection(_txn.get(), "test.t", CollectionOptions());
auto indexSpec = BSON("ns"
<< "test.t"
- << "key" << BSON("a" << 1) << "name"
+ << "key"
+ << BSON("a" << 1)
+ << "name"
<< "a_1");
{
Lock::DBLock dbLock(_txn->lockState(), "test", MODE_X);
@@ -439,7 +445,8 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommand) {
<< "i"
<< "ns"
<< "test.system.indexes"
- << "o" << indexSpec),
+ << "o"
+ << indexSpec),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -483,7 +490,9 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
auto collection = _createCollection(_txn.get(), "test.t", CollectionOptions());
auto indexSpec = BSON("ns"
<< "test.t"
- << "key" << BSON("a" << 1) << "name"
+ << "key"
+ << BSON("a" << 1)
+ << "name"
<< "a_1");
// Skip index creation to trigger warning during rollback.
{
@@ -499,7 +508,8 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandIndexNotInCatalog) {
<< "i"
<< "ns"
<< "test.system.indexes"
- << "o" << indexSpec),
+ << "o"
+ << indexSpec),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -545,8 +555,9 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingNamespace) {
<< "i"
<< "ns"
<< "test.system.indexes"
- << "o" << BSON("key" << BSON("a" << 1) << "name"
- << "a_1")),
+ << "o"
+ << BSON("key" << BSON("a" << 1) << "name"
+ << "a_1")),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -587,10 +598,13 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandInvalidNamespace) {
<< "i"
<< "ns"
<< "test.system.indexes"
- << "o" << BSON("ns"
- << "test."
- << "key" << BSON("a" << 1) << "name"
- << "a_1")),
+ << "o"
+ << BSON("ns"
+ << "test."
+ << "key"
+ << BSON("a" << 1)
+ << "name"
+ << "a_1")),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -631,9 +645,11 @@ TEST_F(RSRollbackTest, RollbackCreateIndexCommandMissingIndexName) {
<< "i"
<< "ns"
<< "test.system.indexes"
- << "o" << BSON("ns"
- << "test.t"
- << "key" << BSON("a" << 1))),
+ << "o"
+ << BSON("ns"
+ << "test.t"
+ << "key"
+ << BSON("a" << 1))),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -673,8 +689,9 @@ TEST_F(RSRollbackTest, RollbackUnknownCommand) {
<< "c"
<< "ns"
<< "test.t"
- << "o" << BSON("unknown_command"
- << "t")),
+ << "o"
+ << BSON("unknown_command"
+ << "t")),
RecordId(2));
{
Lock::DBLock dbLock(_txn->lockState(), "test", MODE_X);
@@ -705,8 +722,9 @@ TEST_F(RSRollbackTest, RollbackDropCollectionCommand) {
<< "c"
<< "ns"
<< "test.t"
- << "o" << BSON("drop"
- << "t")),
+ << "o"
+ << BSON("drop"
+ << "t")),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -787,24 +805,30 @@ TEST_F(RSRollbackTest, RollbackApplyOpsCommand) {
<< "u"
<< "ns"
<< "test.t"
- << "o2" << BSON("_id" << 1) << "o"
+ << "o2"
+ << BSON("_id" << 1)
+ << "o"
<< BSON("_id" << 1 << "v" << 2)),
BSON("op"
<< "u"
<< "ns"
<< "test.t"
- << "o2" << BSON("_id" << 2) << "o"
+ << "o2"
+ << BSON("_id" << 2)
+ << "o"
<< BSON("_id" << 2 << "v" << 4)),
BSON("op"
<< "d"
<< "ns"
<< "test.t"
- << "o" << BSON("_id" << 3)),
+ << "o"
+ << BSON("_id" << 3)),
BSON("op"
<< "i"
<< "ns"
<< "test.t"
- << "o" << BSON("_id" << 4))}),
+ << "o"
+ << BSON("_id" << 4))}),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
@@ -870,8 +894,9 @@ TEST_F(RSRollbackTest, RollbackCreateCollectionCommand) {
<< "c"
<< "ns"
<< "test.t"
- << "o" << BSON("create"
- << "t")),
+ << "o"
+ << BSON("create"
+ << "t")),
RecordId(2));
RollbackSourceMock rollbackSource(std::unique_ptr<OplogInterface>(new OplogInterfaceMock({
commonOperation,
@@ -899,9 +924,11 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommand) {
<< "c"
<< "ns"
<< "test.t"
- << "o" << BSON("collMod"
- << "t"
- << "noPadding" << false)),
+ << "o"
+ << BSON("collMod"
+ << "t"
+ << "noPadding"
+ << false)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
@@ -940,9 +967,11 @@ TEST_F(RSRollbackTest, RollbackCollectionModificationCommandInvalidCollectionOpt
<< "c"
<< "ns"
<< "test.t"
- << "o" << BSON("collMod"
- << "t"
- << "noPadding" << false)),
+ << "o"
+ << BSON("collMod"
+ << "t"
+ << "noPadding"
+ << false)),
RecordId(2));
class RollbackSourceLocal : public RollbackSourceMock {
public:
diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp
index cc8eaa320e5..455235e519c 100644
--- a/src/mongo/db/repl/rs_sync.cpp
+++ b/src/mongo/db/repl/rs_sync.cpp
@@ -41,8 +41,8 @@
#include "mongo/db/client.h"
#include "mongo/db/commands/fsync.h"
#include "mongo/db/commands/server_status.h"
-#include "mongo/db/curop.h"
#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/curop.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/repl/bgsync.h"
#include "mongo/db/repl/optime.h"
diff --git a/src/mongo/db/repl/rs_sync.h b/src/mongo/db/repl/rs_sync.h
index ec174268b5c..513c6265657 100644
--- a/src/mongo/db/repl/rs_sync.h
+++ b/src/mongo/db/repl/rs_sync.h
@@ -32,10 +32,10 @@
#include <vector>
#include "mongo/db/client.h"
-#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/repl/initial_sync.h"
#include "mongo/db/repl/sync_tail.h"
+#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/util/concurrency/old_thread_pool.h"
namespace mongo {
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index ef9935ae86f..3f3267158d8 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -154,7 +154,8 @@ void StorageInterfaceImpl::setMinValid(OperationContext* txn,
txn,
_minValidNss.ns().c_str(),
BSON("$set" << BSON("ts" << endOpTime.getTimestamp() << "t" << endOpTime.getTerm())
- << "$unset" << BSON(kBeginFieldName << 1)));
+ << "$unset"
+ << BSON(kBeginFieldName << 1)));
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
txn, "StorageInterfaceImpl::setMinValid", _minValidNss.ns());
@@ -174,7 +175,8 @@ void StorageInterfaceImpl::setMinValid(OperationContext* txn, const BatchBoundar
Helpers::putSingleton(txn,
_minValidNss.ns().c_str(),
BSON("$set" << BSON("ts" << end.getTimestamp() << "t" << end.getTerm()
- << kBeginFieldName << start.toBSON())));
+ << kBeginFieldName
+ << start.toBSON())));
}
MONGO_WRITE_CONFLICT_RETRY_LOOP_END(
txn, "StorageInterfaceImpl::setMinValid", _minValidNss.ns());
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 411b3fb0133..ed05b1fc48b 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -33,11 +33,11 @@
#include "mongo/db/repl/sync_source_feedback.h"
#include "mongo/db/client.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/repl/bgsync.h"
#include "mongo/db/repl/replica_set_config.h"
#include "mongo/db/repl/replication_coordinator.h"
#include "mongo/db/repl/reporter.h"
-#include "mongo/db/operation_context.h"
#include "mongo/executor/network_interface_factory.h"
#include "mongo/executor/network_interface_thread_pool.h"
#include "mongo/executor/thread_pool_task_executor.h"
@@ -75,25 +75,24 @@ Milliseconds calculateKeepAliveInterval(OperationContext* txn, stdx::mutex& mtx)
*/
Reporter::PrepareReplSetUpdatePositionCommandFn makePrepareReplSetUpdatePositionCommandFn(
OperationContext* txn, stdx::mutex& mtx, const HostAndPort& syncTarget) {
- return [&mtx, syncTarget, txn](
- ReplicationCoordinator::ReplSetUpdatePositionCommandStyle commandStyle)
- -> StatusWith<BSONObj> {
- auto currentSyncTarget = BackgroundSync::get()->getSyncTarget();
- if (currentSyncTarget != syncTarget) {
- // Change in sync target
- return Status(ErrorCodes::InvalidSyncSource, "Sync target is no longer valid");
- }
+ return [&mtx, syncTarget, txn](ReplicationCoordinator::ReplSetUpdatePositionCommandStyle
+ commandStyle) -> StatusWith<BSONObj> {
+ auto currentSyncTarget = BackgroundSync::get()->getSyncTarget();
+ if (currentSyncTarget != syncTarget) {
+ // Change in sync target
+ return Status(ErrorCodes::InvalidSyncSource, "Sync target is no longer valid");
+ }
- stdx::lock_guard<stdx::mutex> lock(mtx);
- auto replCoord = repl::ReplicationCoordinator::get(txn);
- if (replCoord->getMemberState().primary()) {
- // Primary has no one to send updates to.
- return Status(ErrorCodes::InvalidSyncSource,
- "Currently primary - no one to send updates to");
- }
+ stdx::lock_guard<stdx::mutex> lock(mtx);
+ auto replCoord = repl::ReplicationCoordinator::get(txn);
+ if (replCoord->getMemberState().primary()) {
+ // Primary has no one to send updates to.
+ return Status(ErrorCodes::InvalidSyncSource,
+ "Currently primary - no one to send updates to");
+ }
- return replCoord->prepareReplSetUpdatePositionCommand(commandStyle);
- };
+ return replCoord->prepareReplSetUpdatePositionCommand(commandStyle);
+ };
}
} // namespace
@@ -254,7 +253,8 @@ void SyncSourceFeedback::run() {
auto status = _updateUpstream(txn.get());
if (!status.isOK()) {
LOG(1) << "The replication progress command (replSetUpdatePosition) failed and will be "
- "retried: " << status;
+ "retried: "
+ << status;
}
}
}
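
Beyond the include swap, the sync_source_feedback.cpp hunk reflows a factory that returns a long-lived lambda: the mutex is captured by reference, the sync target by value, and every invocation re-checks its preconditions before producing a command. That pattern reduced to standard-library types (Result is a toy stand-in for StatusWith<BSONObj>; the target check is simplified):

    #include <functional>
    #include <iostream>
    #include <mutex>
    #include <string>

    // Either an error reason or a payload, loosely mimicking StatusWith.
    struct Result {
        bool ok;
        std::string value;
    };

    // Like makePrepareReplSetUpdatePositionCommandFn: return a callable that
    // re-validates its captured state on every call, not just at creation.
    std::function<Result()> makeCommandFn(std::mutex& mtx, std::string syncTarget) {
        return [&mtx, syncTarget]() -> Result {
            std::lock_guard<std::mutex> lock(mtx);  // serialize with other users
            if (syncTarget.empty())                 // stand-in for the target check
                return {false, "Sync target is no longer valid"};
            return {true, "replSetUpdatePosition for " + syncTarget};
        };
    }

    int main() {
        std::mutex mtx;
        auto fn = makeCommandFn(mtx, "h2:27017");
        Result r = fn();
        std::cout << (r.ok ? r.value : "error: " + r.value) << "\n";
    }
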
diff --git a/src/mongo/db/repl/sync_tail.cpp b/src/mongo/db/repl/sync_tail.cpp
index ebaff27d29c..d8bcb020c50 100644
--- a/src/mongo/db/repl/sync_tail.cpp
+++ b/src/mongo/db/repl/sync_tail.cpp
@@ -33,9 +33,9 @@
#include "mongo/db/repl/sync_tail.h"
+#include "third_party/murmurhash3/MurmurHash3.h"
#include <boost/functional/hash.hpp>
#include <memory>
-#include "third_party/murmurhash3/MurmurHash3.h"
#include "mongo/base/counter.h"
#include "mongo/db/auth/authorization_session.h"
@@ -749,7 +749,8 @@ void SyncTail::oplogApplication() {
str::stream() << "Attempted to apply an oplog entry ("
<< lastOpTime.toString()
<< ") which is not greater than our lastWrittenOptime ("
- << lastWriteOpTime.toString() << ")."));
+ << lastWriteOpTime.toString()
+ << ")."));
}
handleSlaveDelay(lastOpTime.getTimestamp());
@@ -1029,9 +1030,7 @@ void multiSyncApply(const std::vector<OplogEntry>& ops, SyncTail*) {
int batchSize = 0;
int batchCount = 0;
auto endOfGroupableOpsIterator = std::find_if(
- oplogEntriesIterator + 1,
- oplogEntryPointers.end(),
- [&](OplogEntry* nextEntry) {
+ oplogEntriesIterator + 1, oplogEntryPointers.end(), [&](OplogEntry* nextEntry) {
return nextEntry->opType[0] != 'i' || // Must be an insert.
nextEntry->ns != entry->ns || // Must be the same namespace.
// Must not create too large an object.
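
The last sync_tail.cpp hunk compacts a std::find_if whose lambda scans forward from the current oplog entry for the first op that cannot join an insert batch: everything before that point can be applied as one group. The run-finding technique in isolation (toy Op type, invented for illustration; the real predicate also caps the accumulated batch size):

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Op {
        char opType;     // 'i' = insert, 'u' = update, 'd' = delete
        std::string ns;  // namespace the op applies to
    };

    int main() {
        std::vector<Op> ops = {
            {'i', "test.t"}, {'i', "test.t"}, {'i', "other.c"}, {'u', "test.t"},
        };
        auto first = ops.begin();
        // First op that breaks the run: it must stay an insert ('i') on the
        // same namespace, mirroring the predicate in multiSyncApply.
        auto endOfGroup = std::find_if(first + 1, ops.end(), [&](const Op& next) {
            return next.opType != 'i' || next.ns != first->ns;
        });
        std::cout << "groupable ops: " << (endOfGroup - first) << "\n";  // 2
    }
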
diff --git a/src/mongo/db/repl/sync_tail_test.cpp b/src/mongo/db/repl/sync_tail_test.cpp
index f81852dad0f..087889ed5ac 100644
--- a/src/mongo/db/repl/sync_tail_test.cpp
+++ b/src/mongo/db/repl/sync_tail_test.cpp
@@ -160,11 +160,11 @@ TEST_F(SyncTailTest, SyncApplyNoOp) {
ASSERT_FALSE(convertUpdateToUpsert);
return Status::OK();
};
- SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation) {
- FAIL("applyCommand unexpectedly invoked.");
- return Status::OK();
- };
+ SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
+ const BSONObj& theOperation) {
+ FAIL("applyCommand unexpectedly invoked.");
+ return Status::OK();
+ };
ASSERT_TRUE(_txn->writesAreReplicated());
ASSERT_FALSE(documentValidationDisabled(_txn.get()));
ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
@@ -188,11 +188,11 @@ TEST_F(SyncTailTest, SyncApplyNoOpApplyOpThrowsException) {
}
return Status::OK();
};
- SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation) {
- FAIL("applyCommand unexpectedly invoked.");
- return Status::OK();
- };
+ SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
+ const BSONObj& theOperation) {
+ FAIL("applyCommand unexpectedly invoked.");
+ return Status::OK();
+ };
ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
ASSERT_EQUALS(5, applyOpCalled);
}
@@ -219,11 +219,11 @@ void SyncTailTest::_testSyncApplyInsertDocument(LockMode expectedMode) {
ASSERT_TRUE(convertUpdateToUpsert);
return Status::OK();
};
- SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation) {
- FAIL("applyCommand unexpectedly invoked.");
- return Status::OK();
- };
+ SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
+ const BSONObj& theOperation) {
+ FAIL("applyCommand unexpectedly invoked.");
+ return Status::OK();
+ };
ASSERT_TRUE(_txn->writesAreReplicated());
ASSERT_FALSE(documentValidationDisabled(_txn.get()));
ASSERT_OK(SyncTail::syncApply(_txn.get(), op, true, applyOp, applyCmd, _incOps));
@@ -279,11 +279,11 @@ TEST_F(SyncTailTest, SyncApplyIndexBuild) {
ASSERT_FALSE(convertUpdateToUpsert);
return Status::OK();
};
- SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation) {
- FAIL("applyCommand unexpectedly invoked.");
- return Status::OK();
- };
+ SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
+ const BSONObj& theOperation) {
+ FAIL("applyCommand unexpectedly invoked.");
+ return Status::OK();
+ };
ASSERT_TRUE(_txn->writesAreReplicated());
ASSERT_FALSE(documentValidationDisabled(_txn.get()));
ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
@@ -304,16 +304,16 @@ TEST_F(SyncTailTest, SyncApplyCommand) {
FAIL("applyOperation unexpectedly invoked.");
return Status::OK();
};
- SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation) {
- applyCmdCalled = true;
- ASSERT_TRUE(txn);
- ASSERT_TRUE(txn->lockState()->isW());
- ASSERT_TRUE(txn->writesAreReplicated());
- ASSERT_FALSE(documentValidationDisabled(txn));
- ASSERT_EQUALS(op, theOperation);
- return Status::OK();
- };
+ SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
+ const BSONObj& theOperation) {
+ applyCmdCalled = true;
+ ASSERT_TRUE(txn);
+ ASSERT_TRUE(txn->lockState()->isW());
+ ASSERT_TRUE(txn->writesAreReplicated());
+ ASSERT_FALSE(documentValidationDisabled(txn));
+ ASSERT_EQUALS(op, theOperation);
+ return Status::OK();
+ };
ASSERT_TRUE(_txn->writesAreReplicated());
ASSERT_FALSE(documentValidationDisabled(_txn.get()));
ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
@@ -335,14 +335,14 @@ TEST_F(SyncTailTest, SyncApplyCommandThrowsException) {
FAIL("applyOperation unexpectedly invoked.");
return Status::OK();
};
- SyncTail::ApplyCommandInLockFn applyCmd =
- [&](OperationContext* txn, const BSONObj& theOperation) {
- applyCmdCalled++;
- if (applyCmdCalled < 5) {
- throw WriteConflictException();
- }
- return Status::OK();
- };
+ SyncTail::ApplyCommandInLockFn applyCmd = [&](OperationContext* txn,
+ const BSONObj& theOperation) {
+ applyCmdCalled++;
+ if (applyCmdCalled < 5) {
+ throw WriteConflictException();
+ }
+ return Status::OK();
+ };
ASSERT_OK(SyncTail::syncApply(_txn.get(), op, false, applyOp, applyCmd, _incOps));
ASSERT_EQUALS(5, applyCmdCalled);
ASSERT_EQUALS(1U, _opsApplied);
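
Several sync_tail_test.cpp hunks reformat lambdas that deliberately throw WriteConflictException on their first few invocations; the assertion that applyCmdCalled reaches 5 verifies the caller retries until the command applies cleanly. The retry idiom under test, as a standalone loop (toy exception type; the production form is the MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN/END macros visible in the storage_interface_impl.cpp hunks above):

    #include <iostream>
    #include <stdexcept>

    struct WriteConflict : std::runtime_error {
        WriteConflict() : std::runtime_error("write conflict") {}
    };

    int main() {
        int attempts = 0;
        // Retry until the work completes without a conflict; real code would
        // also abandon its snapshot and back off between attempts.
        while (true) {
            try {
                ++attempts;
                if (attempts < 5)
                    throw WriteConflict();  // the first four attempts conflict
                break;                      // the fifth succeeds
            } catch (const WriteConflict&) {
                continue;
            }
        }
        std::cout << "succeeded after " << attempts << " attempts\n";  // 5
    }
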
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index 1558e88929a..5cb02e05c0e 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -43,8 +43,8 @@
#include "mongo/util/concurrency/old_thread_pool.h"
#include "mongo/util/concurrency/thread_name.h"
#include "mongo/util/destructor_guard.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
namespace repl {
diff --git a/src/mongo/db/repl/topology_coordinator.h b/src/mongo/db/repl/topology_coordinator.h
index 27242c393d8..1548cb774a9 100644
--- a/src/mongo/db/repl/topology_coordinator.h
+++ b/src/mongo/db/repl/topology_coordinator.h
@@ -28,8 +28,8 @@
#pragma once
-#include <string>
#include <iosfwd>
+#include <string>
#include "mongo/base/disallow_copying.h"
#include "mongo/db/repl/repl_set_heartbeat_response.h"
diff --git a/src/mongo/db/repl/topology_coordinator_impl.cpp b/src/mongo/db/repl/topology_coordinator_impl.cpp
index 1cfaee288d4..b72fe47f524 100644
--- a/src/mongo/db/repl/topology_coordinator_impl.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl.cpp
@@ -47,8 +47,8 @@
#include "mongo/db/repl/repl_set_request_votes_args.h"
#include "mongo/db/repl/replication_executor.h"
#include "mongo/db/repl/rslog.h"
-#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
+#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/util/hex.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -466,14 +466,16 @@ void TopologyCoordinatorImpl::prepareFreshResponse(
*result =
Status(ErrorCodes::ReplicaSetNotFound,
str::stream() << "Wrong repl set name. Expected: " << _rsConfig.getReplSetName()
- << ", received: " << args.setName);
+ << ", received: "
+ << args.setName);
return;
}
if (args.id == static_cast<unsigned>(_selfConfig().getId())) {
*result = Status(ErrorCodes::BadValue,
str::stream() << "Received replSetFresh command from member with the "
- "same member ID as ourself: " << args.id);
+ "same member ID as ourself: "
+ << args.id);
return;
}
@@ -606,7 +608,8 @@ void TopologyCoordinatorImpl::prepareElectResponse(
} else if (myver > args.cfgver) {
// they are stale!
log() << "replSetElect command received stale config version # during election. "
- "Our version: " << myver << ", their version: " << args.cfgver;
+ "Our version: "
+ << myver << ", their version: " << args.cfgver;
vote = -10000;
} else if (!hopeful) {
log() << "replSetElect couldn't find member with id " << args.whoid;
@@ -666,7 +669,8 @@ Status TopologyCoordinatorImpl::prepareHeartbeatResponse(Date_t now,
response->noteMismatched();
return Status(ErrorCodes::InconsistentReplicaSetNames,
str::stream() << "Our set name of " << ourSetName << " does not match name "
- << rshb << " reported by remote node");
+ << rshb
+ << " reported by remote node");
}
const MemberState myState = getMemberState();
@@ -680,7 +684,8 @@ Status TopologyCoordinatorImpl::prepareHeartbeatResponse(Date_t now,
if (args.getSenderId() == _selfConfig().getId()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Received heartbeat from member with the same "
- "member ID as ourself: " << args.getSenderId());
+ "member ID as ourself: "
+ << args.getSenderId());
}
}
@@ -752,7 +757,8 @@ Status TopologyCoordinatorImpl::prepareHeartbeatResponseV1(Date_t now,
<< "; remote node's: " << rshb;
return Status(ErrorCodes::InconsistentReplicaSetNames,
str::stream() << "Our set name of " << ourSetName << " does not match name "
- << rshb << " reported by remote node");
+ << rshb
+ << " reported by remote node");
}
const MemberState myState = getMemberState();
@@ -765,7 +771,8 @@ Status TopologyCoordinatorImpl::prepareHeartbeatResponseV1(Date_t now,
if (args.getSenderId() == _selfConfig().getId()) {
return Status(ErrorCodes::BadValue,
str::stream() << "Received heartbeat from member with the same "
- "member ID as ourself: " << args.getSenderId());
+ "member ID as ourself: "
+ << args.getSenderId());
}
}
@@ -988,7 +995,8 @@ HeartbeatResponseAction TopologyCoordinatorImpl::processHeartbeatResponse(
const int memberIndex = _rsConfig.findMemberIndexByHostAndPort(target);
if (memberIndex == -1) {
LOG(1) << "Could not find " << target << " in current config so ignoring --"
- " current config: " << _rsConfig.toBSON();
+ " current config: "
+ << _rsConfig.toBSON();
HeartbeatResponseAction nextAction = HeartbeatResponseAction::makeNoAction();
nextAction.setNextHeartbeatStartDate(nextHeartbeatStartDate);
return nextAction;
@@ -1001,7 +1009,8 @@ HeartbeatResponseAction TopologyCoordinatorImpl::processHeartbeatResponse(
if (!hbResponse.isOK()) {
if (isUnauthorized) {
LOG(1) << "setAuthIssue: heartbeat response failed due to authentication"
- " issue for member _id:" << member.getId();
+ " issue for member _id:"
+ << member.getId();
hbData.setAuthIssue(now);
} else if (hbStats.getNumFailuresSinceLastStart() > kMaxHeartbeatRetries ||
alreadyElapsed >= _rsConfig.getHeartbeatTimeoutPeriod()) {
@@ -1319,7 +1328,8 @@ Status TopologyCoordinatorImpl::checkShouldStandForElection(Date_t now,
return {ErrorCodes::NodeNotElectable,
str::stream() << "Not standing for election because "
<< _getUnelectableReasonString(unelectableReason)
- << "; my last optime is " << lastOpApplied.toString()
+ << "; my last optime is "
+ << lastOpApplied.toString()
<< " and the newest is "
<< _latestKnownOpTime(lastOpApplied).toString()};
}
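
Every topology_coordinator_impl.cpp hunk rewraps a str::stream() message builder: a temporary that accepts heterogeneous values through operator<< and converts to a string at the end of the full expression, which is why clang-format treats the whole message as one breakable chain (note the split string literals, which the compiler concatenates). An approximation with std::ostringstream (str::stream itself is MongoDB's utility; only the usage shape is reproduced here):

    #include <iostream>
    #include <sstream>
    #include <string>

    // Accumulate with operator<< and convert implicitly to std::string at the
    // end of the expression, like mongo::str::stream().
    class StrStream {
    public:
        template <typename T>
        StrStream& operator<<(const T& v) {
            _ss << v;
            return *this;
        }
        operator std::string() const {
            return _ss.str();
        }

    private:
        std::ostringstream _ss;
    };

    int main() {
        int senderId = 2;
        // Adjacent literals concatenate, so the wrapped message stays one string.
        std::string msg = StrStream() << "Received heartbeat from member with the "
                                         "same member ID as ourself: "
                                      << senderId;
        std::cout << msg << "\n";
    }
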
diff --git a/src/mongo/db/repl/topology_coordinator_impl_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
index 6d1e9f08a97..030142c628b 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_test.cpp
@@ -244,12 +244,15 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -309,26 +312,41 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself")
<< BSON("_id" << 10 << "host"
<< "h1")
<< BSON("_id" << 20 << "host"
<< "h2"
- << "buildIndexes" << false << "priority" << 0)
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0)
<< BSON("_id" << 30 << "host"
<< "h3"
- << "hidden" << true << "priority" << 0 << "votes"
- << 0) << BSON("_id" << 40 << "host"
- << "h4"
- << "arbiterOnly" << true)
+ << "hidden"
+ << true
+ << "priority"
+ << 0
+ << "votes"
+ << 0)
+ << BSON("_id" << 40 << "host"
+ << "h4"
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 50 << "host"
<< "h5"
- << "slaveDelay" << 1 << "priority" << 0)
+ << "slaveDelay"
+ << 1
+ << "priority"
+ << 0)
<< BSON("_id" << 60 << "host"
- << "h6") << BSON("_id" << 70 << "host"
- << "hprimary"))),
+ << "h6")
+ << BSON("_id" << 70 << "host"
+ << "hprimary"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
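
From here to the end of the file, the hunks all reflow updateConfig(BSON(...), selfIndex) calls. Stripped of macro noise, each builds a config like {_id: "rs0", version: 1, members: [{_id: 10, host: "hself"}, {_id: 20, host: "h2"}, {_id: 30, host: "h3"}]}, and the trailing integer names which members entry is this node (-1 when, as in a later test, the node is absent from the set). A toy rendering of that structure (types invented for illustration):

    #include <iostream>
    #include <string>
    #include <vector>

    struct MemberConfig {
        int id;
        std::string host;
    };

    struct ReplSetConfig {
        std::string name;
        int version;
        std::vector<MemberConfig> members;
    };

    int main() {
        ReplSetConfig cfg{"rs0", 1, {{10, "hself"}, {20, "h2"}, {30, "h3"}}};
        int selfIndex = 0;  // trailing argument to updateConfig(): we are members[0]
        std::cout << cfg.name << " v" << cfg.version
                  << ", self = " << cfg.members[selfIndex].host << "\n";
    }
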
@@ -460,13 +478,17 @@ TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
+ << "version"
+ << 1
+ << "settings"
+ << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -512,12 +534,11 @@ TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
}
TEST_F(TopoCoordTest, ChooseOnlyVotersAsSyncSourceWhenNodeIsAVoter) {
- updateConfig(fromjson(
- "{_id:'rs0', version:1, members:["
- "{_id:10, host:'hself'}, "
- "{_id:20, host:'h2', votes:0, priority:0}, "
- "{_id:30, host:'h3'} "
- "]}"),
+ updateConfig(fromjson("{_id:'rs0', version:1, members:["
+ "{_id:10, host:'hself'}, "
+ "{_id:20, host:'h2', votes:0, priority:0}, "
+ "{_id:30, host:'h3'} "
+ "]}"),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -550,12 +571,15 @@ TEST_F(TopoCoordTest, ChooseOnlyVotersAsSyncSourceWhenNodeIsAVoter) {
TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -597,12 +621,15 @@ TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimary) {
TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourceIsForciblySet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -650,12 +677,15 @@ TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourc
TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExpires) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -699,13 +729,17 @@ TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExp
TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
+ << "version"
+ << 1
+ << "settings"
+ << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -750,12 +784,15 @@ TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDis
TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -814,18 +851,22 @@ TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
TEST_F(TopoCoordTest, NodeDoesNotActOnHeartbeatsWhenAbsentFromConfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "h1")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
-1);
ASSERT_NO_ACTION(heartbeatFromMember(HostAndPort("h2"),
"rs0",
MemberState::RS_SECONDARY,
OpTime(Timestamp(1, 0), 0),
- Milliseconds(300)).getAction());
+ Milliseconds(300))
+ .getAction());
}
TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunPriorToHavingAConfig) {
@@ -852,10 +893,13 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstArbiter) {
// Test trying to sync from another node when we are an arbiter
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 1 << "host"
<< "h1"))),
0);
@@ -874,21 +918,29 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
// Try to sync while PRIMARY
@@ -911,21 +963,29 @@ TEST_F(TopoCoordTest, NodeReturnsNodeNotFoundWhenSyncFromRequestsANodeNotInConfi
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -944,21 +1004,29 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsSelf) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -977,21 +1045,29 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1011,21 +1087,29 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsAnIndexNonbui
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1045,21 +1129,29 @@ TEST_F(TopoCoordTest, NodeReturnsHostUnreachableWhenSyncFromRequestsADownNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1080,21 +1172,29 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAStaleNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1119,21 +1219,29 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAValidNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1159,21 +1267,29 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1198,21 +1314,29 @@ TEST_F(TopoCoordTest, NodeReturnsUnauthorizedWhenSyncFromRequestsANodeWeAreNotAu
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1233,12 +1357,11 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenAskedToSyncFromANonVoterAsAVo
BSONObjBuilder response;
// Test trying to sync from another node
- updateConfig(fromjson(
- "{_id:'rs0', version:1, members:["
- "{_id:0, host:'self'},"
- "{_id:1, host:'h1'},"
- "{_id:2, host:'h2', votes:0, priority:0}"
- "]}"),
+ updateConfig(fromjson("{_id:'rs0', version:1, members:["
+ "{_id:0, host:'self'},"
+ "{_id:1, host:'h1'},"
+ "{_id:2, host:'h2', votes:0, priority:0}"
+ "]}"),
0);
getTopoCoord().prepareSyncFromResponse(HostAndPort("h2"), ourOpTime, &response, &result);
@@ -1257,21 +1380,29 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1328,17 +1459,17 @@ TEST_F(TopoCoordTest, ReplSetGetStatus) {
hb.setDurableOpTime(oplogDurable);
StatusWith<ReplSetHeartbeatResponse> hbResponseGood = StatusWith<ReplSetHeartbeatResponse>(hb);
- updateConfig(
- BSON("_id" << setName << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test0:1234")
- << BSON("_id" << 1 << "host"
- << "test1:1234") << BSON("_id" << 2 << "host"
- << "test2:1234")
- << BSON("_id" << 3 << "host"
- << "test3:1234"))),
- 3,
- startupTime + Milliseconds(1));
+ updateConfig(BSON("_id" << setName << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test0:1234")
+ << BSON("_id" << 1 << "host"
+ << "test1:1234")
+ << BSON("_id" << 2 << "host"
+ << "test2:1234")
+ << BSON("_id" << 3 << "host"
+ << "test3:1234"))),
+ 3,
+ startupTime + Milliseconds(1));
// Now that the replica set is setup, put the members into the states we want them in.
HostAndPort member = HostAndPort("test0:1234");
@@ -1467,15 +1598,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidReplicaSetConfigInResponseToGetStatusWhe
OpTime oplogProgress(Timestamp(3, 4), 0);
std::string setName = "mySet";
- updateConfig(
- BSON("_id" << setName << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test0:1234")
- << BSON("_id" << 1 << "host"
- << "test1:1234") << BSON("_id" << 2 << "host"
- << "test2:1234"))),
- -1, // This one is not part of the replica set.
- startupTime + Milliseconds(1));
+ updateConfig(BSON("_id" << setName << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test0:1234")
+ << BSON("_id" << 1 << "host"
+ << "test1:1234")
+ << BSON("_id" << 2 << "host"
+ << "test2:1234"))),
+ -1, // This one is not part of the replica set.
+ startupTime + Milliseconds(1));
BSONObjBuilder statusBuilder;
Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
@@ -1519,16 +1650,21 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1551,16 +1687,21 @@ TEST_F(TopoCoordTest, NodeReturnsFresherWhenFreshnessIsCheckedWithStaleConfigVer
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1592,16 +1733,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedWithAMemberWhoIsNotInTheConfig)
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1632,16 +1778,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedWhilePrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1677,16 +1828,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedWhilePrimaryExists) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1725,16 +1881,21 @@ TEST_F(TopoCoordTest, NodeReturnsNotFreshestWhenFreshnessIsCheckedByALowPriority
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1772,16 +1933,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedByANodeWeBelieveToBeDown) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1822,16 +1988,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedByANodeThatIsPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1871,16 +2042,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedByANodeThatIsInStartup) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1918,16 +2094,21 @@ TEST_F(TopoCoordTest, VetoWhenFreshnessIsCheckedByANodeThatIsRecovering) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -1966,16 +2147,21 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
// Test trying to elect a node that is fresher but lower priority than the existing primary
args.setName = "rs0";
@@ -2010,16 +2196,21 @@ TEST_F(TopoCoordTest, RespondPositivelyWhenFreshnessIsCheckedByAnElectableNode)
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -2055,16 +2246,21 @@ TEST_F(TopoCoordTest, NodeReturnsBadValueWhenFreshnessIsCheckedByANodeWithOurID)
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "priority" << 10)
+ << "priority"
+ << 10)
<< BSON("_id" << 20 << "host"
- << "h1") << BSON("_id" << 30 << "host"
- << "h2")
+ << "h1")
+ << BSON("_id" << 30 << "host"
+ << "h2")
<< BSON("_id" << 40 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
heartbeatFromMember(HostAndPort("h1"), "rs0", MemberState::RS_SECONDARY, ourOpTime);
@@ -2088,11 +2284,10 @@ TEST_F(TopoCoordTest, NodeReturnsBadValueWhenFreshnessIsCheckedByANodeWithOurID)
TEST_F(TopoCoordTest, HeartbeatFrequencyShouldBeHalfElectionTimeoutWhenArbiter) {
// This tests that arbiters issue heartbeats at electionTimeout/2 frequencies
TopoCoordTest::setUp();
- updateConfig(fromjson(
- "{_id:'mySet', version:1, protocolVersion:1, members:["
- "{_id:1, host:'node1:12345', arbiterOnly:true}, "
- "{_id:2, host:'node2:12345'}], "
- "settings:{heartbeatIntervalMillis:10, electionTimeoutMillis:5000}}"),
+ updateConfig(fromjson("{_id:'mySet', version:1, protocolVersion:1, members:["
+ "{_id:1, host:'node1:12345', arbiterOnly:true}, "
+ "{_id:2, host:'node2:12345'}], "
+ "settings:{heartbeatIntervalMillis:10, electionTimeoutMillis:5000}}"),
0);
HostAndPort target("host2", 27017);
Date_t requestDate = now();
@@ -2112,17 +2307,20 @@ class HeartbeatResponseTest : public TopoCoordTest {
public:
virtual void setUp() {
TopoCoordTest::setUp();
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 5 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
}
};
@@ -2338,13 +2536,16 @@ TEST_F(HeartbeatResponseHighVerbosityTest, UpdateHeartbeatDataSameConfig) {
ReplicaSetConfig originalConfig;
originalConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
- << "host3:27017")) << "settings"
+ << "host3:27017"))
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)));
ReplSetHeartbeatResponse sameConfigResponse;
@@ -2399,7 +2600,9 @@ TEST_F(HeartbeatResponseTestOneRetry, ReconfigWhenHeartbeatResponseContainsAConf
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 7 << "members"
+ << "version"
+ << 7
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2407,7 +2610,8 @@ TEST_F(HeartbeatResponseTestOneRetry, ReconfigWhenHeartbeatResponseContainsAConf
<< BSON("_id" << 2 << "host"
<< "host3:27017")
<< BSON("_id" << 3 << "host"
- << "host4:27017")) << "settings"
+ << "host4:27017"))
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 5))));
ASSERT_OK(newConfig.validate());
@@ -2580,7 +2784,9 @@ TEST_F(HeartbeatResponseTestTwoRetries, ReconfigWhenHeartbeatResponseContainsACo
ReplicaSetConfig newConfig;
ASSERT_OK(newConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 7 << "members"
+ << "version"
+ << 7
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -2588,7 +2794,8 @@ TEST_F(HeartbeatResponseTestTwoRetries, ReconfigWhenHeartbeatResponseContainsACo
<< BSON("_id" << 2 << "host"
<< "host3:27017")
<< BSON("_id" << 3 << "host"
- << "host4:27017")) << "settings"
+ << "host4:27017"))
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 5))));
ASSERT_OK(newConfig.validate());
@@ -2913,18 +3120,22 @@ TEST_F(HeartbeatResponseTest, UpdateHeartbeatDataStepDownPrimaryForHighPriorityF
// In this test, the Topology coordinator sees a PRIMARY ("host2") and then sees a higher
// priority and similarly fresh node ("host3"). However, since the coordinator's node
// (host1) is not the higher priority node, it takes no action.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
setSelfMemberState(MemberState::RS_SECONDARY);
OpTime election = OpTime();
@@ -2957,18 +3168,22 @@ TEST_F(
//
// Despite having stepped down, we should remain electable, in order to dissuade lower
// priority nodes from standing for election.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
@@ -3000,18 +3215,22 @@ TEST_F(HeartbeatResponseTest,
NodeDoesNotStepDownSelfWhenHeartbeatResponseContainsALessFreshHigherPriorityNode) {
// In this test, the Topology coordinator becomes PRIMARY and then sees a higher priority
// and stale node ("host3"). As a result it responds with NoAction.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
OpTime staleTime = OpTime();
@@ -3028,18 +3247,22 @@ TEST_F(HeartbeatResponseTest,
NodeDoesNotStepDownRemoteWhenHeartbeatResponseContainsALessFreshHigherPriorityNode) {
// In this test, the Topology coordinator sees a PRIMARY ("host2") and then sees a higher
// priority and stale node ("host3"). As a result it responds with NoAction.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
setSelfMemberState(MemberState::RS_SECONDARY);
OpTime election = OpTime(Timestamp(1000, 0), 0);
@@ -3107,17 +3330,20 @@ TEST_F(HeartbeatResponseTest,
NodeDoesNotStandForElectionWhenPrimaryIsMarkedDownViaHeartbeatButWeHaveZeroPriority) {
setSelfMemberState(MemberState::RS_SECONDARY);
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 5 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"
- << "priority" << 0)
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority"
+ << 0)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
OpTime election = OpTime(Timestamp(400, 0), 0);
OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
@@ -3238,17 +3464,20 @@ TEST_F(HeartbeatResponseTest,
TEST_F(HeartbeatResponseTest,
NodeDoesNotStandForElectionWhenPrimaryIsMarkedDownViaHeartbeatButWeAreAnArbiter) {
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 5 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"
- << "arbiterOnly" << true)
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "arbiterOnly"
+ << true)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
OpTime election = OpTime(Timestamp(400, 0), 0);
OpTime lastOpTimeApplied = OpTime(Timestamp(300, 0), 0);
@@ -3693,25 +3922,40 @@ TEST_F(HeartbeatResponseTest,
StartElectionIfAMajorityOfVotersIsVisibleEvenThoughATrueMajorityIsNot) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 3 << "host"
<< "host4:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 4 << "host"
<< "host5:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 5 << "host"
<< "host6:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 6 << "host"
- << "host7:27017")) << "settings"
+ << "host7:27017"))
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -3793,16 +4037,21 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 10 << "members"
+ << "version"
+ << 10
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
- << "h1") << BSON("_id" << 2 << "host"
- << "h2"
- << "priority" << 10)
+ << "h1")
+ << BSON("_id" << 2 << "host"
+ << "h2"
+ << "priority"
+ << 10)
<< BSON("_id" << 3 << "host"
<< "h3"
- << "priority" << 10))),
+ << "priority"
+ << 10))),
0);
}
@@ -4079,9 +4328,8 @@ TEST_F(PrepareElectResponseTest,
ASSERT_EQUALS(0, response2["vote"].Int());
ASSERT_EQUALS(round, response2["round"].OID());
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "voting no for h3:27017; "
- "voted for h2:27017 0 secs ago"));
+ countLogLinesContaining("voting no for h3:27017; "
+ "voted for h2:27017 0 secs ago"));
// Test that after enough time passes the same vote can proceed
now += Seconds(30) + Milliseconds(1); // just over 30 seconds later
@@ -4100,7 +4348,9 @@ TEST_F(PrepareElectResponseTest,
TEST_F(TopoCoordTest, NodeReturnsReplicaSetNotFoundWhenReceivingElectCommandWhileRemoved) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -4109,7 +4359,9 @@ TEST_F(TopoCoordTest, NodeReturnsReplicaSetNotFoundWhenReceivingElectCommandWhil
// Reconfig to remove self.
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -4140,7 +4392,9 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -4211,8 +4465,11 @@ TEST_F(TopoCoordTest,
UnfreezeImmediatelyWhenToldToFreezeForZeroSecondsAfterBeingToldToFreezeForLonger) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -4231,12 +4488,15 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
}
@@ -4296,9 +4556,8 @@ TEST_F(PrepareHeartbeatResponseTest,
ASSERT(result.reason().find("repl set names do not match")) << "Actual string was \""
<< result.reason() << '"';
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "replSet set names do not match, ours: rs0; remote "
- "node's: rs1"));
+ countLogLinesContaining("replSet set names do not match, ours: rs0; remote "
+ "node's: rs1"));
ASSERT_TRUE(response.isMismatched());
ASSERT_EQUALS("", response.getHbMsg());
}
@@ -4541,8 +4800,11 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenBecomingSecondaryInSingleNodeSet) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "hself"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself"))),
0);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4559,10 +4821,13 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
ReplicaSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority" << 0))));
+ << "priority"
+ << 0))));
getTopoCoord().updateConfig(cfg, 0, now()++, OpTime());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4575,8 +4840,11 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "hself"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself"))),
0);
ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
}
@@ -4587,10 +4855,13 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
ReplicaSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority" << 0))));
+ << "priority"
+ << 0))));
getTopoCoord().updateConfig(cfg, 0, now()++, OpTime());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4608,7 +4879,9 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
// config to be absent from the set
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -4619,16 +4892,18 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s);
// reconfig to add to set
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
// having been added to the config, we should no longer be REMOVED and should enter STARTUP2
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4637,23 +4912,27 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfig) {
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -4669,8 +4948,11 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4685,7 +4967,9 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -4701,8 +4985,11 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -4715,17 +5002,20 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
// now lose primary due to loss of electability
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"
- << "priority" << 0)
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority"
+ << 0)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
}
@@ -4735,8 +5025,11 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
@@ -4751,38 +5044,45 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
// Now reconfig in ways that leave us electable and ensure we are still the primary.
// Add hosts
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0,
- Date_t::fromMillisSinceEpoch(-1),
- OpTime(Timestamp(10, 0), 0));
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0,
+ Date_t::fromMillisSinceEpoch(-1),
+ OpTime(Timestamp(10, 0), 0));
ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
// Change priorities and tags
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"
- << "priority" << 10)
- << BSON("_id" << 1 << "host"
- << "host2:27017"
- << "priority" << 5 << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rack1")))),
- 0,
- Date_t::fromMillisSinceEpoch(-1),
- OpTime(Timestamp(10, 0), 0));
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority"
+ << 10)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017"
+ << "priority"
+ << 5
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rack1")))),
+ 0,
+ Date_t::fromMillisSinceEpoch(-1),
+ OpTime(Timestamp(10, 0), 0));
ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
}
@@ -4790,7 +5090,9 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
@@ -4802,16 +5104,18 @@ TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
// reconfig and stay secondary
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
}
@@ -4845,7 +5149,9 @@ TEST_F(HeartbeatResponseTest, ReconfigBetweenHeartbeatRequestAndRepsonse) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
@@ -4894,7 +5200,9 @@ TEST_F(HeartbeatResponseTest, ReconfigNodeRemovedBetweenHeartbeatRequestAndRepso
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
@@ -5148,14 +5456,19 @@ TEST_F(HeartbeatResponseTest, ShouldNotChangeSyncSourceWhenFresherMemberDoesNotB
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 6 << "members"
+ << "version"
+ << 6
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes" << false << "priority" << 0))),
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0))),
0);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
"rs0",
@@ -5189,15 +5502,23 @@ TEST_F(HeartbeatResponseTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 7 << "members"
+ << "version"
+ << 7
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "buildIndexes" << false << "priority" << 0)
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0)
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes" << false << "priority" << 0))),
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0))),
0);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
"rs0",
@@ -5225,12 +5546,15 @@ TEST_F(HeartbeatResponseTest,
TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileAwareOfPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5244,12 +5568,15 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileAwareOfPrimary) {
TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileTooStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5264,12 +5591,15 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileTooStale) {
TEST_F(TopoCoordTest, VoteForMyselfFailsWhileNotCandidate) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
ASSERT_FALSE(getTopoCoord().voteForMyself(now()++));
@@ -5278,13 +5608,17 @@ TEST_F(TopoCoordTest, VoteForMyselfFailsWhileNotCandidate) {
TEST_F(TopoCoordTest, NodeReturnsArbiterWhenGetMemberStateRunsAgainstArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
ASSERT_EQUALS(MemberState::RS_ARBITER, getTopoCoord().getMemberState().s);
}
@@ -5299,12 +5633,15 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileRemovedFromTheConfig) {
TEST_F(TopoCoordTest, ShouldNotStandForElectionWhenAPositiveResponseWasGivenInTheVoteLeasePeriod) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
heartbeatFromMember(
@@ -5339,20 +5676,28 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhenAPositiveResponseWasGivenInTh
TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -5362,11 +5707,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ASSERT_TRUE(response.getVoteGranted());
ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 1LL << "configVersion" << 1LL
- << "lastCommittedOp" << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response2;
// different candidate same term, should be a problem
@@ -5378,12 +5729,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5391,8 +5745,14 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 1LL
- << "candidateIndex" << 0LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5404,12 +5764,19 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// second dry run fine
ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "dryRun" << true << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
@@ -5420,12 +5787,15 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5433,8 +5803,14 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << false << "term" << 1LL
- << "candidateIndex" << 0LL << "configVersion" << 1LL
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5446,12 +5822,19 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
// dry post real, fails
ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "dryRun" << false << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
@@ -5462,12 +5845,15 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5475,8 +5861,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -5489,12 +5880,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5502,8 +5896,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 1LL << "candidateIndex" << 1LL
- << "configVersion" << 0LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 0LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -5516,12 +5915,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5533,8 +5935,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 1LL << "candidateIndex" << 1LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -5548,12 +5955,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -5562,8 +5972,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 3LL << "candidateIndex" << 1LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 3LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime2 = {Timestamp(20, 0), 0};
@@ -5576,12 +5991,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -5589,12 +6007,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -5608,8 +6031,14 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "dryRun" << true << "term" << 2LL
- << "candidateIndex" << 0LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 2LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5623,12 +6052,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -5636,12 +6068,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -5655,8 +6092,14 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 2LL
- << "candidateIndex" << 1LL << "configVersion" << 0LL
+ << "dryRun"
+ << true
+ << "term"
+ << 2LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 0LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5670,12 +6113,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -5683,12 +6129,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -5701,8 +6152,14 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 0LL
- << "candidateIndex" << 1LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 0LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5716,12 +6173,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -5729,12 +6189,17 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -5748,8 +6213,14 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 1LL
- << "candidateIndex" << 1LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5763,12 +6234,15 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -5776,12 +6250,17 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -5795,8 +6274,14 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 3LL
- << "candidateIndex" << 1LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 3LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -5818,12 +6303,17 @@ TEST_F(TopoCoordTest, CSRSConfigServerRejectsPV0Config) {
auto configObj = BSON("_id"
<< "rs0"
- << "version" << 1 << "configsvr" << true << "members"
+ << "version"
+ << 1
+ << "configsvr"
+ << true
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3")));
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3")));
ReplicaSetConfig config;
ASSERT_OK(config.initialize(configObj, false));
ASSERT_EQ(ErrorCodes::BadValue, config.validate());
diff --git a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
index 826905a860a..9e04bf8942d 100644
--- a/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
+++ b/src/mongo/db/repl/topology_coordinator_impl_v1_test.cpp
@@ -255,12 +255,15 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -320,26 +323,41 @@ TEST_F(TopoCoordTest, NodeReturnsSecondaryWithMostRecentDataAsSyncSource) {
TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself")
<< BSON("_id" << 10 << "host"
<< "h1")
<< BSON("_id" << 20 << "host"
<< "h2"
- << "buildIndexes" << false << "priority" << 0)
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0)
<< BSON("_id" << 30 << "host"
<< "h3"
- << "hidden" << true << "priority" << 0 << "votes"
- << 0) << BSON("_id" << 40 << "host"
- << "h4"
- << "arbiterOnly" << true)
+ << "hidden"
+ << true
+ << "priority"
+ << 0
+ << "votes"
+ << 0)
+ << BSON("_id" << 40 << "host"
+ << "h4"
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 50 << "host"
<< "h5"
- << "slaveDelay" << 1 << "priority" << 0)
+ << "slaveDelay"
+ << 1
+ << "priority"
+ << 0)
<< BSON("_id" << 60 << "host"
- << "h6") << BSON("_id" << 70 << "host"
- << "hprimary"))),
+ << "h6")
+ << BSON("_id" << 70 << "host"
+ << "hprimary"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -471,13 +489,17 @@ TEST_F(TopoCoordTest, NodeReturnsClosestValidSyncSourceAsSyncSource) {
TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
+ << "version"
+ << 1
+ << "settings"
+ << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -523,12 +545,11 @@ TEST_F(TopoCoordTest, ChooseOnlyPrimaryAsSyncSourceWhenChainingIsDisallowed) {
}
TEST_F(TopoCoordTest, ChooseOnlyVotersAsSyncSourceWhenNodeIsAVoter) {
- updateConfig(fromjson(
- "{_id:'rs0', version:1, members:["
- "{_id:10, host:'hself'}, "
- "{_id:20, host:'h2', votes:0, priority:0}, "
- "{_id:30, host:'h3'} "
- "]}"),
+ updateConfig(fromjson("{_id:'rs0', version:1, members:["
+ "{_id:10, host:'hself'}, "
+ "{_id:20, host:'h2', votes:0, priority:0}, "
+ "{_id:30, host:'h3'} "
+ "]}"),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -561,12 +582,15 @@ TEST_F(TopoCoordTest, ChooseOnlyVotersAsSyncSourceWhenNodeIsAVoter) {
TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -608,12 +632,15 @@ TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimary) {
TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourceIsForciblySet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -651,12 +678,15 @@ TEST_F(TopoCoordTest, ChooseRequestedSyncSourceOnlyTheFirstTimeAfterTheSyncSourc
TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExpires) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -700,13 +730,17 @@ TEST_F(TopoCoordTest, NodeDoesNotChooseBlacklistedSyncSourceUntilBlacklistingExp
TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDisallowed) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "settings" << BSON("chainingAllowed" << false)
+ << "version"
+ << 1
+ << "settings"
+ << BSON("chainingAllowed" << false)
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -751,12 +785,15 @@ TEST_F(TopoCoordTest, ChooseNoSyncSourceWhenPrimaryIsBlacklistedAndChainingIsDis
TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -815,18 +852,22 @@ TEST_F(TopoCoordTest, NodeChangesToRecoveringWhenOnlyUnauthorizedNodesAreUp) {
TEST_F(TopoCoordTest, NodeDoesNotActOnHeartbeatsWhenAbsentFromConfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "h1")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
-1);
ASSERT_NO_ACTION(heartbeatFromMember(HostAndPort("h2"),
"rs0",
MemberState::RS_SECONDARY,
OpTime(Timestamp(1, 0), 0),
- Milliseconds(300)).getAction());
+ Milliseconds(300))
+ .getAction());
}
TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunPriorToHavingAConfig) {
@@ -853,10 +894,13 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstArbiter) {
// Test trying to sync from another node when we are an arbiter
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 1 << "host"
<< "h1"))),
0);
@@ -875,21 +919,29 @@ TEST_F(TopoCoordTest, NodeReturnsNotSecondaryWhenSyncFromIsRunAgainstPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
// Try to sync while PRIMARY
@@ -912,21 +964,29 @@ TEST_F(TopoCoordTest, NodeReturnsNodeNotFoundWhenSyncFromRequestsANodeNotInConfi
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -945,21 +1005,29 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsSelf) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -978,21 +1046,29 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1012,21 +1088,29 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenSyncFromRequestsAnIndexNonbui
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1046,21 +1130,29 @@ TEST_F(TopoCoordTest, NodeReturnsHostUnreachableWhenSyncFromRequestsADownNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1081,21 +1173,29 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAStaleNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1120,21 +1220,29 @@ TEST_F(TopoCoordTest, ChooseRequestedNodeWhenSyncFromRequestsAValidNode) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1160,21 +1268,29 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1199,21 +1315,29 @@ TEST_F(TopoCoordTest, NodeReturnsUnauthorizedWhenSyncFromRequestsANodeWeAreNotAu
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1234,12 +1358,11 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidOptionsWhenAskedToSyncFromANonVoterAsAVo
BSONObjBuilder response;
// Test trying to sync from another node
- updateConfig(fromjson(
- "{_id:'rs0', version:1, members:["
- "{_id:0, host:'self'},"
- "{_id:1, host:'h1'},"
- "{_id:2, host:'h2', votes:0, priority:0}"
- "]}"),
+ updateConfig(fromjson("{_id:'rs0', version:1, members:["
+ "{_id:0, host:'self'},"
+ "{_id:1, host:'h1'},"
+ "{_id:2, host:'h2', votes:0, priority:0}"
+ "]}"),
0);
getTopoCoord().prepareSyncFromResponse(HostAndPort("h2"), ourOpTime, &response, &result);
@@ -1258,21 +1381,29 @@ TEST_F(TopoCoordTest,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "h1"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 2 << "host"
<< "h2"
- << "priority" << 0 << "buildIndexes" << false)
+ << "priority"
+ << 0
+ << "buildIndexes"
+ << false)
<< BSON("_id" << 3 << "host"
- << "h3") << BSON("_id" << 4 << "host"
- << "h4")
+ << "h3")
+ << BSON("_id" << 4 << "host"
+ << "h4")
<< BSON("_id" << 5 << "host"
- << "h5") << BSON("_id" << 6 << "host"
- << "h6"))),
+ << "h5")
+ << BSON("_id" << 6 << "host"
+ << "h6"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1329,17 +1460,17 @@ TEST_F(TopoCoordTest, ReplSetGetStatus) {
hb.setAppliedOpTime(oplogProgress);
StatusWith<ReplSetHeartbeatResponse> hbResponseGood = StatusWith<ReplSetHeartbeatResponse>(hb);
- updateConfig(
- BSON("_id" << setName << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test0:1234")
- << BSON("_id" << 1 << "host"
- << "test1:1234") << BSON("_id" << 2 << "host"
- << "test2:1234")
- << BSON("_id" << 3 << "host"
- << "test3:1234"))),
- 3,
- startupTime + Milliseconds(1));
+ updateConfig(BSON("_id" << setName << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test0:1234")
+ << BSON("_id" << 1 << "host"
+ << "test1:1234")
+ << BSON("_id" << 2 << "host"
+ << "test2:1234")
+ << BSON("_id" << 3 << "host"
+ << "test3:1234"))),
+ 3,
+ startupTime + Milliseconds(1));
// Now that the replica set is setup, put the members into the states we want them in.
HostAndPort member = HostAndPort("test0:1234");
@@ -1468,15 +1599,15 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidReplicaSetConfigInResponseToGetStatusWhe
OpTime oplogProgress(Timestamp(3, 4), 0);
std::string setName = "mySet";
- updateConfig(
- BSON("_id" << setName << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "test0:1234")
- << BSON("_id" << 1 << "host"
- << "test1:1234") << BSON("_id" << 2 << "host"
- << "test2:1234"))),
- -1, // This one is not part of the replica set.
- startupTime + Milliseconds(1));
+ updateConfig(BSON("_id" << setName << "version" << 1 << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "test0:1234")
+ << BSON("_id" << 1 << "host"
+ << "test1:1234")
+ << BSON("_id" << 2 << "host"
+ << "test2:1234"))),
+ -1, // This one is not part of the replica set.
+ startupTime + Milliseconds(1));
BSONObjBuilder statusBuilder;
Status resultStatus(ErrorCodes::InternalError, "prepareStatusResponse didn't set result");
@@ -1497,11 +1628,10 @@ TEST_F(TopoCoordTest, NodeReturnsInvalidReplicaSetConfigInResponseToGetStatusWhe
TEST_F(TopoCoordTest, HeartbeatFrequencyShouldBeHalfElectionTimeoutWhenArbiter) {
// This tests that arbiters issue heartbeats at electionTimeout/2 frequencies
TopoCoordTest::setUp();
- updateConfig(fromjson(
- "{_id:'mySet', version:1, protocolVersion:1, members:["
- "{_id:1, host:'node1:12345', arbiterOnly:true}, "
- "{_id:2, host:'node2:12345'}], "
- "settings:{heartbeatIntervalMillis:10, electionTimeoutMillis:5000}}"),
+ updateConfig(fromjson("{_id:'mySet', version:1, protocolVersion:1, members:["
+ "{_id:1, host:'node1:12345', arbiterOnly:true}, "
+ "{_id:2, host:'node2:12345'}], "
+ "settings:{heartbeatIntervalMillis:10, electionTimeoutMillis:5000}}"),
0);
HostAndPort target("host2", 27017);
Date_t requestDate = now();
@@ -1523,12 +1653,16 @@ public:
TopoCoordTest::setUp();
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3")) << "settings"
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))
+ << "settings"
<< BSON("protocolVersion" << 1)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -1558,9 +1692,8 @@ TEST_F(PrepareHeartbeatResponseV1Test,
ASSERT(result.reason().find("repl set names do not match")) << "Actual string was \""
<< result.reason() << '"';
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "replSet set names do not match, ours: rs0; remote "
- "node's: rs1"));
+ countLogLinesContaining("replSet set names do not match, ours: rs0; remote "
+ "node's: rs1"));
// only protocolVersion should be set in this failure case
ASSERT_EQUALS("", response.getReplicaSetName());
}
@@ -1570,11 +1703,15 @@ TEST_F(PrepareHeartbeatResponseV1Test,
// reconfig self out of set
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 3 << "members" << BSON_ARRAY(BSON("_id" << 20 << "host"
- << "h2")
- << BSON("_id" << 30 << "host"
- << "h3"))
- << "settings" << BSON("protocolVersion" << 1)),
+ << "version"
+ << 3
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 20 << "host"
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))
+ << "settings"
+ << BSON("protocolVersion" << 1)),
-1);
ReplSetHeartbeatArgsV1 args;
args.setSetName("rs0");
@@ -1764,8 +1901,11 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenBecomingSecondaryInSingleNodeSet) {
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "hself"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself"))),
0);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -1782,10 +1922,13 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
ReplicaSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority" << 0))));
+ << "priority"
+ << 0))));
getTopoCoord().updateConfig(cfg, 0, now()++, OpTime());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -1798,8 +1941,11 @@ TEST_F(TopoCoordTest, BecomeCandidateWhenReconfigToBeElectableInSingleNodeSet) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 1 << "host"
- << "hself"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 1 << "host"
+ << "hself"))),
0);
ASSERT_TRUE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
}
@@ -1810,10 +1956,13 @@ TEST_F(TopoCoordTest, NodeDoesNotBecomeCandidateWhenBecomingSecondaryInSingleNod
ReplicaSetConfig cfg;
cfg.initialize(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "hself"
- << "priority" << 0))));
+ << "priority"
+ << 0))));
getTopoCoord().updateConfig(cfg, 0, now()++, OpTime());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -1831,7 +1980,9 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
// config to be absent from the set
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -1842,16 +1993,18 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s);
// reconfig to add to set
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
// having been added to the config, we should no longer be REMOVED and should enter STARTUP2
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -1860,23 +2013,27 @@ TEST_F(TopoCoordTest, NodeTransitionsFromRemovedToStartup2WhenAddedToConfig) {
TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfig) {
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 1 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -1892,8 +2049,11 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -1908,7 +2068,9 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedWhenRemovedFromConfigEvenWhenPrima
// reconfig to remove self
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members"
+ << "version"
+ << 2
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
@@ -1924,8 +2086,11 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -1938,17 +2103,20 @@ TEST_F(TopoCoordTest, NodeTransitionsToSecondaryWhenReconfiggingToBeUnelectable)
ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
// now lose primary due to loss of electability
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"
- << "priority" << 0)
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority"
+ << 0)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
}
@@ -1958,8 +2126,11 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
ASSERT_EQUALS(MemberState::RS_STARTUP, getTopoCoord().getMemberState().s);
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"))),
+ << "version"
+ << 1
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"))),
0);
ASSERT_FALSE(TopologyCoordinator::Role::candidate == getTopoCoord().getRole());
@@ -1974,38 +2145,45 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
// Now reconfig in ways that leave us electable and ensure we are still the primary.
// Add hosts
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0,
- Date_t::fromMillisSinceEpoch(-1),
- OpTime(Timestamp(10, 0), 0));
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0,
+ Date_t::fromMillisSinceEpoch(-1),
+ OpTime(Timestamp(10, 0), 0));
ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
// Change priorities and tags
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017"
- << "priority" << 10)
- << BSON("_id" << 1 << "host"
- << "host2:27017"
- << "priority" << 5 << "tags" << BSON("dc"
- << "NA"
- << "rack"
- << "rack1")))),
- 0,
- Date_t::fromMillisSinceEpoch(-1),
- OpTime(Timestamp(10, 0), 0));
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017"
+ << "priority"
+ << 10)
+ << BSON("_id" << 1 << "host"
+ << "host2:27017"
+ << "priority"
+ << 5
+ << "tags"
+ << BSON("dc"
+ << "NA"
+ << "rack"
+ << "rack1")))),
+ 0,
+ Date_t::fromMillisSinceEpoch(-1),
+ OpTime(Timestamp(10, 0), 0));
ASSERT_TRUE(TopologyCoordinator::Role::leader == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_PRIMARY, getTopoCoord().getMemberState().s);
}
@@ -2013,7 +2191,9 @@ TEST_F(TopoCoordTest, NodeMaintainsPrimaryStateAcrossReconfigIfNodeRemainsElecta
TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 1 << "host"
<< "host1:27017")
<< BSON("_id" << 2 << "host"
@@ -2025,16 +2205,18 @@ TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
// reconfig and stay secondary
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))),
+ 0);
ASSERT_TRUE(TopologyCoordinator::Role::follower == getTopoCoord().getRole());
ASSERT_EQUALS(MemberState::RS_SECONDARY, getTopoCoord().getMemberState().s);
}
@@ -2042,12 +2224,15 @@ TEST_F(TopoCoordTest, NodeMaintainsSecondaryStateAcrossReconfig) {
TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileAwareOfPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2059,12 +2244,15 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileAwareOfPrimary) {
TEST_F(TopoCoordTest, ShouldStandForElectionDespiteNotCloseEnoughToLastOptime) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2076,12 +2264,15 @@ TEST_F(TopoCoordTest, ShouldStandForElectionDespiteNotCloseEnoughToLastOptime) {
TEST_F(TopoCoordTest, VoteForMyselfFailsWhileNotCandidate) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
ASSERT_FALSE(getTopoCoord().voteForMyself(now()++));
@@ -2090,13 +2281,17 @@ TEST_F(TopoCoordTest, VoteForMyselfFailsWhileNotCandidate) {
TEST_F(TopoCoordTest, NodeReturnsArbiterWhenGetMemberStateRunsAgainstArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
ASSERT_EQUALS(MemberState::RS_ARBITER, getTopoCoord().getMemberState().s);
}
@@ -2111,20 +2306,28 @@ TEST_F(TopoCoordTest, ShouldNotStandForElectionWhileRemovedFromTheConfig) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -2134,11 +2337,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
ASSERT_TRUE(response.getVoteGranted());
ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 1LL << "configVersion" << 1LL
- << "lastCommittedOp" << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response2;
// different candidate same term, should be a problem
@@ -2150,12 +2359,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVotesToTwoDifferentNodesInTheSameTerm) {
TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2163,8 +2375,14 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 1LL
- << "candidateIndex" << 0LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2176,12 +2394,19 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// second dry run fine
ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "dryRun" << true << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
@@ -2190,12 +2415,19 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// real request fine
ReplSetRequestVotesArgs args3;
- args3.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "dryRun" << false << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args3.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response3;
getTopoCoord().processReplSetRequestVotes(args3, &response3, lastAppliedOpTime);
@@ -2204,12 +2436,19 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
// dry post real, fails
ReplSetRequestVotesArgs args4;
- args4.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "dryRun" << false << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args4.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response4;
getTopoCoord().processReplSetRequestVotes(args4, &response4, lastAppliedOpTime);
@@ -2220,12 +2459,15 @@ TEST_F(TopoCoordTest, DryRunVoteRequestShouldNotPreventSubsequentDryRunsForThatT
TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2233,8 +2475,14 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << false << "term" << 1LL
- << "candidateIndex" << 0LL << "configVersion" << 1LL
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2246,12 +2494,19 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
// dry post real, fails
ReplSetRequestVotesArgs args2;
- args2.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "dryRun" << false << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ args2.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "dryRun"
+ << false
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response2;
getTopoCoord().processReplSetRequestVotes(args2, &response2, lastAppliedOpTime);
@@ -2262,12 +2517,15 @@ TEST_F(TopoCoordTest, VoteRequestShouldNotPreventDryRunsForThatTerm) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2275,8 +2533,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "term" << 1LL << "candidateIndex" << 0LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -2289,12 +2552,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2302,8 +2568,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 1LL << "candidateIndex" << 1LL
- << "configVersion" << 0LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 0LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -2316,12 +2587,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2333,8 +2607,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 1LL << "candidateIndex" << 1LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime;
@@ -2348,12 +2627,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -2362,8 +2644,13 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "term" << 3LL << "candidateIndex" << 1LL
- << "configVersion" << 1LL << "lastCommittedOp"
+ << "term"
+ << 3LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
OpTime lastAppliedOpTime2 = {Timestamp(20, 0), 0};
@@ -2376,12 +2663,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantVoteWhenOpTimeIsStale) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -2389,12 +2679,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -2408,8 +2703,14 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "wrongName"
- << "dryRun" << true << "term" << 2LL
- << "candidateIndex" << 0LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 2LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2423,12 +2724,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenReplSetNameDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -2436,12 +2740,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -2455,8 +2764,14 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 2LL
- << "candidateIndex" << 1LL << "configVersion" << 0LL
+ << "dryRun"
+ << true
+ << "term"
+ << 2LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 0LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2470,12 +2785,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenConfigVersionDoesNotMatch) {
TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -2483,12 +2801,17 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -2501,8 +2824,14 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 0LL
- << "candidateIndex" << 1LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 0LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2516,12 +2845,15 @@ TEST_F(TopoCoordTest, NodeDoesNotGrantDryRunVoteWhenTermIsStale) {
TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -2529,12 +2861,17 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -2548,8 +2885,14 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 1LL
- << "candidateIndex" << 1LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2563,12 +2906,15 @@ TEST_F(TopoCoordTest, GrantDryRunVoteEvenWhenTermHasBeenSeen) {
TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 1 << "members"
+ << "version"
+ << 1
+ << "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
// set term to 1
@@ -2576,12 +2922,17 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
getTopoCoord().updateTerm(1, now()));
// and make sure we voted in term 1
ReplSetRequestVotesArgs argsForRealVote;
- argsForRealVote.initialize(BSON("replSetRequestVotes"
- << 1 << "setName"
- << "rs0"
- << "term" << 1LL << "candidateIndex" << 0LL << "configVersion"
- << 1LL << "lastCommittedOp"
- << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
+ argsForRealVote.initialize(
+ BSON("replSetRequestVotes" << 1 << "setName"
+ << "rs0"
+ << "term"
+ << 1LL
+ << "candidateIndex"
+ << 0LL
+ << "configVersion"
+ << 1LL
+ << "lastCommittedOp"
+ << BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse responseForRealVote;
OpTime lastAppliedOpTime;
@@ -2595,8 +2946,14 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) {
ReplSetRequestVotesArgs args;
args.initialize(BSON("replSetRequestVotes" << 1 << "setName"
<< "rs0"
- << "dryRun" << true << "term" << 3LL
- << "candidateIndex" << 1LL << "configVersion" << 1LL
+ << "dryRun"
+ << true
+ << "term"
+ << 3LL
+ << "candidateIndex"
+ << 1LL
+ << "configVersion"
+ << 1LL
<< "lastCommittedOp"
<< BSON("ts" << Timestamp(10, 0) << "term" << 0LL)));
ReplSetRequestVotesResponse response;
@@ -2618,13 +2975,19 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedIfCSRSButHaveNoReadCommittedSuppor
updateConfig(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s);
}
@@ -2639,13 +3002,19 @@ TEST_F(TopoCoordTest, NodeBecomesSecondaryAsNormalWhenReadCommittedSupportedAndC
updateConfig(BSON("_id"
<< "rs0"
- << "protocolVersion" << 1 << "version" << 1 << "configsvr" << true
+ << "protocolVersion"
+ << 1
+ << "version"
+ << 1
+ << "configsvr"
+ << true
<< "members"
<< BSON_ARRAY(BSON("_id" << 10 << "host"
<< "hself")
<< BSON("_id" << 20 << "host"
- << "h2") << BSON("_id" << 30 << "host"
- << "h3"))),
+ << "h2")
+ << BSON("_id" << 30 << "host"
+ << "h3"))),
0);
ASSERT_EQUALS(MemberState::RS_STARTUP2, getTopoCoord().getMemberState().s);
@@ -2657,17 +3026,22 @@ class HeartbeatResponseTestV1 : public TopoCoordTest {
public:
virtual void setUp() {
TopoCoordTest::setUp();
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 5 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
}
};
@@ -2683,15 +3057,23 @@ TEST_F(HeartbeatResponseTestV1,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 7 << "members"
+ << "version"
+ << 7
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself"
- << "buildIndexes" << false << "priority" << 0)
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0)
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes" << false << "priority" << 0))),
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0))),
0);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
"rs0",
@@ -3017,11 +3399,15 @@ TEST_F(HeartbeatResponseTestV1, ReconfigNodeRemovedBetweenHeartbeatRequestAndRep
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017"))
- << "protocolVersion" << 1),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017"))
+ << "protocolVersion"
+ << 1),
0);
ReplSetHeartbeatResponse hb;
@@ -3066,11 +3452,15 @@ TEST_F(HeartbeatResponseTestV1, ReconfigBetweenHeartbeatRequestAndRepsonse) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 2 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "protocolVersion" << 1),
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "protocolVersion"
+ << 1),
0);
ReplSetHeartbeatResponse hb;
@@ -3131,14 +3521,20 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleAPriorityTakeoverWhenElectableAndReceiveHeartbeatFromLowerPriorityPrimary) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority" << 2)
+ << "priority"
+ << 2)
<< BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 6 << "host"
- << "host7:27017"))
- << "protocolVersion" << 1 << "settings"
+ << "host2:27017")
+ << BSON("_id" << 6 << "host"
+ << "host7:27017"))
+ << "protocolVersion"
+ << 1
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)),
0);
@@ -3162,15 +3558,21 @@ TEST_F(HeartbeatResponseTestV1,
TEST_F(HeartbeatResponseTestV1, UpdateHeartbeatDataTermPreventsPriorityTakeover) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members" << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host0:27017"
- << "priority" << 2)
- << BSON("_id" << 1 << "host"
- << "host1:27017"
- << "priority" << 3)
- << BSON("_id" << 2 << "host"
- << "host2:27017"))
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
+ << "version"
+ << 5
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0:27017"
+ << "priority"
+ << 2)
+ << BSON("_id" << 1 << "host"
+ << "host1:27017"
+ << "priority"
+ << 3)
+ << BSON("_id" << 2 << "host"
+ << "host2:27017"))
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -3223,26 +3625,43 @@ TEST_F(HeartbeatResponseTestV1,
ScheduleElectionIfAMajorityOfVotersIsVisibleEvenThoughATrueMajorityIsNot) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 3 << "host"
<< "host4:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 4 << "host"
<< "host5:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 5 << "host"
<< "host6:27017"
- << "votes" << 0 << "priority" << 0)
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
<< BSON("_id" << 6 << "host"
- << "host7:27017")) << "protocolVersion" << 1
- << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
+ << "host7:27017"))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
0);
setSelfMemberState(MemberState::RS_SECONDARY);
@@ -3323,14 +3742,19 @@ TEST_F(HeartbeatResponseTestV1,
NodeDoesNotStandForElectionWhenPrimaryIsMarkedDownViaHeartbeatButWeAreAnArbiter) {
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "arbiterOnly" << true)
+ << "arbiterOnly"
+ << true)
<< BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "protocolVersion" << 1),
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "protocolVersion"
+ << 1),
0);
OpTime election = OpTime(Timestamp(400, 0), 0);
@@ -3455,14 +3879,19 @@ TEST_F(HeartbeatResponseTestV1,
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017"
- << "priority" << 0)
+ << "priority"
+ << 0)
<< BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"))
- << "protocolVersion" << 1),
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"))
+ << "protocolVersion"
+ << 1),
0);
OpTime election = OpTime(Timestamp(400, 0), 0);
@@ -3553,18 +3982,24 @@ TEST_F(HeartbeatResponseTestV1,
// In this test, the Topology coordinator sees a PRIMARY ("host2") and then sees a higher
// priority and stale node ("host3"). It responds with NoAction, as it should in all
// multiprimary states in PV1.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
setSelfMemberState(MemberState::RS_SECONDARY);
OpTime election = OpTime(Timestamp(1000, 0), 0);
@@ -3585,18 +4020,24 @@ TEST_F(HeartbeatResponseTestV1,
// In this test, the Topology coordinator becomes PRIMARY and then sees a higher priority
// and stale node ("host3"). It responds with NoAction, as it should in all
// multiprimary states in PV1.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
OpTime staleTime = OpTime();
@@ -3614,18 +4055,24 @@ TEST_F(HeartbeatResponseTestV1,
// In this test, the Topology coordinator becomes PRIMARY and then sees a higher priority
// and equally fresh node ("host3"). It responds with NoAction, as it should in all
// multiprimary states in PV1.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
OpTime election = OpTime(Timestamp(1000, 0), 0);
getTopoCoord().setFollowerMode(MemberState::RS_SECONDARY);
@@ -3644,18 +4091,24 @@ TEST_F(HeartbeatResponseTestV1,
// In this test, the Topology coordinator sees a PRIMARY ("host2") and then sees a higher
// priority and similarly fresh node ("host3"). It responds with NoAction, as it should
// in all multiprimary states in PV1.
- updateConfig(
- BSON("_id"
- << "rs0"
- << "version" << 6 << "members"
- << BSON_ARRAY(BSON("_id" << 0 << "host"
- << "host1:27017")
- << BSON("_id" << 1 << "host"
- << "host2:27017") << BSON("_id" << 2 << "host"
- << "host3:27017"
- << "priority" << 3))
- << "protocolVersion" << 1 << "settings" << BSON("heartbeatTimeoutSecs" << 5)),
- 0);
+ updateConfig(BSON("_id"
+ << "rs0"
+ << "version"
+ << 6
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host1:27017")
+ << BSON("_id" << 1 << "host"
+ << "host2:27017")
+ << BSON("_id" << 2 << "host"
+ << "host3:27017"
+ << "priority"
+ << 3))
+ << "protocolVersion"
+ << 1
+ << "settings"
+ << BSON("heartbeatTimeoutSecs" << 5)),
+ 0);
setSelfMemberState(MemberState::RS_SECONDARY);
OpTime election = OpTime();
@@ -3811,15 +4264,21 @@ TEST_F(HeartbeatResponseTestV1, ShouldNotChangeSyncSourceWhenFresherMemberDoesNo
updateConfig(BSON("_id"
<< "rs0"
- << "version" << 6 << "members"
+ << "version"
+ << 6
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "hself")
<< BSON("_id" << 1 << "host"
<< "host2")
<< BSON("_id" << 2 << "host"
<< "host3"
- << "buildIndexes" << false << "priority" << 0))
- << "protocolVersion" << 1),
+ << "buildIndexes"
+ << false
+ << "priority"
+ << 0))
+ << "protocolVersion"
+ << 1),
0);
HeartbeatResponseAction nextAction = receiveUpHeartbeat(HostAndPort("host2"),
"rs0",
@@ -4184,14 +4643,18 @@ TEST_F(HeartbeatResponseHighVerbosityTestV1, UpdateHeartbeatDataSameConfig) {
ReplicaSetConfig originalConfig;
originalConfig.initialize(BSON("_id"
<< "rs0"
- << "version" << 5 << "members"
+ << "version"
+ << 5
+ << "members"
<< BSON_ARRAY(BSON("_id" << 0 << "host"
<< "host1:27017")
<< BSON("_id" << 1 << "host"
<< "host2:27017")
<< BSON("_id" << 2 << "host"
<< "host3:27017"))
- << "protocolVersion" << 1 << "settings"
+ << "protocolVersion"
+ << 1
+ << "settings"
<< BSON("heartbeatTimeoutSecs" << 5)));
ReplSetHeartbeatResponse sameConfigResponse;
@@ -4212,9 +4675,8 @@ TEST_F(HeartbeatResponseHighVerbosityTestV1, UpdateHeartbeatDataSameConfig) {
stopCapturingLogMessages();
ASSERT_NO_ACTION(action.getAction());
ASSERT_EQUALS(1,
- countLogLinesContaining(
- "Config from heartbeat response was "
- "same as ours."));
+ countLogLinesContaining("Config from heartbeat response was "
+ "same as ours."));
}
TEST_F(HeartbeatResponseHighVerbosityTestV1,
diff --git a/src/mongo/db/repl/update_position_args.cpp b/src/mongo/db/repl/update_position_args.cpp
index 6fa63988a45..80f3505b2cc 100644
--- a/src/mongo/db/repl/update_position_args.cpp
+++ b/src/mongo/db/repl/update_position_args.cpp
@@ -33,8 +33,8 @@
#include "mongo/base/status.h"
#include "mongo/bson/util/bson_check.h"
#include "mongo/bson/util/bson_extract.h"
-#include "mongo/db/repl/bson_extract_optime.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/repl/bson_extract_optime.h"
namespace mongo {
namespace repl {
diff --git a/src/mongo/db/repl/vote_requester_test.cpp b/src/mongo/db/repl/vote_requester_test.cpp
index c6e1ebe7b61..ba4691019d8 100644
--- a/src/mongo/db/repl/vote_requester_test.cpp
+++ b/src/mongo/db/repl/vote_requester_test.cpp
@@ -32,9 +32,9 @@
#include "mongo/base/status.h"
#include "mongo/db/jsobj.h"
-#include "mongo/db/repl/vote_requester.h"
#include "mongo/db/repl/repl_set_request_votes_args.h"
#include "mongo/db/repl/replication_executor.h"
+#include "mongo/db/repl/vote_requester.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/stdx/functional.h"
#include "mongo/unittest/unittest.h"
@@ -58,22 +58,29 @@ class VoteRequesterTest : public mongo::unittest::Test {
public:
virtual void setUp() {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(
- BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1") << BSON("_id" << 2 << "host"
- << "host2")
- << BSON("_id" << 3 << "host"
- << "host3"
- << "votes" << 0 << "priority" << 0)
- << BSON("_id" << 4 << "host"
- << "host4"
- << "votes" << 0 << "priority" << 0)))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)))));
ASSERT_OK(config.validate());
long long candidateId = 0;
long long term = 2;
@@ -184,22 +191,29 @@ class VoteRequesterDryRunTest : public VoteRequesterTest {
public:
virtual void setUp() {
ReplicaSetConfig config;
- ASSERT_OK(
- config.initialize(BSON("_id"
- << "rs0"
- << "version" << 2 << "members"
- << BSON_ARRAY(
- BSON("_id" << 0 << "host"
- << "host0")
- << BSON("_id" << 1 << "host"
- << "host1") << BSON("_id" << 2 << "host"
- << "host2")
- << BSON("_id" << 3 << "host"
- << "host3"
- << "votes" << 0 << "priority" << 0)
- << BSON("_id" << 4 << "host"
- << "host4"
- << "votes" << 0 << "priority" << 0)))));
+ ASSERT_OK(config.initialize(BSON("_id"
+ << "rs0"
+ << "version"
+ << 2
+ << "members"
+ << BSON_ARRAY(BSON("_id" << 0 << "host"
+ << "host0")
+ << BSON("_id" << 1 << "host"
+ << "host1")
+ << BSON("_id" << 2 << "host"
+ << "host2")
+ << BSON("_id" << 3 << "host"
+ << "host3"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)
+ << BSON("_id" << 4 << "host"
+ << "host4"
+ << "votes"
+ << 0
+ << "priority"
+ << 0)))));
ASSERT_OK(config.validate());
long long candidateId = 0;
long long term = 2;
diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp
index de1dbf6487c..8f24954949b 100644
--- a/src/mongo/db/s/check_sharding_index_command.cpp
+++ b/src/mongo/db/s/check_sharding_index_command.cpp
@@ -37,8 +37,8 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/exec/working_set_common.h"
-#include "mongo/db/index_legacy.h"
#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/index_legacy.h"
#include "mongo/db/keypattern.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
index 9ea761808ac..e3abdbb9354 100644
--- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp
+++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp
@@ -44,10 +44,10 @@
#include "mongo/db/range_arithmetic.h"
#include "mongo/db/range_deleter_service.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/service_context.h"
+#include "mongo/db/s/chunk_move_write_concern_options.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/sharding_state.h"
-#include "mongo/db/s/chunk_move_write_concern_options.h"
+#include "mongo/db/service_context.h"
#include "mongo/s/migration_secondary_throttle_options.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp
index cc0983eeba5..5c16ace947b 100644
--- a/src/mongo/db/s/collection_metadata.cpp
+++ b/src/mongo/db/s/collection_metadata.cpp
@@ -270,7 +270,8 @@ StatusWith<std::unique_ptr<CollectionMetadata>> CollectionMetadata::cloneMerge(
if (!validStartEnd || !validNoHoles) {
return {ErrorCodes::IllegalOperation,
stream() << "cannot merge range " << rangeToString(minKey, maxKey)
- << ", overlapping chunks " << overlapToString(overlap)
+ << ", overlapping chunks "
+ << overlapToString(overlap)
<< (!validStartEnd ? " do not have the same min and max key"
: " are not all adjacent")};
}
diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp
index c1ee18605b7..b6861350ed8 100644
--- a/src/mongo/db/s/collection_metadata_test.cpp
+++ b/src/mongo/db/s/collection_metadata_test.cpp
@@ -116,7 +116,8 @@ TEST_F(NoChunkFixture, IsKeyValid) {
ASSERT_TRUE(getCollMetadata().isValidKey(BSON("a" << 3)));
ASSERT_FALSE(getCollMetadata().isValidKey(BSON("a"
<< "abcde"
- << "b" << 1)));
+ << "b"
+ << 1)));
ASSERT_FALSE(getCollMetadata().isValidKey(BSON("c"
<< "abcde")));
}
@@ -333,10 +334,12 @@ protected:
BSONObj fooSingle = BSON(
ChunkType::name("test.foo-a_10")
- << ChunkType::ns("test.foo") << ChunkType::min(BSON("a" << 10))
+ << ChunkType::ns("test.foo")
+ << ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("a" << 20))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(epoch) << ChunkType::shard("shard0000"));
+ << ChunkType::DEPRECATED_epoch(epoch)
+ << ChunkType::shard("shard0000"));
std::vector<BSONObj> chunksToSend{fooSingle};
auto future = launchAsync([this] {
@@ -587,10 +590,12 @@ protected:
BSONObj fooSingle = BSON(
ChunkType::name("test.foo-a_MinKey")
- << ChunkType::ns("test.foo") << ChunkType::min(BSON("a" << MINKEY << "b" << MINKEY))
+ << ChunkType::ns("test.foo")
+ << ChunkType::min(BSON("a" << MINKEY << "b" << MINKEY))
<< ChunkType::max(BSON("a" << MAXKEY << "b" << MAXKEY))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(epoch) << ChunkType::shard("shard0000"));
+ << ChunkType::DEPRECATED_epoch(epoch)
+ << ChunkType::shard("shard0000"));
std::vector<BSONObj> chunksToSend{fooSingle};
auto future = launchAsync([this] {
@@ -654,16 +659,20 @@ protected:
std::vector<BSONObj> chunksToSend;
chunksToSend.push_back(BSON(
ChunkType::name("test.foo-a_10")
- << ChunkType::ns("test.foo") << ChunkType::min(BSON("a" << 10 << "b" << 0))
+ << ChunkType::ns("test.foo")
+ << ChunkType::min(BSON("a" << 10 << "b" << 0))
<< ChunkType::max(BSON("a" << 20 << "b" << 0))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(epoch) << ChunkType::shard("shard0000")));
+ << ChunkType::DEPRECATED_epoch(epoch)
+ << ChunkType::shard("shard0000")));
chunksToSend.push_back(BSON(
ChunkType::name("test.foo-a_10")
- << ChunkType::ns("test.foo") << ChunkType::min(BSON("a" << 30 << "b" << 0))
+ << ChunkType::ns("test.foo")
+ << ChunkType::min(BSON("a" << 30 << "b" << 0))
<< ChunkType::max(BSON("a" << 40 << "b" << 0))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(chunkVersion.toLong()))
- << ChunkType::DEPRECATED_epoch(epoch) << ChunkType::shard("shard0000")));
+ << ChunkType::DEPRECATED_epoch(epoch)
+ << ChunkType::shard("shard0000")));
auto future = launchAsync([this] {
MetadataLoader loader;
@@ -847,30 +856,36 @@ protected:
ChunkVersion version(1, 1, epoch);
chunksToSend.push_back(BSON(
ChunkType::name("x.y-a_MinKey")
- << ChunkType::ns("x.y") << ChunkType::min(BSON("a" << MINKEY))
+ << ChunkType::ns("x.y")
+ << ChunkType::min(BSON("a" << MINKEY))
<< ChunkType::max(BSON("a" << 10))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(version.toLong()))
- << ChunkType::DEPRECATED_epoch(version.epoch()) << ChunkType::shard("shard0000")));
+ << ChunkType::DEPRECATED_epoch(version.epoch())
+ << ChunkType::shard("shard0000")));
}
{
ChunkVersion version(1, 3, epoch);
chunksToSend.push_back(BSON(
ChunkType::name("x.y-a_10")
- << ChunkType::ns("x.y") << ChunkType::min(BSON("a" << 10))
+ << ChunkType::ns("x.y")
+ << ChunkType::min(BSON("a" << 10))
<< ChunkType::max(BSON("a" << 20))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(version.toLong()))
- << ChunkType::DEPRECATED_epoch(version.epoch()) << ChunkType::shard("shard0000")));
+ << ChunkType::DEPRECATED_epoch(version.epoch())
+ << ChunkType::shard("shard0000")));
}
{
ChunkVersion version(1, 2, epoch);
chunksToSend.push_back(BSON(
ChunkType::name("x.y-a_30")
- << ChunkType::ns("x.y") << ChunkType::min(BSON("a" << 30))
+ << ChunkType::ns("x.y")
+ << ChunkType::min(BSON("a" << 30))
<< ChunkType::max(BSON("a" << MAXKEY))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(version.toLong()))
- << ChunkType::DEPRECATED_epoch(version.epoch()) << ChunkType::shard("shard0000")));
+ << ChunkType::DEPRECATED_epoch(version.epoch())
+ << ChunkType::shard("shard0000")));
}
auto future = launchAsync([this] {
diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp
index b4c924034a0..bccdf9cf009 100644
--- a/src/mongo/db/s/collection_sharding_state.cpp
+++ b/src/mongo/db/s/collection_sharding_state.cpp
@@ -131,11 +131,11 @@ void CollectionShardingState::checkShardVersionOrThrow(OperationContext* txn) co
ChunkVersion received;
ChunkVersion wanted;
if (!_checkShardVersionOk(txn, &errmsg, &received, &wanted)) {
- throw SendStaleConfigException(_nss.ns(),
- str::stream() << "[" << _nss.ns()
- << "] shard version not ok: " << errmsg,
- received,
- wanted);
+ throw SendStaleConfigException(
+ _nss.ns(),
+ str::stream() << "[" << _nss.ns() << "] shard version not ok: " << errmsg,
+ received,
+ wanted);
}
}
@@ -246,8 +246,8 @@ bool CollectionShardingState::_checkShardVersionOk(OperationContext* txn,
// Set migration critical section on operation sharding state: operation will wait for the
// migration to finish before returning failure and retrying.
- OperationShardingState::get(txn)
- .setMigrationCriticalSection(_sourceMgr->getMigrationCriticalSection());
+ OperationShardingState::get(txn).setMigrationCriticalSection(
+ _sourceMgr->getMigrationCriticalSection());
return false;
}
diff --git a/src/mongo/db/s/collection_sharding_state_test.cpp b/src/mongo/db/s/collection_sharding_state_test.cpp
index 16b2c903b49..c12d4395f7f 100644
--- a/src/mongo/db/s/collection_sharding_state_test.cpp
+++ b/src/mongo/db/s/collection_sharding_state_test.cpp
@@ -34,12 +34,12 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context_noop.h"
-#include "mongo/db/service_context_noop.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/db/service_context_noop.h"
+#include "mongo/db/service_context_noop.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/clock_source_mock.h"
diff --git a/src/mongo/db/s/metadata_loader_test.cpp b/src/mongo/db/s/metadata_loader_test.cpp
index c0756a8f29b..67ea0317db0 100644
--- a/src/mongo/db/s/metadata_loader_test.cpp
+++ b/src/mongo/db/s/metadata_loader_test.cpp
@@ -71,7 +71,8 @@ protected:
void expectFindOnConfigSendChunksDefault() {
BSONObj chunk = BSON(
ChunkType::name("test.foo-a_MinKey")
- << ChunkType::ns("test.foo") << ChunkType::min(BSON("a" << MINKEY))
+ << ChunkType::ns("test.foo")
+ << ChunkType::min(BSON("a" << MINKEY))
<< ChunkType::max(BSON("a" << MAXKEY))
<< ChunkType::DEPRECATED_lastmod(Date_t::fromMillisSinceEpoch(_maxCollVersion.toLong()))
<< ChunkType::DEPRECATED_epoch(_maxCollVersion.epoch())
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
index 542c61dc8ce..a36feb23ebf 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp
@@ -49,8 +49,8 @@
#include "mongo/executor/task_executor.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/rpc/get_status_from_command_result.h"
-#include "mongo/s/client/shard_registry.h"
#include "mongo/s/chunk.h"
+#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#include "mongo/util/elapsed_tracker.h"
#include "mongo/util/log.h"
@@ -514,7 +514,8 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* txn
if (!idx) {
return {ErrorCodes::IndexNotFound,
str::stream() << "can't find index with prefix " << _shardKeyPattern.toBSON()
- << " in storeCurrentLocs for " << _args.getNss().ns()};
+ << " in storeCurrentLocs for "
+ << _args.getNss().ns()};
}
// Install the stage, which will listen for notifications on the collection
@@ -604,10 +605,19 @@ Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* txn
return {
ErrorCodes::ChunkTooBig,
str::stream() << "Cannot move chunk: the maximum number of documents for a chunk is "
- << maxRecsWhenFull << ", the maximum chunk size is "
- << _args.getMaxChunkSizeBytes() << ", average document size is "
- << avgRecSize << ". Found " << recCount << " documents in chunk "
- << " ns: " << _args.getNss().ns() << " " << _args.getMinKey() << " -> "
+ << maxRecsWhenFull
+ << ", the maximum chunk size is "
+ << _args.getMaxChunkSizeBytes()
+ << ", average document size is "
+ << avgRecSize
+ << ". Found "
+ << recCount
+ << " documents in chunk "
+ << " ns: "
+ << _args.getNss().ns()
+ << " "
+ << _args.getMinKey()
+ << " -> "
<< _args.getMaxKey()};
}
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
index eb72772db1e..7f7850cb22c 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp
@@ -38,9 +38,9 @@
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/s/collection_sharding_state.h"
+#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
#include "mongo/db/s/migration_source_manager.h"
#include "mongo/db/s/sharding_state.h"
-#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
/**
* This file contains commands, which are specific to the legacy chunk cloner source.
diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp
index 36592d6bd2e..e6990087018 100644
--- a/src/mongo/db/s/migration_destination_manager.cpp
+++ b/src/mongo/db/s/migration_destination_manager.cpp
@@ -50,13 +50,13 @@
#include "mongo/db/range_deleter_service.h"
#include "mongo/db/repl/repl_client_info.h"
#include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/service_context.h"
-#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/collection_sharding_state.h"
#include "mongo/db/s/move_timing_helper.h"
#include "mongo/db/s/sharded_connection_info.h"
#include "mongo/db/s/sharding_state.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/logger/ramlog.h"
#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/shard_key_pattern.h"
@@ -260,8 +260,14 @@ Status MigrationDestinationManager::start(const string& ns,
if (_sessionId) {
return Status(ErrorCodes::ConflictingOperationInProgress,
str::stream() << "Active migration already in progress "
- << "ns: " << _ns << ", from: " << _from << ", min: " << _min
- << ", max: " << _max);
+ << "ns: "
+ << _ns
+ << ", from: "
+ << _from
+ << ", min: "
+ << _min
+ << ", max: "
+ << _max);
}
_state = READY;
@@ -978,9 +984,16 @@ Status MigrationDestinationManager::_notePending(OperationContext* txn,
if (!metadata || metadata->getCollVersion().epoch() != epoch) {
return {ErrorCodes::StaleShardVersion,
str::stream() << "could not note chunk "
- << "[" << min << "," << max << ")"
- << " as pending because the epoch for " << nss.ns()
- << " has changed from " << epoch << " to "
+ << "["
+ << min
+ << ","
+ << max
+ << ")"
+ << " as pending because the epoch for "
+ << nss.ns()
+ << " has changed from "
+ << epoch
+ << " to "
<< (metadata ? metadata->getCollVersion().epoch()
: ChunkVersion::UNSHARDED().epoch())};
}
@@ -1023,10 +1036,18 @@ Status MigrationDestinationManager::_forgetPending(OperationContext* txn,
if (!metadata || metadata->getCollVersion().epoch() != epoch) {
return {ErrorCodes::StaleShardVersion,
str::stream() << "no need to forget pending chunk "
- << "[" << min << "," << max << ")"
- << " because the epoch for " << nss.ns() << " has changed from "
- << epoch << " to " << (metadata ? metadata->getCollVersion().epoch()
- : ChunkVersion::UNSHARDED().epoch())};
+ << "["
+ << min
+ << ","
+ << max
+ << ")"
+ << " because the epoch for "
+ << nss.ns()
+ << " has changed from "
+ << epoch
+ << " to "
+ << (metadata ? metadata->getCollVersion().epoch()
+ : ChunkVersion::UNSHARDED().epoch())};
}
ChunkType chunk;
diff --git a/src/mongo/db/s/migration_session_id_test.cpp b/src/mongo/db/s/migration_session_id_test.cpp
index 7cb443207de..02625995941 100644
--- a/src/mongo/db/s/migration_session_id_test.cpp
+++ b/src/mongo/db/s/migration_session_id_test.cpp
@@ -71,8 +71,9 @@ TEST(MigrationSessionId, Comparison) {
}
TEST(MigrationSessionId, ErrorWhenTypeIsNotString) {
- ASSERT_NOT_OK(MigrationSessionId::extractFromBSON(
- BSON("SomeField" << 1 << "sessionId" << Date_t::now())).getStatus());
+ ASSERT_NOT_OK(
+ MigrationSessionId::extractFromBSON(BSON("SomeField" << 1 << "sessionId" << Date_t::now()))
+ .getStatus());
ASSERT_NOT_OK(MigrationSessionId::extractFromBSON(BSON("SomeField" << 1 << "sessionId" << 2))
.getStatus());
}
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 109e6cfe8f7..49b07775b28 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -35,9 +35,9 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
#include "mongo/db/s/collection_metadata.h"
#include "mongo/db/s/collection_sharding_state.h"
+#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
#include "mongo/db/s/operation_sharding_state.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/sharding_state_recovery.h"
@@ -91,8 +91,12 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* txn, MoveChunkR
if (!refreshStatus.isOK()) {
uasserted(refreshStatus.code(),
str::stream() << "moveChunk cannot start migrate of chunk "
- << "[" << _args.getMinKey() << "," << _args.getMaxKey()
- << ") due to " << refreshStatus.toString());
+ << "["
+ << _args.getMinKey()
+ << ","
+ << _args.getMaxKey()
+ << ") due to "
+ << refreshStatus.toString());
}
if (shardVersion.majorVersion() == 0) {
@@ -100,20 +104,29 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* txn, MoveChunkR
// the first place
uasserted(ErrorCodes::IncompatibleShardingMetadata,
str::stream() << "moveChunk cannot start migrate of chunk "
- << "[" << _args.getMinKey() << "," << _args.getMaxKey() << ")"
+ << "["
+ << _args.getMinKey()
+ << ","
+ << _args.getMaxKey()
+ << ")"
<< " with zero shard version");
}
if (expectedCollectionVersion.epoch() != shardVersion.epoch()) {
- throw SendStaleConfigException(
- _args.getNss().ns(),
- str::stream() << "moveChunk cannot move chunk "
- << "[" << _args.getMinKey() << "," << _args.getMaxKey() << "), "
- << "collection may have been dropped. "
- << "current epoch: " << shardVersion.epoch()
- << ", cmd epoch: " << expectedCollectionVersion.epoch(),
- expectedCollectionVersion,
- shardVersion);
+ throw SendStaleConfigException(_args.getNss().ns(),
+ str::stream() << "moveChunk cannot move chunk "
+ << "["
+ << _args.getMinKey()
+ << ","
+ << _args.getMaxKey()
+ << "), "
+ << "collection may have been dropped. "
+ << "current epoch: "
+ << shardVersion.epoch()
+ << ", cmd epoch: "
+ << expectedCollectionVersion.epoch(),
+ expectedCollectionVersion,
+ shardVersion);
}
// Snapshot the committed metadata from the time the migration starts
@@ -137,13 +150,17 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* txn, MoveChunkR
origChunk.getMin().woCompare(_args.getMinKey()) ||
origChunk.getMax().woCompare(_args.getMaxKey())) {
// Our boundaries are different from those passed in
- throw SendStaleConfigException(
- _args.getNss().ns(),
- str::stream() << "moveChunk cannot find chunk "
- << "[" << _args.getMinKey() << "," << _args.getMaxKey() << ")"
- << " to migrate, the chunk boundaries may be stale",
- expectedCollectionVersion,
- shardVersion);
+ throw SendStaleConfigException(_args.getNss().ns(),
+ str::stream()
+ << "moveChunk cannot find chunk "
+ << "["
+ << _args.getMinKey()
+ << ","
+ << _args.getMaxKey()
+ << ")"
+ << " to migrate, the chunk boundaries may be stale",
+ expectedCollectionVersion,
+ shardVersion);
}
}
@@ -160,12 +177,14 @@ Status MigrationSourceManager::startClone(OperationContext* txn) {
invariant(_state == kCreated);
auto scopedGuard = MakeGuard([&] { cleanupOnError(txn); });
- grid.catalogManager(txn)
- ->logChange(txn,
- "moveChunk.start",
- _args.getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId() << "to" << _args.getToShardId()));
+ grid.catalogManager(txn)->logChange(
+ txn,
+ "moveChunk.start",
+ _args.getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()));
_cloneDriver = stdx::make_unique<MigrationChunkClonerSourceLegacy>(
_args, _committedMetadata->getKeyPattern());
@@ -228,7 +247,8 @@ Status MigrationSourceManager::enterCriticalSection(OperationContext* txn) {
str::stream()
<< "Sharding metadata changed while holding distributed lock. Expected: "
<< _committedMetadata->getCollVersion().toString()
- << ", actual: " << css->getMetadata()->getCollVersion().toString()};
+ << ", actual: "
+ << css->getMetadata()->getCollVersion().toString()};
}
// IMPORTANT: After this line, the critical section is in place and needs to be rolled back
@@ -394,15 +414,20 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
"moveChunk.validating",
_args.getNss().ns(),
BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId() << "to" << _args.getToShardId()));
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()));
if (!status.isOK()) {
- fassertStatusOK(40137,
- {status.code(),
- str::stream()
- << "applyOps failed to commit chunk [" << _args.getMinKey() << ","
- << _args.getMaxKey() << ") due to " << causedBy(applyOpsStatus)
- << ", and updating the optime with a write before refreshing the "
- << "metadata also failed: " << causedBy(status)});
+ fassertStatusOK(
+ 40137,
+ {status.code(),
+ str::stream() << "applyOps failed to commit chunk [" << _args.getMinKey() << ","
+ << _args.getMaxKey()
+ << ") due to "
+ << causedBy(applyOpsStatus)
+ << ", and updating the optime with a write before refreshing the "
+ << "metadata also failed: "
+ << causedBy(status)});
}
ShardingState* const shardingState = ShardingState::get(txn);
@@ -412,7 +437,9 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
fassertStatusOK(34431,
{refreshStatus.code(),
str::stream() << "applyOps failed to commit chunk [" << _args.getMinKey()
- << "," << _args.getMaxKey() << ") due to "
+ << ","
+ << _args.getMaxKey()
+ << ") due to "
<< causedBy(applyOpsStatus)
<< ", and refreshing collection metadata failed: "
<< causedBy(refreshStatus)});
@@ -455,12 +482,14 @@ Status MigrationSourceManager::commitDonateChunk(OperationContext* txn) {
scopedGuard.Dismiss();
_cleanup(txn);
- grid.catalogManager(txn)
- ->logChange(txn,
- "moveChunk.commit",
- _args.getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId() << "to" << _args.getToShardId()));
+ grid.catalogManager(txn)->logChange(
+ txn,
+ "moveChunk.commit",
+ _args.getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()));
return Status::OK();
}
@@ -470,12 +499,14 @@ void MigrationSourceManager::cleanupOnError(OperationContext* txn) {
return;
}
- grid.catalogManager(txn)
- ->logChange(txn,
- "moveChunk.error",
- _args.getNss().ns(),
- BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
- << _args.getFromShardId() << "to" << _args.getToShardId()));
+ grid.catalogManager(txn)->logChange(
+ txn,
+ "moveChunk.error",
+ _args.getNss().ns(),
+ BSON("min" << _args.getMinKey() << "max" << _args.getMaxKey() << "from"
+ << _args.getFromShardId()
+ << "to"
+ << _args.getToShardId()));
_cleanup(txn);
}
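The logChange rewrites above show the companion rule for call wrapping: rather than breaking after grid.catalogManager(txn) and indenting ->logChange(, the formatter keeps the full callee on one line and gives each argument its own line. A hedged sketch of that shape; CatalogManager and logMoveChunkStart are stand-ins invented for illustration:

#include <iostream>
#include <string>

// Hypothetical miniature of the catalog-manager logging call.
struct CatalogManager {
    void logChange(const std::string& what,
                   const std::string& ns,
                   const std::string& detail) {
        std::cout << what << " " << ns << " " << detail << "\n";
    }
};

void logMoveChunkStart(CatalogManager* catalogManager) {
    // The callee stays on one line; each argument wraps below it.
    catalogManager->logChange(
        "moveChunk.start",
        "test.coll",
        "{min: 0, max: 100}");
}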
diff --git a/src/mongo/db/s/move_chunk_command.cpp b/src/mongo/db/s/move_chunk_command.cpp
index 42beb940283..decb11713ca 100644
--- a/src/mongo/db/s/move_chunk_command.cpp
+++ b/src/mongo/db/s/move_chunk_command.cpp
@@ -90,9 +90,10 @@ private:
*/
DistLockManager::ScopedDistLock _acquireDistLock(OperationContext* txn,
const MoveChunkRequest& args) {
- const std::string whyMessage(str::stream() << "migrating chunk [" << args.getMinKey()
- << ", " << args.getMaxKey() << ") in "
- << args.getNss().ns());
+ const std::string whyMessage(
+ str::stream() << "migrating chunk [" << args.getMinKey() << ", " << args.getMaxKey()
+ << ") in "
+ << args.getNss().ns());
auto distLockStatus =
grid.catalogManager(txn)->distLock(txn, args.getNss().ns(), whyMessage);
if (!distLockStatus.isOK()) {
diff --git a/src/mongo/db/s/move_timing_helper.cpp b/src/mongo/db/s/move_timing_helper.cpp
index 68a9c83c9f3..4ff91d6e658 100644
--- a/src/mongo/db/s/move_timing_helper.cpp
+++ b/src/mongo/db/s/move_timing_helper.cpp
@@ -82,8 +82,8 @@ MoveTimingHelper::~MoveTimingHelper() {
_b.append("errmsg", *_cmdErrmsg);
}
- grid.catalogManager(_txn)
- ->logChange(_txn, str::stream() << "moveChunk." << _where, _ns, _b.obj());
+ grid.catalogManager(_txn)->logChange(
+ _txn, str::stream() << "moveChunk." << _where, _ns, _b.obj());
} catch (const std::exception& e) {
warning() << "couldn't record timing for moveChunk '" << _where << "': " << e.what();
}
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index fc75b886961..08bbce81b8c 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -36,9 +36,9 @@
#include "mongo/client/remote_command_targeter.h"
#include "mongo/client/remote_command_targeter_factory_impl.h"
#include "mongo/s/balancer/balancer_configuration.h"
-#include "mongo/s/client/shard_remote.h"
-#include "mongo/s/client/shard_local.h"
#include "mongo/s/client/shard_factory.h"
+#include "mongo/s/client/shard_local.h"
+#include "mongo/s/client/shard_remote.h"
#include "mongo/s/sharding_egress_metadata_hook_for_mongod.h"
#include "mongo/s/sharding_initialization.h"
#include "mongo/stdx/memory.h"
@@ -61,10 +61,10 @@ Status initializeGlobalShardingStateForMongod(const ConnectionString& configCS)
shardId, connStr, targeterFactoryPtr->create(connStr));
};
- ShardFactory::BuilderCallable localBuilder =
- [](const ShardId& shardId, const ConnectionString& connStr) {
- return stdx::make_unique<ShardLocal>(shardId);
- };
+ ShardFactory::BuilderCallable localBuilder = [](const ShardId& shardId,
+ const ConnectionString& connStr) {
+ return stdx::make_unique<ShardLocal>(shardId);
+ };
ShardFactory::BuildersMap buildersMap{
{ConnectionString::SET, std::move(setBuilder)},
@@ -76,9 +76,8 @@ Status initializeGlobalShardingStateForMongod(const ConnectionString& configCS)
stdx::make_unique<ShardFactory>(std::move(buildersMap), std::move(targeterFactory));
return initializeGlobalShardingState(
- configCS,
- ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes,
- std::move(shardFactory),
- []() { return stdx::make_unique<rpc::ShardingEgressMetadataHookForMongod>(); });
+ configCS, ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, std::move(shardFactory), []() {
+ return stdx::make_unique<rpc::ShardingEgressMetadataHookForMongod>();
+ });
}
}
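The localBuilder hunk above illustrates how clang-format 3.8 wraps lambdas: when the parameter list no longer fits, the lambda stays on the declaration line and its parameters align after [](, instead of the whole lambda dropping to a fresh line. A compilable approximation; Shard and BuilderCallable here are simplified stand-ins for the factory types in the diff:

#include <functional>
#include <memory>
#include <string>

// Hypothetical stand-in for the shard type built by the factory.
struct Shard {
    explicit Shard(std::string id) : _id(std::move(id)) {}
    std::string _id;
};

using BuilderCallable =
    std::function<std::unique_ptr<Shard>(const std::string&, const std::string&)>;

// The lambda stays on the declaration line; its parameters wrap and
// align after "[](", matching the localBuilder hunk above.
BuilderCallable localBuilder = [](const std::string& shardId,
                                  const std::string& connStr) {
    return std::make_unique<Shard>(shardId);
};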
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index fbb6f0f2e62..1a66f122329 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -54,18 +54,18 @@
#include "mongo/rpc/metadata/config_server_metadata.h"
#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/client/shard_registry.h"
#include "mongo/s/chunk_version.h"
+#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/sharding_initialization.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
-#include <iostream>
-#include <iomanip>
-#include <ctime>
#include <chrono>
+#include <ctime>
+#include <iomanip>
+#include <iostream>
namespace mongo {
@@ -462,7 +462,8 @@ Status ShardingState::initializeFromShardIdentity(const ShardIdentityType& shard
if (_shardName != shardIdentity.getShardName()) {
return {ErrorCodes::InconsistentShardIdentity,
str::stream() << "shard name previously set as " << _shardName
- << " is different from stored: " << shardIdentity.getShardName()};
+ << " is different from stored: "
+ << shardIdentity.getShardName()};
}
auto prevConfigsvrConnStr = grid.shardRegistry()->getConfigServerConnectionString();
@@ -477,7 +478,8 @@ Status ShardingState::initializeFromShardIdentity(const ShardIdentityType& shard
return {ErrorCodes::InconsistentShardIdentity,
str::stream() << "config server connection string previously set as "
<< prevConfigsvrConnStr.toString()
- << " is different from stored: " << configSvrConnStr.toString()};
+ << " is different from stored: "
+ << configSvrConnStr.toString()};
}
// clusterId will only be unset if sharding state was initialized via the sharding
@@ -487,7 +489,8 @@ Status ShardingState::initializeFromShardIdentity(const ShardIdentityType& shard
} else if (_clusterId != shardIdentity.getClusterId()) {
return {ErrorCodes::InconsistentShardIdentity,
str::stream() << "cluster id previously set as " << _clusterId
- << " is different from stored: " << shardIdentity.getClusterId()};
+ << " is different from stored: "
+ << shardIdentity.getClusterId()};
}
return Status::OK();
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index 1574f611403..d076b995f53 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -40,9 +40,9 @@
#include "mongo/db/dbhelpers.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/ops/update.h"
#include "mongo/db/ops/update_lifecycle_impl.h"
#include "mongo/db/ops/update_request.h"
-#include "mongo/db/ops/update.h"
#include "mongo/db/repl/bson_extract_optime.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/repl_client_info.h"
diff --git a/src/mongo/db/s/sharding_state_test.cpp b/src/mongo/db/s/sharding_state_test.cpp
index 5c710d352da..94b26b1f0b7 100644
--- a/src/mongo/db/s/sharding_state_test.cpp
+++ b/src/mongo/db/s/sharding_state_test.cpp
@@ -29,26 +29,26 @@
#include "mongo/platform/basic.h"
#include "mongo/base/status_with.h"
+#include "mongo/client/remote_command_targeter.h"
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
-#include "mongo/client/remote_command_targeter.h"
#include "mongo/client/replica_set_monitor.h"
-#include "mongo/db/service_context_noop.h"
-#include "mongo/executor/network_interface_mock.h"
-#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/s/sharding_state.h"
#include "mongo/db/s/type_shard_identity.h"
#include "mongo/db/service_context_noop.h"
+#include "mongo/db/service_context_noop.h"
+#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/task_executor_pool.h"
+#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/catalog_manager_mock.h"
#include "mongo/s/client/shard_factory.h"
-#include "mongo/s/client/shard_remote.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/client/shard_remote.h"
#include "mongo/s/grid.h"
#include "mongo/s/query/cluster_cursor_manager.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/s/split_chunk_command.cpp b/src/mongo/db/s/split_chunk_command.cpp
index 7e3473299f3..58018441072 100644
--- a/src/mongo/db/s/split_chunk_command.cpp
+++ b/src/mongo/db/s/split_chunk_command.cpp
@@ -220,7 +220,8 @@ public:
//
const string whyMessage(str::stream() << "splitting chunk [" << min << ", " << max
- << ") in " << nss.toString());
+ << ") in "
+ << nss.toString());
auto scopedDistLock = grid.catalogManager(txn)->distLock(
txn, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);
if (!scopedDistLock.isOK()) {
@@ -453,8 +454,8 @@ public:
chunkDetail.append("of", newChunksSize);
appendShortVersion(chunkDetail.subobjStart("chunk"), *newChunks[i]);
- grid.catalogManager(txn)
- ->logChange(txn, "multi-split", nss.ns(), chunkDetail.obj());
+ grid.catalogManager(txn)->logChange(
+ txn, "multi-split", nss.ns(), chunkDetail.obj());
}
}
diff --git a/src/mongo/db/s/start_chunk_clone_request_test.cpp b/src/mongo/db/s/start_chunk_clone_request_test.cpp
index 5071ff61edc..2b977178781 100644
--- a/src/mongo/db/s/start_chunk_clone_request_test.cpp
+++ b/src/mongo/db/s/start_chunk_clone_request_test.cpp
@@ -65,9 +65,10 @@ TEST(StartChunkCloneRequest, CreateAsCommandComplete) {
ASSERT_EQ(sessionId.toString(), request.getSessionId().toString());
ASSERT(sessionId.matches(request.getSessionId()));
ASSERT_EQ("TestConfigRS/CS1:12345,CS2:12345,CS3:12345", request.getConfigServerCS().toString());
- ASSERT_EQ(assertGet(ConnectionString::parse(
- "TestDonorRS/Donor1:12345,Donor2:12345,Donor3:12345")).toString(),
- request.getFromShardConnectionString().toString());
+ ASSERT_EQ(
+ assertGet(ConnectionString::parse("TestDonorRS/Donor1:12345,Donor2:12345,Donor3:12345"))
+ .toString(),
+ request.getFromShardConnectionString().toString());
ASSERT_EQ("shard0002", request.getToShardId());
ASSERT_EQ(BSON("Key" << -100), request.getMinKey());
ASSERT_EQ(BSON("Key" << 100), request.getMaxKey());
diff --git a/src/mongo/db/s/type_shard_identity_test.cpp b/src/mongo/db/s/type_shard_identity_test.cpp
index 8a2382e4bf7..960faff68ba 100644
--- a/src/mongo/db/s/type_shard_identity_test.cpp
+++ b/src/mongo/db/s/type_shard_identity_test.cpp
@@ -47,7 +47,8 @@ TEST(ShardIdentityType, RoundTrip) {
<< "test/a:123"
<< "shardName"
<< "s1"
- << "clusterId" << clusterId);
+ << "clusterId"
+ << clusterId);
auto result = ShardIdentityType::fromBSON(doc);
ASSERT_OK(result.getStatus());
@@ -68,7 +69,8 @@ TEST(ShardIdentityType, ParseMissingId) {
<< "test/a:123"
<< "shardName"
<< "s1"
- << "clusterId" << OID::gen());
+ << "clusterId"
+ << OID::gen());
auto result = ShardIdentityType::fromBSON(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -79,7 +81,8 @@ TEST(ShardIdentityType, ParseMissingConfigsvrConnString) {
<< "shardIdentity"
<< "shardName"
<< "s1"
- << "clusterId" << OID::gen());
+ << "clusterId"
+ << OID::gen());
auto result = ShardIdentityType::fromBSON(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -90,7 +93,8 @@ TEST(ShardIdentityType, ParseMissingShardName) {
<< "shardIdentity"
<< "configsvrConnectionString"
<< "test/a:123"
- << "clusterId" << OID::gen());
+ << "clusterId"
+ << OID::gen());
auto result = ShardIdentityType::fromBSON(doc);
ASSERT_NOT_OK(result.getStatus());
@@ -116,7 +120,8 @@ TEST(ShardIdentityType, InvalidConnectionString) {
<< "test/,,,"
<< "shardName"
<< "s1"
- << "clusterId" << clusterId);
+ << "clusterId"
+ << clusterId);
ASSERT_EQ(ErrorCodes::FailedToParse, ShardIdentityType::fromBSON(doc).getStatus());
}
@@ -129,7 +134,8 @@ TEST(ShardIdentityType, NonReplSetConnectionString) {
<< "local:123"
<< "shardName"
<< "s1"
- << "clusterId" << clusterId);
+ << "clusterId"
+ << clusterId);
ASSERT_EQ(ErrorCodes::UnsupportedFormat, ShardIdentityType::fromBSON(doc).getStatus());
}
diff --git a/src/mongo/db/server_options_helpers.cpp b/src/mongo/db/server_options_helpers.cpp
index 9ab8e97c86c..c1729324606 100644
--- a/src/mongo/db/server_options_helpers.cpp
+++ b/src/mongo/db/server_options_helpers.cpp
@@ -36,10 +36,10 @@
#define SYSLOG_NAMES
#include <syslog.h>
#endif
-#include <ios>
-#include <iostream>
#include <boost/filesystem.hpp>
#include <boost/filesystem/operations.hpp>
+#include <ios>
+#include <iostream>
#include "mongo/base/status.h"
#include "mongo/bson/util/builder.h"
@@ -82,26 +82,13 @@ typedef struct _code {
int c_val;
} CODE;
-CODE facilitynames[] = {{"auth", LOG_AUTH},
- {"cron", LOG_CRON},
- {"daemon", LOG_DAEMON},
- {"kern", LOG_KERN},
- {"lpr", LOG_LPR},
- {"mail", LOG_MAIL},
- {"news", LOG_NEWS},
- {"security", LOG_AUTH}, /* DEPRECATED */
- {"syslog", LOG_SYSLOG},
- {"user", LOG_USER},
- {"uucp", LOG_UUCP},
- {"local0", LOG_LOCAL0},
- {"local1", LOG_LOCAL1},
- {"local2", LOG_LOCAL2},
- {"local3", LOG_LOCAL3},
- {"local4", LOG_LOCAL4},
- {"local5", LOG_LOCAL5},
- {"local6", LOG_LOCAL6},
- {"local7", LOG_LOCAL7},
- {NULL, -1}};
+CODE facilitynames[] = {{"auth", LOG_AUTH}, {"cron", LOG_CRON}, {"daemon", LOG_DAEMON},
+ {"kern", LOG_KERN}, {"lpr", LOG_LPR}, {"mail", LOG_MAIL},
+ {"news", LOG_NEWS}, {"security", LOG_AUTH}, /* DEPRECATED */
+ {"syslog", LOG_SYSLOG}, {"user", LOG_USER}, {"uucp", LOG_UUCP},
+ {"local0", LOG_LOCAL0}, {"local1", LOG_LOCAL1}, {"local2", LOG_LOCAL2},
+ {"local3", LOG_LOCAL3}, {"local4", LOG_LOCAL4}, {"local5", LOG_LOCAL5},
+ {"local6", LOG_LOCAL6}, {"local7", LOG_LOCAL7}, {NULL, -1}};
#endif // !defined(INTERNAL_NOPRI)
#endif // defined(SYSLOG_NAMES)
@@ -127,10 +114,9 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
options->addOptionChaining("version", "version", moe::Switch, "show version information")
.setSources(moe::SourceAllLegacy);
- options->addOptionChaining("config",
- "config,f",
- moe::String,
- "configuration file specifying additional options")
+ options
+ ->addOptionChaining(
+ "config", "config,f", moe::String, "configuration file specifying additional options")
.setSources(moe::SourceAllLegacy);
// The verbosity level can be set at startup in the following ways. Note that if multiple
@@ -166,11 +152,12 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
// component: |
// Sharding: |
// verbosity: 5 | 5 (for Sharding only, 0 for default)
- options->addOptionChaining(
- "verbose",
- "verbose,v",
- moe::String,
- "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
+ options
+ ->addOptionChaining(
+ "verbose",
+ "verbose,v",
+ moe::String,
+ "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
.setImplicit(moe::Value(std::string("v")))
.setSources(moe::SourceAllLegacy);
@@ -183,11 +170,11 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
if (component == logger::LogComponent::kDefault) {
continue;
}
- options->addOptionChaining("systemLog.component." + component.getDottedName() +
- ".verbosity",
- "",
- moe::Int,
- "set component verbose level for " + component.getDottedName())
+ options
+ ->addOptionChaining("systemLog.component." + component.getDottedName() + ".verbosity",
+ "",
+ moe::Int,
+ "set component verbose level for " + component.getDottedName())
.setSources(moe::SourceYAMLConfig);
}
@@ -207,36 +194,39 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
options->addOptionChaining(
"net.maxIncomingConnections", "maxConns", moe::Int, maxConnInfoBuilder.str().c_str());
- options->addOptionChaining(
- "logpath",
- "logpath",
- moe::String,
- "log file to send write to instead of stdout - has to be a file, not directory")
+ options
+ ->addOptionChaining(
+ "logpath",
+ "logpath",
+ moe::String,
+ "log file to send write to instead of stdout - has to be a file, not directory")
.setSources(moe::SourceAllLegacy)
.incompatibleWith("syslog");
options
->addOptionChaining(
- "systemLog.path",
- "",
- moe::String,
- "log file to send writes to if logging to a file - has to be a file, not directory")
+ "systemLog.path",
+ "",
+ moe::String,
+ "log file to send writes to if logging to a file - has to be a file, not directory")
.setSources(moe::SourceYAMLConfig)
.hidden();
- options->addOptionChaining("systemLog.destination",
- "",
- moe::String,
- "Destination of system log output. (syslog/file)")
+ options
+ ->addOptionChaining("systemLog.destination",
+ "",
+ moe::String,
+ "Destination of system log output. (syslog/file)")
.setSources(moe::SourceYAMLConfig)
.hidden()
.format("(:?syslog)|(:?file)", "(syslog/file)");
#ifndef _WIN32
- options->addOptionChaining("syslog",
- "syslog",
- moe::Switch,
- "log to system's syslog facility instead of file or stdout")
+ options
+ ->addOptionChaining("syslog",
+ "syslog",
+ moe::Switch,
+ "log to system's syslog facility instead of file or stdout")
.incompatibleWith("logpath")
.setSources(moe::SourceAllLegacy);
@@ -267,10 +257,10 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
moe::String,
"full path to pidfile (if not set, no pidfile is created)");
- options->addOptionChaining("security.keyFile",
- "keyFile",
- moe::String,
- "private key for cluster authentication").incompatibleWith("noauth");
+ options
+ ->addOptionChaining(
+ "security.keyFile", "keyFile", moe::String, "private key for cluster authentication")
+ .incompatibleWith("noauth");
options->addOptionChaining("noauth", "noauth", moe::Switch, "run without security")
.setSources(moe::SourceAllLegacy)
@@ -279,46 +269,52 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
.incompatibleWith("transitionToAuth")
.incompatibleWith("clusterAuthMode");
- options->addOptionChaining(
- "setParameter", "setParameter", moe::StringMap, "Set a configurable parameter")
+ options
+ ->addOptionChaining(
+ "setParameter", "setParameter", moe::StringMap, "Set a configurable parameter")
.composing();
- options->addOptionChaining(
- "httpinterface", "httpinterface", moe::Switch, "enable http interface")
+ options
+ ->addOptionChaining("httpinterface", "httpinterface", moe::Switch, "enable http interface")
.setSources(moe::SourceAllLegacy)
.incompatibleWith("nohttpinterface");
options->addOptionChaining("net.http.enabled", "", moe::Bool, "enable http interface")
.setSources(moe::SourceYAMLConfig);
- options->addOptionChaining(
- "net.http.port", "", moe::Switch, "port to listen on for http interface")
+ options
+ ->addOptionChaining(
+ "net.http.port", "", moe::Switch, "port to listen on for http interface")
.setSources(moe::SourceYAMLConfig);
- options->addOptionChaining(
- "security.transitionToAuth",
- "transitionToAuth",
- moe::Switch,
- "For rolling access control upgrade. Attempt to authenticate over outgoing "
- "connections and proceed regardless of success. Accept incoming connections "
- "with or without authentication.").incompatibleWith("noauth");
+ options
+ ->addOptionChaining(
+ "security.transitionToAuth",
+ "transitionToAuth",
+ moe::Switch,
+ "For rolling access control upgrade. Attempt to authenticate over outgoing "
+ "connections and proceed regardless of success. Accept incoming connections "
+ "with or without authentication.")
+ .incompatibleWith("noauth");
- options->addOptionChaining(
- "security.clusterAuthMode",
- "clusterAuthMode",
- moe::String,
- "Authentication mode used for cluster authentication. Alternatives are "
- "(keyFile|sendKeyFile|sendX509|x509)")
+ options
+ ->addOptionChaining("security.clusterAuthMode",
+ "clusterAuthMode",
+ moe::String,
+ "Authentication mode used for cluster authentication. Alternatives are "
+ "(keyFile|sendKeyFile|sendX509|x509)")
.format("(:?keyFile)|(:?sendKeyFile)|(:?sendX509)|(:?x509)",
"(keyFile/sendKeyFile/sendX509/x509)");
#ifndef _WIN32
- options->addOptionChaining(
- "nounixsocket", "nounixsocket", moe::Switch, "disable listening on unix sockets")
+ options
+ ->addOptionChaining(
+ "nounixsocket", "nounixsocket", moe::Switch, "disable listening on unix sockets")
.setSources(moe::SourceAllLegacy);
- options->addOptionChaining(
- "net.unixDomainSocket.enabled", "", moe::Bool, "disable listening on unix sockets")
+ options
+ ->addOptionChaining(
+ "net.unixDomainSocket.enabled", "", moe::Bool, "disable listening on unix sockets")
.setSources(moe::SourceYAMLConfig);
options->addOptionChaining("net.unixDomainSocket.pathPrefix",
@@ -344,45 +340,52 @@ Status addGeneralServerOptions(moe::OptionSection* options) {
}
// Extra hidden options
- options->addOptionChaining(
- "nohttpinterface", "nohttpinterface", moe::Switch, "disable http interface")
+ options
+ ->addOptionChaining(
+ "nohttpinterface", "nohttpinterface", moe::Switch, "disable http interface")
.hidden()
.setSources(moe::SourceAllLegacy)
.incompatibleWith("httpinterface");
- options->addOptionChaining("objcheck",
- "objcheck",
- moe::Switch,
- "inspect client data for validity on receipt (DEFAULT)")
+ options
+ ->addOptionChaining("objcheck",
+ "objcheck",
+ moe::Switch,
+ "inspect client data for validity on receipt (DEFAULT)")
.hidden()
.setSources(moe::SourceAllLegacy)
.incompatibleWith("noobjcheck");
- options->addOptionChaining("noobjcheck",
- "noobjcheck",
- moe::Switch,
- "do NOT inspect client data for validity on receipt")
+ options
+ ->addOptionChaining("noobjcheck",
+ "noobjcheck",
+ moe::Switch,
+ "do NOT inspect client data for validity on receipt")
.hidden()
.setSources(moe::SourceAllLegacy)
.incompatibleWith("objcheck");
- options->addOptionChaining("net.wireObjectCheck",
- "",
- moe::Bool,
- "inspect client data for validity on receipt (DEFAULT)")
+ options
+ ->addOptionChaining("net.wireObjectCheck",
+ "",
+ moe::Bool,
+ "inspect client data for validity on receipt (DEFAULT)")
.hidden()
.setSources(moe::SourceYAMLConfig);
- options->addOptionChaining("systemLog.traceAllExceptions",
- "traceExceptions",
- moe::Switch,
- "log stack traces for every exception").hidden();
+ options
+ ->addOptionChaining("systemLog.traceAllExceptions",
+ "traceExceptions",
+ moe::Switch,
+ "log stack traces for every exception")
+ .hidden();
- options->addOptionChaining("enableExperimentalStorageDetailsCmd",
- "enableExperimentalStorageDetailsCmd",
- moe::Switch,
- "EXPERIMENTAL (UNSUPPORTED). "
- "Enable command computing aggregate statistics on storage.")
+ options
+ ->addOptionChaining("enableExperimentalStorageDetailsCmd",
+ "enableExperimentalStorageDetailsCmd",
+ moe::Switch,
+ "EXPERIMENTAL (UNSUPPORTED). "
+ "Enable command computing aggregate statistics on storage.")
.hidden()
.setSources(moe::SourceAllLegacy);
@@ -396,11 +399,12 @@ Status addWindowsServerOptions(moe::OptionSection* options) {
options->addOptionChaining("remove", "remove", moe::Switch, "remove Windows service")
.setSources(moe::SourceAllLegacy);
- options->addOptionChaining(
- "reinstall",
- "reinstall",
- moe::Switch,
- "reinstall Windows service (equivalent to --remove followed by --install)")
+ options
+ ->addOptionChaining(
+ "reinstall",
+ "reinstall",
+ moe::Switch,
+ "reinstall Windows service (equivalent to --remove followed by --install)")
.setSources(moe::SourceAllLegacy);
options->addOptionChaining("processManagement.windowsService.serviceName",
diff --git a/src/mongo/db/server_parameters.h b/src/mongo/db/server_parameters.h
index 9415d3470ab..9ce8d8b76be 100644
--- a/src/mongo/db/server_parameters.h
+++ b/src/mongo/db/server_parameters.h
@@ -30,8 +30,8 @@
#pragma once
-#include <string>
#include <map>
+#include <string>
#include "mongo/base/status.h"
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/db/service_context_d.cpp b/src/mongo/db/service_context_d.cpp
index 03d2f8ff7f1..1f16d4c0090 100644
--- a/src/mongo/db/service_context_d.cpp
+++ b/src/mongo/db/service_context_d.cpp
@@ -85,7 +85,9 @@ void ServiceContextMongoD::createLockFile() {
} catch (const std::exception& ex) {
uassert(28596,
str::stream() << "Unable to determine status of lock file in the data directory "
- << storageGlobalParams.dbpath << ": " << ex.what(),
+ << storageGlobalParams.dbpath
+ << ": "
+ << ex.what(),
false);
}
bool wasUnclean = _lockFile->createdByUncleanShutdown();
@@ -127,12 +129,14 @@ void ServiceContextMongoD::initializeGlobalStorageEngine() {
if (factory) {
uassert(28662,
- str::stream()
- << "Cannot start server. Detected data files in " << dbpath
- << " created by"
- << " the '" << *existingStorageEngine << "' storage engine, but the"
- << " specified storage engine was '" << factory->getCanonicalName()
- << "'.",
+ str::stream() << "Cannot start server. Detected data files in " << dbpath
+ << " created by"
+ << " the '"
+ << *existingStorageEngine
+ << "' storage engine, but the"
+ << " specified storage engine was '"
+ << factory->getCanonicalName()
+ << "'.",
factory->getCanonicalName() == *existingStorageEngine);
}
} else {
@@ -164,7 +168,8 @@ void ServiceContextMongoD::initializeGlobalStorageEngine() {
uassert(34368,
str::stream()
<< "Server was started in read-only mode, but the configured storage engine, "
- << storageGlobalParams.engine << ", does not support read-only operation",
+ << storageGlobalParams.engine
+ << ", does not support read-only operation",
factory->supportsReadOnly());
}
diff --git a/src/mongo/db/service_context_noop.cpp b/src/mongo/db/service_context_noop.cpp
index 2703edea8ff..4e9b67fc28f 100644
--- a/src/mongo/db/service_context_noop.cpp
+++ b/src/mongo/db/service_context_noop.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/service_context_noop.h"
-#include "mongo/db/operation_context_noop.h"
#include "mongo/db/op_observer.h"
+#include "mongo/db/operation_context_noop.h"
#include "mongo/stdx/memory.h"
namespace mongo {
diff --git a/src/mongo/db/sorter/sorter.cpp b/src/mongo/db/sorter/sorter.cpp
index fda8ee146ed..7f6e8866562 100644
--- a/src/mongo/db/sorter/sorter.cpp
+++ b/src/mongo/db/sorter/sorter.cpp
@@ -146,8 +146,7 @@ public:
/// Any number of values
template <typename Container>
- InMemIterator(const Container& input)
- : _data(input.begin(), input.end()) {}
+ InMemIterator(const Container& input) : _data(input.begin(), input.end()) {}
bool more() {
return !_data.empty();
@@ -167,7 +166,8 @@ template <typename Key, typename Value>
class FileIterator : public SortIteratorInterface<Key, Value> {
public:
typedef std::pair<typename Key::SorterDeserializeSettings,
- typename Value::SorterDeserializeSettings> Settings;
+ typename Value::SorterDeserializeSettings>
+ Settings;
typedef std::pair<Key, Value> Data;
FileIterator(const std::string& fileName,
@@ -179,8 +179,8 @@ public:
_fileDeleter(fileDeleter),
_file(_fileName.c_str(), std::ios::in | std::ios::binary) {
massert(16814,
- str::stream() << "error opening file \"" << _fileName
- << "\": " << myErrnoWithDescription(),
+ str::stream() << "error opening file \"" << _fileName << "\": "
+ << myErrnoWithDescription(),
_file.good());
massert(16815,
@@ -274,8 +274,8 @@ private:
}
msgasserted(16817,
- str::stream() << "error reading file \"" << _fileName
- << "\": " << myErrnoWithDescription());
+ str::stream() << "error reading file \"" << _fileName << "\": "
+ << myErrnoWithDescription());
}
verify(_file.gcount() == static_cast<std::streamsize>(size));
}
@@ -419,7 +419,8 @@ public:
typedef std::pair<Key, Value> Data;
typedef SortIteratorInterface<Key, Value> Iterator;
typedef std::pair<typename Key::SorterDeserializeSettings,
- typename Value::SorterDeserializeSettings> Settings;
+ typename Value::SorterDeserializeSettings>
+ Settings;
NoLimitSorter(const SortOptions& opts,
const Comparator& comp,
@@ -489,7 +490,8 @@ private:
// need to be revisited.
uasserted(16819,
str::stream()
- << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
+ << "Sort exceeded memory limit of "
+ << _opts.maxMemoryUsageBytes
<< " bytes, but did not opt in to external sorting. Aborting operation."
<< " Pass allowDiskUse:true to opt in.");
}
@@ -569,7 +571,8 @@ public:
typedef std::pair<Key, Value> Data;
typedef SortIteratorInterface<Key, Value> Iterator;
typedef std::pair<typename Key::SorterDeserializeSettings,
- typename Value::SorterDeserializeSettings> Settings;
+ typename Value::SorterDeserializeSettings>
+ Settings;
TopKSorter(const SortOptions& opts,
const Comparator& comp,
@@ -765,7 +768,8 @@ private:
// need to be revisited.
uasserted(16820,
str::stream()
- << "Sort exceeded memory limit of " << _opts.maxMemoryUsageBytes
+ << "Sort exceeded memory limit of "
+ << _opts.maxMemoryUsageBytes
<< " bytes, but did not opt in to external sorting. Aborting operation."
<< " Pass allowDiskUse:true to opt in.");
}
@@ -840,8 +844,8 @@ SortedFileWriter<Key, Value>::SortedFileWriter(const SortOptions& opts, const Se
_file.open(_fileName.c_str(), std::ios::binary | std::ios::out);
massert(16818,
- str::stream() << "error opening file \"" << _fileName
- << "\": " << sorter::myErrnoWithDescription(),
+ str::stream() << "error opening file \"" << _fileName << "\": "
+ << sorter::myErrnoWithDescription(),
_file.good());
_fileDeleter = std::make_shared<sorter::FileDeleter>(_fileName);
@@ -905,8 +909,8 @@ void SortedFileWriter<Key, Value>::spill() {
} catch (const std::exception&) {
msgasserted(16821,
- str::stream() << "error writing to file \"" << _fileName
- << "\": " << sorter::myErrnoWithDescription());
+ str::stream() << "error writing to file \"" << _fileName << "\": "
+ << sorter::myErrnoWithDescription());
}
_buffer.reset();
diff --git a/src/mongo/db/sorter/sorter.h b/src/mongo/db/sorter/sorter.h
index ba6f3e3192f..54f19dd0197 100644
--- a/src/mongo/db/sorter/sorter.h
+++ b/src/mongo/db/sorter/sorter.h
@@ -159,7 +159,8 @@ public:
typedef std::pair<Key, Value> Data;
typedef SortIteratorInterface<Key, Value> Iterator;
typedef std::pair<typename Key::SorterDeserializeSettings,
- typename Value::SorterDeserializeSettings> Settings;
+ typename Value::SorterDeserializeSettings>
+ Settings;
template <typename Comparator>
static Sorter* make(const SortOptions& opts,
@@ -187,7 +188,8 @@ class SortedFileWriter {
public:
typedef SortIteratorInterface<Key, Value> Iterator;
typedef std::pair<typename Key::SorterDeserializeSettings,
- typename Value::SorterDeserializeSettings> Settings;
+ typename Value::SorterDeserializeSettings>
+ Settings;
explicit SortedFileWriter(const SortOptions& opts, const Settings& settings = Settings());
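The repeated Settings hunks in sorter.cpp and sorter.h show how clang-format 3.8 treats a typedef whose aliased type itself wraps: the introduced name is pushed to its own indented line after the closing '>'. A self-contained sketch, with placeholder KeySettings and ValueSettings types standing in for the Sorter traits:

#include <utility>

struct KeySettings {};
struct ValueSettings {};

// When the aliased type wraps, the typedef'd name drops to its own
// indented line after the closing '>', as in the hunks above.
typedef std::pair<KeySettings,
                  ValueSettings>
    Settings;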
diff --git a/src/mongo/db/sorter/sorter_test.cpp b/src/mongo/db/sorter/sorter_test.cpp
index c0a90a5fc1d..37bf118fe98 100644
--- a/src/mongo/db/sorter/sorter_test.cpp
+++ b/src/mongo/db/sorter/sorter_test.cpp
@@ -32,9 +32,9 @@
#include <boost/filesystem.hpp>
-#include "mongo/config.h"
#include "mongo/base/data_type_endian.h"
#include "mongo/base/init.h"
+#include "mongo/config.h"
#include "mongo/db/service_context.h"
#include "mongo/db/service_context_noop.h"
#include "mongo/stdx/memory.h"
@@ -192,7 +192,7 @@ void _assertIteratorsEquivalent(It1 it1, It2 it2, int line) {
#define ASSERT_ITERATORS_EQUIVALENT(it1, it2) _assertIteratorsEquivalent(it1, it2, __LINE__)
template <int N>
-std::shared_ptr<IWIterator> makeInMemIterator(const int(&array)[N]) {
+std::shared_ptr<IWIterator> makeInMemIterator(const int (&array)[N]) {
std::vector<IWPair> vec;
for (int i = 0; i < N; i++)
vec.push_back(IWPair(array[i], -array[i]));
@@ -200,7 +200,7 @@ std::shared_ptr<IWIterator> makeInMemIterator(const int(&array)[N]) {
}
template <typename IteratorPtr, int N>
-std::shared_ptr<IWIterator> mergeIterators(IteratorPtr(&array)[N],
+std::shared_ptr<IWIterator> mergeIterators(IteratorPtr (&array)[N],
Direction Dir = ASC,
const SortOptions& opts = SortOptions()) {
std::vector<std::shared_ptr<IWIterator>> vec;
diff --git a/src/mongo/db/startup_warnings_common.cpp b/src/mongo/db/startup_warnings_common.cpp
index f5e3706357d..d1e69ead044 100644
--- a/src/mongo/db/startup_warnings_common.cpp
+++ b/src/mongo/db/startup_warnings_common.cpp
@@ -79,7 +79,8 @@ void logCommonStartupWarnings(const ServerGlobalParams& serverParams) {
log() << "** WARNING: Access control is not enabled for the database."
<< startupWarningsLog;
log() << "** Read and write access to data and configuration is "
- "unrestricted." << startupWarningsLog;
+ "unrestricted."
+ << startupWarningsLog;
warned = true;
}
diff --git a/src/mongo/db/startup_warnings_mongod.cpp b/src/mongo/db/startup_warnings_mongod.cpp
index f40e3914b4e..30c8b261fdc 100644
--- a/src/mongo/db/startup_warnings_mongod.cpp
+++ b/src/mongo/db/startup_warnings_mongod.cpp
@@ -41,8 +41,8 @@
#include "mongo/db/server_options.h"
#include "mongo/db/startup_warnings_common.h"
#include "mongo/db/storage/storage_options.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
#include "mongo/util/processinfo.h"
#include "mongo/util/version.h"
@@ -109,9 +109,9 @@ StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter
opMode = line.substr(posBegin + 1, posEnd - posBegin - 1);
if (opMode.empty()) {
- return StatusWith<std::string>(ErrorCodes::BadValue,
- str::stream() << "invalid mode in " << filename << ": '"
- << line << "'");
+ return StatusWith<std::string>(
+ ErrorCodes::BadValue,
+ str::stream() << "invalid mode in " << filename << ": '" << line << "'");
}
// Check against acceptable values of opMode.
@@ -120,12 +120,16 @@ StatusWith<std::string> StartupWarningsMongod::readTransparentHugePagesParameter
ErrorCodes::BadValue,
str::stream()
<< "** WARNING: unrecognized transparent Huge Pages mode of operation in "
- << filename << ": '" << opMode << "''");
+ << filename
+ << ": '"
+ << opMode
+ << "''");
}
} catch (const boost::filesystem::filesystem_error& err) {
return StatusWith<std::string>(ErrorCodes::UnknownError,
str::stream() << "Failed to probe \"" << err.path1().string()
- << "\": " << err.code().message());
+ << "\": "
+ << err.code().message());
}
return StatusWith<std::string>(opMode);
diff --git a/src/mongo/db/stats/counters.h b/src/mongo/db/stats/counters.h
index cae0cb0ad16..fe86aa16d67 100644
--- a/src/mongo/db/stats/counters.h
+++ b/src/mongo/db/stats/counters.h
@@ -29,12 +29,12 @@
#pragma once
-#include "mongo/platform/basic.h"
#include "mongo/db/jsobj.h"
#include "mongo/platform/atomic_word.h"
+#include "mongo/platform/basic.h"
+#include "mongo/util/concurrency/spin_lock.h"
#include "mongo/util/net/message.h"
#include "mongo/util/processinfo.h"
-#include "mongo/util/concurrency/spin_lock.h"
namespace mongo {
diff --git a/src/mongo/db/stats/timer_stats_test.cpp b/src/mongo/db/stats/timer_stats_test.cpp
index cd755386329..c1284c55357 100644
--- a/src/mongo/db/stats/timer_stats_test.cpp
+++ b/src/mongo/db/stats/timer_stats_test.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/stats/timer_stats.h"
-#include "mongo/util/time_support.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/time_support.h"
namespace {
diff --git a/src/mongo/db/storage/devnull/devnull_init.cpp b/src/mongo/db/storage/devnull/devnull_init.cpp
index afdfecc1457..b1c73dbbbcd 100644
--- a/src/mongo/db/storage/devnull/devnull_init.cpp
+++ b/src/mongo/db/storage/devnull/devnull_init.cpp
@@ -30,8 +30,8 @@
*/
#include "mongo/base/init.h"
-#include "mongo/db/service_context_d.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/service_context_d.h"
#include "mongo/db/storage/devnull/devnull_kv_engine.h"
#include "mongo/db/storage/kv/kv_storage_engine.h"
#include "mongo/db/storage/storage_options.h"
diff --git a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
index a308b6d7984..a036b05c44e 100644
--- a/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
+++ b/src/mongo/db/storage/ephemeral_for_test/ephemeral_for_test_btree_impl.cpp
@@ -35,8 +35,8 @@
#include <set>
#include "mongo/db/catalog/index_catalog_entry.h"
-#include "mongo/db/storage/index_entry_comparison.h"
#include "mongo/db/storage/ephemeral_for_test/ephemeral_for_test_recovery_unit.h"
+#include "mongo/db/storage/index_entry_comparison.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp
index 300929ae41d..9b36c222c53 100644
--- a/src/mongo/db/storage/key_string.cpp
+++ b/src/mongo/db/storage/key_string.cpp
@@ -718,9 +718,9 @@ void KeyString::_appendNumberDecimal(const Decimal128 dec, bool invert) {
// in the normal range of double, so the decimal can be represented with at least 15 digits
// of precision by the double 'bin'
} else if (dec.getCoefficientHigh() == 0 && dec.getCoefficientLow() < k1E15) {
- dassert(Decimal128(std::abs(bin),
- Decimal128::kRoundTo15Digits,
- Decimal128::kRoundTowardPositive).isEqual(dec.toAbs()));
+ dassert(Decimal128(
+ std::abs(bin), Decimal128::kRoundTo15Digits, Decimal128::kRoundTowardPositive)
+ .isEqual(dec.toAbs()));
_appendDoubleWithoutTypeBits(bin, kDCMEqualToDoubleRoundedUpTo15Digits, invert);
return;
} else {
@@ -752,10 +752,10 @@ void KeyString::_appendNumberDecimal(const Decimal128 dec, bool invert) {
// Now we know that we can recover the original decimal value (but not its precision, which is
// given by the type bits) from the binary double plus the decimal continuation.
uint64_t decimalContinuation = decDiff.getCoefficientLow();
- dassert(storedValue.add(Decimal128(isNegative,
- storedValue.getBiasedExponent(),
- 0,
- decimalContinuation)).isEqual(dec));
+ dassert(
+ storedValue
+ .add(Decimal128(isNegative, storedValue.getBiasedExponent(), 0, decimalContinuation))
+ .isEqual(dec));
decimalContinuation = endian::nativeToBig(decimalContinuation);
_append(decimalContinuation, isNegative ? !invert : invert);
}
@@ -976,9 +976,10 @@ void KeyString::_appendTinyDecimalWithoutTypeBits(const Decimal128 dec,
_append(endian::nativeToBig(encoded), isNegative ? !invert : invert);
Decimal128 storedVal(scaledBin, Decimal128::kRoundTo34Digits, Decimal128::kRoundTowardPositive);
- storedVal = storedVal.multiply(kTinyDoubleExponentDownshiftFactorAsDecimal,
- Decimal128::kRoundTowardZero)
- .add(Decimal128::kLargestNegativeExponentZero);
+ storedVal =
+ storedVal
+ .multiply(kTinyDoubleExponentDownshiftFactorAsDecimal, Decimal128::kRoundTowardZero)
+ .add(Decimal128::kLargestNegativeExponentZero);
dassert(storedVal.isLess(magnitude));
Decimal128 decDiff = magnitude.subtract(storedVal);
dassert(decDiff.getBiasedExponent() == storedVal.getBiasedExponent() || decDiff.isZero());
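The key_string.cpp hunks above show member-call chains being re-wrapped: the object expression is left on its own line and each .call() breaks below it, rather than the arguments being split mid-call. A toy compilable version of that shape; Dec is a stand-in for Decimal128, invented for illustration:

#include <cassert>

struct Dec {
    double v;
    Dec multiply(const Dec& other) const { return Dec{v * other.v}; }
    Dec add(const Dec& other) const { return Dec{v + other.v}; }
    bool isEqual(const Dec& other) const { return v == other.v; }
};

int main() {
    Dec storedVal{2.0};
    // The object expression keeps its own line; each .call() breaks
    // below it, mirroring the storedVal.multiply(...).add(...) hunk.
    Dec result =
        storedVal
            .multiply(Dec{3.0})
            .add(Dec{1.0});
    assert(result.isEqual(Dec{7.0}));
}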
diff --git a/src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp b/src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp
index a37eb986256..4bbc373c9c2 100644
--- a/src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp
+++ b/src/mongo/db/storage/kv/kv_collection_catalog_entry_test.cpp
@@ -33,9 +33,9 @@
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/catalog/database_catalog_entry.h"
-#include "mongo/db/operation_context_noop.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/index/multikey_paths.h"
+#include "mongo/db/operation_context_noop.h"
#include "mongo/db/storage/devnull/devnull_kv_engine.h"
#include "mongo/db/storage/kv/kv_engine.h"
#include "mongo/db/storage/kv/kv_storage_engine.h"
@@ -105,7 +105,8 @@ public:
bool match = (expected == actual);
if (!match) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expected) << ", "
- << "Actual: " << dumpMultikeyPaths(actual));
+ << "Actual: "
+ << dumpMultikeyPaths(actual));
}
ASSERT(match);
}
diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
index 6968d06d063..98f00ff1d07 100644
--- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
+++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/storage/kv/kv_engine_test_harness.h"
-#include "mongo/db/operation_context_noop.h"
#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/operation_context_noop.h"
#include "mongo/db/storage/kv/kv_catalog.h"
#include "mongo/db/storage/kv/kv_engine.h"
#include "mongo/db/storage/record_store.h"
diff --git a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
index ff42dd15c16..5a3914ea072 100644
--- a/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
+++ b/src/mongo/db/storage/mmap_v1/btree/btree_logic.cpp
@@ -38,8 +38,8 @@
#include "mongo/db/storage/mmap_v1/btree/btree_logic.h"
#include "mongo/db/storage/mmap_v1/btree/key.h"
#include "mongo/db/storage/mmap_v1/diskloc.h"
-#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/mmap_v1/record_store_v1_base.h"
+#include "mongo/db/storage/record_store.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -396,7 +396,8 @@ bool BtreeLogic<BtreeLayout>::pushBack(BucketType* bucket,
const FullKey klast = getFullKey(bucket, bucket->n - 1);
if (klast.data.woCompare(key, _ordering) > 0) {
log() << "btree bucket corrupt? "
- "consider reindexing or running validate command" << endl;
+ "consider reindexing or running validate command"
+ << endl;
log() << " klast: " << klast.data.toString() << endl;
log() << " key: " << key.toString() << endl;
invariant(false);
diff --git a/src/mongo/db/storage/mmap_v1/catalog/hashtab.h b/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
index ff2e889a202..ecd0f8c4b4e 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
+++ b/src/mongo/db/storage/mmap_v1/catalog/hashtab.h
@@ -28,9 +28,9 @@
#pragma once
+#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/catalog/namespace.h"
#include "mongo/db/storage/mmap_v1/catalog/namespace_details.h"
-#include "mongo/db/operation_context.h"
#include "mongo/stdx/functional.h"
namespace mongo {
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
index 915e3a7e44d..dc270d813b2 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details.cpp
@@ -42,10 +42,10 @@
#include "mongo/db/db.h"
#include "mongo/db/index_legacy.h"
#include "mongo/db/json.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/ops/delete.h"
#include "mongo/db/ops/update.h"
#include "mongo/db/storage/mmap_v1/catalog/namespace_index.h"
-#include "mongo/db/operation_context.h"
#include "mongo/scripting/engine.h"
#include "mongo/util/startup_test.h"
diff --git a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
index cc5e57e0868..acc2460f1ec 100644
--- a/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.cpp
@@ -401,7 +401,8 @@ void NamespaceDetailsCollectionCatalogEntry::updateValidator(OperationContext* t
_updateSystemNamespaces(
txn,
BSON("$set" << BSON("options.validator" << validator << "options.validationLevel"
- << validationLevel << "options.validationAction"
+ << validationLevel
+ << "options.validationAction"
<< validationAction)));
}
diff --git a/src/mongo/db/storage/mmap_v1/data_file.cpp b/src/mongo/db/storage/mmap_v1/data_file.cpp
index 64fb1e64066..b5fc8bf4d6c 100644
--- a/src/mongo/db/storage/mmap_v1/data_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file.cpp
@@ -38,11 +38,11 @@
#include <utility>
#include <vector>
+#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/storage/mmap_v1/durable_mapped_file.h"
-#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
-#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/file_allocator.h"
+#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/util/log.h"
namespace mongo {
@@ -211,7 +211,9 @@ void DataFileHeader::init(OperationContext* txn, int fileno, int filelength, con
massert(13640,
str::stream() << "DataFileHeader looks corrupt at file open filelength:"
- << filelength << " fileno:" << fileno,
+ << filelength
+ << " fileno:"
+ << fileno,
filelength > 32768);
// The writes done in this function must not be rolled back. If the containing
diff --git a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
index 4fafae825ea..61e52b5dedf 100644
--- a/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
+++ b/src/mongo/db/storage/mmap_v1/data_file_sync.cpp
@@ -34,8 +34,8 @@
#include "mongo/db/client.h"
#include "mongo/db/commands/server_status_metric.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/instance.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/mmap_v1/dur_journal.h"
#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
diff --git a/src/mongo/db/storage/mmap_v1/dur.cpp b/src/mongo/db/storage/mmap_v1/dur.cpp
index db74bd99ea8..9bd44ba3ee3 100644
--- a/src/mongo/db/storage/mmap_v1/dur.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur.cpp
@@ -488,10 +488,15 @@ void Stats::S::_asObj(BSONObjBuilder* builder) const {
<< _journaledBytes / (_uncompressedBytes + 1.0) << "commitsInWriteLock" << _commitsInWriteLock
<< "earlyCommits" << 0 << "timeMs"
<< BSON("dt" << _durationMillis << "prepLogBuffer" << (unsigned)(_prepLogBufferMicros / 1000)
- << "writeToJournal" << (unsigned)(_writeToJournalMicros / 1000)
- << "writeToDataFiles" << (unsigned)(_writeToDataFilesMicros / 1000)
- << "remapPrivateView" << (unsigned)(_remapPrivateViewMicros / 1000) << "commits"
- << (unsigned)(_commitsMicros / 1000) << "commitsInWriteLock"
+ << "writeToJournal"
+ << (unsigned)(_writeToJournalMicros / 1000)
+ << "writeToDataFiles"
+ << (unsigned)(_writeToDataFilesMicros / 1000)
+ << "remapPrivateView"
+ << (unsigned)(_remapPrivateViewMicros / 1000)
+ << "commits"
+ << (unsigned)(_commitsMicros / 1000)
+ << "commitsInWriteLock"
<< (unsigned)(_commitsInWriteLockMicros / 1000));
if (storageGlobalParams.journalCommitIntervalMs != 0) {
diff --git a/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp b/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
index aff01c1c7bf..6a8ca62f15d 100644
--- a/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_commitjob.cpp
@@ -37,8 +37,8 @@
#include <iostream>
#include "mongo/db/storage/mmap_v1/dur.h"
-#include "mongo/db/storage/mmap_v1/durable_mapped_file.h"
#include "mongo/db/storage/mmap_v1/dur_stats.h"
+#include "mongo/db/storage/mmap_v1/durable_mapped_file.h"
#include "mongo/util/log.h"
#include "mongo/util/stacktrace.h"
diff --git a/src/mongo/db/storage/mmap_v1/dur_journal.cpp b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
index 524085a87e9..91a6d6fd569 100644
--- a/src/mongo/db/storage/mmap_v1/dur_journal.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_journal.cpp
@@ -40,13 +40,13 @@
#include "mongo/base/init.h"
#include "mongo/config.h"
#include "mongo/db/client.h"
-#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/aligned_builder.h"
#include "mongo/db/storage/mmap_v1/compress.h"
#include "mongo/db/storage/mmap_v1/dur_journalformat.h"
#include "mongo/db/storage/mmap_v1/dur_journalimpl.h"
#include "mongo/db/storage/mmap_v1/dur_stats.h"
#include "mongo/db/storage/mmap_v1/logfile.h"
+#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/db/storage/paths.h"
#include "mongo/db/storage/storage_options.h"
@@ -569,10 +569,10 @@ void LSNFile::set(unsigned long long x) {
if something highly surprising, throws to abort
*/
unsigned long long LSNFile::get() {
- uassert(
- 13614,
- str::stream() << "unexpected version number of lsn file in journal/ directory got: " << ver,
- ver == 0);
+ uassert(13614,
+ str::stream() << "unexpected version number of lsn file in journal/ directory got: "
+ << ver,
+ ver == 0);
if (~lsn != checkbytes) {
log() << "lsnfile not valid. recovery will be from log start. lsn: " << hex << lsn
<< " checkbytes: " << hex << checkbytes << endl;
diff --git a/src/mongo/db/storage/mmap_v1/dur_recover.cpp b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
index dfd429d0713..15e7e994b38 100644
--- a/src/mongo/db/storage/mmap_v1/dur_recover.cpp
+++ b/src/mongo/db/storage/mmap_v1/dur_recover.cpp
@@ -47,8 +47,8 @@
#include "mongo/db/storage/mmap_v1/dur_journal.h"
#include "mongo/db/storage/mmap_v1/dur_journalformat.h"
#include "mongo/db/storage/mmap_v1/dur_stats.h"
-#include "mongo/db/storage/mmap_v1/durop.h"
#include "mongo/db/storage/mmap_v1/durable_mapped_file.h"
+#include "mongo/db/storage/mmap_v1/durop.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/platform/strnlen.h"
#include "mongo/util/bufreader.h"
@@ -92,7 +92,7 @@ void removeJournalFiles();
boost::filesystem::path getJournalDir();
-struct ParsedJournalEntry {/*copyable*/
+struct ParsedJournalEntry { /*copyable*/
ParsedJournalEntry() : e(0) {}
// relative path of database for the operation.
@@ -121,7 +121,8 @@ static void getFiles(boost::filesystem::path dir, vector<boost::filesystem::path
if (m.count(u)) {
uasserted(13531,
str::stream() << "unexpected files in journal directory " << dir.string()
- << " : " << fileName);
+ << " : "
+ << fileName);
}
m.insert(pair<unsigned, boost::filesystem::path>(u, filepath));
}
@@ -130,7 +131,8 @@ static void getFiles(boost::filesystem::path dir, vector<boost::filesystem::path
if (i != m.begin() && m.count(i->first - 1) == 0) {
uasserted(13532,
str::stream() << "unexpected file in journal directory " << dir.string()
- << " : " << boost::filesystem::path(i->second).leaf().string()
+ << " : "
+ << boost::filesystem::path(i->second).leaf().string()
<< " : can't find its preceding file");
}
files.push_back(i->second);
@@ -489,7 +491,8 @@ bool RecoveryJob::processFileBuffer(const void* p, unsigned len) {
log() << "journal file version number mismatch got:" << hex << h._version
<< " expected:" << hex << (unsigned)JHeader::CurrentVersion
<< ". if you have just upgraded, recover with old version of mongod, "
- "terminate cleanly, then upgrade." << endl;
+ "terminate cleanly, then upgrade."
+ << endl;
// Not using JournalSectionCurruptException as we don't want to ignore
// journal files on upgrade.
uasserted(13536, str::stream() << "journal version number mismatch " << h._version);
diff --git a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
index 6eb2f82dcee..ff8add6cb91 100644
--- a/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
+++ b/src/mongo/db/storage/mmap_v1/durable_mapped_file.cpp
@@ -46,8 +46,8 @@
#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/storage/mmap_v1/dur_journalformat.h"
#include "mongo/db/storage/storage_options.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
#include "mongo/util/processinfo.h"
using namespace mongoutils;
diff --git a/src/mongo/db/storage/mmap_v1/durop.cpp b/src/mongo/db/storage/mmap_v1/durop.cpp
index 0ea1949ad12..627d53df05d 100644
--- a/src/mongo/db/storage/mmap_v1/durop.cpp
+++ b/src/mongo/db/storage/mmap_v1/durop.cpp
@@ -39,9 +39,9 @@
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/storage/mmap_v1/aligned_builder.h"
#include "mongo/db/storage/mmap_v1/durable_mapped_file.h"
+#include "mongo/db/storage/mmap_v1/file_allocator.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_engine.h"
#include "mongo/util/file.h"
-#include "mongo/db/storage/mmap_v1/file_allocator.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/storage/mmap_v1/durop.h b/src/mongo/db/storage/mmap_v1/durop.h
index 50ddc33318a..98aaf8406c3 100644
--- a/src/mongo/db/storage/mmap_v1/durop.h
+++ b/src/mongo/db/storage/mmap_v1/durop.h
@@ -49,7 +49,7 @@ namespace dur {
*
* For each op we want to journal, we define a subclass.
*/
-class DurOp {/* copyable */
+class DurOp { /* copyable */
public:
// @param opcode a sentinel value near max unsigned which uniquely identifies the operation.
// @see dur::JEntry
diff --git a/src/mongo/db/storage/mmap_v1/extent.cpp b/src/mongo/db/storage/mmap_v1/extent.cpp
index 7b92551fa8a..fb134504f10 100644
--- a/src/mongo/db/storage/mmap_v1/extent.cpp
+++ b/src/mongo/db/storage/mmap_v1/extent.cpp
@@ -44,8 +44,14 @@ static_assert(sizeof(Extent) - 4 == 48 + 128, "sizeof(Extent) - 4 == 48 + 128");
BSONObj Extent::dump() const {
return BSON("loc" << myLoc.toString() << "xnext" << xnext.toString() << "xprev"
- << xprev.toString() << "nsdiag" << nsDiagnostic.toString() << "size" << length
- << "firstRecord" << firstRecord.toString() << "lastRecord"
+ << xprev.toString()
+ << "nsdiag"
+ << nsDiagnostic.toString()
+ << "size"
+ << length
+ << "firstRecord"
+ << firstRecord.toString()
+ << "lastRecord"
<< lastRecord.toString());
}
diff --git a/src/mongo/db/storage/mmap_v1/file_allocator.cpp b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
index 0be9d157481..7a630ea3118 100644
--- a/src/mongo/db/storage/mmap_v1/file_allocator.cpp
+++ b/src/mongo/db/storage/mmap_v1/file_allocator.cpp
@@ -38,8 +38,8 @@
#include <fcntl.h>
#if defined(__FreeBSD__)
-#include <sys/param.h>
#include <sys/mount.h>
+#include <sys/param.h>
#endif
#if defined(__linux__)
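
[editor's note] The include moves here and in most files below come from include sorting (presumably clang-format's SortIncludes option): within one contiguous include block, paths are ordered lexicographically, and a blank line starts a new block that is sorted independently, which is why <sys/mount.h> now precedes <sys/param.h> while system and project groups keep their separation. A self-contained illustration of the rule using standard headers:

    // Within one block, paths sort lexicographically; the blank line
    // below starts a second block that sorts on its own.
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    #include <algorithm>
    #include <map>
    #include <set>
    #include <vector>

    int main() {
        return 0;
    }
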
diff --git a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
index a67ecd85f3d..94beefc55ae 100644
--- a/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
+++ b/src/mongo/db/storage/mmap_v1/journal_latency_test_cmd.cpp
@@ -44,10 +44,10 @@
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/query/internal_plans.h"
-#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/mmap_v1/aligned_builder.h"
#include "mongo/db/storage/mmap_v1/logfile.h"
#include "mongo/db/storage/paths.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/scripting/engine.h"
#include "mongo/util/background.h"
#include "mongo/util/timer.h"
diff --git a/src/mongo/db/storage/mmap_v1/logfile.cpp b/src/mongo/db/storage/mmap_v1/logfile.cpp
index 6124ce51248..ddc66b8ce2f 100644
--- a/src/mongo/db/storage/mmap_v1/logfile.cpp
+++ b/src/mongo/db/storage/mmap_v1/logfile.cpp
@@ -123,7 +123,9 @@ void LogFile::synchronousAppend(const void* _buf, size_t _len) {
else
uasserted(13517,
str::stream() << "error appending to file " << _name << ' ' << _len << ' '
- << toWrite << ' ' << errnoWithDescription(e));
+ << toWrite
+ << ' '
+ << errnoWithDescription(e));
} else {
dassert(written == toWrite);
}
@@ -137,10 +139,10 @@ void LogFile::synchronousAppend(const void* _buf, size_t _len) {
/// posix
-#include <sys/types.h>
-#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
#ifdef __linux__
#include <linux/fs.h>
diff --git a/src/mongo/db/storage/mmap_v1/mmap.cpp b/src/mongo/db/storage/mmap_v1/mmap.cpp
index 90cda10c57e..82d62aba8e9 100644
--- a/src/mongo/db/storage/mmap_v1/mmap.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap.cpp
@@ -103,7 +103,9 @@ void* MemoryMappedFile::map(const char* filename) {
} catch (boost::filesystem::filesystem_error& e) {
uasserted(15922,
mongoutils::str::stream() << "couldn't get file length when opening mapping "
- << filename << ' ' << e.what());
+ << filename
+ << ' '
+ << e.what());
}
return map(filename, l);
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_posix.cpp b/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
index a5fdc361694..382860f3556 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_posix.cpp
@@ -37,10 +37,10 @@
#include <sys/stat.h>
#include <sys/types.h>
-#include "mongo/platform/atomic_word.h"
#include "mongo/db/concurrency/d_concurrency.h"
#include "mongo/db/storage/mmap_v1/file_allocator.h"
#include "mongo/db/storage/mmap_v1/mmap.h"
+#include "mongo/platform/atomic_word.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/processinfo.h"
@@ -162,7 +162,8 @@ void* MemoryMappedFile::map(const char* filename, unsigned long long& length) {
unsigned long long filelen = lseek(fd, 0, SEEK_END);
uassert(10447,
str::stream() << "map file alloc failed, wanted: " << length << " filelen: " << filelen
- << ' ' << sizeof(size_t),
+ << ' '
+ << sizeof(size_t),
filelen == length);
lseek(fd, 0, SEEK_SET);
@@ -174,7 +175,8 @@ void* MemoryMappedFile::map(const char* filename, unsigned long long& length) {
if (errno == ENOMEM) {
if (sizeof(void*) == 4)
error() << "mmap failed with out of memory. You are using a 32-bit build and "
- "probably need to upgrade to 64" << endl;
+ "probably need to upgrade to 64"
+ << endl;
else
error() << "mmap failed with out of memory. (64 bit build)" << endl;
}
@@ -202,7 +204,8 @@ void* MemoryMappedFile::createPrivateMap() {
if (errno == ENOMEM) {
if (sizeof(void*) == 4) {
error() << "mmap private failed with out of memory. You are using a 32-bit build "
- "and probably need to upgrade to 64" << endl;
+ "and probably need to upgrade to 64"
+ << endl;
} else {
error() << "mmap private failed with out of memory. (64 bit build)" << endl;
}
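
[editor's note] Several hunks in this file show how adjacent string literals are treated: the implicit concatenation remains a single operand, but after reflowing, the following << moves to its own continuation line instead of trailing the literal. A runnable sketch of the same shape:

    #include <iostream>

    int main() {
        // The two adjacent literals form one operand; the trailing <<
        // starts its own line after the break.
        std::cerr << "mmap failed with out of memory. You are using a 32-bit build and "
                     "probably need to upgrade to 64"
                  << std::endl;
        return 0;
    }
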
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
index 83fda94c14d..37f96019430 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.cpp
@@ -37,10 +37,10 @@
#include "mongo/db/catalog/index_catalog_entry.h"
#include "mongo/db/index/2d_access_method.h"
#include "mongo/db/index/btree_access_method.h"
-#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/fts_access_method.h"
#include "mongo/db/index/hash_access_method.h"
#include "mongo/db/index/haystack_access_method.h"
+#include "mongo/db/index/index_access_method.h"
#include "mongo/db/index/s2_access_method.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/record_id.h"
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
index 3ecb76b9ecb..d02f7da17b0 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h
@@ -34,8 +34,8 @@
#include "mongo/base/status.h"
#include "mongo/base/string_data.h"
#include "mongo/db/catalog/database_catalog_entry.h"
-#include "mongo/db/storage/mmap_v1/catalog/namespace_index.h"
#include "mongo/db/storage/mmap_v1/catalog/namespace_details_collection_entry.h"
+#include "mongo/db/storage/mmap_v1/catalog/namespace_index.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_extent_manager.h"
namespace mongo {
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
index b83e72dc580..e4f4b0340de 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.cpp
@@ -32,22 +32,22 @@
#include "mongo/db/storage/mmap_v1/mmap_v1_engine.h"
-#include <boost/filesystem/path.hpp>
#include <boost/filesystem/operations.hpp>
+#include <boost/filesystem/path.hpp>
#include <fstream>
#include "mongo/db/mongod_options.h"
-#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/data_file_sync.h"
#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/storage/mmap_v1/dur_journal.h"
#include "mongo/db/storage/mmap_v1/dur_recover.h"
#include "mongo/db/storage/mmap_v1/dur_recovery_unit.h"
+#include "mongo/db/storage/mmap_v1/file_allocator.h"
+#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/db/storage/storage_engine_lock_file.h"
#include "mongo/db/storage/storage_options.h"
-#include "mongo/db/storage/mmap_v1/file_allocator.h"
#include "mongo/util/clock_source.h"
#include "mongo/util/log.h"
@@ -132,9 +132,11 @@ void checkForUncleanShutdown(MMAPV1Engine* storageEngine,
if (!storageGlobalParams.dur && dur::haveJournalFiles()) {
log() << "**************" << endl;
log() << "Error: journal files are present in journal directory, yet starting without "
- "journaling enabled." << endl;
+ "journaling enabled."
+ << endl;
log() << "It is recommended that you start with journaling enabled so that recovery may "
- "occur." << endl;
+ "occur."
+ << endl;
log() << "**************" << endl;
uasserted(13597, "can't start without --journal enabled when journal/ files are present");
}
@@ -149,11 +151,14 @@ void checkForUncleanShutdown(MMAPV1Engine* storageEngine,
if (!storageGlobalParams.dur && dur::haveJournalFiles()) {
log() << "**************" << endl;
log() << "Error: journal files are present in journal directory, yet starting without "
- "--journal enabled." << endl;
+ "--journal enabled."
+ << endl;
log() << "It is recommended that you start with journaling enabled so that recovery may "
- "occur." << endl;
+ "occur."
+ << endl;
log() << "Alternatively (not recommended), you can backup everything, then delete the "
- "journal files, and run --repair" << endl;
+ "journal files, and run --repair"
+ << endl;
log() << "**************" << endl;
uasserted(13618, "can't start without --journal enabled when journal/ files are present");
}
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
index 347b6e02d17..80a20ecbb0c 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_engine.h
@@ -32,8 +32,8 @@
#include <map>
-#include "mongo/db/storage/mmap_v1/record_access_tracker.h"
#include "mongo/db/storage/mmap_v1/extent_manager.h"
+#include "mongo/db/storage/mmap_v1/record_access_tracker.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/stdx/mutex.h"
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
index 2076ca868b1..3fcf1205646 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_extent_manager.cpp
@@ -37,17 +37,17 @@
#include "mongo/base/counter.h"
#include "mongo/db/audit.h"
#include "mongo/db/client.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
-#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/storage/mmap_v1/data_file.h"
-#include "mongo/db/storage/mmap_v1/record.h"
+#include "mongo/db/storage/mmap_v1/dur.h"
#include "mongo/db/storage/mmap_v1/extent.h"
#include "mongo/db/storage/mmap_v1/extent_manager.h"
#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_engine.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
+#include "mongo/db/storage/mmap_v1/record.h"
#include "mongo/db/storage/record_fetcher.h"
-#include "mongo/db/operation_context.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/file.h"
diff --git a/src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp b/src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp
index ab1bd378fea..19ec450e1ac 100644
--- a/src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/mmap_v1_init_test.cpp
@@ -29,8 +29,8 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/json.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine_metadata.h"
#include "mongo/db/storage/storage_options.h"
#include "mongo/unittest/unittest.h"
@@ -85,9 +85,13 @@ void _testValidateMetadata(const StorageEngine::Factory* factory,
if (expectedCode != status.code()) {
FAIL(str::stream()
<< "Unexpected StorageEngine::Factory::validateMetadata result. Expected: "
- << ErrorCodes::errorString(expectedCode) << " but got " << status.toString()
- << " instead. metadataOptions: " << metadataOptions
- << "; directoryPerDB: " << directoryPerDB);
+ << ErrorCodes::errorString(expectedCode)
+ << " but got "
+ << status.toString()
+ << " instead. metadataOptions: "
+ << metadataOptions
+ << "; directoryPerDB: "
+ << directoryPerDB);
}
}
diff --git a/src/mongo/db/storage/mmap_v1/record_access_tracker.cpp b/src/mongo/db/storage/mmap_v1/record_access_tracker.cpp
index 535bc3fc447..f9725f6a104 100644
--- a/src/mongo/db/storage/mmap_v1/record_access_tracker.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_access_tracker.cpp
@@ -36,8 +36,8 @@
#include "mongo/config.h"
#include "mongo/db/storage/mmap_v1/record.h"
#include "mongo/platform/bits.h"
-#include "mongo/util/concurrency/threadlocal.h"
#include "mongo/util/clock_source.h"
+#include "mongo/util/concurrency/threadlocal.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/net/listen.h"
#include "mongo/util/processinfo.h"
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
index c0f2796ee05..7950922afd7 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.cpp
@@ -572,17 +572,17 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
if (_details->firstExtent(txn).isNull())
output->append("firstExtent", "null");
else
- output->append("firstExtent",
- str::stream()
- << _details->firstExtent(txn).toString() << " ns:"
- << _getExtent(txn, _details->firstExtent(txn))->nsDiagnostic.toString());
+ output->append(
+ "firstExtent",
+ str::stream() << _details->firstExtent(txn).toString() << " ns:"
+ << _getExtent(txn, _details->firstExtent(txn))->nsDiagnostic.toString());
if (_details->lastExtent(txn).isNull())
output->append("lastExtent", "null");
else
- output->append("lastExtent",
- str::stream()
- << _details->lastExtent(txn).toString() << " ns:"
- << _getExtent(txn, _details->lastExtent(txn))->nsDiagnostic.toString());
+ output->append(
+ "lastExtent",
+ str::stream() << _details->lastExtent(txn).toString() << " ns:"
+ << _getExtent(txn, _details->lastExtent(txn))->nsDiagnostic.toString());
// 22222222222222222222222222
{ // validate extent basics
@@ -784,9 +784,12 @@ Status RecordStoreV1Base::validate(OperationContext* txn,
break;
}
- string err(str::stream()
- << "bad pointer in deleted record list: " << loc.toString()
- << " bucket: " << i << " k: " << k);
+ string err(str::stream() << "bad pointer in deleted record list: "
+ << loc.toString()
+ << " bucket: "
+ << i
+ << " k: "
+ << k);
results->errors.push_back(err);
results->valid = false;
break;
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
index 53b86129e13..489f084fffe 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_base.h
@@ -30,8 +30,8 @@
#pragma once
-#include "mongo/util/concurrency/spin_lock.h"
#include "mongo/platform/unordered_set.h"
+#include "mongo/util/concurrency/spin_lock.h"
#include "mongo/db/storage/mmap_v1/diskloc.h"
#include "mongo/db/storage/record_store.h"
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
index 1c678074ace..7dab4124df6 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped.cpp
@@ -101,11 +101,12 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
// since we have to iterate all the extents (for now) to get
// storage size
if (lenToAlloc > storageSize(txn)) {
- return StatusWith<DiskLoc>(ErrorCodes::DocTooLargeForCapped,
- mongoutils::str::stream()
- << "document is larger than capped size " << lenToAlloc
- << " > " << storageSize(txn),
- 16328);
+ return StatusWith<DiskLoc>(
+ ErrorCodes::DocTooLargeForCapped,
+ mongoutils::str::stream() << "document is larger than capped size " << lenToAlloc
+ << " > "
+ << storageSize(txn),
+ 16328);
}
}
DiskLoc loc;
@@ -161,8 +162,10 @@ StatusWith<DiskLoc> CappedRecordStoreV1::allocRecord(OperationContext* txn,
return StatusWith<DiskLoc>(ErrorCodes::DocTooLargeForCapped,
str::stream()
<< "document doesn't fit in capped collection."
- << " size: " << lenToAlloc
- << " storageSize:" << storageSize(txn),
+ << " size: "
+ << lenToAlloc
+ << " storageSize:"
+ << storageSize(txn),
28575);
}
continue;
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
index 5f20ba62385..2bde7396e44 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_capped_test.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/storage/mmap_v1/record_store_v1_capped_iterator.h"
#include "mongo/db/operation_context_noop.h"
-#include "mongo/db/storage/mmap_v1/record.h"
#include "mongo/db/storage/mmap_v1/extent.h"
+#include "mongo/db/storage/mmap_v1/record.h"
#include "mongo/db/storage/mmap_v1/record_store_v1_test_help.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
index a45cb1ca9e7..b65782cd27b 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_repair_iterator.h
@@ -30,8 +30,8 @@
#include <set>
-#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/mmap_v1/record_store_v1_base.h"
+#include "mongo/db/storage/record_store.h"
namespace mongo {
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
index aa29b7f0174..51baec1cd29 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_simple.cpp
@@ -37,17 +37,17 @@
#include "mongo/base/counter.h"
#include "mongo/db/catalog/collection.h"
#include "mongo/db/client.h"
-#include "mongo/db/curop.h"
#include "mongo/db/commands/server_status_metric.h"
+#include "mongo/db/curop.h"
+#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/extent.h"
#include "mongo/db/storage/mmap_v1/extent_manager.h"
#include "mongo/db/storage/mmap_v1/record.h"
-#include "mongo/db/operation_context.h"
#include "mongo/db/storage/mmap_v1/record_store_v1_simple_iterator.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
-#include "mongo/util/progress_meter.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/progress_meter.h"
#include "mongo/util/timer.h"
#include "mongo/util/touch_pages.h"
@@ -152,7 +152,8 @@ StatusWith<DiskLoc> SimpleRecordStoreV1::allocRecord(OperationContext* txn,
return StatusWith<DiskLoc>(
ErrorCodes::InvalidLength,
str::stream() << "Attempting to allocate a record larger than maximum size: "
- << lengthWithHeaders << " > 16.5MB");
+ << lengthWithHeaders
+ << " > 16.5MB");
}
DiskLoc loc = _allocFromExistingExtents(txn, lengthWithHeaders);
diff --git a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
index 234acf8695e..0c56ef9e6f1 100644
--- a/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
+++ b/src/mongo/db/storage/mmap_v1/record_store_v1_test_help.cpp
@@ -32,8 +32,8 @@
#include "mongo/db/storage/mmap_v1/record_store_v1_test_help.h"
-#include <boost/next_prior.hpp>
#include <algorithm>
+#include <boost/next_prior.hpp>
#include <map>
#include <set>
#include <vector>
diff --git a/src/mongo/db/storage/mmap_v1/repair_database.cpp b/src/mongo/db/storage/mmap_v1/repair_database.cpp
index b73d346a708..5587e4c2b9a 100644
--- a/src/mongo/db/storage/mmap_v1/repair_database.cpp
+++ b/src/mongo/db/storage/mmap_v1/repair_database.cpp
@@ -45,11 +45,11 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/storage/mmap_v1/dur.h"
+#include "mongo/db/storage/mmap_v1/file_allocator.h"
#include "mongo/db/storage/mmap_v1/mmap.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_database_catalog_entry.h"
#include "mongo/db/storage/mmap_v1/mmap_v1_options.h"
#include "mongo/util/file.h"
-#include "mongo/db/storage/mmap_v1/file_allocator.h"
#include "mongo/util/log.h"
#include "mongo/util/scopeguard.h"
@@ -95,8 +95,7 @@ void _deleteDataFiles(const std::string& database) {
virtual const char* op() const {
return "remove";
}
- }
- deleter;
+ } deleter;
_applyOpToDataFiles(database, deleter, true);
}
@@ -290,9 +289,11 @@ Status MMAPV1Engine::repairDatabase(OperationContext* txn,
if (freeSize > -1 && freeSize < totalSize) {
return Status(ErrorCodes::OutOfDiskSpace,
- str::stream()
- << "Cannot repair database " << dbName << " having size: " << totalSize
- << " (bytes) because free disk space is: " << freeSize << " (bytes)");
+ str::stream() << "Cannot repair database " << dbName << " having size: "
+ << totalSize
+ << " (bytes) because free disk space is: "
+ << freeSize
+ << " (bytes)");
}
txn->checkForInterrupt();
diff --git a/src/mongo/db/storage/paths.cpp b/src/mongo/db/storage/paths.cpp
index 2f6fb4d4a77..b9f05ad17d1 100644
--- a/src/mongo/db/storage/paths.cpp
+++ b/src/mongo/db/storage/paths.cpp
@@ -84,8 +84,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
massert(13650,
- str::stream() << "Couldn't open directory '" << dir.string()
- << "' for flushing: " << errnoWithDescription(),
+ str::stream() << "Couldn't open directory '" << dir.string() << "' for flushing: "
+ << errnoWithDescription(),
fd >= 0);
if (fsync(fd) != 0) {
int e = errno;
@@ -102,8 +102,8 @@ void flushMyDirectory(const boost::filesystem::path& file) {
} else {
close(fd);
massert(13651,
- str::stream() << "Couldn't fsync directory '" << dir.string()
- << "': " << errnoWithDescription(e),
+ str::stream() << "Couldn't fsync directory '" << dir.string() << "': "
+ << errnoWithDescription(e),
false);
}
}
diff --git a/src/mongo/db/storage/paths.h b/src/mongo/db/storage/paths.h
index 7f9a479f416..384b6459419 100644
--- a/src/mongo/db/storage/paths.h
+++ b/src/mongo/db/storage/paths.h
@@ -31,9 +31,9 @@
#pragma once
#include <boost/filesystem/path.hpp>
-#include <sys/types.h>
-#include <sys/stat.h>
#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
index 21127d5342f..195f9e0a184 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp
@@ -41,7 +41,7 @@ void testSetEndPosition_Next_Forward(bool unique, bool inclusive) {
auto sorted = harnessHelper->newSortedDataInterface(
unique,
{
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
+ {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
});
// Dup key on end point. Illegal for unique indexes.
@@ -80,7 +80,7 @@ void testSetEndPosition_Next_Reverse(bool unique, bool inclusive) {
auto sorted = harnessHelper->newSortedDataInterface(
unique,
{
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
+ {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1}, {key5, loc1},
});
// Dup key on end point. Illegal for unique indexes.
@@ -119,10 +119,10 @@ void testSetEndPosition_Seek_Forward(bool unique, bool inclusive) {
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1},
- // No key2
- {key3, loc1},
- {key4, loc1},
+ {key1, loc1},
+ // No key2
+ {key3, loc1},
+ {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -167,10 +167,10 @@ void testSetEndPosition_Seek_Reverse(bool unique, bool inclusive) {
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1},
- {key2, loc1},
- // No key3
- {key4, loc1},
+ {key1, loc1},
+ {key2, loc1},
+ // No key3
+ {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -217,7 +217,7 @@ void testSetEndPosition_Restore_Forward(bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(
unique,
{
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
+ {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -234,7 +234,7 @@ void testSetEndPosition_Restore_Forward(bool unique) {
removeFromIndex(opCtx,
sorted,
{
- {key2, loc1}, {key3, loc1},
+ {key2, loc1}, {key3, loc1},
});
cursor->restore();
@@ -253,7 +253,7 @@ void testSetEndPosition_Restore_Reverse(bool unique) {
auto sorted = harnessHelper->newSortedDataInterface(
unique,
{
- {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
+ {key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -270,7 +270,7 @@ void testSetEndPosition_Restore_Reverse(bool unique) {
removeFromIndex(opCtx,
sorted,
{
- {key2, loc1}, {key3, loc1},
+ {key2, loc1}, {key3, loc1},
});
cursor->restore();
@@ -293,7 +293,7 @@ void testSetEndPosition_RestoreEndCursor_Forward(bool unique) {
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1}, {key4, loc1},
+ {key1, loc1}, {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -306,8 +306,8 @@ void testSetEndPosition_RestoreEndCursor_Forward(bool unique) {
insertToIndex(opCtx,
sorted,
{
- {key2, loc1}, // in range
- {key3, loc1}, // out of range
+ {key2, loc1}, // in range
+ {key3, loc1}, // out of range
});
cursor->restore();
@@ -327,7 +327,7 @@ void testSetEndPosition_RestoreEndCursor_Reverse(bool unique) {
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1}, {key4, loc1},
+ {key1, loc1}, {key4, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -339,8 +339,8 @@ void testSetEndPosition_RestoreEndCursor_Reverse(bool unique) {
insertToIndex(opCtx,
sorted,
{
- {key2, loc1}, // in range
- {key3, loc1}, // out of range
+ {key2, loc1}, // in range
+ {key3, loc1}, // out of range
});
cursor->restore(); // must restore end cursor even with saveUnpositioned().
@@ -360,10 +360,11 @@ TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Reverse_Unique) {
void testSetEndPosition_Empty_Forward(bool unique, bool inclusive) {
auto harnessHelper = newHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted =
+ harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
cursor->setEndPosition(BSONObj(), inclusive);
@@ -389,10 +390,11 @@ TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Standard_Exclusive) {
void testSetEndPosition_Empty_Reverse(bool unique, bool inclusive) {
auto harnessHelper = newHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted =
+ harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
cursor->setEndPosition(BSONObj(), inclusive);
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
index d900dbdc1d7..63c3bf6bc44 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp
@@ -291,7 +291,7 @@ void testSaveAndRestorePositionSeesNewInserts(bool forward, bool unique) {
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1}, {key3, loc1},
+ {key1, loc1}, {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -325,7 +325,7 @@ void testSaveAndRestorePositionSeesNewInsertsAfterRemove(bool forward, bool uniq
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1}, {key3, loc1},
+ {key1, loc1}, {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -365,7 +365,7 @@ void testSaveAndRestorePositionSeesNewInsertsAfterEOF(bool forward, bool unique)
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(false,
{
- {key1, loc1},
+ {key1, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -403,10 +403,11 @@ TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Reverse_S
TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_Forward) {
auto harnessHelper = newHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(/*isUnique*/ false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted =
+ harnessHelper->newSortedDataInterface(/*isUnique*/ false,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
@@ -482,10 +483,11 @@ TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_For
TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_Reverse) {
auto harnessHelper = newHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(/*isUnique*/ false,
- {
- {key0, loc1}, {key1, loc1}, {key2, loc2},
- });
+ auto sorted =
+ harnessHelper->newSortedDataInterface(/*isUnique*/ false,
+ {
+ {key0, loc1}, {key1, loc1}, {key2, loc2},
+ });
auto cursor = sorted->newCursor(opCtx.get(), false);
@@ -561,10 +563,11 @@ TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_Rev
TEST(SortedDataInterface, SaveUnpositionedAndRestore) {
auto harnessHelper = newHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(false,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted =
+ harnessHelper->newSortedDataInterface(false,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get());
diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
index c767dbee859..ae22f28c52b 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp
@@ -38,10 +38,11 @@ namespace mongo {
void testSeekExact_Hit(bool unique, bool forward) {
auto harnessHelper = newHarnessHelper();
auto opCtx = harnessHelper->newOperationContext();
- auto sorted = harnessHelper->newSortedDataInterface(unique,
- {
- {key1, loc1}, {key2, loc1}, {key3, loc1},
- });
+ auto sorted =
+ harnessHelper->newSortedDataInterface(unique,
+ {
+ {key1, loc1}, {key2, loc1}, {key3, loc1},
+ });
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -71,9 +72,9 @@ void testSeekExact_Miss(bool unique, bool forward) {
auto opCtx = harnessHelper->newOperationContext();
auto sorted = harnessHelper->newSortedDataInterface(unique,
{
- {key1, loc1},
- // No key2.
- {key3, loc1},
+ {key1, loc1},
+ // No key2.
+ {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), forward);
@@ -105,7 +106,7 @@ TEST(SortedDataInterface, SeekExact_HitWithDups_Forward) {
auto sorted = harnessHelper->newSortedDataInterface(
false,
{
- {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
+ {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get());
@@ -124,7 +125,7 @@ TEST(SortedDataInterface, SeekExact_HitWithDups_Reverse) {
auto sorted = harnessHelper->newSortedDataInterface(
false,
{
- {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
+ {key1, loc1}, {key2, loc1}, {key2, loc2}, {key3, loc1},
});
auto cursor = sorted->newCursor(opCtx.get(), false);
diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
index 8506af3e43e..40ce99a6911 100644
--- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
+++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp
@@ -529,10 +529,10 @@ TEST(SortedDataInterface, Locate4) {
auto harnessHelper = newHarnessHelper();
auto sorted = harnessHelper->newSortedDataInterface(false,
{
- {BSON("" << 1), RecordId(1, 2)},
- {BSON("" << 1), RecordId(1, 4)},
- {BSON("" << 1), RecordId(1, 6)},
- {BSON("" << 2), RecordId(1, 8)},
+ {BSON("" << 1), RecordId(1, 2)},
+ {BSON("" << 1), RecordId(1, 4)},
+ {BSON("" << 1), RecordId(1, 6)},
+ {BSON("" << 2), RecordId(1, 8)},
});
{
diff --git a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
index de9e0bb97aa..ab5f12484ca 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp
@@ -35,11 +35,11 @@
#include <boost/filesystem.hpp>
#include <fcntl.h>
#include <ostream>
+#include <sstream>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
-#include <sstream>
#include "mongo/db/storage/paths.h"
#include "mongo/platform/process_id.h"
@@ -93,7 +93,8 @@ Status StorageEngineLockFile::open() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Unable to check existence of data directory " << _dbpath
- << ": " << ex.what());
+ << ": "
+ << ex.what());
}
// Use file permissions 644
@@ -151,7 +152,9 @@ Status StorageEngineLockFile::writePid() {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id to file (ftruncate failed): "
- << _filespec << ' ' << errnoWithDescription(errorcode));
+ << _filespec
+ << ' '
+ << errnoWithDescription(errorcode));
}
ProcessId pid = ProcessId::getCurrent();
@@ -163,20 +166,26 @@ Status StorageEngineLockFile::writePid() {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << pid.toString()
- << " to file: " << _filespec << ' '
+ << " to file: "
+ << _filespec
+ << ' '
<< errnoWithDescription(errorcode));
} else if (bytesWritten == 0) {
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << pid.toString()
- << " to file: " << _filespec << " no data written.");
+ << " to file: "
+ << _filespec
+ << " no data written.");
}
if (::fsync(_lockFileHandle->_fd)) {
int errorcode = errno;
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << pid.toString()
- << " to file (fsync failed): " << _filespec << ' '
+ << " to file (fsync failed): "
+ << _filespec
+ << ' '
<< errnoWithDescription(errorcode));
}
diff --git a/src/mongo/db/storage/storage_engine_lock_file_test.cpp b/src/mongo/db/storage/storage_engine_lock_file_test.cpp
index ee77676291a..e628c7a7ba2 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_test.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_test.cpp
@@ -38,9 +38,9 @@
#include "mongo/unittest/unittest.h"
#ifndef _WIN32
-#include <unistd.h>
-#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
#endif
namespace {
diff --git a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
index dfac9d024bd..0016f8f0873 100644
--- a/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
+++ b/src/mongo/db/storage/storage_engine_lock_file_windows.cpp
@@ -109,7 +109,8 @@ Status StorageEngineLockFile::open() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Unable to check existence of data directory " << _dbpath
- << ": " << ex.what());
+ << ": "
+ << ex.what());
}
HANDLE lockFileHandle = CreateFileA(_filespec.c_str(),
@@ -170,12 +171,16 @@ Status StorageEngineLockFile::writePid() {
int errorcode = GetLastError();
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << pid.toString()
- << " to file: " << _filespec << ' '
+ << " to file: "
+ << _filespec
+ << ' '
<< errnoWithDescription(errorcode));
} else if (bytesWritten == 0) {
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unable to write process id " << pid.toString()
- << " to file: " << _filespec << " no data written.");
+ << " to file: "
+ << _filespec
+ << " no data written.");
}
::FlushFileBuffers(_lockFileHandle->_handle);
diff --git a/src/mongo/db/storage/storage_engine_metadata.cpp b/src/mongo/db/storage/storage_engine_metadata.cpp
index 1a281e171e6..144ba7c838b 100644
--- a/src/mongo/db/storage/storage_engine_metadata.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata.cpp
@@ -32,9 +32,9 @@
#include "mongo/db/storage/storage_engine_metadata.h"
-#include <cstdio>
#include <boost/filesystem.hpp>
#include <boost/optional.hpp>
+#include <cstdio>
#include <fstream>
#include <limits>
#include <ostream>
@@ -158,16 +158,17 @@ Status StorageEngineMetadata::read() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::FileStreamFailed,
str::stream() << "Unexpected error reading BSON data from " << filename
- << ": " << ex.what());
+ << ": "
+ << ex.what());
}
BSONObj obj;
try {
obj = BSONObj(&buffer[0]);
} catch (DBException& ex) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Failed to convert data in " << filename
- << " to BSON: " << ex.what());
+ return Status(
+ ErrorCodes::FailedToParse,
+ str::stream() << "Failed to convert data in " << filename << " to BSON: " << ex.what());
}
// Validate 'storage.engine' field.
@@ -235,8 +236,11 @@ Status StorageEngineMetadata::write() const {
} catch (const std::exception& ex) {
return Status(ErrorCodes::FileRenameFailed,
str::stream() << "Unexpected error while renaming temporary metadata file "
- << metadataTempPath.string() << " to " << metadataPath.string()
- << ": " << ex.what());
+ << metadataTempPath.string()
+ << " to "
+ << metadataPath.string()
+ << ": "
+ << ex.what());
}
return Status::OK();
@@ -252,7 +256,9 @@ Status StorageEngineMetadata::validateStorageEngineOption<bool>(StringData field
if (!element.isBoolean()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Expected boolean field " << fieldName << " but got "
- << typeName(element.type()) << " instead: " << element);
+ << typeName(element.type())
+ << " instead: "
+ << element);
}
if (element.boolean() == expectedValue) {
return Status::OK();
@@ -260,9 +266,12 @@ Status StorageEngineMetadata::validateStorageEngineOption<bool>(StringData field
return Status(
ErrorCodes::InvalidOptions,
str::stream() << "Requested option conflicts with current storage engine option for "
- << fieldName << "; you requested " << (expectedValue ? "true" : "false")
+ << fieldName
+ << "; you requested "
+ << (expectedValue ? "true" : "false")
<< " but the current server storage is already set to "
- << (element.boolean() ? "true" : "false") << " and cannot be changed");
+ << (element.boolean() ? "true" : "false")
+ << " and cannot be changed");
}
} // namespace mongo
diff --git a/src/mongo/db/storage/storage_engine_metadata_test.cpp b/src/mongo/db/storage/storage_engine_metadata_test.cpp
index 0f0326a2161..466c0016037 100644
--- a/src/mongo/db/storage/storage_engine_metadata_test.cpp
+++ b/src/mongo/db/storage/storage_engine_metadata_test.cpp
@@ -36,8 +36,8 @@
#include <ostream>
#include "mongo/bson/bsonobj.h"
-#include "mongo/db/storage/storage_engine_metadata.h"
#include "mongo/db/json.h"
+#include "mongo/db/storage/storage_engine_metadata.h"
#include "mongo/unittest/temp_dir.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/db/storage/storage_init.cpp b/src/mongo/db/storage/storage_init.cpp
index 16cffe81ae9..0af1b78a602 100644
--- a/src/mongo/db/storage/storage_init.cpp
+++ b/src/mongo/db/storage/storage_init.cpp
@@ -33,8 +33,8 @@
#include "mongo/db/client.h"
#include "mongo/db/commands/server_status.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/service_context.h"
+#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/storage/storage_options.h"
namespace mongo {
@@ -55,8 +55,10 @@ public:
virtual BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
auto engine = txn->getClient()->getServiceContext()->getGlobalStorageEngine();
return BSON("name" << storageGlobalParams.engine << "supportsCommittedReads"
- << bool(engine->getSnapshotManager()) << "readOnly"
- << storageGlobalParams.readOnly << "persistent"
+ << bool(engine->getSnapshotManager())
+ << "readOnly"
+ << storageGlobalParams.readOnly
+ << "persistent"
<< !engine->isEphemeral());
}
diff --git a/src/mongo/db/storage/storage_options.cpp b/src/mongo/db/storage/storage_options.cpp
index 9ea45c706b5..8b031ff7a59 100644
--- a/src/mongo/db/storage/storage_options.cpp
+++ b/src/mongo/db/storage/storage_options.cpp
@@ -85,7 +85,8 @@ public:
return Status(ErrorCodes::BadValue,
str::stream() << "journalCommitInterval must be between 1 and "
<< StorageGlobalParams::kMaxJournalCommitIntervalMs
- << ", but attempted to set to: " << potentialNewValue);
+ << ", but attempted to set to: "
+ << potentialNewValue);
}
return Status::OK();
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp
index fd16df07dcb..0de33e67ce5 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp
@@ -49,55 +49,63 @@ Status WiredTigerGlobalOptions::add(moe::OptionSection* options) {
moe::Double,
"maximum amount of memory to allocate for cache; "
"defaults to 1/2 of physical RAM");
- wiredTigerOptions.addOptionChaining(
- "storage.wiredTiger.engineConfig.statisticsLogDelaySecs",
- "wiredTigerStatisticsLogDelaySecs",
- moe::Int,
- "seconds to wait between each write to a statistics file in the dbpath; "
- "0 means do not log statistics")
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.engineConfig.statisticsLogDelaySecs",
+ "wiredTigerStatisticsLogDelaySecs",
+ moe::Int,
+ "seconds to wait between each write to a statistics file in the dbpath; "
+ "0 means do not log statistics")
.validRange(0, 100000)
.setDefault(moe::Value(0));
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.journalCompressor",
- "wiredTigerJournalCompressor",
- moe::String,
- "use a compressor for log records [none|snappy|zlib]")
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.engineConfig.journalCompressor",
+ "wiredTigerJournalCompressor",
+ moe::String,
+ "use a compressor for log records [none|snappy|zlib]")
.format("(:?none)|(:?snappy)|(:?zlib)", "(none/snappy/zlib)")
.setDefault(moe::Value(std::string("snappy")));
wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.directoryForIndexes",
"wiredTigerDirectoryForIndexes",
moe::Switch,
"Put indexes and data in different directories");
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.engineConfig.configString",
- "wiredTigerEngineConfigString",
- moe::String,
- "WiredTiger storage engine custom "
- "configuration settings").hidden();
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.engineConfig.configString",
+ "wiredTigerEngineConfigString",
+ moe::String,
+ "WiredTiger storage engine custom "
+ "configuration settings")
+ .hidden();
// WiredTiger collection options
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.collectionConfig.blockCompressor",
- "wiredTigerCollectionBlockCompressor",
- moe::String,
- "block compression algorithm for collection data "
- "[none|snappy|zlib]")
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.collectionConfig.blockCompressor",
+ "wiredTigerCollectionBlockCompressor",
+ moe::String,
+ "block compression algorithm for collection data "
+ "[none|snappy|zlib]")
.format("(:?none)|(:?snappy)|(:?zlib)", "(none/snappy/zlib)")
.setDefault(moe::Value(std::string("snappy")));
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.collectionConfig.configString",
- "wiredTigerCollectionConfigString",
- moe::String,
- "WiredTiger custom collection configuration settings")
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.collectionConfig.configString",
+ "wiredTigerCollectionConfigString",
+ moe::String,
+ "WiredTiger custom collection configuration settings")
.hidden();
// WiredTiger index options
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.indexConfig.prefixCompression",
- "wiredTigerIndexPrefixCompression",
- moe::Bool,
- "use prefix compression on row-store leaf pages")
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.indexConfig.prefixCompression",
+ "wiredTigerIndexPrefixCompression",
+ moe::Bool,
+ "use prefix compression on row-store leaf pages")
.setDefault(moe::Value(true));
- wiredTigerOptions.addOptionChaining("storage.wiredTiger.indexConfig.configString",
- "wiredTigerIndexConfigString",
- moe::String,
- "WiredTiger custom index configuration settings").hidden();
+ wiredTigerOptions
+ .addOptionChaining("storage.wiredTiger.indexConfig.configString",
+ "wiredTigerIndexConfigString",
+ moe::String,
+ "WiredTiger custom index configuration settings")
+ .hidden();
return options->addSection(wiredTigerOptions);
}
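
[editor's note] This file shows the member-call-chain rule: when obj.method(...).method2() overflows, clang-format breaks before each dot, so the trailing .hidden() and .setDefault(...) calls land on their own indented lines rather than hanging off the argument list. A minimal fluent-builder sketch (the OptionBuilder class is hypothetical, not the moe::OptionSection API):

    #include <iostream>
    #include <string>

    // Hypothetical fluent builder, only to show the wrapped chain shape.
    class OptionBuilder {
    public:
        OptionBuilder& add(const std::string& key, const std::string& help) {
            std::cout << key << ": " << help << '\n';
            return *this;
        }
        OptionBuilder& hidden() {
            std::cout << "  (hidden)\n";
            return *this;
        }
    };

    int main() {
        OptionBuilder options;
        // Post-format shape: break before '.', continuation indented,
        // arguments aligned inside the call.
        options
            .add("storage.engine.configString",
                 "custom configuration settings")
            .hidden();
        return 0;
    }
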
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
index 7bc63bc7903..28b7f7d28fb 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp
@@ -37,22 +37,22 @@
#include <set>
#include "mongo/base/checked_cast.h"
-#include "mongo/db/json.h"
#include "mongo/db/catalog/index_catalog_entry.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/index/index_descriptor.h"
+#include "mongo/db/json.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/key_string.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
-#include "mongo/db/storage/storage_options.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/assert_util.h"
-#include "mongo/util/hex.h"
#include "mongo/util/fail_point.h"
+#include "mongo/util/hex.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -60,7 +60,7 @@
#if TRACING_ENABLED
#define TRACE_CURSOR log() << "WT index (" << (const void*)&_idx << ") "
-#define TRACE_INDEX log() << "WT index (" << (const void*) this << ") "
+#define TRACE_INDEX log() << "WT index (" << (const void*)this << ") "
#else
#define TRACE_CURSOR \
if (0) \
@@ -141,7 +141,8 @@ StatusWith<std::string> WiredTigerIndex::parseIndexOptions(const BSONObj& option
// Return error on first unrecognized field.
return StatusWith<std::string>(ErrorCodes::InvalidOptions,
str::stream() << '\'' << elem.fieldNameStringData()
- << '\'' << " is not a supported option.");
+ << '\''
+ << " is not a supported option.");
}
}
return StatusWith<std::string>(ss.str());
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
index c2d6c70cf12..ee729502550 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h
@@ -31,8 +31,8 @@
#include <wiredtiger.h>
#include "mongo/base/status_with.h"
-#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/index_entry_comparison.h"
+#include "mongo/db/storage/key_string.h"
#include "mongo/db/storage/sorted_data_interface.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h"
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
index 7cd4a095699..4173f8ed455 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp
@@ -71,7 +71,8 @@ public:
BSONObj spec = BSON("key" << BSON("a" << 1) << "name"
<< "testIndex"
- << "ns" << ns);
+ << "ns"
+ << ns);
IndexDescriptor desc(NULL, "", spec);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
index fc60f082ea6..2d4db8a3123 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
@@ -33,20 +33,20 @@
#include "mongo/base/init.h"
#include "mongo/db/catalog/collection_options.h"
-#include "mongo/db/service_context_d.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/service_context_d.h"
#include "mongo/db/storage/kv/kv_storage_engine.h"
#include "mongo/db/storage/storage_engine_lock_file.h"
#include "mongo/db/storage/storage_engine_metadata.h"
-#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_index.h"
+#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_parameters.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_server_status.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
-#include "mongo/db/storage/storage_options.h"
#include "mongo/util/log.h"
namespace mongo {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
index 4bf932ac276..a2830efebfd 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp
@@ -29,12 +29,12 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/json.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/storage/storage_engine_metadata.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
-#include "mongo/db/storage/storage_options.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/mongoutils/str.h"
@@ -94,9 +94,15 @@ void _testValidateMetadata(const StorageEngine::Factory* factory,
if (expectedCode != status.code()) {
FAIL(str::stream()
<< "Unexpected StorageEngine::Factory::validateMetadata result. Expected: "
- << ErrorCodes::errorString(expectedCode) << " but got " << status.toString()
- << " instead. metadataOptions: " << metadataOptions << "; directoryPerDB: "
- << directoryPerDB << "; directoryForIndexes: " << directoryForIndexes);
+ << ErrorCodes::errorString(expectedCode)
+ << " but got "
+ << status.toString()
+ << " instead. metadataOptions: "
+ << metadataOptions
+ << "; directoryPerDB: "
+ << directoryPerDB
+ << "; directoryForIndexes: "
+ << directoryForIndexes);
}
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 163654c12cb..e6c2875ec7c 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -45,13 +45,14 @@
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/catalog/collection_catalog_entry.h"
#include "mongo/db/client.h"
+#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/concurrency/locker.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
-#include "mongo/db/commands/server_status_metric.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/service_context.h"
#include "mongo/db/storage/journal_listener.h"
+#include "mongo/db/storage/storage_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_index.h"
@@ -60,11 +61,10 @@
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
-#include "mongo/db/storage/storage_options.h"
-#include "mongo/util/log.h"
#include "mongo/util/background.h"
#include "mongo/util/concurrency/ticketholder.h"
#include "mongo/util/exit.h"
+#include "mongo/util/log.h"
#include "mongo/util/processinfo.h"
#include "mongo/util/scopeguard.h"
#include "mongo/util/time_support.h"
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
index 49ef155b51d..cb7852d0bfc 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp
@@ -70,7 +70,8 @@ Status WiredTigerEngineRuntimeConfigParameter::setFromString(const std::string&
return Status(ErrorCodes::BadValue,
(str::stream()
<< "WiredTiger configuration strings cannot have embedded null characters. "
- "Embedded null found at position " << pos));
+ "Embedded null found at position "
+ << pos));
}
log() << "Reconfiguring WiredTiger storage engine with config string: \"" << str << "\"";
@@ -79,7 +80,9 @@ Status WiredTigerEngineRuntimeConfigParameter::setFromString(const std::string&
if (ret != 0) {
string result =
(mongoutils::str::stream() << "WiredTiger reconfiguration failed with error code ("
- << ret << "): " << wiredtiger_strerror(ret));
+ << ret
+ << "): "
+ << wiredtiger_strerror(ret));
error() << result;
return Status(ErrorCodes::BadValue, result);
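
Both hunks in this file show how the formatter handles stream chains that overflow the column limit (mongo's style uses 100 columns): instead of packing several operands per line, it breaks before every << and aligns the operators under the first one. A rough stand-alone equivalent with std::ostringstream; the function name and message are invented, and mongo's str::stream helper is not reproduced here:

    #include <iostream>
    #include <sstream>
    #include <string>

    // Invented example: builds an error message from several operands. Once
    // the chain no longer fits on one line, clang-format breaks before each
    // << and aligns the operators, as in the hunks above.
    std::string reconfigureError(int ret, const std::string& reason) {
        std::ostringstream stream;
        stream << "reconfiguration failed with error code ("
               << ret
               << "): "
               << reason;
        return stream.str();
    }

    int main() {
        std::cout << reconfigureError(22, "invalid argument") << '\n';
        return 0;
    }
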
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
index f31bbd1e38a..1543b5706f6 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp
@@ -488,7 +488,8 @@ public:
if (_forward && MONGO_FAIL_POINT(WTEmulateOutOfOrderNextRecordId)) {
log() << "WTEmulateOutOfOrderNextRecordId fail point has triggerd so RecordId is now "
- "RecordId(1) instead of " << id;
+ "RecordId(1) instead of "
+ << id;
// Replace the found RecordId with a (small) fake one.
id = RecordId{1};
}
@@ -649,7 +650,8 @@ StatusWith<std::string> WiredTigerRecordStore::parseOptionsField(const BSONObj o
// Return error on first unrecognized field.
return StatusWith<std::string>(ErrorCodes::InvalidOptions,
str::stream() << '\'' << elem.fieldNameStringData()
- << '\'' << " is not a supported option.");
+ << '\''
+ << " is not a supported option.");
}
}
return StatusWith<std::string>(ss.str());
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
index 37d64e8bd00..249dcacdc81 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp
@@ -33,16 +33,16 @@
#include <sstream>
#include <string>
+#include "mongo/base/checked_cast.h"
#include "mongo/base/string_data.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/base/checked_cast.h"
#include "mongo/db/concurrency/write_conflict_exception.h"
#include "mongo/db/json.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/storage/record_store_test_harness.h"
-#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_stones.h"
+#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
index 415081d334c..6f4bcf0d025 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp
@@ -154,7 +154,9 @@ Status WiredTigerUtil::getApplicationMetadata(OperationContext* opCtx,
if (keysSeen.count(key)) {
return Status(ErrorCodes::DuplicateKey,
str::stream() << "app_metadata must not contain duplicate keys. "
- << "Found multiple instances of key '" << key << "'.");
+ << "Found multiple instances of key '"
+ << key
+ << "'.");
}
keysSeen.insert(key);
@@ -230,7 +232,9 @@ Status WiredTigerUtil::checkApplicationMetadataFormatVersion(OperationContext* o
if (version < minimumVersion || version > maximumVersion) {
return Status(ErrorCodes::UnsupportedFormat,
str::stream() << "Application metadata for " << uri
- << " has unsupported format version: " << version << ".");
+ << " has unsupported format version: "
+ << version
+ << ".");
}
LOG(2) << "WiredTigerUtil::checkApplicationMetadataFormatVersion "
@@ -278,7 +282,8 @@ StatusWith<uint64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
if (ret != 0) {
return StatusWith<uint64_t>(ErrorCodes::CursorNotFound,
str::stream() << "unable to open cursor at URI " << uri
- << ". reason: " << wiredtiger_strerror(ret));
+ << ". reason: "
+ << wiredtiger_strerror(ret));
}
invariant(cursor);
ON_BLOCK_EXIT(cursor->close, cursor);
@@ -286,19 +291,21 @@ StatusWith<uint64_t> WiredTigerUtil::getStatisticsValue(WT_SESSION* session,
cursor->set_key(cursor, statisticsKey);
ret = cursor->search(cursor);
if (ret != 0) {
- return StatusWith<uint64_t>(ErrorCodes::NoSuchKey,
- str::stream() << "unable to find key " << statisticsKey
- << " at URI " << uri
- << ". reason: " << wiredtiger_strerror(ret));
+ return StatusWith<uint64_t>(
+ ErrorCodes::NoSuchKey,
+ str::stream() << "unable to find key " << statisticsKey << " at URI " << uri
+ << ". reason: "
+ << wiredtiger_strerror(ret));
}
uint64_t value;
ret = cursor->get_value(cursor, NULL, NULL, &value);
if (ret != 0) {
- return StatusWith<uint64_t>(ErrorCodes::BadValue,
- str::stream() << "unable to get value for key " << statisticsKey
- << " at URI " << uri
- << ". reason: " << wiredtiger_strerror(ret));
+ return StatusWith<uint64_t>(
+ ErrorCodes::BadValue,
+ str::stream() << "unable to get value for key " << statisticsKey << " at URI " << uri
+ << ". reason: "
+ << wiredtiger_strerror(ret));
}
return StatusWith<uint64_t>(value);
@@ -437,8 +444,8 @@ Status WiredTigerUtil::exportTableToBSON(WT_SESSION* session,
int ret = session->open_cursor(session, uri.c_str(), NULL, cursorConfig, &c);
if (ret != 0) {
return Status(ErrorCodes::CursorNotFound,
- str::stream() << "unable to open cursor at URI " << uri
- << ". reason: " << wiredtiger_strerror(ret));
+ str::stream() << "unable to open cursor at URI " << uri << ". reason: "
+ << wiredtiger_strerror(ret));
}
bob->append("uri", uri);
invariant(c);
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 6c557364edf..85da961f66c 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -48,9 +48,9 @@
#include "mongo/db/db_raii.h"
#include "mongo/db/exec/delete.h"
#include "mongo/db/index/index_descriptor.h"
-#include "mongo/db/ops/insert.h"
#include "mongo/db/matcher/extensions_callback_disallow_extensions.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/ops/insert.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/server_parameters.h"
diff --git a/src/mongo/db/update_index_data.cpp b/src/mongo/db/update_index_data.cpp
index 9d66a4a7a6e..2b1144d40a3 100644
--- a/src/mongo/db/update_index_data.cpp
+++ b/src/mongo/db/update_index_data.cpp
@@ -28,9 +28,9 @@
* it in the license file.
*/
+#include "mongo/db/update_index_data.h"
#include "mongo/bson/util/builder.h"
#include "mongo/db/field_ref.h"
-#include "mongo/db/update_index_data.h"
namespace mongo {
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index 3bde99387b7..e0a43779eea 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -36,11 +36,11 @@
#include "mongo/bson/util/bson_extract.h"
#include "mongo/db/client.h"
#include "mongo/db/commands/server_status_metric.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/optime.h"
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/server_options.h"
+#include "mongo/db/service_context.h"
#include "mongo/db/stats/timer_stats.h"
#include "mongo/db/storage/storage_engine.h"
#include "mongo/db/write_concern_options.h"
@@ -117,7 +117,8 @@ Status validateWriteConcern(OperationContext* txn,
ErrorCodes::BadValue,
str::stream()
<< "w:1 and w:'majority' are the only valid write concerns when writing to "
- "config servers, got: " << writeConcern.toBSON().toString());
+ "config servers, got: "
+ << writeConcern.toBSON().toString());
}
if (replMode == repl::ReplicationCoordinator::modeReplSet && !isLocalDb &&
@@ -127,7 +128,8 @@ Status validateWriteConcern(OperationContext* txn,
ErrorCodes::BadValue,
str::stream()
<< "w: 'majority' is the only valid write concern when writing to config "
- "server replica sets, got: " << writeConcern.toBSON().toString());
+ "server replica sets, got: "
+ << writeConcern.toBSON().toString());
}
}
diff --git a/src/mongo/dbtests/basictests.cpp b/src/mongo/dbtests/basictests.cpp
index a0b915027fa..9734c1d8aab 100644
--- a/src/mongo/dbtests/basictests.cpp
+++ b/src/mongo/dbtests/basictests.cpp
@@ -34,8 +34,8 @@
#include <iostream>
#include "mongo/db/client.h"
-#include "mongo/db/storage/paths.h"
#include "mongo/db/storage/mmap_v1/compress.h"
+#include "mongo/db/storage/paths.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/util/base64.h"
#include "mongo/util/queue.h"
diff --git a/src/mongo/dbtests/chunktests.cpp b/src/mongo/dbtests/chunktests.cpp
index 36de5ad9498..fbc7b94987a 100644
--- a/src/mongo/dbtests/chunktests.cpp
+++ b/src/mongo/dbtests/chunktests.cpp
@@ -127,8 +127,9 @@ class MultiShardBase : public Base {
return BSON_ARRAY(BSON("a"
<< "x")
<< BSON("a"
- << "y") << BSON("a"
- << "z"));
+ << "y")
+ << BSON("a"
+ << "z"));
}
};
diff --git a/src/mongo/dbtests/commandtests.cpp b/src/mongo/dbtests/commandtests.cpp
index bfe87176273..db5d6227452 100644
--- a/src/mongo/dbtests/commandtests.cpp
+++ b/src/mongo/dbtests/commandtests.cpp
@@ -258,9 +258,12 @@ public:
cmd.append("indexes",
BSON_ARRAY(BSON("key" << BSON("loc"
<< "geoHaystack"
- << "z" << 1.0) << "name"
+ << "z"
+ << 1.0)
+ << "name"
<< "loc_geoHaystack_z_1"
- << "bucketSize" << static_cast<double>(0.7))));
+ << "bucketSize"
+ << static_cast<double>(0.7))));
BSONObj result;
ASSERT(db.runCommand(nsDb(), cmd.obj(), result));
diff --git a/src/mongo/dbtests/dbtests.cpp b/src/mongo/dbtests/dbtests.cpp
index 4a742f78e3f..b0590061e91 100644
--- a/src/mongo/dbtests/dbtests.cpp
+++ b/src/mongo/dbtests/dbtests.cpp
@@ -39,11 +39,11 @@
#include "mongo/db/catalog/index_create.h"
#include "mongo/db/commands.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/service_context_d.h"
-#include "mongo/db/service_context.h"
-#include "mongo/db/wire_version.h"
#include "mongo/db/repl/replication_coordinator_global.h"
#include "mongo/db/repl/replication_coordinator_mock.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/service_context_d.h"
+#include "mongo/db/wire_version.h"
#include "mongo/dbtests/framework.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/quick_exit.h"
diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp
index e420ac1f0ab..8e7c4a0a1e8 100644
--- a/src/mongo/dbtests/directclienttests.cpp
+++ b/src/mongo/dbtests/directclienttests.cpp
@@ -74,7 +74,10 @@ public:
BSONObj info;
BSONObj cmd = BSON("captrunc"
<< "b"
- << "n" << 1 << "inc" << true);
+ << "n"
+ << 1
+ << "inc"
+ << true);
// cout << cmd.toString() << endl;
bool ok = client.runCommand("a", cmd, info);
// cout << info.toString() << endl;
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
index a201ddfe79a..b6022383f6b 100644
--- a/src/mongo/dbtests/framework.cpp
+++ b/src/mongo/dbtests/framework.cpp
@@ -38,9 +38,9 @@
#include "mongo/base/status.h"
#include "mongo/db/client.h"
#include "mongo/db/concurrency/lock_state.h"
+#include "mongo/db/s/sharding_state.h"
#include "mongo/db/service_context.h"
#include "mongo/db/service_context_d.h"
-#include "mongo/db/s/sharding_state.h"
#include "mongo/dbtests/dbtests.h"
#include "mongo/dbtests/framework_options.h"
#include "mongo/s/catalog/catalog_manager.h"
diff --git a/src/mongo/dbtests/framework_options.cpp b/src/mongo/dbtests/framework_options.cpp
index c51bc34e00f..0d2a4768213 100644
--- a/src/mongo/dbtests/framework_options.cpp
+++ b/src/mongo/dbtests/framework_options.cpp
@@ -66,12 +66,14 @@ FrameworkGlobalParams frameworkGlobalParams;
Status addTestFrameworkOptions(moe::OptionSection* options) {
options->addOptionChaining("help", "help,h", moe::Switch, "show this usage information");
- options->addOptionChaining(
- "dbpath",
- "dbpath",
- moe::String,
- "db data path for this test run. NOTE: the contents of this directory will "
- "be overwritten if it already exists").setDefault(moe::Value(default_test_dbpath));
+ options
+ ->addOptionChaining(
+ "dbpath",
+ "dbpath",
+ moe::String,
+ "db data path for this test run. NOTE: the contents of this directory will "
+ "be overwritten if it already exists")
+ .setDefault(moe::Value(default_test_dbpath));
options->addOptionChaining("debug", "debug", moe::Switch, "run tests with verbose output");
@@ -99,16 +101,18 @@ Status addTestFrameworkOptions(moe::OptionSection* options) {
options->addOptionChaining(
"perfHist", "perfHist", moe::Unsigned, "number of back runs of perf stats to display");
- options->addOptionChaining(
- "storage.engine", "storageEngine", moe::String, "what storage engine to use")
+ options
+ ->addOptionChaining(
+ "storage.engine", "storageEngine", moe::String, "what storage engine to use")
.setDefault(moe::Value(std::string("wiredTiger")));
options->addOptionChaining("suites", "suites", moe::StringVector, "test suites to run")
.hidden()
.positional(1, -1);
- options->addOptionChaining(
- "nopreallocj", "nopreallocj", moe::Switch, "disable journal prealloc").hidden();
+ options
+ ->addOptionChaining("nopreallocj", "nopreallocj", moe::Switch, "disable journal prealloc")
+ .hidden();
return Status::OK();
@@ -116,8 +120,8 @@ Status addTestFrameworkOptions(moe::OptionSection* options) {
std::string getTestFrameworkHelp(StringData name, const moe::OptionSection& options) {
StringBuilder sb;
- sb << "usage: " << name << " [options] [suite]...\n" << options.helpString()
- << "suite: run the specified test suite(s) only\n";
+ sb << "usage: " << name << " [options] [suite]...\n"
+ << options.helpString() << "suite: run the specified test suite(s) only\n";
return sb.str();
}
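
The addOptionChaining rewrites show another recurring pattern in this commit: when a trailing member call sits on an expression that already wraps, the formatter breaks before -> and ., leaving the base object alone on its line and indenting the chain one level. A stand-alone sketch of the same shape; OptionBuilder and its methods are invented for illustration:

    #include <iostream>
    #include <string>

    // Minimal fluent builder, invented for this sketch.
    class OptionBuilder {
    public:
        OptionBuilder& addOption(const std::string& name, const std::string& help) {
            std::cout << "option: " << name << " (" << help << ")\n";
            return *this;
        }
        OptionBuilder& setDefault(const std::string& value) {
            std::cout << "default: " << value << '\n';
            return *this;
        }
    };

    int main() {
        OptionBuilder options;
        // When the whole call no longer fits on one line, clang-format breaks
        // before the member access rather than after the closing parenthesis:
        options
            .addOption("dbpath",
                       "db data path for this test run; contents are overwritten")
            .setDefault("/tmp/unittest");
        return 0;
    }
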
diff --git a/src/mongo/dbtests/framework_options_init.cpp b/src/mongo/dbtests/framework_options_init.cpp
index 7e3689ae5e9..1100b188362 100644
--- a/src/mongo/dbtests/framework_options_init.cpp
+++ b/src/mongo/dbtests/framework_options_init.cpp
@@ -31,9 +31,9 @@
#include <iostream>
#include "mongo/dbtests/framework_options.h"
+#include "mongo/util/exit_code.h"
#include "mongo/util/options_parser/startup_option_init.h"
#include "mongo/util/options_parser/startup_options.h"
-#include "mongo/util/exit_code.h"
#include "mongo/util/quick_exit.h"
namespace mongo {
diff --git a/src/mongo/dbtests/index_access_method_test.cpp b/src/mongo/dbtests/index_access_method_test.cpp
index 35e6cd020f9..f81a3039704 100644
--- a/src/mongo/dbtests/index_access_method_test.cpp
+++ b/src/mongo/dbtests/index_access_method_test.cpp
@@ -28,10 +28,10 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/index/index_access_method.h"
-#include "mongo/unittest/unittest.h"
#include "mongo/bson/bsonobj.h"
+#include "mongo/db/index/index_access_method.h"
#include "mongo/db/json.h"
+#include "mongo/unittest/unittest.h"
namespace mongo {
diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp
index 23d6e6f092a..ef2f1effb81 100644
--- a/src/mongo/dbtests/indexcatalogtests.cpp
+++ b/src/mongo/dbtests/indexcatalogtests.cpp
@@ -138,7 +138,8 @@ public:
ASSERT_OK(dbtests::createIndexFromSpec(&txn,
_ns,
BSON("name" << indexName << "ns" << _ns << "key"
- << BSON("x" << 1) << "expireAfterSeconds"
+ << BSON("x" << 1)
+ << "expireAfterSeconds"
<< 5)));
const IndexDescriptor* desc = _catalog->findIndexByName(&txn, indexName);
diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp
index 3b3c3531ec9..99f254cb60d 100644
--- a/src/mongo/dbtests/indexupdatetests.cpp
+++ b/src/mongo/dbtests/indexupdatetests.cpp
@@ -372,8 +372,14 @@ public:
const BSONObj spec = BSON("name"
<< "a"
- << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "unique"
- << true << "background" << background);
+ << "ns"
+ << coll->ns().ns()
+ << "key"
+ << BSON("a" << 1)
+ << "unique"
+ << true
+ << "background"
+ << background);
ASSERT_OK(indexer.init(spec));
ASSERT_OK(indexer.insertAllDocumentsInCollection());
@@ -418,8 +424,14 @@ public:
const BSONObj spec = BSON("name"
<< "a"
- << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "unique"
- << true << "background" << background);
+ << "ns"
+ << coll->ns().ns()
+ << "key"
+ << BSON("a" << 1)
+ << "unique"
+ << true
+ << "background"
+ << background);
ASSERT_OK(indexer.init(spec));
const Status status = indexer.insertAllDocumentsInCollection();
@@ -463,8 +475,14 @@ public:
const BSONObj spec = BSON("name"
<< "a"
- << "ns" << coll->ns().ns() << "key" << BSON("a" << 1) << "unique"
- << true << "background" << background);
+ << "ns"
+ << coll->ns().ns()
+ << "key"
+ << BSON("a" << 1)
+ << "unique"
+ << true
+ << "background"
+ << background);
ASSERT_OK(indexer.init(spec));
@@ -729,7 +747,10 @@ public:
ASSERT_OK(createIndex("unittest",
BSON("name"
<< "x"
- << "ns" << _ns << "key" << BSON("x" << 1 << "y" << 1))));
+ << "ns"
+ << _ns
+ << "key"
+ << BSON("x" << 1 << "y" << 1))));
}
};
@@ -741,7 +762,11 @@ public:
createIndex("unittest",
BSON("name"
<< "x"
- << "ns" << _ns << "unique" << true << "key"
+ << "ns"
+ << _ns
+ << "unique"
+ << true
+ << "key"
<< BSON("x" << 1 << "y" << 1))));
}
};
@@ -752,7 +777,10 @@ public:
ASSERT_OK(createIndex("unittest",
BSON("name"
<< "x"
- << "ns" << _ns << "key" << BSON("x" << 1 << "y" << 1))));
+ << "ns"
+ << _ns
+ << "key"
+ << BSON("x" << 1 << "y" << 1))));
}
};
@@ -764,7 +792,10 @@ public:
createIndex("unittest",
BSON("name"
<< "x"
- << "ns" << _ns << "key" << BSON("y" << 1 << "x" << 1))));
+ << "ns"
+ << _ns
+ << "key"
+ << BSON("y" << 1 << "x" << 1))));
}
};
@@ -777,9 +808,17 @@ public:
ASSERT_OK(createIndex("unittests",
BSON("name"
<< "super"
- << "ns" << _ns << "unique" << 1 << "sparse" << true
- << "expireAfterSeconds" << 3600 << "key" << BSON("superIdx"
- << "2d"))));
+ << "ns"
+ << _ns
+ << "unique"
+ << 1
+ << "sparse"
+ << true
+ << "expireAfterSeconds"
+ << 3600
+ << "key"
+ << BSON("superIdx"
+ << "2d"))));
}
};
@@ -791,9 +830,17 @@ public:
ASSERT_OK(createIndex("unittests",
BSON("name"
<< "super2"
- << "ns" << _ns << "expireAfterSeconds" << 3600 << "sparse"
- << true << "unique" << 1 << "key" << BSON("superIdx"
- << "2d"))));
+ << "ns"
+ << _ns
+ << "expireAfterSeconds"
+ << 3600
+ << "sparse"
+ << true
+ << "unique"
+ << 1
+ << "key"
+ << BSON("superIdx"
+ << "2d"))));
}
};
@@ -807,23 +854,40 @@ public:
createIndex("unittest",
BSON("name"
<< "super2"
- << "ns" << _ns << "unique" << false << "sparse" << true
- << "expireAfterSeconds" << 3600 << "key" << BSON("superIdx"
- << "2d"))));
+ << "ns"
+ << _ns
+ << "unique"
+ << false
+ << "sparse"
+ << true
+ << "expireAfterSeconds"
+ << 3600
+ << "key"
+ << BSON("superIdx"
+ << "2d"))));
}
};
class SameSpecDifferentSparse : public ComplexIndex {
public:
void run() {
- ASSERT_EQUALS(
- ErrorCodes::IndexOptionsConflict,
- createIndex("unittest",
- BSON("name"
- << "super2"
- << "ns" << _ns << "unique" << 1 << "sparse" << false << "background"
- << true << "expireAfterSeconds" << 3600 << "key" << BSON("superIdx"
- << "2d"))));
+ ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict,
+ createIndex("unittest",
+ BSON("name"
+ << "super2"
+ << "ns"
+ << _ns
+ << "unique"
+ << 1
+ << "sparse"
+ << false
+ << "background"
+ << true
+ << "expireAfterSeconds"
+ << 3600
+ << "key"
+ << BSON("superIdx"
+ << "2d"))));
}
};
@@ -834,9 +898,17 @@ public:
createIndex("unittest",
BSON("name"
<< "super2"
- << "ns" << _ns << "unique" << 1 << "sparse" << true
- << "expireAfterSeconds" << 2400 << "key" << BSON("superIdx"
- << "2d"))));
+ << "ns"
+ << _ns
+ << "unique"
+ << 1
+ << "sparse"
+ << true
+ << "expireAfterSeconds"
+ << 2400
+ << "key"
+ << BSON("superIdx"
+ << "2d"))));
}
};
@@ -883,7 +955,11 @@ protected:
BSONObj _createSpec(T storageEngineValue) {
return BSON("name"
<< "super2"
- << "ns" << _ns << "key" << BSON("a" << 1) << "storageEngine"
+ << "ns"
+ << _ns
+ << "key"
+ << BSON("a" << 1)
+ << "storageEngine"
<< storageEngineValue);
}
};
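
The BSON(...) rewrites above look noisy because the formatter has no notion of the macro's key/value pairing: once a << chain has to wrap, every operand gets its own line, so keys are separated from their values. A compilable stand-in that shows the shape; DocBuilder is invented here and is not mongo's BSON macro:

    #include <iostream>
    #include <string>

    // Invented stand-in for a macro-based document builder.
    class DocBuilder {
    public:
        DocBuilder& operator<<(const std::string& token) {
            std::cout << token << ' ';
            return *this;
        }
        DocBuilder& operator<<(int token) {
            std::cout << token << ' ';
            return *this;
        }
    };

    int main() {
        DocBuilder doc;
        // The formatter treats every operand alike, so a wrapped chain puts
        // keys and values on separate lines, as in the hunks above:
        doc << "name"
            << "a"
            << "unique"
            << 1
            << "expireAfterSeconds"
            << 3600;
        std::cout << '\n';
        return 0;
    }
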
diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp
index 13579765dfb..ee99ae37d3b 100644
--- a/src/mongo/dbtests/jsobjtests.cpp
+++ b/src/mongo/dbtests/jsobjtests.cpp
@@ -501,7 +501,8 @@ public:
ASSERT(BSON(""
<< "b"
<< ""
- << "h").woSortOrder(o, key) < 0);
+ << "h")
+ .woSortOrder(o, key) < 0);
}
ASSERT(BSON(""
@@ -754,135 +755,131 @@ public:
keyTest(BSON("" << now << "" << 3 << "" << jstNULL << "" << true));
keyTest(BSON("" << now << "" << 3 << "" << BSONObj() << "" << true));
+ {{// check signed dates with new key format
+ KeyV1Owned a(BSONObjBuilder().appendDate("", Date_t::fromMillisSinceEpoch(-50)).obj());
+ KeyV1Owned b(BSONObjBuilder().appendDate("", Date_t::fromMillisSinceEpoch(50)).obj());
+ ASSERT(a.woCompare(b, Ordering::make(BSONObj())) < 0);
+ }
+ {
+ // backward compatibility
+ KeyBson a(BSONObjBuilder().appendDate("", Date_t::fromMillisSinceEpoch(-50)).obj());
+ KeyBson b(BSONObjBuilder().appendDate("", Date_t::fromMillisSinceEpoch(50)).obj());
+ ASSERT(a.woCompare(b, Ordering::make(BSONObj())) > 0);
+ }
+ {
+ // this is an uncompactable key:
+ BSONObj uc1 = BSONObjBuilder()
+ .appendDate("", Date_t::fromMillisSinceEpoch(-50))
+ .appendCode("", "abc")
+ .obj();
+ BSONObj uc2 = BSONObjBuilder()
+ .appendDate("", Date_t::fromMillisSinceEpoch(55))
+ .appendCode("", "abc")
+ .obj();
+ ASSERT(uc1.woCompare(uc2, Ordering::make(BSONObj())) < 0);
{
- {
- // check signed dates with new key format
- KeyV1Owned a(
- BSONObjBuilder().appendDate("", Date_t::fromMillisSinceEpoch(-50)).obj());
- KeyV1Owned b(
- BSONObjBuilder().appendDate("", Date_t::fromMillisSinceEpoch(50)).obj());
- ASSERT(a.woCompare(b, Ordering::make(BSONObj())) < 0);
- }
- {
- // backward compatibility
- KeyBson a(BSONObjBuilder().appendDate("", Date_t::fromMillisSinceEpoch(-50)).obj());
- KeyBson b(BSONObjBuilder().appendDate("", Date_t::fromMillisSinceEpoch(50)).obj());
- ASSERT(a.woCompare(b, Ordering::make(BSONObj())) > 0);
- }
- {
- // this is an uncompactable key:
- BSONObj uc1 = BSONObjBuilder()
- .appendDate("", Date_t::fromMillisSinceEpoch(-50))
- .appendCode("", "abc")
- .obj();
- BSONObj uc2 = BSONObjBuilder()
- .appendDate("", Date_t::fromMillisSinceEpoch(55))
- .appendCode("", "abc")
- .obj();
- ASSERT(uc1.woCompare(uc2, Ordering::make(BSONObj())) < 0);
- {
- KeyV1Owned a(uc1);
- KeyV1Owned b(uc2);
- ASSERT(!a.isCompactFormat());
- ASSERT(a.woCompare(b, Ordering::make(BSONObj())) < 0);
- }
- {
- KeyBson a(uc1);
- KeyBson b(uc2);
- ASSERT(!a.isCompactFormat());
- ASSERT(a.woCompare(b, Ordering::make(BSONObj())) > 0);
- }
- }
+ KeyV1Owned a(uc1);
+ KeyV1Owned b(uc2);
+ ASSERT(!a.isCompactFormat());
+ ASSERT(a.woCompare(b, Ordering::make(BSONObj())) < 0);
}
-
{
- BSONObjBuilder b;
- b.appendBinData("f", 8, (BinDataType)1, "aaaabbbb");
- b.appendBinData("e", 3, (BinDataType)1, "aaa");
- b.appendBinData("b", 1, (BinDataType)1, "x");
- BSONObj o = b.obj();
- keyTest(o, true);
+ KeyBson a(uc1);
+ KeyBson b(uc2);
+ ASSERT(!a.isCompactFormat());
+ ASSERT(a.woCompare(b, Ordering::make(BSONObj())) > 0);
}
+ }
+}
- {
- // check (non)equality
- BSONObj a = BSONObjBuilder().appendBinData("", 8, (BinDataType)1, "abcdefgh").obj();
- BSONObj b = BSONObjBuilder().appendBinData("", 8, (BinDataType)1, "abcdefgj").obj();
- ASSERT(!a.equal(b));
- int res_ab = a.woCompare(b);
- ASSERT(res_ab != 0);
- keyTest(a, true);
- keyTest(b, true);
-
- // check subtypes do not equal
- BSONObj c = BSONObjBuilder().appendBinData("", 8, (BinDataType)4, "abcdefgh").obj();
- BSONObj d = BSONObjBuilder().appendBinData("", 8, (BinDataType)0x81, "abcdefgh").obj();
- ASSERT(!a.equal(c));
- int res_ac = a.woCompare(c);
- ASSERT(res_ac != 0);
- keyTest(c, true);
- ASSERT(!a.equal(d));
- int res_ad = a.woCompare(d);
- ASSERT(res_ad != 0);
- keyTest(d, true);
-
- KeyV1Owned A(a);
- KeyV1Owned B(b);
- KeyV1Owned C(c);
- KeyV1Owned D(d);
- ASSERT(!A.woEqual(B));
- ASSERT(A.woCompare(B, Ordering::make(BSONObj())) < 0 && res_ab < 0);
- ASSERT(!A.woEqual(C));
- ASSERT(A.woCompare(C, Ordering::make(BSONObj())) < 0 && res_ac < 0);
- ASSERT(!A.woEqual(D));
- ASSERT(A.woCompare(D, Ordering::make(BSONObj())) < 0 && res_ad < 0);
- }
+{
+ BSONObjBuilder b;
+ b.appendBinData("f", 8, (BinDataType)1, "aaaabbbb");
+ b.appendBinData("e", 3, (BinDataType)1, "aaa");
+ b.appendBinData("b", 1, (BinDataType)1, "x");
+ BSONObj o = b.obj();
+ keyTest(o, true);
+}
- {
- BSONObjBuilder b;
- b.appendBinData("f", 33, (BinDataType)1, "123456789012345678901234567890123");
- BSONObj o = b.obj();
- keyTest(o, false);
- }
+{
+ // check (non)equality
+ BSONObj a = BSONObjBuilder().appendBinData("", 8, (BinDataType)1, "abcdefgh").obj();
+ BSONObj b = BSONObjBuilder().appendBinData("", 8, (BinDataType)1, "abcdefgj").obj();
+ ASSERT(!a.equal(b));
+ int res_ab = a.woCompare(b);
+ ASSERT(res_ab != 0);
+ keyTest(a, true);
+ keyTest(b, true);
+
+ // check subtypes do not equal
+ BSONObj c = BSONObjBuilder().appendBinData("", 8, (BinDataType)4, "abcdefgh").obj();
+ BSONObj d = BSONObjBuilder().appendBinData("", 8, (BinDataType)0x81, "abcdefgh").obj();
+ ASSERT(!a.equal(c));
+ int res_ac = a.woCompare(c);
+ ASSERT(res_ac != 0);
+ keyTest(c, true);
+ ASSERT(!a.equal(d));
+ int res_ad = a.woCompare(d);
+ ASSERT(res_ad != 0);
+ keyTest(d, true);
+
+ KeyV1Owned A(a);
+ KeyV1Owned B(b);
+ KeyV1Owned C(c);
+ KeyV1Owned D(d);
+ ASSERT(!A.woEqual(B));
+ ASSERT(A.woCompare(B, Ordering::make(BSONObj())) < 0 && res_ab < 0);
+ ASSERT(!A.woEqual(C));
+ ASSERT(A.woCompare(C, Ordering::make(BSONObj())) < 0 && res_ac < 0);
+ ASSERT(!A.woEqual(D));
+ ASSERT(A.woCompare(D, Ordering::make(BSONObj())) < 0 && res_ad < 0);
+}
- {
- for (int i = 1; i <= 3; i++) {
- for (int j = 1; j <= 3; j++) {
- BSONObjBuilder b;
- b.appendBinData("f", i, (BinDataType)j, "abc");
- BSONObj o = b.obj();
- keyTest(o, j != ByteArrayDeprecated);
- }
- }
- }
+{
+ BSONObjBuilder b;
+ b.appendBinData("f", 33, (BinDataType)1, "123456789012345678901234567890123");
+ BSONObj o = b.obj();
+ keyTest(o, false);
+}
- {
+{
+ for (int i = 1; i <= 3; i++) {
+ for (int j = 1; j <= 3; j++) {
BSONObjBuilder b;
- b.appendBinData("f", 1, (BinDataType)133, "a");
+ b.appendBinData("f", i, (BinDataType)j, "abc");
BSONObj o = b.obj();
- keyTest(o, true);
+ keyTest(o, j != ByteArrayDeprecated);
}
+ }
+}
- {
- BSONObjBuilder b;
- b.append("AA", 3);
- b.appendBinData("f", 0, (BinDataType)0, "");
- b.appendBinData("e", 3, (BinDataType)7, "aaa");
- b.appendBinData("b", 1, (BinDataType)128, "x");
- b.append("z", 3);
- b.appendBinData("bb", 0, (BinDataType)129, "x");
- BSONObj o = b.obj();
- keyTest(o, true);
- }
+{
+ BSONObjBuilder b;
+ b.appendBinData("f", 1, (BinDataType)133, "a");
+ BSONObj o = b.obj();
+ keyTest(o, true);
+}
- {
- // 9 is not supported in compact format. so test a non-compact case here.
- BSONObjBuilder b;
- b.appendBinData("f", 9, (BinDataType)0, "aaaabbbbc");
- BSONObj o = b.obj();
- keyTest(o);
- }
- }
+{
+ BSONObjBuilder b;
+ b.append("AA", 3);
+ b.appendBinData("f", 0, (BinDataType)0, "");
+ b.appendBinData("e", 3, (BinDataType)7, "aaa");
+ b.appendBinData("b", 1, (BinDataType)128, "x");
+ b.append("z", 3);
+ b.appendBinData("bb", 0, (BinDataType)129, "x");
+ BSONObj o = b.obj();
+ keyTest(o, true);
+}
+
+{
+ // 9 is not supported in compact format. so test a non-compact case here.
+ BSONObjBuilder b;
+ b.appendBinData("f", 9, (BinDataType)0, "aaaabbbbc");
+ BSONObj o = b.obj();
+ keyTest(o);
+}
+}
};
class ToStringNumber {
@@ -1452,13 +1449,18 @@ class LabelShares : public LabelBase {
BSONObj expected() {
return BSON("z"
<< "q"
- << "a" << (BSON("$gt" << 1)) << "x"
+ << "a"
+ << (BSON("$gt" << 1))
+ << "x"
<< "p");
}
BSONObj actual() {
return BSON("z"
<< "q"
- << "a" << GT << 1 << "x"
+ << "a"
+ << GT
+ << 1
+ << "x"
<< "p");
}
};
@@ -1477,14 +1479,20 @@ class LabelDoubleShares : public LabelBase {
BSONObj expected() {
return BSON("z"
<< "q"
- << "a" << (BSON("$gt" << 1 << "$lte"
- << "x")) << "x"
+ << "a"
+ << (BSON("$gt" << 1 << "$lte"
+ << "x"))
+ << "x"
<< "p");
}
BSONObj actual() {
return BSON("z"
<< "q"
- << "a" << GT << 1 << LTE << "x"
+ << "a"
+ << GT
+ << 1
+ << LTE
+ << "x"
<< "x"
<< "p");
}
@@ -1503,17 +1511,33 @@ class LabelMulti : public LabelBase {
BSONObj expected() {
return BSON("z"
<< "q"
- << "a" << BSON("$gt" << 1 << "$lte"
- << "x") << "b" << BSON("$ne" << 1 << "$ne"
- << "f"
- << "$ne" << 22.3) << "x"
+ << "a"
+ << BSON("$gt" << 1 << "$lte"
+ << "x")
+ << "b"
+ << BSON("$ne" << 1 << "$ne"
+ << "f"
+ << "$ne"
+ << 22.3)
+ << "x"
<< "p");
}
BSONObj actual() {
return BSON("z"
<< "q"
- << "a" << GT << 1 << LTE << "x"
- << "b" << NE << 1 << NE << "f" << NE << 22.3 << "x"
+ << "a"
+ << GT
+ << 1
+ << LTE
+ << "x"
+ << "b"
+ << NE
+ << 1
+ << NE
+ << "f"
+ << NE
+ << 22.3
+ << "x"
<< "p");
}
};
@@ -1523,7 +1547,8 @@ class LabelishOr : public LabelBase {
<< "x"))
<< BSON("b" << BSON("$ne" << 1 << "$ne"
<< "f"
- << "$ne" << 22.3))
+ << "$ne"
+ << 22.3))
<< BSON("x"
<< "p")));
}
@@ -1960,9 +1985,12 @@ struct ArrayMacroTest {
<< "qux")));
BSONObj obj = BSON("0"
<< "hello"
- << "1" << 1 << "2" << BSON("foo" << BSON_ARRAY("bar"
- << "baz"
- << "qux")));
+ << "1"
+ << 1
+ << "2"
+ << BSON("foo" << BSON_ARRAY("bar"
+ << "baz"
+ << "qux")));
ASSERT_EQUALS(arr, obj);
ASSERT_EQUALS(arr["2"].type(), Object);
@@ -2071,26 +2099,38 @@ public:
// DBRef stuff -- json parser can't handle this yet
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id" << 1)));
+ << "$id"
+ << 1)));
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id" << 1 << "$db"
+ << "$id"
+ << 1
+ << "$db"
<< "a")));
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id" << 1 << "stuff" << 1)));
+ << "$id"
+ << 1
+ << "stuff"
+ << 1)));
good(BSON("a" << BSON("$ref"
<< "coll"
- << "$id" << 1 << "$db"
+ << "$id"
+ << 1
+ << "$db"
<< "a"
- << "stuff" << 1)));
+ << "stuff"
+ << 1)));
bad(BSON("a" << BSON("$ref" << 1 << "$id" << 1)));
bad(BSON("a" << BSON("$ref" << 1 << "$id" << 1 << "$db"
<< "a")));
bad(BSON("a" << BSON("$ref"
<< "coll"
- << "$id" << 1 << "$db" << 1)));
+ << "$id"
+ << 1
+ << "$db"
+ << 1)));
bad(BSON("a" << BSON("$ref"
<< "coll")));
bad(BSON("a" << BSON("$ref"
@@ -2102,10 +2142,16 @@ public:
<< "coll")));
bad(BSON("a" << BSON("$ref"
<< "coll"
- << "$id" << 1 << "$hater" << 1)));
+ << "$id"
+ << 1
+ << "$hater"
+ << 1)));
bad(BSON("a" << BSON("$ref"
<< "coll"
- << "$id" << 1 << "dot.dot" << 1)));
+ << "$id"
+ << 1
+ << "dot.dot"
+ << 1)));
// _id isn't a RegEx, or Array
good("{_id: 0}");
diff --git a/src/mongo/dbtests/jsontests.cpp b/src/mongo/dbtests/jsontests.cpp
index 04a5b375a34..ecd156ed5a6 100644
--- a/src/mongo/dbtests/jsontests.cpp
+++ b/src/mongo/dbtests/jsontests.cpp
@@ -65,7 +65,8 @@ public:
void run() {
ASSERT_EQUALS("{ \"a\" : \"b\" }",
BSON("a"
- << "b").jsonString(Strict));
+ << "b")
+ .jsonString(Strict));
}
};
@@ -2473,7 +2474,8 @@ public:
virtual BSONObj bson() const {
return BSON("int" << 123 << "long" << 9223372036854775807ll // 2**63 - 1
- << "double" << 3.14);
+ << "double"
+ << 3.14);
}
virtual string json() const {
return "{ \"int\": 123, \"long\": 9223372036854775807, \"double\": 3.14 }";
@@ -2496,7 +2498,8 @@ public:
virtual BSONObj bson() const {
return BSON("int" << 123 << "long" << 9223372036854775807ll // 2**63 - 1
- << "double" << 3.14);
+ << "double"
+ << 3.14);
}
virtual string json() const {
return "{ 'int': NumberInt(123), "
@@ -2596,7 +2599,8 @@ public:
virtual BSONObj bson() const {
return BSON("int" << -123 << "long" << -9223372036854775807ll // -1 * (2**63 - 1)
- << "double" << -3.14);
+ << "double"
+ << -3.14);
}
virtual string json() const {
return "{ \"int\": -123, \"long\": -9223372036854775807, \"double\": -3.14 }";
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
index 35a23e99f6c..0e25183116b 100644
--- a/src/mongo/dbtests/jstests.cpp
+++ b/src/mongo/dbtests/jstests.cpp
@@ -396,7 +396,8 @@ public:
<< "eliot"
<< "z"
<< "sara"
- << "zz" << BSONObj());
+ << "zz"
+ << BSONObj());
s->setObject("blah", o, true);
BSONObj out;
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.cpp b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
index a77606e7ae9..45b189bf9e8 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.cpp
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
@@ -36,10 +36,10 @@
#include "mongo/rpc/command_reply_builder.h"
#include "mongo/rpc/metadata.h"
#include "mongo/stdx/memory.h"
+#include "mongo/util/assert_util.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/net/socket_exception.h"
#include "mongo/util/time_support.h"
-#include "mongo/util/assert_util.h"
using std::string;
using std::vector;
diff --git a/src/mongo/dbtests/mock/mock_replica_set.h b/src/mongo/dbtests/mock/mock_replica_set.h
index c3f22c3cb97..b2b48747d8e 100644
--- a/src/mongo/dbtests/mock/mock_replica_set.h
+++ b/src/mongo/dbtests/mock/mock_replica_set.h
@@ -27,12 +27,12 @@
#pragma once
-#include "mongo/dbtests/mock/mock_remote_db_server.h"
#include "mongo/db/repl/member_config.h"
#include "mongo/db/repl/replica_set_config.h"
+#include "mongo/dbtests/mock/mock_remote_db_server.h"
-#include <string>
#include <map>
+#include <string>
#include <vector>
namespace mongo {
diff --git a/src/mongo/dbtests/mock_dbclient_conn_test.cpp b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
index 7febff697a3..d347eb76897 100644
--- a/src/mongo/dbtests/mock_dbclient_conn_test.cpp
+++ b/src/mongo/dbtests/mock_dbclient_conn_test.cpp
@@ -36,8 +36,8 @@
#include "mongo/dbtests/mock/mock_dbclient_connection.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/net/sock.h"
-#include "mongo/util/timer.h"
#include "mongo/util/net/socket_exception.h"
+#include "mongo/util/timer.h"
#include <ctime>
#include <string>
@@ -413,10 +413,16 @@ TEST(MockDBClientConnTest, CyclingCmd) {
vector<BSONObj> isMasterSequence;
isMasterSequence.push_back(BSON("set"
<< "a"
- << "isMaster" << true << "ok" << 1));
+ << "isMaster"
+ << true
+ << "ok"
+ << 1));
isMasterSequence.push_back(BSON("set"
<< "a"
- << "isMaster" << false << "ok" << 1));
+ << "isMaster"
+ << false
+ << "ok"
+ << 1));
server.setCommandReply("isMaster", isMasterSequence);
}
diff --git a/src/mongo/dbtests/multikey_paths_test.cpp b/src/mongo/dbtests/multikey_paths_test.cpp
index 429bc9447eb..a5031bcb44e 100644
--- a/src/mongo/dbtests/multikey_paths_test.cpp
+++ b/src/mongo/dbtests/multikey_paths_test.cpp
@@ -31,9 +31,9 @@
#include <iostream>
#include <string>
+#include "mongo/db/catalog/index_create.h"
#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
-#include "mongo/db/catalog/index_create.h"
#include "mongo/db/index/multikey_paths.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/service_context.h"
@@ -91,7 +91,8 @@ public:
const bool match = (expectedMultikeyPaths == actualMultikeyPaths);
if (!match) {
FAIL(str::stream() << "Expected: " << dumpMultikeyPaths(expectedMultikeyPaths)
- << ", Actual: " << dumpMultikeyPaths(actualMultikeyPaths));
+ << ", Actual: "
+ << dumpMultikeyPaths(actualMultikeyPaths));
}
ASSERT_TRUE(match);
} else {
@@ -150,7 +151,10 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreation) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns" << _nss.ns() << "key" << keyPattern));
+ << "ns"
+ << _nss.ns()
+ << "key"
+ << keyPattern));
assertMultikeyPaths(collection, keyPattern, {std::set<size_t>{}, {0U}});
}
@@ -181,7 +185,10 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnIndexCreationWithMultipleDocuments) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns" << _nss.ns() << "key" << keyPattern));
+ << "ns"
+ << _nss.ns()
+ << "key"
+ << keyPattern));
assertMultikeyPaths(collection, keyPattern, {{0U}, {0U}});
}
@@ -195,7 +202,10 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentInsert) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns" << _nss.ns() << "key" << keyPattern));
+ << "ns"
+ << _nss.ns()
+ << "key"
+ << keyPattern));
{
WriteUnitOfWork wuow(_opCtx.get());
@@ -235,7 +245,10 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdate) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns" << _nss.ns() << "key" << keyPattern));
+ << "ns"
+ << _nss.ns()
+ << "key"
+ << keyPattern));
{
WriteUnitOfWork wuow(_opCtx.get());
@@ -285,7 +298,10 @@ TEST_F(MultikeyPathsTest, PathsNotUpdatedOnDocumentDelete) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns" << _nss.ns() << "key" << keyPattern));
+ << "ns"
+ << _nss.ns()
+ << "key"
+ << keyPattern));
{
WriteUnitOfWork wuow(_opCtx.get());
@@ -326,13 +342,19 @@ TEST_F(MultikeyPathsTest, PathsUpdatedForMultipleIndexesOnDocumentInsert) {
createIndex(collection,
BSON("name"
<< "a_1_b_1"
- << "ns" << _nss.ns() << "key" << keyPatternAB));
+ << "ns"
+ << _nss.ns()
+ << "key"
+ << keyPatternAB));
BSONObj keyPatternAC = BSON("a" << 1 << "c" << 1);
createIndex(collection,
BSON("name"
<< "a_1_c_1"
- << "ns" << _nss.ns() << "key" << keyPatternAC));
+ << "ns"
+ << _nss.ns()
+ << "key"
+ << keyPatternAC));
{
WriteUnitOfWork wuow(_opCtx.get());
OpDebug* const nullOpDebug = nullptr;
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
index b6d29ca2ddd..479d7d2317c 100644
--- a/src/mongo/dbtests/namespacetests.cpp
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -124,7 +124,9 @@ public:
const ServiceContext::UniqueOperationContext txnPtr = cc().makeOperationContext();
OperationContext& txn = *txnPtr;
BSONObj spec(BSON("key" << BSON("a"
- << "hashed") << "seed" << 0x5eed));
+ << "hashed")
+ << "seed"
+ << 0x5eed));
BSONObj nullObj = BSON("a" << BSONNULL);
BSONObjSet nullFieldKeySet;
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index 977a4dad4b8..4a01d5ee9b2 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -223,8 +223,10 @@ protected:
void buildCollection() {
BSONObj info;
// Create a collection with specified extent sizes
- BSONObj command = BSON("create" << nss.coll() << "capped" << true << "$nExtents"
- << extentSizes() << "autoIndexId" << false);
+ BSONObj command =
+ BSON("create" << nss.coll() << "capped" << true << "$nExtents" << extentSizes()
+ << "autoIndexId"
+ << false);
ASSERT(client()->runCommand(nss.db().toString(), command, info));
// Populate documents.
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index 970f7d02ac9..6acaffe12ff 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -230,11 +230,11 @@ public:
// both the {a:1} and {b:1} indices even though it performs poorly.
soln = pickBestPlan(cq.get());
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {andSorted: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
- soln->root.get()));
+ ASSERT(
+ QueryPlannerTestLib::solutionMatches("{fetch: {node: {andSorted: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
+ soln->root.get()));
}
};
@@ -267,11 +267,11 @@ public:
internalQueryForceIntersectionPlans = true;
QuerySolution* soln = pickBestPlan(cq.get());
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {node: {andHash: {nodes: ["
- "{ixscan: {filter: null, pattern: {a:1}}},"
- "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
- soln->root.get()));
+ ASSERT(
+ QueryPlannerTestLib::solutionMatches("{fetch: {node: {andHash: {nodes: ["
+ "{ixscan: {filter: null, pattern: {a:1}}},"
+ "{ixscan: {filter: null, pattern: {b:1}}}]}}}}",
+ soln->root.get()));
// Confirm that a backup plan is available.
ASSERT(hasBackupPlan());
@@ -550,10 +550,10 @@ public:
// so we expect to choose {d: 1, e: 1}, as it allows us
// to avoid the sort stage.
QuerySolution* soln = pickBestPlan(cq.get());
- ASSERT(QueryPlannerTestLib::solutionMatches(
- "{fetch: {filter: {a:1}, node: "
- "{ixscan: {filter: null, pattern: {d:1,e:1}}}}}",
- soln->root.get()));
+ ASSERT(
+ QueryPlannerTestLib::solutionMatches("{fetch: {filter: {a:1}, node: "
+ "{ixscan: {filter: null, pattern: {d:1,e:1}}}}}",
+ soln->root.get()));
}
};
diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp
index e24e4047ce2..24f31ebc1be 100644
--- a/src/mongo/dbtests/query_stage_count.cpp
+++ b/src/mongo/dbtests/query_stage_count.cpp
@@ -70,10 +70,13 @@ public:
_coll = _ctx.db()->createCollection(&_txn, ns());
_coll->getIndexCatalog()->createIndexOnEmptyCollection(&_txn,
- BSON("key"
- << BSON("x" << 1) << "name"
- << "x_1"
- << "ns" << ns() << "v" << 1));
+ BSON("key" << BSON("x" << 1)
+ << "name"
+ << "x_1"
+ << "ns"
+ << ns()
+ << "v"
+ << 1));
for (int i = 0; i < kDocuments; i++) {
insert(BSON(GENOID << "x" << i));
diff --git a/src/mongo/dbtests/query_stage_ensure_sorted.cpp b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
index f89bbd42ea5..858106660c8 100644
--- a/src/mongo/dbtests/query_stage_ensure_sorted.cpp
+++ b/src/mongo/dbtests/query_stage_ensure_sorted.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/client.h"
#include "mongo/db/exec/ensure_sorted.h"
-#include "mongo/db/exec/sort_key_generator.h"
#include "mongo/db/exec/queued_data_stage.h"
+#include "mongo/db/exec/sort_key_generator.h"
#include "mongo/db/json.h"
#include "mongo/db/query/collation/collator_interface_mock.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index 825152c4007..36a6e9f502b 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -105,7 +105,8 @@ public:
OldClientWriteContext ctx(&_txn, nss.ns());
addIndex(BSON("a"
<< "2d"
- << "b" << 1));
+ << "b"
+ << 1));
addIndex(BSON("a"
<< "2d"));
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 8cf6d28d1a5..46cc769b1d1 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -199,7 +199,8 @@ public:
bool ok = cl.runCommand("unittests",
BSON("godinsert"
<< "querytests"
- << "obj" << BSONObj()),
+ << "obj"
+ << BSONObj()),
info);
ASSERT(ok);
@@ -590,7 +591,12 @@ public:
_client.runCommand("unittests",
BSON("create"
<< "querytests.TailableQueryOnId"
- << "capped" << true << "size" << 8192 << "autoIndexId" << true),
+ << "capped"
+ << true
+ << "size"
+ << 8192
+ << "autoIndexId"
+ << true),
info);
insertA(ns, 0);
insertA(ns, 1);
@@ -676,7 +682,10 @@ public:
_client.runCommand("unittests",
BSON("create"
<< "querytests.OplogReplaySlaveReadTill"
- << "capped" << true << "size" << 8192),
+ << "capped"
+ << true
+ << "size"
+ << 8192),
info);
Date_t one = Date_t::fromMillisSinceEpoch(getNextGlobalTimestamp().asLL());
@@ -1285,14 +1294,18 @@ public:
ASSERT_EQUALS(17, _client.findOne(ns(), b.obj())["z"].number());
}
ASSERT_EQUALS(17,
- _client.findOne(ns(),
- BSON("x"
- << "eliot"))["z"].number());
+ _client
+ .findOne(ns(),
+ BSON("x"
+ << "eliot"))["z"]
+ .number());
ASSERT_OK(dbtests::createIndex(&_txn, ns(), BSON("x" << 1)));
ASSERT_EQUALS(17,
- _client.findOne(ns(),
- BSON("x"
- << "eliot"))["z"].number());
+ _client
+ .findOne(ns(),
+ BSON("x"
+ << "eliot"))["z"]
+ .number());
}
};
@@ -1314,7 +1327,8 @@ public:
ctx.db(),
ns(),
fromjson("{ capped : true, size : 2000, max: 10000 }"),
- false).isOK());
+ false)
+ .isOK());
wunit.commit();
}
@@ -1452,7 +1466,11 @@ public:
ASSERT(_client.runCommand("unittests",
BSON("create"
<< "querytests.findingstart"
- << "capped" << true << "$nExtents" << 5 << "autoIndexId"
+ << "capped"
+ << true
+ << "$nExtents"
+ << 5
+ << "autoIndexId"
<< false),
info));
@@ -1499,7 +1517,11 @@ public:
ASSERT(_client.runCommand("unittests",
BSON("create"
<< "querytests.findingstart"
- << "capped" << true << "$nExtents" << 5 << "autoIndexId"
+ << "capped"
+ << true
+ << "$nExtents"
+ << 5
+ << "autoIndexId"
<< false),
info));
@@ -1547,7 +1569,11 @@ public:
ASSERT(_client.runCommand("unittests",
BSON("create"
<< "querytests.findingstart"
- << "capped" << true << "$nExtents" << 5 << "autoIndexId"
+ << "capped"
+ << true
+ << "$nExtents"
+ << 5
+ << "autoIndexId"
<< false),
info));
@@ -1600,7 +1626,10 @@ public:
ASSERT(_client.runCommand("unittests",
BSON("create"
<< "querytests.exhaust"
- << "capped" << true << "size" << 8192),
+ << "capped"
+ << true
+ << "size"
+ << 8192),
info));
_client.insert(ns(), BSON("ts" << 0));
Message message;
diff --git a/src/mongo/dbtests/replica_set_monitor_test.cpp b/src/mongo/dbtests/replica_set_monitor_test.cpp
index 68d36194b8f..bab15d05f48 100644
--- a/src/mongo/dbtests/replica_set_monitor_test.cpp
+++ b/src/mongo/dbtests/replica_set_monitor_test.cpp
@@ -29,8 +29,8 @@
#include "mongo/platform/basic.h"
#include "mongo/client/connpool.h"
-#include "mongo/client/dbclientinterface.h"
#include "mongo/client/dbclient_rs.h"
+#include "mongo/client/dbclientinterface.h"
#include "mongo/client/replica_set_monitor.h"
#include "mongo/client/replica_set_monitor_internal.h"
#include "mongo/dbtests/mock/mock_conn_registry.h"
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index ec092669747..10996568f12 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -916,7 +916,8 @@ public:
_client.insert("unittests.system.indexes",
BSON("ns" << ns() << "key" << BSON("a" << 1) << "name"
<< "foo"
- << "sparse" << true));
+ << "sparse"
+ << true));
}
~EmptyPushSparseIndex() {
_client.dropIndexes(ns());
@@ -1410,10 +1411,12 @@ public:
void run() {
bool threw = false;
BSONObj o = BSON("ns" << ns() << "o" << BSON("foo"
- << "bar") << "o2" << BSON("_id"
- << "in oplog"
- << "foo"
- << "bar"));
+ << "bar")
+ << "o2"
+ << BSON("_id"
+ << "in oplog"
+ << "foo"
+ << "bar"));
ScopedTransaction transaction(&_txn, MODE_X);
Lock::GlobalWrite lk(_txn.lockState());
@@ -1433,9 +1436,11 @@ public:
// now this should succeed
SyncTest t;
verify(t.shouldRetry(&_txn, o));
- verify(!_client.findOne(ns(),
- BSON("_id"
- << "on remote")).isEmpty());
+ verify(!_client
+ .findOne(ns(),
+ BSON("_id"
+ << "on remote"))
+ .isEmpty());
// force it not to find an obj
t.returnEmpty = true;
diff --git a/src/mongo/dbtests/sort_key_generator_test.cpp b/src/mongo/dbtests/sort_key_generator_test.cpp
index 5a8faa6d2b2..0993579b54e 100644
--- a/src/mongo/dbtests/sort_key_generator_test.cpp
+++ b/src/mongo/dbtests/sort_key_generator_test.cpp
@@ -30,8 +30,8 @@
#include "mongo/db/exec/sort_key_generator.h"
#include "mongo/db/json.h"
-#include "mongo/stdx/memory.h"
#include "mongo/db/query/query_test_service_context.h"
+#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
index 943bfffed15..5ab410361eb 100644
--- a/src/mongo/dbtests/threadedtests.cpp
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -98,7 +98,7 @@ const int nthr = 135;
#endif
class MongoMutexTest : public ThreadedTest<nthr> {
#if defined(MONGO_CONFIG_DEBUG_BUILD)
- enum { N = 2000 };
+ enum {N = 2000};
#else
enum { N = 4000 /*0*/ };
#endif
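
The enum hunk is brace-list handling rather than line wrapping: the one-line enumerator list is formatted like a braced initializer, so the inner padding is dropped. That matches clang-format's Cpp11BracedListStyle behavior, though whether mongo's .clang-format sets that option is an assumption here, not something this diff shows:

    // Assumption: Cpp11BracedListStyle (or equivalent) is in effect, so the
    // enumerator braces lose their inner spaces, as in the hunk above.
    enum {kMaxIterations = 2000};

    int main() {
        return kMaxIterations == 2000 ? 0 : 1;
    }
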
diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp
index c21b70a551f..961dd084be9 100644
--- a/src/mongo/dbtests/updatetests.cpp
+++ b/src/mongo/dbtests/updatetests.cpp
@@ -190,9 +190,11 @@ public:
<< "b"),
BSON("$set" << BSON("a"
<< "c")));
- ASSERT(!_client.findOne(ns(),
- BSON("a"
- << "c")).isEmpty());
+ ASSERT(!_client
+ .findOne(ns(),
+ BSON("a"
+ << "c"))
+ .isEmpty());
}
};
@@ -207,9 +209,11 @@ public:
<< "b"),
BSON("$set" << BSON("a"
<< "cd")));
- ASSERT(!_client.findOne(ns(),
- BSON("a"
- << "cd")).isEmpty());
+ ASSERT(!_client
+ .findOne(ns(),
+ BSON("a"
+ << "cd"))
+ .isEmpty());
}
};
@@ -363,9 +367,11 @@ public:
Query(),
BSON("$set" << BSON("a.b"
<< "llll")));
- ASSERT(!_client.findOne(ns(),
- BSON("a.b"
- << "llll")).isEmpty());
+ ASSERT(!_client
+ .findOne(ns(),
+ BSON("a.b"
+ << "llll"))
+ .isEmpty());
}
};
@@ -377,10 +383,11 @@ public:
Query(),
BSON("$set" << BSON("a.b"
<< "lllll")));
- ASSERT(_client.findOne(ns(),
- BSON("a.b"
- << "lllll")).woCompare(fromjson("{'_id':0,a:{b:'lllll'}}")) ==
- 0);
+ ASSERT(_client
+ .findOne(ns(),
+ BSON("a.b"
+ << "lllll"))
+ .woCompare(fromjson("{'_id':0,a:{b:'lllll'}}")) == 0);
}
};
@@ -392,10 +399,11 @@ public:
BSONObj(),
BSON("$set" << BSON("a.b"
<< "lllll")));
- ASSERT(_client.findOne(ns(),
- BSON("a.b"
- << "lllll")).woCompare(fromjson("{'_id':0,a:{b:'lllll'}}")) ==
- 0);
+ ASSERT(_client
+ .findOne(ns(),
+ BSON("a.b"
+ << "lllll"))
+ .woCompare(fromjson("{'_id':0,a:{b:'lllll'}}")) == 0);
}
};
@@ -1652,8 +1660,8 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0,x:[{a:1},{a:3}]}"));
// { $push : { x : { $each : [ {a:2} ], $sort: {a:1}, $slice:-2 } } }
- BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1)
- << "$slice" << -2.0);
+ BSONObj pushObj = BSON(
+ "$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1) << "$slice" << -2.0);
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj expected = fromjson("{'_id':0,x:[{a:2},{a:3}]}");
BSONObj result = _client.findOne(ns(), Query());
@@ -1667,8 +1675,9 @@ public:
BSONObj expected = fromjson("{'_id':0,x:[{a:1},{a:3}]}");
_client.insert(ns(), expected);
// { $push : { x : { $each : [ {a:2} ], $sort : {a:1}, $sort: {a:1} } } }
- BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1)
- << "$sort" << BSON("a" << 1));
+ BSONObj pushObj =
+ BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1) << "$sort"
+ << BSON("a" << 1));
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj)));
BSONObj result = _client.findOne(ns(), Query());
ASSERT_EQUALS(result, expected);
@@ -1748,7 +1757,9 @@ public:
ns(), BSON("_id" << 0 << "a" << 1 << "x" << BSONObj() << "x" << BSONObj() << "z" << 5));
_client.update(ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1)));
ASSERT_EQUALS(BSON("_id" << 0 << "a" << 1 << "x" << BSON("b" << 1 << "c" << 1) << "x"
- << BSONObj() << "z" << 5),
+ << BSONObj()
+ << "z"
+ << 5),
_client.findOne(ns(), BSONObj()));
}
};
@@ -1762,7 +1773,9 @@ public:
_client.update(
ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1 << "x.d" << 1)));
ASSERT_EQUALS(BSON("_id" << 0 << "x" << BSON("b" << 1 << "c" << 1 << "d" << 1) << "x"
- << BSONObj() << "x" << BSONObj()),
+ << BSONObj()
+ << "x"
+ << BSONObj()),
_client.findOne(ns(), BSONObj()));
}
};
diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp
index dbaffd3f8bf..99b08f298fd 100644
--- a/src/mongo/dbtests/validate_tests.cpp
+++ b/src/mongo/dbtests/validate_tests.cpp
@@ -167,13 +167,16 @@ public:
wunit.commit();
}
- auto status =
- dbtests::createIndexFromSpec(&_txn,
- coll->ns().ns(),
- BSON("name"
- << "a"
- << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
- << "background" << false));
+ auto status = dbtests::createIndexFromSpec(&_txn,
+ coll->ns().ns(),
+ BSON("name"
+ << "a"
+ << "ns"
+ << coll->ns().ns()
+ << "key"
+ << BSON("a" << 1)
+ << "background"
+ << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -225,13 +228,16 @@ public:
wunit.commit();
}
- auto status =
- dbtests::createIndexFromSpec(&_txn,
- coll->ns().ns(),
- BSON("name"
- << "a"
- << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
- << "background" << false));
+ auto status = dbtests::createIndexFromSpec(&_txn,
+ coll->ns().ns(),
+ BSON("name"
+ << "a"
+ << "ns"
+ << coll->ns().ns()
+ << "key"
+ << BSON("a" << 1)
+ << "background"
+ << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -355,13 +361,16 @@ public:
ASSERT_TRUE(checkValid());
// Create multi-key index.
- auto status =
- dbtests::createIndexFromSpec(&_txn,
- coll->ns().ns(),
- BSON("name"
- << "multikey_index"
- << "ns" << coll->ns().ns() << "key"
- << BSON("a.b" << 1) << "background" << false));
+ auto status = dbtests::createIndexFromSpec(&_txn,
+ coll->ns().ns(),
+ BSON("name"
+ << "multikey_index"
+ << "ns"
+ << coll->ns().ns()
+ << "key"
+ << BSON("a.b" << 1)
+ << "background"
+ << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -416,13 +425,18 @@ public:
}
// Create a sparse index.
- auto status =
- dbtests::createIndexFromSpec(&_txn,
- coll->ns().ns(),
- BSON("name"
- << "sparse_index"
- << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
- << "background" << false << "sparse" << true));
+ auto status = dbtests::createIndexFromSpec(&_txn,
+ coll->ns().ns(),
+ BSON("name"
+ << "sparse_index"
+ << "ns"
+ << coll->ns().ns()
+ << "key"
+ << BSON("a" << 1)
+ << "background"
+ << false
+ << "sparse"
+ << true));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -469,14 +483,18 @@ public:
}
// Create a partial index.
- auto status =
- dbtests::createIndexFromSpec(&_txn,
- coll->ns().ns(),
- BSON("name"
- << "partial_index"
- << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
- << "background" << false << "partialFilterExpression"
- << BSON("a" << BSON("$gt" << 1))));
+ auto status = dbtests::createIndexFromSpec(&_txn,
+ coll->ns().ns(),
+ BSON("name"
+ << "partial_index"
+ << "ns"
+ << coll->ns().ns()
+ << "key"
+ << BSON("a" << 1)
+ << "background"
+ << false
+ << "partialFilterExpression"
+ << BSON("a" << BSON("$gt" << 1))));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -529,17 +547,23 @@ public:
coll->ns().ns(),
BSON("name"
<< "compound_index_1"
- << "ns" << coll->ns().ns() << "key"
+ << "ns"
+ << coll->ns().ns()
+ << "key"
<< BSON("a" << 1 << "b" << -1)
- << "background" << false));
+ << "background"
+ << false));
ASSERT_OK(status);
status = dbtests::createIndexFromSpec(&_txn,
coll->ns().ns(),
BSON("name"
<< "compound_index_2"
- << "ns" << coll->ns().ns() << "key"
- << BSON("a" << -1 << "b" << 1) << "background"
+ << "ns"
+ << coll->ns().ns()
+ << "key"
+ << BSON("a" << -1 << "b" << 1)
+ << "background"
<< false));
ASSERT_OK(status);
@@ -588,7 +612,8 @@ public:
&_txn,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
- << "background" << false));
+ << "background"
+ << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
@@ -648,7 +673,8 @@ public:
&_txn,
coll->ns().ns(),
BSON("name" << indexName << "ns" << coll->ns().ns() << "key" << BSON("a" << 1)
- << "background" << false));
+ << "background"
+ << false));
ASSERT_OK(status);
ASSERT_TRUE(checkValid());
diff --git a/src/mongo/executor/async_mock_stream_factory.cpp b/src/mongo/executor/async_mock_stream_factory.cpp
index ae8356a4982..c2fee263c60 100644
--- a/src/mongo/executor/async_mock_stream_factory.cpp
+++ b/src/mongo/executor/async_mock_stream_factory.cpp
@@ -124,18 +124,17 @@ AsyncMockStreamFactory::MockStream::~MockStream() {
void AsyncMockStreamFactory::MockStream::connect(asio::ip::tcp::resolver::iterator endpoints,
ConnectHandler&& connectHandler) {
// Suspend execution after "connecting"
- _defer(kBlockedBeforeConnect,
- [this, connectHandler, endpoints]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
-
- // We shim a lambda to give connectHandler the right signature since it doesn't take
- // a size_t param.
- checkCanceled(
- _strand,
- &_state,
- [connectHandler](std::error_code ec, std::size_t) { return connectHandler(ec); },
- 0);
- });
+ _defer(kBlockedBeforeConnect, [this, connectHandler, endpoints]() {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+
+ // We shim a lambda to give connectHandler the right signature since it doesn't take
+ // a size_t param.
+ checkCanceled(
+ _strand,
+ &_state,
+ [connectHandler](std::error_code ec, std::size_t) { return connectHandler(ec); },
+ 0);
+ });
}
void AsyncMockStreamFactory::MockStream::write(asio::const_buffer buf,
@@ -147,11 +146,10 @@ void AsyncMockStreamFactory::MockStream::write(asio::const_buffer buf,
_writeQueue.push({begin, begin + size});
// Suspend execution after data is written.
- _defer_inlock(kBlockedAfterWrite,
- [this, writeHandler, size]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- checkCanceled(_strand, &_state, std::move(writeHandler), size);
- });
+ _defer_inlock(kBlockedAfterWrite, [this, writeHandler, size]() {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ checkCanceled(_strand, &_state, std::move(writeHandler), size);
+ });
}
void AsyncMockStreamFactory::MockStream::cancel() {
@@ -169,45 +167,44 @@ void AsyncMockStreamFactory::MockStream::cancel() {
void AsyncMockStreamFactory::MockStream::read(asio::mutable_buffer buf,
StreamHandler&& readHandler) {
// Suspend execution before data is read.
- _defer(kBlockedBeforeRead,
- [this, buf, readHandler]() {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- int nToCopy = 0;
-
- // If we've set an error, return that instead of a read.
- if (!_error) {
- auto nextRead = std::move(_readQueue.front());
- _readQueue.pop();
-
- auto beginDst = asio::buffer_cast<uint8_t*>(buf);
- nToCopy = std::min(nextRead.size(), asio::buffer_size(buf));
-
- auto endSrc = std::begin(nextRead);
- std::advance(endSrc, nToCopy);
-
- auto endDst = std::copy(std::begin(nextRead), endSrc, beginDst);
- invariant((endDst - beginDst) == static_cast<std::ptrdiff_t>(nToCopy));
- log() << "read " << nToCopy << " bytes, " << (nextRead.size() - nToCopy)
- << " remaining in buffer";
- }
-
- auto handler = readHandler;
-
- // If we did not receive all the bytes, we should return an error
- if (static_cast<size_t>(nToCopy) < asio::buffer_size(buf)) {
- handler = [readHandler](std::error_code ec, size_t len) {
- // If we have an error here we've been canceled, and that takes precedence
- if (ec)
- return readHandler(ec, len);
-
- // Call the original handler with an error
- readHandler(make_error_code(ErrorCodes::InvalidLength), len);
- };
- }
-
- checkCanceled(_strand, &_state, std::move(handler), nToCopy, _error);
- _error.clear();
- });
+ _defer(kBlockedBeforeRead, [this, buf, readHandler]() {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ int nToCopy = 0;
+
+ // If we've set an error, return that instead of a read.
+ if (!_error) {
+ auto nextRead = std::move(_readQueue.front());
+ _readQueue.pop();
+
+ auto beginDst = asio::buffer_cast<uint8_t*>(buf);
+ nToCopy = std::min(nextRead.size(), asio::buffer_size(buf));
+
+ auto endSrc = std::begin(nextRead);
+ std::advance(endSrc, nToCopy);
+
+ auto endDst = std::copy(std::begin(nextRead), endSrc, beginDst);
+ invariant((endDst - beginDst) == static_cast<std::ptrdiff_t>(nToCopy));
+ log() << "read " << nToCopy << " bytes, " << (nextRead.size() - nToCopy)
+ << " remaining in buffer";
+ }
+
+ auto handler = readHandler;
+
+ // If we did not receive all the bytes, we should return an error
+ if (static_cast<size_t>(nToCopy) < asio::buffer_size(buf)) {
+ handler = [readHandler](std::error_code ec, size_t len) {
+ // If we have an error here we've been canceled, and that takes precedence
+ if (ec)
+ return readHandler(ec, len);
+
+ // Call the original handler with an error
+ readHandler(make_error_code(ErrorCodes::InvalidLength), len);
+ };
+ }
+
+ checkCanceled(_strand, &_state, std::move(handler), nToCopy, _error);
+ _error.clear();
+ });
}
void AsyncMockStreamFactory::MockStream::pushRead(std::vector<uint8_t> toRead) {
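Note: this file's hunks show the other dominant change in the patch. A lambda passed as the last argument now stays on the same line as the call, and its body indents one level like an ordinary block instead of hanging out at the argument column. Roughly, with placeholder names (defer and doWork are invented for this sketch):

    // Old layout: the lambda body aligns under the argument column.
    defer(kBlockedBeforeRead,
          [this]() {
              doWork();
          });

    // New layout: the trailing lambda hugs the call site.
    defer(kBlockedBeforeRead, [this]() {
        doWork();
    });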
diff --git a/src/mongo/executor/async_mock_stream_factory.h b/src/mongo/executor/async_mock_stream_factory.h
index 11c10128065..0c1e33ad3b4 100644
--- a/src/mongo/executor/async_mock_stream_factory.h
+++ b/src/mongo/executor/async_mock_stream_factory.h
@@ -30,8 +30,8 @@
#include <asio.hpp>
#include <cstdint>
-#include <queue>
#include <memory>
+#include <queue>
#include <unordered_map>
#include "mongo/executor/async_stream_factory_interface.h"
diff --git a/src/mongo/executor/async_stream_test.cpp b/src/mongo/executor/async_stream_test.cpp
index b772b5ac0e1..2731df5d713 100644
--- a/src/mongo/executor/async_stream_test.cpp
+++ b/src/mongo/executor/async_stream_test.cpp
@@ -123,11 +123,10 @@ TEST(AsyncStreamTest, IsOpen) {
executor::Deferred<bool> opened;
log() << "opening up outgoing connection";
- stream.connect(endpoints,
- [opened](std::error_code ec) mutable {
- log() << "opened outgoing connection";
- opened.emplace(!ec);
- });
+ stream.connect(endpoints, [opened](std::error_code ec) mutable {
+ log() << "opened outgoing connection";
+ opened.emplace(!ec);
+ });
ASSERT_TRUE(opened.get());
ASSERT_TRUE(stream.isOpen());
diff --git a/src/mongo/executor/connection_pool.cpp b/src/mongo/executor/connection_pool.cpp
index e6c0d150694..77e9f542af7 100644
--- a/src/mongo/executor/connection_pool.cpp
+++ b/src/mongo/executor/connection_pool.cpp
@@ -31,8 +31,8 @@
#include "mongo/executor/connection_pool.h"
-#include "mongo/executor/connection_pool_stats.h"
#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/executor/connection_pool_stats.h"
#include "mongo/executor/remote_command_request.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/assert_util.h"
@@ -363,30 +363,29 @@ void ConnectionPool::SpecificPool::addToReady(stdx::unique_lock<stdx::mutex>& lk
// Our strategy for refreshing connections is to check them out and
// immediately check them back in (which kicks off the refresh logic in
    // returnConnection)
- connPtr->setTimeout(_parent->_options.refreshRequirement,
- [this, connPtr]() {
- OwnedConnection conn;
+ connPtr->setTimeout(_parent->_options.refreshRequirement, [this, connPtr]() {

+ OwnedConnection conn;
- stdx::unique_lock<stdx::mutex> lk(_parent->_mutex);
+ stdx::unique_lock<stdx::mutex> lk(_parent->_mutex);
- if (!_readyPool.count(connPtr)) {
- // We've already been checked out. We don't need to refresh
- // ourselves.
- return;
- }
+ if (!_readyPool.count(connPtr)) {
+ // We've already been checked out. We don't need to refresh
+ // ourselves.
+ return;
+ }
- conn = takeFromPool(_readyPool, connPtr);
+ conn = takeFromPool(_readyPool, connPtr);
- // If we're in shutdown, we don't need to refresh connections
- if (_state == State::kInShutdown)
- return;
+ // If we're in shutdown, we don't need to refresh connections
+ if (_state == State::kInShutdown)
+ return;
- _checkedOutPool[connPtr] = std::move(conn);
+ _checkedOutPool[connPtr] = std::move(conn);
- connPtr->indicateSuccess();
+ connPtr->indicateSuccess();
- returnConnection(connPtr, std::move(lk));
- });
+ returnConnection(connPtr, std::move(lk));
+ });
fulfillRequests(lk);
}
@@ -608,31 +607,29 @@ void ConnectionPool::SpecificPool::updateStateInLock() {
// We set a timer for the most recent request, then invoke each timed
// out request we couldn't service
- _requestTimer->setTimeout(
- timeout,
- [this]() {
- stdx::unique_lock<stdx::mutex> lk(_parent->_mutex);
-
- auto now = _parent->_factory->now();
-
- while (_requests.size()) {
- auto& x = _requests.top();
-
- if (x.first <= now) {
- auto cb = std::move(x.second);
- _requests.pop();
-
- lk.unlock();
- cb(Status(ErrorCodes::ExceededTimeLimit,
- "Couldn't get a connection within the time limit"));
- lk.lock();
- } else {
- break;
- }
+ _requestTimer->setTimeout(timeout, [this]() {
+ stdx::unique_lock<stdx::mutex> lk(_parent->_mutex);
+
+ auto now = _parent->_factory->now();
+
+ while (_requests.size()) {
+ auto& x = _requests.top();
+
+ if (x.first <= now) {
+ auto cb = std::move(x.second);
+ _requests.pop();
+
+ lk.unlock();
+ cb(Status(ErrorCodes::ExceededTimeLimit,
+ "Couldn't get a connection within the time limit"));
+ lk.lock();
+ } else {
+ break;
}
+ }
- updateStateInLock();
- });
+ updateStateInLock();
+ });
} else if (_checkedOutPool.size()) {
// If we have no requests, but someone's using a connection, we just
// hang around until the next request or a return
diff --git a/src/mongo/executor/connection_pool.h b/src/mongo/executor/connection_pool.h
index 1ac0b7194ee..68ea3ad4eba 100644
--- a/src/mongo/executor/connection_pool.h
+++ b/src/mongo/executor/connection_pool.h
@@ -28,8 +28,8 @@
#pragma once
#include <memory>
-#include <unordered_map>
#include <queue>
+#include <unordered_map>
#include "mongo/base/disallow_copying.h"
#include "mongo/stdx/chrono.h"
diff --git a/src/mongo/executor/connection_pool_asio.cpp b/src/mongo/executor/connection_pool_asio.cpp
index 7be1874e7b3..2cb395df612 100644
--- a/src/mongo/executor/connection_pool_asio.cpp
+++ b/src/mongo/executor/connection_pool_asio.cpp
@@ -231,18 +231,16 @@ void ASIOConnection::refresh(Milliseconds timeout, RefreshCallback cb) {
cb(this, failedResponse.getStatus());
});
- _global->_impl->_asyncRunCommand(
- op,
- [this, op](std::error_code ec, size_t bytes) {
- cancelTimeout();
+ _global->_impl->_asyncRunCommand(op, [this, op](std::error_code ec, size_t bytes) {
+ cancelTimeout();
- auto cb = std::move(_refreshCallback);
+ auto cb = std::move(_refreshCallback);
- if (ec)
- return cb(this, Status(ErrorCodes::HostUnreachable, ec.message()));
+ if (ec)
+ return cb(this, Status(ErrorCodes::HostUnreachable, ec.message()));
- cb(this, Status::OK());
- });
+ cb(this, Status::OK());
+ });
});
}
diff --git a/src/mongo/executor/connection_pool_asio.h b/src/mongo/executor/connection_pool_asio.h
index 36286fe7803..00b09667c1c 100644
--- a/src/mongo/executor/connection_pool_asio.h
+++ b/src/mongo/executor/connection_pool_asio.h
@@ -31,10 +31,10 @@
#include <memory>
+#include "mongo/executor/async_stream_interface.h"
#include "mongo/executor/connection_pool.h"
-#include "mongo/executor/network_interface_asio.h"
#include "mongo/executor/network_interface.h"
-#include "mongo/executor/async_stream_interface.h"
+#include "mongo/executor/network_interface_asio.h"
#include "mongo/stdx/mutex.h"
namespace mongo {
diff --git a/src/mongo/executor/connection_pool_test.cpp b/src/mongo/executor/connection_pool_test.cpp
index 24949a142c7..5f7cf6676da 100644
--- a/src/mongo/executor/connection_pool_test.cpp
+++ b/src/mongo/executor/connection_pool_test.cpp
@@ -30,9 +30,9 @@
#include "mongo/executor/connection_pool_test_fixture.h"
#include "mongo/executor/connection_pool.h"
-#include "mongo/unittest/unittest.h"
-#include "mongo/stdx/memory.h"
#include "mongo/stdx/future.h"
+#include "mongo/stdx/memory.h"
+#include "mongo/unittest/unittest.h"
namespace mongo {
namespace executor {
@@ -56,7 +56,7 @@ private:
};
#define CONN2ID(swConn) \
- [](StatusWith<ConnectionPool::ConnectionHandle> & swConn) { \
+ [](StatusWith<ConnectionPool::ConnectionHandle>& swConn) { \
ASSERT(swConn.isOK()); \
return static_cast<ConnectionImpl*>(swConn.getValue().get())->id(); \
}(swConn)
diff --git a/src/mongo/executor/connection_pool_test_fixture.cpp b/src/mongo/executor/connection_pool_test_fixture.cpp
index 2d48ad0b5e2..53badbbe1f3 100644
--- a/src/mongo/executor/connection_pool_test_fixture.cpp
+++ b/src/mongo/executor/connection_pool_test_fixture.cpp
@@ -159,9 +159,9 @@ void ConnectionImpl::cancelTimeout() {
void ConnectionImpl::setup(Milliseconds timeout, SetupCallback cb) {
_setupCallback = std::move(cb);
- _timer.setTimeout(
- timeout,
- [this] { _setupCallback(this, Status(ErrorCodes::ExceededTimeLimit, "timeout")); });
+ _timer.setTimeout(timeout, [this] {
+ _setupCallback(this, Status(ErrorCodes::ExceededTimeLimit, "timeout"));
+ });
_setupQueue.push_back(this);
@@ -175,9 +175,9 @@ void ConnectionImpl::setup(Milliseconds timeout, SetupCallback cb) {
void ConnectionImpl::refresh(Milliseconds timeout, RefreshCallback cb) {
_refreshCallback = std::move(cb);
- _timer.setTimeout(
- timeout,
- [this] { _refreshCallback(this, Status(ErrorCodes::ExceededTimeLimit, "timeout")); });
+ _timer.setTimeout(timeout, [this] {
+ _refreshCallback(this, Status(ErrorCodes::ExceededTimeLimit, "timeout"));
+ });
_refreshQueue.push_back(this);
diff --git a/src/mongo/executor/downconvert_find_and_getmore_commands.cpp b/src/mongo/executor/downconvert_find_and_getmore_commands.cpp
index e03e8a0b2d4..667c955988f 100644
--- a/src/mongo/executor/downconvert_find_and_getmore_commands.cpp
+++ b/src/mongo/executor/downconvert_find_and_getmore_commands.cpp
@@ -68,8 +68,10 @@ StatusWith<std::tuple<CursorId, BSONArray>> getBatchFromReply(std::int32_t reque
if (header.getResponseToMsgId() != requestId) {
return {ErrorCodes::ProtocolError,
str::stream() << "responseTo field of OP_REPLY header with value '"
- << header.getResponseToMsgId() << "' does not match requestId '"
- << requestId << "'"};
+ << header.getResponseToMsgId()
+ << "' does not match requestId '"
+ << requestId
+ << "'"};
}
if ((header.dataLen() < 0) ||
@@ -95,7 +97,8 @@ StatusWith<std::tuple<CursorId, BSONArray>> getBatchFromReply(std::int32_t reque
if (qr.getNReturned() != 1) {
return {ErrorCodes::BadValue,
str::stream() << "ResultFlag_ErrSet flag set on reply, but nReturned was '"
- << qr.getNReturned() << "' - expected 1"};
+ << qr.getNReturned()
+ << "' - expected 1"};
}
// Convert error document to a Status.
// Will throw if first document is invalid BSON.
@@ -121,7 +124,8 @@ StatusWith<std::tuple<CursorId, BSONArray>> getBatchFromReply(std::int32_t reque
return {ErrorCodes::InvalidLength,
str::stream() << "Count of documents in OP_REPLY message (" << batch.arrSize()
<< ") did not match the value specified in the nReturned field ("
- << qr.getNReturned() << ")"};
+ << qr.getNReturned()
+ << ")"};
}
return {std::make_tuple(qr.getCursorId(), batch.arr())};
diff --git a/src/mongo/executor/network_interface_asio.cpp b/src/mongo/executor/network_interface_asio.cpp
index ca54f813e37..54dbeb7434f 100644
--- a/src/mongo/executor/network_interface_asio.cpp
+++ b/src/mongo/executor/network_interface_asio.cpp
@@ -155,7 +155,8 @@ void NetworkInterfaceASIO::startup() {
_io_service.run();
} catch (...) {
severe() << "Uncaught exception in NetworkInterfaceASIO IO "
- "worker thread of type: " << exceptionToStatus();
+ "worker thread of type: "
+ << exceptionToStatus();
fassertFailed(28820);
}
});
diff --git a/src/mongo/executor/network_interface_asio.h b/src/mongo/executor/network_interface_asio.h
index 2bbded1202d..130e9e83e3d 100644
--- a/src/mongo/executor/network_interface_asio.h
+++ b/src/mongo/executor/network_interface_asio.h
@@ -43,8 +43,8 @@
#include "mongo/base/system_error.h"
#include "mongo/executor/async_stream_factory_interface.h"
#include "mongo/executor/async_stream_interface.h"
-#include "mongo/executor/connection_pool.h"
#include "mongo/executor/async_timer_interface.h"
+#include "mongo/executor/connection_pool.h"
#include "mongo/executor/network_connection_hook.h"
#include "mongo/executor/network_interface.h"
#include "mongo/executor/remote_command_request.h"
diff --git a/src/mongo/executor/network_interface_asio_auth.cpp b/src/mongo/executor/network_interface_asio_auth.cpp
index df01ad3b5c3..52d7029d42a 100644
--- a/src/mongo/executor/network_interface_asio_auth.cpp
+++ b/src/mongo/executor/network_interface_asio_auth.cpp
@@ -134,10 +134,9 @@ void NetworkInterfaceASIO::_runIsMaster(AsyncOp* op) {
};
- _asyncRunCommand(op,
- [this, op, parseIsMaster](std::error_code ec, size_t bytes) {
- _validateAndRun(op, ec, std::move(parseIsMaster));
- });
+ _asyncRunCommand(op, [this, op, parseIsMaster](std::error_code ec, size_t bytes) {
+ _validateAndRun(op, ec, std::move(parseIsMaster));
+ });
}
void NetworkInterfaceASIO::_authenticate(AsyncOp* op) {
diff --git a/src/mongo/executor/network_interface_asio_command.cpp b/src/mongo/executor/network_interface_asio_command.cpp
index db7ba556d26..f6812fd9c5a 100644
--- a/src/mongo/executor/network_interface_asio_command.cpp
+++ b/src/mongo/executor/network_interface_asio_command.cpp
@@ -143,9 +143,11 @@ ResponseStatus decodeRPC(Message* received,
return Status(ErrorCodes::RPCProtocolNegotiationFailed,
str::stream() << "Mismatched RPC protocols - request was '"
- << requestProtocol.getValue().toString() << "' '"
+ << requestProtocol.getValue().toString()
+ << "' '"
<< " but reply was '"
- << networkOpToString(received->operation()) << "'");
+ << networkOpToString(received->operation())
+ << "'");
}
auto commandReply = reply->getCommandReply();
auto replyMetadata = reply->getMetadata();
@@ -246,10 +248,9 @@ void NetworkInterfaceASIO::_beginCommunication(AsyncOp* op) {
return _completeOperation(op, beginStatus);
}
- _asyncRunCommand(op,
- [this, op](std::error_code ec, size_t bytes) {
- _validateAndRun(op, ec, [this, op]() { _completedOpCallback(op); });
- });
+ _asyncRunCommand(op, [this, op](std::error_code ec, size_t bytes) {
+ _validateAndRun(op, ec, [this, op]() { _completedOpCallback(op); });
+ });
}
void NetworkInterfaceASIO::_completedOpCallback(AsyncOp* op) {
@@ -370,39 +371,34 @@ void NetworkInterfaceASIO::_asyncRunCommand(AsyncOp* op, NetworkOpHandler handle
size_t bytes) {
// The operation could have been canceled after starting the command, but before
// receiving the header
- _validateAndRun(op,
- ec,
- [this, op, recvMessageCallback, ec, bytes, cmd, handler] {
- // validate response id
- uint32_t expectedId = cmd->toSend().header().getId();
- uint32_t actualId = cmd->header().constView().getResponseToMsgId();
- if (actualId != expectedId) {
- LOG(3) << "got wrong response:"
- << " expected response id: " << expectedId
- << ", got response id: " << actualId;
- return handler(make_error_code(ErrorCodes::ProtocolError), bytes);
- }
-
- asyncRecvMessageBody(cmd->conn().stream(),
- &cmd->header(),
- &cmd->toRecv(),
- std::move(recvMessageCallback));
- });
+ _validateAndRun(op, ec, [this, op, recvMessageCallback, ec, bytes, cmd, handler] {
+ // validate response id
+ uint32_t expectedId = cmd->toSend().header().getId();
+ uint32_t actualId = cmd->header().constView().getResponseToMsgId();
+ if (actualId != expectedId) {
+ LOG(3) << "got wrong response:"
+ << " expected response id: " << expectedId
+ << ", got response id: " << actualId;
+ return handler(make_error_code(ErrorCodes::ProtocolError), bytes);
+ }
+
+ asyncRecvMessageBody(cmd->conn().stream(),
+ &cmd->header(),
+ &cmd->toRecv(),
+ std::move(recvMessageCallback));
+ });
};
// Step 2
- auto sendMessageCallback =
- [this, cmd, handler, recvHeaderCallback, op](std::error_code ec, size_t bytes) {
- _validateAndRun(op,
- ec,
- [this, cmd, op, recvHeaderCallback] {
- asyncRecvMessageHeader(cmd->conn().stream(),
- &cmd->header(),
- std::move(recvHeaderCallback));
- });
+ auto sendMessageCallback = [this, cmd, handler, recvHeaderCallback, op](std::error_code ec,
+ size_t bytes) {
+ _validateAndRun(op, ec, [this, cmd, op, recvHeaderCallback] {
+ asyncRecvMessageHeader(
+ cmd->conn().stream(), &cmd->header(), std::move(recvHeaderCallback));
+ });
- };
+ };
// Step 1
asyncSendMessage(cmd->conn().stream(), &cmd->toSend(), std::move(sendMessageCallback));
@@ -451,10 +447,9 @@ void NetworkInterfaceASIO::_runConnectionHook(AsyncOp* op) {
return _beginCommunication(op);
};
- return _asyncRunCommand(op,
- [this, op, finishHook](std::error_code ec, std::size_t bytes) {
- _validateAndRun(op, ec, finishHook);
- });
+ return _asyncRunCommand(op, [this, op, finishHook](std::error_code ec, std::size_t bytes) {
+ _validateAndRun(op, ec, finishHook);
+ });
}
diff --git a/src/mongo/executor/network_interface_asio_connect.cpp b/src/mongo/executor/network_interface_asio_connect.cpp
index a72db76bfe7..4bda92b42b9 100644
--- a/src/mongo/executor/network_interface_asio_connect.cpp
+++ b/src/mongo/executor/network_interface_asio_connect.cpp
@@ -102,10 +102,9 @@ void NetworkInterfaceASIO::_setupSocket(AsyncOp* op, tcp::resolver::iterator end
auto& stream = op->connection().stream();
- stream.connect(std::move(endpoints),
- [this, op](std::error_code ec) {
- _validateAndRun(op, ec, [this, op]() { _runIsMaster(op); });
- });
+ stream.connect(std::move(endpoints), [this, op](std::error_code ec) {
+ _validateAndRun(op, ec, [this, op]() { _runIsMaster(op); });
+ });
}
} // namespace executor
diff --git a/src/mongo/executor/network_interface_asio_integration_test.cpp b/src/mongo/executor/network_interface_asio_integration_test.cpp
index 617ae363937..9272e6abac4 100644
--- a/src/mongo/executor/network_interface_asio_integration_test.cpp
+++ b/src/mongo/executor/network_interface_asio_integration_test.cpp
@@ -90,11 +90,10 @@ public:
Deferred<StatusWith<RemoteCommandResponse>> runCommand(
const TaskExecutor::CallbackHandle& cbHandle, const RemoteCommandRequest& request) {
Deferred<StatusWith<RemoteCommandResponse>> deferred;
- net().startCommand(cbHandle,
- request,
- [deferred](StatusWith<RemoteCommandResponse> resp) mutable {
- deferred.emplace(std::move(resp));
- });
+ net().startCommand(
+ cbHandle, request, [deferred](StatusWith<RemoteCommandResponse> resp) mutable {
+ deferred.emplace(std::move(resp));
+ });
return deferred;
}
@@ -153,7 +152,8 @@ TEST_F(NetworkInterfaceASIOIntegrationTest, Timeouts) {
assertCommandFailsOnClient("admin",
BSON("sleep" << 1 << "lock"
<< "none"
- << "secs" << 10),
+ << "secs"
+ << 10),
Milliseconds(100),
ErrorCodes::ExceededTimeLimit);
@@ -161,7 +161,8 @@ TEST_F(NetworkInterfaceASIOIntegrationTest, Timeouts) {
assertCommandOK("admin",
BSON("sleep" << 1 << "lock"
<< "none"
- << "secs" << 1),
+ << "secs"
+ << 1),
Milliseconds(10000000));
}
@@ -173,25 +174,25 @@ public:
Deferred<Status> run(Fixture* fixture, Pool* pool, Milliseconds timeout = Milliseconds(60000)) {
auto cb = makeCallbackHandle();
auto self = *this;
- auto out =
- fixture->runCommand(cb,
- {unittest::getFixtureConnectionString().getServers()[0],
- "admin",
- _command,
- timeout})
- .then(pool,
- [self](StatusWith<RemoteCommandResponse> resp) -> Status {
- auto status = resp.isOK()
- ? getStatusFromCommandResult(resp.getValue().data)
- : resp.getStatus();
-
- return status == self._expected
- ? Status::OK()
- : Status{ErrorCodes::BadValue,
- str::stream() << "Expected "
- << ErrorCodes::errorString(self._expected)
- << " but got " << status.toString()};
- });
+ auto out = fixture
+ ->runCommand(cb,
+ {unittest::getFixtureConnectionString().getServers()[0],
+ "admin",
+ _command,
+ timeout})
+ .then(pool, [self](StatusWith<RemoteCommandResponse> resp) -> Status {
+ auto status = resp.isOK()
+ ? getStatusFromCommandResult(resp.getValue().data)
+ : resp.getStatus();
+
+ return status == self._expected
+ ? Status::OK()
+ : Status{ErrorCodes::BadValue,
+ str::stream() << "Expected "
+ << ErrorCodes::errorString(self._expected)
+ << " but got "
+ << status.toString()};
+ });
if (_cancel) {
invariant(fixture->randomNumberGenerator());
sleepmillis(fixture->randomNumberGenerator()->nextInt32(10));
@@ -203,33 +204,41 @@ public:
static Deferred<Status> runTimeoutOp(Fixture* fixture, Pool* pool) {
return StressTestOp(BSON("sleep" << 1 << "lock"
<< "none"
- << "secs" << 1),
+ << "secs"
+ << 1),
ErrorCodes::ExceededTimeLimit,
- false).run(fixture, pool, Milliseconds(100));
+ false)
+ .run(fixture, pool, Milliseconds(100));
}
static Deferred<Status> runCompleteOp(Fixture* fixture, Pool* pool) {
return StressTestOp(BSON("sleep" << 1 << "lock"
<< "none"
- << "millis" << 100),
+ << "millis"
+ << 100),
ErrorCodes::OK,
- false).run(fixture, pool);
+ false)
+ .run(fixture, pool);
}
static Deferred<Status> runCancelOp(Fixture* fixture, Pool* pool) {
return StressTestOp(BSON("sleep" << 1 << "lock"
<< "none"
- << "secs" << 10),
+ << "secs"
+ << 10),
ErrorCodes::CallbackCanceled,
- true).run(fixture, pool);
+ true)
+ .run(fixture, pool);
}
static Deferred<Status> runLongOp(Fixture* fixture, Pool* pool) {
return StressTestOp(BSON("sleep" << 1 << "lock"
<< "none"
- << "secs" << 30),
+ << "secs"
+ << 30),
ErrorCodes::OK,
- false).run(fixture, pool, RemoteCommandRequest::kNoTimeout);
+ false)
+ .run(fixture, pool, RemoteCommandRequest::kNoTimeout);
}
private:
@@ -265,26 +274,24 @@ TEST_F(NetworkInterfaceASIOIntegrationTest, StressTest) {
pool.join();
});
- std::generate_n(std::back_inserter(ops),
- numOps,
- [&rng, &pool, this] {
+ std::generate_n(std::back_inserter(ops), numOps, [&rng, &pool, this] {
- // stagger operations slightly to mitigate connection pool contention
- sleepmillis(rng.nextInt32(10));
+ // stagger operations slightly to mitigate connection pool contention
+ sleepmillis(rng.nextInt32(10));
- auto i = rng.nextCanonicalDouble();
+ auto i = rng.nextCanonicalDouble();
- if (i < .3) {
- return StressTestOp::runCancelOp(this, &pool);
- } else if (i < .7) {
- return StressTestOp::runCompleteOp(this, &pool);
- } else if (i < .99) {
- return StressTestOp::runTimeoutOp(this, &pool);
- } else {
- // Just a sprinkling of long ops, to mitigate connection pool contention
- return StressTestOp::runLongOp(this, &pool);
- }
- });
+ if (i < .3) {
+ return StressTestOp::runCancelOp(this, &pool);
+ } else if (i < .7) {
+ return StressTestOp::runCompleteOp(this, &pool);
+ } else if (i < .99) {
+ return StressTestOp::runTimeoutOp(this, &pool);
+ } else {
+ // Just a sprinkling of long ops, to mitigate connection pool contention
+ return StressTestOp::runLongOp(this, &pool);
+ }
+ });
log() << "running ops";
auto res = helpers::collect(ops, &pool)
@@ -313,7 +320,8 @@ class HangingHook : public executor::NetworkConnectionHook {
"admin",
BSON("sleep" << 1 << "lock"
<< "none"
- << "secs" << 100000000),
+ << "secs"
+ << 100000000),
BSONObj()))};
}
diff --git a/src/mongo/executor/network_interface_asio_operation.cpp b/src/mongo/executor/network_interface_asio_operation.cpp
index 7c0e66c6eda..1fe4f33b88d 100644
--- a/src/mongo/executor/network_interface_asio_operation.cpp
+++ b/src/mongo/executor/network_interface_asio_operation.cpp
@@ -370,9 +370,9 @@ bool NetworkInterfaceASIO::AsyncOp::operator==(const AsyncOp& other) const {
}
bool NetworkInterfaceASIO::AsyncOp::_hasSeenState(AsyncOp::State state) const {
- return std::any_of(std::begin(_states),
- std::end(_states),
- [state](AsyncOp::State _state) { return _state == state; });
+ return std::any_of(std::begin(_states), std::end(_states), [state](AsyncOp::State _state) {
+ return _state == state;
+ });
}
void NetworkInterfaceASIO::AsyncOp::_transitionToState(AsyncOp::State newState) {
@@ -393,9 +393,9 @@ void NetworkInterfaceASIO::AsyncOp::_transitionToState_inlock(AsyncOp::State new
// multiple times. Ignore that transition if we're already cancelled.
if (newState == State::kCanceled) {
// Find the current state
- auto iter = std::find_if_not(_states.rbegin(),
- _states.rend(),
- [](const State& state) { return state == State::kNoState; });
+ auto iter = std::find_if_not(_states.rbegin(), _states.rend(), [](const State& state) {
+ return state == State::kNoState;
+ });
    // If it's cancelled, just return
if (iter != _states.rend() && *iter == State::kCanceled) {
diff --git a/src/mongo/executor/network_interface_asio_test.cpp b/src/mongo/executor/network_interface_asio_test.cpp
index 7462070bad9..a6193dbccde 100644
--- a/src/mongo/executor/network_interface_asio_test.cpp
+++ b/src/mongo/executor/network_interface_asio_test.cpp
@@ -166,8 +166,9 @@ TEST_F(NetworkInterfaceASIOTest, CancelOperation) {
// simulate isMaster reply.
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
{
// Cancel operation while blocked in the write for determinism. By calling cancel here we
@@ -196,8 +197,9 @@ TEST_F(NetworkInterfaceASIOTest, ImmediateCancel) {
auto stream = streamFactory().blockUntilStreamExists(testHost);
ConnectEvent{stream}.skip();
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
auto& result = deferred.get();
ASSERT(result == ErrorCodes::CallbackCanceled);
@@ -214,8 +216,9 @@ TEST_F(NetworkInterfaceASIOTest, LateCancel) {
auto stream = streamFactory().blockUntilStreamExists(testHost);
ConnectEvent{stream}.skip();
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
// Simulate user command
stream->simulateServer(rpc::Protocol::kOpCommandV1,
@@ -244,8 +247,9 @@ TEST_F(NetworkInterfaceASIOTest, CancelWithNetworkError) {
// simulate isMaster reply.
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
{
WriteEvent{stream}.skip();
@@ -272,8 +276,9 @@ TEST_F(NetworkInterfaceASIOTest, CancelWithTimeout) {
ConnectEvent{stream}.skip();
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
{
WriteEvent write{stream};
@@ -299,8 +304,9 @@ TEST_F(NetworkInterfaceASIOTest, TimeoutWithNetworkError) {
auto stream = streamFactory().blockUntilStreamExists(testHost);
ConnectEvent{stream}.skip();
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
{
WriteEvent{stream}.skip();
@@ -327,8 +333,9 @@ TEST_F(NetworkInterfaceASIOTest, CancelWithTimeoutAndNetworkError) {
auto stream = streamFactory().blockUntilStreamExists(testHost);
ConnectEvent{stream}.skip();
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
{
WriteEvent{stream}.skip();
@@ -358,8 +365,9 @@ TEST_F(NetworkInterfaceASIOTest, AsyncOpTimeout) {
// Simulate isMaster reply.
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
{
// Wait for the operation to block on write so we know it's been added.
@@ -393,27 +401,28 @@ TEST_F(NetworkInterfaceASIOTest, StartCommand) {
// simulate isMaster reply.
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
auto expectedMetadata = BSON("meep"
<< "beep");
auto expectedCommandReply = BSON("boop"
<< "bop"
- << "ok" << 1.0);
+ << "ok"
+ << 1.0);
// simulate user command
- stream->simulateServer(rpc::Protocol::kOpCommandV1,
- [&](RemoteCommandRequest request) -> RemoteCommandResponse {
- ASSERT_EQ(std::string{request.cmdObj.firstElementFieldName()},
- "foo");
- ASSERT_EQ(request.dbname, "testDB");
+ stream->simulateServer(
+ rpc::Protocol::kOpCommandV1, [&](RemoteCommandRequest request) -> RemoteCommandResponse {
+ ASSERT_EQ(std::string{request.cmdObj.firstElementFieldName()}, "foo");
+ ASSERT_EQ(request.dbname, "testDB");
- RemoteCommandResponse response;
- response.data = expectedCommandReply;
- response.metadata = expectedMetadata;
- return response;
- });
+ RemoteCommandResponse response;
+ response.data = expectedCommandReply;
+ response.metadata = expectedMetadata;
+ return response;
+ });
auto& res = deferred.get();
@@ -450,8 +459,9 @@ public:
auto stream = streamFactory().blockUntilStreamExists(testHost);
ConnectEvent{stream}.skip();
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
uint32_t messageId = 0;
@@ -499,11 +509,9 @@ public:
};
TEST_F(MalformedMessageTest, messageHeaderWrongResponseTo) {
- runMessageTest(ErrorCodes::ProtocolError,
- false,
- [](MsgData::View message) {
- message.setResponseToMsgId(message.getResponseToMsgId() + 1);
- });
+ runMessageTest(ErrorCodes::ProtocolError, false, [](MsgData::View message) {
+ message.setResponseToMsgId(message.getResponseToMsgId() + 1);
+ });
}
TEST_F(MalformedMessageTest, messageHeaderlenZero) {
@@ -512,15 +520,15 @@ TEST_F(MalformedMessageTest, messageHeaderlenZero) {
}
TEST_F(MalformedMessageTest, MessageHeaderLenTooSmall) {
- runMessageTest(ErrorCodes::InvalidLength,
- false,
- [](MsgData::View message) { message.setLen(6); }); // min is 16
+ runMessageTest(ErrorCodes::InvalidLength, false, [](MsgData::View message) {
+ message.setLen(6);
+ }); // min is 16
}
TEST_F(MalformedMessageTest, MessageHeaderLenTooLarge) {
- runMessageTest(ErrorCodes::InvalidLength,
- false,
- [](MsgData::View message) { message.setLen(48000001); }); // max is 48000000
+ runMessageTest(ErrorCodes::InvalidLength, false, [](MsgData::View message) {
+ message.setLen(48000001);
+ }); // max is 48000000
}
TEST_F(MalformedMessageTest, MessageHeaderLenNegative) {
@@ -529,27 +537,27 @@ TEST_F(MalformedMessageTest, MessageHeaderLenNegative) {
}
TEST_F(MalformedMessageTest, MessageLenSmallerThanActual) {
- runMessageTest(ErrorCodes::InvalidBSON,
- true,
- [](MsgData::View message) { message.setLen(message.getLen() - 10); });
+ runMessageTest(ErrorCodes::InvalidBSON, true, [](MsgData::View message) {
+ message.setLen(message.getLen() - 10);
+ });
}
TEST_F(MalformedMessageTest, FailedToReadAllBytesForMessage) {
- runMessageTest(ErrorCodes::InvalidLength,
- true,
- [](MsgData::View message) { message.setLen(message.getLen() + 100); });
+ runMessageTest(ErrorCodes::InvalidLength, true, [](MsgData::View message) {
+ message.setLen(message.getLen() + 100);
+ });
}
TEST_F(MalformedMessageTest, UnsupportedOpcode) {
- runMessageTest(ErrorCodes::UnsupportedFormat,
- true,
- [](MsgData::View message) { message.setOperation(2222); });
+ runMessageTest(ErrorCodes::UnsupportedFormat, true, [](MsgData::View message) {
+ message.setOperation(2222);
+ });
}
TEST_F(MalformedMessageTest, MismatchedOpcode) {
- runMessageTest(ErrorCodes::UnsupportedFormat,
- true,
- [](MsgData::View message) { message.setOperation(2006); });
+ runMessageTest(ErrorCodes::UnsupportedFormat, true, [](MsgData::View message) {
+ message.setOperation(2006);
+ });
}
class NetworkInterfaceASIOConnectionHookTest : public NetworkInterfaceASIOTest {
@@ -605,14 +613,14 @@ TEST_F(NetworkInterfaceASIOConnectionHookTest, ValidateHostInvalid) {
// simulate isMaster reply.
stream->simulateServer(
- rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ rpc::Protocol::kOpQuery, [](RemoteCommandRequest request) -> RemoteCommandResponse {
RemoteCommandResponse response;
response.data =
- BSON("minWireVersion"
- << mongo::WireSpec::instance().minWireVersionIncoming << "maxWireVersion"
- << mongo::WireSpec::instance().maxWireVersionIncoming << "TESTKEY"
- << "TESTVALUE");
+ BSON("minWireVersion" << mongo::WireSpec::instance().minWireVersionIncoming
+ << "maxWireVersion"
+ << mongo::WireSpec::instance().maxWireVersionIncoming
+ << "TESTKEY"
+ << "TESTVALUE");
return response;
});
@@ -637,8 +645,9 @@ TEST_F(NetworkInterfaceASIOConnectionHookTest, MakeRequestReturnsError) {
Status makeRequestError{ErrorCodes::DBPathInUse, "bloooh"};
start(makeTestHook(
- [&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply)
- -> Status { return Status::OK(); },
+ [&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply) -> Status {
+ return Status::OK();
+ },
[&](const HostAndPort& remoteHost) -> StatusWith<boost::optional<RemoteCommandRequest>> {
makeRequestCalled = true;
return makeRequestError;
@@ -659,8 +668,9 @@ TEST_F(NetworkInterfaceASIOConnectionHookTest, MakeRequestReturnsError) {
// simulate isMaster reply.
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
// We should stop here.
auto& res = deferred.get();
@@ -676,8 +686,9 @@ TEST_F(NetworkInterfaceASIOConnectionHookTest, MakeRequestReturnsNone) {
bool handleReplyCalled = false;
start(makeTestHook(
- [&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply)
- -> Status { return Status::OK(); },
+ [&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply) -> Status {
+ return Status::OK();
+ },
[&](const HostAndPort& remoteHost) -> StatusWith<boost::optional<RemoteCommandRequest>> {
makeRequestCalled = true;
return {boost::none};
@@ -692,7 +703,8 @@ TEST_F(NetworkInterfaceASIOConnectionHookTest, MakeRequestReturnsNone) {
auto commandReply = BSON("foo"
<< "boo"
- << "ok" << 1.0);
+ << "ok"
+ << 1.0);
auto metadata = BSON("aaa"
<< "bbb");
@@ -704,8 +716,9 @@ TEST_F(NetworkInterfaceASIOConnectionHookTest, MakeRequestReturnsNone) {
// simulate isMaster reply.
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
// Simulate user command.
stream->simulateServer(rpc::Protocol::kOpCommandV1,
@@ -739,14 +752,16 @@ TEST_F(NetworkInterfaceASIOConnectionHookTest, HandleReplyReturnsError) {
BSONObj hookCommandReply = BSON("blah"
<< "blah"
- << "ok" << 1.0);
+ << "ok"
+ << 1.0);
BSONObj hookReplyMetadata = BSON("1111" << 2222);
Status handleReplyError{ErrorCodes::AuthSchemaIncompatible, "daowdjkpowkdjpow"};
start(makeTestHook(
- [&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply)
- -> Status { return Status::OK(); },
+ [&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply) -> Status {
+ return Status::OK();
+ },
[&](const HostAndPort& remoteHost) -> StatusWith<boost::optional<RemoteCommandRequest>> {
makeRequestCalled = true;
return {boost::make_optional<RemoteCommandRequest>(
@@ -769,8 +784,9 @@ TEST_F(NetworkInterfaceASIOConnectionHookTest, HandleReplyReturnsError) {
// simulate isMaster reply.
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
// Simulate hook reply
stream->simulateServer(rpc::Protocol::kOpCommandV1,
@@ -873,8 +889,9 @@ TEST_F(NetworkInterfaceASIOMetadataTest, Metadata) {
// simulate isMaster reply.
stream->simulateServer(rpc::Protocol::kOpQuery,
- [](RemoteCommandRequest request)
- -> RemoteCommandResponse { return simulateIsMaster(request); });
+ [](RemoteCommandRequest request) -> RemoteCommandResponse {
+ return simulateIsMaster(request);
+ });
// Simulate hook reply
stream->simulateServer(rpc::Protocol::kOpCommandV1,
diff --git a/src/mongo/executor/network_interface_asio_test_utils.h b/src/mongo/executor/network_interface_asio_test_utils.h
index bfd489514b0..0c1ff03cf11 100644
--- a/src/mongo/executor/network_interface_asio_test_utils.h
+++ b/src/mongo/executor/network_interface_asio_test_utils.h
@@ -135,23 +135,22 @@ static Deferred<std::vector<T>> collect(std::vector<Deferred<T>>& ds, ThreadPool
collectState->mem.resize(collectState->goal);
for (std::size_t i = 0; i < ds.size(); ++i) {
- ds[i].then(pool,
- [collectState, out, i](T res) mutable {
- // The bool return is unused.
- stdx::lock_guard<stdx::mutex> lk(collectState->mtx);
- collectState->mem[i] = std::move(res);
-
- // If we're done.
- if (collectState->goal == ++collectState->numFinished) {
- std::vector<T> outInitialized;
- outInitialized.reserve(collectState->mem.size());
- for (auto&& mem_entry : collectState->mem) {
- outInitialized.emplace_back(std::move(*mem_entry));
- }
- out.emplace(outInitialized);
- }
- return true;
- });
+ ds[i].then(pool, [collectState, out, i](T res) mutable {
+ // The bool return is unused.
+ stdx::lock_guard<stdx::mutex> lk(collectState->mtx);
+ collectState->mem[i] = std::move(res);
+
+ // If we're done.
+ if (collectState->goal == ++collectState->numFinished) {
+ std::vector<T> outInitialized;
+ outInitialized.reserve(collectState->mem.size());
+ for (auto&& mem_entry : collectState->mem) {
+ outInitialized.emplace_back(std::move(*mem_entry));
+ }
+ out.emplace(outInitialized);
+ }
+ return true;
+ });
}
return out;
}
diff --git a/src/mongo/executor/network_interface_mock.cpp b/src/mongo/executor/network_interface_mock.cpp
index 1c822c9c48b..2a121e8fbb5 100644
--- a/src/mongo/executor/network_interface_mock.cpp
+++ b/src/mongo/executor/network_interface_mock.cpp
@@ -30,8 +30,8 @@
#include "mongo/platform/basic.h"
-#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/network_connection_hook.h"
+#include "mongo/executor/network_interface_mock.h"
#include <algorithm>
#include <iterator>
@@ -400,27 +400,27 @@ void NetworkInterfaceMock::_connectThenEnqueueOperation_inlock(const HostAndPort
}
// The completion handler for the postconnect command schedules the original command.
- auto postconnectCompletionHandler =
- [this, op](StatusWith<RemoteCommandResponse> response) mutable {
- stdx::lock_guard<stdx::mutex> lk(_mutex);
- if (!response.isOK()) {
- op.setResponse(_now_inlock(), response.getStatus());
- op.finishResponse();
- return;
- }
-
- auto handleStatus =
- _hook->handleReply(op.getRequest().target, std::move(response.getValue()));
-
- if (!handleStatus.isOK()) {
- op.setResponse(_now_inlock(), handleStatus);
- op.finishResponse();
- return;
- }
-
- _enqueueOperation_inlock(std::move(op));
- _connections.emplace(op.getRequest().target);
- };
+ auto postconnectCompletionHandler = [this,
+ op](StatusWith<RemoteCommandResponse> response) mutable {
+ stdx::lock_guard<stdx::mutex> lk(_mutex);
+ if (!response.isOK()) {
+ op.setResponse(_now_inlock(), response.getStatus());
+ op.finishResponse();
+ return;
+ }
+
+ auto handleStatus =
+ _hook->handleReply(op.getRequest().target, std::move(response.getValue()));
+
+ if (!handleStatus.isOK()) {
+ op.setResponse(_now_inlock(), handleStatus);
+ op.finishResponse();
+ return;
+ }
+
+ _enqueueOperation_inlock(std::move(op));
+ _connections.emplace(op.getRequest().target);
+ };
auto postconnectOp = NetworkOperation(op.getCallbackHandle(),
std::move(*hookPostconnectCommand),
diff --git a/src/mongo/executor/network_interface_mock_test.cpp b/src/mongo/executor/network_interface_mock_test.cpp
index 4e1e3f45a35..d59f7970474 100644
--- a/src/mongo/executor/network_interface_mock_test.cpp
+++ b/src/mongo/executor/network_interface_mock_test.cpp
@@ -164,16 +164,14 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHook) {
BSONObj(),
Milliseconds(0)};
- ASSERT_OK(net().startCommand(cb,
- actualCommandExpected,
- [&](StatusWith<RemoteCommandResponse> resp) {
- commandFinished = true;
- if (resp.isOK()) {
- gotCorrectCommandReply =
- (actualResponseExpected.toString() ==
- resp.getValue().toString());
- }
- }));
+ ASSERT_OK(
+ net().startCommand(cb, actualCommandExpected, [&](StatusWith<RemoteCommandResponse> resp) {
+ commandFinished = true;
+ if (resp.isOK()) {
+ gotCorrectCommandReply =
+ (actualResponseExpected.toString() == resp.getValue().toString());
+ }
+ }));
// At this point validate and makeRequest should have been called.
ASSERT(validateCalled);
@@ -224,10 +222,12 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHookFailedValidation) {
// We just need some obscure non-OK code.
return {ErrorCodes::ConflictingOperationInProgress, "blah"};
},
- [&](const HostAndPort& remoteHost)
- -> StatusWith<boost::optional<RemoteCommandRequest>> { MONGO_UNREACHABLE; },
- [&](const HostAndPort& remoteHost, RemoteCommandResponse&& response)
- -> Status { MONGO_UNREACHABLE; }));
+ [&](const HostAndPort& remoteHost) -> StatusWith<boost::optional<RemoteCommandRequest>> {
+ MONGO_UNREACHABLE;
+ },
+ [&](const HostAndPort& remoteHost, RemoteCommandResponse&& response) -> Status {
+ MONGO_UNREACHABLE;
+ }));
startNetwork();
@@ -261,14 +261,16 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHookFailedValidation) {
TEST_F(NetworkInterfaceMockTest, ConnectionHookNoRequest) {
bool makeRequestCalled = false;
net().setConnectionHook(makeTestHook(
- [&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply)
- -> Status { return Status::OK(); },
+ [&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply) -> Status {
+ return Status::OK();
+ },
[&](const HostAndPort& remoteHost) -> StatusWith<boost::optional<RemoteCommandRequest>> {
makeRequestCalled = true;
return {boost::none};
},
- [&](const HostAndPort& remoteHost, RemoteCommandResponse&& response)
- -> Status { MONGO_UNREACHABLE; }));
+ [&](const HostAndPort& remoteHost, RemoteCommandResponse&& response) -> Status {
+ MONGO_UNREACHABLE;
+ }));
startNetwork();
@@ -296,14 +298,16 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHookNoRequest) {
TEST_F(NetworkInterfaceMockTest, ConnectionHookMakeRequestFails) {
bool makeRequestCalled = false;
net().setConnectionHook(makeTestHook(
- [&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply)
- -> Status { return Status::OK(); },
+ [&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply) -> Status {
+ return Status::OK();
+ },
[&](const HostAndPort& remoteHost) -> StatusWith<boost::optional<RemoteCommandRequest>> {
makeRequestCalled = true;
return {ErrorCodes::InvalidSyncSource, "blah"};
},
- [&](const HostAndPort& remoteHost, RemoteCommandResponse&& response)
- -> Status { MONGO_UNREACHABLE; }));
+ [&](const HostAndPort& remoteHost, RemoteCommandResponse&& response) -> Status {
+ MONGO_UNREACHABLE;
+ }));
startNetwork();
@@ -333,8 +337,9 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHookMakeRequestFails) {
TEST_F(NetworkInterfaceMockTest, ConnectionHookHandleReplyFails) {
bool handleReplyCalled = false;
net().setConnectionHook(makeTestHook(
- [&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply)
- -> Status { return Status::OK(); },
+ [&](const HostAndPort& remoteHost, const RemoteCommandResponse& isMasterReply) -> Status {
+ return Status::OK();
+ },
[&](const HostAndPort& remoteHost) -> StatusWith<boost::optional<RemoteCommandRequest>> {
return boost::make_optional<RemoteCommandRequest>({});
},
@@ -404,8 +409,9 @@ TEST_F(NetworkInterfaceMockTest, CommandTimeout) {
request.timeout = Milliseconds(2000);
ErrorCodes::Error statusPropagated = ErrorCodes::OK;
- auto finishFn =
- [&](StatusWith<RemoteCommandResponse> resp) { statusPropagated = resp.getStatus().code(); };
+ auto finishFn = [&](StatusWith<RemoteCommandResponse> resp) {
+ statusPropagated = resp.getStatus().code();
+ };
//
// Command times out.
diff --git a/src/mongo/executor/network_interface_thread_pool.cpp b/src/mongo/executor/network_interface_thread_pool.cpp
index b678a9486d1..f556b2aab35 100644
--- a/src/mongo/executor/network_interface_thread_pool.cpp
+++ b/src/mongo/executor/network_interface_thread_pool.cpp
@@ -132,12 +132,11 @@ void NetworkInterfaceThreadPool::consumeTasks(stdx::unique_lock<stdx::mutex> lk)
if (!_registeredAlarm) {
_registeredAlarm = true;
lk.unlock();
- _net->setAlarm(_net->now(),
- [this] {
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- _registeredAlarm = false;
- consumeTasks(std::move(lk));
- });
+ _net->setAlarm(_net->now(), [this] {
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ _registeredAlarm = false;
+ consumeTasks(std::move(lk));
+ });
}
return;
diff --git a/src/mongo/executor/network_interface_thread_pool_test.cpp b/src/mongo/executor/network_interface_thread_pool_test.cpp
index 319d0299b27..b9a8c070b2d 100644
--- a/src/mongo/executor/network_interface_thread_pool_test.cpp
+++ b/src/mongo/executor/network_interface_thread_pool_test.cpp
@@ -33,10 +33,10 @@
#include "mongo/base/init.h"
#include "mongo/executor/network_interface_asio.h"
#include "mongo/executor/network_interface_thread_pool.h"
+#include "mongo/stdx/memory.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/concurrency/thread_pool_test_common.h"
#include "mongo/util/concurrency/thread_pool_test_fixture.h"
-#include "mongo/stdx/memory.h"
namespace {
using namespace mongo;
diff --git a/src/mongo/executor/remote_command_response.h b/src/mongo/executor/remote_command_response.h
index b6cedb8f976..3e997e448de 100644
--- a/src/mongo/executor/remote_command_response.h
+++ b/src/mongo/executor/remote_command_response.h
@@ -29,8 +29,8 @@
#pragma once
#include <iosfwd>
-#include <string>
#include <memory>
+#include <string>
#include "mongo/db/jsobj.h"
#include "mongo/util/net/message.h"
diff --git a/src/mongo/executor/task_executor.h b/src/mongo/executor/task_executor.h
index ec68a837c8e..fd630e527b7 100644
--- a/src/mongo/executor/task_executor.h
+++ b/src/mongo/executor/task_executor.h
@@ -28,9 +28,9 @@
#pragma once
-#include <string>
-#include <memory>
#include <functional>
+#include <memory>
+#include <string>
#include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h"
diff --git a/src/mongo/executor/task_executor_test_common.cpp b/src/mongo/executor/task_executor_test_common.cpp
index d6c479a1a32..ed956b93567 100644
--- a/src/mongo/executor/task_executor_test_common.cpp
+++ b/src/mongo/executor/task_executor_test_common.cpp
@@ -90,20 +90,20 @@ public:
}
};
-#define COMMON_EXECUTOR_TEST(TEST_NAME) \
- class CET_##TEST_NAME : public CommonTaskExecutorTestFixture { \
- public: \
- CET_##TEST_NAME(ExecutorFactory makeExecutor) \
- : CommonTaskExecutorTestFixture(std::move(makeExecutor)) {} \
- \
- private: \
- void _doTest() override; \
- static const CetRegistrationAgent _agent; \
- }; \
- const CetRegistrationAgent CET_##TEST_NAME::_agent(#TEST_NAME, \
- [](ExecutorFactory makeExecutor) { \
- return stdx::make_unique<CET_##TEST_NAME>(std::move(makeExecutor)); \
- }); \
+#define COMMON_EXECUTOR_TEST(TEST_NAME) \
+ class CET_##TEST_NAME : public CommonTaskExecutorTestFixture { \
+ public: \
+ CET_##TEST_NAME(ExecutorFactory makeExecutor) \
+ : CommonTaskExecutorTestFixture(std::move(makeExecutor)) {} \
+ \
+ private: \
+ void _doTest() override; \
+ static const CetRegistrationAgent _agent; \
+ }; \
+ const CetRegistrationAgent CET_##TEST_NAME::_agent( \
+ #TEST_NAME, [](ExecutorFactory makeExecutor) { \
+ return stdx::make_unique<CET_##TEST_NAME>(std::move(makeExecutor)); \
+ }); \
void CET_##TEST_NAME::_doTest()
void setStatus(const TaskExecutor::CallbackArgs& cbData, Status* target) {
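Note: the COMMON_EXECUTOR_TEST hunk above changes no tokens at all; it only re-aligns the trailing backslashes, which the formatter treats as a single right-aligned column sized to the longest line of the macro. Lengthening any one line therefore reflows every continuation. A tiny self-contained illustration (the macro is invented for this note):

    // The backslash column moves as a unit when any line grows.
    #define SQUARE_AND_STORE(x, out) \
        do {                         \
            (out) = (x) * (x);       \
        } while (0)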
@@ -132,9 +132,10 @@ void scheduleSetStatusAndShutdown(const TaskExecutor::CallbackArgs& cbData,
*outStatus1 = cbData.status;
return;
}
- *outStatus1 = cbData.executor->scheduleWork(stdx::bind(setStatusAndShutdown,
- stdx::placeholders::_1,
- outStatus2)).getStatus();
+ *outStatus1 =
+ cbData.executor
+ ->scheduleWork(stdx::bind(setStatusAndShutdown, stdx::placeholders::_1, outStatus2))
+ .getStatus();
}
COMMON_EXECUTOR_TEST(RunOne) {
@@ -180,10 +181,10 @@ COMMON_EXECUTOR_TEST(OneSchedulesAnother) {
TaskExecutor& executor = getExecutor();
Status status1 = getDetectableErrorStatus();
Status status2 = getDetectableErrorStatus();
- ASSERT_OK(executor.scheduleWork(stdx::bind(scheduleSetStatusAndShutdown,
- stdx::placeholders::_1,
- &status1,
- &status2)).getStatus());
+ ASSERT_OK(executor
+ .scheduleWork(stdx::bind(
+ scheduleSetStatusAndShutdown, stdx::placeholders::_1, &status1, &status2))
+ .getStatus());
launchExecutorThread();
joinExecutorThread();
ASSERT_OK(status1);
@@ -360,9 +361,10 @@ static void setStatusOnRemoteCommandCompletion(
Status* outStatus) {
if (cbData.request != expectedRequest) {
*outStatus = Status(ErrorCodes::BadValue,
- mongoutils::str::stream()
- << "Actual request: " << getRequestDescription(cbData.request)
- << "; expected: " << getRequestDescription(expectedRequest));
+ mongoutils::str::stream() << "Actual request: "
+ << getRequestDescription(cbData.request)
+ << "; expected: "
+ << getRequestDescription(expectedRequest));
return;
}
*outStatus = cbData.response.getStatus();
diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp
index 6d58745f194..9b5d18cbd84 100644
--- a/src/mongo/executor/thread_pool_task_executor.cpp
+++ b/src/mongo/executor/thread_pool_task_executor.cpp
@@ -264,20 +264,18 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleWorkAt(
return cbHandle;
}
lk.unlock();
- _net->setAlarm(when,
- [this, when, cbHandle] {
- auto cbState =
- checked_cast<CallbackState*>(getCallbackFromHandle(cbHandle.getValue()));
- if (cbState->canceled.load()) {
- return;
- }
- invariant(now() >= when);
- stdx::unique_lock<stdx::mutex> lk(_mutex);
- if (cbState->canceled.load()) {
- return;
- }
- scheduleIntoPool_inlock(&_sleepersQueue, cbState->iter, std::move(lk));
- });
+ _net->setAlarm(when, [this, when, cbHandle] {
+ auto cbState = checked_cast<CallbackState*>(getCallbackFromHandle(cbHandle.getValue()));
+ if (cbState->canceled.load()) {
+ return;
+ }
+ invariant(now() >= when);
+ stdx::unique_lock<stdx::mutex> lk(_mutex);
+ if (cbState->canceled.load()) {
+ return;
+ }
+ scheduleIntoPool_inlock(&_sleepersQueue, cbState->iter, std::move(lk));
+ });
return cbHandle;
}
@@ -350,9 +348,9 @@ StatusWith<TaskExecutor::CallbackHandle> ThreadPoolTaskExecutor::scheduleRemoteC
if (_inShutdown) {
return;
}
- LOG(3) << "Received remote response: " << (response.isOK()
- ? response.getValue().toString()
- : response.getStatus().toString());
+ LOG(3) << "Received remote response: "
+ << (response.isOK() ? response.getValue().toString()
+ : response.getStatus().toString());
swap(cbState->callback, newCb);
scheduleIntoPool_inlock(&_networkInProgressQueue, cbState->iter, std::move(lk));
});
diff --git a/src/mongo/executor/thread_pool_task_executor.h b/src/mongo/executor/thread_pool_task_executor.h
index f4afb7b58c9..9cd55507649 100644
--- a/src/mongo/executor/thread_pool_task_executor.h
+++ b/src/mongo/executor/thread_pool_task_executor.h
@@ -32,8 +32,8 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/executor/task_executor.h"
-#include "mongo/stdx/list.h"
#include "mongo/stdx/condition_variable.h"
+#include "mongo/stdx/list.h"
#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
diff --git a/src/mongo/executor/thread_pool_task_executor_test.cpp b/src/mongo/executor/thread_pool_task_executor_test.cpp
index b2d43e669a0..887cfa59d45 100644
--- a/src/mongo/executor/thread_pool_task_executor_test.cpp
+++ b/src/mongo/executor/thread_pool_task_executor_test.cpp
@@ -48,10 +48,9 @@ namespace executor {
namespace {
MONGO_INITIALIZER(ThreadPoolExecutorCommonTests)(InitializerContext*) {
- addTestsForExecutor("ThreadPoolExecutorCommon",
- [](std::unique_ptr<NetworkInterfaceMock>* net) {
- return makeThreadPoolTestExecutor(std::move(*net));
- });
+ addTestsForExecutor("ThreadPoolExecutorCommon", [](std::unique_ptr<NetworkInterfaceMock>* net) {
+ return makeThreadPoolTestExecutor(std::move(*net));
+ });
return Status::OK();
}
@@ -100,14 +99,16 @@ TEST_F(ThreadPoolExecutorTest, ShutdownAndScheduleRaceDoesNotCrash) {
auto& executor = getExecutor();
launchExecutorThread();
- ASSERT_OK(executor.scheduleWork([&](const TaskExecutor::CallbackArgs& cbData) {
- status1 = cbData.status;
- if (!status1.isOK())
- return;
- barrier.countDownAndWait();
- cb2 = cbData.executor->scheduleWork(
- [&status2](const TaskExecutor::CallbackArgs& cbData) { status2 = cbData.status; });
- }).getStatus());
+ ASSERT_OK(executor
+ .scheduleWork([&](const TaskExecutor::CallbackArgs& cbData) {
+ status1 = cbData.status;
+ if (!status1.isOK())
+ return;
+ barrier.countDownAndWait();
+ cb2 = cbData.executor->scheduleWork([&status2](
+ const TaskExecutor::CallbackArgs& cbData) { status2 = cbData.status; });
+ })
+ .getStatus());
auto fpTPTE1 =
getGlobalFailPointRegistry()->getFailPoint("scheduleIntoPoolSpinsUntilThreadPoolShutsDown");
diff --git a/src/mongo/executor/thread_pool_task_executor_test_fixture.h b/src/mongo/executor/thread_pool_task_executor_test_fixture.h
index 04a3089bbab..b549184b26f 100644
--- a/src/mongo/executor/thread_pool_task_executor_test_fixture.h
+++ b/src/mongo/executor/thread_pool_task_executor_test_fixture.h
@@ -31,8 +31,8 @@
#include <memory>
#include "mongo/executor/network_interface_mock.h"
-#include "mongo/executor/thread_pool_task_executor.h"
#include "mongo/executor/task_executor_test_fixture.h"
+#include "mongo/executor/thread_pool_task_executor.h"
namespace mongo {
namespace executor {
diff --git a/src/mongo/logger/console_test.cpp b/src/mongo/logger/console_test.cpp
index 456935c9d77..592d632f927 100644
--- a/src/mongo/logger/console_test.cpp
+++ b/src/mongo/logger/console_test.cpp
@@ -7,11 +7,11 @@
*/
#include "mongo/logger/console.h"
+#include "mongo/unittest/unittest.h"
#include <iostream>
#include <ostream>
#include <sstream>
#include <string>
-#include "mongo/unittest/unittest.h"
namespace {
diff --git a/src/mongo/logger/log_component.cpp b/src/mongo/logger/log_component.cpp
index fc83d76aaf2..d64c4d8c66d 100644
--- a/src/mongo/logger/log_component.cpp
+++ b/src/mongo/logger/log_component.cpp
@@ -46,7 +46,7 @@ std::string _dottedNames[LogComponent::kNumLogComponents + 1];
* Returns StringData created from a string literal
*/
template <size_t N>
-StringData createStringData(const char(&val)[N]) {
+StringData createStringData(const char (&val)[N]) {
return StringData(val, StringData::LiteralTag());
}
@@ -54,8 +54,8 @@ StringData createStringData(const char(&val)[N]) {
// Fully initialize _dottedNames before we enter multithreaded execution.
//
-MONGO_INITIALIZER_WITH_PREREQUISITES(SetupDottedNames,
- MONGO_NO_PREREQUISITES)(InitializerContext* context) {
+MONGO_INITIALIZER_WITH_PREREQUISITES(SetupDottedNames, MONGO_NO_PREREQUISITES)
+(InitializerContext* context) {
for (int i = 0; i <= int(LogComponent::kNumLogComponents); ++i) {
logger::LogComponent component = static_cast<logger::LogComponent::Value>(i);
component.getDottedName();
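Two distinct rules fire in log_component.cpp: a space is inserted in the array-reference parameter, giving `const char (&val)[N]`, and MONGO_INITIALIZER_WITH_PREREQUISITES is broken after the macro's closing parenthesis so the function's own parameter list starts a fresh line. A self-contained sketch of the array-reference signature in plain C++ (StringData is mongo's type; a pointer/length pair stands in for it here):

#include <cstddef>
#include <cstdio>
#include <utility>

// Deduces N from a string literal at compile time, as createStringData() does
// in the diff; the added space before "(&val)" is the whole formatting change.
template <size_t N>
std::pair<const char*, size_t> literal(const char (&val)[N]) {
    return {val, N - 1};  // N counts the trailing '\0'
}

int main() {
    auto s = literal("logger");
    std::printf("%zu\n", s.second);  // prints 6
    return 0;
}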
diff --git a/src/mongo/logger/log_domain-impl.h b/src/mongo/logger/log_domain-impl.h
index 3f5e5038d34..bbf4d93aba9 100644
--- a/src/mongo/logger/log_domain-impl.h
+++ b/src/mongo/logger/log_domain-impl.h
@@ -42,8 +42,7 @@ namespace mongo {
namespace logger {
template <typename E>
-LogDomain<E>::LogDomain()
- : _abortOnFailure(false) {}
+LogDomain<E>::LogDomain() : _abortOnFailure(false) {}
template <typename E>
LogDomain<E>::~LogDomain() {
diff --git a/src/mongo/logger/log_test.cpp b/src/mongo/logger/log_test.cpp
index 8868e037fc8..f4b8ec2bc34 100644
--- a/src/mongo/logger/log_test.cpp
+++ b/src/mongo/logger/log_test.cpp
@@ -376,7 +376,8 @@ void testEncodedLogLine(const MessageEventEphemeral& event, const std::string& e
std::string s = os.str();
if (s.find(expectedSubstring) == std::string::npos) {
FAIL(str::stream() << "encoded log line does not contain substring \"" << expectedSubstring
- << "\". log line: " << s);
+ << "\". log line: "
+ << s);
}
}
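The FAIL(...) hunk above shows the wrapping rule that accounts for most of the noise in the rest of this commit: once a `<<` chain no longer fits on one line, clang-format puts every remaining operand on its own line, aligned under the first `<<`, rather than packing several operands per continuation line. A sketch with std::ostringstream standing in for mongo's str::stream():

#include <iostream>
#include <sstream>
#include <string>

int main() {
    const std::string expectedSubstring = "substring";
    const std::string s = "the actual log line";
    std::ostringstream msg;
    // Post-format shape: one operand per wrapped line, aligned on '<<'.
    msg << "encoded log line does not contain substring \"" << expectedSubstring
        << "\". log line: "
        << s;
    std::cout << msg.str() << '\n';
    return 0;
}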
diff --git a/src/mongo/logger/logger.h b/src/mongo/logger/logger.h
index 71411b3b581..7ce4dcb8478 100644
--- a/src/mongo/logger/logger.h
+++ b/src/mongo/logger/logger.h
@@ -27,8 +27,8 @@
#pragma once
-#include "mongo/logger/message_log_domain.h"
#include "mongo/logger/log_manager.h"
+#include "mongo/logger/message_log_domain.h"
#include "mongo/logger/rotatable_file_manager.h"
namespace mongo {
diff --git a/src/mongo/logger/parse_log_component_settings.cpp b/src/mongo/logger/parse_log_component_settings.cpp
index a99a69857d6..eb82cc60ae3 100644
--- a/src/mongo/logger/parse_log_component_settings.cpp
+++ b/src/mongo/logger/parse_log_component_settings.cpp
@@ -93,20 +93,22 @@ StatusWith<std::vector<LogComponentSetting>> parseLogComponentSettings(const BSO
const LogComponent curr = _getComponentForShortName(shortName);
if (curr == LogComponent::kNumLogComponents || curr.parent() != parentComponent) {
- return StatusWith<Result>(ErrorCodes::BadValue,
- str::stream() << "Invalid component name "
- << parentComponent.getDottedName() << "."
- << shortName);
+ return StatusWith<Result>(
+ ErrorCodes::BadValue,
+ str::stream() << "Invalid component name " << parentComponent.getDottedName() << "."
+ << shortName);
}
if (elem.isNumber()) {
levelsToSet.push_back(LogComponentSetting(curr, elem.numberInt()));
continue;
}
if (elem.type() != Object) {
- return StatusWith<Result>(
- ErrorCodes::BadValue,
- str::stream() << "Invalid type " << typeName(elem.type()) << "for component "
- << parentComponent.getDottedName() << "." << shortName);
+ return StatusWith<Result>(ErrorCodes::BadValue,
+ str::stream() << "Invalid type " << typeName(elem.type())
+ << "for component "
+ << parentComponent.getDottedName()
+ << "."
+ << shortName);
}
iterators.push_back(iter);
parentComponent = curr;
diff --git a/src/mongo/logger/parse_log_component_settings_test.cpp b/src/mongo/logger/parse_log_component_settings_test.cpp
index 4f32825361a..3284d41d3f4 100644
--- a/src/mongo/logger/parse_log_component_settings_test.cpp
+++ b/src/mongo/logger/parse_log_component_settings_test.cpp
@@ -139,7 +139,10 @@ TEST(Multi, FailBadComponent) {
BSONObj input =
BSON("verbosity" << 6 << "accessControl" << BSON("verbosity" << 5) << "storage"
<< BSON("verbosity" << 4 << "journal" << BSON("verbosity" << 6))
- << "No Such Component" << BSON("verbosity" << 2) << "extrafield" << 123);
+ << "No Such Component"
+ << BSON("verbosity" << 2)
+ << "extrafield"
+ << 123);
StatusWith<Settings> result = parseLogComponentSettings(input);
diff --git a/src/mongo/logger/ramlog.cpp b/src/mongo/logger/ramlog.cpp
index e9d7685d49f..42742f32bcc 100644
--- a/src/mongo/logger/ramlog.cpp
+++ b/src/mongo/logger/ramlog.cpp
@@ -34,8 +34,8 @@
#include "mongo/base/init.h"
#include "mongo/base/status.h"
#include "mongo/logger/message_event_utf8_encoder.h"
-#include "mongo/util/mongoutils/html.h"
#include "mongo/util/map_util.h"
+#include "mongo/util/mongoutils/html.h"
#include "mongo/util/mongoutils/str.h"
namespace mongo {
diff --git a/src/mongo/logger/ramlog.h b/src/mongo/logger/ramlog.h
index c69fb1e4814..54901948f74 100644
--- a/src/mongo/logger/ramlog.h
+++ b/src/mongo/logger/ramlog.h
@@ -29,10 +29,10 @@
#pragma once
+#include <boost/version.hpp>
#include <sstream>
#include <string>
#include <vector>
-#include <boost/version.hpp>
#include "mongo/base/disallow_copying.h"
#include "mongo/base/status.h"
diff --git a/src/mongo/logger/rotatable_file_writer.cpp b/src/mongo/logger/rotatable_file_writer.cpp
index 9b23952ac02..561869645ca 100644
--- a/src/mongo/logger/rotatable_file_writer.cpp
+++ b/src/mongo/logger/rotatable_file_writer.cpp
@@ -247,25 +247,34 @@ Status RotatableFileWriter::Use::rotate(bool renameOnRotate, const std::string&
if (renameOnRotate) {
try {
if (boost::filesystem::exists(renameTarget)) {
- return Status(ErrorCodes::FileRenameFailed,
- mongoutils::str::stream()
- << "Renaming file " << _writer->_fileName << " to "
- << renameTarget << " failed; destination already exists");
+ return Status(
+ ErrorCodes::FileRenameFailed,
+ mongoutils::str::stream() << "Renaming file " << _writer->_fileName
+ << " to "
+ << renameTarget
+ << " failed; destination already exists");
}
} catch (const std::exception& e) {
- return Status(ErrorCodes::FileRenameFailed,
- mongoutils::str::stream()
- << "Renaming file " << _writer->_fileName << " to "
- << renameTarget << " failed; Cannot verify whether destination "
- "already exists: " << e.what());
+ return Status(
+ ErrorCodes::FileRenameFailed,
+ mongoutils::str::stream() << "Renaming file " << _writer->_fileName << " to "
+ << renameTarget
+ << " failed; Cannot verify whether destination "
+ "already exists: "
+ << e.what());
}
if (0 != renameFile(_writer->_fileName, renameTarget)) {
return Status(ErrorCodes::FileRenameFailed,
- mongoutils::str::stream()
- << "Failed to rename \"" << _writer->_fileName << "\" to \""
- << renameTarget << "\": " << strerror(errno) << " (" << errno
- << ')');
+ mongoutils::str::stream() << "Failed to rename \""
+ << _writer->_fileName
+ << "\" to \""
+ << renameTarget
+ << "\": "
+ << strerror(errno)
+ << " ("
+ << errno
+ << ')');
// TODO(schwerin): Make errnoWithDescription() available in the logger library, and
// use it here.
}
diff --git a/src/mongo/platform/decimal128.cpp b/src/mongo/platform/decimal128.cpp
index d0af1ea5bb9..1639c7fb749 100644
--- a/src/mongo/platform/decimal128.cpp
+++ b/src/mongo/platform/decimal128.cpp
@@ -25,8 +25,8 @@
* then also delete it in the license file.
*/
-#include "mongo/platform/basic.h"
#include "mongo/platform/decimal128.h"
+#include "mongo/platform/basic.h"
#include <cmath>
#include <cstdlib>
diff --git a/src/mongo/platform/posix_fadvise.cpp b/src/mongo/platform/posix_fadvise.cpp
index eecf49e5351..76b5831b5d5 100644
--- a/src/mongo/platform/posix_fadvise.cpp
+++ b/src/mongo/platform/posix_fadvise.cpp
@@ -52,9 +52,8 @@ int posix_fadvise(int fd, off_t offset, off_t len, int advice) {
// 'posix_fadvise()' on Solaris will call the emulation if the symbol is not found
//
-MONGO_INITIALIZER_GENERAL(SolarisPosixFadvise,
- MONGO_NO_PREREQUISITES,
- ("default"))(InitializerContext* context) {
+MONGO_INITIALIZER_GENERAL(SolarisPosixFadvise, MONGO_NO_PREREQUISITES, ("default"))
+(InitializerContext* context) {
void* functionAddress = dlsym(RTLD_DEFAULT, "posix_fadvise");
if (functionAddress != NULL) {
mongo::pal::posix_fadvise_switcher =
diff --git a/src/mongo/platform/process_id.cpp b/src/mongo/platform/process_id.cpp
index e485c9e0878..12bab6fbeab 100644
--- a/src/mongo/platform/process_id.cpp
+++ b/src/mongo/platform/process_id.cpp
@@ -30,8 +30,8 @@
#include "mongo/platform/process_id.h"
#include <iostream>
-#include <sstream>
#include <limits>
+#include <sstream>
namespace mongo {
diff --git a/src/mongo/platform/random.cpp b/src/mongo/platform/random.cpp
index 48df149d460..c1ab7e8429c 100644
--- a/src/mongo/platform/random.cpp
+++ b/src/mongo/platform/random.cpp
@@ -43,13 +43,13 @@
#define _CRT_RAND_S
#include <cstdlib>
-#include <iostream>
#include <fstream>
+#include <iostream>
#include <limits>
#include <mongo/stdx/memory.h>
-#include <mongo/util/log.h>
#include <mongo/util/assert_util.h>
+#include <mongo/util/log.h>
namespace mongo {
@@ -112,7 +112,8 @@ public:
&_algHandle, BCRYPT_RNG_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0);
if (ntstatus != STATUS_SUCCESS) {
error() << "Failed to open crypto algorithm provider while creating secure random "
- "object; NTSTATUS: " << ntstatus;
+ "object; NTSTATUS: "
+ << ntstatus;
fassertFailed(28815);
}
}
@@ -121,7 +122,8 @@ public:
auto ntstatus = ::BCryptCloseAlgorithmProvider(_algHandle, 0);
if (ntstatus != STATUS_SUCCESS) {
warning() << "Failed to close crypto algorithm provider destroying secure random "
- "object; NTSTATUS: " << ntstatus;
+ "object; NTSTATUS: "
+ << ntstatus;
}
}
diff --git a/src/mongo/platform/shared_library_posix.cpp b/src/mongo/platform/shared_library_posix.cpp
index 2cad9be817e..9468665dfe6 100644
--- a/src/mongo/platform/shared_library_posix.cpp
+++ b/src/mongo/platform/shared_library_posix.cpp
@@ -78,7 +78,8 @@ StatusWith<void*> SharedLibrary::getSymbol(StringData name) {
if (error_msg != nullptr) {
return StatusWith<void*>(ErrorCodes::InternalError,
str::stream() << "dlsym failed for symbol " << name
- << " with error message: " << error_msg);
+ << " with error message: "
+ << error_msg);
}
return StatusWith<void*>(symbol);
diff --git a/src/mongo/platform/shared_library_windows.cpp b/src/mongo/platform/shared_library_windows.cpp
index e3f9431ce60..19e90bc2af0 100644
--- a/src/mongo/platform/shared_library_windows.cpp
+++ b/src/mongo/platform/shared_library_windows.cpp
@@ -55,9 +55,9 @@ StatusWith<std::unique_ptr<SharedLibrary>> SharedLibrary::create(
HMODULE handle = LoadLibraryW(full_path.c_str());
if (handle == nullptr) {
- return StatusWith<std::unique_ptr<SharedLibrary>>(
- ErrorCodes::InternalError,
- str::stream() << "Load library failed: " << errnoWithDescription());
+ return StatusWith<std::unique_ptr<SharedLibrary>>(ErrorCodes::InternalError,
+ str::stream() << "Load library failed: "
+ << errnoWithDescription());
}
return StatusWith<std::unique_ptr<SharedLibrary>>(
@@ -73,9 +73,9 @@ StatusWith<void*> SharedLibrary::getSymbol(StringData name) {
if (function == nullptr) {
DWORD gle = GetLastError();
if (gle != ERROR_PROC_NOT_FOUND) {
- return StatusWith<void*>(
- ErrorCodes::InternalError,
- str::stream() << "GetProcAddress failed for symbol: " << errnoWithDescription());
+ return StatusWith<void*>(ErrorCodes::InternalError,
+ str::stream() << "GetProcAddress failed for symbol: "
+ << errnoWithDescription());
}
}
diff --git a/src/mongo/platform/strcasestr.cpp b/src/mongo/platform/strcasestr.cpp
index eb5f39b442e..d71e61c8fe6 100644
--- a/src/mongo/platform/strcasestr.cpp
+++ b/src/mongo/platform/strcasestr.cpp
@@ -94,9 +94,8 @@ namespace mongo {
// 'strcasestr()' on Solaris will call the emulation if the symbol is not found
//
-MONGO_INITIALIZER_GENERAL(SolarisStrCaseCmp,
- MONGO_NO_PREREQUISITES,
- ("default"))(InitializerContext* context) {
+MONGO_INITIALIZER_GENERAL(SolarisStrCaseCmp, MONGO_NO_PREREQUISITES, ("default"))
+(InitializerContext* context) {
void* functionAddress = dlsym(RTLD_DEFAULT, "strcasestr");
if (functionAddress != NULL) {
mongo::pal::strcasestr_switcher =
diff --git a/src/mongo/platform/strnlen.cpp b/src/mongo/platform/strnlen.cpp
index 04bf74d7b65..1cd78875241 100644
--- a/src/mongo/platform/strnlen.cpp
+++ b/src/mongo/platform/strnlen.cpp
@@ -25,8 +25,8 @@
* then also delete it in the license file.
*/
-#include "mongo/config.h"
#include "mongo/platform/strnlen.h"
+#include "mongo/config.h"
#ifndef MONGO_CONFIG_HAVE_STRNLEN
diff --git a/src/mongo/platform/windows_basic.h b/src/mongo/platform/windows_basic.h
index ac6cb289dc8..372630ffd5f 100644
--- a/src/mongo/platform/windows_basic.h
+++ b/src/mongo/platform/windows_basic.h
@@ -86,9 +86,9 @@
// get the definitions from ntstatus.h, which has a more complete list.
#define WIN32_NO_STATUS
+#include <windows.h>
#include <winsock2.h> //this must be included before the first windows.h include
#include <ws2tcpip.h>
-#include <windows.h>
#undef WIN32_NO_STATUS
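Worth noting: the sort above moves <windows.h> ahead of <winsock2.h> even though the surviving comment says winsock2.h must be included first; lexicographic ordering cannot see such constraints. Where include order is genuinely load-bearing, the usual escape hatch is clang-format's off/on marker pair, shown here as a general sketch (Windows-only headers, and not something this commit itself adds):

// clang-format off
#include <winsock2.h>  // must precede windows.h
#include <windows.h>
// clang-format on
#include <ws2tcpip.h>

int main() {
    return 0;
}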
diff --git a/src/mongo/rpc/command_request.cpp b/src/mongo/rpc/command_request.cpp
index 577c3a27349..9a27ac45512 100644
--- a/src/mongo/rpc/command_request.cpp
+++ b/src/mongo/rpc/command_request.cpp
@@ -74,8 +74,11 @@ CommandRequest::CommandRequest(const Message* message) : _message(message) {
uassert(28636,
str::stream() << "Database parsed in OP_COMMAND message must be between"
- << kMinDatabaseLength << " and " << kMaxDatabaseLength
- << " bytes. Got: " << _database,
+ << kMinDatabaseLength
+ << " and "
+ << kMaxDatabaseLength
+ << " bytes. Got: "
+ << _database,
(_database.size() >= kMinDatabaseLength) && (_database.size() <= kMaxDatabaseLength));
uassert(
@@ -88,8 +91,11 @@ CommandRequest::CommandRequest(const Message* message) : _message(message) {
uassert(28637,
str::stream() << "Command name parsed in OP_COMMAND message must be between"
- << kMinCommandNameLength << " and " << kMaxCommandNameLength
- << " bytes. Got: " << _database,
+ << kMinCommandNameLength
+ << " and "
+ << kMaxCommandNameLength
+ << " bytes. Got: "
+ << _database,
(_commandName.size() >= kMinCommandNameLength) &&
(_commandName.size() <= kMaxCommandNameLength));
@@ -99,7 +105,8 @@ CommandRequest::CommandRequest(const Message* message) : _message(message) {
uassert(39950,
str::stream() << "Command name parsed in OP_COMMAND message '" << _commandName
<< "' doesn't match command name from object '"
- << _commandArgs.firstElementFieldName() << '\'',
+ << _commandArgs.firstElementFieldName()
+ << '\'',
_commandArgs.firstElementFieldName() == _commandName);
uassertStatusOK(cur.readAndAdvance<>(&obj));
diff --git a/src/mongo/rpc/command_request_test.cpp b/src/mongo/rpc/command_request_test.cpp
index 2dc944b7118..fece0b22352 100644
--- a/src/mongo/rpc/command_request_test.cpp
+++ b/src/mongo/rpc/command_request_test.cpp
@@ -36,8 +36,8 @@
#include "mongo/rpc/command_request.h"
#include "mongo/rpc/command_request_builder.h"
#include "mongo/unittest/unittest.h"
-#include "mongo/util/net/message.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/net/message.h"
namespace {
diff --git a/src/mongo/rpc/legacy_reply.cpp b/src/mongo/rpc/legacy_reply.cpp
index e298a97814f..e9af23dee9e 100644
--- a/src/mongo/rpc/legacy_reply.cpp
+++ b/src/mongo/rpc/legacy_reply.cpp
@@ -30,8 +30,8 @@
#include "mongo/rpc/legacy_reply.h"
-#include <utility>
#include <tuple>
+#include <utility>
#include "mongo/rpc/legacy_reply_builder.h"
#include "mongo/rpc/metadata.h"
@@ -51,17 +51,20 @@ LegacyReply::LegacyReply(const Message* message) : _message(std::move(message))
uassert(ErrorCodes::BadValue,
str::stream() << "Got legacy command reply with a bad cursorId field,"
- << " expected a value of 0 but got " << qr.getCursorId(),
+ << " expected a value of 0 but got "
+ << qr.getCursorId(),
qr.getCursorId() == 0);
uassert(ErrorCodes::BadValue,
str::stream() << "Got legacy command reply with a bad nReturned field,"
- << " expected a value of 1 but got " << qr.getNReturned(),
+ << " expected a value of 1 but got "
+ << qr.getNReturned(),
qr.getNReturned() == 1);
uassert(ErrorCodes::BadValue,
str::stream() << "Got legacy command reply with a bad startingFrom field,"
- << " expected a value of 0 but got " << qr.getStartingFrom(),
+ << " expected a value of 0 but got "
+ << qr.getStartingFrom(),
qr.getStartingFrom() == 0);
std::tie(_commandReply, _metadata) =
diff --git a/src/mongo/rpc/legacy_reply.h b/src/mongo/rpc/legacy_reply.h
index 9e6fb8b0b79..bfb366f1da2 100644
--- a/src/mongo/rpc/legacy_reply.h
+++ b/src/mongo/rpc/legacy_reply.h
@@ -31,8 +31,8 @@
#include "mongo/db/dbmessage.h"
#include "mongo/db/jsobj.h"
#include "mongo/rpc/document_range.h"
-#include "mongo/rpc/reply_interface.h"
#include "mongo/rpc/protocol.h"
+#include "mongo/rpc/reply_interface.h"
namespace mongo {
class Message;
diff --git a/src/mongo/rpc/legacy_request_builder.cpp b/src/mongo/rpc/legacy_request_builder.cpp
index c3a78803db3..c2155537611 100644
--- a/src/mongo/rpc/legacy_request_builder.cpp
+++ b/src/mongo/rpc/legacy_request_builder.cpp
@@ -30,8 +30,8 @@
#include "mongo/rpc/legacy_request_builder.h"
-#include <utility>
#include <tuple>
+#include <utility>
#include "mongo/db/namespace_string.h"
#include "mongo/rpc/metadata.h"
diff --git a/src/mongo/rpc/metadata.cpp b/src/mongo/rpc/metadata.cpp
index 09d88d78d31..5c9a2a47a40 100644
--- a/src/mongo/rpc/metadata.cpp
+++ b/src/mongo/rpc/metadata.cpp
@@ -34,8 +34,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/rpc/metadata/audit_metadata.h"
#include "mongo/rpc/metadata/config_server_metadata.h"
-#include "mongo/rpc/metadata/sharding_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
+#include "mongo/rpc/metadata/sharding_metadata.h"
namespace mongo {
namespace rpc {
diff --git a/src/mongo/rpc/metadata/audit_metadata.h b/src/mongo/rpc/metadata/audit_metadata.h
index 44caeef87f0..b7519d3cbd7 100644
--- a/src/mongo/rpc/metadata/audit_metadata.h
+++ b/src/mongo/rpc/metadata/audit_metadata.h
@@ -32,9 +32,9 @@
#include <vector>
#include "mongo/base/disallow_copying.h"
-#include "mongo/db/operation_context.h"
-#include "mongo/db/auth/user_name.h"
#include "mongo/db/auth/role_name.h"
+#include "mongo/db/auth/user_name.h"
+#include "mongo/db/operation_context.h"
namespace mongo {
class BSONObj;
diff --git a/src/mongo/rpc/metadata/config_server_metadata.cpp b/src/mongo/rpc/metadata/config_server_metadata.cpp
index dae494bc505..bf8be47c641 100644
--- a/src/mongo/rpc/metadata/config_server_metadata.cpp
+++ b/src/mongo/rpc/metadata/config_server_metadata.cpp
@@ -63,7 +63,9 @@ StatusWith<ConfigServerMetadata> ConfigServerMetadata::readFromMetadata(
} else if (metadataElem.type() != mongo::Object) {
return {ErrorCodes::TypeMismatch,
str::stream() << "ConfigServerMetadata element has incorrect type: expected"
- << mongo::Object << " but got " << metadataElem.type()};
+ << mongo::Object
+ << " but got "
+ << metadataElem.type()};
}
BSONObj configMetadataObj = metadataElem.Obj();
diff --git a/src/mongo/rpc/metadata/repl_set_metadata_test.cpp b/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
index 46df9bed886..bd2109fe413 100644
--- a/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/repl_set_metadata_test.cpp
@@ -60,8 +60,14 @@ TEST(ReplResponseMetadataTest, Roundtrip) {
<< BSON("ts" << opTime.getTimestamp() << "t" << opTime.getTerm())
<< "lastOpVisible"
<< BSON("ts" << opTime2.getTimestamp() << "t" << opTime2.getTerm())
- << "configVersion" << 6 << "replicaSetId" << metadata.getReplicaSetId()
- << "primaryIndex" << 12 << "syncSourceIndex" << -1)));
+ << "configVersion"
+ << 6
+ << "replicaSetId"
+ << metadata.getReplicaSetId()
+ << "primaryIndex"
+ << 12
+ << "syncSourceIndex"
+ << -1)));
BSONObj serializedObj = builder.obj();
ASSERT_EQ(expectedObj, serializedObj);
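The BSON(...) bodies above explain why this commit is so large: BSON is (roughly) a macro over a builder's operator<< chain, so clang-format applies the same one-operand-per-line stream rule inside it, exploding every multi-field document literal. A toy analogue in standard C++, with hypothetical names, to show why the macro formats like a stream expression:

#include <iostream>
#include <sstream>
#include <string>

// Toy stand-in for mongo's BSON macro: a builder whose operator<< chains, so
// the formatter sees an ordinary stream expression inside the parentheses.
struct ToyBuilder {
    std::ostringstream out;
    template <typename T>
    ToyBuilder& operator<<(const T& v) {
        out << v << ' ';
        return *this;
    }
};
#define TOYBSON(x) (ToyBuilder() << x).out.str()

int main() {
    // Wrapped the way clang-format wraps the real BSON bodies above.
    std::string doc = TOYBSON("configVersion" << 6 << "replicaSetId"
                                              << "rs0"
                                              << "primaryIndex"
                                              << 12);
    std::cout << doc << '\n';
    return 0;
}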
diff --git a/src/mongo/rpc/metadata/server_selection_metadata.cpp b/src/mongo/rpc/metadata/server_selection_metadata.cpp
index 56e82e0ae5a..9100fc18c55 100644
--- a/src/mongo/rpc/metadata/server_selection_metadata.cpp
+++ b/src/mongo/rpc/metadata/server_selection_metadata.cpp
@@ -30,8 +30,8 @@
#include "mongo/rpc/metadata/server_selection_metadata.h"
-#include <utility>
#include <tuple>
+#include <utility>
#include "mongo/base/status_with.h"
#include "mongo/bson/util/bson_extract.h"
@@ -160,7 +160,9 @@ StatusWith<ServerSelectionMetadata> ServerSelectionMetadata::readFromMetadata(
} else if (metadataElem.type() != mongo::Object) {
return {ErrorCodes::TypeMismatch,
str::stream() << "ServerSelectionMetadata element has incorrect type: expected"
- << mongo::Object << " but got " << metadataElem.type()};
+ << mongo::Object
+ << " but got "
+ << metadataElem.type()};
}
bool secondaryOk = false;
@@ -174,7 +176,9 @@ StatusWith<ServerSelectionMetadata> ServerSelectionMetadata::readFromMetadata(
if (ssmElem.type() != mongo::Object) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "ReadPreference has incorrect type: expected"
- << mongo::Object << "but got" << metadataElem.type());
+ << mongo::Object
+ << "but got"
+ << metadataElem.type());
}
auto parsedRps = ReadPreferenceSetting::fromBSON(ssmElem.Obj());
if (!parsedRps.isOK()) {
diff --git a/src/mongo/rpc/metadata/server_selection_metadata_test.cpp b/src/mongo/rpc/metadata/server_selection_metadata_test.cpp
index e2f7c103178..90b66f86185 100644
--- a/src/mongo/rpc/metadata/server_selection_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/server_selection_metadata_test.cpp
@@ -112,18 +112,21 @@ TEST(ServerSelectionMetadata, UpconvertValidMetadata) {
// Wrapped in 'query', with readPref.
checkUpconvert(BSON("query" << BSON("pong" << 1 << "foo"
- << "bar") << "$readPreference"
+ << "bar")
+ << "$readPreference"
<< BSON("mode"
<< "primary"
- << "tags" << BSON("dc"
- << "ny"))),
+ << "tags"
+ << BSON("dc"
+ << "ny"))),
0,
BSON("pong" << 1 << "foo"
<< "bar"),
BSON("$ssm" << BSON("$readPreference" << BSON("mode"
<< "primary"
- << "tags" << BSON("dc"
- << "ny")))));
+ << "tags"
+ << BSON("dc"
+ << "ny")))));
// Unwrapped, no readPref, no slaveOk
checkUpconvert(BSON("ping" << 1), 0, BSON("ping" << 1), BSONObj());
@@ -133,15 +136,17 @@ TEST(ServerSelectionMetadata, UpconvertValidMetadata) {
<< "$queryOptions"
<< BSON("$readPreference" << BSON("mode"
<< "nearest"
- << "tags" << BSON("rack"
- << "city")))),
+ << "tags"
+ << BSON("rack"
+ << "city")))),
0,
BSON("pang"
<< "pong"),
BSON("$ssm" << BSON("$readPreference" << BSON("mode"
<< "nearest"
- << "tags" << BSON("rack"
- << "city")))));
+ << "tags"
+ << BSON("rack"
+ << "city")))));
}
void checkUpconvertFails(const BSONObj& legacyCommand, ErrorCodes::Error error) {
@@ -156,27 +161,35 @@ void checkUpconvertFails(const BSONObj& legacyCommand, ErrorCodes::Error error)
TEST(ServerSelectionMetadata, UpconvertInvalidMetadata) {
// $readPreference not an object.
checkUpconvertFails(BSON("$query" << BSON("pang"
- << "pong") << "$readPreference" << 2),
+ << "pong")
+ << "$readPreference"
+ << 2),
ErrorCodes::TypeMismatch);
// has $maxTimeMS option
checkUpconvertFails(BSON("query" << BSON("foo"
- << "bar") << "$maxTimeMS" << 200),
+ << "bar")
+ << "$maxTimeMS"
+ << 200),
ErrorCodes::InvalidOptions);
checkUpconvertFails(BSON("$query" << BSON("foo"
- << "bar") << "$maxTimeMS" << 200),
+ << "bar")
+ << "$maxTimeMS"
+ << 200),
ErrorCodes::InvalidOptions);
// has $queryOptions field, but invalid $readPreference
checkUpconvertFails(BSON("ping"
<< "pong"
- << "$queryOptions" << BSON("$readPreference" << 1.2)),
+ << "$queryOptions"
+ << BSON("$readPreference" << 1.2)),
ErrorCodes::TypeMismatch);
// has $queryOptions field, but no $readPreference
checkUpconvertFails(BSON("ping"
<< "pong"
- << "$queryOptions" << BSONObj()),
+ << "$queryOptions"
+ << BSONObj()),
ErrorCodes::NoSuchKey);
// invalid wrapped query
diff --git a/src/mongo/rpc/metadata/sharding_metadata_test.cpp b/src/mongo/rpc/metadata/sharding_metadata_test.cpp
index 90050f6781d..51ebf5452bc 100644
--- a/src/mongo/rpc/metadata/sharding_metadata_test.cpp
+++ b/src/mongo/rpc/metadata/sharding_metadata_test.cpp
@@ -53,7 +53,8 @@ TEST(ShardingMetadata, ReadFromMetadata) {
auto sm = checkParse(
BSON("$gleStats" << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
<< kLastOpTime.getTerm())
- << "electionId" << kElectionId)));
+ << "electionId"
+ << kElectionId)));
ASSERT_EQ(sm.getLastElectionId(), kElectionId);
ASSERT_EQ(sm.getLastOpTime(), kLastOpTime);
}
@@ -87,7 +88,8 @@ TEST(ShardingMetadata, ReadFromInvalidMetadata) {
checkParseFails(
BSON("$gleStats" << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
<< kLastOpTime.getTerm())
- << "electionId" << 3)),
+ << "electionId"
+ << 3)),
ErrorCodes::TypeMismatch);
}
{
@@ -101,7 +103,9 @@ TEST(ShardingMetadata, ReadFromInvalidMetadata) {
checkParseFails(
BSON("$gleStats" << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
<< kLastOpTime.getTerm())
- << "electionId" << kElectionId << "extra"
+ << "electionId"
+ << kElectionId
+ << "extra"
<< "this should not be here")),
ErrorCodes::InvalidOptions);
}
@@ -131,14 +135,16 @@ TEST(ShardingMetadata, UpconvertValidMetadata) {
checkUpconvert(
BSON("ok" << 1 << "$gleStats"
<< BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
- << kLastOpTime.getTerm()) << "electionId"
+ << kLastOpTime.getTerm())
+ << "electionId"
<< kElectionId)),
BSON("ok" << 1),
BSON("$gleStats" << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
<< kLastOpTime.getTerm())
- << "electionId" << kElectionId)));
+ << "electionId"
+ << kElectionId)));
}
{
checkUpconvert(
@@ -146,8 +152,10 @@ TEST(ShardingMetadata, UpconvertValidMetadata) {
<< "some other stuff"
<< "$gleStats"
<< BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
- << kLastOpTime.getTerm()) << "electionId"
- << kElectionId) << "morestuff"
+ << kLastOpTime.getTerm())
+ << "electionId"
+ << kElectionId)
+ << "morestuff"
<< "more other stuff"),
BSON("ok" << 1 << "somestuff"
@@ -157,7 +165,8 @@ TEST(ShardingMetadata, UpconvertValidMetadata) {
BSON("$gleStats" << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
<< kLastOpTime.getTerm())
- << "electionId" << kElectionId)));
+ << "electionId"
+ << kElectionId)));
}
}
@@ -181,13 +190,15 @@ TEST(ShardingMetadata, UpconvertInvalidMetadata) {
ErrorCodes::TypeMismatch);
}
{
- checkUpconvertFails(BSON("ok"
- << 1 << "$gleStats"
- << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp()
- << "t" << kLastOpTime.getTerm())
- << "electionId" << kElectionId << "krandom"
- << "shouldnotbehere")),
- ErrorCodes::InvalidOptions);
+ checkUpconvertFails(
+ BSON("ok" << 1 << "$gleStats"
+ << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
+ << kLastOpTime.getTerm())
+ << "electionId"
+ << kElectionId
+ << "krandom"
+ << "shouldnotbehere")),
+ ErrorCodes::InvalidOptions);
}
}
@@ -205,9 +216,11 @@ TEST(ShardingMetadata, Downconvert) {
BSON("ok" << 1),
BSON("$gleStats" << BSON("lastOpTime" << BSON("ts" << kLastOpTime.getTimestamp() << "t"
<< kLastOpTime.getTerm())
- << "electionId" << kElectionId)),
- BSON("ok" << 1 << "$gleStats" << BSON("lastOpTime" << kLastOpTime.getTimestamp()
- << "electionId" << kElectionId)));
+ << "electionId"
+ << kElectionId)),
+ BSON("ok" << 1 << "$gleStats"
+ << BSON("lastOpTime" << kLastOpTime.getTimestamp() << "electionId"
+ << kElectionId)));
}
{ checkDownconvert(BSON("ok" << 1), BSONObj(), BSON("ok" << 1)); }
}
diff --git a/src/mongo/rpc/object_check.h b/src/mongo/rpc/object_check.h
index 8d58baa203b..8c8be115136 100644
--- a/src/mongo/rpc/object_check.h
+++ b/src/mongo/rpc/object_check.h
@@ -29,8 +29,8 @@
#pragma once
#include "mongo/base/data_type_validated.h"
-#include "mongo/bson/bsontypes.h"
#include "mongo/bson/bson_validate.h"
+#include "mongo/bson/bsontypes.h"
#include "mongo/db/server_options.h"
// We do not use the rpc namespace here so we can specialize Validator.
diff --git a/src/mongo/rpc/object_check_test.cpp b/src/mongo/rpc/object_check_test.cpp
index 03adedd0983..00092ec009d 100644
--- a/src/mongo/rpc/object_check_test.cpp
+++ b/src/mongo/rpc/object_check_test.cpp
@@ -53,8 +53,9 @@ TEST(DataTypeValidated, BSONValidationEnabled) {
BSONObj valid = BSON("baz"
<< "bar"
- << "garply" << BSON("foo"
- << "bar"));
+ << "garply"
+ << BSON("foo"
+ << "bar"));
char buf[1024] = {0};
std::copy(valid.objdata(), valid.objdata() + valid.objsize(), begin(buf));
{
diff --git a/src/mongo/rpc/protocol.cpp b/src/mongo/rpc/protocol.cpp
index 6ad7d5859a4..b7e2c8f098c 100644
--- a/src/mongo/rpc/protocol.cpp
+++ b/src/mongo/rpc/protocol.cpp
@@ -74,9 +74,9 @@ StatusWith<Protocol> negotiate(ProtocolSet fst, ProtocolSet snd) {
ProtocolSet common = fst & snd;
- auto it = std::find_if(begin(kPreferredProtos),
- end(kPreferredProtos),
- [common](Protocol p) { return common & static_cast<ProtocolSet>(p); });
+ auto it = std::find_if(begin(kPreferredProtos), end(kPreferredProtos), [common](Protocol p) {
+ return common & static_cast<ProtocolSet>(p);
+ });
if (it == end(kPreferredProtos)) {
return Status(ErrorCodes::RPCProtocolNegotiationFailed, "No common protocol found.");
diff --git a/src/mongo/rpc/protocol_test.cpp b/src/mongo/rpc/protocol_test.cpp
index ad8437ee280..150c97583e7 100644
--- a/src/mongo/rpc/protocol_test.cpp
+++ b/src/mongo/rpc/protocol_test.cpp
@@ -81,15 +81,16 @@ TEST(Protocol, parseProtocolSetFromIsMasterReply) {
// MongoDB 3.2 (mongos)
auto mongos32 =
BSON("maxWireVersion" << static_cast<int>(WireVersion::FIND_COMMAND) << "minWireVersion"
- << static_cast<int>(WireVersion::RELEASE_2_4_AND_BEFORE) << "msg"
+ << static_cast<int>(WireVersion::RELEASE_2_4_AND_BEFORE)
+ << "msg"
<< "isdbgrid");
ASSERT_EQ(assertGet(parseProtocolSetFromIsMasterReply(mongos32)), supports::kOpQueryOnly);
}
{
// MongoDB 3.0 (mongod)
- auto mongod30 = BSON("maxWireVersion"
- << static_cast<int>(WireVersion::RELEASE_2_7_7) << "minWireVersion"
+ auto mongod30 = BSON(
+ "maxWireVersion" << static_cast<int>(WireVersion::RELEASE_2_7_7) << "minWireVersion"
<< static_cast<int>(WireVersion::RELEASE_2_4_AND_BEFORE));
ASSERT_EQ(assertGet(parseProtocolSetFromIsMasterReply(mongod30)), supports::kOpQueryOnly);
}
diff --git a/src/mongo/rpc/reply_builder_test.cpp b/src/mongo/rpc/reply_builder_test.cpp
index ce37dc36141..dddfbd535a1 100644
--- a/src/mongo/rpc/reply_builder_test.cpp
+++ b/src/mongo/rpc/reply_builder_test.cpp
@@ -32,12 +32,12 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/rpc/command_reply.h"
-#include "mongo/rpc/legacy_reply.h"
#include "mongo/rpc/command_reply_builder.h"
-#include "mongo/rpc/legacy_reply_builder.h"
#include "mongo/rpc/document_range.h"
-#include "mongo/unittest/unittest.h"
+#include "mongo/rpc/legacy_reply.h"
+#include "mongo/rpc/legacy_reply_builder.h"
#include "mongo/unittest/death_test.h"
+#include "mongo/unittest/unittest.h"
namespace {
diff --git a/src/mongo/s/balancer/balancer.cpp b/src/mongo/s/balancer/balancer.cpp
index c08ab24b900..0b8a93f8fe9 100644
--- a/src/mongo/s/balancer/balancer.cpp
+++ b/src/mongo/s/balancer/balancer.cpp
@@ -366,11 +366,11 @@ void Balancer::_mainThread() {
uassert(13258, "oids broken after resetting!", _checkOIDs(txn.get()));
{
- auto scopedDistLock = shardingContext->catalogManager(txn.get())
- ->distLock(txn.get(),
- "balancer",
- "doing balance round",
- DistLockManager::kSingleLockAttemptTimeout);
+ auto scopedDistLock = shardingContext->catalogManager(txn.get())->distLock(
+ txn.get(),
+ "balancer",
+ "doing balance round",
+ DistLockManager::kSingleLockAttemptTimeout);
if (!scopedDistLock.isOK()) {
LOG(1) << "skipping balancing round" << causedBy(scopedDistLock.getStatus());
@@ -412,8 +412,8 @@ void Balancer::_mainThread() {
roundDetails.setSucceeded(static_cast<int>(candidateChunks.size()),
_balancedLastTime);
- shardingContext->catalogManager(txn.get())
- ->logAction(txn.get(), "balancer.round", "", roundDetails.toBSON());
+ shardingContext->catalogManager(txn.get())->logAction(
+ txn.get(), "balancer.round", "", roundDetails.toBSON());
}
LOG(1) << "*** End of balancing round";
@@ -432,8 +432,8 @@ void Balancer::_mainThread() {
// This round failed, tell the world!
roundDetails.setFailed(e.what());
- shardingContext->catalogManager(txn.get())
- ->logAction(txn.get(), "balancer.round", "", roundDetails.toBSON());
+ shardingContext->catalogManager(txn.get())->logAction(
+ txn.get(), "balancer.round", "", roundDetails.toBSON());
// Sleep a fair amount before retrying because of the error
sleepFor(balanceRoundInterval);
@@ -464,7 +464,8 @@ bool Balancer::_init(OperationContext* txn) {
return true;
} catch (const std::exception& e) {
warning() << "could not initialize balancer, please check that all shards and config "
- "servers are up: " << e.what();
+ "servers are up: "
+ << e.what();
return false;
}
}
diff --git a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
index a685dee7317..48e596a871d 100644
--- a/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
+++ b/src/mongo/s/balancer/balancer_chunk_selection_policy_impl.cpp
@@ -329,7 +329,8 @@ StatusWith<MigrateInfoVector> BalancerChunkSelectionPolicyImpl::_getMigrateCandi
// some progress balancing.
return {ErrorCodes::IllegalOperation,
str::stream()
- << "Tag boundaries " << tagInfo.toString()
+ << "Tag boundaries "
+ << tagInfo.toString()
<< " fall in the middle of an existing chunk. Balancing for collection "
<< nss.ns()
<< " will be postponed until the chunk is split appropriately."};
diff --git a/src/mongo/s/balancer/balancer_configuration.h b/src/mongo/s/balancer/balancer_configuration.h
index 0cc2cb48cd4..62ac468fbdd 100644
--- a/src/mongo/s/balancer/balancer_configuration.h
+++ b/src/mongo/s/balancer/balancer_configuration.h
@@ -33,9 +33,9 @@
#include <cstdint>
#include "mongo/base/disallow_copying.h"
+#include "mongo/platform/atomic_word.h"
#include "mongo/s/migration_secondary_throttle_options.h"
#include "mongo/stdx/mutex.h"
-#include "mongo/platform/atomic_word.h"
namespace mongo {
diff --git a/src/mongo/s/balancer/balancer_configuration_test.cpp b/src/mongo/s/balancer/balancer_configuration_test.cpp
index ffe95dde645..e31e76499c8 100644
--- a/src/mongo/s/balancer/balancer_configuration_test.cpp
+++ b/src/mongo/s/balancer/balancer_configuration_test.cpp
@@ -194,41 +194,44 @@ TEST(BalancerSettingsType, BalancingWindowStopLessThanStart) {
}
TEST(BalancerSettingsType, InvalidBalancingWindowStartEqualsStop) {
- ASSERT_NOT_OK(
- BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("start"
- << "00:00"
- << "stop"
- << "00:00"))).getStatus());
+ ASSERT_NOT_OK(BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("start"
+ << "00:00"
+ << "stop"
+ << "00:00")))
+ .getStatus());
}
TEST(BalancerSettingsType, InvalidBalancingWindowTimeFormat) {
ASSERT_NOT_OK(BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("start"
<< "23"
<< "stop"
- << "6"))).getStatus());
-
- ASSERT_NOT_OK(BalancerSettingsType::fromBSON(
- BSON("activeWindow" << BSON("start" << 23LL << "stop"
- << "6:00"))).getStatus());
+ << "6")))
+ .getStatus());
ASSERT_NOT_OK(
- BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("start"
- << "23:00"
- << "stop" << 6LL))).getStatus());
+ BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("start" << 23LL << "stop"
+ << "6:00")))
+ .getStatus());
+
+ ASSERT_NOT_OK(BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("start"
+ << "23:00"
+ << "stop"
+ << 6LL)))
+ .getStatus());
}
TEST(BalancerSettingsType, InvalidBalancingWindowFormat) {
- ASSERT_NOT_OK(
- BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("begin"
- << "23:00"
- << "stop"
- << "6:00"))).getStatus());
+ ASSERT_NOT_OK(BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("begin"
+ << "23:00"
+ << "stop"
+ << "6:00")))
+ .getStatus());
- ASSERT_NOT_OK(
- BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("start"
- << "23:00"
- << "end"
- << "6:00"))).getStatus());
+ ASSERT_NOT_OK(BalancerSettingsType::fromBSON(BSON("activeWindow" << BSON("start"
+ << "23:00"
+ << "end"
+ << "6:00")))
+ .getStatus());
}
TEST(ChunkSizeSettingsType, NormalValues) {
@@ -255,7 +258,8 @@ TEST(ChunkSizeSettingsType, IllegalValues) {
ASSERT_NOT_OK(ChunkSizeSettingsType::fromBSON(BSON("value" << -1)).getStatus());
ASSERT_NOT_OK(ChunkSizeSettingsType::fromBSON(BSON("value" << 1025)).getStatus());
ASSERT_NOT_OK(ChunkSizeSettingsType::fromBSON(BSON("value"
- << "WrongType")).getStatus());
+ << "WrongType"))
+ .getStatus());
ASSERT_NOT_OK(ChunkSizeSettingsType::fromBSON(BSON("IllegalKey" << 1)).getStatus());
}
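balancer_configuration_test.cpp shows the companion rule for method chains: when a call's argument list has already wrapped, a trailing member call such as .getStatus() is forced onto its own line and indented past the object expression, which is what turned these one-statement assertions into five-line stacks. A minimal sketch with toy types (hypothetical names, standard C++ only):

#include <iostream>
#include <string>

// Toy stand-ins for BalancerSettingsType::fromBSON(...).getStatus().
struct Status {
    bool ok;
};
struct ParseResult {
    Status getStatus() const { return Status{false}; }
};
ParseResult fromWindow(const std::string& start, const std::string& stop) {
    return ParseResult{};
}

int main() {
    // Post-format shape: the trailing .getStatus() drops to its own line once
    // the argument list itself has wrapped.
    bool ok = fromWindow("00:00",
                         "00:00")
                  .getStatus()
                  .ok;
    std::cout << (ok ? "ok" : "not ok") << '\n';
    return 0;
}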
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
index 4f19df96a47..db7d3151d6e 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
@@ -52,8 +52,14 @@ void noGrabLockFuncSet(StringData lockID,
Date_t time,
StringData why) {
FAIL(str::stream() << "grabLock not expected to be called. "
- << "lockID: " << lockID << ", who: " << who << ", processId: " << processId
- << ", why: " << why);
+ << "lockID: "
+ << lockID
+ << ", who: "
+ << who
+ << ", processId: "
+ << processId
+ << ", why: "
+ << why);
}
void noOvertakeLockFuncSet(StringData lockID,
@@ -64,13 +70,22 @@ void noOvertakeLockFuncSet(StringData lockID,
Date_t time,
StringData why) {
FAIL(str::stream() << "overtakeLock not expected to be called. "
- << "lockID: " << lockID << ", currentHolderTS: " << currentHolderTS
- << ", who: " << who << ", processId: " << processId << ", why: " << why);
+ << "lockID: "
+ << lockID
+ << ", currentHolderTS: "
+ << currentHolderTS
+ << ", who: "
+ << who
+ << ", processId: "
+ << processId
+ << ", why: "
+ << why);
}
void noUnLockFuncSet(const OID& lockSessionID) {
FAIL(str::stream() << "unlock not expected to be called. "
- << "lockSessionID: " << lockSessionID);
+ << "lockSessionID: "
+ << lockSessionID);
}
void noPingFuncSet(StringData processID, Date_t ping) {
@@ -79,22 +94,26 @@ void noPingFuncSet(StringData processID, Date_t ping) {
void noStopPingFuncSet(StringData processID) {
FAIL(str::stream() << "stopPing not expected to be called. "
- << "processID: " << processID);
+ << "processID: "
+ << processID);
}
void noGetLockByTSSet(const OID& lockSessionID) {
FAIL(str::stream() << "getLockByTS not expected to be called. "
- << "lockSessionID: " << lockSessionID);
+ << "lockSessionID: "
+ << lockSessionID);
}
void noGetLockByNameSet(StringData name) {
FAIL(str::stream() << "getLockByName not expected to be called. "
- << "lockName: " << name);
+ << "lockName: "
+ << name);
}
void noGetPingSet(StringData processId) {
FAIL(str::stream() << "getPing not expected to be called. "
- << "lockName: " << processId);
+ << "lockName: "
+ << processId);
}
void noGetServerInfoSet() {
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.cpp b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
index 7c2c58e83ed..9325caf29d7 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
@@ -34,9 +34,9 @@
#include <algorithm>
+#include "mongo/unittest/unittest.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/time_support.h"
-#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -47,8 +47,14 @@ void NoLockFuncSet(StringData name,
Milliseconds waitFor,
Milliseconds lockTryInterval) {
FAIL(str::stream() << "Lock not expected to be called. "
- << "Name: " << name << ", whyMessage: " << whyMessage
- << ", waitFor: " << waitFor << ", lockTryInterval: " << lockTryInterval);
+ << "Name: "
+ << name
+ << ", whyMessage: "
+ << whyMessage
+ << ", waitFor: "
+ << waitFor
+ << ", lockTryInterval: "
+ << lockTryInterval);
}
} // namespace
@@ -90,9 +96,9 @@ StatusWith<DistLockManager::ScopedDistLock> DistLockManagerMock::lockWithSession
return _lockReturnStatus;
}
- if (_locks.end() != std::find_if(_locks.begin(),
- _locks.end(),
- [name](LockInfo info) -> bool { return info.name == name; })) {
+ if (_locks.end() != std::find_if(_locks.begin(), _locks.end(), [name](LockInfo info) -> bool {
+ return info.name == name;
+ })) {
return Status(ErrorCodes::LockBusy,
str::stream() << "Lock \"" << name << "\" is already taken");
}
@@ -111,9 +117,9 @@ void DistLockManagerMock::unlockAll(OperationContext* txn, const std::string& pr
void DistLockManagerMock::unlock(OperationContext* txn, const DistLockHandle& lockHandle) {
std::vector<LockInfo>::iterator it =
- std::find_if(_locks.begin(),
- _locks.end(),
- [&lockHandle](LockInfo info) -> bool { return info.lockID == lockHandle; });
+ std::find_if(_locks.begin(), _locks.end(), [&lockHandle](LockInfo info) -> bool {
+ return info.lockID == lockHandle;
+ });
if (it == _locks.end()) {
return;
}
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
index f5e1522eed6..2db57413984 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set.cpp
@@ -56,16 +56,16 @@
#include "mongo/s/catalog/config_server_version.h"
#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/catalog/type_changelog.h"
+#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_config_version.h"
-#include "mongo/s/catalog/type_chunk.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
+#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/chunk_manager.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/set_shard_version_request.h"
@@ -188,7 +188,9 @@ StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
return {ErrorCodes::OperationFailed,
str::stream() << "'" << hostAndPort.toString() << "' "
<< "is already a member of the existing shard '"
- << shard->getConnString().toString() << "' (" << shard->getId()
+ << shard->getConnString().toString()
+ << "' ("
+ << shard->getId()
<< ")."};
}
}
@@ -231,7 +233,8 @@ StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
str::stream() << "isMaster returned invalid 'ismaster' "
<< "field when attempting to add "
<< connectionString.toString()
- << " as a shard: " << status.reason());
+ << " as a shard: "
+ << status.reason());
}
if (!isMaster) {
return {ErrorCodes::NotMaster,
@@ -255,7 +258,8 @@ StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
if (!providedSetName.empty() && foundSetName.empty()) {
return {ErrorCodes::OperationFailed,
str::stream() << "host did not return a set name; "
- << "is the replica set still initializing? " << resIsMaster};
+ << "is the replica set still initializing? "
+ << resIsMaster};
}
// Make sure the set name specified in the connection string matches the one where its hosts
@@ -263,7 +267,8 @@ StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
if (!providedSetName.empty() && (providedSetName != foundSetName)) {
return {ErrorCodes::OperationFailed,
str::stream() << "the provided connection string (" << connectionString.toString()
- << ") does not match the actual set name " << foundSetName};
+ << ") does not match the actual set name "
+ << foundSetName};
}
// Is it a config server?
@@ -304,8 +309,11 @@ StatusWith<ShardType> CatalogManagerReplicaSet::_validateHostAsShard(
if (hostSet.find(host) == hostSet.end()) {
return {ErrorCodes::OperationFailed,
str::stream() << "in seed list " << connectionString.toString() << ", host "
- << host << " does not belong to replica set " << foundSetName
- << "; found " << resIsMaster.toString()};
+ << host
+ << " does not belong to replica set "
+ << foundSetName
+ << "; found "
+ << resIsMaster.toString()};
}
}
}
@@ -417,8 +425,7 @@ StatusWith<BSONObj> CatalogManagerReplicaSet::_runCommandForAddShard(
Status(ErrorCodes::InternalError, "Internal error running command");
auto callStatus = _executorForAddShard->scheduleRemoteCommand(
- request,
- [&responseStatus](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
+ request, [&responseStatus](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
responseStatus = args.response;
});
if (!callStatus.isOK()) {
@@ -465,9 +472,13 @@ StatusWith<string> CatalogManagerReplicaSet::addShard(OperationContext* txn,
const auto& dbDoc = dbt.getValue().value;
return Status(ErrorCodes::OperationFailed,
str::stream() << "can't add shard "
- << "'" << shardConnectionString.toString() << "'"
- << " because a local database '" << dbName
- << "' exists in another " << dbDoc.getPrimary());
+ << "'"
+ << shardConnectionString.toString()
+ << "'"
+ << " because a local database '"
+ << dbName
+ << "' exists in another "
+ << dbDoc.getPrimary());
} else if (dbt != ErrorCodes::NamespaceNotFound) {
return dbt.getStatus();
}
@@ -807,7 +818,8 @@ Status CatalogManagerReplicaSet::shardCollection(OperationContext* txn,
if (countStatus.getValue() > 0) {
return Status(ErrorCodes::AlreadyInitialized,
str::stream() << "collection " << ns << " already sharded with "
- << countStatus.getValue() << " chunks.");
+ << countStatus.getValue()
+ << " chunks.");
}
}
@@ -1094,7 +1106,9 @@ Status CatalogManagerReplicaSet::getCollections(OperationContext* txn,
collections->clear();
return {ErrorCodes::FailedToParse,
str::stream() << "error while parsing " << CollectionType::ConfigNS
- << " document: " << obj << " : "
+ << " document: "
+ << obj
+ << " : "
<< collectionResult.getStatus().toString()};
}
@@ -1334,7 +1348,8 @@ Status CatalogManagerReplicaSet::getChunks(OperationContext* txn,
return {ErrorCodes::FailedToParse,
stream() << "Failed to parse chunk with id ("
<< obj[ChunkType::name()].toString()
- << "): " << chunkRes.getStatus().toString()};
+ << "): "
+ << chunkRes.getStatus().toString()};
}
chunks->push_back(chunkRes.getValue());
@@ -1366,8 +1381,8 @@ Status CatalogManagerReplicaSet::getTagsForCollection(OperationContext* txn,
if (!tagRes.isOK()) {
tags->clear();
return Status(ErrorCodes::FailedToParse,
- str::stream()
- << "Failed to parse tag: " << tagRes.getStatus().toString());
+ str::stream() << "Failed to parse tag: "
+ << tagRes.getStatus().toString());
}
tags->push_back(tagRes.getValue());
@@ -1381,7 +1396,8 @@ StatusWith<string> CatalogManagerReplicaSet::getTagForChunk(OperationContext* tx
const ChunkType& chunk) {
BSONObj query =
BSON(TagsType::ns(collectionNs) << TagsType::min() << BSON("$lte" << chunk.getMin())
- << TagsType::max() << BSON("$gte" << chunk.getMax()));
+ << TagsType::max()
+ << BSON("$gte" << chunk.getMax()));
auto findStatus = _exhaustiveFindOnConfig(
txn, kConfigReadSelector, NamespaceString(TagsType::ConfigNS), query, BSONObj(), 1);
if (!findStatus.isOK()) {
@@ -1400,7 +1416,8 @@ StatusWith<string> CatalogManagerReplicaSet::getTagForChunk(OperationContext* tx
if (!tagsResult.isOK()) {
return {ErrorCodes::FailedToParse,
stream() << "error while parsing " << TagsType::ConfigNS << " document: " << tagsDoc
- << " : " << tagsResult.getStatus().toString()};
+ << " : "
+ << tagsResult.getStatus().toString()};
}
return tagsResult.getValue().getTag();
}
@@ -1424,7 +1441,8 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> CatalogManagerReplicaSet::g
shards.clear();
return {ErrorCodes::FailedToParse,
stream() << "Failed to parse shard with id ("
- << doc[ShardType::name()].toString() << ")"
+ << doc[ShardType::name()].toString()
+ << ")"
<< causedBy(shardRes.getStatus())};
}
@@ -1432,7 +1450,8 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> CatalogManagerReplicaSet::g
if (!validateStatus.isOK()) {
return {validateStatus.code(),
stream() << "Failed to validate shard with id ("
- << doc[ShardType::name()].toString() << ")"
+ << doc[ShardType::name()].toString()
+ << ")"
<< causedBy(validateStatus)};
}
@@ -1550,8 +1569,9 @@ Status CatalogManagerReplicaSet::applyChunkOpsDeprecated(OperationContext* txn,
const BSONArray& preCondition,
const std::string& nss,
const ChunkVersion& lastChunkVersion) {
- BSONObj cmd = BSON("applyOps" << updateOps << "preCondition" << preCondition
- << kWriteConcernField << kMajorityWriteConcern.toBSON());
+ BSONObj cmd =
+ BSON("applyOps" << updateOps << "preCondition" << preCondition << kWriteConcernField
+ << kMajorityWriteConcern.toBSON());
auto response = Grid::get(txn)->shardRegistry()->getConfigShard()->runCommand(
txn,
@@ -1631,7 +1651,8 @@ void CatalogManagerReplicaSet::writeConfigServerDirect(OperationContext* txn,
if (batchRequest.sizeWriteOps() != 1) {
toBatchError(Status(ErrorCodes::InvalidOptions,
str::stream() << "Writes to config servers must have batch size of 1, "
- << "found " << batchRequest.sizeWriteOps()),
+ << "found "
+ << batchRequest.sizeWriteOps()),
batchResponse);
return;
}
@@ -1846,7 +1867,10 @@ Status CatalogManagerReplicaSet::_checkDbDoesNotExist(OperationContext* txn,
return Status(ErrorCodes::DatabaseDifferCase,
str::stream() << "can't have 2 databases that just differ on case "
- << " have: " << actualDbName << " want to add: " << dbName);
+ << " have: "
+ << actualDbName
+ << " want to add: "
+ << dbName);
}
StatusWith<std::string> CatalogManagerReplicaSet::_generateNewShardName(OperationContext* txn) {
@@ -1997,7 +2021,8 @@ Status CatalogManagerReplicaSet::initConfigVersion(OperationContext* txn) {
if (versionInfo.getCurrentVersion() < CURRENT_CONFIG_VERSION) {
return {ErrorCodes::IncompatibleShardingConfigVersion,
str::stream() << "need to upgrade current cluster version to v"
- << CURRENT_CONFIG_VERSION << "; currently at v"
+ << CURRENT_CONFIG_VERSION
+ << "; currently at v"
<< versionInfo.getCurrentVersion()};
}
@@ -2006,7 +2031,8 @@ Status CatalogManagerReplicaSet::initConfigVersion(OperationContext* txn) {
return {ErrorCodes::IncompatibleShardingConfigVersion,
str::stream() << "unable to create new config version document after "
- << kMaxConfigVersionInitRetry << " retries"};
+ << kMaxConfigVersionInitRetry
+ << " retries"};
}
StatusWith<VersionType> CatalogManagerReplicaSet::_getConfigVersion(OperationContext* txn) {
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
index 5b877f60f74..4bbdb9bcebd 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
@@ -234,13 +234,16 @@ TEST_F(AddShardTest, Standalone) {
expectListDatabases(shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk" << 1000),
+ << "sizeOnDisk"
+ << 1000),
BSON("name"
<< "TestDB1"
- << "sizeOnDisk" << 2000),
+ << "sizeOnDisk"
+ << 2000),
BSON("name"
<< "TestDB2"
- << "sizeOnDisk" << 5000)});
+ << "sizeOnDisk"
+ << 5000)});
// Make sure the shard add code checks for the presence of each of the two databases we returned
// in the previous call, in the config server metadata
@@ -309,13 +312,16 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
expectListDatabases(shardTarget,
std::vector<BSONObj>{BSON("name"
<< "local"
- << "sizeOnDisk" << 1000),
+ << "sizeOnDisk"
+ << 1000),
BSON("name"
<< "TestDB1"
- << "sizeOnDisk" << 2000),
+ << "sizeOnDisk"
+ << 2000),
BSON("name"
<< "TestDB2"
- << "sizeOnDisk" << 5000)});
+ << "sizeOnDisk"
+ << 5000)});
// Make sure the shard add code checks for the presence of each of the two databases we returned
// in the previous call, in the config server metadata
@@ -629,7 +635,8 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) {
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "config"
- << "configsvr" << true);
+ << "configsvr"
+ << true);
expectValidationCheck(shardTarget, commandResponse);
future.timed_get(kFutureTimeout);
@@ -660,7 +667,8 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) {
hosts.append("host1:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
future.timed_get(kFutureTimeout);
@@ -692,7 +700,8 @@ TEST_F(AddShardTest, ShardNameIsConfig) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
future.timed_get(kFutureTimeout);
@@ -724,7 +733,8 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
expectListDatabases(shardTarget,
@@ -764,7 +774,8 @@ TEST_F(AddShardTest, ReAddExistingShard) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
expectListDatabases(shardTarget,
@@ -828,7 +839,8 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
expectListDatabases(shardTarget,
@@ -884,7 +896,8 @@ TEST_F(AddShardTest, AddShardSucceedsEvenIfAddingDBsFromNewShardFails) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
expectListDatabases(shardTarget,
@@ -964,7 +977,8 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) {
hosts.append("host2:12345");
BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName"
<< "mySet"
- << "hosts" << hosts.arr());
+ << "hosts"
+ << hosts.arr());
expectValidationCheck(shardTarget, commandResponse);
expectListDatabases(shardTarget, {});
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
index 7e4fdb8330c..901f12798ea 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
@@ -340,8 +340,11 @@ TEST_F(ShardCollectionTest, noInitialChunksOrData) {
{
BSONObj logChangeDetail =
BSON("shardKey" << keyPattern.toBSON() << "collection" << ns << "primary"
- << shard.getName() + ":" + shard.getHost() << "initShards"
- << BSONArray() << "numChunks" << 1);
+ << shard.getName() + ":" + shard.getHost()
+ << "initShards"
+ << BSONArray()
+ << "numChunks"
+ << 1);
expectChangeLogCreate(configHost, BSON("ok" << 1));
expectChangeLogInsert(
configHost, network()->now(), "shardCollection.start", ns, logChangeDetail);
@@ -511,9 +514,11 @@ TEST_F(ShardCollectionTest, withInitialChunks) {
{
BSONObj logChangeDetail =
BSON("shardKey" << keyPattern.toBSON() << "collection" << ns << "primary"
- << shard0.getName() + ":" + shard0.getHost() << "initShards"
+ << shard0.getName() + ":" + shard0.getHost()
+ << "initShards"
<< BSON_ARRAY(shard0.getName() << shard1.getName() << shard2.getName())
- << "numChunks" << (int)expectedChunks.size());
+ << "numChunks"
+ << (int)expectedChunks.size());
expectChangeLogCreate(configHost, BSON("ok" << 1));
expectChangeLogInsert(
configHost, network()->now(), "shardCollection.start", ns, logChangeDetail);
@@ -655,8 +660,11 @@ TEST_F(ShardCollectionTest, withInitialData) {
{
BSONObj logChangeDetail =
BSON("shardKey" << keyPattern.toBSON() << "collection" << ns << "primary"
- << shard.getName() + ":" + shard.getHost() << "initShards"
- << BSONArray() << "numChunks" << 1);
+ << shard.getName() + ":" + shard.getHost()
+ << "initShards"
+ << BSONArray()
+ << "numChunks"
+ << 1);
expectChangeLogCreate(configHost, BSON("ok" << 1));
expectChangeLogInsert(
configHost, network()->now(), "shardCollection.start", ns, logChangeDetail);
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
index c539ee6ad5d..574a309c63b 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
@@ -32,8 +32,8 @@
#include <pcrecpp.h>
-#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/bson/json.h"
+#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/db/repl/read_concern_args.h"
@@ -658,9 +658,13 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandSuccess) {
// Since no write concern was sent we will add w:majority
ASSERT_EQUALS(BSON("dropUser"
<< "test"
- << "writeConcern" << BSON("w"
- << "majority"
- << "wtimeout" << 0) << "maxTimeMS" << 30000),
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout"
+ << 0)
+ << "maxTimeMS"
+ << 30000),
request.cmdObj);
ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
@@ -679,14 +683,14 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandInvalidWriteConce
configTargeter()->setFindHostReturnValue(HostAndPort("TestHost1"));
BSONObjBuilder responseBuilder;
- bool ok =
- catalogManager()->runUserManagementWriteCommand(operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"
- << "writeConcern" << BSON("w" << 2)),
- &responseBuilder);
+ bool ok = catalogManager()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern"
+ << BSON("w" << 2)),
+ &responseBuilder);
ASSERT_FALSE(ok);
Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
@@ -708,31 +712,35 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandRewriteWriteConce
},
Status::OK());
- auto future =
- launchAsync([this] {
- BSONObjBuilder responseBuilder;
- bool ok =
- catalogManager()->runUserManagementWriteCommand(
- operationContext(),
- "dropUser",
- "test",
- BSON("dropUser"
- << "test"
- << "writeConcern" << BSON("w" << 1 << "wtimeout" << 30)),
- &responseBuilder);
- ASSERT_FALSE(ok);
-
- Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
- ASSERT_EQUALS(ErrorCodes::UserNotFound, commandStatus);
- });
+ auto future = launchAsync([this] {
+ BSONObjBuilder responseBuilder;
+ bool ok =
+ catalogManager()->runUserManagementWriteCommand(operationContext(),
+ "dropUser",
+ "test",
+ BSON("dropUser"
+ << "test"
+ << "writeConcern"
+ << BSON("w" << 1 << "wtimeout"
+ << 30)),
+ &responseBuilder);
+ ASSERT_FALSE(ok);
+
+ Status commandStatus = getStatusFromCommandResult(responseBuilder.obj());
+ ASSERT_EQUALS(ErrorCodes::UserNotFound, commandStatus);
+ });
onCommand([](const RemoteCommandRequest& request) {
ASSERT_EQUALS("test", request.dbname);
ASSERT_EQUALS(BSON("dropUser"
<< "test"
- << "writeConcern" << BSON("w"
- << "majority"
- << "wtimeout" << 30) << "maxTimeMS" << 30000),
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout"
+ << 30)
+ << "maxTimeMS"
+ << 30000),
request.cmdObj);
ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
@@ -815,9 +823,13 @@ TEST_F(CatalogManagerReplSetTest, RunUserManagementWriteCommandNotMasterRetrySuc
// Since no write concern was sent we will add w:majority
ASSERT_EQUALS(BSON("dropUser"
<< "test"
- << "writeConcern" << BSON("w"
- << "majority"
- << "wtimeout" << 0) << "maxTimeMS" << 30000),
+ << "writeConcern"
+ << BSON("w"
+ << "majority"
+ << "wtimeout"
+ << 0)
+ << "maxTimeMS"
+ << 30000),
request.cmdObj);
ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
@@ -1193,9 +1205,10 @@ TEST_F(CatalogManagerReplSetTest, GetTagForChunkOneTagFound) {
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
ASSERT_EQ(query->getFilter(),
- BSON(TagsType::ns(chunk.getNS())
- << TagsType::min() << BSON("$lte" << chunk.getMin()) << TagsType::max()
- << BSON("$gte" << chunk.getMax())));
+ BSON(TagsType::ns(chunk.getNS()) << TagsType::min()
+ << BSON("$lte" << chunk.getMin())
+ << TagsType::max()
+ << BSON("$gte" << chunk.getMax())));
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1237,9 +1250,10 @@ TEST_F(CatalogManagerReplSetTest, GetTagForChunkNoTagFound) {
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
ASSERT_EQ(query->getFilter(),
- BSON(TagsType::ns(chunk.getNS())
- << TagsType::min() << BSON("$lte" << chunk.getMin()) << TagsType::max()
- << BSON("$gte" << chunk.getMax())));
+ BSON(TagsType::ns(chunk.getNS()) << TagsType::min()
+ << BSON("$lte" << chunk.getMin())
+ << TagsType::max()
+ << BSON("$gte" << chunk.getMax())));
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1278,9 +1292,10 @@ TEST_F(CatalogManagerReplSetTest, GetTagForChunkInvalidTagDoc) {
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
ASSERT_EQ(query->getFilter(),
- BSON(TagsType::ns(chunk.getNS())
- << TagsType::min() << BSON("$lte" << chunk.getMin()) << TagsType::max()
- << BSON("$gte" << chunk.getMax())));
+ BSON(TagsType::ns(chunk.getNS()) << TagsType::min()
+ << BSON("$lte" << chunk.getMin())
+ << TagsType::max()
+ << BSON("$gte" << chunk.getMax())));
checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
@@ -1388,7 +1403,8 @@ TEST_F(CatalogManagerReplSetTest, ApplyChunkOpsDeprecatedSuccessful) {
ASSERT_EQUALS("config", request.dbname);
ASSERT_EQUALS(BSON("w"
<< "majority"
- << "wtimeout" << 15000),
+ << "wtimeout"
+ << 15000),
request.cmdObj["writeConcern"].Obj());
ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
ASSERT_EQUALS(updateOps, request.cmdObj["applyOps"].Obj());
@@ -1925,10 +1941,12 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingNoDBExists) {
shardRegistry()->getShard(operationContext(), "shard0")->getTargeter());
shardTargeter->setFindHostReturnValue(HostAndPort("shard0:12"));
- distLock()->expectLock([](StringData name, StringData whyMessage, Milliseconds, Milliseconds) {
- ASSERT_EQ("test", name);
- ASSERT_FALSE(whyMessage.empty());
- }, Status::OK());
+ distLock()->expectLock(
+ [](StringData name, StringData whyMessage, Milliseconds, Milliseconds) {
+ ASSERT_EQ("test", name);
+ ASSERT_FALSE(whyMessage.empty());
+ },
+ Status::OK());
auto future = launchAsync([this] {
auto status = catalogManager()->enableSharding(operationContext(), "test");
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp
index c8e2a975a58..43946d897c2 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_upgrade_test.cpp
@@ -76,8 +76,11 @@ TEST_F(CatalogManagerReplSetTestFixture, UpgradeNotNeeded) {
checkReadConcern(findCmd, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
BSONObj versionDoc(BSON("_id" << 1 << "minCompatibleVersion"
- << MIN_COMPATIBLE_CONFIG_VERSION << "currentVersion"
- << CURRENT_CONFIG_VERSION << "clusterId" << OID::gen()));
+ << MIN_COMPATIBLE_CONFIG_VERSION
+ << "currentVersion"
+ << CURRENT_CONFIG_VERSION
+ << "clusterId"
+ << OID::gen()));
return vector<BSONObj>{versionDoc};
});
diff --git a/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
index cbdef7a2761..dd9a51c34bc 100644
--- a/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/replset/dist_lock_catalog_impl.cpp
@@ -98,7 +98,8 @@ StatusWith<BSONObj> extractFindAndModifyNewObj(StatusWith<Shard::CommandResponse
return {ErrorCodes::UnsupportedFormat,
str::stream() << "expected an object from the findAndModify response '"
<< kFindAndModifyResponseResultDocField
- << "'field, got: " << newDocElem};
+ << "'field, got: "
+ << newDocElem};
}
return newDocElem.Obj().getOwned();
@@ -191,10 +192,14 @@ StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* txn,
StringData processId,
Date_t time,
StringData why) {
- BSONObj newLockDetails(BSON(LocksType::lockID(lockSessionID)
- << LocksType::state(LocksType::LOCKED) << LocksType::who() << who
- << LocksType::process() << processId << LocksType::when(time)
- << LocksType::why() << why));
+ BSONObj newLockDetails(BSON(
+ LocksType::lockID(lockSessionID) << LocksType::state(LocksType::LOCKED) << LocksType::who()
+ << who
+ << LocksType::process()
+ << processId
+ << LocksType::when(time)
+ << LocksType::why()
+ << why));
auto request = FindAndModifyRequest::makeUpdate(
_locksNS,
@@ -246,10 +251,14 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* txn,
BSON(LocksType::name() << lockID << LocksType::state(LocksType::UNLOCKED)));
orQueryBuilder.append(BSON(LocksType::name() << lockID << LocksType::lockID(currentHolderTS)));
- BSONObj newLockDetails(BSON(LocksType::lockID(lockSessionID)
- << LocksType::state(LocksType::LOCKED) << LocksType::who() << who
- << LocksType::process() << processId << LocksType::when(time)
- << LocksType::why() << why));
+ BSONObj newLockDetails(BSON(
+ LocksType::lockID(lockSessionID) << LocksType::state(LocksType::LOCKED) << LocksType::who()
+ << who
+ << LocksType::process()
+ << processId
+ << LocksType::when(time)
+ << LocksType::why()
+ << why));
auto request = FindAndModifyRequest::makeUpdate(
_locksNS, BSON("$or" << orQueryBuilder.arr()), BSON("$set" << newLockDetails));
@@ -343,7 +352,8 @@ Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string&
return Status(ErrorCodes::FailedToParse,
str::stream()
<< "Failed to parse config server response to batch request for "
- "unlocking existing distributed locks" << causedBy(errmsg));
+ "unlocking existing distributed locks"
+ << causedBy(errmsg));
}
return batchResponse.toStatus();
}
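
The dist_lock_catalog changes apply the same rule to error-message construction: str::stream() accumulates fragments via operator<<, and the reformatter now places each appended fragment, including causedBy(errmsg), on its own line. Here is a self-contained approximation of the idiom; the stream struct only mimics mongoutils' str::stream, whose implicit conversion to std::string is visible from the call sites in the diff itself.

    #include <iostream>
    #include <sstream>
    #include <string>

    // Illustrative stand-in for mongoutils str::stream: accumulates via
    // operator<< and converts implicitly to std::string.
    struct stream {
        std::ostringstream ss;
        template <typename T>
        stream& operator<<(const T& value) {
            ss << value;
            return *this;
        }
        operator std::string() const {
            return ss.str();
        }
    };

    int main() {
        const std::string cause = " :: caused by :: network timeout";
        // Adjacent string literals ("...for " "unlocking...") concatenate at
        // compile time, which is why the diff can split one literal in two.
        std::string msg = stream()
            << "Failed to parse config server response to batch request for "
               "unlocking existing distributed locks"
            << cause;
        std::cout << msg << "\n";
        return 0;
    }
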
diff --git a/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp b/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
index 35c9be42c9b..5d538ea3407 100644
--- a/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
+++ b/src/mongo/s/catalog/replset/dist_lock_catalog_impl_test.cpp
@@ -1036,14 +1036,17 @@ TEST_F(DistLockCatalogFixture, UnlockWriteConcernError) {
// The dist lock catalog calls into the ShardRegistry, which will retry 3 times for
// WriteConcernFailed errors
- onCommand([&](const RemoteCommandRequest& request)
- -> StatusWith<BSONObj> { return writeConcernFailedResponse; });
+ onCommand([&](const RemoteCommandRequest& request) -> StatusWith<BSONObj> {
+ return writeConcernFailedResponse;
+ });
- onCommand([&](const RemoteCommandRequest& request)
- -> StatusWith<BSONObj> { return writeConcernFailedResponse; });
+ onCommand([&](const RemoteCommandRequest& request) -> StatusWith<BSONObj> {
+ return writeConcernFailedResponse;
+ });
- onCommand([&](const RemoteCommandRequest& request)
- -> StatusWith<BSONObj> { return writeConcernFailedResponse; });
+ onCommand([&](const RemoteCommandRequest& request) -> StatusWith<BSONObj> {
+ return writeConcernFailedResponse;
+ });
future.timed_get(kFutureTimeout);
}
@@ -1090,30 +1093,31 @@ TEST_F(DistLockCatalogFixture, BasicUnlockAll) {
ASSERT_OK(status);
});
- onCommand([](const RemoteCommandRequest& request)
- -> StatusWith<BSONObj> {
- ASSERT_EQUALS(dummyHost, request.target);
- ASSERT_EQUALS("config", request.dbname);
-
- std::string errmsg;
- BatchedUpdateRequest batchRequest;
- ASSERT(batchRequest.parseBSON("config", request.cmdObj, &errmsg));
- ASSERT_EQUALS(LocksType::ConfigNS, batchRequest.getNS().toString());
- ASSERT_EQUALS(BSON("w"
- << "majority"
- << "wtimeout" << 15000),
- batchRequest.getWriteConcern());
- auto updates = batchRequest.getUpdates();
- ASSERT_EQUALS(1U, updates.size());
- auto update = updates.front();
- ASSERT_FALSE(update->getUpsert());
- ASSERT_TRUE(update->getMulti());
- ASSERT_EQUALS(BSON(LocksType::process("processID")), update->getQuery());
- ASSERT_EQUALS(BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))),
- update->getUpdateExpr());
-
- return BSON("ok" << 1);
- });
+ onCommand(
+ [](const RemoteCommandRequest& request) -> StatusWith<BSONObj> {
+ ASSERT_EQUALS(dummyHost, request.target);
+ ASSERT_EQUALS("config", request.dbname);
+
+ std::string errmsg;
+ BatchedUpdateRequest batchRequest;
+ ASSERT(batchRequest.parseBSON("config", request.cmdObj, &errmsg));
+ ASSERT_EQUALS(LocksType::ConfigNS, batchRequest.getNS().toString());
+ ASSERT_EQUALS(BSON("w"
+ << "majority"
+ << "wtimeout"
+ << 15000),
+ batchRequest.getWriteConcern());
+ auto updates = batchRequest.getUpdates();
+ ASSERT_EQUALS(1U, updates.size());
+ auto update = updates.front();
+ ASSERT_FALSE(update->getUpsert());
+ ASSERT_TRUE(update->getMulti());
+ ASSERT_EQUALS(BSON(LocksType::process("processID")), update->getQuery());
+ ASSERT_EQUALS(BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))),
+ update->getUpdateExpr());
+
+ return BSON("ok" << 1);
+ });
future.timed_get(kFutureTimeout);
}
@@ -1488,8 +1492,9 @@ TEST_F(DistLockCatalogFixture, GetPingNotFound) {
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request)
- -> StatusWith<vector<BSONObj>> { return std::vector<BSONObj>(); });
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ return std::vector<BSONObj>();
+ });
future.timed_get(kFutureTimeout);
}
@@ -1572,8 +1577,9 @@ TEST_F(DistLockCatalogFixture, GetLockByTSNotFound) {
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request)
- -> StatusWith<vector<BSONObj>> { return std::vector<BSONObj>(); });
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ return std::vector<BSONObj>();
+ });
future.timed_get(kFutureTimeout);
}
@@ -1660,8 +1666,9 @@ TEST_F(DistLockCatalogFixture, GetLockByNameNotFound) {
ASSERT_FALSE(status.reason().empty());
});
- onFindCommand([](const RemoteCommandRequest& request)
- -> StatusWith<vector<BSONObj>> { return std::vector<BSONObj>(); });
+ onFindCommand([](const RemoteCommandRequest& request) -> StatusWith<vector<BSONObj>> {
+ return std::vector<BSONObj>();
+ });
future.timed_get(kFutureTimeout);
}
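
Several hunks in this test file collapse a lambda whose trailing return type had been orphaned onto a continuation line ("-> StatusWith<BSONObj> { ... }") back into the idiomatic shape: capture list, parameters, and return type on one line, with the body indented beneath. A self-contained sketch of the onCommand-style harness follows; Request and the handler signature are assumed placeholders, not MongoDB's real types.

    #include <functional>
    #include <iostream>
    #include <string>

    // Placeholder types; MongoDB's RemoteCommandRequest and
    // StatusWith<BSONObj> are richer than this sketch.
    struct Request {
        std::string dbname;
    };

    void onCommand(const std::function<std::string(const Request&)>& handler) {
        Request request{"config"};
        std::cout << handler(request) << "\n";
    }

    int main() {
        // Lambda introducer, parameter list, and trailing return type on one
        // line; the body hangs under it, as in the reformatted hunks.
        onCommand([](const Request& request) -> std::string {
            return "ok from " + request.dbname;
        });
        return 0;
    }
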
diff --git a/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
index 8fcd79fc6cd..8b387f30506 100644
--- a/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset/replset_dist_lock_manager.cpp
@@ -34,8 +34,8 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/operation_context_noop.h"
+#include "mongo/db/service_context.h"
#include "mongo/s/catalog/dist_lock_catalog.h"
#include "mongo/s/catalog/type_lockpings.h"
#include "mongo/s/catalog/type_locks.h"
@@ -47,8 +47,8 @@
#include "mongo/util/fail_point_service.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
-#include "mongo/util/timer.h"
#include "mongo/util/time_support.h"
+#include "mongo/util/timer.h"
namespace mongo {
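
The include reshuffles in this and the following files are the formatter's include-sorting pass: within each contiguous block, headers are ordered by a plain byte-wise lexicographic comparison. That is why time_support.h now precedes timer.h (the underscore, 0x5F, sorts below 'r', 0x72) and why service_context.h follows operation_context_noop.h. The comparison can be reproduced with std::sort, since std::string's operator< is the same byte-wise compare:

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
        // Two headers from the hunk above; sorted order matches the diff.
        std::vector<std::string> headers{"mongo/util/timer.h",
                                         "mongo/util/time_support.h"};
        std::sort(headers.begin(), headers.end());  // byte-wise compare
        for (const std::string& h : headers)
            std::cout << "#include \"" << h << "\"\n";
        return 0;
    }
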
diff --git a/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp b/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp
index 792bb3ca9d8..1b16e1f35da 100644
--- a/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp
+++ b/src/mongo/s/catalog/replset/replset_dist_lock_manager_test.cpp
@@ -30,19 +30,19 @@
#include "mongo/s/catalog/replset/replset_dist_lock_manager.h"
+#include <boost/optional.hpp>
+#include <boost/optional/optional_io.hpp>
#include <map>
#include <string>
#include <type_traits>
#include <vector>
-#include <boost/optional.hpp>
-#include <boost/optional/optional_io.hpp>
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
#include "mongo/bson/json.h"
#include "mongo/bson/util/builder.h"
-#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter.h"
+#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/db/service_context_noop.h"
@@ -364,9 +364,11 @@ TEST_F(RSDistLockMgrWithMockTickSource, LockSuccessAfterRetry) {
getMockCatalog()->expectNoGrabLock();
- getMockCatalog()->expectGetLockByName([](StringData name) {
- FAIL("should not attempt to overtake lock after successful lock");
- }, LocksType());
+ getMockCatalog()->expectGetLockByName(
+ [](StringData name) {
+ FAIL("should not attempt to overtake lock after successful lock");
+ },
+ LocksType());
},
goodLockDoc);
}
@@ -828,10 +830,12 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
* Returns true if all values in the map are greater than 2.
*/
auto mapEntriesGreaterThanTwo = [](const decltype(unlockIDMap)& map) -> bool {
- auto iter = find_if(map.begin(),
- map.end(),
- [](const std::remove_reference<decltype(map)>::type::value_type& entry)
- -> bool { return entry.second < 3; });
+ auto iter = find_if(
+ map.begin(),
+ map.end(),
+ [](const std::remove_reference<decltype(map)>::type::value_type& entry) -> bool {
+ return entry.second < 3;
+ });
return iter == map.end();
};
@@ -844,10 +848,12 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
// Wait until we see at least 2 unique lockSessionID more than twice.
if (unlockIDMap.size() >= 2 && mapEntriesGreaterThanTwo(unlockIDMap)) {
- getMockCatalog()->expectUnLock([&testMutex, &unlockCV](const OID& lockSessionID) {
- stdx::unique_lock<stdx::mutex> lk(testMutex);
- unlockCV.notify_all();
- }, Status::OK());
+ getMockCatalog()->expectUnLock(
+ [&testMutex, &unlockCV](const OID& lockSessionID) {
+ stdx::unique_lock<stdx::mutex> lk(testMutex);
+ unlockCV.notify_all();
+ },
+ Status::OK());
}
},
{ErrorCodes::NetworkTimeout, "bad test network"});
@@ -910,10 +916,12 @@ TEST_F(ReplSetDistLockManagerFixture, MultipleQueuedUnlock) {
TEST_F(ReplSetDistLockManagerFixture, CleanupPingOnShutdown) {
bool stopPingCalled = false;
- getMockCatalog()->expectStopPing([this, &stopPingCalled](StringData processID) {
- ASSERT_EQUALS(getProcessID(), processID);
- stopPingCalled = true;
- }, Status::OK());
+ getMockCatalog()->expectStopPing(
+ [this, &stopPingCalled](StringData processID) {
+ ASSERT_EQUALS(getProcessID(), processID);
+ stopPingCalled = true;
+ },
+ Status::OK());
getMgr()->shutDown(txn());
ASSERT_TRUE(stopPingCalled);
@@ -989,9 +997,9 @@ TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusNoLongerOwn) {
auto& scopedLock = lockStatus.getValue();
getMockCatalog()->expectNoGrabLock();
- getMockCatalog()->expectGetLockByTS([&lockSessionID](const OID& ts) {
- ASSERT_EQUALS(lockSessionID, ts);
- }, {ErrorCodes::LockNotFound, "no lock"});
+ getMockCatalog()->expectGetLockByTS(
+ [&lockSessionID](const OID& ts) { ASSERT_EQUALS(lockSessionID, ts); },
+ {ErrorCodes::LockNotFound, "no lock"});
ASSERT_NOT_OK(scopedLock.checkStatus());
}
@@ -1028,9 +1036,9 @@ TEST_F(ReplSetDistLockManagerFixture, CheckLockStatusError) {
auto& scopedLock = lockStatus.getValue();
getMockCatalog()->expectNoGrabLock();
- getMockCatalog()->expectGetLockByTS([&lockSessionID](const OID& ts) {
- ASSERT_EQUALS(lockSessionID, ts);
- }, {ErrorCodes::NetworkTimeout, "bad test network"});
+ getMockCatalog()->expectGetLockByTS(
+ [&lockSessionID](const OID& ts) { ASSERT_EQUALS(lockSessionID, ts); },
+ {ErrorCodes::NetworkTimeout, "bad test network"});
ASSERT_NOT_OK(scopedLock.checkStatus());
}
@@ -1264,9 +1272,9 @@ TEST_F(ReplSetDistLockManagerFixture, GetPingErrorWhileOvertaking) {
getMockCatalog()->expectGetLockByName([](StringData name) { ASSERT_EQUALS("bar", name); },
currentLockDoc);
- getMockCatalog()->expectGetPing([](StringData process) {
- ASSERT_EQUALS("otherProcess", process);
- }, {ErrorCodes::NetworkTimeout, "bad test network"});
+ getMockCatalog()->expectGetPing(
+ [](StringData process) { ASSERT_EQUALS("otherProcess", process); },
+ {ErrorCodes::NetworkTimeout, "bad test network"});
auto status = getMgr()->lock(txn(), "bar", "", Milliseconds(0), Milliseconds(0)).getStatus();
ASSERT_NOT_OK(status);
@@ -1406,9 +1414,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfPingIsActive) {
getMockCatalog()->expectGetPing(
[](StringData process) { ASSERT_EQUALS("otherProcess", process); }, pingDoc);
- getMockCatalog()->expectGetServerInfo([&getServerInfoCallCount]() {
- getServerInfoCallCount++;
- }, DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
+ getMockCatalog()->expectGetServerInfo(
+ [&getServerInfoCallCount]() { getServerInfoCallCount++; },
+ DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
auto status =
getMgr()->lock(txn(), "bar", "", Milliseconds(0), Milliseconds(0)).getStatus();
@@ -1419,9 +1427,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfPingIsActive) {
ASSERT_EQUALS(kLoopCount, getServerInfoCallCount);
configServerLocalTime += kLockExpiration;
- getMockCatalog()->expectGetServerInfo([&getServerInfoCallCount]() {
- getServerInfoCallCount++;
- }, DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
+ getMockCatalog()->expectGetServerInfo(
+ [&getServerInfoCallCount]() { getServerInfoCallCount++; },
+ DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
OID lockTS;
// Make sure that overtake is now ok since ping is no longer updated.
@@ -1505,9 +1513,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfOwnerJustChanged) {
getMockCatalog()->expectGetLockByName([](StringData name) { ASSERT_EQUALS("bar", name); },
currentLockDoc);
- getMockCatalog()->expectGetServerInfo([&getServerInfoCallCount]() {
- getServerInfoCallCount++;
- }, DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
+ getMockCatalog()->expectGetServerInfo(
+ [&getServerInfoCallCount]() { getServerInfoCallCount++; },
+ DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
auto status =
getMgr()->lock(txn(), "bar", "", Milliseconds(0), Milliseconds(0)).getStatus();
@@ -1518,9 +1526,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfOwnerJustChanged) {
ASSERT_EQUALS(kLoopCount, getServerInfoCallCount);
configServerLocalTime += kLockExpiration;
- getMockCatalog()->expectGetServerInfo([&getServerInfoCallCount]() {
- getServerInfoCallCount++;
- }, DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
+ getMockCatalog()->expectGetServerInfo(
+ [&getServerInfoCallCount]() { getServerInfoCallCount++; },
+ DistLockCatalog::ServerInfo(configServerLocalTime, OID()));
OID lockTS;
// Make sure that overtake is now ok since lock owner didn't change.
@@ -1607,9 +1615,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfElectionIdChanged) {
[](StringData process) { ASSERT_EQUALS("otherProcess", process); }, fixedPingDoc);
lastElectionId = OID::gen();
- getMockCatalog()->expectGetServerInfo([&getServerInfoCallCount]() {
- getServerInfoCallCount++;
- }, DistLockCatalog::ServerInfo(configServerLocalTime, lastElectionId));
+ getMockCatalog()->expectGetServerInfo(
+ [&getServerInfoCallCount]() { getServerInfoCallCount++; },
+ DistLockCatalog::ServerInfo(configServerLocalTime, lastElectionId));
auto status =
getMgr()->lock(txn(), "bar", "", Milliseconds(0), Milliseconds(0)).getStatus();
@@ -1620,9 +1628,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfElectionIdChanged) {
ASSERT_EQUALS(kLoopCount, getServerInfoCallCount);
configServerLocalTime += kLockExpiration;
- getMockCatalog()->expectGetServerInfo([&getServerInfoCallCount]() {
- getServerInfoCallCount++;
- }, DistLockCatalog::ServerInfo(configServerLocalTime, lastElectionId));
+ getMockCatalog()->expectGetServerInfo(
+ [&getServerInfoCallCount]() { getServerInfoCallCount++; },
+ DistLockCatalog::ServerInfo(configServerLocalTime, lastElectionId));
OID lockTS;
// Make sure that overtake is now ok since electionId didn't change.
@@ -1893,8 +1901,9 @@ TEST_F(ReplSetDistLockManagerFixture, CannotOvertakeIfConfigServerClockGoesBackw
}
// Make config server time go backwards by lock expiration duration.
- getMockCatalog()->expectGetServerInfo([]() {
- }, DistLockCatalog::ServerInfo(configClock - kLockExpiration - Milliseconds(1), OID()));
+ getMockCatalog()->expectGetServerInfo(
+ []() {},
+ DistLockCatalog::ServerInfo(configClock - kLockExpiration - Milliseconds(1), OID()));
// Second attempt should not overtake lock.
{
@@ -1970,9 +1979,9 @@ TEST_F(RSDistLockMgrWithMockTickSource, CanOvertakeIfNoPingDocument) {
getMockCatalog()->expectGetLockByName([](StringData name) { ASSERT_EQUALS("bar", name); },
currentLockDoc);
- getMockCatalog()->expectGetPing([](StringData process) {
- ASSERT_EQUALS("otherProcess", process);
- }, {ErrorCodes::NoMatchingDocument, "no ping"});
+ getMockCatalog()->expectGetPing(
+ [](StringData process) { ASSERT_EQUALS("otherProcess", process); },
+ {ErrorCodes::NoMatchingDocument, "no ping"});
getMockCatalog()->expectGetServerInfo([]() {}, DistLockCatalog::ServerInfo(Date_t(), OID()));
@@ -1994,9 +2003,9 @@ TEST_F(RSDistLockMgrWithMockTickSource, CanOvertakeIfNoPingDocument) {
getMockCatalog()->expectGetLockByName([](StringData name) { ASSERT_EQUALS("bar", name); },
currentLockDoc);
- getMockCatalog()->expectGetPing([](StringData process) {
- ASSERT_EQUALS("otherProcess", process);
- }, {ErrorCodes::NoMatchingDocument, "no ping"});
+ getMockCatalog()->expectGetPing(
+ [](StringData process) { ASSERT_EQUALS("otherProcess", process); },
+ {ErrorCodes::NoMatchingDocument, "no ping"});
getMockCatalog()->expectGetServerInfo(
[]() {}, DistLockCatalog::ServerInfo(Date_t() + kLockExpiration + Milliseconds(1), OID()));
diff --git a/src/mongo/s/catalog/type_changelog_test.cpp b/src/mongo/s/catalog/type_changelog_test.cpp
index 1090b7be0e7..904c9fdfa46 100644
--- a/src/mongo/s/catalog/type_changelog_test.cpp
+++ b/src/mongo/s/catalog/type_changelog_test.cpp
@@ -44,12 +44,14 @@ TEST(ChangeLogType, Empty) {
}
TEST(ChangeLogType, Valid) {
- BSONObj obj = BSON(
- ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local") << ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test") << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_OK(changeLogResult.getStatus());
@@ -68,60 +70,65 @@ TEST(ChangeLogType, Valid) {
}
TEST(ChangeLogType, MissingChangeId) {
- BSONObj obj =
- BSON(ChangeLogType::server("host.local")
- << ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test") << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj = BSON(ChangeLogType::server("host.local")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
}
TEST(ChangeLogType, MissingServer) {
- BSONObj obj =
- BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test") << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
}
TEST(ChangeLogType, MissingClientAddr) {
- BSONObj obj =
- BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test") << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
}
TEST(ChangeLogType, MissingTime) {
- BSONObj obj =
- BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local")
- << ChangeLogType::clientAddr("192.168.0.189:51128") << ChangeLogType::what("split")
- << ChangeLogType::ns("test.test") << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
}
TEST(ChangeLogType, MissingWhat) {
- BSONObj obj = BSON(
- ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
- << ChangeLogType::server("host.local") << ChangeLogType::clientAddr("192.168.0.189:51128")
- << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1)) << ChangeLogType::ns("test.test")
- << ChangeLogType::details(BSON("dummy"
- << "info")));
+ BSONObj obj = BSON(ChangeLogType::changeId("host.local-2012-11-21T19:14:10-8")
+ << ChangeLogType::server("host.local")
+ << ChangeLogType::clientAddr("192.168.0.189:51128")
+ << ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
+ << ChangeLogType::ns("test.test")
+ << ChangeLogType::details(BSON("dummy"
+ << "info")));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
@@ -132,7 +139,8 @@ TEST(ChangeLogType, MissingDetails) {
<< ChangeLogType::server("host.local")
<< ChangeLogType::clientAddr("192.168.0.189:51128")
<< ChangeLogType::time(Date_t::fromMillisSinceEpoch(1))
- << ChangeLogType::what("split") << ChangeLogType::ns("test.test"));
+ << ChangeLogType::what("split")
+ << ChangeLogType::ns("test.test"));
auto changeLogResult = ChangeLogType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, changeLogResult.getStatus());
diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp
index cd4bbb4b1d4..a246df00c54 100644
--- a/src/mongo/s/catalog/type_chunk_test.cpp
+++ b/src/mongo/s/catalog/type_chunk_test.cpp
@@ -46,25 +46,33 @@ TEST(ChunkType, MissingRequiredFields) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj objModNS =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::min(BSON("a" << 10 << "b" << 10)) << ChunkType::max(BSON("a" << 20))
- << "lastmod" << Timestamp(chunkVersion.toLong()) << "lastmodEpoch"
- << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20))
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch()
+ << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(objModNS);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModKeys =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::ns("test.mycol") << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol") << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch()
+ << ChunkType::shard("shard0001"));
chunkRes = ChunkType::fromBSON(objModKeys);
ASSERT_FALSE(chunkRes.isOK());
BSONObj objModShard =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch());
+ BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
+ << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20))
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch());
chunkRes = ChunkType::fromBSON(objModShard);
ASSERT_FALSE(chunkRes.isOK());
}
@@ -72,10 +80,14 @@ TEST(ChunkType, MissingRequiredFields) {
TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
ChunkVersion chunkVersion(1, 2, OID::gen());
BSONObj obj =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10 << "b" << 10))
- << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
+ << ChunkType::min(BSON("a" << 10 << "b" << 10))
+ << ChunkType::max(BSON("a" << 20))
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch()
+ << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -83,11 +95,14 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInNumberOfKeys) {
TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("b" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSONObj obj = BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
+ << ChunkType::min(BSON("a" << 10))
+ << ChunkType::max(BSON("b" << 20))
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch()
+ << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -95,11 +110,14 @@ TEST(ChunkType, MinAndMaxShardKeysDifferInKeyNames) {
TEST(ChunkType, NotAscending) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 20))
- << ChunkType::max(BSON("a" << 10)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSONObj obj = BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
+ << ChunkType::min(BSON("a" << 20))
+ << ChunkType::max(BSON("a" << 10))
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch()
+ << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ASSERT_FALSE(chunkRes.getValue().validate().isOK());
@@ -107,11 +125,14 @@ TEST(ChunkType, NotAscending) {
TEST(ChunkType, CorrectContents) {
ChunkVersion chunkVersion(1, 2, OID::gen());
- BSONObj obj =
- BSON(ChunkType::name("test.mycol-a_MinKey")
- << ChunkType::ns("test.mycol") << ChunkType::min(BSON("a" << 10))
- << ChunkType::max(BSON("a" << 20)) << "lastmod" << Timestamp(chunkVersion.toLong())
- << "lastmodEpoch" << chunkVersion.epoch() << ChunkType::shard("shard0001"));
+ BSONObj obj = BSON(ChunkType::name("test.mycol-a_MinKey") << ChunkType::ns("test.mycol")
+ << ChunkType::min(BSON("a" << 10))
+ << ChunkType::max(BSON("a" << 20))
+ << "lastmod"
+ << Timestamp(chunkVersion.toLong())
+ << "lastmodEpoch"
+ << chunkVersion.epoch()
+ << ChunkType::shard("shard0001"));
StatusWith<ChunkType> chunkRes = ChunkType::fromBSON(obj);
ASSERT_OK(chunkRes.getStatus());
ChunkType chunk = chunkRes.getValue();
@@ -126,13 +147,18 @@ TEST(ChunkType, CorrectContents) {
}
TEST(ChunkType, Pre22Format) {
- ChunkType chunk = assertGet(
- ChunkType::fromBSON(BSON("_id"
- << "test.mycol-a_MinKey"
- << "lastmod" << Date_t::fromMillisSinceEpoch(1) << "ns"
- << "test.mycol"
- << "min" << BSON("a" << 10) << "max" << BSON("a" << 20) << "shard"
- << "shard0001")));
+ ChunkType chunk = assertGet(ChunkType::fromBSON(BSON("_id"
+ << "test.mycol-a_MinKey"
+ << "lastmod"
+ << Date_t::fromMillisSinceEpoch(1)
+ << "ns"
+ << "test.mycol"
+ << "min"
+ << BSON("a" << 10)
+ << "max"
+ << BSON("a" << 20)
+ << "shard"
+ << "shard0001")));
ASSERT_OK(chunk.validate());
ASSERT_EQUALS(chunk.getNS(), "test.mycol");
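
Beyond the reflowed BSON chains, these type_* tests all follow one parse-and-validate pattern: build a document, run it through fromBSON, and assert either an error code or field-level contents on the StatusWith result. A reduced sketch of that pattern follows, with StatusWith modeled as an optional-plus-error rather than MongoDB's actual Status machinery, and parseChunk as a hypothetical stand-in for ChunkType::fromBSON.

    #include <iostream>
    #include <optional>
    #include <string>

    // Simplified stand-in for StatusWith<T>: holds either a value or an
    // error string. MongoDB's version carries a full Status object.
    template <typename T>
    class StatusWith {
    public:
        StatusWith(std::string error) : _error(std::move(error)) {}
        StatusWith(T value) : _value(std::move(value)) {}
        bool isOK() const { return _value.has_value(); }
        const T& getValue() const { return *_value; }
        const std::string& getStatus() const { return _error; }

    private:
        std::optional<T> _value;
        std::string _error;
    };

    struct ChunkDoc {
        std::string ns;
    };

    // Hypothetical parser mirroring the shape of ChunkType::fromBSON.
    StatusWith<ChunkDoc> parseChunk(bool hasNS) {
        if (!hasNS)
            return StatusWith<ChunkDoc>("NoSuchKey: missing ns");
        return StatusWith<ChunkDoc>(ChunkDoc{"test.mycol"});
    }

    int main() {
        StatusWith<ChunkDoc> res = parseChunk(false);
        std::cout << (res.isOK() ? res.getValue().ns : res.getStatus()) << "\n";
        return 0;
    }
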
diff --git a/src/mongo/s/catalog/type_collection_test.cpp b/src/mongo/s/catalog/type_collection_test.cpp
index 893f3e6cb56..ead96ca0850 100644
--- a/src/mongo/s/catalog/type_collection_test.cpp
+++ b/src/mongo/s/catalog/type_collection_test.cpp
@@ -28,8 +28,8 @@
#include "mongo/platform/basic.h"
-#include "mongo/bson/oid.h"
#include "mongo/base/status_with.h"
+#include "mongo/bson/oid.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/time_support.h"
@@ -47,10 +47,12 @@ TEST(CollectionType, Empty) {
TEST(CollectionType, Basic) {
const OID oid = OID::gen();
- StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
- CollectionType::fullNs("db.coll")
- << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
+ StatusWith<CollectionType> status =
+ CollectionType::fromBSON(BSON(CollectionType::fullNs("db.coll")
+ << CollectionType::epoch(oid)
+ << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@@ -88,11 +90,16 @@ TEST(CollectionType, EpochCorrectness) {
}
TEST(CollectionType, Pre22Format) {
- CollectionType coll = assertGet(
- CollectionType::fromBSON(BSON("_id"
- << "db.coll"
- << "lastmod" << Date_t::fromMillisSinceEpoch(1) << "dropped"
- << false << "key" << BSON("a" << 1) << "unique" << false)));
+ CollectionType coll = assertGet(CollectionType::fromBSON(BSON("_id"
+ << "db.coll"
+ << "lastmod"
+ << Date_t::fromMillisSinceEpoch(1)
+ << "dropped"
+ << false
+ << "key"
+ << BSON("a" << 1)
+ << "unique"
+ << false)));
ASSERT(coll.getNs() == NamespaceString{"db.coll"});
ASSERT(!coll.getEpoch().isSet());
@@ -105,10 +112,12 @@ TEST(CollectionType, Pre22Format) {
TEST(CollectionType, InvalidCollectionNamespace) {
const OID oid = OID::gen();
- StatusWith<CollectionType> result = CollectionType::fromBSON(BSON(
- CollectionType::fullNs("foo\\bar.coll")
- << CollectionType::epoch(oid) << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
- << CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
+ StatusWith<CollectionType> result =
+ CollectionType::fromBSON(BSON(CollectionType::fullNs("foo\\bar.coll")
+ << CollectionType::epoch(oid)
+ << CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
+ << CollectionType::keyPattern(BSON("a" << 1))
+ << CollectionType::unique(true)));
ASSERT_TRUE(result.isOK());
CollectionType collType = result.getValue();
ASSERT_FALSE(collType.validate().isOK());
diff --git a/src/mongo/s/catalog/type_config_version_test.cpp b/src/mongo/s/catalog/type_config_version_test.cpp
index dabe32c7782..db3ff5373f0 100644
--- a/src/mongo/s/catalog/type_config_version_test.cpp
+++ b/src/mongo/s/catalog/type_config_version_test.cpp
@@ -253,10 +253,10 @@ TEST(Excludes, BadRangeArray) {
<< "1.2.3"); // empty bound
BSONArray includeArr = bab.arr();
- auto versionInfoResult = VersionType::fromBSON(
- BSON(VersionType::minCompatibleVersion(3)
- << VersionType::currentVersion(4) << VersionType::clusterId(OID::gen())
- << VersionType::excludingMongoVersions(includeArr)));
+ auto versionInfoResult = VersionType::fromBSON(BSON(
+ VersionType::minCompatibleVersion(3) << VersionType::currentVersion(4)
+ << VersionType::clusterId(OID::gen())
+ << VersionType::excludingMongoVersions(includeArr)));
ASSERT_EQ(ErrorCodes::FailedToParse, versionInfoResult.getStatus());
}
diff --git a/src/mongo/s/catalog/type_locks_test.cpp b/src/mongo/s/catalog/type_locks_test.cpp
index aa169872c8a..5425b24b19b 100644
--- a/src/mongo/s/catalog/type_locks_test.cpp
+++ b/src/mongo/s/catalog/type_locks_test.cpp
@@ -45,12 +45,12 @@ TEST(Validity, Empty) {
TEST(Validity, UnlockedWithOptional) {
OID testLockID = OID::gen();
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::UNLOCKED) << LocksType::lockID(testLockID)
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::UNLOCKED)
+ << LocksType::lockID(testLockID)
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -77,12 +77,12 @@ TEST(Validity, UnlockedWithoutOptional) {
TEST(Validity, LockedValid) {
OID testLockID = OID::gen();
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(testLockID)
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED)
+ << LocksType::lockID(testLockID)
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -97,11 +97,11 @@ TEST(Validity, LockedValid) {
}
TEST(Validity, LockedMissingProcess) {
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::state(LocksType::State::LOCKED)
+ << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -123,10 +123,11 @@ TEST(Validity, LockedMissingLockID) {
}
TEST(Validity, LockedMissingWho) {
- BSONObj obj = BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED)
- << LocksType::lockID(OID::gen()) << LocksType::why("doing balance round"));
+ BSONObj obj =
+ BSON(LocksType::name("balancer") << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED)
+ << LocksType::lockID(OID::gen())
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -135,11 +136,11 @@ TEST(Validity, LockedMissingWho) {
}
TEST(Validity, LockedMissingWhy) {
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCKED) << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCKED)
+ << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -148,12 +149,12 @@ TEST(Validity, LockedMissingWhy) {
}
TEST(Validity, ContestedValid) {
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP)
+ << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -162,11 +163,11 @@ TEST(Validity, ContestedValid) {
}
TEST(Validity, ContestedMissingProcess) {
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
- << LocksType::why("doing balance round"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::state(LocksType::State::LOCK_PREP)
+ << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249")
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -188,10 +189,11 @@ TEST(Validity, ContestedMissingLockID) {
}
TEST(Validity, ContestedMissingWho) {
- BSONObj obj = BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP)
- << LocksType::lockID(OID::gen()) << LocksType::why("doing balance round"));
+ BSONObj obj =
+ BSON(LocksType::name("balancer") << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP)
+ << LocksType::lockID(OID::gen())
+ << LocksType::why("doing balance round"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
@@ -200,11 +202,11 @@ TEST(Validity, ContestedMissingWho) {
}
TEST(Validity, ContestedMissingWhy) {
- BSONObj obj =
- BSON(LocksType::name("balancer")
- << LocksType::process("host.local:27017:1352918870:16807")
- << LocksType::state(LocksType::State::LOCK_PREP) << LocksType::lockID(OID::gen())
- << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249"));
+ BSONObj obj = BSON(LocksType::name("balancer")
+ << LocksType::process("host.local:27017:1352918870:16807")
+ << LocksType::state(LocksType::State::LOCK_PREP)
+ << LocksType::lockID(OID::gen())
+ << LocksType::who("host.local:27017:1352918870:16807:Balancer:282475249"));
auto locksResult = LocksType::fromBSON(obj);
ASSERT_OK(locksResult.getStatus());
diff --git a/src/mongo/s/catalog/type_mongos_test.cpp b/src/mongo/s/catalog/type_mongos_test.cpp
index 291019e0886..8fc449ab6f7 100644
--- a/src/mongo/s/catalog/type_mongos_test.cpp
+++ b/src/mongo/s/catalog/type_mongos_test.cpp
@@ -39,48 +39,54 @@ namespace {
using namespace mongo;
TEST(Validity, MissingName) {
- BSONObj obj = BSON(MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100) << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
+ BSONObj obj =
+ BSON(MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
+ << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x")
+ << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingPing) {
- BSONObj obj = BSON(MongosType::name("localhost:27017")
- << MongosType::uptime(100) << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
+ BSONObj obj = BSON(MongosType::name("localhost:27017") << MongosType::uptime(100)
+ << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x")
+ << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingUp) {
- BSONObj obj =
- BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
+ BSONObj obj = BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
+ << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x")
+ << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingWaiting) {
- BSONObj obj =
- BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
- << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
+ BSONObj obj = BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
+ << MongosType::uptime(100)
+ << MongosType::mongoVersion("x.x.x")
+ << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_EQ(ErrorCodes::NoSuchKey, mongosTypeResult.getStatus());
}
TEST(Validity, MissingMongoVersion) {
- BSONObj obj =
- BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
- << MongosType::waiting(false) << MongosType::configVersion(0));
+ BSONObj obj = BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
+ << MongosType::uptime(100)
+ << MongosType::waiting(false)
+ << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -94,10 +100,11 @@ TEST(Validity, MissingMongoVersion) {
}
TEST(Validity, MissingConfigVersion) {
- BSONObj obj =
- BSON(MongosType::name("localhost:27017")
- << MongosType::ping(Date_t::fromMillisSinceEpoch(1)) << MongosType::uptime(100)
- << MongosType::waiting(false) << MongosType::mongoVersion("x.x.x"));
+ BSONObj obj = BSON(MongosType::name("localhost:27017")
+ << MongosType::ping(Date_t::fromMillisSinceEpoch(1))
+ << MongosType::uptime(100)
+ << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x"));
auto mongosTypeResult = MongosType::fromBSON(obj);
ASSERT_OK(mongosTypeResult.getStatus());
@@ -113,8 +120,10 @@ TEST(Validity, MissingConfigVersion) {
TEST(Validity, Valid) {
BSONObj obj = BSON(MongosType::name("localhost:27017")
<< MongosType::ping(Date_t::fromMillisSinceEpoch(1))
- << MongosType::uptime(100) << MongosType::waiting(false)
- << MongosType::mongoVersion("x.x.x") << MongosType::configVersion(0));
+ << MongosType::uptime(100)
+ << MongosType::waiting(false)
+ << MongosType::mongoVersion("x.x.x")
+ << MongosType::configVersion(0));
auto mongosTypeResult = MongosType::fromBSON(obj);
MongosType& mType = mongosTypeResult.getValue();
diff --git a/src/mongo/s/catalog/type_shard_test.cpp b/src/mongo/s/catalog/type_shard_test.cpp
index f89a54d5a3f..9461ee47dcb 100644
--- a/src/mongo/s/catalog/type_shard_test.cpp
+++ b/src/mongo/s/catalog/type_shard_test.cpp
@@ -72,7 +72,8 @@ TEST(ShardType, AllOptionalsPresent) {
TEST(ShardType, MaxSizeAsFloat) {
BSONObj obj = BSON(ShardType::name("shard0000") << ShardType::host("localhost:27017")
- << ShardType::maxSizeMB() << 100.0);
+ << ShardType::maxSizeMB()
+ << 100.0);
StatusWith<ShardType> shardRes = ShardType::fromBSON(obj);
ASSERT(shardRes.isOK());
ShardType shard = shardRes.getValue();
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
index 5e81999afe7..09addfc588c 100644
--- a/src/mongo/s/chunk.cpp
+++ b/src/mongo/s/chunk.cpp
@@ -151,7 +151,8 @@ BSONObj Chunk::_getExtremeKey(OperationContext* txn, bool doSplitAtLower) const
uassert(28736,
str::stream() << "failed to initialize cursor during auto split due to "
- << "connection problem with " << conn->getServerAddress(),
+ << "connection problem with "
+ << conn->getServerAddress(),
cursor.get() != nullptr);
if (cursor->more()) {
@@ -265,14 +266,16 @@ StatusWith<boost::optional<ChunkRange>> Chunk::split(OperationContext* txn,
// It's also a good place to sanity check.
if (_min == splitPoints.front()) {
string msg(str::stream() << "not splitting chunk " << toString() << ", split point "
- << splitPoints.front() << " is exactly on chunk bounds");
+ << splitPoints.front()
+ << " is exactly on chunk bounds");
log() << msg;
return Status(ErrorCodes::CannotSplit, msg);
}
if (_max == splitPoints.back()) {
string msg(str::stream() << "not splitting chunk " << toString() << ", split point "
- << splitPoints.back() << " is exactly on chunk bounds");
+ << splitPoints.back()
+ << " is exactly on chunk bounds");
log() << msg;
return Status(ErrorCodes::CannotSplit, msg);
}
diff --git a/src/mongo/s/chunk_diff.h b/src/mongo/s/chunk_diff.h
index 294fae769a2..384f95e7df9 100644
--- a/src/mongo/s/chunk_diff.h
+++ b/src/mongo/s/chunk_diff.h
@@ -75,8 +75,8 @@ public:
typedef typename std::map<BSONObj, ValType, BSONObjCmp> RangeMap;
// Pair of iterators defining a subset of ranges
- typedef
- typename std::pair<typename RangeMap::iterator, typename RangeMap::iterator> RangeOverlap;
+ typedef typename std::pair<typename RangeMap::iterator, typename RangeMap::iterator>
+ RangeOverlap;
// Map of shard identifiers to the maximum chunk version on that shard
typedef typename std::map<ShardId, ChunkVersion> MaxChunkVersionMap;
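
The chunk_diff.h hunk only relocates the line break in a dependent typedef, but the type itself is worth reading: RangeOverlap is a pair of iterators bounding a subrange of the ordered RangeMap. With a concrete map, the same shape falls out of equal_range; the element types below are stand-ins for the templated BSONObj/ValType parameters.

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    // Concrete stand-ins for the templated RangeMap/RangeOverlap pair.
    typedef std::map<int, std::string> RangeMap;
    typedef std::pair<RangeMap::iterator, RangeMap::iterator> RangeOverlap;

    int main() {
        RangeMap ranges{{0, "shard0"}, {10, "shard1"}, {20, "shard2"}};
        // equal_range returns exactly the RangeOverlap shape: [first, second)
        RangeOverlap overlap = ranges.equal_range(10);
        for (RangeMap::iterator it = overlap.first; it != overlap.second; ++it)
            std::cout << it->first << " -> " << it->second << "\n";
        return 0;
    }
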
diff --git a/src/mongo/s/chunk_diff_test.cpp b/src/mongo/s/chunk_diff_test.cpp
index 0e7674929fe..46cbce4dabc 100644
--- a/src/mongo/s/chunk_diff_test.cpp
+++ b/src/mongo/s/chunk_diff_test.cpp
@@ -28,8 +28,8 @@
#include "mongo/platform/basic.h"
-#include <string>
#include <map>
+#include <string>
#include <utility>
#include <vector>
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index de4785240df..5548ea4f642 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -427,11 +427,11 @@ Status ChunkManager::createFirstChunks(OperationContext* txn,
chunk.setShard(shardIds[i % shardIds.size()]);
chunk.setVersion(version);
- Status status = grid.catalogManager(txn)
- ->insertConfigDocument(txn, ChunkType::ConfigNS, chunk.toBSON());
+ Status status = grid.catalogManager(txn)->insertConfigDocument(
+ txn, ChunkType::ConfigNS, chunk.toBSON());
if (!status.isOK()) {
- const string errMsg = str::stream()
- << "Creating first chunks failed: " << status.reason();
+ const string errMsg = str::stream() << "Creating first chunks failed: "
+ << status.reason();
error() << errMsg;
return Status(status.code(), errMsg);
}
@@ -472,9 +472,12 @@ shared_ptr<Chunk> ChunkManager::findIntersectingChunk(OperationContext* txn,
}
msgasserted(8070,
- str::stream() << "couldn't find a chunk intersecting: " << shardKey
- << " for ns: " << _ns << " at version: " << _version.toString()
- << ", number of chunks: " << _chunkMap.size());
+ str::stream() << "couldn't find a chunk intersecting: " << shardKey << " for ns: "
+ << _ns
+ << " at version: "
+ << _version.toString()
+ << ", number of chunks: "
+ << _chunkMap.size());
}
void ChunkManager::getShardIdsForQuery(OperationContext* txn,
@@ -714,12 +717,10 @@ ChunkManager::ChunkRangeMap ChunkManager::_constructRanges(const ChunkMap& chunk
while (current != chunkMap.cend()) {
const auto rangeFirst = current;
- current = std::find_if(current,
- chunkMap.cend(),
- [&rangeFirst](const ChunkMap::value_type& chunkMapEntry) {
- return chunkMapEntry.second->getShardId() !=
- rangeFirst->second->getShardId();
- });
+ current = std::find_if(
+ current, chunkMap.cend(), [&rangeFirst](const ChunkMap::value_type& chunkMapEntry) {
+ return chunkMapEntry.second->getShardId() != rangeFirst->second->getShardId();
+ });
const auto rangeLast = std::prev(current);
const BSONObj rangeMin = rangeFirst->second->getMin();
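
The _constructRanges hunk is one of the few places where the reformat genuinely improves readability: the loop greedily groups consecutive chunks owned by the same shard by handing std::find_if a lambda that fires at the first shard-id change. The logic is easy to exercise standalone; the map below substitutes plain strings for shard ids and ints for chunk min keys.

    #include <algorithm>
    #include <iostream>
    #include <iterator>
    #include <map>
    #include <string>

    int main() {
        // Chunk min key -> owning shard, in key order, as in the ChunkMap.
        std::map<int, std::string> chunkMap{
            {0, "shard0"}, {10, "shard0"}, {20, "shard1"}, {30, "shard1"}};

        auto current = chunkMap.cbegin();
        while (current != chunkMap.cend()) {
            const auto rangeFirst = current;
            // Advance to the first entry owned by a different shard.
            current = std::find_if(
                current, chunkMap.cend(), [&rangeFirst](const auto& entry) {
                    return entry.second != rangeFirst->second;
                });
            const auto rangeLast = std::prev(current);
            std::cout << rangeFirst->second << ": [" << rangeFirst->first
                      << ", " << rangeLast->first << "]\n";
        }
        return 0;  // prints shard0: [0, 10] then shard1: [20, 30]
    }
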
diff --git a/src/mongo/s/chunk_manager_targeter.cpp b/src/mongo/s/chunk_manager_targeter.cpp
index 717692e324b..c9e9bcb76de 100644
--- a/src/mongo/s/chunk_manager_targeter.cpp
+++ b/src/mongo/s/chunk_manager_targeter.cpp
@@ -38,8 +38,8 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
-#include "mongo/s/sharding_raii.h"
#include "mongo/s/shard_key_pattern.h"
+#include "mongo/s/sharding_raii.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
@@ -553,7 +553,8 @@ Status ChunkManagerTargeter::targetAllShards(vector<ShardEndpoint*>* endpoints)
if (!_primary && !_manager) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << "could not target every shard with versions for "
- << getNS().ns() << "; metadata not found");
+ << getNS().ns()
+ << "; metadata not found");
}
vector<ShardId> shardIds;
diff --git a/src/mongo/s/chunk_manager_targeter_test.cpp b/src/mongo/s/chunk_manager_targeter_test.cpp
index 9c2e9327992..24d2398defa 100644
--- a/src/mongo/s/chunk_manager_targeter_test.cpp
+++ b/src/mongo/s/chunk_manager_targeter_test.cpp
@@ -319,7 +319,8 @@ TEST(CMCollapseTreeTest, Regex) {
OrderedIntervalList expected;
expected.intervals.push_back(Interval(BSON(""
<< ""
- << "" << BSONObj()),
+ << ""
+ << BSONObj()),
true,
false));
BSONObjBuilder builder;
diff --git a/src/mongo/s/chunk_version.cpp b/src/mongo/s/chunk_version.cpp
index 1ad883b603b..258d8512f93 100644
--- a/src/mongo/s/chunk_version.cpp
+++ b/src/mongo/s/chunk_version.cpp
@@ -30,9 +30,9 @@
#include "mongo/s/chunk_version.h"
+#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
-#include "mongo/base/status_with.h"
#include "mongo/bson/util/bson_extract.h"
#include "mongo/util/mongoutils/str.h"
diff --git a/src/mongo/s/client/shard_factory.cpp b/src/mongo/s/client/shard_factory.cpp
index cc5ca205a0f..bf71e7c62aa 100644
--- a/src/mongo/s/client/shard_factory.cpp
+++ b/src/mongo/s/client/shard_factory.cpp
@@ -34,8 +34,8 @@
#include "mongo/s/client/shard_factory.h"
#include "mongo/base/status_with.h"
-#include "mongo/client/remote_command_targeter.h"
#include "mongo/client/connection_string.h"
+#include "mongo/client/remote_command_targeter.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/s/client/shard_factory.h b/src/mongo/s/client/shard_factory.h
index caa6743e75b..c3a6ff8d8de 100644
--- a/src/mongo/s/client/shard_factory.h
+++ b/src/mongo/s/client/shard_factory.h
@@ -28,10 +28,10 @@
#pragma once
+#include <functional>
+#include <map>
#include <memory>
#include <string>
-#include <map>
-#include <functional>
#include "mongo/base/disallow_copying.h"
diff --git a/src/mongo/s/client/shard_local_test.cpp b/src/mongo/s/client/shard_local_test.cpp
index 214202d4a7d..f6e3009f97e 100644
--- a/src/mongo/s/client/shard_local_test.cpp
+++ b/src/mongo/s/client/shard_local_test.cpp
@@ -31,8 +31,8 @@
#include "mongo/s/client/shard_local.h"
#include "mongo/client/read_preference.h"
-#include "mongo/db/client.h"
#include "mongo/db/catalog/database_holder.h"
+#include "mongo/db/client.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/query/find_and_modify_request.h"
#include "mongo/db/repl/replication_coordinator_global.h"
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 271346d6e86..fb9339b660c 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -42,8 +42,8 @@
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_connection.h"
-#include "mongo/s/grid.h"
#include "mongo/s/client/shard_factory.h"
+#include "mongo/s/grid.h"
#include "mongo/stdx/memory.h"
#include "mongo/stdx/mutex.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index 66dc5d225ee..89502ff507a 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -29,10 +29,10 @@
#pragma once
#include <memory>
+#include <set>
#include <string>
#include <unordered_map>
#include <vector>
-#include <set>
#include "mongo/base/disallow_copying.h"
#include "mongo/db/jsobj.h"
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index ef033ff718a..9a31ee60424 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -42,8 +42,8 @@
#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/repl/read_concern_args.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
diff --git a/src/mongo/s/client/sharding_network_connection_hook.cpp b/src/mongo/s/client/sharding_network_connection_hook.cpp
index 80e5b73460d..a6b7a071250 100644
--- a/src/mongo/s/client/sharding_network_connection_hook.cpp
+++ b/src/mongo/s/client/sharding_network_connection_hook.cpp
@@ -36,8 +36,8 @@
#include "mongo/executor/remote_command_request.h"
#include "mongo/executor/remote_command_response.h"
#include "mongo/rpc/get_status_from_command_result.h"
-#include "mongo/s/grid.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/grid.h"
#include "mongo/s/set_shard_version_request.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/net/hostandport.h"
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index 5fcb8743740..c0df531ec9f 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -294,13 +294,19 @@ bool checkShardVersion(OperationContext* txn,
const ChunkVersion refVersion(refManager->getVersion(shard->getId()));
const ChunkVersion currentVersion(manager->getVersion(shard->getId()));
- string msg(str::stream()
- << "manager (" << currentVersion.toString() << " : "
- << manager->getSequenceNumber() << ") "
- << "not compatible with reference manager (" << refVersion.toString()
- << " : " << refManager->getSequenceNumber() << ") "
- << "on shard " << shard->getId() << " (" << shard->getConnString().toString()
- << ")");
+ string msg(str::stream() << "manager (" << currentVersion.toString() << " : "
+ << manager->getSequenceNumber()
+ << ") "
+ << "not compatible with reference manager ("
+ << refVersion.toString()
+ << " : "
+ << refManager->getSequenceNumber()
+ << ") "
+ << "on shard "
+ << shard->getId()
+ << " ("
+ << shard->getConnString().toString()
+ << ")");
throw SendStaleConfigException(ns, msg, refVersion, currentVersion);
}
@@ -309,9 +315,13 @@ bool checkShardVersion(OperationContext* txn,
<< ((manager.get() == 0) ? string("<none>") : str::stream()
<< manager->getSequenceNumber())
<< ") but has reference manager ("
- << refManager->getSequenceNumber() << ") "
- << "on conn " << conn->getServerAddress() << " ("
- << conn_in->getServerAddress() << ")");
+ << refManager->getSequenceNumber()
+ << ") "
+ << "on conn "
+ << conn->getServerAddress()
+ << " ("
+ << conn_in->getServerAddress()
+ << ")");
throw SendStaleConfigException(
ns, msg, refManager->getVersion(shard->getId()), ChunkVersion::UNSHARDED());
diff --git a/src/mongo/s/cluster_write.cpp b/src/mongo/s/cluster_write.cpp
index 4299dbe807c..9f999279db0 100644
--- a/src/mongo/s/cluster_write.cpp
+++ b/src/mongo/s/cluster_write.cpp
@@ -266,7 +266,8 @@ void ClusterWriter::write(OperationContext* txn,
str::stream()
<< "unable to target"
<< (request->isInsertIndexRequest() ? " index" : "")
- << " write op for collection " << request->getTargetingNS()
+ << " write op for collection "
+ << request->getTargetingNS()
<< causedBy(targetInitStatus)),
response);
return;
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index aafa05cd583..32306c3c7e2 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -31,8 +31,8 @@
#include <vector>
#include "mongo/db/commands.h"
-#include "mongo/s/commands/cluster_explain.h"
#include "mongo/s/commands/cluster_commands_common.h"
+#include "mongo/s/commands/cluster_explain.h"
#include "mongo/s/commands/strategy.h"
#include "mongo/util/timer.h"
diff --git a/src/mongo/s/commands/cluster_current_op.cpp b/src/mongo/s/commands/cluster_current_op.cpp
index 7cc9ce7e97a..68ab29cf1ad 100644
--- a/src/mongo/s/commands/cluster_current_op.cpp
+++ b/src/mongo/s/commands/cluster_current_op.cpp
@@ -30,8 +30,8 @@
#include "mongo/platform/basic.h"
-#include <vector>
#include <tuple>
+#include <vector>
#include "mongo/client/connpool.h"
#include "mongo/db/auth/action_type.h"
@@ -128,8 +128,10 @@ public:
if (fieldName == kOpIdFieldName) {
uassert(28630,
str::stream() << "expected numeric opid from currentOp response"
- << " from shard " << shardName
- << ", got: " << shardOpElement,
+ << " from shard "
+ << shardName
+ << ", got: "
+ << shardOpElement,
shardOpElement.isNumber());
modifiedShardOpBob.append(kOpIdFieldName,
diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp
index acd915981ef..36fb17a0e61 100644
--- a/src/mongo/s/commands/cluster_explain.cpp
+++ b/src/mongo/s/commands/cluster_explain.cpp
@@ -150,14 +150,16 @@ Status ClusterExplain::validateShardResults(const vector<Strategy::CommandResult
return Status(error,
str::stream() << "Explain command on shard "
<< shardResults[i].target.toString()
- << " failed, caused by: " << shardResults[i].result);
+ << " failed, caused by: "
+ << shardResults[i].result);
}
if (Object != shardResults[i].result["queryPlanner"].type()) {
return Status(ErrorCodes::OperationFailed,
str::stream() << "Explain command on shard "
<< shardResults[i].target.toString()
- << " failed, caused by: " << shardResults[i].result);
+ << " failed, caused by: "
+ << shardResults[i].result);
}
if (shardResults[i].result.hasField("executionStats")) {
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index c4c185e116e..196b0f5f722 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -36,18 +36,18 @@
#include "mongo/db/commands.h"
#include "mongo/db/commands/find_and_modify.h"
#include "mongo/s/balancer/balancer_configuration.h"
+#include "mongo/s/catalog/catalog_cache.h"
+#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/catalog/catalog_cache.h"
+#include "mongo/s/commands/cluster_explain.h"
#include "mongo/s/commands/sharded_command_processing.h"
+#include "mongo/s/commands/strategy.h"
#include "mongo/s/config.h"
-#include "mongo/s/chunk_manager.h"
-#include "mongo/s/commands/cluster_explain.h"
#include "mongo/s/grid.h"
#include "mongo/s/mongos_options.h"
#include "mongo/s/sharding_raii.h"
#include "mongo/s/stale_exception.h"
-#include "mongo/s/commands/strategy.h"
#include "mongo/util/timer.h"
namespace mongo {
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index cc7323e28a7..2ff26bb1707 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -36,8 +36,8 @@
#include "mongo/db/matcher/extensions_callback_noop.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/stats/counters.h"
-#include "mongo/s/query/cluster_find.h"
#include "mongo/s/commands/strategy.h"
+#include "mongo/s/query/cluster_find.h"
namespace mongo {
namespace {
diff --git a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
index 49c4599f121..78d7e892391 100644
--- a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
@@ -30,11 +30,11 @@
#include <vector>
+#include "mongo/client/remote_command_targeter.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/lasterror.h"
#include "mongo/s/client/dbclient_multi_command.h"
-#include "mongo/client/remote_command_targeter.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/cluster_last_error_info.h"
#include "mongo/s/grid.h"
diff --git a/src/mongo/s/commands/cluster_getmore_cmd.cpp b/src/mongo/s/commands/cluster_getmore_cmd.cpp
index 3af7d0bbd84..270100a2b47 100644
--- a/src/mongo/s/commands/cluster_getmore_cmd.cpp
+++ b/src/mongo/s/commands/cluster_getmore_cmd.cpp
@@ -87,8 +87,8 @@ public:
}
const GetMoreRequest& request = parseStatus.getValue();
- return AuthorizationSession::get(client)
- ->checkAuthForGetMore(request.nss, request.cursorid, request.term.is_initialized());
+ return AuthorizationSession::get(client)->checkAuthForGetMore(
+ request.nss, request.cursorid, request.term.is_initialized());
}
bool run(OperationContext* txn,
diff --git a/src/mongo/s/commands/cluster_kill_op.cpp b/src/mongo/s/commands/cluster_kill_op.cpp
index 31590d3c5a9..1e626883ff9 100644
--- a/src/mongo/s/commands/cluster_kill_op.cpp
+++ b/src/mongo/s/commands/cluster_kill_op.cpp
@@ -91,7 +91,9 @@ public:
uassert(28625,
str::stream() << "The op argument to killOp must be of the format shardid:opid"
- << " but found \"" << opToKill << '"',
+ << " but found \""
+ << opToKill
+ << '"',
(opToKill.size() >= 3) && // must have at least N:N
(opSepPos != std::string::npos) && // must have ':' as separator
(opSepPos != 0) && // can't be :NN
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 5cc29ea502f..352d5969927 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -42,15 +42,15 @@
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/catalog_manager.h"
-#include "mongo/s/client/shard_connection.h"
+#include "mongo/s/catalog/dist_lock_manager.h"
#include "mongo/s/chunk_manager.h"
+#include "mongo/s/client/shard_connection.h"
+#include "mongo/s/client/shard_registry.h"
#include "mongo/s/commands/cluster_commands_common.h"
#include "mongo/s/commands/sharded_command_processing.h"
+#include "mongo/s/commands/strategy.h"
#include "mongo/s/config.h"
-#include "mongo/s/catalog/dist_lock_manager.h"
-#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
-#include "mongo/s/commands/strategy.h"
#include "mongo/s/sharding_raii.h"
#include "mongo/stdx/chrono.h"
#include "mongo/util/log.h"
@@ -319,7 +319,9 @@ public:
Strategy::commandOp(txn, dbname, shardedCommand, 0, nss.ns(), q, &mrCommandResults);
} catch (DBException& e) {
e.addContext(str::stream() << "could not run map command on all shards for ns "
- << nss.ns() << " and query " << q);
+ << nss.ns()
+ << " and query "
+ << q);
throw;
}
@@ -341,8 +343,8 @@ public:
if (!ok) {
// At this point we will return
- errmsg = str::stream()
- << "MR parallel processing failed: " << singleResult.toString();
+ errmsg = str::stream() << "MR parallel processing failed: "
+ << singleResult.toString();
continue;
}
@@ -503,7 +505,9 @@ public:
ok = true;
} catch (DBException& e) {
e.addContext(str::stream() << "could not run final reduce on all shards for "
- << nss.ns() << ", output " << outputCollNss.ns());
+ << nss.ns()
+ << ", output "
+ << outputCollNss.ns());
throw;
}
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index ca08547f300..5afec743ab3 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -141,7 +141,8 @@ public:
const auto to = grid.shardRegistry()->getShard(txn, toString);
if (!to) {
string msg(str::stream() << "Could not move chunk in '" << nss.ns() << "' to shard '"
- << toString << "' because that shard does not exist");
+ << toString
+ << "' because that shard does not exist");
log() << msg;
return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
}
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index 5be8441a951..f5ba8b9fb93 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -133,7 +133,8 @@ public:
shared_ptr<Shard> toShard = grid.shardRegistry()->getShard(txn, to);
if (!toShard) {
string msg(str::stream() << "Could not move database '" << dbname << "' to shard '"
- << to << "' because the shard does not exist");
+ << to
+ << "' because the shard does not exist");
log() << msg;
return appendCommandStatus(result, Status(ErrorCodes::ShardNotFound, msg));
}
@@ -192,8 +193,11 @@ public:
bool worked = toconn->runCommand(
dbname.c_str(),
BSON("clone" << fromShard->getConnString().toString() << "collsToIgnore" << barr.arr()
- << bypassDocumentValidationCommandOption() << true
- << "_checkForCatalogChange" << true << "writeConcern"
+ << bypassDocumentValidationCommandOption()
+ << true
+ << "_checkForCatalogChange"
+ << true
+ << "writeConcern"
<< txn->getWriteConcern().toBSON()),
cloneRes);
toconn.done();
@@ -233,7 +237,8 @@ public:
}
} catch (DBException& e) {
e.addContext(str::stream() << "movePrimary could not drop the database " << dbname
- << " on " << oldPrimary);
+ << " on "
+ << oldPrimary);
throw;
}
@@ -268,7 +273,9 @@ public:
} catch (DBException& e) {
e.addContext(str::stream()
<< "movePrimary could not drop the cloned collection "
- << el.String() << " on " << oldPrimary);
+ << el.String()
+ << " on "
+ << oldPrimary);
throw;
}
}
diff --git a/src/mongo/s/commands/cluster_pipeline_cmd.cpp b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
index dc332571d23..e4e3dde7fbf 100644
--- a/src/mongo/s/commands/cluster_pipeline_cmd.cpp
+++ b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
@@ -303,7 +303,8 @@ std::vector<DocumentSourceMergeCursors::CursorDescriptor> PipelineCommand::parse
invariant(errCode == result["code"].numberInt() || errCode == 17022);
uasserted(errCode,
str::stream() << "sharded pipeline failed on shard "
- << shardResults[i].shardTargetId << ": "
+ << shardResults[i].shardTargetId
+ << ": "
<< result.toString());
}
@@ -321,7 +322,8 @@ std::vector<DocumentSourceMergeCursors::CursorDescriptor> PipelineCommand::parse
massert(17025,
str::stream() << "shard " << shardResults[i].shardTargetId
- << " returned invalid ns: " << cursor["ns"],
+ << " returned invalid ns: "
+ << cursor["ns"],
NamespaceString(cursor["ns"].String()).isValid());
cursors.emplace_back(
@@ -340,8 +342,8 @@ void PipelineCommand::uassertAllShardsSupportExplain(
const vector<Strategy::CommandResult>& shardResults) {
for (size_t i = 0; i < shardResults.size(); i++) {
uassert(17403,
- str::stream() << "Shard " << shardResults[i].target.toString()
- << " failed: " << shardResults[i].result,
+ str::stream() << "Shard " << shardResults[i].target.toString() << " failed: "
+ << shardResults[i].result,
shardResults[i].result["ok"].trueValue());
uassert(17404,
@@ -400,10 +402,10 @@ BSONObj PipelineCommand::aggRunCommand(DBClientBase* conn,
0, // nToSkip
NULL, // fieldsToReturn
queryOptions);
- massert(
- 17014,
- str::stream() << "aggregate command didn't return results on host: " << conn->toString(),
- cursor && cursor->more());
+ massert(17014,
+ str::stream() << "aggregate command didn't return results on host: "
+ << conn->toString(),
+ cursor && cursor->more());
BSONObj result = cursor->nextSafe().getOwned();
diff --git a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp b/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
index 2ae15e1610f..32d87ac39bf 100644
--- a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
+++ b/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
@@ -26,15 +26,15 @@
* it in the license file.
*/
-#include "mongo/base/init.h"
#include "mongo/base/error_codes.h"
+#include "mongo/base/init.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/client_basic.h"
#include "mongo/db/commands.h"
+#include "mongo/s/commands/strategy.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/stale_exception.h"
-#include "mongo/s/commands/strategy.h"
namespace mongo {
diff --git a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
index c2aef2fd0e6..4443ad6f2e5 100644
--- a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
@@ -38,9 +38,9 @@
#include "mongo/db/operation_context.h"
#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/catalog/type_chunk.h"
+#include "mongo/s/client/shard.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
-#include "mongo/s/client/shard.h"
#include "mongo/util/log.h"
namespace mongo {
diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp
index 11df2bbe8ab..49e33189ba2 100644
--- a/src/mongo/s/commands/cluster_split_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_cmd.cpp
@@ -41,8 +41,8 @@
#include "mongo/db/commands.h"
#include "mongo/db/field_parser.h"
#include "mongo/s/catalog/catalog_cache.h"
-#include "mongo/s/client/shard_connection.h"
#include "mongo/s/chunk_manager.h"
+#include "mongo/s/client/shard_connection.h"
#include "mongo/s/config.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_util.h"
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index 02a87dfc9d2..a282cd57d1d 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -34,11 +34,11 @@
#include "mongo/base/status.h"
#include "mongo/bson/mutable/document.h"
+#include "mongo/client/dbclientinterface.h"
+#include "mongo/config.h"
#include "mongo/db/auth/authorization_manager.h"
#include "mongo/db/auth/authorization_manager_global.h"
#include "mongo/db/auth/user_management_commands_parser.h"
-#include "mongo/client/dbclientinterface.h"
-#include "mongo/config.h"
#include "mongo/db/commands.h"
#include "mongo/db/jsobj.h"
#include "mongo/rpc/write_concern_error_detail.h"
@@ -85,8 +85,8 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- return grid.catalogManager(txn)
- ->runUserManagementWriteCommand(txn, getName(), dbname, cmdObj, &result);
+ return grid.catalogManager(txn)->runUserManagementWriteCommand(
+ txn, getName(), dbname, cmdObj, &result);
}
virtual void redactForLogging(mutablebson::Document* cmdObj) {
@@ -395,8 +395,8 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- return grid.catalogManager(txn)
- ->runUserManagementWriteCommand(txn, getName(), dbname, cmdObj, &result);
+ return grid.catalogManager(txn)->runUserManagementWriteCommand(
+ txn, getName(), dbname, cmdObj, &result);
}
} cmdCreateRole;
@@ -817,8 +817,8 @@ public:
int options,
string& errmsg,
BSONObjBuilder& result) {
- return grid.catalogManager(txn)
- ->runUserManagementWriteCommand(txn, getName(), dbname, cmdObj, &result);
+ return grid.catalogManager(txn)->runUserManagementWriteCommand(
+ txn, getName(), dbname, cmdObj, &result);
}
} cmdMergeAuthzCollections;
@@ -916,8 +916,8 @@ public:
string& errmsg,
BSONObjBuilder& result) {
// Run the authSchemaUpgrade command on the config servers
- if (!grid.catalogManager(txn)
- ->runUserManagementWriteCommand(txn, getName(), dbname, cmdObj, &result)) {
+ if (!grid.catalogManager(txn)->runUserManagementWriteCommand(
+ txn, getName(), dbname, cmdObj, &result)) {
return false;
}
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index 8c7918cc49f..a2215e527a4 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -995,7 +995,9 @@ public:
if (!status.isOK()) {
return Status(status.getStatus().code(),
str::stream() << "Passthrough command failed: " << command.toString()
- << " on ns " << nss.ns() << ". Caused by "
+ << " on ns "
+ << nss.ns()
+ << ". Caused by "
<< causedBy(status.getStatus()));
}
@@ -1003,7 +1005,8 @@ public:
if (conf->isSharded(nss.ns())) {
return Status(ErrorCodes::IllegalOperation,
str::stream() << "Passthrough command failed: " << command.toString()
- << " on ns " << nss.ns()
+ << " on ns "
+ << nss.ns()
<< ". Cannot run on sharded namespace.");
}
@@ -1018,8 +1021,10 @@ public:
conn.done();
return Status(ErrorCodes::OperationFailed,
str::stream() << "Passthrough command failed: " << command
- << " on ns " << nss.ns()
- << "; result: " << shardResult);
+ << " on ns "
+ << nss.ns()
+ << "; result: "
+ << shardResult);
}
conn.done();
} catch (const DBException& ex) {
@@ -1175,8 +1180,10 @@ public:
} else if (queryElt.type() != BSONType::jstNULL) {
return Status(ErrorCodes::TypeMismatch,
str::stream() << "\"query\" had the wrong type. Expected "
- << typeName(BSONType::Object) << " or "
- << typeName(BSONType::jstNULL) << ", found "
+ << typeName(BSONType::Object)
+ << " or "
+ << typeName(BSONType::jstNULL)
+ << ", found "
<< typeName(queryElt.type()));
}
}
@@ -1612,8 +1619,8 @@ public:
}
return Status(ErrorCodes::Unauthorized,
- str::stream()
- << "Not authorized to list indexes on collection: " << ns.coll());
+ str::stream() << "Not authorized to list indexes on collection: "
+ << ns.coll());
}
virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
diff --git a/src/mongo/s/commands/request.cpp b/src/mongo/s/commands/request.cpp
index dff8a8472b6..050483d2969 100644
--- a/src/mongo/s/commands/request.cpp
+++ b/src/mongo/s/commands/request.cpp
@@ -40,8 +40,8 @@
#include "mongo/db/lasterror.h"
#include "mongo/db/stats/counters.h"
#include "mongo/s/cluster_last_error_info.h"
-#include "mongo/s/grid.h"
#include "mongo/s/commands/strategy.h"
+#include "mongo/s/grid.h"
#include "mongo/util/log.h"
#include "mongo/util/timer.h"
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index f2a670ab4ce..214b0568849 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -35,8 +35,8 @@
#include "mongo/base/data_cursor.h"
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/base/status.h"
-#include "mongo/bson/util/builder.h"
#include "mongo/bson/util/bson_extract.h"
+#include "mongo/bson/util/builder.h"
#include "mongo/client/connpool.h"
#include "mongo/client/dbclientcursor.h"
#include "mongo/client/parallel.h"
@@ -44,22 +44,22 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/commands.h"
-#include "mongo/db/max_time.h"
-#include "mongo/db/server_parameters.h"
#include "mongo/db/matcher/extensions_callback_noop.h"
+#include "mongo/db/max_time.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/find_common.h"
-#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/db/query/getmore_request.h"
+#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/server_parameters.h"
#include "mongo/db/stats/counters.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/s/bson_serializable.h"
#include "mongo/s/catalog/catalog_cache.h"
+#include "mongo/s/chunk_manager.h"
+#include "mongo/s/chunk_version.h"
#include "mongo/s/client/shard_connection.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/client/version_manager.h"
-#include "mongo/s/chunk_manager.h"
-#include "mongo/s/chunk_version.h"
#include "mongo/s/commands/cluster_explain.h"
#include "mongo/s/commands/request.h"
#include "mongo/s/config.h"
@@ -67,8 +67,8 @@
#include "mongo/s/query/cluster_cursor_manager.h"
#include "mongo/s/query/cluster_find.h"
#include "mongo/s/stale_exception.h"
-#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batch_upconvert.h"
+#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/timer.h"
@@ -413,7 +413,9 @@ void Strategy::killCursors(OperationContext* txn, Request& request) {
const int numCursors = dbMessage.pullInt();
massert(34425,
str::stream() << "Invalid killCursors message. numCursors: " << numCursors
- << ", message size: " << dbMessage.msg().dataSize() << ".",
+ << ", message size: "
+ << dbMessage.msg().dataSize()
+ << ".",
dbMessage.msg().dataSize() == 8 + (8 * numCursors));
uassert(28794,
str::stream() << "numCursors must be between 1 and 29999. numCursors: " << numCursors
diff --git a/src/mongo/s/commands/strategy.h b/src/mongo/s/commands/strategy.h
index 630373b71a1..ebd9e36c520 100644
--- a/src/mongo/s/commands/strategy.h
+++ b/src/mongo/s/commands/strategy.h
@@ -30,8 +30,8 @@
#include <atomic>
-#include "mongo/db/query/explain_common.h"
#include "mongo/client/connection_string.h"
+#include "mongo/db/query/explain_common.h"
#include "mongo/s/client/shard.h"
namespace mongo {
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index 8952de421a9..14abfd14398 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -40,8 +40,8 @@
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/catalog_manager.h"
-#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_chunk.h"
+#include "mongo/s/catalog/type_collection.h"
#include "mongo/s/catalog/type_database.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/chunk_manager.h"
@@ -462,8 +462,8 @@ bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
// Load all collections
vector<CollectionType> collections;
repl::OpTime configOpTimeWhenLoadingColl;
- uassertStatusOK(grid.catalogManager(txn)
- ->getCollections(txn, &_name, &collections, &configOpTimeWhenLoadingColl));
+ uassertStatusOK(grid.catalogManager(txn)->getCollections(
+ txn, &_name, &collections, &configOpTimeWhenLoadingColl));
int numCollsErased = 0;
int numCollsSharded = 0;
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
index 96f42340229..0f102def839 100644
--- a/src/mongo/s/grid.cpp
+++ b/src/mongo/s/grid.cpp
@@ -38,8 +38,8 @@
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/catalog_manager.h"
-#include "mongo/s/client/shard_registry.h"
#include "mongo/s/client/shard_factory.h"
+#include "mongo/s/client/shard_registry.h"
#include "mongo/s/query/cluster_cursor_manager.h"
#include "mongo/stdx/memory.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/s/mongos_options.cpp b/src/mongo/s/mongos_options.cpp
index 34c8ea62029..e01b19e2d5e 100644
--- a/src/mongo/s/mongos_options.cpp
+++ b/src/mongo/s/mongos_options.cpp
@@ -99,14 +99,15 @@ Status addMongosOptions(moe::OptionSection* options) {
sharding_options.addOptionChaining(
"sharding.chunkSize", "chunkSize", moe::Int, "maximum amount of data per chunk");
- sharding_options.addOptionChaining("net.http.JSONPEnabled",
- "jsonp",
- moe::Switch,
- "allow JSONP access via http (has security implications)")
+ sharding_options
+ .addOptionChaining("net.http.JSONPEnabled",
+ "jsonp",
+ moe::Switch,
+ "allow JSONP access via http (has security implications)")
.setSources(moe::SourceAllLegacy);
- sharding_options.addOptionChaining(
- "noscripting", "noscripting", moe::Switch, "disable scripting engine")
+ sharding_options
+ .addOptionChaining("noscripting", "noscripting", moe::Switch, "disable scripting engine")
.setSources(moe::SourceAllLegacy);
@@ -122,15 +123,14 @@ Status addMongosOptions(moe::OptionSection* options) {
options->addSection(ssl_options);
#endif
- options->addOptionChaining("noAutoSplit",
- "noAutoSplit",
- moe::Switch,
- "do not send split commands with writes")
+ options
+ ->addOptionChaining(
+ "noAutoSplit", "noAutoSplit", moe::Switch, "do not send split commands with writes")
.hidden()
.setSources(moe::SourceAllLegacy);
- options->addOptionChaining(
- "sharding.autoSplit", "", moe::Bool, "send split commands with writes")
+ options
+ ->addOptionChaining("sharding.autoSplit", "", moe::Bool, "send split commands with writes")
.setSources(moe::SourceYAMLConfig);
diff --git a/src/mongo/s/mongos_options_init.cpp b/src/mongo/s/mongos_options_init.cpp
index d26bdb039f7..5a091d16c23 100644
--- a/src/mongo/s/mongos_options_init.cpp
+++ b/src/mongo/s/mongos_options_init.cpp
@@ -30,9 +30,9 @@
#include <iostream>
+#include "mongo/util/exit_code.h"
#include "mongo/util/options_parser/startup_option_init.h"
#include "mongo/util/options_parser/startup_options.h"
-#include "mongo/util/exit_code.h"
#include "mongo/util/quick_exit.h"
namespace mongo {
diff --git a/src/mongo/s/ns_targeter.h b/src/mongo/s/ns_targeter.h
index 56dfefc290b..23bb6656f52 100644
--- a/src/mongo/s/ns_targeter.h
+++ b/src/mongo/s/ns_targeter.h
@@ -30,13 +30,13 @@
#include <string>
-#include "mongo/bson/bsonobj.h"
#include "mongo/base/status.h"
+#include "mongo/bson/bsonobj.h"
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/namespace_string.h"
#include "mongo/s/chunk_version.h"
-#include "mongo/s/write_ops/batched_update_document.h"
#include "mongo/s/write_ops/batched_delete_document.h"
+#include "mongo/s/write_ops/batched_update_document.h"
namespace mongo {
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index 40ab9d8e427..233631bd3e7 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -276,7 +276,8 @@ Status AsyncResultsMerger::askForNextBatch_inlock(size_t remoteIndex) {
adjustedBatchSize,
_awaitDataTimeout,
boost::none,
- boost::none).toBSON();
+ boost::none)
+ .toBSON();
} else {
// Do the first time shard host resolution.
invariant(_params.readPreference);
@@ -429,10 +430,10 @@ void AsyncResultsMerger::handleBatchResponse(
if (!cursorResponseStatus.isOK()) {
auto shard = remote.getShard();
if (!shard) {
- remote.status =
- Status(cursorResponseStatus.getStatus().code(),
- str::stream() << "Could not find shard " << *remote.shardId
- << " containing host " << remote.getTargetHost().toString());
+ remote.status = Status(cursorResponseStatus.getStatus().code(),
+ str::stream() << "Could not find shard " << *remote.shardId
+ << " containing host "
+ << remote.getTargetHost().toString());
} else {
shard->updateReplSetMonitor(remote.getTargetHost(), cursorResponseStatus.getStatus());
@@ -443,7 +444,8 @@ void AsyncResultsMerger::handleBatchResponse(
Shard::RetryPolicy::kIdempotent)) {
invariant(remote.shardId);
LOG(1) << "Initial cursor establishment failed with retriable error and will be "
- "retried" << causedBy(cursorResponseStatus.getStatus());
+ "retried"
+ << causedBy(cursorResponseStatus.getStatus());
++remote.retryCount;
@@ -489,7 +491,8 @@ void AsyncResultsMerger::handleBatchResponse(
remote.status = Status(ErrorCodes::InternalError,
str::stream() << "Missing field '"
<< ClusterClientCursorParams::kSortKeyField
- << "' in document: " << obj);
+ << "' in document: "
+ << obj);
return;
}
diff --git a/src/mongo/s/query/async_results_merger_test.cpp b/src/mongo/s/query/async_results_merger_test.cpp
index 01ebefe3a78..7159d6c0271 100644
--- a/src/mongo/s/query/async_results_merger_test.cpp
+++ b/src/mongo/s/query/async_results_merger_test.cpp
@@ -873,7 +873,8 @@ TEST_F(AsyncResultsMergerTest, KillTwoOutstandingBatches) {
// command against this id.
BSONObj expectedCmdObj = BSON("killCursors"
<< "testcoll"
- << "cursors" << BSON_ARRAY(CursorId(123)));
+ << "cursors"
+ << BSON_ARRAY(CursorId(123)));
ASSERT_EQ(getFirstPendingRequest().cmdObj, expectedCmdObj);
// Ensure that we properly signal both those waiting for the kill, and those waiting for more
@@ -919,7 +920,8 @@ TEST_F(AsyncResultsMergerTest, KillOutstandingGetMore) {
// scheduled.
BSONObj expectedCmdObj = BSON("killCursors"
<< "testcoll"
- << "cursors" << BSON_ARRAY(CursorId(123)));
+ << "cursors"
+ << BSON_ARRAY(CursorId(123)));
ASSERT_EQ(getFirstPendingRequest().cmdObj, expectedCmdObj);
// Ensure that we properly signal both those waiting for the kill, and those waiting for more
@@ -1331,7 +1333,8 @@ TEST_F(AsyncResultsMergerTest, GetMoreRequestIncludesMaxTimeMS) {
// Pending getMore request should include maxTimeMS.
BSONObj expectedCmdObj = BSON("getMore" << CursorId(123) << "collection"
<< "testcoll"
- << "maxTimeMS" << 789);
+ << "maxTimeMS"
+ << 789);
ASSERT_EQ(getFirstPendingRequest().cmdObj, expectedCmdObj);
responses.clear();
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index 22a3e3bd20e..3c84fa7fa54 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -54,8 +54,9 @@ Status cursorNotFoundStatus(const NamespaceString& nss, CursorId cursorId) {
Status cursorInUseStatus(const NamespaceString& nss, CursorId cursorId) {
return {ErrorCodes::CursorInUse,
- str::stream() << "Cursor already in use (namespace: '" << nss.ns()
- << "', id: " << cursorId << ")."};
+ str::stream() << "Cursor already in use (namespace: '" << nss.ns() << "', id: "
+ << cursorId
+ << ")."};
}
//
diff --git a/src/mongo/s/query/cluster_cursor_manager_test.cpp b/src/mongo/s/query/cluster_cursor_manager_test.cpp
index 1c32aa33ec2..0edda882977 100644
--- a/src/mongo/s/query/cluster_cursor_manager_test.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager_test.cpp
@@ -34,8 +34,8 @@
#include "mongo/s/query/cluster_client_cursor_mock.h"
#include "mongo/stdx/memory.h"
-#include "mongo/util/clock_source_mock.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/clock_source_mock.h"
namespace mongo {
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index 99398e9ce1b..18f44d9e510 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -87,7 +87,9 @@ StatusWith<std::unique_ptr<LiteParsedQuery>> transformQueryForShards(const LiteP
ErrorCodes::Overflow,
str::stream()
<< "sum of limit and skip cannot be represented as a 64-bit integer, limit: "
- << *lpq.getLimit() << ", skip: " << lpq.getSkip().value_or(0));
+ << *lpq.getLimit()
+ << ", skip: "
+ << lpq.getSkip().value_or(0));
}
newLimit = newLimitValue;
}
@@ -103,8 +105,10 @@ StatusWith<std::unique_ptr<LiteParsedQuery>> transformQueryForShards(const LiteP
return Status(ErrorCodes::Overflow,
str::stream()
<< "sum of ntoreturn and skip cannot be represented as a 64-bit "
- "integer, ntoreturn: " << *lpq.getNToReturn()
- << ", skip: " << lpq.getSkip().value_or(0));
+ "integer, ntoreturn: "
+ << *lpq.getNToReturn()
+ << ", skip: "
+ << lpq.getSkip().value_or(0));
}
newLimit = newLimitValue;
} else {
@@ -114,8 +118,10 @@ StatusWith<std::unique_ptr<LiteParsedQuery>> transformQueryForShards(const LiteP
return Status(ErrorCodes::Overflow,
str::stream()
<< "sum of ntoreturn and skip cannot be represented as a 64-bit "
- "integer, ntoreturn: " << *lpq.getNToReturn()
- << ", skip: " << lpq.getSkip().value_or(0));
+ "integer, ntoreturn: "
+ << *lpq.getNToReturn()
+ << ", skip: "
+ << lpq.getSkip().value_or(0));
}
newNToReturn = newNToReturnValue;
}
@@ -288,7 +294,8 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
return {ErrorCodes::BadValue,
str::stream() << "Projection contains illegal field '"
<< ClusterClientCursorParams::kSortKeyField
- << "': " << query.getParsed().getProj()};
+ << "': "
+ << query.getParsed().getProj()};
}
auto dbConfig = grid.catalogCache()->getDatabase(txn, query.nss().db().toString());
diff --git a/src/mongo/s/query/router_stage_merge.h b/src/mongo/s/query/router_stage_merge.h
index a146c66f346..d74870f8a94 100644
--- a/src/mongo/s/query/router_stage_merge.h
+++ b/src/mongo/s/query/router_stage_merge.h
@@ -29,9 +29,9 @@
#pragma once
#include "mongo/executor/task_executor.h"
-#include "mongo/s/query/router_exec_stage.h"
-#include "mongo/s/query/cluster_client_cursor_params.h"
#include "mongo/s/query/async_results_merger.h"
+#include "mongo/s/query/cluster_client_cursor_params.h"
+#include "mongo/s/query/router_exec_stage.h"
#include "mongo/util/net/hostandport.h"
namespace mongo {
diff --git a/src/mongo/s/request_types/add_shard_request_test.cpp b/src/mongo/s/request_types/add_shard_request_test.cpp
index c0fbca0f9ee..fee8461ba81 100644
--- a/src/mongo/s/request_types/add_shard_request_test.cpp
+++ b/src/mongo/s/request_types/add_shard_request_test.cpp
@@ -65,8 +65,9 @@ TEST(AddShardRequest, ParseInternalFieldsInvalidConnectionString) {
TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
{
- BSONObj obj = BSON(AddShardRequest::mongosAddShard
- << kConnString << AddShardRequest::shardName << kShardName);
+ BSONObj obj =
+ BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName
+ << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -79,8 +80,9 @@ TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
}
{
- BSONObj obj = BSON(AddShardRequest::configsvrAddShard
- << kConnString << AddShardRequest::shardName << kShardName);
+ BSONObj obj =
+ BSON(AddShardRequest::configsvrAddShard << kConnString << AddShardRequest::shardName
+ << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
@@ -96,8 +98,9 @@ TEST(AddShardRequest, ParseInternalFieldsMissingMaxSize) {
TEST(AddShardRequest, ParseInternalFieldsMissingName) {
{
- BSONObj obj = BSON(AddShardRequest::mongosAddShard
- << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ BSONObj obj =
+ BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::maxSizeMB
+ << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -110,8 +113,9 @@ TEST(AddShardRequest, ParseInternalFieldsMissingName) {
}
{
- BSONObj obj = BSON(AddShardRequest::configsvrAddShard
- << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ BSONObj obj =
+ BSON(AddShardRequest::configsvrAddShard << kConnString << AddShardRequest::maxSizeMB
+ << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -126,9 +130,11 @@ TEST(AddShardRequest, ParseInternalFieldsMissingName) {
TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
{
- BSONObj obj = BSON(AddShardRequest::mongosAddShard
- << kConnString << AddShardRequest::shardName << kShardName
- << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ BSONObj obj =
+ BSON(AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName
+ << kShardName
+ << AddShardRequest::maxSizeMB
+ << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -142,9 +148,11 @@ TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
}
{
- BSONObj obj = BSON(AddShardRequest::configsvrAddShard
- << kConnString << AddShardRequest::shardName << kShardName
- << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ BSONObj obj =
+ BSON(AddShardRequest::configsvrAddShard << kConnString << AddShardRequest::shardName
+ << kShardName
+ << AddShardRequest::maxSizeMB
+ << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromConfigCommand(obj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -161,9 +169,10 @@ TEST(AddShardRequest, ParseInternalFieldsAllFieldsPresent) {
// Test converting a valid AddShardRequest to the internal config version of the command.
TEST(AddShardRequest, ToCommandForConfig) {
- BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
- << kConnString << AddShardRequest::shardName << kShardName
- << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ BSONObj mongosCmdObj = BSON(
+ AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName << kShardName
+ << AddShardRequest::maxSizeMB
+ << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -176,8 +185,8 @@ TEST(AddShardRequest, ToCommandForConfig) {
}
TEST(AddShardRequest, ToCommandForConfigMissingName) {
- BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
- << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
+ BSONObj mongosCmdObj = BSON(
+ AddShardRequest::mongosAddShard << kConnString << AddShardRequest::maxSizeMB << kMaxSizeMB);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
@@ -190,8 +199,8 @@ TEST(AddShardRequest, ToCommandForConfigMissingName) {
}
TEST(AddShardRequest, ToCommandForConfigMissingMaxSize) {
- BSONObj mongosCmdObj = BSON(AddShardRequest::mongosAddShard
- << kConnString << AddShardRequest::shardName << kShardName);
+ BSONObj mongosCmdObj = BSON(
+ AddShardRequest::mongosAddShard << kConnString << AddShardRequest::shardName << kShardName);
auto swAddShardRequest = AddShardRequest::parseFromMongosCommand(mongosCmdObj);
ASSERT_OK(swAddShardRequest.getStatus());
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index a6755a82082..8dbacbdc624 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -38,8 +38,8 @@
#include "mongo/client/connpool.h"
#include "mongo/client/dbclient_rs.h"
#include "mongo/client/global_conn_pool.h"
-#include "mongo/client/remote_command_targeter_factory_impl.h"
#include "mongo/client/remote_command_targeter.h"
+#include "mongo/client/remote_command_targeter_factory_impl.h"
#include "mongo/client/replica_set_monitor.h"
#include "mongo/config.h"
#include "mongo/db/audit.h"
@@ -63,14 +63,14 @@
#include "mongo/s/balancer/balancer.h"
#include "mongo/s/catalog/catalog_manager.h"
#include "mongo/s/catalog/type_chunk.h"
-#include "mongo/s/catalog/type_locks.h"
#include "mongo/s/catalog/type_lockpings.h"
+#include "mongo/s/catalog/type_locks.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/catalog/type_tags.h"
#include "mongo/s/client/shard_connection.h"
-#include "mongo/s/client/shard_remote.h"
#include "mongo/s/client/shard_factory.h"
#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/client/shard_remote.h"
#include "mongo/s/client/sharding_connection_hook_for_mongos.h"
#include "mongo/s/cluster_write.h"
#include "mongo/s/commands/request.h"
@@ -78,10 +78,10 @@
#include "mongo/s/grid.h"
#include "mongo/s/mongos_options.h"
#include "mongo/s/query/cluster_cursor_cleanup_job.h"
+#include "mongo/s/query/cluster_cursor_manager.h"
#include "mongo/s/sharding_egress_metadata_hook_for_mongos.h"
#include "mongo/s/sharding_initialization.h"
#include "mongo/s/version_mongos.h"
-#include "mongo/s/query/cluster_cursor_manager.h"
#include "mongo/stdx/memory.h"
#include "mongo/stdx/thread.h"
#include "mongo/util/admin_access.h"
@@ -421,9 +421,8 @@ static ExitCode runMongosServer() {
return inShutdown() ? EXIT_CLEAN : EXIT_NET_ERROR;
}
-MONGO_INITIALIZER_GENERAL(ForkServer,
- ("EndStartupOptionHandling"),
- ("default"))(InitializerContext* context) {
+MONGO_INITIALIZER_GENERAL(ForkServer, ("EndStartupOptionHandling"), ("default"))
+(InitializerContext* context) {
mongo::forkServerOrDie();
return Status::OK();
}
@@ -508,9 +507,8 @@ MONGO_INITIALIZER(SetGlobalEnvironment)(InitializerContext* context) {
}
#ifdef MONGO_CONFIG_SSL
-MONGO_INITIALIZER_GENERAL(setSSLManagerType,
- MONGO_NO_PREREQUISITES,
- ("SSLManager"))(InitializerContext* context) {
+MONGO_INITIALIZER_GENERAL(setSSLManagerType, MONGO_NO_PREREQUISITES, ("SSLManager"))
+(InitializerContext* context) {
isSSLServer = true;
return Status::OK();
}
diff --git a/src/mongo/s/set_shard_version_request_test.cpp b/src/mongo/s/set_shard_version_request_test.cpp
index 960d412bb8a..794d8bb552c 100644
--- a/src/mongo/s/set_shard_version_request_test.cpp
+++ b/src/mongo/s/set_shard_version_request_test.cpp
@@ -47,12 +47,16 @@ const ConnectionString shardCS = ConnectionString::forReplicaSet(
TEST(SetShardVersionRequest, ParseInitMissingAuthoritative) {
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(
- BSON("setShardVersion"
- << ""
- << "init" << true << "configdb" << configCS.toString() << "shard"
- << "TestShard"
- << "shardHost" << shardCS.toString())));
+ assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << ""
+ << "init"
+ << true
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString())));
ASSERT(request.isInit());
ASSERT(!request.isAuthoritative());
@@ -63,13 +67,19 @@ TEST(SetShardVersionRequest, ParseInitMissingAuthoritative) {
}
TEST(SetShardVersionRequest, ParseInitWithAuthoritative) {
- SetShardVersionRequest request = assertGet(
- SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "init" << true << "authoritative" << true
- << "configdb" << configCS.toString() << "shard"
- << "TestShard"
- << "shardHost" << shardCS.toString())));
+ SetShardVersionRequest request =
+ assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << ""
+ << "init"
+ << true
+ << "authoritative"
+ << true
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString())));
ASSERT(request.isInit());
ASSERT(request.isAuthoritative());
@@ -81,13 +91,20 @@ TEST(SetShardVersionRequest, ParseInitWithAuthoritative) {
TEST(SetShardVersionRequest, ParseInitNoConnectionVersioning) {
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(
- BSON("setShardVersion"
- << ""
- << "init" << true << "authoritative" << true << "configdb" << configCS.toString()
- << "shard"
- << "TestShard"
- << "shardHost" << shardCS.toString() << "noConnectionVersioning" << true)));
+ assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << ""
+ << "init"
+ << true
+ << "authoritative"
+ << true
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "noConnectionVersioning"
+ << true)));
ASSERT(request.isInit());
ASSERT(request.isAuthoritative());
@@ -101,13 +118,18 @@ TEST(SetShardVersionRequest, ParseFull) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(
- BSON("setShardVersion"
- << "db.coll"
- << "configdb" << configCS.toString() << "shard"
- << "TestShard"
- << "shardHost" << shardCS.toString() << "version"
- << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch())));
+ assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << "db.coll"
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch())));
ASSERT(!request.isInit());
ASSERT(!request.isAuthoritative());
@@ -125,14 +147,20 @@ TEST(SetShardVersionRequest, ParseFullWithAuthoritative) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(
- BSON("setShardVersion"
- << "db.coll"
- << "configdb" << configCS.toString() << "shard"
- << "TestShard"
- << "shardHost" << shardCS.toString() << "version"
- << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch()
- << "authoritative" << true)));
+ assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << "db.coll"
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch()
+ << "authoritative"
+ << true)));
ASSERT(!request.isInit());
ASSERT(request.isAuthoritative());
@@ -150,14 +178,20 @@ TEST(SetShardVersionRequest, ParseFullNoConnectionVersioning) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
SetShardVersionRequest request =
- assertGet(SetShardVersionRequest::parseFromBSON(
- BSON("setShardVersion"
- << "db.coll"
- << "configdb" << configCS.toString() << "shard"
- << "TestShard"
- << "shardHost" << shardCS.toString() << "version"
- << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch()
- << "noConnectionVersioning" << true)));
+ assertGet(SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << "db.coll"
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch()
+ << "noConnectionVersioning"
+ << true)));
ASSERT(!request.isInit());
ASSERT(!request.isAuthoritative());
@@ -172,12 +206,14 @@ TEST(SetShardVersionRequest, ParseFullNoConnectionVersioning) {
}
TEST(SetShardVersionRequest, ParseInitNoConfigServer) {
- auto ssvStatus =
- SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "init" << true << "shard"
- << "TestShard"
- << "shardHost" << shardCS.toString()));
+ auto ssvStatus = SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << ""
+ << "init"
+ << true
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()));
ASSERT_EQ(ErrorCodes::NoSuchKey, ssvStatus.getStatus().code());
}
@@ -185,14 +221,18 @@ TEST(SetShardVersionRequest, ParseInitNoConfigServer) {
TEST(SetShardVersionRequest, ParseFullNoNS) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
- auto ssvStatus =
- SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << ""
- << "configdb" << configCS.toString() << "shard"
- << "TestShard"
- << "shardHost" << shardCS.toString() << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch" << chunkVersion.epoch()));
+ auto ssvStatus = SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << ""
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch()));
ASSERT_EQ(ErrorCodes::InvalidNamespace, ssvStatus.getStatus().code());
}
@@ -200,14 +240,18 @@ TEST(SetShardVersionRequest, ParseFullNoNS) {
TEST(SetShardVersionRequest, ParseFullNSContainsDBOnly) {
const ChunkVersion chunkVersion(1, 2, OID::gen());
- auto ssvStatus =
- SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
- << "dbOnly"
- << "configdb" << configCS.toString() << "shard"
- << "TestShard"
- << "shardHost" << shardCS.toString() << "version"
- << Timestamp(chunkVersion.toLong())
- << "versionEpoch" << chunkVersion.epoch()));
+ auto ssvStatus = SetShardVersionRequest::parseFromBSON(BSON("setShardVersion"
+ << "dbOnly"
+ << "configdb"
+ << configCS.toString()
+ << "shard"
+ << "TestShard"
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch()));
ASSERT_EQ(ErrorCodes::InvalidNamespace, ssvStatus.getStatus().code());
}
@@ -226,10 +270,18 @@ TEST(SetShardVersionRequest, ToSSVCommandInit) {
ASSERT_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< ""
- << "init" << true << "authoritative" << true << "configdb" << configCS.toString()
+ << "init"
+ << true
+ << "authoritative"
+ << true
+ << "configdb"
+ << configCS.toString()
<< "shard"
<< "TestShard"
- << "shardHost" << shardCS.toString() << "maxTimeMS" << 30000));
+ << "shardHost"
+ << shardCS.toString()
+ << "maxTimeMS"
+ << 30000));
}
TEST(SetShardVersionRequest, ToSSVCommandInitNoConnectionVersioning) {
@@ -246,11 +298,20 @@ TEST(SetShardVersionRequest, ToSSVCommandInitNoConnectionVersioning) {
ASSERT_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< ""
- << "init" << true << "authoritative" << true << "configdb" << configCS.toString()
+ << "init"
+ << true
+ << "authoritative"
+ << true
+ << "configdb"
+ << configCS.toString()
<< "shard"
<< "TestShard"
- << "shardHost" << shardCS.toString() << "maxTimeMS" << 30000
- << "noConnectionVersioning" << true));
+ << "shardHost"
+ << shardCS.toString()
+ << "maxTimeMS"
+ << 30000
+ << "noConnectionVersioning"
+ << true));
}
TEST(SetShardVersionRequest, ToSSVCommandFull) {
@@ -272,11 +333,20 @@ TEST(SetShardVersionRequest, ToSSVCommandFull) {
ASSERT_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init" << false << "authoritative" << false << "configdb"
- << configCS.toString() << "shard"
+ << "init"
+ << false
+ << "authoritative"
+ << false
+ << "configdb"
+ << configCS.toString()
+ << "shard"
<< "TestShard"
- << "shardHost" << shardCS.toString() << "version"
- << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch()));
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch()));
}
TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
@@ -298,11 +368,20 @@ TEST(SetShardVersionRequest, ToSSVCommandFullAuthoritative) {
ASSERT_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init" << false << "authoritative" << true << "configdb"
- << configCS.toString() << "shard"
+ << "init"
+ << false
+ << "authoritative"
+ << true
+ << "configdb"
+ << configCS.toString()
+ << "shard"
<< "TestShard"
- << "shardHost" << shardCS.toString() << "version"
- << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch()));
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch()));
}
TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioning) {
@@ -324,12 +403,22 @@ TEST(SetShardVersionRequest, ToSSVCommandFullNoConnectionVersioning) {
ASSERT_EQ(ssv.toBSON(),
BSON("setShardVersion"
<< "db.coll"
- << "init" << false << "authoritative" << true << "configdb"
- << configCS.toString() << "shard"
+ << "init"
+ << false
+ << "authoritative"
+ << true
+ << "configdb"
+ << configCS.toString()
+ << "shard"
<< "TestShard"
- << "shardHost" << shardCS.toString() << "version"
- << Timestamp(chunkVersion.toLong()) << "versionEpoch" << chunkVersion.epoch()
- << "noConnectionVersioning" << true));
+ << "shardHost"
+ << shardCS.toString()
+ << "version"
+ << Timestamp(chunkVersion.toLong())
+ << "versionEpoch"
+ << chunkVersion.epoch()
+ << "noConnectionVersioning"
+ << true));
}
} // namespace
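
The one-field-per-line pattern that dominates the hunks above falls out of the fact that BSON(...) expands to a single operator<< chain, so clang-format treats the whole document literal as one expression and, past the column limit, breaks it at every operand. A minimal, self-contained sketch of that chaining (a stand-in builder for illustration only, not MongoDB's actual BSON macro or BSONObjBuilder):

#include <iostream>
#include <sstream>
#include <string>

// Stand-in for the alternating key << value chaining that BSON(...) provides.
struct MiniBuilder {
    std::ostringstream out;
    bool expectValue = false;

    template <typename T>
    MiniBuilder& operator<<(const T& v) {
        out << (expectValue ? ": " : (out.tellp() > 0 ? ", " : "")) << v;
        expectValue = !expectValue;
        return *this;
    }

    std::string str() const {
        return "{" + out.str() + "}";
    }
};

int main() {
    MiniBuilder b;
    // A single expression with many operands; under a 100-column limit,
    // clang-format 3.8 places each operand on its own line, exactly as in
    // the SetShardVersionRequest hunks above.
    b << "setShardVersion"
      << ""
      << "init"
      << true
      << "authoritative"
      << true;
    std::cout << b.str() << "\n";
}
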
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index 8776830647a..607f2fc2a42 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -62,7 +62,10 @@ Status ShardKeyPattern::checkShardKeySize(const BSONObj& shardKey) {
return Status(ErrorCodes::ShardKeyTooBig,
stream() << "shard keys must be less than " << kMaxShardKeySizeBytes
- << " bytes, but key " << shardKey << " is " << shardKey.objsize()
+ << " bytes, but key "
+ << shardKey
+ << " is "
+ << shardKey.objsize()
<< " bytes");
}
diff --git a/src/mongo/s/shard_key_pattern_test.cpp b/src/mongo/s/shard_key_pattern_test.cpp
index bb54fc289b5..a00157179b9 100644
--- a/src/mongo/s/shard_key_pattern_test.cpp
+++ b/src/mongo/s/shard_key_pattern_test.cpp
@@ -54,12 +54,15 @@ TEST(ShardKeyPattern, ValidShardKeyPatternSingle) {
ASSERT(!ShardKeyPattern(BSON("a" << -1)).isValid());
ASSERT(!ShardKeyPattern(BSON("a" << -1.0)).isValid());
ASSERT(!ShardKeyPattern(BSON("a"
- << "1")).isValid());
+ << "1"))
+ .isValid());
ASSERT(ShardKeyPattern(BSON("a"
- << "hashed")).isValid());
+ << "hashed"))
+ .isValid());
ASSERT(!ShardKeyPattern(BSON("a"
- << "hash")).isValid());
+ << "hash"))
+ .isValid());
ASSERT(!ShardKeyPattern(BSON("" << 1)).isValid());
ASSERT(!ShardKeyPattern(BSON("." << 1)).isValid());
}
@@ -73,7 +76,8 @@ TEST(ShardKeyPattern, ValidShardKeyPatternComposite) {
ASSERT(ShardKeyPattern(BSON("a" << 1.0f << "b" << 1.0)).isValid());
ASSERT(!ShardKeyPattern(BSON("a" << 1 << "b" << -1)).isValid());
ASSERT(!ShardKeyPattern(BSON("a" << 1 << "b"
- << "1")).isValid());
+ << "1"))
+ .isValid());
ASSERT(ShardKeyPattern(BSON("a" << 1 << "b" << 1.0 << "c" << 1.0f)).isValid());
ASSERT(!ShardKeyPattern(BSON("a" << 1 << "b." << 1.0)).isValid());
@@ -148,7 +152,8 @@ TEST(ShardKeyPattern, ExtractDocShardKeySingle) {
BSON("a" << regex));
const BSONObj ref = BSON("$ref"
<< "coll"
- << "$id" << 1);
+ << "$id"
+ << 1);
ASSERT_EQUALS(docKey(pattern, BSON("a" << ref)), BSON("a" << ref));
ASSERT_EQUALS(docKey(pattern, BSONObj()), BSONObj());
@@ -172,7 +177,8 @@ TEST(ShardKeyPattern, ExtractDocShardKeyCompound) {
ASSERT_EQUALS(docKey(pattern,
BSON("c" << 30 << "b"
<< "20"
- << "a" << 10)),
+ << "a"
+ << 10)),
fromjson("{a:10, b:'20'}"));
ASSERT_EQUALS(docKey(pattern, fromjson("{a:10, b:[1, 2]}")), BSONObj());
@@ -199,7 +205,8 @@ TEST(ShardKeyPattern, ExtractDocShardKeyNested) {
fromjson("{'a.b':10, c:30}"));
const BSONObj ref = BSON("$ref"
<< "coll"
- << "$id" << 1);
+ << "$id"
+ << 1);
ASSERT_EQUALS(docKey(pattern, BSON("a" << BSON("b" << ref) << "c" << 30)),
BSON("a.b" << ref << "c" << 30));
@@ -306,7 +313,8 @@ TEST(ShardKeyPattern, ExtractQueryShardKeyCompound) {
ASSERT_EQUALS(queryKey(pattern,
BSON("c" << 30 << "b"
<< "20"
- << "a" << 10)),
+ << "a"
+ << 10)),
fromjson("{a:10, b:'20'}"));
ASSERT_EQUALS(queryKey(pattern, fromjson("{a:10, b:[1, 2]}")), BSONObj());
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index 7de40831818..7b96fb78239 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -40,8 +40,8 @@
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#include "mongo/s/shard_key_pattern.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
namespace shardutil {
diff --git a/src/mongo/s/sharding_initialization.cpp b/src/mongo/s/sharding_initialization.cpp
index fee8c72178d..1ed199e497b 100644
--- a/src/mongo/s/sharding_initialization.cpp
+++ b/src/mongo/s/sharding_initialization.cpp
@@ -47,14 +47,14 @@
#include "mongo/rpc/metadata/config_server_metadata.h"
#include "mongo/rpc/metadata/metadata_hook.h"
#include "mongo/s/balancer/balancer_configuration.h"
-#include "mongo/s/client/shard_factory.h"
-#include "mongo/s/client/shard_registry.h"
-#include "mongo/s/client/sharding_network_connection_hook.h"
-#include "mongo/s/grid.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
#include "mongo/s/catalog/replset/dist_lock_catalog_impl.h"
#include "mongo/s/catalog/replset/replset_dist_lock_manager.h"
+#include "mongo/s/client/shard_factory.h"
+#include "mongo/s/client/shard_registry.h"
+#include "mongo/s/client/sharding_network_connection_hook.h"
+#include "mongo/s/grid.h"
#include "mongo/s/query/cluster_cursor_manager.h"
#include "mongo/s/sharding_egress_metadata_hook.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_test_fixture.cpp
index 76d1ff9322a..ef4916f0885 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_test_fixture.cpp
@@ -343,8 +343,9 @@ void ShardingTestFixture::expectConfigCollectionCreate(const HostAndPort& config
ASSERT_EQUALS(configHost, request.target);
ASSERT_EQUALS("config", request.dbname);
- BSONObj expectedCreateCmd = BSON("create" << collName << "capped" << true << "size"
- << cappedSize << "maxTimeMS" << 30000);
+ BSONObj expectedCreateCmd =
+ BSON("create" << collName << "capped" << true << "size" << cappedSize << "maxTimeMS"
+ << 30000);
ASSERT_EQUALS(expectedCreateCmd, request.cmdObj);
return response;
diff --git a/src/mongo/s/sharding_test_fixture.h b/src/mongo/s/sharding_test_fixture.h
index 6758cd345fe..0ed720de482 100644
--- a/src/mongo/s/sharding_test_fixture.h
+++ b/src/mongo/s/sharding_test_fixture.h
@@ -32,8 +32,8 @@
#include "mongo/db/service_context.h"
#include "mongo/executor/network_test_env.h"
-#include "mongo/util/net/message_port_mock.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/net/message_port_mock.h"
namespace mongo {
diff --git a/src/mongo/s/stale_exception.h b/src/mongo/s/stale_exception.h
index 90d6e0060e6..5268da4c5ef 100644
--- a/src/mongo/s/stale_exception.h
+++ b/src/mongo/s/stale_exception.h
@@ -47,8 +47,11 @@ public:
ChunkVersion wanted)
: AssertionException(
str::stream() << raw << " ( ns : " << ns << ", received : " << received.toString()
- << ", wanted : " << wanted.toString() << ", "
- << (code == ErrorCodes::SendStaleConfig ? "send" : "recv") << " )",
+ << ", wanted : "
+ << wanted.toString()
+ << ", "
+ << (code == ErrorCodes::SendStaleConfig ? "send" : "recv")
+ << " )",
code),
_ns(ns),
_received(received),
@@ -56,16 +59,18 @@ public:
/** Preferred if we're rebuilding this from a thrown exception */
StaleConfigException(const std::string& raw, int code, const BSONObj& error)
- : AssertionException(str::stream()
- << raw << " ( ns : " << (error["ns"].type() == String
- ? error["ns"].String()
- : std::string("<unknown>"))
- << ", received : "
- << ChunkVersion::fromBSON(error, "vReceived").toString()
- << ", wanted : "
- << ChunkVersion::fromBSON(error, "vWanted").toString() << ", "
- << (code == ErrorCodes::SendStaleConfig ? "send" : "recv") << " )",
- code),
+ : AssertionException(
+ str::stream() << raw << " ( ns : " << (error["ns"].type() == String
+ ? error["ns"].String()
+ : std::string("<unknown>"))
+ << ", received : "
+ << ChunkVersion::fromBSON(error, "vReceived").toString()
+ << ", wanted : "
+ << ChunkVersion::fromBSON(error, "vWanted").toString()
+ << ", "
+ << (code == ErrorCodes::SendStaleConfig ? "send" : "recv")
+ << " )",
+ code),
// For legacy reasons, we may not always get a namespace here
_ns(error["ns"].type() == String ? error["ns"].String() : ""),
_received(ChunkVersion::fromBSON(error, "vReceived")),
diff --git a/src/mongo/s/write_ops/batch_downconvert_test.cpp b/src/mongo/s/write_ops/batch_downconvert_test.cpp
index 548bfb6f732..526f9096b8d 100644
--- a/src/mongo/s/write_ops/batch_downconvert_test.cpp
+++ b/src/mongo/s/write_ops/batch_downconvert_test.cpp
@@ -202,9 +202,14 @@ TEST(LegacyGLESuppress, StripCode) {
TEST(LegacyGLESuppress, TimeoutDupError24) {
const BSONObj gleResponse = BSON("ok" << 0.0 << "err"
<< "message"
- << "code" << 12345 << "err"
+ << "code"
+ << 12345
+ << "err"
<< "timeout"
- << "code" << 56789 << "wtimeout" << true);
+ << "code"
+ << 56789
+ << "wtimeout"
+ << true);
BSONObj stripped = stripNonWCInfo(gleResponse);
ASSERT_EQUALS(stripped.nFields(), 4);
diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp
index 74b84e4503c..dea991b80ef 100644
--- a/src/mongo/s/write_ops/batch_write_exec_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp
@@ -31,8 +31,8 @@
#include "mongo/s/write_ops/batch_write_exec.h"
#include "mongo/base/owned_pointer_vector.h"
-#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/client/remote_command_targeter_factory_mock.h"
+#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/s/catalog/type_shard.h"
#include "mongo/s/client/mock_multi_write_command.h"
#include "mongo/s/client/shard_registry.h"
diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp
index 38adfd8aaad..fbd0b945b23 100644
--- a/src/mongo/s/write_ops/batch_write_op.cpp
+++ b/src/mongo/s/write_ops/batch_write_op.cpp
@@ -572,7 +572,7 @@ void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch,
vector<WriteErrorDetail*>::iterator itemErrorIt = itemErrors.begin();
int index = 0;
WriteErrorDetail* lastError = NULL;
- for (vector<TargetedWrite*>::const_iterator it = targetedBatch.getWrites().begin();
+ for (vector<TargetedWrite *>::const_iterator it = targetedBatch.getWrites().begin();
it != targetedBatch.getWrites().end();
++it, ++index) {
const TargetedWrite* write = *it;
diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp
index 62dbde7093b..a20f5c98f47 100644
--- a/src/mongo/s/write_ops/batch_write_op_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_op_test.cpp
@@ -31,9 +31,9 @@
#include "mongo/base/owned_pointer_vector.h"
#include "mongo/db/operation_context_noop.h"
#include "mongo/s/mock_ns_targeter.h"
+#include "mongo/s/write_ops/batch_write_op.h"
#include "mongo/s/write_ops/batched_command_request.h"
#include "mongo/s/write_ops/batched_delete_document.h"
-#include "mongo/s/write_ops/batch_write_op.h"
#include "mongo/s/write_ops/write_error_detail.h"
#include "mongo/unittest/unittest.h"
@@ -1754,7 +1754,8 @@ TEST(WriteOpLimitTests, UpdateOverheadIncluded) {
string dataString(updateDataBytes -
BSON("x" << 1 << "data"
- << "").objsize(),
+ << "")
+ .objsize(),
'x');
BatchedCommandRequest request(BatchedCommandRequest::BatchType_Update);
diff --git a/src/mongo/s/write_ops/batched_command_request.h b/src/mongo/s/write_ops/batched_command_request.h
index 1d4d51d8405..a86c28f120d 100644
--- a/src/mongo/s/write_ops/batched_command_request.h
+++ b/src/mongo/s/write_ops/batched_command_request.h
@@ -32,9 +32,9 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/s/chunk_version.h"
+#include "mongo/s/write_ops/batched_delete_request.h"
#include "mongo/s/write_ops/batched_insert_request.h"
#include "mongo/s/write_ops/batched_update_request.h"
-#include "mongo/s/write_ops/batched_delete_request.h"
namespace mongo {
diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp
index 6b9cda96a5a..7a09e8ec3b6 100644
--- a/src/mongo/s/write_ops/batched_command_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_request_test.cpp
@@ -41,8 +41,12 @@ TEST(BatchedCommandRequest, BasicInsert) {
BSONObj origInsertRequestObj = BSON("insert"
<< "test"
- << "documents" << insertArray << "writeConcern"
- << BSON("w" << 1) << "ordered" << true);
+ << "documents"
+ << insertArray
+ << "writeConcern"
+ << BSON("w" << 1)
+ << "ordered"
+ << true);
std::string errMsg;
BatchedCommandRequest insertRequest(BatchedCommandRequest::BatchType_Insert);
@@ -59,8 +63,13 @@ TEST(BatchedCommandRequest, InsertWithShardVersion) {
BSONObj origInsertRequestObj = BSON("insert"
<< "test"
- << "documents" << insertArray << "writeConcern"
- << BSON("w" << 1) << "ordered" << true << "shardVersion"
+ << "documents"
+ << insertArray
+ << "writeConcern"
+ << BSON("w" << 1)
+ << "ordered"
+ << true
+ << "shardVersion"
<< BSON_ARRAY(Timestamp(1, 2) << epoch));
std::string errMsg;
@@ -98,7 +107,9 @@ TEST(BatchedCommandRequest, InsertClone) {
TEST(BatchedCommandRequest, InsertIndexClone) {
BSONObj indexSpec(BSON("ns"
<< "xyz.user"
- << "key" << BSON("x" << 1) << "name"
+ << "key"
+ << BSON("x" << 1)
+ << "name"
<< "y"));
auto insertRequest = stdx::make_unique<BatchedInsertRequest>();
diff --git a/src/mongo/s/write_ops/batched_command_response.h b/src/mongo/s/write_ops/batched_command_response.h
index 6e22ae63cfe..58f2631833f 100644
--- a/src/mongo/s/write_ops/batched_command_response.h
+++ b/src/mongo/s/write_ops/batched_command_response.h
@@ -36,8 +36,8 @@
#include "mongo/db/repl/optime.h"
#include "mongo/rpc/write_concern_error_detail.h"
#include "mongo/s/bson_serializable.h"
-#include "mongo/s/write_ops/write_error_detail.h"
#include "mongo/s/write_ops/batched_upsert_detail.h"
+#include "mongo/s/write_ops/write_error_detail.h"
namespace mongo {
diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp
index 33d35526d77..400962b3006 100644
--- a/src/mongo/s/write_ops/batched_command_response_test.cpp
+++ b/src/mongo/s/write_ops/batched_command_response_test.cpp
@@ -53,13 +53,16 @@ TEST(BatchedCommandResponse, Basic) {
BSONObj writeConcernError(BSON("code" << 8 << "errInfo" << BSON("a" << 1) << "errmsg"
<< "norepl"));
- BSONObj origResponseObj =
- BSON(BatchedCommandResponse::ok(false)
- << BatchedCommandResponse::errCode(-1)
- << BatchedCommandResponse::errMessage("this batch didn't work")
- << BatchedCommandResponse::n(0) << "opTime" << mongo::Timestamp(1ULL)
- << BatchedCommandResponse::writeErrors() << writeErrorsArray
- << BatchedCommandResponse::writeConcernError() << writeConcernError);
+ BSONObj origResponseObj = BSON(BatchedCommandResponse::ok(false)
+ << BatchedCommandResponse::errCode(-1)
+ << BatchedCommandResponse::errMessage("this batch didn't work")
+ << BatchedCommandResponse::n(0)
+ << "opTime"
+ << mongo::Timestamp(1ULL)
+ << BatchedCommandResponse::writeErrors()
+ << writeErrorsArray
+ << BatchedCommandResponse::writeConcernError()
+ << writeConcernError);
string errMsg;
BatchedCommandResponse response;
diff --git a/src/mongo/s/write_ops/batched_delete_request_test.cpp b/src/mongo/s/write_ops/batched_delete_request_test.cpp
index a9fe5ca0c5f..dc816003c13 100644
--- a/src/mongo/s/write_ops/batched_delete_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_delete_request_test.cpp
@@ -46,10 +46,10 @@ TEST(BatchedDeleteRequest, Basic) {
BSON(BatchedDeleteDocument::query(BSON("a" << 1)) << BatchedDeleteDocument::limit(1))
<< BSON(BatchedDeleteDocument::query(BSON("b" << 1)) << BatchedDeleteDocument::limit(1)));
- BSONObj origDeleteRequestObj = BSON(BatchedDeleteRequest::collName("test")
- << BatchedDeleteRequest::deletes() << deleteArray
- << BatchedDeleteRequest::writeConcern(BSON("w" << 1))
- << BatchedDeleteRequest::ordered(true));
+ BSONObj origDeleteRequestObj = BSON(
+ BatchedDeleteRequest::collName("test") << BatchedDeleteRequest::deletes() << deleteArray
+ << BatchedDeleteRequest::writeConcern(BSON("w" << 1))
+ << BatchedDeleteRequest::ordered(true));
string errMsg;
BatchedDeleteRequest request;
diff --git a/src/mongo/s/write_ops/batched_insert_request.cpp b/src/mongo/s/write_ops/batched_insert_request.cpp
index c50816af23f..6d17bfd5b1e 100644
--- a/src/mongo/s/write_ops/batched_insert_request.cpp
+++ b/src/mongo/s/write_ops/batched_insert_request.cpp
@@ -147,8 +147,8 @@ bool BatchedInsertRequest::parseBSON(StringData dbName, const BSONObj& source, s
std::initializer_list<StringData> ignoredFields = {"maxTimeMS", "shardVersion"};
if (std::find(ignoredFields.begin(), ignoredFields.end(), sourceEl.fieldName()) ==
ignoredFields.end()) {
- *errMsg = str::stream()
- << "Unknown option to insert command: " << sourceEl.fieldName();
+ *errMsg = str::stream() << "Unknown option to insert command: "
+ << sourceEl.fieldName();
return false;
}
}
diff --git a/src/mongo/s/write_ops/batched_insert_request_test.cpp b/src/mongo/s/write_ops/batched_insert_request_test.cpp
index 8ec4f7621c1..ec9efb48aa5 100644
--- a/src/mongo/s/write_ops/batched_insert_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_insert_request_test.cpp
@@ -45,10 +45,10 @@ namespace {
TEST(BatchedInsertRequest, Basic) {
BSONArray insertArray = BSON_ARRAY(BSON("a" << 1) << BSON("b" << 1));
- BSONObj origInsertRequestObj = BSON(BatchedInsertRequest::collName("test")
- << BatchedInsertRequest::documents() << insertArray
- << BatchedInsertRequest::writeConcern(BSON("w" << 1))
- << BatchedInsertRequest::ordered(true));
+ BSONObj origInsertRequestObj = BSON(
+ BatchedInsertRequest::collName("test") << BatchedInsertRequest::documents() << insertArray
+ << BatchedInsertRequest::writeConcern(BSON("w" << 1))
+ << BatchedInsertRequest::ordered(true));
string errMsg;
BatchedInsertRequest request;
diff --git a/src/mongo/s/write_ops/batched_update_request_test.cpp b/src/mongo/s/write_ops/batched_update_request_test.cpp
index a4ce7067e82..01da12a7719 100644
--- a/src/mongo/s/write_ops/batched_update_request_test.cpp
+++ b/src/mongo/s/write_ops/batched_update_request_test.cpp
@@ -42,18 +42,20 @@ using std::string;
namespace {
TEST(BatchedUpdateRequest, Basic) {
- BSONArray updateArray = BSON_ARRAY(
- BSON(BatchedUpdateDocument::query(BSON("a" << 1))
- << BatchedUpdateDocument::updateExpr(BSON("$set" << BSON("a" << 1)))
- << BatchedUpdateDocument::multi(false) << BatchedUpdateDocument::upsert(false))
- << BSON(BatchedUpdateDocument::query(BSON("b" << 1))
- << BatchedUpdateDocument::updateExpr(BSON("$set" << BSON("b" << 2)))
- << BatchedUpdateDocument::multi(false) << BatchedUpdateDocument::upsert(false)));
+ BSONArray updateArray =
+ BSON_ARRAY(BSON(BatchedUpdateDocument::query(BSON("a" << 1))
+ << BatchedUpdateDocument::updateExpr(BSON("$set" << BSON("a" << 1)))
+ << BatchedUpdateDocument::multi(false)
+ << BatchedUpdateDocument::upsert(false))
+ << BSON(BatchedUpdateDocument::query(BSON("b" << 1))
+ << BatchedUpdateDocument::updateExpr(BSON("$set" << BSON("b" << 2)))
+ << BatchedUpdateDocument::multi(false)
+ << BatchedUpdateDocument::upsert(false)));
- BSONObj origUpdateRequestObj = BSON(BatchedUpdateRequest::collName("test")
- << BatchedUpdateRequest::updates() << updateArray
- << BatchedUpdateRequest::writeConcern(BSON("w" << 1))
- << BatchedUpdateRequest::ordered(true));
+ BSONObj origUpdateRequestObj = BSON(
+ BatchedUpdateRequest::collName("test") << BatchedUpdateRequest::updates() << updateArray
+ << BatchedUpdateRequest::writeConcern(BSON("w" << 1))
+ << BatchedUpdateRequest::ordered(true));
string errMsg;
BatchedUpdateRequest request;
diff --git a/src/mongo/s/write_ops/write_op.h b/src/mongo/s/write_ops/write_op.h
index bd50896b04a..d56be517fe7 100644
--- a/src/mongo/s/write_ops/write_op.h
+++ b/src/mongo/s/write_ops/write_op.h
@@ -33,8 +33,8 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/s/ns_targeter.h"
-#include "mongo/s/write_ops/write_error_detail.h"
#include "mongo/s/write_ops/batched_command_request.h"
+#include "mongo/s/write_ops/write_error_detail.h"
namespace mongo {
diff --git a/src/mongo/scripting/bson_template_evaluator.h b/src/mongo/scripting/bson_template_evaluator.h
index e5b6c5925a6..534f5ed7c26 100644
--- a/src/mongo/scripting/bson_template_evaluator.h
+++ b/src/mongo/scripting/bson_template_evaluator.h
@@ -47,8 +47,8 @@
#include <string>
#include "mongo/db/jsobj.h"
-#include "mongo/stdx/functional.h"
#include "mongo/platform/random.h"
+#include "mongo/stdx/functional.h"
namespace mongo {
@@ -94,7 +94,8 @@ public:
typedef stdx::function<Status(BsonTemplateEvaluator* btl,
const char* fieldName,
const BSONObj& in,
- BSONObjBuilder& builder)> OperatorFn;
+ BSONObjBuilder& builder)>
+ OperatorFn;
/*
* @params seed : Random seed to be used when generating random data
diff --git a/src/mongo/scripting/bson_template_evaluator_test.cpp b/src/mongo/scripting/bson_template_evaluator_test.cpp
index d4a4c2850f4..51e573fc7e0 100644
--- a/src/mongo/scripting/bson_template_evaluator_test.cpp
+++ b/src/mongo/scripting/bson_template_evaluator_test.cpp
@@ -26,8 +26,8 @@
* then also delete it in the license file.
*/
-#include "mongo/db/jsobj.h"
#include "mongo/scripting/bson_template_evaluator.h"
+#include "mongo/db/jsobj.h"
#include "mongo/unittest/unittest.h"
namespace mongo {
@@ -94,7 +94,8 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField" << randObj << "hello"
<< "world"
- << "id" << 1),
+ << "id"
+ << 1),
builder8));
BSONObj obj8 = builder8.obj();
ASSERT_EQUALS(obj8.nFields(), 3);
@@ -121,7 +122,8 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField1" << randObj << "hello"
<< "world"
- << "randField2" << randObj),
+ << "randField2"
+ << randObj),
builder10));
BSONObj obj10 = builder10.obj();
ASSERT_EQUALS(obj10.nFields(), 3);
@@ -138,7 +140,8 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("testArray" << BSON_ARRAY(0 << 5 << 10 << 20) << "hello"
<< "world"
- << "randField" << randObj),
+ << "randField"
+ << randObj),
builder11));
BSONObj obj11 = builder11.obj();
ASSERT_EQUALS(obj11.nFields(), 3);
@@ -188,7 +191,8 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT_PLUS_THREAD) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField" << randObj << "hello"
<< "world"
- << "id" << 1),
+ << "id"
+ << 1),
builder8));
BSONObj obj8 = builder8.obj();
ASSERT_EQUALS(obj8.nFields(), 3);
@@ -215,7 +219,8 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT_PLUS_THREAD) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField1" << randObj << "hello"
<< "world"
- << "randField2" << randObj),
+ << "randField2"
+ << randObj),
builder10));
BSONObj obj10 = builder10.obj();
ASSERT_EQUALS(obj10.nFields(), 3);
@@ -234,7 +239,8 @@ TEST(BSONTemplateEvaluatorTest, RAND_INT_PLUS_THREAD) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("testArray" << BSON_ARRAY(0 << 5 << 10 << 20) << "hello"
<< "world"
- << "randField" << randObj),
+ << "randField"
+ << randObj),
builder11));
BSONObj obj11 = builder11.obj();
ASSERT_EQUALS(obj11.nFields(), 3);
@@ -438,7 +444,8 @@ TEST(BSONTemplateEvaluatorTest, RAND_STRING) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("id" << 1 << "hello"
<< "world"
- << "randField" << randObj),
+ << "randField"
+ << randObj),
builder6));
BSONObj obj6 = builder6.obj();
ASSERT_EQUALS(obj6.nFields(), 3);
@@ -452,7 +459,8 @@ TEST(BSONTemplateEvaluatorTest, RAND_STRING) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField" << randObj << "hello"
<< "world"
- << "id" << 1),
+ << "id"
+ << 1),
builder7));
BSONObj obj7 = builder7.obj();
ASSERT_EQUALS(obj7.nFields(), 3);
@@ -477,7 +485,8 @@ TEST(BSONTemplateEvaluatorTest, RAND_STRING) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("randField1" << randObj << "hello"
<< "world"
- << "randField2" << randObj),
+ << "randField2"
+ << randObj),
builder10));
BSONObj obj10 = builder10.obj();
ASSERT_EQUALS(obj10.nFields(), 3);
@@ -493,7 +502,8 @@ TEST(BSONTemplateEvaluatorTest, RAND_STRING) {
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("testArray" << BSON_ARRAY(0 << 5 << 10 << 20) << "hello"
<< "world"
- << "randField" << randObj),
+ << "randField"
+ << randObj),
builder11));
BSONObj obj11 = builder11.obj();
ASSERT_EQUALS(obj11.nFields(), 3);
@@ -548,7 +558,9 @@ TEST(BSONTemplateEvaluatorTest, CONCAT) {
ASSERT_EQUALS(obj4.nFields(), 3);
expectedObj = BSON("concatField1"
<< "hello world"
- << "middleKey" << 1 << "concatField2"
+ << "middleKey"
+ << 1
+ << "concatField2"
<< "hello world");
ASSERT_EQUALS(obj4.equal(expectedObj), true);
@@ -670,7 +682,8 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
BSONObj bazObj = BSON("baz" << innerObj);
outerObj = BSON("foo"
<< "hi"
- << "bar" << bazObj);
+ << "bar"
+ << bazObj);
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("id" << outerObj), builder3));
BSONObj obj3 = builder3.obj();
@@ -691,7 +704,10 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
<< "bye");
outerObj = BSON("foo"
<< "hi"
- << "bar" << barObj4 << "baz" << bazObj4);
+ << "bar"
+ << barObj4
+ << "baz"
+ << bazObj4);
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess,
t.evaluate(BSON("id" << outerObj), builder4));
BSONObj obj4 = builder4.obj();
@@ -715,7 +731,8 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
<< "let"
<< "target"
<< "x"
- << "value" << innerObj);
+ << "value"
+ << innerObj);
ASSERT_EQUALS(BsonTemplateEvaluator::StatusBadOperator, t.evaluate(outerObj, builder5));
// Test success for elements in an array that need evaluation
@@ -726,7 +743,8 @@ TEST(BSONTemplateEvaluatorTest, NESTING) {
BSONObj elem3 = BSON("baz" << 42);
outerObj = BSON("foo"
<< "hi"
- << "bar" << BSON_ARRAY(elem1 << elem2 << elem3 << 7));
+ << "bar"
+ << BSON_ARRAY(elem1 << elem2 << elem3 << 7));
ASSERT_EQUALS(BsonTemplateEvaluator::StatusSuccess, t.evaluate(outerObj, builder6));
BSONObj obj6 = builder6.obj();
BSONElement obj6_bar = obj6["bar"];
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
index d693601276d..392dcdc6e8b 100644
--- a/src/mongo/scripting/engine.cpp
+++ b/src/mongo/scripting/engine.cpp
@@ -33,13 +33,13 @@
#include "mongo/scripting/engine.h"
-#include <cctype>
#include <boost/filesystem/operations.hpp>
+#include <cctype>
#include "mongo/client/dbclientcursor.h"
#include "mongo/client/dbclientinterface.h"
-#include "mongo/db/service_context.h"
#include "mongo/db/operation_context.h"
+#include "mongo/db/service_context.h"
#include "mongo/platform/unordered_set.h"
#include "mongo/util/file.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/scripting/engine.h b/src/mongo/scripting/engine.h
index 219b5a4f5c8..98384f1515a 100644
--- a/src/mongo/scripting/engine.h
+++ b/src/mongo/scripting/engine.h
@@ -29,8 +29,8 @@
#pragma once
-#include "mongo/db/service_context.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/service_context.h"
#include "mongo/platform/atomic_word.h"
namespace mongo {
diff --git a/src/mongo/scripting/mozjs/bindata.cpp b/src/mongo/scripting/mozjs/bindata.cpp
index ba07c3164ad..d7bacb5558e 100644
--- a/src/mongo/scripting/mozjs/bindata.cpp
+++ b/src/mongo/scripting/mozjs/bindata.cpp
@@ -30,8 +30,8 @@
#include "mongo/scripting/mozjs/bindata.h"
-#include <iomanip>
#include <cctype>
+#include <iomanip>
#include "mongo/scripting/mozjs/implscope.h"
#include "mongo/scripting/mozjs/internedstring.h"
diff --git a/src/mongo/scripting/mozjs/db.cpp b/src/mongo/scripting/mozjs/db.cpp
index 565cf20cb1c..ed8575b0f4a 100644
--- a/src/mongo/scripting/mozjs/db.cpp
+++ b/src/mongo/scripting/mozjs/db.cpp
@@ -32,13 +32,13 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
+#include "mongo/s/d_state.h"
#include "mongo/scripting/mozjs/idwrapper.h"
#include "mongo/scripting/mozjs/implscope.h"
#include "mongo/scripting/mozjs/internedstring.h"
#include "mongo/scripting/mozjs/objectwrapper.h"
#include "mongo/scripting/mozjs/valuereader.h"
#include "mongo/scripting/mozjs/valuewriter.h"
-#include "mongo/s/d_state.h"
namespace mongo {
namespace mozjs {
diff --git a/src/mongo/scripting/mozjs/dbcollection.cpp b/src/mongo/scripting/mozjs/dbcollection.cpp
index 8d094721f8f..e7c6bb67ae0 100644
--- a/src/mongo/scripting/mozjs/dbcollection.cpp
+++ b/src/mongo/scripting/mozjs/dbcollection.cpp
@@ -32,12 +32,12 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
+#include "mongo/s/d_state.h"
#include "mongo/scripting/mozjs/bson.h"
#include "mongo/scripting/mozjs/db.h"
#include "mongo/scripting/mozjs/implscope.h"
#include "mongo/scripting/mozjs/objectwrapper.h"
#include "mongo/scripting/mozjs/valuewriter.h"
-#include "mongo/s/d_state.h"
namespace mongo {
namespace mozjs {
diff --git a/src/mongo/scripting/mozjs/implscope.cpp b/src/mongo/scripting/mozjs/implscope.cpp
index f8978b01b7d..73a18238af1 100644
--- a/src/mongo/scripting/mozjs/implscope.cpp
+++ b/src/mongo/scripting/mozjs/implscope.cpp
@@ -32,9 +32,9 @@
#include "mongo/scripting/mozjs/implscope.h"
+#include <js/CharacterEncoding.h>
#include <jscustomallocator.h>
#include <jsfriendapi.h>
-#include <js/CharacterEncoding.h>
#include "mongo/base/error_codes.h"
#include "mongo/db/operation_context.h"
diff --git a/src/mongo/scripting/mozjs/implscope.h b/src/mongo/scripting/mozjs/implscope.h
index be125f3e987..59aaa297ff0 100644
--- a/src/mongo/scripting/mozjs/implscope.h
+++ b/src/mongo/scripting/mozjs/implscope.h
@@ -53,9 +53,9 @@
#include "mongo/scripting/mozjs/mongo.h"
#include "mongo/scripting/mozjs/mongohelpers.h"
#include "mongo/scripting/mozjs/nativefunction.h"
+#include "mongo/scripting/mozjs/numberdecimal.h"
#include "mongo/scripting/mozjs/numberint.h"
#include "mongo/scripting/mozjs/numberlong.h"
-#include "mongo/scripting/mozjs/numberdecimal.h"
#include "mongo/scripting/mozjs/object.h"
#include "mongo/scripting/mozjs/oid.h"
#include "mongo/scripting/mozjs/regexp.h"
diff --git a/src/mongo/scripting/mozjs/jscustomallocator.cpp b/src/mongo/scripting/mozjs/jscustomallocator.cpp
index a817d2a8cad..ea7a3346350 100644
--- a/src/mongo/scripting/mozjs/jscustomallocator.cpp
+++ b/src/mongo/scripting/mozjs/jscustomallocator.cpp
@@ -29,12 +29,12 @@
#include "mongo/platform/basic.h"
#include <cstddef>
-#include <type_traits>
#include <jscustomallocator.h>
+#include <type_traits>
#include "mongo/config.h"
-#include "mongo/util/concurrency/threadlocal.h"
#include "mongo/scripting/mozjs/implscope.h"
+#include "mongo/util/concurrency/threadlocal.h"
#ifdef __linux__
#include <malloc.h>
@@ -189,10 +189,13 @@ void js_free(void* p) {
mongo::sm::total_bytes = tb - current;
}
- mongo::sm::wrap_alloc([](void* ptr, size_t b) {
- std::free(ptr);
- return nullptr;
- }, p, 0);
+ mongo::sm::wrap_alloc(
+ [](void* ptr, size_t b) {
+ std::free(ptr);
+ return nullptr;
+ },
+ p,
+ 0);
}
void* js_realloc(void* p, size_t bytes) {
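
The js_free() hunk above shows another recurring clang-format 3.8 behavior: when a lambda argument is followed by further arguments, the formatter breaks after the opening parenthesis, indents the lambda body as a block, and gives every trailing argument its own line. A compilable sketch under assumed names (wrap_alloc here is a hypothetical stand-in with the shape implied by the call site, not MongoDB's mongo::sm::wrap_alloc):

#include <cstddef>
#include <cstdlib>

// Hypothetical stand-in matching the call shape seen above: a callback,
// a pointer, and a byte count.
template <typename F>
void* wrap_alloc(F func, void* ptr, std::size_t bytes) {
    return func(ptr, bytes);
}

void js_free_sketch(void* p) {
    // Laid out the way clang-format 3.8 handles a non-final lambda argument:
    // the lambda body gets its own indented block, and each remaining
    // argument lands on its own line.
    wrap_alloc(
        [](void* ptr, std::size_t) {
            std::free(ptr);
            return nullptr;
        },
        p,
        0);
}

int main() {
    js_free_sketch(std::malloc(16));
    return 0;
}
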
diff --git a/src/mongo/scripting/mozjs/jsthread.cpp b/src/mongo/scripting/mozjs/jsthread.cpp
index 783272c7d75..a8f72a56039 100644
--- a/src/mongo/scripting/mozjs/jsthread.cpp
+++ b/src/mongo/scripting/mozjs/jsthread.cpp
@@ -32,8 +32,8 @@
#include "mongo/scripting/mozjs/jsthread.h"
-#include <cstdio>
#include "vm/PosixNSPR.h"
+#include <cstdio>
#include "mongo/db/jsobj.h"
#include "mongo/scripting/mozjs/implscope.h"
diff --git a/src/mongo/scripting/mozjs/maxkey.cpp b/src/mongo/scripting/mozjs/maxkey.cpp
index 0ba67d63f8f..411f71dd746 100644
--- a/src/mongo/scripting/mozjs/maxkey.cpp
+++ b/src/mongo/scripting/mozjs/maxkey.cpp
@@ -30,8 +30,8 @@
#include "mongo/scripting/mozjs/maxkey.h"
-#include "mongo/scripting/mozjs/internedstring.h"
#include "mongo/scripting/mozjs/implscope.h"
+#include "mongo/scripting/mozjs/internedstring.h"
#include "mongo/scripting/mozjs/objectwrapper.h"
#include "mongo/scripting/mozjs/valuereader.h"
#include "mongo/scripting/mozjs/wrapconstrainedmethod.h"
diff --git a/src/mongo/scripting/mozjs/minkey.cpp b/src/mongo/scripting/mozjs/minkey.cpp
index c4906fba6f5..c85ca2f6b25 100644
--- a/src/mongo/scripting/mozjs/minkey.cpp
+++ b/src/mongo/scripting/mozjs/minkey.cpp
@@ -30,8 +30,8 @@
#include "mongo/scripting/mozjs/minkey.h"
-#include "mongo/scripting/mozjs/internedstring.h"
#include "mongo/scripting/mozjs/implscope.h"
+#include "mongo/scripting/mozjs/internedstring.h"
#include "mongo/scripting/mozjs/objectwrapper.h"
#include "mongo/scripting/mozjs/valuereader.h"
#include "mongo/scripting/mozjs/wrapconstrainedmethod.h"
diff --git a/src/mongo/scripting/mozjs/mongo.cpp b/src/mongo/scripting/mozjs/mongo.cpp
index 53bf47c77c9..1b7ce7dcd21 100644
--- a/src/mongo/scripting/mozjs/mongo.cpp
+++ b/src/mongo/scripting/mozjs/mongo.cpp
@@ -374,11 +374,13 @@ void MongoBase::Functions::auth::call(JSContext* cx, JS::CallArgs args) {
params = ValueWriter(cx, args.get(0)).toBSON();
break;
case 3:
- params = BSON(saslCommandMechanismFieldName
- << "MONGODB-CR" << saslCommandUserDBFieldName
- << ValueWriter(cx, args[0]).toString() << saslCommandUserFieldName
- << ValueWriter(cx, args[1]).toString() << saslCommandPasswordFieldName
- << ValueWriter(cx, args[2]).toString());
+ params =
+ BSON(saslCommandMechanismFieldName << "MONGODB-CR" << saslCommandUserDBFieldName
+ << ValueWriter(cx, args[0]).toString()
+ << saslCommandUserFieldName
+ << ValueWriter(cx, args[1]).toString()
+ << saslCommandPasswordFieldName
+ << ValueWriter(cx, args[2]).toString());
break;
default:
uasserted(ErrorCodes::BadValue, "mongoAuth takes 1 object or 3 string arguments");
@@ -485,7 +487,8 @@ void MongoBase::Functions::copyDatabaseWithSCRAM::call(JSContext* cx, JS::CallAr
BSONObj saslFirstCommandPrefix =
BSON("copydbsaslstart" << 1 << "fromhost" << fromHost << "fromdb" << fromDb
- << saslCommandMechanismFieldName << "SCRAM-SHA-1");
+ << saslCommandMechanismFieldName
+ << "SCRAM-SHA-1");
BSONObj saslFollowupCommandPrefix =
BSON("copydb" << 1 << "fromhost" << fromHost << "fromdb" << fromDb << "todb" << toDb);
diff --git a/src/mongo/scripting/mozjs/objectwrapper.cpp b/src/mongo/scripting/mozjs/objectwrapper.cpp
index 0f891b549fe..c9a800fee2e 100644
--- a/src/mongo/scripting/mozjs/objectwrapper.cpp
+++ b/src/mongo/scripting/mozjs/objectwrapper.cpp
@@ -524,8 +524,11 @@ BSONObj ObjectWrapper::toBSON() {
const int sizeWithEOO = b.len() + 1 /*EOO*/ - 4 /*BSONObj::Holder ref count*/;
uassert(17260,
str::stream() << "Converting from JavaScript to BSON failed: "
- << "Object size " << sizeWithEOO << " exceeds limit of "
- << BSONObjMaxInternalSize << " bytes.",
+ << "Object size "
+ << sizeWithEOO
+ << " exceeds limit of "
+ << BSONObjMaxInternalSize
+ << " bytes.",
sizeWithEOO <= BSONObjMaxInternalSize);
return b.obj();
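
str::stream() expressions account for much of this commit's churn for the same reason: the whole diagnostic message is one operator<< chain. A self-contained approximation of the pattern (a toy str::stream, not the real implementation from mongo/util/mongoutils/str.h), formatted the way the hunks above now read:

#include <iostream>
#include <sstream>
#include <string>

namespace str {
// Toy version of the string-builder idiom used in the uassert/uasserted
// messages above.
class stream {
public:
    template <typename T>
    stream& operator<<(const T& v) {
        _ss << v;
        return *this;
    }
    operator std::string() const {
        return _ss.str();
    }

private:
    std::ostringstream _ss;
};
}  // namespace str

int main() {
    const int sizeWithEOO = 17 * 1024 * 1024;      // illustrative value only
    const int maxInternalSize = 16 * 1024 * 1024;  // not the real constant
    // One operand per line once the chain exceeds the 100-column limit,
    // matching the ObjectWrapper::toBSON() hunk above.
    std::string msg = str::stream() << "Converting from JavaScript to BSON failed: "
                                    << "Object size "
                                    << sizeWithEOO
                                    << " exceeds limit of "
                                    << maxInternalSize
                                    << " bytes.";
    std::cout << msg << "\n";
    return 0;
}
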
diff --git a/src/mongo/scripting/mozjs/proxyscope.cpp b/src/mongo/scripting/mozjs/proxyscope.cpp
index 7c360302d31..d5233f8379d 100644
--- a/src/mongo/scripting/mozjs/proxyscope.cpp
+++ b/src/mongo/scripting/mozjs/proxyscope.cpp
@@ -336,11 +336,9 @@ void MozJSProxyScope::implThread(void* arg) {
while (true) {
stdx::unique_lock<stdx::mutex> lk(proxy->_mutex);
- proxy->_condvar.wait(lk,
- [proxy] {
- return proxy->_state == State::ProxyRequest ||
- proxy->_state == State::Shutdown;
- });
+ proxy->_condvar.wait(lk, [proxy] {
+ return proxy->_state == State::ProxyRequest || proxy->_state == State::Shutdown;
+ });
if (proxy->_state == State::Shutdown)
break;
diff --git a/src/mongo/scripting/mozjs/timestamp.cpp b/src/mongo/scripting/mozjs/timestamp.cpp
index 48364d62550..99d83f7c343 100644
--- a/src/mongo/scripting/mozjs/timestamp.cpp
+++ b/src/mongo/scripting/mozjs/timestamp.cpp
@@ -56,7 +56,9 @@ double getTimestampArg(JSContext* cx, JS::CallArgs args, int idx, std::string na
if (val < 0 || val > maxArgVal) {
uasserted(ErrorCodes::BadValue,
str::stream() << name << " must be non-negative and not greater than "
- << maxArgVal << ", got " << val);
+ << maxArgVal
+ << ", got "
+ << val);
}
return val;
}
diff --git a/src/mongo/scripting/mozjs/wrapconstrainedmethod.h b/src/mongo/scripting/mozjs/wrapconstrainedmethod.h
index db21289cb93..74ea796665e 100644
--- a/src/mongo/scripting/mozjs/wrapconstrainedmethod.h
+++ b/src/mongo/scripting/mozjs/wrapconstrainedmethod.h
@@ -95,19 +95,22 @@ bool wrapConstrainedMethod(JSContext* cx, unsigned argc, JS::Value* vp) {
uasserted(ErrorCodes::BadValue,
str::stream() << "Cannot call \"" << T::name()
<< "\" on non-object of type \""
- << ValueWriter(cx, args.thisv()).typeAsString() << "\"");
+ << ValueWriter(cx, args.thisv()).typeAsString()
+ << "\"");
}
if (!instanceOf<Args..., void>(getScope(cx), &isProto, args.thisv())) {
uasserted(ErrorCodes::BadValue,
str::stream() << "Cannot call \"" << T::name() << "\" on object of type \""
- << ObjectWrapper(cx, args.thisv()).getClassName() << "\"");
+ << ObjectWrapper(cx, args.thisv()).getClassName()
+ << "\"");
}
if (noProto && isProto) {
uasserted(ErrorCodes::BadValue,
str::stream() << "Cannot call \"" << T::name() << "\" on prototype of \""
- << ObjectWrapper(cx, args.thisv()).getClassName() << "\"");
+ << ObjectWrapper(cx, args.thisv()).getClassName()
+ << "\"");
}
T::call(cx, args);
diff --git a/src/mongo/shell/bench.cpp b/src/mongo/shell/bench.cpp
index ae9c7b093d9..8413f12b731 100644
--- a/src/mongo/shell/bench.cpp
+++ b/src/mongo/shell/bench.cpp
@@ -34,8 +34,8 @@
#include "mongo/shell/bench.h"
-#include <pcrecpp.h>
#include <iostream>
+#include <pcrecpp.h>
#include "mongo/client/dbclientcursor.h"
#include "mongo/db/namespace_string.h"
@@ -322,7 +322,8 @@ BenchRunOp opFromBson(const BSONObj& op) {
} else if (name == "query") {
uassert(34389,
str::stream() << "Field 'query' is only valid for findOne, find, update, and "
- "remove types. Type is " << opType,
+ "remove types. Type is "
+ << opType,
(opType == "findOne") || (opType == "query") ||
(opType == "find" || (opType == "update") || (opType == "delete") ||
(opType == "remove")));
@@ -886,8 +887,9 @@ void BenchRunWorker::generateLoadOnConnection(DBClientBase* conn) {
BSONObjBuilder builder;
builder.append("update", nsToCollectionSubstring(op.ns));
BSONArrayBuilder docBuilder(builder.subarrayStart("updates"));
- docBuilder.append(BSON("q" << query << "u" << update << "multi"
- << op.multi << "upsert" << op.upsert));
+ docBuilder.append(BSON(
+ "q" << query << "u" << update << "multi" << op.multi << "upsert"
+ << op.upsert));
docBuilder.done();
builder.append("writeConcern", op.writeConcern);
conn->runCommand(nsToDatabaseSubstring(op.ns).toString(),
@@ -1082,7 +1084,8 @@ void BenchRunWorker::generateLoadOnConnection(DBClientBase* conn) {
{
stats.trappedErrors.push_back(BSON("error" << ex.what() << "op"
<< opTypeName.find(op.op)->second
- << "count" << count));
+ << "count"
+ << count));
}
if (_config->breakOnTrap)
return;
@@ -1169,11 +1172,11 @@ void BenchRunner::start() {
if (_config->username != "") {
string errmsg;
if (!conn->auth("admin", _config->username, _config->password, errmsg)) {
- uasserted(16704,
- str::stream()
- << "User " << _config->username
- << " could not authenticate to admin db; admin db access is "
- "required to use benchRun with auth enabled");
+ uasserted(
+ 16704,
+ str::stream() << "User " << _config->username
+ << " could not authenticate to admin db; admin db access is "
+ "required to use benchRun with auth enabled");
}
}
@@ -1206,11 +1209,11 @@ void BenchRunner::stop() {
string errmsg;
// this can only fail if admin access was revoked since start of run
if (!conn->auth("admin", _config->username, _config->password, errmsg)) {
- uasserted(16705,
- str::stream()
- << "User " << _config->username
- << " could not authenticate to admin db; admin db access is "
- "still required to use benchRun with auth enabled");
+ uasserted(
+ 16705,
+ str::stream() << "User " << _config->username
+ << " could not authenticate to admin db; admin db access is "
+ "still required to use benchRun with auth enabled");
}
}
}
diff --git a/src/mongo/shell/bridge.js b/src/mongo/shell/bridge.js
index 7f621f14a52..74e6e674d71 100644
--- a/src/mongo/shell/bridge.js
+++ b/src/mongo/shell/bridge.js
@@ -36,7 +36,11 @@ function MongoBridge(options) {
// Start the mongobridge on port 'this.port' routing network traffic to 'this.dest'.
var args = ['mongobridge', '--port', this.port, '--dest', this.dest];
- var keysToSkip = ['dest', 'hostName', 'port', ];
+ var keysToSkip = [
+ 'dest',
+ 'hostName',
+ 'port',
+ ];
// Append any command line arguments that are optional for mongobridge.
Object.keys(options).forEach(function(key) {
@@ -95,14 +99,12 @@ function MongoBridge(options) {
// connection object that is equivalent to its 'host' property. Certain functions in
// ReplSetTest and ShardingTest use the 'name' property instead of the 'host' property, so
// we define it here for consistency.
- Object.defineProperty(userConn,
- 'name',
- {
- enumerable: true,
- get: function() {
- return this.host;
- },
- });
+ Object.defineProperty(userConn, 'name', {
+ enumerable: true,
+ get: function() {
+ return this.host;
+ },
+ });
controlConn = new Mongo(hostName + ':' + this.port);
};
@@ -229,12 +231,10 @@ function MongoBridge(options) {
bridges.forEach(throwErrorIfNotMongoBridgeInstance);
bridges.forEach(bridge => {
- var res = runBridgeCommand(controlConn,
- 'delayMessagesFrom',
- {
- host: bridge.dest,
- delay: delay,
- });
+ var res = runBridgeCommand(controlConn, 'delayMessagesFrom', {
+ host: bridge.dest,
+ delay: delay,
+ });
assert.commandWorked(res,
'failed to configure the mongobridge listening on port ' +
this.port + ' to delay messages from ' + bridge.dest + ' by ' +
@@ -256,12 +256,10 @@ function MongoBridge(options) {
bridges.forEach(throwErrorIfNotMongoBridgeInstance);
bridges.forEach(bridge => {
- var res = runBridgeCommand(controlConn,
- 'discardMessagesFrom',
- {
- host: bridge.dest,
- loss: lossProbability,
- });
+ var res = runBridgeCommand(controlConn, 'discardMessagesFrom', {
+ host: bridge.dest,
+ loss: lossProbability,
+ });
assert.commandWorked(res,
'failed to configure the mongobridge listening on port ' +
this.port + ' to discard messages from ' + bridge.dest +
@@ -272,32 +270,31 @@ function MongoBridge(options) {
// Use a Proxy to "extend" the underlying connection object. The C++ functions, e.g.
// runCommand(), require that they are called on the Mongo instance itself and so typical
// prototypical inheritance isn't possible.
- return new Proxy(this,
- {
- get: function get(target, property, receiver) {
- // If the property is defined on the MongoBridge instance itself, then
- // return it.
- // Otherwise, get the value of the property from the Mongo instance.
- if (target.hasOwnProperty(property)) {
- return target[property];
- }
- var value = userConn[property];
- if (typeof value === 'function') {
- return value.bind(userConn);
- }
- return value;
- },
-
- set: function set(target, property, value, receiver) {
- // Delegate setting the value of any property to the Mongo instance so
- // that it can be
- // accessed in functions acting on the Mongo instance directly instead of
- // this Proxy.
- // For example, the "slaveOk" property needs to be set on the Mongo
- // instance in order
- // for the query options bit to be set correctly.
- userConn[property] = value;
- return true;
- },
- });
+ return new Proxy(this, {
+ get: function get(target, property, receiver) {
+ // If the property is defined on the MongoBridge instance itself, then return
+ // it. Otherwise, get the value of the property from the Mongo instance.
+ if (target.hasOwnProperty(property)) {
+ return target[property];
+ }
+ var value = userConn[property];
+ if (typeof value === 'function') {
+ return value.bind(userConn);
+ }
+ return value;
+ },
+
+ set: function set(target, property, value, receiver) {
+ // Delegate setting the value of any property to the Mongo instance so that
+ // it can be accessed in functions acting on the Mongo instance directly
+ // instead of this Proxy. For example, the "slaveOk" property needs to be
+ // set on the Mongo instance in order for the query options bit to be set
+ // correctly.
+ userConn[property] = value;
+ return true;
+ },
+ });
}
diff --git a/src/mongo/shell/bulk_api.js b/src/mongo/shell/bulk_api.js
index 8ef10b5c976..2dfe15d65d2 100644
--- a/src/mongo/shell/bulk_api.js
+++ b/src/mongo/shell/bulk_api.js
@@ -22,14 +22,12 @@ var _bulk_api_module = (function() {
* Helper function to define properties
*/
var defineReadOnlyProperty = function(self, name, value) {
- Object.defineProperty(self,
- name,
- {
- enumerable: true,
- get: function() {
- return value;
- }
- });
+ Object.defineProperty(self, name, {
+ enumerable: true,
+ get: function() {
+ return value;
+ }
+ });
};
/**
@@ -543,26 +541,24 @@ var _bulk_api_module = (function() {
var batches = [];
var defineBatchTypeCounter = function(self, name, type) {
- Object.defineProperty(self,
- name,
- {
- enumerable: true,
- get: function() {
- var counter = 0;
-
- for (var i = 0; i < batches.length; i++) {
- if (batches[i].batchType == type) {
- counter += batches[i].operations.length;
- }
- }
-
- if (currentBatch && currentBatch.batchType == type) {
- counter += currentBatch.operations.length;
- }
-
- return counter;
- }
- });
+ Object.defineProperty(self, name, {
+ enumerable: true,
+ get: function() {
+ var counter = 0;
+
+ for (var i = 0; i < batches.length; i++) {
+ if (batches[i].batchType == type) {
+ counter += batches[i].operations.length;
+ }
+ }
+
+ if (currentBatch && currentBatch.batchType == type) {
+ counter += currentBatch.operations.length;
+ }
+
+ return counter;
+ }
+ });
};
defineBatchTypeCounter(this, "nInsertOps", INSERT);
@@ -634,9 +630,7 @@ var _bulk_api_module = (function() {
var addIdIfNeeded = function(obj) {
if (typeof(obj._id) == "undefined" && !Array.isArray(obj)) {
var tmp = obj; // don't want to modify input
- obj = {
- _id: new ObjectId()
- };
+ obj = {_id: new ObjectId()};
for (var key in tmp) {
obj[key] = tmp[key];
}
@@ -667,12 +661,8 @@ var _bulk_api_module = (function() {
// Set the top value for the update 0 = multi true, 1 = multi false
var upsert = typeof currentOp.upsert == 'boolean' ? currentOp.upsert : false;
// Establish the update command
- var document = {
- q: currentOp.selector,
- u: updateDocument,
- multi: true,
- upsert: upsert
- };
+ var document =
+ {q: currentOp.selector, u: updateDocument, multi: true, upsert: upsert};
// Copy over the collation, if we have one.
if (currentOp.hasOwnProperty('collation')) {
@@ -691,12 +681,8 @@ var _bulk_api_module = (function() {
// Set the top value for the update 0 = multi true, 1 = multi false
var upsert = typeof currentOp.upsert == 'boolean' ? currentOp.upsert : false;
// Establish the update command
- var document = {
- q: currentOp.selector,
- u: updateDocument,
- multi: false,
- upsert: upsert
- };
+ var document =
+ {q: currentOp.selector, u: updateDocument, multi: false, upsert: upsert};
// Copy over the collation, if we have one.
if (currentOp.hasOwnProperty('collation')) {
@@ -723,10 +709,7 @@ var _bulk_api_module = (function() {
collection._validateRemoveDoc(currentOp.selector);
// Establish the removeOne command
- var document = {
- q: currentOp.selector,
- limit: 1
- };
+ var document = {q: currentOp.selector, limit: 1};
// Copy over the collation, if we have one.
if (currentOp.hasOwnProperty('collation')) {
@@ -743,10 +726,7 @@ var _bulk_api_module = (function() {
collection._validateRemoveDoc(currentOp.selector);
// Establish the remove command
- var document = {
- q: currentOp.selector,
- limit: 0
- };
+ var document = {q: currentOp.selector, limit: 0};
// Copy over the collation, if we have one.
if (currentOp.hasOwnProperty('collation')) {
@@ -781,9 +761,7 @@ var _bulk_api_module = (function() {
if (selector == undefined)
throw Error("find() requires query criteria");
// Save a current selector
- currentOp = {
- selector: selector
- };
+ currentOp = {selector: selector};
// Return the find Operations
return findOperations;
@@ -857,11 +835,7 @@ var _bulk_api_module = (function() {
// Generate the right update
if (batch.batchType == UPDATE) {
- cmd = {
- update: coll.getName(),
- updates: batch.operations,
- ordered: ordered
- };
+ cmd = {update: coll.getName(), updates: batch.operations, ordered: ordered};
} else if (batch.batchType == INSERT) {
var transformedInserts = [];
batch.operations.forEach(function(insertDoc) {
@@ -869,17 +843,9 @@ var _bulk_api_module = (function() {
});
batch.operations = transformedInserts;
- cmd = {
- insert: coll.getName(),
- documents: batch.operations,
- ordered: ordered
- };
+ cmd = {insert: coll.getName(), documents: batch.operations, ordered: ordered};
} else if (batch.batchType == REMOVE) {
- cmd = {
- delete: coll.getName(),
- deletes: batch.operations,
- ordered: ordered
- };
+ cmd = {delete: coll.getName(), deletes: batch.operations, ordered: ordered};
}
// If we have a write concern
@@ -910,7 +876,8 @@ var _bulk_api_module = (function() {
-1 /* limit */,
0 /* skip */,
0 /* batchSize */,
- 0 /* flags */).next();
+ 0 /* flags */)
+ .next();
if (result.ok == 0) {
throw new WriteCommandError(result);
@@ -971,17 +938,11 @@ var _bulk_api_module = (function() {
var code = gleResponse.code;
var timeout = gleResponse.wtimeout ? true : false;
- var extractedErr = {
- writeError: null,
- wcError: null,
- unknownError: null
- };
+ var extractedErr = {writeError: null, wcError: null, unknownError: null};
if (err == 'norepl' || err == 'noreplset') {
// Know this is legacy gle and the repl not enforced - write concern error in 2.4.
- var errObj = {
- code: WRITE_CONCERN_FAILED
- };
+ var errObj = {code: WRITE_CONCERN_FAILED};
if (errMsg != '') {
errObj.errmsg = errMsg;
@@ -994,9 +955,7 @@ var _bulk_api_module = (function() {
extractedErr.wcError = errObj;
} else if (timeout) {
// Know there was not write error.
- var errObj = {
- code: WRITE_CONCERN_FAILED
- };
+ var errObj = {code: WRITE_CONCERN_FAILED};
if (errMsg != '') {
errObj.errmsg = errMsg;
@@ -1004,35 +963,21 @@ var _bulk_api_module = (function() {
errObj.errmsg = err;
}
- errObj.errInfo = {
- wtimeout: true
- };
+ errObj.errInfo = {wtimeout: true};
extractedErr.wcError = errObj;
} else if (code == 19900 || // No longer primary
code == 16805 || // replicatedToNum no longer primary
code == 14330 || // gle wmode changed; invalid
code == NOT_MASTER ||
code == UNKNOWN_REPL_WRITE_CONCERN || code == WRITE_CONCERN_FAILED) {
- extractedErr.wcError = {
- code: code,
- errmsg: errMsg
- };
+ extractedErr.wcError = {code: code, errmsg: errMsg};
} else if (!isOK) {
// This is a GLE failure we don't understand
- extractedErr.unknownError = {
- code: code,
- errmsg: errMsg
- };
+ extractedErr.unknownError = {code: code, errmsg: errMsg};
} else if (err != '') {
- extractedErr.writeError = {
- code: (code == 0) ? UNKNOWN_ERROR : code,
- errmsg: err
- };
+ extractedErr.writeError = {code: (code == 0) ? UNKNOWN_ERROR : code, errmsg: err};
} else if (jNote != '') {
- extractedErr.writeError = {
- code: WRITE_CONCERN_FAILED,
- errmsg: jNote
- };
+ extractedErr.writeError = {code: WRITE_CONCERN_FAILED, errmsg: jNote};
}
// Handling of writeback not needed for mongo shell.
@@ -1043,9 +988,7 @@ var _bulk_api_module = (function() {
* getLastErrorMethod that supports all write concerns
*/
var executeGetLastError = function(db, options) {
- var cmd = {
- getlasterror: 1
- };
+ var cmd = {getlasterror: 1};
cmd = Object.extend(cmd, options);
// Execute the getLastErrorCommand
return db.runCommand(cmd);
@@ -1054,11 +997,7 @@ var _bulk_api_module = (function() {
// Execute the operations, serially
var executeBatchWithLegacyOps = function(batch) {
- var batchResult = {
- n: 0,
- writeErrors: [],
- upserted: []
- };
+ var batchResult = {n: 0, writeErrors: [], upserted: []};
var extractedErr = null;
@@ -1234,10 +1173,7 @@ var _bulk_api_module = (function() {
var explainBatch = batches[0];
var writeCmd = buildBatchCmd(explainBatch);
- return {
- "explain": writeCmd,
- "verbosity": verbosity
- };
+ return {"explain": writeCmd, "verbosity": verbosity};
};
};
diff --git a/src/mongo/shell/collection.js b/src/mongo/shell/collection.js
index 72efdbaaf97..67f4d8327dc 100644
--- a/src/mongo/shell/collection.js
+++ b/src/mongo/shell/collection.js
@@ -192,27 +192,19 @@ DBCollection.prototype._massageObject = function(q) {
var type = typeof q;
if (type == "function")
- return {
- $where: q
- };
+ return {$where: q};
if (q.isObjectId)
- return {
- _id: q
- };
+ return {_id: q};
if (type == "object")
return q;
if (type == "string") {
if (q.length == 24)
- return {
- _id: q
- };
+ return {_id: q};
- return {
- $where: q
- };
+ return {$where: q};
}
throw Error("don't know how to massage : " + type);
@@ -369,9 +361,7 @@ DBCollection.prototype.insert = function(obj, options, _allow_dot) {
if (typeof(obj._id) == "undefined" && !Array.isArray(obj)) {
var tmp = obj; // don't want to modify input
- obj = {
- _id: new ObjectId()
- };
+ obj = {_id: new ObjectId()};
for (var key in tmp) {
obj[key] = tmp[key];
}
@@ -428,12 +418,7 @@ DBCollection.prototype._parseRemove = function(t, justOne) {
wc = this.getWriteConcern();
}
- return {
- "query": query,
- "justOne": justOne,
- "wc": wc,
- "collation": collation
- };
+ return {"query": query, "justOne": justOne, "wc": wc, "collation": collation};
};
DBCollection.prototype.remove = function(t, justOne) {
@@ -645,11 +630,7 @@ DBCollection.prototype._genIndexName = function(keys) {
};
DBCollection.prototype._indexSpec = function(keys, options) {
- var ret = {
- ns: this._fullName,
- key: keys,
- name: this._genIndexName(keys)
- };
+ var ret = {ns: this._fullName, key: keys, name: this._genIndexName(keys)};
if (!options) {
} else if (typeof(options) == "string")
@@ -710,15 +691,9 @@ DBCollection.prototype.createIndexes = function(keys, options) {
// Return the first error
var error = result.hasWriteErrors() ? result.getWriteErrors()[0]
: result.getWriteConcernError();
- return {
- ok: 0.0,
- code: error.code,
- errmsg: error.errmsg
- };
+ return {ok: 0.0, code: error.code, errmsg: error.errmsg};
} else {
- return {
- ok: 1.0
- };
+ return {ok: 1.0};
}
} else {
this._db.getCollection("system.indexes").insert(indexSpecs, 0, true);
@@ -771,9 +746,7 @@ DBCollection.prototype.drop = function() {
};
DBCollection.prototype.findAndModify = function(args) {
- var cmd = {
- findandmodify: this.getName()
- };
+ var cmd = {findandmodify: this.getName()};
for (var key in args) {
cmd[key] = args[key];
}
@@ -825,9 +798,7 @@ DBCollection.prototype._printExtraInfo = function(action, startTime) {
};
DBCollection.prototype.validate = function(full) {
- var cmd = {
- validate: this.getName()
- };
+ var cmd = {validate: this.getName()};
if (typeof(full) == 'object') // support arbitrary options here
Object.extend(cmd, full);
@@ -864,10 +835,7 @@ DBCollection.prototype.validate = function(full) {
* getDiskStorageStats provides a human-readable summary of the command output
*/
DBCollection.prototype.diskStorageStats = function(opt) {
- var cmd = {
- storageDetails: this.getName(),
- analyze: 'diskStorage'
- };
+ var cmd = {storageDetails: this.getName(), analyze: 'diskStorage'};
if (typeof(opt) == 'object')
Object.extend(cmd, opt);
@@ -894,11 +862,12 @@ DBCollection.prototype.getDiskStorageStats = function(params) {
var BAR_WIDTH = 70;
var formatSliceData = function(data) {
- var bar = _barFormat([
- [data.bsonBytes / data.onDiskBytes, "="],
- [(data.recBytes - data.bsonBytes) / data.onDiskBytes, "-"]
- ],
- BAR_WIDTH);
+ var bar = _barFormat(
+ [
+ [data.bsonBytes / data.onDiskBytes, "="],
+ [(data.recBytes - data.bsonBytes) / data.onDiskBytes, "-"]
+ ],
+ BAR_WIDTH);
return sh._dataFormat(data.onDiskBytes).pad(9) + " " + data.numEntries.toFixed(0).pad(10) +
" " + bar + " " + (data.bsonBytes / data.onDiskBytes).toPercentStr().pad(8) + " " +
@@ -943,10 +912,7 @@ DBCollection.prototype.getDiskStorageStats = function(params) {
* getPagesInRAM provides a human-readable summary of the command output
*/
DBCollection.prototype.pagesInRAM = function(opt) {
- var cmd = {
- storageDetails: this.getName(),
- analyze: 'pagesInRAM'
- };
+ var cmd = {storageDetails: this.getName(), analyze: 'pagesInRAM'};
if (typeof(opt) == 'object')
Object.extend(cmd, opt);
@@ -1031,9 +997,7 @@ DBCollection.prototype.getShardVersion = function() {
DBCollection.prototype._getIndexesSystemIndexes = function(filter) {
var si = this.getDB().getCollection("system.indexes");
- var query = {
- ns: this.getFullName()
- };
+ var query = {ns: this.getFullName()};
if (filter)
query = Object.extend(query, filter);
return si.find(query).toArray();
@@ -1081,10 +1045,7 @@ DBCollection.prototype.getIndexKeys = function() {
};
DBCollection.prototype.hashAllDocs = function() {
- var cmd = {
- dbhash: 1,
- collections: [this._shortName]
- };
+ var cmd = {dbhash: 1, collections: [this._shortName]};
var res = this._dbCommand(cmd);
var hash = res.collections[this._shortName];
assert(hash);
@@ -1296,9 +1257,7 @@ DBCollection.prototype.aggregate = function(pipeline, aggregateOptions) {
// Assign the cleaned up options
aggregateOptions = copy;
// Create the initial command document
- var cmd = {
- pipeline: pipeline
- };
+ var cmd = {pipeline: pipeline};
Object.extend(cmd, aggregateOptions);
if (!('cursor' in cmd)) {
@@ -1329,10 +1288,7 @@ DBCollection.prototype.aggregate = function(pipeline, aggregateOptions) {
if ('result' in res && !("cursor" in res)) {
// convert old-style output to cursor-style output
- res.cursor = {
- ns: '',
- id: NumberLong(0)
- };
+ res.cursor = {ns: '', id: NumberLong(0)};
res.cursor.firstBatch = res.result;
delete res.result;
}
@@ -1407,11 +1363,7 @@ DBCollection.prototype.convertToSingleObject = function(valueField) {
* @param optional object of optional fields;
*/
DBCollection.prototype.mapReduce = function(map, reduce, optionsOrOutString) {
- var c = {
- mapreduce: this._shortName,
- map: map,
- reduce: reduce
- };
+ var c = {mapreduce: this._shortName, map: map, reduce: reduce};
assert(optionsOrOutString, "need to supply an optionsOrOutString");
if (typeof(optionsOrOutString) == "string")
@@ -1776,11 +1728,7 @@ DBCollection.prototype.distinct = function(keyString, query, options) {
}
// Distinct command
- var cmd = {
- distinct: this.getName(),
- key: keyString,
- query: query || {}
- };
+ var cmd = {distinct: this.getName(), key: keyString, query: query || {}};
// Set maxTimeMS if provided
if (opts.maxTimeMS) {
@@ -1925,8 +1873,10 @@ PlanCache.prototype.clear = function() {
* List plans for a query shape.
*/
PlanCache.prototype.getPlansByQuery = function(query, projection, sort) {
- return this._runCommandThrowOnError("planCacheListPlans",
- this._parseQueryShape(query, projection, sort)).plans;
+ return this
+ ._runCommandThrowOnError("planCacheListPlans",
+ this._parseQueryShape(query, projection, sort))
+ .plans;
};
/**
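The hunks above collapse multi-line object literals into the single-line command documents that clang-format now prefers. A minimal sketch of the shared shape, with a hypothetical collection name and option (neither taken from this commit):

    // Illustrative only: build a command document, then fold caller options in.
    var cmd = {distinct: "things", key: "status", query: {active: true}};
    Object.extend(cmd, {maxTimeMS: 5000});  // merge caller-supplied options into the command
    var res = db.runCommand(cmd);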
diff --git a/src/mongo/shell/crud_api.js b/src/mongo/shell/crud_api.js
index 9a2d74ab62c..dafbb10510d 100644
--- a/src/mongo/shell/crud_api.js
+++ b/src/mongo/shell/crud_api.js
@@ -29,9 +29,7 @@ DBCollection.prototype._createWriteConcern = function(options) {
DBCollection.prototype.addIdIfNeeded = function(obj) {
if (typeof(obj._id) == "undefined" && !Array.isArray(obj)) {
var tmp = obj; // don't want to modify input
- obj = {
- _id: new ObjectId()
- };
+ obj = {_id: new ObjectId()};
for (var key in tmp) {
obj[key] = tmp[key];
@@ -74,9 +72,7 @@ DBCollection.prototype.bulkWrite = function(operations, options) {
var writeConcern = this._createWriteConcern(opts);
// Result
- var result = {
- acknowledged: (writeConcern && writeConcern.w == 0) ? false : true
- };
+ var result = {acknowledged: (writeConcern && writeConcern.w == 0) ? false : true};
// Use bulk operation API already in the shell
var bulkOp = opts.ordered ? this.initializeOrderedBulkOp() : this.initializeUnorderedBulkOp();
@@ -230,9 +226,7 @@ DBCollection.prototype.insertOne = function(document, options) {
var writeConcern = this._createWriteConcern(opts);
// Result
- var result = {
- acknowledged: (writeConcern && writeConcern.w == 0) ? false : true
- };
+ var result = {acknowledged: (writeConcern && writeConcern.w == 0) ? false : true};
// Use bulk operation API already in the shell
var bulk = this.initializeOrderedBulkOp();
@@ -289,9 +283,7 @@ DBCollection.prototype.insertMany = function(documents, options) {
var writeConcern = this._createWriteConcern(opts);
// Result
- var result = {
- acknowledged: (writeConcern && writeConcern.w == 0) ? false : true
- };
+ var result = {acknowledged: (writeConcern && writeConcern.w == 0) ? false : true};
// Use bulk operation API already in the shell
var bulk = opts.ordered ? this.initializeOrderedBulkOp() : this.initializeUnorderedBulkOp();
@@ -335,9 +327,7 @@ DBCollection.prototype.deleteOne = function(filter, options) {
var writeConcern = this._createWriteConcern(opts);
// Result
- var result = {
- acknowledged: (writeConcern && writeConcern.w == 0) ? false : true
- };
+ var result = {acknowledged: (writeConcern && writeConcern.w == 0) ? false : true};
// Use bulk operation API already in the shell
var bulk = this.initializeOrderedBulkOp();
@@ -392,9 +382,7 @@ DBCollection.prototype.deleteMany = function(filter, options) {
var writeConcern = this._createWriteConcern(opts);
// Result
- var result = {
- acknowledged: (writeConcern && writeConcern.w == 0) ? false : true
- };
+ var result = {acknowledged: (writeConcern && writeConcern.w == 0) ? false : true};
// Use bulk operation API already in the shell
var bulk = this.initializeOrderedBulkOp();
@@ -458,9 +446,7 @@ DBCollection.prototype.replaceOne = function(filter, replacement, options) {
var writeConcern = this._createWriteConcern(opts);
// Result
- var result = {
- acknowledged: (writeConcern && writeConcern.w == 0) ? false : true
- };
+ var result = {acknowledged: (writeConcern && writeConcern.w == 0) ? false : true};
// Use bulk operation API already in the shell
var bulk = this.initializeOrderedBulkOp();
@@ -537,9 +523,7 @@ DBCollection.prototype.updateOne = function(filter, update, options) {
var writeConcern = this._createWriteConcern(opts);
// Result
- var result = {
- acknowledged: (writeConcern && writeConcern.w == 0) ? false : true
- };
+ var result = {acknowledged: (writeConcern && writeConcern.w == 0) ? false : true};
// Use bulk operation API already in the shell
var bulk = this.initializeOrderedBulkOp();
@@ -616,9 +600,7 @@ DBCollection.prototype.updateMany = function(filter, update, options) {
var writeConcern = this._createWriteConcern(opts);
// Result
- var result = {
- acknowledged: (writeConcern && writeConcern.w == 0) ? false : true
- };
+ var result = {acknowledged: (writeConcern && writeConcern.w == 0) ? false : true};
// Use bulk operation API already in the shell
var bulk = this.initializeOrderedBulkOp();
@@ -680,10 +662,7 @@ DBCollection.prototype.updateMany = function(filter, update, options) {
DBCollection.prototype.findOneAndDelete = function(filter, options) {
var opts = Object.extend({}, options || {});
// Set up the command
- var cmd = {
- query: filter,
- remove: true
- };
+ var cmd = {query: filter, remove: true};
if (opts.sort) {
cmd.sort = opts.sort;
@@ -741,10 +720,7 @@ DBCollection.prototype.findOneAndReplace = function(filter, replacement, options
}
// Set up the command
- var cmd = {
- query: filter,
- update: replacement
- };
+ var cmd = {query: filter, update: replacement};
if (opts.sort) {
cmd.sort = opts.sort;
}
@@ -809,10 +785,7 @@ DBCollection.prototype.findOneAndUpdate = function(filter, update, options) {
}
// Set up the command
- var cmd = {
- query: filter,
- update: update
- };
+ var cmd = {query: filter, update: update};
if (opts.sort) {
cmd.sort = opts.sort;
}
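Every CRUD helper in crud_api.js seeds its result object the same way; the hunks above only put that seed on one line. A hedged sketch of the pattern (the wrapper function below is illustrative, not a shell API):

    function exampleWriteResult(writeConcern) {
        // w: 0 is an unacknowledged write, so the shell reports acknowledged: false
        var result = {acknowledged: (writeConcern && writeConcern.w == 0) ? false : true};
        return result;
    }
    exampleWriteResult({w: 0});  // {acknowledged: false}
    exampleWriteResult({w: 1});  // {acknowledged: true}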
diff --git a/src/mongo/shell/db.js b/src/mongo/shell/db.js
index 6330b9abe32..4f98467f840 100644
--- a/src/mongo/shell/db.js
+++ b/src/mongo/shell/db.js
@@ -62,10 +62,7 @@ var DB;
// The server selection spec mandates that the key is '$query', but
// the shell has historically used 'query'. The server accepts both,
// so we maintain the existing behavior
- var cmdObjWithReadPref = {
- query: clonedCmdObj,
- $readPreference: readPref
- };
+ var cmdObjWithReadPref = {query: clonedCmdObj, $readPreference: readPref};
return cmdObjWithReadPref;
};
@@ -233,9 +230,7 @@ var DB;
options.flags = flags;
}
- var cmd = {
- create: name
- };
+ var cmd = {create: name};
Object.extend(cmd, options);
return this._dbCommand(cmd);
@@ -288,9 +283,7 @@ var DB;
return "shutdown command only works with the admin database; try 'use admin'";
}
- var cmd = {
- 'shutdown': 1
- };
+ var cmd = {'shutdown': 1};
opts = opts || {};
for (var o in opts) {
cmd[o] = opts[o];
@@ -531,9 +524,7 @@ var DB;
throw errorObject;
}
- var cmd = {
- profile: level
- };
+ var cmd = {profile: level};
if (isNumber(slowms))
cmd["slowms"] = slowms;
return assert.commandWorked(this._dbCommand(cmd));
@@ -566,9 +557,7 @@ var DB;
DB.prototype.eval = function(jsfunction) {
print("WARNING: db.eval is deprecated");
- var cmd = {
- $eval: jsfunction
- };
+ var cmd = {$eval: jsfunction};
if (arguments.length > 1) {
cmd.args = Array.from(arguments).slice(1);
}
@@ -697,9 +686,7 @@ var DB;
return res.err;
};
DB.prototype.getLastErrorObj = function(w, wtimeout) {
- var cmd = {
- getlasterror: 1
- };
+ var cmd = {getlasterror: 1};
if (w) {
cmd.w = w;
if (wtimeout)
@@ -825,9 +812,7 @@ var DB;
q["$all"] = true;
}
- var commandObj = {
- "currentOp": 1
- };
+ var commandObj = {"currentOp": 1};
Object.extend(commandObj, q);
var res = this.adminCommand(commandObj);
if (commandUnsupported(res)) {
@@ -1081,9 +1066,7 @@ var DB;
};
DB.prototype.serverStatus = function(options) {
- var cmd = {
- serverStatus: 1
- };
+ var cmd = {serverStatus: 1};
if (options) {
Object.extend(cmd, options);
}
@@ -1200,10 +1183,7 @@ var DB;
/////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
- var _defaultWriteConcern = {
- w: 'majority',
- wtimeout: 30 * 1000
- };
+ var _defaultWriteConcern = {w: 'majority', wtimeout: 30 * 1000};
function getUserObjString(userObj) {
var pwd = userObj.pwd;
@@ -1237,9 +1217,7 @@ var DB;
DB.prototype.createUser = function(userObj, writeConcern) {
var name = userObj["user"];
- var cmdObj = {
- createUser: name
- };
+ var cmdObj = {createUser: name};
cmdObj = Object.extend(cmdObj, userObj);
delete cmdObj["user"];
@@ -1299,9 +1277,7 @@ var DB;
};
DB.prototype.updateUser = function(name, updateObject, writeConcern) {
- var cmdObj = {
- updateUser: name
- };
+ var cmdObj = {updateUser: name};
cmdObj = Object.extend(cmdObj, updateObject);
cmdObj['writeConcern'] = writeConcern ? writeConcern : _defaultWriteConcern;
this._modifyCommandToDigestPasswordIfNecessary(cmdObj, name);
@@ -1412,10 +1388,7 @@ var DB;
DB.prototype._authOrThrow = function() {
var params;
if (arguments.length == 2) {
- params = {
- user: arguments[0],
- pwd: arguments[1]
- };
+ params = {user: arguments[0], pwd: arguments[1]};
} else if (arguments.length == 1) {
if (typeof(arguments[0]) != "object")
throw Error("Single-argument form of auth expects a parameter object");
@@ -1490,9 +1463,7 @@ var DB;
if (typeof username != "string") {
throw Error("User name for getUser shell helper must be a string");
}
- var cmdObj = {
- usersInfo: username
- };
+ var cmdObj = {usersInfo: username};
Object.extend(cmdObj, args);
var res = this.runCommand(cmdObj);
@@ -1507,9 +1478,7 @@ var DB;
};
DB.prototype.getUsers = function(args) {
- var cmdObj = {
- usersInfo: 1
- };
+ var cmdObj = {usersInfo: 1};
Object.extend(cmdObj, args);
var res = this.runCommand(cmdObj);
if (!res.ok) {
@@ -1528,9 +1497,7 @@ var DB;
DB.prototype.createRole = function(roleObj, writeConcern) {
var name = roleObj["role"];
- var cmdObj = {
- createRole: name
- };
+ var cmdObj = {createRole: name};
cmdObj = Object.extend(cmdObj, roleObj);
delete cmdObj["role"];
cmdObj["writeConcern"] = writeConcern ? writeConcern : _defaultWriteConcern;
@@ -1544,9 +1511,7 @@ var DB;
};
DB.prototype.updateRole = function(name, updateObject, writeConcern) {
- var cmdObj = {
- updateRole: name
- };
+ var cmdObj = {updateRole: name};
cmdObj = Object.extend(cmdObj, updateObject);
cmdObj['writeConcern'] = writeConcern ? writeConcern : _defaultWriteConcern;
var res = this.runCommand(cmdObj);
@@ -1638,9 +1603,7 @@ var DB;
if (typeof rolename != "string") {
throw Error("Role name for getRole shell helper must be a string");
}
- var cmdObj = {
- rolesInfo: rolename
- };
+ var cmdObj = {rolesInfo: rolename};
Object.extend(cmdObj, args);
var res = this.runCommand(cmdObj);
if (!res.ok) {
@@ -1654,9 +1617,7 @@ var DB;
};
DB.prototype.getRoles = function(args) {
- var cmdObj = {
- rolesInfo: 1
- };
+ var cmdObj = {rolesInfo: 1};
Object.extend(cmdObj, args);
var res = this.runCommand(cmdObj);
if (!res.ok) {
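The user- and role-management helpers reformatted above all build their commands the same way: start from the primary key, merge the caller's object, then default the write concern. A sketch under those assumptions (buildCreateUserCmd is a made-up name, not the shell's internal API):

    var _defaultWriteConcern = {w: 'majority', wtimeout: 30 * 1000};
    function buildCreateUserCmd(userObj, writeConcern) {
        var cmdObj = {createUser: userObj.user};
        cmdObj = Object.extend(cmdObj, userObj);
        delete cmdObj.user;  // the user name already rides in the command key
        cmdObj.writeConcern = writeConcern ? writeConcern : _defaultWriteConcern;
        return cmdObj;
    }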
diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp
index 5ef6f3f9982..1378ee263b4 100644
--- a/src/mongo/shell/dbshell.cpp
+++ b/src/mongo/shell/dbshell.cpp
@@ -748,7 +748,8 @@ int _main(int argc, char* argv[], char** envp) {
hasMongoRC = true;
if (!scope->execFile(rcLocation, false, true)) {
cout << "The \".mongorc.js\" file located in your home folder could not be "
- "executed" << endl;
+ "executed"
+ << endl;
return -5;
}
}
diff --git a/src/mongo/shell/explain_query.js b/src/mongo/shell/explain_query.js
index cb14b71f0cf..0670734891e 100644
--- a/src/mongo/shell/explain_query.js
+++ b/src/mongo/shell/explain_query.js
@@ -150,9 +150,7 @@ var DBExplainQuery = (function() {
innerCmd = this._query._convertToCommand(canAttachReadPref);
}
- var explainCmd = {
- explain: innerCmd
- };
+ var explainCmd = {explain: innerCmd};
explainCmd["verbosity"] = this._verbosity;
var explainDb = this._query._db;
diff --git a/src/mongo/shell/explainable.js b/src/mongo/shell/explainable.js
index 4f54694acd3..2fe8ce0de70 100644
--- a/src/mongo/shell/explainable.js
+++ b/src/mongo/shell/explainable.js
@@ -126,23 +126,15 @@ var Explainable = (function() {
this.findAndModify = function(params) {
var famCmd = Object.extend({"findAndModify": this._collection.getName()}, params);
- var explainCmd = {
- "explain": famCmd,
- "verbosity": this._verbosity
- };
+ var explainCmd = {"explain": famCmd, "verbosity": this._verbosity};
var explainResult = this._collection.runReadCommand(explainCmd);
return throwOrReturn(explainResult);
};
this.group = function(params) {
params.ns = this._collection.getName();
- var grpCmd = {
- "group": this._collection.getDB()._groupFixParms(params)
- };
- var explainCmd = {
- "explain": grpCmd,
- "verbosity": this._verbosity
- };
+ var grpCmd = {"group": this._collection.getDB()._groupFixParms(params)};
+ var explainCmd = {"explain": grpCmd, "verbosity": this._verbosity};
var explainResult = this._collection.runReadCommand(explainCmd);
return throwOrReturn(explainResult);
};
@@ -158,10 +150,7 @@ var Explainable = (function() {
distinctCmd.collation = options.collation;
}
- var explainCmd = {
- explain: distinctCmd,
- verbosity: this._verbosity
- };
+ var explainCmd = {explain: distinctCmd, verbosity: this._verbosity};
var explainResult = this._collection.runReadCommand(explainCmd);
return throwOrReturn(explainResult);
};
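Explainable wraps each inner command in an outer {explain: ..., verbosity: ...} document, as the hunks above show. A usage-style sketch; the collection name and verbosity value are examples:

    var distinctCmd = {distinct: "things", key: "status"};
    var explainCmd = {explain: distinctCmd, verbosity: "queryPlanner"};
    // Running explainCmd as a read command returns the query plan, not the result set.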
diff --git a/src/mongo/shell/linenoise.cpp b/src/mongo/shell/linenoise.cpp
index 03ca8bbf12f..f75028c8845 100644
--- a/src/mongo/shell/linenoise.cpp
+++ b/src/mongo/shell/linenoise.cpp
@@ -86,8 +86,8 @@
#ifdef _WIN32
#include <conio.h>
-#include <windows.h>
#include <io.h>
+#include <windows.h>
#define strcasecmp _stricmp
#define strdup _strdup
#define isatty _isatty
@@ -96,24 +96,24 @@
#else /* _WIN32 */
+#include <cctype>
#include <signal.h>
-#include <termios.h>
-#include <unistd.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/types.h>
#include <sys/ioctl.h>
-#include <cctype>
+#include <sys/types.h>
+#include <termios.h>
+#include <unistd.h>
#include <wctype.h>
#endif /* _WIN32 */
-#include <stdio.h>
-#include <errno.h>
-#include <fcntl.h>
#include "linenoise.h"
#include "linenoise_utf8.h"
#include "mk_wcwidth.h"
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
#include <string>
#include <vector>
diff --git a/src/mongo/shell/linenoise_utf8.cpp b/src/mongo/shell/linenoise_utf8.cpp
index 73d8168ac56..055ac0cf793 100644
--- a/src/mongo/shell/linenoise_utf8.cpp
+++ b/src/mongo/shell/linenoise_utf8.cpp
@@ -30,9 +30,9 @@
#include "mongo/shell/linenoise_utf8.h"
#ifdef _WIN32
-#include <io.h>
#include "mongo/platform/windows_basic.h"
#include "mongo/util/text.h"
+#include <io.h>
#else
#include <unistd.h>
#endif
diff --git a/src/mongo/shell/mk_wcwidth.cpp b/src/mongo/shell/mk_wcwidth.cpp
index 58e3a368293..cb4674344f5 100644
--- a/src/mongo/shell/mk_wcwidth.cpp
+++ b/src/mongo/shell/mk_wcwidth.cpp
@@ -124,148 +124,43 @@ static int bisearch(int ucs, const struct interval* table, int max) {
int mk_wcwidth(int ucs) {
/* sorted list of non-overlapping intervals of non-spacing characters */
/* generated by "uniset +cat=Me +cat=Mn +cat=Cf -00AD +1160-11FF +200B c" */
- static const struct interval combining[] = {{0x0300, 0x036F},
- {0x0483, 0x0486},
- {0x0488, 0x0489},
- {0x0591, 0x05BD},
- {0x05BF, 0x05BF},
- {0x05C1, 0x05C2},
- {0x05C4, 0x05C5},
- {0x05C7, 0x05C7},
- {0x0600, 0x0603},
- {0x0610, 0x0615},
- {0x064B, 0x065E},
- {0x0670, 0x0670},
- {0x06D6, 0x06E4},
- {0x06E7, 0x06E8},
- {0x06EA, 0x06ED},
- {0x070F, 0x070F},
- {0x0711, 0x0711},
- {0x0730, 0x074A},
- {0x07A6, 0x07B0},
- {0x07EB, 0x07F3},
- {0x0901, 0x0902},
- {0x093C, 0x093C},
- {0x0941, 0x0948},
- {0x094D, 0x094D},
- {0x0951, 0x0954},
- {0x0962, 0x0963},
- {0x0981, 0x0981},
- {0x09BC, 0x09BC},
- {0x09C1, 0x09C4},
- {0x09CD, 0x09CD},
- {0x09E2, 0x09E3},
- {0x0A01, 0x0A02},
- {0x0A3C, 0x0A3C},
- {0x0A41, 0x0A42},
- {0x0A47, 0x0A48},
- {0x0A4B, 0x0A4D},
- {0x0A70, 0x0A71},
- {0x0A81, 0x0A82},
- {0x0ABC, 0x0ABC},
- {0x0AC1, 0x0AC5},
- {0x0AC7, 0x0AC8},
- {0x0ACD, 0x0ACD},
- {0x0AE2, 0x0AE3},
- {0x0B01, 0x0B01},
- {0x0B3C, 0x0B3C},
- {0x0B3F, 0x0B3F},
- {0x0B41, 0x0B43},
- {0x0B4D, 0x0B4D},
- {0x0B56, 0x0B56},
- {0x0B82, 0x0B82},
- {0x0BC0, 0x0BC0},
- {0x0BCD, 0x0BCD},
- {0x0C3E, 0x0C40},
- {0x0C46, 0x0C48},
- {0x0C4A, 0x0C4D},
- {0x0C55, 0x0C56},
- {0x0CBC, 0x0CBC},
- {0x0CBF, 0x0CBF},
- {0x0CC6, 0x0CC6},
- {0x0CCC, 0x0CCD},
- {0x0CE2, 0x0CE3},
- {0x0D41, 0x0D43},
- {0x0D4D, 0x0D4D},
- {0x0DCA, 0x0DCA},
- {0x0DD2, 0x0DD4},
- {0x0DD6, 0x0DD6},
- {0x0E31, 0x0E31},
- {0x0E34, 0x0E3A},
- {0x0E47, 0x0E4E},
- {0x0EB1, 0x0EB1},
- {0x0EB4, 0x0EB9},
- {0x0EBB, 0x0EBC},
- {0x0EC8, 0x0ECD},
- {0x0F18, 0x0F19},
- {0x0F35, 0x0F35},
- {0x0F37, 0x0F37},
- {0x0F39, 0x0F39},
- {0x0F71, 0x0F7E},
- {0x0F80, 0x0F84},
- {0x0F86, 0x0F87},
- {0x0F90, 0x0F97},
- {0x0F99, 0x0FBC},
- {0x0FC6, 0x0FC6},
- {0x102D, 0x1030},
- {0x1032, 0x1032},
- {0x1036, 0x1037},
- {0x1039, 0x1039},
- {0x1058, 0x1059},
- {0x1160, 0x11FF},
- {0x135F, 0x135F},
- {0x1712, 0x1714},
- {0x1732, 0x1734},
- {0x1752, 0x1753},
- {0x1772, 0x1773},
- {0x17B4, 0x17B5},
- {0x17B7, 0x17BD},
- {0x17C6, 0x17C6},
- {0x17C9, 0x17D3},
- {0x17DD, 0x17DD},
- {0x180B, 0x180D},
- {0x18A9, 0x18A9},
- {0x1920, 0x1922},
- {0x1927, 0x1928},
- {0x1932, 0x1932},
- {0x1939, 0x193B},
- {0x1A17, 0x1A18},
- {0x1B00, 0x1B03},
- {0x1B34, 0x1B34},
- {0x1B36, 0x1B3A},
- {0x1B3C, 0x1B3C},
- {0x1B42, 0x1B42},
- {0x1B6B, 0x1B73},
- {0x1DC0, 0x1DCA},
- {0x1DFE, 0x1DFF},
- {0x200B, 0x200F},
- {0x202A, 0x202E},
- {0x2060, 0x2063},
- {0x206A, 0x206F},
- {0x20D0, 0x20EF},
- {0x302A, 0x302F},
- {0x3099, 0x309A},
- {0xA806, 0xA806},
- {0xA80B, 0xA80B},
- {0xA825, 0xA826},
- {0xFB1E, 0xFB1E},
- {0xFE00, 0xFE0F},
- {0xFE20, 0xFE23},
- {0xFEFF, 0xFEFF},
- {0xFFF9, 0xFFFB},
- {0x10A01, 0x10A03},
- {0x10A05, 0x10A06},
- {0x10A0C, 0x10A0F},
- {0x10A38, 0x10A3A},
- {0x10A3F, 0x10A3F},
- {0x1D167, 0x1D169},
- {0x1D173, 0x1D182},
- {0x1D185, 0x1D18B},
- {0x1D1AA, 0x1D1AD},
- {0x1D242, 0x1D244},
- {0xE0001, 0xE0001},
- {0xE0020, 0xE007F},
- {0xE0100, 0xE01EF}};
+ static const struct interval combining[] = {
+ {0x0300, 0x036F}, {0x0483, 0x0486}, {0x0488, 0x0489}, {0x0591, 0x05BD},
+ {0x05BF, 0x05BF}, {0x05C1, 0x05C2}, {0x05C4, 0x05C5}, {0x05C7, 0x05C7},
+ {0x0600, 0x0603}, {0x0610, 0x0615}, {0x064B, 0x065E}, {0x0670, 0x0670},
+ {0x06D6, 0x06E4}, {0x06E7, 0x06E8}, {0x06EA, 0x06ED}, {0x070F, 0x070F},
+ {0x0711, 0x0711}, {0x0730, 0x074A}, {0x07A6, 0x07B0}, {0x07EB, 0x07F3},
+ {0x0901, 0x0902}, {0x093C, 0x093C}, {0x0941, 0x0948}, {0x094D, 0x094D},
+ {0x0951, 0x0954}, {0x0962, 0x0963}, {0x0981, 0x0981}, {0x09BC, 0x09BC},
+ {0x09C1, 0x09C4}, {0x09CD, 0x09CD}, {0x09E2, 0x09E3}, {0x0A01, 0x0A02},
+ {0x0A3C, 0x0A3C}, {0x0A41, 0x0A42}, {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D},
+ {0x0A70, 0x0A71}, {0x0A81, 0x0A82}, {0x0ABC, 0x0ABC}, {0x0AC1, 0x0AC5},
+ {0x0AC7, 0x0AC8}, {0x0ACD, 0x0ACD}, {0x0AE2, 0x0AE3}, {0x0B01, 0x0B01},
+ {0x0B3C, 0x0B3C}, {0x0B3F, 0x0B3F}, {0x0B41, 0x0B43}, {0x0B4D, 0x0B4D},
+ {0x0B56, 0x0B56}, {0x0B82, 0x0B82}, {0x0BC0, 0x0BC0}, {0x0BCD, 0x0BCD},
+ {0x0C3E, 0x0C40}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56},
+ {0x0CBC, 0x0CBC}, {0x0CBF, 0x0CBF}, {0x0CC6, 0x0CC6}, {0x0CCC, 0x0CCD},
+ {0x0CE2, 0x0CE3}, {0x0D41, 0x0D43}, {0x0D4D, 0x0D4D}, {0x0DCA, 0x0DCA},
+ {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6}, {0x0E31, 0x0E31}, {0x0E34, 0x0E3A},
+ {0x0E47, 0x0E4E}, {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC},
+ {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35}, {0x0F37, 0x0F37},
+ {0x0F39, 0x0F39}, {0x0F71, 0x0F7E}, {0x0F80, 0x0F84}, {0x0F86, 0x0F87},
+ {0x0F90, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102D, 0x1030},
+ {0x1032, 0x1032}, {0x1036, 0x1037}, {0x1039, 0x1039}, {0x1058, 0x1059},
+ {0x1160, 0x11FF}, {0x135F, 0x135F}, {0x1712, 0x1714}, {0x1732, 0x1734},
+ {0x1752, 0x1753}, {0x1772, 0x1773}, {0x17B4, 0x17B5}, {0x17B7, 0x17BD},
+ {0x17C6, 0x17C6}, {0x17C9, 0x17D3}, {0x17DD, 0x17DD}, {0x180B, 0x180D},
+ {0x18A9, 0x18A9}, {0x1920, 0x1922}, {0x1927, 0x1928}, {0x1932, 0x1932},
+ {0x1939, 0x193B}, {0x1A17, 0x1A18}, {0x1B00, 0x1B03}, {0x1B34, 0x1B34},
+ {0x1B36, 0x1B3A}, {0x1B3C, 0x1B3C}, {0x1B42, 0x1B42}, {0x1B6B, 0x1B73},
+ {0x1DC0, 0x1DCA}, {0x1DFE, 0x1DFF}, {0x200B, 0x200F}, {0x202A, 0x202E},
+ {0x2060, 0x2063}, {0x206A, 0x206F}, {0x20D0, 0x20EF}, {0x302A, 0x302F},
+ {0x3099, 0x309A}, {0xA806, 0xA806}, {0xA80B, 0xA80B}, {0xA825, 0xA826},
+ {0xFB1E, 0xFB1E}, {0xFE00, 0xFE0F}, {0xFE20, 0xFE23}, {0xFEFF, 0xFEFF},
+ {0xFFF9, 0xFFFB}, {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F},
+ {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x1D167, 0x1D169}, {0x1D173, 0x1D182},
+ {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0xE0001, 0xE0001},
+ {0xE0020, 0xE007F}, {0xE0100, 0xE01EF}};
/* test for 8-bit control characters */
if (ucs == 0)
@@ -319,162 +214,46 @@ int mk_wcswidth(const int* pwcs, size_t n) {
int mk_wcwidth_cjk(int ucs) {
/* sorted list of non-overlapping intervals of East Asian Ambiguous
* characters, generated by "uniset +WIDTH-A -cat=Me -cat=Mn -cat=Cf c" */
- static const struct interval ambiguous[] = {{0x00A1, 0x00A1},
- {0x00A4, 0x00A4},
- {0x00A7, 0x00A8},
- {0x00AA, 0x00AA},
- {0x00AE, 0x00AE},
- {0x00B0, 0x00B4},
- {0x00B6, 0x00BA},
- {0x00BC, 0x00BF},
- {0x00C6, 0x00C6},
- {0x00D0, 0x00D0},
- {0x00D7, 0x00D8},
- {0x00DE, 0x00E1},
- {0x00E6, 0x00E6},
- {0x00E8, 0x00EA},
- {0x00EC, 0x00ED},
- {0x00F0, 0x00F0},
- {0x00F2, 0x00F3},
- {0x00F7, 0x00FA},
- {0x00FC, 0x00FC},
- {0x00FE, 0x00FE},
- {0x0101, 0x0101},
- {0x0111, 0x0111},
- {0x0113, 0x0113},
- {0x011B, 0x011B},
- {0x0126, 0x0127},
- {0x012B, 0x012B},
- {0x0131, 0x0133},
- {0x0138, 0x0138},
- {0x013F, 0x0142},
- {0x0144, 0x0144},
- {0x0148, 0x014B},
- {0x014D, 0x014D},
- {0x0152, 0x0153},
- {0x0166, 0x0167},
- {0x016B, 0x016B},
- {0x01CE, 0x01CE},
- {0x01D0, 0x01D0},
- {0x01D2, 0x01D2},
- {0x01D4, 0x01D4},
- {0x01D6, 0x01D6},
- {0x01D8, 0x01D8},
- {0x01DA, 0x01DA},
- {0x01DC, 0x01DC},
- {0x0251, 0x0251},
- {0x0261, 0x0261},
- {0x02C4, 0x02C4},
- {0x02C7, 0x02C7},
- {0x02C9, 0x02CB},
- {0x02CD, 0x02CD},
- {0x02D0, 0x02D0},
- {0x02D8, 0x02DB},
- {0x02DD, 0x02DD},
- {0x02DF, 0x02DF},
- {0x0391, 0x03A1},
- {0x03A3, 0x03A9},
- {0x03B1, 0x03C1},
- {0x03C3, 0x03C9},
- {0x0401, 0x0401},
- {0x0410, 0x044F},
- {0x0451, 0x0451},
- {0x2010, 0x2010},
- {0x2013, 0x2016},
- {0x2018, 0x2019},
- {0x201C, 0x201D},
- {0x2020, 0x2022},
- {0x2024, 0x2027},
- {0x2030, 0x2030},
- {0x2032, 0x2033},
- {0x2035, 0x2035},
- {0x203B, 0x203B},
- {0x203E, 0x203E},
- {0x2074, 0x2074},
- {0x207F, 0x207F},
- {0x2081, 0x2084},
- {0x20AC, 0x20AC},
- {0x2103, 0x2103},
- {0x2105, 0x2105},
- {0x2109, 0x2109},
- {0x2113, 0x2113},
- {0x2116, 0x2116},
- {0x2121, 0x2122},
- {0x2126, 0x2126},
- {0x212B, 0x212B},
- {0x2153, 0x2154},
- {0x215B, 0x215E},
- {0x2160, 0x216B},
- {0x2170, 0x2179},
- {0x2190, 0x2199},
- {0x21B8, 0x21B9},
- {0x21D2, 0x21D2},
- {0x21D4, 0x21D4},
- {0x21E7, 0x21E7},
- {0x2200, 0x2200},
- {0x2202, 0x2203},
- {0x2207, 0x2208},
- {0x220B, 0x220B},
- {0x220F, 0x220F},
- {0x2211, 0x2211},
- {0x2215, 0x2215},
- {0x221A, 0x221A},
- {0x221D, 0x2220},
- {0x2223, 0x2223},
- {0x2225, 0x2225},
- {0x2227, 0x222C},
- {0x222E, 0x222E},
- {0x2234, 0x2237},
- {0x223C, 0x223D},
- {0x2248, 0x2248},
- {0x224C, 0x224C},
- {0x2252, 0x2252},
- {0x2260, 0x2261},
- {0x2264, 0x2267},
- {0x226A, 0x226B},
- {0x226E, 0x226F},
- {0x2282, 0x2283},
- {0x2286, 0x2287},
- {0x2295, 0x2295},
- {0x2299, 0x2299},
- {0x22A5, 0x22A5},
- {0x22BF, 0x22BF},
- {0x2312, 0x2312},
- {0x2460, 0x24E9},
- {0x24EB, 0x254B},
- {0x2550, 0x2573},
- {0x2580, 0x258F},
- {0x2592, 0x2595},
- {0x25A0, 0x25A1},
- {0x25A3, 0x25A9},
- {0x25B2, 0x25B3},
- {0x25B6, 0x25B7},
- {0x25BC, 0x25BD},
- {0x25C0, 0x25C1},
- {0x25C6, 0x25C8},
- {0x25CB, 0x25CB},
- {0x25CE, 0x25D1},
- {0x25E2, 0x25E5},
- {0x25EF, 0x25EF},
- {0x2605, 0x2606},
- {0x2609, 0x2609},
- {0x260E, 0x260F},
- {0x2614, 0x2615},
- {0x261C, 0x261C},
- {0x261E, 0x261E},
- {0x2640, 0x2640},
- {0x2642, 0x2642},
- {0x2660, 0x2661},
- {0x2663, 0x2665},
- {0x2667, 0x266A},
- {0x266C, 0x266D},
- {0x266F, 0x266F},
- {0x273D, 0x273D},
- {0x2776, 0x277F},
- {0xE000, 0xF8FF},
- {0xFFFD, 0xFFFD},
- {0xF0000, 0xFFFFD},
- {0x100000, 0x10FFFD}};
+ static const struct interval ambiguous[] = {
+ {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8}, {0x00AA, 0x00AA},
+ {0x00AE, 0x00AE}, {0x00B0, 0x00B4}, {0x00B6, 0x00BA}, {0x00BC, 0x00BF},
+ {0x00C6, 0x00C6}, {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1},
+ {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED}, {0x00F0, 0x00F0},
+ {0x00F2, 0x00F3}, {0x00F7, 0x00FA}, {0x00FC, 0x00FC}, {0x00FE, 0x00FE},
+ {0x0101, 0x0101}, {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B},
+ {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133}, {0x0138, 0x0138},
+ {0x013F, 0x0142}, {0x0144, 0x0144}, {0x0148, 0x014B}, {0x014D, 0x014D},
+ {0x0152, 0x0153}, {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE},
+ {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4}, {0x01D6, 0x01D6},
+ {0x01D8, 0x01D8}, {0x01DA, 0x01DA}, {0x01DC, 0x01DC}, {0x0251, 0x0251},
+ {0x0261, 0x0261}, {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB},
+ {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB}, {0x02DD, 0x02DD},
+ {0x02DF, 0x02DF}, {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1},
+ {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F}, {0x0451, 0x0451},
+ {0x2010, 0x2010}, {0x2013, 0x2016}, {0x2018, 0x2019}, {0x201C, 0x201D},
+ {0x2020, 0x2022}, {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033},
+ {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E}, {0x2074, 0x2074},
+ {0x207F, 0x207F}, {0x2081, 0x2084}, {0x20AC, 0x20AC}, {0x2103, 0x2103},
+ {0x2105, 0x2105}, {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116},
+ {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B}, {0x2153, 0x2154},
+ {0x215B, 0x215E}, {0x2160, 0x216B}, {0x2170, 0x2179}, {0x2190, 0x2199},
+ {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4}, {0x21E7, 0x21E7},
+ {0x2200, 0x2200}, {0x2202, 0x2203}, {0x2207, 0x2208}, {0x220B, 0x220B},
+ {0x220F, 0x220F}, {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A},
+ {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225}, {0x2227, 0x222C},
+ {0x222E, 0x222E}, {0x2234, 0x2237}, {0x223C, 0x223D}, {0x2248, 0x2248},
+ {0x224C, 0x224C}, {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267},
+ {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283}, {0x2286, 0x2287},
+ {0x2295, 0x2295}, {0x2299, 0x2299}, {0x22A5, 0x22A5}, {0x22BF, 0x22BF},
+ {0x2312, 0x2312}, {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573},
+ {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1}, {0x25A3, 0x25A9},
+ {0x25B2, 0x25B3}, {0x25B6, 0x25B7}, {0x25BC, 0x25BD}, {0x25C0, 0x25C1},
+ {0x25C6, 0x25C8}, {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5},
+ {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609}, {0x260E, 0x260F},
+ {0x2614, 0x2615}, {0x261C, 0x261C}, {0x261E, 0x261E}, {0x2640, 0x2640},
+ {0x2642, 0x2642}, {0x2660, 0x2661}, {0x2663, 0x2665}, {0x2667, 0x266A},
+ {0x266C, 0x266D}, {0x266F, 0x266F}, {0x273D, 0x273D}, {0x2776, 0x277F},
+ {0xE000, 0xF8FF}, {0xFFFD, 0xFFFD}, {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}};
/* binary search in table of non-spacing characters */
if (bisearch(ucs, ambiguous, sizeof(ambiguous) / sizeof(struct interval) - 1))
diff --git a/src/mongo/shell/mongo.js b/src/mongo/shell/mongo.js
index fbdff5d61bd..7fb711d8a91 100644
--- a/src/mongo/shell/mongo.js
+++ b/src/mongo/shell/mongo.js
@@ -88,9 +88,7 @@ Mongo.prototype.setLogLevel = function(logLevel, component) {
} else if (component !== undefined) {
throw Error("setLogLevel component must be a string:" + tojson(component));
}
- var vDoc = {
- verbosity: logLevel
- };
+ var vDoc = {verbosity: logLevel};
// nest vDoc
for (var key, obj; componentNames.length > 0;) {
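setLogLevel nests the verbosity document per log component, per the "nest vDoc" comment above. A simplified sketch of building such a nested document; the component names and the while-loop form are illustrative, not the function's exact body:

    var vDoc = {verbosity: 2};
    var componentNames = ['replication', 'heartbeats'];
    while (componentNames.length > 0) {
        var obj = {};
        obj[componentNames.pop()] = vDoc;  // wrap from the innermost component outward
        vDoc = obj;
    }
    // vDoc is now {replication: {heartbeats: {verbosity: 2}}}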
diff --git a/src/mongo/shell/mr.js b/src/mongo/shell/mr.js
index a810a7f0d79..e501a5c71fb 100644
--- a/src/mongo/shell/mr.js
+++ b/src/mongo/shell/mr.js
@@ -22,11 +22,7 @@ MR.emit = function(k, v) {
var num = nativeHelper.apply(get_num_, [k]);
var data = $arr[num];
if (!data) {
- data = {
- key: k,
- values: new Array(1000),
- count: 0
- };
+ data = {key: k, values: new Array(1000), count: 0};
$arr[num] = data;
}
data.values[data.count++] = v;
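MR.emit buffers values per key before reducing, as the hunk above shows. A self-contained sketch of that accumulator with the native bucket lookup replaced by a plain object (an assumption made for illustration):

    var buckets = {};
    function emitExample(k, v) {
        var data = buckets[k];
        if (!data) {
            data = {key: k, values: new Array(1000), count: 0};
            buckets[k] = data;
        }
        data.values[data.count++] = v;
    }
    emitExample("a", 1);
    emitExample("a", 2);  // buckets["a"].count is now 2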
diff --git a/src/mongo/shell/query.js b/src/mongo/shell/query.js
index 813d9be9e59..3a32b7a951a 100644
--- a/src/mongo/shell/query.js
+++ b/src/mongo/shell/query.js
@@ -85,9 +85,7 @@ DBQuery.prototype._ensureSpecial = function() {
if (this._special)
return;
- var n = {
- query: this._query
- };
+ var n = {query: this._query};
this._query = n;
this._special = true;
};
@@ -343,9 +341,7 @@ DBQuery.prototype.toArray = function() {
};
DBQuery.prototype._convertToCountCmd = function(applySkipLimit) {
- var cmd = {
- count: this._collection.getName()
- };
+ var cmd = {count: this._collection.getName()};
if (this._query) {
if (this._special) {
@@ -472,9 +468,7 @@ DBQuery.prototype.maxTimeMS = function(maxTimeMS) {
};
DBQuery.prototype.readConcern = function(level) {
- var readConcernObj = {
- level: level
- };
+ var readConcernObj = {level: level};
return this._addSpecial("readConcern", readConcernObj);
};
@@ -493,9 +487,7 @@ DBQuery.prototype.collation = function(collationSpec) {
* @return this cursor
*/
DBQuery.prototype.readPref = function(mode, tagSet) {
- var readPrefObj = {
- mode: mode
- };
+ var readPrefObj = {mode: mode};
if (tagSet) {
readPrefObj.tags = tagSet;
@@ -759,10 +751,7 @@ DBCommandCursor.prototype.close = function() {
*/
DBCommandCursor.prototype._runGetMoreCommand = function() {
// Construct the getMore command.
- var getMoreCmd = {
- getMore: this._cursorid,
- collection: this._collName
- };
+ var getMoreCmd = {getMore: this._cursorid, collection: this._collName};
if (this._batchSize) {
getMoreCmd["batchSize"] = this._batchSize;
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index 662397267c1..394af60484d 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -74,10 +74,7 @@ var ReplSetTest = function(opts) {
var self = this;
// Replica set health states
- var Health = {
- UP: 1,
- DOWN: 0
- };
+ var Health = {UP: 1, DOWN: 0};
var _alldbpaths;
var _configSettings;
@@ -94,10 +91,7 @@ var ReplSetTest = function(opts) {
* Populates a reference to all reachable nodes.
*/
function _clearLiveNodes() {
- self.liveNodes = {
- master: null,
- slaves: []
- };
+ self.liveNodes = {master: null, slaves: []};
}
/**
@@ -310,10 +304,8 @@ var ReplSetTest = function(opts) {
function _getLastCommittedOpTime(conn) {
var replSetStatus =
assert.commandWorked(conn.getDB("admin").runCommand({replSetGetStatus: 1}));
- return (replSetStatus.OpTimes || replSetStatus.optimes).lastCommittedOpTime || {
- ts: Timestamp(0, 0),
- t: NumberLong(0)
- };
+ return (replSetStatus.OpTimes || replSetStatus.optimes).lastCommittedOpTime ||
+ {ts: Timestamp(0, 0), t: NumberLong(0)};
}
/**
@@ -325,22 +317,14 @@ var ReplSetTest = function(opts) {
function _getReadConcernMajorityOpTime(conn) {
var replSetStatus =
assert.commandWorked(conn.getDB("admin").runCommand({replSetGetStatus: 1}));
- return (replSetStatus.OpTimes || replSetStatus.optimes).readConcernMajorityOpTime || {
- ts: Timestamp(0, 0),
- t: NumberLong(0)
- };
+ return (replSetStatus.OpTimes || replSetStatus.optimes).readConcernMajorityOpTime ||
+ {ts: Timestamp(0, 0), t: NumberLong(0)};
}
function _isEarlierOpTime(ot1, ot2) {
// Make sure both optimes have a timestamp and a term.
- ot1 = ot1.t ? ot1 : {
- ts: ot1,
- t: NumberLong(-1)
- };
- ot2 = ot2.t ? ot2 : {
- ts: ot2,
- t: NumberLong(-1)
- };
+ ot1 = ot1.t ? ot1 : {ts: ot1, t: NumberLong(-1)};
+ ot2 = ot2.t ? ot2 : {ts: ot2, t: NumberLong(-1)};
// If both optimes have a term that's not -1 and one has a lower term, return that optime.
if (!friendlyEqual(ot1.t, NumberLong(-1)) && !friendlyEqual(ot2.t, NumberLong(-1))) {
@@ -916,10 +900,7 @@ var ReplSetTest = function(opts) {
options.restart = options.restart || restart;
- var pathOpts = {
- node: n,
- set: this.name
- };
+ var pathOpts = {node: n, set: this.name};
options.pathOpts = Object.merge(options.pathOpts || {}, pathOpts);
if (tojson(options) != tojson({}))
@@ -929,15 +910,13 @@ var ReplSetTest = function(opts) {
if (_useBridge) {
var bridgeOptions = Object.merge(_bridgeOptions, options.bridgeOptions || {});
- bridgeOptions = Object.merge(
- bridgeOptions,
- {
- hostName: this.host,
- port: this.ports[n],
- // The mongod processes identify themselves to mongobridge as host:port, where the
- // host is the actual hostname of the machine and not localhost.
- dest: getHostName() + ":" + _unbridgedPorts[n],
- });
+ bridgeOptions = Object.merge(bridgeOptions, {
+ hostName: this.host,
+ port: this.ports[n],
+ // The mongod processes identify themselves to mongobridge as host:port, where the
+ // host is the actual hostname of the machine and not localhost.
+ dest: getHostName() + ":" + _unbridgedPorts[n],
+ });
this.nodes[n] = new MongoBridge(bridgeOptions);
}
@@ -1117,9 +1096,7 @@ var ReplSetTest = function(opts) {
this.query = function(ts) {
var coll = this.getOplogColl();
- var query = {
- "ts": {"$gte": ts ? ts : new Timestamp()}
- };
+ var query = {"ts": {"$gte": ts ? ts : new Timestamp()}};
this.cursor = coll.find(query).sort({$natural: 1});
this.cursor.addOption(DBQuery.Option.oplogReplay);
};
@@ -1309,9 +1286,7 @@ ReplSetTest.awaitRSClientHosts = function(conn, host, hostOk, rs, timeout) {
timeout = timeout || 60000;
if (hostOk == undefined)
- hostOk = {
- ok: true
- };
+ hostOk = {ok: true};
if (host.host)
host = host.host;
if (rs)
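ReplSetTest threads a small pathOpts document through node startup, merged with any caller-supplied options, as in the hunk near the top of this file's diff. A sketch; the node index and set name are placeholders:

    var pathOpts = {node: 0, set: 'testReplSet'};
    var options = {};
    options.pathOpts = Object.merge(options.pathOpts || {}, pathOpts);
    // options.pathOpts is {node: 0, set: 'testReplSet'}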
diff --git a/src/mongo/shell/servers.js b/src/mongo/shell/servers.js
index 2496b70fb91..6bfc15d945d 100644
--- a/src/mongo/shell/servers.js
+++ b/src/mongo/shell/servers.js
@@ -382,9 +382,7 @@ var MongoRunner, _startMongod, startMongoProgram, runMongoProgram, startMongoPro
// If we're a mongo object
if (opts.getDB) {
- opts = {
- restart: opts.runId
- };
+ opts = {restart: opts.runId};
}
// Initialize and create a copy of the opts
@@ -794,11 +792,10 @@ var MongoRunner, _startMongod, startMongoProgram, runMongoProgram, startMongoPro
if (!Array.contains(allowedExitCodes, returnCode)) {
throw new MongoRunner.StopError(
- // clang-format off
+ // clang-format off
`MongoDB process on port ${port} exited with error code ${returnCode}`,
- // clang-format on
- returnCode
- );
+ // clang-format on
+ returnCode);
}
return returnCode;
@@ -884,10 +881,9 @@ var MongoRunner, _startMongod, startMongoProgram, runMongoProgram, startMongoPro
}
}
if (!hasAuthMechs) {
- argArray.push(...[
- '--setParameter',
- "authenticationMechanisms=" + jsTest.options().authMechanism
- ]);
+ argArray.push(
+ ...['--setParameter',
+ "authenticationMechanisms=" + jsTest.options().authMechanism]);
}
}
if (jsTest.options().auth) {
@@ -916,22 +912,16 @@ var MongoRunner, _startMongod, startMongoProgram, runMongoProgram, startMongoPro
}
}
if (jsTest.options().wiredTigerEngineConfigString) {
- argArray.push(...[
- '--wiredTigerEngineConfigString',
- jsTest.options().wiredTigerEngineConfigString
- ]);
+ argArray.push(...['--wiredTigerEngineConfigString',
+ jsTest.options().wiredTigerEngineConfigString]);
}
if (jsTest.options().wiredTigerCollectionConfigString) {
- argArray.push(...[
- '--wiredTigerCollectionConfigString',
- jsTest.options().wiredTigerCollectionConfigString
- ]);
+ argArray.push(...['--wiredTigerCollectionConfigString',
+ jsTest.options().wiredTigerCollectionConfigString]);
}
if (jsTest.options().wiredTigerIndexConfigString) {
- argArray.push(...[
- '--wiredTigerIndexConfigString',
- jsTest.options().wiredTigerIndexConfigString
- ]);
+ argArray.push(...['--wiredTigerIndexConfigString',
+ jsTest.options().wiredTigerIndexConfigString]);
}
// apply setParameters for mongod
if (jsTest.options().setParameters) {
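servers.js appends flag/value pairs to the argv array with a spread push, which clang-format now keeps on as few lines as possible. A minimal, self-contained sketch (the parameter name is made up):

    var argArray = ['mongod'];
    argArray.push(...['--setParameter', 'exampleParam=' + 42]);
    // argArray is now ['mongod', '--setParameter', 'exampleParam=42']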
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index d1fd416d568..be105364261 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -410,12 +410,10 @@ var ShardingTest = function(params) {
}
var s = "";
- this.config.chunks.find(q)
- .sort({ns: 1, min: 1})
- .forEach(function(z) {
- s += " " + z._id + "\t" + z.lastmod.t + "|" + z.lastmod.i + "\t" + tojson(z.min) +
- " -> " + tojson(z.max) + " " + z.shard + " " + z.ns + "\n";
- });
+ this.config.chunks.find(q).sort({ns: 1, min: 1}).forEach(function(z) {
+ s += " " + z._id + "\t" + z.lastmod.t + "|" + z.lastmod.i + "\t" + tojson(z.min) +
+ " -> " + tojson(z.max) + " " + z.shard + " " + z.ns + "\n";
+ });
return s;
};
@@ -494,13 +492,12 @@ var ShardingTest = function(params) {
x[z._id] = 0;
});
- this.config.chunks.find({ns: dbName + "." + collName})
- .forEach(function(z) {
- if (x[z.shard])
- x[z.shard]++;
- else
- x[z.shard] = 1;
- });
+ this.config.chunks.find({ns: dbName + "." + collName}).forEach(function(z) {
+ if (x[z.shard])
+ x[z.shard]++;
+ else
+ x[z.shard] = 1;
+ });
return x;
};
@@ -538,11 +535,9 @@ var ShardingTest = function(params) {
this.getShardNames = function() {
var shards = [];
- this.s.getCollection("config.shards")
- .find()
- .forEach(function(shardDoc) {
- shards.push(shardDoc._id);
- });
+ this.s.getCollection("config.shards").find().forEach(function(shardDoc) {
+ shards.push(shardDoc._id);
+ });
return shards;
};
@@ -788,15 +783,13 @@ var ShardingTest = function(params) {
var bridgeOptions =
(opts !== mongos) ? opts.bridgeOptions : mongos.fullOptions.bridgeOptions;
bridgeOptions = Object.merge(otherParams.bridgeOptions, bridgeOptions || {});
- bridgeOptions = Object.merge(
- bridgeOptions,
- {
- hostName: otherParams.useHostname ? hostName : "localhost",
- port: this._mongos[n].port,
- // The mongos processes identify themselves to mongobridge as host:port, where the
- // host is the actual hostname of the machine and not localhost.
- dest: hostName + ":" + opts.port,
- });
+ bridgeOptions = Object.merge(bridgeOptions, {
+ hostName: otherParams.useHostname ? hostName : "localhost",
+ port: this._mongos[n].port,
+ // The mongos processes identify themselves to mongobridge as host:port, where the
+ // host is the actual hostname of the machine and not localhost.
+ dest: hostName + ":" + opts.port,
+ });
this._mongos[n] = new MongoBridge(bridgeOptions);
}
@@ -850,15 +843,13 @@ var ShardingTest = function(params) {
var bridgeOptions =
(opts !== mongod) ? opts.bridgeOptions : mongod.fullOptions.bridgeOptions;
bridgeOptions = Object.merge(otherParams.bridgeOptions, bridgeOptions || {});
- bridgeOptions = Object.merge(
- bridgeOptions,
- {
- hostName: otherParams.useHostname ? hostName : "localhost",
- port: this._connections[n].port,
- // The mongod processes identify themselves to mongobridge as host:port, where the
- // host is the actual hostname of the machine and not localhost.
- dest: hostName + ":" + opts.port,
- });
+ bridgeOptions = Object.merge(bridgeOptions, {
+ hostName: otherParams.useHostname ? hostName : "localhost",
+ port: this._connections[n].port,
+ // The mongod processes identify themselves to mongobridge as host:port, where the
+ // host is the actual hostname of the machine and not localhost.
+ dest: hostName + ":" + opts.port,
+ });
this._connections[n] = new MongoBridge(bridgeOptions);
}
@@ -911,15 +902,13 @@ var ShardingTest = function(params) {
if (otherParams.useBridge) {
var bridgeOptions =
Object.merge(otherParams.bridgeOptions, mongod.fullOptions.bridgeOptions || {});
- bridgeOptions = Object.merge(
- bridgeOptions,
- {
- hostName: otherParams.useHostname ? hostName : "localhost",
- port: this._configServers[n].port,
- // The mongod processes identify themselves to mongobridge as host:port, where the
- // host is the actual hostname of the machine and not localhost.
- dest: hostName + ":" + mongod.port,
- });
+ bridgeOptions = Object.merge(bridgeOptions, {
+ hostName: otherParams.useHostname ? hostName : "localhost",
+ port: this._configServers[n].port,
+ // The mongod processes identify themselves to mongobridge as host:port, where the
+ // host is the actual hostname of the machine and not localhost.
+ dest: hostName + ":" + mongod.port,
+ });
this._configServers[n] = new MongoBridge(bridgeOptions);
}
@@ -1033,9 +1022,7 @@ var ShardingTest = function(params) {
this._testName = testName;
this._otherParams = otherParams;
- var pathOpts = {
- testName: testName
- };
+ var pathOpts = {testName: testName};
for (var k in otherParams) {
if (k.startsWith("rs") && otherParams[k] != undefined) {
@@ -1092,12 +1079,8 @@ var ShardingTest = function(params) {
settings: rsSettings
});
- this._rs[i] = {
- setName: setName,
- test: rs,
- nodes: rs.startSet(rsDefaults),
- url: rs.getURL()
- };
+ this._rs[i] =
+ {setName: setName, test: rs, nodes: rs.startSet(rsDefaults), url: rs.getURL()};
rs.initiate(null, null, initiateTimeout);
@@ -1133,14 +1116,12 @@ var ShardingTest = function(params) {
if (otherParams.useBridge) {
var bridgeOptions =
Object.merge(otherParams.bridgeOptions, options.bridgeOptions || {});
- bridgeOptions = Object.merge(
- bridgeOptions,
- {
- hostName: otherParams.useHostname ? hostName : "localhost",
- // The mongod processes identify themselves to mongobridge as host:port, where
- // the host is the actual hostname of the machine and not localhost.
- dest: hostName + ":" + options.port,
- });
+ bridgeOptions = Object.merge(bridgeOptions, {
+ hostName: otherParams.useHostname ? hostName : "localhost",
+ // The mongod processes identify themselves to mongobridge as host:port, where
+ // the host is the actual hostname of the machine and not localhost.
+ dest: hostName + ":" + options.port,
+ });
var bridge = new MongoBridge(bridgeOptions);
}
@@ -1177,11 +1158,9 @@ var ShardingTest = function(params) {
rs.getPrimary().getDB("admin").foo.save({x: 1});
if (keyFile) {
- authutil.asCluster(rs.nodes,
- keyFile,
- function() {
- rs.awaitReplication();
- });
+ authutil.asCluster(rs.nodes, keyFile, function() {
+ rs.awaitReplication();
+ });
}
rs.awaitSecondaryNodes();
@@ -1288,14 +1267,12 @@ var ShardingTest = function(params) {
if (otherParams.useBridge) {
var bridgeOptions =
Object.merge(otherParams.bridgeOptions, options.bridgeOptions || {});
- bridgeOptions = Object.merge(
- bridgeOptions,
- {
- hostName: otherParams.useHostname ? hostName : "localhost",
- // The mongos processes identify themselves to mongobridge as host:port, where the
- // host is the actual hostname of the machine and not localhost.
- dest: hostName + ":" + options.port,
- });
+ bridgeOptions = Object.merge(bridgeOptions, {
+ hostName: otherParams.useHostname ? hostName : "localhost",
+ // The mongos processes identify themselves to mongobridge as host:port, where the
+ // host is the actual hostname of the machine and not localhost.
+ dest: hostName + ":" + options.port,
+ });
var bridge = new MongoBridge(bridgeOptions);
}
@@ -1325,13 +1302,11 @@ var ShardingTest = function(params) {
// If auth is enabled for the test, login the mongos connections as system in order to
// configure the instances and then log them out again.
if (keyFile) {
- authutil.assertAuthenticate(this._mongos,
- 'admin',
- {
- user: '__system',
- mechanism: 'MONGODB-CR',
- pwd: cat(keyFile).replace(/[\011-\015\040]/g, '')
- });
+ authutil.assertAuthenticate(this._mongos, 'admin', {
+ user: '__system',
+ mechanism: 'MONGODB-CR',
+ pwd: cat(keyFile).replace(/[\011-\015\040]/g, '')
+ });
}
try {
diff --git a/src/mongo/shell/shell_options.cpp b/src/mongo/shell/shell_options.cpp
index 63ab20931dc..7bafc00c54c 100644
--- a/src/mongo/shell/shell_options.cpp
+++ b/src/mongo/shell/shell_options.cpp
@@ -84,14 +84,15 @@ Status addMongoShellOptions(moe::OptionSection* options) {
authenticationOptions.addOptionChaining(
"username", "username,u", moe::String, "username for authentication");
- authenticationOptions.addOptionChaining(
- "password", "password,p", moe::String, "password for authentication")
+ authenticationOptions
+ .addOptionChaining("password", "password,p", moe::String, "password for authentication")
.setImplicit(moe::Value(std::string("")));
- authenticationOptions.addOptionChaining("authenticationDatabase",
- "authenticationDatabase",
- moe::String,
- "user source (defaults to dbname)")
+ authenticationOptions
+ .addOptionChaining("authenticationDatabase",
+ "authenticationDatabase",
+ moe::String,
+ "user source (defaults to dbname)")
.setDefault(moe::Value(std::string("")));
authenticationOptions.addOptionChaining("authenticationMechanism",
@@ -99,11 +100,11 @@ Status addMongoShellOptions(moe::OptionSection* options) {
moe::String,
"authentication mechanism");
- authenticationOptions.addOptionChaining(
- "gssapiServiceName",
- "gssapiServiceName",
- moe::String,
- "Service name to use when authenticating using GSSAPI/Kerberos")
+ authenticationOptions
+ .addOptionChaining("gssapiServiceName",
+ "gssapiServiceName",
+ moe::String,
+ "Service name to use when authenticating using GSSAPI/Kerberos")
.setDefault(moe::Value(std::string(saslDefaultServiceName)));
authenticationOptions.addOptionChaining(
@@ -128,10 +129,11 @@ Status addMongoShellOptions(moe::OptionSection* options) {
moe::Switch,
"disable the Javascript Just In Time compiler");
- options->addOptionChaining("disableJavaScriptProtection",
- "disableJavaScriptProtection",
- moe::Switch,
- "allow automatic JavaScript function marshalling")
+ options
+ ->addOptionChaining("disableJavaScriptProtection",
+ "disableJavaScriptProtection",
+ moe::Switch,
+ "allow automatic JavaScript function marshalling")
.incompatibleWith("enableJavaScriptProtection");
Status ret = Status::OK();
@@ -142,11 +144,11 @@ Status addMongoShellOptions(moe::OptionSection* options) {
}
#endif
- options->addOptionChaining(
- "enableJavaScriptProtection",
- "enableJavaScriptProtection",
- moe::Switch,
- "disable automatic JavaScript function marshalling (defaults to true)")
+ options
+ ->addOptionChaining("enableJavaScriptProtection",
+ "enableJavaScriptProtection",
+ moe::Switch,
+ "disable automatic JavaScript function marshalling (defaults to true)")
.hidden()
.incompatibleWith("disableJavaScriptProtection");
@@ -165,27 +167,33 @@ Status addMongoShellOptions(moe::OptionSection* options) {
// for testing, will kill op without prompting
options->addOptionChaining("autokillop", "autokillop", moe::Switch, "autokillop").hidden();
- options->addOptionChaining("useLegacyWriteOps",
- "useLegacyWriteOps",
- moe::Switch,
- "use legacy write ops instead of write commands").hidden();
-
- options->addOptionChaining("writeMode",
- "writeMode",
- moe::String,
- "mode to determine how writes are done:"
- " commands, compatibility, legacy").hidden();
-
- options->addOptionChaining("readMode",
- "readMode",
- moe::String,
- "mode to determine how .find() queries are done:"
- " commands, compatibility, legacy").hidden();
-
- options->addOptionChaining("rpcProtocols",
- "rpcProtocols",
- moe::String,
- " none, opQueryOnly, opCommandOnly, all").hidden();
+ options
+ ->addOptionChaining("useLegacyWriteOps",
+ "useLegacyWriteOps",
+ moe::Switch,
+ "use legacy write ops instead of write commands")
+ .hidden();
+
+ options
+ ->addOptionChaining("writeMode",
+ "writeMode",
+ moe::String,
+ "mode to determine how writes are done:"
+ " commands, compatibility, legacy")
+ .hidden();
+
+ options
+ ->addOptionChaining("readMode",
+ "readMode",
+ moe::String,
+ "mode to determine how .find() queries are done:"
+ " commands, compatibility, legacy")
+ .hidden();
+
+ options
+ ->addOptionChaining(
+ "rpcProtocols", "rpcProtocols", moe::String, " none, opQueryOnly, opCommandOnly, all")
+ .hidden();
return Status::OK();
}
@@ -313,7 +321,8 @@ Status storeMongoShellOptions(const moe::Environment& params,
throw MsgAssertionException(
17397,
mongoutils::str::stream()
- << "Unknown readMode option: '" << mode
+ << "Unknown readMode option: '"
+ << mode
<< "'. Valid modes are: {commands, compatibility, legacy}");
}
shellGlobalParams.readMode = mode;
diff --git a/src/mongo/shell/shell_utils.cpp b/src/mongo/shell/shell_utils.cpp
index bae9cce9081..1d723a4c54c 100644
--- a/src/mongo/shell/shell_utils.cpp
+++ b/src/mongo/shell/shell_utils.cpp
@@ -31,13 +31,13 @@
#include "mongo/shell/shell_utils.h"
-#include "mongo/client/replica_set_monitor.h"
#include "mongo/client/dbclientinterface.h"
+#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/catalog/index_key_validate.h"
#include "mongo/db/index/external_key_generator.h"
#include "mongo/platform/random.h"
-#include "mongo/shell/bench.h"
#include "mongo/scripting/engine.h"
+#include "mongo/shell/bench.h"
#include "mongo/shell/shell_options.h"
#include "mongo/shell/shell_utils_extended.h"
#include "mongo/shell/shell_utils_launcher.h"
diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp
index b71f7a8ebc6..cd2f4f24336 100644
--- a/src/mongo/shell/shell_utils_launcher.cpp
+++ b/src/mongo/shell/shell_utils_launcher.cpp
@@ -43,9 +43,9 @@
#include <io.h>
#define SIGKILL 9
#else
-#include <sys/socket.h>
#include <netinet/in.h>
#include <signal.h>
+#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/wait.h>
#endif
@@ -297,10 +297,12 @@ ProgramRunner::ProgramRunner(const BSONObj& args, const BSONObj& env) {
// we explicitly override them.
#ifdef _WIN32
wchar_t* processEnv = GetEnvironmentStringsW();
- ON_BLOCK_EXIT([](wchar_t* toFree) {
- if (toFree)
- FreeEnvironmentStringsW(toFree);
- }, processEnv);
+ ON_BLOCK_EXIT(
+ [](wchar_t* toFree) {
+ if (toFree)
+ FreeEnvironmentStringsW(toFree);
+ },
+ processEnv);
// Windows' GetEnvironmentStringsW returns a NULL terminated array of NULL separated
// <key>=<value> pairs.
@@ -397,7 +399,7 @@ void ProgramRunner::operator()() {
programOutputLogger.appendLine(
_port, _pid, "WARNING: mongod wrote null bytes to output");
char* last = buf;
- for (char* i = strchr(buf, '\n'); i; last = i + 1, i = strchr(last, '\n')) {
+ for (char *i = strchr(buf, '\n'); i; last = i + 1, i = strchr(last, '\n')) {
*i = '\0';
programOutputLogger.appendLine(_port, _pid, last);
}
@@ -589,10 +591,9 @@ void ProgramRunner::launchProcess(int child_stdout) {
std::string execErrMsg = str::stream() << "Unable to start program " << _argv[0];
auto constCharStorageMaker = [](const std::vector<std::string>& in) {
std::vector<const char*> out;
- std::transform(in.begin(),
- in.end(),
- std::back_inserter(out),
- [](const std::string& x) { return x.c_str(); });
+ std::transform(in.begin(), in.end(), std::back_inserter(out), [](const std::string& x) {
+ return x.c_str();
+ });
out.push_back(nullptr);
return out;
};
diff --git a/src/mongo/shell/shell_utils_launcher.h b/src/mongo/shell/shell_utils_launcher.h
index 7ecb8e95384..94ec8b3fec5 100644
--- a/src/mongo/shell/shell_utils_launcher.h
+++ b/src/mongo/shell/shell_utils_launcher.h
@@ -33,8 +33,8 @@
#include <map>
#include <sstream>
#include <string>
-#include <vector>
#include <utility>
+#include <vector>
#include "mongo/bson/bsonobj.h"
#include "mongo/platform/process_id.h"
diff --git a/src/mongo/shell/types.js b/src/mongo/shell/types.js
index d932b212cf9..0179dd0a43b 100644
--- a/src/mongo/shell/types.js
+++ b/src/mongo/shell/types.js
@@ -565,10 +565,7 @@ Map.prototype._get = function(key) {
return a[i];
}
}
- var o = {
- key: key,
- value: null
- };
+ var o = {key: key, value: null};
a.push(o);
return o;
};
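Map.prototype._get is a get-or-insert helper: return the existing slot, or push a fresh {key, value: null} slot, as in the hunk above. A standalone sketch; the bucket array and the equality check are simplified for illustration:

    var a = [];  // stand-in for one hash bucket
    function getOrInsert(key) {
        for (var i = 0; i < a.length; i++) {
            if (a[i].key == key)  // simplified comparison for illustration
                return a[i];
        }
        var o = {key: key, value: null};
        a.push(o);
        return o;
    }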
diff --git a/src/mongo/shell/utils.js b/src/mongo/shell/utils.js
index 26bfd1b7cc8..c99d50a97e7 100644
--- a/src/mongo/shell/utils.js
+++ b/src/mongo/shell/utils.js
@@ -162,42 +162,38 @@ jsTestName = function() {
return "__unknown_name__";
};
-var _jsTestOptions = {
- enableTestCommands: true
-}; // Test commands should be enabled by default
+var _jsTestOptions = {enableTestCommands: true}; // Test commands should be enabled by default
jsTestOptions = function() {
if (TestData) {
- return Object.merge(
- _jsTestOptions,
- {
- setParameters: TestData.setParameters,
- setParametersMongos: TestData.setParametersMongos,
- storageEngine: TestData.storageEngine,
- wiredTigerEngineConfigString: TestData.wiredTigerEngineConfigString,
- wiredTigerCollectionConfigString: TestData.wiredTigerCollectionConfigString,
- wiredTigerIndexConfigString: TestData.wiredTigerIndexConfigString,
- noJournal: TestData.noJournal,
- noJournalPrealloc: TestData.noJournalPrealloc,
- auth: TestData.auth,
- keyFile: TestData.keyFile,
- authUser: "__system",
- authPassword: TestData.keyFileData,
- authMechanism: TestData.authMechanism,
- adminUser: TestData.adminUser || "admin",
- adminPassword: TestData.adminPassword || "password",
- useLegacyConfigServers: TestData.useLegacyConfigServers || false,
- useLegacyReplicationProtocol: TestData.useLegacyReplicationProtocol || false,
- enableMajorityReadConcern: TestData.enableMajorityReadConcern,
- writeConcernMajorityShouldJournal: TestData.writeConcernMajorityShouldJournal,
- enableEncryption: TestData.enableEncryption,
- encryptionKeyFile: TestData.encryptionKeyFile,
- auditDestination: TestData.auditDestination,
- minPort: TestData.minPort,
- maxPort: TestData.maxPort,
- // Note: does not support the array version
- mongosBinVersion: TestData.mongosBinVersion || "",
- });
+ return Object.merge(_jsTestOptions, {
+ setParameters: TestData.setParameters,
+ setParametersMongos: TestData.setParametersMongos,
+ storageEngine: TestData.storageEngine,
+ wiredTigerEngineConfigString: TestData.wiredTigerEngineConfigString,
+ wiredTigerCollectionConfigString: TestData.wiredTigerCollectionConfigString,
+ wiredTigerIndexConfigString: TestData.wiredTigerIndexConfigString,
+ noJournal: TestData.noJournal,
+ noJournalPrealloc: TestData.noJournalPrealloc,
+ auth: TestData.auth,
+ keyFile: TestData.keyFile,
+ authUser: "__system",
+ authPassword: TestData.keyFileData,
+ authMechanism: TestData.authMechanism,
+ adminUser: TestData.adminUser || "admin",
+ adminPassword: TestData.adminPassword || "password",
+ useLegacyConfigServers: TestData.useLegacyConfigServers || false,
+ useLegacyReplicationProtocol: TestData.useLegacyReplicationProtocol || false,
+ enableMajorityReadConcern: TestData.enableMajorityReadConcern,
+ writeConcernMajorityShouldJournal: TestData.writeConcernMajorityShouldJournal,
+ enableEncryption: TestData.enableEncryption,
+ encryptionKeyFile: TestData.encryptionKeyFile,
+ auditDestination: TestData.auditDestination,
+ minPort: TestData.minPort,
+ maxPort: TestData.maxPort,
+ // Note: does not support the array version
+ mongosBinVersion: TestData.mongosBinVersion || "",
+ });
}
return _jsTestOptions;
};
@@ -290,19 +286,12 @@ defaultPrompt = function() {
try {
var prompt = replSetMemberStatePrompt();
// set our status that it was good
- db.getMongo().authStatus = {
- replSetGetStatus: true,
- isMaster: true
- };
+ db.getMongo().authStatus = {replSetGetStatus: true, isMaster: true};
return prefix + prompt;
} catch (e) {
// don't have permission to run that, or requires auth
// print(e);
- status = {
- authRequired: true,
- replSetGetStatus: false,
- isMaster: true
- };
+ status = {authRequired: true, replSetGetStatus: false, isMaster: true};
}
}
// auth detected
@@ -338,9 +327,7 @@ defaultPrompt = function() {
} catch (ex) {
printjson(ex);
// reset status and let it figure it out next time.
- status = {
- isMaster: true
- };
+ status = {isMaster: true};
}
db.getMongo().authStatus = status;
@@ -540,8 +527,8 @@ shellAutocomplete = function(
var worker = function(prefix) {
var global = (function() {
- return this;
- }).call(); // trick to get global object
+ return this;
+ }).call(); // trick to get global object
var curObj = global;
var parts = prefix.split('.');
@@ -999,9 +986,7 @@ _awaitRSHostViaRSMonitor = function(hostAddr, desiredState, rsName, timeout) {
timeout = timeout || 60 * 1000;
if (desiredState == undefined) {
- desiredState = {
- ok: true
- };
+ desiredState = {ok: true};
}
print("Awaiting " + hostAddr + " to be " + tojson(desiredState) + " in " + " rs " + rsName);
@@ -1123,9 +1108,7 @@ rs._runCmd = function(c) {
};
rs.reconfig = function(cfg, options) {
cfg.version = rs.conf().version + 1;
- cmd = {
- replSetReconfig: cfg
- };
+ cmd = {replSetReconfig: cfg};
for (var i in options) {
cmd[i] = options[i];
}
@@ -1147,10 +1130,7 @@ rs.add = function(hostport, arb) {
if (c.members[i]._id > max)
max = c.members[i]._id;
if (isString(hostport)) {
- cfg = {
- _id: max + 1,
- host: hostport
- };
+ cfg = {_id: max + 1, host: hostport};
if (arb)
cfg.arbiterOnly = true;
} else if (arb == true) {
@@ -1168,9 +1148,7 @@ rs.syncFrom = function(host) {
return db._adminCommand({replSetSyncFrom: host});
};
rs.stepDown = function(stepdownSecs, catchUpSecs) {
- var cmdObj = {
- replSetStepDown: stepdownSecs === undefined ? 60 : stepdownSecs
- };
+ var cmdObj = {replSetStepDown: stepdownSecs === undefined ? 60 : stepdownSecs};
if (catchUpSecs !== undefined) {
cmdObj['secondaryCatchUpPeriodSecs'] = catchUpSecs;
}
@@ -1229,9 +1207,7 @@ rs.debug.nullLastOpWritten = function(primary, secondary) {
if (!last.value.o || !last.value.o._id) {
print("couldn't find an _id?");
} else {
- last.value.o = {
- _id: last.value.o._id
- };
+ last.value.o = {_id: last.value.o._id};
}
print("nulling out this op:");
diff --git a/src/mongo/shell/utils_auth.js b/src/mongo/shell/utils_auth.js
index e096e2c8c67..6fd913c963a 100644
--- a/src/mongo/shell/utils_auth.js
+++ b/src/mongo/shell/utils_auth.js
@@ -67,13 +67,11 @@ var authutil;
*/
authutil.asCluster = function(conn, keyfile, action) {
var ex;
- authutil.assertAuthenticate(conn,
- 'local',
- {
- user: '__system',
- mechanism: 'SCRAM-SHA-1',
- pwd: cat(keyfile).replace(/[\011-\015\040]/g, '')
- });
+ authutil.assertAuthenticate(conn, 'local', {
+ user: '__system',
+ mechanism: 'SCRAM-SHA-1',
+ pwd: cat(keyfile).replace(/[\011-\015\040]/g, '')
+ });
try {
return action();
diff --git a/src/mongo/shell/utils_sh.js b/src/mongo/shell/utils_sh.js
index 8aa5e5d19ec..99a2eff00c6 100644
--- a/src/mongo/shell/utils_sh.js
+++ b/src/mongo/shell/utils_sh.js
@@ -92,10 +92,7 @@ sh.shardCollection = function(fullName, key, unique) {
assert(key, "need a key");
assert(typeof(key) == "object", "key needs to be an object");
- var cmd = {
- shardCollection: fullName,
- key: key
- };
+ var cmd = {shardCollection: fullName, key: key};
if (unique)
cmd.unique = true;
@@ -356,9 +353,7 @@ sh._lastMigration = function(ns) {
}
}
- var searchDoc = {
- what: /^moveChunk/
- };
+ var searchDoc = {what: /^moveChunk/};
if (coll)
searchDoc.ns = coll + "";
if (dbase)
@@ -466,11 +461,7 @@ sh.getRecentFailedRounds = function(configDB) {
if (configDB === undefined)
configDB = db.getSiblingDB('config');
var balErrs = configDB.actionlog.find({what: "balancer.round"}).sort({time: -1}).limit(5);
- var result = {
- count: 0,
- lastErr: "",
- lastTime: " "
- };
+ var result = {count: 0, lastErr: "", lastTime: " "};
if (balErrs != null) {
balErrs.forEach(function(r) {
if (r.details.errorOccured) {
@@ -494,41 +485,51 @@ sh.getRecentMigrations = function(configDB) {
var yesterday = new Date(new Date() - 24 * 60 * 60 * 1000);
// Successful migrations.
- var result = configDB.changelog.aggregate([
- {
- $match: {
- time: {$gt: yesterday},
- what: "moveChunk.from", 'details.errmsg': {$exists: false}, 'details.note': 'success'
- }
- },
- {$group: {_id: {msg: "$details.errmsg"}, count: {$sum: 1}}},
- {$project: {_id: {$ifNull: ["$_id.msg", "Success"]}, count: "$count"}}
- ]).toArray();
+ var result = configDB.changelog
+ .aggregate([
+ {
+ $match: {
+ time: {$gt: yesterday},
+ what: "moveChunk.from",
+ 'details.errmsg': {$exists: false},
+ 'details.note': 'success'
+ }
+ },
+ {$group: {_id: {msg: "$details.errmsg"}, count: {$sum: 1}}},
+ {$project: {_id: {$ifNull: ["$_id.msg", "Success"]}, count: "$count"}}
+ ])
+ .toArray();
// Failed migrations.
- result = result.concat(configDB.changelog.aggregate([
- {
- $match: {
- time: {$gt: yesterday},
- what: "moveChunk.from",
- $or: [{'details.errmsg': {$exists: true}}, {'details.note': {$ne: 'success'}}]
- }
- },
- {
- $group: {
- _id: {msg: "$details.errmsg", from: "$details.from", to: "$details.to"},
- count: {$sum: 1}
- }
- },
- {
- $project: {
- _id: {$ifNull: ['$_id.msg', 'aborted']},
- from: "$_id.from",
- to: "$_id.to",
- count: "$count"
- }
- }
- ]).toArray());
+ result = result.concat(
+ configDB.changelog
+ .aggregate([
+ {
+ $match: {
+ time: {$gt: yesterday},
+ what: "moveChunk.from",
+ $or: [
+ {'details.errmsg': {$exists: true}},
+ {'details.note': {$ne: 'success'}}
+ ]
+ }
+ },
+ {
+ $group: {
+ _id: {msg: "$details.errmsg", from: "$details.from", to: "$details.to"},
+ count: {$sum: 1}
+ }
+ },
+ {
+ $project: {
+ _id: {$ifNull: ['$_id.msg', 'aborted']},
+ from: "$_id.from",
+ to: "$_id.to",
+ count: "$count"
+ }
+ }
+ ])
+ .toArray());
return result;
};
@@ -588,17 +589,16 @@ function printShardingStatus(configDB, verbose) {
};
if (verbose) {
- configDB.mongos.find(recentMongosQuery)
- .sort({ping: -1})
- .forEach(function(z) {
- output("\t" + tojsononeline(z));
- });
+ configDB.mongos.find(recentMongosQuery).sort({ping: -1}).forEach(function(z) {
+ output("\t" + tojsononeline(z));
+ });
} else {
- configDB.mongos.aggregate([
- {$match: recentMongosQuery},
- {$group: {_id: "$mongoVersion", num: {$sum: 1}}},
- {$sort: {num: -1}}
- ])
+ configDB.mongos
+ .aggregate([
+ {$match: recentMongosQuery},
+ {$group: {_id: "$mongoVersion", num: {$sum: 1}}},
+ {$sort: {num: -1}}
+ ])
.forEach(function(z) {
output("\t" + tojson(z._id) + " : " + z.num);
});
@@ -733,12 +733,10 @@ function printShardingStatus(configDB, verbose) {
"\t\t\ttoo many chunks to print, use verbose if you want to force print");
}
- configDB.tags.find({ns: coll._id})
- .sort({min: 1})
- .forEach(function(tag) {
- output("\t\t\t tag: " + tag.tag + " " + tojson(tag.min) +
- " -->> " + tojson(tag.max));
- });
+ configDB.tags.find({ns: coll._id}).sort({min: 1}).forEach(function(tag) {
+ output("\t\t\t tag: " + tag.tag + " " + tojson(tag.min) + " -->> " +
+ tojson(tag.max));
+ });
}
});
}
@@ -783,23 +781,21 @@ function printShardingSizes(configDB) {
.sort({_id: 1})
.forEach(function(coll) {
output("\t\t" + coll._id + " chunks:");
- configDB.chunks.find({"ns": coll._id})
- .sort({min: 1})
- .forEach(function(chunk) {
- var mydb = shards[chunk.shard].getDB(db._id);
- var out = mydb.runCommand({
- dataSize: coll._id,
- keyPattern: coll.key,
- min: chunk.min,
- max: chunk.max
- });
- delete out.millis;
- delete out.ok;
-
- output("\t\t\t" + tojson(chunk.min) + " -->> " + tojson(chunk.max) +
- " on : " + chunk.shard + " " + tojson(out));
-
+ configDB.chunks.find({"ns": coll._id}).sort({min: 1}).forEach(function(chunk) {
+ var mydb = shards[chunk.shard].getDB(db._id);
+ var out = mydb.runCommand({
+ dataSize: coll._id,
+ keyPattern: coll.key,
+ min: chunk.min,
+ max: chunk.max
});
+ delete out.millis;
+ delete out.ok;
+
+ output("\t\t\t" + tojson(chunk.min) + " -->> " + tojson(chunk.max) +
+ " on : " + chunk.shard + " " + tojson(out));
+
+ });
});
}
});
diff --git a/src/mongo/tools/bridge.cpp b/src/mongo/tools/bridge.cpp
index e61fd05d9f9..52762e455ea 100644
--- a/src/mongo/tools/bridge.cpp
+++ b/src/mongo/tools/bridge.cpp
@@ -51,18 +51,18 @@
#include "mongo/tools/bridge_commands.h"
#include "mongo/tools/mongobridge_options.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/exit.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/net/abstract_message_port.h"
#include "mongo/util/net/listen.h"
#include "mongo/util/net/message.h"
-#include "mongo/util/exit.h"
#include "mongo/util/quick_exit.h"
-#include "mongo/util/static_observer.h"
#include "mongo/util/signal_handlers.h"
+#include "mongo/util/static_observer.h"
#include "mongo/util/text.h"
-#include "mongo/util/timer.h"
#include "mongo/util/time_support.h"
+#include "mongo/util/timer.h"
namespace mongo {
diff --git a/src/mongo/tools/bridge_commands.cpp b/src/mongo/tools/bridge_commands.cpp
index 2de947b77f0..b6071476933 100644
--- a/src/mongo/tools/bridge_commands.cpp
+++ b/src/mongo/tools/bridge_commands.cpp
@@ -35,8 +35,8 @@
#include "mongo/base/status_with.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/util/bson_extract.h"
-#include "mongo/util/net/hostandport.h"
#include "mongo/util/mongoutils/str.h"
+#include "mongo/util/net/hostandport.h"
#include "mongo/util/string_map.h"
namespace mongo {
diff --git a/src/mongo/tools/mongobridge_options.cpp b/src/mongo/tools/mongobridge_options.cpp
index 16e2b9d0767..577a2c1bf11 100644
--- a/src/mongo/tools/mongobridge_options.cpp
+++ b/src/mongo/tools/mongobridge_options.cpp
@@ -59,7 +59,8 @@ Status addMongoBridgeOptions(moe::OptionSection* options) {
void printMongoBridgeHelp(std::ostream* out) {
*out << "Usage: mongobridge --port <port> --dest <dest> [ --seed <seed> ] [ --verbose <vvv> ]"
- " [ --help ]" << std::endl;
+ " [ --help ]"
+ << std::endl;
*out << moe::startupOptions.helpString();
*out << std::flush;
}
diff --git a/src/mongo/tools/mongobridge_options_init.cpp b/src/mongo/tools/mongobridge_options_init.cpp
index 4dbf03ce479..5207fc0b49d 100644
--- a/src/mongo/tools/mongobridge_options_init.cpp
+++ b/src/mongo/tools/mongobridge_options_init.cpp
@@ -30,9 +30,9 @@
#include <iostream>
+#include "mongo/util/exit_code.h"
#include "mongo/util/options_parser/startup_option_init.h"
#include "mongo/util/options_parser/startup_options.h"
-#include "mongo/util/exit_code.h"
#include "mongo/util/quick_exit.h"
namespace mongo {
diff --git a/src/mongo/tools/sniffer.cpp b/src/mongo/tools/sniffer.cpp
index 64abdf01a67..255d41c1394 100644
--- a/src/mongo/tools/sniffer.cpp
+++ b/src/mongo/tools/sniffer.cpp
@@ -51,8 +51,8 @@
#include <pcap.h>
#include <stdio.h>
#include <stdlib.h>
-#include <string>
#include <string.h>
+#include <string>
#include <sys/types.h>
#ifndef _WIN32
@@ -483,7 +483,8 @@ void usage() {
" else. Spurious messages about invalid documents may result\n"
" when there are dropped tcp packets.\n"
"<port0>... These parameters are used to filter sniffing. By default, \n"
- " only port 27017 is sniffed.\n" << endl;
+ " only port 27017 is sniffed.\n"
+ << endl;
}
int toolMain(int argc, char** argv, char** envp) {
diff --git a/src/mongo/unittest/death_test.h b/src/mongo/unittest/death_test.h
index a8391709dca..01ab994b70c 100644
--- a/src/mongo/unittest/death_test.h
+++ b/src/mongo/unittest/death_test.h
@@ -45,20 +45,21 @@
* in your death test, start them in the test body, or use DEATH_TEST_F and start them
* in the setUp() method of the fixture.
*/
-#define DEATH_TEST(CASE_NAME, TEST_NAME, MATCH_EXPR) \
- class _TEST_TYPE_NAME(CASE_NAME, TEST_NAME) : public ::mongo::unittest::Test { \
- private: \
- virtual void _doTest(); \
- \
- static const RegistrationAgent< \
- ::mongo::unittest::DeathTest<_TEST_TYPE_NAME(CASE_NAME, TEST_NAME)>> _agent; \
- }; \
- const ::mongo::unittest::Test::RegistrationAgent<::mongo::unittest::DeathTest<_TEST_TYPE_NAME( \
- CASE_NAME, TEST_NAME)>> _TEST_TYPE_NAME(CASE_NAME, TEST_NAME)::_agent(#CASE_NAME, \
- #TEST_NAME); \
- std::string getDeathTestPattern(_TEST_TYPE_NAME(CASE_NAME, TEST_NAME)*) { \
- return MATCH_EXPR; \
- } \
+#define DEATH_TEST(CASE_NAME, TEST_NAME, MATCH_EXPR) \
+ class _TEST_TYPE_NAME(CASE_NAME, TEST_NAME) : public ::mongo::unittest::Test { \
+ private: \
+ virtual void _doTest(); \
+ \
+ static const RegistrationAgent< \
+ ::mongo::unittest::DeathTest<_TEST_TYPE_NAME(CASE_NAME, TEST_NAME)>> \
+ _agent; \
+ }; \
+ const ::mongo::unittest::Test::RegistrationAgent< \
+ ::mongo::unittest::DeathTest<_TEST_TYPE_NAME(CASE_NAME, TEST_NAME)>> \
+ _TEST_TYPE_NAME(CASE_NAME, TEST_NAME)::_agent(#CASE_NAME, #TEST_NAME); \
+ std::string getDeathTestPattern(_TEST_TYPE_NAME(CASE_NAME, TEST_NAME)*) { \
+ return MATCH_EXPR; \
+ } \
void _TEST_TYPE_NAME(CASE_NAME, TEST_NAME)::_doTest()
/**
@@ -67,20 +68,21 @@
*
* See description of DEATH_TEST for more details on death tests.
*/
-#define DEATH_TEST_F(FIXTURE_NAME, TEST_NAME, MATCH_EXPR) \
- class _TEST_TYPE_NAME(FIXTURE_NAME, TEST_NAME) : public FIXTURE_NAME { \
- private: \
- virtual void _doTest(); \
- \
- static const RegistrationAgent< \
- ::mongo::unittest::DeathTest<_TEST_TYPE_NAME(FIXTURE_NAME, TEST_NAME)>> _agent; \
- }; \
- const ::mongo::unittest::Test::RegistrationAgent<::mongo::unittest::DeathTest<_TEST_TYPE_NAME( \
- FIXTURE_NAME, TEST_NAME)>> _TEST_TYPE_NAME(FIXTURE_NAME, TEST_NAME)::_agent(#FIXTURE_NAME, \
- #TEST_NAME); \
- std::string getDeathTestPattern(_TEST_TYPE_NAME(FIXTURE_NAME, TEST_NAME)*) { \
- return MATCH_EXPR; \
- } \
+#define DEATH_TEST_F(FIXTURE_NAME, TEST_NAME, MATCH_EXPR) \
+ class _TEST_TYPE_NAME(FIXTURE_NAME, TEST_NAME) : public FIXTURE_NAME { \
+ private: \
+ virtual void _doTest(); \
+ \
+ static const RegistrationAgent< \
+ ::mongo::unittest::DeathTest<_TEST_TYPE_NAME(FIXTURE_NAME, TEST_NAME)>> \
+ _agent; \
+ }; \
+ const ::mongo::unittest::Test::RegistrationAgent< \
+ ::mongo::unittest::DeathTest<_TEST_TYPE_NAME(FIXTURE_NAME, TEST_NAME)>> \
+ _TEST_TYPE_NAME(FIXTURE_NAME, TEST_NAME)::_agent(#FIXTURE_NAME, #TEST_NAME); \
+ std::string getDeathTestPattern(_TEST_TYPE_NAME(FIXTURE_NAME, TEST_NAME)*) { \
+ return MATCH_EXPR; \
+ } \
void _TEST_TYPE_NAME(FIXTURE_NAME, TEST_NAME)::_doTest()
namespace mongo {
@@ -104,8 +106,7 @@ public:
static const std::string pattern;
template <typename... Args>
- DeathTest(Args&&... args)
- : DeathTestImpl(stdx::make_unique<T>(std::forward<Args>(args)...)) {}
+ DeathTest(Args&&... args) : DeathTestImpl(stdx::make_unique<T>(std::forward<Args>(args)...)) {}
private:
std::string getPattern() override {
diff --git a/src/mongo/unittest/temp_dir.cpp b/src/mongo/unittest/temp_dir.cpp
index f70052e5863..7313298194a 100644
--- a/src/mongo/unittest/temp_dir.cpp
+++ b/src/mongo/unittest/temp_dir.cpp
@@ -36,10 +36,10 @@
#include "mongo/base/init.h"
#include "mongo/unittest/unittest.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
-#include "mongo/util/options_parser/startup_options.h"
+#include "mongo/util/mongoutils/str.h"
#include "mongo/util/options_parser/startup_option_init.h"
+#include "mongo/util/options_parser/startup_options.h"
namespace mongo {
diff --git a/src/mongo/unittest/temp_dir_test.cpp b/src/mongo/unittest/temp_dir_test.cpp
index fa6c24f27b3..e73cd8888cc 100644
--- a/src/mongo/unittest/temp_dir_test.cpp
+++ b/src/mongo/unittest/temp_dir_test.cpp
@@ -30,8 +30,8 @@
#include "mongo/unittest/temp_dir.h"
-#include <fstream>
#include <boost/filesystem.hpp>
+#include <fstream>
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/unittest/unittest.cpp b/src/mongo/unittest/unittest.cpp
index ef523832b1e..ade28b65821 100644
--- a/src/mongo/unittest/unittest.cpp
+++ b/src/mongo/unittest/unittest.cpp
@@ -77,8 +77,8 @@ logger::LogstreamBuilder log() {
return LogstreamBuilder(unittestOutput, getThreadName(), logger::LogSeverity::Log());
}
-MONGO_INITIALIZER_WITH_PREREQUISITES(UnitTestOutput,
- ("GlobalLogManager", "default"))(InitializerContext*) {
+MONGO_INITIALIZER_WITH_PREREQUISITES(UnitTestOutput, ("GlobalLogManager", "default"))
+(InitializerContext*) {
unittestOutput->attachAppender(logger::MessageLogDomain::AppenderAutoPtr(
new logger::ConsoleAppender<logger::MessageLogDomain::Event>(
new logger::MessageEventDetailsEncoder)));
diff --git a/src/mongo/unittest/unittest.h b/src/mongo/unittest/unittest.h
index 48ed89695d3..51cfde2391b 100644
--- a/src/mongo/unittest/unittest.h
+++ b/src/mongo/unittest/unittest.h
@@ -126,21 +126,20 @@
* Behaves like ASSERT_THROWS, above, but also fails if calling what() on the thrown exception
* does not return a string equal to EXPECTED_WHAT.
*/
-#define ASSERT_THROWS_WHAT(STATEMENT, EXCEPTION_TYPE, EXPECTED_WHAT) \
- ASSERT_THROWS_PRED(STATEMENT, \
- EXCEPTION_TYPE, \
- ([&](const EXCEPTION_TYPE& ex) { \
- return ::mongo::StringData(ex.what()) == ::mongo::StringData(EXPECTED_WHAT); \
+#define ASSERT_THROWS_WHAT(STATEMENT, EXCEPTION_TYPE, EXPECTED_WHAT) \
+ ASSERT_THROWS_PRED(STATEMENT, EXCEPTION_TYPE, ([&](const EXCEPTION_TYPE& ex) { \
+ return ::mongo::StringData(ex.what()) == \
+ ::mongo::StringData(EXPECTED_WHAT); \
}))
/**
* Behaves like ASSERT_THROWS, above, but also fails if calling getCode() on the thrown exception
* does not return an error code equal to EXPECTED_CODE.
*/
-#define ASSERT_THROWS_CODE(STATEMENT, EXCEPTION_TYPE, EXPECTED_CODE) \
- ASSERT_THROWS_PRED(STATEMENT, \
- EXCEPTION_TYPE, \
- ([](const EXCEPTION_TYPE& ex) { return (EXPECTED_CODE) == ex.getCode(); }))
+#define ASSERT_THROWS_CODE(STATEMENT, EXCEPTION_TYPE, EXPECTED_CODE) \
+ ASSERT_THROWS_PRED(STATEMENT, EXCEPTION_TYPE, ([](const EXCEPTION_TYPE& ex) { \
+ return (EXPECTED_CODE) == ex.getCode(); \
+ }))
/**
* Behaves like ASSERT_THROWS, above, but also fails if calling getCode() on the thrown exception
@@ -148,11 +147,10 @@
* does not return a string equal to EXPECTED_WHAT.
*/
#define ASSERT_THROWS_CODE_AND_WHAT(STATEMENT, EXCEPTION_TYPE, EXPECTED_CODE, EXPECTED_WHAT) \
- ASSERT_THROWS_PRED(STATEMENT, \
- EXCEPTION_TYPE, \
- ([](const EXCEPTION_TYPE& ex) { \
- return (EXPECTED_CODE) == ex.getCode() && \
- ::mongo::StringData(ex.what()) == ::mongo::StringData(EXPECTED_WHAT); \
+ ASSERT_THROWS_PRED(STATEMENT, EXCEPTION_TYPE, ([](const EXCEPTION_TYPE& ex) { \
+ return (EXPECTED_CODE) == ex.getCode() && \
+ ::mongo::StringData(ex.what()) == \
+ ::mongo::StringData(EXPECTED_WHAT); \
}))
/**
@@ -534,12 +532,12 @@ private:
std::shared_ptr<TestAssertionFailure> _assertion; \
}
-DECLARE_COMPARISON_ASSERTION(EQ, == );
-DECLARE_COMPARISON_ASSERTION(NE, != );
-DECLARE_COMPARISON_ASSERTION(LT, < );
-DECLARE_COMPARISON_ASSERTION(LTE, <= );
-DECLARE_COMPARISON_ASSERTION(GT, > );
-DECLARE_COMPARISON_ASSERTION(GTE, >= );
+DECLARE_COMPARISON_ASSERTION(EQ, ==);
+DECLARE_COMPARISON_ASSERTION(NE, !=);
+DECLARE_COMPARISON_ASSERTION(LT, <);
+DECLARE_COMPARISON_ASSERTION(LTE, <=);
+DECLARE_COMPARISON_ASSERTION(GT, >);
+DECLARE_COMPARISON_ASSERTION(GTE, >=);
#undef DECLARE_COMPARISON_ASSERTION
/**
diff --git a/src/mongo/unittest/unittest_test.cpp b/src/mongo/unittest/unittest_test.cpp
index 77535023db0..ec1ba8c8b30 100644
--- a/src/mongo/unittest/unittest_test.cpp
+++ b/src/mongo/unittest/unittest_test.cpp
@@ -36,9 +36,9 @@
#include <string>
#include "mongo/stdx/functional.h"
-#include "mongo/util/assert_util.h"
#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/assert_util.h"
namespace {
namespace stdx = mongo::stdx;
diff --git a/src/mongo/util/admin_access.h b/src/mongo/util/admin_access.h
index 37ca3f0e29f..721aeb2db23 100644
--- a/src/mongo/util/admin_access.h
+++ b/src/mongo/util/admin_access.h
@@ -31,8 +31,8 @@
#pragma once
-#include "mongo/db/jsobj.h"
#include "mongo/db/auth/user_name.h"
+#include "mongo/db/jsobj.h"
namespace mongo {
diff --git a/src/mongo/util/assert_util.cpp b/src/mongo/util/assert_util.cpp
index f1523de6517..b832278cac9 100644
--- a/src/mongo/util/assert_util.cpp
+++ b/src/mongo/util/assert_util.cpp
@@ -288,12 +288,13 @@ Status exceptionToStatus() {
} catch (const std::exception& ex) {
return Status(ErrorCodes::UnknownError,
str::stream() << "Caught std::exception of type " << demangleName(typeid(ex))
- << ": " << ex.what());
+ << ": "
+ << ex.what());
} catch (const boost::exception& ex) {
- return Status(ErrorCodes::UnknownError,
- str::stream() << "Caught boost::exception of type "
- << demangleName(typeid(ex)) << ": "
- << boost::diagnostic_information(ex));
+ return Status(
+ ErrorCodes::UnknownError,
+ str::stream() << "Caught boost::exception of type " << demangleName(typeid(ex)) << ": "
+ << boost::diagnostic_information(ex));
} catch (...) {
severe() << "Caught unknown exception in exceptionToStatus()";
diff --git a/src/mongo/util/assert_util.h b/src/mongo/util/assert_util.h
index 1d72c045764..7748ba447e1 100644
--- a/src/mongo/util/assert_util.h
+++ b/src/mongo/util/assert_util.h
@@ -27,8 +27,8 @@
#pragma once
-#include <typeinfo>
#include <string>
+#include <typeinfo>
#include "mongo/base/status.h" // NOTE: This is safe as utils depend on base
#include "mongo/base/status_with.h"
diff --git a/src/mongo/util/background_thread_clock_source.h b/src/mongo/util/background_thread_clock_source.h
index c532efd3817..6ea43ed86de 100644
--- a/src/mongo/util/background_thread_clock_source.h
+++ b/src/mongo/util/background_thread_clock_source.h
@@ -33,12 +33,12 @@
#include "mongo/base/disallow_copying.h"
#include "mongo/platform/atomic_word.h"
-#include "mongo/util/clock_source.h"
-#include "mongo/util/time_support.h"
#include "mongo/stdx/condition_variable.h"
#include "mongo/stdx/memory.h"
#include "mongo/stdx/mutex.h"
#include "mongo/stdx/thread.h"
+#include "mongo/util/clock_source.h"
+#include "mongo/util/time_support.h"
namespace mongo {
diff --git a/src/mongo/util/base64.h b/src/mongo/util/base64.h
index d5cf3a71f55..740811936ff 100644
--- a/src/mongo/util/base64.h
+++ b/src/mongo/util/base64.h
@@ -29,8 +29,8 @@
#pragma once
-#include <memory>
#include <iosfwd>
+#include <memory>
#include <string>
#include "mongo/util/assert_util.h"
diff --git a/src/mongo/util/cmdline_utils/censor_cmdline_test.cpp b/src/mongo/util/cmdline_utils/censor_cmdline_test.cpp
index a9cb8fe95ba..70e37aacef5 100644
--- a/src/mongo/util/cmdline_utils/censor_cmdline_test.cpp
+++ b/src/mongo/util/cmdline_utils/censor_cmdline_test.cpp
@@ -221,7 +221,8 @@ TEST(BSONObjCensorTests, Strings) {
<< "also not a password"
<< "processManagement.windowsService.servicePassword"
<< "this password should also be censored"
- << "lastarg" << false);
+ << "lastarg"
+ << false);
BSONObj res = BSON("firstarg"
<< "not a password"
@@ -233,35 +234,45 @@ TEST(BSONObjCensorTests, Strings) {
<< "also not a password"
<< "processManagement.windowsService.servicePassword"
<< "<password>"
- << "lastarg" << false);
+ << "lastarg"
+ << false);
cmdline_utils::censorBSONObj(&obj);
ASSERT_EQUALS(res, obj);
}
TEST(BSONObjCensorTests, Arrays) {
- BSONObj obj =
- BSON("firstarg"
- << "not a password"
- << "net.ssl.PEMKeyPassword" << BSON_ARRAY("first censored password"
- << "next censored password")
- << "net.ssl.clusterPassword" << BSON_ARRAY("first censored password"
- << "next censored password") << "middlearg"
- << "also not a password"
- << "processManagement.windowsService.servicePassword"
- << BSON_ARRAY("first censored password"
- << "next censored password") << "lastarg" << false);
+ BSONObj obj = BSON("firstarg"
+ << "not a password"
+ << "net.ssl.PEMKeyPassword"
+ << BSON_ARRAY("first censored password"
+ << "next censored password")
+ << "net.ssl.clusterPassword"
+ << BSON_ARRAY("first censored password"
+ << "next censored password")
+ << "middlearg"
+ << "also not a password"
+ << "processManagement.windowsService.servicePassword"
+ << BSON_ARRAY("first censored password"
+ << "next censored password")
+ << "lastarg"
+ << false);
BSONObj res = BSON("firstarg"
<< "not a password"
- << "net.ssl.PEMKeyPassword" << BSON_ARRAY("<password>"
- << "<password>")
- << "net.ssl.clusterPassword" << BSON_ARRAY("<password>"
- << "<password>") << "middlearg"
+ << "net.ssl.PEMKeyPassword"
+ << BSON_ARRAY("<password>"
+ << "<password>")
+ << "net.ssl.clusterPassword"
+ << BSON_ARRAY("<password>"
+ << "<password>")
+ << "middlearg"
<< "also not a password"
<< "processManagement.windowsService.servicePassword"
<< BSON_ARRAY("<password>"
- << "<password>") << "lastarg" << false);
+ << "<password>")
+ << "lastarg"
+ << false);
cmdline_utils::censorBSONObj(&obj);
ASSERT_EQUALS(res, obj);
@@ -272,26 +283,32 @@ TEST(BSONObjCensorTests, SubObjects) {
BSON("firstarg"
<< "not a password"
<< "net"
- << BSON("ssl" << BSON("PEMKeyPassword"
- << BSON_ARRAY("first censored password"
- << "next censored password") << "PEMKeyPassword"
- << "should be censored too"
- << "clusterPassword" << BSON_ARRAY("first censored password"
- << "next censored password")
- << "clusterPassword"
- << "should be censored too")) << "lastarg" << false);
-
- BSONObj res = BSON(
- "firstarg"
- << "not a password"
- << "net"
- << BSON("ssl" << BSON("PEMKeyPassword" << BSON_ARRAY("<password>"
- << "<password>") << "PEMKeyPassword"
- << "<password>"
- << "clusterPassword" << BSON_ARRAY("<password>"
- << "<password>")
- << "clusterPassword"
- << "<password>")) << "lastarg" << false);
+ << BSON("ssl" << BSON("PEMKeyPassword" << BSON_ARRAY("first censored password"
+ << "next censored password")
+ << "PEMKeyPassword"
+ << "should be censored too"
+ << "clusterPassword"
+ << BSON_ARRAY("first censored password"
+ << "next censored password")
+ << "clusterPassword"
+ << "should be censored too"))
+ << "lastarg"
+ << false);
+
+ BSONObj res = BSON("firstarg"
+ << "not a password"
+ << "net"
+ << BSON("ssl" << BSON("PEMKeyPassword" << BSON_ARRAY("<password>"
+ << "<password>")
+ << "PEMKeyPassword"
+ << "<password>"
+ << "clusterPassword"
+ << BSON_ARRAY("<password>"
+ << "<password>")
+ << "clusterPassword"
+ << "<password>"))
+ << "lastarg"
+ << false);
cmdline_utils::censorBSONObj(&obj);
ASSERT_EQUALS(res, obj);
diff --git a/src/mongo/util/concurrency/rwlockimpl.cpp b/src/mongo/util/concurrency/rwlockimpl.cpp
index 28eeebd363b..755724cd3a5 100644
--- a/src/mongo/util/concurrency/rwlockimpl.cpp
+++ b/src/mongo/util/concurrency/rwlockimpl.cpp
@@ -34,9 +34,9 @@
#define NOMINMAX
#include <windows.h>
#endif
+#include <boost/version.hpp>
#include <map>
#include <set>
-#include <boost/version.hpp>
#include "mongo/config.h"
#include "mongo/stdx/condition_variable.h"
diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp
index 4a5e456b972..90464d7b167 100644
--- a/src/mongo/util/concurrency/thread_pool.cpp
+++ b/src/mongo/util/concurrency/thread_pool.cpp
@@ -137,23 +137,21 @@ void ThreadPool::join() {
}
void ThreadPool::_join_inlock(stdx::unique_lock<stdx::mutex>* lk) {
- _stateChange.wait(*lk,
- [this] {
- switch (_state) {
- case preStart:
- return false;
- case running:
- return false;
- case joinRequired:
- return true;
- case joining:
- case shutdownComplete:
- severe() << "Attempted to join pool " << _options.poolName
- << " more than once";
- fassertFailed(28700);
- }
- MONGO_UNREACHABLE;
- });
+ _stateChange.wait(*lk, [this] {
+ switch (_state) {
+ case preStart:
+ return false;
+ case running:
+ return false;
+ case joinRequired:
+ return true;
+ case joining:
+ case shutdownComplete:
+ severe() << "Attempted to join pool " << _options.poolName << " more than once";
+ fassertFailed(28700);
+ }
+ MONGO_UNREACHABLE;
+ });
_setState_inlock(joining);
++_numIdleThreads;
while (!_pendingTasks.empty()) {
diff --git a/src/mongo/util/concurrency/thread_pool_test_common.cpp b/src/mongo/util/concurrency/thread_pool_test_common.cpp
index 1f965e4a2c4..ea63df2df83 100644
--- a/src/mongo/util/concurrency/thread_pool_test_common.cpp
+++ b/src/mongo/util/concurrency/thread_pool_test_common.cpp
@@ -99,40 +99,39 @@ public:
}
};
-#define COMMON_THREAD_POOL_TEST(TEST_NAME) \
- class TPT_##TEST_NAME : public CommonThreadPoolTestFixture { \
- public: \
- TPT_##TEST_NAME(ThreadPoolFactory makeThreadPool) \
- : CommonThreadPoolTestFixture(std::move(makeThreadPool)) {} \
- \
- private: \
- void _doTest() override; \
- static const TptRegistrationAgent _agent; \
- }; \
- const TptRegistrationAgent TPT_##TEST_NAME::_agent(#TEST_NAME, \
- [](ThreadPoolFactory makeThreadPool) { \
- return stdx::make_unique<TPT_##TEST_NAME>(std::move(makeThreadPool)); \
- }); \
+#define COMMON_THREAD_POOL_TEST(TEST_NAME) \
+ class TPT_##TEST_NAME : public CommonThreadPoolTestFixture { \
+ public: \
+ TPT_##TEST_NAME(ThreadPoolFactory makeThreadPool) \
+ : CommonThreadPoolTestFixture(std::move(makeThreadPool)) {} \
+ \
+ private: \
+ void _doTest() override; \
+ static const TptRegistrationAgent _agent; \
+ }; \
+ const TptRegistrationAgent TPT_##TEST_NAME::_agent( \
+ #TEST_NAME, [](ThreadPoolFactory makeThreadPool) { \
+ return stdx::make_unique<TPT_##TEST_NAME>(std::move(makeThreadPool)); \
+ }); \
void TPT_##TEST_NAME::_doTest()
-#define COMMON_THREAD_POOL_DEATH_TEST(TEST_NAME, MATCH_EXPR) \
- class TPT_##TEST_NAME : public CommonThreadPoolTestFixture { \
- public: \
- TPT_##TEST_NAME(ThreadPoolFactory makeThreadPool) \
- : CommonThreadPoolTestFixture(std::move(makeThreadPool)) {} \
- \
- private: \
- void _doTest() override; \
- static const TptDeathRegistrationAgent<TPT_##TEST_NAME> _agent; \
- }; \
- const TptDeathRegistrationAgent<TPT_##TEST_NAME> TPT_##TEST_NAME::_agent( \
- #TEST_NAME, \
- [](ThreadPoolFactory makeThreadPool) { \
- return stdx::make_unique<TPT_##TEST_NAME>(std::move(makeThreadPool)); \
- }); \
- std::string getDeathTestPattern(TPT_##TEST_NAME*) { \
- return MATCH_EXPR; \
- } \
+#define COMMON_THREAD_POOL_DEATH_TEST(TEST_NAME, MATCH_EXPR) \
+ class TPT_##TEST_NAME : public CommonThreadPoolTestFixture { \
+ public: \
+ TPT_##TEST_NAME(ThreadPoolFactory makeThreadPool) \
+ : CommonThreadPoolTestFixture(std::move(makeThreadPool)) {} \
+ \
+ private: \
+ void _doTest() override; \
+ static const TptDeathRegistrationAgent<TPT_##TEST_NAME> _agent; \
+ }; \
+ const TptDeathRegistrationAgent<TPT_##TEST_NAME> TPT_##TEST_NAME::_agent( \
+ #TEST_NAME, [](ThreadPoolFactory makeThreadPool) { \
+ return stdx::make_unique<TPT_##TEST_NAME>(std::move(makeThreadPool)); \
+ }); \
+ std::string getDeathTestPattern(TPT_##TEST_NAME*) { \
+ return MATCH_EXPR; \
+ } \
void TPT_##TEST_NAME::_doTest()
COMMON_THREAD_POOL_TEST(UnusedPool) {
diff --git a/src/mongo/util/concurrency/ticketholder.cpp b/src/mongo/util/concurrency/ticketholder.cpp
index 34535fa98b6..3a99d643419 100644
--- a/src/mongo/util/concurrency/ticketholder.cpp
+++ b/src/mongo/util/concurrency/ticketholder.cpp
@@ -96,7 +96,8 @@ Status TicketHolder::resize(int newSize) {
if (newSize > SEM_VALUE_MAX)
return Status(ErrorCodes::BadValue,
str::stream() << "Maximum value for semaphore is " << SEM_VALUE_MAX
- << "; given " << newSize);
+ << "; given "
+ << newSize);
while (_outof.load() < newSize) {
release();
diff --git a/src/mongo/util/debugger.cpp b/src/mongo/util/debugger.cpp
index 933af392b12..9682c54db14 100644
--- a/src/mongo/util/debugger.cpp
+++ b/src/mongo/util/debugger.cpp
@@ -34,8 +34,8 @@
#include <cstdlib>
#if defined(USE_GDBSERVER)
-#include <unistd.h>
#include <cstdio>
+#include <unistd.h>
#endif
#ifndef _WIN32
diff --git a/src/mongo/util/descriptive_stats-inl.h b/src/mongo/util/descriptive_stats-inl.h
index 1ae73134063..41746b354fc 100644
--- a/src/mongo/util/descriptive_stats-inl.h
+++ b/src/mongo/util/descriptive_stats-inl.h
@@ -67,8 +67,7 @@ void BasicEstimators<Sample>::appendBasicToBSONObjBuilder(BSONObjBuilder& b) con
}
template <std::size_t NumQuantiles>
-DistributionEstimators<NumQuantiles>::DistributionEstimators()
- : _count(0) {
+DistributionEstimators<NumQuantiles>::DistributionEstimators() : _count(0) {
for (std::size_t i = 0; i < NumMarkers; i++) {
_actual_positions[i] = i + 1;
}
diff --git a/src/mongo/util/duration.h b/src/mongo/util/duration.h
index 97c67ba04e4..353cb1fca9f 100644
--- a/src/mongo/util/duration.h
+++ b/src/mongo/util/duration.h
@@ -219,8 +219,7 @@ public:
typename Rep2,
stdx::enable_if_t<std::is_convertible<Rep2, rep>::value && std::is_integral<Rep2>::value,
int> = 0>
- constexpr explicit Duration(const Rep2& r)
- : _count(r) {
+ constexpr explicit Duration(const Rep2& r) : _count(r) {
static_assert(std::is_signed<Rep2>::value || sizeof(Rep2) < sizeof(rep),
"Durations must be constructed from values of integral type that are "
"representable as 64-bit signed integers");
diff --git a/src/mongo/util/duration_test.cpp b/src/mongo/util/duration_test.cpp
index 95501cc5d95..aac33feec99 100644
--- a/src/mongo/util/duration_test.cpp
+++ b/src/mongo/util/duration_test.cpp
@@ -28,8 +28,8 @@
#include "mongo/platform/basic.h"
#include "mongo/stdx/chrono.h"
-#include "mongo/util/duration.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/duration.h"
namespace mongo {
namespace {
diff --git a/src/mongo/util/exception_filter_win32.cpp b/src/mongo/util/exception_filter_win32.cpp
index f24cbbe10c4..db09154229c 100644
--- a/src/mongo/util/exception_filter_win32.cpp
+++ b/src/mongo/util/exception_filter_win32.cpp
@@ -32,8 +32,8 @@
#include "mongo/platform/basic.h"
-#include <ostream>
#include <DbgHelp.h>
+#include <ostream>
#include "mongo/config.h"
#include "mongo/util/assert_util.h"
diff --git a/src/mongo/util/fail_point_service.cpp b/src/mongo/util/fail_point_service.cpp
index 68bb4897286..a4766784df8 100644
--- a/src/mongo/util/fail_point_service.cpp
+++ b/src/mongo/util/fail_point_service.cpp
@@ -42,9 +42,8 @@ MONGO_INITIALIZER(FailPointRegistry)(InitializerContext* context) {
return Status::OK();
}
-MONGO_INITIALIZER_GENERAL(AllFailPointsRegistered,
- MONGO_NO_PREREQUISITES,
- MONGO_NO_DEPENDENTS)(InitializerContext* context) {
+MONGO_INITIALIZER_GENERAL(AllFailPointsRegistered, MONGO_NO_PREREQUISITES, MONGO_NO_DEPENDENTS)
+(InitializerContext* context) {
_fpRegistry->freeze();
return Status::OK();
}
diff --git a/src/mongo/util/file.cpp b/src/mongo/util/file.cpp
index bcb4b74e989..1d55b4d51b6 100644
--- a/src/mongo/util/file.cpp
+++ b/src/mongo/util/file.cpp
@@ -136,10 +136,14 @@ void File::read(fileofs o, char* data, unsigned len) {
} else if (bytesRead != len) {
_bad = true;
msgasserted(10438,
- mongoutils::str::stream()
- << "In File::read(), ReadFile for '" << _name << "' read " << bytesRead
- << " bytes while trying to read " << len << " bytes starting at offset "
- << o << ", truncated file?");
+ mongoutils::str::stream() << "In File::read(), ReadFile for '" << _name
+ << "' read "
+ << bytesRead
+ << " bytes while trying to read "
+ << len
+ << " bytes starting at offset "
+ << o
+ << ", truncated file?");
}
}
@@ -259,10 +263,14 @@ void File::read(fileofs o, char* data, unsigned len) {
} else if (bytesRead != static_cast<ssize_t>(len)) {
_bad = true;
msgasserted(16569,
- mongoutils::str::stream()
- << "In File::read(), ::pread for '" << _name << "' read " << bytesRead
- << " bytes while trying to read " << len << " bytes starting at offset "
- << o << ", truncated file?");
+ mongoutils::str::stream() << "In File::read(), ::pread for '" << _name
+ << "' read "
+ << bytesRead
+ << " bytes while trying to read "
+ << len
+ << " bytes starting at offset "
+ << o
+ << ", truncated file?");
}
}
diff --git a/src/mongo/util/intrusive_counter.cpp b/src/mongo/util/intrusive_counter.cpp
index 8a10384cfb3..75c3b0f0acc 100644
--- a/src/mongo/util/intrusive_counter.cpp
+++ b/src/mongo/util/intrusive_counter.cpp
@@ -39,7 +39,8 @@ using namespace mongoutils;
intrusive_ptr<const RCString> RCString::create(StringData s) {
uassert(16493,
str::stream() << "Tried to create string longer than "
- << (BSONObjMaxUserSize / 1024 / 1024) << "MB",
+ << (BSONObjMaxUserSize / 1024 / 1024)
+ << "MB",
s.size() < static_cast<size_t>(BSONObjMaxUserSize));
const size_t sizeWithNUL = s.size() + 1;
diff --git a/src/mongo/util/intrusive_counter.h b/src/mongo/util/intrusive_counter.h
index 1b1ce182b90..76c1bee5a78 100644
--- a/src/mongo/util/intrusive_counter.h
+++ b/src/mongo/util/intrusive_counter.h
@@ -31,9 +31,9 @@
#include <boost/intrusive_ptr.hpp>
#include <stdlib.h>
-#include "mongo/platform/atomic_word.h"
#include "mongo/base/disallow_copying.h"
#include "mongo/base/string_data.h"
+#include "mongo/platform/atomic_word.h"
#include "mongo/util/allocator.h"
namespace mongo {
diff --git a/src/mongo/util/itoa_test.cpp b/src/mongo/util/itoa_test.cpp
index abf0a2fbf45..ba363249e00 100644
--- a/src/mongo/util/itoa_test.cpp
+++ b/src/mongo/util/itoa_test.cpp
@@ -28,9 +28,9 @@
#include "mongo/platform/basic.h"
+#include <array>
#include <cstdint>
#include <limits>
-#include <array>
#include "mongo/base/string_data.h"
#include "mongo/unittest/unittest.h"
diff --git a/src/mongo/util/log.cpp b/src/mongo/util/log.cpp
index b3c130bf264..86c70d27f66 100644
--- a/src/mongo/util/log.cpp
+++ b/src/mongo/util/log.cpp
@@ -40,8 +40,8 @@
#include "mongo/logger/ramlog.h"
#include "mongo/logger/rotatable_file_manager.h"
#include "mongo/util/assert_util.h"
-#include "mongo/util/concurrency/threadlocal.h"
#include "mongo/util/concurrency/thread_name.h"
+#include "mongo/util/concurrency/threadlocal.h"
#include "mongo/util/stacktrace.h"
#include "mongo/util/text.h"
#include "mongo/util/time_support.h"
diff --git a/src/mongo/util/mongoutils/str.h b/src/mongo/util/mongoutils/str.h
index 561bf20008c..2aefea6f261 100644
--- a/src/mongo/util/mongoutils/str.h
+++ b/src/mongo/util/mongoutils/str.h
@@ -36,8 +36,8 @@
* TODO: Retire the mongoutils namespace, and move str under the mongo namespace.
*/
-#include <string>
#include <sstream>
+#include <string>
#include "mongo/bson/util/builder.h"
diff --git a/src/mongo/util/net/asio_message_port.cpp b/src/mongo/util/net/asio_message_port.cpp
index f2225bccec8..3f84cceb3be 100644
--- a/src/mongo/util/net/asio_message_port.cpp
+++ b/src/mongo/util/net/asio_message_port.cpp
@@ -317,7 +317,9 @@ bool ASIOMessagingPort::recv(Message& m) {
LOG(_logLevel) << httpMsg;
std::stringstream ss;
ss << "HTTP/1.0 200 OK\r\nConnection: close\r\nContent-Type: "
- "text/plain\r\nContent-Length: " << httpMsg.size() << "\r\n\r\n" << httpMsg;
+ "text/plain\r\nContent-Length: "
+ << httpMsg.size() << "\r\n\r\n"
+ << httpMsg;
auto s = ss.str();
send(s.c_str(), s.size(), nullptr);
return false;
diff --git a/src/mongo/util/net/hostandport.cpp b/src/mongo/util/net/hostandport.cpp
index ea1d0f93467..36106ac83fa 100644
--- a/src/mongo/util/net/hostandport.cpp
+++ b/src/mongo/util/net/hostandport.cpp
@@ -36,8 +36,8 @@
#include "mongo/base/string_data.h"
#include "mongo/bson/util/builder.h"
#include "mongo/db/server_options.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/mongoutils/str.h"
namespace mongo {
@@ -127,7 +127,8 @@ Status HostAndPort::initialize(StringData s) {
} else if (colonPos != closeBracketPos + 1) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Extraneous characters between ']' and pre-port ':'"
- << " in " << s.toString());
+ << " in "
+ << s.toString());
}
} else if (closeBracketPos != std::string::npos) {
return Status(ErrorCodes::FailedToParse,
@@ -142,7 +143,8 @@ Status HostAndPort::initialize(StringData s) {
if (hostPart.empty()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Empty host component parsing HostAndPort from \""
- << escape(s.toString()) << "\"");
+ << escape(s.toString())
+ << "\"");
}
int port;
@@ -156,7 +158,8 @@ Status HostAndPort::initialize(StringData s) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Port number " << port
<< " out of range parsing HostAndPort from \""
- << escape(s.toString()) << "\"");
+ << escape(s.toString())
+ << "\"");
}
} else {
port = -1;
diff --git a/src/mongo/util/net/hostname_canonicalization.cpp b/src/mongo/util/net/hostname_canonicalization.cpp
index d76cfb28b05..5dfc69d6ba4 100644
--- a/src/mongo/util/net/hostname_canonicalization.cpp
+++ b/src/mongo/util/net/hostname_canonicalization.cpp
@@ -34,10 +34,10 @@
#if !defined(_WIN32)
#include <arpa/inet.h>
+#include <netdb.h>
#include <netinet/in.h>
-#include <sys/types.h>
#include <sys/socket.h>
-#include <netdb.h>
+#include <sys/types.h>
#endif
#include "mongo/util/log.h"
diff --git a/src/mongo/util/net/listen.cpp b/src/mongo/util/net/listen.cpp
index 2cbee476522..a5f80406f1b 100644
--- a/src/mongo/util/net/listen.cpp
+++ b/src/mongo/util/net/listen.cpp
@@ -56,14 +56,14 @@
#include <sys/resource.h>
#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <errno.h>
#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
#ifdef __OpenBSD__
#include <sys/uio.h>
#endif
@@ -323,7 +323,8 @@ void Listener::initAndListen() {
if (x == EMFILE || x == ENFILE) {
// Connection still in listen queue but we can't accept it yet
error() << "Out of file descriptors. Waiting one second before trying to "
- "accept more connections." << warnings;
+ "accept more connections."
+ << warnings;
sleepsecs(1);
}
}
@@ -545,7 +546,8 @@ void Listener::initAndListen() {
if (x == EMFILE || x == ENFILE) {
// Connection still in listen queue but we can't accept it yet
error() << "Out of file descriptors. Waiting one second before"
- " trying to accept more connections." << warnings;
+ " trying to accept more connections."
+ << warnings;
sleepsecs(1);
}
}
diff --git a/src/mongo/util/net/message.cpp b/src/mongo/util/net/message.cpp
index 9c91ce0d124..11736ac1821 100644
--- a/src/mongo/util/net/message.cpp
+++ b/src/mongo/util/net/message.cpp
@@ -31,8 +31,8 @@
#include "mongo/util/net/message.h"
-#include <fcntl.h>
#include <errno.h>
+#include <fcntl.h>
#include <time.h>
#include "mongo/util/net/listen.h"
diff --git a/src/mongo/util/net/message_port.cpp b/src/mongo/util/net/message_port.cpp
index 225dfd06940..69e33377e59 100644
--- a/src/mongo/util/net/message_port.cpp
+++ b/src/mongo/util/net/message_port.cpp
@@ -143,7 +143,9 @@ bool MessagingPort::recv(Message& m) {
LOG(_psock->getLogLevel()) << msg;
std::stringstream ss;
ss << "HTTP/1.0 200 OK\r\nConnection: close\r\nContent-Type: "
- "text/plain\r\nContent-Length: " << msg.size() << "\r\n\r\n" << msg;
+ "text/plain\r\nContent-Length: "
+ << msg.size() << "\r\n\r\n"
+ << msg;
string s = ss.str();
send(s.c_str(), s.size(), "http");
return false;
diff --git a/src/mongo/util/net/sock.cpp b/src/mongo/util/net/sock.cpp
index c0c526a0b22..9c04caef048 100644
--- a/src/mongo/util/net/sock.cpp
+++ b/src/mongo/util/net/sock.cpp
@@ -34,14 +34,14 @@
#include "mongo/util/net/sock.h"
#if !defined(_WIN32)
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <errno.h>
#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
#if defined(__OpenBSD__)
#include <sys/uio.h>
#endif
@@ -54,8 +54,8 @@
#include "mongo/util/debug_util.h"
#include "mongo/util/fail_point_service.h"
#include "mongo/util/hex.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
#include "mongo/util/net/message.h"
#include "mongo/util/net/socket_exception.h"
#include "mongo/util/net/socket_poll.h"
diff --git a/src/mongo/util/net/sock.h b/src/mongo/util/net/sock.h
index d607eb8051c..a9da4bdbbfb 100644
--- a/src/mongo/util/net/sock.h
+++ b/src/mongo/util/net/sock.h
@@ -33,10 +33,10 @@
#ifndef _WIN32
+#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
-#include <errno.h>
#ifdef __OpenBSD__
#include <sys/uio.h>
diff --git a/src/mongo/util/net/sockaddr.cpp b/src/mongo/util/net/sockaddr.cpp
index f16eb447f8b..00960f2fd97 100644
--- a/src/mongo/util/net/sockaddr.cpp
+++ b/src/mongo/util/net/sockaddr.cpp
@@ -33,14 +33,14 @@
#include "mongo/util/net/sockaddr.h"
#if !defined(_WIN32)
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <sys/un.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <errno.h>
#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
#if defined(__OpenBSD__)
#include <sys/uio.h>
#endif
diff --git a/src/mongo/util/net/sockaddr.h b/src/mongo/util/net/sockaddr.h
index 046167e4196..4c52d3453f7 100644
--- a/src/mongo/util/net/sockaddr.h
+++ b/src/mongo/util/net/sockaddr.h
@@ -32,10 +32,10 @@
#ifndef _WIN32
+#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
-#include <errno.h>
#ifdef __OpenBSD__
#include <sys/uio.h>
diff --git a/src/mongo/util/net/ssl_manager.cpp b/src/mongo/util/net/ssl_manager.cpp
index a1e25a31ced..da090bde45d 100644
--- a/src/mongo/util/net/ssl_manager.cpp
+++ b/src/mongo/util/net/ssl_manager.cpp
@@ -851,9 +851,9 @@ Status importCertStoreToX509_STORE(LPWSTR storeName, DWORD storeLocation, X509_S
}
int lastError = GetLastError();
if (lastError != CRYPT_E_NOT_FOUND) {
- return {
- ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "Error enumerating certificates: " << errnoWithDescription(lastError)};
+ return {ErrorCodes::InvalidSSLConfiguration,
+ str::stream() << "Error enumerating certificates: "
+ << errnoWithDescription(lastError)};
}
return Status::OK();
@@ -902,9 +902,9 @@ Status importKeychainToX509_STORE(X509_STORE* verifyStore) {
auto rawData = makeCFTypeRefHolder(SecCertificateCopyData(cert));
if (!rawData) {
- return {
- ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "Error enumerating certificates: " << OSStatusToString(status)};
+ return {ErrorCodes::InvalidSSLConfiguration,
+ str::stream() << "Error enumerating certificates: "
+ << OSStatusToString(status)};
}
const uint8_t* rawDataPtr = CFDataGetBytePtr(rawData);
@@ -934,11 +934,14 @@ Status SSLManager::_setupSystemCA(SSL_CTX* context) {
// On non-Windows/non-Apple platforms, the OpenSSL libraries should have been configured
// with default locations for CA certificates.
if (SSL_CTX_set_default_verify_paths(context) != 1) {
- return {
- ErrorCodes::InvalidSSLConfiguration,
- str::stream() << "error loading system CA certificates "
- << "(default certificate file: " << X509_get_default_cert_file() << ", "
- << "default certificate path: " << X509_get_default_cert_dir() << ")"};
+ return {ErrorCodes::InvalidSSLConfiguration,
+ str::stream() << "error loading system CA certificates "
+ << "(default certificate file: "
+ << X509_get_default_cert_file()
+ << ", "
+ << "default certificate path: "
+ << X509_get_default_cert_dir()
+ << ")"};
}
return Status::OK();
#else
diff --git a/src/mongo/util/net/ssl_options.cpp b/src/mongo/util/net/ssl_options.cpp
index 452cf0507fb..8873ac3b021 100644
--- a/src/mongo/util/net/ssl_options.cpp
+++ b/src/mongo/util/net/ssl_options.cpp
@@ -36,18 +36,19 @@
#include "mongo/base/status.h"
#include "mongo/db/server_options.h"
#include "mongo/util/log.h"
-#include "mongo/util/text.h"
#include "mongo/util/options_parser/startup_options.h"
+#include "mongo/util/text.h"
namespace mongo {
using std::string;
Status addSSLServerOptions(moe::OptionSection* options) {
- options->addOptionChaining("net.ssl.sslOnNormalPorts",
- "sslOnNormalPorts",
- moe::Switch,
- "use ssl on configured ports")
+ options
+ ->addOptionChaining("net.ssl.sslOnNormalPorts",
+ "sslOnNormalPorts",
+ moe::Switch,
+ "use ssl on configured ports")
.setSources(moe::SourceAllLegacy)
.incompatibleWith("net.ssl.mode");
@@ -60,8 +61,9 @@ Status addSSLServerOptions(moe::OptionSection* options) {
options->addOptionChaining(
"net.ssl.PEMKeyFile", "sslPEMKeyFile", moe::String, "PEM file for ssl");
- options->addOptionChaining(
- "net.ssl.PEMKeyPassword", "sslPEMKeyPassword", moe::String, "PEM file password")
+ options
+ ->addOptionChaining(
+ "net.ssl.PEMKeyPassword", "sslPEMKeyPassword", moe::String, "PEM file password")
.setImplicit(moe::Value(std::string("")));
options->addOptionChaining("net.ssl.clusterFile",
@@ -69,10 +71,11 @@ Status addSSLServerOptions(moe::OptionSection* options) {
moe::String,
"Key file for internal SSL authentication");
- options->addOptionChaining("net.ssl.clusterPassword",
- "sslClusterPassword",
- moe::String,
- "Internal authentication key file password")
+ options
+ ->addOptionChaining("net.ssl.clusterPassword",
+ "sslClusterPassword",
+ moe::String,
+ "Internal authentication key file password")
.setImplicit(moe::Value(std::string("")));
options->addOptionChaining(
@@ -81,10 +84,12 @@ Status addSSLServerOptions(moe::OptionSection* options) {
options->addOptionChaining(
"net.ssl.CRLFile", "sslCRLFile", moe::String, "Certificate Revocation List file for SSL");
- options->addOptionChaining("net.ssl.sslCipherConfig",
- "sslCipherConfig",
- moe::String,
- "OpenSSL cipher configuration string").hidden();
+ options
+ ->addOptionChaining("net.ssl.sslCipherConfig",
+ "sslCipherConfig",
+ moe::String,
+ "OpenSSL cipher configuration string")
+ .hidden();
options->addOptionChaining(
"net.ssl.disabledProtocols",
@@ -123,36 +128,41 @@ Status addSSLServerOptions(moe::OptionSection* options) {
Status addSSLClientOptions(moe::OptionSection* options) {
options->addOptionChaining("ssl", "ssl", moe::Switch, "use SSL for all connections");
- options->addOptionChaining(
- "ssl.CAFile", "sslCAFile", moe::String, "Certificate Authority file for SSL")
+ options
+ ->addOptionChaining(
+ "ssl.CAFile", "sslCAFile", moe::String, "Certificate Authority file for SSL")
.requires("ssl");
- options->addOptionChaining(
- "ssl.PEMKeyFile", "sslPEMKeyFile", moe::String, "PEM certificate/key file for SSL")
+ options
+ ->addOptionChaining(
+ "ssl.PEMKeyFile", "sslPEMKeyFile", moe::String, "PEM certificate/key file for SSL")
.requires("ssl");
- options->addOptionChaining("ssl.PEMKeyPassword",
- "sslPEMKeyPassword",
- moe::String,
- "password for key in PEM file for SSL").requires("ssl");
+ options
+ ->addOptionChaining("ssl.PEMKeyPassword",
+ "sslPEMKeyPassword",
+ moe::String,
+ "password for key in PEM file for SSL")
+ .requires("ssl");
- options->addOptionChaining("ssl.CRLFile",
- "sslCRLFile",
- moe::String,
- "Certificate Revocation List file for SSL")
+ options
+ ->addOptionChaining(
+ "ssl.CRLFile", "sslCRLFile", moe::String, "Certificate Revocation List file for SSL")
.requires("ssl")
.requires("ssl.CAFile");
- options->addOptionChaining("net.ssl.allowInvalidHostnames",
- "sslAllowInvalidHostnames",
- moe::Switch,
- "allow connections to servers with non-matching hostnames")
+ options
+ ->addOptionChaining("net.ssl.allowInvalidHostnames",
+ "sslAllowInvalidHostnames",
+ moe::Switch,
+ "allow connections to servers with non-matching hostnames")
.requires("ssl");
- options->addOptionChaining("ssl.allowInvalidCertificates",
- "sslAllowInvalidCertificates",
- moe::Switch,
- "allow connections to servers with invalid certificates")
+ options
+ ->addOptionChaining("ssl.allowInvalidCertificates",
+ "sslAllowInvalidCertificates",
+ moe::Switch,
+ "allow connections to servers with invalid certificates")
.requires("ssl");
options->addOptionChaining(
diff --git a/src/mongo/util/ntservice.cpp b/src/mongo/util/ntservice.cpp
index 43028bf50e0..81ff20e3e17 100644
--- a/src/mongo/util/ntservice.cpp
+++ b/src/mongo/util/ntservice.cpp
@@ -78,8 +78,10 @@ bool shouldStartService() {
return _startService;
}
-static DWORD WINAPI
-serviceCtrl(DWORD dwControl, DWORD dwEventType, LPVOID lpEventData, LPVOID lpContext);
+static DWORD WINAPI serviceCtrl(DWORD dwControl,
+ DWORD dwEventType,
+ LPVOID lpEventData,
+ LPVOID lpContext);
void configureService(ServiceCallback serviceCallback,
const moe::Environment& params,
@@ -586,8 +588,10 @@ static void serviceShutdown(const char* controlCodeName) {
// Note: we will report exit status in initService
}
-static DWORD WINAPI
-serviceCtrl(DWORD dwControl, DWORD dwEventType, LPVOID lpEventData, LPVOID lpContext) {
+static DWORD WINAPI serviceCtrl(DWORD dwControl,
+ DWORD dwEventType,
+ LPVOID lpEventData,
+ LPVOID lpContext) {
switch (dwControl) {
case SERVICE_CONTROL_INTERROGATE:
// Return NO_ERROR per MSDN even though we do nothing for this control code.
diff --git a/src/mongo/util/options_parser/environment_test.cpp b/src/mongo/util/options_parser/environment_test.cpp
index e87267c013e..b5b7b774b1c 100644
--- a/src/mongo/util/options_parser/environment_test.cpp
+++ b/src/mongo/util/options_parser/environment_test.cpp
@@ -27,9 +27,9 @@
#include "mongo/bson/util/builder.h"
+#include "mongo/unittest/unittest.h"
#include "mongo/util/options_parser/constraints.h"
#include "mongo/util/options_parser/environment.h"
-#include "mongo/unittest/unittest.h"
namespace {
@@ -146,7 +146,9 @@ TEST(ToBSONTests, DottedValues) {
ASSERT_OK(environment.set(moe::Key("val2"), moe::Value(true)));
ASSERT_OK(environment.set(moe::Key("val1.dotted2"), moe::Value(std::string("string"))));
mongo::BSONObj obj = BSON("val1" << BSON("dotted1" << 6 << "dotted2"
- << "string") << "val2" << true);
+ << "string")
+ << "val2"
+ << true);
// TODO: Put a comparison here that doesn't depend on the field order. Right now it is
// based on the sort order of keys in a std::map.
ASSERT_EQUALS(obj, environment.toBSON());
@@ -161,9 +163,12 @@ TEST(ToBSONTests, DeepDottedValues) {
ASSERT_OK(environment.set(moe::Key("val2"), moe::Value(6.0)));
mongo::BSONObj obj =
BSON("val1" << BSON("first1" << BSON("second1" << BSON("third1" << 6 << "third2" << true)
- << "second2" << BSON("third1" << false))
+ << "second2"
+ << BSON("third1" << false))
<< "first2"
- << "string") << "val2" << 6.0);
+ << "string")
+ << "val2"
+ << 6.0);
// TODO: Put a comparison here that doesn't depend on the field order. Right now it is
// based on the sort order of keys in a std::map.
ASSERT_EQUALS(obj, environment.toBSON());
diff --git a/src/mongo/util/options_parser/options_parser.cpp b/src/mongo/util/options_parser/options_parser.cpp
index ff2bcb49a16..5535533a3a8 100644
--- a/src/mongo/util/options_parser/options_parser.cpp
+++ b/src/mongo/util/options_parser/options_parser.cpp
@@ -29,8 +29,8 @@
#include "mongo/util/options_parser/options_parser.h"
-#include <boost/program_options.hpp>
#include <algorithm>
+#include <boost/program_options.hpp>
#include <cerrno>
#include <fstream>
#include <stdio.h>
@@ -40,8 +40,8 @@
#include "mongo/base/status.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
-#include "mongo/util/mongoutils/str.h"
#include "mongo/util/log.h"
+#include "mongo/util/mongoutils/str.h"
#include "mongo/util/options_parser/constraints.h"
#include "mongo/util/options_parser/environment.h"
#include "mongo/util/options_parser/option_description.h"
diff --git a/src/mongo/util/options_parser/options_parser_test.cpp b/src/mongo/util/options_parser/options_parser_test.cpp
index 79de691ea17..dba5910d395 100644
--- a/src/mongo/util/options_parser/options_parser_test.cpp
+++ b/src/mongo/util/options_parser/options_parser_test.cpp
@@ -205,8 +205,8 @@ TEST(Registration, ComposableWithImplicit) {
try {
std::vector<std::string> implicitVal;
implicitVal.push_back("implicit");
- testOpts.addOptionChaining(
- "setParameter", "setParameter", moe::StringVector, "Multiple Values")
+ testOpts
+ .addOptionChaining("setParameter", "setParameter", moe::StringVector, "Multiple Values")
.setImplicit(moe::Value(implicitVal))
.composing();
FAIL("Was able to register composable option with implicit value");
@@ -216,8 +216,8 @@ TEST(Registration, ComposableWithImplicit) {
try {
std::vector<std::string> implicitVal;
implicitVal.push_back("implicit");
- testOpts.addOptionChaining(
- "setParameter", "setParameter", moe::StringVector, "Multiple Values")
+ testOpts
+ .addOptionChaining("setParameter", "setParameter", moe::StringVector, "Multiple Values")
.composing()
.setImplicit(moe::Value(implicitVal));
FAIL("Was able to set implicit value on composable option");
@@ -230,8 +230,8 @@ TEST(Registration, ComposableWithDefault) {
try {
std::vector<std::string> defaultVal;
defaultVal.push_back("default");
- testOpts.addOptionChaining(
- "setParameter", "setParameter", moe::StringVector, "Multiple Values")
+ testOpts
+ .addOptionChaining("setParameter", "setParameter", moe::StringVector, "Multiple Values")
.setDefault(moe::Value(defaultVal))
.composing();
FAIL("Was able to register composable option with default value");
@@ -241,8 +241,8 @@ TEST(Registration, ComposableWithDefault) {
try {
std::vector<std::string> defaultVal;
defaultVal.push_back("default");
- testOpts.addOptionChaining(
- "setParameter", "setParameter", moe::StringVector, "Multiple Values")
+ testOpts
+ .addOptionChaining("setParameter", "setParameter", moe::StringVector, "Multiple Values")
.composing()
.setDefault(moe::Value(defaultVal));
FAIL("Was able to set default value on composable option");
@@ -867,8 +867,9 @@ TEST(Style, Verbosity) {
/* support for -vv -vvvv etc. */
for (std::string s = "vv"; s.length() <= 12; s.append("v")) {
- testOpts.addOptionChaining(
- s.c_str(), s.c_str(), moe::Switch, "higher verbosity levels (hidden)")
+ testOpts
+ .addOptionChaining(
+ s.c_str(), s.c_str(), moe::Switch, "higher verbosity levels (hidden)")
.hidden();
}
@@ -3782,8 +3783,7 @@ TEST(YAMLConfigFile, DeprecatedDottedNameMultipleDeprecated) {
std::map<std::string, std::string> env_map;
std::stringstream ss;
- ss << deprecatedDottedNames[0] << ": 6" << std::endl
- << deprecatedDottedNames[1] << ": 7";
+ ss << deprecatedDottedNames[0] << ": 6" << std::endl << deprecatedDottedNames[1] << ": 7";
parser.setConfig("config.yaml", ss.str());
ASSERT_NOT_OK(parser.run(testOpts, argv, env_map, &environment));
diff --git a/src/mongo/util/platform_init.cpp b/src/mongo/util/platform_init.cpp
index fa34f7a820d..3ececf3eb0f 100644
--- a/src/mongo/util/platform_init.cpp
+++ b/src/mongo/util/platform_init.cpp
@@ -31,10 +31,10 @@
#include "mongo/platform/basic.h"
#ifdef _WIN32
-#include <mmsystem.h>
#include <crtdbg.h>
-#include <stdlib.h>
+#include <mmsystem.h>
#include <stdio.h>
+#include <stdlib.h>
#endif
#include "mongo/base/init.h"
diff --git a/src/mongo/util/processinfo.cpp b/src/mongo/util/processinfo.cpp
index baaa874eaff..434b2e766cb 100644
--- a/src/mongo/util/processinfo.cpp
+++ b/src/mongo/util/processinfo.cpp
@@ -34,8 +34,8 @@
#include "mongo/base/init.h"
#include "mongo/util/processinfo.h"
-#include <iostream>
#include <fstream>
+#include <iostream>
#include "mongo/util/log.h"
@@ -89,9 +89,8 @@ void ProcessInfo::initializeSystemInfo() {
 * We need this to get the system page size for the secure allocator, which the enterprise modules need
* for storage for command line parameters.
*/
-MONGO_INITIALIZER_GENERAL(SystemInfo,
- MONGO_NO_PREREQUISITES,
- MONGO_NO_DEPENDENTS)(InitializerContext* context) {
+MONGO_INITIALIZER_GENERAL(SystemInfo, MONGO_NO_PREREQUISITES, MONGO_NO_DEPENDENTS)
+(InitializerContext* context) {
ProcessInfo::initializeSystemInfo();
return Status::OK();
}
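
The hunk above shows how clang-format handles a function whose header is partly produced by a macro: the macro invocation stays on one line and the parameter list of the emitted function begins on the next. A minimal sketch under that assumption (MY_INITIALIZER is hypothetical; it merely pastes together a function name the way MONGO_INITIALIZER_GENERAL does):

    struct InitializerContext;

    // Hypothetical macro that expands to a function header's name and return type.
    #define MY_INITIALIZER(NAME) int initialize_##NAME

    MY_INITIALIZER(SystemInfo)
    (InitializerContext* context) {
        // Stub body; the real macro wires this into the initializer framework.
        return 0;
    }
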
diff --git a/src/mongo/util/processinfo_freebsd.cpp b/src/mongo/util/processinfo_freebsd.cpp
index f335addfd9e..d8a8d5edce7 100644
--- a/src/mongo/util/processinfo_freebsd.cpp
+++ b/src/mongo/util/processinfo_freebsd.cpp
@@ -41,8 +41,8 @@
#include <unistd.h>
#include <vm/vm_param.h>
-#include "mongo/util/scopeguard.h"
#include "mongo/util/log.h"
+#include "mongo/util/scopeguard.h"
#include "processinfo.h"
using namespace std;
diff --git a/src/mongo/util/processinfo_linux.cpp b/src/mongo/util/processinfo_linux.cpp
index 4bebeb5189c..c831e135f0d 100644
--- a/src/mongo/util/processinfo_linux.cpp
+++ b/src/mongo/util/processinfo_linux.cpp
@@ -29,25 +29,25 @@
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kControl
-#include <malloc.h>
#include <iostream>
+#include <malloc.h>
#include <sched.h>
#include <stdio.h>
-#include <unistd.h>
#include <sys/mman.h>
#include <sys/utsname.h>
+#include <unistd.h>
#ifdef __UCLIBC__
#include <features.h>
#else
#include <gnu/libc-version.h>
#endif
-#include "processinfo.h"
#include "boost/filesystem.hpp"
+#include "mongo/util/log.h"
+#include "processinfo.h"
#include <boost/none.hpp>
#include <boost/optional.hpp>
#include <mongo/util/file.h>
-#include "mongo/util/log.h"
using namespace std;
diff --git a/src/mongo/util/processinfo_openbsd.cpp b/src/mongo/util/processinfo_openbsd.cpp
index 7bbd2a15c2d..8974dbb6ea8 100644
--- a/src/mongo/util/processinfo_openbsd.cpp
+++ b/src/mongo/util/processinfo_openbsd.cpp
@@ -40,8 +40,8 @@
#include <sys/vmmeter.h>
#include <unistd.h>
-#include "mongo/util/scopeguard.h"
#include "mongo/util/log.h"
+#include "mongo/util/scopeguard.h"
#include "processinfo.h"
using namespace std;
diff --git a/src/mongo/util/processinfo_osx.cpp b/src/mongo/util/processinfo_osx.cpp
index f15601c5390..ad0c8f7d99d 100644
--- a/src/mongo/util/processinfo_osx.cpp
+++ b/src/mongo/util/processinfo_osx.cpp
@@ -34,19 +34,19 @@
#include <boost/none.hpp>
#include <boost/optional.hpp>
-#include <mach/vm_statistics.h>
-#include <mach/task_info.h>
-#include <mach/mach_init.h>
+#include <iostream>
#include <mach/mach_host.h>
+#include <mach/mach_init.h>
#include <mach/mach_traps.h>
+#include <mach/shared_region.h>
#include <mach/task.h>
+#include <mach/task_info.h>
#include <mach/vm_map.h>
-#include <mach/shared_region.h>
-#include <iostream>
+#include <mach/vm_statistics.h>
-#include <sys/types.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
+#include <sys/types.h>
#include "mongo/db/jsobj.h"
#include "mongo/util/log.h"
diff --git a/src/mongo/util/processinfo_solaris.cpp b/src/mongo/util/processinfo_solaris.cpp
index 6fa36cf48ca..53fda06efad 100644
--- a/src/mongo/util/processinfo_solaris.cpp
+++ b/src/mongo/util/processinfo_solaris.cpp
@@ -70,15 +70,15 @@ struct ProcPsinfo {
ProcPsinfo() {
FILE* f = fopen("/proc/self/psinfo", "r");
massert(16846,
- mongoutils::str::stream()
- << "couldn't open \"/proc/self/psinfo\": " << errnoWithDescription(),
+ mongoutils::str::stream() << "couldn't open \"/proc/self/psinfo\": "
+ << errnoWithDescription(),
f);
size_t num = fread(&psinfo, sizeof(psinfo), 1, f);
int err = errno;
fclose(f);
massert(16847,
- mongoutils::str::stream()
- << "couldn't read from \"/proc/self/psinfo\": " << errnoWithDescription(err),
+ mongoutils::str::stream() << "couldn't read from \"/proc/self/psinfo\": "
+ << errnoWithDescription(err),
num == 1);
}
psinfo_t psinfo;
@@ -88,15 +88,15 @@ struct ProcUsage {
ProcUsage() {
FILE* f = fopen("/proc/self/usage", "r");
massert(16848,
- mongoutils::str::stream()
- << "couldn't open \"/proc/self/usage\": " << errnoWithDescription(),
+ mongoutils::str::stream() << "couldn't open \"/proc/self/usage\": "
+ << errnoWithDescription(),
f);
size_t num = fread(&prusage, sizeof(prusage), 1, f);
int err = errno;
fclose(f);
massert(16849,
- mongoutils::str::stream()
- << "couldn't read from \"/proc/self/usage\": " << errnoWithDescription(err),
+ mongoutils::str::stream() << "couldn't read from \"/proc/self/usage\": "
+ << errnoWithDescription(err),
num == 1);
}
prusage_t prusage;
diff --git a/src/mongo/util/processinfo_test.cpp b/src/mongo/util/processinfo_test.cpp
index f84e7a38c35..76f7e7ee551 100644
--- a/src/mongo/util/processinfo_test.cpp
+++ b/src/mongo/util/processinfo_test.cpp
@@ -32,8 +32,8 @@
#include <iostream>
#include <vector>
-#include "mongo/util/processinfo.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/processinfo.h"
using mongo::ProcessInfo;
using boost::optional;
diff --git a/src/mongo/util/processinfo_windows.cpp b/src/mongo/util/processinfo_windows.cpp
index 7fa9a7e3943..0e9cb8cc578 100644
--- a/src/mongo/util/processinfo_windows.cpp
+++ b/src/mongo/util/processinfo_windows.cpp
@@ -37,8 +37,8 @@
#include <iostream>
#include <psapi.h>
-#include "mongo/util/processinfo.h"
#include "mongo/util/log.h"
+#include "mongo/util/processinfo.h"
using namespace std;
using std::unique_ptr;
diff --git a/src/mongo/util/progress_meter_test.cpp b/src/mongo/util/progress_meter_test.cpp
index 7dbada064d0..619f754e9ee 100644
--- a/src/mongo/util/progress_meter_test.cpp
+++ b/src/mongo/util/progress_meter_test.cpp
@@ -28,8 +28,8 @@
#include "mongo/platform/basic.h"
-#include "mongo/util/progress_meter.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/progress_meter.h"
namespace {
diff --git a/src/mongo/util/safe_num-inl.h b/src/mongo/util/safe_num-inl.h
index f60ade9f9f9..8015a600760 100644
--- a/src/mongo/util/safe_num-inl.h
+++ b/src/mongo/util/safe_num-inl.h
@@ -70,7 +70,7 @@ inline SafeNum SafeNum::operator+(const SafeNum& rhs) const {
}
inline SafeNum& SafeNum::operator+=(const SafeNum& rhs) {
- return * this = addInternal(*this, rhs);
+ return *this = addInternal(*this, rhs);
}
inline SafeNum SafeNum::operator*(const SafeNum& rhs) const {
@@ -78,7 +78,7 @@ inline SafeNum SafeNum::operator*(const SafeNum& rhs) const {
}
inline SafeNum& SafeNum::operator*=(const SafeNum& rhs) {
- return * this = mulInternal(*this, rhs);
+ return *this = mulInternal(*this, rhs);
}
inline SafeNum SafeNum::bitAnd(const SafeNum& rhs) const {
@@ -90,7 +90,7 @@ inline SafeNum SafeNum::operator&(const SafeNum& rhs) const {
}
inline SafeNum& SafeNum::operator&=(const SafeNum& rhs) {
- return * this = bitAnd(rhs);
+ return *this = bitAnd(rhs);
}
inline SafeNum SafeNum::bitOr(const SafeNum& rhs) const {
@@ -102,7 +102,7 @@ inline SafeNum SafeNum::operator|(const SafeNum& rhs) const {
}
inline SafeNum& SafeNum::operator|=(const SafeNum& rhs) {
- return * this = bitOr(rhs);
+ return *this = bitOr(rhs);
}
inline SafeNum SafeNum::bitXor(const SafeNum& rhs) const {
@@ -114,7 +114,7 @@ inline SafeNum SafeNum::operator^(const SafeNum& rhs) const {
}
inline SafeNum& SafeNum::operator^=(const SafeNum& rhs) {
- return * this = bitXor(rhs);
+ return *this = bitXor(rhs);
}
inline bool SafeNum::isValid() const {
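
The compound-assignment operators above all reduce to one idiom: compute a fresh value with the binary helper, assign it through *this, and return the reference so calls can chain. A minimal self-contained sketch (Num and addInternal are hypothetical stand-ins for SafeNum's internals):

    struct Num {
        long long v;
        friend Num addInternal(const Num& a, const Num& b) { return Num{a.v + b.v}; }
        // clang-format keeps the dereference attached ("*this"), not spaced as "* this".
        Num& operator+=(const Num& rhs) { return *this = addInternal(*this, rhs); }
    };
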
diff --git a/src/mongo/util/safe_num_test.cpp b/src/mongo/util/safe_num_test.cpp
index 4086729e9c6..57b30efb807 100644
--- a/src/mongo/util/safe_num_test.cpp
+++ b/src/mongo/util/safe_num_test.cpp
@@ -30,11 +30,11 @@
#include "mongo/platform/basic.h"
#undef MONGO_PCH_WHITELISTED // for malloc/realloc pulled from bson
-#include "mongo/bson/bsontypes.h"
#include "mongo/bson/bsonobj.h"
+#include "mongo/bson/bsontypes.h"
#include "mongo/platform/decimal128.h"
-#include "mongo/util/safe_num.h"
#include "mongo/unittest/unittest.h"
+#include "mongo/util/safe_num.h"
namespace {
diff --git a/src/mongo/util/signal_handlers_synchronous.cpp b/src/mongo/util/signal_handlers_synchronous.cpp
index fa383d97800..2f8996b3c36 100644
--- a/src/mongo/util/signal_handlers_synchronous.cpp
+++ b/src/mongo/util/signal_handlers_synchronous.cpp
@@ -36,8 +36,8 @@
#include <boost/exception/exception.hpp>
#include <csignal>
#include <exception>
-#include <iostream>
#include <fstream>
+#include <iostream>
#include <memory>
#include <streambuf>
#include <typeinfo>
diff --git a/src/mongo/util/signal_handlers_synchronous_test.cpp b/src/mongo/util/signal_handlers_synchronous_test.cpp
index f4104bafa16..51bc874f25a 100644
--- a/src/mongo/util/signal_handlers_synchronous_test.cpp
+++ b/src/mongo/util/signal_handlers_synchronous_test.cpp
@@ -31,11 +31,11 @@
#include "mongo/platform/basic.h"
#include <cmath>
-#include <cstdlib>
#include <csignal>
+#include <cstdlib>
-#include "mongo/unittest/unittest.h"
#include "mongo/unittest/death_test.h"
+#include "mongo/unittest/unittest.h"
#include "mongo/util/signal_handlers_synchronous.h"
namespace {
diff --git a/src/mongo/util/signal_win32.cpp b/src/mongo/util/signal_win32.cpp
index 95913067f7b..7407cfcb3e0 100644
--- a/src/mongo/util/signal_win32.cpp
+++ b/src/mongo/util/signal_win32.cpp
@@ -28,8 +28,8 @@
#include "mongo/platform/basic.h"
-#include <string>
#include "mongo/util/mongoutils/str.h"
+#include <string>
namespace mongo {
diff --git a/src/mongo/util/string_map_test.cpp b/src/mongo/util/string_map_test.cpp
index 60c888ca7af..5c0374e3cec 100644
--- a/src/mongo/util/string_map_test.cpp
+++ b/src/mongo/util/string_map_test.cpp
@@ -33,8 +33,8 @@
#include "mongo/platform/random.h"
#include "mongo/platform/unordered_map.h"
-#include "mongo/util/string_map.h"
#include "mongo/util/log.h"
+#include "mongo/util/string_map.h"
#include "mongo/util/timer.h"
namespace {
diff --git a/src/mongo/util/stringutils_test.cpp b/src/mongo/util/stringutils_test.cpp
index faabd5f5b8c..302fc5c3de3 100644
--- a/src/mongo/util/stringutils_test.cpp
+++ b/src/mongo/util/stringutils_test.cpp
@@ -29,8 +29,8 @@
#include "mongo/unittest/unittest.h"
-#include "mongo/util/stringutils.h"
#include "mongo/util/hex.h"
+#include "mongo/util/stringutils.h"
namespace mongo {
diff --git a/src/mongo/util/tcmalloc_set_parameter.cpp b/src/mongo/util/tcmalloc_set_parameter.cpp
index 951086ae4c1..130f77535cf 100644
--- a/src/mongo/util/tcmalloc_set_parameter.cpp
+++ b/src/mongo/util/tcmalloc_set_parameter.cpp
@@ -85,16 +85,18 @@ Status TcmallocNumericPropertyServerParameter::set(const BSONElement& newValueEl
return Status(ErrorCodes::TypeMismatch,
str::stream() << "Expected server parameter " << newValueElement.fieldName()
<< " to have numeric type, but found "
- << newValueElement.toString(false) << " of type "
+ << newValueElement.toString(false)
+ << " of type "
<< typeName(newValueElement.type()));
}
long long valueAsLongLong = newValueElement.safeNumberLong();
if (valueAsLongLong < 0 ||
static_cast<unsigned long long>(valueAsLongLong) > std::numeric_limits<size_t>::max()) {
- return Status(ErrorCodes::BadValue,
- str::stream()
- << "Value " << newValueElement.toString(false) << " is out of range for "
- << newValueElement.fieldName() << "; expected a value between 0 and "
+ return Status(
+ ErrorCodes::BadValue,
+ str::stream() << "Value " << newValueElement.toString(false) << " is out of range for "
+ << newValueElement.fieldName()
+ << "; expected a value between 0 and "
<< std::min<unsigned long long>(std::numeric_limits<size_t>::max(),
std::numeric_limits<long long>::max()));
}
@@ -128,7 +130,8 @@ TcmallocNumericPropertyServerParameter tcmallocAggressiveMemoryDecommit(
MONGO_INITIALIZER_GENERAL(TcmallocConfigurationDefaults,
("SystemInfo"),
- ("BeginStartupOptionHandling"))(InitializerContext*) {
+ ("BeginStartupOptionHandling"))
+(InitializerContext*) {
// Before processing the command line options, if the user has not specified a value via
// the environment, set tcmalloc.max_total_thread_cache_bytes to its default value.
if (getenv("TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES")) {
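
The Status messages above illustrate the stream-chain wrapping rule: when a << chain overflows the column limit, each subsequent operand drops to its own line, aligned under the first operand after the stream. A minimal sketch with std::ostringstream standing in for str::stream() and hypothetical names:

    #include <sstream>
    #include <string>

    std::string outOfRangeMessage(const std::string& field, long long value) {
        std::ostringstream ss;
        ss << "Value " << value << " is out of range for "
           << field
           << "; expected a value between 0 and the platform maximum";
        return ss.str();
    }
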
diff --git a/src/mongo/util/text.cpp b/src/mongo/util/text.cpp
index edb5c1a50b9..f8282cfdccb 100644
--- a/src/mongo/util/text.cpp
+++ b/src/mongo/util/text.cpp
@@ -275,7 +275,8 @@ bool writeUtf8ToWindowsConsole(const char* utf8String, unsigned int utf8StringSi
if (!errorMessageShown) {
std::cout << "\n---\nUnicode text could not be correctly displayed.\n"
"Please change your console font to a Unicode font "
- "(e.g. Lucida Console).\n---\n" << std::endl;
+ "(e.g. Lucida Console).\n---\n"
+ << std::endl;
errorMessageShown = true;
}
// we can't display the text properly using a raster font,
diff --git a/src/mongo/util/text.h b/src/mongo/util/text.h
index 844f9cf2db5..08c87742bad 100644
--- a/src/mongo/util/text.h
+++ b/src/mongo/util/text.h
@@ -30,8 +30,8 @@
#pragma once
-#include <vector>
#include <string>
+#include <vector>
#include "mongo/base/disallow_copying.h"
#include "mongo/config.h"
diff --git a/src/mongo/util/time_support.cpp b/src/mongo/util/time_support.cpp
index b26e0a07361..22c943b0818 100644
--- a/src/mongo/util/time_support.cpp
+++ b/src/mongo/util/time_support.cpp
@@ -32,8 +32,8 @@
#include <boost/thread/tss.hpp>
#include <cstdint>
#include <cstdio>
-#include <string>
#include <iostream>
+#include <string>
#include "mongo/base/init.h"
#include "mongo/base/parse_number.h"
@@ -43,10 +43,10 @@
#include "mongo/util/mongoutils/str.h"
#ifdef _WIN32
-#include <boost/date_time/filetime_functions.hpp>
#include "mongo/util/concurrency/mutex.h"
#include "mongo/util/system_tick_source.h"
#include "mongo/util/timer.h"
+#include <boost/date_time/filetime_functions.hpp>
#endif
#ifdef __sun
@@ -379,7 +379,7 @@ Status parseMillisFromToken(StringData millisStr, int* resultMillis) {
millisMagnitude = 100;
}
- *resultMillis = *resultMillis* millisMagnitude;
+ *resultMillis = *resultMillis * millisMagnitude;
if (*resultMillis < 0 || *resultMillis > 1000) {
StringBuilder sb;
@@ -826,8 +826,8 @@ static unsigned long long resyncInterval = 0;
static SimpleMutex _curTimeMicros64ReadMutex;
static SimpleMutex _curTimeMicros64ResyncMutex;
-typedef WINBASEAPI VOID(WINAPI* pGetSystemTimePreciseAsFileTime)(_Out_ LPFILETIME
- lpSystemTimeAsFileTime);
+typedef WINBASEAPI VOID(WINAPI* pGetSystemTimePreciseAsFileTime)(
+ _Out_ LPFILETIME lpSystemTimeAsFileTime);
static pGetSystemTimePreciseAsFileTime GetSystemTimePreciseAsFileTimeFunc;
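
The typedef above shows the wrap rule for long function-pointer declarations: rather than splitting a parameter across two lines, clang-format breaks immediately after the opening parenthesis of the parameter list. A minimal sketch with hypothetical names (no Windows headers required):

    // The whole parameter list moves to a continuation line as a unit.
    typedef void (*GetPreciseTimeLikeFn)(
        unsigned long long* outHundredNanosecondIntervalsSinceEpoch);
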
diff --git a/src/mongo/util/time_support.h b/src/mongo/util/time_support.h
index a445e9b6f47..df07e064dce 100644
--- a/src/mongo/util/time_support.h
+++ b/src/mongo/util/time_support.h
@@ -174,7 +174,7 @@ public:
template <typename Duration>
Date_t& operator-=(Duration d) {
- return * this += (-d);
+ return *this += (-d);
}
template <typename Duration>
diff --git a/src/mongo/util/unowned_ptr.h b/src/mongo/util/unowned_ptr.h
index fd7ef04157b..ecac433207f 100644
--- a/src/mongo/util/unowned_ptr.h
+++ b/src/mongo/util/unowned_ptr.h
@@ -58,20 +58,16 @@ struct unowned_ptr {
unowned_ptr(T* p) : _p(p) {}
template <typename U, typename = IfConvertibleFrom<U>>
- unowned_ptr(U* p)
- : _p(p) {}
+ unowned_ptr(U* p) : _p(p) {}
template <typename U, typename = IfConvertibleFrom<U>>
- unowned_ptr(const unowned_ptr<U>& p)
- : _p(p) {}
+ unowned_ptr(const unowned_ptr<U>& p) : _p(p) {}
template <typename U, typename = IfConvertibleFrom<U>>
- unowned_ptr(const std::unique_ptr<U>& p)
- : _p(p.get()) {}
+ unowned_ptr(const std::unique_ptr<U>& p) : _p(p.get()) {}
template <typename U, typename = IfConvertibleFrom<U>>
- unowned_ptr(const std::shared_ptr<U>& p)
- : _p(p.get()) {}
+ unowned_ptr(const std::shared_ptr<U>& p) : _p(p.get()) {}
//
// Modifiers
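
The constructors above follow a classic SFINAE pattern: each converting constructor is a template whose second, defaulted parameter only resolves when U* converts to T*, so unrelated pointer types never enter overload resolution. A minimal sketch of the same shape (view_ptr is a hypothetical stand-in for unowned_ptr):

    #include <memory>
    #include <type_traits>

    template <typename T>
    struct view_ptr {
        // Resolves only when a U* is implicitly convertible to a T*.
        template <typename U>
        using IfConvertibleFrom =
            typename std::enable_if<std::is_convertible<U*, T*>::value>::type;

        view_ptr() = default;
        view_ptr(T* p) : _p(p) {}

        template <typename U, typename = IfConvertibleFrom<U>>
        view_ptr(U* p) : _p(p) {}

        template <typename U, typename = IfConvertibleFrom<U>>
        view_ptr(const std::unique_ptr<U>& p) : _p(p.get()) {}

        T* _p = nullptr;  // non-owning; lifetime is managed elsewhere
    };
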
diff --git a/src/mongo/util/winutil.h b/src/mongo/util/winutil.h
index 5d4b727b111..a49fcc2f406 100644
--- a/src/mongo/util/winutil.h
+++ b/src/mongo/util/winutil.h
@@ -32,8 +32,8 @@
#pragma once
#if defined(_WIN32)
-#include <windows.h>
#include "text.h"
+#include <windows.h>
namespace mongo {
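
One caveat worth noting about the include sort applied throughout this patch: clang-format orders each contiguous include block lexicographically (here "text.h" sorts ahead of <windows.h> by ASCII) and has no notion of semantic ordering. Where a header genuinely must come first, the deliberate order can be fenced with clang-format's real on/off directives, as in this hedged sketch:

    // clang-format off
    #include <windows.h>   // must precede other Win32 headers
    #include <mmsystem.h>  // depends on declarations from windows.h
    // clang-format on
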